Search Results

Search found 7209 results on 289 pages for 'names'.


  • Blackjack game reshuffling problem-edited

    - by Jam
    I am trying to make a blackjack game where before each new round, the program checks to make sure that the deck has 7 cards per player. And if it doesn't, the deck clears, repopulates, and reshuffles. I have most of the problem down, but for some reason at the start of every deal it reshuffles the deck more than once, and I can't figure out why. Help, please. Here's what I have so far: (P.S. the imported cards and games modules aren't part of the problem, I'm fairly sure my problem lies in the deal() function of my BJ_Deck class.) import cards, games class BJ_Card(cards.Card): """ A Blackjack Card. """ ACE_VALUE = 1 def get_value(self): if self.is_face_up: value = BJ_Card.RANKS.index(self.rank) + 1 if value > 10: value = 10 else: value = None return value value = property(get_value) class BJ_Deck(cards.Deck): """ A Blackjack Deck. """ def populate(self): for suit in BJ_Card.SUITS: for rank in BJ_Card.RANKS: self.cards.append(BJ_Card(rank, suit)) def deal(self, hands, per_hand=1): for rounds in range(per_hand): if len(self.cards)>=7*(len(hands)): print "Reshuffling the deck." self.cards=[] self.populate() self.shuffle() for hand in hands: top_card=self.cards[0] self.give(top_card, hand) class BJ_Hand(cards.Hand): """ A Blackjack Hand. """ def __init__(self, name): super(BJ_Hand, self).__init__() self.name = name def __str__(self): rep = self.name + ":\t" + super(BJ_Hand, self).__str__() if self.total: rep += "(" + str(self.total) + ")" return rep def get_total(self): # if a card in the hand has value of None, then total is None for card in self.cards: if not card.value: return None # add up card values, treat each Ace as 1 total = 0 for card in self.cards: total += card.value # determine if hand contains an Ace contains_ace = False for card in self.cards: if card.value == BJ_Card.ACE_VALUE: contains_ace = True # if hand contains Ace and total is low enough, treat Ace as 11 if contains_ace and total <= 11: # add only 10 since we've already added 1 for the Ace total += 10 return total total = property(get_total) def is_busted(self): return self.total > 21 class BJ_Player(BJ_Hand): """ A Blackjack Player. """ def is_hitting(self): response = games.ask_yes_no("\n" + self.name + ", do you want a hit? (Y/N): ") return response == "y" def bust(self): print self.name, "busts." self.lose() def lose(self): print self.name, "loses." def win(self): print self.name, "wins." def push(self): print self.name, "pushes." class BJ_Dealer(BJ_Hand): """ A Blackjack Dealer. """ def is_hitting(self): return self.total < 17 def bust(self): print self.name, "busts." def flip_first_card(self): first_card = self.cards[0] first_card.flip() class BJ_Game(object): """ A Blackjack Game. 
""" def __init__(self, names): self.players = [] for name in names: player = BJ_Player(name) self.players.append(player) self.dealer = BJ_Dealer("Dealer") self.deck = BJ_Deck() self.deck.populate() self.deck.shuffle() def get_still_playing(self): remaining = [] for player in self.players: if not player.is_busted(): remaining.append(player) return remaining # list of players still playing (not busted) this round still_playing = property(get_still_playing) def __additional_cards(self, player): while not player.is_busted() and player.is_hitting(): self.deck.deal([player]) print player if player.is_busted(): player.bust() def play(self): # deal initial 2 cards to everyone self.deck.deal(self.players + [self.dealer], per_hand = 2) self.dealer.flip_first_card() # hide dealer's first card for player in self.players: print player print self.dealer # deal additional cards to players for player in self.players: self.__additional_cards(player) self.dealer.flip_first_card() # reveal dealer's first if not self.still_playing: # since all players have busted, just show the dealer's hand print self.dealer else: # deal additional cards to dealer print self.dealer self.__additional_cards(self.dealer) if self.dealer.is_busted(): # everyone still playing wins for player in self.still_playing: player.win() else: # compare each player still playing to dealer for player in self.still_playing: if player.total > self.dealer.total: player.win() elif player.total < self.dealer.total: player.lose() else: player.push() # remove everyone's cards for player in self.players: player.clear() self.dealer.clear() def main(): print "\t\tWelcome to Blackjack!\n" names = [] number = games.ask_number("How many players? (1 - 7): ", low = 1, high = 8) for i in range(number): name = raw_input("Enter player name: ") names.append(name) print game = BJ_Game(names) again = None while again != "n": game.play() again = games.ask_yes_no("\nDo you want to play again?: ") main() raw_input("\n\nPress the enter key to exit.") Since someone decided to call this 'psychic-debugging', I'll go ahead and tell you what the modules are then. Here's the cards module: class Card(object): """ A playing card. """ RANKS = ["A", "2", "3", "4", "5", "6", "7", "8", "9", "10", "J", "Q", "K"] SUITS = ["c", "d", "h", "s"] def __init__(self, rank, suit, face_up = True): self.rank = rank self.suit = suit self.is_face_up = face_up def __str__(self): if self.is_face_up: rep = self.rank + self.suit else: rep = "XX" return rep def flip(self): self.is_face_up = not self.is_face_up class Hand(object): """ A hand of playing cards. """ def init(self): self.cards = [] def __str__(self): if self.cards: rep = "" for card in self.cards: rep += str(card) + "\t" else: rep = "<empty>" return rep def clear(self): self.cards = [] def add(self, card): self.cards.append(card) def give(self, card, other_hand): self.cards.remove(card) other_hand.add(card) class Deck(Hand): """ A deck of playing cards. """ def populate(self): for suit in Card.SUITS: for rank in Card.RANKS: self.add(Card(rank, suit)) def shuffle(self): import random random.shuffle(self.cards) def deal(self, hands, per_hand = 1): for rounds in range(per_hand): for hand in hands: if self.cards: top_card = self.cards[0] self.give(top_card, hand) else: print "Can't continue deal. Out of cards!" if name == "main": print "This is a module with classes for playing cards." raw_input("\n\nPress the enter key to exit.") And here's the games module: class Player(object): """ A player for a game. 
""" def __init__(self, name, score = 0): self.name = name self.score = score def __str__(self): rep = self.name + ":\t" + str(self.score) return rep def ask_yes_no(question): """Ask a yes or no question.""" response = None while response not in ("y", "n"): response = raw_input(question).lower() return response def ask_number(question, low, high): """Ask for a number within a range.""" response = None while response not in range(low, high): response = int(raw_input(question)) return response if name == "main": print "You ran this module directly (and did not 'import' it)." raw_input("\n\nPress the enter key to exit.")

    Read the article

  • web service not working on GlassFish

    - by Gunjan Shah
    I am generating web service client in Eclipse Helios by Axis 1.4 version. The client stubs are working fine as per the expectation by using local main programs. But When I deploy the stub and application on GlassFish Server, I am getting the following exception : [#|2012-10-16T03:36:12.166-0700|SEVERE|glassfish3.1|javax.enterprise.system.std.com.sun.enterprise.server.logging|_ThreadID=101;_ThreadName=Thread-1;|java.lang.IllegalStateException: WEB9031: WebappClassLoader unable to load resource [META-INF/services/org.apache.axis.EngineConfigurationFactory], because it has not yet been started, or was already stopped at org.glassfish.web.loader.WebappClassLoader.findResourceInternal(WebappClassLoader.java:2074) at org.glassfish.web.loader.WebappClassLoader.findResource(WebappClassLoader.java:1034) at org.glassfish.web.loader.WebappClassLoader.getResource(WebappClassLoader.java:1169) at org.glassfish.web.loader.WebappClassLoader.getResource(WebappClassLoader.java:1135) at org.apache.commons.discovery.jdk.JDK12Hooks.getResources(JDK12Hooks.java:149) at org.apache.commons.discovery.resource.DiscoverResources$1.getNextResources(DiscoverResources.java:153) at org.apache.commons.discovery.resource.DiscoverResources$1.getNextResource(DiscoverResources.java:129) at org.apache.commons.discovery.resource.DiscoverResources$1.hasNext(DiscoverResources.java:116) at org.apache.commons.discovery.resource.names.DiscoverNamesInFile$1.getNextClassNames(DiscoverNamesInFile.java:186) at org.apache.commons.discovery.resource.names.DiscoverNamesInFile$1.getNextClassName(DiscoverNamesInFile.java:170) at org.apache.commons.discovery.resource.names.DiscoverNamesInFile$1.hasNext(DiscoverNamesInFile.java:157) at org.apache.commons.discovery.resource.names.NameDiscoverers$1.getNextIterator(NameDiscoverers.java:143) at org.apache.commons.discovery.resource.names.NameDiscoverers$1.hasNext(NameDiscoverers.java:126) at org.apache.commons.discovery.resource.classes.ResourceClassDiscoverImpl$1.getNextResource(ResourceClassDiscoverImpl.java:159) at org.apache.commons.discovery.resource.classes.ResourceClassDiscoverImpl$1.hasNext(ResourceClassDiscoverImpl.java:147) at org.apache.axis.configuration.EngineConfigurationFactoryFinder$1.run(EngineConfigurationFactoryFinder.java:120) at java.security.AccessController.doPrivileged(Native Method) at org.apache.axis.configuration.EngineConfigurationFactoryFinder.newFactory(EngineConfigurationFactoryFinder.java:113) at org.apache.axis.configuration.EngineConfigurationFactoryFinder.newFactory(EngineConfigurationFactoryFinder.java:160) at org.apache.axis.client.Service.getEngineConfiguration(Service.java:813) at org.apache.axis.client.Service.getAxisClient(Service.java:104) at org.apache.axis.client.Service.<init>(Service.java:113) at com.payback.mobile.GreenCardServiceLocator.<init>(GreenCardServiceLocator.java:12) at com.pbgc.web.service.client.PentaloonServiceClient.getGreenCardService(PentaloonServiceClient.java:50) at com.pbgc.web.service.provider.LoginService.authenticateUser(LoginService.java:30) at com.pbgc.web.action.LoginAction.doLogin(LoginAction.java:44) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25) at java.lang.reflect.Method.invoke(Method.java:597) at com.opensymphony.xwork2.DefaultActionInvocation.invokeAction(DefaultActionInvocation.java:452) at 
com.opensymphony.xwork2.DefaultActionInvocation.invokeActionOnly(DefaultActionInvocation.java:291) at com.opensymphony.xwork2.DefaultActionInvocation.invoke(DefaultActionInvocation.java:254) at com.pbgc.web.interceptor.SecurityManager.intercept(SecurityManager.java:45) at com.opensymphony.xwork2.DefaultActionInvocation.invoke(DefaultActionInvocation.java:248) at com.opensymphony.xwork2.interceptor.DefaultWorkflowInterceptor.doIntercept(DefaultWorkflowInterceptor.java:176) at com.opensymphony.xwork2.interceptor.MethodFilterInterceptor.intercept(MethodFilterInterceptor.java:98) at com.opensymphony.xwork2.DefaultActionInvocation.invoke(DefaultActionInvocation.java:248) at com.opensymphony.xwork2.interceptor.ConversionErrorInterceptor.intercept(ConversionErrorInterceptor.java:133) at com.opensymphony.xwork2.DefaultActionInvocation.invoke(DefaultActionInvocation.java:248) at com.opensymphony.xwork2.interceptor.ParametersInterceptor.doIntercept(ParametersInterceptor.java:207) at com.opensymphony.xwork2.interceptor.MethodFilterInterceptor.intercept(MethodFilterInterceptor.java:98) at com.opensymphony.xwork2.DefaultActionInvocation.invoke(DefaultActionInvocation.java:248) at com.opensymphony.xwork2.interceptor.ParametersInterceptor.doIntercept(ParametersInterceptor.java:207) at com.opensymphony.xwork2.interceptor.MethodFilterInterceptor.intercept(MethodFilterInterceptor.java:98) at com.opensymphony.xwork2.DefaultActionInvocation.invoke(DefaultActionInvocation.java:248) at com.opensymphony.xwork2.interceptor.StaticParametersInterceptor.intercept(StaticParametersInterceptor.java:190) at com.opensymphony.xwork2.DefaultActionInvocation.invoke(DefaultActionInvocation.java:248) at org.apache.struts2.interceptor.MultiselectInterceptor.intercept(MultiselectInterceptor.java:75) at com.opensymphony.xwork2.DefaultActionInvocation.invoke(DefaultActionInvocation.java:248) at org.apache.struts2.interceptor.CheckboxInterceptor.intercept(CheckboxInterceptor.java:94) at com.opensymphony.xwork2.DefaultActionInvocation.invoke(DefaultActionInvocation.java:248) at com.opensymphony.xwork2.interceptor.ChainingInterceptor.intercept(ChainingInterceptor.java:145) at com.opensymphony.xwork2.DefaultActionInvocation.invoke(DefaultActionInvocation.java:248) at com.opensymphony.xwork2.interceptor.PrepareInterceptor.doIntercept(PrepareInterceptor.java:171) at com.opensymphony.xwork2.interceptor.MethodFilterInterceptor.intercept(MethodFilterInterceptor.java:98) at com.opensymphony.xwork2.DefaultActionInvocation.invoke(DefaultActionInvocation.java:248) at org.apache.struts2.interceptor.ServletConfigInterceptor.intercept(ServletConfigInterceptor.java:164) at com.opensymphony.xwork2.DefaultActionInvocation.invoke(DefaultActionInvocation.java:248) at com.opensymphony.xwork2.interceptor.ExceptionMappingInterceptor.intercept(ExceptionMappingInterceptor.java:187) at com.opensymphony.xwork2.DefaultActionInvocation.invoke(DefaultActionInvocation.java:248) at org.apache.struts2.impl.StrutsActionProxy.execute(StrutsActionProxy.java:52) at org.apache.struts2.dispatcher.Dispatcher.serviceAction(Dispatcher.java:498) at org.apache.struts2.dispatcher.ng.ExecuteOperations.executeAction(ExecuteOperations.java:77) at org.apache.struts2.dispatcher.ng.filter.StrutsPrepareAndExecuteFilter.doFilter(StrutsPrepareAndExecuteFilter.java:91) at org.apache.catalina.core.ApplicationFilterChain.internalDoFilter(ApplicationFilterChain.java:256) at org.apache.catalina.core.ApplicationFilterChain.doFilter(ApplicationFilterChain.java:215) at 
org.apache.catalina.core.StandardWrapperValve.invoke(StandardWrapperValve.java:279) at org.apache.catalina.core.StandardContextValve.invoke(StandardContextValve.java:175) at org.apache.catalina.core.StandardPipeline.doInvoke(StandardPipeline.java:655) at org.apache.catalina.core.StandardPipeline.invoke(StandardPipeline.java:595) at com.sun.enterprise.web.WebPipeline.invoke(WebPipeline.java:98) at com.sun.enterprise.web.PESessionLockingStandardPipeline.invoke(PESessionLockingStandardPipeline.java:91) at org.apache.catalina.core.StandardHostValve.invoke(StandardHostValve.java:162) at org.apache.catalina.connector.CoyoteAdapter.doService(CoyoteAdapter.java:326) at org.apache.catalina.connector.CoyoteAdapter.service(CoyoteAdapter.java:227) at com.sun.enterprise.v3.services.impl.ContainerMapper.service(ContainerMapper.java:228) at com.sun.grizzly.http.ProcessorTask.invokeAdapter(ProcessorTask.java:822) at com.sun.grizzly.http.ProcessorTask.doProcess(ProcessorTask.java:719) at com.sun.grizzly.http.ProcessorTask.process(ProcessorTask.java:1013) at com.sun.grizzly.http.DefaultProtocolFilter.execute(DefaultProtocolFilter.java:225) at com.sun.grizzly.DefaultProtocolChain.executeProtocolFilter(DefaultProtocolChain.java:137) at com.sun.grizzly.DefaultProtocolChain.execute(DefaultProtocolChain.java:104) at com.sun.grizzly.DefaultProtocolChain.execute(DefaultProtocolChain.java:90) at co|#] [#|2012-10-16T03:36:12.166-0700|SEVERE|glassfish3.1|javax.enterprise.system.std.com.sun.enterprise.server.logging|_ThreadID=101;_ThreadName=Thread-1;|m.sun.grizzly.http.HttpProtocolChain.execute(HttpProtocolChain.java:79) at com.sun.grizzly.ProtocolChainContextTask.doCall(ProtocolChainContextTask.java:54) at com.sun.grizzly.SelectionKeyContextTask.call(SelectionKeyContextTask.java:59) at com.sun.grizzly.ContextTask.run(ContextTask.java:71) at com.sun.grizzly.util.AbstractThreadPool$Worker.doWork(AbstractThreadPool.java:532) at com.sun.grizzly.util.AbstractThreadPool$Worker.run(AbstractThreadPool.java:513) at java.lang.Thread.run(Thread.java:619) |#] Can anyone tell me, why its happening ? Its happening only when I deploy the application on GlassFish server. Thanks, Gunjan.
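
    (Not an answer from the thread, just an illustration.) The call failing in the trace is Axis's commons-discovery lookup of META-INF/services/org.apache.axis.EngineConfigurationFactory, which runs whenever a Service/locator is constructed without an explicit configuration. One commonly suggested way to sidestep that lookup is to hand the generated locator an EngineConfiguration yourself. The sketch below is an assumption-laden example: it assumes the locator was generated by Axis 1.4 WSDL2Java with the constructors it normally emits, and that a client-config.wsdd can be read by the application; the class name is taken from the stack trace:

        import org.apache.axis.EngineConfiguration;
        import org.apache.axis.configuration.FileProvider;

        public class GreenCardClientFactory {
            // Supply the Axis engine configuration explicitly instead of letting
            // Axis discover it through META-INF/services at request time.
            public static com.payback.mobile.GreenCardServiceLocator newLocator() {
                // The location/packaging of client-config.wsdd is an assumption here.
                EngineConfiguration config = new FileProvider("client-config.wsdd");
                // WSDL2Java-generated locators normally expose this constructor.
                return new com.payback.mobile.GreenCardServiceLocator(config);
            }
        }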

    Read the article

  • RIF PRD: Presentation syntax issues

    - by Charles Young
    Over Christmas I got to play a bit with the W3C RIF PRD and came across a few issues which I thought I would record for posterity. Specifically, I was working on a grammar for the presentation syntax using a GLR grammar parser tool (I was using the current CTP of ‘M’ (MGrammer) and Intellipad – I do so hope the MS guys don’t kill off M and Intellipad now they have dropped the other parts of SQL Server Modelling). I realise that the presentation syntax is non-normative and that any issues with it do not therefore compromise the standard. However, presentation syntax is useful in its own right, and it would be great to iron out any issues in a future revision of the standard. The main issues are actually not to do with the grammar at all, but rather with the ‘running example’ in the RIF PRD recommendation. I started with the code provided in Example 9.1. There are several discrepancies when compared with the EBNF rules documented in the standard. Broadly the problems can be categorised as follows: ·      Parenthesis mismatch – the wrong number of parentheses are used in various places. For example, in GoldRule, the RHS of the rule (the ‘Then’) is nested in the LHS (‘the If’). In NewCustomerAndWidgetRule, the RHS is orphaned from the LHS. Together with additional incorrect parenthesis, this leads to orphanage of UnknownStatusRule from the entire Document. ·      Invalid use of parenthesis in ‘Forall’ constructs. Parenthesis should not be used to enclose formulae. Removal of the invalid parenthesis gave me a feeling of inconsistency when comparing formulae in Forall to formulae in If. The use of parenthesis is not actually inconsistent in these two context, but in an If construct it ‘feels’ as if you are enclosing formulae in parenthesis in a LISP-like fashion. In reality, the parenthesis is simply being used to group subordinate syntax elements. The fact that an If construct can contain only a single formula as an immediate child adds to this feeling of inconsistency. ·      Invalid representation of compact URIs (CURIEs) in the context of Frame productions. In several places the URIs are not qualified with a namespace prefix (‘ex1:’). This conflicts with the definition of CURIEs in the RIF Datatypes and Built-Ins 1.0 document. Here are the productions: CURIE          ::= PNAME_LN                  | PNAME_NS PNAME_LN       ::= PNAME_NS PN_LOCAL PNAME_NS       ::= PN_PREFIX? ':' PN_LOCAL       ::= ( PN_CHARS_U | [0-9] ) ((PN_CHARS|'.')* PN_CHARS)? PN_CHARS       ::= PN_CHARS_U                  | '-' | [0-9] | #x00B7                  | [#x0300-#x036F] | [#x203F-#x2040] PN_CHARS_U     ::= PN_CHARS_BASE                  | '_' PN_CHARS_BASE ::= [A-Z] | [a-z] | [#x00C0-#x00D6] | [#x00D8-#x00F6]                  | [#x00F8-#x02FF] | [#x0370-#x037D] | [#x037F-#x1FFF]                  | [#x200C-#x200D] | [#x2070-#x218F] | [#x2C00-#x2FEF]                  | [#x3001-#xD7FF] | [#xF900-#xFDCF] | [#xFDF0-#xFFFD]                  | [#x10000-#xEFFFF] PN_PREFIX      ::= PN_CHARS_BASE ((PN_CHARS|'.')* PN_CHARS)? The more I look at CURIEs, the more my head hurts! The RIF specification allows prefixes and colons without local names, which surprised me. However, the CURIE Syntax 1.0 working group note specifically states that this form is supported…and then promptly provides a syntactic definition that seems to preclude it! However, on (much) deeper inspection, it appears that ‘ex1:’ (for example) is allowed, but would really represent a ‘fragment’ of the ‘reference’, rather than a prefix! Ouch! 
This is so completely ambiguous that it surely calls into question the whole CURIE specification.   In any case, RIF does not allow local names without a prefix. ·      Missing ‘External’ specifiers for built-in functions and predicates.  The EBNF specification enforces this for terms within frames, but does not appear to enforce (what I believe is) the correct use of External on built-in predicates. In any case, the running example only specifies ‘External’ once on the predicate in UnknownStatusRule. External() is required in several other places. ·      The List used on the LHS of UnknownStatusRule is comma-delimited. This is not supported by the EBNF definition. Similarly, the argument list of pred:list-contains is illegally comma-delimited. ·      Unnecessary use of conjunction around a single formula in DiscountRule. This is strictly legal in the EBNF, but redundant.   All the above issues concern the presentation syntax used in the running example. There are a few minor issues with the grammar itself. Note that Michael Kiefer stated in his paper “Rule Interchange Format: The Framework” that: “The presentation syntax of RIF … is an abstract syntax and, as such, it omits certain details that might be important for unambiguous parsing.” ·      The grammar cannot differentiate unambiguously between strategies and priorities on groups. A processor is forced to resolve this by detecting the use of IRIs and integers. This could easily be fixed in the grammar.   ·      The grammar cannot unambiguously parse the ‘->’ operator in frames. Specifically, ‘-’ characters are allowed in PN_LOCAL names and hence a parser cannot determine if ‘status->’ is (‘status’ ‘->’) or (‘status-’ ‘>’).   One way to fix this is to amend the PN_LOCAL production as follows: PN_LOCAL ::= ( PN_CHARS_U | [0-9] ) ((PN_CHARS|'.')* ((PN_CHARS)-('-')))? However, unilaterally changing the definition of this production, which is defined in the SPARQL Query Language for RDF specification, makes me uncomfortable. ·      I assume that the presentation syntax is case-sensitive. I couldn’t find this stated anywhere in the documentation, but function/predicate names do appear to be documented as being case-sensitive. ·      The EBNF does not specify whitespace handling. A couple of productions (RULE and ACTION_BLOCK) are crafted to enforce the use of whitespace. This is not necessary. It seems inconsistent with the rest of the specification and can cause parsing issues. In addition, the Const production exhibits whitespaces issues. The intention may have been to disallow the use of whitespace around ‘^^’, but any direct implementation of the EBNF will probably allow whitespace between ‘^^’ and the SYMSPACE. Of course, I am being a little nit-picking about all this. On the whole, the EBNF translated very smoothly and directly to ‘M’ (MGrammar) and proved to be fairly complete. I have encountered far worse issues when translating other EBNF specifications into usable grammars.   I can’t imagine there would be any difficulty in implementing the same grammar in Antlr, COCO/R, gppg, XText, Bison, etc. A general observation, which repeats a point made above, is that the use of parenthesis in the presentation syntax can feel inconsistent and un-intuitive.   It isn’t actually inconsistent, but I think the presentation syntax could be improved by adopting braces, rather than parenthesis, to delimit subordinate syntax elements in a similar way to so many programming languages. 
The familiarity of braces would communicate the structure of the syntax more clearly to people like me.  If braces were adopted, parentheses could be retained around ‘var (frame | ‘new()’) constructs in action blocks. This use of parenthesis feels very LISP-like, and I think that this is my issue. It’s as if the presentation syntax represents the deformed love-child of LISP and C. In some places (specifically, action blocks), parenthesis is used in a LISP-like fashion. In other places it is used like braces in C. I find this quite confusing. Here is a corrected version of the running example (Example 9.1) in compliant presentation syntax: Document(    Prefix( ex1 <http://example.com/2009/prd2> )    (* ex1:CheckoutRuleset *)  Group rif:forwardChaining (     (* ex1:GoldRule *)    Group 10 (      Forall ?customer such that And(?customer # ex1:Customer                                     ?customer[ex1:status->"Silver"])        (Forall ?shoppingCart such that ?customer[ex1:shoppingCart->?shoppingCart]           (If Exists ?value (And(?shoppingCart[ex1:value->?value]                                  External(pred:numeric-greater-than-or-equal(?value 2000))))            Then Do(Modify(?customer[ex1:status->"Gold"])))))      (* ex1:DiscountRule *)    Group (      Forall ?customer such that ?customer # ex1:Customer        (If Or( ?customer[ex1:status->"Silver"]                ?customer[ex1:status->"Gold"])         Then Do ((?s ?customer[ex1:shoppingCart-> ?s])                  (?v ?s[ex1:value->?v])                  Modify(?s [ex1:value->External(func:numeric-multiply (?v 0.95))]))))      (* ex1:NewCustomerAndWidgetRule *)    Group (      Forall ?customer such that And(?customer # ex1:Customer                                     ?customer[ex1:status->"New"] )        (If Exists ?shoppingCart ?item                   (And(?customer[ex1:shoppingCart->?shoppingCart]                        ?shoppingCart[ex1:containsItem->?item]                        ?item # ex1:Widget ) )         Then Do( (?s ?customer[ex1:shoppingCart->?s])                  (?val ?s[ex1:value->?val])                  (?voucher ?customer[ex1:voucher->?voucher])                  Retract(?customer[ex1:voucher->?voucher])                  Retract(?voucher)                  Modify(?s[ex1:value->External(func:numeric-multiply(?val 0.90))]))))      (* ex1:UnknownStatusRule *)    Group (      Forall ?customer such that ?customer # ex1:Customer        (If Not(Exists ?status                       (And(?customer[ex1:status->?status]                            External(pred:list-contains(List("New" "Bronze" "Silver" "Gold") ?status)) )))         Then Do( Execute(act:print(External(func:concat("New customer: " ?customer))))                  Assert(?customer[ex1:status->"New"]))))  ) )   I hope that helps someone out there :-)

    Read the article

  • Windows Azure Virtual Machine Readiness and Capacity Assessment for SQL Server

    - by SQLOS Team
    Windows Azure Virtual Machine Readiness and Capacity Assessment for Windows Server Machines Running SQL Server With the release of MAP Toolkit 8.0 Beta, we have added a new scenario to assess your Windows Azure Virtual Machine Readiness. The MAP 8.0 Beta performs a comprehensive assessment of Windows Servers running SQL Server to determine your level of readiness to migrate an on-premises physical or virtual machine to Windows Azure Virtual Machines. The MAP Toolkit then offers suggested changes to prepare the machines for migration, such as upgrading the operating system or SQL Server. MAP Toolkit 8.0 Beta is available for download here Your participation and feedback are very important to make the MAP Toolkit work better for you. We encourage you to participate in the beta program and provide your feedback at [email protected] or through one of our surveys. Now, let’s walk through the MAP Toolkit tasks for completing the Windows Azure Virtual Machine assessment and capacity planning. The tasks include the following: Perform an inventory View the Windows Azure VM Readiness results and report Collect performance data to determine VM sizing View the Windows Azure Capacity results and report Perform an inventory: 1. To perform an inventory against a single machine or across a complete environment, choose Perform an Inventory to launch the Inventory and Assessment Wizard as shown below: 2. After the Inventory and Assessment Wizard launches, select either the Windows computers or SQL Server scenario to inventory Windows machines. HINT: If you don’t care about completely inventorying a machine, just select the SQL Server scenario. Click Next to continue. 3. On the Discovery Methods page, select how you want to discover computers and then click Next to continue. Description of Discovery Methods: Use Active Directory Domain Services -- This method allows you to query a domain controller via the Lightweight Directory Access Protocol (LDAP) and select computers in all or specific domains, containers, or OUs. Use this method if all computers and devices are in AD DS. Windows networking protocols -- This method uses the WIN32 LAN Manager application programming interfaces to query the Computer Browser service for computers in workgroups and Windows NT 4.0–based domains. If the computers on the network are not joined to an Active Directory domain, use only the Windows networking protocols option to find computers. System Center Configuration Manager (SCCM) -- This method enables you to inventory computers managed by System Center Configuration Manager (SCCM). You need to provide credentials to the System Center Configuration Manager server in order to inventory the managed computers. When you select this option, the MAP Toolkit will query SCCM for a list of computers and then MAP will connect to these computers. Scan an IP address range -- This method allows you to specify the starting address and ending address of an IP address range. The wizard will then scan all IP addresses in the range and inventory only those computers. Note: This option can perform poorly if many IP addresses aren’t being used within the range. Manually enter computer names and credentials -- Use this method if you want to inventory a small number of specific computers. Import computer names from a file -- Using this method, you can create a text file with a list of computer names that will be inventoried. 4. 
On the All Computers Credentials page, enter the accounts that have administrator rights to connect to the discovered machines. This does not need to be a domain account, but it does need to be a local administrator. I have entered my domain account that is an administrator on my local machine. Click Next after one or more accounts have been added. NOTE: The MAP Toolkit primarily uses Windows Management Instrumentation (WMI) to collect hardware, device, and software information from the remote computers. In order for the MAP Toolkit to successfully connect and inventory computers in your environment, you have to configure your machines to allow inventory through WMI and also configure your firewall to allow remote access through WMI. The MAP Toolkit also requires remote registry access for certain assessments. In addition to enabling WMI, you need accounts with administrative privileges to access desktops and servers in your environment. 5. On the Credentials Order page, select the order in which you want the MAP Toolkit to connect to the machine and SQL Server. Generally just accept the defaults and click Next. 6. On the Enter Computers Manually page, click Create to pull up a dialog to enter one or more computer names. 7. On the Summary page, confirm your settings and then click Finish. After clicking Finish, the inventory process will start, as shown below: Windows Azure Readiness results and report After the inventory process has completed, you can review the results under the Database scenario. On the tile, you will see the number of Windows Server machines with SQL Server that were analyzed, the number of machines that are ready to move without changes, and the number of machines that require further changes. If you click this Azure VM Readiness tile, you will see additional details and can generate the Windows Azure VM Readiness Report. After the report is generated, select View | Saved Reports and Proposals to view the location of the report. Open up the WindowsAzureVMReadiness* report in Excel. On the Windows tab, you can see the results of the assessment. This report has a column for the Operating System and SQL Server assessment and provides a recommendation on how to resolve the issue if a component is not supported. Collect Performance Data Launch the Performance Wizard to collect performance information for the Windows Server machines that you would like the MAP Toolkit to suggest a Windows Azure VM size for. Windows Azure Capacity results and report After the performance metrics are collected, the Azure VM Capacity tile will display the number of Virtual Machine sizes that are suggested for the Windows Server and Linux machines that were analyzed. You can then click on the Azure VM Capacity tile to see the capacity details and generate the Windows Azure VM Capacity Report. Within this report, you can view the performance data that was collected and the Virtual Machine sizes. MAP Toolkit 8.0 Beta is available for download here Your participation and feedback are very important to make the MAP Toolkit work better for you. We encourage you to participate in the beta program and provide your feedback at [email protected] or through one of our surveys. Useful References: Windows Azure Homepage How-to guides for Windows Azure Virtual Machines Provisioning a SQL Server Virtual Machine on Windows Azure Windows Azure Pricing Peter Saddow Senior Program Manager – MAP Toolkit Team
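
    The NOTE in step 4 above says the target machines must allow WMI through the firewall and, for some assessments, expose the Remote Registry service. Purely as an illustration (not part of the original walkthrough), the predefined Windows firewall rule group and the service can typically be enabled from an elevated PowerShell prompt as shown below; adapt and test against a non-production machine first:

        # Enable the predefined WMI firewall rule group
        netsh advfirewall firewall set rule group="Windows Management Instrumentation (WMI)" new enable=yes
        # Make sure the Remote Registry service is running and starts automatically
        Set-Service -Name RemoteRegistry -StartupType Automatic
        Start-Service -Name RemoteRegistry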

    Read the article

  • My form php is not working and I can't figure out where I went wrong

    - by user1081524
    I'm fairly new to all this, but I've created a form, and this is what I've written to send it. I've used "[email protected]" here instead of the real address <?php /* Set e-mail recipient */ $myemail = "[email protected]"; /* Check all form inputs using check_input function */ $names = check_input($_POST['names'], "Please return to our Application Form and enter your and your future spouse's names."); $weddingtype = check_input($_POST['weddingtype'], "Please return to our Application Form and fill in what kind of wedding you will be having."); $religioussect = check_input($_POST['religioussect'], "Please return to our Application Form and tell us about your religion and wedding traditions."); $dateone = check_input($_POST['dateone'], "Please return to our Application Form and give us the date for at least one event."); $eventone = check_input($_POST['eventone'], "Please return to our Application Form and list at least one event."); $locationone = check_input($_POST['locationone'], "Please return to our Application Form and give us the location for at least one event."); $durationone = check_input($_POST['durationone'], "Please return to our Application Form and give us the duration of at least one event."); $typeone = check_input($_POST['typeone'], "Please return to our Application Form and tell us whether you would like video, photo or both for at least one event."); $datetwo = $_POST['datetwo']; $eventtwo = $_POST['eventtwo']; $locationtwo = $_POST['locationtwo']; $durationtwo = $_POST['durationtwo']; $typetwo = $_POST['typetwo']; $datethree = $_POST['datethree']; $eventthree = $_POST['eventthree']; $locationthree = $_POST['locationthree']; $durationthree = $_POST['durationthree']; $typethree = $_POST['typethree']; $datefour = $_POST['datefour']; $eventfour = $_POST['eventfour']; $locationfour = $_POST['locationfour']; $durationfour = $_POST['durationfour']; $typefour = $_POST['typefour']; $guests1 = check_input($_POST['guests1'], "Please return to our Application Form and tell us how many guests will attend at least one event."); $guests2 = $_POST['guests2']; $guests3 = $_POST['guests3']; $guests4 = $_POST['guests4']; $concerns = $_POST['concerns']; if(!isset($_POST['submit'])){ $subject = "Quote Application"; /*Message for the e-mail */ $message = "Hello! Another happy couple has filled out a Quote Application Form :D Hooray! Their names are $names. What sort of wedding are they having? '$weddingtype'. What religious sect and wedding traditions are they following? '$religioussect'. Now for their wedding events... Ooh boy! 1. $dateone $eventone $durationone $typeone Estimated guests: $guests1 $locationone 2. $datetwo $eventtwo $durationtwo $typetwo Estimated guests: $guests2 $locationtwo 3. $datethree $eventthree $durationthree $typethree Estimated guests: $guests3 $locationthree 4. $datefour $eventfour $durationfour $typefour Estimated guests: $guests4 $locationfour Any concerns the couple have follow here: '$concerns' You better be ready to get to work now! 
And also, have a really good day :) "; /* Functions used */ function check_input($data, $problem='') { $data = trim($data); $data = stripslashes($data); $data = htmlspecialchars($data); if ($problem && strlen($data) == 0) { show_error($problem); } return $data; } function show_error($myError) { ?> <b>We apologize for the inconvenience, an error occurred.</b><br /> <?php echo $myError; ?> <?php exit(); } /* Send the message using mail() function */ mail($myemail, $subject, $message); /* Redirect visitor to the thank you page */ header('Location: thanks.html'); exit(); ?> Please help me find what I'm doing wrong, I'm barely a beginner here. Thanks in advance!
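
    Two things in the listing above stand out. First, the if(!isset($_POST['submit'])) { block is opened but never visibly closed before the final ?>, which, if the listing is complete, is itself a parse error. Second, check_input() and show_error() are declared inside that conditional block but called near the top of the file; PHP only makes conditionally declared functions available after the declaring code has run, so those early calls would hit an undefined function. The condition also looks inverted, since the mail is only assembled when 'submit' is not in the POST data. A minimal sketch of a reordered skeleton follows, keeping the original names and assuming the form's submit button is actually named "submit":

        <?php
        /* Declare helpers at the top level so they exist before any call. */
        function check_input($data, $problem = '') {
            $data = trim($data);
            $data = stripslashes($data);
            $data = htmlspecialchars($data);
            if ($problem && strlen($data) == 0) {
                show_error($problem);
            }
            return $data;
        }

        function show_error($myError) {
            echo "<b>We apologize for the inconvenience, an error occurred.</b><br />";
            echo $myError;
            exit();
        }

        /* Only process the form when it was actually submitted. */
        if (isset($_POST['submit'])) {
            $myemail = "[email protected]";
            $names = check_input($_POST['names'], "Please return to our Application Form and enter your and your future spouse's names.");
            /* ...collect the remaining fields exactly as in the original script... */

            $subject = "Quote Application";
            $message = "Hello! Another happy couple has filled out a Quote Application Form :D\n"
                     . "Their names are $names.\n";
            /* ...append the rest of the original message here... */

            mail($myemail, $subject, $message);
            header('Location: thanks.html');
            exit();
        }
        ?>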

    Read the article

  • Restoring databases to a set drive and directory

    - by okeofs
     Restoring databases to a set drive and directory Introduction Often people say that necessity is the mother of invention. In this case I was faced with the dilemma of having to restore several databases, with multiple ‘ndf’ files, and having to restore them with different physical file names, drives and directories on servers other than the servers from which they originated. As most of us would do, I went to Google to see if I could find some code to achieve this task and found some interesting snippets on Pinal Dave’s website. Naturally, I had to take it further than the code snippet, HOWEVER it was a great place to start. Creating a temp table to hold database file details First off, I created a temp table which would hold the details of the individual data files within the database. Although there are a plethora of fields (within the temp table below), I utilize LogicalName only within this example. The temporary table structure may be seen below:   create table #tmp ( LogicalName nvarchar(128)  ,PhysicalName nvarchar(260)  ,Type char(1)  ,FileGroupName nvarchar(128)  ,Size numeric(20,0)  ,MaxSize numeric(20,0), Fileid tinyint, CreateLSN numeric(25,0), DropLSN numeric(25, 0), UniqueID uniqueidentifier, ReadOnlyLSN numeric(25,0), ReadWriteLSN numeric(25,0), BackupSizeInBytes bigint, SourceBlocSize int, FileGroupId int, LogGroupGUID uniqueidentifier, DifferentialBaseLSN numeric(25,0), DifferentialBaseGUID uniqueidentifier, IsReadOnly bit, IsPresent bit,  TDEThumbPrint varchar(50) )    We now declare and populate a variable(@path), setting the variable to the path to our SOURCE database backup. declare @path varchar(50) set @path = 'P:\DATA\MYDATABASE.bak'   From this point, we insert the file details of our database into the temp table. Note that we do so by utilizing a restore statement HOWEVER doing so in ‘filelistonly’ mode.   insert #tmp EXEC ('restore filelistonly from disk = ''' + @path + '''')   At this point, I depart from what I gleaned from Pinal Dave.   I now instantiate a few more local variables. The use of each variable will be evident within the cursor (which follows):   Declare @RestoreString as Varchar(max) Declare @NRestoreString as NVarchar(max) Declare @LogicalName  as varchar(75) Declare @counter as int Declare @rows as int set @counter = 1 select @rows = COUNT(*) from #tmp  -- Count the number of records in the temp                                    -- table   Declaring and populating the cursor At this point I do realize that many people are cringing about the use of a cursor. Being an Oracle professional as well, I have learnt that there is a time and place for cursors. I would remind the reader that the data that will be read into the cursor is from a local temp table and as such, any locking of the records (within the temp table) is not really an issue.   DECLARE MY_CURSOR Cursor  FOR  Select LogicalName  From #tmp   Parsing the logical names from within the cursor. A small caveat that works in our favour,  is that the first logical name (of our database) is the logical name of the primary data file (.mdf). Other files, except for the very last logical name, belong to secondary data files. The last logical name is that of our database log file.   I now open my cursor and populate the variable @RestoreString Open My_Cursor  set @RestoreString =  'RESTORE DATABASE [MYDATABASE] FROM DISK = N''P:\DATA\ MYDATABASE.bak''' + ' with  '   We now fetch the first record from the temp table.   
Fetch NEXT FROM MY_Cursor INTO @LogicalName   While there are STILL records left within the cursor, we dynamically build our restore string. Note that we are using concatenation to create ‘one big restore executable string’.   Note also that the target physical file name is hardwired, as is the target directory.   While (@@FETCH_STATUS <> -1) BEGIN IF (@@FETCH_STATUS <> -2) -- As long as there are no rows missing select @RestoreString = case  when @counter = 1 then -- This is the mdf file    @RestoreString + 'move  N''' + @LogicalName + '''' + ' TO N’’X:\DATA1\'+ @LogicalName + '.mdf' + '''' + ', '   -- OK, if it passes through here we are dealing with an .ndf file -- Note that Counter must be greater than 1 and less than the number of rows.   when @counter > 1 and @counter < @rows then -- These are the ndf file(s)    @RestoreString + 'move  N''' + @LogicalName + '''' + ' TO N’’X:\DATA1\'+ @LogicalName + '.ndf' + '''' + ', '   -- OK, if it passes through here we are dealing with the log file When @LogicalName like '%log%' then    @RestoreString + 'move  N''' + @LogicalName + '''' + ' TO N’’X:\DATA1\'+ @LogicalName + '.ldf' +'''' end --Increment the counter   set @counter = @counter + 1 FETCH NEXT FROM MY_CURSOR INTO @LogicalName END   At this point we have populated the varchar(max) variable @RestoreString with a concatenation of all the necessary file names. What we now need to do is to run the sp_executesql stored procedure, to effect the restore.   First, we must place our ‘concatenated string’ into an nvarchar based variable. Obviously this will only work as long as the length of @RestoreString is less than varchar(max) / 2.   set @NRestoreString = @RestoreString EXEC sp_executesql @NRestoreString   Upon completion of this step, the database should be restored to the server. I now close and deallocate the cursor, and to be clean, I would also drop my temp table.   CLOSE MY_CURSOR DEALLOCATE MY_CURSOR GO   Conclusion Restoration of databases on different servers with different physical names and on different drives are a fact of life. Through the use of a few variables and a simple cursor, we may achieve an efficient and effective way to achieve this task.
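
    For reference, this is roughly the shape of statement the concatenation above is intended to produce; the logical file names, target drive, and directory below are purely illustrative. Note also that the listed fragments contain typographic quotes (N’’) and a stray space in the backup path ('P:\DATA\ MYDATABASE.bak'), which need to be plain doubled single quotes and a clean path when the code is actually run:

        -- Illustrative only: real logical names come from RESTORE FILELISTONLY,
        -- exactly as the article's temp-table step collects them.
        RESTORE DATABASE [MYDATABASE]
        FROM DISK = N'P:\DATA\MYDATABASE.bak'
        WITH
            MOVE N'MYDATABASE_Data'  TO N'X:\DATA1\MYDATABASE_Data.mdf',
            MOVE N'MYDATABASE_Data2' TO N'X:\DATA1\MYDATABASE_Data2.ndf',
            MOVE N'MYDATABASE_Log'   TO N'X:\DATA1\MYDATABASE_Log.ldf';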

    Read the article

  • WD MBWE II (White Strip Light) 2TB - unable to access data

    - by user210477
    I have a WD MBWE II (White Strip Light) 2TB - (WD20000H2NC-00) Was working fine until a few days ago. I guess there was a power failure and after that I am unable to access the 'Public' or the 'Download' folder anymore. I have been searching for answers everywhere but came up empty handed. Web GUI still works, SSH works. I hooked up both the drives on my PC and UFS Explorer sees the drive. But so far I am unable to retrieve any of my data. I do not remember what RAID setting I used when I first got the drive. I can see from GUI that it is set as "Stripe". The drive contains 10 years of family pictures which I really do not want to loose. Sadly and stupidly, I didn't even keep a backup of this drive. Can somebody please help or point me in the right direction. Thank you in advance for your help. Disk Utility on Ubuntu reports 1405 bad sectors on one drive. How can I retrieve my data? Please help. Logs below: ~ # mdadm --detail /dev/md[012345678] /dev/md0: Version : 0.90 Creation Time : Wed Jul 15 08:36:17 2009 Raid Level : raid1 Array Size : 1959872 (1914.26 MiB 2006.91 MB) Used Dev Size : 1959872 (1914.26 MiB 2006.91 MB) Raid Devices : 2 Total Devices : 2 Preferred Minor : 0 Persistence : Superblock is persistent Update Time : Fri Nov 1 13:53:29 2013 State : clean Active Devices : 2 Working Devices : 2 Failed Devices : 0 Spare Devices : 0 UUID : 04f7a661:98983b3b:26b29e4f:9b646adb Events : 0.266 Number Major Minor RaidDevice State 0 8 1 0 active sync /dev/sda1 1 8 17 1 active sync /dev/sdb1 /dev/md1: Version : 0.90 Creation Time : Wed Jul 15 08:36:18 2009 Raid Level : raid1 Array Size : 256896 (250.92 MiB 263.06 MB) Used Dev Size : 256896 (250.92 MiB 263.06 MB) Raid Devices : 2 Total Devices : 2 Preferred Minor : 1 Persistence : Superblock is persistent Update Time : Wed Oct 30 22:08:21 2013 State : clean Active Devices : 2 Working Devices : 2 Failed Devices : 0 Spare Devices : 0 UUID : aaa7b859:c475312d:efc5a766:6526b867 Events : 0.10 Number Major Minor RaidDevice State 0 8 2 0 active sync /dev/sda2 1 8 18 1 active sync /dev/sdb2 /dev/md2: Version : 0.90 Creation Time : Sat Sep 25 10:01:26 2010 Raid Level : raid0 Array Size : 1947045760 (1856.85 GiB 1993.77 GB) Raid Devices : 2 Total Devices : 2 Preferred Minor : 2 Persistence : Superblock is persistent Update Time : Fri Nov 1 13:30:53 2013 State : active Active Devices : 2 Working Devices : 2 Failed Devices : 0 Spare Devices : 0 Chunk Size : 64K UUID : 01dae60a:6831077b:77f74530:8680c183 Events : 0.97 Number Major Minor RaidDevice State 0 8 4 0 active sync /dev/sda4 1 8 20 1 active sync /dev/sdb4 /dev/md3: Version : 0.90 Creation Time : Wed Jul 15 08:36:18 2009 Raid Level : raid1 Array Size : 987904 (964.91 MiB 1011.61 MB) Used Dev Size : 987904 (964.91 MiB 1011.61 MB) Raid Devices : 2 Total Devices : 2 Preferred Minor : 3 Persistence : Superblock is persistent Update Time : Fri Nov 1 13:26:33 2013 State : clean Active Devices : 2 Working Devices : 2 Failed Devices : 0 Spare Devices : 0 UUID : 3f4099f2:72e6171b:5ba962fd:48464a62 Events : 0.54 Number Major Minor RaidDevice State 0 8 3 0 active sync /dev/sda3 1 8 19 1 active sync /dev/sdb3 mdadm: md device /dev/md4 does not appear to be active. mdadm: md device /dev/md5 does not appear to be active. mdadm: md device /dev/md6 does not appear to be active. mdadm: md device /dev/md7 does not appear to be active. mdadm: md device /dev/md8 does not appear to be active. 
~ # cat /etc/mtab securityfs /sys/kernel/security securityfs rw 0 0 /dev/md2 /DataVolume xfs rw,usrquota 0 0 /dev/md4 /ExtendVolume xfs rw,usrquota 0 0 ~ # df -k Filesystem 1k-blocks Used Available Use% Mounted on /dev/md0 1929044 145092 1685960 8% / /dev/md3 972344 123452 799500 13% /var /dev/ram0 63412 20 63392 0% /mnt/ram ~ # mdadm -D /dev/md2 /dev/md2: Version : 0.90 Creation Time : Sat Sep 25 10:01:26 2010 Raid Level : raid0 Array Size : 1947045760 (1856.85 GiB 1993.77 GB) Raid Devices : 2 Total Devices : 2 Preferred Minor : 2 Persistence : Superblock is persistent Update Time : Fri Nov 1 13:30:53 2013 State : active Active Devices : 2 Working Devices : 2 Failed Devices : 0 Spare Devices : 0 Chunk Size : 64K UUID : 01dae60a:6831077b:77f74530:8680c183 Events : 0.97 Number Major Minor RaidDevice State 0 8 4 0 active sync /dev/sda4 1 8 20 1 active sync /dev/sdb4 ~ # mdadm -D /dev/md4 mdadm: md device /dev/md4 does not appear to be active. ~ # mount /dev/root on / type ext3 (rw,noatime,data=ordered) proc on /proc type proc (rw) sys on /sys type sysfs (rw) /dev/pts on /dev/pts type devpts (rw) securityfs on /sys/kernel/security type securityfs (rw) /dev/md3 on /var type ext3 (rw,noatime,data=ordered) /dev/ram0 on /mnt/ram type tmpfs (rw) ~ # cat /var/log/messages Oct 29 18:04:50 shmotashNAS daemon.warn wixEvent[3462]: Network Link - NIC 1 link is down. Oct 29 18:04:59 shmotashNAS daemon.info wixEvent[3462]: Network Link - NIC 1 link is up 100 Mbps full duplex. Oct 29 18:04:59 shmotashNAS daemon.info wixEvent[3462]: Network IP Address - NIC 1 use static IP address 192.168.1.102 Oct 29 18:17:45 shmotashNAS daemon.warn wixEvent[3462]: Network Link - NIC 1 link is down. Oct 29 18:17:53 shmotashNAS daemon.info wixEvent[3462]: Network Link - NIC 1 link is up 100 Mbps full duplex. Oct 29 18:17:53 shmotashNAS daemon.info wixEvent[3462]: Network IP Address - NIC 1 use static IP address 192.168.1.102 Oct 30 00:50:11 shmotashNAS daemon.warn wixEvent[3462]: Network Link - NIC 1 link is down. Oct 30 00:50:19 shmotashNAS daemon.info wixEvent[3462]: Network Link - NIC 1 link is up 100 Mbps full duplex. Oct 30 00:50:19 shmotashNAS daemon.info wixEvent[3462]: Network IP Address - NIC 1 use static IP address 192.168.1.102 Oct 30 16:29:47 shmotashNAS daemon.warn wixEvent[3462]: Network Link - NIC 1 link is down. Oct 30 16:30:00 shmotashNAS daemon.info wixEvent[3462]: Network Link - NIC 1 link is up 100 Mbps full duplex. Oct 30 16:30:00 shmotashNAS daemon.info wixEvent[3462]: Network IP Address - NIC 1 use static IP address 192.168.1.102 Oct 30 18:27:22 shmotashNAS daemon.warn wixEvent[3462]: Network Link - NIC 1 link is down. Oct 30 18:27:30 shmotashNAS daemon.info wixEvent[3462]: Network Link - NIC 1 link is up 100 Mbps full duplex. Oct 30 18:27:30 shmotashNAS daemon.info wixEvent[3462]: Network IP Address - NIC 1 use static IP address 192.168.1.102 Oct 30 19:06:03 shmotashNAS daemon.warn wixEvent[3462]: Network Link - NIC 1 link is down. Oct 30 19:06:10 shmotashNAS daemon.info wixEvent[3462]: Network Link - NIC 1 link is up 100 Mbps full duplex. Oct 30 19:06:10 shmotashNAS daemon.info wixEvent[3462]: Network IP Address - NIC 1 use static IP address 192.168.1.102 Oct 30 19:14:58 shmotashNAS daemon.warn wixEvent[3462]: Media Server - Media Server cannot find the path to one or more of the default folders: /Public/Shared Music, /Public/Shared Pictures or /Public/Shared Videos. Please verify that these folders have not been removed or that the names have not been changed. 
Oct 30 19:20:05 shmotashNAS daemon.alert wixEvent[3462]: Thermal Alarm - System temperature exceeded threshold.(66 degrees) Oct 30 19:58:29 shmotashNAS daemon.alert wixEvent[3462]: HDD SMART - HDD 1 SMART Health Status: Failed. Oct 30 22:05:39 shmotashNAS daemon.info init: Starting pid 13043, console /dev/null: '/usr/bin/killall' Oct 30 22:05:39 shmotashNAS syslog.info System log daemon exiting. Oct 30 22:08:09 shmotashNAS syslog.info syslogd started: BusyBox v1.1.1 Oct 30 22:08:09 shmotashNAS daemon.warn wixEvent[3557]: Network Link - NIC 1 link is down. Oct 30 22:08:19 shmotashNAS daemon.info wixEvent[3557]: Network Link - NIC 1 link is up 100 Mbps full duplex. Oct 30 22:08:25 shmotashNAS daemon.warn wixEvent[3557]: Network Link - NIC 1 link is down. Oct 30 22:08:37 shmotashNAS daemon.info wixEvent[3557]: Network Link - NIC 1 link is up 100 Mbps full duplex. Oct 30 22:08:44 shmotashNAS daemon.warn wixEvent[3557]: Network Link - NIC 1 link is down. Oct 30 22:08:46 shmotashNAS syslog.info miocrawler: +++++++++++++++ START OF ./miocrawler at 2013:10:30 - 22:08:46 [Version 01.09.00.96] ++++++++++++++ Oct 30 22:08:46 shmotashNAS syslog.info miocrawler: mc_db_init ... Oct 30 22:08:46 shmotashNAS syslog.info miocrawler: ****** database does not exist. ret = -1, creating path Oct 30 22:08:49 shmotashNAS syslog.info miocrawler: === mc_db_init ...Done. Oct 30 22:08:50 shmotashNAS syslog.info miocrawler: mcUtilsInit() Creating free queue pool Oct 30 22:08:51 shmotashNAS syslog.info miocrawler: === mcUtilsInit() Done. Oct 30 22:08:51 shmotashNAS syslog.info miocrawler: === inotify init done. Oct 30 22:08:51 shmotashNAS syslog.info miocrawler: mc_trans_updater_init() ... Oct 30 22:08:52 shmotashNAS syslog.info miocrawler: === mc_trans_updater_init() ...Done. Oct 30 22:08:52 shmotashNAS syslog.info miocrawler: === Walking directory done. Oct 30 22:08:57 shmotashNAS daemon.info wixEvent[3557]: Network Link - NIC 1 link is up 100 Mbps full duplex. Oct 30 22:08:57 shmotashNAS daemon.info wixEvent[3557]: Network IP Address - NIC 1 use static IP address 192.168.1.102 Oct 30 22:08:57 shmotashNAS daemon.info wixEvent[3557]: Network IP Address - NIC 1 use static IP address 192.168.1.102 Oct 30 22:08:57 shmotashNAS daemon.info wixEvent[3557]: Network IP Address - NIC 1 use static IP address 192.168.1.102 Oct 30 22:09:10 shmotashNAS daemon.info init: Starting pid 4605, console /dev/null: '/bin/touch' Oct 30 22:09:10 shmotashNAS daemon.info init: Starting pid 4607, console /dev/ttyS0: '/sbin/getty' Oct 30 22:09:10 shmotashNAS daemon.info wixEvent[3557]: System Startup - System startup. Oct 30 22:09:16 shmotashNAS daemon.warn wixEvent[3557]: Media Server - Media Server cannot find the path to one or more of the default folders: /Public/Shared Music, /Public/Shared Pictures or /Public/Shared Videos. Please verify that these folders have not been removed or that the names have not been changed. Oct 30 22:14:14 shmotashNAS daemon.warn wixEvent[3557]: Network Link - NIC 1 link is down. Oct 30 22:14:21 shmotashNAS daemon.info wixEvent[3557]: Network Link - NIC 1 link is up 100 Mbps full duplex. Oct 30 22:14:21 shmotashNAS daemon.info wixEvent[3557]: Network IP Address - NIC 1 use static IP address 192.168.1.102 Oct 30 22:29:36 shmotashNAS daemon.warn wixEvent[3557]: System Reboot - System will reboot. Oct 30 22:29:40 shmotashNAS daemon.info init: Starting pid 5974, console /dev/null: '/usr/bin/killall' Oct 30 22:29:40 shmotashNAS syslog.info System log daemon exiting. 
Oct 30 22:47:56 shmotashNAS syslog.info syslogd started: BusyBox v1.1.1 Oct 30 22:47:56 shmotashNAS daemon.warn wixEvent[3461]: Network Link - NIC 1 link is down. Oct 30 22:48:02 shmotashNAS daemon.info wixEvent[3461]: Network Link - NIC 1 link is up 100 Mbps full duplex. Oct 30 22:48:02 shmotashNAS daemon.info wixEvent[3461]: Network IP Address - NIC 1 use static IP address 192.168.1.102 Oct 30 22:48:09 shmotashNAS syslog.info miocrawler: +++++++++++++++ START OF ./miocrawler at 2013:10:30 - 22:48:09 [Version 01.09.00.96] ++++++++++++++ Oct 30 22:48:09 shmotashNAS syslog.info miocrawler: mc_db_init ... Oct 30 22:48:09 shmotashNAS syslog.info miocrawler: ++++++++ database exists: ret = 0 Oct 30 22:48:10 shmotashNAS syslog.info miocrawler: === mc_db_init ...Done. Oct 30 22:48:10 shmotashNAS syslog.info miocrawler: mcUtilsInit() Creating free queue pool Oct 30 22:48:11 shmotashNAS syslog.info miocrawler: === mcUtilsInit() Done. Oct 30 22:48:11 shmotashNAS syslog.info miocrawler: === inotify init done. Oct 30 22:48:11 shmotashNAS syslog.info miocrawler: mc_trans_updater_init() ... Oct 30 22:48:11 shmotashNAS syslog.info miocrawler: === mc_trans_updater_init() ...Done. Oct 30 22:48:11 shmotashNAS syslog.info miocrawler: === Walking directory done. Oct 30 22:48:27 shmotashNAS daemon.info init: Starting pid 4079, console /dev/null: '/bin/touch' Oct 30 22:48:27 shmotashNAS daemon.info init: Starting pid 4080, console /dev/ttyS0: '/sbin/getty' Oct 30 22:48:28 shmotashNAS daemon.info wixEvent[3461]: System Startup - System startup. Oct 30 22:49:01 shmotashNAS daemon.warn wixEvent[3461]: Media Server - Media Server cannot find the path to one or more of the default folders: /Public/Shared Music, /Public/Shared Pictures or /Public/Shared Videos. Please verify that these folders have not been removed or that the names have not been changed. Oct 30 23:51:11 shmotashNAS daemon.warn wixEvent[3461]: System Reboot - System will reboot. Oct 30 23:51:16 shmotashNAS daemon.info init: Starting pid 6498, console /dev/null: '/usr/bin/killall' Oct 30 23:51:16 shmotashNAS syslog.info System log daemon exiting. Oct 30 23:54:19 shmotashNAS syslog.info syslogd started: BusyBox v1.1.1 Oct 30 23:55:37 shmotashNAS daemon.info wixEvent[3476]: Network Link - NIC 1 link is up 100 Mbps full duplex. Oct 30 23:55:37 shmotashNAS daemon.info wixEvent[3476]: Network IP Address - NIC 1 use static IP address 192.168.1.102 Oct 30 23:55:44 shmotashNAS syslog.info miocrawler: +++++++++++++++ START OF ./miocrawler at 2013:10:30 - 23:55:44 [Version 01.09.00.96] ++++++++++++++ Oct 30 23:55:44 shmotashNAS syslog.info miocrawler: mc_db_init ... Oct 30 23:55:44 shmotashNAS syslog.info miocrawler: ++++++++ database exists: ret = 0 Oct 30 23:55:45 shmotashNAS syslog.info miocrawler: === mc_db_init ...Done. Oct 30 23:55:45 shmotashNAS syslog.info miocrawler: mcUtilsInit() Creating free queue pool Oct 30 23:55:46 shmotashNAS syslog.info miocrawler: === mcUtilsInit() Done. Oct 30 23:55:46 shmotashNAS syslog.info miocrawler: === inotify init done. Oct 30 23:55:46 shmotashNAS syslog.info miocrawler: mc_trans_updater_init() ... Oct 30 23:55:46 shmotashNAS syslog.info miocrawler: === mc_trans_updater_init() ...Done. Oct 30 23:55:46 shmotashNAS syslog.info miocrawler: === Walking directory done. 
Oct 30 23:55:58 shmotashNAS daemon.info init: Starting pid 4115, console /dev/null: '/bin/touch' Oct 30 23:55:58 shmotashNAS daemon.info init: Starting pid 4116, console /dev/ttyS0: '/sbin/getty' Oct 30 23:55:58 shmotashNAS daemon.info wixEvent[3476]: System Startup - System startup. Oct 30 23:56:33 shmotashNAS daemon.warn wixEvent[3476]: Media Server - Media Server cannot find the path to one or more of the default folders: /Public/Shared Music, /Public/Shared Pictures or /Public/Shared Videos. Please verify that these folders have not been removed or that the names have not been changed. Oct 31 00:29:14 shmotashNAS auth.info sshd[5409]: Server listening on 0.0.0.0 port 22. Oct 31 00:31:25 shmotashNAS auth.info sshd[5486]: Accepted password for root from 192.168.1.100 port 50785 ssh2 Oct 31 00:33:44 shmotashNAS auth.info sshd[5565]: Accepted password for root from 192.168.1.100 port 50817 ssh2 Oct 31 00:36:39 shmotashNAS daemon.info init: Starting pid 5680, console /dev/null: '/usr/bin/killall' Oct 31 00:36:39 shmotashNAS syslog.info System log daemon exiting. Oct 31 00:40:44 shmotashNAS syslog.info syslogd started: BusyBox v1.1.1 Oct 31 00:40:51 shmotashNAS daemon.info wixEvent[3464]: Network Link - NIC 1 link is up 100 Mbps full duplex. Oct 31 00:40:51 shmotashNAS daemon.info wixEvent[3464]: Network IP Address - NIC 1 use static IP address 192.168.1.102 Oct 31 00:41:00 shmotashNAS syslog.info miocrawler: +++++++++++++++ START OF ./miocrawler at 2013:10:31 - 00:41:00 [Version 01.09.00.96] ++++++++++++++ Oct 31 00:41:00 shmotashNAS syslog.info miocrawler: mc_db_init ... Oct 31 00:41:00 shmotashNAS syslog.info miocrawler: ++++++++ database exists: ret = 0 Oct 31 00:41:00 shmotashNAS syslog.info miocrawler: === mc_db_init ...Done. Oct 31 00:41:01 shmotashNAS syslog.info miocrawler: mcUtilsInit() Creating free queue pool Oct 31 00:41:02 shmotashNAS syslog.info miocrawler: === mcUtilsInit() Done. Oct 31 00:41:02 shmotashNAS syslog.info miocrawler: === inotify init done. Oct 31 00:41:02 shmotashNAS syslog.info miocrawler: mc_trans_updater_init() ... Oct 31 00:41:02 shmotashNAS syslog.info miocrawler: === mc_trans_updater_init() ...Done. Oct 31 00:41:02 shmotashNAS syslog.info miocrawler: === Walking directory done. Oct 31 00:41:14 shmotashNAS daemon.info init: Starting pid 4101, console /dev/null: '/bin/touch' Oct 31 00:41:14 shmotashNAS daemon.info init: Starting pid 4102, console /dev/ttyS0: '/sbin/getty' Oct 31 00:41:15 shmotashNAS daemon.info wixEvent[3464]: System Startup - System startup. Oct 31 00:41:47 shmotashNAS daemon.warn wixEvent[3464]: Media Server - Media Server cannot find the path to one or more of the default folders: /Public/Shared Music, /Public/Shared Pictures or /Public/Shared Videos. Please verify that these folders have not been removed or that the names have not been changed. Oct 31 01:13:19 shmotashNAS daemon.info init: Starting pid 5385, console /dev/null: '/usr/bin/killall' Oct 31 01:13:19 shmotashNAS syslog.info System log daemon exiting. Nov 1 13:26:25 shmotashNAS syslog.info syslogd started: BusyBox v1.1.1 Nov 1 13:26:32 shmotashNAS daemon.info wixEvent[3471]: Network Link - NIC 1 link is up 100 Mbps full duplex. Nov 1 13:26:32 shmotashNAS daemon.info wixEvent[3471]: Network IP Address - NIC 1 use static IP address 192.168.1.102 Nov 1 13:26:38 shmotashNAS syslog.info miocrawler: +++++++++++++++ START OF ./miocrawler at 2013:11:01 - 13:26:38 [Version 01.09.00.96] ++++++++++++++ Nov 1 13:26:38 shmotashNAS syslog.info miocrawler: mc_db_init ... 
Nov 1 13:26:38 shmotashNAS syslog.info miocrawler: ++++++++ database exists: ret = 0 Nov 1 13:26:39 shmotashNAS syslog.info miocrawler: === mc_db_init ...Done. Nov 1 13:26:39 shmotashNAS syslog.info miocrawler: mcUtilsInit() Creating free queue pool Nov 1 13:26:40 shmotashNAS syslog.info miocrawler: === mcUtilsInit() Done. Nov 1 13:26:40 shmotashNAS syslog.info miocrawler: === inotify init done. Nov 1 13:26:40 shmotashNAS syslog.info miocrawler: mc_trans_updater_init() ... Nov 1 13:26:40 shmotashNAS syslog.info miocrawler: === mc_trans_updater_init() ...Done. Nov 1 13:26:40 shmotashNAS syslog.info miocrawler: === Walking directory done. Nov 1 13:26:52 shmotashNAS daemon.info init: Starting pid 4078, console /dev/null: '/bin/touch' Nov 1 13:26:52 shmotashNAS daemon.info init: Starting pid 4079, console /dev/ttyS0: '/sbin/getty' Nov 1 13:26:52 shmotashNAS daemon.info wixEvent[3471]: System Startup - System startup. Nov 1 13:27:28 shmotashNAS daemon.warn wixEvent[3471]: Media Server - Media Server cannot find the path to one or more of the default folders: /Public/Shared Music, /Public/Shared Pictures or /Public/Shared Videos. Please verify that these folders have not been removed or that the names have not been changed. Nov 1 13:44:48 shmotashNAS auth.info sshd[5375]: Accepted password for root from 192.168.1.103 port 50217 ssh2 Nov 1 13:51:08 shmotashNAS auth.info sshd[5894]: Accepted password for root from 192.168.1.103 port 50380 ssh2

    Read the article

  • SQL SERVER – Auto Complete and Format T-SQL Code – Devart SQL Complete

    - by pinaldave
    Some people call it laziness, some call it efficiency, and some think it is simply the right thing to do. At any rate, tools are meant to make a job easier, and I like to use various tools. If we had all wanted to keep traditional practices, we would never have invented the wheel.  As time progressed, people wanted convenience and efficiency, which some would call laziness, but wanting a more efficient way to do something is not inherently lazy.  That’s how I see any efficiency tool. A few days ago I found Devart SQL Complete.  It took less than a minute to install, and after installation it just worked without needing any tweaking.  Once I started using it I was impressed with how fast it formats SQL code – you can type new code or copy and paste existing code.  You can start typing right away, and it will complete keywords, object names, and code fragments. It also completes statement expressions.  How many times do we write INSERT, UPDATE, or DELETE?  Take this example: to alter a stored procedure when we don’t remember the code written in it, we either have to write it over again or go back to SQL Server Management Studio to script out the CREATE and ALTER statements, which is cumbersome.  With SQL Complete, you can write “alter stored procedure,” and it will finish it for you, and you can modify it as needed. I love to write code, and I love well-written code.  When I am working with clients and I find code that has not been written properly, I feel a little uncomfortable.  It is difficult to deal with code that is in the wrong case, with no line breaks, no white space, improper indents, and no text wrapping.  The worst thing to encounter is code that runs all the way off the right side of the screen, so you have to scroll endlessly because there are no breaks or indents.  SQL Complete will take care of this for you – if a developer is too lazy for proper formatting, then Devart’s SQL formatter tool will make the code better, not the developer lazier. SQL Server Management Studio gives information about your code when you hover your mouse over it; SQL Complete goes further, showing details of the underlying table as well. It gives you more information about the parameters; and last but not least, it helps with code navigation: it will open Object Explorer in a document viewer so you can start going through the various properties of your code – a very important thing to do. Here are a few interesting IntelliSense examples: 1) We are often too lazy to expand *; with SQL Complete we can just mouse over the * and it will give us all the column names so we can select the appropriate columns. 2) We can put the cursor after the * and it will give us the option to expand it to all the column names by pressing the Tab key. 3) Here is one more IntelliSense feature I really liked. I always alias my tables and I always select the alias with special logic. When I was using SQL Complete I selected just a table name (without the schema name) and it autocompleted the schema and alias name (the way I needed it). I believe that using SQL Complete we can work faster.  It supports all versions of SQL Server and handles SQL formatting as well.  Many businesses perform code reviews and have code standards, so why not use an efficiency tool on everyone’s computer and make sure the code is written correctly the first time?  If you’re interested in this tool, there are free editions available.  If you like it, you can buy it.  I bought it because it works. 
 I love it, and I want to hear all your opinions on it, too. You can get the product for FREE.  Reference: Pinal Dave (http://blog.sqlauthority.com) Filed under: PostADay, SQL, SQL Authority, SQL Query, SQL Server, SQL Tips and Tricks, SQL Utility, T SQL, Technology

    Read the article

  • Why is Apache ignoring VirtualHost directive for first name in hosts file?

    - by Peter Taylor
    Standard pre-emptive disclaimer: host names, IP addresses, and directories are anonymised. Problem We have a server with Apache 2.2 (WAMP) listening on one IP and IIS listening on another. An ASP.Net application running under IIS needs to do some simple GETs from the PHP applications running under Apache to build a unified search results page. This is a virtual server, so the internal IPs are mapped somehow to external ones. The internal DNS system doesn't resolve the publicly published names under which the applications are accessed externally, so the obvious solution was to add them to etc/hosts with the internal IP address: 127.0.0.1 localhost # 10.0.1.17 is the IP address Apache listens on 10.0.1.17 phpappone.example.com 10.0.1.17 phpapptwo.example.com After restarting Apache, phpappone.example.com stopped working. Instead of returning pages from that app, Apache was returning pages from the default site. The other PHP apps worked fine. Relevant configuration httpd.conf, summarised, says: ServerAdmin [email protected] ServerRoot "c:/server/Apache2" ServerName www.example.com Listen 10.0.1.17:80 Listen 10.0.1.17:443 # Not obviously related config options elided # Nothing obviously astandard # If you want more details, post a comment DocumentRoot "c:/server/Apache2/htdocs" <Directory /> Options FollowSymLinks AllowOverride None Order deny,allow Deny from all </Directory> # Fallback for unknown host names <Directory "c:/server/Apache2/htdocs"> Options Indexes FollowSymLinks AllowOverride None Order allow,deny Allow from all </Directory> # PHP apps common config <Directory "C:/Inetpub/wwwroot/phpapps"> Options FollowSymLinks -Indexes +ExecCGI AllowOverride All Order Allow,Deny Allow from All </Directory> # Virtual hosts NameVirtualHost 10.0.1.17:80 NameVirtualHost 10.0.1.17:443 <VirtualHost _default_:80> </VirtualHost> <VirtualHost _default_:443> SSLEngine On SSLCertificateFile "certs/example.crt" SSLCertificateKeyFile "certs/example.key" </VirtualHost> Include conf/vhosts/*.conf and the vhosts files are e.g. <VirtualHost 10.0.1.17:80> ServerName phpappone.example.com DocumentRoot "c:/Inetpub/wwwroot/phpapps/phpappone" </VirtualHost> <VirtualHost 10.0.1.17:443> ServerName phpappone.example.com DocumentRoot "c:/Inetpub/wwwroot/phpapps/phpappone" SSLEngine On SSLCertificateFile "certs/example.crt" SSLCertificateKeyFile "certs/example.key" </VirtualHost> Buggy behaviour or our misunderstanding? The documentation for name-based virtual hosts says that Now when a request arrives, the server will first check if it is using an IP address that matches the NameVirtualHost. If it is, then it will look at each <VirtualHost> section with a matching IP address and try to find one where the ServerName or ServerAlias matches the requested hostname. If it finds one, then it uses the configuration for that server. If no matching virtual host is found, then the first listed virtual host that matches the IP address will be used. Yet that isn't what we observe. It seems that if the hostname is the first hostname listed against the IP address in etc/hosts then it uses the configuration from the main server and skips the virtual host lookup. Workarounds The workaround we've put in place for the time being is to add a fake line to the hosts file: 127.0.0.1 localhost # 10.0.1.17 is the IP address Apache listens on 10.0.1.17 fakename.example.com 10.0.1.17 phpappone.example.com 10.0.1.17 phpapptwo.example.com This fixes the problem, but it's not very elegant. 
In addition, it seems a bit brittle: reordering lines in the hosts file (or deleting the nonsense value) can break it. The other obvious workaround is to make the main server configuration match that of the troublesome virtual host, but that is equally brittle. A third option, which is just ugly, would be to change the ASP.Net code to take separate config items for the IP address and the hostname and to implement HTTP manually. Ugh. The question Is there a good solution to this problem which localises any "Do not touch this!" explanations to the Apache config files?

    Read the article

  • E-Business Suite Plug-in 12.1.0.1 for Enterprise Manager 12c Now Available

    - by Steven Chan (Oracle Development)
    Oracle E-Business Suite Plug-in 12.1.0.1.0 is now available for use with Oracle Enterprise Manager 12c.  Oracle E-Business Suite Plug-in 12.1.0.1 is an integral part of Oracle Enterprise Manager 12 Application Management Suite for Oracle E-Business Suite. This latest plug-in extends EM 12c Cloud Control with E-Business Suite specific system management capabilities and features enhanced change management support. The Oracle Enterprise Manager 12c Application Management Suite for Oracle E-Business Suite includes: Oracle E-Business Suite Plug-in 12.1.0.1 combines functionality that was available in the previously-standalone Application Management Pack for Oracle E-Business Suite and Application Change Management Pack for Oracle E-Business Suite with Oracle Real User Experience Insight Oracle Configuration & Compliance capabilities  Features that were previously available in the standalone management packs are now packaged in the Oracle E-Business Suite Plug-in, which is certified with Oracle Enterprise Manager 12c Cloud Control:  Functionality previously available for Application Management Pack (AMP) is now classified as “System Management for Oracle E-Business Suite” within the plug-in. Functionality previously available for Application Change Management Pack (ACMP) is now classified as “Change Management for Oracle E-Business Suite” within the plug-in. The Application Configuration Console and the Configuration Change Console are now native components of Oracle Enterprise Manager 12c. System Management Enhancements General Oracle Enterprise Manager 12c Base Platform uptake: All components of the management suite are certified with Oracle Enterprise Manager 12c Cloud Control. Security Privilege Delegation: The Oracle E-Business Suite Plug-in now extends Enterprise Manager’s privilege delegation through Sudo and PowerBroker to Oracle E-Business Suite Plug-in host targets. Privileges and Roles for Managing Oracle E-Business Suite: This release includes new ready-to-use target and resource privileges to monitor, manage, and perform Change Management functionality. Cloning Named Credentials Uptake in Cloning: The Clone module transactions now let users leverage the Named Credential feature introduced in Enterprise Manager 12c, thereby passing all the benefits of Named Credentials features in Enterprise Manager to the Oracle E-Business Suite Plug-in users. Smart Clone improvements: In addition to the existing 11i support that was available on previous releases, the new Oracle E-Business Suite Plug-in widens the coverage supporting Oracle E-Business Suite releases 12.0.x and 12.1.x. The new and improved Smart Clone UI supports the adding of "pre and post" custom steps to a copy of the ready-to-use cloning deployment procedure. Now a user can pass parameters to the custom steps through the interview screen of the UI as well as pass ready-to-use parameters to the custom steps. Additional configuration enhancements are included for configuring RAC targets databases, such as the ability to customize listener names and the option to configure with Virtual IP or Scan IP. Change Management Enhancements Customization Manager Support for longer file names: Customization Manager now handles file names up to thirty characters in length. Patch Manager Queuing of Patch Manager Runs: This feature allows patch runs to queue up if Patch Manager detects a specific target is in a blackout state. 
Multi-node system patching: The patch run interview has been enhanced to allow the Enterprise Manager Administrator to choose which nodes adpatch will run on. New AD Administration Options: The patch run interview has been extended to include the AD Administration Options "Relink Application Programs", "Generate Product Jars Files", "Generate Report Files", and "Generate Form Files". Downloads: For a fresh install (new customers, or existing customers wishing to perform a fresh install), the plug-in is available from the Enterprise Manager Store (within Enterprise Manager 12c) and the Oracle Software Delivery Cloud. For upgrades (existing customers wishing to upgrade their AMP 4.0 or AMP 3.1 installations), it is available from the Oracle Technology Network. For more information, see Getting Started with Oracle E-Business Suite Plug-In, Release 12.1.0.1 (Note 1434392.1). Prerequisites: Enterprise Manager Cloud Control 12c, plus one or more of the following Oracle E-Business Suite releases: Release 11.5.10 CU2 with 11i.ATG_PF.H.RUP6 or higher; Release 12.0.4 with R12.ATG_PF.A.delta.6; Release 12.1 with R12.ATG_PF.B.delta.3. Platforms and OS: Release certification information is available from My Oracle Support via the Certification page. Search for "Oracle Application Management Pack for Oracle E-Business Suite and release 12.1.0.1.0." Related Articles: Oracle E-Business Suite Plug-in 4.0 Released for OEM 11g (11.1.0.1)

    Read the article

  • SQL SERVER – Puzzle #1 – Querying Pattern Ranges and Wild Cards

    - by Pinal Dave
    Note: Read at the end of the blog post how you can get five Joes 2 Pros Book #1 and a surprise gift. I have been blogging for almost 7 years and every other day I receive questions about Querying Pattern Ranges. The most common way to solve the problem is to use Wild Cards. However, not everyone knows how to use wild card properly. SQL Queries 2012 Joes 2 Pros Volume 1 – The SQL Queries 2012 Hands-On Tutorial for Beginners Book On Amazon | Book On Flipkart Learn SQL Server get all the five parts combo kit Kit on Amazon | Kit on Flipkart Many people know wildcards are great for finding patterns in character data. There are also some special sequences with wildcards that can give you even more power. This series from SQL Queries 2012 Joes 2 Pros® Volume 1 will show you some of these cool tricks. All supporting files are available with a free download from the www.Joes2Pros.com web site. This example is from the SQL 2012 series Volume 1 in the file SQLQueries2012Vol1Chapter2.2Setup.sql. If you need help setting up then look in the “Free Videos” section on Joes2Pros under “Getting Started” called “How to install your labs” Querying Pattern Ranges The % wildcard character represents any number of characters of any length. Let’s find all first names that end in the letter ‘A’. By using the percentage ‘%’ sign with the letter ‘A’, we achieve this goal using the code sample below: SELECT * FROM Employee WHERE FirstName LIKE '%A' To find all FirstName values beginning with the letters ‘A’ or ‘B’ we can use two predicates in our WHERE clause, by separating them with the OR statement. Finding names beginning with an ‘A’ or ‘B’ is easy and this works fine until we want a larger range of letters as in the example below for ‘A’ thru ‘K’: SELECT * FROM Employee WHERE FirstName LIKE 'A%' OR FirstName LIKE 'B%' OR FirstName LIKE 'C%' OR FirstName LIKE 'D%' OR FirstName LIKE 'E%' OR FirstName LIKE 'F%' OR FirstName LIKE 'G%' OR FirstName LIKE 'H%' OR FirstName LIKE 'I%' OR FirstName LIKE 'J%' OR FirstName LIKE 'K%' The previous query does find FirstName values beginning with the letters ‘A’ thru ‘K’. However, when a query requires a large range of letters, the LIKE operator has an even better option. Since the first letter of the FirstName field can be ‘A’, ‘B’, ‘C’, ‘D’, ‘E’, ‘F’, ‘G’, ‘H’, ‘I’, ‘J’ or ‘K’, simply list all these choices inside a set of square brackets followed by the ‘%’ wildcard, as in the example below: SELECT * FROM Employee WHERE FirstName LIKE '[ABCDEFGHIJK]%' A more elegant example of this technique recognizes that all these letters are in a continuous range, so we really only need to list the first and last letter of the range inside the square brackets, followed by the ‘%’ wildcard allowing for any number of characters after the first letter in the range. Note: A predicate that uses a range will not work with the ‘=’ operator (equals sign). It will neither raise an error, nor produce a result set. --Bad query (will not error or return any records) SELECT * FROM Employee WHERE FirstName = '[A-K]%' Question: You want to find all first names that start with the letters A-M in your Customer table and end with the letter Z. Which SQL code would you use? a. SELECT * FROM Customer WHERE FirstName LIKE 'm%z' b. SELECT * FROM Customer WHERE FirstName LIKE 'a-m%z' c. SELECT * FROM Customer WHERE FirstName LIKE 'a-m%z' d. SELECT * FROM Customer WHERE FirstName LIKE '[a-m]%z' e. SELECT * FROM Customer WHERE FirstName LIKE '[a-m]z%' f. 
SELECT * FROM Customer WHERE FirstName LIKE '[a-m]%z' g. SELECT * FROM Customer WHERE FirstName LIKE '[a-m]z%' Contest Leave a valid answer before June 18, 2013 in the comment section. 5 winners will be selected from all the valid answers and will receive Joes 2 Pros Book #1. 1 Lucky person will get a surprise gift from Joes 2 Pros. The contest is open for all the countries where Amazon ships the book (USA, UK, Canada, India and many others). Special Note: Read all the options before you provide valid answer as there is a small trick hidden in answers. Reference: Pinal Dave (http://blog.sqlauthority.com) Filed under: Joes 2 Pros, PostADay, SQL, SQL Authority, SQL Puzzle, SQL Query, SQL Server, SQL Tips and Tricks, T SQL, Technology

    Read the article

  • Simple MVVM Walkthrough – Refactored

    - by Sean Feldman
    JR has put together a good introductory post on the MVVM pattern. I love kick-start examples that serve their purpose well, and even more than that I love examples that can also pass the real-world project check. So I took the sample code and refactored it slightly to address a few aspects that a lot of developers might raise a brow at. Michael has mentioned model (entity) visibility from the view. I agree on that. A few other items that don’t sit well are using property names as strings (magic strings) and the Saver class’s internal casting of a parameter (custom code for each Saver command). Fixing the property name usage is a straightforward exercise – leverage expressions. Something simple like this would do the initial job: class PropertyOf<T> { public static string Resolve(Expression<Func<T, object>> expression) { var member = expression.Body as MemberExpression; return member.Member.Name; } } With this, refactoring property names becomes an easy task, with confidence that an old property name string will not get left behind. An updated Invoice would look like this: public class Invoice : INotifyPropertyChanged { private int id; private string receiver; public event PropertyChangedEventHandler PropertyChanged; private void OnPropertyChanged(string propertyName) { if (PropertyChanged != null) { PropertyChanged(this, new PropertyChangedEventArgs(propertyName)); } } public int Id { get { return id; } set { if (id != value) { id = value; OnPropertyChanged(PropertyOf<Invoice>.Resolve(x => x.Id)); } } } public string Receiver { get { return receiver; } set { receiver = value; OnPropertyChanged(PropertyOf<Invoice>.Resolve(x => x.Receiver)); } } } For the Saver, I decided to change it a little so that it becomes a “view-model agnostic” command, one that can be used for multiple commands/view-models. The updated Saver code now accepts an action at construction time and executes that action – no more black magic: internal class Command : ICommand { private readonly Action executeAction; public Command(Action executeAction) { this.executeAction = executeAction; } public bool CanExecute(object parameter) { return true; } public event EventHandler CanExecuteChanged; public void Execute(object parameter) { // no more black magic executeAction(); } } The change in InvoiceViewModel is the instantiation of the Command and the execute action for the specific command. public ICommand SaveCommand { get { if (saveCommand == null) saveCommand = new Command(ExecuteAction); return saveCommand; } set { saveCommand = value; } } private void ExecuteAction() { DisplayMessage = string.Format("Thanks for creating invoice: {0} {1}", Invoice.Id, Invoice.Receiver); } This way the internal knowledge of InvoiceViewModel remains in InvoiceViewModel, and Command (ex-Saver) is view-model agnostic. Now the sample is not only a good introduction, but it also has some practicality in it. My 5 cents on the subject. Sample code: MvvmSimple2.zip

    Read the article

  • [Linq to SQL] Multiple foreign keys to the same table

    - by cdonner
    I have a reference table with all sorts of controlled-value lookup data for gender, address type, contact type, etc. Many tables have multiple foreign keys to this reference table. I also have many-to-many association tables that have two foreign keys to the same table. Unfortunately, when these tables are pulled into a Linq model and the DBML is generated, SQLMetal does not look at the names of the foreign key columns, or the names of the constraints, but only at the target table. So I end up with members called Reference1, Reference2, ... not very maintenance-friendly. Example: <Association Name="tb_reference_tb_account" Member="tb_reference" <====== ThisKey="shipping_preference_type_id" OtherKey="id" Type="tb_reference" IsForeignKey="true" /> <Association Name="tb_reference_tb_account1" Member="tb_reference1" <====== ThisKey="status_type_id" OtherKey="id" Type="tb_reference" IsForeignKey="true" /> I can go into the DBML and manually change the member names, of course, but this would mean I can no longer round-trip my database schema. This is not an option at the current stage of the model, which is still evolving. Splitting the reference table into n individual tables is also not desirable. I can probably write a script that runs against the XML after each generation and replaces the member name with something derived from ThisKey (since I adhere to a naming convention for these types of keys). Has anybody found a better solution to this problem?

    Read the article

  • EJB3 JNDI Lookup Failure in JEE application client

    - by Hank
    I'm trying to access an EJB3 from a JEE client-application, but keep getting nothing but lookup failures. My JEE Application 'CoreServer' is exposing a number of beans with remote interfaces. I have no problem accessing them from a Web Application deployed on the same Glassfish v3.0.1. Now I'm trying to access it from a client-application: public class Main { public static void main(String[] args) { CampaignControllerRemote bean = null; try { InitialContext ctx = new InitialContext(); bean = (CampaignControllerRemote) ctx.lookup("java:global/CoreServer/CampaignController"); } catch (Exception e) { System.out.println(e.getMessage()); } if (bean != null) { Campaign campaign = bean.get(361); if (campaign != null) { System.out.println("Got "+ campaign); } } } } When I run deploy it to Glassfish and run it from the appclient, I get this error: Lookup failed for 'java:global/CoreServer/CampaignController' in SerialContext targetHost=localhost,targetPort=3700,orb'sInitialHost=localhost,orb'sInitialPort=3700 However, that's exactly the same JNDI-name I use when I lookup the bean from the WebApplication (via SessionContext, not InitialContext - does that matter?). Also, when I deploy 'CoreServer', Glassfish reports: Portable JNDI names for EJB CampaignController : [java:global/CoreServer/CampaignController!mvs.api.CampaignControllerRemote, java:global/CoreServer/CampaignController] Glassfish-specific (Non-portable) JNDI names for EJB CampaignController : [mvs.api.CampaignControllerRemote, mvs.api.CampaignControllerRemote#mvs.api.CampaignControllerRemote] I tried all four names, none worked. Is the appclient unable to access beans with (only) Remote interfaces?

    Read the article

  • C++ Set Erase Entry Question

    - by Wallace
    Hi. I encountered a problem here. I'm using C++ multiset. This is the test file. Score: 3-1 Ben Steven Score: 1-0 Ben Score: 0-0 Score: 1-1 Cole Score: 1-2 Ben I'm using while loop and ifstream (fin1) to read in from the test file above. multiset<string, less<string> > myset; while(!fin1.eof()) { fin1 >> scoreName; if(scoreName == "Score:") { //calculates number of matches played } else { goalCheck = scoreName.substr(1,1); if(goalCheck == "-") { string lGoal, rGoal; lGoal = scoreName.substr(0,1); rGoal = scoreName.substr(2,1); int leftGoal, rightGoal; leftGoal = atoi(lGoal.c_str()); rightGoal = atoi(rGoal.c_str()); if(leftGoal > rightGoal) //if team wins { //some computations } else if(leftGoal < rightGoal) //if team loses { //computations } else if(leftGoal == rightGoal) //if team draws { //computations } else { myset.insert(myset.begin(), scoreName); } } } I'm inserting all names into myset (regardless of wins/loses/draws) in my last else statement. But I only require the names of those matches who won/draw. Those names whose matches lost will not be included in myset. In the test file above, there's only one match that lost (1-2) and I wanted to remove "Ben". How can I do that? I tried to use myset.erase(), but I'm not sure how to get it point to Ben and remove it from myset. Any help is much appreciated. Thanks.

    Read the article

  • Subset a data.frame by list and apply function on each part, by rows

    - by aL3xa
    This may seem as a typical plyr problem, but I have something different in mind. Here's the function that I want to optimize (skip the for loop). # dummy data set.seed(1985) lst <- list(a=1:10, b=11:15, c=16:20) m <- matrix(round(runif(200, 1, 7)), 10) m <- as.data.frame(m) dfsub <- function(dt, lst, fun) { # check whether dt is `data.frame` stopifnot (is.data.frame(dt)) # check if vectors in lst are "whole" / integer # vector elements should be column indexes is.wholenumber <- function(x, tol = .Machine$double.eps^0.5) abs(x - round(x)) < tol # fall if any non-integers in list idx <- rapply(lst, is.wholenumber) stopifnot(idx) # check for list length stopifnot(ncol(dt) == length(idx)) # subset the data subs <- list() for (i in 1:length(lst)) { # apply function on each part, by row subs[[i]] <- apply(dt[ , lst[[i]]], 1, fun) } # preserve names names(subs) <- names(lst) # convert to data.frame subs <- as.data.frame(subs) # guess what =) return(subs) } And now a short demonstration... actually, I'm about to explain what I primarily intended to do. I wanted to subset a data.frame by vectors gathered in list object. Since this is a part of code from a function that accompanies data manipulation in psychological research, you can consider m as a results from personality questionnaire (10 subjects, 20 vars). Vectors in list hold column indexes that define questionnaire subscales (e.g. personality traits). Each subscale is defined by several items (columns in data.frame). If we presuppose that the score on each subscale is nothing more than sum (or some other function) of row values (results on that part of questionnaire for each subject), you could run: > dfsub(m, lst, sum) a b c 1 46 20 24 2 41 24 21 3 41 13 12 4 37 14 18 5 57 18 25 6 27 18 18 7 28 17 20 8 31 18 23 9 38 14 15 10 41 14 22 I took a glance at this function and I must admit that this little loop isn't spoiling the code at all... BUT, if there's an easier/efficient way of doing this, please, let me know!

    Read the article

  • Help matching fields between two classes

    - by Michael
    I'm not too experienced with Java yet, and I'm hoping someone can steer me in the right direction because right now I feel like I'm just beating my head against a wall... The first class is called MeasuredParams, and it's got 40+ numeric fields (height, weight, waistSize, wristSize - some int, but mostly double). The second class is a statistical classifier called Classifier. It's been trained on a subset of the MeasuredParams fields. The names of the fields that the Classifier has been trained on are stored, in order, in an array called reqdFields. What I need to do is load a new array, toClassify, with the values stored in the fields from MeasuredParams that match the field list (including order) found in reqdFields. I can make any changes necessary to the MeasuredParams class, but I'm stuck with Classifier as it is. My brute-force approach was to get rid of the fields in MeasuredParams and use an arrayList instead, and store the field names in an Enum object to act as an index pointer. Then loop through the reqdFields list, one element at a time, and find the matching name in the Enum object to get the correct position in the arrayList. Load the value stored at that position into toClassify, and then continue on to the next element in reqdFields. I'm not sure how exactly I would search through the Enum object - it would be a lot easier if the field names were stored in a second arrayList. But then the index positions between the two would have to stay matched, and I'm back to using an Enum. I think. I've been running around in circles all afternoon, and I keep thinking there must be an easier way of doing it. I'm just stuck right now and can't see past what I've started. Any help would be GREATLY appreciated. Thanks so much! Michael

    Read the article

  • How can I write reusable Javascript?

    - by RenderIn
    I've started to wrap my functions inside of objects, e.g.: var Search = { carSearch: function(color) { }, peopleSearch: function(name) { }, ... } This helps a lot with readability, but I continue to have issues with reusability. To be more specific, the difficulty is in two areas: 1) Receiving parameters. A lot of times I will have a search screen with multiple input fields and a button that calls the JavaScript search function. I have to either put a bunch of code in the onclick of the button to retrieve and then marshal the values from the input fields into the function call, or I have to hardcode the HTML input field names/IDs so that I can subsequently retrieve them with JavaScript. The solution I've settled on for this is to pass the field names/IDs into the function, which it then uses to retrieve the values from the input fields. This is simple but really seems improper. 2) Returning values. The effect of most JavaScript calls tends to be one in which some visual on the screen changes directly, or as a result of another action performed in the call. Reusability is toast when I put these screen-altering effects at the end of a function. For example, after a search is completed I need to display the results on the screen. How do others handle these issues? Putting my thinking cap on leads me to believe that I need to have a page-specific layer of JavaScript between each use in my application and the generic methods I create which are to be used application-wide. Using the previous example, I would have a search button whose onclick calls a myPageSpecificSearchFunction, in which the search field IDs/names are hardcoded, which marshals the parameters and calls the generic search function. The generic function would return data/objects/variables only, and would not directly read from or make any changes to the DOM. The page-specific search function would then receive this data back and alter the DOM appropriately. Am I on the right path or is there a better pattern to handle the reuse of JavaScript objects/methods?
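    To make that layering concrete, here is a minimal sketch of the pattern the question converges on; the element IDs, function names, and result shape are invented for illustration and are not from the original question. The generic function only accepts values and returns data, while the small page-specific function owns the hardcoded IDs and the DOM updates.

    // Generic, application-wide search: plain values in, plain data out, no DOM access.
    var Search = {
        carSearch: function (color) {
            // ...query the server or filter a local array here...
            return [{ model: "Roadster", color: color }]; // placeholder result set
        }
    };

    // Page-specific glue: the only place that knows this page's field IDs and markup.
    function myPageSpecificSearchFunction() {
        var color = document.getElementById("carColorInput").value; // hardcoded ID lives here only
        var results = Search.carSearch(color);
        var html = "";
        for (var i = 0; i < results.length; i++) {
            html += "<div>" + results[i].model + " (" + results[i].color + ")</div>";
        }
        document.getElementById("searchResults").innerHTML = html;
    }

    With this split, Search.carSearch can be reused from any screen, and only the thin page-specific wrapper has to change when the markup does.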

    Read the article

  • How can I turn a column name into a result value in SQL Server?

    - by Brennan
    I have a table which has essentially boolean values in a legacy database. The column names are stored as string values in another table, so I need to match the column names of one table to a string value in another table. I know there has to be a way to do this directly with SQL in SQL Server, but it is beyond me. My initial thought was to use PIVOT, but it is not enabled by default and enabling it would likely be a difficult process with pushing that change to the Production database. I would prefer to use what is enabled by default. I am considering using COALESCE to translate the boolean value to the string value that I need. This will be a manual process. I think I will also use a table variable to insert the results of the first query into that variable and use those results to do the second query. I still have the problem that the columns are on a single row, so I wish I could easily pivot the values to put the column names in the result set as strings. But if I could easily do that, I could easily write the query with a sub-select. Any tips are welcome.

    Read the article

  • ERROR! (Using Excel's named ranges from C#)

    - by mcoolbeth
    In the following, I am trying to persist a set of objects in an excel worksheet. Each time the function is called to store a value, it should allocate the next cell of the A column to store that object. However, an exception is thrown by the Interop library on the first call to get_Range(). (right after the catch block) Does anyone know what I am doing wrong? private void AddName(string name, object value) { Excel.Worksheet jresheet; try { jresheet = (Excel.Worksheet)_app.ActiveWorkbook.Sheets["jreTemplates"]; } catch { jresheet = (Excel.Worksheet)_app.ActiveWorkbook.Sheets.Add(Type.Missing, Type.Missing, Type.Missing, Type.Missing); jresheet.Visible = Microsoft.Office.Interop.Excel.XlSheetVisibility.xlSheetVeryHidden; jresheet.Name = "jreTemplates"; jresheet.Names.Add("next", "A1", true, Type.Missing, Type.Missing, Type.Missing, Type.Missing, Type.Missing, Type.Missing, Type.Missing, Type.Missing); } Excel.Range cell = jresheet.get_Range("next", Type.Missing); cell.Value2 = value; string address = ((Excel.Name)cell.Name).Name; _app.ActiveWorkbook.Names.Add(name, address, false, Type.Missing, Type.Missing, Type.Missing, Type.Missing, Type.Missing, Type.Missing, Type.Missing, Type.Missing); cell = cell.get_Offset(1, 0); jresheet.Names.Add("next", ((Excel.Name)cell.Name).Name, true, Type.Missing, Type.Missing, Type.Missing, Type.Missing, Type.Missing, Type.Missing, Type.Missing, Type.Missing); }

    Read the article

  • Efficient alternative to merge() when building dataframe from json files with R?

    - by Bryan
    I have written the following code, which works but is painfully slow once I start executing it over thousands of records: require("RJSONIO") people_data <- data.frame(person_id=numeric(0)) json_data <- fromJSON(json_file) n_people <- length(json_data) for(person in 1:n_people) { person_dataframe <- as.data.frame(t(unlist(json_data[[person]]))) people_data <- merge(people_data, person_dataframe, all=TRUE) } output_file <- paste("people_data",".csv") write.csv(people_data, file=output_file) I am attempting to build a unified data table from a series of JSON-formatted files. The fromJSON() function reads in the data as lists of lists. Each element of the list is a person, which then contains a list of the attributes for that person. For example: [[1]] person_id name gender hair_color [[2]] person_id name location gender height [[...]] structure(list(person_id = "Amy123", name = "Amy", gender = "F", hair_color = "brown"), .Names = c("person_id", "name", "gender", "hair_color")) structure(list(person_id = "matt53", name = "Matt", location = structure(c(47231, "IN"), .Names = c("zip_code", "state")), gender = "M", height = 172), .Names = c("person_id", "name", "location", "gender", "height")) The end result of the code above is a matrix where the columns are every person-attribute that appears in the structure above, and the rows are the relevant values for each person. As you can see though, some data is missing for some of the people, so I need to ensure those show up as NA and make sure things end up in the right columns. Further, location itself is a vector with two components: state and zip_code, meaning it needs to be flattened to location.state and location.zip_code before it can be merged with another person record; this is what I use unlist() for. I then keep the running master table in people_data. The above code works, but do you know of a more efficient way to accomplish what I'm trying to do? It appears the merge() is slowing this to a crawl... I have hundreds of files with hundreds of people in each file. Thanks! Bryan

    Read the article

  • Is there a way to effect user defined data types in MySQL?

    - by Dancrumb
    I have a database which stores (among other things), the following pieces of information: Hardware IDs BIGINTs Storage Capacities BIGINTs Hardware Names VARCHARs World Wide Port Names VARCHARs I'd like to be able to capture a more refined definition of these datatypes. For instance, the hardware IDs have no numerical significance, so I don't care how they are formatted when displayed. The Storage Capacities, however, are cardinal numbers and, at a user's request, I'd like to present them with thousands and decimal separators, e.g. 123,456.789. Thus, I'd like to refine BIGINT into, say ID_NUMBER and CARDINAL. The same with Hardware Names, which are simple text and WWPNs, which are hexstrings, e.g. 24:68:AC:E0. Thus, I'd like to refine VARCHAR into ENGLISH_WORD and HEXSTRING. The specific datatypes I made up are just for illustrative purposes. I'd like to keep all this information in one place and I'm wondering if anybody knows of a good way to hold this all in my MySQL table definitions. I could use the Comment field of the table definition, but that smells fishy to me. One approach would be to define the data structure elsewhere and use that definition to generate my CREATE TABLEs, but that would be a major rework of the code that I currently have, so I'm looking for alternatives. Any suggestions? The application language in use is Perl, if that helps.

    Read the article

  • using jquery growl with php/mysql

    - by jeansymolanza
    On my database I am planning to create a table storing messages to alert users of anything they need to do. I am looking at using a jQuery growl-like notification method, but I'm confused about how I would begin building it. The data would be added into the database using a standard MySQL INSERT from a form, but how would I select messages from the database to display using the jQuery growl? Would this require the use of AJAX? This is the JavaScript code I have so far; I was wondering how I would implement the PHP code alongside it so that I can pull out data from my tables to display as notifications: <script type="text/javascript"> // In case you don't have firebug... if (!window.console || !console.firebug) { var names = ["log", "debug", "info", "warn", "error", "assert", "dir", "dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace", "profile", "profileEnd"]; window.console = {}; for (var i = 0; i < names.length; ++i) window.console[names[i]] = function() {}; } (function($){ $(document).ready(function(){ // This specifies how many messages can be pooled out at any given time. // If there are more notifications raised than the pool, the others are // placed into a queue and rendered after the others have disappeared. $.jGrowl.defaults.pool = 5; var i = 1; var y = 1; setInterval( function() { if ( i < 3 ) { $.jGrowl("Message " + i, { sticky: true, log: function() { console.log("Creating message " + i + "..."); }, beforeOpen: function() { console.log("Rendering message " + y + "..."); y++; } }); } i++; } , 1000 ); }); })(jQuery); </script> Thanking you in advance and God bless
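    One rough way to wire this up, sketched under assumptions (the endpoint name and JSON field names below are invented, not part of the question): a small PHP script SELECTs the unread rows from the messages table and echoes them as JSON, and the page polls that script with jQuery and hands each message to $.jGrowl.

    // Poll a hypothetical PHP endpoint that returns JSON such as
    // [{"id": 1, "message": "Invoice #42 needs approval"}, ...]
    function pollNotifications() {
        $.getJSON("get_notifications.php", function (rows) {
            $.each(rows, function (index, row) {
                $.jGrowl(row.message, { header: "Reminder" });
            });
        });
    }

    $(document).ready(function () {
        pollNotifications();                    // show anything pending on page load
        setInterval(pollNotifications, 15000);  // then check again every 15 seconds
    });

    On the PHP side, the same script would typically mark the rows it returned as read (or delete them) so a notification is not re-displayed on the next poll; that part is ordinary mysqli/PDO code driven by the messages table described above.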

    Read the article

  • Comparable and Comparator contract with regards to null

    - by polygenelubricants
    Comparable contract specifies that e.compareTo(null) must throw NullPointerException. From the API: Note that null is not an instance of any class, and e.compareTo(null) should throw a NullPointerException even though e.equals(null) returns false. On the other hand, Comparator API mentions nothing about what needs to happen when comparing null. Consider the following attempt of a generic method that takes a Comparable, and return a Comparator for it that puts null as the minimum element. static <T extends Comparable<? super T>> Comparator<T> nullComparableComparator() { return new Comparator<T>() { @Override public int compare(T el1, T el2) { return el1 == null ? -1 : el2 == null ? +1 : el1.compareTo(el2); } }; } This allows us to do the following: List<Integer> numbers = new ArrayList<Integer>( Arrays.asList(3, 2, 1, null, null, 0) ); Comparator<Integer> numbersComp = nullComparableComparator(); Collections.sort(numbers, numbersComp); System.out.println(numbers); // "[null, null, 0, 1, 2, 3]" List<String> names = new ArrayList<String>( Arrays.asList("Bob", null, "Alice", "Carol") ); Comparator<String> namesComp = nullComparableComparator(); Collections.sort(names, namesComp); System.out.println(names); // "[null, Alice, Bob, Carol]" So the questions are: Is this an acceptable use of a Comparator, or is it violating an unwritten rule regarding comparing null and throwing NullPointerException? Is it ever a good idea to even have to sort a List containing null elements, or is that a sure sign of a design error?

    Read the article

  • Creating ListView with check boxes...Android

    - by deewangan
    Hello, I am trying to create a ListView that has a check box beside each item (I am following code from a book on Android), but I can't get it to work: every time I run it, it crashes. Since I am very new to this Android stuff, I have no clue what to do to get it to work, and any help is appreciated. The code is below. I have created two layout files, named lists and list_item. The code for the main activity: public class ListDemoActivity extends ListActivity { /** Called when the activity is first created. */ String[] listItems = {"exploring", "android","list", "activities"}; private SimpleCursorAdapter adapter; @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.lists); //setListAdapter(new ArrayAdapter(this,android.R.layout.simple_list_item_1, listItems)); //setContentView(R.layout.lists); Cursor c = getContentResolver().query(People.CONTENT_URI, null, null, null, null); startManagingCursor(c); String[] cols = new String[]{People.NAME}; int[] names = new int[]{R.id.row_tv}; adapter = new SimpleCursorAdapter(this,R.layout.list_item,c,cols,names); this.setListAdapter(adapter); } } The lists file content: <?xml version="1.0" encoding="utf-8"?> and the list_item file content: <?xml version="1.0" encoding="utf-8"?>

    Read the article

< Previous Page | 50 51 52 53 54 55 56 57 58 59 60 61  | Next Page >