Search Results

Search found 10194 results on 408 pages for 'raw types'.

Page 399/408 | < Previous Page | 395 396 397 398 399 400 401 402 403 404 405 406  | Next Page >

  • [SOLVED] Iphone NSXMLParser NSCFString memory leak

    - by atticusalien
    I am building an app that parses an rss feed. In the app there are two different types of feeds with different names for the elements in the feed, so I have created an NSXMLParser NSObject that takes the name of the elements of each feed before parsing. Here is my code: NewsFeedParser.h #import @interface NewsFeedParser : NSObject { NSInteger NewsSelectedCategory; NSXMLParser *NSXMLNewsParser; NSMutableArray *newsCategories; NSMutableDictionary *NewsItem; NSMutableString *NewsCurrentElement, *NewsCurrentElement1, *NewsCurrentElement2, *NewsCurrentElement3; NSString *NewsItemType, *NewsElement1, *NewsElement2, *NewsElement3; NSInteger NewsNumElements; } - (void) parseXMLFileAtURL:(NSString *)URL; @property(nonatomic, retain) NSString *NewsItemType; @property(nonatomic, retain) NSString *NewsElement1; @property(nonatomic, retain) NSString *NewsElement2; @property(nonatomic, retain) NSString *NewsElement3; @property(nonatomic, retain) NSMutableArray *newsCategories; @property(assign, nonatomic) NSInteger NewsNumElements; @end NewsFeedParser.m #import "NewsFeedParser.h" @implementation NewsFeedParser @synthesize NewsItemType; @synthesize NewsElement1; @synthesize NewsElement2; @synthesize NewsElement3; @synthesize newsCategories; @synthesize NewsNumElements; - (void)parserDidStartDocument:(NSXMLParser *)parser{ } - (void)parseXMLFileAtURL:(NSString *)URL { newsCategories = [[NSMutableArray alloc] init]; URL = [URL stringByReplacingOccurrencesOfString:@" " withString:@""]; URL = [URL stringByReplacingOccurrencesOfString:@"\n" withString:@""]; URL = [URL stringByReplacingOccurrencesOfString:@" " withString:@""]; //you must then convert the path to a proper NSURL or it won't work NSURL *xmlURL = [NSURL URLWithString:URL]; // here, for some reason you have to use NSClassFromString when trying to alloc NSXMLParser, otherwise you will get an object not found error // this may be necessary only for the toolchain [[NSURLCache sharedURLCache] setMemoryCapacity:0]; [[NSURLCache sharedURLCache] setDiskCapacity:0]; NSXMLNewsParser = [[NSXMLParser alloc] initWithContentsOfURL:xmlURL]; // Set self as the delegate of the parser so that it will receive the parser delegate methods callbacks. [NSXMLNewsParser setDelegate:self]; // Depending on the XML document you're parsing, you may want to enable these features of NSXMLParser. [NSXMLNewsParser setShouldProcessNamespaces:NO]; [NSXMLNewsParser setShouldReportNamespacePrefixes:NO]; [NSXMLNewsParser setShouldResolveExternalEntities:NO]; [NSXMLNewsParser parse]; [NSXMLNewsParser release]; } - (void)parser:(NSXMLParser *)parser parseErrorOccurred:(NSError *)parseError { NSString * errorString = [NSString stringWithFormat:@"Unable to download story feed from web site (Error code %i )", [parseError code]]; NSLog(@"error parsing XML: %@", errorString); UIAlertView * errorAlert = [[UIAlertView alloc] initWithTitle:@"Error loading content" message:errorString delegate:self cancelButtonTitle:@"OK" otherButtonTitles:nil]; [errorAlert show]; [errorAlert release]; [errorString release]; } - (void)parser:(NSXMLParser *)parser didStartElement:(NSString *)elementName namespaceURI:(NSString *)namespaceURI qualifiedName:(NSString *)qName attributes:(NSDictionary *)attributeDict{ NewsCurrentElement = [elementName copy]; if ([elementName isEqualToString:NewsItemType]) { // clear out our story item caches... 
NewsItem = [[NSMutableDictionary alloc] init]; NewsCurrentElement1 = [[NSMutableString alloc] init]; NewsCurrentElement2 = [[NSMutableString alloc] init]; if(NewsNumElements == 3) { NewsCurrentElement3 = [[NSMutableString alloc] init]; } } } - (void)parser:(NSXMLParser *)parser didEndElement:(NSString *)elementName namespaceURI:(NSString *)namespaceURI qualifiedName:(NSString *)qName{ if ([elementName isEqualToString:NewsItemType]) { // save values to an item, then store that item into the array... [NewsItem setObject:NewsCurrentElement1 forKey:NewsElement1]; [NewsItem setObject:NewsCurrentElement2 forKey:NewsElement2]; if(NewsNumElements == 3) { [NewsItem setObject:NewsCurrentElement3 forKey:NewsElement3]; } [newsCategories addObject:[[NewsItem copy] autorelease]]; [NewsCurrentElement release]; [NewsCurrentElement1 release]; [NewsCurrentElement2 release]; if(NewsNumElements == 3) { [NewsCurrentElement3 release]; } [NewsItem release]; } } - (void)parser:(NSXMLParser *)parser foundCharacters:(NSString *)string { //NSLog(@"found characters: %@", string); // save the characters for the current item... if ([NewsCurrentElement isEqualToString:NewsElement1]) { [NewsCurrentElement1 appendString:string]; } else if ([NewsCurrentElement isEqualToString:NewsElement2]) { [NewsCurrentElement2 appendString:string]; } else if (NewsNumElements == 3 && [NewsCurrentElement isEqualToString:NewsElement3]) { [NewsCurrentElement3 appendString:string]; } } - (void)dealloc { [super dealloc]; [newsCategories release]; [NewsItemType release]; [NewsElement1 release]; [NewsElement2 release]; [NewsElement3 release]; } When I create an instance of the class I do like so: NewsFeedParser *categoriesParser = [[NewsFeedParser alloc] init]; if(newsCat == 0) { categoriesParser.NewsItemType = @"article"; categoriesParser.NewsElement1 = @"category"; categoriesParser.NewsElement2 = @"catid"; } else { categoriesParser.NewsItemType = @"article"; categoriesParser.NewsElement1 = @"category"; categoriesParser.NewsElement2 = @"feedUrl"; } [categoriesParser parseXMLFileAtURL:feedUrl]; newsCategories = [[NSMutableArray alloc] initWithArray:categoriesParser.newsCategories copyItems:YES]; [self.tableView reloadData]; [categoriesParser release]; If I run the app with the leaks instrument, the leaks point to the [NSXMLNewsParser parse] call in the NewsFeedParser.m. Here is a screen shot of the Leaks instrument with the NSCFStrings leaking: http://img139.imageshack.us/img139/3997/leaks.png For the life of me I can't figure out where these leaks are coming from. Any help would be greatly appreciated.

    Read the article

  • migrating webclient to WCF; WCF client serializes parametername of method

    - by Wouter
    I'm struggling with migrating from webservice/webclient architecture to WCF architecture. The object are very complex, with lots of nested xsd's and different namespaces. Proxy classes are generated by adding a Web Reference to an original wsdl with 30+ webmethods and using xsd.exe for generating the missing SOAPFault objects. My pilot WCF Service consists of only 1 webmethod which matches the exact syntax of one of the original methods: 1 object as parameter, returning 1 other object as result value. I greated a WCF Interface using those proxy classes, using attributes: XMLSerializerFormat and ServiceContract on the interface, OperationContract on one method from original wsdl specifying Action, ReplyAction, all with the proper namespaces. I create incoming client messages using SoapUI; I generated a project from the original WSDL files (causing the SoapUI project to have 30+ methods) and created one new Request at the one implemented WebMethod, changed the url to my wcf webservice and send the message. Because of the specified (Reply-)Action in the OperationContractAttribute, the message is actually received and properly deserialized into an object. To get this far (40 hours of googling), a lot of frustration led me to using a custom endpoint in which the WCF 'wrapped tags' are removed, the namespaces for nested types are corrected, and the generated wsdl get's flattened (for better compatibility with other tools then MS VisualStudio). Interface code is this: [XmlSerializerFormat(Use = OperationFormatUse.Literal, Style = OperationFormatStyle.Document, SupportFaults = true)] [ServiceContract(Namespace = Constants.NamespaceStufZKN)] public interface IOntvangAsynchroon { [OperationContract(Action = Constants.NamespaceStufZKN + "/zakLk01", ReplyAction = Constants.NamespaceStufZKN + "/zakLk01", Name = "zakLk01")] [FaultContract(typeof(Fo03Bericht), Namespace = Constants.NamespaceStuf)] Bv03Bericht zakLk01([XmlElement("zakLk01", Namespace = Constants.NamespaceStufZKN)] ZAKLk01 zakLk011); When I use a Webclient in code to send a message, everything works. My problem is, when I use a WCF client. I use ChannelFactory< IOntvangAsynchroon to send a message. But the generated xml looks different: it includes the parametername of the method! It took me a lot of time to figure this one out, but here's what happens: Correct xml (stripped soap envelope): <soap:Body> <zakLk01 xmlns="http://www.egem.nl/StUF/sector/zkn/0310"> <stuurgegevens> <berichtcode xmlns="http://www.egem.nl/StUF/StUF0301">Bv01</berichtcode> <zender xmlns="http://www.egem.nl/StUF/StUF0301"> <applicatie>ONBEKEND</applicatie> </zender> </stuurgegevens> <parameters> </parameters> </zakLk01> </soap:Body> Bad xml: <soap:Body> <zakLk01 xmlns="http://www.egem.nl/StUF/sector/zkn/0310"> <zakLk011> <stuurgegevens> <berichtcode xmlns="http://www.egem.nl/StUF/StUF0301">Bv01</berichtcode> <zender xmlns="http://www.egem.nl/StUF/StUF0301"> <applicatie>ONBEKEND</applicatie> </zender> </stuurgegevens> <parameters> </parameters> </zakLk011> </zakLk01> </soap:Body> Notice the 'zakLk011' element? It is the name of the parameter of the method in my interface! So NOW it is zakLk011, but it when my parameter name was 'zakLk01', the xml seemed to contain some magical duplicate of the tag above, but without namespace. Of course, you can imagine me going crazy over what was happening before finding out it was the parametername! I know have actually created a WCF Service, at which I cannot send messages using a WCF Client anymore. 
For clarity: The method does get invoked using the WCF Client on my webservice, but the parameter object is empty. Because I'm using a custom endpoint to log the incoming xml, I can see the message is received fine, but just with the wrong syntax! WCF client code: ZAKLk01 stufbericht = MessageFactory.CreateZAKLk01(); ChannelFactory<IOntvangAsynchroon> factory = new ChannelFactory<IOntvangAsynchroon>(new BasicHttpBinding(), new EndpointAddress("http://localhost:8193/Roxit/Link/zkn0310")); factory.Endpoint.Behaviors.Add(new LinkEndpointBehavior()); IOntvangAsynchroon client = factory.CreateChannel(); client.zakLk01(stufbericht); I am not using a generated client, i just reference the webservice like i am lot's of times. Can anyone please help me? I can't google anything on this...

    Read the article

  • C question: Padding bits in unsigned integers and bitwise operations (C89)

    - by Anonymous Question Guy
    I have a lot of code that performs bitwise operations on unsigned integers. I wrote my code with the assumption that those operations were on integers of fixed width without any padding bits. For example an array of 32 bit unsigned integers of which all 32 bits available for each integer. I'm looking to make my code more portable and I'm focused on making sure I'm C89 compliant (in this case). One of the issues that I've come across is possible padded integers. Take this extreme example, taken from the GMP manual: However on Cray vector systems it may be noted that short and int are always stored in 8 bytes (and with sizeof indicating that) but use only 32 or 46 bits. The nails feature can account for this, by passing for instance 8*sizeof(int)-INT_BIT. I've also read about this type of padding in other places. I actually read of a post on SO last night (forgive me, I don't have the link and I'm going to cite something similar from memory) where if you have, say, a double with 60 usable bits the other 4 could be used for padding and those padding bits could serve some internal purpose so they cannot be modified. So let's say for example my code is compiled on a platform where an unsigned int type is sized at 4 bytes, each byte being 8 bits, however the most significant 2 bits are padding bits. Would UINT_MAX in that case be 0x3FFFFFFF (1073741823) ? #include <stdio.h> #include <stdlib.h> /* padding bits represented by underscores */ int main( int argc, char **argv ) { unsigned int a = 0x2AAAAAAA; /* __101010101010101010101010101010 */ unsigned int b = 0x15555555; /* __010101010101010101010101010101 */ unsigned int c = a ^ b; /* ?? __111111111111111111111111111111 */ unsigned int d = c << 5; /* ?? __111111111111111111111111100000 */ unsigned int e = d >> 5; /* ?? __000001111111111111111111111111 */ printf( "a: %X\nb: %X\nc: %X\nd: %X\ne: %X\n", a, b, c, d, e ); return 0; } is it safe to XOR two integers with padding bits? wouldn't I XOR whatever the padding bits are? I can't find this behavior covered in C89. furthermore is the c var guaranteed to be 0x3FFFFFFF or if for example the two padding bits were both on in a or b would c be 0xFFFFFFFF ? same question with d and e. am i manipulating the padding bits by shifting? I would expect to see this below, assuming 32 bits with the 2 most significant bits used for padding, but I want to know if something like this is guaranteed: a: 2AAAAAAA b: 15555555 c: 3FFFFFFF d: 3FFFFFE0 e: 01FFFFFF Also are padding bits always the most significant bits or could they be the least significant bits? Thanks guys EDIT 12/19/2010 5PM EST: Christoph has answered my question. Thanks! I had also asked (above) whether padding bits are always the most significant bits. This is cited in the rationale for the C99 standard, and the answer is no. I am playing it safe and assuming the same for C89. Here is specifically what the C99 rationale says for §6.2.6.2 (Representation of Integer Types): Padding bits are user-accessible in an unsigned integer type. For example, suppose a machine uses a pair of 16-bit shorts (each with its own sign bit) to make up a 32-bit int and the sign bit of the lower short is ignored when used in this 32-bit int. Then, as a 32-bit signed int, there is a padding bit (in the middle of the 32 bits) that is ignored in determining the value of the 32-bit signed int. But, if this 32-bit item is treated as a 32-bit unsigned int, then that padding bit is visible to the user’s program. 
The C committee was told that there is a machine that works this way, and that is one reason that padding bits were added to C99. Footnotes 44 and 45 mention that parity bits might be padding bits. The committee does not know of any machines with user-accessible parity bits within an integer. Therefore, the committee is not aware of any machines that treat parity bits as padding bits. EDIT 12/28/2010 3PM EST: I found an interesting discussion on comp.lang.c from a few months ago. Bitwise Operator Effects on Padding Bits (VelocityReviews reader) Bitwise Operator Effects on Padding Bits (Google Groups alternate link) One point made by Dietmar which I found interesting: Let's note that padding bits are not necessary for the existence of trap representations; combinations of value bits which do not represent a value of the object type would also do.

    Read the article

  • Problem with From field in contact form and mail() function

    - by Matthew
    I've got a contact form with 3 fields and a textarea... I use jQuery to validate it and then php to send emails. This contact form works fine but, when I receive an email, From field isn't correct. I'd like to want that From field shows text typed in the Name field of the contact form. Now I get a From field like this: <[email protected]> For example, if an user types "Matthew" in the name field, I'd like to want that this word "Matthew" appears in the From field. This is my code (XHTML, jQuery, PHP): <div id="contact"> <h3 id="formHeader">Send Us a Message!</h3> <form id="contactForm" method="post" action=""> <div id="risposta"></div> <!-- End Risposta Div --> <span>Name:</span> <input type="text" id="formName" value="" /><br /> <span>E-mail:</span> <input type="text" id="formEmail" value="" /><br /> <span>Subject:</span> <input type="text" id="formSubject" value="" /><br /> <span>Message:</span> <textarea id="formMessage" rows="9" cols="20"></textarea><br /> <input type="submit" id="formSend" value="Send" /> </form> </div> <script type="text/javascript"> $(document).ready(function(){ $("#formSend").click(function(){ var valid = ''; var nome = $("#formName").val(); var mail = $("#formEmail").val(); var oggetto = $("#formSubject").val(); var messaggio = $("#formMessage").val(); if (nome.length<1) { valid += '<span>Name field empty.</span><br />'; } if (!mail.match(/^([a-z0-9._-]+@[a-z0-9._-]+\.[a-z]{2,4}$)/i)) { valid += '<span>Email not valid or empty field.</span><br />'; } if (oggetto.length<1) { valid += '<span>Subject field empty.</span><br />'; } if (valid!='') { $("#risposta").fadeIn("slow"); $("#risposta").html("<span><b>Error:</b></span><br />"+valid); $("#risposta").css("background-color","#ffc0c0"); } else { var datastr ='nome=' + nome + '&mail=' + mail + '&oggetto=' + oggetto + '&messaggio=' + encodeURIComponent(messaggio); $("#risposta").css("display", "block"); $("#risposta").css("background-color","#FFFFA0"); $("#risposta").html("<span>Sending message...</span>"); $("#risposta").fadeIn("slow"); setTimeout("send('"+datastr+"')",2000); } return false; }); }); function send(datastr){ $.ajax({ type: "POST", url: "contactForm.php", data: datastr, cache: false, success: function(html) { $("#risposta").fadeIn("slow"); $("#risposta").html('<span>Message successfully sent.</span>'); $("#risposta").css("background-color","#e1ffc0"); setTimeout('$("#risposta").fadeOut("slow")',2000); } }); } </script> <?php $mail = $_POST['mail']; $nome = $_POST['nome']; $oggetto = $_POST['oggetto']; $text = $_POST['messaggio']; $ip = $_SERVER['REMOTE_ADDR']; $to = "[email protected]"; $message = $text."<br /><br />IP: ".$ip."<br />"; $headers = "From: $nome \n"; $headers .= "Reply-To: $mail \n"; $headers .= "MIME-Version: 1.0 \n"; $headers .= "Content-Type: text/html; charset=UTF-8 \n"; mail($to, $oggetto, $message, $headers); ?>

    Read the article

  • setIncludesSubentities: in an NSFetchRequest is broken for entities across multiple persistent store

    - by SG
    Prior art which doesn't quite address this: http://stackoverflow.com/questions/1774359/core-data-migration-error-message-model-does-not-contain-configuration-xyz I have narrowed this down to a specific issue. It takes a minute to set up, though; please bear with me. The gist of the issue is that a persistentStoreCoordinator (apparently) cannot preserve the part of an object graph where a managedObject is marked as a subentity of another when they are stored in different files. Here goes... 1) I have 2 xcdatamodel files, each containing a single entity. In runtime, when the managed object model is constructed, I manually define one entity as subentity of another using setSubentities:. This is because defining subentities across multiple files in the editor is not supported yet. I then return the complete model with modelByMergingModels. //Works! [mainEntity setSubentities:canvasEntities]; NSLog(@"confirm %@ is super for %@", [[[canvasEntities lastObject] superentity] name], [[canvasEntities lastObject] name]); //Output: "confirm Note is super for Browser" 2) I have modified the persistentStoreCoordinator method so that it sets a different store for each entity. Technically, it uses configurations, and each entity has one and only one configuration defined. //Also works! for ( NSString *configName in [[HACanvasPluginManager shared].registeredCanvasTypes valueForKey:@"viewControllerClassName"] ) { storeUrl = [NSURL fileURLWithPath:[[self applicationDocumentsDirectory] stringByAppendingPathComponent:[configName stringByAppendingPathExtension:@"sqlite"]]]; //NSLog(@"entities for configuration '%@': %@", configName, [[[self managedObjectModel] entitiesForConfiguration:configName] valueForKey:@"name"]); //Output: "entities for configuration 'HATextCanvasController': (Note)" //Output: "entities for configuration 'HAWebCanvasController': (Browser)" if (![persistentStoreCoordinator addPersistentStoreWithType:NSSQLiteStoreType configuration:configName URL:storeUrl options:options error:&error]) //etc 3) I have a fetchRequest set for the parent entity, with setIncludesSubentities: and setAffectedStores: just to be sure we get both 1) and 2) covered. When inserting objects of either entity, they both are added to the context and they both are fetched by the fetchedResultsController and displayed in the tableView as expected. // Create the fetch request for the entity. NSFetchRequest *fetchRequest = [[NSFetchRequest alloc] init]; [fetchRequest setEntity:entity]; [fetchRequest setIncludesSubentities:YES]; //NECESSARY to fetch all canvas types [fetchRequest setSortDescriptors:sortDescriptors]; [fetchRequest setFetchBatchSize:20]; // Set the batch size to a suitable number. [fetchRequest setAffectedStores:[[managedObjectContext persistentStoreCoordinator] persistentStores]]; [fetchRequest setReturnsObjectsAsFaults:NO]; Here is where it starts misbehaving: after closing and relaunching the app, ONLY THE PARENT ENTITY is fetched. If I change the entity of the request using setEntity: to the entity for 'Note', all notes are fetched. If I change it to the entity for 'Browser', all the browsers are fetched. Let me reiterate that during the run in which an object is first inserted into the context, it will appear in the list. It is only after save and relaunch that a fetch request fails to traverse the hierarchy. Therefore, I can only conclude that it is the storage of the inheritance that is the problem. 
Let's recap why: - Both entities can be created, inserted into the context, and viewed, so the model is working - Both entities can be fetched with a single request, so the inheritance is working - I can confirm that the files are being stored separately and objects are going into their appropriate stores, so saving is working - Launching the app with either entity set for the request works, so retrieval from the store is working - This also means that traversing different stores with the request is working - By using a single store instead of multiple, the problem goes away completely, so creating, storing, fetching, viewing etc is working correctly. This leaves only one culprit (to my mind): the inheritance I'm setting with setSubentities: is effective only for objects creating during the session. Either objects/entities are being stored stripped of the inheritance info, or entity inheritance as defined programmatically only applies to new instances, or both. Either of these is unacceptable. Either it's a bug or I am way, way off course. I have been at this every which way for two days; any insight is greatly appreciated. The current workaround - just using a single store - works completely, except it won't be future-proof in the event that I remove one of the models from the app etc. It also boggles the mind because I can't see why you would have all this infrastructure for storing across multiple stores and for setting affected stores in fetch requests if it by core definition (of setSubentities:) doesn't work.

    Read the article

  • Threading extra state through a parser in Scala

    - by Travis Brown
    I'll give you the tl;dr up front I'm trying to use the state monad transformer in Scalaz 7 to thread extra state through a parser, and I'm having trouble doing anything useful without writing a lot of t m a -> t m b versions of m a -> m b methods. An example parsing problem Suppose I have a string containing nested parentheses with digits inside them: val input = "((617)((0)(32)))" I also have a stream of fresh variable names (characters, in this case): val names = Stream('a' to 'z': _*) I want to pull a name off the top of the stream and assign it to each parenthetical expression as I parse it, and then map that name to a string representing the contents of the parentheses, with the nested parenthetical expressions (if any) replaced by their names. To make this more concrete, here's what I'd want the output to look like for the example input above: val target = Map( 'a' -> "617", 'b' -> "0", 'c' -> "32", 'd' -> "bc", 'e' -> "ad" ) There may be either a string of digits or arbitrarily many sub-expressions at a given level, but these two kinds of content won't be mixed in a single parenthetical expression. To keep things simple, we'll assume that the stream of names will never contain either duplicates or digits, and that it will always contain enough names for our input. Using parser combinators with a bit of mutable state The example above is a slightly simplified version of the parsing problem in this Stack Overflow question. I answered that question with a solution that looked roughly like this: import scala.util.parsing.combinator._ class ParenParser(names: Iterator[Char]) extends RegexParsers { def paren: Parser[List[(Char, String)]] = "(" ~> contents <~ ")" ^^ { case (s, m) => (names.next -> s) :: m } def contents: Parser[(String, List[(Char, String)])] = "\\d+".r ^^ (_ -> Nil) | rep1(paren) ^^ ( ps => ps.map(_.head._1).mkString -> ps.flatten ) def parse(s: String) = parseAll(paren, s).map(_.toMap) } It's not too bad, but I'd prefer to avoid the mutable state. What I want Haskell's Parsec library makes adding user state to a parser trivially easy: import Control.Applicative ((*>), (<$>), (<*)) import Data.Map (fromList) import Text.Parsec paren = do (s, m) <- char '(' *> contents <* char ')' h : t <- getState putState t return $ (h, s) : m where contents = flip (,) [] <$> many1 digit <|> (\ps -> (map (fst . head) ps, concat ps)) <$> many1 paren main = print $ runParser (fromList <$> paren) ['a'..'z'] "example" "((617)((0)(32)))" This is a fairly straightforward translation of my Scala parser above, but without mutable state. What I've tried I'm trying to get as close to the Parsec solution as I can using Scalaz's state monad transformer, so instead of Parser[A] I'm working with StateT[Parser, Stream[Char], A]. I have a "solution" that allows me to write the following: import scala.util.parsing.combinator._ import scalaz._, Scalaz._ object ParenParser extends ExtraStateParsers[Stream[Char]] with RegexParsers { protected implicit def monadInstance = parserMonad(this) def paren: ESP[List[(Char, String)]] = (lift("(" ) ~> contents <~ lift(")")).flatMap { case (s, m) => get.flatMap( names => put(names.tail).map(_ => (names.head -> s) :: m) ) } def contents: ESP[(String, List[(Char, String)])] = lift("\\d+".r ^^ (_ -> Nil)) | rep1(paren).map( ps => ps.map(_.head._1).mkString -> ps.flatten ) def parse(s: String, names: Stream[Char]) = parseAll(paren.eval(names), s).map(_.toMap) } This works, and it's not that much less concise than either the mutable state version or the Parsec version. 
But my ExtraStateParsers is ugly as sin—I don't want to try your patience more than I already have, so I won't include it here (although here's a link, if you really want it). I've had to write new versions of every Parser and Parsers method I use above for my ExtraStateParsers and ESP types (rep1, ~>, <~, and |, in case you're counting). If I had needed to use other combinators, I'd have had to write new state transformer-level versions of them as well. Is there a cleaner way to do this? I'd love to see an example of a Scalaz 7's state monad transformer being used to thread state through a parser, but Scala 6 or Haskell examples would also be useful.

    Read the article

  • Why does this XML validation via XSD fail in libxml2 (but succeed in xmllint) and how do I fix it?

    - by mtree
    If I run this XML validation via xmllint: xmllint --noout --schema schema.xsd test.xml I get this success message: .../test.xml validates However if I run the same validation via libxml2's C API: int result = xmlSchemaValidateDoc(...) I get a return value of 1845 and this failure message: Element '{http://example.com/XMLSchema/1.0}foo': No matching global declaration available for the validation root. Which I can make absolutely no sense of. :( schema.xsd: <?xml version="1.0" encoding="utf-8" ?> <!DOCTYPE xs:schema PUBLIC "-//W3C//DTD XMLSCHEMA 200102//EN" "XMLSchema.dtd" > <xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema" xmlns="http://example.com/XMLSchema/1.0" targetNamespace="http://example.com/XMLSchema/1.0" elementFormDefault="qualified" attributeFormDefault="unqualified"> <xs:element name="foo"> </xs:element> </xs:schema> test.xml: <?xml version="1.0" encoding="UTF-8"?> <foo xmlns="http://example.com/XMLSchema/1.0"> </foo> main.c: #include <stdio.h> #include <sys/stat.h> #include <sys/types.h> #include <string.h> #include <libxml/parser.h> #include <libxml/valid.h> #include <libxml/xmlschemas.h> u_int32_t get_file_size(const char *file_name) { struct stat buf; if ( stat(file_name, &buf) != 0 ) return(0); return (unsigned int)buf.st_size; } void handleValidationError(void *ctx, const char *format, ...) { char *errMsg; va_list args; va_start(args, format); vasprintf(&errMsg, format, args); va_end(args); fprintf(stderr, "Validation Error: %s", errMsg); free(errMsg); } int main (int argc, const char * argv[]) { const char *xsdPath = argv[1]; const char *xmlPath = argv[2]; printf("\n"); printf("XSD File: %s\n", xsdPath); printf("XML File: %s\n", xmlPath); int xmlLength = get_file_size(xmlPath); char *xmlSource = (char *)malloc(sizeof(char) * xmlLength); FILE *p = fopen(xmlPath, "r"); char c; unsigned int i = 0; while ((c = fgetc(p)) != EOF) { xmlSource[i++] = c; } printf("\n"); printf("XML Source:\n\n%s\n", xmlSource); fclose(p); printf("\n"); int result = 42; xmlSchemaParserCtxtPtr parserCtxt = NULL; xmlSchemaPtr schema = NULL; xmlSchemaValidCtxtPtr validCtxt = NULL; xmlDocPtr xmlDocumentPointer = xmlParseMemory(xmlSource, xmlLength); parserCtxt = xmlSchemaNewParserCtxt(xsdPath); if (parserCtxt == NULL) { fprintf(stderr, "Could not create XSD schema parsing context.\n"); goto leave; } schema = xmlSchemaParse(parserCtxt); if (schema == NULL) { fprintf(stderr, "Could not parse XSD schema.\n"); goto leave; } validCtxt = xmlSchemaNewValidCtxt(schema); if (!validCtxt) { fprintf(stderr, "Could not create XSD schema validation context.\n"); goto leave; } xmlSetStructuredErrorFunc(NULL, NULL); xmlSetGenericErrorFunc(NULL, handleValidationError); xmlThrDefSetStructuredErrorFunc(NULL, NULL); xmlThrDefSetGenericErrorFunc(NULL, handleValidationError); result = xmlSchemaValidateDoc(validCtxt, xmlDocumentPointer); leave: if (parserCtxt) { xmlSchemaFreeParserCtxt(parserCtxt); } if (schema) { xmlSchemaFree(schema); } if (validCtxt) { xmlSchemaFreeValidCtxt(validCtxt); } printf("\n"); printf("Validation successful: %s (result: %d)\n", (result == 0) ? "YES" : "NO", result); return 0; } console output: XSD File: /Users/dephiniteloop/Desktop/xml_validate/schema.xsd XML File: /Users/dephiniteloop/Desktop/xml_validate/test.gkml XML Source: <?xml version="1.0" encoding="UTF-8"?> <foo xmlns="http://example.com/XMLSchema/1.0"> </foo> Validation Error: Element '{http://example.com/XMLSchema/1.0}foo': No matching global declaration available for the validation root. 
Validation successful: NO (result: 1845) In case it matters: I'm on OSX 10.6.7 with its default libxml2.dylib (/Developer/SDKs/MacOSX10.6.sdk/usr/lib/libxml2.2.7.3.dylib)

    Read the article

  • Mutating the expression tree of a predicate to target another type

    - by Jon
    Intro In the application I 'm currently working on, there are two kinds of each business object: the "ActiveRecord" type, and the "DataContract" type. So for example, we have: namespace ActiveRecord { class Widget { public int Id { get; set; } } } namespace DataContracts { class Widget { public int Id { get; set; } } } The database access layer takes care of "translating" between hierarchies: you can tell it to update a DataContracts.Widget, and it will magically create an ActiveRecord.Widget with the same property values and save that. The problem I have surfaced when attempting to refactor this database access layer. The Problem I want to add methods like the following to the database access layer: // Widget is DataContract.Widget interface DbAccessLayer { IEnumerable<Widget> GetMany(Expression<Func<Widget, bool>> predicate); } The above is a simple general-use "get" method with custom predicate. The only point of interest is that I 'm not passing in an anonymous function but rather an expression tree. This is done because inside DbAccessLayer we have to query ActiveRecord.Widget efficiently (LINQ to SQL) and not have the database return all ActiveRecord.Widget instances and then filter the enumerable collection. We need to pass in an expression tree, so we ask for one as the parameter for GetMany. The snag: the parameter we have needs to be magically transformed from an Expression<Func<DataContract.Widget, bool>> to an Expression<Func<ActiveRecord.Widget, bool>>. This is where I haven't managed to pull it off... Attempted Solution What we 'd like to do inside GetMany is: IEnumerable<DataContract.Widget> GetMany( Expression<Func<DataContract.Widget, bool>> predicate) { var lambda = Expression.Lambda<Func<ActiveRecord.Widget, bool>>( predicate.Body, predicate.Parameters); // use lambda to query ActiveRecord.Widget and return some value } This won't work because in a typical scenario, for example if: predicate == w => w.Id == 0; ...the expression tree contains a MemberAccessExpression instance which has a MemberInfo property (named Member) that point to members of DataContract.Widget. There are also ParameterExpression instances both in the expression tree and in its parameter expression collection (predicate.Parameters); After searching a bit, I found System.Linq.Expressions.ExpressionVisitor (its source can be found here in the context of a how-to, very helpful) which is a convenient way to modify an expression tree. Armed with this, I implemented a visitor. This simple visitor only takes care of changing the types in member access and parameter expressions. It may not be complete, but it's fine for the expression w => w.Id == 0. 
internal class Visitor : ExpressionVisitor { private readonly Func<Type, Type> dataContractToActiveRecordTypeConverter; public Visitor(Func<Type, Type> dataContractToActiveRecordTypeConverter) { this.dataContractToActiveRecordTypeConverter = dataContractToActiveRecordTypeConverter; } protected override Expression VisitMember(MemberExpression node) { var dataContractType = node.Member.ReflectedType; var activeRecordType = this.dataContractToActiveRecordTypeConverter(dataContractType); var converted = Expression.MakeMemberAccess( base.Visit(node.Expression), activeRecordType.GetProperty(node.Member.Name)); return converted; } protected override Expression VisitParameter(ParameterExpression node) { var dataContractType = node.Type; var activeRecordType = this.dataContractToActiveRecordTypeConverter(dataContractType); return Expression.Parameter(activeRecordType, node.Name); } } With this visitor, GetMany becomes: IEnumerable<DataContract.Widget> GetMany( Expression<Func<DataContract.Widget, bool>> predicate) { var visitor = new Visitor(...); var lambda = Expression.Lambda<Func<ActiveRecord.Widget, bool>>( visitor.Visit(predicate.Body), predicate.Parameters.Select(p => visitor.Visit(p)); var widgets = ActiveRecord.Widget.Repository().Where(lambda); // This is just for reference, see below Expression<Func<ActiveRecord.Widget, bool>> referenceLambda = w => w.Id == 0; // Here we 'd convert the widgets to instances of DataContract.Widget and // return them -- this has nothing to do with the question though. } Results The good news is that lambda is constructed just fine. The bad news is that it isn't working; it's blowing up on me when I try to use it (the exception messages are really not helpful at all). I have examined the lambda my code produces and a hardcoded lambda with the same expression; they look exactly the same. I spent hours in the debugger trying to find some difference, but I can't. When predicate is w => w.Id == 0, lambda looks exactly like referenceLambda. But the latter works with e.g. IQueryable<T>.Where, while the former does not (I have tried this in the immediate window of the debugger). I should also mention that when predicate is w => true, it all works just fine. Therefore I am assuming that I 'm not doing enough work in Visitor, but I can't find any more leads to follow on. Can someone point me in the right direction? Thanks in advance for your help!

    Read the article

  • Is there an equivalent to Java's ClassFileTransformer in .NET? (a way to replace a class)

    - by Alix
    I've been searching for this for quite a while with no luck so far. Is there an equivalent to Java's ClassFileTransformer in .NET? Basically, I want to create a class CustomClassFileTransformer (which in Java would implement the interface ClassFileTransformer) that gets called whenever a class is loaded, and is allowed to tweak it and replace it with the tweaked version. I know there are frameworks that do similar things, but I was looking for something more straightforward, like implementing my own ClassFileTransformer. Is it possible? EDIT #1. More details about why I need this: Basically, I have a C# application and I need to monitor the instructions it wants to run in order to detect read or write operations to fields (operations Ldfld and Stfld) and insert some instructions before the read/write takes place. I know how to do this (except for the part where I need to be invoked to replace the class): for every method whose code I want to monitor, I must: Get the method's MethodBody using MethodBase.GetMethodBody() Transform it to byte array with MethodBody.GetILAsByteArray(). The byte[] it returns contains the bytecode. Analyse the bytecode as explained here, possibly inserting new instructions or deleting/modifying existing ones by changing the contents of the array. Create a new method and use the new bytecode to create its body, with MethodBuilder.CreateMethodBody(byte[] il, int count), where il is the array with the bytecode. I put all these tweaked methods in a new class and use the new class to replace the one that was originally going to be loaded. An alternative to replacing classes would be somehow getting notified whenever a method is invoked. Then I'd replace the call to that method with a call to my own tweaked method, which I would tweak only the first time is invoked and then I'd put it in a dictionary for future uses, to reduce overhead (for future calls I'll just look up the method and invoke it; I won't need to analyse the bytecode again). I'm currently investigating ways to do this and LinFu looks pretty interesting, but if there was something like a ClassFileTransformer it would be much simpler: I just rewrite the class, replace it, and let the code run without monitoring anything. An additional note: the classes may be sealed. I want to be able to replace any kind of class, I cannot impose restrictions on their attributes. EDIT #2. Why I need to do this at runtime. I need to monitor everything that is going on so that I can detect every access to data. This applies to the code of library classes as well. However, I cannot know in advance which classes are going to be used, and even if I knew every possible class that may get loaded it would be a huge performance hit to tweak all of them instead of waiting to see whether they actually get invoked or not. POSSIBLE (BUT PRETTY HARDCORE) SOLUTION. In case anyone is interested (and I see the question has been faved, so I guess someone is), this is what I'm looking at right now. Basically I'd have to implement the profiling API and I'll register for the events that I'm interested in, in my case whenever a JIT compilation starts. An extract of the blogpost: In your ICorProfilerCallback2::ModuleLoadFinished callback, you call ICorProfilerInfo2::GetModuleMetadata to get a pointer to a metadata interface on that module. QI for the metadata interface you want. Search MSDN for "IMetaDataImport", and grope through the table of contents to find topics on the metadata interfaces. 
Once you're in metadata-land, you have access to all the types in the module, including their fields and function prototypes. You may need to parse metadata signatures and this signature parser may be of use to you. In your ICorProfilerCallback2::JITCompilationStarted callback, you may use ICorProfilerInfo2::GetILFunctionBody to inspect the original IL, and ICorProfilerInfo2::GetILFunctionBodyAllocator and then ICorProfilerInfo2::SetILFunctionBody to replace that IL with your own. The great news: I get notified when a JIT compilation starts and I can replace the bytecode right there, without having to worry about replacing the class, etc. The not-so-great news: you cannot invoke managed code from the API's callback methods, which makes sense but means I'm on my own parsing the IL code, etc, as opposed to be able to use Cecil, which would've been a breeze. I don't think there's a simpler way to do this without using AOP frameworks (such as PostSharp). If anyone has any other idea please let me know. I'm not marking the question as answered yet.

    Read the article

  • Intellij Idea 13.x and ASM 5.x library incompatible?

    - by Jarrod Roberson
    I can't get Intellij Idea 13.0 to compile my code against ASM 5.0.3 I have a multi-module Maven project. It compiles and installs successfully. Apparently com.google.findbugs:findbugs has a dependency on asm:asm:3.3 and I want to use org.ow2.asm:asm:5.0.3 to manipulate some bytecode. So in the parent pom.xml I exclude the asm:asm:3.3 dependencies from the classpath. This works fine when I run mvn install from the command line. I can't get the Build - Make Project menu selection to work in Intellij Idea. Here is the relevant parts of my pom.xml files. parent.pom <dependency> <groupId>org.ow2.asm</groupId> <artifactId>asm</artifactId> <version>5.0.3</version> </dependency> <dependency> <groupId>org.ow2.asm</groupId> <artifactId>asm-tree</artifactId> <version>5.0.3</version> </dependency> <dependency> <groupId>org.ow2.asm</groupId> <artifactId>asm-util</artifactId> <version>5.0.3</version> </dependency> <dependency> <groupId>org.ow2.asm</groupId> <artifactId>asm-commons</artifactId> <version>5.0.3</version> </dependency> <dependency> <groupId>com.google.code.findbugs</groupId> <artifactId>findbugs</artifactId> <version>2.0.3</version> <exclusions> <exclusion> <groupId>asm</groupId> <artifactId>asm</artifactId> </exclusion> <exclusion> <groupId>asm</groupId> <artifactId>asm-commons</artifactId> </exclusion> <exclusion> <groupId>asm</groupId> <artifactId>asm-tree</artifactId> </exclusion> </exclusions> </dependency> Here is the code that is failing 18 public static void main(final String[] args) throws IOException 19 { 20 final InputStream is = NotEmptyTest.class.getResourceAsStream("/com/vertigrated/annotation/NotEmptyTest.class"); 21 final ClassReader cr = new ClassReader(is); 22 final ClassNode cn = new ClassNode(); 23 cr.accept(cn, 0); 24 for (final MethodNode mn : cn.methods) 25 { 26 - 38 snipped for brevity 39 } 40 } 41 } Here is the error message: Information:Using javac 1.7.0_25 to compile java sources Information:java: Errors occurred while compiling module 'tests' Information:Compilation completed with 1 error and 2 warnings in 2 sec Information:1 error Information:2 warnings /<path to my source code>/NotEmptyTest.java Error:Error:line (24)java: incompatible types required: org.objectweb.asm.tree.MethodNode found: java.lang.Object Warning:Warning:java: /<path to my project>//NotEmptyTest.java uses unchecked or unsafe operations. Warning:Warning:java: Recompile with -Xlint:unchecked for details. 
As you can see in the screen capture, it reports the correct version of the libraries in the Javadoc but the AutoComplete shows the old 3.3 non-typesafe return value of List instead of List<MethodNode>: Here is what Maven knows, which is correct: [INFO] --- maven-dependency-plugin:2.8:list (default-cli) @ tests --- [INFO] [INFO] The following files have been resolved: [INFO] com.google.code.findbugs:bcel:jar:2.0.1:compile [INFO] junit:junit:jar:4.11:test [INFO] xml-apis:xml-apis:jar:1.0.b2:compile [INFO] com.apple:AppleJavaExtensions:jar:1.4:compile [INFO] javax.inject:javax.inject:jar:1:compile [INFO] jaxen:jaxen:jar:1.1.6:compile [INFO] org.ow2.asm:asm-util:jar:5.0.3:compile [INFO] com.google.inject:guice:jar:3.0:compile [INFO] dom4j:dom4j:jar:1.6.1:compile [INFO] com.google.code.findbugs:jFormatString:jar:2.0.1:compile [INFO] net.jcip:jcip-annotations:jar:1.0:compile [INFO] org.ow2.asm:asm-tree:jar:5.0.3:compile [INFO] commons-lang:commons-lang:jar:2.6:compile [INFO] com.google.code.findbugs:jsr305:jar:2.0.1:compile [INFO] org.hamcrest:hamcrest-core:jar:1.3:test [INFO] aopalliance:aopalliance:jar:1.0:compile [INFO] com.google.code.findbugs:findbugs:jar:2.0.3:compile [INFO] org.ow2.asm:asm-commons:jar:5.0.3:compile [INFO] org.ow2.asm:asm:jar:5.0.3:compile How do I get Intellij Idea to use the correct dependency internally?

    Read the article

  • Implementation question involving implementing an interface

    - by Vivin Paliath
    I'm writing a set of collection classes for different types of Trees. I'm doing this as a learning exercise and I'm also hoping it turns out to be something useful. I really want to do this the right way and so I've been reading Effective Java and I've also been looking at the way Joshua Bloch implemented the collection classes by looking at the source. I seem to have a fair idea of what is being done, but I still have a few things to sort out. I have a Node<T> interface and an AbstractNode<T> class that implements the Node interface. I then created a GenericNode<T> (a node that can have 0 to n children, and that is part of an n-ary tree) class that extends AbstractNode<T> and implements Node<T>. This part was easy. Next, I created a Tree<T> interface and an AbstractTree<T> class that implements the Tree<T> interface. After that, I started writing a GenericTree<T> class that extends AbstractTree<T> and implements Tree<T>. This is where I started having problems. As far as the design is concerned, a GenericTree<T> can only consist of nodes of type GenericTreeNode<T>. This includes the root. In my Tree<T> interface I have: public interface Tree<T> { void setRoot(Node<T> root); Node<T> getRoot(); List<Node<T>> postOrder(); ... rest omitted ... } And, AbstractTree<T> implements this interface: public abstract class AbstractTree<T> implements Tree<T> { protected Node<T> root; protected AbstractTree() { } protected AbstractTree(Node<T> root) { this.root = root; } public void setRoot(Node<T> root) { this.root = root; } public Node<T> getRoot() { return this.root; } ... rest omitted ... } In GenericTree<T>, I can have: public GenericTree(Node<T> root) { super(root); } But what this means is that you can create a generic tree using any subtype of Node<T>. You can also set the root of a tree to any subtype of Node<T>. I want to be able to restrict the type of the node to the type of the tree that it can represent. To fix this, I can do this: public GenericTree(GenericNode<T> root) { super(root); } However, setRoot still accepts a parameter of type Node<T>. Which means a user can still create a tree with the wrong type of root node. How do I enforce this constraint? The only way I can think of doing is either: Do an instanceof which limits the check to runtime. I'm not a huge fan of this. Remove setRoot from the interface and have the base class implement this method. This means that it is not part of the contract and anyone who wants to make a new type of tree needs to remember to implement this method. Is there a better way? The second question I have concerns the return type of postOrder which is List<Node<T>>. This means that if a user is operating on a GenericTree<T> object and calls postOrder, he or she receives a list that consists of Node<T> objects. This means when iterating through (using a foreach construct) they would have perform an explicit cast to GenericNode<T> if they want to use methods that are only defined in that class. I don't like having to place this burden on the user. What are my options in this case? I can only think of removing the method from the interface and have the subclass implement this method making sure that it returns a list of appropriate subtype of Node<T>. However, this once again removes it from the contract and it's anyone who wants to create a new type of tree has to remember to implement this method. Is there a better way?

    Read the article

  • Reverse engineering windows mobile live search CellID location awareness protocol (yikes)...

    - by Jean-Charles
    I wasn't sure of how to form the question so I apologize if the title is misleading. Additionally, you may want to get some coffee and take a seat for this one ... It's long. Basically, I'm trying to reverse engineer the protocol used by the Windows Mobile Live Search application to get location based on cellID. Before I go on, I am aware of other open source services (such as OpenCellID) but this is more for the sake of education and a bit for redundancy. According to the packets I captured, a POST request is made to ... mobile.search.live.com/positionlookupservice_1/service.aspx ... with a few specific headers (agent, content-length, etc) and no body. Once this goes through, the server sends back a 100-Continue response. At this point, the application submits this data (I chopped off the packet header): 00 00 00 01 00 00 00 05 55 54 ........UT 46 2d 38 05 65 6e 2d 55 53 05 65 6e 2d 55 53 01 F-8.en-US.en-US. 06 44 65 76 69 63 65 05 64 75 6d 6d 79 01 06 02 .Device.dummy... 50 4c 08 0e 52 65 76 65 72 73 65 47 65 6f 63 6f PL..ReverseGeoco 64 65 01 07 0b 47 50 53 43 68 69 70 49 6e 66 6f de...GPSChipInfo 01 20 06 09 43 65 6c 6c 54 6f 77 65 72 06 03 43 . ..CellTower..C 47 49 08 03 4d 43 43 b6 02 07 03 4d 4e 43 03 34 GI..MCC....MNC.4 31 30 08 03 4c 41 43 cf 36 08 02 43 49 fd 01 00 10..LAC.6..CI... 00 00 00 ... And receives this in response (packet and HTTP response headers chopped): 00 00 00 01 00 00 00 00 01 06 02 50 4c ...........PL 06 08 4c 6f 63 61 6c 69 74 79 06 08 4c 6f 63 61 ..Locality..Loca 74 69 6f 6e 07 03 4c 61 74 09 34 32 2e 33 37 35 tion..Lat.42.375 36 32 31 07 04 4c 6f 6e 67 0a 2d 37 31 2e 31 35 621..Long.-71.15 38 39 33 38 00 07 06 52 61 64 69 75 73 09 32 30 8938...Radius.20 30 30 2e 30 30 30 30 00 42 07 0c 4c 6f 63 61 6c 00.0000.B..Local 69 74 79 4e 61 6d 65 09 57 61 74 65 72 74 6f 77 ityName.Watertow 6e 07 16 41 64 6d 69 6e 69 73 74 72 61 74 69 76 n..Administrativ 65 41 72 65 61 4e 61 6d 65 0d 4d 61 73 73 61 63 eAreaName.Massac 68 75 73 65 74 74 73 07 10 50 6f 73 74 61 6c 43 husetts..PostalC 6f 64 65 4e 75 6d 62 65 72 05 30 32 34 37 32 07 odeNumber.02472. 0b 43 6f 75 6e 74 72 79 4e 61 6d 65 0d 55 6e 69 .CountryName.Uni 74 65 64 20 53 74 61 74 65 73 00 00 00 ted States... Now, here is what I've determined so far: All strings are prepended with one byte that is the decimal equivalent of their length. There seem to be three different casts that are used throughout the request and response. They show up as one byte before the length byte. I've concluded that the three types map out as follows: 0x06 - parent element (subsequent values are children, closed with 0x00) 0x07 - string 0x08 - int? Based on these determinations, here is what the request and response look like in a more readable manner (values surrounded by brackets denote length and values surrounded by parenthesis denote a cast): \0x00\0x00\0x00\0x01\0x00\0x00\0x00 [5]UTF-8 [5]en-US [5]en-US \0x01 [6]Device [5]dummy \0x01 (6)[2]PL (8)[14]ReverseGeocode\0x01 (7)[11]GPSChipInfo[1]\0x20 (6)[9]CellTower (6)[3]CGI (8)[3]MCC\0xB6\0x02 //310 (7)[3]MNC[3]410 //410 (8)[3]LAC\0xCF\0x36 //6991 (8)[2]CI\0xFD\0x01 //259 \0x00 \0x00 \0x00 \0x00 and.. \0x00\0x00\0x00\0x01\0x00\0x00\0x00 \0x00\0x01 (6)[2]PL (6)[8]Locality (6)[8]Location (7)[3]Lat[9]42.375621 (7)[4]Long[10]-71.158938 \0x00 (7)[6]Radius[9]2000.0000 \0x00 \0x42 //"B" ... 
Has to do with GSM (7)[12]LocalityName[9]Watertown (7)[22]AdministrativeAreaName[13]Massachusetts (7)[16]PostalCodeNumber[5]02472 (7)[11]CountryName[13]United States \0x00 \0x00\0x00 My analysis seems to work out pretty well except for a few things: The 0x01s throughout confuse me ... At first I thought they were some sort of base level element terminators but I'm not certain. I'm not sure the 7-byte header is, in fact, a seven byte header. I wonder if it's maybe 4 bytes and that the three remaining 0x00s are of some other significance. The trailing 0x00s. Why is it that there is only one on the request but two on the response? The type 8 cast mentioned above ... I can't seem to figure out how those values are being encoded. I added comments to those lines with what the values should correspond to. Any advice on these four points will be greatly appreciated. And yes, these packets were captured in Watertown, MA. :)

    Read the article

  • linux thread synchronization

    - by johnnycrash
    I am new to linux and linux threads. I have spent some time googling to try to understand the differences between all the functions available for thread synchronization. I still have some questions. I have found all of these different types of synchronizations, each with a number of functions for locking, unlocking, testing the lock, etc. gcc atomic operations futexes mutexes spinlocks seqlocks rculocks conditions semaphores My current (but probably flawed) understanding is this: semaphores are process wide, involve the filesystem (virtually I assume), and are probably the slowest. Futexes might be the base locking mechanism used by mutexes, spinlocks, seqlocks, and rculocks. Futexes might be faster than the locking mechanisms that are based on them. Spinlocks dont block and thus avoid context swtiches. However they avoid the context switch at the expense of consuming all the cycles on a CPU until the lock is released (spinning). They should only should be used on multi processor systems for obvious reasons. Never sleep in a spinlock. The seq lock just tells you when you finished your work if a writer changed the data the work was based on. You have to go back and repeat the work in this case. Atomic operations are the fastest synch call, and probably are used in all the above locking mechanisms. You do not want to use atomic operations on all the fields in your shared data. You want to use a lock (mutex, futex, spin, seq, rcu) or a single atomic opertation on a lock flag when you are accessing multiple data fields. My questions go like this: Am I right so far with my assumptions? Does anyone know the cpu cycle cost of the various options? I am adding parallelism to the app so we can get better wall time response at the expense of running fewer app instances per box. Performances is the utmost consideration. I don't want to consume cpu with context switching, spinning, or lots of extra cpu cycles to read and write shared memory. I am absolutely concerned with number of cpu cycles consumed. Which (if any) of the locks prevent interruption of a thread by the scheduler or interrupt...or am I just an idiot and all synchonization mechanisms do this. What kinds of interruption are prevented? Can I block all threads or threads just on the locking thread's CPU? This question stems from my fear of interrupting a thread holding a lock for a very commonly used function. I expect that the scheduler might schedule any number of other workers who will likely run into this function and then block because it was locked. A lot of context switching would be wasted until the thread with the lock gets rescheduled and finishes. I can re-write this function to minimize lock time, but still it is so commonly called I would like to use a lock that prevents interruption...across all processors. I am writing user code...so I get software interrupts, not hardware ones...right? I should stay away from any functions (spin/seq locks) that have the word "irq" in them. Which locks are for writing kernel or driver code and which are meant for user mode? Does anyone think using an atomic operation to have multiple threads move through a linked list is nuts? I am thinking to atomicly change the current item pointer to the next item in the list. If the attempt works, then the thread can safely use the data the current item pointed to before it was moved. Other threads would now be moved along the list. futexes? Any reason to use them instead of mutexes? 
Is there a better way than using a condition to sleep a thread when there is no work? When using gcc atomic ops, specifically the test_and_set, can I get a performance increase by doing a non atomic test first and then using test_and_set to confirm? *I know this will be case specific, so here is the case. There is a large collection of work items, say thousands. Each work item has a flag that is initialized to 0. When a thread has exclusive access to the work item, the flag will be one. There will be lots of worker threads. Any time a thread is looking for work, they can non atomicly test for 1. If they read a 1, we know for certain that the work is unavailable. If they read a zero, they need to perform the atomic test_and_set to confirm. So if the atomic test_and_set is 500 cpu cycles because it is disabling pipelining, causes cpu's to communicate and L2 caches to flush/fill .... and a simple test is 1 cycle .... then as long as I had a better ratio of 500 to 1 when it came to stumbling upon already completed work items....this would be a win.* I hope to use mutexes or spinlocks to sparilngly protect sections of code that I want only one thread on the SYSTEM (not jsut the CPU) to access at a time. I hope to sparingly use gcc atomic ops to select work and minimize use of mutexes and spinlocks. For instance: a flag in a work item can be checked to see if a thread has worked it (0=no, 1=yes or in progress). A simple test_and_set tells the thread if it has work or needs to move on. I hope to use conditions to wake up threads when there is work. Thanks!

    Read the article

  • How to create a SOAP REQUEST using ASP.NET (VB) without using Visual

    - by user311691
    Hi all, I urgently need your help. I am new to consuming a web service using the SOAP protocol. I have been given a demo web service URL which ends in .wsdl and NOT .asmx?WSDL. The problem is I cannot add a web reference using Visual Studio, Disco.exe, or Wsdl.exe. This web service has been created on a Java platform, and for security reasons the only way to invoke it is at runtime using the SOAP protocol in ASP.NET (VB). I have created some code but cannot seem to send the SOAP request to the receiving web service. I would appreciate a solution with step-by-step instructions on how to send a SOAP REQUEST. Below is my code; all I am trying to do is send a SOAP REQUEST and receive a SOAP RESPONSE, which I will display in my browser. <%@ page language="vb" %> <%@ Import Namespace="System.Data"%> <%@ Import Namespace="System.Xml"%> <%@ Import Namespace="System.Net"%> <%@ Import Namespace="System.IO"%> <%@ Import Namespace="System.Text"%> <script runat=server> Private Sub Page_Load() Dim objHTTPReq As HttpWebRequest Dim WebserviceUrl As String = "http://xx.xx.xx:8084/asy/wsdl/asy.wsdl" objHTTPReq = CType(WebRequest.Create(WebserviceUrl), HttpWebRequest) Dim soapXML As String soapXML = "<?xml version='1.0' encoding='utf-8'?>" & _ " <soap:Envelope xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'" & _ " xmlns:xsd='http://www.w3.org/2001/XMLSchema'"& _ " xmlns:soap='http://schemas.xmlsoap.org/soap/envelope/' >"& _ " <soap:Body> "& _ " <validatePaymentData xmlns='http://asybanks.webservices.asycuda.org'> " & _ " <bankCode>"& bankCode &"</bankCode> " & _ " <PaymentDataType>" & _ " <paymentType>"& payment_type &"</paymentType> " & _ " <amount>"& ass_amount &"</amount> " & _ " <ReferenceType>" & _ " <year>"& year &"</year> " & _ " <customsOfficeCode>"& station &"</customsOfficeCode> " & _ " </ReferenceType>" & _ " <accountNumber>"& zra_account &"</accountNumber> " & _ " </PaymentDataType> " & _ " </validatePaymentData> " & _ " </soap:Body> " & _ " </soap:Envelope> " objHTTPReq.Headers.Add("SOAPAction", "http://asybanks.webservices.asycuda.org") objHTTPReq.ContentType = "text/xml; charset=utf-8" objHTTPReq.ContentLength = soapXML.Length objHTTPReq.Accept = "text/xml" objHTTPReq.Method = "POST" Dim objHTTPRes As HttpWebResponse = CType(objHTTPReq.GetResponse(), HttpWebResponse) Dim dataStream As Stream = objHTTPRes.GetResponseStream() Dim reader As StreamReader = new StreamReader(dataStream) Dim responseFromServer As String = reader.ReadToEnd() OurXml.text = responseFromServer End Sub </script> <html xmlns="http://www.w3.org/1999/xhtml"> <head runat="server"> <title> XML TRANSACTION SIMULATION - N@W@ TJ </title> </head> <body> <form id="form1" runat="server"> <div> <p>ZRA test Feedback:</p> <asp:label id="OurXml" runat="server"/> </div> </form> </body> </html> The demo web service looks like this: <?xml version="1.0" encoding="UTF-8" ?> - <!-- WEB SERVICE JAVA DEMO --> - <definitions targetNamespace="http://asybanks.webservices.asycuda.org" xmlns="http://schemas.xmlsoap.org/wsdl/" xmlns:apachesoap="http://xml.apache.org/xml-soap" xmlns:soap="http://schemas.xmlsoap.org/wsdl/soap/" xmlns:xs="http://www.w3.org/2001/XMLSchema" xmlns:y="http://asybanks.webservices.asycuda.org"> - <types> - <xs:schema elementFormDefault="qualified" targetNamespace="http://asybanks.webservices.asycuda.org" xmlns="http://www.w3.org/2001/XMLSchema"> SOME OTHER INFORMATION AT THE BOTTOM <soap:address location="http://xx.xx.xx:8084/asy/services/asy" /> </port> </service> </definitions> From the above excerpt of
the WSDL, I am not sure which namespace to use for SOAPAction - please advise. If you could comment every stage of a SOAP request and provide a working demo, I would be most grateful, as I would be learning rather than just assuming stuff :)
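As an aside for readers, the wire-level mechanics that the VB code assembles by hand are language-neutral: a SOAP 1.1 call is an HTTP POST with a text/xml body plus a SOAPAction header, and the authoritative SOAPAction value is the soapAction attribute on the matching operation in the WSDL binding (not shown in the excerpt above - the value below is only a guess built from the target namespace). A minimal libcurl sketch of the same request:

    #include <curl/curl.h>

    int main() {
        // Skeleton SOAP 1.1 envelope; the operation payload would go in the Body.
        const char* envelope =
            "<?xml version=\"1.0\" encoding=\"utf-8\"?>"
            "<soap:Envelope xmlns:soap=\"http://schemas.xmlsoap.org/soap/envelope/\">"
            "<soap:Body><!-- validatePaymentData payload here --></soap:Body>"
            "</soap:Envelope>";

        curl_global_init(CURL_GLOBAL_DEFAULT);
        CURL* curl = curl_easy_init();
        if (!curl) return 1;

        struct curl_slist* headers = NULL;
        headers = curl_slist_append(headers, "Content-Type: text/xml; charset=utf-8");
        // SOAP 1.1 sends the action in its own header; this value is a placeholder.
        headers = curl_slist_append(headers,
            "SOAPAction: \"http://asybanks.webservices.asycuda.org/validatePaymentData\"");

        curl_easy_setopt(curl, CURLOPT_URL, "http://xx.xx.xx:8084/asy/services/asy");
        curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers);
        curl_easy_setopt(curl, CURLOPT_POSTFIELDS, envelope);   // implies POST

        CURLcode rc = curl_easy_perform(curl);  // response body goes to stdout by default

        curl_slist_free_all(headers);
        curl_easy_cleanup(curl);
        curl_global_cleanup();
        return rc == CURLE_OK ? 0 : 1;
    }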

    Read the article

  • C# Reading and Writing a Char[] to and from a Byte[] - Updated with Solution

    - by Simon G
    Hi, I have a byte array of around 10,000 bytes which is basically a blob from Delphi that contains char, string, double and arrays of various types. This needs to be read in and updated via C#. I've created a very basic reader that gets the byte array from the db and converts the bytes to the relevant object type when accessing the property, which works fine. My problem is that when I try to write to a specific char[] item, it doesn't seem to update the byte array. I've created the following extensions for reading and writing: public static class CharExtension { public static byte ToByte( this char c ) { return Convert.ToByte( c ); } public static byte ToByte( this char c, int position, byte[] blob ) { byte b = c.ToByte(); blob[position] = b; return b; } } public static class CharArrayExtension { public static byte[] ToByteArray( this char[] c ) { byte[] b = new byte[c.Length]; for ( int i = 0; i < c.Length; i++ ) { b[i] = c[i].ToByte(); } return b; } public static byte[] ToByteArray( this char[] c, int position, int length, byte[] blob ) { byte[] b = c.ToByteArray(); Array.Copy( b, 0, blob, position, length ); return b; } } public static class ByteExtension { public static char ToChar( this byte[] b, int position ) { return Convert.ToChar( b[position] ); } } public static class ByteArrayExtension { public static char[] ToCharArray( this byte[] b, int position, int length ) { char[] c = new char[length]; for ( int i = 0; i < length; i++ ) { c[i] = b.ToChar( position ); position += 1; } return c; } } To read and write chars and char arrays my code looks like: Byte[] _tariffBlob; // set from a db field public char ubin { get { return _tariffBlob.ToChar( 14 ); } set { value.ToByte( 14, _tariffBlob ); } } public char[] usercaplas { get { return _tariffBlob.ToCharArray( 2035, 10 ); } set { value.ToByteArray( 2035, 10, _tariffBlob ); } } So to write to the objects I can do: ubin = 'C'; // this will update the byte[] usercaplas = new char[10] { 'A', 'B', etc. }; // this will update the byte[] usercaplas[3] = 'C'; // this does not update the byte[] I know the reason is that the setter property is not being called, but I want to know: is there a way around this using code similar to what I already have? I know a possible solution is to use a private variable called _usercaplas that I set and update as needed; however, as the byte array is nearly 10,000 bytes in length, the class is already long and I would like a simpler approach to reduce the overall code length and complexity. Thanks. Solution: here's my solution, should anyone want it. If you have a better way of doing it, please let me know.
First I created a new class for the array: public class CharArrayList : ArrayList { char[] arr; private byte[] blob; private int length = 0; private int position = 0; public CharArrayList( byte[] blob, int position, int length ) { this.blob = blob; this.length = length; this.position = position; PopulateInternalArray(); SetArray(); } private void PopulateInternalArray() { arr = blob.ToCharArray( position, length ); } private void SetArray() { foreach ( char c in arr ) { this.Add( c ); } } private void UpdateInternalArray() { this.Clear(); SetArray(); } public char this[int i] { get { return arr[i]; } set { arr[i] = value; UpdateInternalArray(); } } } Then I created a couple of extension methods to help with converting to a byte[]: public static byte[] ToByteArray( this CharArrayList c ) { byte[] b = new byte[c.Count]; for ( int i = 0; i < c.Count; i++ ) { b[i] = Convert.ToChar( c[i] ).ToByte(); } return b; } public static byte[] ToByteArray( this CharArrayList c, byte[] blob, int position, int length ) { byte[] b = c.ToByteArray(); Array.Copy( b, 0, blob, position, length ); return b; } So to read and write to the object: private CharArrayList _usercaplass; public CharArrayList usercaplas { get { if ( _usercaplass == null ) _usercaplass = new CharArrayList( _tariffBlob, 2035, 100 ); return _usercaplass; } set { _usercaplass = value; _usercaplass.ToByteArray( _tariffBlob, 2035, 100 ); } } As mentioned before, it's not an ideal solution, as I have to have private variables and extra code in the setter, but I couldn't see a way around it.
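For comparison, the same write-through-indexer requirement has a classic C++ shape: operator[] returns a small proxy object whose assignment operator writes straight back into the byte store (the trick std::vector<bool> uses). A minimal sketch with hypothetical names, not a translation of the C# above:

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    class CharView {
        std::vector<unsigned char>& blob_;  // the shared backing store
        std::size_t offset_, length_;

        // Proxy returned by operator[]; assignment writes through to the blob.
        class Ref {
            unsigned char& byte_;
        public:
            explicit Ref(unsigned char& b) : byte_(b) {}
            Ref& operator=(char c) { byte_ = static_cast<unsigned char>(c); return *this; }
            operator char() const { return static_cast<char>(byte_); }
        };

    public:
        CharView(std::vector<unsigned char>& blob, std::size_t offset, std::size_t length)
            : blob_(blob), offset_(offset), length_(length) {}

        Ref operator[](std::size_t i) { return Ref(blob_[offset_ + i]); }
        std::size_t size() const { return length_; }
    };

    int main() {
        std::vector<unsigned char> blob(10000, 0);
        CharView usercaplas(blob, 2035, 10);
        usercaplas[3] = 'C';   // writes directly into blob[2038]
        std::printf("%c\n", static_cast<char>(usercaplas[3]));
        return 0;
    }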

    Read the article

  • Pointers to Derived Class Objects Losing vfptr

    - by duckworthd
    To begin, I am trying to write a run-of-the-mill, simple Ray Tracer. In my Ray Tracer, I have multiple types of geometries in the world, all derived from a base class called "SceneObject". I've included the header for it here. /** Interface for all objects that will appear in a scene */ class SceneObject { public: mat4 M, M_inv; Color c; SceneObject(); ~SceneObject(); /** The transformation matrix to be applied to all points of this object. Identity leaves the object in world frame. */ void setMatrix(mat4 M); void setMatrix(MatrixStack mStack); void getMatrix(mat4& M); /** The color of the object */ void setColor(Color c); void getColor(Color& c); /** Alter one portion of the color, leaving the rest as they were. */ void setDiffuse(vec3 rgb); void setSpecular(vec3 rgb); void setEmission(vec3 rgb); void setAmbient(vec3 rgb); void setShininess(double s); /** Fills 'inter' with information regarding an intersection between this object and 'ray'. Ray should be in world frame. */ virtual void intersect(Intersection& inter, Ray ray) = 0; /** Returns a copy of this SceneObject */ virtual SceneObject* clone() = 0; /** Print information regarding this SceneObject for debugging */ virtual void print() = 0; }; As you can see, I've included a couple virtual functions to be implemented elsewhere. In this case, I have only two derived class -- Sphere and Triangle, both of which implement the missing member functions. Finally, I have a Parser class, which is full of static methods that do the actual "Ray Tracing" part. Here's a couple snippets for relevant portions void Parser::trace(Camera cam, Scene scene, string outputFile, int maxDepth) { int width = cam.getNumXPixels(); int height = cam.getNumYPixels(); vector<vector<vec3>> colors; colors.clear(); for (int i = 0; i< width; i++) { vector<vec3> ys; for (int j = 0; j<height; j++) { Intersection intrsct; Ray ray; cam.getRay(ray, i, j); vec3 color; printf("Obtaining color for Ray[%d,%d]\n", i,j); getColor(color, scene, ray, maxDepth); ys.push_back(color); } colors.push_back(ys); } printImage(colors, width, height, outputFile); } void Parser::getColor(vec3& color, Scene scene, Ray ray, int numBounces) { Intersection inter; scene.intersect(inter,ray); if(inter.isIntersecting()){ Color c; inter.getColor(c); c.getAmbient(color); } else { color = vec3(0,0,0); } } Right now, I've forgone the true Ray Tracing part and instead simply return the color of the first object hit, if any. As you have no doubt noticed, the only way the computer knows that a ray has intersected an object is through Scene.intersect(), which I also include. void Scene::intersect(Intersection& i, Ray r) { Intersection result; result.setDistance(numeric_limits<double>::infinity()); result.setIsIntersecting(false); double oldDist; result.getDistance(oldDist); /* Cycle through all objects, making result the closest one */ for(int ind=0; ind<objects.size(); ind++){ SceneObject* thisObj = objects[ind]; Intersection betterIntersect; thisObj->intersect(betterIntersect, r); double newDist; betterIntersect.getDistance(newDist); if (newDist < oldDist){ result = betterIntersect; oldDist = newDist; } } i = result; } Alright, now for the problem. I begin by creating a scene and filling it with objects outside of the Parser::trace() method. Now for some odd reason, I cast Ray for i=j=0 and everything works wonderfully. However, by the time the second ray is cast all of the objects stored in my Scene no longer recognize their vfptr's! 
I stepped through the code with a debugger and found that the information in all the vfptrs is lost somewhere between the end of getColor() and the continuation of the loop. However, if I change the arguments of getColor() to use a Scene& instead of a Scene, then no loss occurs. What crazy voodoo is this?
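A hedged guess at the mechanism, consistent with the Scene& fix: getColor() takes its Scene by value, so a copy is constructed and destroyed on every call. If Scene owns its SceneObject* pointers - say its destructor deletes them - and no deep-copy constructor is defined (the Rule of Three), each copy's destructor frees the very objects the original Scene still points at, and a freed object's vptr is exactly the kind of garbage a debugger shows. A minimal repro of that pattern, with invented class bodies:

    #include <vector>

    struct SceneObject { virtual ~SceneObject() {} virtual void intersect() {} };
    struct Sphere : SceneObject { void intersect() {} };

    struct Scene {
        std::vector<SceneObject*> objects;
        ~Scene() { for (SceneObject* o : objects) delete o; }
        // No user-defined copy constructor: the compiler-generated one copies
        // the raw pointers, so two Scenes now "own" the same objects.
    };

    void getColor(Scene scene) {}  // by value: a shallow copy is made here...
                                   // ...and its destructor deletes the shared objects

    int main() {
        Scene scene;
        scene.objects.push_back(new Sphere);
        getColor(scene);                   // works once, but frees the Sphere on return
        // scene.objects[0]->intersect();  // would now read a dangling vptr (UB)
        scene.objects.clear();             // drop the dangling pointer so scene's own
                                           // destructor does not double delete
        return 0;
    }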

    Read the article

  • Help required in adding new methods, properties into existing classes dynamically

    - by Bepenfriends
    Hi all, I am not sure whether it is possible to achieve this kind of implementation in .NET. Below is the information. Currently we are on an application built with COM+, ASP, XSL and XML technologies. It is a multi-tier architecture application in which COM+ acts as the BAL. The execution steps for any CRUD operation are defined using a separate UI which uses XML to store the information. The BAL reads the XML, understands the execution steps defined, and executes the corresponding methods in a DLL. Much like EDM, we have our custom model (using XML) which determines which property of an object is searchable, retrievable, etc. Based on this information the BAL constructs queries and calls procedures to get the data. In the current application both the BAL and DAL are heavily customizable without any code change. The results are transmitted to the presentation layer in XML format, which constructs the UI based on the data received. Now I am creating a migration project which deals with employee information. It is also going to follow the N-tier architecture, in which the presentation layer communicates with the BAL, which connects to the DAL to return the data. Here is the problem: in our existing version we handle all information as XML in its native form (no conversion to objects, etc.), but in the migration project the team is really interested in utilizing the OOP model of development, where all information sent from the BAL needs to be converted to objects of the respective types (for example EmployeeCollection, AddressCollection, etc.). If a static set of data were returned from the BAL, we could have a class which contains those nodes as properties and access them. But in our case the data returned from our BAL needs to be customizable. How can we handle that customization in the presentation layer, which is converting the result to an object? Below is an example of the XML returned: <employees> <employee> <firstName>Employee 1 First Name</firstName> <lastName>Employee 1 Last Name</lastName> <addresses> <address> <addressType>1</addressType> <StreetName>Street name1</StreetName> <RegionName>Region name</RegionName> </address> <address> <addressType>2</addressType> <StreetName>Street name2</StreetName> <RegionName>Region name</RegionName> </address> <address> <addressType>3</addressType> <StreetName>Street name3</StreetName> <RegionName>Region name</RegionName> </address> </addresses> </employee> <employee> <firstName>Employee 2 First Name</firstName> <lastName>Employee 2 Last Name</lastName> <addresses> <address> <addressType>1</addressType> <StreetName>Street name1</StreetName> <RegionName>Region name</RegionName> </address> <address> <addressType>2</addressType> <StreetName>Street name2</StreetName> <RegionName>Region name</RegionName> </address> </addresses> </employee> </employees> If these are the only columns then I can write a class like this: public class Address{ public int AddressType {get;set;} public string StreetName {get;set;} public string RegionName {get;set;} } public class Employee{ public string FirstName {get; set;} public string LastName {get; set;} public AddressCollection AddressCollection {get; set;} } public class EmployeeCollection : List<Employee>{ public bool Add (Employee Data){ .... } } public class AddressCollection : List<Address>{ public bool Add (Address Data){ .... } } These classes will be provided to customers and consultants as DLLs. We will not provide the source code for them.
Now when consultants or customers do customization (for example, adding a country to the address, or adding a passport information object to the employee object), they must be able to access those properties in these classes; but without source code they will not be able to make those modifications, which makes the application useless. Is there any way to accomplish this in .NET? I thought of using anonymous classes, but the problem with anonymous classes is that we cannot have methods in them. I am also not sure how I can fit in the collection objects (which would in turn be anonymous classes), and not sure about datagrid / user control binding, etc. I also thought of using CodeDOM to create classes at runtime, but I am not sure about the memory and performance issues; also, the classes must be created only once and the same ones used until there is another change. Kindly help me out with this problem. Any kind of help material / sample code / links will be helpful.
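The question is .NET-specific, but the fallback design it circles around - shipping fixed compiled classes plus an open-ended bag for customer-added fields such as country or passport data - is language-neutral. A toy sketch of that property-bag idea (hypothetical names, not the poster's classes):

    #include <iostream>
    #include <map>
    #include <string>

    // Fixed, compiled part of the model: customers never see this source.
    struct Address {
        int addressType = 0;
        std::string streetName;
        std::string regionName;
        // Open-ended part: customizations live here instead of in new members.
        std::map<std::string, std::string> extra;
    };

    int main() {
        Address a;
        a.streetName = "Street name1";
        a.extra["country"] = "Country name";  // customer-added field, no recompile needed
        std::cout << a.extra["country"] << "\n";
        return 0;
    }

The trade-off is the usual one: a bag gives extensibility without recompilation, at the cost of compile-time type checking on the added fields.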

    Read the article

  • What's wrong with Bundler working with RubyGems to push a Git repo to Heroku?

    - by stanigator
    I've made sure that all the files are in the root of the repository as recommended in this discussion. However, as I follow the instructions in this section of the book, I can't get through the section without problems. What do you think is happening with my system that's causing the error? I have no clue at the moment what the error means, despite reading the following log. Thanks in advance for your help! stanley@ubuntu:~/rails_sample/first_app$ git push heroku master Warning: Permanently added the RSA host key for IP address '50.19.85.156' to the list of known hosts. Counting objects: 96, done. Compressing objects: 100% (79/79), done. Writing objects: 100% (96/96), 28.81 KiB, done. Total 96 (delta 22), reused 0 (delta 0) -----> Heroku receiving push -----> Ruby/Rails app detected -----> Installing dependencies using Bundler version 1.2.0.pre Running: bundle install --without development:test --path vendor/bundle --binstubs bin/ --deployment Fetching gem metadata from https://rubygems.org/....... Installing rake (0.9.2.2) Installing i18n (0.6.0) Installing multi_json (1.3.5) Installing activesupport (3.2.3) Installing builder (3.0.0) Installing activemodel (3.2.3) Installing erubis (2.7.0) Installing journey (1.0.3) Installing rack (1.4.1) Installing rack-cache (1.2) Installing rack-test (0.6.1) Installing hike (1.2.1) Installing tilt (1.3.3) Installing sprockets (2.1.3) Installing actionpack (3.2.3) Installing mime-types (1.18) Installing polyglot (0.3.3) Installing treetop (1.4.10) Installing mail (2.4.4) Installing actionmailer (3.2.3) Installing arel (3.0.2) Installing tzinfo (0.3.33) Installing activerecord (3.2.3) Installing activeresource (3.2.3) Installing coffee-script-source (1.3.3) Installing execjs (1.3.2) Installing coffee-script (2.2.0) Installing rack-ssl (1.3.2) Installing json (1.7.3) with native extensions Installing rdoc (3.12) Installing thor (0.14.6) Installing railties (3.2.3) Installing coffee-rails (3.2.2) Installing jquery-rails (2.0.2) Using bundler (1.2.0.pre) Installing rails (3.2.3) Installing sass (3.1.18) Installing sass-rails (3.2.5) Installing sqlite3 (1.3.6) with native extensions Gem::Installer::ExtensionBuildError: ERROR: Failed to build gem native extension. /usr/local/bin/ruby extconf.rb checking for sqlite3.h... no sqlite3.h is missing. Try 'port install sqlite3 +universal' or 'yum install sqlite-devel' and check your shared library search path (the location where your sqlite3 shared library is located). *** extconf.rb failed *** Could not create Makefile due to some reason, probably lack of necessary libraries and/or headers. Check the mkmf.log file for more details. You may need configuration options. Provided configuration options: --with-opt-dir --without-opt-dir --with-opt-include --without-opt-include=${opt-dir}/include --with-opt-lib --without-opt-lib=${opt-dir}/lib --with-make-prog --without-make-prog --srcdir=. --curdir --ruby=/usr/local/bin/ruby --with-sqlite3-dir --without-sqlite3-dir --with-sqlite3-include --without-sqlite3-include=${sqlite3-dir}/include --with-sqlite3-lib --without-sqlite3-lib=${sqlite3-dir}/lib --enable-local --disable-local Gem files will remain installed in /tmp/build_3tplrxvj7qa81/vendor/bundle/ruby/1.9.1/gems/sqlite3-1.3.6 for inspection. Results logged to /tmp/build_3tplrxvj7qa81/vendor/bundle/ruby/1.9.1/gems/sqlite3-1.3.6/ext/sqlite3/gem_make.out An error occurred while installing sqlite3 (1.3.6), and Bundler cannot continue.
Make sure that `gem install sqlite3 -v '1.3.6'` succeeds before bundling. ! ! Failed to install gems via Bundler. ! ! Heroku push rejected, failed to compile Ruby/rails app To [email protected]:growing-mountain-2788.git ! [remote rejected] master -> master (pre-receive hook declined) error: failed to push some refs to '[email protected]:growing-mountain-2788.git' ------Gemfile------------------------ As requested, here's the auto-generated gemfile: source 'https://rubygems.org' gem 'rails', '3.2.3' # Bundle edge Rails instead: # gem 'rails', :git => 'git://github.com/rails/rails.git' gem 'sqlite3' gem 'json' # Gems used only for assets and not required # in production environments by default. group :assets do gem 'sass-rails', '~> 3.2.3' gem 'coffee-rails', '~> 3.2.1' # See https://github.com/sstephenson/execjs#readme for more supported runtimes # gem 'therubyracer', :platform => :ruby gem 'uglifier', '>= 1.0.3' end gem 'jquery-rails' # To use ActiveModel has_secure_password # gem 'bcrypt-ruby', '~> 3.0.0' # To use Jbuilder templates for JSON # gem 'jbuilder' # Use unicorn as the app server # gem 'unicorn' # Deploy with Capistrano # gem 'capistrano' # To use debugger # gem 'ruby-debug'

    Read the article

  • getaddrinfo appears to return different results between Windows and Ubuntu?

    - by MrDuk
    I have the following two sets of code: Windows #undef UNICODE #include <winsock2.h> #include <ws2tcpip.h> #include <stdio.h> // link with Ws2_32.lib #pragma comment (lib, "Ws2_32.lib") int __cdecl main(int argc, char **argv) { //----------------------------------------- // Declare and initialize variables WSADATA wsaData; int iResult; INT iRetval; DWORD dwRetval; argv[1] = "www.google.com"; argv[2] = "80"; int i = 1; struct addrinfo *result = NULL; struct addrinfo *ptr = NULL; struct addrinfo hints; struct sockaddr_in *sockaddr_ipv4; // struct sockaddr_in6 *sockaddr_ipv6; LPSOCKADDR sockaddr_ip; char ipstringbuffer[46]; DWORD ipbufferlength = 46; /* // Validate the parameters if (argc != 3) { printf("usage: %s <hostname> <servicename>\n", argv[0]); printf("getaddrinfo provides protocol-independent translation\n"); printf(" from an ANSI host name to an IP address\n"); printf("%s example usage\n", argv[0]); printf(" %s www.contoso.com 0\n", argv[0]); return 1; } */ // Initialize Winsock iResult = WSAStartup(MAKEWORD(2, 2), &wsaData); if (iResult != 0) { printf("WSAStartup failed: %d\n", iResult); return 1; } //-------------------------------- // Setup the hints address info structure // which is passed to the getaddrinfo() function ZeroMemory( &hints, sizeof(hints) ); hints.ai_family = AF_UNSPEC; hints.ai_socktype = SOCK_STREAM; // hints.ai_protocol = IPPROTO_TCP; printf("Calling getaddrinfo with following parameters:\n"); printf("\tnodename = %s\n", argv[1]); printf("\tservname (or port) = %s\n\n", argv[2]); //-------------------------------- // Call getaddrinfo(). If the call succeeds, // the result variable will hold a linked list // of addrinfo structures containing response // information dwRetval = getaddrinfo(argv[1], argv[2], &hints, &result); if ( dwRetval != 0 ) { printf("getaddrinfo failed with error: %d\n", dwRetval); WSACleanup(); return 1; } printf("getaddrinfo returned success\n"); // Retrieve each address and print out the hex bytes for(ptr=result; ptr != NULL ;ptr=ptr->ai_next) { printf("getaddrinfo response %d\n", i++); printf("\tFlags: 0x%x\n", ptr->ai_flags); printf("\tFamily: "); switch (ptr->ai_family) { case AF_UNSPEC: printf("Unspecified\n"); break; case AF_INET: printf("AF_INET (IPv4)\n"); sockaddr_ipv4 = (struct sockaddr_in *) ptr->ai_addr; printf("\tIPv4 address %s\n", inet_ntoa(sockaddr_ipv4->sin_addr) ); break; case AF_INET6: printf("AF_INET6 (IPv6)\n"); // the InetNtop function is available on Windows Vista and later // sockaddr_ipv6 = (struct sockaddr_in6 *) ptr->ai_addr; // printf("\tIPv6 address %s\n", // InetNtop(AF_INET6, &sockaddr_ipv6->sin6_addr, ipstringbuffer, 46) ); // We use WSAAddressToString since it is supported on Windows XP and later sockaddr_ip = (LPSOCKADDR) ptr->ai_addr; // The buffer length is changed by each call to WSAAddresstoString // So we need to set it for each iteration through the loop for safety ipbufferlength = 46; iRetval = WSAAddressToString(sockaddr_ip, (DWORD) ptr->ai_addrlen, NULL, ipstringbuffer, &ipbufferlength ); if (iRetval) printf("WSAAddressToString failed with %u\n", WSAGetLastError() ); else printf("\tIPv6 address %s\n", ipstringbuffer); break; case AF_NETBIOS: printf("AF_NETBIOS (NetBIOS)\n"); break; default: printf("Other %ld\n", ptr->ai_family); break; } printf("\tSocket type: "); switch (ptr->ai_socktype) { case 0: printf("Unspecified\n"); break; case SOCK_STREAM: printf("SOCK_STREAM (stream)\n"); break; case SOCK_DGRAM: printf("SOCK_DGRAM (datagram) \n"); break; case SOCK_RAW: printf("SOCK_RAW (raw) \n"); 
break; case SOCK_RDM: printf("SOCK_RDM (reliable message datagram)\n"); break; case SOCK_SEQPACKET: printf("SOCK_SEQPACKET (pseudo-stream packet)\n"); break; default: printf("Other %ld\n", ptr->ai_socktype); break; } printf("\tProtocol: "); switch (ptr->ai_protocol) { case 0: printf("Unspecified\n"); break; case IPPROTO_TCP: printf("IPPROTO_TCP (TCP)\n"); break; case IPPROTO_UDP: printf("IPPROTO_UDP (UDP) \n"); break; default: printf("Other %ld\n", ptr->ai_protocol); break; } printf("\tLength of this sockaddr: %d\n", ptr->ai_addrlen); printf("\tCanonical name: %s\n", ptr->ai_canonname); } freeaddrinfo(result); WSACleanup(); return 0; } Ubuntu /* ** listener.c -- a datagram sockets "server" demo */ #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <errno.h> #include <string.h> #include <sys/types.h> #include <sys/socket.h> #include <netinet/in.h> #include <arpa/inet.h> #include <netdb.h> #define MYPORT "4950" // the port users will be connecting to #define MAXBUFLEN 100 // get sockaddr, IPv4 or IPv6: void *get_in_addr(struct sockaddr *sa) { if (sa->sa_family == AF_INET) { return &(((struct sockaddr_in*)sa)->sin_addr); } return &(((struct sockaddr_in6*)sa)->sin6_addr); } int main(void) { int sockfd; struct addrinfo hints, *servinfo, *p; int rv; int numbytes; struct sockaddr_storage their_addr; char buf[MAXBUFLEN]; socklen_t addr_len; char s[INET6_ADDRSTRLEN]; memset(&hints, 0, sizeof hints); hints.ai_family = AF_UNSPEC; // set to AF_INET to force IPv4 hints.ai_socktype = SOCK_DGRAM; hints.ai_flags = AI_PASSIVE; // use my IP if ((rv = getaddrinfo(NULL, MYPORT, &hints, &servinfo)) != 0) { fprintf(stderr, "getaddrinfo: %s\n", gai_strerror(rv)); return 1; } // loop through all the results and bind to the first we can for(p = servinfo; p != NULL; p = p->ai_next) { if ((sockfd = socket(p->ai_family, p->ai_socktype, p->ai_protocol)) == -1) { perror("listener: socket"); continue; } if (bind(sockfd, p->ai_addr, p->ai_addrlen) == -1) { close(sockfd); perror("listener: bind"); continue; } break; } if (p == NULL) { fprintf(stderr, "listener: failed to bind socket\n"); return 2; } freeaddrinfo(servinfo); printf("listener: waiting to recvfrom...\n"); addr_len = sizeof their_addr; if ((numbytes = recvfrom(sockfd, buf, MAXBUFLEN-1 , 0, (struct sockaddr *)&their_addr, &addr_len)) == -1) { perror("recvfrom"); exit(1); } printf("listener: got packet from %s\n", inet_ntop(their_addr.ss_family, get_in_addr((struct sockaddr *)&their_addr), s, sizeof s)); printf("listener: packet is %d bytes long\n", numbytes); buf[numbytes] = '\0'; printf("listener: packet contains \"%s\"\n", buf); close(sockfd); return 0; } When I attempt www.google.com, I don't get the ipv6 socket returned on Windows - why is this? 
Outputs: (ubuntu) caleb@ub1:~/Documents/dev/cs438/mp0/MP0$ ./a.out www.google.com IP addresses for www.google.com: IPv4: 74.125.228.115 IPv4: 74.125.228.116 IPv4: 74.125.228.112 IPv4: 74.125.228.113 IPv4: 74.125.228.114 IPv6: 2607:f8b0:4004:803::1010 Outputs: (win) Calling getaddrinfo with following parameters: nodename = www.google.com servname (or port) = 80 getaddrinfo returned success getaddrinfo response 1 Flags: 0x0 Family: AF_INET (IPv4) IPv4 address 74.125.228.114 Socket type: SOCK_STREAM (stream) Protocol: Unspecified Length of this sockaddr: 16 Canonical name: (null) getaddrinfo response 2 Flags: 0x0 Family: AF_INET (IPv4) IPv4 address 74.125.228.115 Socket type: SOCK_STREAM (stream) Protocol: Unspecified Length of this sockaddr: 16 Canonical name: (null) getaddrinfo response 3 Flags: 0x0 Family: AF_INET (IPv4) IPv4 address 74.125.228.116 Socket type: SOCK_STREAM (stream) Protocol: Unspecified Length of this sockaddr: 16 Canonical name: (null) getaddrinfo response 4 Flags: 0x0 Family: AF_INET (IPv4) IPv4 address 74.125.228.112 Socket type: SOCK_STREAM (stream) Protocol: Unspecified Length of this sockaddr: 16 Canonical name: (null) getaddrinfo response 5 Flags: 0x0 Family: AF_INET (IPv4) IPv4 address 74.125.228.113 Socket type: SOCK_STREAM (stream) Protocol: Unspecified Length of this sockaddr: 16 Canonical name: (null)
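One commonly cited explanation (hedged - it depends on each machine's connectivity) is that Windows' getaddrinfo filters results against the host's configured addresses, so a box without global IPv6 connectivity gets no AAAA results back, while the Ubuntu machine above evidently had IPv6. Forcing the address family makes the difference easy to see; a POSIX-flavoured sketch (on Windows, add WSAStartup and include ws2tcpip.h instead):

    #include <netdb.h>
    #include <stdio.h>
    #include <string.h>

    int main() {
        struct addrinfo hints, *res;
        memset(&hints, 0, sizeof hints);
        hints.ai_family   = AF_INET6;    // ask for AAAA records only
        hints.ai_socktype = SOCK_STREAM;

        int rc = getaddrinfo("www.google.com", "80", &hints, &res);
        if (rc != 0) {
            // On a v4-only host this is where the "missing" IPv6 shows up.
            fprintf(stderr, "getaddrinfo: %s\n", gai_strerror(rc));
            return 1;
        }
        for (struct addrinfo* p = res; p; p = p->ai_next)
            printf("got an AF_INET6 result (ai_addrlen=%d)\n", (int)p->ai_addrlen);
        freeaddrinfo(res);
        return 0;
    }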

    Read the article

  • asp .net MVC 2.0 Validation

    - by ANDyW
    Hi, I'm trying to do some validation in ASP.NET MVC 2.0 for my application. I want to have some nice client-side validation. Validation should be done most of the time on the model side with DataAnnotations and custom attributes (like CompareTo, StringLength, MinPasswordLength (from the Membership.MinRequiredPasswordLength value)). For that purpose I tried to use xVal with jquery.validation. One specific thing is that most forms will be working with Ajax, and most of the problems occur when I want to validate a form with Ajax. Here is a link to a sample project http://www.sendspace.com/file/m9gl54 . I have two forms as controls, ValidFormControl1.ascx and ValidFormControl2.ascx: <% using (Ajax.BeginForm("CreateValidForm", "Test", new AjaxOptions { HttpMethod = "Post" })) {%> <div id="validationSummary1"> <%= Html.ValidationSummary(true)%> </div> <fieldset> <legend>Fields</legend> <div class="editor-label"> <%= Html.LabelFor(model => model.Name)%> </div> <div class="editor-field"> <%= Html.TextBoxFor(model => model.Name)%> <%= Html.ValidationMessageFor(model => model.Name)%> </div> <div class="editor-label"> <%= Html.LabelFor(model => model.Email)%> </div> <div class="editor-field"> <%= Html.TextBoxFor(model => model.Email)%> <%= Html.ValidationMessageFor(model => model.Email)%> </div> <div class="editor-label"> <%= Html.LabelFor(model => model.Password)%> </div> <div class="editor-field"> <%= Html.TextBoxFor(model => model.Password)%> <%= Html.ValidationMessageFor(model => model.Password)%> </div> <div class="editor-label"> <%= Html.LabelFor(model => model.ConfirmPassword)%> </div> <div class="editor-field"> <%= Html.TextBoxFor(model => model.ConfirmPassword)%> <%= Html.ValidationMessageFor(model => model.ConfirmPassword)%> </div> <p> <input type="submit" value="Create" /> </p> </fieldset> <% } %> <%= Html.ClientSideValidation<ValidModel>() .UseValidationSummary("validationSummary1", "Please fix the following problems:") %> Both look the same; the only difference is the validation summary ID (validationSummary1, validationSummary2). Both controls are rendered on one page: Form2 <%Html.RenderPartial("~/Views/Test/ValidFormControl2.ascx", null); %> Form1 <%Html.RenderPartial("~/Views/Test/ValidFormControl.ascx", null); %> Validation properties: First problem, when we have two controls of the same type to validate, it doesn't work, because HTML elements are rendered by field name (so we have two elements with the same name, "Password"). Only the first form will be validated on the client side. The worst thing is that even if we have different types whose field names are the same, validation still won't work (this is what I need to fix; it would be stupid to have to invent unique property names just for validation). Is there any solution for this? Custom attribute validation: Next, custom attribute validation (all these errors occur when I use Ajax; with normal form validation everything works without problems): CompareTo - a simple compare like the one done in the MVC template for the account model (a class attribute saying which two properties will be compared), and it wasn't shown on the page. To make it work I created my own CachingRulesProvider with a compare rule and my attribute. Maybe there is an easier way to do it? StringLength with minimum and maximum values: I won't describe how I did it, but is there any easy way to do it? Validation summary: When I have two controls on the page, all summary validation information goes to the first control's validation summary element, even though the xVal-generated script says the summary element IDs are different. Anyone know how to fix it?
Validation information: Is there any option to turn on messages in the place where Html.ValidationMessageFor(model => model.ConfirmPassword) is rendered? Because for me it doesn't show up. I would like to have the summary and the near-field messages too, not only a red border. Anyone know how to do it? Ajax submit: Does anyone know an easy way, without massive JavaScript code, to do the submit via JavaScript? This will be used to change the input submit to an href element (a).

    Read the article

  • Condition Variable in Shared Memory - is this code POSIX-conformant?

    - by GrahamS
    We've been trying to use a mutex and condition variable to synchronise access to named shared memory on a LynuxWorks LynxOS-SE system (POSIX-conformant). One shared memory block is called "/sync" and contains the mutex and condition variable, the other is "/data" and contains the actual data we are syncing access to. We're seeing failures from pthread_cond_signal() if both processes don't perform the mmap() calls in exactly the same order, or if one process mmaps in some other piece of shared memory before it mmaps the sync memory. This example code is about as short as I can make it: #include <sys/types.h> #include <sys/stat.h> #include <sys/mman.h> #include <sys/file.h> #include <stdlib.h> #include <pthread.h> #include <errno.h> #include <iostream> #include <string> using namespace std; static const string shm_name_sync("/sync"); static const string shm_name_data("/data"); struct shared_memory_sync { pthread_mutex_t mutex; pthread_cond_t condition; }; struct shared_memory_data { int a; int b; }; //Create 2 shared memory objects // - sync contains 2 shared synchronisation objects (mutex and condition) // - data not important void create() { // Create and map 'sync' shared memory int fd_sync = shm_open(shm_name_sync.c_str(), O_CREAT|O_RDWR, S_IRUSR|S_IWUSR); ftruncate(fd_sync, sizeof(shared_memory_sync)); void* addr_sync = mmap(0, sizeof(shared_memory_sync), PROT_READ|PROT_WRITE, MAP_SHARED, fd_sync, 0); shared_memory_sync* p_sync = static_cast<shared_memory_sync*> (addr_sync); // init the cond and mutex pthread_condattr_t cond_attr; pthread_condattr_init(&cond_attr); pthread_condattr_setpshared(&cond_attr, PTHREAD_PROCESS_SHARED); pthread_cond_init(&(p_sync->condition), &cond_attr); pthread_condattr_destroy(&cond_attr); pthread_mutexattr_t m_attr; pthread_mutexattr_init(&m_attr); pthread_mutexattr_setpshared(&m_attr, PTHREAD_PROCESS_SHARED); pthread_mutex_init(&(p_sync->mutex), &m_attr); pthread_mutexattr_destroy(&m_attr); // Create the 'data' shared memory int fd_data = shm_open(shm_name_data.c_str(), O_CREAT|O_RDWR, S_IRUSR|S_IWUSR); ftruncate(fd_data, sizeof(shared_memory_data)); void* addr_data = mmap(0, sizeof(shared_memory_data), PROT_READ|PROT_WRITE, MAP_SHARED, fd_data, 0); shared_memory_data* p_data = static_cast<shared_memory_data*> (addr_data); // Run the second process while it sleeps here. sleep(10); int res = pthread_cond_signal(&(p_sync->condition)); assert(res==0); // <--- !!!THIS ASSERT WILL FAIL ON LYNXOS!!! 
munmap(addr_sync, sizeof(shared_memory_sync)); shm_unlink(shm_name_sync.c_str()); munmap(addr_data, sizeof(shared_memory_data)); shm_unlink(shm_name_data.c_str()); } //Open the same 2 shared memory objects but in reverse order // - data // - sync void open() { sleep(2); int fd_data = shm_open(shm_name_data.c_str(), O_RDWR, S_IRUSR|S_IWUSR); void* addr_data = mmap(0, sizeof(shared_memory_data), PROT_READ|PROT_WRITE, MAP_SHARED, fd_data, 0); shared_memory_data* p_data = static_cast<shared_memory_data*> (addr_data); int fd_sync = shm_open(shm_name_sync.c_str(), O_RDWR, S_IRUSR|S_IWUSR); void* addr_sync = mmap(0, sizeof(shared_memory_sync), PROT_READ|PROT_WRITE, MAP_SHARED, fd_sync, 0); shared_memory_sync* p_sync = static_cast<shared_memory_sync*> (addr_sync); // Wait on the condvar pthread_mutex_lock(&(p_sync->mutex)); pthread_cond_wait(&(p_sync->condition), &(p_sync->mutex)); pthread_mutex_unlock(&(p_sync->mutex)); munmap(addr_sync, sizeof(shared_memory_sync)); munmap(addr_data, sizeof(shared_memory_data)); } int main(int argc, char** argv) { if(argc>1) { open(); } else { create(); } return (0); } Run this program with no args, then another copy with args, and the first one will fail at the assert checking the pthread_cond_signal(). But change the open() function to mmap() the "/sync" memory first and it will all work fine. This seems like a major bug in LynxOS but LynuxWorks claim that using mutex and condition variable in this way is not covered by the POSIX standard, so they are not interested. Can anyone determine if this code does violate POSIX? Or does anyone have any convincing documentation that it is POSIX compliant?
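As a narrowing step, the conformance question can at least be probed at runtime: process-shared mutexes and condition variables belong to an option group (_POSIX_THREAD_PROCESS_SHARED) that an implementation may legitimately decline to support, so it is worth printing the option and checking every attribute call's return code instead of assuming zero, as the example code does. A small probe, offered as a sketch:

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    int main() {
    #ifdef _POSIX_THREAD_PROCESS_SHARED
        // A positive value (or the sysconf result) advertises support.
        printf("_POSIX_THREAD_PROCESS_SHARED = %ld\n",
               (long)sysconf(_SC_THREAD_PROCESS_SHARED));
    #else
        printf("process-shared option not advertised at compile time\n");
    #endif
        pthread_condattr_t ca;
        pthread_condattr_init(&ca);
        int rc = pthread_condattr_setpshared(&ca, PTHREAD_PROCESS_SHARED);
        printf("pthread_condattr_setpshared -> %d%s\n", rc,
               rc ? " (option unsupported here)" : "");
        pthread_condattr_destroy(&ca);
        return 0;
    }

This doesn't settle whether the mapping-order sensitivity is a conformance bug, but it rules out the simpler explanation that the option group is absent altogether.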

    Read the article

  • System.Timers.Timer leaking due to "direct delegate roots"

    - by alimbada
    Apologies for the rather verbose and long-winded post, but this problem's been perplexing me for a few weeks now, so I'm posting as much information as I can in order to get this resolved quickly. We have a WPF UserControl which is being loaded by a 3rd party app. The 3rd party app is a presentation application which loads and unloads controls on a schedule defined by an XML file which is downloaded from a server. Our control, when it is loaded into the application, makes a web request to a web service and uses the data from the response to display some information. We're using an MVVM architecture for the control. The entry point of the control is a method that implements an interface exposed by the main app, and this is where the control's configuration is set up. This is also where I set the DataContext of our control to our MainViewModel. The MainViewModel has two other view models as properties and the main UserControl has two child controls. Depending on the data received from the web service, the main UserControl decides which child control to display, e.g. if there is an HTTP error or the data received is not valid, then display child control A; otherwise display child control B. As you'd expect, these two child controls bind to two separate view models, each of which is a property of MainViewModel. Now child control B (which is displayed when the data is valid) has a RefreshService property/field. RefreshService is an object that is responsible for updating the model in a number of ways and contains four System.Timers.Timers: a _modelRefreshTimer, a _viewRefreshTimer, a _pageSwitchTimer, and a _retryFeedRetrievalOnErrorTimer (this one is only enabled when something goes wrong with retrieving data). I should mention at this point that there are two types of data; the first changes every minute, the second changes every few hours. The control's configuration decides which type we are using/displaying. If the data is of the first type then we update the model quite frequently (every 30 seconds) using the _modelRefreshTimer's events. If the data is of the second type then we update the model after a longer interval. However, the view still needs to be refreshed every 30 seconds, as stale data needs to be removed from the view (hence the _viewRefreshTimer). The control also paginates the data so we can see more than we can fit in the display area. This works by breaking the data up into Lists and switching the CurrentPage (which is a List) property of the view model to the right List. This is done by handling the _pageSwitchTimer's Elapsed event. Now the problem: my problem is that the control, when removed from the visual tree, doesn't dispose of its timers. This was first noticed when we started getting an unusually high number of requests on the web server end very soon after deploying this control, and we found that requests were being made at least once a second! We found that the timers were living on, not stopping hours after the control had been removed from view, and that the more timers there were, the more requests piled up at the web server. My first solution was to implement IDisposable for the RefreshService and do some cleanup when the control's Unloaded event was fired. Within the RefreshService's Dispose method I've set Enabled to false for all the timers, then used the Stop() method on all of them. I've then called Dispose() too and set them to null. None of this worked.
After some reading around I found that event handlers may hold references to Timers and prevent them from being disposed and collected. After some more reading and researching I found that the best way around this was to use the Weak Event Pattern. Using this blog and this blog I've managed to work around the shortcomings in the Weak Event pattern. However, none of this solves the problem. Timers are still not being disabled or stopped (let alone disposed) and web requests are continuing to build up. Mem Profiler tells me that "This type has N instances that are directly rooted by a delegate. This can indicate the delegate has not been properly removed" (where N is the number of instances). As far as I can tell though, all listeners of the Elapsed event for the timers are being removed during the cleanup so I can't understand why the timers continue to run. Thanks for reading. Eagerly awaiting your suggestions/comments/solutions (if you got this far :-p)
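The post is C#/WPF, but the rooting mechanism generalises: an event source that stores strong references to its listeners (or a runtime that roots a timer while its Elapsed delegate is alive) keeps them reachable forever. The weak-event idea can be sketched with C++'s std::weak_ptr, purely as an analogy with hypothetical names rather than the poster's code:

    #include <cstdio>
    #include <memory>
    #include <vector>

    struct Listener {
        void on_tick() { std::printf("tick\n"); }
    };

    struct Timer {
        // Non-owning references: registering here cannot root a listener.
        std::vector<std::weak_ptr<Listener>> listeners;

        void fire() {
            for (auto it = listeners.begin(); it != listeners.end(); ) {
                if (auto l = it->lock()) { l->on_tick(); ++it; }
                else it = listeners.erase(it);  // listener died: self-cleaning
            }
        }
    };

    int main() {
        Timer t;
        auto a = std::make_shared<Listener>();
        t.listeners.push_back(a);
        t.fire();    // prints "tick"
        a.reset();   // listener destroyed even though the timer still exists
        t.fire();    // prints nothing; the stale entry is removed
        return 0;
    }

The inverse relationship is what matters in the WPF case: it is the timer that must not be rooted by its own Elapsed delegate, which is why detaching the handlers (or the weak event pattern) has to actually run before the timer can be collected.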

    Read the article

  • C++0x rvalue references - lvalues-rvalue binding

    - by Doug
    This is a follow-on question to http://stackoverflow.com/questions/2748866/c0x-rvalue-references-and-temporaries In the previous question, I asked how this code should work: void f(const std::string &); //less efficient void f(std::string &&); //more efficient void g(const char * arg) { f(arg); } It seems that the move overload should probably be called because of the implicit temporary, and this happens in GCC but not MSVC (or the EDG front-end used in MSVC's Intellisense). What about this code? void f(std::string &&); //NB: No const string & overload supplied void g1(const char * arg) { f(arg); } void g2(const std::string & arg) { f(arg); } It seems, based on the answers to my previous question, that function g1 is legal (and is accepted by GCC 4.3-4.5, but not by MSVC). However, GCC and MSVC both reject g2 because of clause 13.3.3.1.4/3, which prohibits lvalues from binding to rvalue ref arguments. I understand the rationale behind this - it is explained in N2831 "Fixing a safety problem with rvalue references". I also think that GCC is probably implementing this clause as intended by the authors of that paper, because the original patch to GCC was written by one of the authors (Doug Gregor). However, I don't think this is quite intuitive. To me, (a) a const string & is conceptually closer to a string && than a const char *, and (b) the compiler could create a temporary string in g2, as if it were written like this: void g2(const std::string & arg) { f(std::string(arg)); } Indeed, sometimes the copy constructor is considered to be an implicit conversion operator. Syntactically, this is suggested by the form of a copy constructor, and the standard even mentions this specifically in clause 13.3.3.1.2/4, where the copy constructor for derived-base conversions is given a higher conversion rank than other implicit conversions: A conversion of an expression of class type to the same class type is given Exact Match rank, and a conversion of an expression of class type to a base class of that type is given Conversion rank, in spite of the fact that a copy/move constructor (i.e., a user-defined conversion function) is called for those cases. (I assume this is used when passing a derived class to a function like void h(Base), which takes a base class by value.) Motivation My motivation for asking this is something like the question asked in http://stackoverflow.com/questions/2696156/how-to-reduce-redundant-code-when-adding-new-c0x-rvalue-reference-operator-over ("How to reduce redundant code when adding new c++0x rvalue reference operator overloads"). If you have a function that accepts a number of potentially-moveable arguments, and would move them if it can (e.g. a factory function/constructor: Object create_object(string, vector<string>, string) or the like), and want to move or copy each argument as appropriate, you quickly start writing a lot of code. If the argument types are movable, then one could just write one version that accepts the arguments by value, as above. But if the arguments are (legacy) non-movable-but-swappable classes a la C++03, and you can't change them, then writing rvalue reference overloads is more efficient.
So if lvalues did bind to rvalues via an implicit copy, then you could write just one overload like create_object(legacy_string &&, legacy_vector<legacy_string> &&, legacy_string &&) and it would more or less work like providing all the combinations of rvalue/lvalue reference overloads - actual arguments that were lvalues would get copied and then bound to the arguments, and actual arguments that were rvalues would get directly bound. Questions My questions are then: Is this a valid interpretation of the standard? It seems that it's not the conventional or intended one, at any rate. Does it make intuitive sense? Is there a problem with this idea that I'm not seeing? It seems like you could get copies being quietly created when that's not exactly expected, but that's the status quo in places in C++03 anyway. Also, it would make some overloads viable when they're currently not, but I don't see it being a problem in practice. Is this a significant enough improvement that it would be worth making e.g. an experimental patch for GCC?
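For movable types, the "one version that accepts the arguments by value" workaround mentioned above looks like the following sketch: lvalue arguments are copied into the parameters, rvalue arguments are moved, and a single signature covers all the lvalue/rvalue combinations that would otherwise need 2^n reference overloads:

    #include <string>
    #include <utility>
    #include <vector>

    struct Object {
        std::string name;
        std::vector<std::string> tags;
        std::string desc;
    };

    Object create_object(std::string n, std::vector<std::string> t, std::string d) {
        // Each parameter is a fresh value owned by this function,
        // so moving from it is always safe.
        return Object{ std::move(n), std::move(t), std::move(d) };
    }

    int main() {
        std::string name = "x";
        // name is copied once (it is an lvalue); the temporaries are moved.
        Object o = create_object(name, { "a", "b" }, std::string("desc"));
        (void)o;
        return 0;
    }

As the question notes, this only helps when the types are actually movable; for C++03-era copy-only-but-swappable types, the by-value version still pays a full copy per argument.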

    Read the article

  • xs:choice unbounded list

    - by Matt
    I want to define an XSD schema for an XML document, example below: <?xml version="1.0" encoding="utf-8"?> <view xmlns="http://localhost/model_data" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://localhost/model_data XMLSchemaView.xsd" path="wibble" id="wibble"> <text name="PageTitle">Homepage</text> <text name="Keywords">home foo bar</text> <image name="MainImage"> <description>lolem ipsum</description> <title>i haz it</title> <url>/images/main-image.jpg</url> <type>image/jpeg</type> <alt>alt text for image</alt> <width>400</width> <height>300</height> </image> <link name="TermsAndConditionsLink"> <url>/tnc.html</url> <title>Terms and Conditions</title> <target>_blank</target> </link> </view> There's a view root element and then an unknown number of field elements (of various types). I'm using the following XSD schema: <?xml version="1.0" encoding="utf-8"?> <xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema" xmlns="http://localhost/model_data" targetNamespace="http://localhost/model_data" id="XMLSchema1"> <xs:element name="text" type="text_field"/> <xs:element name="view" type="model_data"/> <xs:complexType name="model_data"> <xs:choice maxOccurs="unbounded"> <xs:element name="text" type="text_field"/> <xs:element name="image" type="image_field"/> <xs:element name="link" type="link_field"/> </xs:choice> <xs:attribute name="path" type="xs:string"/> <xs:attribute name="id" type="xs:string"/> </xs:complexType> <xs:complexType name="image_field"> <xs:all> <xs:element name="description" type="xs:string"/> <xs:element name="title" type="xs:string"/> <xs:element name="type" type="xs:string"/> <xs:element name="url" type="xs:string"/> <xs:element name="alt" type="xs:string"/> <xs:element name="height" type="xs:int"/> <xs:element name="width" type="xs:int"/> </xs:all> <xs:attribute name="name" type="xs:string"/> </xs:complexType> <xs:complexType name="text_field"> <xs:simpleContent> <xs:extension base="xs:string"> <xs:attribute name="name" type="xs:string"/> </xs:extension> </xs:simpleContent> </xs:complexType> <xs:complexType name="link_field"> <xs:all> <xs:element name="target" type="xs:string"/> <xs:element name="title" type="xs:string"/> <xs:element name="url" type="xs:string"/> </xs:all> <xs:attribute name="name" type="xs:string"/> </xs:complexType> </xs:schema> This looks like it should work to me, but it doesn't and I always get the following error: Element <text> is not allowed under element <view>. Reason: The following elements are expected at this location (see below) <text> <image> <link> Error location: view / text Details cvc-model-group: Element <text> unexpected by type 'model_data' of element <view>. cvc-elt.5.2.1: The element <view> is not valid with respect to the actual type definition 'model_data'. I've never really used XSD schemas before, so I'd really appreciate it if someone could point out where I'm going wrong.

    Read the article

  • php - upload script mkdir saying file already exists when same directory even though different filename

    - by neeko
    My upload script says my file already exists when I try to upload, even though the filename is different. <?php // Start a session for error reporting session_start(); ?> <?php // Check, if username session is NOT set then this page will jump to login page if (!isset($_SESSION['username'])) { header('Location: index.html'); } // Call our connection file include('config.php'); // Check to see if the type of file uploaded is a valid image type function is_valid_type($file) { // This is an array that holds all the valid image MIME types $valid_types = array("image/jpg", "image/JPG", "image/jpeg", "image/bmp", "image/gif", "image/png"); if (in_array($file['type'], $valid_types)) return 1; return 0; } // Just a short function that prints out the contents of an array in a manner that's easy to read // I used this function during debugging but it serves no purpose at run time for this example function showContents($array) { echo "<pre>"; print_r($array); echo "</pre>"; } // Set some constants // Grab the User ID we sent from our form $user_id = $_SESSION['username']; $category = $_POST['category']; // This variable is the path to the image folder where all the images are going to be stored // Note that there is a trailing forward slash $TARGET_PATH = "img/users/$category/$user_id/"; mkdir($TARGET_PATH, 0755, true); // Get our POSTed variables $fname = $_POST['fname']; $lname = $_POST['lname']; $contact = $_POST['contact']; $price = $_POST['price']; $image = $_FILES['image']; // Build our target path full string. This is where the file will be moved to // i.e. images/picture.jpg $TARGET_PATH .= $image['name']; // Make sure all the fields from the form have inputs if ( $fname == "" || $lname == "" || $image['name'] == "" ) { $_SESSION['error'] = "All fields are required"; header("Location: error.php"); exit; } // Check to make sure that our file is actually an image // You check the file type instead of the extension because the extension can easily be faked if (!is_valid_type($image)) { $_SESSION['error'] = "You must upload a jpeg, gif, or bmp"; header("Location: error.php"); exit; } // Here we check to see if a file with that name already exists // You could get past filename problems by appending a timestamp to the filename and then continuing if (file_exists($TARGET_PATH)) { $_SESSION['error'] = "A file with that name already exists"; header("Location: error.php"); exit; } // Let's attempt to move the file from its temporary directory to its new home if (move_uploaded_file($image['tmp_name'], $TARGET_PATH)) { // NOTE: This is where a lot of people make mistakes.
// We are *not* putting the image into the database; we are putting a reference to the file's location on the server $imagename = $image['name']; $sql = "insert into people (price, contact, category, username, fname, lname, expire, filename) values (:price, :contact, :category, :user_id, :fname, :lname, now() + INTERVAL 1 MONTH, :imagename)"; $q = $conn->prepare($sql) or die("failed!"); $q->bindParam(':price', $price, PDO::PARAM_STR); $q->bindParam(':contact', $contact, PDO::PARAM_STR); $q->bindParam(':category', $category, PDO::PARAM_STR); $q->bindParam(':user_id', $user_id, PDO::PARAM_STR); $q->bindParam(':fname', $fname, PDO::PARAM_STR); $q->bindParam(':lname', $lname, PDO::PARAM_STR); $q->bindParam(':imagename', $imagename, PDO::PARAM_STR); $q->execute(); $sql1 = "UPDATE people SET firstname = (SELECT firstname FROM user WHERE username=:user_id1) WHERE username=:user_id2"; $q = $conn->prepare($sql1) or die("failed!"); $q->bindParam(':user_id1', $user_id, PDO::PARAM_STR); $q->bindParam(':user_id2', $user_id, PDO::PARAM_STR); $q->execute(); $sql2 = "UPDATE people SET surname = (SELECT surname FROM user WHERE username=:user_id1) WHERE username=:user_id2"; $q = $conn->prepare($sql2) or die("failed!"); $q->bindParam(':user_id1', $user_id, PDO::PARAM_STR); $q->bindParam(':user_id2', $user_id, PDO::PARAM_STR); $q->execute(); header("Location: search.php"); exit; } else { // A common cause of file moving failures is because of bad permissions on the directory attempting to be written to // Make sure you chmod the directory to be writeable $_SESSION['error'] = "Could not upload file. Check read/write permissions on the directory"; header("Location: error.php"); exit; } ?>

    Read the article

< Previous Page | 395 396 397 398 399 400 401 402 403 404 405 406  | Next Page >