Search Results

Search found 1505 results on 61 pages for 'march ho'.


  • EKCalendar not added to iCal

    - by Alex75
    I have a strange behavior on my iPhone. I'm creating an application that uses calendar events (EventKit). The class that use is as follows: the .h one #import "GenericManager.h" #import <EventKit/EventKit.h> #define oneDay 60*60*24 #define oneHour 60*60 @protocol CalendarManagerDelegate; @interface CalendarManager : GenericManager /* * metodo che aggiunge un evento ad un calendario di nome Name nel giorno onDate. * L'evento da aggiungere viene recuperato tramite il dataSource che è quindi * OBBLIGATORIO (!= nil). * * Restituisce YES solo se il delegate è conforme al protocollo CalendarManagerDataSource. * NO altrimenti */ + (BOOL) addEventForCalendarWithName:(NSString *) name fromDate:(NSDate *)fromDate toDate: (NSDate *) toDate withDelegate:(id<CalendarManagerDelegate>) delegate; /* * metodo che aggiunge un evento per giorno compreso tra fromDate e toDate ad un * calendario di nome Name. L'evento da aggiungere viene recuperato tramite il dataSource * che è quindi OBBLIGATORIO (!= nil). * * Restituisce YES solo se il delegate è conforme al protocollo CalendarManagerDataSource. * NO altrimenti */ + (BOOL) addEventsForCalendarWithName:(NSString *) name fromDate:(NSDate *)fromDate toDate: (NSDate *) toDate withDelegate:(id<CalendarManagerDelegate>) delegate; @end @protocol CalendarManagerDelegate <NSObject> // viene inviato quando il calendario necessita informazioni sull' evento da aggiungere - (void) calendarManagerDidCreateEvent:(EKEvent *) event; @end the .m one // // CalendarManager.m // AppCampeggioSingolo // // Created by CreatiWeb Srl on 12/17/12. // Copyright (c) 2012 CreatiWeb Srl. All rights reserved. // #import "CalendarManager.h" #import "Commons.h" #import <objc/message.h> @interface CalendarManager () @end @implementation CalendarManager + (void)requestToEventStore:(EKEventStore *)eventStore delegate:(id)delegate fromDate:(NSDate *)fromDate toDate: (NSDate *) toDate name:(NSString *)name { if([eventStore respondsToSelector:@selector(requestAccessToEntityType:completion:)]) { // ios >= 6.0 [eventStore requestAccessToEntityType:EKEntityTypeEvent completion:^(BOOL granted, NSError *error) { if (granted) { [self addEventForCalendarWithName:name fromDate: fromDate toDate: toDate inEventStore:eventStore withDelegate:delegate]; } else { } }]; } else if (class_getClassMethod([EKCalendar class], @selector(calendarIdentifier)) != nil) { // ios >= 5.0 && ios < 6.0 [self addEventForCalendarWithName:name fromDate:fromDate toDate:toDate inEventStore:eventStore withDelegate:delegate]; } else { // ios < 5.0 EKCalendar *myCalendar = [eventStore defaultCalendarForNewEvents]; EKEvent *event = [self generateEventForCalendar:myCalendar fromDate: fromDate toDate: toDate inEventStore:eventStore withDelegate:delegate]; [eventStore saveEvent:event span:EKSpanThisEvent error:nil]; } } /* * metodo che recupera l'identificativo del calendario associato all'app o nil se non è mai stato creato. 
*/ + (NSString *) identifierForCalendarName: (NSString *) name { NSString * confFileName = [self pathForFile:kCurrentCalendarFileName]; NSDictionary *confCalendar = [NSDictionary dictionaryWithContentsOfFile:confFileName]; NSString *currentIdentifier = [confCalendar objectForKey:name]; return currentIdentifier; } /* * memorizza l'identifier del calendario */ + (void) saveCalendarIdentifier:(NSString *) identifier andName: (NSString *) name { if (identifier != nil) { NSString * confFileName = [self pathForFile:kCurrentCalendarFileName]; NSMutableDictionary *confCalendar = [NSMutableDictionary dictionaryWithContentsOfFile:confFileName]; if (confCalendar == nil) { confCalendar = [NSMutableDictionary dictionaryWithCapacity:1]; } [confCalendar setObject:identifier forKey:name]; [confCalendar writeToFile:confFileName atomically:YES]; } } + (EKCalendar *)getCalendarWithName:(NSString *)name inEventStore:(EKEventStore *)eventStore withLocalSource: (EKSource *)localSource forceCreation:(BOOL) force { EKCalendar *myCalendar; NSString *identifier = [self identifierForCalendarName:name]; if (force || identifier == nil) { NSLog(@"create new calendar"); if (class_getClassMethod([EKCalendar class], @selector(calendarForEntityType:eventStore:)) != nil) { // da ios 6.0 in avanti myCalendar = [EKCalendar calendarForEntityType:EKEntityTypeEvent eventStore:eventStore]; } else { myCalendar = [EKCalendar calendarWithEventStore:eventStore]; } myCalendar.title = name; myCalendar.source = localSource; NSError *error = nil; BOOL result = [eventStore saveCalendar:myCalendar commit:YES error:&error]; if (result) { NSLog(@"Saved calendar %@ to event store. %@",myCalendar,eventStore); } else { NSLog(@"Error saving calendar: %@.", error); } [self saveCalendarIdentifier:myCalendar.calendarIdentifier andName:name]; } // You can also configure properties like the calendar color etc. The important part is to store the identifier for later use. 
On the other hand if you already have the identifier, you can just fetch the calendar: else { myCalendar = [eventStore calendarWithIdentifier:identifier]; NSLog(@"fetch an old-one = %@",myCalendar); } return myCalendar; } + (EKCalendar *)addEventForCalendarWithName: (NSString *) name fromDate:(NSDate *)fromDate toDate: (NSDate *) toDate inEventStore:(EKEventStore *)eventStore withDelegate: (id<CalendarManagerDelegate>) delegate { // da ios 5.0 in avanti EKCalendar *myCalendar; EKSource *localSource = nil; for (EKSource *source in eventStore.sources) { if (source.sourceType == EKSourceTypeLocal) { localSource = source; break; } } @synchronized(self) { myCalendar = [self getCalendarWithName:name inEventStore:eventStore withLocalSource:localSource forceCreation:NO]; if (myCalendar == nil) myCalendar = [self getCalendarWithName:name inEventStore:eventStore withLocalSource:localSource forceCreation:YES]; NSLog(@"End synchronized block %@",myCalendar); } EKEvent *event = [self generateEventForCalendar:myCalendar fromDate:fromDate toDate:toDate inEventStore:eventStore withDelegate:delegate]; [eventStore saveEvent:event span:EKSpanThisEvent error:nil]; return myCalendar; } + (EKEvent *) generateEventForCalendar: (EKCalendar *) calendar fromDate:(NSDate *)fromDate toDate: (NSDate *) toDate inEventStore:(EKEventStore *) eventStore withDelegate:(id<CalendarManagerDelegate>) delegate { EKEvent *event = [EKEvent eventWithEventStore:eventStore]; event.startDate=fromDate; event.endDate=toDate; [delegate calendarManagerDidCreateEvent:event]; [event setCalendar:calendar]; // ricerca dell'evento nel calendario, se ne trovo uno uguale non lo inserisco NSPredicate *predicate = [eventStore predicateForEventsWithStartDate:fromDate endDate:toDate calendars:[NSArray arrayWithObject:calendar]]; NSArray *matchEvents = [eventStore eventsMatchingPredicate:predicate]; if ([matchEvents count] > 0) { // ne ho trovati di gia' presenti, vediamo se uno e' quello che vogliamo inserire BOOL found = NO; for (EKEvent *fetchEvent in matchEvents) { if ([fetchEvent.title isEqualToString:event.title] && [fetchEvent.notes isEqualToString:event.notes]) { found = YES; break; } } if (found) { // esiste già e quindi non lo inserisco NSLog(@"OH NOOOOOO!!"); event = nil; } } return event; } #pragma mark - Public Methods + (BOOL) addEventForCalendarWithName:(NSString *) name fromDate:(NSDate *)fromDate toDate: (NSDate *) toDate withDelegate:(id<CalendarManagerDelegate>) delegate { BOOL retVal = YES; EKEventStore *eventStore=[[EKEventStore alloc] init]; if ([delegate conformsToProtocol:@protocol(CalendarManagerDelegate)]) { [self requestToEventStore:eventStore delegate:delegate fromDate:fromDate toDate: toDate name:name]; } else { retVal = NO; } return retVal; } + (BOOL) addEventsForCalendarWithName:(NSString *) name fromDate:(NSDate *)fromDate toDate: (NSDate *) toDate withDelegate:(id<CalendarManagerDelegate>) delegate { BOOL retVal = YES; NSDate *dateCursor = fromDate; EKEventStore *eventStore=[[EKEventStore alloc] init]; if ([delegate conformsToProtocol:@protocol(CalendarManagerDelegate)]) { while (retVal && ([dateCursor compare:toDate] == NSOrderedAscending)) { NSDate *finish = [dateCursor dateByAddingTimeInterval:oneDay]; [self requestToEventStore:eventStore delegate:delegate fromDate: dateCursor toDate: finish name:name]; dateCursor = [dateCursor dateByAddingTimeInterval:oneDay]; } } else { retVal = NO; } return retVal; } @end In practice, on my iphone I get the log: fetch an old-one = (null) 19/12/2012 11:33:09.520 
AppCampeggioSingolo [730:8b1b] create new calendar 19/12/2012 11:33:09.558 AppCampeggioSingolo [730:8b1b] Saved calendar EKCalendar. Every time I add an event and then look in the iCal calendar, I cannot find the event that was added. On my friend's iPhone, however, everything works correctly. I doubt that the problem stems from the code, but I just don't understand what it could be. I searched all day yesterday and part of today on Google but have not found anything yet. Any help will be greatly appreciated. EDIT: I forgot the call, which is [CalendarManager addEventForCalendarWithName: @"myCalendar" fromDate:fromDate toDate: toDate withDelegate:self]; in the delegate method I simply set the title and notes of the event, like this: - (void) calendarManagerDidCreateEvent:(EKEvent *) event { event.title = @"the title"; event.notes = @"some notes"; }
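    Editor's note: two places in the code above swallow errors that could explain the difference between the two phones — the !granted branch of requestAccessToEntityType:completion: is empty, and saveEvent:span:error: is called with error:nil. A minimal sketch of surfacing both (the logging is illustrative, not part of the original code); note also that the calendar is created on the local source, and locally created calendars are commonly reported as not appearing in the Calendar app when iCloud calendars are enabled on the device:

        // Sketch: capture the errors the current code discards.
        NSError *saveError = nil;
        if (![eventStore saveEvent:event span:EKSpanThisEvent error:&saveError]) {
            NSLog(@"saveEvent failed: %@", saveError);
        }

        [eventStore requestAccessToEntityType:EKEntityTypeEvent
                                    completion:^(BOOL granted, NSError *error) {
            if (!granted) {
                // Currently an empty branch -- denied access fails silently.
                NSLog(@"Calendar access was not granted: %@", error);
                return;
            }
            // ... add the event as before ...
        }];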

    Read the article

  • Am I just not understanding TDD unit testing (Asp.Net MVC project)?

    - by KallDrexx
    I am trying to figure out how to correctly and efficiently unit test my ASP.NET MVC project. When I started on this project I bought the Pro ASP.NET MVC book, and with it I learned about TDD and unit testing. After seeing the examples, and given that I work as a software engineer in QA at my current company, I was amazed at how awesome TDD seemed to be. So I started working on my project and went gung-ho writing unit tests for my database layer, business layer, and controllers. Everything got a unit test prior to implementation. At first I thought it was awesome, but then things started to go downhill. Here are the issues I started encountering: I ended up writing application code in order to make it possible for unit tests to be performed. I don't mean this in a good way, as in my code was broken and I had to fix it so the unit tests pass. I mean that abstracting out the database to a mock database is impossible due to the use of LINQ for data retrieval (using the generic repository pattern). The reason is that with LINQ to SQL or LINQ to Entities you can do joins just by doing: var objs = from p in _container.Projects select p.Objects; However, if you mock the database layer out, in order to have that LINQ pass the unit test you must change it to var objs = from p in _container.Projects join o in _container.Objects on o.ProjectId equals p.Id select o; Not only does this mean you are changing your application logic just so you can unit test it, you are making your code less efficient for the sole purpose of testability, and giving up a lot of the advantages of using an ORM in the first place. Furthermore, since a lot of the IDs for my models are database generated, I had to write additional code to handle the non-database tests, since IDs were never generated and I still had to handle those cases for the unit tests to pass, yet they would never occur in real scenarios. Thus I ended up throwing out my database unit testing. Writing unit tests for controllers was easy as long as I was returning views. However, the major part of my application (and the one that would benefit most from unit testing) is a complicated Ajax web application. For various reasons I decided to change the app from returning views to returning JSON with the data I needed. After this my unit tests became extremely painful to write, as I have not found any good way to write unit tests for non-trivial JSON. After pounding my head and wasting a ton of time trying to find a good way to unit test the JSON, I gave up and deleted all of my controller unit tests (all controller actions are focused on this part of the app so far). So finally I was left with testing the service layer (BLL). Right now I am using EF4; however, I had this issue with LINQ to SQL as well. I chose the EF4 model-first approach because, to me, it makes sense to do it that way (define my business objects and let the framework figure out how to translate them into the SQL backend). This was fine at the beginning, but now it is becoming cumbersome due to relationships. For example, say I have Project, User, and Object entities. An Object must be associated with a Project, and a Project must be associated with a User. This is not only a database-specific rule; these are my business rules as well. However, say I want to write a unit test verifying that I am able to save an Object (for a simple example).
I now have to write the following code just to make sure the save worked: User usr = new User { Name = "Me" }; _userService.SaveUser(usr); Project prj = new Project { Name = "Test Project", Owner = usr }; _projectService.SaveProject(prj); Object obj = new Object { Name = "Test Object" }; _objectService.SaveObject(obj); // Perform verifications There are several issues with having to do all this just to perform one unit test. For starters, if I add a new dependency, such as all projects must belong to a category, I must go into EVERY single unit test that references a project, add code to save the category, then add code to attach the category to the project. This can be a HUGE effort down the road for a very simple business logic change, and yet almost none of the unit tests I will be modifying for this requirement are actually meant to test that feature/requirement. If I then add verifications to my SaveProject method, so that projects cannot be saved unless they have a name with at least 5 characters, I have to go through every Object and Project unit test to make sure that the new requirement doesn't make any unrelated unit tests fail. If there is an issue in the UserService.SaveUser() method, it will cause all Project and Object unit tests to fail, and the cause won't be immediately noticeable without digging through the exceptions. Thus I have removed all service layer unit tests from my project. I could go on and on, but so far I have not seen any way for unit testing to actually help me rather than get in my way. I can see specific cases where I can, and probably will, implement unit tests, such as making sure my data verification methods work correctly, but those cases are few and far between. Some of my issues could probably be mitigated, but not without adding extra layers to my application, and thus more points of failure, just so I can unit test. So I have no unit tests left in my code. Luckily I make heavy use of source control, so I can get them back if I need them, but I just don't see the point. Everywhere on the internet I see people talking about how great TDD and unit tests are, and I'm not just talking about the fanatical people. The few people who dismiss TDD/unit tests give bad arguments, claiming they are more efficient debugging by hand through the IDE, or that their coding skills are so amazing that they don't need it. I recognize that both of those arguments are utter bollocks, especially for a project that needs to be maintainable by multiple developers, but valid rebuttals to TDD seem to be few and far between. So the point of this post is to ask: am I just not understanding how to use TDD and automated unit tests?
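    Editor's note: none of this settles the larger TDD question, but the cascading-setup problem in particular (every test re-creating a User, a Project, and soon a Category) is usually contained with a test-data builder rather than inline object construction. A minimal sketch, using hypothetical types that mirror the entities named in the post — defaults absorb new mandatory relationships in one place instead of in every test:

        // Hypothetical builder; Project, User and Category stand in for the post's entities.
        public class ProjectBuilder
        {
            private string _name = "Test Project";
            private User _owner = new User { Name = "Default Owner" };
            private Category _category = new Category { Name = "Default Category" };

            public ProjectBuilder Named(string name) { _name = name; return this; }
            public ProjectBuilder OwnedBy(User owner) { _owner = owner; return this; }

            public Project Build()
            {
                // New required relationships get a default here, once.
                return new Project { Name = _name, Owner = _owner, Category = _category };
            }
        }

        // In a test:
        // Project prj = new ProjectBuilder().Named("Search feature").Build();
        // _projectService.SaveProject(prj);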

    Read the article

  • Windows XP periodically disconnects, reconnects; Windows 7 doesn't

    - by einpoklum
    My setup: I have a PC with a Gigabyte GA-MA78S2H motherboard (Realtek Gigabit wired Ethernet on-board). I have the latest drivers (at least the latest driver for the NIC). I'm connecting via an Edimax BR-6216Mg (again, a wired connection). For some reason I experience short periodic disconnects and reconnects. Specifically: Skype disconnects, tries to connect, and succeeds after a short while; incoming SFTP sessions get dropped; using a browser, I sometimes get stuck in the DNS lookup or the connection to the website and a page won't load. A couple of seconds later, a reload works. All this happens with Windows XP SP3. With Windows 7, it doesn't happen. The connection is smooth (the OS is sluggish, but never mind that). Like I said, I updated the NIC driver. I tried reducing the MTU (using something called Dr. TCP), thinking maybe that would help, but it didn't. (I'm a bit, but not super, knowledgeable about TCP parameters.) I'm guessing it's either a problem with the driver or some settings which are different between the two OSes. ipconfig for my adapter:
        Ethernet adapter Local Area Connection:
           Connection-specific DNS Suffix . :
           Description . . . . . . . . . . . : Realtek PCIe GBE Family Controller
           Physical Address. . . . . . . . . : 00-1D-7D-E9-72-9E
           Dhcp Enabled. . . . . . . . . . . : Yes
           Autoconfiguration Enabled . . . . : Yes
           IP Address. . . . . . . . . . . . : 192.168.0.2
           Subnet Mask . . . . . . . . . . . : 255.255.255.0
           Default Gateway . . . . . . . . . : 192.168.0.254
           DHCP Server . . . . . . . . . . . : 192.168.0.254
           DNS Servers . . . . . . . . . . . : 192.117.235.235
                                               62.219.186.7
           Lease Obtained. . . . . . . . . . : Saturday, March 10, 2012 8:28:20 AM
           Lease Expires . . . . . . . . . . : Friday, January 26, 1906 2:00:04 AM

    Read the article

  • sshd running but no PID file

    - by dunxd
    I recently started using monit to monitor the status of sshd on my CentOS 5.4 server. This works fine, but every so often monit reports that sshd is no longer running. This isn't true - I am still able to log in to the server via ssh - however I note the following: There is no longer any PID file at /var/run/sshd.pid - after a reboot this file exists. Once it is gone, restarting sshd via service sshd restart does not create the PID file. sudo service sshd status reports openssh-daemon is stopped - again, restarting sshd does not change this, but a reboot does. sudo service sshd stop reports failed, presumably because of the missing PID file. Any idea what is going on? Update: sudo netstat -lptun gives the following output relating to port 22: tcp 0 0 :::22 :::* LISTEN 20735/sshd Killing the process with this PID, as suggested by @Henry, and then starting sshd via service results in service sshd status recognising the process by PID again. I would still like to understand this better. The RPM verify suggested by a couple of answerers shows this: sudo rpm -vV openssh openssh-server openssh-clients | grep 'S\.5' S.5....T c /etc/pam.d/sshd S.5....T c /etc/ssh/sshd_config /etc/pam.d/sshd has the following contents: #%PAM-1.0 auth include system-auth account required pam_nologin.so account include system-auth password include system-auth session optional pam_keyinit.so force revoke session include system-auth #session required pam_loginuid.so Should that last line be commented out? Update: Here's the output of @YannickGirouard's script: $ sudo ./sshd_test Searching for the process listening on port 22... Found the following PID: 21330 Command line for PID 21330: /usr/sbin/sshd Listing process(es) relating to PID 21330: UID PID PPID C STIME TTY TIME CMD root 21330 1 0 14:04 ?
00:00:00 /usr/sbin/sshd Listing RPM information about openssh packages: Name : openssh Relocations: (not relocatable) Version : 4.3p2 Vendor: CentOS Release : 72.el5_7.5 Build Date: Tue 30 Aug 2011 12:34:14 AM BST Install Date: Sun 06 Nov 2011 12:50:57 AM GMT Build Host: builder10.centos.org Group : Applications/Internet Source RPM: openssh-4.3p2-72.el5_7.5.src.rpm Size : 745390 License: BSD Signature : DSA/SHA1, Fri 02 Sep 2011 01:13:01 AM BST, Key ID a8a447dce8562897 URL : http://www.openssh.com/portable.html Summary : The OpenSSH implementation of SSH protocol versions 1 and 2 ------------------------------------------------------ Name : openssh-clients Relocations: (not relocatable) Version : 4.3p2 Vendor: CentOS Release : 72.el5_7.5 Build Date: Tue 30 Aug 2011 12:34:14 AM BST Install Date: Sun 06 Nov 2011 12:51:04 AM GMT Build Host: builder10.centos.org Group : Applications/Internet Source RPM: openssh-4.3p2-72.el5_7.5.src.rpm Size : 871132 License: BSD Signature : DSA/SHA1, Fri 02 Sep 2011 01:13:01 AM BST, Key ID a8a447dce8562897 URL : http://www.openssh.com/portable.html Summary : The OpenSSH client applications ------------------------------------------------------ Name : openssh-server Relocations: (not relocatable) Version : 4.3p2 Vendor: CentOS Release : 72.el5_7.5 Build Date: Tue 30 Aug 2011 12:34:14 AM BST Install Date: Sun 06 Nov 2011 12:51:04 AM GMT Build Host: builder10.centos.org Group : System Environment/Daemons Source RPM: openssh-4.3p2-72.el5_7.5.src.rpm Size : 492478 License: BSD Signature : DSA/SHA1, Fri 02 Sep 2011 01:13:01 AM BST, Key ID a8a447dce8562897 URL : http://www.openssh.com/portable.html Summary : The OpenSSH server daemon ------------------------------------------------------ However, I've since got things working by killing the process and starting afresh, as suggested by @Henry below, so perhaps I am no longer seeing the same thing. Will try again if I am seeing the issue again after next reboot. Update - 14 March Monit alerted me that sshd had disappeared, and again I am able to ssh onto the server. So now I can run the script $ sudo ./sshd_test Searching for the process listening on port 22... Found the following PID: 2208 Command line for PID 2208: /usr/sbin/sshd Listing process(es) relating to PID 2208: UID PID PPID C STIME TTY TIME CMD root 2208 1 0 Mar13 ? 00:00:00 /usr/sbin/sshd root 1885 2208 0 21:50 ? 
00:00:00 sshd: dunx [priv] Listing RPM information about openssh packages: Name : openssh Relocations: (not relocatable) Version : 4.3p2 Vendor: CentOS Release : 72.el5_7.5 Build Date: Tue 30 Aug 2011 12:34:14 AM BST Install Date: Sun 06 Nov 2011 12:50:57 AM GMT Build Host: builder10.centos.org Group : Applications/Internet Source RPM: openssh-4.3p2-72.el5_7.5.src.rpm Size : 745390 License: BSD Signature : DSA/SHA1, Fri 02 Sep 2011 01:13:01 AM BST, Key ID a8a447dce8562897 URL : http://www.openssh.com/portable.html Summary : The OpenSSH implementation of SSH protocol versions 1 and 2 ------------------------------------------------------ Name : openssh-clients Relocations: (not relocatable) Version : 4.3p2 Vendor: CentOS Release : 72.el5_7.5 Build Date: Tue 30 Aug 2011 12:34:14 AM BST Install Date: Sun 06 Nov 2011 12:51:04 AM GMT Build Host: builder10.centos.org Group : Applications/Internet Source RPM: openssh-4.3p2-72.el5_7.5.src.rpm Size : 871132 License: BSD Signature : DSA/SHA1, Fri 02 Sep 2011 01:13:01 AM BST, Key ID a8a447dce8562897 URL : http://www.openssh.com/portable.html Summary : The OpenSSH client applications ------------------------------------------------------ Name : openssh-server Relocations: (not relocatable) Version : 4.3p2 Vendor: CentOS Release : 72.el5_7.5 Build Date: Tue 30 Aug 2011 12:34:14 AM BST Install Date: Sun 06 Nov 2011 12:51:04 AM GMT Build Host: builder10.centos.org Group : System Environment/Daemons Source RPM: openssh-4.3p2-72.el5_7.5.src.rpm Size : 492478 License: BSD Signature : DSA/SHA1, Fri 02 Sep 2011 01:13:01 AM BST, Key ID a8a447dce8562897 URL : http://www.openssh.com/portable.html Summary : The OpenSSH server daemon ------------------------------------------------------ Again, when I look for /var/run/sshd.pid I don't find it. $ cat /var/run/sshd.pid cat: /var/run/sshd.pid: No such file or directory $ sudo netstat -anp | grep sshd tcp 0 0 0.0.0.0:22 0.0.0.0:* LISTEN 2208/sshd $ sudo kill 2208 $ sudo service sshd start Starting sshd: [ OK ] $ cat /var/run/sshd.pid 3794 $ sudo service sshd status openssh-daemon (pid 3794) is running... Is it possible that sshd is restarting and not creating a pidfile for some reason?
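    Editor's note: with a "check process ... with pidfile" stanza, monit decides whether sshd is running by reading the PID from that file and checking that the process exists; it does not scan the process table by name. So a vanished /var/run/sshd.pid is enough to trigger the "not running" alert (and any restart action) even while logins keep working. A typical stanza, for reference only — the paths are the stock CentOS ones, not necessarily the poster's configuration:

        check process sshd with pidfile /var/run/sshd.pid
            start program = "/etc/init.d/sshd start"
            stop program  = "/etc/init.d/sshd stop"
            if failed port 22 protocol ssh then restart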

    Read the article

  • Flex - Typed ArrayCollection as Horizontallist's dataprovider

    - by BS_C3
    Hi community! I have an ArrayCollection of objects. I'm passing this array to a horizontallist as a dataprovider and I'm using a custom itemRenderer. When executing the application, the horizontallist is displaying [object CustomClass][object CustomClass][object CustomClass][object CustomClass] I've tried casting each object in the itemrenderer as following: <mx:Label text="{(data as CustomClass).label1}"/> But it's not working... Thanks for any help you can provide. Regards, BS_C3 Edit - 09 March 2010 Let's go for some more code =) <?xml version="1.0" encoding="utf-8"?> <mx:Canvas xmlns:mx="http://www.adobe.com/2006/mxml"> <mx:Component id="Item"> <mx:VBox width="180"> <mx:HBox width="100%"> <mx:Spacer width="100%"/> <mx:Button label="x"/> </mx:HBox> <mx:Image id="thumbnail"/> <mx:Label width="100%" horizontalCenter="0" text="Collection"/> <mx:HBox width="100%"> <mx:Label width="100" text="GIA"/> <mx:Label text="{data.charg_st}"/> </mx:HBox> <mx:HBox width="100%"> <mx:Label width="100" text="Finger Size"/> <mx:Label text="xxxxxx"/> </mx:HBox> <mx:HBox width="100%"> <mx:Label width="100" text="Carat"/> <mx:Label text="{data.carats}"/> </mx:HBox> <mx:HBox width="100%"> <mx:Label width="100" text="Color"/> <mx:Label text="{data.color}"/> </mx:HBox> <mx:HBox width="100%"> <mx:Label width="100" text="Clarity"/> <mx:Label text="{data.clarity}"/> </mx:HBox> <mx:HBox width="100%"> <mx:Label width="100" text="Shop"/> <mx:Label text="{data.lgort_fp}"/> </mx:HBox> <mx:HBox width="100%"> <mx:Label width="100" text="Resizing"/> <mx:Label text="{data.resizing}"/> </mx:HBox> <mx:HBox width="100%"> <mx:Label width="100" text="Price Excl. VAT"/> <mx:Label text="{data.net_price_fp}"/> </mx:HBox> </mx:VBox> </mx:Component> <mx:HorizontalList dataProvider="{GlobalData.instance.tray}" columnCount="4" rowCount="1" horizontalScrollPolicy="off" itemRenderer="{Item}" /> </mx:Canvas> FYI, the horizonalList dataprovider is an ArrayCollection of objects. Now, the horizontallist is displaying empty items... with the correct width... The arraycollection is not empty (I'm using an alert on the click event on an item, and I do retrieve the expected data). Hope this will help _< Regards, BS_C3
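    Editor's note: inline item renderers in Flex are normally declared by nesting <mx:Component> inside the list's itemRenderer property (or by moving the VBox into its own MXML component and referencing it by class name), rather than as a standalone <mx:Component id="Item"> bound with itemRenderer="{Item}". A trimmed sketch of the nested form, keeping only two of the labels, in case the declaration style is the culprit:

        <mx:HorizontalList dataProvider="{GlobalData.instance.tray}"
                           columnCount="4" rowCount="1" horizontalScrollPolicy="off">
            <mx:itemRenderer>
                <mx:Component>
                    <mx:VBox width="180">
                        <mx:Label text="{data.carats}"/>
                        <mx:Label text="{data.color}"/>
                    </mx:VBox>
                </mx:Component>
            </mx:itemRenderer>
        </mx:HorizontalList>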

    Read the article

  • Solution: Testing Web Services with MSTest on Team Build

    - by Martin Hinshelwood
    Guess what. About 20 minutes after I fixed the build, Allan broke it again! Update: 4th March 2010 – After having huge problems getting this working I read Billy Wang’s post which showed me the light. The problem here is that even though the test passes locally it will not during an Automated Build. When you send your tests to the build server it does not understand that you want to spin up the web site and run tests against that! When you run the test in Visual Studio it spins up the web site anyway, but would you expect your test to pass if you told the website not to spin up? Of course not. So, when you send the code to the build server you need to tell it what to spin up. First, the best way to get the parameters you need is to right click on the method you want to test and select “Create Unit Test”. This will detect wither you are running in IIS or ASP.NET Development Server or None, and create the relevant tags. Figure: Right clicking on “SaveDefaultProjectFile” will produce a context menu with “Create Unit tests…” on it. If you use this option it will AutoDetect most of the Attributes that are required. /// <summary> ///A test for SSW.SQLDeploy.SilverlightUI.Web.Services.IProfileService.SaveDefaultProjectFile ///</summary> // TODO: Ensure that the UrlToTest attribute specifies a URL to an ASP.NET page (for example, // http://.../Default.aspx). This is necessary for the unit test to be executed on the web server, // whether you are testing a page, web service, or a WCF service. [TestMethod()] [HostType("ASP.NET")] [AspNetDevelopmentServerHost("D:\\Workspaces\\SSW\\SSW\\SqlDeploy\\DEV\\Main\\SSW.SQLDeploy.SilverlightUI.Web", "/")] [UrlToTest("http://localhost:3100/")] [DeploymentItem("SSW.SQLDeploy.SilverlightUI.Web.dll")] public void SaveDefaultProjectFileTest() { IProfileService target = new ProfileService(); // TODO: Initialize to an appropriate value string strComputerName = string.Empty; // TODO: Initialize to an appropriate value bool expected = false; // TODO: Initialize to an appropriate value bool actual; actual = target.SaveDefaultProjectFile(strComputerName); Assert.AreEqual(expected, actual); Assert.Inconclusive("Verify the correctness of this test method."); } Figure: Auto created code that shows the attributes required to run correctly in IIS or in this case ASP.NET Development Server If you are a purist and don’t like creating unit tests like this then you just need to add the three attributes manually. HostType – This attribute specified what host to use. Its an extensibility point, so you could write your own. Or you could just use “ASP.NET”. UrlToTest – This specifies the start URL. For most tests it does not matter which page you call, as long as it is a valid page otherwise your test may not run on the server, but may pass anyway. AspNetDevelopmentServerHost – This is a nasty one, it is only used if you are using ASP.NET Development Host and is unnecessary if you are using IIS. This sets the host settings and the first value MUST be the physical path to the root of your web application. OK, so all that was rubbish and I could not get anything working using the MSDN documentation. Google provided very little help until I ran into Billy Wang’s post  and I heard that heavenly music that all developers hear when understanding dawns that what they have been doing up until now is just plain stupid. I am sure that the above will work when I am doing Web Unit Tests, but there is a much easier way when doing web services. 
You need to add the AspNetDevelopmentServer attribute to your code. This will tell MSTest to spin up an ASP.NET Development server to host the service. Specify the path to the web application you want to use. [AspNetDevelopmentServer("WebApp1", "D:\\Workspaces\\SSW\\SSW\\SqlDeploy\\DEV\\Main\\SSW.SQLDeploy.SilverlightUI.Web")] [DeploymentItem("SSW.SQLDeploy.SilverlightUI.Web.dll")] [TestMethod] public void ProfileService_Integration_SaveDefaultProjectFile_Returns_True() { ProfileServiceClient target = new ProfileServiceClient(); bool isTrue = target.SaveDefaultProjectFile("Mav"); Assert.AreEqual(true, isTrue); } Figure: This AspNetDevelopmentServer will make sure that the specified web application is launched. Now we can run the test and have it pass, but if the dynamically assigned ASP.NET Development server port changes what happens to the details in your app.config that was generated when creating a reference to the web service? Well, it would be wrong and the test would fail. This is where Billy’s helper method comes in. Once you have created an instance of your service call, and it has loaded the config, but before you make any calls to it you need to go in and dynamically set the Endpoint address to the same address as your dynamically hosted Web Application. using System; using System.Collections.Generic; using System.Linq; using System.Text; using Microsoft.VisualStudio.TestTools.UnitTesting; using System.Reflection; using System.ServiceModel.Description; using System.ServiceModel; namespace SSW.SQLDeploy.Test { class WcfWebServiceHelper { public static bool TryUrlRedirection(object client, TestContext context, string identifier) { bool result = true; try { PropertyInfo property = client.GetType().GetProperty("Endpoint"); string webServer = context.Properties[string.Format("AspNetDevelopmentServer.{0}", identifier)].ToString(); Uri webServerUri = new Uri(webServer); ServiceEndpoint endpoint = (ServiceEndpoint)property.GetValue(client, null); EndpointAddressBuilder builder = new EndpointAddressBuilder(endpoint.Address); builder.Uri = new Uri(endpoint.Address.Uri.OriginalString.Replace(endpoint.Address.Uri.Authority, webServerUri.Authority)); endpoint.Address = builder.ToEndpointAddress(); } catch (Exception e) { context.WriteLine(e.Message); result = false; } return result; } } } Figure: This fixes a problem with the URL in your web.config not being the same as the dynamically hosted ASP.NET Development server port. We can now add a call to this method after we created the Proxy object and change the Endpoint for the Service to the correct one. This process is wrapped in an assert as if it fails there is no point in continuing. [AspNetDevelopmentServer("WebApp1", D:\\Workspaces\\SSW\\SSW\\SqlDeploy\\DEV\\Main\\SSW.SQLDeploy.SilverlightUI.Web")] [DeploymentItem("SSW.SQLDeploy.SilverlightUI.Web.dll")] [TestMethod] public void ProfileService_Integration_SaveDefaultProjectFile_Returns_True() { ProfileServiceClient target = new ProfileServiceClient(); Assert.IsTrue(WcfWebServiceHelper.TryUrlRedirection(target, TestContext, "WebApp1")); bool isTrue = target.SaveDefaultProjectFile("Mav"); Assert.AreEqual(true, isTrue); } Figure: Editing the Endpoint from the app.config on the fly to match the dynamically hosted ASP.NET Development Server URL and port is now easy. As you can imagine AspNetDevelopmentServer poses some problems of you have multiple developers. What are the chances of everyone using the same location to store the source? 
What about if you are using a build server, how do you tell MSTest where to look for the files? To the rescue is a property called" “%PathToWebRoot%” which is always right on the build server. It will always point to your build drop folder for your solutions web sites. Which will be “\\tfs.ssw.com.au\BuildDrop\[BuildName]\Debug\_PrecompiledWeb\” or whatever your build drop location is. So lets change the code above to add this. [AspNetDevelopmentServer("WebApp1", "%PathToWebRoot%\\SSW.SQLDeploy.SilverlightUI.Web")] [DeploymentItem("SSW.SQLDeploy.SilverlightUI.Web.dll")] [TestMethod] public void ProfileService_Integration_SaveDefaultProjectFile_Returns_True() { ProfileServiceClient target = new ProfileServiceClient(); Assert.IsTrue(WcfWebServiceHelper.TryUrlRedirection(target, TestContext, "WebApp1")); bool isTrue = target.SaveDefaultProjectFile("Mav"); Assert.AreEqual(true, isTrue); } Figure: Adding %PathToWebRoot% to the AspNetDevelopmentServer path makes it work everywhere. Now we have another problem… this will ONLY run on the build server and will fail locally as %PathToWebRoot%’s default value is “C:\Users\[profile]\Documents\Visual Studio 2010\Projects”. Well this sucks… How do we get the test to run on any build server and any developer laptop. Open “Tools | Options | Test Tools | Test Execution” in Visual Studio and you will see a field called “Web application root directory”. This is where you override that default above. Figure: You can override the default website location for tests. In my case I would put in “D:\Workspaces\SSW\SSW\SqlDeploy\DEV\Main” and all the developers working with this branch would put in the folder that they have mapped. Can you see a problem? What is I create a “$/SSW/SqlDeploy/DEV/34567” branch from Main and I want to run tests in there. Well… I would have to change the value above. This is not ideal, but as you can put your projects anywhere on a computer, it has to be done. Conclusion Although this looks convoluted and complicated there are real problems being solved here that mean that you have a test ANYWHERE solution. Any build server, any Developer workstation. Resources: http://billwg.blogspot.com/2009/06/testing-wcf-web-services.html http://tough-to-find.blogspot.com/2008/04/testing-asmx-web-services-in-visual.html http://msdn.microsoft.com/en-us/library/ms243399(VS.100).aspx http://blogs.msdn.com/dscruggs/archive/2008/09/29/web-tests-unit-tests-the-asp-net-development-server-and-code-coverage.aspx http://www.5z5.com/News/?543f8bc8b36b174f Technorati Tags: VS2010,MSTest,Team Build 2010,Team Build,Visual Studio,Visual Studio 2010,Visual Studio ALM,Team Test,Team Test 2010

    Read the article

  • September 2011 Release of the Ajax Control Toolkit

    - by Stephen Walther
    I’m happy to announce the release of the September 2011 Ajax Control Toolkit. This release has several important new features including: Date ranges – When using the Calendar extender, you can specify a start and end date and a user can pick only those dates which fall within the specified range. This was the fourth top-voted feature request for the Ajax Control Toolkit at CodePlex. Twitter Control – You can use the new Twitter control to display recent tweets associated with a particular Twitter user or tweets which match a search query. Gravatar Control – You can use the new Gravatar control to display a unique image for each user of your website. Users can upload custom images to the Gravatar.com website or the Gravatar control can display a unique, auto-generated, image for a user. You can download this release this very minute by visiting CodePlex: http://AjaxControlToolkit.CodePlex.com Alternatively, you can execute the following command from the Visual Studio NuGet console: Improvements to the Ajax Control Toolkit Calendar Control The Ajax Control Toolkit Calendar extender control is one of the most heavily used controls from the Ajax Control Toolkit. The developers on the Superexpert team spent the last sprint focusing on improving this control. There are three important changes that we made to the Calendar control: we added support for date ranges, we added support for highlighting today’s date, and we made fixes to several bugs related to time zones and daylight savings. Using Calendar Date Ranges One of the top-voted feature requests for the Ajax Control Toolkit was a request to add support for date ranges to the Calendar control (this was the fourth most voted feature request at CodePlex). With the latest release of the Ajax Control Toolkit, the Calendar extender now supports date ranges. For example, the following page illustrates how you can create a popup calendar which allows a user only to pick dates between March 2, 2009 and May 16, 2009. <%@ Page Language="C#" AutoEventWireup="true" CodeBehind="CalendarDateRange.aspx.cs" Inherits="WebApplication1.CalendarDateRange" %> <%@ Register TagPrefix="asp" Namespace="AjaxControlToolkit" Assembly="AjaxControlToolkit" %> <html> <head runat="server"> <title>Calendar Date Range</title> </head> <body> <form id="form1" runat="server"> <asp:ToolkitScriptManager ID="tsm" runat="server" /> <asp:TextBox ID="txtHotelReservationDate" runat="server" /> <asp:CalendarExtender ID="Calendar1" TargetControlID="txtHotelReservationDate" StartDate="3/2/2009" EndDate="5/16/2009" SelectedDate="3/2/2009" runat="server" /> </form> </body> </html> This page contains three controls: an Ajax Control Toolkit ToolkitScriptManager control, a standard ASP.NET TextBox control, and an Ajax Control Toolkit CalendarExtender control. Notice that the Calendar control includes StartDate and EndDate properties which restrict the range of valid dates. The Calendar control shows days, months, and years outside of the valid range as struck out. You cannot select days, months, or years which fall outside of the range. The following video illustrates interacting with the new date range feature: If you want to experiment with a live version of the Ajax Control Toolkit Calendar extender control then you can visit the Calendar Sample Page at the Ajax Control Toolkit Sample Site. Highlighted Today’s Date Another highly requested feature for the Calendar control was support for highlighting today’s date. 
The Calendar control now highlights the user’s current date regardless of the user’s time zone. Fixes to Time Zone and Daylight Savings Time Bugs We fixed several significant Calendar extender bugs related to time zones and daylight savings time. For example, previously, when you set the Calendar control’s SelectedDate property to the value 1/1/2007 then the selected data would appear as 12/31/2006 or 1/1/2007 or 1/2/2007 depending on the server time zone. For example, if your server time zone was set to Samoa (UTC-11:00), then setting SelectedDate=”1/1/2007” would result in “12/31/2006” being selected in the Calendar. Users of the Calendar extender control found this behavior confusing. After careful consideration, we decided to change the Calendar extender so that it interprets all dates as UTC dates. In other words, if you set StartDate=”1/1/2007” then the Calendar extender parses the date as 1/1/2007 UTC instead of parsing the date according to the server time zone. By interpreting all dates as UTC dates, we avoid all of the reported issues with the SelectedDate property showing the wrong date. Furthermore, when you set the StartDate and EndDate properties, you know that the same StartDate and EndDate will be selected regardless of the time zone associated with the server or associated with the browser. The date 1/1/2007 will always be the date 1/1/2007. The New Twitter Control This release of the Ajax Control Toolkit introduces a new twitter control. You can use the Twitter control to display recent tweets associated with a particular twitter user. You also can use this control to show the results of a twitter search. The following page illustrates how you can use the Twitter control to display recent tweets made by Scott Hanselman: <%@ Page Language="C#" AutoEventWireup="true" CodeBehind="TwitterProfile.aspx.cs" Inherits="WebApplication1.TwitterProfile" %> <%@ Register TagPrefix="asp" Namespace="AjaxControlToolkit" Assembly="AjaxControlToolkit" %> <html > <head runat="server"> <title>Twitter Profile</title> </head> <body> <form id="form1" runat="server"> <asp:ToolkitScriptManager ID="tsm" runat="server" /> <asp:Twitter ID="Twitter1" ScreenName="shanselman" runat="server" /> </form> </body> </html> This page includes two Ajax Control Toolkit controls: the ToolkitScriptManager control and the Twitter control. The Twitter control is set to display tweets from Scott Hanselman (shanselman): You also can use the Twitter control to display the results of a search query. For example, the following page displays all recent tweets related to the Ajax Control Toolkit: Twitter limits the number of times that you can interact with their API in an hour. Twitter recommends that you cache results on the server (https://dev.twitter.com/docs/rate-limiting). By default, the Twitter control caches results on the server for a duration of 5 minutes. You can modify the cache duration by assigning a value (in seconds) to the Twitter control's CacheDuration property. The Twitter control wraps a standard ASP.NET ListView control. You can customize the appearance of the Twitter control by modifying its LayoutTemplate, StatusTemplate, AlternatingStatusTemplate, and EmptyDataTemplate. To learn more about the new Twitter control, visit the live Twitter Sample Page. The New Gravatar Control The September 2011 release of the Ajax Control Toolkit also includes a new Gravatar control. This control makes it easy to display a unique image for each user of your website. A Gravatar is associated with an email address. 
You can visit Gravatar.com and upload an image and associate the image with your email address. That way, every website which uses Gravatars (such as the www.ASP.NET website) will display your image next to your name. For example, I visited the Gravatar.com website and associated an image of a Koala Bear with the email address [email protected]. The following page illustrates how you can use the Gravatar control to display the Gravatar image associated with the [email protected] email address: <%@ Page Language="C#" AutoEventWireup="true" CodeBehind="GravatarDemo.aspx.cs" Inherits="WebApplication1.GravatarDemo" %> <%@ Register TagPrefix="asp" Namespace="AjaxControlToolkit" Assembly="AjaxControlToolkit" %> <html xmlns="http://www.w3.org/1999/xhtml"> <head id="Head1" runat="server"> <title>Gravatar Demo</title> </head> <body> <form id="form1" runat="server"> <asp:ToolkitScriptManager ID="tsm" runat="server" /> <asp:Gravatar ID="Gravatar1" Email="[email protected]" runat="server" /> </form> </body> </html> The page above simply displays the Gravatar image associated with the [email protected] email address: If a user has not uploaded an image to Gravatar.com then you can auto-generate a unique image for the user from the user email address. The Gravatar control supports four types of auto-generated images: Identicon -- A different geometric pattern is generated for each unrecognized email. MonsterId -- A different image of a monster is generated for each unrecognized email. Wavatar -- A different image of a face is generated for each unrecognized email. Retro -- A different 8-bit arcade-style face is generated for each unrecognized email. For example, there is no Gravatar image associated with the email address [email protected]. The following page displays an auto-generated MonsterId for this email address: <%@ Page Language="C#" AutoEventWireup="true" CodeBehind="GravatarMonster.aspx.cs" Inherits="WebApplication1.GravatarMonster" %> <%@ Register TagPrefix="asp" Namespace="AjaxControlToolkit" Assembly="AjaxControlToolkit" %> <html xmlns="http://www.w3.org/1999/xhtml"> <head id="Head1" runat="server"> <title>Gravatar Monster</title> </head> <body> <form id="form1" runat="server"> <asp:ToolkitScriptManager ID="tsm" runat="server" /> <asp:Gravatar ID="Gravatar1" Email="[email protected]" DefaultImageBehavior="MonsterId" runat="server" /> </form> </body> </html> The page above generates the following image automatically from the supplied email address: To learn more about the properties of the new Gravatar control, visit the live Gravatar Sample Page. ASP.NET Connections Talk on the Ajax Control Toolkit If you are interested in learning more about the changes that we are making to the Ajax Control Toolkit then please come to my talk on the Ajax Control Toolkit at the upcoming ASP.NET Connections conference. In the talk, I will present a summary of the changes that we have made to the Ajax Control Toolkit over the last several months and discuss our future plans. Do you have ideas for new Ajax Control Toolkit controls? Ideas for improving the toolkit? Come to my talk – I would love to hear from you. You can register for the ASP.NET Connections conference by visiting the following website: Register for ASP.NET Connections   Summary The previous release of the Ajax Control Toolkit – the July 2011 Release – has had over 100,000 downloads. That is a huge number of developers who are working with the Ajax Control Toolkit. 
We are really excited about the new features we added to the Ajax Control Toolkit in the latest September sprint. We hope that you find the updated Calendar control, the new Twitter control, and the new Gravatar control valuable when building your ASP.NET Web Forms applications.

    Read the article

  • When should I use Areas in TFS instead of Team Projects

    - by Martin Hinshelwood
    Well, it depends…. If you are a small company that creates a finite number of internal projects then you will find it easier to create a single project for each of your products and have TFS do the heavy lifting with reporting, SharePoint sites and Version Control. But what if you are not… Update 9th March 2010 Michael Fourie gave me some feedback which I have integrated. Ed Blankenship via @edblankenship offered encouragement and a nice quote. Ewald Hofman gave me a couple of Cons, and maybe a few more soon. Ewald’s company, Avanade, currently uses Areas, but it looks like the manual management is getting too much and the project is getting cluttered. What if you are likely to have hundreds of projects, possibly with a multitude of internal and external projects? You might have 1 project for a customer or 10. This is the situation that most consultancies find themselves in and thus they need a more sustainable and maintainable option. What I am advocating is that we should have 1 “Team Project” per customer, and use areas to create “sub projects” within that single “Team Project”. "What you describe is what we generally do internally and what we recommend. We make very heavy use of area path to categorize the work within a larger project." - Brian Harry, Microsoft Technical Fellow & Product Unit Manager for Team Foundation Server   "We tend to use areas to segregate multiple projects in the same team project and it works well." - Tiago Pascoal, Visual Studio ALM MVP   "In general, I believe this approach provides consistency [to multi-product engagements] and lowers the administration and maintenance costs. All good." - Michael Fourie, Visual Studio ALM MVP   “@MrHinsh BTW, I'm very much a fan of very large, if not huge, team projects in TFS. Just FYI :) Use Areas & Iterations.” Ed Blankenship, Visual Studio ALM MVP   This would mean that SSW would have a single Team Project called “SSW” that contains all of our internal projects and consequently all of the Areas and Iteration move down one hierarchy to accommodate this. Where we would have had “\SSW\Sprint 1” we now have “\SSW\SqlDeploy\Sprint1” with “SqlDeploy” being our internal project. At the moment SSW has over 70 internal projects and more than 170 total projects in TFS. This method has long term benefits that help to simplify the support model for companies that often have limited internal support time and many projects. But, there are implications as TFS does not provide this model “out-of-the-box”. These implications stretch across Areas, Iterations, Queries, Project Portal and Version Control. Michael made a good comment, he said: I agree with your approach, assuming that in a multi-product engagement with a client, they are happy to adopt the same process template across all products. If they are not, then it’ll either be easy to convince them or there is a valid reason for having a different template - Michael Fourie, Visual Studio ALM MVP   At SSW we have a standard template that we use and this is applied across the board, to all of our projects. We even apply any changes to the core process template to all of our existing projects as well. If you have multiple projects for the same clients on multiple templates and you want to keep it that way, then this approach will not work for you. However, if you want to standardise as we have at SSW then this approach may benefit you as well. Implications around Areas Areas should be used for topological classification/isolation of work items. 
You can think of this as architecture areas, organisational areas or even the main features of your application. In our scenario there is an additional top level item that represents the Project / Product that we want to chop our Team Project into. Figure: Creating a sub area to represent a product/project is easy. <teamproject> <teamproject>\<Functional Area/module whatever> Becomes: <teamproject> <teamproject>\<ProjectName>\ <teamproject>\<ProjectName>\<Functional Area/module whatever> Implications around Iterations Iterations should be used for chronological classification/isolation of work items. This could include isolated time boxes, milestones or release timelines and really depends on the logical flow of your project or projects. Due to the new level in Area we need to add the same level to Iteration. This is primarily because it is unlikely that the sprints in each of your projects/products will start and end at the same time. This is just a reality of managing multiple projects. Figure: Adding the same Area value to Iteration as the top level item adds flexibility to Iteration. <teamproject>\Sprint 1 Or <teamproject>\Release 1\Sprint 1 Becomes: <teamproject>\<ProjectName>\Sprint 1 Or <teamproject>\<ProjectName>\Release 1\Sprint 1 Implications around Queries Queries are used to filter your work items based on a specified level of granularity. There are a number of queries that are built into a project created using the MSF Agile 5.0 template, but we now have multiple projects and it would be a pain to have to edit all of the work items every time we changed project, and that would only allow one team to work on one project at a time.   Figure: The Queries that are created in a normal MSF Agile 5.0 project do not quite suit our new needs. In order for project contributors to be able to query based on their project we need a couple of things. The first thing I did was to create an “_Area Template” folder that has a copy of the project layout with all the queries setup to filter based on the “_Area Template” Area and the “_Sprint template” you can see in the Area and Iteration views. Figure: The template is currently easily drag and drop, but you then need to edit the queries to point at the right Area and Iteration. This needs a tool. I then created an “Areas” folder to hold all of the area specific queries. So, when you go to create a new TFS Sub-Project you just drag “_Area Template” while holding “Ctrl” and drop it onto “Areas”. There is a little setup here. That said I managed it in around 10 minutes which is not so bad, and I can imagine it being quite easy to build a tool to create these queries Figure: These new queries can be configured in around 10 minutes, which includes setting up the Area and Iteration as well. Version Control What about your source code? Well, that is the easiest of the lot. Just create a sub folder for each of your projects/products.   Figure: Creating sub folders in source control is easy as “Right click | Create new folder”. <teamproject>\DEV\Main\ Becomes: <teamproject>\<ProjectName>\DEV\Main\ Conclusion I think it is up to each company to make a call on how you want to configure your Team Projects and it depends completely on how many projects/products you are going to have for each customer including yourself. If we decide to utilise this route it will require some configuration to get our 170+ projects into this format, and I will probably be writing some tools to help. 
Pros You only have one project to upgrade when a process template changes – After going through an upgrade of over 170 project prior to the changes in the RC I can tell you that that many projects is no fun. Standardises your Process Template – You will always have the same Process implementation across projects/products without exception You get tighter control over the permissions – Yes, you can do this on a standard Team Project, but it gets a lot easier with practice. You can “move” work items from one “product” to another – Have we not always wanted to do that. You can rename your projects – Wahoo: everyone wants to do this, now you can. One set of Reporting Services reports to manage – You set an area and iteration to run reports anyway, so you may as well set both. Simplified Check-In Policies– There is only one set of check-in policies per client. This simplifies administration of policies. Simplified Alerts – As alerts are applied across multiple projects this simplifies your alert rules as per client. Cons All of these cons could be mitigated by a custom tool that helps automate creation of “Sub-projects” within Team Projects. This custom tool could create areas, Iteration, permissions, SharePoint and queries. It just does not exist yet :) You need to configure the Areas and Iterations You need to configure the permissions You may need to configure sub sites for SharePoint (depends on your requirement) – If you have two projects/products in the same Team Project then you will not see the burn down for each one out-of-the-box, but rather a cumulative for the Team Project. This is not really that much of a problem as you would have to configure your burndown graphs for your current iteration anyway. note: When you create a sub site to a TFS linked portal it will inherit the settings of its parent site :) This is fantastic as it means that you can easily create sub sites and then set the Area and Iteration path in each of the reports to be the correct one. Every team wants their own customization (via Ewald Hofman) - small teams of 2 persons against teams of 30 – or even outsourcing – need their own process, you cannot allow that because everybody gets the same work item types. note: Luckily at SSW this is not a problem as our template is standardised across all projects and customers. Large list of builds (via Ewald Hofman) – As the build list in Team Explorer is just a flat list it can get very cluttered. note: I would mitigate this by removing any build that has not been run in over 30 days. The build template and workflow will still be available in version control, but it will clean the list. Feedback Now that I have explained this method, what do you think? What other pros and cons can you see? What do you think of this approach? Will you be using it? What tools would you like to support you?   Technorati Tags: Visual Studio ALM,TFS Administration,TFS,Team Foundation Server,Project Planning,TFS Customisation

    Read the article

  • Facebook require_login() in iFrame App

    - by LapKom
    Hi, I have a serious problem with an iframe application. I need to use many external JS libraries and other dynamic stuff, so an FBML application can't be done. When I call require_login() I get the application install dialog when the app is not already installed, which is ok. But then, after authorization, the application enters an endless redirect loop with parameters like auth_token, installed and so on. Yesterday I managed to fix this, but today it's broken again... What the heck is happening with FB? It's driving me crazy to find a solution; none of the ones found on the net seems to work. So far I tried: http://abhirama.wordpress.com/2010/03/07/facebook-iframe-xfbml-app/ (7th march 2010!) http://forum.developers.facebook.com/viewtopic.php?pid=156092 http://www.keywordintellect.com/facebook-development/how-to-set-up-a-facebook-iframe-application-in-php-in-5-minutes/ http://www.markdeepwell.com/2010/02/validating-a-facebook-session-within-an-iframe/ http://forum.developers.facebook.com/viewtopic.php?pid=210449 http://www.ajaxlines.com/ajax/stuff/article/facebook_fbml_rendering_in_iframe_application.php http://www.aratide.com/php/solving-the-break-out-issue-in-iframe-facebook-applications/ None of the above worked... According to those and some FB docs: http://wiki.developers.facebook.com/index.php/FB_RequireFeatures http://wiki.developers.facebook.com/index.php/Cross_Domain_Communication_Channel My example test files look as follows: <?php //Link in library. require_once '../application/vendor/Facebook/facebook.php'; //Authentication Keys $appapikey = 'XXXX'; $appsecret = 'XXXX'; //Construct the class $facebook = new Facebook($appapikey, $appsecret); //Require login $user_id = $facebook->require_login(); ?> <html xmlns="http://www.w3.org/1999/xhtml" xmlns:fb="http://www.facebook.com/2008/fbml"> <head> <title></title> </head> <body> <script src="http://static.ak.facebook.com/js/api_lib/v0.4/FeatureLoader.js.php" type="text/javascript"></script> This is you: <fb:name uid="<?php echo $user_id?>"></fb:name> <?php var_dump($facebook->api_client->friends_get())?> <script type="text/javascript"> FB_RequireFeatures(["XFBML"], function(){ FB.Facebook.init("<?=$appapikey?>", "xd_receiver.html"); }); </script> </body> </html> And the cross-domain file xd_receiver.html is: <!doctype html public "-//w3c//dtd xhtml 1.0 strict//en" "http://www.w3.org/tr/xhtml1/dtd/xhtml1-strict.dtd"> <html xmlns="http://www.w3.org/1999/xhtml" > <head> <title>cross-domain receiver page</title> </head> <body> <script src="http://static.ak.facebook.com/js/api_lib/v0.4/XdCommReceiver.js" type="text/javascript"></script> </body> </html> How do I get it working? I'm using the Kohana framework to do this and have already replaced header('Location') with url::redirect() in the Facebook PHP library.

    Read the article

  • CSG operations on implicit surfaces with marching cubes [SOLVED]

    - by Mads Elvheim
    I render isosurfaces with marching cubes, (or perhaps marching squares as this is 2D) and I want to do set operations like set difference, intersection and union. I thought this was easy to implement, by simply choosing between two vertex scalars from two different implicit surfaces, but it is not. For my initial testing, I tried with two spheres circles, and the set operation difference. i.e A - B. One circle is moving and the other one is stationary. Here's the approach I tried when picking vertex scalars and when classifying corner vertices as inside or outside. The code is written in C++. OpenGL is used for rendering, but that's not important. Normal rendering without any CSG operations does give the expected result. void march(const vec2& cmin, //min x and y for the grid cell const vec2& cmax, //max x and y for the grid cell std::vector<vec2>& tri, float iso, float (*cmp1)(const vec2&), //distance from stationary circle float (*cmp2)(const vec2&) //distance from moving circle ) { unsigned int squareindex = 0; float scalar[4]; vec2 verts[8]; /* initial setup of the grid cell */ verts[0] = vec2(cmax.x, cmax.y); verts[2] = vec2(cmin.x, cmax.y); verts[4] = vec2(cmin.x, cmin.y); verts[6] = vec2(cmax.x, cmin.y); float s1,s2; /********************************** ********For-loop of interest****** *******Set difference between **** *******two implicit surfaces****** **********************************/ for(int i=0,j=0; i<4; ++i, j+=2){ s1 = cmp1(verts[j]); s2 = cmp2(verts[j]); if((s1 < iso)){ //if inside circle1 if((s2 < iso)){ //if inside circle2 scalar[i] = s2; //then set the scalar to the moving circle } else { scalar[i] = s1; //only inside circle1 squareindex |= (1<<i); //mark as inside } } else { scalar[i] = s1; //inside neither circle } } if(squareindex == 0) return; /* Usual interpolation between edge points to compute the new intersection points */ verts[1] = mix(iso, verts[0], verts[2], scalar[0], scalar[1]); verts[3] = mix(iso, verts[2], verts[4], scalar[1], scalar[2]); verts[5] = mix(iso, verts[4], verts[6], scalar[2], scalar[3]); verts[7] = mix(iso, verts[6], verts[0], scalar[3], scalar[0]); for(int i=0; i<10; ++i){ //10 = maxmimum 3 triangles, + one end token int index = triTable[squareindex][i]; //look up our indices for triangulation if(index == -1) break; tri.push_back(verts[index]); } } This gives me weird jaggies: It looks like the CSG operation is done without interpolation. It just "discards" the whole triangle. Do I need to interpolate in some other way, or combine the vertex scalar values? I'd love some help with this. A full testcase can be downloaded HERE EDIT: Basically, my implementation of marching squares works fine. It is my scalar field which is broken, and I wonder what the correct way would look like. Preferably I'm looking for a general approach to implement the three set operations I discussed above, for the usual primitives (circle, rectangle/square, plane)
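A common way around the problem described above is to combine the two scalar fields into a single field before contouring, instead of choosing per corner between them: with fields where "inside" means a value below the iso level (0 in this sketch), union is min, intersection is max, and the difference A - B is max(a, -b); for a non-zero iso level the complement of b becomes 2*iso - b. The marching-squares pass then interpolates against that one combined field, which is what removes the jaggies. A minimal C# sketch of the idea, with hypothetical circle fields standing in for the ones in the question:

using System;

static class ImplicitCsg
{
    // Signed scalar fields: values below the iso level (0 here) are "inside".
    // These circles are hypothetical stand-ins for the two in the question.
    static double CircleA(double x, double y) => Math.Sqrt(x * x + y * y) - 1.0;
    static double CircleB(double x, double y) => Math.Sqrt((x - 0.5) * (x - 0.5) + y * y) - 0.75;

    // CSG is done on the scalar values themselves, not on the triangles.
    static double Union(double a, double b)        => Math.Min(a, b);
    static double Intersection(double a, double b) => Math.Max(a, b);
    static double Difference(double a, double b)   => Math.Max(a, -b); // A minus B

    static void Main()
    {
        // Evaluate the combined field at each grid corner and hand that single
        // scalar to an unmodified marching-squares routine; the contouring code
        // never needs to know that two shapes were involved.
        double x = 0.3, y = 0.1;
        double d = Difference(CircleA(x, y), CircleB(x, y));
        Console.WriteLine(d < 0.0 ? "corner is inside A - B" : "corner is outside A - B");
    }
}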

    Read the article

  • Dotfuscator Deep Dive with WP7

    - by Bil Simser
    I thought I would share some experience with code obfuscation (specifically the Dotfuscator product) and Windows Phone 7 apps. These days twitter is a buzz with black hat and white operations coming out about how the marketplace is insecure and Microsoft failed, blah, blah, blah. So it’s that much more important to protect your intellectual property. You should protect it no matter what when releasing apps into the wild but more so when someone is paying for them. You want to protect the time and effort that went into your code and have some comfort that the casual hacker isn’t going to usurp your next best thing. Enter code obfuscation. Code obfuscation is one tool that can help protect your IP. Basically it goes into your compiled assemblies, rewrites things at an IL level (like renaming methods and classes and hiding logic flow) and rewrites it back so that the assembly or executable is still fully functional but prying eyes using a tool like ILDASM or Reflector can’t see what’s going on.  You can read more about code obfuscation here on Wikipedia. A word to the wise. Code obfuscation isn’t 100% secure. More so on the WP7 platform where the OS expects certain things to be as they were meant to be. So don’t expect 100% obfuscation of every class and every method and every property. It’s just not going to happen. What this does do is give you some level of protection but don’t put all your eggs in one basket and call it done. Like I said, this is just one step in the process. There are a few tools out there that provide code obfuscation and support the Windows Phone 7 platform (see links to other tools at the end of this post). One such tool is Dotfuscator from PreEmptive solutions. The thing about Dotfuscator is that they’ve struck a deal with Microsoft to provide a *free* copy of their commercial product for Windows Phone 7. The only drawback is that it only runs until March 31, 2010. However it’s a good place to start and the focus of this article. Getting Started When you fire up Dotfuscator you’re presented with a dialog to start a new project or load a previous one. We’ll start with a new project. You’re then looking at a somewhat blank screen that shows an Input tab (among others) and you’re probably wondering what to do? Click on the folder icon (first one) and browse to where your xap file is. At this point you can save the project and click on the arrow to start the process. Bam! You’re done. Right? Think again. The program did indeed run and create a new version of your xap (doing it’s thing and rewriting back your *obfuscated* assemblies) but let’s take a look at the assembly in Reflector to see the end result. Remember a xap file is really just a glorified zip file (or cab file if you prefer). When you ran Dotfuscator for the first time with the default settings you’ll see it created a new version of your xap in a folder under “My Documents” called “Dotfuscated” (you can configure the output directory in settings). Here’s the new xap file. Since a xap is just a zip, rename it to .cab or .zip or something and open it with your favorite unarchive program (I use WinRar but it doesn’t matter as long as it can unzip files). If you already have the xap file associated with your unarchive tool the rename isn’t needed. 
Once renamed extract the contents of the xap to your hard drive: Now you’ll have a folder with the contents of the xap file extracted: Double click or load up your assembly (WindowsPhoneDataBoundApplication1.dll in the example) in Reflector and let’s see the results: Hmm. That doesn’t look right. I can see all the methods and the code is all there for my LoadData method I wanted to protect. Product failure. Let’s return it for a refund. Hold your horses. We need to check out the settings in the program first. Remember when we loaded up our xap file. It started us on the Input tab but there was a settings tab before that. Wonder what it does? Here’s the default settings: Renaming Taking a closer look, all of the settings in Feature are disabled. WTF? Yeah, it leaves me scratching my head why an obfuscator by default doesn’t obfuscate. However it’s a simple fix to change these settings. Let’s enable Renaming as it sounds like a good start. Renaming obscures code by renaming methods and fields to names that are not understandable. Great. Run the tool again and go through the process of unzipping the updated xap and let’s take a look in Reflector again at our project. This looks a lot better. Lots of methods named a, b, c, d, etc. That’ll help slow hackers down a bit. What about our logic that we spent days weeks on? Let’s take a look at the LoadData method: What gives? We have renaming enabled but all of our code is still there. If you look through all your methods you’ll find it’s still sitting there out in the open. Control Flow Back to the settings page again. Let’s enable Control Flow now. Control Flow obfuscation synthesizes branching, conditional, and iterative constructs (such as if, for, and while) that produce valid executable logic, but yield non-deterministic semantic results when decompilation is attempted. In other words, the code runs as before, but decompilers cannot reproduce the original code. Do the dance again and let’s see the results in Reflector. Ahh, that’s better. Methods renamed *and* nobody can look at our LoadData method now. Life is good. More than Minimum This is the bare minimum to obfuscate your xap to at least a somewhat comfortable level. However I did find that while this worked in my Hello World demo, it didn’t work on one of my real world apps. I had to do some extra tweaking with that. Below are the screens that I used on one app that worked. I’m not sure what it was about the app that the approach above didn’t work with (maybe the extra assembly?) but it works and I’m happy with it. YMMV. Remember to test your obfuscated app on your device first before submitting to ensure you haven’t obfuscated the obfuscator. settings tab: rename tab: string encryption tab: premark tab: A few final notes Play with the settings and keep bumping up the bar to try to get as much obfuscation as you can. The more the better but remember you can overdo it. Always (always, always, always) deploy your obfuscated xap to your device and test it before submitting to the marketplace. I didn’t and got rejected because I had gone overboard with the obfuscation so the app wouldn’t launch at all. Not everything is going to be obfuscated. Specifically I don’t see a way to obfuscate auto properties and a few other language features. Again, if you crank the settings up you might hide these but I haven’t spent a lot of time optimizing the process. Some people might say to obfuscate your xaml using string encryption but again, test, test, test. 
Xaml is picky, so too much obfuscation (or any at all) might disable your app or produce odd rendering effects. Remember, obfuscation is not 100% secure! Don't rely on it as the sole way of protecting your assets. Other Tools Dotfuscator is just one product and isn't the be-all and end-all of obfuscation, so check out the others below. For example, Crypto can make it so Reflector doesn't even recognize the app as a .NET one and won't open it. Others can encrypt resources and Xaml markup files. Here are some other obfuscators that support the Windows Phone 7 platform. Feel free to give them a try and let people know your experience with them! Dotfuscator Windows Phone Edition Crypto Obfuscator for .NET DeepSea Obfuscation
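One more knob worth knowing about, related to the note above that not everything gets obfuscated: many .NET obfuscators can be steered from code via the standard System.Reflection.ObfuscationAttribute (declarative obfuscation). Whether a particular edition of a given tool honors it, and for which features, depends on its settings, so treat the following as a sketch to verify against your tool rather than a guaranteed switch; the class names are illustrative only:

using System.Reflection;

// Keep this type and its members readable after obfuscation, for example because
// it is created via reflection or serialized. "LoadDataService" is just a stand-in name.
[Obfuscation(Exclude = true, ApplyToMembers = true)]
public class LoadDataService
{
    public string LoadData() { return "data"; }
}

public class SettingsViewModel
{
    // Opt a single member out of renaming only, leaving other transforms alone.
    [Obfuscation(Feature = "renaming", Exclude = true)]
    public string DisplayName { get; set; }
}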

    Read the article

  • Optimizing Solaris 11 SHA-1 on Intel Processors

    - by danx
    SHA-1 is a "hash" or "digest" operation that produces a 160 bit (20 byte) checksum value on arbitrary data, such as a file. It is intended to uniquely identify text and to verify it hasn't been modified. Max Locktyukhin and others at Intel have improved the performance of the SHA-1 digest algorithm using multiple techniques. This code has been incorporated into Solaris 11 and is available in the Solaris Crypto Framework via the libmd(3LIB), the industry-standard libpkcs11(3LIB) library, and Solaris kernel module sha1. The optimized code is used automatically on systems with a x86 CPU supporting SSSE3 (Intel Supplemental SSSE3). Intel microprocessor architectures that support SSSE3 include Nehalem, Westmere, Sandy Bridge microprocessor families. Further optimizations are available for microprocessors that support AVX (such as Sandy Bridge). Although SHA-1 is considered obsolete because of weaknesses found in the SHA-1 algorithm—NIST recommends using at least SHA-256, SHA-1 is still widely used and will be with us for awhile more. Collisions (the same SHA-1 result for two different inputs) can be found with moderate effort. SHA-1 is used heavily though in SSL/TLS, for example. And SHA-1 is stronger than the older MD5 digest algorithm, another digest option defined in SSL/TLS. Optimizations Review SHA-1 operates by reading an arbitrary amount of data. The data is read in 512 bit (64 byte) blocks (the last block is padded in a specific way to ensure it's a full 64 bytes). Each 64 byte block has 80 "rounds" of calculations (consisting of a mixture of "ROTATE-LEFT", "AND", and "XOR") applied to the block. Each round produces a 32-bit intermediate result, called W[i]. Here's what each round operates: The first 16 rounds, rounds 0 to 15, read the 512 bit block 32 bits at-a-time. These 32 bits is used as input to the round. The remaining rounds, rounds 16 to 79, use the results from the previous rounds as input. Specifically for round i it XORs the results of rounds i-3, i-8, i-14, and i-16 and rotates the result left 1 bit. The remaining calculations for the round is a series of AND, XOR, and ROTATE-LEFT operators on the 32-bit input and some constants. The 32-bit result is saved as W[i] for round i. The 32-bit result of the final round, W[79], is the SHA-1 checksum. Optimization: Vectorization The first 16 rounds can be vectorized (computed in parallel) because they don't depend on the output of a previous round. As for the remaining rounds, because of step 2 above, computing round i depends on the results of round i-3, W[i-3], one can vectorize 3 rounds at-a-time. Max Locktyukhin found through simple factoring, explained in detail in his article referenced below, that the dependencies of round i on the results of rounds i-3, i-8, i-14, and i-16 can be replaced instead with dependencies on the results of rounds i-6, i-16, i-28, and i-32. That is, instead of initializing intermediate result W[i] with: W[i] = (W[i-3] XOR W[i-8] XOR W[i-14] XOR W[i-16]) ROTATE-LEFT 1 Initialize W[i] as follows: W[i] = (W[i-6] XOR W[i-16] XOR W[i-28] XOR W[i-32]) ROTATE-LEFT 2 That means that 6 rounds could be vectorized at once, with no additional calculations, instead of just 3! This optimization is independent of Intel or any other microprocessor architecture, although the microprocessor has to support vectorization to use it, and exploits one of the weaknesses of SHA-1. Optimization: SSSE3 Intel SSSE3 makes use of 16 %xmm registers, each 128 bits wide. 
The 4 32-bit inputs to a round, W[i-6], W[i-16], W[i-28], W[i-32], all fit in one %xmm register. The following code snippet, from Max Locktyukhin's article, converted to ATT assembly syntax, computes 4 rounds in parallel with just a dozen or so SSSE3 instructions: movdqa W_minus_04, W_TMP pxor W_minus_28, W // W equals W[i-32:i-29] before XOR // W = W[i-32:i-29] ^ W[i-28:i-25] palignr $8, W_minus_08, W_TMP // W_TMP = W[i-6:i-3], combined from // W[i-4:i-1] and W[i-8:i-5] vectors pxor W_minus_16, W // W = (W[i-32:i-29] ^ W[i-28:i-25]) ^ W[i-16:i-13] pxor W_TMP, W // W = (W[i-32:i-29] ^ W[i-28:i-25] ^ W[i-16:i-13]) ^ W[i-6:i-3]) movdqa W, W_TMP // 4 dwords in W are rotated left by 2 psrld $30, W // rotate left by 2 W = (W >> 30) | (W << 2) pslld $2, W_TMP por W, W_TMP movdqa W_TMP, W // four new W values W[i:i+3] are now calculated paddd (K_XMM), W_TMP // adding 4 current round's values of K movdqa W_TMP, (WK(i)) // storing for downstream GPR instructions to read A window of the 32 previous results, W[i-1] to W[i-32] is saved in memory on the stack. This is best illustrated with a chart. Without vectorization, computing the rounds is like this (each "R" represents 1 round of SHA-1 computation): RRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRR With vectorization, 4 rounds can be computed in parallel: RRRRRRRRRRRRRRRRRRRR RRRRRRRRRRRRRRRRRRRR RRRRRRRRRRRRRRRRRRRR RRRRRRRRRRRRRRRRRRRR Optimization: AVX The new "Sandy Bridge" microprocessor architecture, which supports AVX, allows another interesting optimization. SSSE3 instructions have two operands, a input and an output. AVX allows three operands, two inputs and an output. In many cases two SSSE3 instructions can be combined into one AVX instruction. The difference is best illustrated with an example. Consider these two instructions from the snippet above: pxor W_minus_16, W // W = (W[i-32:i-29] ^ W[i-28:i-25]) ^ W[i-16:i-13] pxor W_TMP, W // W = (W[i-32:i-29] ^ W[i-28:i-25] ^ W[i-16:i-13]) ^ W[i-6:i-3]) With AVX they can be combined in one instruction: vpxor W_minus_16, W, W_TMP // W = (W[i-32:i-29] ^ W[i-28:i-25] ^ W[i-16:i-13]) ^ W[i-6:i-3]) This optimization is also in Solaris, although Sandy Bridge-based systems aren't widely available yet. As an exercise for the reader, AVX also has 256-bit media registers, %ymm0 - %ymm15 (a superset of 128-bit %xmm0 - %xmm15). Can %ymm registers be used to parallelize the code even more? Optimization: Solaris-specific In addition to using the Intel code described above, I performed other minor optimizations to the Solaris SHA-1 code: Increased the digest(1) and mac(1) command's buffer size from 4K to 64K, as previously done for decrypt(1) and encrypt(1). This size is well suited for ZFS file systems, but helps for other file systems as well. Optimized encode functions, which byte swap the input and output data, to copy/byte-swap 4 or 8 bytes at-a-time instead of 1 byte-at-a-time. Enhanced the Solaris mdb(1) and kmdb(1) debuggers to display all 16 %xmm and %ymm registers (mdb "$x" command). Previously they only displayed the first 8 that are available in 32-bit mode. Can't optimize if you can't debug :-). Changed the SHA-1 code to allow processing in "chunks" greater than 2 Gigabytes (64-bits) Performance I measured performance on a Sun Ultra 27 (which has a Nehalem-class Xeon 5500 Intel W3570 microprocessor @3.2GHz). Turbo mode is disabled for consistent performance measurement. 
Graphs are better than words and numbers, so here they are: The first graph shows the Solaris digest(1) command before and after the optimizations discussed here, contained in libmd(3LIB). I ran the digest command on a half GByte file in swapfs (/tmp) and execution time decreased from 1.35 seconds to 0.98 seconds. The second graph shows the results of an internal microbenchmark that uses the Solaris libpkcs11(3LIB) library. The operations are on a 128 byte buffer with 10,000 iterations. The results show operations increased from 320,000 to 416,000 operations per second. Finally, the third graph shows the results of an internal kernel microbenchmark that uses the Solaris /kernel/crypto/amd64/sha1 module. The operations are on a 64Kbyte buffer with 100 iterations. The results show that for 1 kernel thread, operations increased from 410 to 600 MBytes/second. For 8 kernel threads, operations increased from 1540 to 1940 MBytes/second. Availability This code is in Solaris 11 FCS. It is available in the 64-bit libmd(3LIB) library for 64-bit programs and is in the Solaris kernel. You must be running hardware that supports Intel's SSSE3 instructions (for example, Intel Nehalem, Westmere, or Sandy Bridge microprocessor architectures). The easiest way to determine if SSSE3 is available is with the isainfo(1) command. For example, nehalem $ isainfo -v 64-bit amd64 applications sse4.2 sse4.1 ssse3 popcnt tscp ahf cx16 sse3 sse2 sse fxsr mmx cmov amd_sysc cx8 tsc fpu 32-bit i386 applications sse4.2 sse4.1 ssse3 popcnt tscp ahf cx16 sse3 sse2 sse fxsr mmx cmov sep cx8 tsc fpu If the output also shows "avx", Solaris executes the even-more-optimized 3-operand AVX instructions for SHA-1 mentioned above: sandybridge $ isainfo -v 64-bit amd64 applications avx xsave pclmulqdq aes sse4.2 sse4.1 ssse3 popcnt tscp ahf cx16 sse3 sse2 sse fxsr mmx cmov amd_sysc cx8 tsc fpu 32-bit i386 applications avx xsave pclmulqdq aes sse4.2 sse4.1 ssse3 popcnt tscp ahf cx16 sse3 sse2 sse fxsr mmx cmov sep cx8 tsc fpu No special configuration or setup is needed to take advantage of this code. The Solaris libraries and kernel automatically determine whether they are running on an SSSE3- or AVX-capable machine and execute the correctly-tuned code for that microprocessor. Summary The Solaris 11 Crypto Framework, via the sha1 kernel module and the libmd(3LIB) and libpkcs11(3LIB) libraries, incorporates a useful SHA-1 optimization from Intel for SSSE3-capable microprocessors. As with other Solaris optimizations, it comes automatically "under the hood" with the current Solaris release. References "Improving the Performance of the Secure Hash Algorithm (SHA-1)" by Max Locktyukhin (Intel, March 2010). The source for these SHA-1 optimizations used in Solaris "SHA-1", Wikipedia Good overview of SHA-1 FIPS 180-1 SHA-1 standard (FIPS, 1995) NIST Comments on Cryptanalytic Attacks on SHA-1 (2005, revised 2006)
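To make the message-schedule factoring from the vectorization section above concrete, here is a plain scalar C# sketch of the W[i] expansion for one 64-byte block. It is only an illustration of the recurrence (not the Solaris or Intel code): rounds 16-31 use the standard rotate-by-1 form, and rounds 32 onwards use the equivalent rotate-by-2 form that drops the i-3 dependency, which is what lets the SIMD code compute several W values per instruction.

using System;

static class Sha1Schedule
{
    static uint Rol(uint value, int bits) => (value << bits) | (value >> (32 - bits));

    // Expand one padded 64-byte block, already split into 16 big-endian 32-bit words,
    // into the 80-entry message schedule W consumed by the 80 SHA-1 rounds.
    static uint[] Expand(uint[] block16)
    {
        var w = new uint[80];
        Array.Copy(block16, w, 16);

        // Rounds 16..31: the standard recurrence, rotate-left by 1.
        for (int i = 16; i < 32; i++)
            w[i] = Rol(w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16], 1);

        // Rounds 32..79: the factored, equivalent form described in the article,
        // rotate-left by 2, with no dependency on w[i - 3].
        for (int i = 32; i < 80; i++)
            w[i] = Rol(w[i - 6] ^ w[i - 16] ^ w[i - 28] ^ w[i - 32], 2);

        return w;
    }

    static void Main()
    {
        // Expand an all-zero block just to show the call; real input comes from padding the message.
        uint[] w = Expand(new uint[16]);
        Console.WriteLine("schedule length: " + w.Length);
    }
}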

    Read the article

  • How did I get here? My route to Android, iPhone, Windows Phone 7, and interest in Mobile Devices

    - by Wallym
    I get asked all the time how/why I got interested in mobile and jumped on this fairly early.  I tend to give half answers because it wasn't just one thing that took me to mobile, but a whole host of separate ivents culminating in a specific event where I wasdoing market research in May/June 2008.  Let me throw out the events and the facts about me: I tend to like new, different, cool stuff.  I jumped on .NET early on.  I jumped on Ajax early on.  I don't jump on every new technology that comes down the road, I'm probably the only person on the planet that doesn't "get" MVC, though I acknowledge that a lot of people do and it solves a number of problems in the default settings of ASP.NET WebForms. I remember buying an early Windows CE device. It was interesting, but dang, this stylus thing sucks. After I lost my third stylus, i just gave up.  I got my first mobile phone in early 1999.  Reception was crappy, but I could see the value in being mobile. In 1999, I worked on a manufacturing systems project.  One piece of the projects was a set of handheld devices on the shop floor.  While the UI was a crappy DOS based, yes I said DOS as in Disk Operating System Version 6.22, I could see that the wireless world was a direction I wanted to be in. In 2000, Microsoft released the first public alpha of .NET.  Very cool stuff indeed.  One piece of the puzzle was a set of mobile controls for ASP.NET.  I build numerous test apps as well as mobile version using these mobile controls.  Now, the mobile UIs of the time were based on WML, which was crap. I could real all the analysis of mobile and read all about growth rates.  Now, you have to realize that growth rates can be impressive when dealing with small numbers, but I knew it was a comer. In our first book, I got talked out of mobile because of the line from the publisher "Wally, mobile doesn't sell." Blackberry was the dominant device of the mid 2000s.  Its users were referred to as "Crackberry addicts."  Unfortunately, the mobile development experience for native apps was crap and the web experience was fairly rough as well, but if they could get the ecosystem started, other phones and better blackberryies would come out.  I finally jumped into using a blackberry. Sometime around 2006, I heard "Wally, mobile doesn't sell" again.  Now, anyone that knows me knows that someone saying something like this to me means I'll keep trying it. The phones of the mid 2000s were moving to be more graphical, but there were too many that had this idea that they had to use a stylus.  Stylus suck.  They get lost too easily. I worked on a project in 2007 and 2008 for a startup trying to answer the question of "What is there to do where I am at?"  For some reason, they wanted to be tied to PCs.  As it became obvious that they were having problems, their investor asked us to do some market research and to figure out what the marketplace did want.  One of the important things that I figured out was the we lived in a mobile world and if you had a mobile app, it need to be on a mobile device, not tied to a desktop/laptop/netbook device.  If there was any single event, this was it - I was doing some market research and sat and talked to people in a bar/restaurant in Atlanta called "The Grove" on Lavista.  The consensus of the people that I talked to was that they wanted their data where ever they were at, laptop, pc, mobile, whereever. In 2007, Apple released the iPhone.  Wow, what an impressive device, even with all the problems of a 1st generation device.  
I bought an iPod Touch 1st generation to understand touch better, one of the best decisions I ever made. I decided in late 2008, to make a move into cloud, for a number of reasons.  I was working on an example app.  In April, 2009, one of my friends at Microsoft said "don't mention my name with this, but you need an iPhone front end for this app."  How do you get on the iPhone.  Well, there are a number of ways including: ObjectiveC.  Its hard to teach an old dog new tricks, and this dog knows .NET, not ObjectiveC. HTML, web, javascript optimized interface.  yeah, this is possible. PhoneGap.  Now, this is interesting, take an html interface and get it to run on the iPhone, Android, Blackberry, and other platforms.  I thought that this way made the most sense for me until......... MonoTouch.  In May/June 2009, Novell announced a way for .NET/c# developers to write apps for the iPhone.  This is the way that made the most sense to me. Titanium by Appcelerator.  This is similar in concept to PhoneGap.  I haven't played with this much but do want to learn more about it. In July, 2009, I emailed one of my contacts at Wrox to see if they would be interested in a short MonoTouch ebook in their Wrox Blox format.  I fully expected another  response along the lines of "Wally, mobile doesn't sell."  The response I got was "Wally, iPhone is H O T, get started immediately, can you have this to me before Labor Day."  Not quite the response I expected.  Thankfully, we didn't make the Labor Day, first draft date. I kept pushing back because I had a feeling that things were not going to be quite as polished and feature rich as necessary.  After all, Novell doesn't have the resouces of Microsoft's developer division. The ebook shipped on November 30, 2009. On about December, 15, 2009, my editor emailed and said "Your ebook is selling really well, lets do a full book and it by March 1 so get started."  Thankfully, guys like Craig Dunn and Chris Hardy were interested along with Martin and Ror joinged us later on. I bought my wife an iPhone 3Gs in early 2010 to go along with all my iPod Touch devices. I tried to pretend in 2010 that I wasn't that interested in mobile and still had interest in the desktop technologies.  I love the technologies and continue to use them today, but that isn't where my interest is right now.  I'm just about all mobile all the time with my energies.  Our book shipped in the beginning of July, 2010 right in the middle of the Apple FUD.I've been looking at Mobile Web as a way around the AppStores and Apple FUD problems of 2010. With all the Apple self FUD, we became interested in Android.I went up to Dino Esposito at DevConnections in Las Vegas at introduced myself. I've always tried to keep up with what Dino has been doing. I was shocked, he wanted to meet me.  We must have talked for 1.5 hours. It was way more time than I deserved. If you get a chance, go and introduce yourself to Dino. He's a great guy. Microsoft released Windows Phone 7 in the Fall of 2010.  I'm not doing development on that platform at this time.  I think they have a very interesting user interface.  The devices are being positively reviewed.  For my purposes, the devices are limited at this point in time.  We'll see what 2011 brings as far as updates to the operating system.  I need multitasking/background processing and html5 in the browser. Add that as well as acceptance in the marketplace and I'll be more interested in the device. Obviosuly, I'm now working on a MonoDroid book . 
I own Android and iPhone/iOS devices. I am currently working on some startup ideas and am exploring as much in that area as I can. For 2011, I'm planning on speaking at the Android Developer's Conference (AnDevCon) and Mobile Connections. I'm really excited about this. I have a couple of magazine articles coming out in 2011 on Android and iPhone development with the Mono technologies. Is Mono "The Answer"? What's "The Question"? I think it will work for me. It might work for you, it might not. It depends on your situation. It's the current horse that I am riding. I might find a better horse tomorrow. So, that's how I got here. I'm in love with mobile. Mobile native apps on the device as well as mobile web. I'm into all this cool stuff. Where are you at?

    Read the article

  • Guest Post: Using IronRuby and .NET to produce the ‘Hello World of WPF’

    - by Eric Nelson
    [You might want to also read other GuestPosts on my blog – or contribute one?] On the 26th and 27th of March (2010) myself and Edd Morgan of Microsoft will be popping along to the Scottish Ruby Conference. I dabble with Ruby and I am a huge fan whilst Edd is a “proper Ruby developer”. Hence I asked Edd if he was interested in creating a guest post or two for my blog on IronRuby. This is the second of those posts. If you should stumble across this post and happen to be attending the Scottish Ruby Conference, then please do keep a look out for myself and Edd. We would both love to chat about all things Ruby and IronRuby. And… we should have (if Amazon is kind) a few books on IronRuby with us at the conference which will need to find a good home. This is me and Edd and … the book: Order on Amazon: http://bit.ly/ironrubyunleashed Using IronRuby and .NET to produce the ‘Hello World of WPF’ In my previous post I introduced, to a minor extent, IronRuby. I expanded a little on the basics of by getting a Rails app up-and-running on this .NET implementation of the Ruby language — but there wasn't much to it! So now I would like to go from simply running a pre-existing project under IronRuby to developing a whole new application demonstrating the seamless interoperability between IronRuby and .NET. In particular, we'll be using WPF (Windows Presentation Foundation) — the component of the .NET Framework stack used to create rich media and graphical interfaces. Foundations of WPF To reiterate, WPF is the engine in the .NET Framework responsible for rendering rich user interfaces and other media. It's not the only collection of libraries in the framework with the power to do this — Windows Forms does the trick, too — but it is the most powerful and flexible. Put simply, WPF really excels when you need to employ eye candy. It's all about creating impact. Whether you're presenting a document, video, a data entry form, some kind of data visualisation (which I am most hopeful for, especially in terms of IronRuby - more on that later) or chaining all of the above with some flashy animations, you're likely to find that WPF gives you the most power when developing any of these for a Windows target. Let's demonstrate this with an example. I give you what I like to consider the 'hello, world' of WPF applications: the analogue clock. Today, over my lunch break, I created a WPF-based analogue clock using IronRuby... Any normal person would have just looked at their watch. - Twitter The Sample Application: Click here to see this sample in full on GitHub. Using Windows Presentation Foundation from IronRuby to create a Clock class Invoking the Clock class   Gives you The above is by no means perfect (it was a lunch break), but I think it does the job of illustrating IronRuby's interoperability with WPF using a familiar data visualisation. I'm sure you'll want to dissect the code yourself, but allow me to step through the important bits. (By the way, feel free to run this through ir first to see what actually happens). Now we're using IronRuby - unlike my previous post where we took pure Ruby code and ran it through ir, the IronRuby interpreter, to demonstrate compatibility. The main thing of note is the very distinct parallels between .NET namespaces and Ruby modules, .NET classes and Ruby classes. I guess there's not much to say about it other than at this point, you may as well be working with a purely Ruby graphics-drawing library. 
You're instantiating .NET objects, but you're doing it with the standard Ruby .new method you know from Ruby as Object#new — although, the root object of all your IronRuby objects isn't actually Object, it's System.Object. You're calling methods on these objects (and classes, for example in the call to System.Windows.Controls.Canvas.SetZIndex()) using the underscored, lowercase convention established for the Ruby language. The integration is so seamless. The fact that you're using a dynamic language on top of .NET's CLR is completely abstracted from you, allowing you to just build your software. A Brief Note on Events Events are a big part of developing client applications in .NET as well as under every other environment I can think of. In case you aren't aware, event-driven programming is essentially the practice of telling your code to call a particular method, or other chunk of code (a delegate) when something happens at an unpredictable time. You can never predict when a user is going to click a button, move their mouse or perform any other kind of input, so the advent of the GUI is what necessitated event-driven programming. This is where one of my favourite aspects of the Ruby language, blocks, can really help us. In traditional C#, for instance, you may subscribe to an event (assign a block of code to execute when an event occurs) in one of two ways: by passing a reference to a named method, or by providing an anonymous code block. You'd be right for seeing the parallel here with Ruby's concept of blocks, Procs and lambdas. As demonstrated at the very end of this rather basic script, we are using .NET's System.Timers.Timer to (attempt to) update the clock every second (I know it's probably not the best way of doing this, but for example's sake). Note: Diverting a little from what I said above, the ticking of a clock is very predictable, yet we still use the event our Timer throws to do this updating as one of many ways to perform that task outside of the main thread. You'll see that all that's needed to assign a block of code to be triggered on an event is to provide that block to the method of the name of the event as it is known to the CLR. This drawback to this is that it only allows the delegation of one code block to each event. You may use the add method to subscribe multiple handlers to that event - pushing that to the end of a queue. Like so: def tick puts "tick tock" end timer.elapsed.add method(:tick) timer.elapsed.add proc { puts "tick tock" } tick_handler = lambda { puts "tick tock" } timer.elapsed.add(tick_handler)   The ability to just provide a block of code as an event handler helps IronRuby towards that very important term I keep throwing around; low ceremony. Anonymous methods are, of course, available in other more conventional .NET languages such as C# and VB but, as usual, feel ever so much more elegant and natural in IronRuby. Note: Whether it's a named method or an anonymous chunk o' code, the block you delegate to the handling of an event can take arguments - commonly, a sender object and some args. Another Brief Note on Verbosity Personally, I don't mind verbose chaining of references in my code as long as it doesn't interfere with performance - as evidenced in the example above. While I love clean code, there's a certain feeling of safety that comes with the terse explicitness of long-winded addressing and the describing of objects as opposed to ambiguity (not unlike this sentence). 
However, when working with IronRuby, even I grow tired of typing System::Whatever::Something. Some people enjoy simply assuming namespaces and forgetting about them, regardless of the language they're using. Don't worry, IronRuby has you covered. It is completely possible to, with a call to include, bring the contents of a .NET-converted module into context of your IronRuby code - just as you would if you wanted to bring in an 'organic' Ruby module. To refactor the style of the above example, I could place the following at the top of my Clock class: class Clock include System::Windows::Shape include System::Windows::Media include System::Windows::Threading # and so on...   And by doing so, reduce calls to System::Windows::Shapes::Ellipse.new to simply Ellipse.new or references to System::Windows::Threading::DispatcherPriority.Render to a friendlier DispatcherPriority.Render. Conclusion I hope by now you can understand better how IronRuby interoperates with .NET and how you can harness the power of the .NET framework with the dynamic nature and elegant idioms of the Ruby language. The manner and parlance of Ruby that makes it a joy to work with sets of data is, of course, present in IronRuby — couple that with WPF's capability to produce great graphics quickly and easily, and I hope you can visualise the possibilities of data visualisation using these two things. Using IronRuby and WPF together to create visual representations of data and infographics is very exciting to me. Although today, with this project, we're only presenting one simple piece of information - the time - the potential is much grander. My day-to-day job is centred around software development and UI design, specifically in the realm of healthcare, and if you were to pay a visit to our office you would behold, directly above my desk, a large plasma TV with a constantly rotating, animated slideshow of charts and infographics to help members of our team do their jobs. It's an app powered by WPF which never fails to spark some conversation with visitors whose gaze has been hooked. If only it was written in IronRuby, the pleasantly low ceremony and reduced pre-processing time for my brain would have helped greatly. Edd Morgan blog Related Links: Getting PhP and Ruby working on Windows Azure and SQL Azure
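As a side-by-side for the events section above: the timer.elapsed.add calls map onto ordinary C# event subscriptions against System.Timers.Timer, with a named method or a lambda playing the role of the Ruby block. The one-second interval and the Tick handler below are just illustrative:

using System;
using System.Timers;

class TickDemo
{
    static void Tick(object sender, ElapsedEventArgs e)
    {
        Console.WriteLine("tick tock");
    }

    static void Main()
    {
        var timer = new Timer(1000); // fire every second

        // Subscribe a named method...
        timer.Elapsed += Tick;
        // ...or an anonymous handler, the C# counterpart of handing over a block.
        timer.Elapsed += (sender, e) => Console.WriteLine("tick tock");

        timer.Start();
        Console.ReadLine(); // keep the process alive so the timer can fire
        timer.Stop();
    }
}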

    Read the article

  • Asp.net MVC/Silverlight and Sharepoint 2010 integration

    - by Robert Koritnik
    Just a sidenote: I'm not sure whether I should post this to serverfault as well, because some MOSS admin may have some info for me as well? Additional note 1: I've found this document (Asp.net MVC 2 & Sharepoint integration) if anybody with sufficient expirience is willing to comment on its content whether this can be used in my described scenario or not. Additional note 2: I've discovered (later) that Silverlight is supported in Sharepoint 2010 so I'm considering it as well. So if anyone would comment on silverlight integration as well. A bit of explanation first (without Asp.net MVC/Silverlight) Is it possible to integrate the two? Is it possible to write an application that would share at least credential information with MOSS? I have to write a MOSS application that has to do with these technologies: MOSS 2010 Personal client certificates authentication (most probably on USB keys) Active Directory Federation Services Separate SQL DB that would serve application specific data (separate as not being part of MOSS DB) How should it work? Users should authenticate using personal certificates into MOSS 2010 There would be a certain part of MOSS that would be related to my custom application This application should only authorize certain users via AD FS - I guess these users should have a certain security claim attached to them This application should manage users (that have access to this app) with additional (app specific) security claims related to this application (as additional application level authorization rights for individual application parts) This application should use custom SQL 2008 DB heavily with its own data This application should have the possibility to integrate with external systems as well (Exchange for instance to inject calendar entries, ERP systems etc) This application should be able to export its data (from its DB) to files. I don't know if it's possible, but it would be nice if the app could add these files to MOSS and attach authorization info to them so only users with sufficient rights would be able to view/open these files. Why Asp.net MVC/Silverlight then? I'm very well versed in Asp.net MVC (also with the latest version) and I haven't done anything on Sharepoint since version 2003 (which doesn't do me no good or prepare me for the latest version in any way shape or form). This project will most probably be a death march project so I would rather write my application as a UI rich Asp.net MVC application and somehow integrate it into MOSS. But not only via a link, because I would like to at least share credentials, so users wouldn't need to re-login when accessing my app. Using Asp.net MVC I would at least have the possibility to finish on time or be less death marching. Is this at all possible? I haven't done any serious project using SIlverlight, but I will sooner or later have to. So I'm also considering a jump into it at this moment, because it still might make this application development easier than strict Sharepoint 2010. Questions Is it possible to integrate Asp.net MVC/Silverlight into MOSS as described above? If integration is not possible, would it be possible to create a completely MOSS based application that would work as described? Which parts of MOSS 2010 should I use to accomplish what I need?
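On the claims side of the question above, the authorization checks themselves are not much code regardless of whether the front end ends up being MOSS, ASP.NET MVC or Silverlight. A rough sketch using Windows Identity Foundation (the claims library of that era), assuming WIF is already configured for the site and that the claim type URI and value below are placeholders invented for the example:

using System.Linq;
using System.Threading;
using Microsoft.IdentityModel.Claims; // Windows Identity Foundation

public static class AppAuthorization
{
    // Hypothetical application-specific claim issued via AD FS.
    private const string AppAccessClaim = "http://schemas.example.org/claims/projectapp/access";

    public static bool CurrentUserCanUseApp()
    {
        var identity = Thread.CurrentPrincipal.Identity as IClaimsIdentity;
        if (identity == null || !identity.IsAuthenticated)
            return false;

        return identity.Claims.Any(c =>
            c.ClaimType == AppAccessClaim && c.Value == "true");
    }
}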

    Read the article

  • compiling openss7

    - by deddihp
    hello, i got an error while compiling openss7. Do you know what happen ? Thanks.... gcc -DHAVE_CONFIG_H -I. -I. -I. -DLFS=1 -imacros ./config.h -imacros ./include/sys/config.h -I. -I./include -I./include -nostdinc -iwithprefix include -DLINUX -D__KERNEL__ -I/usr/src/linux-headers-lbm-2.6.28-11-generic -I/lib/modules/2.6.28-11-generic/build/include -Iinclude2 -I/lib/modules/2.6.28-11-generic/build/include -I/lib/modules/2.6.28-11-generic/build/arch/x86/include -include /lib/modules/2.6.28-11-generic/build/include/linux/autoconf.h -Iubuntu/include -I/lib/modules/2.6.28-11-generic/build/ubuntu/include -I/lib/modules/2.6.28-11-generic/build/arch/x86/include/asm/mach-default '-DKBUILD_STR(s)=#s' '-DKBUILD_BASENAME=KBUILD_STR('`echo libLfS_specfs_a-specfs.o | sed -e 's,lib.*_a-,,;s,\.o,,;s,-,_,g'`')' -DMODULE -D__NO_VERSION__ -DEXPORT_SYMTAB -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs -fno-strict-aliasing -fno-common -Werror-implicit-function-declaration -O2 -m32 -msoft-float -mregparm=3 -freg-struct-return -mpreferred-stack-boundary=2 -march=i586 -mtune=generic -Wa,-mtune=generic32 -pipe -Wno-sign-compare -fno-asynchronous-unwind-tables -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -fno-stack-protector -fno-omit-frame-pointer -fno-optimize-sibling-calls -Wdeclaration-after-statement -Wno-pointer-sign -fwrapv -ffreestanding -c -o libLfS_specfs_a-specfs.o `test -f 'src/kernel/specfs.c' || echo './'`src/kernel/specfs.c In file included from src/kernel/specfs.c:123: src/kernel/strspecfs.c: In function ‘specfs_init_cache’: src/kernel/strspecfs.c:1406: warning: passing argument 5 of ‘kmem_cache_create’ from incompatible pointer type src/kernel/strspecfs.c:1406: error: too many arguments to function ‘kmem_cache_create’ In file included from src/kernel/specfs.c:126: src/kernel/strlookup.c: In function ‘cdev_lookup’: src/kernel/strlookup.c:508: warning: format not a string literal and no format arguments src/kernel/strlookup.c:514: warning: format not a string literal and no format arguments src/kernel/strlookup.c:521: warning: format not a string literal and no format arguments src/kernel/strlookup.c: In function ‘cdrv_lookup’: src/kernel/strlookup.c:562: warning: format not a string literal and no format arguments src/kernel/strlookup.c: In function ‘fmod_lookup’: src/kernel/strlookup.c:604: warning: format not a string literal and no format arguments src/kernel/strlookup.c: In function ‘cdev_search’: src/kernel/strlookup.c:709: warning: format not a string literal and no format arguments src/kernel/strlookup.c:716: warning: format not a string literal and no format arguments src/kernel/strlookup.c: In function ‘fmod_search’: src/kernel/strlookup.c:768: warning: format not a string literal and no format arguments src/kernel/strlookup.c: In function ‘cmin_search’: src/kernel/strlookup.c:823: warning: format not a string literal and no format arguments src/kernel/strlookup.c:830: warning: format not a string literal and no format arguments src/kernel/strlookup.c:840: warning: format not a string literal and no format arguments src/kernel/strlookup.c:848: warning: format not a string literal and no format arguments In file included from src/kernel/specfs.c:129: src/kernel/strattach.c: In function ‘check_mnt’: src/kernel/strattach.c:131: error: ‘struct vfsmount’ has no member named ‘mnt_namespace’ src/kernel/strattach.c:131: error: ‘struct task_struct’ has no member named ‘namespace’ src/kernel/strattach.c: In function ‘do_fattach’: src/kernel/strattach.c:200: error: ‘struct nameidata’ has no member 
named ‘dentry’ src/kernel/strattach.c:200: error: ‘struct nameidata’ has no member named ‘mnt’ src/kernel/strattach.c:200: error: ‘struct nameidata’ has no member named ‘dentry’ src/kernel/strattach.c:203: error: ‘struct nameidata’ has no member named ‘mnt’ src/kernel/strattach.c:208: error: ‘struct nameidata’ has no member named ‘mnt’ src/kernel/strattach.c:208: error: ‘struct nameidata’ has no member named ‘mnt’ src/kernel/strattach.c:208: error: ‘struct nameidata’ has no member named ‘dentry’ src/kernel/strattach.c:226: error: implicit declaration of function ‘path_release’ src/kernel/strattach.c: In function ‘do_fdetach’: src/kernel/strattach.c:253: error: ‘struct nameidata’ has no member named ‘dentry’ src/kernel/strattach.c:253: error: ‘struct nameidata’ has no member named ‘mnt’ src/kernel/strattach.c:255: error: ‘struct nameidata’ has no member named ‘mnt’ src/kernel/strattach.c:257: error: ‘struct nameidata’ has no member named ‘dentry’ src/kernel/strattach.c:262: error: ‘struct nameidata’ has no member named ‘mnt’ src/kernel/strattach.c:265: error: ‘struct nameidata’ has no member named ‘mnt’ In file included from src/kernel/specfs.c:132: src/kernel/strpipe.c: In function ‘do_spipe’: src/kernel/strpipe.c:372: warning: assignment discards qualifiers from pointer target type make[4]: *** [libLfS_specfs_a-specfs.o] Error 1 make[4]: Leaving directory `/home/deddihp/dev/source/openss7-0.9.2.G/streams-0.9.2.4' make[3]: *** [all-recursive] Error 1 make[3]: Leaving directory `/home/deddihp/dev/source/openss7-0.9.2.G/streams-0.9.2.4' make[2]: *** [all] Error 2 make[2]: Leaving directory `/home/deddihp/dev/source/openss7-0.9.2.G/streams-0.9.2.4' make[1]: *** [all-recursive] Error 1 make[1]: Leaving directory `/home/deddihp/dev/source/openss7-0.9.2.G' make: *** [all] Error 2

    Read the article

  • CodePlex Daily Summary for Tuesday, December 21, 2010

    CodePlex Daily Summary for Tuesday, December 21, 2010Popular ReleasesSQL Monitor - tracking sql server activities: SQL Monitor 3.0 alpha 7: 1. added script save/load in user query window 2. fixed problem with connection dialog when choosing windows auth but still ask for user name 3. auto open user table when double click one table node 4. improved alert message, added log only methodOpen NFe: Open NFe 2.0 (Beta): Última versão antes da versão final a ser lançada nos próximos dias.EnhSim: EnhSim 2.2.6 ALPHA: 2.2.6 ALPHAThis release supports WoW patch 4.03a at level 85 To use this release, you must have the Microsoft Visual C++ 2010 Redistributable Package installed. This can be downloaded from http://www.microsoft.com/downloads/en/details.aspx?FamilyID=A7B7A05E-6DE6-4D3A-A423-37BF0912DB84 To use the GUI you must have the .NET 4.0 Framework installed. This can be downloaded from http://www.microsoft.com/downloads/en/details.aspx?FamilyID=9cfb2d51-5ff4-4491-b0e5-b386f32c0992 - Fixing up some r...LINQ to Twitter: LINQ to Twitter Beta v2.0.18: Silverlight, OAuth, 100% Twitter API coverage, streaming, extensibility via Raw Queries, and added documentation. Bug fixes.ASP.NET MVC Project Awesome (jQuery Ajax helpers): 1.4.3: Helpers (controls) that you can use to build highly responsive and interactive Ajax-enabled Web applications. These helpers include Autocomplete, AjaxDropdown, Lookup, Confirm Dialog, Popup Form, Popup and Pager new stuff: Improvements for confirm, popup, popup form RenderView controller extension the user experience for crud in live demo has been substantially improved + added search all the features are shown in the live demoGanttPlanner: GanttPlanner V1.0: GanttPlanner V1.0 include GanttPlanner.dll and also a Demo application.N2 CMS: 2.1 release candidate 3: * Web platform installer support available N2 is a lightweight CMS framework for ASP.NET. It helps you build great web sites that anyone can update. Major Changes Support for auto-implemented properties ({get;set;}, based on contribution by And Poulsen) A bunch of bugs were fixed File manager improvements (multiple file upload, resize images to fit) New image gallery Infinite scroll paging on news Content templates First time with N2? Try the demo site Download one of the templ...TweetSharp: TweetSharp v2.0.0.0 - Preview 6: Documentation for this release may be found at http://tweetsharp.codeplex.com/wikipage?title=UserGuide&referringTitle=Documentation. Note: This code is currently preview quality. Preview 6 ChangesMaintenance release with user reported fixes Preview 5 ChangesMaintenance release with user reported fixes Preview 4 ChangesReintroduced fluent interface support via satellite assembly Added entities support, entity segmentation, and ITweetable/ITweeter interfaces for client development Numer...Team Foundation Server Administration Tool: 2.1: TFS Administration Tool 2.1, is the first version of the TFS Administration Tool which is built on top of the Team Foundation Server 2010 object model. TFS Administration Tool 2.1 can be installed on machines that are running either Team Explorer 2010, or Team Foundation Server 2010.Hacker Passwords: HackerPasswords.zip: Source code, executable and documentationSubtitleTools: SubtitleTools 1.3: - Added .srt FileAssociation & Win7 ShowRecentCategory feature. - Applied UnifiedYeKe to fix Persian search problems. 
- Reduced file size of Persian subtitles for uploading @OSDB.Facebook C# SDK: 4.1.0: - Lots of bug fixes - Removed Dynamic Runtime Language dependencies from non-dynamic platforms. - Samples included in release for ASP.NET, MVC, Silverlight, Windows Phone 7, WPF, WinForms, and one Visual Basic Sample - Changed internal serialization to use Json.net - BREAKING CHANGE: Canvas Session is no longer support. Use Signed Request instead. Canvas Session has been deprecated by Facebook. - BREAKING CHANGE: Some renames and changes with Authorizer, CanvasAuthorizer, and Authorization ac...NuGet (formerly NuPack): NuGet 1.0 build 11217.102: Note: this release is slightly newer than RC1, and fixes a couple issues relating to updating packages to newer versions. NuGet is a free, open source developer focused package management system for the .NET platform intent on simplifying the process of incorporating third party libraries into a .NET application during development. This release is a Visual Studio 2010 extension and contains the the Package Manager Console and the Add Package Dialog. This new build targets the newer feed (h...WCF Community Site: WCF Web APIs 10.12.17: Welcome to the second release of WCF Web APIs on codeplex Here is what is new in this release. WCF Support for jQuery - create WCF web services that are easy to consume from JavaScript clients, in particular jQuery. Better support for using JsonValue as dynamic Support for JsonValue change notification events for databinding and other purposes Support for going between JsonValue and CLR types WCF HTTP - create HTTP / REST based web services. This is a minor release which contains fixe...LiveChat Starter Kit: LCSK v1.0: This is a working version of the LCSK for Visual Studio 2010, ASP.NET MVC 3 (using Razor View Engine). this is still provider based (with 1 provider Sql) and this is still using WebService and Windows Forms operator console. The solution is cleaner, with an installer to create tables etc. You can also install it via nuget (Install-Package lcsk) Let me know your feedbackOrchard Project: Orchard 0.9: Orchard Release Notes Build: 0.9.253 Published: 12/16/2010 How to Install OrchardTo install the Orchard tech preview using Web PI, follow these instructions: http://www.orchardproject.net/docs/Installing-Orchard-Using-Web-PI.ashx Web PI will detect your hardware environment and install the application. --OR-- Alternatively, to install the release manually, download the Orchard.Web.0.9.253.zip file. The zip contents are pre-built and ready-to-run. Simply extract the contents of the Orch...DotSpatial: DotSpatial 12-15-2010: This release contains a few minor bug fixes and hopefully the GDAL libraries for the 3.5 x86 build actually built to the correct directory this time.DotNetNuke® Community Edition: 05.06.01 Beta: This is the initial Beta of DotNetNuke 5.6.1. See the DotNetNuke Roadmap a full list of changes in this release.MSBuild Extension Pack: December 2010: Release Blog Post The MSBuild Extension Pack December 2010 release provides a collection of over 380 MSBuild tasks. 
A high level summary of what the tasks currently cover includes the following: System Items: Active Directory, Certificates, COM+, Console, Date and Time, Drives, Environment Variables, Event Logs, Files and Folders, FTP, GAC, Network, Performance Counters, Registry, Services, Sound Code: Assemblies, AsyncExec, CAB Files, Code Signing, DynamicExecute, File Detokenisation, GU...Silverlight Contrib: Silverlight Contrib 2010.1.0: 2010.1.0 New FeaturesCompatibility Release for Silverlight 4 and Visual Studio 2010New Projectsaqq2: aqq2aqq2aqq2aqq2aqq2aqq2aqq2aqq2aqq2aqq2aqq2aqq2aqq2aqq2aqq2aqq2aqq2aqq2aqq2aqq2Cameleon CMS: Caméléon, le CMS open-source le plus simple d'utilisation sur le marché.Dail Emulator: Dail Emulator is a software that emulates the server software of Aeonsofts game FlyFF. Specifically made for the now defunct version 6 of the game.Dmitry Lyalin's WP7 Sample Application: A project where im putting all my public samples in a single WP7 client test applicationElixir: DotNetNuke module test framework. Use this DLL to write concise, to-the-point tests for your modules that can target multiple DNN platforms and multiple browsers.FlightTableEditor: This edits the Flight and Healing Tables of Pokémon Fire Red and Pokémon Leaf Green.Genetico UNLaM: Algoritmo Genetico UNLaMHelpCentral HelpDesk Ticket Application: An open-ended framework and client for a helpdesk ticket application and bug tracker. Built to be as extensible and available as possible, the central system is a WCF webservice which can be accessed by any client application.Import Media for Umbraco: This package allows you to import multiple or large files that have been uploaded to the server via FTP or other means. A new media item is created in the media tree and the media is moved from the upload location to the new directory for the media item.LargeCMS full sample (How to call CryptMsg API in streaming mode): LargeCms implements SignedLargeCMS and EnvelopedLargeCMS classes, my own version of .NET SignedCms and EnvelopedCMS to work with large data. It shows a way to call CryptMsg API in streaming mode to overcome the limitations of current .NET classesLinq.Autocompiler: Adds runtime, on demand compilation of Linq queries (in Linq to SQL), which creates CompiledQuery instances and stores them in a cache. Mathetris: it's a game.MonoWiimote: Managed c# library for using Wii Remote device under Mono/Linux enviroment. It requires libcwiimote-0.4 (sudo apt-get install libcwiimote-0.4).Movie Captor: Concorrendo no Desafio .NETNSpotifyLib: A .NET library contains wrapper classes for the Spotify Metadata API. http://developer.spotify.com/en/metadata-api/overview/ This library supports searching for artists, albums and tracks by using the Spotify Metadata API. In the future I'll add support for Lookups as well.Silverhawk: It's developed in C# programming language.SimpleSpy: Detecting object handle on cursor move and get its information. Basically this is a bare bone program for detecting object handle at cursor and add your own logic to detect for further information. SunburnJitter: JitterSunburn contains helper classes and methods to use the jitter physics engine easier in your sunburn projects. Simple component based shapes will provide easy editing in editor. 
It's developed in C#.Sunrod: Sunrod is a .NET wrapper for the Obsidian Portal API.TouchStack - A MonoTouch ServiceStack.NET client: TouchStack makes it easier for MonoTouch developers to consume Web services created and exposed by the brilliant ServiceStack.NET webservice framework. TouchStack is written in C# and uses the MonoTouch library.VNTViewer: Viewer for notes/memos (VNT files)VpNet: Official .NET Wrapper for Virtual Paradise.xlGUI: Streamlet GUI Framework

    Read the article

  • Where'd My Data Go? (and/or...How Do I Get Rid of It?)

    - by David Paquette
    Want to get a better idea of how cascade deletes work in Entity Framework Code First scenarios? Want to see it in action? Stick with us as we quickly demystify what happens when you tell your data context to nuke a parent entity. This post is authored by Calgary .NET User Group Leader David Paquette with help from Microsoft MVP in Asp.Net James Chambers. We got to spend a great week back in March at Prairie Dev Con West, chock-full of sessions, presentations, workshops, conversations and, of course, questions.  One of the questions that came up during my session: "How does Entity Framework Code First deal with cascading deletes?". James and I had different thoughts on what the default was, if it was different from SQL Server, if it was the same as EF proper and if there was a way to override whatever the default was.  So we built a set of examples and figured out that the answer is simple: it depends.  (Download Samples) Consider the example of a hockey league. You have several different entities in the league including games, teams that play the games and players that make up the teams. Each team also has a mascot.  If you delete a team, we need a couple of things to happen: The team, games and mascot will be deleted, and The players for that team will remain in the league (and therefore the database) but they should no longer be assigned to a team. So, let's make this start to come together with a look at the default behaviour in SQL when using an EDMX-driven project. The Reference – Understanding EF's Behaviour with an EDMX/DB First Approach First up, let’s take a look at the DB first approach.  In the database, we defined 4 tables: Teams, Players, Mascots, and Games.  We also defined 4 foreign keys as follows: Players.Team_Id (NULL) –> Teams.Id Mascots.Id (NOT NULL) –> Teams.Id (ON DELETE CASCADE) Games.HomeTeam_Id (NOT NULL) –> Teams.Id Games.AwayTeam_Id (NOT NULL) –> Teams.Id Note that by specifying ON DELETE CASCADE for the Mascots –> Teams foreign key, the database will automatically delete the team’s mascot when the team is deleted.  While we want the same behaviour for the Games –> Teams foreign keys, it is not possible to accomplish this using ON DELETE CASCADE in SQL Server.  Specifying ON DELETE CASCADE on these foreign keys would cause a circular reference error: The series of cascading referential actions triggered by a single DELETE or UPDATE must form a tree that contains no circular references. No table can appear more than one time in the list of all cascading referential actions that result from the DELETE or UPDATE – MSDN When we create an entity data model from the above database, we get the following:   In order to get the Games to be deleted when the Team is deleted, we need to specify an End1 OnDelete action of Cascade for the HomeGames and AwayGames associations.   Now, we have an Entity Data Model that accomplishes what we set out to do.  One caveat here is that Entity Framework will only properly handle the cascading delete when the players and games for the team have been loaded into memory.  For a more detailed look at Cascade Delete in EF Database First, take a look at this blog post by Alex James.   Building The Same Sample with EF Code First Next, we're going to build up the model with the code first approach.  
EF Code First is defined on the Ado.Net team blog as such: Code First allows you to define your model using C# or VB.Net classes, optionally additional configuration can be performed using attributes on your classes and properties or by using a Fluent API. Your model can be used to generate a database schema or to map to an existing database. Entity Framework Code First follows some conventions to determine when to cascade delete on a relationship.  More details can be found on MSDN: If a foreign key on the dependent entity is not nullable, then Code First sets cascade delete on the relationship. If a foreign key on the dependent entity is nullable, Code First does not set cascade delete on the relationship, and when the principal is deleted the foreign key will be set to null. The multiplicity and cascade delete behavior detected by convention can be overridden by using the fluent API. For more information, see Configuring Relationships with Fluent API (Code First). Our DbContext consists of 4 DbSets: public DbSet<Team> Teams { get; set; } public DbSet<Player> Players { get; set; } public DbSet<Mascot> Mascots { get; set; } public DbSet<Game> Games { get; set; } When we set the Mascot –> Team relationship to required, Entity Framework will automatically delete the Mascot when the Team is deleted.  This can be done either using the [Required] data annotation attribute, or by overriding the OnModelCreating method of your DbContext and using the fluent API. Data Annotations: public class Mascot { public int Id { get; set; } public string Name { get; set; } [Required] public virtual Team Team { get; set; } } Fluent API: protected override void OnModelCreating(DbModelBuilder modelBuilder) { modelBuilder.Entity<Mascot>().HasRequired(m => m.Team); } The Player –> Team relationship is automatically handled by the Code First conventions. When a Team is deleted, the Team property for all the players on that team will be set to null.  No additional configuration is required, however all the Player entities must be loaded into memory for the cascading to work properly. The Game –> Team relationship causes some grief in our Code First example.  If we try setting the HomeTeam and AwayTeam relationships to required, Entity Framework will attempt to set On Cascade Delete for the HomeTeam and AwayTeam foreign keys when creating the database tables.  As we saw in the database first example, this causes a circular reference error and throws the following SqlException: Introducing FOREIGN KEY constraint 'FK_Games_Teams_AwayTeam_Id' on table 'Games' may cause cycles or multiple cascade paths. Specify ON DELETE NO ACTION or ON UPDATE NO ACTION, or modify other FOREIGN KEY constraints. Could not create constraint. To solve this problem, we need to disable the default cascade delete behaviour using the fluent API: protected override void OnModelCreating(DbModelBuilder modelBuilder) { modelBuilder.Entity<Mascot>().HasRequired(m => m.Team); modelBuilder.Entity<Team>() .HasMany(t => t.HomeGames) .WithRequired(g => g.HomeTeam) .WillCascadeOnDelete(false); modelBuilder.Entity<Team>() .HasMany(t => t.AwayGames) .WithRequired(g => g.AwayTeam) .WillCascadeOnDelete(false); base.OnModelCreating(modelBuilder); } Unfortunately, this means we need to manually manage the cascade delete behaviour.  When a Team is deleted, we need to manually delete all the home and away Games for that Team. 
foreach (Game awayGame in jets.AwayGames.ToArray()) { entities.Games.Remove(awayGame); } foreach (Game homeGame in homeGames) { entities.Games.Remove(homeGame); } entities.Teams.Remove(jets); entities.SaveChanges();   Overriding the Defaults – When and How To As you have seen, the default behaviour of Entity Framework Code First can be overridden using the fluent API.  This can be done by overriding the OnModelCreating method of your DbContext, or by creating separate model override files for each entity.  More information is available on MSDN.   Going Further These were simple examples but they helped us illustrate a couple of points. First of all, we were able to demonstrate the default behaviour of Entity Framework when dealing with cascading deletes, specifically how entity relationships affect the outcome. Secondly, we showed you how to modify the code and control the behaviour to get the outcome you're looking for. Finally, we showed you how easy it is to explore this kind of thing, and we're hoping that you get a chance to experiment even further. For example, did you know that: Entity Framework Code First also works seamlessly with SQL Azure (MSDN) Database creation defaults can be overridden using a variety of IDatabaseInitializers  (Understanding Database Initializers) You can use Code Based migrations to manage database upgrades as your model continues to evolve (MSDN) Next Steps There's no time like the present to start the learning, so here's what you need to do: Get up-to-date in Visual Studio 2010 (VS2010 | SP1) or Visual Studio 2012 (VS2012) Build yourself a project to try these concepts out (or download the sample project) Get into the community and ask questions! There are a ton of great resources out there and community members willing to help you out (like these two guys!). Good luck! About the Authors David Paquette works as a lead developer at P2 Energy Solutions in Calgary, Alberta where he builds commercial software products for the energy industry.  Outside of work, David enjoys outdoor camping, fishing, and skiing. David is also active in the software community giving presentations both locally and at conferences. David also serves as the President of Calgary .Net User Group. James Chambers crafts software awesomeness with an incredible team at LogiSense Corp, based in Cambridge, Ontario. A husband, father and humanitarian, he is currently residing in the province of Manitoba where he resists the urge to cheer for the Jets and maintains his allegiance to the Calgary Flames. When he's not active with the family, outdoors or volunteering, you can find James speaking at conferences and user groups across the country about web development and related technologies.
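
    For reference, here is a minimal sketch of what the entity classes behind the example above might look like. The class and property names are assumptions inferred from the relationships described in the post, not code taken from the downloadable sample; the Mascot class is already shown above.

    using System.Collections.Generic;

    public class Team
    {
        public int Id { get; set; }
        public string Name { get; set; }
        public virtual ICollection<Player> Players { get; set; }
        public virtual ICollection<Game> HomeGames { get; set; }
        public virtual ICollection<Game> AwayGames { get; set; }
    }

    public class Player
    {
        public int Id { get; set; }
        public string Name { get; set; }
        // Optional by convention: Code First will not cascade delete, and will set
        // this reference to null when the Team is deleted (once the Player is loaded).
        public virtual Team Team { get; set; }
    }

    public class Game
    {
        public int Id { get; set; }
        // The post marks these as required via the fluent API (WithRequired) and then
        // turns off the conventional cascade with WillCascadeOnDelete(false).
        public virtual Team HomeTeam { get; set; }
        public virtual Team AwayTeam { get; set; }
    }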

    Read the article

  • Taming Hopping Windows

    - by Roman Schindlauer
    At first glance, hopping windows seem fairly innocuous and obvious. They organize events into windows with a simple periodic definition: the windows have some duration d (e.g. a window covers 5 second time intervals), an interval or period p (e.g. a new window starts every 2 seconds) and an alignment a (e.g. one of those windows starts at 12:00 PM on March 15, 2012 UTC). var wins = xs     .HoppingWindow(TimeSpan.FromSeconds(5),                    TimeSpan.FromSeconds(2),                    new DateTime(2012, 3, 15, 12, 0, 0, DateTimeKind.Utc)); Logically, there is a window with start time a + np and end time a + np + d for every integer n. That’s a lot of windows. So why doesn’t the following query (always) blow up? var query = wins.Select(win => win.Count()); A few users have asked why StreamInsight doesn’t produce output for empty windows. Primarily it’s because there is an infinite number of empty windows! (Actually, StreamInsight uses DateTimeOffset.MaxValue to approximate “the end of time” and DateTimeOffset.MinValue to approximate “the beginning of time”, so the number of windows is lower in practice.) That was the good news. Now the bad news. Events also have duration. Consider the following simple input: var xs = this.Application                 .DefineEnumerable(() => new[]                     { EdgeEvent.CreateStart(DateTimeOffset.UtcNow, 0) })                 .ToStreamable(AdvanceTimeSettings.IncreasingStartTime); Because the event has no explicit end edge, it lasts until the end of time. So there are lots of non-empty windows if we apply a hopping window to that single event! For this reason, we need to be careful with hopping window queries in StreamInsight. Or we can switch to a custom implementation of hopping windows that doesn’t suffer from this shortcoming. The alternate window implementation produces output only when the input changes. We start by breaking up the timeline into non-overlapping intervals assigned to each window. In figure 1, six hopping windows (“Windows”) are assigned to six intervals (“Assignments”) in the timeline. Next we take input events (“Events”) and alter their lifetimes (“Altered Events”) so that they cover the intervals of the windows they intersect. In figure 1, you can see that the first event e1 intersects windows w1 and w2 so it is adjusted to cover assignments a1 and a2. Finally, we can use snapshot windows (“Snapshots”) to produce output for the hopping windows. Notice however that instead of having six windows generating output, we have only four. The first and second snapshots correspond to the first and second hopping windows. The remaining snapshots however cover two hopping windows each! While in this example we saved only two events, the savings can be more significant when the ratio of event duration to window duration is higher. Figure 1: Timeline The implementation of this strategy is straightforward. We need to set the start times of events to the start time of the interval assigned to the earliest window including the start time. Similarly, we need to modify the end times of events to the end time of the interval assigned to the latest window including the end time. The following snap-to-boundary function that rounds a timestamp value t down to the nearest value t' <= t such that t' is a + np for some integer n will be useful. For convenience, we will represent both DateTime and TimeSpan values using long ticks: static long SnapToBoundary(long t, long a, long p) {     return t - ((t - a) % p) - (t > a ? 
0L : p); } How do we find the earliest window including the start time for an event? It’s the window following the last window that does not include the start time, assuming that there are no gaps in the windows (i.e. duration >= interval), which is a limitation of this solution. To find the end time of that antecedent window, we need to know the alignment of window ends: long e = a + (d % p); Using the window end alignment, we are finally ready to describe the start time selector: static long AdjustStartTime(long t, long e, long p) {     return SnapToBoundary(t, e, p) + p; } To find the latest window including the end time for an event, we look for the last window start time (non-inclusive): public static long AdjustEndTime(long t, long a, long d, long p) {     return SnapToBoundary(t - 1, a, p) + p + d; } Bringing it together, we can define the translation from events to ‘altered events’ as in Figure 1: public static IQStreamable<T> SnapToWindowIntervals<T>(IQStreamable<T> source, TimeSpan duration, TimeSpan interval, DateTime alignment) {     if (source == null) throw new ArgumentNullException("source");     // reason about DateTime and TimeSpan in ticks     long d = Math.Min(DateTime.MaxValue.Ticks, duration.Ticks);     long p = Math.Min(DateTime.MaxValue.Ticks, Math.Abs(interval.Ticks));     // set alignment to earliest possible window     var a = alignment.ToUniversalTime().Ticks % p;     // verify constraints of this solution     if (d <= 0L) { throw new ArgumentOutOfRangeException("duration"); }     if (p == 0L || p > d) { throw new ArgumentOutOfRangeException("interval"); }     // find the alignment of window ends     long e = a + (d % p);     return source.AlterEventLifetime(         evt => ToDateTime(AdjustStartTime(evt.StartTime.ToUniversalTime().Ticks, e, p)),         evt => ToDateTime(AdjustEndTime(evt.EndTime.ToUniversalTime().Ticks, a, d, p)) -             ToDateTime(AdjustStartTime(evt.StartTime.ToUniversalTime().Ticks, e, p))); } public static DateTime ToDateTime(long ticks) {     // just snap to min or max value rather than under/overflowing     return ticks < DateTime.MinValue.Ticks         ? new DateTime(DateTime.MinValue.Ticks, DateTimeKind.Utc)         : ticks > DateTime.MaxValue.Ticks         ? new DateTime(DateTime.MaxValue.Ticks, DateTimeKind.Utc)         : new DateTime(ticks, DateTimeKind.Utc); } Finally, we can describe our custom hopping window operator: public static IQWindowedStreamable<T> HoppingWindow2<T>(     IQStreamable<T> source,     TimeSpan duration,     TimeSpan interval,     DateTime alignment) {     if (source == null) { throw new ArgumentNullException("source"); }     return SnapToWindowIntervals(source, duration, interval, alignment).SnapshotWindow(); } By switching from HoppingWindow to HoppingWindow2 in the following example, the query returns quickly rather than gobbling resources and ultimately failing! 
public void Main() {     var start = new DateTimeOffset(new DateTime(2012, 6, 28), TimeSpan.Zero);     var duration = TimeSpan.FromSeconds(5);     var interval = TimeSpan.FromSeconds(2);     var alignment = new DateTime(2012, 3, 15, 12, 0, 0, DateTimeKind.Utc);     var events = this.Application.DefineEnumerable(() => new[]     {         EdgeEvent.CreateStart(start.AddSeconds(0), "e0"),         EdgeEvent.CreateStart(start.AddSeconds(1), "e1"),         EdgeEvent.CreateEnd(start.AddSeconds(1), start.AddSeconds(2), "e1"),         EdgeEvent.CreateStart(start.AddSeconds(3), "e2"),         EdgeEvent.CreateStart(start.AddSeconds(9), "e3"),         EdgeEvent.CreateEnd(start.AddSeconds(3), start.AddSeconds(10), "e2"),         EdgeEvent.CreateEnd(start.AddSeconds(9), start.AddSeconds(10), "e3"),     }).ToStreamable(AdvanceTimeSettings.IncreasingStartTime);     var adjustedEvents = SnapToWindowIntervals(events, duration, interval, alignment);     var query = from win in HoppingWindow2(events, duration, interval, alignment)                 select win.Count();     DisplayResults(adjustedEvents, "Adjusted Events");     DisplayResults(query, "Query"); } As you can see, instead of producing a massive number of windows for the open start edge e0, a single window is emitted from 12:00:15 AM until the end of time:

Adjusted Events
StartTime                EndTime                   Payload
6/28/2012 12:00:01 AM    12/31/9999 11:59:59 PM    e0
6/28/2012 12:00:03 AM    6/28/2012 12:00:07 AM     e1
6/28/2012 12:00:05 AM    6/28/2012 12:00:15 AM     e2
6/28/2012 12:00:11 AM    6/28/2012 12:00:15 AM     e3

Query
StartTime                EndTime                   Payload
6/28/2012 12:00:01 AM    6/28/2012 12:00:03 AM     1
6/28/2012 12:00:03 AM    6/28/2012 12:00:05 AM     2
6/28/2012 12:00:05 AM    6/28/2012 12:00:07 AM     3
6/28/2012 12:00:07 AM    6/28/2012 12:00:11 AM     2
6/28/2012 12:00:11 AM    6/28/2012 12:00:15 AM     3
6/28/2012 12:00:15 AM    12/31/9999 11:59:59 PM    1

Regards, The StreamInsight Team
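
    To make the lifetime-adjustment arithmetic above concrete, here is a small standalone C# sketch that reuses the SnapToBoundary, AdjustStartTime and AdjustEndTime helpers from the post to compute the adjusted lifetime of event e1 (start +1s, end +2s, with the 5-second duration / 2-second interval / 12:00:00 UTC alignment used in the example). The driver code and names are illustrative only; the expected values in the comments match the "Adjusted Events" output shown above.

    using System;

    static class HoppingWindowMath
    {
        // Helpers copied from the post, operating on tick values.
        static long SnapToBoundary(long t, long a, long p)
        {
            return t - ((t - a) % p) - (t > a ? 0L : p);
        }

        static long AdjustStartTime(long t, long e, long p)
        {
            return SnapToBoundary(t, e, p) + p;
        }

        static long AdjustEndTime(long t, long a, long d, long p)
        {
            return SnapToBoundary(t - 1, a, p) + p + d;
        }

        static void Main()
        {
            long d = TimeSpan.FromSeconds(5).Ticks;   // window duration
            long p = TimeSpan.FromSeconds(2).Ticks;   // hop interval
            long a = new DateTime(2012, 3, 15, 12, 0, 0, DateTimeKind.Utc).Ticks % p;
            long e = a + (d % p);                     // alignment of window ends

            // Event e1 from the example: starts at 00:00:01 and ends at 00:00:02 on 2012-06-28 UTC.
            long start = new DateTime(2012, 6, 28, 0, 0, 1, DateTimeKind.Utc).Ticks;
            long end = new DateTime(2012, 6, 28, 0, 0, 2, DateTimeKind.Utc).Ticks;

            Console.WriteLine(new DateTime(AdjustStartTime(start, e, p), DateTimeKind.Utc)); // 6/28/2012 12:00:03 AM
            Console.WriteLine(new DateTime(AdjustEndTime(end, a, d, p), DateTimeKind.Utc));  // 6/28/2012 12:00:07 AM
        }
    }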

    Read the article

  • CSG operations on implicit surfaces with marching cubes

    - by Mads Elvheim
    I render isosurfaces with marching cubes, (or perhaps marching squares as this is 2D) and I want to do set operations like set difference, intersection and union. I thought this was easy to implement, by simply choosing between two vertex scalars from two different implicit surfaces, but it is not. For my initial testing, I tried with two spheres, and the set operation difference. i.e A - B. One sphere is moving and the other one is stationary. Here's the approach I tried when picking vertex scalars and when classifying corner vertices as inside or outside. The code is written in C++. OpenGL is used for rendering, but that's not important. Normal rendering without any CSG operations does give the expected result. void march(const vec2& cmin, //min x and y for the grid cell const vec2& cmax, //max x and y for the grid cell std::vector<vec2>& tri, float iso, float (*cmp1)(const vec2&), //distance from stationary sphere float (*cmp2)(const vec2&) //distance from moving sphere ) { unsigned int squareindex = 0; float scalar[4]; vec2 verts[8]; /* initial setup of the grid cell */ verts[0] = vec2(cmax.x, cmax.y); verts[2] = vec2(cmin.x, cmax.y); verts[4] = vec2(cmin.x, cmin.y); verts[6] = vec2(cmax.x, cmin.y); float s1,s2; /********************************** ********For-loop of interest****** *******Set difference between **** *******two implicit surfaces****** **********************************/ for(int i=0,j=0; i<4; ++i, j+=2){ s1 = cmp1(verts[j]); s2 = cmp2(verts[j]); if((s1 < iso)){ //if inside sphere1 if((s2 < iso)){ //if inside sphere2 scalar[i] = s2; //then set the scalar to the moving sphere } else { scalar[i] = s1; //only inside sphere1 squareindex |= (1<<i); //mark as inside } } else { scalar[i] = s1; //inside neither sphere } } if(squareindex == 0) return; /* Usual interpolation between edge points to compute the new intersection points */ verts[1] = mix(iso, verts[0], verts[2], scalar[0], scalar[1]); verts[3] = mix(iso, verts[2], verts[4], scalar[1], scalar[2]); verts[5] = mix(iso, verts[4], verts[6], scalar[2], scalar[3]); verts[7] = mix(iso, verts[6], verts[0], scalar[3], scalar[0]); for(int i=0; i<10; ++i){ //10 = maxmimum 3 triangles, + one end token int index = triTable[squareindex][i]; //look up our indices for triangulation if(index == -1) break; tri.push_back(verts[index]); } } This gives me weird jaggies: It looks like the CSG operation is done without interpolation. It just "discards" the whole triangle. Do I need to interpolate in some other way, or combine the vertex scalar values? I'd love some help with this. A full testcase can be downloaded HERE
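
    For context only (this is not what the code above does, and the names below are illustrative): one common way to avoid exactly these jaggies is to combine the two implicit functions into a single scalar field first, and then run the ordinary marching squares classification and edge interpolation on that combined field, so the same values drive both steps. A minimal C# sketch, assuming signed distance functions evaluated against iso = 0:

    using System;

    static class CsgFields
    {
        // dA and dB are the two implicit functions (for example, the two spheres).
        public static Func<float, float, float> Difference(Func<float, float, float> dA,
                                                           Func<float, float, float> dB)
        {
            return (x, y) => Math.Max(dA(x, y), -dB(x, y)); // A minus B
        }

        public static Func<float, float, float> Union(Func<float, float, float> dA,
                                                      Func<float, float, float> dB)
        {
            return (x, y) => Math.Min(dA(x, y), dB(x, y));
        }

        public static Func<float, float, float> Intersection(Func<float, float, float> dA,
                                                             Func<float, float, float> dB)
        {
            return (x, y) => Math.Max(dA(x, y), dB(x, y));
        }
    }

    // Usage idea: build the combined field once, then march it like a single surface,
    // using the same combined scalar for both the inside/outside test and the interpolation:
    //   var diff = CsgFields.Difference(sphere1, sphere2);
    //   float s = diff(vertX, vertY);   // s < 0 means inside the difference A - B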

    Read the article

  • Does this inheritance design belong in the database?

    - by Berryl
    === CLARIFICATION ==== The 'answers' older than March are not answers to the question in this post! Hello In my domain I need to track allocations of time spent on Activities by resources. There are two general types of Activities of interest - ones based on a Project and ones based on an Account. The notion of Project and Account have other features totally unrelated to both each other and capturing allocations of time, and each is modeled as a table in the database. For a given Allocation of time however, it makes sense to not care whether the allocation was made to either a Project or an Account, so an ActivityBase class abstracts away the difference. An ActivityBase is either a ProjectActivity or an AccountingActivity (object model is below). Back to the database though, there is no direct value in having tables for ProjectActivity and AccountingActivity. BUT the Allocation table needs to store something in the column for its ActivityBase. Should that something be the Id of the Project / Account or a reference to tables for ProjectActivity / Accounting? How would the mapping look? === Current Db Mapping (Fluent) ==== Below is how the mapping currently looks: public class ActivityBaseMap : IAutoMappingOverride<ActivityBase> { public void Override(AutoMapping<ActivityBase> mapping) { //mapping.IgnoreProperty(x => x.BusinessId); //mapping.IgnoreProperty(x => x.Description); //mapping.IgnoreProperty(x => x.TotalTime); mapping.IgnoreProperty(x => x.UniqueId); } } public class AccountingActivityMap : SubclassMap<AccountingActivity> { public void Override(AutoMapping<AccountingActivity> mapping) { mapping.References(x => x.Account); } } public class ProjectActivityMap : SubclassMap<ProjectActivity> { public void Override(AutoMapping<ProjectActivity> mapping) { mapping.References(x => x.Project); } } There are two odd smells here. Firstly, the inheritance chain adds nothing in the way of properties - it simply adapts Projects and Accounts into a common interface so that either can be used in an Allocation. Secondly, the properties in the ActivityBase interface are redundant to keep in the db, since that information is available in Projects and Accounts. Cheers, Berryl ==== Domain ===== public class Allocation : Entity { ... public virtual ActivityBase Activity { get; private set; } ... } public abstract class ActivityBase : Entity { public virtual string BusinessId { get; protected set; } public virtual string Description { get; protected set; } public virtual ICollection<Allocation> Allocations { get { return _allocations.Values; } } public virtual TimeQuantity TotalTime { get { return TimeQuantity.Hours(Allocations.Sum(x => x.TimeSpent.Amount)); } } } public class ProjectActivity : ActivityBase { public virtual Project Project { get; private set; } public ProjectActivity(Project project) { BusinessId = project.Code.ToString(); Description = project.Description; Project = project; } }
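
    For illustration only (this is not the poster's mapping, and the discriminator column and values are assumptions): one way to keep ProjectActivity and AccountingActivity out of their own tables with Fluent NHibernate is a table-per-class-hierarchy mapping, where both subclasses share the ActivityBase table and Allocation simply references ActivityBase through a single foreign key column. A minimal sketch using classic ClassMaps rather than the auto-mapping overrides shown above:

    using FluentNHibernate.Mapping;

    public class ActivityBaseMap : ClassMap<ActivityBase>
    {
        public ActivityBaseMap()
        {
            Id(x => x.Id);                               // assumes the Entity base exposes an Id
            DiscriminateSubClassesOnColumn("ActivityType");
            // BusinessId / Description could be mapped here or recomputed from Project/Account
        }
    }

    public class ProjectActivityMap : SubclassMap<ProjectActivity>
    {
        public ProjectActivityMap()
        {
            DiscriminatorValue("Project");
            References(x => x.Project);
        }
    }

    public class AccountingActivityMap : SubclassMap<AccountingActivity>
    {
        public AccountingActivityMap()
        {
            DiscriminatorValue("Accounting");
            References(x => x.Account);
        }
    }

    public class AllocationMap : ClassMap<Allocation>
    {
        public AllocationMap()
        {
            Id(x => x.Id);                               // assumes the Entity base exposes an Id
            References(x => x.Activity);                 // single FK column to the ActivityBase table
        }
    }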

    Read the article

  • The Stub Proto: Not Just For Stub Objects Anymore

    - by user9154181
    One of the great pleasures of programming is to invent something for a narrow purpose, and then to realize that it is a general solution to a broader problem. In hindsight, these things seem perfectly natural and obvious. The stub proto area used to build the core Solaris consolidation has turned out to be one of those things. As discussed in an earlier article, the stub proto area was invented as part of the effort to use stub objects to build the core ON consolidation. Its purpose was merely as a place to hold stub objects. However, we keep finding other uses for it. It turns out that the stub proto should be more properly thought of as an auxiliary place to put things that we would like to put into the proto to help us build the product, but which we do not wish to package or deliver to the end user. Stub objects are one example, but private lint libraries, header files, archives, and relocatable objects, are all examples of things that might profitably go into the stub proto. Without a stub proto, these items were handled in a variety of ad hoc ways: If one part of the workspace needed private header files, libraries, or other such items, it might modify its Makefile to reach up and over to the place in the workspace where those things live and use them from there. There are several problems with this: Each component invents its own approach, meaning that programmers maintaining the system have to invest extra effort to understand what things mean. In the past, this has created makefile ghettos in which only the person who wrote the makefiles feels confident to modify them, while everyone else ignores them. This causes many difficulties and benefits no one. These interdependencies are not obvious to the make utility, and can lead to races. They are not obvious to the human reader, who may therefore not realize that they exist, and break them. Our policy in ON is not to deliver files into the proto unless those files are intended to be packaged and delivered to the end user. However, sometimes non-shipping files were copied into the proto anyway, causing a different set of problems: It requires a long list of exceptions to silence our normal unused proto item error checking. In the past, we have accidentally shipped files that we did not intend to deliver to the end user. Mixing cruft with valuable items makes it hard to discern which is which. The stub proto area offers a convenient and robust solution. Files needed to build the workspace that are not delivered to the end user can instead be installed into the stub proto. No special exceptions or custom make rules are needed, and the intent is always clear. We are already accessing some private lint libraries and compilation symlinks in this manner. Ultimately, I'd like to see all of the files in the proto that have a packaging exception delivered to the stub proto instead, and for the elimination of all existing special case makefile rules. This would include shared objects, header files, and lint libraries. I don't expect this to happen overnight — it will be a long term case by case project, but the overall trend is clear. The Stub Proto, -z assert_deflib, And The End Of Accidental System Object Linking We recently used the stub proto to solve an annoying build issue that goes back to the earliest days of Solaris: How to ensure that we're linking to the OS bits we're building instead of to those from the running system. 
The Solaris product is made up of objects and files from a number of different consolidations, each of which is built separately from the others from an independent code base called a gate. The core Solaris OS consolidation is ON, which stands for "Operating System and Networking". You will frequently also see ON called the OSnet. There are consolidations for X11 graphics, the desktop environment, open source utilities, compilers and development tools, and many others. The collection of consolidations that make up Solaris is known as the "Wad Of Stuff", usually referred to simply as the WOS. None of these consolidations is self contained. Even the core ON consolidation has some dependencies on libraries that come from other consolidations. The build server used to build the OSnet must be running a relatively recent version of Solaris, which means that its objects will be very similar to the new ones being built. However, it is necessarily true that the build system objects will always be a little behind, and that incompatible differences may exist. The objects built by the OSnet link to other objects. Some of these dependencies come from the OSnet, while others come from other consolidations. The objects from other consolidations are provided by the standard library directories on the build system (/lib, /usr/lib). The objects from the OSnet itself are supposed to come from the proto areas in the workspace, and not from the build server. In order to achieve this, we make use of the -L command line option to the link-editor. The link-editor finds dependencies by looking in the directories specified by the caller using the -L command line option. If the desired dependency is not found in one of these locations, ld will then fall back to looking at the default locations (/lib, /usr/lib). In order to use OSnet objects from the workspace instead of the system, while still accessing non-OSnet objects from the system, our Makefiles set -L link-editor options that point at the workspace proto areas. In general, this works well and dependencies are found in the right places. However, there have always been failures: Building objects in the wrong order might mean that an OSnet dependency hasn't been built before an object that needs it. If so, the dependency will not be seen in the proto, and the link-editor will silently fall back to the one on the build server. Errors in the makefiles can wipe out the -L options that our top level makefiles establish to cause ld to look at the workspace proto first. In this case, all objects will be found on the build server. These failures were rarely if ever caught. As I mentioned earlier, the objects on the build server are generally quite close to the objects built in the workspace. If they offer compatible linking interfaces, then the objects that link to them will behave properly, and no issue will ever be seen. However, if they do not offer compatible linking interfaces, the failure modes can be puzzling and hard to pin down. Either way, there won't be a compile-time warning or error. The advent of the stub proto eliminated the first type of failure. With stub objects, there is no dependency ordering, and the necessary stub object dependency will always be in place for any OSnet object that needs it. However, makefile errors do still occur, and so, the second form of error was still possible. 
While working on the stub object project, we realized that the stub proto was also the key to solving the second form of failure caused by makefile errors: Due to the way we set the -L options to point at our workspace proto areas, any valid object from the OSnet should be found via a path specified by -L, and not from the default locations (/lib, /usr/lib). Any OSnet object found via the default locations means that we've linked to the build server, which is an error we'd like to catch. Non-OSnet objects don't exist in the proto areas, and so are found via the default paths. However, if we were to create a symlink in the stub proto pointing at each non-OSnet dependency that we require, then the non-OSnet objects would also be found via the paths specified by -L, and not from the link-editor defaults. Given the above, we should not find any dependency objects from the link-editor defaults. Any dependency found via the link-editor defaults means that we have a Makefile error, and that we are linking to the build server inappropriately. All we need to make use of this fact is a linker option to produce a warning when it happens. Although warnings are nice, we in the OSnet have a zero tolerance policy for build noise. The -z fatal-warnings option that was recently introduced with -z guidance can be used to turn the warnings into fatal build errors, forcing the programmer to fix them. This was too easy to resist. I integrated 7021198 ld option to warn when link accesses a library via default path PSARC/2011/068 ld -z assert-deflib option into snv_161 (February 2011), shortly after the stub proto was introduced into ON. This putback introduced the -z assert-deflib option to the link-editor: -z assert-deflib=[libname] Enables warning messages for libraries specified with the -l command line option that are found by examining the default search paths provided by the link-editor. If a libname value is provided, the default library warning feature is enabled, and the specified library is added to a list of libraries for which no warnings will be issued. Multiple -z assert-deflib options can be specified in order to specify multiple libraries for which warnings should not be issued. The libname value should be the name of the library file, as found by the link-editor, without any path components. For example, the following enables default library warnings, and excludes the standard C library. ld ... -z assert-deflib=libc.so ... -z assert-deflib is a specialized option, primarily of interest in build environments where multiple objects with the same name exist and tight control over the library used is required. It is not intended for general use. Note that the definition of -z assert-deflib allows for exceptions to be specified as arguments to the option. In general, the idea of using a symlink from the stub proto is superior because it does not clutter up the link command with a long list of objects. When building the OSnet, we usually use the plain form of -z assert-deflib, and make symlinks for the non-OSnet dependencies. The exceptions to this are dependencies supplied by the compiler itself, which are usually found at whatever arbitrary location the compiler happens to be installed at. To handle these special cases, the command line version works better. Following the integration of the link-editor change, I made use of -z assert-deflib in OSnet builds with 7021896 Prevent OSnet from accidentally linking to build system which integrated into snv_162 (March 2011). 
Turning on -z assert-deflib exposed between 10 and 20 existing errors in our Makefiles, which were all fixed in the same putback. The errors we found in our Makefiles underscore how difficult they can be to prevent without an automatic system in place to catch them. Conclusions The stub proto is proving to be a generally useful construct for ON builds that goes beyond serving as a place to hold stub objects. Although invented to hold stub objects, it has already allowed us to simplify a number of previously difficult situations in our makefiles and builds. I expect that we'll find uses for it beyond those described here as we go forward.

    Read the article

  • Paging, sorting and filtering in a stored procedure (SQL Server)

    - by Fruitbat
    I was looking at different ways of writing a stored procedure to return a "page" of data. This was for use with the ASP.NET ObjectDataSource, but it could be considered a more general problem. The requirement is to return a subset of the data based on the usual paging parameters, startPageIndex and maximumRows, but also a sortBy parameter to allow the data to be sorted. Also there are some parameters passed in to filter the data on various conditions. One common way to do this seems to be something like this: [Method 1] ;WITH stuff AS ( SELECT CASE WHEN @SortBy = 'Name' THEN ROW_NUMBER() OVER (ORDER BY Name) WHEN @SortBy = 'Name DESC' THEN ROW_NUMBER() OVER (ORDER BY Name DESC) WHEN @SortBy = ... ELSE ROW_NUMBER() OVER (ORDER BY whatever) END AS Row, ., ., ., FROM Table1 INNER JOIN Table2 ... LEFT JOIN Table3 ... WHERE ... (lots of things to check) ) SELECT * FROM stuff WHERE (Row > @startRowIndex) AND (Row <= @startRowIndex + @maximumRows OR @maximumRows <= 0) ORDER BY Row One problem with this is that it doesn't give the total count and generally we need another stored procedure for that. This second stored procedure has to replicate the parameter list and the complex WHERE clause. Not nice. One solution is to append an extra column to the final select list, (SELECT COUNT(*) FROM stuff) AS TotalRows. This gives us the total but repeats it for every row in the result set, which is not ideal. [Method 2] An interesting alternative is given here (http://www.4guysfromrolla.com/articles/032206-1.aspx) using dynamic SQL. He reckons that the performance is better because the CASE statement in the first solution drags things down. Fair enough, and this solution makes it easy to get the totalRows and slap it into an output parameter. But I hate coding dynamic SQL. All that 'bit of SQL ' + STR(@parm1) +' bit more SQL' gubbins. [Method 3] The only way I can find to get what I want, without repeating code which would have to be synchronised, and keeping things reasonably readable is to go back to the "old way" of using a table variable: DECLARE @stuff TABLE (Row INT, ...) INSERT INTO @stuff SELECT CASE WHEN @SortBy = 'Name' THEN ROW_NUMBER() OVER (ORDER BY Name) WHEN @SortBy = 'Name DESC' THEN ROW_NUMBER() OVER (ORDER BY Name DESC) WHEN @SortBy = ... ELSE ROW_NUMBER() OVER (ORDER BY whatever) END AS Row, ., ., ., FROM Table1 INNER JOIN Table2 ... LEFT JOIN Table3 ... WHERE ... (lots of things to check) SELECT * FROM @stuff WHERE (Row > @startRowIndex) AND (Row <= @startRowIndex + @maximumRows OR @maximumRows <= 0) ORDER BY Row (Or a similar method using an IDENTITY column on the table variable). Here I can just add a SELECT COUNT on the table variable to get the totalRows and put it into an output parameter. I did some tests and with a fairly simple version of the query (no sortBy and no filter), method 1 seems to come up on top (almost twice as quick as the other 2). Then I decided to test something closer to the real requirement: I needed the complexity and I needed the SQL to be in stored procedures. With this I get method 1 taking nearly twice as long as the other 2 methods. Which seems strange. Is there any good reason why I shouldn't spurn CTEs and stick with method 3? UPDATE - 15 March 2012 I tried adapting Method 1 to dump the page from the CTE into a temporary table so that I could extract the TotalRows and then select just the relevant columns for the resultset. This seemed to add significantly to the time (more than I expected). 
I should add that I'm running this on a laptop with SQL Server Express 2008 (all that I have available) but still the comparison should be valid. I looked again at the dynamic SQL method. It turns out I wasn't really doing it properly (just concatenating strings together). I set it up as in the documentation for sp_executesql (with a parameter description string and parameter list) and it's much more readable. Also this method runs fastest in my environment. Why that should be still baffles me, but I guess the answer is hinted at in Hogan's comment.
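
    As a rough illustration of how either flavour of the procedure could be consumed from C# (for instance by the methods an ObjectDataSource binds to), the sketch below passes the paging and sort parameters and reads the row total back through an output parameter. The procedure name and the @TotalRows parameter name are assumptions; the other parameter names follow the ones used in the question.

    using System.Data;
    using System.Data.SqlClient;

    public static class PagedDataAccess
    {
        public static DataTable GetPage(string connectionString, string sortBy,
                                        int startRowIndex, int maximumRows, out int totalRows)
        {
            using (var conn = new SqlConnection(connectionString))
            using (var cmd = new SqlCommand("dbo.GetStuffPage", conn))
            {
                cmd.CommandType = CommandType.StoredProcedure;
                cmd.Parameters.AddWithValue("@SortBy", sortBy);
                cmd.Parameters.AddWithValue("@startRowIndex", startRowIndex);
                cmd.Parameters.AddWithValue("@maximumRows", maximumRows);

                var total = cmd.Parameters.Add("@TotalRows", SqlDbType.Int);
                total.Direction = ParameterDirection.Output;

                var table = new DataTable();
                new SqlDataAdapter(cmd).Fill(table);   // Fill opens/closes the connection and
                                                       // populates the output parameter afterwards
                totalRows = (int)total.Value;
                return table;
            }
        }
    }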

    Read the article

  • Intrinsics program (SSE) - g++ - help needed

    - by Sriram
    Hi all, This is the first time I am posting a question on stackoverflow, so please try and overlook any errors I may have made in formatting my question/code. But please do point the same out to me so I may be more careful. I was trying to write some simple intrinsics routines for the addition of two 128-bit (containing 4 float variables) numbers. I found some code on the net and was trying to get it to run on my system. The code is as follows: //this is a sample Intrinsics program to add two vectors. #include <iostream> #include <iomanip> #include <xmmintrin.h> #include <stdio.h> using namespace std; struct vector4 { float x, y, z, w; }; //functions to operate on them. vector4 set_vector(float x, float y, float z, float w = 0) { vector4 temp; temp.x = x; temp.y = y; temp.z = z; temp.w = w; return temp; } void print_vector(const vector4& v) { cout << " This is the contents of vector: " << endl; cout << " > vector.x = " << v.x << endl; cout << " vector.y = " << v.y << endl; cout << " vector.z = " << v.z << endl; cout << " vector.w = " << v.w << endl; } vector4 sse_vector4_add(const vector4&a, const vector4& b) { vector4 result; asm volatile ( "movl $a, %eax" //move operands into registers. "\n\tmovl $b, %ebx" "\n\tmovups (%eax), xmm0" //move register contents into SSE registers. "\n\tmovups (%ebx), xmm1" "\n\taddps xmm0, xmm1" //add the elements. addps operates on single-precision vectors. "\n\t movups xmm0, result" //move result into vector4 type data. ); return result; } int main() { vector4 a, b, result; a = set_vector(1.1, 2.1, 3.2, 4.5); b = set_vector(2.2, 4.2, 5.6); result = sse_vector4_add(a, b); print_vector(a); print_vector(b); print_vector(result); return 0; } The g++ parameters I use are: g++ -Wall -pedantic -g -march=i386 -msse intrinsics_SSE_example.C -o h The errors I get are as follows: intrinsics_SSE_example.C: Assembler messages: intrinsics_SSE_example.C:45: Error: too many memory references for movups intrinsics_SSE_example.C:46: Error: too many memory references for movups intrinsics_SSE_example.C:47: Error: too many memory references for addps intrinsics_SSE_example.C:48: Error: too many memory references for movups I have spent a lot of time on trying to debug these errors, googled them and so on. I am a complete noob to Intrinsics and so may have overlooked some important things. Any help is appreciated, Thanks, Sriram.

    Read the article

  • TypeError: Error #2007: Parameter child must be non-null.

    - by Bobby Francis Joseph
    I am running the following piece of code: package { import fl.controls.Button; import fl.controls.Label; import fl.controls.RadioButton; import fl.controls.RadioButtonGroup; import flash.display.Sprite; import flash.events.MouseEvent; import flash.text.TextFieldAutoSize; public class RadioButtonExample extends Sprite { private var j:uint; private var padding:uint = 10; private var currHeight:uint = 0; private var verticalSpacing:uint = 30; private var rbg:RadioButtonGroup; private var questionLabel:Label; private var answerLabel:Label; private var question:String = "What day is known internationally as Speak Like A Pirate Day?"; private var answers:Array = [ "August 12", "March 4", "September 19", "June 22" ]; public function RadioButtonExample() { setupQuiz(); } private function setupQuiz():void { setupQuestionLabel(); setupRadioButtons(); setupButton(); setupAnswerLabel(); } private function setupQuestionLabel():void { questionLabel = new Label(); questionLabel.text = question; questionLabel.autoSize = TextFieldAutoSize.LEFT; questionLabel.move(padding, padding + currHeight); currHeight += verticalSpacing; addChild(questionLabel); } private function setupAnswerLabel():void { answerLabel = new Label(); answerLabel.text = ""; answerLabel.autoSize = TextFieldAutoSize.LEFT; answerLabel.move(padding + 120, padding + currHeight); addChild(answerLabel); } private function setupRadioButtons():void { rbg = new RadioButtonGroup("question1"); createRadioButton(answers[0], rbg); createRadioButton(answers[1], rbg); createRadioButton(answers[2], rbg); createRadioButton(answers[3], rbg); } private function setupButton():void { var b:Button = new Button(); b.move(padding, padding + currHeight); b.label = "Check Answer"; b.addEventListener(MouseEvent.CLICK, checkAnswer); addChild(b); } private function createRadioButton(rbLabel:String, rbg:RadioButtonGroup):void { var rb:RadioButton = new RadioButton(); rb.group = rbg; rb.label = rbLabel; rb.move(padding, padding + currHeight); addChild(rb); currHeight += verticalSpacing; } private function checkAnswer(e:MouseEvent):void { if (rbg.selection == null) { return; } var resultStr:String = (rbg.selection.label == answers[2]) ? "Correct" : "Incorrect"; answerLabel.text = resultStr; } } } This is from Adobe Livedocs. http://www.adobe.com/livedocs/flash/9.0/ActionScriptLangRefV3/ I have added a Radiobutton to stage and then deleted it so that its there in the library. However I am getting the following error TypeError: Error #2007: Parameter child must be non-null. at flash.display::DisplayObjectContainer/addChildAt() at fl.controls::BaseButton/fl.controls:BaseButton::drawBackground() at fl.controls::LabelButton/fl.controls:LabelButton::draw() at fl.controls::Button/fl.controls:Button::draw() at fl.core::UIComponent/::callLaterDispatcher() Can anyone tell me what is going on and how to remove this error.

    Read the article

< Previous Page | 55 56 57 58 59 60 61  | Next Page >