Search Results

Search found 10199 results on 408 pages for 'fullscreen video'.


  • Compare images after canny edge detection in OpenCV (C++)

    - by typoknig
    Hi all, I am working on an OpenCV project and I need to compare some images after Canny edge detection has been applied to both of them. Before Canny was applied I had the grayscale images populating a histogram and then compared the histograms, but once Canny is applied to the images the histogram does not populate. I have read that a Canny image can populate a histogram, but I have not found a way to make it happen. I do not necessarily need to keep using the histograms; I just want to know the best way to compare two Canny images. SSCCE below for you to chew on. I have poached and patched about 75% of this code from books and various sites on the internet, so props to those guys...

    // SLC (Histogram).cpp : Defines the entry point for the console application.
    #include "stdafx.h"
    #include <cxcore.h>
    #include <cv.h>
    #include <cvaux.h>
    #include <highgui.h>
    #include <stdio.h>
    #include <sstream>
    #include <iostream>

    using namespace std;

    IplImage* image1 = 0;
    IplImage* imgHistogram1 = 0;
    IplImage* gray1 = 0;
    CvHistogram* hist1;

    int main(){
        CvCapture* capture = cvCaptureFromCAM(0);
        if(!cvQueryFrame(capture)){
            cout << "Video capture failed, please check the camera." << endl;
        }
        else{
            cout << "Video camera capture successful!" << endl;
        }

        CvSize sz = cvGetSize(cvQueryFrame(capture));
        IplImage* image = cvCreateImage(sz, 8, 3);
        IplImage* imgHistogram = 0;
        IplImage* gray = 0;
        CvHistogram* hist;

        cvNamedWindow("Image Source", 1);
        cvNamedWindow("gray", 1);
        cvNamedWindow("Histogram", 1);
        cvNamedWindow("BG", 1);
        cvNamedWindow("FG", 1);
        cvNamedWindow("Canny", 1);
        cvNamedWindow("Canny1", 1);

        //an image has to load here or the program will not run
        image1 = cvLoadImage("image bin/use this image.jpg");

        //size of the histogram - 1D histogram
        int bins1 = 256;
        int hsize1[] = {bins1};
        //max and min value of the histogram
        float max_value1 = 0, min_value1 = 0;
        //value and normalized value
        float value1;
        int normalized1;
        //ranges - grayscale 0 to 256
        float xranges1[] = { 0, 256 };
        float* ranges1[] = { xranges1 };

        //create an 8 bit single channel image to hold a
        //grayscale version of the original picture
        gray1 = cvCreateImage( cvGetSize(image1), 8, 1 );
        cvCvtColor( image1, gray1, CV_BGR2GRAY );
        IplImage* canny1 = cvCreateImage( cvGetSize(gray1), 8, 1 );
        cvCanny( gray1, canny1, 55, 175, 3 );

        //create 3 windows to show the results
        cvNamedWindow("original1", 1);
        cvNamedWindow("gray1", 1);
        cvNamedWindow("histogram1", 1);

        //planes to obtain the histogram, in this case just one
        IplImage* planes1[] = { canny1 };

        //get the histogram and some info about it
        hist1 = cvCreateHist( 1, hsize1, CV_HIST_ARRAY, ranges1, 1 );
        cvCalcHist( planes1, hist1, 0, NULL );
        cvGetMinMaxHistValue( hist1, &min_value1, &max_value1 );
        printf("min: %f, max: %f\n", min_value1, max_value1);

        //create an 8 bit single channel image to hold the histogram
        //paint it white
        imgHistogram1 = cvCreateImage(cvSize(bins1, 50), 8, 1);
        cvRectangle(imgHistogram1, cvPoint(0,0), cvPoint(256,50), CV_RGB(255,255,255), -1);

        //draw the histogram :P
        for(int i = 0; i < bins1; i++){
            value1 = cvQueryHistValue_1D( hist1, i );
            normalized1 = cvRound(value1*50/max_value1);
            cvLine(imgHistogram1, cvPoint(i,50), cvPoint(i,50-normalized1), CV_RGB(0,0,0));
        }

        //show the image results
        cvShowImage( "original1", image1 );
        cvShowImage( "gray1", gray1 );
        cvShowImage( "histogram1", imgHistogram1 );
        cvShowImage( "Canny1", canny1 );

        CvBGStatModel* bg_model = cvCreateFGDStatModel( image );

        for(;;){
            image = cvQueryFrame(capture);
            cvUpdateBGStatModel( image, bg_model );

            //size of the histogram - 1D histogram
            int bins = 256;
            int hsize[] = {bins};
            //max and min value of the histogram
            float max_value = 0, min_value = 0;
            //value and normalized value
            float value;
            int normalized;
            //ranges - grayscale 0 to 256
            float xranges[] = {0, 256};
            float* ranges[] = {xranges};

            //create an 8 bit single channel image to hold a grayscale version of the original picture
            gray = cvCreateImage(cvGetSize(image), 8, 1);
            cvCvtColor(image, gray, CV_BGR2GRAY);
            IplImage* canny = cvCreateImage(cvGetSize(gray), 8, 1);
            cvCanny( gray, canny, 55, 175, 3 ); //55, 175, 3 with direct light

            //planes to obtain the histogram, in this case just one
            IplImage* planes[] = {canny};

            //get the histogram and some info about it
            hist = cvCreateHist(1, hsize, CV_HIST_ARRAY, ranges, 1);
            cvCalcHist(planes, hist, 0, NULL);
            cvGetMinMaxHistValue(hist, &min_value, &max_value);
            //printf("Minimum Histogram Value: %f, Maximum Histogram Value: %f\n", min_value, max_value);

            //create an 8 bit image to hold the histogram and paint it white
            imgHistogram = cvCreateImage(cvSize(bins, 50), 8, 3);
            cvRectangle(imgHistogram, cvPoint(0,0), cvPoint(256,50), CV_RGB(255,255,255), -1);

            //draw the histogram
            for(int i = 0; i < bins; i++){
                value = cvQueryHistValue_1D(hist, i);
                normalized = cvRound(value*50/max_value);
                cvLine(imgHistogram, cvPoint(i,50), cvPoint(i,50-normalized), CV_RGB(0,0,0));
            }

            double correlation = cvCompareHist(hist1, hist, CV_COMP_CORREL);
            double chisquare = cvCompareHist(hist1, hist, CV_COMP_CHISQR);
            double intersection = cvCompareHist(hist1, hist, CV_COMP_INTERSECT);
            double bhattacharyya = cvCompareHist(hist1, hist, CV_COMP_BHATTACHARYYA);
            double difference = (1 - correlation) + chisquare + (1 - intersection) + bhattacharyya;

            printf("correlation: %f\n", correlation);
            printf("chi-square: %f\n", chisquare);
            printf("intersection: %f\n", intersection);
            printf("bhattacharyya: %f\n", bhattacharyya);
            printf("difference: %f\n", difference);

            cvShowImage("Image Source", image);
            cvShowImage("gray", gray);
            cvShowImage("Histogram", imgHistogram);
            cvShowImage("Canny", canny);
            cvShowImage("BG", bg_model->background);
            cvShowImage("FG", bg_model->foreground);

            //Page 19 paragraph 3 of "Learning OpenCV" tells us why we DO NOT use "cvReleaseImage(&image)" in this section
            cvReleaseImage(&imgHistogram);
            cvReleaseImage(&gray);
            cvReleaseHist(&hist);
            cvReleaseImage(&canny);

            char c = cvWaitKey(10);
            //if ASCII key 27 (esc) is pressed then the loop breaks
            if(c == 27) break;
        }

        cvReleaseBGStatModel( &bg_model );
        cvReleaseImage(&image);
        cvReleaseCapture(&capture);
        cvDestroyAllWindows();
    }
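
    Two notes that may help, offered as a sketch rather than a definitive answer. First, a Canny output is essentially binary (every pixel is 0 or 255), so its 256-bin histogram only has mass in the first and last bins; after normalizing by max_value this usually renders as an almost empty plot, which may be why the histogram appears not to populate. Second, if histograms prove too coarse (they capture only edge density, not where the edges are), the two edge maps can be compared pixel by pixel. The helper below is hypothetical (the name cannyDifference and the resize step are not part of the original code); it returns the fraction of pixels at which the two Canny images disagree:

        // Fraction of pixels at which two Canny edge maps disagree
        // (0.0 = identical, 1.0 = completely different).
        // Both inputs are assumed to be 8-bit single-channel images.
        double cannyDifference(IplImage* cannyA, IplImage* cannyB)
        {
            IplImage* b = cannyB;
            IplImage* resized = NULL;
            if (cannyA->width != cannyB->width || cannyA->height != cannyB->height) {
                //bring the second edge map to the size of the first
                resized = cvCreateImage(cvGetSize(cannyA), 8, 1);
                cvResize(cannyB, resized);
                b = resized;
            }

            //XOR leaves a non-zero pixel wherever exactly one image has an edge
            IplImage* diff = cvCreateImage(cvGetSize(cannyA), 8, 1);
            cvXor(cannyA, b, diff);

            double mismatched = cvCountNonZero(diff);
            double total = (double)cannyA->width * cannyA->height;

            cvReleaseImage(&diff);
            if (resized) cvReleaseImage(&resized);
            return mismatched / total;
        }

    Inside the capture loop this could sit next to (or replace) the cvCompareHist block, e.g. double difference = cannyDifference(canny1, canny); where smaller values mean the live edge map is closer to the reference image.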

    Read the article

  • actionscript3: reflect-class applied on rotationY

    - by algro
    Hi, I'm using a class which applies a visual reflection effect to movie clips. I use a reflection class from here: link to source. It works like a charm, except when I apply a rotation to the movie clip: the reflection is still visible, but only a part of it. What am I doing wrong? How could I pass/include the rotation to the Reflect class? Thanks in advance! This is how you apply the Reflect class to your movie clip:

        var ref_mc:MovieClip = new MovieClip();
        addChild(ref_mc);
        var r1:Reflect = new Reflect({mc:ref_mc, alpha:50, ratio:50, distance:0, updateTime:0, reflectionDropoff:1});

    Now I apply a rotation to my movie clip:

        ref_mc.rotationY = 30;

    And here is the Reflect class:

        package com.pixelfumes.reflect{
            import flash.display.MovieClip;
            import flash.display.DisplayObject;
            import flash.display.BitmapData;
            import flash.display.Bitmap;
            import flash.geom.Matrix;
            import flash.display.GradientType;
            import flash.display.SpreadMethod;
            import flash.utils.setInterval;
            import flash.utils.clearInterval;

            public class Reflect extends MovieClip{
                //Created By Ben Pritchard of Pixelfumes 2007
                //Thanks to Mim, Jasper, Jason Merrill and all the others who
                //have contributed to the improvement of this class

                //static var for the version of this class
                private static var VERSION:String = "4.0";
                //reference to the movie clip we are reflecting
                private var mc:MovieClip;
                //the BitmapData object that will hold a visual copy of the mc
                private var mcBMP:BitmapData;
                //the BitmapData object that will hold the reflected image
                private var reflectionBMP:Bitmap;
                //the clip that will act as our gradient mask
                private var gradientMask_mc:MovieClip;
                //how often the reflection should update (if it is video or animated)
                private var updateInt:Number;
                //the size the reflection is allowed to reflect within
                private var bounds:Object;
                //the distance the reflection is vertically from the mc
                private var distance:Number = 0;

                function Reflect(args:Object){
                    /* the args object passes in the following variables;
                       we set the values of our internal vars to match the args */
                    //the clip being reflected
                    mc = args.mc;
                    //the alpha level of the reflection clip
                    var alpha:Number = args.alpha/100;
                    //the ratio of opaque color used in the gradient mask
                    var ratio:Number = args.ratio;
                    //update time interval
                    var updateTime:Number = args.updateTime;
                    //the distance at which the reflection visually drops off
                    var reflectionDropoff:Number = args.reflectionDropoff;
                    //the distance the reflection starts from the bottom of the mc
                    var distance:Number = args.distance;
                    //store width and height of the clip
                    var mcHeight = mc.height;
                    var mcWidth = mc.width;
                    //store the bounds of the reflection
                    bounds = new Object();
                    bounds.width = mcWidth;
                    bounds.height = mcHeight;
                    //create the BitmapData that will hold a snapshot of the movie clip
                    mcBMP = new BitmapData(bounds.width, bounds.height, true, 0xFFFFFF);
                    mcBMP.draw(mc);
                    //create the Bitmap that will hold the reflection
                    reflectionBMP = new Bitmap(mcBMP);
                    //flip the reflection upside down
                    reflectionBMP.scaleY = -1;
                    //move the reflection to the bottom of the movie clip
                    reflectionBMP.y = (bounds.height*2) + distance;
                    //add the reflection to the movie clip's display stack
                    var reflectionBMPRef:DisplayObject = mc.addChild(reflectionBMP);
                    reflectionBMPRef.name = "reflectionBMP";
                    //add a blank movie clip to hold our gradient mask
                    var gradientMaskRef:DisplayObject = mc.addChild(new MovieClip());
                    gradientMaskRef.name = "gradientMask_mc";
                    //get a reference to the movie clip - cast the DisplayObject that is returned as a MovieClip
                    gradientMask_mc = mc.getChildByName("gradientMask_mc") as MovieClip;
                    //set the values for the gradient fill
                    var fillType:String = GradientType.LINEAR;
                    var colors:Array = [0xFFFFFF, 0xFFFFFF];
                    var alphas:Array = [alpha, 0];
                    var ratios:Array = [0, ratio];
                    var spreadMethod:String = SpreadMethod.PAD;
                    //create the Matrix and create the gradient box
                    var matr:Matrix = new Matrix();
                    //set the height of the Matrix used for the gradient mask
                    var matrixHeight:Number;
                    if (reflectionDropoff <= 0) {
                        matrixHeight = bounds.height;
                    } else {
                        matrixHeight = bounds.height/reflectionDropoff;
                    }
                    matr.createGradientBox(bounds.width, matrixHeight, (90/180)*Math.PI, 0, 0);
                    //create the gradient fill
                    gradientMask_mc.graphics.beginGradientFill(fillType, colors, alphas, ratios, matr, spreadMethod);
                    gradientMask_mc.graphics.drawRect(0, 0, bounds.width, bounds.height);
                    //position the mask over the reflection clip
                    gradientMask_mc.y = mc.getChildByName("reflectionBMP").y - mc.getChildByName("reflectionBMP").height;
                    //cache clip as a bitmap so that the gradient mask will function
                    gradientMask_mc.cacheAsBitmap = true;
                    mc.getChildByName("reflectionBMP").cacheAsBitmap = true;
                    //set the mask for the reflection as the gradient mask
                    mc.getChildByName("reflectionBMP").mask = gradientMask_mc;
                    //if we are updating the reflection for a video or animation do so here
                    if(updateTime > -1){
                        updateInt = setInterval(update, updateTime, mc);
                    }
                }

                public function setBounds(w:Number, h:Number):void{
                    //allows the user to set the area that the reflection is allowed;
                    //this is useful for clips that move within themselves
                    bounds.width = w;
                    bounds.height = h;
                    gradientMask_mc.width = bounds.width;
                    redrawBMP(mc);
                }

                public function redrawBMP(mc:MovieClip):void {
                    //redraws the bitmap reflection - Mim Gamiet [2006]
                    mcBMP.dispose();
                    mcBMP = new BitmapData(bounds.width, bounds.height, true, 0xFFFFFF);
                    mcBMP.draw(mc);
                }

                private function update(mc):void {
                    //updates the reflection to visually match the movie clip
                    mcBMP = new BitmapData(bounds.width, bounds.height, true, 0xFFFFFF);
                    mcBMP.draw(mc);
                    reflectionBMP.bitmapData = mcBMP;
                }

                public function destroy():void{
                    //provides a method to remove the reflection
                    mc.removeChild(mc.getChildByName("reflectionBMP"));
                    reflectionBMP = null;
                    mcBMP.dispose();
                    clearInterval(updateInt);
                    mc.removeChild(mc.getChildByName("gradientMask_mc"));
                }
            }
        }
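
    One thing worth trying, offered only as a sketch and not a verified fix: the constructor above measures the clip and sizes the gradient mask from the clip's un-rotated width and height, and those bounds are never re-measured afterwards, so once rotationY changes the projected size, part of the reflection can end up outside the mask. Since setBounds() is already public on the class (and it calls redrawBMP() internally), re-measuring after rotating is the hook the class itself exposes for this situation:

        ref_mc.rotationY = 30;

        //hypothetical follow-up call, not part of the original snippet:
        //hand the class the clip's new projected size so it can resize its
        //gradient mask and rebuild the cached reflection bitmap
        r1.setBounds(ref_mc.width, ref_mc.height);

    If that is not enough, removing the reflection with destroy() and constructing a new Reflect after the rotation has been applied is another option the class already supports.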

    Read the article

  • SQLAuthority News – TechEd India – April 12-14, 2010 Bangalore – An Unforgettable Experience – An Op

    - by pinaldave
    TechEd India was one of the largest technology events in India led by Microsoft. The event was attended by more than 3,000 technology enthusiasts, making it one of the most well-organized events of the year. Though I have attempted to attend almost all the technology events here, I have not seen a bigger or better event in the Indian subcontinent. There were 21 technical tracks at TechEd India 2010, spanning more than 745 learning opportunities. I was fortunate enough to be a part of this whole event, both as a speaker and as a delegate.

    TechEd India Speaker Badge and A Token of Lifetime

    Hotel Selection

    I presented three different sessions at TechEd India and was also part of a panel discussion. (The details of the sessions are given at the end of this blog post.) Due to extensive traveling, I occasionally stay away from my family. For this reason, I took my wife Nupur and my daughter Shaivi (8 months old) to the event with me. We stayed at the same hotel where the event was organized, so as to maximize my time bonding with my family while still having time to network with the technology community. The hotel, Lalit Ashok, is the largest and most luxurious venue one can find in Bangalore, located in the middle of the city. It was a bit pricey, but looking at all the advantages, I decided to book there.

    Hotel Lalit Ashok

    Nupur Dave and Shaivi Dave

    Arrival Day – DAY 0 – April 11, 2010

    I reached the event a day earlier, and that was a wise decision, for I was able to relax a bit and go over my presentation for the next day. I am the kind of person who likes to get everything ready ahead of time. I was also able to enjoy a pleasant evening with several Microsoft employees and my family friends, and I even checked out the location where I would be presenting the next day. I was fortunate to meet Bijoy Singhal from Microsoft, who helped me out with a few logistics issues that occurred the day before; I was not aware that the very next day he was going to be "The Man" of the TechEd 2010 event. Vinod Kumar from Microsoft was very kind and talked to me about my upcoming session. He gave me suggestions that were so helpful that I was able to incorporate them into my presentation. Finally, I was able to meet Abhishek Kant from Microsoft; his valuable suggestions and unlimited passion have inspired many people like me to work with the community. Pradipta from Microsoft was also around, extremely busy with logistics; even so, he found some spare time to chat with me and the other community leaders. I also met Harish Ranganathan and Sachin Rathi, both from Microsoft, and it was so interesting to listen to the two of them talking about SharePoint. I have no words to express my overwhelmed spirit because of all these passionate young guys - Pradipta, Vinod, Bijoy, Harish, Sachin and Abhishek (of course!).

    Map of TechEd India 2010 Event

    Day 1 – April 12, 2010

    From morning until night, this was truly a very busy day for me. I had two presentations and one panel discussion, and, needless to say, a few meetings to attend as well. The day started with a keynote from S. Somasegar, where he announced the launch of Visual Studio 2010. The keynote area was really eye-catching because of the very large, bigger-than-life uniform screen; it was truly a sight to see. The title music of the keynote was very interesting, and it featured Bijoy Singhal as the model. It was fun to talk to him afterwards, when we laughed together at jokes about his modeling assignment.

    TechEd India Keynote Opening Featuring Bijoy

    TechEd India 2010 Keynote – S. Somasegar

    Time: 11:15am – 11:45am
    Session 1: True Lies of SQL Server – SQL Myth Buster

    Following the excellent keynote, I had my very first session, on the subject of SQL Server myths. At first I was a bit nervous: it was right after the keynote, it was my very first session, and during my presentation I saw lots of Microsoft product team members in the audience. Well, it went really well, and I had a very good discussion with the attendees. I felt that well begun was half done, and my confidence returned. Right after the session, I met a few of my community friends and had meaningful discussions with them on many subjects. The abstract of the session is as follows: In this 30-minute demo session, I briefly demonstrate a few SQL Server myths and their resolutions, backing them up with demos. This demo presentation is a must-attend for all developers and administrators coming to the event. It is going to be a very quick yet fun session.

    Pinal Presenting session at TechEd India 2010

    Time: 1:00 PM – 2:00 PM
    Lunch with Somasegar

    After the session I went to see my daughter, and then headed right away to the lunch with S. Somasegar, the keynote speaker and Senior Vice President of the Developer Division at Microsoft. I really thank Abhishek, who made it possible; because of his efforts, all the MVPs had the opportunity to meet such a legendary person and talk with him about Microsoft technology. Though Somasegar holds such a high position at Microsoft, he is very polite and a real gentleman, and how I wish everybody in the industry were like him. Believe me, if you spread love and kindness, that is what you will receive back. As soon as lunch was over, I ran to the session hall, as my second presentation was about to start.

    Time: 2:30pm – 3:30pm
    Session 2: Master Data Services in Microsoft SQL Server 2008 R2

    Business Intelligence was a widely discussed subject at TechEd. Everybody was interested in it, and I did not excuse myself from this great concept either. I consider myself fortunate to have presented on Master Data Services at TechEd. When I initially learned this subject, I had a bit of confusion about the usage of the tool. Later on, I decided to talk about how we developers and DBAs fail to understand something as simple as this and, even worse, create confusion about the technology. During system design, it is very important to have reference material or master lookup tables, so I talked about exactly that and kept it as the center of the session. The session went very well, and I received lots of interesting questions. I got many compliments for covering the subject with real-life scenarios. I really thank Rushabh Mehta (CEO, Solid Quality Mentors India) for his supportive suggestions, which helped me prepare the slide deck as well as the subject.

    Pinal Presenting session at TechEd India 2010

    The abstract of the session is as follows: SQL Server Master Data Services will ship with SQL Server 2008 R2 and will improve Microsoft's platform appeal. This session provides an in-depth demonstration of MDS features and highlights important usage scenarios. Master Data Services enables a consistent decision-making process by allowing you to create, manage and propagate changes from a single master view of your business entities. Also, MDS (the Master Data hub), a vital component, helps ensure the consistency of reporting across systems and deliver faster and more accurate results across the enterprise. We will talk about establishing the basis for a centralized approach to defining, deploying, and managing master data in the enterprise.

    Pinal Presenting session at TechEd India 2010

    The day was still not over for me. I ran into several friends, and we could not keep our enthusiasm under control about the rumors that SQL Server 2008 R2 was about to be launched in the next day's keynote. I then ran to my third and final technical event of the day: a panel discussion with the top technologists of India.

    Time: 5:00pm – 6:00pm
    Panel Discussion: Harness the Power of Web – SEO and Technical Blogging

    Having delivered two technical sessions by this time, I was a bit tired, but no less enthusiastic when it came to talking about blogging and technology. We discussed many different topics. I told them that the most important aspect of any blog is its content. We discussed in depth the issue of plagiarism and how to avoid it. Another topic was how we technology bloggers can create awareness in the community about what the right kind of blogging is and which acts are morally and technically wrong. A couple of questions were raised about what kind of liberty a person has when writing a blog. It was generally agreed that a blog is mainly a representation of our ideas and thoughts and should not be governed by external entities; as long as one writes what they really want to say, does not provide incorrect information, and does not plagiarize, a blogger should be allowed to express himself. The panel discussion was supposed to be over in an hour, but the interest of the participants was remarkable, so it was extended by another 30 minutes. Finally, we brought the discussion to a close and agreed to continue the topic next year.

    TechEd India Panel Discussion on Web, Technology and SEO

    Surprisingly, the day was just beginning after all of this. By this time, I had met almost all the MVPs who had arrived at the event, as well as many Microsoft employees, and there were lots of community folks present, too. I decided to go meet several friends from the community who communicate with me on SQLAuthority.com. I also met Abhishek Baxi and had a good talk with him about Windows Mobile and Twitter. He took a very quick video of me in which I spoke in my mother tongue, Gujarati. It was funny that I had talked in Gujarati almost all day, but when I was speaking in the interview I could not find the right Gujarati words. I think we all think in English when we think about technology, so as to keep it universal. After meeting them, I headed towards the Speakers' Dinner.

    Time: 8:00 PM – onwards
    Speakers Dinner

    The Speakers' Dinner was a wonderful opportunity for all the speakers to get together and relax. We talked about so many different things, from Xbox to Hindi movies, and from SQL to samosas. I just cannot express how much fun I had. After a long evening, when I returned to my room and saw Shaivi, I instantly felt relaxed.
    Kids are really gifts from God. It was a really long but exciting day. So many things happened in just one day: the Visual Studio launch, lunch with Somasegar, two technical sessions, one panel discussion, the community leaders meeting, the speakers dinner and, last but not least, playing with my child! A perfect day!

    Day 2 – April 13, 2010

    The day started with a bang with the excellent keynote by Kamal Hathi, who launched SQL Server 2008 R2 in India and demonstrated the power of PowerPivot to all of us. 101 million rows in Excel drew lots of applause from the audience.

    Kamal Hathi Presenting Keynote at TechEd India 2010

    The day was a bit easier for me. I had no sessions and no events planned, just a few meetings for the second day of the event. I sat in the speakers' lounge for half the day and met many people there. I attended nearly 9 different meetings, on very different subjects. Here is a list of the topics of the community-related meetings:

    - SQL PASS and its involvement in India and the subcontinent
    - How to start community blogging
    - Forums and developing an aptitude for technology
    - Ahmedabad/Gandhinagar user groups and their development
    - SharePoint and SQL
    - Business meeting - a client meeting
    - Business meeting - a potential performance tuning project
    - Business meeting - Solid Quality Mentors (SolidQ)
    - And family friends

    Pinal Dave at TechEd India

    The day passed by quickly with these meetings. In the evening, I headed to the Partners Expo with friends and checked out a few of the booths. I really wanted to talk about some of the products, but because of the freebies there was such a crowd that I finally decided to just take the partners' contact details. I will now start sending them my queries and, hopefully, will get my questions answered. Nupur and Shaivi also had one meeting to attend, with our family friend Vijay Raj. Vijay is a person who loves technology more than anybody. I see him growing and learning every day while still remaining 'human'. I believe that if anyone else acquired as much knowledge as he has, that person would turn into a computer or a cyborg; Vijay, however, is still a kind gentleman and remains our close family friend. Shaivi was really happy to play with Uncle Vijay.

    Pinal Dave and Vijay Raj

    Renuka Prasad, a Microsoft MVP, impressed me with his passion for and knowledge of SQL. He gives me credit for his success every time, which only shows how humble he is: he has far more certifications than I do and has worked with SQL for many more years than I have. He is an excellent photographer as well; most of the photos in this blog post were taken by him. I told him that if he ever wants a part-time job, he could do photography very well.

    Pinal Dave and Renuka Prasad

    I also met L Srividya from Microsoft, whom I had been looking forward to meeting. She is a bundle of knowledge, and everyone would surely learn a lot from her. I was able to get a few minutes with her and, well, I felt confident afterwards. She enlightened me about SQL Server BI concepts, domain management, SQL Server security and a few other interesting details. I also had a wonderful time talking about SharePoint with fellow Solid Quality Mentor Joy Rathnayake. He is very passionate about SharePoint, but when you talk .NET and SQL with him, he is still overwhelmingly knowledgeable. In fact, while talking to him, I found out that the most recent training he delivered was on SQL Server 2008 R2. I joked with him that it hurts my ego that he is now more popular in SQL training and consulting than I am. I am sure all of you agree that working with good people is a gift from God, and I am fortunate enough to work with the best of the best industry experts. It was a great pleasure to hang out with my community friends - Ahswin Kini, HimaBindu Vejella, Vasudev G, Suprotim Agrawal, Dhananjay, Vikram Pendse, Mahesh Dhola, Mahesh Mitkari, Manu Zacharia, Shobhan, Hardik Shah, Ashish Mohta, Manan, Subodh Sohani and Sanjay Shetty (of course!). (Please let me know if I met you at the event and forgot to list your name here.)

    Time: 8:00 PM – onwards
    Community Leaders Dinner

    After lots of meetings, I headed to the Community Leaders dinner and met almost all the folks I had met in the morning. The discussion was much the same, but the really good part was that we were enjoying it, and the food was really good. Nupur was invited to the event, but Shaivi could not come. When Nupur tried to enter, she was stopped because Shaivi did not have a pass for the dinner. Nupur explained that Shaivi is only 8 months old, does not eat outside food, and cannot stay by herself at this age, but the doorkeeper did not agree and insisted that without entry details Shaivi could not go in, though Nupur could. Nupur called me and asked me to help her out. By the time I got outside, the organizer of the event had already reached the door and happily approved Shaivi to join the party. Once inside, Shaivi had lots of fun meeting so many people.

    Shaivi Dave and Abhishek Kant

    Dean Guida (Infragistics President and CEO) and Pinal Dave (SQLAuthority.com)

    Day 3 – April 14, 2010

    Though it was the last day, I was very excited, as I was about to present my favorite session. Query optimization and performance tuning is my domain expertise, and I make my living consulting and training on it. Today's session was on that subject, with an additional twist: spatial databases. I had always been intrigued by spatial databases and have enjoyed learning about them; however, I had never thought about spatial indexing before it was decided that I would do this session. I really thank Solid Quality Mentor Dr. Greg Low for his assistance in preparing the slide deck and for reviewing the content. Furthermore, today was really what I call my 'learning day'. So far I had not attended any session at TechEd, and I felt a bit down about that; everybody spends their valuable time and money to learn something new and exciting at TechEd, and I had not attended a single session, and it was already the last day of the event. So I made a plan for the day and attended two technical sessions before my own session on spatial databases, both delivered by Vinod Kumar. Vinod is a natural storyteller, and there was no doubt that his sessions would be jam-packed; people attend simply because Vinod is the speaker. He has never disappointed an audience; he is truly a good speaker and knows his stuff very well. I personally do not think anyone in India can be compared to him on SQL.

    Time: 12:30pm – 1:30pm
    SQL Server Query Optimization, Execution and Debugging Query Performance

    I really had a fun time attending this session. Vinod made it very interactive; the entire audience really got into the presentation and started participating. Vinod presented a small query tuning problem, one that any developer would have encountered, and solved it with the audience's help in such a way that each developer felt he or she had already resolved it. On one question, I was the only one ready to answer, and Vinod told me in a light tone that I was not allowed to answer it! The audience found that very amusing. There was a huge crowd around Vinod after the session.

    Vinod – A master storyteller!

    Time: 3:45pm – 4:45pm
    Data Recovery / Consistency with CHECKDB

    This session was much heavier than the earlier one, and I must say it is the favorite session I have EVER attended in India. At this TechEd I attended only two sessions, but over my career I have attended numerous technical sessions, not only in India but all over the world, and this one took my breath away. One by one, Vinod took different databases and corrupted them in different ways; each database had some unique way of getting corrupted. Once that was done, Vinod brought out DBCC CHECKDB and demonstrated how it can solve the problem, finally fixing all the databases with this single tool. I have good knowledge of this subject, but let me honestly admit that I learned a lot from this session. I enjoyed and cheered along with the other attendees, and I had the satisfaction that, just like everyone else, I took advantage of the event and learned something. I am now TECHnically EDucated.

    Pinal Dave and Vinod Kumar

    After two very interactive and informative SQL sessions from Vinod Kumar, the next turn was mine, presenting on spatial databases and indexing. I got nervous once again, but Vinod told me to stay natural and do my presentation. Well, once I got onto a huge stage with a total of four projectors and a large crowd, I felt better.

    Time: 5:00pm – 6:00pm
    Session 3: Developing with SQL Server Spatial and Deep Dive into Spatial Indexing

    Pinal Presenting session at TechEd India 2010

    I kicked off this session with Michael J Swart's beautiful spatial image. This session was the last one of the day but, to my surprise, I had more than 200 attendees. The rain was slowly starting outside and I was worried that the hall would not fill up; despite this, there was not a single seat available within the first five minutes of the session. Thanks to all of you for attending my presentation. I showed a map of the world (and of India) and quickly explained what the geography and geometry data types in a spatial database are. The session also told the story of indexing and compared how different traditional indexes are from spatial indexes.

    Pinal Presenting session at TechEd India 2010

    Due to the heavy rain during this event, the power went off for about 22 minutes (just an accident, nobody's fault). During those minutes there was no audio, no video and no light. I continued to address the 200+ people without a microphone or PowerPoint. I must thank the audience, because not a single person left the session; they all stayed in their places, and some moved closer to hear me properly. I noticed that the curiosity and eagerness to learn new things was at its peak even though it was the very last session of TechEd; everybody wanted to get the maximum knowledge out of the event. I was touched by the support from the audience. They listened and participated in my session even without any kind of technology (no PPT, no mike, no AC, nothing).
    During those 22 minutes, I completed my theory verbally.

    Pinal Presenting session at TechEd India 2010

    After a while, we got the projector back online and continued with some exciting demos. Many thanks to the Microsoft people who worked energetically in the background to get the backup power for the projector up. I had a very interesting demo in which I overlaid Bangalore and Hyderabad on the map of India and found the aerial distance between them. After finding it, we browsed online and saw that SQL Server's estimate closely matched the published aerial distance between the two cities. There was huge applause from the crowd at the fact that SQL Server takes the curvature of the earth into account and finds precise distances from the underlying detail. While finding the distance, I demonstrated a few examples of the indexes involved, showing how one can use those indexes to compute these distances and how they can improve the performance of similar queries. I also demonstrated a few examples showing for which data type an index is most useful. We finished the demos with a bit more internal detail.

    Pinal Presenting session at TechEd India 2010

    Despite all the issues, I was mostly satisfied with my presentation. I think it was the best session I have ever presented at any conference. There was no help from technology for a while, but I still got lots of appreciation at the end. When we ended the session, the applause from the audience was so loud that, for a moment, the rain was not audible. I was truly moved by the dedication of the technology enthusiasts.

    Pinal Dave After Presenting session at TechEd India 2010

    The abstract of the session is as follows: Microsoft SQL Server 2008 delivers new spatial data types that enable you to consume, use, and extend location-based data through spatial-enabled applications. Attend this session to learn how to use the spatial functionality in the next version of SQL Server to build and optimize spatial queries. This session outlines how to use the new geography data type to store geodetic spatial data and perform operations on it, use the new geometry data type to store planar spatial data and perform operations on it, take advantage of new spatial indexes for high-performance queries, use the new spatial results tab to quickly and easily view spatial query results directly from within Management Studio, and extend spatial data capabilities by building or integrating location-enabled applications through support for spatial standards and specifications, and much more.

    Time: 8:00 PM – onwards
    Dinner by Sponsors

    After the lively sessions of the day, there was another dinner party, courtesy of one of the sponsors of TechEd. All the MVPs and several community leaders were present at the dinner. I would like to express my gratitude to Abhishek Kant for organizing this wonderful event for us. It was a blast and really relaxing from every angle. We all stayed for a long time and talked about our sweet and unforgettable memories of the event.

    Pinal Dave and Bijoy Singhal

    It was really a wonderful event. After writing this much, I still have no words to express how much I enjoyed TechEd, and in truth I have shared with you only 1% of everything I did at the event. There were so many people I met who are not mentioned here, although I wanted to write their names, too. Anyway, I learned so many things, and even now I am not able to get over all the fun I had at this event.

    Pinal Dave at TechEd India 2010

    The Next Days – April 15, 2010 – till today

    I am still not able to get my mind off the whole experience I had at TechEd India 2010. It was like a whole Microsoft family working together to celebrate a happy occasion. TechEd India – Truly An Unforgettable Experience!

    Reference: Pinal Dave (http://blog.SQLAuthority.com)

    Filed under: About Me, MVP, Pinal Dave, SQL, SQL Authority, SQL Query, SQL Server, SQL Tips and Tricks, SQLAuthority Author Visit, SQLAuthority News, SQLServer, T SQL, Technology
    Tagged: TechEd, TechEdIn

    Read the article

  • SQLAuthority News – TechEd India – April 12-14, 2010 Bangalore – An Unforgettable Experience – An Op

    - by pinaldave
    TechEd India was one of the largest Technology events in India led by Microsoft. This event was attended by more than 3,000 technology enthusiasts, making it one of the most well-organized events of the year. Though I attempted to attend almost all the technology events here, I have not seen any bigger or better event in Indian subcontinents other than this. There are 21 Technical Tracks at Tech·Ed India 2010 that span more than 745 learning opportunities. I was fortunate enough to be a part of this whole event as a speaker and a delegate, as well. TechEd India Speaker Badge and A Token of Lifetime Hotel Selection I presented three different sessions at TechEd India and was also a part of panel discussion. (The details of the sessions are given at the end of this blog post.) Due to extensive traveling, I stay away from my family occasionally. For this reason, I took my wife – Nupur and daughter Shaivi (8 months old) to the event along with me. We stayed at the same hotel where the event was organized so as to maximize my time bonding with my family and to have more time in networking with technology community, at the same time. The hotel Lalit Ashok is the largest and most luxurious venue one can find in Bangalore, located in the middle of the city. The cost of the hotel was a bit pricey, but looking at all the advantages, I had decided to ask for a booking there. Hotel Lalit Ashok Nupur Dave and Shaivi Dave Arrival Day – DAY 0 – April 11, 2010 I reached the event a day earlier, and that was one wise decision for I was able to relax a bit and go over my presentation for the next day’s course. I am a kind of person who likes to get everything ready ahead of time. I was also able to enjoy a pleasant evening with several Microsoft employees and my family friends. I even checked out the location where I would be doing presentations the next day. I was fortunate enough to meet Bijoy Singhal from Microsoft who helped me out with a few of the logistics issues that occured the day before. I was not aware of the fact that the very next day he was going to be “The Man” of the TechEd 2010 event. Vinod Kumar from Microsoft was really very kind as he talked to me regarding my subsequent session. He gave me some suggestions which were really helpful that I was able to incorporate them during my presentation. Finally, I was able to meet Abhishek Kant from Microsoft; his valuable suggestions and unlimited passion have inspired many people like me to work with the Community. Pradipta from Microsoft was also around, being extremely busy with logistics; however, in those busy times, he did find some good spare time to have a chat with me and the other Community leaders. I also met Harish Ranganathan and Sachin Rathi, both from Microsoft. It was so interesting to listen to both of them talking about SharePoint. I just have no words to express my overwhelmed spirit because of all these passionate young guys - Pradipta,Vinod, Bijoy, Harish, Sachin and Ahishek (of course!). Map of TechEd India 2010 Event Day 1 – April 12, 2010 From morning until night time, today was truly a very busy day for me. I had two presentations and one panel discussion for the day. Needless to say, I had a few meetings to attend as well. The day started with a keynote from S. Somaseger where he announced the launch of Visual Studio 2010. The keynote area was really eye-catching because of the very large, bigger-than- life uniform screen. This was truly one to show. 
The title music of the keynote was very interesting and it featured Bijoy Singhal as the model. It was interesting to talk to him afterwards, when we laughed at jokes together about his modeling assignment. TechEd India Keynote Opening Featuring Bijoy TechEd India 2010 Keynote – S. Somasegar Time: 11:15pm – 11:45pm Session 1: True Lies of SQL Server – SQL Myth Buster Following the excellent keynote, I had my very first session on the subject of SQL Server Myth Buster. At first, I was a bit nervous as right after the keynote, for this was my very first session and during my presentation I saw lots of Microsoft Product Team members. Well, it really went well and I had a really good discussion with attendees of the session. I felt that a well begin was half-done and my confidence was regained. Right after the session, I met a few of my Community friends and had meaningful discussions with them on many subjects. The abstract of the session is as follows: In this 30-minute demo session, I am going to briefly demonstrate few SQL Server Myths and their resolutions as I back them up with some demo. This demo presentation is a must-attend for all developers and administrators who would come to the event. This is going to be a very quick yet fun session. Pinal Presenting session at TechEd India 2010 Time: 1:00 PM – 2:00 PM Lunch with Somasegar After the session I went to see my daughter, and then I headed right away to the lunch with S. Somasegar – the keynote speaker and senior vice president of the Developer Division at Microsoft. I really thank to Abhishek who made it possible for us. Because of his efforts, all the MVPs had the opportunity to meet such a legendary person and had to talk with them on Microsoft Technology. Though Somasegar is currently holding such a high position in Microsoft, he is very polite and a real gentleman, and how I wish that everybody in industry is like him. Believe me, if you spread love and kindness, then that is what you will receive back. As soon as lunch time was over, I ran to the session hall as my second presentation was about to start. Time: 2:30pm – 3:30pm Session 2: Master Data Services in Microsoft SQL Server 2008 R2 Business Intelligence is a subject which was widely talked about at TechEd. Everybody was interested in this subject, and I did not excuse myself from this great concept as well. I consider myself fortunate as I was presenting on the subject of Master Data Services at TechEd. When I had initially learned this subject, I had a bit of confusion about the usage of this tool. Later on, I decided that I would tackle about how we all developers and DBAs are not able to understand something so simple such as this, and even worst, creating confusion about the technology. During system designing, it is very important to have a reference material or master lookup tables. Well, I talked about the same subject and presented the session keeping that as my center talk. The session went very well and I received lots of interesting questions. I got many compliments for talking about this subject on the real-life scenario. I really thank Rushabh Mehta (CEO, Solid Quality Mentors India) for his supportive suggestions that helped me prepare the slide deck, as well as the subject. Pinal Presenting session at TechEd India 2010 The abstract of the session is as follows: SQL Server Master Data Services will ship with SQL Server 2008 R2 and will improve Microsoft’s platform appeal. 
This session provides an in-depth demonstration of MDS features and highlights important usage scenarios. Master Data Services enables consistent decision-making process by allowing you to create, manage and propagate changes from a single master view of your business entities. Also, MDS – Master Data-hub which is a vital component, helps ensure the consistency of reporting across systems and deliver faster and more accurate results across the enterprise. We will talk about establishing the basis for a centralized approach to defining, deploying, and managing master data in the enterprise. Pinal Presenting session at TechEd India 2010 The day was still not over for me. I had ran into several friends but we were not able keep our enthusiasm under control about all the rumors saying that SQL Server 2008 R2 was about to be launched tomorrow in the keynote. I then ran to my third and final technical event for the day- a panel discussion with the top technologies of India. Time: 5:00pm – 6:00pm Panel Discussion: Harness the power of Web – SEO and Technical Blogging As I have delivered two technical sessions by this time, I was a bit tired but  not less enthusiastic when I had to talk about Blog and Technology. We discussed many different topics there. I told them that the most important aspect for any blog is its content. We discussed in depth the issues with plagiarism and how to avoid it. Another topic of discussion was how we technology bloggers can create awareness in the Community about what the right kind of blogging is and what morally and technically wrong acts are. A couple of questions were raised about what type of liberty a person can have in terms of writing blogs. Well, it was generically agreed that a blog is mainly a representation of our ideas and thoughts; it should not be governed by external entities. As long as one is writing what they really want to say, but not providing incorrect information or not practicing plagiarism, a blogger should be allowed to express himself. This panel discussion was supposed to be over in an hour, but the interest of the participants was remarkable and so it was extended for 30 minutes more. Finally, we decided to bring to a close the discussion and agreed that we will continue the topic next year. TechEd India Panel Discussion on Web, Technology and SEO Surprisingly, the day was just beginning after doing all of these. By this time, I have almost met all the MVP who arrived at the event, as well as many Microsoft employees. There were lots of Community folks present, too. I decided that I would go to meet several friends from the Community and continue to communicate with me on SQLAuthority.com. I also met Abhishek Baxi and had a good talk with him regarding Win Mobile and Twitter. He also took a very quick video of me wherein I spoke in my mother’s tongue, Gujarati. It was funny that I talked in Gujarati almost all the day, but when I was talking in the interview I could not find the right Gujarati words to speak. I think we all think in English when we think about Technology, so as to address universality. After meeting them, I headed towards the Speakers’ Dinner. Time: 8:00 PM – onwards Speakers Dinner The Speakers’ dinner was indeed a wonderful opportunity for all the speakers to get together and relax. We talked so many different things, from XBOX to Hindi Movies, and from SQL to Samosas. I just could not express how much fun I had. After a long evening, when I returned tmy room and met Shaivi, I just felt instantly relaxed. 
Kids are really gifts from God. Today was a really long but exciting day. So many things happened in just one day: Visual Studio Lanch, lunch with Somasegar, 2 technical sessions, 1 panel discussion, community leaders meeting, speakers dinner and, last but not leas,t playing with my child! A perfect day! Day 2 – April 13, 2010 Today started with a bang with the excellent keynote by Kamal Hathi who launched SQL Server 2008 R2 in India and demonstrated the power of PowerPivot to all of us. 101 Million Rows in Excel brought lots of applause from the audience. Kamal Hathi Presenting Keynote at TechEd India 2010 The day was a bit easier one for me. I had no sessions today and no events planned. I had a few meetings planned for the second day of the event. I sat in the speaker’s lounge for half a day and met many people there. I attended nearly 9 different meetings today. The subjects of the meetings were very different. Here is a list of the topics of the Community-related meetings: SQL PASS and its involvement in India and subcontinents How to start community blogging Forums and developing aptitude towards technology Ahmedabad/Gandhinagar User Groups and their developments SharePoint and SQL Business Meeting – a client meeting Business Meeting – a potential performance tuning project Business Meeting – Solid Quality Mentors (SolidQ) And family friends Pinal Dave at TechEd India The day passed by so quickly during this meeting. In the evening, I headed to Partners Expo with friends and checked out few of the booths. I really wanted to talk about some of the products, but due to the freebies there was so much crowd that I finally decided to just take the contact details of the partner. I will now start sending them with my queries and, hopefully, I will have my questions answered. Nupur and Shaivi had also one meeting to attend; it was with our family friend Vijay Raj. Vijay is also a person who loves Technology and loves it more than anybody. I see him growing and learning every day, but still remaining as a ‘human’. I believe that if someone acquires as much knowledge as him, that person will become either a computer or cyborg. Here, Vijay is still a kind gentleman and is able to stay as our close family friend. Shaivi was really happy to play with Uncle Vijay. Pinal Dave and Vijay Raj Renuka Prasad, a Microsoft MVP, impressed me with his passion and knowledge of SQL. Every time he gives me credit for his success, I believe that he is very humble. He has way more certifications than me and has worked many more years with SQL compared to me. He is an excellent photographer as well. Most of the photos in this blog post have been taken by him. I told him if ever he wants to do a part time job, he can do the photography very well. Pinal Dave and Renuka Prasad I also met L Srividya from Microsoft, whom I was looking forward to meet. She is a bundle of knowledge that everyone would surely learn a lot from her. I was able to get a few minutes from her and well, I felt confident. She enlightened me with SQL Server BI concepts, domain management and SQL Server security and few other interesting details. I also had a wonderful time talking about SharePoint with fellow Solid Quality Mentor Joy Rathnayake. He is very passionate about SharePoint but when you talk .NET and SQL with him, he is still overwhelmingly knowledgeable. In fact, while talking to him, I figured out that the recent training he delivered was on SQL Server 2008 R2. 
I told him a joke that it hurts my ego as he is more popular now in SQL training and consulting than me. I am sure all of you agree that working with good people is a gift from God. I am fortunate enough to work with the best of the best Industry experts. It was a great pleasure to hang out with my Community friends – Ahswin Kini, HimaBindu Vejella, Vasudev G, Suprotim Agrawal, Dhananjay, Vikram Pendse, Mahesh Dhola, Mahesh Mitkari,  Manu Zacharia, Shobhan, Hardik Shah, Ashish Mohta, Manan, Subodh Sohani and Sanjay Shetty (of course!) .  (Please let me know if I have met you at the event and forgot your name to list here). Time: 8:00 PM – onwards Community Leaders Dinner After lots of meetings, I headed towards the Community Leaders dinner meeting and met almost all the folks I met in morning. The discussion was almost the same but the real good thing was that we were enjoying it. The food was really good. Nupur was invited in the event, but Shaivi could not come. When Nupur tried to enter the event, she was stopped as Shaivi did not have the pass to enter the dinner. Nupur expressed that Shaivi is only 8 months old and does not eat outside food as well and could not stay by herself at this age, but the door keeper did not agree and asked that without the entry details Shaivi could not go in, but Nupur could. Nupur called me on phone and asked me to help her out. By the time, I was outside; the organizer of the event reached to the door and happily approved Shaivi to join the party. Once in the party, Shaivi had lots of fun meeting so many people. Shaivi Dave and Abhishek Kant Dean Guida (Infragistics President and CEO) and Pinal Dave (SQLAuthority.com) Day 3 – April 14, 2010 Though, it was last day, I was very much excited today as I was about to present my very favorite session. Query Optimization and Performance Tuning is my domain expertise and I make my leaving by consulting and training the same. Today’s session was on the same subject and as an additional twist, another subject about Spatial Database was presented. I was always intrigued with Spatial Database and I have enjoyed learning about it; however, I have never thought about Spatial Indexing before it was decided that I will do this session. I really thank Solid Quality Mentor Dr. Greg Low for his assistance in helping me prepare the slide deck and also review the content. Furthermore, today was really what I call my ‘learning day’ . So far I had not attended any session in TechEd and I felt a bit down for that. Everybody spends their valuable time & money to learn something new and exciting in TechEd and I had not attended a single session at the moment thinking that it was already last day of the event. I did have a plan for the day and I attended two technical sessions before my session of spatial database. I attended 2 sessions of Vinod Kumar. Vinod is a natural storyteller and there was no doubt that his sessions would be jam-packed. People attended his sessions simply because Vinod is syhe speaker. He did not have a single time disappointed audience; he is truly a good speaker. He knows his stuff very well. I personally do not think that in India he can be compared to anyone for SQL. Time: 12:30pm-1:30pm SQL Server Query Optimization, Execution and Debugging Query Performance I really had a fun time attending this session. Vinod made this session very interactive. The entire audience really got into the presentation and started participating in the event. 
Vinod was presenting a small problem with Query Tuning, which any developer would have encountered and solved with their help in such a fashion that a developer feels he or she have already resolved it. In one question, I was the only one who was ready to answer and Vinod told me in a light tone that I am now allowed to answer it! The audience really found it very amusing. There was a huge crowd around Vinod after the session. Vinod – A master storyteller! Time: 3:45pm-4:45pm Data Recovery / consistency with CheckDB This session was much heavier than the earlier one, and I must say this is my most favorite session I EVER attended in India. In this TechEd I have only attended two sessions, but in my career, I have attended numerous technical sessions not only in India, but all over the world. This session had taken my breath away. One by one, Vinod took the different databases, and started to corrupt them in different ways. Each database has some unique ways to get corrupted. Once that was done, Vinod started to show the DBCC CEHCKDB and demonstrated how it can solve your problem. He finally fixed all the databases with this single tool. I do have a good knowledge of this subject, but let me honestly admit that I have learned a lot from this session. I enjoyed and cheered during this session along with other attendees. I had total satisfaction that, just like everyone, I took advantage of the event and learned something. I am now TECHnically EDucated. Pinal Dave and Vinod Kumar After two very interactive and informative SQL Sessions from Vinod Kumar, the next turn me presenting on Spatial Database and Indexing. I got once again nervous but Vinod told me to stay natural and do my presentation. Well, once I got a huge stage with a total of four projectors and a large crowd, I felt better. Time: 5:00pm-6:00pm Session 3: Developing with SQL Server Spatial and Deep Dive into Spatial Indexing Pinal Presenting session at TechEd India 2010 Pinal Presenting session at TechEd India 2010 I kicked off this session with Michael J Swart‘s beautiful spatial image. This session was the last one for the day but, to my surprise, I had more than 200+ attendees. Slowly, the rain was starting outside and I was worried that the hall would not be full; despite this, there was not a single seat available in the first five minutes of the session. Thanks to all of you for attending my presentation. I had demonstrated the map of world (and India) and quickly explained what  Geographic and Geometry data types in Spatial Database are. This session had interesting story of Indexing and Comparison, as well as how different traditional indexes are from spatial indexing. Pinal Presenting session at TechEd India 2010 Due to the heavy rain during this event, the power went off for about 22 minutes (just an accident – nobodies fault). During these minutes, there were no audio, no video and no light. I continued to address the mass of 200+ people without any audio device and PowerPoint. I must thank the audience because not a single person left from the session. They all stayed in their place, some moved closure to listen to me properly. I noticed that the curiosity and eagerness to learn new things was at the peak even though it was the very last session of the TechEd. Everybody wanted get the maximum knowledge out of this whole event. I was touched by the support from audience. They listened and participated in my session even without any kinds of technology (no ppt, no mike, no AC, nothing). 
During those 22 minutes, I completed my theory verbally. Pinal Presenting session at TechEd India 2010 After a while, we got the projector back online and continued with some exciting demos. Many thanks to the Microsoft people who worked energetically in the background to get the backup power for the projector up. I had a very interesting demo wherein I overlaid Bangalore and Hyderabad on the India map and found the aerial distance between them. After finding the aerial distance, we browsed online and found that SQL Server's estimate of the aerial distance between these two cities closely matched the factual distance. There was huge applause from the crowd because SQL Server takes the curvature of the earth into account and finds precise distances based on those details. During the process of finding the distance, I demonstrated a few examples of indexes, showing how one can use them to find these distances and how they can improve the performance of similar queries. I also demonstrated a few examples where we could see with which data type the index is most useful. We finished the demos with a bit more about the internals. Pinal Presenting session at TechEd India 2010 Despite all the issues, I was mostly satisfied with my presentation. I think it was the best session I have ever presented at any conference. There was no help from technology for a while, but I still got lots of appreciation at the end. When we ended the session, the applause from the audience was so loud that, for a moment, the rain was not audible. I was truly moved by the dedication of the technology enthusiasts. Pinal Dave After Presenting session at TechEd India 2010 The abstract of the session is as follows: Microsoft SQL Server 2008 delivers new spatial data types that enable you to consume, use, and extend location-based data through spatial-enabled applications. Attend this session to learn how to use spatial functionality in the next version of SQL Server to build and optimize spatial queries. This session outlines how to use the new geography data type to store geodetic spatial data and perform operations on it, use the new geometry data type to store planar spatial data and perform operations on it, take advantage of the new spatial indexes for high-performance queries, use the new spatial results tab to quickly and easily view spatial query results directly from within Management Studio, and extend spatial data capabilities by building or integrating location-enabled applications through support for spatial standards and specifications, and much more. Time: 8:00 PM – onwards Dinner by Sponsors After the lively sessions during the day, there was another dinner party, courtesy of one of the sponsors of TechEd. All the MVPs and several Community leaders were present at the dinner. I would like to express my gratitude to Abhishek Kant for organizing this wonderful event for us. It was a blast and really relaxing in every way. We all stayed there for a long time and talked about our sweet and unforgettable memories of the event. Pinal Dave and Bijoy Singhal It was really one wonderful event. Even after writing this much, I have no words to express how much I enjoyed TechEd. However, it is true that I have shared with you only 1% of the total activities I took part in at the event. There were so many people I met who are not mentioned here, although I wanted to write their names here, too. 
Anyway, I learned so many things, and even now I am not able to get over all the fun I had at this event. Pinal Dave at TechEd India 2010 The Next Days – April 15, 2010 – till today I am still not able to get my mind off the whole experience I had at TechEd India 2010. It was like a whole Microsoft Family working together to celebrate a happy occasion. TechEd India – Truly An Unforgettable Experience! Reference: Pinal Dave (http://blog.SQLAuthority.com) Filed under: About Me, MVP, Pinal Dave, SQL, SQL Authority, SQL Query, SQL Server, SQL Tips and Tricks, SQLAuthority Author Visit, SQLAuthority News, SQLServer, T SQL, Technology Tagged: TechEd, TechEdIn

    Read the article

  • Introduction to the ASP.NET Web API

    - by Stephen.Walther
    I am a huge fan of Ajax. If you want to create a great experience for the users of your website – regardless of whether you are building an ASP.NET MVC or an ASP.NET Web Forms site — then you need to use Ajax. Otherwise, you are just being cruel to your customers. We use Ajax extensively in several of the ASP.NET applications that my company, Superexpert.com, builds. We expose data from the server as JSON and use jQuery to retrieve and update that data from the browser. One challenge, when building an ASP.NET website, is deciding on which technology to use to expose JSON data from the server. For example, how do you expose a list of products from the server as JSON so you can retrieve the list of products with jQuery? You have a number of options (too many options) including ASMX Web services, WCF Web Services, ASHX Generic Handlers, WCF Data Services, and MVC controller actions. Fortunately, the world has just been simplified. With the release of ASP.NET 4 Beta, Microsoft has introduced a new technology for exposing JSON from the server named the ASP.NET Web API. You can use the ASP.NET Web API with both ASP.NET MVC and ASP.NET Web Forms applications. The goal of this blog post is to provide you with a brief overview of the features of the new ASP.NET Web API. You learn how to use the ASP.NET Web API to retrieve, insert, update, and delete database records with jQuery. We also discuss how you can perform form validation when using the Web API and use OData when using the Web API. Creating an ASP.NET Web API Controller The ASP.NET Web API exposes JSON data through a new type of controller called an API controller. You can add an API controller to an existing ASP.NET MVC 4 project through the standard Add Controller dialog box. Right-click your Controllers folder and select Add, Controller. In the dialog box, name your controller MovieController and select the Empty API controller template: A brand new API controller looks like this: using System; using System.Collections.Generic; using System.Linq; using System.Net.Http; using System.Web.Http; namespace MyWebAPIApp.Controllers { public class MovieController : ApiController { } } An API controller, unlike a standard MVC controller, derives from the base ApiController class instead of the base Controller class. Using jQuery to Retrieve, Insert, Update, and Delete Data Let’s create an Ajaxified Movie Database application. We’ll retrieve, insert, update, and delete movies using jQuery with the MovieController which we just created. Our Movie model class looks like this: namespace MyWebAPIApp.Models { public class Movie { public int Id { get; set; } public string Title { get; set; } public string Director { get; set; } } } Our application will consist of a single HTML page named Movies.html. We’ll place all of our jQuery code in the Movies.html page. Getting a Single Record with the ASP.NET Web API To support retrieving a single movie from the server, we need to add a Get method to our API controller: using System; using System.Collections.Generic; using System.Linq; using System.Net; using System.Net.Http; using System.Web.Http; using MyWebAPIApp.Models; namespace MyWebAPIApp.Controllers { public class MovieController : ApiController { public Movie GetMovie(int id) { // Return movie by id if (id == 1) { return new Movie { Id = 1, Title = "Star Wars", Director = "Lucas" }; } // Otherwise, movie was not found throw new HttpResponseException(HttpStatusCode.NotFound); } } } In the code above, the GetMovie() method accepts the Id of a movie. 
If the Id has the value 1 then the method returns the movie Star Wars. Otherwise, the method throws an exception and returns 404 Not Found HTTP status code. After building your project, you can invoke the MovieController.GetMovie() method by entering the following URL in your web browser address bar: http://localhost:[port]/api/movie/1 (You’ll need to enter the correct randomly generated port). In the URL api/movie/1, the first “api” segment indicates that this is a Web API route. The “movie” segment indicates that the MovieController should be invoked. You do not specify the name of the action. Instead, the HTTP method used to make the request – GET, POST, PUT, DELETE — is used to identify the action to invoke. The ASP.NET Web API uses different routing conventions than normal ASP.NET MVC controllers. When you make an HTTP GET request then any API controller method with a name that starts with “GET” is invoked. So, we could have called our API controller action GetPopcorn() instead of GetMovie() and it would still be invoked by the URL api/movie/1. The default route for the Web API is defined in the Global.asax file and it looks like this: routes.MapHttpRoute( name: "DefaultApi", routeTemplate: "api/{controller}/{id}", defaults: new { id = RouteParameter.Optional } ); We can invoke our GetMovie() controller action with the jQuery code in the following HTML page: <!DOCTYPE html> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <title>Get Movie</title> </head> <body> <div> Title: <span id="title"></span> </div> <div> Director: <span id="director"></span> </div> <script type="text/javascript" src="Scripts/jquery-1.6.2.min.js"></script> <script type="text/javascript"> getMovie(1, function (movie) { $("#title").html(movie.Title); $("#director").html(movie.Director); }); function getMovie(id, callback) { $.ajax({ url: "/api/Movie", data: { id: id }, type: "GET", contentType: "application/json;charset=utf-8", statusCode: { 200: function (movie) { callback(movie); }, 404: function () { alert("Not Found!"); } } }); } </script> </body> </html> In the code above, the jQuery $.ajax() method is used to invoke the GetMovie() method. Notice that the Ajax call handles two HTTP response codes. When the GetMove() method successfully returns a movie, the method returns a 200 status code. In that case, the details of the movie are displayed in the HTML page. Otherwise, if the movie is not found, the GetMovie() method returns a 404 status code. In that case, the page simply displays an alert box indicating that the movie was not found (hopefully, you would implement something more graceful in an actual application). You can use your browser’s Developer Tools to see what is going on in the background when you open the HTML page (hit F12 in the most recent version of most browsers). For example, you can use the Network tab in Google Chrome to see the Ajax request which invokes the GetMovie() method: Getting a Set of Records with the ASP.NET Web API Let’s modify our Movie API controller so that it returns a collection of movies. 
The following Movie controller has a new ListMovies() method which returns a (hard-coded) collection of movies: using System; using System.Collections.Generic; using System.Linq; using System.Net; using System.Net.Http; using System.Web.Http; using MyWebAPIApp.Models; namespace MyWebAPIApp.Controllers { public class MovieController : ApiController { public IEnumerable<Movie> ListMovies() { return new List<Movie> { new Movie {Id=1, Title="Star Wars", Director="Lucas"}, new Movie {Id=1, Title="King Kong", Director="Jackson"}, new Movie {Id=1, Title="Memento", Director="Nolan"} }; } } } Because we named our action ListMovies(), the default Web API route will never match it. Therefore, we need to add the following custom route to our Global.asax file (at the top of the RegisterRoutes() method): routes.MapHttpRoute( name: "ActionApi", routeTemplate: "api/{controller}/{action}/{id}", defaults: new { id = RouteParameter.Optional } ); This route enables us to invoke the ListMovies() method with the URL /api/movie/listmovies. Now that we have exposed our collection of movies from the server, we can retrieve and display the list of movies using jQuery in our HTML page: <!DOCTYPE html> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <title>List Movies</title> </head> <body> <div id="movies"></div> <script type="text/javascript" src="Scripts/jquery-1.6.2.min.js"></script> <script type="text/javascript"> listMovies(function (movies) { var strMovies=""; $.each(movies, function (index, movie) { strMovies += "<div>" + movie.Title + "</div>"; }); $("#movies").html(strMovies); }); function listMovies(callback) { $.ajax({ url: "/api/Movie/ListMovies", data: {}, type: "GET", contentType: "application/json;charset=utf-8", }).then(function(movies){ callback(movies); }); } </script> </body> </html>     Inserting a Record with the ASP.NET Web API Now let’s modify our Movie API controller so it supports creating new records: public HttpResponseMessage<Movie> PostMovie(Movie movieToCreate) { // Add movieToCreate to the database and update primary key movieToCreate.Id = 23; // Build a response that contains the location of the new movie var response = new HttpResponseMessage<Movie>(movieToCreate, HttpStatusCode.Created); var relativePath = "/api/movie/" + movieToCreate.Id; response.Headers.Location = new Uri(Request.RequestUri, relativePath); return response; } The PostMovie() method in the code above accepts a movieToCreate parameter. We don’t actually store the new movie anywhere. In real life, you will want to call a service method to store the new movie in a database. When you create a new resource, such as a new movie, you should return the location of the new resource. In the code above, the URL where the new movie can be retrieved is assigned to the Location header returned in the PostMovie() response. Because the name of our method starts with “Post”, we don’t need to create a custom route. The PostMovie() method can be invoked with the URL /Movie/PostMovie – just as long as the method is invoked within the context of a HTTP POST request. The following HTML page invokes the PostMovie() method. 
<!DOCTYPE html> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <title>Create Movie</title> </head> <body> <script type="text/javascript" src="Scripts/jquery-1.6.2.min.js"></script> <script type="text/javascript"> var movieToCreate = { title: "The Hobbit", director: "Jackson" }; createMovie(movieToCreate, function (newMovie) { alert("New movie created with an Id of " + newMovie.Id); }); function createMovie(movieToCreate, callback) { $.ajax({ url: "/api/Movie", data: JSON.stringify( movieToCreate ), type: "POST", contentType: "application/json;charset=utf-8", statusCode: { 201: function (newMovie) { callback(newMovie); } } }); } </script> </body> </html> This page creates a new movie (the Hobbit) by calling the createMovie() method. The page simply displays the Id of the new movie: The HTTP Post operation is performed with the following call to the jQuery $.ajax() method: $.ajax({ url: "/api/Movie", data: JSON.stringify( movieToCreate ), type: "POST", contentType: "application/json;charset=utf-8", statusCode: { 201: function (newMovie) { callback(newMovie); } } }); Notice that the type of Ajax request is a POST request. This is required to match the PostMovie() method. Notice, furthermore, that the new movie is converted into JSON using JSON.stringify(). The JSON.stringify() method takes a JavaScript object and converts it into a JSON string. Finally, notice that success is represented with a 201 status code. The HttpStatusCode.Created value returned from the PostMovie() method returns a 201 status code. Updating a Record with the ASP.NET Web API Here’s how we can modify the Movie API controller to support updating an existing record. In this case, we need to create a PUT method to handle an HTTP PUT request: public void PutMovie(Movie movieToUpdate) { if (movieToUpdate.Id == 1) { // Update the movie in the database return; } // If you can't find the movie to update throw new HttpResponseException(HttpStatusCode.NotFound); } Unlike our PostMovie() method, the PutMovie() method does not return a result. The action either updates the database or, if the movie cannot be found, returns an HTTP Status code of 404. The following HTML page illustrates how you can invoke the PutMovie() method: <!DOCTYPE html> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <title>Put Movie</title> </head> <body> <script type="text/javascript" src="Scripts/jquery-1.6.2.min.js"></script> <script type="text/javascript"> var movieToUpdate = { id: 1, title: "The Hobbit", director: "Jackson" }; updateMovie(movieToUpdate, function () { alert("Movie updated!"); }); function updateMovie(movieToUpdate, callback) { $.ajax({ url: "/api/Movie", data: JSON.stringify(movieToUpdate), type: "PUT", contentType: "application/json;charset=utf-8", statusCode: { 200: function () { callback(); }, 404: function () { alert("Movie not found!"); } } }); } </script> </body> </html> Deleting a Record with the ASP.NET Web API Here’s the code for deleting a movie: public HttpResponseMessage DeleteMovie(int id) { // Delete the movie from the database // Return status code return new HttpResponseMessage(HttpStatusCode.NoContent); } This method simply deletes the movie (well, not really, but pretend that it does) and returns a No Content status code (204). 
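The DeleteMovie() body above is intentionally stubbed out. As a rough sketch only (the _repository field is hypothetical, not part of the Web API; it simply marks where real data access would go), a version that actually touches a data store and distinguishes a missing movie from a successful delete might look like this:

public HttpResponseMessage DeleteMovie(int id)
{
    // _repository is a hypothetical data-access object used only for illustration.
    Movie movie = _repository.GetById(id);

    if (movie == null)
    {
        // Nothing to delete, so report that the resource does not exist.
        return new HttpResponseMessage(HttpStatusCode.NotFound);
    }

    _repository.Delete(id);

    // Same result as the stubbed version above: 204 No Content.
    return new HttpResponseMessage(HttpStatusCode.NoContent);
}

The jQuery caller shown next does not need to change; it still keys off the 204 status code.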
The following page illustrates how you can invoke the DeleteMovie() action: <!DOCTYPE html> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <title>Delete Movie</title> </head> <body> <script type="text/javascript" src="Scripts/jquery-1.6.2.min.js"></script> <script type="text/javascript"> deleteMovie(1, function () { alert("Movie deleted!"); }); function deleteMovie(id, callback) { $.ajax({ url: "/api/Movie", data: JSON.stringify({id:id}), type: "DELETE", contentType: "application/json;charset=utf-8", statusCode: { 204: function () { callback(); } } }); } </script> </body> </html> Performing Validation How do you perform form validation when using the ASP.NET Web API? Because validation in ASP.NET MVC is driven by the Default Model Binder, and because the Web API uses the Default Model Binder, you get validation for free. Let’s modify our Movie class so it includes some of the standard validation attributes: using System.ComponentModel.DataAnnotations; namespace MyWebAPIApp.Models { public class Movie { public int Id { get; set; } [Required(ErrorMessage="Title is required!")] [StringLength(5, ErrorMessage="Title cannot be more than 5 characters!")] public string Title { get; set; } [Required(ErrorMessage="Director is required!")] public string Director { get; set; } } } In the code above, the Required validation attribute is used to make both the Title and Director properties required. The StringLength attribute is used to require the length of the movie title to be no more than 5 characters. Now let’s modify our PostMovie() action to validate a movie before adding the movie to the database: public HttpResponseMessage PostMovie(Movie movieToCreate) { // Validate movie if (!ModelState.IsValid) { var errors = new JsonArray(); foreach (var prop in ModelState.Values) { if (prop.Errors.Any()) { errors.Add(prop.Errors.First().ErrorMessage); } } return new HttpResponseMessage<JsonValue>(errors, HttpStatusCode.BadRequest); } // Add movieToCreate to the database and update primary key movieToCreate.Id = 23; // Build a response that contains the location of the new movie var response = new HttpResponseMessage<Movie>(movieToCreate, HttpStatusCode.Created); var relativePath = "/api/movie/" + movieToCreate.Id; response.Headers.Location = new Uri(Request.RequestUri, relativePath); return response; } If ModelState.IsValid has the value false then the errors in model state are copied to a new JSON array. Each property – such as the Title and Director property — can have multiple errors. In the code above, only the first error message is copied over. The JSON array is returned with a Bad Request status code (400 status code). 
The following HTML page illustrates how you can invoke our modified PostMovie() action and display any error messages: <!DOCTYPE html> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <title>Create Movie</title> </head> <body> <script type="text/javascript" src="Scripts/jquery-1.6.2.min.js"></script> <script type="text/javascript"> var movieToCreate = { title: "The Hobbit", director: "" }; createMovie(movieToCreate, function (newMovie) { alert("New movie created with an Id of " + newMovie.Id); }, function (errors) { var strErrors = ""; $.each(errors, function(index, err) { strErrors += "*" + err + "\n"; }); alert(strErrors); } ); function createMovie(movieToCreate, success, fail) { $.ajax({ url: "/api/Movie", data: JSON.stringify(movieToCreate), type: "POST", contentType: "application/json;charset=utf-8", statusCode: { 201: function (newMovie) { success(newMovie); }, 400: function (xhr) { var errors = JSON.parse(xhr.responseText); fail(errors); } } }); } </script> </body> </html> The createMovie() function performs an Ajax request and handles either a 201 or a 400 status code from the response. If a 201 status code is returned then there were no validation errors and the new movie was created. If, on the other hand, a 400 status code is returned then there was a validation error. The validation errors are retrieved from the XmlHttpRequest responseText property. The error messages are displayed in an alert: (Please don’t use JavaScript alert dialogs to display validation errors, I just did it this way out of pure laziness) This validation code in our PostMovie() method is pretty generic. There is nothing specific about this code to the PostMovie() method. In the following video, Jon Galloway demonstrates how to create a global Validation filter which can be used with any API controller action: http://www.asp.net/web-api/overview/web-api-routing-and-actions/video-custom-validation His validation filter looks like this: using System.Json; using System.Linq; using System.Net; using System.Net.Http; using System.Web.Http.Controllers; using System.Web.Http.Filters; namespace MyWebAPIApp.Filters { public class ValidationActionFilter:ActionFilterAttribute { public override void OnActionExecuting(HttpActionContext actionContext) { var modelState = actionContext.ModelState; if (!modelState.IsValid) { dynamic errors = new JsonObject(); foreach (var key in modelState.Keys) { var state = modelState[key]; if (state.Errors.Any()) { errors[key] = state.Errors.First().ErrorMessage; } } actionContext.Response = new HttpResponseMessage<JsonValue>(errors, HttpStatusCode.BadRequest); } } } } And you can register the validation filter in the Application_Start() method in the Global.asax file like this: GlobalConfiguration.Configuration.Filters.Add(new ValidationActionFilter()); After you register the Validation filter, validation error messages are returned from any API controller action method automatically when validation fails. You don’t need to add any special logic to any of your API controller actions to take advantage of the filter. Querying using OData The OData protocol is an open protocol created by Microsoft which enables you to perform queries over the web. The official website for OData is located here: http://odata.org For example, here are some of the query options which you can use with OData: · $orderby – Enables you to retrieve results in a certain order. · $top – Enables you to retrieve a certain number of results. 
· $skip – Enables you to skip over a certain number of results (use with $top for paging). · $filter – Enables you to filter the results returned. The ASP.NET Web API supports a subset of the OData protocol. You can use all of the query options listed above when interacting with an API controller. The only requirement is that the API controller action returns its data as IQueryable. For example, the following Movie controller has an action named GetMovies() which returns an IQueryable of movies: public IQueryable<Movie> GetMovies() { return new List<Movie> { new Movie {Id=1, Title="Star Wars", Director="Lucas"}, new Movie {Id=2, Title="King Kong", Director="Jackson"}, new Movie {Id=3, Title="Willow", Director="Lucas"}, new Movie {Id=4, Title="Shrek", Director="Smith"}, new Movie {Id=5, Title="Memento", Director="Nolan"} }.AsQueryable(); } If you enter the following URL in your browser: /api/movie?$top=2&$orderby=Title Then you will limit the movies returned to the top 2 in order of the movie Title. You will get the following results: By using the $top option in combination with the $skip option, you can enable client-side paging. For example, you can use $top and $skip to page through thousands of products, 10 products at a time. The $filter query option is very powerful. You can use this option to filter the results from a query. Here are some examples: Return every movie directed by Lucas: /api/movie?$filter=Director eq ‘Lucas’ Return every movie which has a title which starts with ‘S’: /api/movie?$filter=startswith(Title,’S') Return every movie which has an Id greater than 2: /api/movie?$filter=Id gt 2 The complete documentation for the $filter option is located here: http://www.odata.org/developers/protocols/uri-conventions#FilterSystemQueryOption Summary The goal of this blog entry was to provide you with an overview of the new ASP.NET Web API introduced with the Beta release of ASP.NET 4. In this post, I discussed how you can retrieve, insert, update, and delete data by using jQuery with the Web API. I also discussed how you can use the standard validation attributes with the Web API. You learned how to return validation error messages to the client and display the error messages using jQuery. Finally, we briefly discussed how the ASP.NET Web API supports the OData protocol. For example, you learned how to filter records returned from an API controller action by using the $filter query option. I’m excited about the new Web API. This is a feature which I expect to use with almost every ASP.NET application which I build in the future.
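As a small addendum to the OData section above: the same $top and $orderby query can also be issued from .NET code instead of the browser address bar. A minimal sketch, assuming the site runs on a hypothetical local port (use whatever port Visual Studio assigned to your project), might look like this:

using System;
using System.Net.Http;

class ODataQuerySketch
{
    static void Main()
    {
        // Hypothetical base address for the sample site.
        var client = new HttpClient { BaseAddress = new Uri("http://localhost:1234/") };

        // Ask GetMovies() for the top 2 movies ordered by Title, just like the browser example.
        string json = client.GetStringAsync("api/movie?$top=2&$orderby=Title").Result;

        Console.WriteLine(json); // the raw JSON returned by the Web API
    }
}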

    Read the article

  • XNA Screen Manager problem with transitions

    - by NexAddo
    I'm having issues using the game statemanagement example in the game I am developing. I have no issues with my first three screens transitioning between one another. I have a main menu screen, a splash screen and a high score screen that cycle: mainMenuScreen->splashScreen->highScoreScreen->mainMenuScreen The screens change every 15 seconds. Transition times public MainMenuScreen() { TransitionOnTime = TimeSpan.FromSeconds(0.5); TransitionOffTime = TimeSpan.FromSeconds(0.0); currentCreditAmount = Global.CurrentCredits; } public SplashScreen() { TransitionOnTime = TimeSpan.FromSeconds(0.5); TransitionOffTime = TimeSpan.FromSeconds(0.5); } public HighScoreScreen() { TransitionOnTime = TimeSpan.FromSeconds(0.5); TransitionOffTime = TimeSpan.FromSeconds(0.5); } public GamePlayScreen() { TransitionOnTime = TimeSpan.FromSeconds(0.5); TransitionOffTime = TimeSpan.FromSeconds(0.5); } When a user inserts credits they can play the game after pressing start mainMenuScreen->splashScreen->highScoreScreen->(loops forever) || || || ===========Credits In============= || Start || \/ LoadingScreen || Start || \/ GamePlayScreen During each of these transitions, between screens, the same code is used, which exits(removes) all current active screens and respects transitions, then adds the new screen to the screen manager: foreach (GameScreen screen in ScreenManager.GetScreens()) screen.ExitScreen(); //AddScreen takes a new screen to manage and the controlling player ScreenManager.AddScreen(new NameOfScreenHere(), null); Each screen is removed from the ScreenManager with ExitScreen() and using this function, each screen transition is respected. The problem I am having is with my gamePlayScreen. When the current game is finished and the transition is complete for the gamePlayScreen, it should be removed and the next screens should be added to the ScreenManager. GamePlayScreen Code Snippet private void FinishCurrentGame() { AudioManager.StopSounds(); this.UnloadContent(); if (Global.SaveDevice.IsReady) Stats.Save(); if (HighScoreScreen.IsInHighscores(timeLimit)) { foreach (GameScreen screen in ScreenManager.GetScreens()) screen.ExitScreen(); Global.TimeRemaining = timeLimit; ScreenManager.AddScreen(new BackgroundScreen(), null); ScreenManager.AddScreen(new MessageBoxScreen("Enter your Initials", true), null); } else { foreach (GameScreen screen in ScreenManager.GetScreens()) screen.ExitScreen(); ScreenManager.AddScreen(new BackgroundScreen(), null); ScreenManager.AddScreen(new MainMenuScreen(), null); } } The problem is that when isExiting is set to true by screen.ExitScreen() for the gamePlayScreen, the transition never completes the transition and removes the screen from the ScreenManager. Every other screen that I use the same technique to add and remove each screen fully transitions On/Off and is removed at the appropriate time from the ScreenManager, but noy my GamePlayScreen. Has anyone that has used the GameStateManagement example experienced this issue or can someone see the mistake I am making? EDIT This is what I tracked down. When the game is done, I call foreach (GameScreen screen in ScreenManager.GetScreens()) screen.ExitScreen(); to start the transition off process for the gameplay screen. At this point there is only 1 screen on the ScreenManager stack. The gamePlay screen gets isExiting set to true and starts to transition off. 
Right after the above call to ExitScreen() I add a background screen and menu screen to the screenManager: ScreenManager.AddScreen(new background(), null); ScreenManager.AddScreen(new Menu(), null); The count of the ScreenManager is now 3. What I noticed while stepping through the updates for GameScreen and ScreenManager, the gameplay screen never gets to the point where the transistion process finishes so the ScreenManager can remove it from the stack. This anomaly does not happen to any of my other screens when I switch between them. Screen Manager Code #region File Description //----------------------------------------------------------------------------- // ScreenManager.cs // // Microsoft XNA Community Game Platform // Copyright (C) Microsoft Corporation. All rights reserved. //----------------------------------------------------------------------------- #endregion #define DEMO #region Using Statements using System; using System.Diagnostics; using System.Collections.Generic; using Microsoft.Xna.Framework; using Microsoft.Xna.Framework.Content; using Microsoft.Xna.Framework.Graphics; using PerformanceUtility.GameDebugTools; #endregion namespace GameStateManagement { /// <summary> /// The screen manager is a component which manages one or more GameScreen /// instances. It maintains a stack of screens, calls their Update and Draw /// methods at the appropriate times, and automatically routes input to the /// topmost active screen. /// </summary> public class ScreenManager : DrawableGameComponent { #region Fields List<GameScreen> screens = new List<GameScreen>(); List<GameScreen> screensToUpdate = new List<GameScreen>(); InputState input = new InputState(); SpriteBatch spriteBatch; SpriteFont font; Texture2D blankTexture; bool isInitialized; bool getOut; bool traceEnabled; #if DEBUG DebugSystem debugSystem; Stopwatch stopwatch = new Stopwatch(); bool debugTextEnabled; #endif #endregion #region Properties /// <summary> /// A default SpriteBatch shared by all the screens. This saves /// each screen having to bother creating their own local instance. /// </summary> public SpriteBatch SpriteBatch { get { return spriteBatch; } } /// <summary> /// A default font shared by all the screens. This saves /// each screen having to bother loading their own local copy. /// </summary> public SpriteFont Font { get { return font; } } public Rectangle ScreenRectangle { get { return new Rectangle(0, 0, GraphicsDevice.Viewport.Width, GraphicsDevice.Viewport.Height); } } /// <summary> /// If true, the manager prints out a list of all the screens /// each time it is updated. This can be useful for making sure /// everything is being added and removed at the right times. /// </summary> public bool TraceEnabled { get { return traceEnabled; } set { traceEnabled = value; } } #if DEBUG public bool DebugTextEnabled { get { return debugTextEnabled; } set { debugTextEnabled = value; } } public DebugSystem DebugSystem { get { return debugSystem; } } #endif #endregion #region Initialization /// <summary> /// Constructs a new screen manager component. /// </summary> public ScreenManager(Game game) : base(game) { // we must set EnabledGestures before we can query for them, but // we don't assume the game wants to read them. //TouchPanel.EnabledGestures = GestureType.None; } /// <summary> /// Initializes the screen manager component. 
/// </summary> public override void Initialize() { base.Initialize(); #if DEBUG debugSystem = DebugSystem.Initialize(Game, "Fonts/MenuFont"); #endif isInitialized = true; } /// <summary> /// Load your graphics content. /// </summary> protected override void LoadContent() { // Load content belonging to the screen manager. ContentManager content = Game.Content; spriteBatch = new SpriteBatch(GraphicsDevice); font = content.Load<SpriteFont>(@"Fonts\menufont"); blankTexture = content.Load<Texture2D>(@"Textures\Backgrounds\blank"); // Tell each of the screens to load their content. foreach (GameScreen screen in screens) { screen.LoadContent(); } } /// <summary> /// Unload your graphics content. /// </summary> protected override void UnloadContent() { // Tell each of the screens to unload their content. foreach (GameScreen screen in screens) { screen.UnloadContent(); } } #endregion #region Update and Draw /// <summary> /// Allows each screen to run logic. /// </summary> public override void Update(GameTime gameTime) { #if DEBUG debugSystem.TimeRuler.StartFrame(); debugSystem.TimeRuler.BeginMark("Update", Color.Blue); if (debugTextEnabled && getOut == false) { debugSystem.FpsCounter.Visible = true; debugSystem.TimeRuler.Visible = true; debugSystem.TimeRuler.ShowLog = true; getOut = true; } else if (debugTextEnabled == false) { getOut = false; debugSystem.FpsCounter.Visible = false; debugSystem.TimeRuler.Visible = false; debugSystem.TimeRuler.ShowLog = false; } #endif // Read the keyboard and gamepad. input.Update(); // Make a copy of the master screen list, to avoid confusion if // the process of updating one screen adds or removes others. screensToUpdate.Clear(); foreach (GameScreen screen in screens) screensToUpdate.Add(screen); bool otherScreenHasFocus = !Game.IsActive; bool coveredByOtherScreen = false; // Loop as long as there are screens waiting to be updated. while (screensToUpdate.Count > 0) { // Pop the topmost screen off the waiting list. GameScreen screen = screensToUpdate[screensToUpdate.Count - 1]; screensToUpdate.RemoveAt(screensToUpdate.Count - 1); // Update the screen. screen.Update(gameTime, otherScreenHasFocus, coveredByOtherScreen); if (screen.ScreenState == ScreenState.TransitionOn || screen.ScreenState == ScreenState.Active) { // If this is the first active screen we came across, // give it a chance to handle input. if (!otherScreenHasFocus) { screen.HandleInput(input); otherScreenHasFocus = true; } // If this is an active non-popup, inform any subsequent // screens that they are covered by it. if (!screen.IsPopup) coveredByOtherScreen = true; } } // Print debug trace? if (traceEnabled) TraceScreens(); #if DEBUG debugSystem.TimeRuler.EndMark("Update"); #endif } /// <summary> /// Prints a list of all the screens, for debugging. /// </summary> void TraceScreens() { List<string> screenNames = new List<string>(); foreach (GameScreen screen in screens) screenNames.Add(screen.GetType().Name); Debug.WriteLine(string.Join(", ", screenNames.ToArray())); } /// <summary> /// Tells each screen to draw itself. 
/// </summary> public override void Draw(GameTime gameTime) { #if DEBUG debugSystem.TimeRuler.StartFrame(); debugSystem.TimeRuler.BeginMark("Draw", Color.Yellow); #endif foreach (GameScreen screen in screens) { if (screen.ScreenState == ScreenState.Hidden) continue; screen.Draw(gameTime); } #if DEBUG debugSystem.TimeRuler.EndMark("Draw"); #endif #if DEMO SpriteBatch.Begin(); SpriteBatch.DrawString(font, "DEMO - NOT FOR RESALE", new Vector2(20, 80), Color.White); SpriteBatch.End(); #endif } #endregion #region Public Methods /// <summary> /// Adds a new screen to the screen manager. /// </summary> public void AddScreen(GameScreen screen, PlayerIndex? controllingPlayer) { screen.ControllingPlayer = controllingPlayer; screen.ScreenManager = this; screen.IsExiting = false; // If we have a graphics device, tell the screen to load content. if (isInitialized) { screen.LoadContent(); } screens.Add(screen); } /// <summary> /// Removes a screen from the screen manager. You should normally /// use GameScreen.ExitScreen instead of calling this directly, so /// the screen can gradually transition off rather than just being /// instantly removed. /// </summary> public void RemoveScreen(GameScreen screen) { // If we have a graphics device, tell the screen to unload content. if (isInitialized) { screen.UnloadContent(); } screens.Remove(screen); screensToUpdate.Remove(screen); } /// <summary> /// Expose an array holding all the screens. We return a copy rather /// than the real master list, because screens should only ever be added /// or removed using the AddScreen and RemoveScreen methods. /// </summary> public GameScreen[] GetScreens() { return screens.ToArray(); } /// <summary> /// Helper draws a translucent black fullscreen sprite, used for fading /// screens in and out, and for darkening the background behind popups. /// </summary> public void FadeBackBufferToBlack(float alpha) { Viewport viewport = GraphicsDevice.Viewport; spriteBatch.Begin(); spriteBatch.Draw(blankTexture, new Rectangle(0, 0, viewport.Width, viewport.Height), Color.Black * alpha); spriteBatch.End(); } #endregion } } Game Screen Parent of GamePlayScreen #region File Description //----------------------------------------------------------------------------- // GameScreen.cs // // Microsoft XNA Community Game Platform // Copyright (C) Microsoft Corporation. All rights reserved. //----------------------------------------------------------------------------- #endregion #region Using Statements using System; using Microsoft.Xna.Framework; using Microsoft.Xna.Framework.Input; //using Microsoft.Xna.Framework.Input.Touch; using System.IO; #endregion namespace GameStateManagement { /// <summary> /// Enum describes the screen transition state. /// </summary> public enum ScreenState { TransitionOn, Active, TransitionOff, Hidden, } /// <summary> /// A screen is a single layer that has update and draw logic, and which /// can be combined with other layers to build up a complex menu system. /// For instance the main menu, the options menu, the "are you sure you /// want to quit" message box, and the main game itself are all implemented /// as screens. /// </summary> public abstract class GameScreen { #region Properties /// <summary> /// Normally when one screen is brought up over the top of another, /// the first screen will transition off to make room for the new /// one. This property indicates whether the screen is only a small /// popup, in which case screens underneath it do not need to bother /// transitioning off. 
/// </summary> public bool IsPopup { get { return isPopup; } protected set { isPopup = value; } } bool isPopup = false; /// <summary> /// Indicates how long the screen takes to /// transition on when it is activated. /// </summary> public TimeSpan TransitionOnTime { get { return transitionOnTime; } protected set { transitionOnTime = value; } } TimeSpan transitionOnTime = TimeSpan.Zero; /// <summary> /// Indicates how long the screen takes to /// transition off when it is deactivated. /// </summary> public TimeSpan TransitionOffTime { get { return transitionOffTime; } protected set { transitionOffTime = value; } } TimeSpan transitionOffTime = TimeSpan.Zero; /// <summary> /// Gets the current position of the screen transition, ranging /// from zero (fully active, no transition) to one (transitioned /// fully off to nothing). /// </summary> public float TransitionPosition { get { return transitionPosition; } protected set { transitionPosition = value; } } float transitionPosition = 1; /// <summary> /// Gets the current alpha of the screen transition, ranging /// from 1 (fully active, no transition) to 0 (transitioned /// fully off to nothing). /// </summary> public float TransitionAlpha { get { return 1f - TransitionPosition; } } /// <summary> /// Gets the current screen transition state. /// </summary> public ScreenState ScreenState { get { return screenState; } protected set { screenState = value; } } ScreenState screenState = ScreenState.TransitionOn; /// <summary> /// There are two possible reasons why a screen might be transitioning /// off. It could be temporarily going away to make room for another /// screen that is on top of it, or it could be going away for good. /// This property indicates whether the screen is exiting for real: /// if set, the screen will automatically remove itself as soon as the /// transition finishes. /// </summary> public bool IsExiting { get { return isExiting; } protected internal set { isExiting = value; } } bool isExiting = false; /// <summary> /// Checks whether this screen is active and can respond to user input. /// </summary> public bool IsActive { get { return !otherScreenHasFocus && (screenState == ScreenState.TransitionOn || screenState == ScreenState.Active); } } bool otherScreenHasFocus; /// <summary> /// Gets the manager that this screen belongs to. /// </summary> public ScreenManager ScreenManager { get { return screenManager; } internal set { screenManager = value; } } ScreenManager screenManager; public KeyboardState KeyboardState { get {return Keyboard.GetState();} } /// <summary> /// Gets the index of the player who is currently controlling this screen, /// or null if it is accepting input from any player. This is used to lock /// the game to a specific player profile. The main menu responds to input /// from any connected gamepad, but whichever player makes a selection from /// this menu is given control over all subsequent screens, so other gamepads /// are inactive until the controlling player returns to the main menu. /// </summary> public PlayerIndex? ControllingPlayer { get { return controllingPlayer; } internal set { controllingPlayer = value; } } PlayerIndex? controllingPlayer; /// <summary> /// Gets whether or not this screen is serializable. If this is true, /// the screen will be recorded into the screen manager's state and /// its Serialize and Deserialize methods will be called as appropriate. /// If this is false, the screen will be ignored during serialization. /// By default, all screens are assumed to be serializable. 
/// </summary> public bool IsSerializable { get { return isSerializable; } protected set { isSerializable = value; } } bool isSerializable = true; #endregion #region Initialization /// <summary> /// Load graphics content for the screen. /// </summary> public virtual void LoadContent() { } /// <summary> /// Unload content for the screen. /// </summary> public virtual void UnloadContent() { } #endregion #region Update and Draw /// <summary> /// Allows the screen to run logic, such as updating the transition position. /// Unlike HandleInput, this method is called regardless of whether the screen /// is active, hidden, or in the middle of a transition. /// </summary> public virtual void Update(GameTime gameTime, bool otherScreenHasFocus, bool coveredByOtherScreen) { this.otherScreenHasFocus = otherScreenHasFocus; if (isExiting) { // If the screen is going away to die, it should transition off. screenState = ScreenState.TransitionOff; if (!UpdateTransition(gameTime, transitionOffTime, 1)) { // When the transition finishes, remove the screen. ScreenManager.RemoveScreen(this); } } else if (coveredByOtherScreen) { // If the screen is covered by another, it should transition off. if (UpdateTransition(gameTime, transitionOffTime, 1)) { // Still busy transitioning. screenState = ScreenState.TransitionOff; } else { // Transition finished! screenState = ScreenState.Hidden; } } else { // Otherwise the screen should transition on and become active. if (UpdateTransition(gameTime, transitionOnTime, -1)) { // Still busy transitioning. screenState = ScreenState.TransitionOn; } else { // Transition finished! screenState = ScreenState.Active; } } } /// <summary> /// Helper for updating the screen transition position. /// </summary> bool UpdateTransition(GameTime gameTime, TimeSpan time, int direction) { // How much should we move by? float transitionDelta; if (time == TimeSpan.Zero) transitionDelta = 1; else transitionDelta = (float)(gameTime.ElapsedGameTime.TotalMilliseconds / time.TotalMilliseconds); // Update the transition position. transitionPosition += transitionDelta * direction; // Did we reach the end of the transition? if (((direction < 0) && (transitionPosition <= 0)) || ((direction > 0) && (transitionPosition >= 1))) { transitionPosition = MathHelper.Clamp(transitionPosition, 0, 1); return false; } // Otherwise we are still busy transitioning. return true; } /// <summary> /// Allows the screen to handle user input. Unlike Update, this method /// is only called when the screen is active, and not when some other /// screen has taken the focus. /// </summary> public virtual void HandleInput(InputState input) { } public KeyboardState currentKeyState; public KeyboardState lastKeyState; public bool IsKeyHit(Keys key) { if (currentKeyState.IsKeyDown(key) && lastKeyState.IsKeyUp(key)) return true; return false; } /// <summary> /// This is called when the screen should draw itself. /// </summary> public virtual void Draw(GameTime gameTime) { } #endregion #region Public Methods /// <summary> /// Tells the screen to serialize its state into the given stream. /// </summary> public virtual void Serialize(Stream stream) { } /// <summary> /// Tells the screen to deserialize its state from the given stream. /// </summary> public virtual void Deserialize(Stream stream) { } /// <summary> /// Tells the screen to go away. Unlike ScreenManager.RemoveScreen, which /// instantly kills the screen, this method respects the transition timings /// and will give the screen a chance to gradually transition off. 
/// </summary> public void ExitScreen() { if (TransitionOffTime == TimeSpan.Zero) { // If the screen has a zero transition time, remove it immediately. ScreenManager.RemoveScreen(this); } else { // Otherwise flag that it should transition off and then exit. isExiting = true; } } #endregion #region Helper Methods /// <summary> /// A helper method which loads assets using the screen manager's /// associated game content loader. /// </summary> /// <typeparam name="T">Type of asset.</typeparam> /// <param name="assetName">Asset name, relative to the loader root /// directory, and not including the .xnb extension.</param> /// <returns></returns> public T Load<T>(string assetName) { return ScreenManager.Game.Content.Load<T>(assetName); } #endregion } }
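A quick way to see why the gameplay screen never leaves the stack is to trace its transition every frame: the screen is only removed when UpdateTransition runs to completion (TransitionPosition reaches 1) while IsExiting is set. Below is a minimal diagnostic sketch; the GameplayScreen override and its guard are assumptions, since that class is not shown in the question. The key point is that base.Update must keep running while the screen is exiting, otherwise the TransitionOff state never completes and RemoveScreen is never called.

// Hypothetical override in the gameplay screen (that class is not shown in the
// question). Requires "using System.Diagnostics;" for Debug.WriteLine.
public override void Update(GameTime gameTime, bool otherScreenHasFocus,
                            bool coveredByOtherScreen)
{
    // Always let GameScreen.Update advance the transition, even while the
    // screen is covered or exiting; guarding this call behind a pause or
    // game-state check is a common reason the exit transition never finishes.
    base.Update(gameTime, otherScreenHasFocus, coveredByOtherScreen);

    // Trace the exit progress frame by frame.
    Debug.WriteLine(string.Format("Gameplay: state={0} pos={1:0.00} exiting={2}",
        ScreenState, TransitionPosition, IsExiting));

    if (IsExiting)
        return; // skip gameplay logic while transitioning off

    // ... gameplay update logic ...
}

If the trace shows TransitionPosition stuck below 1, the likely culprits are a derived Update that skips base.Update under some condition, or an unexpectedly long TransitionOffTime on the gameplay screen.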

    Read the article

  • 256 Windows Azure Worker Roles, Windows Kinect and a 90's Text-Based Ray-Tracer

    - by Alan Smith
    For a couple of years I have been demoing a simple render farm hosted in Windows Azure using worker roles and the Azure Storage service. At the start of the presentation I deploy an Azure application that uses 16 worker roles to render a 1,500 frame 3D ray-traced animation. At the end of the presentation, when the animation was complete, I would play the animation delete the Azure deployment. The standing joke with the audience was that it was that it was a “$2 demo”, as the compute charges for running the 16 instances for an hour was $1.92, factor in the bandwidth charges and it’s a couple of dollars. The point of the demo is that it highlights one of the great benefits of cloud computing, you pay for what you use, and if you need massive compute power for a short period of time using Windows Azure can work out very cost effective. The “$2 demo” was great for presenting at user groups and conferences in that it could be deployed to Azure, used to render an animation, and then removed in a one hour session. I have always had the idea of doing something a bit more impressive with the demo, and scaling it from a “$2 demo” to a “$30 demo”. The challenge was to create a visually appealing animation in high definition format and keep the demo time down to one hour.  This article will take a run through how I achieved this. Ray Tracing Ray tracing, a technique for generating high quality photorealistic images, gained popularity in the 90’s with companies like Pixar creating feature length computer animations, and also the emergence of shareware text-based ray tracers that could run on a home PC. In order to render a ray traced image, the ray of light that would pass from the view point must be tracked until it intersects with an object. At the intersection, the color, reflectiveness, transparency, and refractive index of the object are used to calculate if the ray will be reflected or refracted. Each pixel may require thousands of calculations to determine what color it will be in the rendered image. Pin-Board Toys Having very little artistic talent and a basic understanding of maths I decided to focus on an animation that could be modeled fairly easily and would look visually impressive. I’ve always liked the pin-board desktop toys that become popular in the 80’s and when I was working as a 3D animator back in the 90’s I always had the idea of creating a 3D ray-traced animation of a pin-board, but never found the energy to do it. Even if I had a go at it, the render time to produce an animation that would look respectable on a 486 would have been measured in months. PolyRay Back in 1995 I landed my first real job, after spending three years being a beach-ski-climbing-paragliding-bum, and was employed to create 3D ray-traced animations for a CD-ROM that school kids would use to learn physics. I had got into the strange and wonderful world of text-based ray tracing, and was using a shareware ray-tracer called PolyRay. PolyRay takes a text file describing a scene as input and, after a few hours processing on a 486, produced a high quality ray-traced image. The following is an example of a basic PolyRay scene file. 
background Midnight_Blue   static define matte surface { ambient 0.1 diffuse 0.7 } define matte_white texture { matte { color white } } define matte_black texture { matte { color dark_slate_gray } } define position_cylindrical 3 define lookup_sawtooth 1 define light_wood <0.6, 0.24, 0.1> define median_wood <0.3, 0.12, 0.03> define dark_wood <0.05, 0.01, 0.005>     define wooden texture { noise surface { ambient 0.2  diffuse 0.7  specular white, 0.5 microfacet Reitz 10 position_fn position_cylindrical position_scale 1  lookup_fn lookup_sawtooth octaves 1 turbulence 1 color_map( [0.0, 0.2, light_wood, light_wood] [0.2, 0.3, light_wood, median_wood] [0.3, 0.4, median_wood, light_wood] [0.4, 0.7, light_wood, light_wood] [0.7, 0.8, light_wood, median_wood] [0.8, 0.9, median_wood, light_wood] [0.9, 1.0, light_wood, dark_wood]) } } define glass texture { surface { ambient 0 diffuse 0 specular 0.2 reflection white, 0.1 transmission white, 1, 1.5 }} define shiny surface { ambient 0.1 diffuse 0.6 specular white, 0.6 microfacet Phong 7  } define steely_blue texture { shiny { color black } } define chrome texture { surface { color white ambient 0.0 diffuse 0.2 specular 0.4 microfacet Phong 10 reflection 0.8 } }   viewpoint {     from <4.000, -1.000, 1.000> at <0.000, 0.000, 0.000> up <0, 1, 0> angle 60     resolution 640, 480 aspect 1.6 image_format 0 }       light <-10, 30, 20> light <-10, 30, -20>   object { disc <0, -2, 0>, <0, 1, 0>, 30 wooden }   object { sphere <0.000, 0.000, 0.000>, 1.00 chrome } object { cylinder <0.000, 0.000, 0.000>, <0.000, 0.000, -4.000>, 0.50 chrome }   After setting up the background and defining colors and textures, the viewpoint is specified. The “camera” is located at a point in 3D space, and it looks towards another point. The angle, image resolution, and aspect ratio are specified. Two lights are present in the image at defined coordinates. The three objects in the image are a wooden disc to represent a table top, and a sphere and cylinder that intersect to form a pin that will be used for the pin board toy in the final animation. When the image is rendered, the following image is produced. The pins are modeled with a chrome surface, so they reflect the environment around them. Note that the scale of the pin shaft is not correct, this will be fixed later. Modeling the Pin Board The frame of the pin-board is made up of three boxes, and six cylinders, the front box is modeled using a clear, slightly reflective solid, with the same refractive index of glass. The other shapes are modeled as metal. object { box <-5.5, -1.5, 1>, <5.5, 5.5, 1.2> glass } object { box <-5.5, -1.5, -0.04>, <5.5, 5.5, -0.09> steely_blue } object { box <-5.5, -1.5, -0.52>, <5.5, 5.5, -0.59> steely_blue } object { cylinder <-5.2, -1.2, 1.4>, <-5.2, -1.2, -0.74>, 0.2 steely_blue } object { cylinder <5.2, -1.2, 1.4>, <5.2, -1.2, -0.74>, 0.2 steely_blue } object { cylinder <-5.2, 5.2, 1.4>, <-5.2, 5.2, -0.74>, 0.2 steely_blue } object { cylinder <5.2, 5.2, 1.4>, <5.2, 5.2, -0.74>, 0.2 steely_blue } object { cylinder <0, -1.2, 1.4>, <0, -1.2, -0.74>, 0.2 steely_blue } object { cylinder <0, 5.2, 1.4>, <0, 5.2, -0.74>, 0.2 steely_blue }   In order to create the matrix of pins that make up the pin board I used a basic console application with a few nested loops to create two intersecting matrixes of pins, which models the layout used in the pin boards. The resulting image is shown below. The pin board contains 11,481 pins, with the scene file containing 23,709 lines of code. 
For the complete animation 2,000 scene files will be created, which is over 47 million lines of code. Each pin in the pin-board will slide out a specific distance when an object is pressed into the back of the board. This is easily modeled by setting the Z coordinate of the pin to a specific value. In order to set all of the pins in the pin-board to the correct position, a bitmap image can be used. The position of the pin can be set based on the color of the pixel at the appropriate position in the image. When the Windows Azure logo is used to set the Z coordinate of the pins, the following image is generated. The challenge now was to make a cool animation. The Azure Logo is fine, but it is static. Using a normal video to animate the pins would not work; the colors in the video would not be the same as the depth of the objects from the camera. In order to simulate the pin board accurately a series of frames from a depth camera could be used. Windows Kinect The Kenect controllers for the X-Box 360 and Windows feature a depth camera. The Kinect SDK for Windows provides a programming interface for Kenect, providing easy access for .NET developers to the Kinect sensors. The Kinect Explorer provided with the Kinect SDK is a great starting point for exploring Kinect from a developers perspective. Both the X-Box 360 Kinect and the Windows Kinect will work with the Kinect SDK, the Windows Kinect is required for commercial applications, but the X-Box Kinect can be used for hobby projects. The Windows Kinect has the advantage of providing a mode to allow depth capture with objects closer to the camera, which makes for a more accurate depth image for setting the pin positions. Creating a Depth Field Animation The depth field animation used to set the positions of the pin in the pin board was created using a modified version of the Kinect Explorer sample application. In order to simulate the pin board accurately, a small section of the depth range from the depth sensor will be used. Any part of the object in front of the depth range will result in a white pixel; anything behind the depth range will be black. Within the depth range the pixels in the image will be set to RGB values from 0,0,0 to 255,255,255. A screen shot of the modified Kinect Explorer application is shown below. The Kinect Explorer sample application was modified to include slider controls that are used to set the depth range that forms the image from the depth stream. This allows the fine tuning of the depth image that is required for simulating the position of the pins in the pin board. The Kinect Explorer was also modified to record a series of images from the depth camera and save them as a sequence JPEG files that will be used to animate the pins in the animation the Start and Stop buttons are used to start and stop the image recording. En example of one of the depth images is shown below. Once a series of 2,000 depth images has been captured, the task of creating the animation can begin. Rendering a Test Frame In order to test the creation of frames and get an approximation of the time required to render each frame a test frame was rendered on-premise using PolyRay. The output of the rendering process is shown below. The test frame contained 23,629 primitive shapes, most of which are the spheres and cylinders that are used for the 11,800 or so pins in the pin board. 
The 1280x720 image contains 921,600 pixels, but as anti-aliasing was used the number of rays that were calculated was 4,235,777, with 3,478,754,073 object boundaries checked. The test frame of the pin board with the depth field image applied is shown below. The tracing time for the test frame was 4 minutes 27 seconds, which means rendering the2,000 frames in the animation would take over 148 hours, or a little over 6 days. Although this is much faster that an old 486, waiting almost a week to see the results of an animation would make it challenging for animators to create, view, and refine their animations. It would be much better if the animation could be rendered in less than one hour. Windows Azure Worker Roles The cost of creating an on-premise render farm to render animations increases in proportion to the number of servers. The table below shows the cost of servers for creating a render farm, assuming a cost of $500 per server. Number of Servers Cost 1 $500 16 $8,000 256 $128,000   As well as the cost of the servers, there would be additional costs for networking, racks etc. Hosting an environment of 256 servers on-premise would require a server room with cooling, and some pretty hefty power cabling. The Windows Azure compute services provide worker roles, which are ideal for performing processor intensive compute tasks. With the scalability available in Windows Azure a job that takes 256 hours to complete could be perfumed using different numbers of worker roles. The time and cost of using 1, 16 or 256 worker roles is shown below. Number of Worker Roles Render Time Cost 1 256 hours $30.72 16 16 hours $30.72 256 1 hour $30.72   Using worker roles in Windows Azure provides the same cost for the 256 hour job, irrespective of the number of worker roles used. Provided the compute task can be broken down into many small units, and the worker role compute power can be used effectively, it makes sense to scale the application so that the task is completed quickly, making the results available in a timely fashion. The task of rendering 2,000 frames in an animation is one that can easily be broken down into 2,000 individual pieces, which can be performed by a number of worker roles. Creating a Render Farm in Windows Azure The architecture of the render farm is shown in the following diagram. The render farm is a hybrid application with the following components: ·         On-Premise o   Windows Kinect – Used combined with the Kinect Explorer to create a stream of depth images. o   Animation Creator – This application uses the depth images from the Kinect sensor to create scene description files for PolyRay. These files are then uploaded to the jobs blob container, and job messages added to the jobs queue. o   Process Monitor – This application queries the role instance lifecycle table and displays statistics about the render farm environment and render process. o   Image Downloader – This application polls the image queue and downloads the rendered animation files once they are complete. ·         Windows Azure o   Azure Storage – Queues and blobs are used for the scene description files and completed frames. A table is used to store the statistics about the rendering environment.   The architecture of each worker role is shown below.   The worker role is configured to use local storage, which provides file storage on the worker role instance that can be use by the applications to render the image and transform the format of the image. 
The service definition for the worker role with the local storage configuration highlighted is shown below. <?xml version="1.0" encoding="utf-8"?> <ServiceDefinition name="CloudRay" >   <WorkerRole name="CloudRayWorkerRole" vmsize="Small">     <Imports>     </Imports>     <ConfigurationSettings>       <Setting name="DataConnectionString" />     </ConfigurationSettings>     <LocalResources>       <LocalStorage name="RayFolder" cleanOnRoleRecycle="true" />     </LocalResources>   </WorkerRole> </ServiceDefinition>     The two executable programs, PolyRay.exe and DTA.exe are included in the Azure project, with Copy Always set as the property. PolyRay will take the scene description file and render it to a Truevision TGA file. As the TGA format has not seen much use since the mid 90’s it is converted to a JPG image using Dave's Targa Animator, another shareware application from the 90’s. Each worker roll will use the following process to render the animation frames. 1.       The worker process polls the job queue, if a job is available the scene description file is downloaded from blob storage to local storage. 2.       PolyRay.exe is started in a process with the appropriate command line arguments to render the image as a TGA file. 3.       DTA.exe is started in a process with the appropriate command line arguments convert the TGA file to a JPG file. 4.       The JPG file is uploaded from local storage to the images blob container. 5.       A message is placed on the images queue to indicate a new image is available for download. 6.       The job message is deleted from the job queue. 7.       The role instance lifecycle table is updated with statistics on the number of frames rendered by the worker role instance, and the CPU time used. The code for this is shown below. public override void Run() {     // Set environment variables     string polyRayPath = Path.Combine(Environment.GetEnvironmentVariable("RoleRoot"), PolyRayLocation);     string dtaPath = Path.Combine(Environment.GetEnvironmentVariable("RoleRoot"), DTALocation);       LocalResource rayStorage = RoleEnvironment.GetLocalResource("RayFolder");     string localStorageRootPath = rayStorage.RootPath;       JobQueue jobQueue = new JobQueue("renderjobs");     JobQueue downloadQueue = new JobQueue("renderimagedownloadjobs");     CloudRayBlob sceneBlob = new CloudRayBlob("scenes");     CloudRayBlob imageBlob = new CloudRayBlob("images");     RoleLifecycleDataSource roleLifecycleDataSource = new RoleLifecycleDataSource();       Frames = 0;       while (true)     {         // Get the render job from the queue         CloudQueueMessage jobMsg = jobQueue.Get();           if (jobMsg != null)         {             // Get the file details             string sceneFile = jobMsg.AsString;             string tgaFile = sceneFile.Replace(".pi", ".tga");             string jpgFile = sceneFile.Replace(".pi", ".jpg");               string sceneFilePath = Path.Combine(localStorageRootPath, sceneFile);             string tgaFilePath = Path.Combine(localStorageRootPath, tgaFile);             string jpgFilePath = Path.Combine(localStorageRootPath, jpgFile);               // Copy the scene file to local storage             sceneBlob.DownloadFile(sceneFilePath);               // Run the ray tracer.             
string polyrayArguments =                 string.Format("\"{0}\" -o \"{1}\" -a 2", sceneFilePath, tgaFilePath);             Process polyRayProcess = new Process();             polyRayProcess.StartInfo.FileName =                 Path.Combine(Environment.GetEnvironmentVariable("RoleRoot"), polyRayPath);             polyRayProcess.StartInfo.Arguments = polyrayArguments;             polyRayProcess.Start();             polyRayProcess.WaitForExit();               // Convert the image             string dtaArguments =                 string.Format(" {0} /FJ /P{1}", tgaFilePath, Path.GetDirectoryName (jpgFilePath));             Process dtaProcess = new Process();             dtaProcess.StartInfo.FileName =                 Path.Combine(Environment.GetEnvironmentVariable("RoleRoot"), dtaPath);             dtaProcess.StartInfo.Arguments = dtaArguments;             dtaProcess.Start();             dtaProcess.WaitForExit();               // Upload the image to blob storage             imageBlob.UploadFile(jpgFilePath);               // Add a download job.             downloadQueue.Add(jpgFile);               // Delete the render job message             jobQueue.Delete(jobMsg);               Frames++;         }         else         {             Thread.Sleep(1000);         }           // Log the worker role activity.         roleLifecycleDataSource.Alive             ("CloudRayWorker", RoleLifecycleDataSource.RoleLifecycleId, Frames);     } }     Monitoring Worker Role Instance Lifecycle In order to get more accurate statistics about the lifecycle of the worker role instances used to render the animation data was tracked in an Azure storage table. The following class was used to track the worker role lifecycles in Azure storage.   public class RoleLifecycle : TableServiceEntity {     public string ServerName { get; set; }     public string Status { get; set; }     public DateTime StartTime { get; set; }     public DateTime EndTime { get; set; }     public long SecondsRunning { get; set; }     public DateTime LastActiveTime { get; set; }     public int Frames { get; set; }     public string Comment { get; set; }       public RoleLifecycle()     {     }       public RoleLifecycle(string roleName)     {         PartitionKey = roleName;         RowKey = Utils.GetAscendingRowKey();         Status = "Started";         StartTime = DateTime.UtcNow;         LastActiveTime = StartTime;         EndTime = StartTime;         SecondsRunning = 0;         Frames = 0;     } }     A new instance of this class is created and added to the storage table when the role starts. It is then updated each time the worker renders a frame to record the total number of frames rendered and the total processing time. These statistics are used be the monitoring application to determine the effectiveness of use of resources in the render farm. Rendering the Animation The Azure solution was deployed to Windows Azure with the service configuration set to 16 worker role instances. This allows for the application to be tested in the cloud environment, and the performance of the application determined. When I demo the application at conferences and user groups I often start with 16 instances, and then scale up the application to the full 256 instances. The configuration to run 16 instances is shown below. 
<?xml version="1.0" encoding="utf-8"?> <ServiceConfiguration serviceName="CloudRay" xmlns="http://schemas.microsoft.com/ServiceHosting/2008/10/ServiceConfiguration" osFamily="1" osVersion="*">   <Role name="CloudRayWorkerRole">     <Instances count="16" />     <ConfigurationSettings>       <Setting name="DataConnectionString"         value="DefaultEndpointsProtocol=https;AccountName=cloudraydata;AccountKey=..." />     </ConfigurationSettings>   </Role> </ServiceConfiguration>     About six minutes after deploying the application the first worker roles become active and start to render the first frames of the animation. The CloudRay Monitor application displays an icon for each worker role instance, with a number indicating the number of frames that the worker role has rendered. The statistics on the left show the number of active worker roles and statistics about the render process. The render time is the time since the first worker role became active; the CPU time is the total amount of processing time used by all worker role instances to render the frames.   Five minutes after the first worker role became active the last of the 16 worker roles activated. By this time the first seven worker roles had each rendered one frame of the animation.   With 16 worker roles u and running it can be seen that one hour and 45 minutes CPU time has been used to render 32 frames with a render time of just under 10 minutes.     At this rate it would take over 10 hours to render the 2,000 frames of the full animation. In order to complete the animation in under an hour more processing power will be required. Scaling the render farm from 16 instances to 256 instances is easy using the new management portal. The slider is set to 256 instances, and the configuration saved. We do not need to re-deploy the application, and the 16 instances that are up and running will not be affected. Alternatively, the configuration file for the Azure service could be modified to specify 256 instances.   <?xml version="1.0" encoding="utf-8"?> <ServiceConfiguration serviceName="CloudRay" xmlns="http://schemas.microsoft.com/ServiceHosting/2008/10/ServiceConfiguration" osFamily="1" osVersion="*">   <Role name="CloudRayWorkerRole">     <Instances count="256" />     <ConfigurationSettings>       <Setting name="DataConnectionString"         value="DefaultEndpointsProtocol=https;AccountName=cloudraydata;AccountKey=..." />     </ConfigurationSettings>   </Role> </ServiceConfiguration>     Six minutes after the new configuration has been applied 75 new worker roles have activated and are processing their first frames.   Five minutes later the full configuration of 256 worker roles is up and running. We can see that the average rate of frame rendering has increased from 3 to 12 frames per minute, and that over 17 hours of CPU time has been utilized in 23 minutes. In this test the time to provision 140 worker roles was about 11 minutes, which works out at about one every five seconds.   We are now half way through the rendering, with 1,000 frames complete. This has utilized just under three days of CPU time in a little over 35 minutes.   The animation is now complete, with 2,000 frames rendered in a little over 52 minutes. The CPU time used by the 256 worker roles is 6 days, 7 hours and 22 minutes with an average frame rate of 38 frames per minute. The rendering of the last 1,000 frames took 16 minutes 27 seconds, which works out at a rendering rate of 60 frames per minute. 
The frame counts in the server instances indicate that the use of a queue to distribute the workload has been very effective in distributing the load across the 256 worker role instances. The first 16 instances that were deployed first have rendered between 11 and 13 frames each, whilst the 240 instances that were added when the application was scaled have rendered between 6 and 9 frames each.   Completed Animation I’ve uploaded the completed animation to YouTube, a low resolution preview is shown below. Pin Board Animation Created using Windows Kinect and 256 Windows Azure Worker Roles   The animation can be viewed in 1280x720 resolution at the following link: http://www.youtube.com/watch?v=n5jy6bvSxWc Effective Use of Resources According to the CloudRay monitor statistics the animation took 6 days, 7 hours and 22 minutes CPU to render, this works out at 152 hours of compute time, rounded up to the nearest hour. As the usage for the worker role instances are billed for the full hour, it may have been possible to render the animation using fewer than 256 worker roles. When deciding the optimal usage of resources, the time required to provision and start the worker roles must also be considered. In the demo I started with 16 worker roles, and then scaled the application to 256 worker roles. It would have been more optimal to start the application with maybe 200 worker roles, and utilized the full hour that I was being billed for. This would, however, have prevented showing the ease of scalability of the application. The new management portal displays the CPU usage across the worker roles in the deployment. The average CPU usage across all instances is 93.27%, with over 99% used when all the instances are up and running. This shows that the worker role resources are being used very effectively. Grid Computing Scenarios Although I am using this scenario for a hobby project, there are many scenarios where a large amount of compute power is required for a short period of time. Windows Azure provides a great platform for developing these types of grid computing applications, and can work out very cost effective. ·         Windows Azure can provide massive compute power, on demand, in a matter of minutes. ·         The use of queues to manage the load balancing of jobs between role instances is a simple and effective solution. ·         Using a cloud-computing platform like Windows Azure allows proof-of-concept scenarios to be tested and evaluated on a very low budget. ·         No charges for inbound data transfer makes the uploading of large data sets to Windows Azure Storage services cost effective. (Transaction charges still apply.) Tips for using Windows Azure for Grid Computing Scenarios I found the implementation of a render farm using Windows Azure a fairly simple scenario to implement. I was impressed by ease of scalability that Azure provides, and by the short time that the application took to scale from 16 to 256 worker role instances. In this case it was around 13 minutes, in other tests it took between 10 and 20 minutes. The following tips may be useful when implementing a grid computing project in Windows Azure. ·         Using an Azure Storage queue to load-balance the units of work across multiple worker roles is simple and very effective. The design I have used in this scenario could easily scale to many thousands of worker role instances. ·         Windows Azure accounts are typically limited to 20 cores. 
If you need to use more than this, a call to support and a credit card check will be required. · Be aware of how the billing model works. You will be charged for worker role instances for the full clock hour in which the instance is deployed. Schedule the workload to start just after the clock hour has started. · Monitor the utilization of the resources you are provisioning, and ensure that you are not paying for worker roles that are idle. · If you are deploying third party applications to worker roles, you may well run into licensing issues. Purchasing software licenses on a per-processor basis when using hundreds of processors for a short time period would not be cost effective. · Third party software may also require installation onto the worker roles, which can be accomplished using start-up tasks. Bear in mind that adding a startup task and possible re-boot will add to the time required for the worker role instance to start and activate. An alternative may be to use a prepared VM and use VM roles. · Consider using the Windows Azure Autoscaling Application Block (WASABi) to autoscale the worker roles in your application. When using a large number of worker roles, the utilization must be carefully monitored; if the scaling algorithms are not optimal it could get very expensive!
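As a companion to the load-balancing tip above, this is roughly what the queue-polling pattern looks like stripped of the article's JobQueue/CloudRayBlob wrappers. It is a sketch only, written against the Microsoft.WindowsAzure.StorageClient library of that era and shown as a plain console loop rather than a full RoleEntryPoint; exact method names (for example CreateIfNotExist) vary slightly between SDK versions, and the connection string and queue name are placeholders.

using System;
using System.Threading;
using Microsoft.WindowsAzure;
using Microsoft.WindowsAzure.StorageClient;

class RenderWorkerSketch
{
    static void Main()
    {
        // Placeholder connection string - the real one comes from the
        // DataConnectionString setting shown in the service configuration.
        CloudStorageAccount account = CloudStorageAccount.Parse(
            "DefaultEndpointsProtocol=https;AccountName=...;AccountKey=...");
        CloudQueueClient queueClient = account.CreateCloudQueueClient();
        CloudQueue jobQueue = queueClient.GetQueueReference("renderjobs");
        jobQueue.CreateIfNotExist();

        while (true)
        {
            CloudQueueMessage job = jobQueue.GetMessage();
            if (job == null)
            {
                Thread.Sleep(1000); // queue empty - back off briefly
                continue;
            }

            string sceneFile = job.AsString;
            // ... download the scene, run PolyRay and DTA, upload the JPG ...

            // Delete the message only after the work succeeds, so a crashed
            // instance lets the message become visible again for another worker.
            jobQueue.DeleteMessage(job);
        }
    }
}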

    Read the article

  • What good technology podcasts are out there?

    - by Michael Stum
    Yes, Podcasts, those nice little Audiobooks I can listen to on the way to work. With the current amount of Podcasts, it's like searching a needle in a haystack, except that the haystack happens to be the Internet and is filled with too many of these "Hot new Gadgets" stuff :( Now, even though I am mainly a .NET developer nowadays, maybe anyone knows some good Podcasts from people regarding the whole software lifecycle? Unit Testing, Continous Integration, Documentation, Deployment... So - what are you guys and gals listening to? Please note that the categorizations are somewhat subjective and may not be 100% accurate as many podcasts cover several areas. Categorization is made against what is considered the "main" area. General Software Engineering / Productivity Stack Overflow TekPub (Requires Paid Subscription) SE Radio 43 Folders Perspectives Dr. Dobb's (now a video feed) The Pragmatic Podcast (Inactive) IT Matters Agile Toolkit Podcast The Stack Trace (Inactive) Parleys Techzing The Startup Success Podcast Berkeley CS class lectures FOSS Weekly .NET / Visual Studio / Microsoft Herding Code Hanselminutes .NET Rocks! Deep Fried Bytes Alt.Net Podcast Polymorphic Podcast Sparkling Client (The Silverlight Podcast) dnrTV! Spaghetti Code ASP.NET Podcast Channel 9 Radio TFS PowerScripting Podcast The Thirsty Developer Elegant Code ConnectedShow Crafty Coders Coding QA jQuery yayQuery The official jQuery podcast Java / Groovy The Java Posse Grails Podcast Java Technology Insider Ruby / Rails Railscasts Rails Envy The Ruby on Rails Podcast Rubiverse Web Design / JavaScript / Ajax WebDevRadio Boagworld The Rissington podcast Ajaxian YUI Theater Unix / Linux / Mac / iPhone Mac Developer Network Hacker Public Radio Linux Outlaws Mac OS Ken LugRadio Linux radio show (Inactive) The Linux Action Show! Linux Kernel Mailing List (LKML) Summary Podcast Stanford's iPhone programming class SysAdmin, Security or Infrastructure RunAs Radio Security Now! Crypto-Gram Security Podcast Hak5 VMWare VMTN Windows Weekly PaulDotCom Security The Register - Semi-Coherent Computing FeatherCast General Tech / Business Tekzilla This Week in Tech The Guardian Tech Weekly PCMag Radio Podcast Entrepreneurship Corner Manager Tools Other / Misc. / Podcast Networks IT Conversations Retrobits Podcast No Agenda Netcast Cranky Geeks The Command Line Freelance Radio IBM developerWorks The Register - Open Season Drunk and Retired Technometria Sod This Radio4Nerds Hacker Medley

    Read the article

  • A simple augmented reality application in C#

    - by Ian
    Hi All, I would like to start a personal project that will give me a lot of new concepts to learn and understand. After some thought, I figured that an Augmented Reality related project would be the most beneficial for me, for the following reasons: I haven't tried interfacing a program with a live video/camera feed; I haven't done any "image processing" project; and I haven't done any "graphics rendering" ever. With that said, you can assume that I will totally suck at AR. So I'm here to ask for some advice regarding the best way to go through this. The 1st milestone project that I am thinking of is to take a cheap webcam and read some Data Matrix codes using C#. The 2nd milestone project will render a text overlay on the feed when presented with a certain Data Matrix. The 3rd milestone project will actually render some 3D shapes. I've searched all around and found some nice yet advanced materials for AR: http://sites.google.com/site/augmentedrealitytestingsite/ http://soldeveloper.com/ http://www.mperfect.net/wpfAugReal/ So I came here to ask the following: Do you think my milestones are reasonable given my "deficiencies"? In order to start Milestone 1, can you give me some materials that I can study? I prefer online materials. Thanks!
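    For Milestone 1, one possible starting point is to pair a capture library with a barcode decoder. The sketch below assumes AForge.NET for the webcam feed and ZXing.Net for Data Matrix decoding; neither library is mentioned in the question, so treat both choices (and the class and handler names) as suggestions rather than the standard approach.

    using System;
    using System.Collections.Generic;
    using System.Drawing;
    using AForge.Video;
    using AForge.Video.DirectShow;
    using ZXing;

    class DataMatrixSpike
    {
        static readonly BarcodeReader Reader = new BarcodeReader();

        static void Main()
        {
            // Restrict the decoder to Data Matrix so other barcode types are ignored.
            Reader.Options.PossibleFormats = new List<BarcodeFormat> { BarcodeFormat.DATA_MATRIX };

            FilterInfoCollection cameras = new FilterInfoCollection(FilterCategory.VideoInputDevice);
            VideoCaptureDevice camera = new VideoCaptureDevice(cameras[0].MonikerString);
            camera.NewFrame += OnNewFrame;
            camera.Start();

            Console.WriteLine("Watching for Data Matrix codes - press Enter to quit.");
            Console.ReadLine();
            camera.SignalToStop();
        }

        static void OnNewFrame(object sender, NewFrameEventArgs e)
        {
            // Clone the frame: AForge reuses the bitmap after the event returns.
            using (Bitmap frame = (Bitmap)e.Frame.Clone())
            {
                Result result = Reader.Decode(frame);
                if (result != null)
                    Console.WriteLine("Decoded: " + result.Text);
            }
        }
    }

    Milestone 2 then becomes drawing an overlay onto the same Bitmap (for example with System.Drawing.Graphics) whenever a particular payload is decoded.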

    Read the article

  • Visual C# GUI Designer - Recommended way of removing generated event handler-code & basic tutorial

    - by cusack
    Hi, I'm new to the Visual C# designer so these are general and pretty basic question on how to work with the designer. When we for instance add a label to a form and then double-click on it in the Visual C# designer (I'm using Microsoft Visual C# 2008 Express Edition), the following things happen: The designer generates code within Form1.Designer.cs (assume default names for simplicity) to add the label, then with the double-click it will add the event handler label1_Click to the label within Form1.Designer.cs, using the following code this.label1.Click += new System.EventHandler(this.label1_Click); and it adds the event handler method to Form1.cs private void label1_Click(object sender, EventArgs e) { } If I now remove the label only the code within Form1.Designer.cs will be removed but the label1_Click method will stay within Form1.cs even if it isn't used by anything else. But if I'm using reset within Properties-Events for the Click-event from within the designer even the label1_Click method in Form1.cs will be removed. 1.) Isn't that a little inconsistent behavior? 2.) What is the recommended way of removing such generated event handler-code? 3.) What is the best "mental approach"/best practice for using the designer? I would approach it by mental separation in the way that Form1.cs is 100% my responsibility and that on the other hand I'm not touching the code in Form1.Designer.cs at all. Does that make sense or not? Since sometimes the designer removes sth. from Form1.cs I'm not sure about this. 4.) Can you recommend a simple designer tutorial that assumes no Visual C# designer knowledge but expects/doesn't explain C#. The following one is an example of what I would not want it explains what a c#-comment is and I'd prefer text over video as well: http://msdn.microsoft.com/en-us/beginner/bb964631.aspx
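    On question 2: the Properties-window reset the asker found is the supported route, because it removes both halves of the wiring. Doing it by hand means deleting the same two pieces yourself, shown below using the question's default label1/Form1 names; a rebuild will then flag any leftover references to the handler.

    // Form1.Designer.cs, inside InitializeComponent() - remove the subscription:
    this.label1.Click += new System.EventHandler(this.label1_Click);

    // Form1.cs - then remove the now-unreferenced handler:
    private void label1_Click(object sender, EventArgs e)
    {
    }

    Keeping to the mental split described in question 3 (Form1.cs is yours, Form1.Designer.cs belongs to the designer) works best when handler removal goes through the Properties window, precisely so the designer can clean up its half.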

    Read the article

  • Can you force an MPMoviePlayerPlaybackDidFinishNotification?

    - by Jonathan
    Hi, I have several movies that are played and presented using this code. As you can see I also have removed the default movie controls and have added a custom overlay which essentially just stops the video. Here is my problem... When I stop the movie with my custom overlay button, I don't seem to be getting the 'MPMoviePlayerPlaybackDidFinishNotification' Note: everything works normal if I let the movie play through and it stop by itself. Is the any way of 'forcing' the PlaybackDidFinish notification? Can I do something like this [self moviePlayBackDidFinish:something]; ? Thank You! - (void) playMovie { NSString *path = [[NSBundle mainBundle] pathForResource:@"movie_frog" ofType:@"m4v"]; NSURL *url = [NSURL fileURLWithPath:path]; MPMoviePlayerController *mp = [[MPMoviePlayerController alloc] initWithContentURL:url]; if(mp) { self.myMoviePlayer = mp; [mp release]; //movie view [self.view addSubview:myMoviePlayer.view]; myMoviePlayer.view.frame = CGRectMake(0.0,0.0,480,320); self.myMoviePlayer.controlStyle = MPMovieControlStyleNone; [self.myMoviePlayer play]; //videoNav _videoNav = [[videoNav alloc] initWithNibName:@"videoNav" bundle:nil]; [self.view addSubview:_videoNav.view]; [[NSNotificationCenter defaultCenter] addObserver:self selector:@selector(moviePlayBackDidFinish:) name:MPMoviePlayerPlaybackDidFinishNotification object:nil]; } }

    Read the article

  • "Unable to associated Elastic IP with cluster" in Eclipse Plugin Tutorial

    - by Jeffrey Chee
    Hi all, I am currently trying to evaluate AWS for my company and was trying to follow the tutorials on the web. http://developer.amazonwebservices.com/connect/entry.jspa?externalID=2241 However I get the below error during startup of the server instance: Unable to associated Elastic IP with cluster: Unable to detect that the Elastic IP was orrectly associated. java.lang.Exception: Unable to detect that the Elastic IP was correctly associated at com.amazonaws.ec2.cluster.Cluster.associateElasticIp(Cluster.java:802) at com.amazonaws.ec2.cluster.Cluster.start(Cluster.java:311) at com.amazonaws.eclipse.wtp.ElasticClusterBehavior.launch(ElasticClusterBehavior.java:611) at com.amazonaws.eclipse.wtp.Ec2LaunchConfigurationDelegate.launch(Ec2LaunchConfigurationDelegate.java:47) at org.eclipse.debug.internal.core.LaunchConfiguration.launch(LaunchConfiguration.java:853) at org.eclipse.debug.internal.core.LaunchConfiguration.launch(LaunchConfiguration.java:703) at org.eclipse.debug.internal.core.LaunchConfiguration.launch(LaunchConfiguration.java:696) at org.eclipse.wst.server.core.internal.Server.startImpl2(Server.java:3051) at org.eclipse.wst.server.core.internal.Server.startImpl(Server.java:3001) at org.eclipse.wst.server.core.internal.Server$StartJob.run(Server.java:300) at org.eclipse.core.internal.jobs.Worker.run(Worker.java:55) Then after a while, another error occur: Unable to publish server configuration files: Unable to copy remote file after trying 4 timeslocal file: 'XXXXXXXX/XXX.zip' Results from first attempt: Unexpected exception: java.net.ConnectException: Connection timed out: connect root cause: java.net.ConnectException: Connection timed out: connect at com.amazonaws.eclipse.ec2.RemoteCommandUtils.copyRemoteFile(RemoteCommandUtils.java:128) at com.amazonaws.eclipse.wtp.tomcat.Ec2TomcatServer.publishServerConfiguration(Ec2TomcatServer.java:172) at com.amazonaws.ec2.cluster.Cluster.publishServerConfiguration(Cluster.java:369) at com.amazonaws.eclipse.wtp.ElasticClusterBehavior.publishServer(ElasticClusterBehavior.java:538) at org.eclipse.wst.server.core.model.ServerBehaviourDelegate.publish(ServerBehaviourDelegate.java:866) at org.eclipse.wst.server.core.model.ServerBehaviourDelegate.publish(ServerBehaviourDelegate.java:708) at org.eclipse.wst.server.core.internal.Server.publishImpl(Server.java:2731) at org.eclipse.wst.server.core.internal.Server$PublishJob.run(Server.java:278) at org.eclipse.core.internal.jobs.Worker.run(Worker.java:55) Can anyone point me to what I'm doing wrong? I followed the tutorials and the video tutorials on youtube exactly. Best Regards ~Jeffrey

    Read the article

  • Development process for an embedded project with significant hardware changes

    - by pierr
    I have a good idea about the Agile development process, but it does not seem to fit well with an embedded project that has significant hardware changes. I will describe below what we are currently doing (ad-hoc, with no defined process yet). The changes are divided into three categories, and a different process is used for each of them:
    Complete hardware change (example: use a different video codec IP): a) study the new IP; b) RTL/FPGA simulation; c) implement the legacy interface - go to b); d) wait until hardware (tape-out) is ready; e) test on the real hardware.
    Hardware improvement (example: enhance the image display quality by improving the underlying algorithm): a) RTL/FPGA simulation; b) wait until hardware is ready and test on the hardware.
    Minor change (example: only change the hardware register mapping): a) wait until hardware is ready and test on the hardware.
    The worry is that we don't have much control over, or confidence in, software maturity across these hardware changes, as the bring-up schedule is always very tight and the customer desires a seamless change when updating to a new version of the hardware. How did you manage this kind of hardware change? Did you solve it with a Hardware Abstraction Layer (HAL)? Did you have automated tests for the HAL layer? How did you test when the hardware platform was not even ready? Do you have well-documented processes for this kind of change?
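    On the HAL question: one common shape for this is an interface per IP block with a real implementation and a simulator implementation, so the software above the HAL can be exercised in automated tests long before tape-out. The sketch below is language-agnostic in spirit (shown in C# for brevity, although an embedded project would more likely use C/C++), and every member name in it is invented for illustration.

    // Illustrative HAL for a video codec IP block; the register addresses and
    // members are made up and would mirror the real register map in practice.
    public interface IVideoCodec
    {
        void WriteRegister(uint address, uint value);
        uint ReadRegister(uint address);
        void StartDecode();
    }

    // Backed by real hardware once silicon/FPGA is available.
    public class HardwareVideoCodec : IVideoCodec
    {
        public void WriteRegister(uint address, uint value) { /* real MMIO write */ }
        public uint ReadRegister(uint address) { /* real MMIO read */ return 0; }
        public void StartDecode() { /* kick the IP block */ }
    }

    // Simulator used by automated tests while waiting for hardware; it only has
    // to model the behaviour the layers above the HAL depend on.
    public class SimulatedVideoCodec : IVideoCodec
    {
        private readonly System.Collections.Generic.Dictionary<uint, uint> regs =
            new System.Collections.Generic.Dictionary<uint, uint>();

        public void WriteRegister(uint address, uint value) { regs[address] = value; }
        public uint ReadRegister(uint address)
        {
            uint value;
            regs.TryGetValue(address, out value);
            return value;
        }
        public void StartDecode() { regs[0x04] = 1; } // pretend a status bit was set
    }

    A register-mapping change then touches only the hardware implementation, and an algorithm change can be validated against the simulator before the new silicon arrives.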

    Read the article

  • What's a good way to teach my son to program Java

    - by Software Monkey
    OK, so I've read through various posts about teaching beginner's to program, and there were some helpful things I will look at more closely. But what I want to know is whether there are any effective tools out there to teach a kid Java specifically? I want to teach him Java specifically because (a) with my strong background in C I feel that's too complex, (b) Java is the other language I know extremely well and therefore I can assist meaningfully without needing to teach myself a new but (to me) useless language, and (c) I feel that managed languages are the future, and lastly (d) Java is one of the simplest of all the languages I know well (aside from basic). I learned in basic, and I am open to teaching that first, but I am unaware of a decent free basic shell for Windows (though I haven't really searched, yet since it's not my first choice), and would anyway want to progress quickly to Java. My son is 8, so that's a couple of years earlier than I started - but he has expressed an interest in learning to program (possibly because I work from home a lot and he sees me programming all the time). If no-one can suggest a tool designed for this purpose, I will probably start him off with text/console based apps to teach the basics, and then progress to GUI building. Oh, one last thing, I am not a fan of IDE's (old school text editor type), so I would not be put off at all by a system that has him typing real code, and would likely prefer that to a toy drag/drop system. EDIT: Just to clarify; I really am specifically after ways to teach him Java; there are already a good many posts with good answers for other language alternatives - but that's not what I am looking for here. EDIT: What about Java frameworks for 2D video games - can anyone recommend any of them from personal experience? I like the idea of him starting with the mechanics in place (main game loop, scoring, etc) and adding the specifics for a game of his own imagining - that's what I did, though for me it was basic on a Commodore VIC-20 and a Sinclair ZX-81.

    Read the article

  • ActiveX Content in a local web page, and "the mark of the web"

    - by Daniel Magliola
    Hi, I'm trying to make a webpage that people will run from their hard drives, locally, and this page will show a .wmv video (which is also stored locally), using Windows Media Player When I run this, IE shows me the "ActiveX Warning" bar at the top, which is what i'm trying to work around. If I load this from a web server, it loads fine, but from the local disk, it won't. Now, apparently, MS has added the Mark of the Web thingy precisely to work around this problem, however, I've been trying for a while to make it work, and it just didn't. I still get the warning bar. Is the Mark of the Web supposed to still work? Or this is some kind of deprecated thing? Am I doing anything wrong? I'm supposedly following all instructions, it looks like: and I've tried placing it before DOCTYPE, between DOCTYPE and <HTML>, right after <HTML>, in the <HEAD> of the document, etc. Nothing seems to work. I've tried this in IE7 and IE8 Any ideas will be GREATLY appreciated. Thanks!!

    Read the article

  • Connecting to a Ghost User in Flex RTMFP

    - by Dan
    I have a simple Flex RTMFP P2P video app in the same mold as the Adobe Cirrus VideoPhone Sample application. A problem I've been encountering in developing this app (the same problem occurs in the sample) is when you try to connect to a ghost Stratus instance i.e you try to call someone whose Stratus id is in the database but who is no longer on the page. So here's an example of what I mean: Let's say you go to the Adobe Stratus sample and connect as Dan. Then open up a new tab, go to the sample again and connect as Fred. If from this point, you (as Fred) call Dan everything will work fine. But, if you close the tab in which you connected as Dan, and then from the Fred tab try to connect to Dan the program will just hang. I would have thought there would be a NetStream event that would be triggered if you tried to connect to a Stratus instance that is not longer online but I can't seem to find anything besides NetStream.Connect.Rejected which doesn't seem to be called. Any help is much appreciated!

    Read the article

  • How can you replace the src of an image that points to mjpeg?

    - by user165623
    I have an application that displays images from network cameras. The application shows multiple images on a single page. Each image container has a button on it that causes jQuery to scale that image up to fill a 640x480 div, versus much smaller when multiple images are present. I change the src of the image with jQuery as you'd expect: $("#imageId").attr("src", "http://cameraUrl.com:6000?width=640&height=480&d="+date.getTime()); the date.getTime() is intended to override the browser's caching. It works for retrieving still images. Sometimes the video changes to the larger resolution; other times it just sticks with the original image scaled up to fit the larger div, as if it is not loading the new URL. This is evident from the fact that it's grainy and the text overlay from the camera is scaled up. Occasionally the image finally loads the higher-resolution version and it works, as evidenced by the image clarity and the text overlay scaling to what it should be. Is there a correct way to force an image src to reload when the src points to a Motion JPEG stream?

    Read the article

  • signalR groups - connecting/disconnecting and sending - am I missing something?

    - by Terry_Brown
    Very new to SignalR, and I have rolled up a very simple app that will take questions for moderation at conferences (felt like a straightforward use case). I have two hubs at the moment: Question (for asking questions) and Speaker (these should receive questions and allow moderation, but that will come later). The solution lives at https://github.com/terrybrown/InterASK. After watching a video by David Fowler/Damian Edwards (http://channel9.msdn.com/Shows/Web+Camps+TV/Damian-Edwards-and-David-Fowler-Demonstrate-SignalR) and another that I can't find the URL for at the moment, I thought I'd go with 'groups' as the concept to keep messages flowing to the right people. I implemented IConnected and IDisconnect as I'd seen in one of the videos, and upon debugging I can see Connect fire (and on reload I can see Disconnect fire), but it seems nothing I do adds a person to a group. The SignalR documentation says "Groups are not persisted on the server so applications are responsible for keeping track of what connections are in what groups so things like group count can be achieved", which I guess is telling me that I need to keep some means (static or otherwise?) of tracking who is in a group? Certainly I don't seem able to send to groups currently, though I have no problem broadcasting to everyone currently connected to the app and invoking the same JS method (two machines on the same page). I suspect I'm just missing something - I read a few of the other questions on here, but none of them seem to mention IConnected/IDisconnect, which tells me these are either new (and nobody is using them yet) or old (and nobody is using them any more). I know this could be considered a subjective question, but what I'm looking for is just a simple means of managing the groups so that I can do what I want: send a question from one hub and have people connected to a different hub receive it - groups felt like the cleanest solution for this. Many thanks folks. Terry
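
    A sketch of the bookkeeping the documentation hints at, written against the early pre-1.0 SignalR API the question describes (Hub with IConnected/IDisconnect, Groups.Add, Context.ConnectionId). Exact namespaces and signatures varied between those builds, and JoinConference is a hypothetical hub method the client would call, so treat this as an outline rather than drop-in code:

    using System.Collections.Concurrent;
    using System.Collections.Generic;
    using System.Threading.Tasks;
    using SignalR.Hubs;   // assumption: the pre-1.0 hub namespace

    public class SpeakerHub : Hub, IConnected, IDisconnect
    {
        // Our own record of who joined which group, since SignalR does not persist groups.
        private static readonly ConcurrentDictionary<string, string> Members =
            new ConcurrentDictionary<string, string>();

        // Hypothetical method the moderator page calls once it knows its conference.
        public Task JoinConference(string conference)
        {
            Members[Context.ConnectionId] = conference;
            return Groups.Add(Context.ConnectionId, conference);
        }

        public Task Connect()
        {
            // Nothing to do yet - the client calls JoinConference after connecting.
            return Task.Factory.StartNew(() => { });
        }

        public Task Reconnect(IEnumerable<string> groups)
        {
            return Task.Factory.StartNew(() => { });
        }

        public Task Disconnect()
        {
            string ignored;
            Members.TryRemove(Context.ConnectionId, out ignored);
            return Task.Factory.StartNew(() => { });
        }
    }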

    Read the article

  • Yii: Multi-language website - best practices.

    - by michal
    Hi, I find Yii a great framework, and the example website created with yiic shell is a good starting point... however it doesn't cover the topic of multi-language websites, unfortunately. The docs cover translating short messages, but not keeping multilingual content... I'm about to start working on a website which needs to be in at least two languages, and I'm wondering what the best way to keep the content for that is... The problem is that the content is mixed extensively with common elements (like embedded video files), and I need to avoid duplicating those common parts... So far I have used an array of arrays containing texts (usually no more than 1-2 short paragraphs), and the view file simply rendered the text from the array. Now I'd like to avoid keeping it in arrays (which requires some attention when escaping double quotation marks and is inconvenient in general...). So, what is the best way to keep those short paragraphs? Should I keep them in the DB like (id | msg_id | language | content) and then select them by msg_id and language? That still requires me to create some msg_ids and embed them into the view file... Is there any recommended paradigm for which Yii has some solutions? Thanks, m.
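
    For what it's worth, the (id | msg_id | language | content) idea from the question might look roughly like this (a MySQL-flavoured sketch; the table, column and msg_id names are only illustrative):

    CREATE TABLE content_translation (
        id       INT UNSIGNED NOT NULL AUTO_INCREMENT PRIMARY KEY,
        msg_id   VARCHAR(64)  NOT NULL,  -- stable key referenced from the view
        language VARCHAR(5)   NOT NULL,  -- e.g. 'en', 'pl'
        content  TEXT         NOT NULL,
        UNIQUE KEY uq_msg_language (msg_id, language)
    );

    -- Fetch one paragraph for the current language:
    SELECT content
    FROM content_translation
    WHERE msg_id = 'home.intro' AND language = 'en';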

    Read the article

  • Complex query making site extremely slow

    - by Basit
    select SQL_CALC_FOUND_ROWS DISTINCT media.*, username from album as album, album_permission as permission, user as user, media as media, word_tag as word_tag, tag as tag where ((media.album_id = album.album_id and album.private = 'yes' and album.album_id = permission.album_id and (permission.email = '' or permission.user_id = '')) or (media.album_id = album.album_id and album.private = 'no') or media.album_id = '0') and media.status = '1' and media.user_id = user.user_id and word_tag.media_id = media.media_id and word_tag.tag_id = tag.tag_id and tag.name in ('justin','bieber','malfunction','katherine','heigl','wardrobe','cinetube') and media.media_type = 'video' and media.media_id not in ('YHL6a5z8MV4') group by media.media_id order by RAND() # there is a LIMIT too, of 20 rows. I don't know where to begin explaining this query, so please forgive me and ask if you have any questions. The explanation follows: SQL_CALC_FOUND_ROWS calculates how many rows there are in total and will be used for pagination, so it counts all matching records even though only 20 are shown. DISTINCT stops repeated rows from being displayed. username comes from the user table. album and album_permission: this checks whether the album is private and, if it is, whether the user has permission, by user_id. I think the rest is easy to understand, but if you need to know more about it, please ask. I'm really frustrated by this query, and the site is very slow or sometimes won't open at all because of it. Please help.
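
    As a side note on the pagination mechanism described above, SQL_CALC_FOUND_ROWS is normally paired with a follow-up FOUND_ROWS() call, roughly like this (a generic MySQL sketch, not the question's full query):

    SELECT SQL_CALC_FOUND_ROWS media.*
    FROM media
    WHERE media.status = '1'
    ORDER BY RAND()
    LIMIT 20;

    -- Total number of rows the previous query would have matched without LIMIT,
    -- used to work out how many pages there are:
    SELECT FOUND_ROWS();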

    Read the article

  • Save UIwebview contents to photo gallery

    - by user307410
    There's a video tutorial on YouTube that shows how to do this. It consists of a UIWebView and a toolbar button to save the contents. I haven't had any luck making it work. Could someone have a look and see if they can get it working? Many thanks in advance. http://www.youtube.com/watch?v=gDPca3JIc_s&feature=player_embedded#

    ///////////////////////////////////////////////////////////////////
    //
    //  SaveWebViewController.h
    //  SaveWeb
    //
    //  Copyright MyCompanyName 2010. All rights reserved.
    //
    #import <UIKit/UIKit.h>

    @interface SaveWebViewController : UIViewController {
        IBOutlet UIWebView *webView;
    }
    @property (nonatomic, retain) IBOutlet UIWebView *webView;
    - (IBAction)saveWeb:(id)sender;
    @end

    ////////////////////////////////////////////////////////////////////////////////
    //
    //  SaveWebViewController.m
    //  SaveWeb
    //
    //  Copyright MyCompanyName 2010. All rights reserved.
    //
    #import <QuartzCore/QuartzCore.h>   // needed for -[CALayer renderInContext:]
    #import "SaveWebViewController.h"

    @implementation SaveWebViewController

    @synthesize webView;

    - (IBAction)saveWeb:(id)sender {
        UIGraphicsBeginImageContext(webView.frame.size);
        [self.view.layer renderInContext:UIGraphicsGetCurrentContext()];
        UIImage *viewImage = UIGraphicsGetImageFromCurrentImageContext();
        UIGraphicsEndImageContext();
        UIImageWriteToSavedPhotosAlbum(viewImage, nil, nil, nil);
    }

    // The designated initializer. Override to perform setup that is required before the view is loaded.
    - (id)initWithNibName:(NSString *)nibNameOrNil bundle:(NSBundle *)nibBundleOrNil {
        if (self = [super initWithNibName:nibNameOrNil bundle:nibBundleOrNil]) {
            // Custom initialization
        }
        return self;
    }

    // Implement loadView to create a view hierarchy programmatically, without using a nib.
    - (void)loadView {
    }

    // Implement viewDidLoad to do additional setup after loading the view, typically from a nib.
    - (void)viewDidLoad {
        [super viewDidLoad];
        [webView loadRequest:[NSURLRequest requestWithURL:[NSURL URLWithString:@"http://google.com"]]];
    }

    // Override to allow orientations other than the default portrait orientation.
    - (BOOL)shouldAutorotateToInterfaceOrientation:(UIInterfaceOrientation)interfaceOrientation {
        // Return YES for supported orientations
        return (interfaceOrientation == UIInterfaceOrientationPortrait);
    }

    - (void)didReceiveMemoryWarning {
        // Releases the view if it doesn't have a superview.
        [super didReceiveMemoryWarning];
        // Release any cached data, images, etc. that aren't in use.
    }

    - (void)viewDidUnload {
        // Release any retained subviews of the main view.
        // e.g. self.myOutlet = nil;
    }

    - (void)dealloc {
        [super dealloc];
    }

    @end

    Read the article

  • Putting a MovieMaterial behind a DAE model in Papervision3D

    - by didibus
    Hi, I'm doing a project using FLARManager augmented reality and the Papervision3D library. Unfortunately, Papervision is giving me a lot of problems. My scene contains a DAE model and a plane. The plane has a MovieMaterial and is playing a video through FLVPlayback. The DAE and the plane are both inside the same DisplayObject3D container. FLARManager transforms the container so that everything appears at the angle of the marker. My DAE model is a TV, and the screen of the TV is transparent. I want my plane to sit inside the DAE model, so that the movie playing on the plane's material appears to be what is playing on the TV. The problem is that, even though the plane has a lower Z index than the TV, it always appears in front of the TV. How do I make my plane and its MovieMaterial appear behind the TV, so that some of its corners are cut off by the TV and the transparent part of the TV lets me see the movie? If that's impossible, does anyone have an idea of how I could get the desired effect of having a movie play on the screen of my DAE TV model? Thank you.

    Read the article

  • Why does Facebook iOS dialog for feed publish disappear after login?

    - by Mason G. Zhwiti
    I'm trying to use the Facebook iOS "feed" dialog call to let my app's users share something on their Facebook wall. When the Facebook app is not installed, it attempts to let them authenticate within the app (presumably using a web view). The issue is that this dialog just disappears once they authenticate; I was expecting the web view to return to the "feed" sharing view. How do I detect that they authenticated so that I can re-open the feed dialog? I've added fbDidLogin to my app delegate, but it's not being called. (I wasn't sure if this would normally be called or not, but I read several people recommending it.)

    SBJSON *jsonWriter = [SBJSON new];

    // The action links to be shown with the post in the feed
    NSArray *actionLinks = [NSArray arrayWithObjects:
        [NSDictionary dictionaryWithObjectsAndKeys:
            @"More Videos", @"name",
            @"http://www.example.com/", @"link",
            nil],
        nil];
    NSString *actionLinksStr = [jsonWriter stringWithObject:actionLinks];

    NSMutableDictionary *params = [NSMutableDictionary dictionaryWithObjectsAndKeys:
        @"Test Caption", @"caption",
        @"Test Description", @"description",
        @"https://s3.amazonaws.com/example/images/test.png", @"source",
        self.video.blogLink, @"link",
        @"01234567890123", @"app_id",
        actionLinksStr, @"actions",
        nil];

    [delegate facebook].sessionDelegate = delegate;
    [[delegate facebook] dialog:@"feed" andParams:params andDelegate:self];

    Read the article

  • AS3 : RegExp exec method in loop problem

    - by Boun
    Hi everyone, I need some help with RegExp in AS3. I have a simple pattern: patternYouTube = new RegExp( "v(?:\/|=)([A-Z0-9_-]+)", "gi" ); This pattern looks for the YouTube video id. For example:

    var tmpUrl : String;
    var result : Object;
    var toto : Array = new Array();
    toto = ["http://www.youtube.com/v/J-vCxmjCm-8&autoplay=1",
            "http://www.youtube.com/v/xFTRnE1WBmU&autoplay=1"];
    var i : uint;
    for (i = 0; i < toto.length; i++) {
        tmpUrl = toto[i];
        result = patternYouTube.exec(tmpUrl);
        if (result != null && result.length != 0) {
            trace(result);
        }
    }

    When i == 0, it works perfectly; Flash returns: v/J-vCxmjCm-8,J-vCxmjCm-8. When i == 1, it fails; Flash returns: null. When I reverse the two strings in my array, such as: toto = [ "http://www.youtube.com/v/xFTRnE1WBmU&autoplay=1", "http://www.youtube.com/v/J-vCxmjCm-8&autoplay=1" ]; then when i == 0 it works perfectly (Flash returns xFTRnE1WBmU) and when i == 1 it fails (Flash returns null). Do you have any idea what the problem in the loop is?
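
    A likely explanation, sketched here with the question's own variables: with the "g" flag an AS3 RegExp keeps its lastIndex between calls to exec(), even when the next call is on a different string, so the second search starts beyond the position where the second URL's id sits and returns null. Resetting lastIndex before each exec() (or dropping the "g" flag) avoids this:

    for (i = 0; i < toto.length; i++) {
        tmpUrl = toto[i];
        // exec() with the "g" flag resumes from patternYouTube.lastIndex,
        // which still holds the position reached in the previous string.
        patternYouTube.lastIndex = 0;
        result = patternYouTube.exec(tmpUrl);
        if (result != null && result.length != 0) {
            trace(result[1]); // the captured video id
        }
    }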

    Read the article
