Search Results

Search found 60932 results on 2438 pages for 'data operations'.


  • How to make the tokenizer detect empty spaces while using strtok()

    - by Shadi Al Mahallawy
    I am designing a c++ program, somewhere in the program i need to detect if there is a blank(empty token) next to the token used know eg. if(token1==start) { token2=strtok(NULL," "); if(token2==NULL) {LCCTR=0;} else {LCCTR=atoi(token2);} so in the previous peice token1 is pointing to start , and i want to check if there is anumber next to the start , so I used token2=strtok(NULL," ") to point to the next token but unfortunattly the strtok function cannot detect empty spaces so it gives me an error at run time"INVALID NULL POINTER" how can i fix it or is there another function to use to detect empty spaces #include <iostream> #include<string> #include<map> #include<iomanip> #include<fstream> #include<ctype.h> using namespace std; const int MAX=300; int LCCTR; int START(char* token1); char* PASS1(char*token1); void tokinizer() { ifstream in; ofstream out; char oneline[MAX]; in.open("infile.txt"); out.open("outfile.txt"); if(in.is_open()) { char *token1; in.getline(oneline,MAX); token1 = strtok(oneline," \t"); START (token1); //cout<<'\t'; while(token1!=NULL) { //PASS1(token1); //cout<<token1<<" "; token1=strtok(NULL," \t"); if(NULL==token1) {//cout<<endl; //cout<<LCCTR<<'\t'; in.getline(oneline,MAX); token1 = strtok(oneline," \t"); } } } in.close(); out.close(); } int START(char* token1) { string start("START"); char*token2; if(token1 != start) {LCCTR=0;} else if(token1==start) { token2=strchr(token1+2,' '); cout<<token2; if(token2==NULL) {LCCTR=0;} else {LCCTR=atoi(token2); if(atoi(token2)>9999||atoi(token2)<0){cout<<"IVALID STARTING ADDRESS"<<endl;exit(1);} } } return LCCTR; } char* PASS1 (char*token1) { map<string,int> operations; map<string,int>symtable; map<string,int>::iterator it; pair<map<string,int>::iterator,bool> ret; char*token3=NULL; char*token2=NULL; string test; string comp(" "); string start("START"); string word("WORD"); string byte("BYTE"); string resb("RESB"); string resw("RESW"); string end("END"); operations["ADD"] = 18; operations["AND"] = 40; operations["COMP"] = 28; operations["DIV"] = 24; operations["J"] = 0X3c; operations["JEQ"] =30; operations["JGT"] =34; operations["JLT"] =38; operations["JSUB"] =48; operations["LDA"] =00; operations["LDCH"] =50; operations["LDL"] =55; operations["LDX"] =04; operations["MUL"] =20; operations["OR"] =44; operations["RD"] =0xd8; operations["RSUB"] =0x4c; operations["STA"] =0x0c; operations["STCH"] =54; operations["STL"] =14; operations["STSW"] =0xe8; operations["STX"] =10; operations["SUB"] =0x1c; operations["TD"] =0xe0; operations["TIX"] =0x2c; operations["WD"] =0xdc; if(operations.find("ADD")->first==token1) { token2=strtok(NULL," "); //test=token2; cout<<token2; //if(test.empty()){cout<<"MISSING OPERAND"<<endl;exit(1);} //else{LCCTR=LCCTR+3;} } /*else if(operations.find("AND")->first==token1) { token2=strtok(NULL," "); test=token2; if(test.empty()){cout<<"MISSING OPERAND"<<endl;exit(1);} else{LCCTR=LCCTR+3;} } else if(operations.find("COMP")->first==token1) { token2=token1+5; test=token2; if(test.empty()){cout<<"MISSING OPERAND"<<endl;exit(1);} else{LCCTR=LCCTR+3;} } else if(operations.find("DIV")->first==token1) { token2=token1+4; test=token2; if(test.empty()){cout<<"MISSING OPERAND"<<endl;exit(1);} else{LCCTR=LCCTR+3;} } else if(operations.find("J")->first==token1) { token2=token1+2; test=token2; if(test.empty()){cout<<"MISSING OPERAND"<<endl;exit(1);} else{LCCTR=LCCTR+3;} } else if(operations.find("JEQ")->first==token1) { token2=token1+5; test=token2; if(test.empty()){cout<<"MISSING OPERAND"<<endl;exit(1);} 
else{LCCTR=LCCTR+3;} } else if(operations.find("JGT")->first==token1) { token2=strtok(NULL," "); test=token2; if(test.empty()){cout<<"MISSING OPERAND"<<endl;exit(1);} else{LCCTR=LCCTR+3;} } else if(operations.find("JLT")->first==token1) { token2=token1+6; test=token2; if(test.empty()){cout<<"MISSING OPERAND"<<endl;exit(1);} else{LCCTR=LCCTR+3;} } else if(operations.find("JSUB")->first==token1) { token2=token1+6; test=token2; if(test.empty()){cout<<"MISSING OPERAND"<<endl;exit(1);} else{LCCTR=LCCTR+3;} } else if(operations.find("LDA")->first==token1) { token2=token1+6; test=token2; if(test.empty()){cout<<"MISSING OPERAND"<<endl;exit(1);} else{LCCTR=LCCTR+3;} } else if(operations.find("LDCH")->first==token1) { token2=token1+6; test=token2; if(test.empty()){cout<<"MISSING OPERAND"<<endl;exit(1);} else{LCCTR=LCCTR+3;} } else if(operations.find("LDL")->first==token1) { token2=token1+6; test=token2; if(test.empty()){cout<<"MISSING OPERAND"<<endl;exit(1);} else{LCCTR=LCCTR+3;} } else if(operations.find("LDX")->first==token1) { token2=token1+6; test=token2; if(test.empty()){cout<<"MISSING OPERAND"<<endl;exit(1);} else{LCCTR=LCCTR+3;} } else if(operations.find("MUL")->first==token1) { token2=token1+6; test=token2; if(test.empty()){cout<<"MISSING OPERAND"<<endl;exit(1);} else{LCCTR=LCCTR+3;} } else if(operations.find("OR")->first==token1) { token2=token1+6; test=token2; if(test.empty()){cout<<"MISSING OPERAND"<<endl;exit(1);} else{LCCTR=LCCTR+3;} } else if(operations.find("RD")->first==token1) { token2=token1+6; test=token2; if(test.empty()){cout<<"MISSING OPERAND"<<endl;exit(1);} else{LCCTR=LCCTR+3;} } else if(operations.find("RSUB")->first==token1) { token2=token1+6; test=token2; if(test.empty()){cout<<"MISSING OPERAND"<<endl;exit(1);} else{LCCTR=LCCTR+3;} } else if(operations.find("STA")->first==token1) { token2=token1+6; test=token2; if(test.empty()){cout<<"MISSING OPERAND"<<endl;exit(1);} else{LCCTR=LCCTR+3;} } else if(operations.find("STCH")->first==token1) { token2=token1+6; test=token2; if(test.empty()){cout<<"MISSING OPERAND"<<endl;exit(1);} else{LCCTR=LCCTR+3;} } else if(operations.find("STL")->first==token1) { token2=token1+6; test=token2; if(test.empty()){cout<<"MISSING OPERAND"<<endl;exit(1);} else{LCCTR=LCCTR+3;} } else if(operations.find("STSW")->first==token1) { token2=token1+6; test=token2; if(test.empty()){cout<<"MISSING OPERAND"<<endl;exit(1);} else{LCCTR=LCCTR+3;} } else if(operations.find("STX")->first==token1) { token2=token1+6; test=token2; if(test.empty()){cout<<"MISSING OPERAND"<<endl;exit(1);} else{LCCTR=LCCTR+3;} } else if(operations.find("SUB")->first==token1) { token2=token1+6; test=token2; if(test.empty()){cout<<"MISSING OPERAND"<<endl;exit(1);} else{LCCTR=LCCTR+3;} } else if(operations.find("TD")->first==token1) { token2=token1+6; test=token2; if(test.empty()){cout<<"MISSING OPERAND"<<endl;exit(1);} else{LCCTR=LCCTR+3;} } else if(operations.find("TIX")->first==token1) { token2=token1+6; test=token2; if(test.empty()){cout<<"MISSING OPERAND"<<endl;exit(1);} else{LCCTR=LCCTR+3;} } else if(operations.find("WD")->first==token1) { token2=token1+6; test=token2; if(test.empty()){cout<<"MISSING OPERAND"<<endl;exit(1);} else{LCCTR=LCCTR+3;} }*/ //else if( if(word==token1) {LCCTR=LCCTR+3;} else if(byte==token1) {string test; token2=token1+7; test=token2; if(test[0]=='C') {token3=token1+10; test=token3; if(test.length()>15) {cout<<"ERROR"<<endl; exit(1);} } else if(test[0]=='X') {token3=token1+10; test=token3; if(test.length()>14) {cout<<"ERROR"<<endl; exit(1);} } 
LCCTR=LCCTR+test.length(); } else if(resb==token1) {token3=token1+5; LCCTR=LCCTR+atoi(token3);} else if(resw==token1) {token3=token1+5; LCCTR=LCCTR+3*atoi(token3);} else if(end==token1) {exit(1);} /*else { test=token1; int last=test.length(); if(token1==start||test[0]=='C'||test[0]=='X'||ispunct(test[last])||isdigit(test[0])||isdigit(test[1])||isdigit(test[2])||isdigit(test[3])){} else { token2=strtok(NULL," "); //test=token2; cout<<token2; if(token2!=NULL) { symtable.insert( pair<string,int>(token1,LCCTR)); for(it=symtable.begin() ;it!=symtable.end() ;++it) {/*cout<<"symbol: "<<it->first<<" LCCTR: "<<it->second<<endl;} } else{} } }*/ return token3; } int main() { tokinizer(); return 0; }
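
    A side note on the two failure modes above: strtok() never returns an empty token (it collapses runs of delimiters), and it returns NULL at the end of the line - constructing or assigning a std::string from that NULL pointer is what raises the "invalid null pointer" error at run time. So either test the pointer before using it, or split the line yourself so that empty fields are preserved. A minimal C++ sketch of such a splitter (function and variable names are illustrative, not part of the original program):

    #include <iostream>
    #include <string>
    #include <vector>

    // Split on a single delimiter, keeping the empty fields that strtok() would skip.
    std::vector<std::string> splitKeepingEmpties(const std::string& line, char delim)
    {
        std::vector<std::string> fields;
        std::string::size_type start = 0;
        while (true) {
            std::string::size_type pos = line.find(delim, start);
            if (pos == std::string::npos) {
                fields.push_back(line.substr(start));            // last field, possibly ""
                break;
            }
            fields.push_back(line.substr(start, pos - start));   // possibly ""
            start = pos + 1;
        }
        return fields;
    }

    int main()
    {
        // Two spaces after START mean the operand field is empty.
        for (const std::string& f : splitKeepingEmpties("START  1000", ' '))
            std::cout << '[' << f << "]\n";                      // prints [START] [] [1000]
        return 0;
    }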

    Read the article

  • Beyond S&OP: Integrated Business Planning

    - by Paul Homchick
    In most corporations, planning is done at the department level — leaving disconnects and gaps across different departments. Finance sets revenue and profit goals with minimal validation from Manufacturing that the company has the resources, material, capacity, or demand to reach these goals. On the operations side, Manufacturing is developing plans to balance demand and supply but seldom knows if the resulting "plan" will meet the budgets on which the company's revenue and profit goals are based. The Sales department agrees to quotas that meet Finance's revenue goals without a complete understanding of what Manufacturing can deliver. Integrated Business Planning (IBP) bridges these gaps in corporate planning systems. Integrated Business Planning integrates the financial planning provided by EPM systems with the operations planning provided by Sales and Operations Planning solutions. This means that revenue goals and budgets are validated against a bottom-up operating plan, and that the operating plan is reconciled against financial goals. When detailed changes are made to the operations plan, planners can immediately see the big-picture impact of the changes. IBP also addresses one of the CFO's big concerns—the reliability of the revenue forecast. Operating plans are updated daily or weekly from a precise forecast based on current market conditions. These updated plans are then made available so that financial analysts are working with data that best represents what is going to happen - not what they projected would happen based on last quarter's data. For a discussion in more depth, see my article: Improve Reliability of Financial Forecasts with Integrated Business Planning in Supply & Demand Chain Executive magazine.

    Read the article

  • The Best Data Integration for Exadata Comes from Oracle

    - by maria costanzo
    Oracle Data Integrator and Oracle GoldenGate offer unique and optimized data integration solutions for Oracle Exadata. For example, customers who choose to feed their data warehouse or reporting database with near real-time data throughout the day can do so without decreasing the performance or availability of source and target systems. And if you ask why real-time, the short answer is: in today’s fast-paced, always-on world, business decisions need to use more relevant, timely data to be able to act fast and seize opportunities. A longer response to the "why real-time" question can be found in a related blog post. If we look at the solution architecture, as shown in the diagram below, Oracle Data Integrator and Oracle GoldenGate are both uniquely designed to take full advantage of the power of the database and to eliminate unnecessary middle-tier components. Oracle Data Integrator (ODI) is the best bulk data loading solution for Exadata. ODI is the only ETL platform that can leverage the full power of Exadata, integrate directly on the Exadata machine without any additional hardware, and by far provides the simplest setup and fastest overall performance on an Exadata system. We regularly see customers achieving a 5-10 times boost when they move their ETL to ODI on Exadata. For some companies the performance gain is even higher. For example, a large insurance company did a proof of concept comparing ODI with a traditional ETL tool (one of the market leaders) on Exadata. The same process that took 5 hours and 11 minutes to complete using the competing ETL product took 7 minutes and 20 seconds with ODI. Oracle Data Integrator was 42 times faster than the conventional ETL when running on Exadata. This shows that Oracle's own data integration offering helps you to gain the most out of your Exadata investment with a truly optimized solution. GoldenGate is the best solution for streaming data from heterogeneous sources into Exadata in real time. Oracle GoldenGate can also be used together with Oracle Data Integrator for hybrid use cases that demand non-invasive capture and high-speed real-time replication. Oracle GoldenGate enables real-time data feeds from heterogeneous sources non-invasively, and delivers them to the staging area on the target Exadata system. ODI runs directly on Exadata to use the database engine's power to perform in-database transformations. Enterprise Data Quality is integrated with Oracle Data Integrator and enables ODI to load trusted data into the data warehouse tables. Only Oracle can offer all these technical benefits wrapped into a single intelligent data warehouse solution that runs on Exadata. Compared to traditional ETL with add-on CDC, this solution offers: non-invasive data capture from heterogeneous sources, avoiding any performance impact on the source; no mid-tier, with set-based transformations using database power; mini-batches throughout the day or bulk processing nightly, which means maximum availability for the DW; and an integrated solution with Enterprise Data Quality that enables leveraging trusted data in the data warehouse. In addition to Starwood Hotels and Resorts, Morrison Supermarkets, the United Kingdom’s fourth-largest food retailer, has seen the power of this solution for its new BI platform and shared its story with us. Morrisons needed to analyze data across a large number of manufacturing, warehousing, retail, and financial applications with the goal of achieving a single view into operations for improved customer service.
The retailer deployed Oracle GoldenGate and Oracle Data Integrator to bring new data into Oracle Exadata in near real-time and replicate the data into reporting structures within the data warehouse—extending visibility into operations. Using Oracle's data integration offering for Exadata, Morrisons produced financial reports in seconds, rather than minutes, and improved staff productivity and agility. You can read more about Morrison’s success story here and hear from Starwood here. From an Irem Radzik article.

    Read the article

  • Data access pattern, combining push and pull?

    - by andlju
    I need some advice on what kind of pattern(s) I should use for pushing/pulling data into my application. I'm writing a rule-engine that needs to hold quite a large amount of data in-memory in order to be efficient enough. I have some rather conflicting requirements; It is not acceptable for the engine to always have to wait for a full pre-load of all data before it is functional. Only fetching and caching data on-demand will lead to the engine taking too long before it is running quickly enough. An external event can trigger the need for specific parts of the data to be reloaded. Basically, I think I need a combination of pushing and pulling data into the application. A simplified version of my current "pattern" looks like this (in psuedo-C# written in notepad): // This interface is implemented by all classes that needs the data interface IDataSubscriber { void RegisterData(Entity data); } // This interface is implemented by the data access class interface IDataProvider { void EnsureLoaded(Key dataKey); void RegisterSubscriber(IDataSubscriber subscriber); } class MyClassThatNeedsData : IDataSubscriber { IDataProvider _provider; MyClassThatNeedsData(IDataProvider provider) { _provider = provider; _provider.RegisterSubscriber(this); } public void RegisterData(Entity data) { // Save data for later StoreDataInCache(data); } void UseData(Key key) { // Make sure that the data has been stored in cache _provider.EnsureLoaded(key); Entity data = GetDataFromCache(key); } } class MyDataProvider : IDataProvider { List<IDataSubscriber> _subscribers; // Make sure that the data for key has been loaded to all subscribers public void EnsureLoaded(Key key) { if (HasKeyBeenMarkedAsLoaded(key)) return; PublishDataToSubscribers(key); MarkKeyAsLoaded(key); } // Force all subscribers to get a new version of the data for key public void ForceReload(Key key) { PublishDataToSubscribers(key); MarkKeyAsLoaded(key); } void PublishDataToSubscribers(Key key) { Entity data = FetchDataFromStore(key); foreach(var subscriber in _subscribers) { subscriber.RegisterData(data); } } } // This class will be spun off on startup and should make sure that all data is // preloaded as quickly as possible class MyPreloadingThread { IDataProvider _provider; MyPreloadingThread(IDataProvider provider) { _provider = provider; } void RunInBackground() { IEnumerable<Key> allKeys = GetAllKeys(); foreach(var key in allKeys) { _provider.EnsureLoaded(key); } } } I have a feeling though that this is not necessarily the best way of doing this.. Just the fact that explaining it seems to take two pages feels like an indication.. Any ideas? Any patterns out there I should have a look at?
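
    What is described above is essentially a read-through cache combined with event-driven invalidation and a background warm-up pass, so it may help to compare it against the simplest form of that pattern. A compact sketch of that shape (written here in C++ rather than the pseudo-C# above; all names are illustrative):

    #include <functional>
    #include <mutex>
    #include <string>
    #include <unordered_map>
    #include <vector>

    struct Entity { std::string payload; };

    class EntityCache {
    public:
        explicit EntityCache(std::function<Entity(const std::string&)> loader)
            : loader_(std::move(loader)) {}

        // Pull: callers ask for a key; the store is hit only on the first request.
        Entity Get(const std::string& key) {
            std::lock_guard<std::mutex> lock(mutex_);
            auto it = cache_.find(key);
            if (it == cache_.end())
                it = cache_.emplace(key, loader_(key)).first;
            return it->second;
        }

        // Push: an external event forces the next Get for this key to reload.
        void Invalidate(const std::string& key) {
            std::lock_guard<std::mutex> lock(mutex_);
            cache_.erase(key);
        }

        // Warm-up: a startup thread can walk all known keys to pre-populate the cache.
        void Preload(const std::vector<std::string>& keys) {
            for (const auto& key : keys) Get(key);
        }

    private:
        std::function<Entity(const std::string&)> loader_;
        std::unordered_map<std::string, Entity> cache_;
        std::mutex mutex_;
    };

    int main() {
        EntityCache cache([](const std::string& key) { return Entity{"data for " + key}; });
        cache.Preload({"ruleA", "ruleB"});   // background warm-up in the real system
        Entity e = cache.Get("ruleA");       // served from the cache
        cache.Invalidate("ruleA");           // next Get("ruleA") reloads from the store
        return 0;
    }

    The subscriber-callback variant in the question adds a push channel on top of this so that consumers holding their own copies get refreshed; that extra machinery is mainly worthwhile when consumers cannot afford to call back into a shared cache on every use.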

    Read the article

  • Import csv data (SDK iphone)

    - by Ni
    I am new to cocoa. I have been working on these stuff for a few days. For the following code, i can read all the data in the string, and successfully get the data for plot. NSMutableArray *contentArray = [NSMutableArray array]; NSString *filePath = @"995,995,995,995,995,995,995,995,1000,997,995,994,992,993,992,989,988,987,990,993,989"; NSArray *myText = [filePath componentsSeparatedByString:@","]; NSInteger idx; for (idx = 0; idx < myText.count; idx++) { NSString *data =[myText objectAtIndex:idx]; NSLog(@"%@", data); id x = [NSNumber numberWithFloat:0+idx*0.002777778]; id y = [NSDecimalNumber decimalNumberWithString:data]; [contentArray addObject: [NSMutableDictionary dictionaryWithObjectsAndKeys:x, @"x", y, @"y", nil]]; } self.dataForPlot = contentArray; then, i try to load the data from csv file. the data in Data.csv file has the same value and the same format as 995,995,995,995,995,995,995,995,1000,997,995,994,992,993,992,989,988,987,990,993,989. I run the code, it is supposed to give the same graph output. however, it seems that the data is not loaded from csv file successfully. i can not figure out what's wrong with my code. NSMutableArray *contentArray = [NSMutableArray array]; NSString *filePath = [[NSBundle mainBundle] pathForResource:@"Data" ofType:@"csv"]; NSString *Data = [NSString stringWithContentsOfFile:filePath encoding:NSUTF8StringEncoding error:nil ]; if (Data) { NSArray *myText = [Data componentsSeparatedByString:@","]; NSInteger idx; for (idx = 0; idx < myText.count; idx++) { NSString *data =[myText objectAtIndex:idx]; NSLog(@"%@", data); id x = [NSNumber numberWithFloat:0+idx*0.002777778]; id y = [NSDecimalNumber decimalNumberWithString:data]; [contentArray addObject: [NSMutableDictionary dictionaryWithObjectsAndKeys:x, @"x", y, @"y",nil]]; } self.dataForPlot = contentArray; } The only difference is NSString *filePath = [[NSBundle mainBundle] pathForResource:@"Data" ofType:@"csv"]; NSString *Data = [NSString stringWithContentsOfFile:filePath encoding:NSUTF8StringEncoding error:nil ]; if (data){ } did i do anything wrong here?? Thanks for your help!!!!

    Read the article

  • POST data not being received

    - by Alexander
    I've got an iPhone app that is supposed to send POST data to my server to register the device in a MySQL database so we can send notifications etc. to it. It sends its unique identifier, device name, token, and a few other small things like passwords and usernames as a POST request to our server. The problem is that sometimes the server doesn't receive the data. And by this I mean it's not just receiving blank values for the POST inputs; it's not receiving ANY POST data at all. I am logging all POST inputs to my server into some log files, and when the script that relies on the POST data from the device fails (detects no data) I notice that it's because NO POST data was sent. Is this a problem on the server, like refusing data or something, or does this have to be on the client's side? What could be causing this?

    Read the article

  • Oracle Big Data Learning Library - Click on LEARN BY PRODUCT to Open Page

    - by chberger
    Oracle Big Data Learning Library... Learn about Oracle Big Data, Data Science, Learning Analytics, Oracle NoSQL Database, and more! Oracle Big Data Essentials - attend this Oracle University course! Using Oracle NoSQL Database - attend this Oracle University class! Oracle and Big Data on OTN - see the latest resources on OTN. Learn by product:
    Oracle Big Data Appliance: Oracle Big Data and Data Science Basics; Meeting the Challenge of Big Data; Oracle Big Data Tutorial Video Series; Oracle MoviePlex - a Big Data End-to-End Series of Demonstrations; Oracle Big Data Overview; Oracle Big Data Essentials; Data Mining
    Oracle NoSQL Database: Oracle NoSQL Database Tutorial Videos; Oracle NoSQL Database Tutorial Series; Oracle NoSQL Database Release 2 New Features; Using Oracle NoSQL Database
    Exalytics: Enterprise Manager 12c R3: Manage Exalytics; Setting Up and Running Summary Advisor
    Oracle R Enterprise: Oracle R Enterprise Tutorial Series
    Oracle Big Data Connectors: Integrate All Your Data with Oracle Big Data Connectors; Using Oracle Direct Connector for HDFS to Read Data from HDFS; Using Oracle R Connector for Hadoop to Analyze Data
    Oracle Business Intelligence Enterprise Edition: Oracle Business Intelligence; Oracle BI 11g R1: Create Analyses and Dashboards (4-day class); Oracle BI Publisher 11g R1: Fundamentals (3-day class); Oracle BI 11g R1: Build Repositories (5-day class)

    Read the article

  • Order of operations to render VBO to FBO texture and then rendering FBO texture full quad

    - by cyberdemon
    I've just started using OpenGL with C# via the OpenTK library. I've managed to successfully render my game world using VBOs. I now want to create a pixelated effect by rendering the frame to an offscreen FBO at half my GameWindow size and then rendering that FBO to a full-screen quad. I've been looking at the OpenTK example here: http://www.opentk.com/doc/graphics/frame-buffer-objects ...but the result is a black form. I'm not sure which parts of the example code belong in the OnLoad event and which in OnRenderFrame. Can someone please tell me if the below code shows the correct order of operations?
    OnLoad {
      // VBO.
      // DataArrayBuffer GenBuffers/BindBuffer/BufferData
      // ElementArrayBuffer GenBuffers/BindBuffer/BufferData
      // ColourArrayBuffer GenBuffers/BindBuffer/BufferData
      // FBO.
      // ColourTexture GenTextures/BindTexture/TexParameterx4/TexImage2D
      // Create FBO.
      // Textures Ext.GenFramebuffers/Ext.BindFramebuffer/Ext.FramebufferTexture2D/Ext.FramebufferRenderbuffer
    }
    OnRenderFrame {
      // Use FBO buffer.
      Ext.BindFramebuffer(FBO)
      GL.Clear
      // Set viewport to FBO dimensions.
      GL.DrawBuffer((DrawBufferMode)FramebufferAttachment.ColorAttachment0Ext)
      // Bind VBO arrays.
      GL.BindBuffer(ColourArrayBuffer)
      GL.ColorPointer
      GL.EnableClientState(ColorArray)
      GL.BindBuffer(DataArrayBuffer)
      // If world changed
      GL.BufferData(DataArrayBuffer)
      GL.VertexPointer
      GL.EnableClientState(VertexArray)
      GL.BindBuffer(ElementArrayBuffer)
      // Render VBO.
      GL.DrawElements
      // Bind visible buffer.
      GL.Ext.BindFramebuffer(0)
      GL.DrawBuffer(Back)
      GL.Clear
      // Set camera to view texture.
      GL.BindTexture(ColourTexture)
      // Render FBO texture
      GL.Begin(Quads)
      // Draw texture on quad
      // TexCoord2/Vertex2
      GL.End
      SwapBuffers
    }
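
    For reference, the per-frame ordering outlined above is broadly right; two common causes of a black output are leaving the viewport at the FBO size when drawing back to the window, and not unbinding the FBO before drawing the quad. Below is a minimal sketch of the per-frame flow written against plain C-style OpenGL calls rather than OpenTK's GL/GL.Ext wrappers, assuming the FBO, its colour texture (typically created with GL_NEAREST filtering for a pixelated look) and the VBOs were all set up once in OnLoad; draw_world_vbos() and draw_fullscreen_quad() stand in for the existing drawing code:

    /* Illustrative only: per-frame render-to-texture order, assuming a GL context
     * and an extension loader (e.g. GLEW) are already initialised. */
    #include <GL/glew.h>

    void draw_world_vbos(void);       /* existing VBO rendering (ColorPointer, VertexPointer, DrawElements) */
    void draw_fullscreen_quad(void);  /* textured quad covering the window */

    void render_frame(GLuint fbo, GLuint fbo_texture,
                      int fbo_width, int fbo_height,
                      int window_width, int window_height)
    {
        /* 1. Render the scene into the half-size offscreen target. */
        glBindFramebuffer(GL_FRAMEBUFFER, fbo);
        glViewport(0, 0, fbo_width, fbo_height);          /* viewport must match the FBO size */
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
        draw_world_vbos();

        /* 2. Switch back to the default framebuffer (the window). */
        glBindFramebuffer(GL_FRAMEBUFFER, 0);
        glViewport(0, 0, window_width, window_height);    /* reset to the window size */
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

        /* 3. Draw a full-screen quad sampling the FBO's colour texture. */
        glBindTexture(GL_TEXTURE_2D, fbo_texture);
        draw_fullscreen_quad();

        /* SwapBuffers happens in the windowing layer after this returns. */
    }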

    Read the article

  • Let's introduce the Oracle Enterprise Data Quality family!

    - by Sarah Zanchetti
    The Oracle Enterprise Data Quality family of products helps you to achieve maximum value from their business applications by delivering fit-­for-­purpose data. OEDQ is a state-of-the-art collaborative data quality profiling, analysis, parsing, standardization, matching and merging product, designed to help you understand, improve, protect and govern the quality of the information your business uses, all from a single integrated environment. Oracle Enterprise Data Quality products are: Oracle Enterprise Data Quality Profile and Audit Oracle Enterprise Data Quality Parsing and Standardization Oracle Enterprise Data Quality Match and Merge Oracle Enterprise Data Quality Address Verification Server Oracle Enterprise Data Quality Product Data Parsing and Standardization Oracle Enterprise Data Quality Product Data Match and Merge Also, the following are some of the key features of OEDQ: Integrated data profiling, auditing, cleansing and matching Browser-based client access Ability to handle all types of data – for example customer, product, asset, financial, operational Connection to any JDBC-compliant data sources and targets Multi-user project support (role-based access, issue tracking, process annotation, and version control) Services Oriented Architecture (SOA) - support for designing processes that may be exposed to external applications as a service Designed to process large data volumes A single repository to hold data along with gathered statistics and project tracking information, with shared access Intuitive graphical user interface designed to help you solve real-world information quality issues quickly Easy, data-led creation and extension of validation and transformation rules Fully extensible architecture allowing the insertion of any required custom processing  If you need to learn more about EDQ, or get assistance for any kind of issue, the Oracle Technology Network offers a huge range of resources on Oracle software. Discuss technical problems and solutions on the Discussion Forums. Get hands-on step-by-step tutorials with Oracle By Example. Download Sample Code. Get the latest news and information on any Oracle product. You can also get further help and information with Oracle software from: My Oracle Support Oracle Support Services An Information Center is available, where you can find technical information and fast solutions to the most common already solved issues: Information Center: Oracle Enterprise Data Quality [ID 1555073.2]

    Read the article

  • Oracle Enterprise Manager 12c R3 introduces advancements in cloud lifecycle and operations management

    - by Anand Akela
    Oracle Enterprise Manager 12c Release 3 (R3) was announced (Press Release) earlier today. It is now available for download at OTN. This latest release features improvements in several areas, including: Improvements to Private Cloud and Engineered Systems Management; Expanded Middleware and Application Management Capabilities; and Efficiency Gains for Enterprise Manager Users in EM’s Enterprise-Ready Framework. You can learn more about what's new in Oracle Enterprise Manager 12c R3 in the Enterprise Manager 12c documentation. You will see more blogs and details about the new features during the next few weeks. Please let us know what you think. On July 18th, you can join us at a webcast to hear Thomas Kurian, EVP of Product Development, on what Oracle Engineering has achieved with Oracle Enterprise Manager 12c Release 3 to address these challenges. Later during this webcast, Oracle experts will discuss the latest capabilities in Oracle Enterprise Manager 12c Release 3 for cloud lifecycle and operations management. The presentation will be followed by a live Q&A session with Oracle experts. You can also join us online on Twitter to get your specific questions answered. Please use hashtag #em12c to join the conversation. Register Now for the Webcast! Stay Connected: Twitter | Facebook | YouTube | LinkedIn | Newsletter

    Read the article

  • RESTful applications logic and cross resource operations

    - by Gaz_Edge
    I have a RESTful API that allows my users to receive enquiries about their business, e.g. 'I would like to book service x on date y. Is this available?'. The API saves this information as a resource at the following URI: users/{userId}/enquiries/{enquiryId}. The information shown when this resource is retrieved is the standard sort of thing you'd expect from an enquiry - email, first_name, last_name, address, message. The API also allows customers to be created for a user. The customer has a login and password and also a profile. The following URIs expose these two resources: PUT users/{userId}/customers/{customerId} and PUT users/{userId}/customers/{customerId}/profile. The problem I am having is that I would like to allow users to create a customer from an enquiry. For example, the user is able to offer their service on the date requested and will then want to set up a customer with login details etc. to allow them to manage the rest of the process. The obvious answer would be to use a URI like users/{userId}/enquiries/{enquiryId}/convert-to-client. The problem with this is that it somewhat goes against a lot of what I've been reading about how to implement REST (specifically from the book RESTful Web Services, which suggests that URIs should point to resources, not operations on resources). The other option would be to get the client application (i.e. the code that calls the API) to handle some of this application logic. This doesn't quite feel right to me. In my design the client app is fairly dumb: it knows just enough to display the results from the API, and does not contain any application logic. It would be great to hear what others' views are on the best way of setting this up. Am I wrong to have no application logic in the client app? How would I perform this operation purely in the REST API?
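
    One alternative that stays within "URIs identify resources" is to treat the conversion as the creation of a new customer resource whose representation names the enquiry it was created from, rather than as a verb on the enquiry. A hypothetical exchange (paths, field names and values are illustrative only):

    POST /users/42/customers HTTP/1.1
    Content-Type: application/json

    {"from_enquiry": "/users/42/enquiries/17", "login": "jsmith", "password": "secret"}

    HTTP/1.1 201 Created
    Location: /users/42/customers/101

    The application logic (copying the enquiry's name, email and address into the new customer) then lives behind the API, so the client can stay as dumb as intended.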

    Read the article

  • Impact of Service Oriented Architecture (SOA) on Business and IT Operations

    The impact of Service Oriented Architecture (SOA) on business and IT operations varies from company to company. I think more and more companies are starting to view SOA as just another technology that they can incorporate in an existing or new system. One of the driving factors in using SOA is the reduction in maintenance costs and decrease in the time needed to bring products to market. The reductions in costs, and reduced turnaround time can be directly converted in to increased profitability due to less expenditures that are needed in order to maintain or create new systems. My personal perspective on SOA is that it is great for what it is actually intended to do. SOA allows systems to be distributed across networks or even the world while ensuring enterprise processing consistency, data integrity and preventing code duplication. This being said a lot of preparation and work goes into properly designing and implementing an SOA especially if an enterprise wants to take full advantage of its benefits. Even though SOA has recently gotten a lot of hype about its benefits it does not a perfect fit for all situations. At the end of the day SOA is just another tool in my tool belt that I can pull from to create solutions that meet the business’s needs. Based on current industry trends SOA appears to be a very solid technology to use moving forward, especially as more and more companies shift towards cloud based computing. It is important to remember that SOA is one of many technologies that can be used in creating business solutions and I think more time will be spent in the future evaluating if SOA is the right technology for a solution once the initial hype of SOA has calmed down.

    Read the article

  • Oracle Enterprise Data Quality - Geared Up and Ready for OpenWorld 2012

    - by Mala Narasimharajan
    10 days and counting till Oracle OpenWorld 2012 is upon us.  Enterprise data quality is key to every information integration and consolidation initiative. At this year's OpenWorld, hear how Oracle Enterprise Data Quality provides the critical piece to achieving trusted, reliable master data and increases the value of data integration initiatives. Here are the different ways you can learn and experience Enterprise Data Quality at OpenWorld:  Conference sessions: Oracle Enterprise Data Quality: Product Overview and Roadmap - Monday 10/1/12, 1:45-2:45 PM - Moscone West - 3006 Data Preparation and Ongoing Governance with the Oracle Enterprise Data Quality Platform - Wednesday 10/3/2012, 1:15-2:15 PM - Moscone West - 3000  Data Acquisition, Migration and Integration with the Oracle Enterprise Data Quality Platform - Thursday 10/4/2012, 12:45-1:45 PM - Moscone West - 3005  Hands on Labs: Introduction to Oracle Enterprise Data Quality Platform -  Monday 10/2/2012, 4:45-5:45 PM - Marriot Marquis - Salon 1/2 Demos:  Trusted Data with Oracle Enterprise Data Quality - Moscone South, Right - S-243 (note: proceed to Middleware Demo grounds) For a list of Master Data Management and Data Quality sessions and other events click here. 

    Read the article

  • MIX 2010 Covert Operations Day 4

    - by GeekAgilistMercenary
    The Microsoft Azure Cloud is looking pretty solid compared to just a few months ago.  The storage mechanisms in the cloud now are blobs, drives, tables, and queues.  Also, not to forget, is SQL Azure.  I won’t dive too much into that, as most will know what SQL Server is, and SQL Azure is pretty much just a hosted SQL Server instance. The blobs are generally geared toward holding binary type data, images and those types of things.  The tables are huge key value type stores.  The drives are VHD, which are virtual hard drives.  The queues are just queues used for workflow and also to store messages back and forth in a queue. These methods are accessible via REST, which makes application development against the storage services extremely easy.  This is a big plus point as REST services are a preferred way to connect and interact with data storage.  It also sets up Silverlight as a prime framework to utilize services. Day 4 I pretty much dedicated to reviewing these cloud services and finishing up work related development.  With that, I'm wrapping up my MIX 2010 blog coverage.  Now back to your regularly scheduled programming. Original entry.
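
    To make the "accessible via REST" point concrete: for a blob in a publicly readable container, a read is a plain HTTP GET against the storage account's endpoint (the account, container and blob names below are made up; authenticated operations additionally need a signed Authorization header and x-ms-* version headers, omitted here):

    GET /gamedata/level1.xml HTTP/1.1
    Host: myaccount.blob.core.windows.net

    HTTP/1.1 200 OK
    Content-Type: application/xml
    ...blob contents follow...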

    Read the article

  • SL3/SL4 - Ado.Net Data Services Error during new DataServiceCollection<T>(queryResponse)

    - by Soulhuntre
    Hey all, I have two functions in a SL project (VS2010) that do almost exactly the same thing, yet one throws an error and the other does not. It seems to be related to the projections, but I am unsure about the best way to resolve. The function that works is... public void LoadAllChunksExpandAll(DataHelperReturnHandler handler, string orderby) { DataServiceCollection<CmsChunk> data = null; DataServiceQuery<CmsChunk> theQuery = _dataservice .CmsChunks .Expand("CmsItemState") .AddQueryOption("$orderby", orderby); theQuery.BeginExecute( delegate(IAsyncResult asyncResult) { _callback_dispatcher.BeginInvoke( () => { try { DataServiceQuery<CmsChunk> query = asyncResult.AsyncState as DataServiceQuery<CmsChunk>; if (query != null) { //create a tracked DataServiceCollection from the result of the asynchronous query. QueryOperationResponse<CmsChunk> queryResponse = query.EndExecute(asyncResult) as QueryOperationResponse<CmsChunk>; data = new DataServiceCollection<CmsChunk>(queryResponse); handler(data); } } catch { handler(data); } } ); }, theQuery ); } This compiles and runs as expected. A very, very similar function (shown below) fails... public void LoadAllPagesExpandAll(DataHelperReturnHandler handler, string orderby) { DataServiceCollection<CmsPage> data = null; DataServiceQuery<CmsPage> theQuery = _dataservice .CmsPages .Expand("CmsChildPages") .Expand("CmsParentPage") .Expand("CmsItemState") .AddQueryOption("$orderby", orderby); theQuery.BeginExecute( delegate(IAsyncResult asyncResult) { _callback_dispatcher.BeginInvoke( () => { try { DataServiceQuery<CmsPage> query = asyncResult.AsyncState as DataServiceQuery<CmsPage>; if (query != null) { //create a tracked DataServiceCollection from the result of the asynchronous query. QueryOperationResponse<CmsPage> queryResponse = query.EndExecute(asyncResult) as QueryOperationResponse<CmsPage>; data = new DataServiceCollection<CmsPage>(queryResponse); handler(data); } } catch { handler(data); } } ); }, theQuery ); } Clearly the issue is the Expand projections that involve a self referencing relationship (pages can contain other pages). This is under SL4 or SL3 using ADONETDataServices SL3 Update CTP3. I am open to any work around or pointers to goo information, a Google search for the error results in two hits, neither particularly helpful that I can decipher. The short error is "An item could not be added to the collection. When items in a DataServiceCollection are tracked by the DataServiceContext, new items cannot be added before items have been loaded into the collection." The full error is... System.Reflection.TargetInvocationException was caught Message=Exception has been thrown by the target of an invocation. 
StackTrace: at System.RuntimeMethodHandle.InvokeMethodFast(IRuntimeMethodInfo method, Object target, Object[] arguments, SignatureStruct& sig, MethodAttributes methodAttributes, RuntimeType typeOwner) at System.Reflection.RuntimeMethodInfo.Invoke(Object obj, BindingFlags invokeAttr, Binder binder, Object[] parameters, CultureInfo culture, Boolean skipVisibilityChecks) at System.Reflection.RuntimeMethodInfo.Invoke(Object obj, BindingFlags invokeAttr, Binder binder, Object[] parameters, CultureInfo culture) at System.Reflection.MethodBase.Invoke(Object obj, Object[] parameters) at System.Data.Services.Client.ClientType.ClientProperty.SetValue(Object instance, Object value, String propertyName, Boolean allowAdd) at System.Data.Services.Client.AtomMaterializer.ApplyItemsToCollection(AtomEntry entry, ClientProperty property, IEnumerable items, Uri nextLink, ProjectionPlan continuationPlan) at System.Data.Services.Client.AtomMaterializer.ApplyFeedToCollection(AtomEntry entry, ClientProperty property, AtomFeed feed, Boolean includeLinks) at System.Data.Services.Client.AtomMaterializer.MaterializeResolvedEntry(AtomEntry entry, Boolean includeLinks) at System.Data.Services.Client.AtomMaterializer.Materialize(AtomEntry entry, Type expectedEntryType, Boolean includeLinks) at System.Data.Services.Client.AtomMaterializer.DirectMaterializePlan(AtomMaterializer materializer, AtomEntry entry, Type expectedEntryType) at System.Data.Services.Client.AtomMaterializerInvoker.DirectMaterializePlan(Object materializer, Object entry, Type expectedEntryType) at System.Data.Services.Client.ProjectionPlan.Run(AtomMaterializer materializer, AtomEntry entry, Type expectedType) at System.Data.Services.Client.AtomMaterializer.Read() at System.Data.Services.Client.MaterializeAtom.MoveNextInternal() at System.Data.Services.Client.MaterializeAtom.MoveNext() at System.Linq.Enumerable.d_b11.MoveNext() at System.Data.Services.Client.DataServiceCollection1.InternalLoadCollection(IEnumerable1 items) at System.Data.Services.Client.DataServiceCollection1.StartTracking(DataServiceContext context, IEnumerable1 items, String entitySet, Func2 entityChanged, Func2 collectionChanged) at System.Data.Services.Client.DataServiceCollection1..ctor(DataServiceContext context, IEnumerable1 items, TrackingMode trackingMode, String entitySetName, Func2 entityChangedCallback, Func2 collectionChangedCallback) at System.Data.Services.Client.DataServiceCollection1..ctor(IEnumerable1 items) at Phinli.Dashboard.Silverlight.Helpers.DataHelper.<>c__DisplayClass44.<>c__DisplayClass46.<LoadAllPagesExpandAll>b__43() InnerException: System.InvalidOperationException Message=An item could not be added to the collection. When items in a DataServiceCollection are tracked by the DataServiceContext, new items cannot be added before items have been loaded into the collection. StackTrace: at System.Data.Services.Client.DataServiceCollection1.InsertItem(Int32 index, T item) at System.Collections.ObjectModel.Collection`1.Add(T item) InnerException: Thanks for any help!

    Read the article

  • 3rd party data - Store in Data Warehouse or Primary database?

    - by brydgesk
    This is mostly a data warehouse philosophy question. My project involves an Oracle forms application, and a Teradata Data Warehouse for reporting and ad-hoc purposes. In addition to the primary data created by the users of our application, we also require data from various other sources. Currently, this 3rd party data comes via FTPd flat files directly to our Data Warehouse. To access the data, our users must use a series of custom BusinessObjects reports. My question is, would it make more sense for this data to be sent to our source Oracle system instead? Is it ever appropriate for a Data Warehouse to be the point of origin for users to access raw data? In short, is it more important that the operational database contain only the data created by your project, or that the data warehouse remain dedicated solely to reporting and analysis?

    Read the article

  • Starting to construct a data access layer. Things to consider?

    - by Phil
    Our organisation uses inline SQL. We have been tasked with providing a suitable data access layer and are weighing up the pros and cons of which way to go... DataSets, ADO.NET, LINQ, Entity Framework, SubSonic, or something else? Some tutorials and articles I have been using for reference: http://www.asp.net/(S(pdfrohu0ajmwt445fanvj2r3))/learn/data-access/tutorial-01-vb.aspx http://www.simple-talk.com/dotnet/.net-framework/designing-a-data-access-layer-in-linq-to-sql/ http://msdn.microsoft.com/en-us/magazine/cc188750.aspx http://msdn.microsoft.com/en-us/library/aa697427(VS.80).aspx http://www.subsonicproject.com/ I'm extremely torn, and finding it very difficult to make a decision on which way to go. Our site is a series of two internal portals and a public website. We are using VS2008 SP1 and framework version 3.5. Please can you give me advice on what factors to consider and any pros and cons you have faced with your data access layer. Thanks.

    Read the article

  • Using Core Data Concurrently and Reliably

    - by John Topley
    I'm building my first iOS app, which in theory should be pretty straightforward but I'm having difficulty making it sufficiently bulletproof for me to feel confident submitting it to the App Store. Briefly, the main screen has a table view, upon selecting a row it segues to another table view that displays information relevant for the selected row in a master-detail fashion. The underlying data is retrieved as JSON data from a web service once a day and then cached in a Core Data store. The data previous to that day is deleted to stop the SQLite database file from growing indefinitely. All data persistence operations are performed using Core Data, with an NSFetchedResultsController underpinning the detail table view. The problem I am seeing is that if you switch quickly between the master and detail screens several times whilst fresh data is being retrieved, parsed and saved, the app freezes or crashes completely. There seems to be some sort of race condition, maybe due to Core Data importing data in the background whilst the main thread is trying to perform a fetch, but I'm speculating. I've had trouble capturing any meaningful crash information, usually it's a SIGSEGV deep in the Core Data stack. The table below shows the actual order of events that happen when the detail table view controller is loaded: Main Thread Background Thread viewDidLoad Get JSON data (using AFNetworking) Create child NSManagedObjectContext (MOC) Parse JSON data Insert managed objects in child MOC Save child MOC Post import completion notification Receive import completion notification Save parent MOC Perform fetch and reload table view Delete old managed objects in child MOC Save child MOC Post deletion completion notification Receive deletion completion notification Save parent MOC Once the AFNetworking completion block is triggered when the JSON data has arrived, a nested NSManagedObjectContext is created and passed to an "importer" object that parses the JSON data and saves the objects to the Core Data store. The importer executes using the new performBlock method introduced in iOS 5: NSManagedObjectContext *child = [[NSManagedObjectContext alloc] initWithConcurrencyType:NSPrivateQueueConcurrencyType]; [child setParentContext:self.managedObjectContext]; [child performBlock:^{ // Create importer instance, passing it the child MOC... }]; The importer object observes its own MOC's NSManagedObjectContextDidSaveNotification and then posts its own notification which is observed by the detail table view controller. When this notification is posted the table view controller performs a save on its own (parent) MOC. I use the same basic pattern with a "deleter" object for deleting the old data after the new data for the day has been imported. This occurs asynchronously after the new data has been fetched by the fetched results controller and the detail table view has been reloaded. One thing I am not doing is observing any merge notifications or locking any of the managed object contexts or the persistent store coordinator. Is this something I should be doing? I'm a bit unsure how to architect this all correctly so would appreciate any advice.

    Read the article

  • Used HDD/ran DiskSmartView/40,000 Power-on-hours?? should i trust it w/ my data, or take it back and bitch?

    - by David Lindsay
    I just bought a used hard drive from a University Surplus Store. I decided to run DiskSmartView to make sure it wasn't ready to fail, and it reports 40,000 power-on hours. I don't know if I feel like trusting my data to something that used. I really don't know if that's unreasonably old, but when I compare it to the POH readings I get when testing my other HDDs it's more than 3x older (my others have 2,110 hours, 6,150 hours, etc.). It's a Western Digital, so that gives me a little bit of hope (WDC WD4000KD-00NAB0). I could sure use someone else's opinion here. Thanks, DAVE
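
    For rough context on that number: 40,000 power-on hours is 40,000 / 24 ≈ 1,667 days, or about 4.6 years of continuous spinning, whereas the 2,110-hour and 6,150-hour drives mentioned work out to roughly 88 and 256 days respectively.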

    Read the article

  • MIX 2010 Covert Operations Day 2 Silverlight + Windows 7 Phone

    - by GeekAgilistMercenary
    Left the Circus Circus and headed to the geek circus at Mandalay Bay.  Got in, got some breakfast, met a few more people and headed to the keynote. Upon arriving the crew I was hanging with at the event; Erik Mork, Beth Murray, and Brian Henderson and I were entertained with several other thousand geeks by the wicked yo-yoing. The first video demo of something was of Bing Maps and various aspects of Microsoft Research integrated together.  Namely the pictures, put in place, on real 3d element maps of various environments. Silverlight Scott Guthrie, as one would guess, kicked off the keynote.  His first point was that user experience has become a priority at Microsoft.  This can be seen by any observant soul with the release and push of Expression, Silverlight, and the other tools.  This is even more apparent when one takes note of Microsoft bringing in people that can actually do good design and putting them at the forefront. The next thing Scott brought up was a few key points about Silverlight.  Currently Silverlight is a little over 2 years old and has achieved a pretty solid 60% penetration.  Silverlight has all sorts of capabilities that have been developed and are now provided as open source including;  ad injection, smoothing, playback editing, and more.  Another thing he showed, which really struck me as awesome being in the analytics space, was the Olympics and a quick glimpse of the ad statistics, viewer experience, video playback performance, audience trends, and overall viewer participation.  All of it rendered in Silverlight in beautiful detail. The key piece of Scott's various points were all punctuated with the fact that all of this code is available as open source.  Not only is Microsoft really delving into this design element of things, they're getting involved in the right ways. One of the last points I'll bring up about Silverlight 4 is the ability to have HD video on a monitor, and an entirely different activity being done on the other monitor, effectively making Silverlight the only RIA framework that supports multi-monitor support.  Overall, Silverlight is continuing to impress – providing superior capabilities tit-for-tat with the competition. Windows 7 Phone The Windows 7 Phone has 3 primary buttons (yes, more than the iPhone, don't let your mind explode!!).  Start, Search, and Back control all of the needed functionality of the phone.  At the same time, of course, there is the multi-touch, touch, and other interactive abilities of the interface.  The intent, once start is pressed is to have all the information that a phone owner wants displayed immediately.  Avoiding the scrolling through pages of apps or rolling a ball to get through multitudes of other non-interactive phone interfaces.  The Windows 7 Phone simply has the data right in front of you, basically a phone dashboard.  From there it is easy to dive into the interactive areas of the phone. Each area of the interface of the phone is broken into hubs.  These hubs include applications, data, and other things based on a relative basis.  This basis being determined by the user.  These applications interact on many other levels, and form a kind of relationship between each other adding more and more meta-data to the phone user, their interactions between the applications, and of course the social element of their interactions on the phone.  This makes this phone a practical must have for a marketer involved in social media.  
The level of wired together interaction is massive, and of course, if you've seen Office Outlook 2010 you know that the power that is pulled into the phone by being tied to Outlook is massive. Joe Belfiore also showed several UI & specifically UX elements of the phone interface that allows paging to be instinctual by simple clipped items, flipping page to page, and other excellent user experience advances for phone devices.  Belfiore's also showed how his people hub had a massive list of people, with pictures, all from various different social networks and other associated relations.  The rendering, speed, and viewing of these people's, their pictures, their social network information, and other characteristics was smooth and in some situations unbelievably rendered.  This demo showed some of the great power of the beta phone, which isn't even as powerful as the planned end device. Joe finished up by jumping into the music, videos, and other media with the Zune Component of the Windows 7 Mobile Phone.  This was all good stuff, but I'll get to what really sold me on the media element in a moment. When Joe was done, Scott Guthrie stepped back up to walk through building a Windows 7 Mobile Phone.  This is were I have to give serious props.  He built this application, in Visual Studio 2010, in front of 2000+ people.  That was cool, but what really was amazing that he build the application in about 2 minutes.  The IDE, side by side design that is standard in Visual Studio is light years ahead of x-Code or any of the iPhone IDEs.  The Windows 7 Mobile System, if it can get market penetration, poses a technologically superior development and phone platform over anything on the market right now.  The biggest problem with the phone, is it just isn't available yet.  I personally can't wait for a chance to build some apps for the new Windows Phone. Netflix, I May Start Up an Account Again! When I get my Windows 7 Phone device, I am absolutely getting a Netflix account again.  The Vertigo crew, as I wrote on Twitter "#MIX10 Props @seesharp on @netflix demo", displayed an application on the phone for Netflix that actually ran HD Video of Rescue Me (with Dennis Leary).  The video played back smooth as it would on a dedicated computer, I was instantly sold.  So this didn't actually sell me on the phone, because I'm already sold, but it did sell me whole heartedly on the media capabilities of the pending phone. Anyway, I try not to do this but I may double post today.  Lunch is over and I'm off to another session very near and dear to the heart of my occupation, Analytics Tracking.  Stay tuned and I should have that post up by the end of the day. Original Post – Check out my other blog for even more technical ramblings and reads.

    Read the article

  • Windows Azure Recipe: Big Data

    - by Clint Edmonson
    As the name implies, what we’re talking about here is the explosion of electronic data that comes from huge volumes of transactions, devices, and sensors being captured by businesses today. This data often comes in unstructured formats and/or too fast for us to effectively process in real time. Collectively, we call these the 4 big data V’s: Volume, Velocity, Variety, and Variability. These qualities make this type of data best managed by NoSQL systems like Hadoop, rather than by conventional Relational Database Management System (RDBMS). We know that there are patterns hidden inside this data that might provide competitive insight into market trends.  The key is knowing when and how to leverage these “No SQL” tools combined with traditional business such as SQL-based relational databases and warehouses and other business intelligence tools. Drivers Petabyte scale data collection and storage Business intelligence and insight Solution The sketch below shows one of many big data solutions using Hadoop’s unique highly scalable storage and parallel processing capabilities combined with Microsoft Office’s Business Intelligence Components to access the data in the cluster. Ingredients Hadoop – this big data industry heavyweight provides both large scale data storage infrastructure and a highly parallelized map-reduce processing engine to crunch through the data efficiently. Here are the key pieces of the environment: Pig - a platform for analyzing large data sets that consists of a high-level language for expressing data analysis programs, coupled with infrastructure for evaluating these programs. Mahout - a machine learning library with algorithms for clustering, classification and batch based collaborative filtering that are implemented on top of Apache Hadoop using the map/reduce paradigm. Hive - data warehouse software built on top of Apache Hadoop that facilitates querying and managing large datasets residing in distributed storage. Directly accessible to Microsoft Office and other consumers via add-ins and the Hive ODBC data driver. Pegasus - a Peta-scale graph mining system that runs in parallel, distributed manner on top of Hadoop and that provides algorithms for important graph mining tasks such as Degree, PageRank, Random Walk with Restart (RWR), Radius, and Connected Components. Sqoop - a tool designed for efficiently transferring bulk data between Apache Hadoop and structured data stores such as relational databases. Flume - a distributed, reliable, and available service for efficiently collecting, aggregating, and moving large log data amounts to HDFS. Database – directly accessible to Hadoop via the Sqoop based Microsoft SQL Server Connector for Apache Hadoop, data can be efficiently transferred to traditional relational data stores for replication, reporting, or other needs. Reporting – provides easily consumable reporting when combined with a database being fed from the Hadoop environment. Training These links point to online Windows Azure training labs where you can learn more about the individual ingredients described above. Hadoop Learning Resources (20+ tutorials and labs) Huge collection of resources for learning about all aspects of Apache Hadoop-based development on Windows Azure and the Hadoop and Windows Azure Ecosystems SQL Azure (7 labs) Microsoft SQL Azure delivers on the Microsoft Data Platform vision of extending the SQL Server capabilities to the cloud as web-based services, enabling you to store structured, semi-structured, and unstructured data. 
See my Windows Azure Resource Guide for more guidance on how to get started, including links web portals, training kits, samples, and blogs related to Windows Azure.

    Read the article

  • Master Data Management

    - by Logicalj
    I am looking for a very flexible, easy-to-integrate, and dynamic application with as many features as possible for Master Data Management. Since Master Data Management is used to manage operational data, analytical data, and master data, I want guidance on what exactly is expected from Master Data Management and what the basic and challenging scenarios are that it must cover or resolve. Please guide me on all the possible aspects of Master Data Management, such as data cleansing, data management, and data analysis.

    Read the article

  • What is the architectural name for the set of data that enables UI choices?

    - by Richard Collette
    I have separate service methods that fetch business object data and the data for UI selection input such as radio buttons, check-boxes, combo-boxes, etc. I want to name my service methods that fetch the selection data appropriately. I am assuming that Model and ViewModel would not be part of the name because the selection data is but a portion of the Model or ViewModel. What might this set of data be named such that I can name my service method?

    Read the article
