Search Results

Search found 24094 results on 964 pages for 'console log'.


  • When debugging in VS 2008 why does the debugger land on a second return statement?

    - by Hellfire
    When debugging the following console program:

        class Program
        {
            static void Main(string[] args)
            {
                Console.WriteLine(DoIt(false));
                Console.WriteLine(DoIt(true));
            }

            private static Boolean DoIt(Boolean abort)
            {
                try
                {
                    throw new InvalidOperationException();
                }
                catch (Exception ex)
                {
                    if (abort)
                    {
                        return true;
                    }
                    Console.WriteLine("Got here");
                    return false;
                }
            }
        }

    Why does the IDE land on the second return statement during the second call to DoIt()? The results of the execution are correct, but the debugging experience is misleading. Is this a known issue? Is the behavior in VS 2010 the same?
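    A plausible explanation (not confirmed in the question): in a debug build both return statements branch to a shared method epilogue, and the PDB sequence points can map that shared exit back onto the other return. A minimal workaround sketch, assuming you just want an unambiguous step target, is to use a single exit point:

        private static Boolean DoIt(Boolean abort)
        {
            Boolean result = false; // single return makes stepping unambiguous
            try
            {
                throw new InvalidOperationException();
            }
            catch (Exception)
            {
                if (abort)
                {
                    result = true;
                }
                else
                {
                    Console.WriteLine("Got here");
                }
            }
            return result;
        }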

    Read the article

  • How to detect invalid route and trigger function in Backbone.Controller

    - by tomodian
    Is there any method to detect an invalid (or undefined) route and trigger a 404 page in Backbone.Controller? I've defined routes in my Controller like this, but it didn't work:

        class MyController extends Backbone.Controller
          routes:
            "method_a": "methodA"
            "method_b": "methodB"
            "*undefined": "show404Error"

          # when access to /#method_a
          methodA: ->
            console.log "this page exists"

          # when access to /#method_b
          methodB: ->
            console.log "this also exists"

          # when access to /#some_invalid_hash_fragment_for_malicious_attack
          show404Error: ->
            console.log "sorry, this page does not exist"
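    A hedged sketch in plain JavaScript, assuming a Backbone version with splat-route support (Backbone.Controller was renamed Backbone.Router in 0.5; a "*path" splat matches any fragment, and routes listed earlier in the hash take precedence in recent versions, so the catch-all belongs last):

        var AppRouter = Backbone.Router.extend({
            routes: {
                "method_a": "methodA",
                "method_b": "methodB",
                "*path": "show404Error" // splat: matches anything not caught above
            },
            methodA: function () { console.log("this page exists"); },
            methodB: function () { console.log("this also exists"); },
            show404Error: function (path) {
                console.log("sorry, '" + path + "' does not exist");
            }
        });

        new AppRouter();
        Backbone.history.start();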

    Read the article

  • How do you create a program that asks the user to enter 3 numbers and displays the largest?

    - by lobosagwan
        int numbers;
        int largestNum = 0;
        Console.WriteLine("Enter numbers : ");
        numbers = int.Parse(Console.ReadLine());
        numbers = 0;
        for (int i = 0; i < 3; i++)
        {
            if (numbers[i] > largestNum)
        }
        Console.WriteLine("The largest number is : {0}", largestNum);
        Console.ReadLine();

    This is similar code; the problem is how to ask the user to enter 3 numbers and display the largest number.
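    A minimal corrected sketch, assuming the goal is just to read three integers and report the largest (no validation of malformed input):

        using System;

        class Program
        {
            static void Main()
            {
                int largestNum = int.MinValue; // works even if all inputs are negative
                for (int i = 0; i < 3; i++)
                {
                    Console.Write("Enter number {0}: ", i + 1);
                    int number = int.Parse(Console.ReadLine());
                    if (number > largestNum)
                    {
                        largestNum = number;
                    }
                }
                Console.WriteLine("The largest number is : {0}", largestNum);
            }
        }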

    Read the article

  • C#: How to output to a GUI when data comes in via a MarshalByRefObject interface?

    - by Tom
    Hey, can someone please show me how I can write the output of OnCreateFile to a GUI? I thought the GUI would have to be declared at the bottom in the main function, so how do I then refer to it within OnCreateFile?

        using System;
        using System.Collections.Generic;
        using System.Runtime.Remoting;
        using System.Text;
        using System.Diagnostics;
        using System.IO;
        using EasyHook;
        using System.Drawing;
        using System.Windows.Forms;

        namespace FileMon
        {
            public class FileMonInterface : MarshalByRefObject
            {
                public void IsInstalled(Int32 InClientPID)
                {
                    //Console.WriteLine("FileMon has been installed in target {0}.\r\n", InClientPID);
                }

                public void OnCreateFile(Int32 InClientPID, String[] InFileNames)
                {
                    for (int i = 0; i < InFileNames.Length; i++)
                    {
                        String[] s = InFileNames[i].ToString().Split('\t');
                        if (s[0].ToString().Contains("ROpen"))
                        {
                            //Console.WriteLine(DateTime.Now.Hour+":"+DateTime.Now.Minute+":"+DateTime.Now.Second+"."+DateTime.Now.Millisecond + "\t" + s[0] + "\t" + getProcessName(int.Parse(s[1])) + "\t" + getRootHive(s[2]));
                            Program.ff.enterText(DateTime.Now.Hour + ":" + DateTime.Now.Minute + ":" + DateTime.Now.Second + "." + DateTime.Now.Millisecond
                                + "\t" + s[0] + "\t" + getProcessName(int.Parse(s[1])) + "\t" + getRootHive(s[2]));
                        }
                        else if (s[0].ToString().Contains("RQuery"))
                        {
                            Console.WriteLine(DateTime.Now.Hour + ":" + DateTime.Now.Minute + ":" + DateTime.Now.Second + "." + DateTime.Now.Millisecond
                                + "\t" + s[0] + "\t" + getProcessName(int.Parse(s[1])) + "\t" + getRootHive(s[2]));
                        }
                        else if (s[0].ToString().Contains("RDelete"))
                        {
                            Console.WriteLine(DateTime.Now.Hour + ":" + DateTime.Now.Minute + ":" + DateTime.Now.Second + "." + DateTime.Now.Millisecond
                                + "\t" + s[0] + "\t" + getProcessName(int.Parse(s[0])) + "\t" + getRootHive(s[1]));
                        }
                        else if (s[0].ToString().Contains("FCreate"))
                        {
                            //Console.WriteLine(DateTime.Now.Hour+":"+DateTime.Now.Minute+":"+DateTime.Now.Second+"."+DateTime.Now.Millisecond + "\t" + s[0] + "\t" + getProcessName(int.Parse(s[1])) + "\t" + s[2]);
                        }
                    }
                }

                public void ReportException(Exception InInfo)
                {
                    Console.WriteLine("The target process has reported an error:\r\n" + InInfo.ToString());
                }

                public void Ping()
                {
                }

                public String getProcessName(int ID)
                {
                    String name = "";
                    Process[] process = Process.GetProcesses();
                    for (int i = 0; i < process.Length; i++)
                    {
                        if (process[i].Id == ID)
                        {
                            name = process[i].ProcessName;
                        }
                    }
                    return name;
                }

                public String getRootHive(String hKey)
                {
                    int r = hKey.CompareTo("2147483648");
                    int r1 = hKey.CompareTo("2147483649");
                    int r2 = hKey.CompareTo("2147483650");
                    int r3 = hKey.CompareTo("2147483651");
                    int r4 = hKey.CompareTo("2147483653");
                    if (r == 0) { return "HKEY_CLASSES_ROOT"; }
                    else if (r1 == 0) { return "HKEY_CURRENT_USER"; }
                    else if (r2 == 0) { return "HKEY_LOCAL_MACHINE"; }
                    else if (r3 == 0) { return "HKEY_USERS"; }
                    else if (r4 == 0) { return "HKEY_CURRENT_CONFIG"; }
                    else return hKey.ToString();
                }
            }

            class Program : System.Windows.Forms.Form
            {
                static String ChannelName = null;
                public static Form1 ff;

                Program() // ADD THIS CONSTRUCTOR
                {
                    InitializeComponent();
                }

                static void Main()
                {
                    try
                    {
                        Config.Register("A FileMon like demo application.", "FileMon.exe", "FileMonInject.dll");
                        RemoteHooking.IpcCreateServer<FileMonInterface>(ref ChannelName, WellKnownObjectMode.SingleCall);
                        Process[] p = Process.GetProcesses();
                        for (int i = 0; i < p.Length; i++)
                        {
                            try
                            {
                                RemoteHooking.Inject(p[i].Id, "FileMonInject.dll", "FileMonInject.dll", ChannelName);
                            }
                            catch (Exception e) { }
                        }
                    }
                    catch (Exception ExtInfo)
                    {
                        Console.WriteLine("There was an error while connecting to target:\r\n{0}", ExtInfo.ToString());
                    }
                }
            }
        }

    Read the article

  • Reference an object based on a variable with its name in it

    - by James G
    I'm looking for a way to reference an object based on a variable with its name in it. I know I can do this for properties and sub-properties:

        var req = {body: {jobID: 12}};
        console.log(req.body.jobID);              // 12

        var subProperty = "jobID";
        console.log(req.body[subProperty]);       // 12

        var property = "body";
        console.log(req[property][subProperty]);  // 12

    Is it possible for the object itself?

        var req = {body: {jobID: 12}};
        var object = "req";
        var property = "body";
        var subProperty = "jobID";
        console.log([object][property][subProperty]);     // 12
        // or
        console.log(this[object][property][subProperty]); // 12

    Note: I'm doing this in node.js, not a browser. Here is an excerpt from the function:

        if(action.render){
            res.render(action.render, renderData);
        }else if(action.redirect){
            if(action.redirect.args){
                var args = action.redirect.args;
                res.redirect(action.redirect.path + req[args[0]][args[1]]);
            }else{
                res.redirect(action.redirect.path);
            }
        }

    I could work around it by changing it to this, but I was looking for something more dynamic:

        if(action.render){
            res.render(action.render, renderData);
        }else if(action.redirect){
            if(action.redirect.args){
                var args = action.redirect.args;
                if(args[0] === "req"){
                    res.redirect(action.redirect.path + req[args[1]][args[2]]);
                }else if(args[0] === "rows"){
                    rows.redirect(action.redirect.path + rows[args[1]][args[2]]);
                }
            }else{
                res.redirect(action.redirect.path);
            }
        }
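    Since a module-level variable name can't be resolved from a string in Node (module scope is not a property of this or global the way browser globals are), a common answer is an explicit lookup table; a minimal sketch (the sources name is hypothetical):

        // Map the names you are willing to resolve onto the actual objects.
        var req  = { body: { jobID: 12 } };
        var rows = { body: { jobID: 99 } };
        var sources = { req: req, rows: rows };

        var args = ["req", "body", "jobID"];
        console.log(sources[args[0]][args[1]][args[2]]); // 12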

    Read the article

  • C# Script to get modified date from URL directory

    - by Jynx
    Hello, I am fairly new to C# and was hoping for some assistance. I currently use Visual Studio 2008. What I want to do is the following: I have a server (\\backupserv) that runs a RoboCopy script nightly to back up directories from 18 other servers. These directories are then copied down to \\backupserv\backups in directories of their own. For example: it copies down "Dir1", "Dir2", and "Dir3" from Server1 into \\backupserv\backups\Server1, each into its own directory (\\backupserv\backups\Server1\Dir1, \\backupserv\backups\Server1\Dir2, and \\backupserv\backups\Server1\Dir3). It does this for all 18 servers nightly between 12am and 6am. RoboCopy runs via a scheduled task. A log file is created in \\backupserv\backups\log and is named server1-dir1.log, server1-dir2.log, etc. What I want to accomplish in C# is a 'report' showing the modified date of each text log file. To do this I need to browse the \\backupserv\backups\log directory, determine the modified date, and display a report (preferably HTML). Along with the modified date I will be showing more information, but that is later. Again, I am fairly new to C#, so please be gentle. I was referred here by another programmer and was told I would get some assistance. Thanks in advance. If I have missed any detail please let me know and I will do my best to answer.
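    A minimal sketch of the directory scan (assumptions: the UNC path from the question, and that writing a local report.html file is acceptable):

        using System;
        using System.IO;
        using System.Text;

        class LogReport
        {
            static void Main()
            {
                // UNC path taken from the question; adjust as needed.
                var logDir = new DirectoryInfo(@"\\backupserv\backups\log");

                var html = new StringBuilder("<html><body><table border='1'>");
                html.Append("<tr><th>Log file</th><th>Modified</th></tr>");

                // LastWriteTime is the "modified date" shown in Explorer.
                foreach (FileInfo log in logDir.GetFiles("*.log"))
                {
                    html.AppendFormat("<tr><td>{0}</td><td>{1:g}</td></tr>",
                                      log.Name, log.LastWriteTime);
                }

                html.Append("</table></body></html>");
                File.WriteAllText("report.html", html.ToString());
            }
        }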

    Read the article

  • jquery ajax: headers seem to not be working

    - by Will
    Hello, I am trying to get the headers of an AJAX request I made through jQuery:

        $.get(url, function (response, textStatus, headers) {
            console.log("Response: %o", response);
            console.log("TextStatus: %o", textStatus);
            console.log("Request: %o", headers);
        });

    This does not seem to be working, however: the response and textStatus are printing, but the "headers" object seems to be undefined. I simply want to check whether it is what I expect (content type = 'excel', etc.); if the response type is HTML, I can assume the page I was calling is an error.
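    For what it's worth, in jQuery 1.4+ the third argument of the success callback is the XHR object (a jqXHR from 1.5 on), not a map of headers, so the header has to be read with getResponseHeader; a minimal sketch:

        $.get(url, function (data, textStatus, xhr) {
            // xhr is the (jq)XHR object, not a plain headers map.
            var contentType = xhr.getResponseHeader("Content-Type");
            console.log("Content-Type: %s", contentType);

            if (contentType && contentType.indexOf("text/html") === 0) {
                console.log("Got HTML back - treating this as an error page");
            }
        });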

    Read the article

  • JS split() to check if a substring exists in an array

    - by Javacadabra
    I have an array of products that are stored as strings in the format productname:quantity. The issue I am running into is that if a user adds one product with a quantity of x, it is inserted into the array as it should be. However, if they then decide to add more of a particular product, a new entry is made in the array instead of checking whether the product already exists and adjusting the quantity to the new value (oldQty + newQty). For example, this is my array:

        ["CBL202659/A:1","OUTER9:1","PALLET CARDS:1"]

    If I add another PALLET CARDS product it creates a new entry rather than updating the quantity of the existing item to 2:

        ["CBL202659/A:1","OUTER9:1","PALLET CARDS:1","PALLET CARDS:1"]

    I would like the array to end up like this, updating the quantity:

        ["CBL202659/A:1","OUTER9:1","PALLET CARDS:2"]

    Currently this is my code. I use the split() method to separate the string where a colon occurs and store the product name and quantity in two separate variables:

        $(".orderBtn").click(function(event){
            //Show the order Box
            $(".order-alert").show();
            event.preventDefault();
            //Create the Array
            var productArray = [];
            //Get reference to the product clicked
            var stockCode = $(this).closest('li').find('.stock_code').html();
            //Get reference to the quantity selected
            var quantity = $(this).closest('li').find('.order_amount').val();
            var item = stockCode + ":" + quantity;
            var itemCheck = stockCode + ":";
            if(quantity == 0){
                console.log("Quantity must be greater than 0")
            }else{
                //If no Cookie exists, create one and add the Array
                if ($.cookie('order_cookie') === undefined) {
                    console.log("CREATE NEW COOKIE");
                    //Add items to Array
                    productArray.push(item);
                    //Add Array to Cookie
                    $.cookie('order_cookie', JSON.stringify(productArray), { expires: 1, path: '/' });
                //If the Cookie already exists do this
                } else {
                    productArray = JSON.parse($.cookie('order_cookie')); //get ref to array
                    if(productArray.indexOf(itemCheck) != -1){ //It exists so update qty
                        console.log("EXISTS... updating item: " + itemCheck);
                        //var index = productArray.indexOf(item);
                        //var update = productArray[index].split(":");
                        //var name = update[0];
                        //var oldQty = update[1];
                        //console.log(name + ":" + oldQty);
                        //productArray[index] = item;
                    }else{ //It does not exist, so add to array
                        console.log("Does not exist... adding new item: " + item);
                        //Append items onto the Array
                        productArray.push(item);
                    }
                    //Update the Cookie
                    $.cookie('order_cookie', JSON.stringify(productArray), { expires: 1, path: '/' });
                    console.log($.cookie('order_cookie'));
                }
                //Display the number of items in the Array in the Order Box
                $('#order_counter').html(productArray.length);
            }
        });

    I suppose the real question I am asking here is whether it is possible to search the array for a substring containing productname:?
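    indexOf only matches whole strings, so the itemCheck lookup never fires; a minimal sketch of a name-aware update (hedged: assumes product names never contain a colon):

        function addOrUpdate(productArray, stockCode, quantity) {
            for (var i = 0; i < productArray.length; i++) {
                var parts = productArray[i].split(":"); // ["PALLET CARDS", "1"]
                if (parts[0] === stockCode) {
                    var newQty = parseInt(parts[1], 10) + parseInt(quantity, 10);
                    productArray[i] = stockCode + ":" + newQty; // update in place
                    return productArray;
                }
            }
            productArray.push(stockCode + ":" + quantity); // not found: new entry
            return productArray;
        }

        console.log(addOrUpdate(["OUTER9:1", "PALLET CARDS:1"], "PALLET CARDS", "1"));
        // ["OUTER9:1", "PALLET CARDS:2"]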

    Read the article

  • Communicate with a process in UTF-8 from a cp1252 console

    - by Mapad
    I need to control a program by sending commands in UTF-8 encoding to its standard input. For this I run the program using subprocess.Popen():

        proc = Popen("myexecutable.exe", shell=True, stdin=PIPE,
                     stdout=PIPE, stderr=PIPE)
        proc.stdin.write(u'ééé'.encode('utf_8'))

    If I run this from a Cygwin UTF-8 console, it works. If I run it from a Windows console (encoding = 'cp1252') it doesn't. Is there a way to make this work without having to install a Cygwin UTF-8 console on each computer I want it to run from? (NB: I don't need to output anything to the console.)
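    One way to narrow this down (a hedged sketch, Python 3 syntax): a pipe carries raw bytes, so the console code page should only matter if the child itself consults the console; verifying that the bytes round-trip through a pipe unchanged rules the pipe out as the culprit:

        import sys
        from subprocess import Popen, PIPE

        # Stand-in child that copies raw stdin bytes to stdout; the poster's
        # myexecutable.exe would take its place in the real test.
        child = Popen([sys.executable, "-c",
                       "import sys; sys.stdout.buffer.write(sys.stdin.buffer.read())"],
                      stdin=PIPE, stdout=PIPE)

        data = u'ééé'.encode('utf-8')
        out, _ = child.communicate(data)
        assert out == data  # the pipe delivered the UTF-8 bytes unchanged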

    Read the article

  • F#: why is my recursion faster than Seq.exists?

    - by user38397
    I am pretty new to F#. I'm trying to understand how I can get fast code in F#. For this, I tried to write two methods (IsPrime1 and IsPrime2) for benchmarking. My code is:

        // Learn more about F# at http://fsharp.net
        open System
        open System.Diagnostics
        #light

        let isDivisible n d = n % d = 0

        let IsPrime1 n =
            Array.init (n-2) ((+) 2)
            |> Array.exists (isDivisible n)
            |> not

        let rec hasDivisor n d =
            match d with
            | x when x < n -> (n % x = 0) || (hasDivisor n (d+1))
            | _ -> false

        let IsPrime2 n = hasDivisor n 2 |> not

        let SumOfPrimes max =
            [|2..max|]
            |> Array.filter IsPrime1
            |> Array.sum

        let maxVal = 20000
        let s = new Stopwatch()
        s.Start()
        let valOfSum = SumOfPrimes maxVal
        s.Stop()
        Console.WriteLine valOfSum
        Console.WriteLine("IsPrime1: {0}", s.ElapsedMilliseconds)

        //////////////////////////////////
        s.Reset()
        s.Start()
        let SumOfPrimes2 max =
            [|2..max|]
            |> Array.filter IsPrime2
            |> Array.sum
        let valOfSum2 = SumOfPrimes2 maxVal
        s.Stop()
        Console.WriteLine valOfSum2
        Console.WriteLine("IsPrime2: {0}", s.ElapsedMilliseconds)
        Console.ReadKey()

    IsPrime1 takes 760 ms while IsPrime2 takes 260 ms for the same result. What's going on here, and how can I make my code even faster?
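    A hedged reading of the gap: IsPrime1 allocates an (n-2)-element array on every call before testing for divisors, while the recursion allocates nothing and short-circuits through ||. Independent of that, stopping the search at sqrt n shrinks the work far more than either micro-optimization; a minimal sketch:

        let isPrimeFast n =
            if n < 2 then false
            else
                let limit = int (sqrt (float n))
                let rec loop d =
                    if d > limit then true      // no divisor found up to sqrt n
                    elif n % d = 0 then false   // short-circuits like IsPrime2
                    else loop (d + 1)
                loop 2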

    Read the article

  • Why can I not create an instance of a class outside of the constructor?

    - by Phsika
    Why can't I create an instance outside of the constructor? I inherit sinifD from sinifC, and I need to create an instance of sinifC (sinifC sinifc = new sinifC()) in sinifD outside of the constructor:

        public class sinifC
        {
            public void method3()
            {
                Console.WriteLine("Deneme3");
            }
        }

        public class sinifD : sinifC
        {
            void method4()
            {
                Console.WriteLine("Deneme4");
            }

            public sinifD()
            {
                sinifC sinifc = new sinifC();
                sinifc.method3();
            }
        }

    I want to write it like this:

        public class sinifC
        {
            public void method3()
            {
                Console.WriteLine("Deneme3");
            }
        }

        public class sinifD : sinifC
        {
            void method4()
            {
                Console.WriteLine("Deneme4");
            }

            sinifC sinifc = new sinifC();
            sinifc.method3();
        }

    Error: Invalid token '(' in class, struct, or interface member declaration
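    The compiler rejects the second version because a statement such as sinifc.method3() may only appear inside a member body; at class scope C# allows only declarations. A field initializer can create the instance, but the call still has to live in a constructor or method; a minimal sketch:

        public class sinifD : sinifC
        {
            // Allowed at class scope: a declaration with a field initializer.
            private readonly sinifC sinifc = new sinifC();

            public sinifD()
            {
                sinifc.method3(); // allowed: a statement inside a constructor
            }

            void method4()
            {
                Console.WriteLine("Deneme4");
            }
        }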

    Read the article

  • sequencing function calls in javascript - are callbacks the only way?

    - by tim
    I read through various threads like this one, for example. But it really escapes me how to accomplish the following: I have 4 functions and want them to happen one after another in sequence. Notice they are in the wrong order, to get my point across. I want the result to output "1, 2, 3, 4":

        function firstFunction(){
            // some very time consuming asynchronous code...
            console.log('1');
        }

        function thirdFunction(){
            // definitely don't wanna do this until secondFunction is finished
            console.log('3');
        }

        function secondFunction(){
            // waits for firstFunction to be completed
            console.log('2');
        }

        function fourthFunction(){
            // last function, not executed until the other 3 are done.
            console.log('4');
        }

    I tried to figure out callbacks but am getting lost :( Isn't there some simple way to do this? Like looping through an array...
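    A minimal callback-passing sketch: each function takes a done callback and invokes it when its (possibly asynchronous) work finishes; the nesting can then be flattened by walking an array of such functions, which is exactly the "looping through an array" idea:

        function firstFunction(done) {
            setTimeout(function () { // stands in for slow asynchronous work
                console.log('1');
                done();
            }, 100);
        }
        function secondFunction(done) { console.log('2'); done(); }
        function thirdFunction(done)  { console.log('3'); done(); }
        function fourthFunction(done) { console.log('4'); done(); }

        // Run an array of such functions strictly in sequence.
        function runInSequence(fns) {
            (function next(i) {
                if (i < fns.length) fns[i](function () { next(i + 1); });
            })(0);
        }

        runInSequence([firstFunction, secondFunction, thirdFunction, fourthFunction]);
        // logs 1, 2, 3, 4 in order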

    Read the article

  • Quartz.Net Writing your first Hello World Job

    - by Tarun Arora
    In this blog post I'll be covering:

    01: A few things to consider before you schedule a job using Quartz.Net
    02: Setting up your solution to use the Quartz.Net API
    03: Quartz.Net configuration
    04: Writing & scheduling a hello world job with Quartz.Net

    If you are new to Quartz.Net I would recommend going through:

    - A brief introduction to Quartz.net
    - Walkthrough of Installing & Testing Quartz.Net as a Windows Service

    A few things to consider before you schedule a job using Quartz.Net:

    - an instance of the scheduler service
    - a trigger
    - and last but not least, a job

    For example, if I wanted to schedule a script to run on the server, I should be jotting down answers to the questions below:

    a. Considering there are multiple machines set up with the Quartz.Net windows service, how can I choose the instance of Quartz.Net where I want my script to be run?
    b. What will trigger the execution of the job?
    c. How often do I want the job to run?
    d. Do I want the job to run right away, start after a delay, or start at a specific time?
    e. What will happen to my job if the Quartz.Net windows service is reset?
    f. Do I want multiple instances of this job to run concurrently?
    g. Can I pass parameters to the job being executed by the Quartz.Net windows service?

    Setting up your solution to use the Quartz.Net API

    1. Create a new C# Console Application project, call it "HelloWorldQuartzDotNet", and add a reference to Quartz.Net.dll. I use the NuGet Package Manager to add the reference: right-click References and choose Manage NuGet Packages; in the NuGet Package Manager choose Online from the left panel and search for Quartz.Net in the search box on the right. Click Install on the package "Quartz" (screenshot below).

    2. Right-click the project and choose Add New Item. Add a new interface and call it 'IScheduledJob.cs'. Mark the interface public and add the signature for Run. Your interface should look like this:

        namespace HelloWorldQuartzDotNet
        {
            public interface IScheduledJob
            {
                void Run();
            }
        }

    3. Right-click the project and choose Add New Item. Add a class and call it 'ScheduledJob'. Use this class to implement the interface 'IScheduledJob.cs'. Note the pseudo code in the implementation of the Run method:

        using System;

        namespace HelloWorldQuartzDotNet
        {
            class ScheduledJob : IScheduledJob
            {
                public void Run()
                {
                    // Get an instance of the Quartz.Net scheduler
                    // Define the Job to be scheduled
                    // Associate a trigger with the Job
                    // Assign the Job to the scheduler
                    throw new NotImplementedException();
                }
            }
        }

    I'll get into the implementation in more detail, but first let's look at a minimal sample configuration file needed for the Quartz.Net service to work.
    Quartz.Net configuration

    In the App.Config file copy the configuration below:

        <?xml version="1.0" encoding="utf-8" ?>
        <configuration>
          <configSections>
            <section name="quartz" type="System.Configuration.NameValueSectionHandler, System, Version=1.0.5000.0, Culture=neutral, PublicKeyToken=b77a5c561934e089" />
          </configSections>
          <quartz>
            <add key="quartz.scheduler.instanceName" value="ServerScheduler" />
            <add key="quartz.threadPool.type" value="Quartz.Simpl.SimpleThreadPool, Quartz" />
            <add key="quartz.threadPool.threadCount" value="10" />
            <add key="quartz.threadPool.threadPriority" value="2" />
            <add key="quartz.jobStore.misfireThreshold" value="60000" />
            <add key="quartz.jobStore.type" value="Quartz.Simpl.RAMJobStore, Quartz" />
          </quartz>
        </configuration>

    As you can see in the configuration above, I have included the instance name of the Quartz scheduler; the thread pool type, count and priority; and the job store type, which has been defined as RAM. You have the option of configuring an ADO.NET job store instead. More details here.

    Writing & scheduling a hello world job with Quartz.Net

    Once fully implemented, the ScheduledJob.cs class should look like the code below. I'll walk you through the details of the implementation:

    - GetScheduler() uses the name of the Quartz.Net instance and listens on localhost port 555 to try and connect to the Quartz.Net windows service.
    - In Run() an attempt is made to start the scheduler in case it is in standby mode.
    - I have defined a job "WriteHelloToConsole" (that's the name of the job); this job belongs to the group "IT". Think of a group as a logical grouping feature: it helps you bucket jobs into groups, and Quartz.Net gives you the ability to pause or delete all jobs in a group (we'll look at that in some future posts). I have requested recovery of this job in case the Quartz.Net service fails over to the other node in the cluster. The job type is "HelloWorldJob"; this is the class that will be called to execute the job. More details on this below.
    - I have defined a trigger for my job, also named "WriteHelloToConsole". The trigger works on the cron schedule "0 0/1 * 1/1 * ? *", which means fire the job once every minute. I would recommend www.cronmaker.com, a free and great website to build and parse cron expressions. The trigger has priority 1, so if two jobs fire at the same time, this trigger has the higher priority and runs first.
    - Use the job and trigger to schedule the job. This method returns a DateTimeOffset; it is possible to see the next fire time for the job from this variable.

        using System.Collections.Specialized;
        using System.Configuration;
        using Quartz;
        using System;
        using Quartz.Impl;

        namespace HelloWorldQuartzDotNet
        {
            class ScheduledJob : IScheduledJob
            {
                public void Run()
                {
                    // Get an instance of the Quartz.Net scheduler
                    var schd = GetScheduler();

                    // Start the scheduler if it's in standby
                    if (!schd.IsStarted)
                        schd.Start();

                    // Define the Job to be scheduled
                    var job = JobBuilder.Create<HelloWorldJob>()
                        .WithIdentity("WriteHelloToConsole", "IT")
                        .RequestRecovery()
                        .Build();

                    // Associate a trigger with the Job
                    var trigger = (ICronTrigger)TriggerBuilder.Create()
                        .WithIdentity("WriteHelloToConsole", "IT")
                        .WithCronSchedule("0 0/1 * 1/1 * ? *") // visit http://www.cronmaker.com/ Queues the job every minute
                        .WithPriority(1)
                        .Build();

                    // Assign the Job to the scheduler
                    var schedule = schd.ScheduleJob(job, trigger);
                    Console.WriteLine("Job '{0}' scheduled for '{1}'", "", schedule.ToString("r"));
                }

                // Get an instance of the Quartz.Net scheduler
                private static IScheduler GetScheduler()
                {
                    try
                    {
                        var properties = new NameValueCollection();
                        properties["quartz.scheduler.instanceName"] = "ServerScheduler";

                        // set remoting exporter
                        properties["quartz.scheduler.proxy"] = "true";
                        properties["quartz.scheduler.proxy.address"] =
                            string.Format("tcp://{0}:{1}/{2}", "localhost", "555", "QuartzScheduler");

                        // Get a reference to the scheduler
                        var sf = new StdSchedulerFactory(properties);
                        return sf.GetScheduler();
                    }
                    catch (Exception ex)
                    {
                        Console.WriteLine("Scheduler not available: '{0}'", ex.Message);
                        throw;
                    }
                }
            }
        }

    The instance name, host, and port used above are taken from the Quartz.config file; this file is available in the Quartz.Net server installation directory.

    The implementation of my HelloWorldJob class is below. The HelloWorldJob class gets called to execute the job "WriteHelloToConsole" using the once-every-minute trigger set up for this job. HelloWorldJob is a class that implements the interface IJob. I'll walk you through the details of the implementation:

    - context is passed to the Execute method by the Quartz.Net scheduler service. This has everything you need to pull out job- and trigger-specific information.
    - For example, I have pulled out the value of the jobKey name, the fire time, and the next fire time.

        using Quartz;
        using System;

        namespace HelloWorldQuartzDotNet
        {
            class HelloWorldJob : IJob
            {
                public void Execute(IJobExecutionContext context)
                {
                    try
                    {
                        Console.WriteLine("Job {0} fired @ {1} next scheduled for {2}",
                            context.JobDetail.Key,
                            context.FireTimeUtc.Value.ToString("r"),
                            context.NextFireTimeUtc.Value.ToString("r"));
                        Console.WriteLine("Hello World!");
                    }
                    catch (Exception ex)
                    {
                        Console.WriteLine("Failed: {0}", ex.Message);
                    }
                }
            }
        }

    I'll add a call to the scheduler in the Main method in Program.cs:

        using System;
        using System.Threading;

        namespace HelloWorldQuartzDotNet
        {
            class Program
            {
                static void Main(string[] args)
                {
                    try
                    {
                        var sj = new ScheduledJob();
                        sj.Run();
                        Thread.Sleep(10000 * 10000);
                    }
                    catch (Exception ex)
                    {
                        Console.WriteLine("Failed: {0}", ex.Message);
                    }
                }
            }
        }

    This was the third in the series of posts on enterprise scheduling using Quartz.Net; in the next post I'll be covering how to pass parameters to a task scheduled on the Quartz.Net windows service. Thank you for taking the time to read this blog post. If you enjoyed it, remember to subscribe to http://feeds.feedburner.com/TarunArora. Stay tuned!

    Read the article

  • How to deal with transport level security policy with OSB

    - by Jian Liang
    Recently, we received a use case for Oracle Service Bus (OSB) 11g PS4 to consume a Web Service which is secured by an HTTP transport-level security policy. The WSDL of the remote web service looks like the following, where the wsp:Policy element (marked in red in the original post) shows the security policy:

        <?xml version='1.0' encoding='UTF-8'?>
        <definitions xmlns:wssutil="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd"
                     xmlns:wsp="http://schemas.xmlsoap.org/ws/2004/09/policy"
                     xmlns:soap="http://schemas.xmlsoap.org/wsdl/soap/"
                     xmlns:tns="https://httpsbasicauth"
                     xmlns:xsd="http://www.w3.org/2001/XMLSchema"
                     xmlns="http://schemas.xmlsoap.org/wsdl/"
                     targetNamespace="https://httpsbasicauth"
                     name="HttpsBasicAuthService">
          <wsp:UsingPolicy wssutil:Required="true"/>
          <wsp:Policy wssutil:Id="WSHttpBinding_IPartyServicePortType_policy">
            <wsp:ExactlyOne>
              <wsp:All>
                <ns1:TransportBinding xmlns:ns1="http://schemas.xmlsoap.org/ws/2005/07/securitypolicy">
                  <wsp:Policy>
                    <ns1:TransportToken>
                      <wsp:Policy>
                        <ns1:HttpsToken RequireClientCertificate="false"/>
                      </wsp:Policy>
                    </ns1:TransportToken>
                    <ns1:AlgorithmSuite>
                      <wsp:Policy>
                        <ns1:Basic256/>
                      </wsp:Policy>
                    </ns1:AlgorithmSuite>
                    <ns1:Layout>
                      <wsp:Policy>
                        <ns1:Strict/>
                      </wsp:Policy>
                    </ns1:Layout>
                  </wsp:Policy>
                </ns1:TransportBinding>
                <ns2:UsingAddressing xmlns:ns2="http://www.w3.org/2006/05/addressing/wsdl"/>
              </wsp:All>
            </wsp:ExactlyOne>
          </wsp:Policy>
          <types>
            <xsd:schema>
              <xsd:import namespace="https://proxyhttpsbasicauth" schemaLocation="http://localhost:7001/WS/HttpsBasicAuthService?xsd=1"/>
            </xsd:schema>
            <xsd:schema>
              <xsd:import namespace="https://httpsbasicauth" schemaLocation="http://localhost:7001/WS/HttpsBasicAuthService?xsd=2"/>
            </xsd:schema>
          </types>
          <message name="echoString">
            <part name="parameters" element="tns:echoString"/>
          </message>
          <message name="echoStringResponse">
            <part name="parameters" element="tns:echoStringResponse"/>
          </message>
          <portType name="HttpsBasicAuth">
            <operation name="echoString">
              <input message="tns:echoString"/>
              <output message="tns:echoStringResponse"/>
            </operation>
          </portType>
          <binding name="HttpsBasicAuthSoapPortBinding" type="tns:HttpsBasicAuth">
            <wsp:PolicyReference URI="#WSHttpBinding_IPartyServicePortType_policy"/>
            <soap:binding transport="http://schemas.xmlsoap.org/soap/http" style="document"/>
            <operation name="echoString">
              <soap:operation soapAction=""/>
              <input>
                <soap:body use="literal"/>
              </input>
              <output>
                <soap:body use="literal"/>
              </output>
            </operation>
          </binding>
          <service name="HttpsBasicAuthService">
            <port name="HttpsBasicAuthSoapPort" binding="tns:HttpsBasicAuthSoapPortBinding">
              <soap:address location="https://localhost:7002/WS/HttpsBasicAuthService"/>
            </port>
          </service>
        </definitions>

    The security assertion in the WSDL indicates that this is an HTTP transport-level security policy which requires one-way SSL with default authentication (i.e., basic authentication with username/password). Normally, there are two ways to handle web service security policies with OSB 11g:

    - Use WebLogic 9.x policies
    - Use OWSM

    Since OSB doesn't support WebLogic 9.x WSSP transport-level assertions (except for the WS transport), when we tried to create the business service based on the imported WSDL, OSB complained with the following message:

        [OSB Kernel:398133] The service is based on WSDL with Web Services Security Policies that are not natively supported by Oracle Service Bus. Please select OWSM Policies - From OWSM Policy Store option and attach equivalent OWSM security policy.
    For the Business Service, either you can add the necessary client policies manually by clicking the Add button, or you can let Oracle Service Bus automatically pick and add compatible client policies by clicking the Add Compatible button.

    Unfortunately, when we tried OWSM, we couldn't find http_token_policy in OWSM, since OSB PS4 doesn't support the OWSM http_token_policy. It seems that we ran into an unsupported situation where no appropriate policy is available from either WebLogic or OWSM.

    As this security policy requires one-way SSL with basic authentication at the transport level, a possible workaround is to meet the remote service's requirement at the transport level without using a web service policy. We can simply use OSB to establish the SSL connection and provide the username/password for authentication at the transport level to the remote web service. In this case, the business service within OSB will be transparent to the web service policy. However, we still need to deal with the OSB console's complaint about the unsupported security policy, because the failure of WSDL validation prevents the OSB console from moving forward. With help from the OSB Product Management team, we finally came up with the following solutions.

    Solution 1: OSB PS5

    The good news is that the http_token_policy is made available in OSB PS5. With OSB PS5, you can simply add the OWSM oracle/wss_http_token_over_ssl_client_policy to the business service. The simplest solution is to upgrade to OSB PS5, where the OWSM solution is provided out of the box. But if you are not in a position where upgrading is an immediate option, you might want to consider the two workaround solutions described below.

    Solution 2: Modifying the WSDL

    This solution addresses the OSB console's complaint by removing the security policy from the imported WSDL within OSB. Without the security policy, the OSB console allows the business service to be created based on the modified WSDL. Please bear in mind that modifying the WSDL is done only on the OSB side via the OSB console; no change is required on the remote web service. The main steps of this solution:

    - Connect to the OSB console
    - Import the remote WSDL into OSB
    - Remove the security assertion (the wsp:Policy section shown above) from the imported WSDL
    - Create a service account; in our sample, we simply take the user weblogic
    - Create the business service, check "Basic" for Authentication, and select the created service account
    - Make sure that OSB consumes the web service via https

    This solution requires modifying the WSDL. It is suitable for any OSB version (10g or 11g) prior to PS5 without OWSM. However, modifying the WSDL by hand is troublesome, as it requires the user to remember that the original WSDL was edited. It forces you to make the same edit each time you want to re-import the service WSDL when changes occur at the service level. This also prevents you from using UDDI to import the WSDL.

    Solution 3: Using the original WSDL

    This solution keeps the WSDL intact and ignores the embedded policy by using OWSM. By design, OWSM doesn't like WSDLs with embedded security assertions. Since OWSM doesn't provide a feature to explicitly ignore the embedded policy of a remote WSDL, in this solution we use OWSM in a tricky way to ignore the embedded policy:

    - Connect to the OSB console
    - Import the remote WSDL into OSB
    - Create a service account
    - Create the business service, check "Basic" for Authentication, and select the created service account
    - As the imported WSDL is intact, the OSB Kernel:398133 error is expected; ignore it for the moment and navigate to the Policies page of the business service
    - Select "From OWSM Policy Store" and click the "Add" button; the list of policies will pop up
    - Here is the tricky part: select an arbitrary policy, and click "Cancel"
    - Update and save

    By clicking the "Cancel" button, we didn't add any OWSM policy to the business service, but the embedded policy is now ignored. Yes, this is tricky. According to the Oracle OSB Product Manager, a future release of OWSM will add a "None" button which allows the embedded policy to be ignored explicitly. This solution keeps the imported WSDL intact, which is the big advantage over solution 2. It is suitable for an OSB 11g domain (prior to PS5) with OWSM configured.

    This blog addressed an unsupported transport-level web service security policy with OSB PS4. To summarize: if you are using OSB PS5 or are in a position to upgrade to PS5, the recommendation is to use the OWSM out-of-the-box transport-level security policy directly. With releases prior to 11g PS5, you can consider solution 2 or 3, depending on whether OWSM is configured.

    Read the article

  • Queued Loadtest to remove Concurrency issues using Shared Data Service in OpenScript

    - by stefan.thieme(at)oracle.com
    Queued Processing to remove Concurrency issues in Loadtest Scripts

    Some scripts act on information returned by the server, e.g. act on the first item in the returned list of pending tasks/actions. This may lead to concurrency issues if the virtual users simulated in a load test scenario are not synchronized in some way. As the load test cases should be carried out in a comparable and straightforward manner, simply cancelling a transaction when a collision occurs is clearly not an option. If you increase the number of virtual users, this approach would lead to a high number of requests for the early steps in your transaction (e.g. login, retrieve list of action points, assign an action point to the virtual user), but later steps would rarely be visited successfully, or at all, depending on the application logic.

    A way to tackle this problem is to enqueue the virtual users in a Shared Data Service queue. Only the first virtual user in this queue will be allowed to carry out the critical steps (retrieve list of action points, assign an action point to the virtual user) of the transaction at any one time. Once a virtual user has passed the critical path, it will dequeue itself from the head of the queue and continue with its actions. This theoretically allows virtual users to run all non-critical steps of the transaction in parallel. In practice this is rarely the case, and it does not allow adding more than N users to a transaction without causing delays due to virtual users waiting in the queue, N being the time of the total transaction divided by the sum of the time of all critical steps in this transaction.

    This limit can be circumvented by allowing multiple queues to act on individual segments of the list of actions, e.g. a per-country filter, an "ends with 0..9" filter, etc. This would require additional handling of these additional queues of slots for the virtual users at the head of the queue, in order to maintain mutually exclusive access to the first element of the list returned by the server at any one time during the load test. Such improved handling of multiple queues and/or multiple slots is beyond the scope of this paper.

    Shared Data Services Pre-Requisites

    Start WebLogic Server to host Shared Data Services

    You will have to make sure that your WebLogic server is installed and started. Shared Data Services may not work if you installed only the minimal installation package for OpenScript. If however you installed the default package including OLT and OTM, you may follow the instructions below to start and verify the WebLogic installation.

    To start the WebLogic Server deployed underneath Oracle Load Testing and/or Oracle Test Manager, go to your Start menu, Oracle Application Testing Suite, and select the Restart Oracle Application Testing Suite Application Service entry from the Tools submenu. To verify the service has been started, you can run the Microsoft Management Console for Services by selecting Run from the Start menu and entering services.msc. Look for the entry that reads Oracle Application Testing Suite Application Service; once it has changed its status from Starting to Started you can proceed to verify the login. Please note that this may take several minutes, up to 10 minutes depending on the strength of your CPU horsepower.

    Verify WebLogic Server user credentials

    Next open the Oracle WebLogic Server Administration Console on http://localhost:8088/console. It may take a while until the application is deployed and started; a holding page may be displayed until the Administration Console has been deployed on the fly. Afterwards you can log in using the username oats and the password that you selected at install time for your Application Testing Suite administrative purposes. This will bring up the Home page of your WebLogic Server; you have now verified that you are able to log in with these credentials. If you want to check the details, navigate to Security Realms, myrealm, Users and Groups tab. Here you could add users to your WebLogic Server to be used in the later steps. Details on the Groups required for such a custom user to work exceed this quick overview and should be looked up in the WebLogic Server Administration Guide.

    Shared Data Services pre-requisites for load testing

    OpenScript Preferences have to be set to enable Encryption and provide a default Shared Data Service connection for playback. These are the pre-requisites for load testing with Shared Data Services. Please note that the usage of the Connection Parameters (individual directive in the script) for Shared Data Services did not play back reliably in the current version 9.20.0370 of Oracle Load Testing (OLT), and encryption of credentials still seemed to be mandatory as well.

    General Encryption settings

    Select OpenScript Preferences from the View menu and navigate to the General, Encryption entry in the tree on the left. Select the Encrypt script data option from the list and enter the same password that you used for securing your WebLogic Server Administration Console.

    Enable global shared data access credentials

    Select OpenScript Preferences from the View menu and navigate to the Playback, Shared Data entry in the tree on the left. Enable the global shared data access credentials and enter the Address, User name and Password determined for your WebLogic Server to host Shared Data Services. Please note that you may want to replace localhost in Address with the host's real name in case you plan to run load tests with Loadtest Agents running on remote systems.

    Queued Processing of Transactions

    Enable the Shared Data Services Module in Script Properties

    The Shared Data Services Module has to be enabled for each script that wants to employ the Shared Data Service queue functionality in OpenScript. It can be enabled under the Script menu by selecting Script Properties. On the Script Properties dialog, select the Modules section and check Shared Data to enable the Shared Data Service Module for your script. Checking the Shared Data Services option will effectively add a line to your script code that adds the sharedData ScriptService to your script class of IteratingVUserScript:

        @ScriptService oracle.oats.scripting.modules.sharedData.api.SharedDataService sharedData;

    Record your script

    Record your script as usual, and then add the following things for queue handling in the Initialize code block, before the first step and after the last step of your critical path, and in the Finalize code block. The Java code to be added at the individual locations is explained in the following sections in full detail.

    Create a Shared Data Queue in Initialize

    To create a Shared Data Queue, go to the Java view of your script and enter the following statements in the initialize() code block:

        info("Create queueA with life time of 120 minutes");
        sharedData.createQueue("queueA", 120);

    This will create an instantiation of the Shared Data Queue object named queueA which is maintained for up to 120 minutes. If you want to use the code for multiple scripts, make sure to use a different queue name for each one here and in the subsequent steps. You may even consider using a dynamic queue name based on the filters of the result list being concurrently accessed.

    Prepare a unique id for each Iteration

    In order to keep track of individual virtual users in our queue, we need to create a unique identifier from the virtual user id and the used username right after retrieving the next record from our databank file:

        getDatabank("Usernames").getNextDatabankRecord();
        getVariables().set("usernameValue1",
            "VU_{{@vuid}}_{{@iterationnum}}_{{db.Usernames.Username}}_{{@timestamp}}_{{@random(10000)}}");
        String usernameValue = getVariables().get("usernameValue1");
        info("Now running virtual user " + usernameValue);

    As you can see from the above code block, we have set the OpenScript variable usernameValue1 to VU_{{@vuid}}_{{@iterationnum}}_{{db.Usernames.Username}}_{{@timestamp}}_{{@random(10000)}}, which is a concatenation of the virtual user id and the iteration number for general uniqueness, as well as the username from our databank, the timestamp and a random number to make it further unique and ease the spotting of errors. Not all of these fields are actually required to make it really unique, but adding the queue name may also be considered to help troubleshoot multiple queues. The value is then retrieved with the getVariables().get() method call and assigned to the usernameValue String used throughout the script. Please note that moving the getDatabank("Usernames").getNextDatabankRecord(); call to the initialize block was later considered, to remove concurrency of multiple virtual users running with the same userid and therefore accessing the same "My Inbox" in step 6. This will effectively give each virtual user a userid from the databank file; make sure you have enough userids to remove this second hurdle.

    Enqueue and attend the Queue before the Critical Path

    To maintain the right order of virtual users being allowed into the critical path of the transaction, the following pseudo step has to be added in front of the first critical step. In the case of this example, this is right in front of the step where we retrieve the list of actions from which we select the first to be assigned to us.

        beginStep("[0] Waiting in the Queue", 0);
        {
            info("Enqueued virtual user " + usernameValue + " at the end of queueA");
            sharedData.offerLast("queueA", usernameValue);
            info("Wait until the user is the first in queueA");
            String queueValue1 = null;
            do {
                // we wait for at least 0.7 seconds before we check the head of the
                // queue. This is the time it takes one user to move through the
                // critical path, i.e. pass steps [5] Enter country and [6] Assign
                // to me
                Thread.sleep(700);
                queueValue1 = (String) sharedData.peekFirst("queueA");
                info("The first user in queueA is currently: '" + queueValue1 + "' "
                    + queueValue1.getClass() + " length " + queueValue1.length());
                info("The current user is '" + usernameValue + "' " + usernameValue.getClass()
                    + " length " + usernameValue.length()
                    + ": indexOf " + usernameValue.indexOf(queueValue1)
                    + " equals " + usernameValue.equals(queueValue1));
            } while (queueValue1.indexOf(usernameValue) < 0);
            info("Now the user is the first in queueA");
        }
        endStep();

    This will enqueue the username at the tail of our queue. It will then wait for at least 700 milliseconds, the time it takes one user to exit the critical path, and compare the head of our queue with its username. This last step is repeated while the two are not equal (indexOf less than zero). If they are equal, the indexOf yields a value of zero or larger, and we will perform the critical steps.

    Dequeue after the Critical Path

    After the virtual user has left the critical path and completed its last step, the following code block needs to dequeue the virtual user. In the case of our example, this is right after the action has actually been assigned to the virtual user. This will allow the next virtual user to retrieve the list of actions still available and in turn make his selection/assignment.

        info("Get and remove the current user from the head of queueA");
        String pollValue1 = (String) sharedData.pollFirst("queueA");

    The current user is removed from the head of the queue. The next one will now be able to match his username against the head of the queue.

    Clear and Destroy the Queue for Finish

    When the script has completed, it should clear and destroy the queue. This code block can be put in the finish block of your script and/or in a separate script, in order to clear and remove the queue in case you have spotted an error or want to reset the queue for some reason.

        info("Clear queueA");
        sharedData.clearQueue("queueA");
        info("Destroy queueA");
        sharedData.destroyQueue("queueA");

    The users waiting in queueA are cleared and the queue is destroyed. If you have scripts still executing, they will be caught in a loop. I found it better to maintain a separate Reset Queue script which contains only the following code in the initialize() block. I use this script to make sure the queue is cleared between multiple load test runs. This script could even be added as the first in a larger scenario, which would execute it only once at the very start of the load test and make sure the queues do not contain any stale entries.

        info("Create queueA with life time of 120 minutes");
        sharedData.createQueue("queueA", 120);
        info("Clear queueA");
        sharedData.clearQueue("queueA");

    This will create a Shared Data Queue instance of queueA and clear all entries from this queue.

    Monitoring the Queue

    While creating the scripts, it was useful to monitor the contents, i.e. the current first user in the queue. The following code block will make sure the Shared Data Queue is accessible in the initialize() block:

        info("Create queueA with life time of 120 minutes");
        sharedData.createQueue("queueA", 120);

    In the run() block, the following code will continuously monitor the first element of the queue and write an informational message with the current usernameValue to the Result window:

        info("Monitor the first users in queueA");
        String queueValue1 = null;
        do {
            queueValue1 = (String) sharedData.peekFirst("queueA");
            if (queueValue1 != null)
                info("The first user in queueA is currently: '" + queueValue1 + "' "
                    + queueValue1.getClass() + " length " + queueValue1.length());
        } while (true);

    This script can be run from OpenScript in parallel to a load test performed by Oracle Load Testing. However, it is not recommended to run this in a production load test, as the performance impact is unknown. Accessing the queue's head with the peekFirst() method has been reported with about 2 seconds response time by both OpenScript and OLT. It is advised to log a Service Request to see if this could be lowered in future releases of the Application Testing Suite, as pollFirst(), and even offerLast() writing to the tail of the queue, usually returned after 0.1 seconds on average.

    Debugging the Queue

    While debugging the scripts, the following was useful to remove single entries from the head of the queue, i.e. the current first user. The following code block will make sure the Shared Data Queue is accessible in the initialize() block:

        info("Create queueA with life time of 120 minutes");
        sharedData.createQueue("queueA", 120);

    In the run() block, the following code will remove the first element of the queue and write an informational message with the current usernameValue to the Result window:

        info("Get and remove the current user from the head of queueA");
        String pollValue1 = (String) sharedData.pollFirst("queueA");
        info("The first user in queueA was currently: '" + pollValue1 + "' "
            + pollValue1.getClass() + " length " + pollValue1.length());

    References

    Oracle Functional Testing OpenScript User's Guide Version 9.20 [E15488-05], Chapter 17, "Using the Shared Data Module"
    http://download.oracle.com/otn/nt/apptesting/oats-docs-9.21.0030.zip

    Oracle Fusion Middleware Oracle WebLogic Server Administration Console Online Help 11g Release 1 (10.3.4) [E13952-04], "Manage users and groups"
    http://download.oracle.com/docs/cd/E17904_01/apirefs.1111/e13952/taskhelp/security/ManageUsersAndGroups.htm

    Read the article

  • Metro: Namespaces and Modules

    - by Stephen.Walther
    The goal of this blog entry is to describe how you can use the Windows JavaScript (WinJS) library to create namespaces. In particular, you learn how to use the WinJS.Namespace.define() and WinJS.Namespace.defineWithParent() methods. You also learn how to hide private methods by using the module pattern.

    Why Do We Need Namespaces?

    Before we do anything else, we should start by answering the question: Why do we need namespaces? What function do they serve? Do they just add needless complexity to our Metro applications? After all, plenty of JavaScript libraries do just fine without introducing support for namespaces. For example, jQuery has no support for namespaces and jQuery is the most popular JavaScript library in the universe. If jQuery can do without namespaces, why do we need to worry about namespaces at all?

    Namespaces perform two functions in a programming language. First, namespaces prevent naming collisions. In other words, namespaces enable you to create more than one object with the same name without conflict. For example, imagine that two companies - company A and company B - both want to make a JavaScript shopping cart control and both companies want to name the control ShoppingCart. By creating a CompanyA namespace and a CompanyB namespace, both companies can create a ShoppingCart control: a CompanyA.ShoppingCart and a CompanyB.ShoppingCart control.

    The second function of a namespace is organization. Namespaces are used to group related functionality even when the functionality is defined in different physical files. For example, I know that all of the methods in the WinJS library related to working with classes can be found in the WinJS.Class namespace. Namespaces make it easier to understand the functionality available in a library.

    If you are building a simple JavaScript application then you won't have much reason to care about namespaces. If you need to use multiple libraries written by different people then namespaces become very important.

    Using WinJS.Namespace.define()

    In the WinJS library, the most basic method of creating a namespace is to use the WinJS.Namespace.define() method. This method enables you to declare a namespace (of arbitrary depth). The WinJS.Namespace.define() method has the following parameters:

    · name - A string representing the name of the new namespace. You can add a nested namespace by using dot notation.
    · members - An optional collection of objects to add to the new namespace.

    For example, the following code sample declares two new namespaces named CompanyA and CompanyB.Controls. Both namespaces contain a ShoppingCart object which has a checkout() method:

        // Create CompanyA namespace with ShoppingCart
        WinJS.Namespace.define("CompanyA");
        CompanyA.ShoppingCart = {
            checkout: function () {
                return "Checking out from A";
            }
        };

        // Create CompanyB.Controls namespace with ShoppingCart
        WinJS.Namespace.define(
            "CompanyB.Controls",
            {
                ShoppingCart: {
                    checkout: function () {
                        return "Checking out from B";
                    }
                }
            }
        );

        // Call CompanyA ShoppingCart checkout method
        console.log(CompanyA.ShoppingCart.checkout()); // Writes "Checking out from A"

        // Call CompanyB.Controls checkout method
        console.log(CompanyB.Controls.ShoppingCart.checkout()); // Writes "Checking out from B"

    In the code above, the CompanyA namespace is created by calling WinJS.Namespace.define("CompanyA"). Next, the ShoppingCart is added to this namespace. The namespace is defined and an object is added to the namespace in separate lines of code.
    A different approach is taken in the case of the CompanyB.Controls namespace. The namespace is created and the ShoppingCart object is added to the namespace with a single statement:

        WinJS.Namespace.define(
            "CompanyB.Controls",
            {
                ShoppingCart: {
                    checkout: function () {
                        return "Checking out from B";
                    }
                }
            }
        );

    Notice that CompanyB.Controls is a nested namespace. The top-level namespace CompanyB contains the namespace Controls. You can declare a nested namespace using dot notation and the WinJS library handles the details of creating one namespace within the other. After the namespaces have been defined, you can use either of the two shopping cart controls. You can call CompanyA.ShoppingCart.checkout() or you can call CompanyB.Controls.ShoppingCart.checkout().

    Using WinJS.Namespace.defineWithParent()

    The WinJS.Namespace.defineWithParent() method is similar to the WinJS.Namespace.define() method. Both methods enable you to define a new namespace. The difference is that the defineWithParent() method enables you to add a new namespace to an existing namespace. The WinJS.Namespace.defineWithParent() method has the following parameters:

    · parentNamespace - An object which represents a parent namespace
    · name - A string representing the new namespace to add to the parent namespace
    · members - An optional collection of objects to add to the new namespace

    The following code sample demonstrates how you can create a root namespace named CompanyA and add a Controls child namespace to the CompanyA parent namespace:

        WinJS.Namespace.define("CompanyA");
        WinJS.Namespace.defineWithParent(CompanyA, "Controls",
            {
                ShoppingCart: {
                    checkout: function () {
                        return "Checking out";
                    }
                }
            }
        );

        console.log(CompanyA.Controls.ShoppingCart.checkout()); // Writes "Checking out"

    One significant advantage of using the defineWithParent() method over the define() method is that the defineWithParent() method is strongly typed. In other words, you use an object to represent the base namespace instead of a string. If you misspell the name of the object (CompnyA) then you get a runtime error.

    Using the Module Pattern

    When you are building a JavaScript library, you want to be able to create both public and private methods. Some methods, the public methods, are intended to be used by consumers of your JavaScript library. The public methods act as your library's public API. Other methods, the private methods, are not intended for public consumption. Instead, these methods are internal methods required to get the library to function. You don't want people calling these internal methods because you might need to change them in the future.

    JavaScript does not support access modifiers. You can't mark an object or method as public or private. Anyone gets to call any method and anyone gets to interact with any object. The only mechanism for encapsulating (hiding) methods and objects in JavaScript is to take advantage of functions. In JavaScript, a function determines variable scope. A JavaScript variable either has global scope - it is available everywhere - or it has function scope - it is available only within a function. If you want to hide an object or method then you need to place it within a function.
    For example, the following code contains a function named doSomething() which contains a nested function named doSomethingElse():

        function doSomething() {
            console.log("doSomething");

            function doSomethingElse() {
                console.log("doSomethingElse");
            }
        }

        doSomething();     // Writes "doSomething"
        doSomethingElse(); // Throws ReferenceError

    You can call doSomethingElse() only within the doSomething() function. The doSomethingElse() function is encapsulated in the doSomething() function. The WinJS library takes advantage of function encapsulation to hide all of its internal methods. All of the WinJS methods are defined within self-executing anonymous functions. Everything is hidden by default. Public methods are exposed by explicitly adding the public methods to namespaces defined in the global scope.

    Imagine, for example, that I want a small library of utility methods. I want to create a method for calculating sales tax and a method for calculating the expected ship date of a product. The following library encapsulates the implementation of my library in a self-executing anonymous function:

        (function (global) {

            // Public method which calculates tax
            function calculateTax(price) {
                return calculateFederalTax(price) + calculateStateTax(price);
            }

            // Private method for calculating state tax
            function calculateStateTax(price) {
                return price * 0.08;
            }

            // Private method for calculating federal tax
            function calculateFederalTax(price) {
                return price * 0.02;
            }

            // Public method which returns the expected ship date
            function calculateShipDate(currentDate) {
                currentDate.setDate(currentDate.getDate() + 4);
                return currentDate;
            }

            // Export public methods
            WinJS.Namespace.define("CompanyA.Utilities",
                {
                    calculateTax: calculateTax,
                    calculateShipDate: calculateShipDate
                }
            );

        })(this);

        // Show expected ship date
        var shipDate = CompanyA.Utilities.calculateShipDate(new Date());
        console.log(shipDate);

        // Show price + tax
        var price = 12.33;
        var tax = CompanyA.Utilities.calculateTax(price);
        console.log(price + tax);

    In the code above, the self-executing anonymous function contains four functions: calculateTax(), calculateStateTax(), calculateFederalTax(), and calculateShipDate(). The following statement is used to expose only the calculateTax() and the calculateShipDate() functions:

        // Export public methods
        WinJS.Namespace.define("CompanyA.Utilities",
            {
                calculateTax: calculateTax,
                calculateShipDate: calculateShipDate
            }
        );

    Because the calculateTax() and calculateShipDate() functions are added to the CompanyA.Utilities namespace, you can call these two methods outside of the self-executing function. These are the public methods of your library which form the public API. The calculateStateTax() and calculateFederalTax() methods, on the other hand, are forever hidden within the black hole of the self-executing function. These methods are encapsulated and can never be called outside of the scope of the self-executing function. These are the internal methods of your library.

    Summary

    The goal of this blog entry was to describe why and how you use namespaces with the WinJS library. You learned how to define namespaces using both the WinJS.Namespace.define() and WinJS.Namespace.defineWithParent() methods. We also discussed how to hide private members and expose public members using the module pattern.

    Read the article

  • Metro: Understanding the default.js File

    - by Stephen.Walther
The goal of this blog entry is to describe — in painful detail — the contents of the default.js file in a Metro style application written with JavaScript. When you use Visual Studio to create a new Metro application, you get a default.js file automatically. The file is located at \js\default.js. The default.js file kicks off all of your custom JavaScript code. It is the main entry point to a Metro application. The default contents of the default.js file are included below: // For an introduction to the Blank template, see the following documentation: // http://go.microsoft.com/fwlink/?LinkId=232509 (function () { "use strict"; var app = WinJS.Application; app.onactivated = function (eventObject) { if (eventObject.detail.kind === Windows.ApplicationModel.Activation.ActivationKind.launch) { if (eventObject.detail.previousExecutionState !== Windows.ApplicationModel.Activation.ApplicationExecutionState.terminated) { // TODO: This application has been newly launched. Initialize // your application here. } else { // TODO: This application has been reactivated from suspension. // Restore application state here. } WinJS.UI.processAll(); } }; app.oncheckpoint = function (eventObject) { // TODO: This application is about to be suspended. Save any state // that needs to persist across suspensions here. You might use the // WinJS.Application.sessionState object, which is automatically // saved and restored across suspension. If you need to complete an // asynchronous operation before your application is suspended, call // eventObject.setPromise(). }; app.start(); })(); There are several mysterious things happening in this file. The purpose of this blog entry is to dispel this mystery. Understanding the Module Pattern The first thing that you should notice about the default.js file is that the entire contents of this file are enclosed within a self-executing JavaScript function: (function () { ... })(); Metro applications written with JavaScript use something called the module pattern. The module pattern is a common pattern used in JavaScript applications to create private variables, objects, and methods. Anything that you create within the module is encapsulated within the module. Enclosing all of your custom code within a module prevents you from stomping on code from other libraries accidentally. Your application might reference several JavaScript libraries, and the JavaScript libraries might have variables, objects, or methods with the same names. By encapsulating your code in a module, you avoid accidentally overwriting variables, objects, or methods in the other libraries. Enabling Strict Mode with "use strict" The first statement within the default.js module enables JavaScript strict mode: 'use strict'; Strict mode is a new feature of ECMAScript 5 (the latest standard for JavaScript) which enables you to make JavaScript more strict. For example, when strict mode is enabled, you cannot declare variables without using the var keyword. The following statement would result in an exception: hello = "world!"; When strict mode is enabled, this statement throws a ReferenceError. When strict mode is not enabled, a global variable is created which, most likely, is not what you want to happen. I'd rather get the exception than the unwanted global variable.
The full specification for strict mode is contained in the ECMAScript 5 specification (look at Annex C): http://www.ecma-international.org/publications/files/ECMA-ST/ECMA-262.pdf Aliasing the WinJS.Application Object The next line of code in the default.js file is used to alias the WinJS.Application object: var app = WinJS.Application; This line of code enables you to use a short-hand syntax when referring to the WinJS.Application object: for example, app.onactivated instead of WinJS.Application.onactivated. The WinJS.Application object represents your running Metro application. Handling Application Events The default.js file contains an event handler for the WinJS.Application activated event: app.onactivated = function (eventObject) { if (eventObject.detail.kind === Windows.ApplicationModel.Activation.ActivationKind.launch) { if (eventObject.detail.previousExecutionState !== Windows.ApplicationModel.Activation.ApplicationExecutionState.terminated) { // TODO: This application has been newly launched. Initialize // your application here. } else { // TODO: This application has been reactivated from suspension. // Restore application state here. } WinJS.UI.processAll(); } }; The WinJS.Application class supports the following events: · loaded – Happens after the browser's DOMContentLoaded event. After this event, the DOM is ready and you can access elements in a page. This event is raised before external images have been loaded. · activated – Triggered by the Windows.UI.WebUI.WebUIApplication activated event. After this event, the WinRT is ready. · ready – Happens after both the loaded and activated events. · unloaded – Happens before the application is unloaded. The following default.js file has been modified to capture each of these events and write a message to the Visual Studio JavaScript Console window: (function () { "use strict"; var app = WinJS.Application; WinJS.Application.onloaded = function (e) { console.log("Loaded"); }; WinJS.Application.onactivated = function (e) { console.log("Activated"); }; WinJS.Application.onready = function (e) { console.log("Ready"); } WinJS.Application.onunload = function (e) { console.log("Unload"); } app.start(); })(); When you execute the code above, a message is written to the Visual Studio JavaScript Console window when each event occurs, with the exception of the Unload event (presumably because the console is not attached when that event is raised). Handling Different Activation Contexts The code for the activated handler in the default.js file looks like this: app.onactivated = function (eventObject) { if (eventObject.detail.kind === Windows.ApplicationModel.Activation.ActivationKind.launch) { if (eventObject.detail.previousExecutionState !== Windows.ApplicationModel.Activation.ApplicationExecutionState.terminated) { // TODO: This application has been newly launched. Initialize // your application here. } else { // TODO: This application has been reactivated from suspension. // Restore application state here. } WinJS.UI.processAll(); } }; Notice that the code contains a conditional which checks the Kind of the event (the value of e.detail.kind). The startup code is executed only when the activated event is triggered by a Launch event. The ActivationKind enumeration has the following values: · launch · search · shareTarget · file · protocol · fileOpenPicker · fileSavePicker · cacheFileUpdater · contactPicker · device · printTaskSettings · cameraSettings Metro style applications can be activated in different contexts.
For example, a camera application can be activated when modifying camera settings. In that case, the ActivationKind would be CameraSettings. Because we want to execute our JavaScript code when our application first launches, we verify that the kind of the activation event is an ActivationKind.Launch event. There is a second conditional within the activated event handler which checks whether the application is being newly launched or whether it is being resumed from a suspended state. When running a Metro application with Visual Studio, you can use Visual Studio to simulate different application execution states by taking advantage of the Debug toolbar and the new Debug Location toolbar. Handling the checkpoint Event The default.js file also includes an event handler for the WinJS.Application checkpoint event: app.oncheckpoint = function (eventObject) { // TODO: This application is about to be suspended. Save any state // that needs to persist across suspensions here. You might use the // WinJS.Application.sessionState object, which is automatically // saved and restored across suspension. If you need to complete an // asynchronous operation before your application is suspended, call // eventObject.setPromise(). }; The checkpoint event is raised when your Metro application goes into a suspended state. The idea is that you can save your application data when your application is suspended and reload it when your application resumes. Starting the Application The final statement in the default.js file is the statement that gets everything going: app.start(); Events are queued up in a JavaScript array named eventQueue. Until you call the start() method, the events in the queue are not processed. If you don't call the start() method then the Loaded, Activated, Ready, and Unloaded events are never raised. Summary The goal of this blog entry was to describe the contents of the default.js file, which is the JavaScript file that you use to kick off your custom code in a Windows Metro style application written with JavaScript. In this blog entry, I discussed the module pattern, JavaScript strict mode, WinJS Application events, and activation contexts.

    Read the article

  • Part 2 – Load Testing In The Cloud

    - by Tarun Arora
Welcome to Part 2! In Part 1 we discussed the advantages of creating a Test Rig in the cloud, the Azure edge and the Test Rig topology we want to get to. In Part 2, let's start by understanding the components of Azure we'll be making use of, followed by manually putting them together to create the test rig. So… let's get down and dirty and start setting up the Test Rig. What Components of Azure will I be using for building the Test Rig in the Cloud? To run the Test Agents we'll make use of Windows Azure Compute, and to enable communication between the Test Controller and the Test Agents we'll make use of Windows Azure Connect. Azure Connect The Test Controller is on premise and the Test Agents are in the cloud (how will they talk?). To enable communication between the two, we'll make use of Windows Azure Connect. With Windows Azure Connect, you can use a simple user interface to configure IPsec protected connections between computers or virtual machines (VMs) in your organization's network, and roles running in Windows Azure. With this you can now join Windows Azure role instances to your domain, so that you can use your existing methods for domain authentication, name resolution, or other domain-wide maintenance actions. For more details refer to an overview of Windows Azure Connect, and to a very useful video explaining everything you wanted to know about Windows Azure Connect. Azure Compute Windows Azure Compute provides developers a platform to host and manage applications in Microsoft's data centres across the globe. A Windows Azure application is built from one or more components called 'roles.' Roles come in three different types: Web role, Worker role, and Virtual Machine (VM) role; we'll be using the Worker role to set up the Test Agents. There is a very nice blog post discussing the difference between the three role types. Developers are free to use the .NET framework or other software that runs on Windows with the Worker role or Web role. Developers can also create applications using languages such as PHP and Java. More on Windows Azure Compute. Each Windows Azure compute instance represents a virtual server:

Virtual Machine Size | CPU Cores | Memory | Cost Per Hour
Extra Small | Shared | 768 MB | $0.04
Small | 1 | 1.75 GB | $0.12
Medium | 2 | 3.50 GB | $0.24
Large | 4 | 7.00 GB | $0.48
Extra Large | 8 | 14.00 GB | $0.96

You might want to review the Windows Azure Pricing FAQ. Let's Get Started building the Test Rig…

Configuration | Machine Role | Comments
VM – 1 | Domain Controller for Playpit.com | On Premise
VM – 2 | TFS, Test Controller | On Premise
VM – 3 | Test Agent | Cloud

In this blog post I will assume that you have the domain, Team Foundation Server and Test Controller installed and set up already. If not, please refer to the TFS 2010 Installation Guide and this walkthrough on MSDN to set up your Test Controller. You can also download a preconfigured TFS 2010 VM from Brian Keller's blog; Brian also has some great hands-on labs on TFS 2010 that you may want to explore. I. Let's start building VM – 3: The Test Agent Download the Windows Azure SDK and Tools. Open Visual Studio and create a new Windows Azure Project using the Cloud template. Choose the Worker Role for reasons explained in the earlier post. The WorkerRole.cs implements the Run() and OnStart() methods; no code changes are required. You should be able to compile the project and run it in the compute emulator (the compute emulator should have been installed as part of the Windows Azure Toolkit) on your local machine.
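For reference, the template-generated WorkerRole.cs looks roughly like the sketch below. This is an illustration rather than a byte-for-byte copy of what your SDK version generates; the namespace in particular will match whatever you named your project:

using System.Diagnostics;
using System.Net;
using System.Threading;
using Microsoft.WindowsAzure.ServiceRuntime;

namespace WindowsAzureProject2
{
    public class WorkerRole : RoleEntryPoint
    {
        public override void Run()
        {
            // Sample worker implementation: loop forever so Windows Azure
            // keeps the role instance (our future Test Agent VM) alive.
            Trace.WriteLine("WorkerRole1 entry point called", "Information");
            while (true)
            {
                Thread.Sleep(10000);
                Trace.WriteLine("Working", "Information");
            }
        }

        public override bool OnStart()
        {
            // Set the maximum number of concurrent outbound connections.
            ServicePointManager.DefaultConnectionLimit = 12;
            return base.OnStart();
        }
    }
}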
We will only be making changes to the WindowsAzureProject; open ServiceDefinition.csdef. Ensure that the vmsize is Small (remember the cost chart above). Import the "Connect" module. I am importing the Connect module because I need to join the Worker role VM to the Playpit domain. <?xml version="1.0" encoding="utf-8"?> <ServiceDefinition name="WindowsAzureProject2" xmlns="http://schemas.microsoft.com/ServiceHosting/2008/10/ServiceDefinition"> <WorkerRole name="WorkerRole1" vmsize="Small"> <Imports> <Import moduleName="Diagnostics" /> <Import moduleName="Connect"/> </Imports> </WorkerRole> </ServiceDefinition> Go to the ServiceConfiguration.Cloud.cscfg and note that settings with the key 'Microsoft.WindowsAzure.Plugins.Connect.%%%%' have been added to the configuration file. This is because you decided to import the Connect module. See the config below. <?xml version="1.0" encoding="utf-8"?> <ServiceConfiguration serviceName="WindowsAzureProject2" xmlns="http://schemas.microsoft.com/ServiceHosting/2008/10/ServiceConfiguration" osFamily="1" osVersion="*"> <Role name="WorkerRole1"> <Instances count="1" /> <ConfigurationSettings> <Setting name="Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" value="UseDevelopmentStorage=true" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.ActivationToken" value="" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.Refresh" value="" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.WaitForConnectivity" value="" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.Upgrade" value="" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.EnableDomainJoin" value="" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.DomainFQDN" value="" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.DomainControllerFQDN" value="" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.DomainAccountName" value="" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.DomainPassword" value="" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.DomainOU" value="" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.Administrators" value="" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.DomainSiteName" value="" /> </ConfigurationSettings> </Role> </ServiceConfiguration> Let's go step by step and understand all the highlighted parameters and where you can find the values for them. osFamily – By default this is set to 1 (Windows Server 2008 SP2). Change this to 2 if you want the Windows Server 2008 R2 operating system. The advantage of using osFamily = "2" is that you get PowerShell 2.0 rather than PowerShell 1.0. In PowerShell 2.0 you can simply use "powershell -ExecutionPolicy Unrestricted ./myscript.ps1" and it will work, while in PowerShell 1.0 you will have to change the registry key by including the following in your command file before you can execute any PowerShell: "reg add HKLM\Software\Microsoft\PowerShell\1\ShellIds\Microsoft.PowerShell /v ExecutionPolicy /d Unrestricted /f". The other reason you might want to move to osFamily 2 is if you want IIS 7.5. Activation Token – To enable communication between the on-premise machine and the Windows Azure Worker role VM, both need to have the same token. Log on to the Windows Azure Management Portal, click on Connect, then click on Get Activation Token; this should give you the activation token. Copy the activation token to the clipboard and paste it in the configuration file.
Note – Later in the blog I'll be showing you how to install Connect on the on-premise machine. EnableDomainJoin – Set the value to true; of course we want to join the Windows Azure worker role VM to the domain. DomainFQDN, DomainControllerFQDN, DomainAccountName, DomainPassword, DomainOU, Administrators – This information is specific to your domain. I have extracted this information from the 'service manager' and 'Active Directory Users and Computers'. Also, I created a new domain OU named 'CloudInstances' so that all my cloud instances joined to my domain show up there; this is optional. You can encrypt the DomainPassword – refer to the instructions here. Or hold fire: I'll be covering that when I come to certificates and encryption in the coming section. Once you have filled in all this information, the configuration file should look something like below: <?xml version="1.0" encoding="utf-8"?> <ServiceConfiguration serviceName="WindowsAzureProject2" xmlns="http://schemas.microsoft.com/ServiceHosting/2008/10/ServiceConfiguration" osFamily="2" osVersion="*"> <Role name="WorkerRole1"> <Instances count="1" /> <ConfigurationSettings> <Setting name="Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" value="UseDevelopmentStorage=true" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.ActivationToken" value="45f55fea-f194-4fbc-b36e-25604faac784" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.Refresh" value="" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.WaitForConnectivity" value="" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.Upgrade" value="" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.EnableDomainJoin" value="true" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.DomainFQDN" value="play.pit.com" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.DomainControllerFQDN" value="WIN-KUDQMQFGQOL.play.pit.com" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.DomainAccountName" value="playpit\Administrator" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.DomainPassword" value="************************" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.DomainOU" value="OU=CloudInstances, DC=Play, DC=Pit, DC=com" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.Administrators" value="Playpit\Administrator" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.DomainSiteName" value="" /> </ConfigurationSettings> </Role> </ServiceConfiguration> Next we will enable the Remote Desktop module in the ServiceDefinition.csdef. We could make the changes manually or allow a beautiful wizard to help us; I prefer the second option. So right click on the Windows Azure project and choose Publish. Once you get the publish wizard, if you haven't already, you will be asked to import your Windows Azure subscription; this is simply the MSDN subscription activation key XML. Once you have done that, click Next to go to the Settings page and check 'Enable Remote Desktop for all roles'. As soon as you do that you get another pop up asking for the details of the user that you will be logging in with (make sure you enter a reasonable expiry date; you do not want the user account to expire today). Notice the 'more information' tag at the bottom; click that to get access to the certificate section. See screen shot below.
From the drop down select the option to create a new certificate. In the pop up window enter the friendly name for your certificate; in my case I entered 'WAC – Test Rig' and clicked OK. This will create a new certificate for you. Click on the View button to see the certificate details. Do you see the Thumbprint? This is the value that will go in the config file (very important). Now click on the Copy to File button to copy the certificate; we will need to import the certificate into the Windows Azure Management Portal later, so make sure you save it in a safe location. Click Finish and enter the details of the user you would like to create with permissions for remote desktop access; once you have entered the details on the 'Remote desktop configuration' screen, click OK. From the Publish Windows Azure wizard screen press Cancel (we don't want to publish the role just yet) and then Yes (we do want to save all the changes in the config file). Now if you go to the ServiceDefinition.csdef file you will see that the RemoteAccess and RemoteForwarder modules have been imported for you. <?xml version="1.0" encoding="utf-8"?> <ServiceDefinition name="WindowsAzureProject2" xmlns="http://schemas.microsoft.com/ServiceHosting/2008/10/ServiceDefinition"> <WorkerRole name="WorkerRole1" vmsize="Small"> <Imports> <Import moduleName="Diagnostics" /> <Import moduleName="Connect" /> <Import moduleName="RemoteAccess" /> <Import moduleName="RemoteForwarder" /> </Imports> </WorkerRole> </ServiceDefinition> Now go to the ServiceConfiguration.Cloud.cscfg file and you will see a whole bunch of "Microsoft.WindowsAzure.Plugins.RemoteAccess.%%%" settings added for you. <?xml version="1.0" encoding="utf-8"?> <ServiceConfiguration serviceName="WindowsAzureProject2" xmlns="http://schemas.microsoft.com/ServiceHosting/2008/10/ServiceConfiguration" osFamily="2" osVersion="*"> <Role name="WorkerRole1"> <Instances count="1" /> <ConfigurationSettings> <Setting name="Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" value="UseDevelopmentStorage=true" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.ActivationToken" value="45f55fea-f194-4fbc-b36e-25604faac784" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.Refresh" value="" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.WaitForConnectivity" value="" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.Upgrade" value="" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.EnableDomainJoin" value="true" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.DomainFQDN" value="play.pit.com" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.DomainControllerFQDN" value="WIN-KUDQMQFGQOL.play.pit.com" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.DomainAccountName" value="playpit\Administrator" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.DomainPassword" value="************************" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.DomainOU" value="OU=CloudInstances, DC=Play, DC=Pit, DC=com" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.Administrators" value="Playpit\Administrator" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.DomainSiteName" value="" /> <Setting name="Microsoft.WindowsAzure.Plugins.RemoteAccess.Enabled" value="true" /> <Setting name="Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountUsername" value="Administrator" /> <Setting name="Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountEncryptedPassword" value="MIIBnQYJKoZIhvcNAQcDoIIBjjCCAYoCAQAxggFOMIIBSgIBADAyMB4xHDAaBgNVBAMME1dpbmRvd3MgQXp1cmUgVG9vbHMCEGa+B46voeO5T305N7TSG9QwDQYJKoZIhvcNAQEBBQAEggEABg4ol5Xol66Ip6QKLbAPWdmD4aeADZ7aKj6fg4D+ATr0DXBllZHG5Umwf+84Sj2nsPeCyrg3ZDQuxrfhSbdnJwuChKV6ukXdGjX0hlowJu/4dfH4jTJC7sBWSAKaEFU7CxvqYEAL1Hf9VPL5fW6HZVmq1z+qmm4ecGKSTOJ20Fptb463wcXgR8CWGa+1w9xqJ7UmmfGeGeCHQ4QGW0IDSBU6ccgvzF2ug8/FY60K1vrWaCYOhKkxD3YBs8U9X/kOB0yQm2Git0d5tFlIPCBT2AC57bgsAYncXfHvPesI0qs7VZyghk8LVa9g5IqaMCp6cQ7rmY/dLsKBMkDcdBHuCTAzBgkqhkiG9w0BBwEwFAYIKoZIhvcNAwcECDRVifSXbA43gBApNrp40L1VTVZ1iGag+3O1" /> <Setting name="Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountExpiration" value="2012-11-27T23:59:59.0000000+00:00" /> <Setting name="Microsoft.WindowsAzure.Plugins.RemoteForwarder.Enabled" value="true" /> </ConfigurationSettings> <Certificates> <Certificate name="Microsoft.WindowsAzure.Plugins.RemoteAccess.PasswordEncryption" thumbprint="AA23016CF0BDFC344400B5B82706B608B92E4217" thumbprintAlgorithm="sha1" /> </Certificates> </Role> </ServiceConfiguration> Okay, let's look at them one at a time. Enabled – Yes, we would like to enable Remote Access. AccountUsername – This is the user name you entered while you were on the publish Windows Azure role screen, as detailed above. AccountEncryptedPassword – Try and decode that! The certificate is used to encrypt the password you specified for the user account. Remember earlier I said either use the instructions or wait and I'll be showing you encryption; as it happens, the user account I am using for RDP has the same password as my domain password, so I can simply copy the value of AccountEncryptedPassword to DomainPassword as well. AccountExpiration – This is the expiration you specified in the wizard earlier; make sure your account does not expire today. Remote Forwarder – Check out the documentation; below is how I understand it: -- One role in an application that implements a remote desktop connection must import the RemoteForwarder module. The two modules work together to enable the remote desktop connections to role instances. -- If you have multiple roles defined in the service model, it does not matter which role you add the RemoteForwarder module to, but you must add it to only one of the role definitions. Certificate – Remember the certificate thumbprint from the wizard: the on-premise machine and the Windows Azure role machine that need to speak to each other must have the same thumbprint. More on that when we install the Windows Azure Connect endpoints on the on-premise machine. As I said earlier, in this blog post I'll be showing you the manual process, so I won't be scripting any startup tasks to install the test agent or register the test agent with the TFS Server. I'll be showing you all that cool stuff in the next blog post; it's important to understand the manual side of it first, because that makes it easier for you to troubleshoot in case something fails. Having said that, the changes we have made are sufficient to spin up the Windows Azure Worker Role aka Test Agent VM, have it connected with the play.pit.com domain and have remote access enabled on it. Before we deploy the Test Agent VM we need to set up Windows Azure Connect on the TFS Server.
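One loose end before moving on: earlier I said I'd come back to certificates and encryption. The AccountEncryptedPassword value above is simply the plain-text password encrypted with the public key of the certificate whose thumbprint appears in the Certificates element, so you can produce that kind of value yourself. Below is a minimal C# sketch of my own (an illustration, not code from the original post); it assumes the certificate sits in the CurrentUser\My store and uses the EnvelopedCms class from System.Security.dll to produce the base64 CMS blob:

using System;
using System.Security.Cryptography.Pkcs;
using System.Security.Cryptography.X509Certificates;
using System.Text;

class PasswordEncrypter
{
    // Encrypt a password with the public key of the certificate
    // identified by the given thumbprint.
    static string EncryptPassword(string password, string thumbprint)
    {
        X509Store store = new X509Store(StoreName.My, StoreLocation.CurrentUser);
        store.Open(OpenFlags.ReadOnly);
        X509Certificate2 cert = store.Certificates
            .Find(X509FindType.FindByThumbprint, thumbprint, false)[0];
        store.Close();

        // Wrap the UTF-8 password bytes in a CMS/PKCS#7 envelope.
        EnvelopedCms envelope = new EnvelopedCms(
            new ContentInfo(Encoding.UTF8.GetBytes(password)));
        envelope.Encrypt(new CmsRecipient(cert));
        return Convert.ToBase64String(envelope.Encode());
    }

    static void Main()
    {
        // Use the thumbprint of your own 'WAC - Test Rig' certificate here.
        Console.WriteLine(EncryptPassword("P@ssw0rd!",
            "AA23016CF0BDFC344400B5B82706B608B92E4217"));
    }
}

Because my RDP account and my domain account share a password, the same output can be pasted into both AccountEncryptedPassword and DomainPassword.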
II. Windows Azure Connect: Setting up Connect on VM – 2, i.e. TFS & Test Controller Glad you made it so far! We have set up the Azure Connect module in the Test Agent configuration; now the Connect endpoints need to be enabled on the on-premise machines to allow communication between the on-premise TFS/Test Controller and the Azure-ed Test Agent. Let's have a look at how we can do this. Log on to VM – 2 running the TFS Server and Test Controller. Log on to the Windows Azure Management Portal and click on Virtual Network. If you already have a subscription you should see the below screen shot; if not, you will be asked to complete the subscription first. Click on Install Local Endpoints from the top left of the panel and you get a URL with a token id appended to it. Remember the token I showed you earlier? In theory the token you get here should match the token you added to the Test Agent config file. Copy the URL to the clipboard and paste it in Internet Explorer (important: the installation at present only works out of IE, and you need to have cookies enabled in order to complete the installation). As stated in the pop up, you can NOT download and run the software later; you need to run it as is, since it contains a token. Once the installation completes you should see the Windows Azure Connect icon in the system tray. Right click the Azure Connect icon, choose Diagnostics and refer to this link for diagnostic detail terminology. NOTE – Unfortunately I could not see the Windows Azure Connect icon in the system tray; a bit of binging with Google revealed that the Azure Connect icon is only shown when the 'Windows Azure Connect Endpoint' service is started. So go to services.msc and make sure that the service is started; if not, start it. Unfortunately again, the service did not start for me on a manual start, and I realised that one of the dependent services was disabled. You can look at the service dependencies, start them, and then start Windows Azure Connect. Bottom line: you need to start the Windows Azure Connect Endpoint service before you can proceed. Please refer here on MSDN for more on troubleshooting Windows Azure Connect. (Follow the next step as well.) Now go back to the Windows Azure Management Portal and from Groups and Roles create a new group; let's call it 'Test Rig'. Make sure you add VM – 2 (the TFS Server VM where you just installed the endpoint). Now if you go back to the Azure Connect icon in the system tray and click 'Refresh Policy' you will notice that the disconnected status of the icon changes to ready for connection. III. Importing the Certificate into the Windows Azure Management Portal Before publishing, you need to import the certificate you created in Step I into the Windows Azure Management Portal. Log on to the Windows Azure Management Portal and click on 'Hosted Services, Storage Accounts & CDN', then 'Management Certificates', followed by Add Certificates, as shown in the screen shot below. Browse to the location where you saved the certificate earlier (refer to Step I in case you forgot). Now you should be able to see the imported certificate here; make sure the thumbprint of the certificate matches the one you inserted in the config files. IV. Publish Windows Azure Worker Role aka Test Agent Having completed I, II and III, you are ready to publish the Test Agent VM – 3 to the cloud. Go to Visual Studio, right click the Windows Azure project and select Publish. Verify the information in the wizard; from the Advanced Settings tab, you can also enable capture of IntelliTrace or profiling information. Click Next and click Publish!
From the View menu select the Windows Azure Activity Log window. Now you should be able to see the deployment progress in real time. In the Windows Azure Management Portal, you should also be able to see the progress of the creation of a new Worker Role. Once the deployment is complete you should be able to RDP into the Test Agent Worker Role VM from the Playpit network using the domain admin user account (go to the Run prompt, type mstsc and enter the machine name in the pop up). In case you are unable to log in to the Test Agent using the domain admin user account, it means the process of joining the Test Agent to the domain has failed! But the good news is that, because you imported the Connect module, you can connect to the Test Agent machine using the Windows Azure Management Portal and troubleshoot the reason for failure: you will be able to log in with the user name and password you specified in the config file for the keys 'RemoteAccess.AccountUsername' and 'RemoteAccess.AccountEncryptedPassword' (just enter the password unencrypted), then fix the problem or manually join the machine to the domain. Once you have managed to join the Test Agent VM to the domain, move to the next step. So, log in to the Test Agent Worker Role VM with the Playpit domain administrator and verify that you can log in, that the machine is connected to the domain, and that the Connect service is successfully running. If yes, give yourself a pat on the back: you are 80% mission accomplished! Go to the Windows Azure Management Portal, click on Virtual Network, click on Groups and Roles, click on Test Rig, then click Edit Group to edit the Test Rig group you created earlier. In the 'Connect to' section, click on Add to select the worker role you have just deployed. Also check 'Allow connections between endpoints in the group'; with this you enable communication between the test controller and the test agents, and between test agents. Click Save. Now you are ready to deploy the Test Agent software on the Worker Role Test Agent VM and configure it to work with the Test Controller. V. Configuring VM – 3: Installing Test Agent and Associating Test Agent to Controller Log in to the Worker Role Test Agent VM that you have just successfully deployed; make sure you log in with the domain administrator account. Download the All Agents software from MSDN ('en_visual_studio_agents_2010_x86_x64_dvd_509679.iso'), extract the ISO and navigate to the folder where you extracted it. In my case, I extracted the ISO to "C:\Resources\Temp\VsAgentSetup". Open the Test Agent folder and double click setup.exe. Once you have installed the Test Agent you should reach the configuration window. If you face any issues installing the TFS Test Agent on the VM, refer to the walkthrough on MSDN. Once the Test Agent software is successfully installed, you will need to configure the test agent. Right click the test agent configuration tool and run it as a different user, i.e. an Administrator. This is really to run the configuration wizard with elevated privileges (UAC might block some actions otherwise). In the run options, you can select 'service'; you do not need to run the agent as interactive unless you are running coded UI tests. I have specified the domain administrator to connect to the TFS Test Controller. In real life, I would never do that; I would create a separate test user service account for this purpose.
But for the blog post we are using the most powerful user, so that no policies or restrictions block you. Click the Apply Settings button and you should be all green! If not, the summary usually gives helpful error messages that you can resolve before proceeding; in my experience, you may run into either a permission issue or a firewall blocking communication. And now the moment of truth! Go to VM – 2, open up Visual Studio and from the Test menu select Manage Test Controller. Mission accomplished! You should be able to see the Test Agent that you have just configured here. VI. Creating and Running Load Tests on your brand new Azure-ed Test Rig I have various blog posts on Performance Testing with Visual Studio Ultimate; you can follow the links and videos below.
Blog Posts:
- Part 1 – Performance Testing using Visual Studio 2010 Ultimate
- Part 2 – Performance Testing using Visual Studio 2010 Ultimate
- Part 3 – Performance Testing using Visual Studio 2010 Ultimate
Videos:
- Test Tools Configuration & Settings in Visual Studio
- Why & How to Record Web Performance Tests in Visual Studio Ultimate
- Goal Driven Load Testing using Visual Studio Ultimate
Now that you have created your load tests, there is one last change you need to make before you can run them on your Azure Test Rig: create a new test settings file, change the Test Execution method to 'Remote Execution', and select the test controller you configured the Worker Role Test Agent against – in our case VM – 2. So go on, fire off a test run and see the results of the test being executed on the Azure-ed Test Rig. Review and What's next? A quick recap of the benefits of running the Test Rig in the cloud and what I will be covering in the next blog post – AND I would love to hear your feedback! Advantages: Utilizing the power of Azure compute to run a heavy virtual user load. Benefiting from Azure's flexibility: destroy Test Agents when not in use; it takes < 25 minutes to spin up a new Test Agent. Most important, test network latency (network latency and speed of connection are two different things; network latency is usually very hard to test): by placing the Test Agents in Microsoft data centres around the globe, one can actually test the lag in transferring the bytes caused not by a slow connection but by the page being requested from the other side of the globe. Next Steps: The process of spinning up the Test Agents in Windows Azure is not 100% automated. I am working on the worker process and PowerShell scripts to automate the role deployment, the unattended install of the test agent software and the registration of the test agent with the test controller. In the next blog post I will show you how to make the complete process unattended and automated. Remember to subscribe to http://feeds.feedburner.com/TarunArora. Hope you enjoyed this post; I would love to hear your feedback! If you have any recommendations on things that I should consider, or any questions or feedback, feel free to leave a comment. See you in Part III.

    Read the article

  • Problem rendering VBO

    - by Onno
I'm developing a game engine using OpenTK. I'm trying to get to grips with the use of VBO's. I've run into some trouble because somehow it doesn't render correctly. Thus far I've used immediate mode to render a test object, a test cube with a texture. namespace SharpEngine.Utility.Mesh { using System; using System.Collections.Generic; using OpenTK; using OpenTK.Graphics; using OpenTK.Graphics.OpenGL; using SharpEngine.Utility; using System.Drawing; public class ImmediateFaceBasedCube : IMesh { private IList<Face> faces = new List<Face>(); public ImmediateFaceBasedCube() { IList<Vector3> allVertices = new List<Vector3>(); //right top front allVertices.Add(new Vector3(1.0f, 1.0f, 1.0f)); //0 //right top back allVertices.Add(new Vector3(1.0f, 1.0f, -1.0f)); //1 //left top back allVertices.Add(new Vector3(-1.0f, 1.0f, -1.0f)); //2 //left top front allVertices.Add(new Vector3(-1.0f, 1.0f, 1.0f)); //3 //right bottom front allVertices.Add(new Vector3(1.0f, -1.0f, 1.0f)); //4 //right bottom back allVertices.Add(new Vector3(1.0f, -1.0f, -1.0f)); //5 //left bottom back allVertices.Add(new Vector3(-1.0f, -1.0f, -1.0f)); //6 //left bottom front allVertices.Add(new Vector3(-1.0f, -1.0f, 1.0f)); //7 IList<Vector2> textureCoordinates = new List<Vector2>(); textureCoordinates.Add(new Vector2(0, 0)); //AA - 0 textureCoordinates.Add(new Vector2(0, 0.3333333f)); //AB - 1 textureCoordinates.Add(new Vector2(0, 0.6666666f)); //AC - 2 textureCoordinates.Add(new Vector2(0, 1)); //AD - 3 textureCoordinates.Add(new Vector2(0.3333333f, 0)); //BA - 4 textureCoordinates.Add(new Vector2(0.3333333f, 0.3333333f)); //BB - 5 textureCoordinates.Add(new Vector2(0.3333333f, 0.6666666f)); //BC - 6 textureCoordinates.Add(new Vector2(0.3333333f, 1)); //BD - 7 textureCoordinates.Add(new Vector2(0.6666666f, 0)); //CA - 8 textureCoordinates.Add(new Vector2(0.6666666f, 0.3333333f)); //CB - 9 textureCoordinates.Add(new Vector2(0.6666666f, 0.6666666f)); //CC -10 textureCoordinates.Add(new Vector2(0.6666666f, 1)); //CD -11 textureCoordinates.Add(new Vector2(1, 0)); //DA -12 textureCoordinates.Add(new Vector2(1, 0.3333333f)); //DB -13 textureCoordinates.Add(new Vector2(1, 0.6666666f)); //DC -14 textureCoordinates.Add(new Vector2(1, 1)); //DD -15 Vector3 copy1 = new Vector3(-2.0f, -2.5f, -3.5f); IList<Vector3> normals = new List<Vector3>(); normals.Add(new Vector3(0, 1.0f, 0)); //0 normals.Add(new Vector3(0, 0, 1.0f)); //1 normals.Add(new Vector3(1.0f, 0, 0)); //2 normals.Add(new Vector3(0, 0, -1.0f)); //3 normals.Add(new Vector3(-1.0f, 0, 0)); //4 normals.Add(new Vector3(0, -1.0f, 0)); //5 //todo: move vertex normal and texture data to datastructure //todo: VBO based rendering //top face //1 IList<VertexData> verticesT1 = new List<VertexData>(); VertexData T1a = new VertexData(); T1a.Normal = normals[0]; T1a.TexCoord = textureCoordinates[5]; T1a.Position = allVertices[3]; verticesT1.Add(T1a); VertexData T1b = new VertexData(); T1b.Normal = normals[0]; T1b.TexCoord = textureCoordinates[9]; T1b.Position = allVertices[0]; verticesT1.Add(T1b); VertexData T1c = new VertexData(); T1c.Normal = normals[0]; T1c.TexCoord = textureCoordinates[10]; T1c.Position = allVertices[1]; verticesT1.Add(T1c); Face F1 = new Face(verticesT1); faces.Add(F1); //2 IList<VertexData> verticesT2 = new List<VertexData>(); VertexData T2a = new VertexData(); T2a.Normal = normals[0]; T2a.TexCoord = textureCoordinates[10]; T2a.Position = allVertices[1]; verticesT2.Add(T2a); VertexData T2b = new VertexData(); T2b.Normal = normals[0]; T2b.TexCoord =
textureCoordinates[6]; T2b.Position = allVertices[2]; verticesT2.Add(T2b); VertexData T2c = new VertexData(); T2c.Normal = normals[0]; T2c.TexCoord = textureCoordinates[5]; T2c.Position = allVertices[3]; verticesT2.Add(T2c); Face F2 = new Face(verticesT2); faces.Add(F2); //front face //3 IList<VertexData> verticesT3 = new List<VertexData>(); VertexData T3a = new VertexData(); T3a.Normal = normals[1]; T3a.TexCoord = textureCoordinates[1]; T3a.Position = allVertices[3]; verticesT3.Add(T3a); VertexData T3b = new VertexData(); T3b.Normal = normals[1]; T3b.TexCoord = textureCoordinates[0]; T3b.Position = allVertices[7]; verticesT3.Add(T3b); VertexData T3c = new VertexData(); T3c.Normal = normals[1]; T3c.TexCoord = textureCoordinates[5]; T3c.Position = allVertices[0]; verticesT3.Add(T3c); Face F3 = new Face(verticesT3); faces.Add(F3); //4 IList<VertexData> verticesT4 = new List<VertexData>(); VertexData T4a = new VertexData(); T4a.Normal = normals[1]; T4a.TexCoord = textureCoordinates[5]; T4a.Position = allVertices[0]; verticesT4.Add(T4a); VertexData T4b = new VertexData(); T4b.Normal = normals[1]; T4b.TexCoord = textureCoordinates[0]; T4b.Position = allVertices[7]; verticesT4.Add(T4b); VertexData T4c = new VertexData(); T4c.Normal = normals[1]; T4c.TexCoord = textureCoordinates[4]; T4c.Position = allVertices[4]; verticesT4.Add(T4c); Face F4 = new Face(verticesT4); faces.Add(F4); //right face //5 IList<VertexData> verticesT5 = new List<VertexData>(); VertexData T5a = new VertexData(); T5a.Normal = normals[2]; T5a.TexCoord = textureCoordinates[2]; T5a.Position = allVertices[0]; verticesT5.Add(T5a); VertexData T5b = new VertexData(); T5b.Normal = normals[2]; T5b.TexCoord = textureCoordinates[1]; T5b.Position = allVertices[4]; verticesT5.Add(T5b); VertexData T5c = new VertexData(); T5c.Normal = normals[2]; T5c.TexCoord = textureCoordinates[6]; T5c.Position = allVertices[1]; verticesT5.Add(T5c); Face F5 = new Face(verticesT5); faces.Add(F5); //6 IList<VertexData> verticesT6 = new List<VertexData>(); VertexData T6a = new VertexData(); T6a.Normal = normals[2]; T6a.TexCoord = textureCoordinates[1]; T6a.Position = allVertices[4]; verticesT6.Add(T6a); VertexData T6b = new VertexData(); T6b.Normal = normals[2]; T6b.TexCoord = textureCoordinates[5]; T6b.Position = allVertices[5]; verticesT6.Add(T6b); VertexData T6c = new VertexData(); T6c.Normal = normals[2]; T6c.TexCoord = textureCoordinates[6]; T6c.Position = allVertices[1]; verticesT6.Add(T6c); Face F6 = new Face(verticesT6); faces.Add(F6); //back face //7 IList<VertexData> verticesT7 = new List<VertexData>(); VertexData T7a = new VertexData(); T7a.Normal = normals[3]; T7a.TexCoord = textureCoordinates[4]; T7a.Position = allVertices[5]; verticesT7.Add(T7a); VertexData T7b = new VertexData(); T7b.Normal = normals[3]; T7b.TexCoord = textureCoordinates[9]; T7b.Position = allVertices[2]; verticesT7.Add(T7b); VertexData T7c = new VertexData(); T7c.Normal = normals[3]; T7c.TexCoord = textureCoordinates[5]; T7c.Position = allVertices[1]; verticesT7.Add(T7c); Face F7 = new Face(verticesT7); faces.Add(F7); //8 IList<VertexData> verticesT8 = new List<VertexData>(); VertexData T8a = new VertexData(); T8a.Normal = normals[3]; T8a.TexCoord = textureCoordinates[9]; T8a.Position = allVertices[2]; verticesT8.Add(T8a); VertexData T8b = new VertexData(); T8b.Normal = normals[3]; T8b.TexCoord = textureCoordinates[4]; T8b.Position = allVertices[5]; verticesT8.Add(T8b); VertexData T8c = new VertexData(); T8c.Normal = normals[3]; T8c.TexCoord = textureCoordinates[8]; 
T8c.Position = allVertices[6]; verticesT8.Add(T8c); Face F8 = new Face(verticesT8); faces.Add(F8); //left face //9 IList<VertexData> verticesT9 = new List<VertexData>(); VertexData T9a = new VertexData(); T9a.Normal = normals[4]; T9a.TexCoord = textureCoordinates[8]; T9a.Position = allVertices[6]; verticesT9.Add(T9a); VertexData T9b = new VertexData(); T9b.Normal = normals[4]; T9b.TexCoord = textureCoordinates[13]; T9b.Position = allVertices[3]; verticesT9.Add(T9b); VertexData T9c = new VertexData(); T9c.Normal = normals[4]; T9c.TexCoord = textureCoordinates[9]; T9c.Position = allVertices[2]; verticesT9.Add(T9c); Face F9 = new Face(verticesT9); faces.Add(F9); //10 IList<VertexData> verticesT10 = new List<VertexData>(); VertexData T10a = new VertexData(); T10a.Normal = normals[4]; T10a.TexCoord = textureCoordinates[8]; T10a.Position = allVertices[6]; verticesT10.Add(T10a); VertexData T10b = new VertexData(); T10b.Normal = normals[4]; T10b.TexCoord = textureCoordinates[12]; T10b.Position = allVertices[7]; verticesT10.Add(T10b); VertexData T10c = new VertexData(); T10c.Normal = normals[4]; T10c.TexCoord = textureCoordinates[13]; T10c.Position = allVertices[3]; verticesT10.Add(T10c); Face F10 = new Face(verticesT10); faces.Add(F10); //bottom face //11 IList<VertexData> verticesT11 = new List<VertexData>(); VertexData T11a = new VertexData(); T11a.Normal = normals[5]; T11a.TexCoord = textureCoordinates[10]; T11a.Position = allVertices[7]; verticesT11.Add(T11a); VertexData T11b = new VertexData(); T11b.Normal = normals[5]; T11b.TexCoord = textureCoordinates[9]; T11b.Position = allVertices[6]; verticesT11.Add(T11b); VertexData T11c = new VertexData(); T11c.Normal = normals[5]; T11c.TexCoord = textureCoordinates[14]; T11c.Position = allVertices[4]; verticesT11.Add(T11c); Face F11 = new Face(verticesT11); faces.Add(F11); //12 IList<VertexData> verticesT12 = new List<VertexData>(); VertexData T12a = new VertexData(); T12a.Normal = normals[5]; T12a.TexCoord = textureCoordinates[13]; T12a.Position = allVertices[5]; verticesT12.Add(T12a); VertexData T12b = new VertexData(); T12b.Normal = normals[5]; T12b.TexCoord = textureCoordinates[14]; T12b.Position = allVertices[4]; verticesT12.Add(T12b); VertexData T12c = new VertexData(); T12c.Normal = normals[5]; T12c.TexCoord = textureCoordinates[9]; T12c.Position = allVertices[6]; verticesT12.Add(T12c); Face F12 = new Face(verticesT12); faces.Add(F12); } public void draw() { GL.Begin(BeginMode.Triangles); foreach (Face face in faces) { foreach (VertexData datapoint in face.verticesWithTexCoords) { GL.Normal3(datapoint.Normal); GL.TexCoord2(datapoint.TexCoord); GL.Vertex3(datapoint.Position); } } GL.End(); } } } Gets me this very nice picture: The immediate mode cube renders nicely and taught me a bit on how to use OpenGL, but VBO's are the way to go. Since I read on the OpenTK forums that OpenTK has problems doing VA's or DL's, I decided to skip using those. Now, I've tried to change this cube to a VBO by using the same vertex, normal and tc collections, and making float arrays from them by using the coordinates in combination with uint arrays which contain the index numbers from the immediate cube. 
(see the private functions at the end of the code sample). Somehow this only renders two triangles. namespace SharpEngine.Utility.Mesh { using System; using System.Collections.Generic; using OpenTK; using OpenTK.Graphics; using OpenTK.Graphics.OpenGL; using SharpEngine.Utility; using System.Drawing; public class VBOFaceBasedCube : IMesh { private int VerticesVBOID; private int VerticesVBOStride; private int VertexCount; private int ELementBufferObjectID; private int textureCoordinateVBOID; private int textureCoordinateVBOStride; //private int textureCoordinateArraySize; private int normalVBOID; private int normalVBOStride; public VBOFaceBasedCube() { IList<Vector3> allVertices = new List<Vector3>(); //right top front allVertices.Add(new Vector3(1.0f, 1.0f, 1.0f)); //0 //right top back allVertices.Add(new Vector3(1.0f, 1.0f, -1.0f)); //1 //left top back allVertices.Add(new Vector3(-1.0f, 1.0f, -1.0f)); //2 //left top front allVertices.Add(new Vector3(-1.0f, 1.0f, 1.0f)); //3 //right bottom front allVertices.Add(new Vector3(1.0f, -1.0f, 1.0f)); //4 //right bottom back allVertices.Add(new Vector3(1.0f, -1.0f, -1.0f)); //5 //left bottom back allVertices.Add(new Vector3(-1.0f, -1.0f, -1.0f)); //6 //left bottom front allVertices.Add(new Vector3(-1.0f, -1.0f, 1.0f)); //7 IList<Vector2> textureCoordinates = new List<Vector2>(); textureCoordinates.Add(new Vector2(0, 0)); //AA - 0 textureCoordinates.Add(new Vector2(0, 0.3333333f)); //AB - 1 textureCoordinates.Add(new Vector2(0, 0.6666666f)); //AC - 2 textureCoordinates.Add(new Vector2(0, 1)); //AD - 3 textureCoordinates.Add(new Vector2(0.3333333f, 0)); //BA - 4 textureCoordinates.Add(new Vector2(0.3333333f, 0.3333333f)); //BB - 5 textureCoordinates.Add(new Vector2(0.3333333f, 0.6666666f)); //BC - 6 textureCoordinates.Add(new Vector2(0.3333333f, 1)); //BD - 7 textureCoordinates.Add(new Vector2(0.6666666f, 0)); //CA - 8 textureCoordinates.Add(new Vector2(0.6666666f, 0.3333333f)); //CB - 9 textureCoordinates.Add(new Vector2(0.6666666f, 0.6666666f)); //CC -10 textureCoordinates.Add(new Vector2(0.6666666f, 1)); //CD -11 textureCoordinates.Add(new Vector2(1, 0)); //DA -12 textureCoordinates.Add(new Vector2(1, 0.3333333f)); //DB -13 textureCoordinates.Add(new Vector2(1, 0.6666666f)); //DC -14 textureCoordinates.Add(new Vector2(1, 1)); //DD -15 Vector3 copy1 = new Vector3(-2.0f, -2.5f, -3.5f); IList<Vector3> normals = new List<Vector3>(); normals.Add(new Vector3(0, 1.0f, 0)); //0 normals.Add(new Vector3(0, 0, 1.0f)); //1 normals.Add(new Vector3(1.0f, 0, 0)); //2 normals.Add(new Vector3(0, 0, -1.0f)); //3 normals.Add(new Vector3(-1.0f, 0, 0)); //4 normals.Add(new Vector3(0, -1.0f, 0)); //5 //todo: VBO based rendering uint[] vertexElements = { 3,0,1, //01 1,2,3, //02 3,7,0, //03 0,7,4, //04 0,4,1, //05 4,5,1, //06 5,2,1, //07 2,5,6, //08 6,3,2, //09 6,7,5, //10 7,6,4, //11 5,4,6 //12 }; VertexCount = vertexElements.Length; IList<uint> vertexElementList = new List<uint>(vertexElements); uint[] normalElements = { 0,0,0, 0,0,0, 1,1,1, 1,1,1, 2,2,2, 2,2,2, 3,3,3, 3,3,3, 4,4,4, 4,4,4, 5,5,5, 5,5,5 }; IList<uint> normalElementList = new List<uint>(normalElements); uint[] textureIndexArray = { 5,9,10, 10,6,5, 1,0,5, 5,0,4, 2,1,6, 1,5,6, 4,9,5, 9,4,8, 8,13,9, 8,12,13, 10,9,14, 13,14,9 }; //textureCoordinateArraySize = textureIndexArray.Length; IList<uint> textureIndexList = new List<uint>(textureIndexArray); LoadVBO(allVertices, normals, textureCoordinates, vertexElements, normalElementList, textureIndexList); } public void draw() { //bind vertices //bind elements //bind
normals //bind texture coordinates GL.EnableClientState(ArrayCap.VertexArray); GL.EnableClientState(ArrayCap.NormalArray); GL.EnableClientState(ArrayCap.TextureCoordArray); GL.BindBuffer(BufferTarget.ArrayBuffer, VerticesVBOID); GL.VertexPointer(3, VertexPointerType.Float, VerticesVBOStride, 0); GL.BindBuffer(BufferTarget.ArrayBuffer, normalVBOID); GL.NormalPointer(NormalPointerType.Float, normalVBOStride, 0); GL.BindBuffer(BufferTarget.ArrayBuffer, textureCoordinateVBOID); GL.TexCoordPointer(2, TexCoordPointerType.Float, textureCoordinateVBOStride, 0); GL.BindBuffer(BufferTarget.ElementArrayBuffer, ELementBufferObjectID); GL.DrawElements(BeginMode.Polygon, VertexCount, DrawElementsType.UnsignedShort, 0); } //loads a static VBO void LoadVBO(IList<Vector3> vertices, IList<Vector3> normals, IList<Vector2> texcoords, uint[] elements, IList<uint> normalIndices, IList<uint> texCoordIndices) { int size; //todo // To create a VBO: // 1) Generate the buffer handles for the vertex and element buffers. // 2) Bind the vertex buffer handle and upload your vertex data. Check that the buffer was uploaded correctly. // 3) Bind the element buffer handle and upload your element data. Check that the buffer was uploaded correctly. float[] verticesArray = convertVector3fListToFloatArray(vertices); float[] normalsArray = createFloatArrayFromListOfVector3ElementsAndIndices(normals, normalIndices); float[] textureCoordinateArray = createFloatArrayFromListOfVector2ElementsAndIndices(texcoords, texCoordIndices); GL.GenBuffers(1, out VerticesVBOID); GL.BindBuffer(BufferTarget.ArrayBuffer, VerticesVBOID); Console.WriteLine("load 1 - vertices"); VerticesVBOStride = BlittableValueType.StrideOf(verticesArray); GL.BufferData(BufferTarget.ArrayBuffer, (IntPtr)(verticesArray.Length * sizeof(float)), verticesArray, BufferUsageHint.StaticDraw); GL.GetBufferParameter(BufferTarget.ArrayBuffer, BufferParameterName.BufferSize, out size); if (verticesArray.Length * BlittableValueType.StrideOf(verticesArray) != size) { throw new ApplicationException("Vertex data not uploaded correctly"); } else { Console.WriteLine("load 1 finished ok"); size = 0; } Console.WriteLine("load 2 - elements"); GL.GenBuffers(1, out ELementBufferObjectID); GL.BindBuffer(BufferTarget.ElementArrayBuffer, ELementBufferObjectID); GL.BufferData(BufferTarget.ElementArrayBuffer, (IntPtr)(elements.Length * sizeof(uint)), elements, BufferUsageHint.StaticDraw); GL.GetBufferParameter(BufferTarget.ElementArrayBuffer, BufferParameterName.BufferSize, out size); if (elements.Length * sizeof(uint) != size) { throw new ApplicationException("Element data not uploaded correctly"); } else { size = 0; Console.WriteLine("load 2 finished ok"); } GL.GenBuffers(1, out normalVBOID); GL.BindBuffer(BufferTarget.ArrayBuffer, normalVBOID); Console.WriteLine("load 3 - normals"); normalVBOStride = BlittableValueType.StrideOf(normalsArray); GL.BufferData(BufferTarget.ArrayBuffer, (IntPtr)(normalsArray.Length * sizeof(float)), normalsArray, BufferUsageHint.StaticDraw); GL.GetBufferParameter(BufferTarget.ArrayBuffer, BufferParameterName.BufferSize, out size); Console.WriteLine("load 3 - pre check"); if (normalsArray.Length * BlittableValueType.StrideOf(normalsArray) != size) { throw new ApplicationException("Normal data not uploaded correctly"); } else { Console.WriteLine("load 3 finished ok"); size = 0; } GL.GenBuffers(1, out textureCoordinateVBOID); GL.BindBuffer(BufferTarget.ArrayBuffer, textureCoordinateVBOID); Console.WriteLine("load 4- texture coordinates"); 
textureCoordinateVBOStride = BlittableValueType.StrideOf(textureCoordinateArray); GL.BufferData(BufferTarget.ArrayBuffer, (IntPtr)(textureCoordinateArray.Length * textureCoordinateVBOStride), textureCoordinateArray, BufferUsageHint.StaticDraw); GL.GetBufferParameter(BufferTarget.ArrayBuffer, BufferParameterName.BufferSize, out size); if (textureCoordinateArray.Length * BlittableValueType.StrideOf(textureCoordinateArray) != size) { throw new ApplicationException("texture coordinate data not uploaded correctly"); } else { Console.WriteLine("load 3 finished ok"); size = 0; } } //used to convert vertex arrays for use with VBO's private float[] convertVector3fListToFloatArray(IList<Vector3> input) { int arrayElementCount = input.Count * 3; float[] output = new float[arrayElementCount]; int fillCount = 0; foreach (Vector3 v in input) { output[fillCount] = v.X; output[fillCount + 1] = v.Y; output[fillCount + 2] = v.Z; fillCount += 3; } return output; } //used for converting texture coordinate arrays for use with VBO's private float[] convertVector2List_to_floatArray(IList<Vector2> input) { int arrayElementCount = input.Count * 2; float[] output = new float[arrayElementCount]; int fillCount = 0; foreach (Vector2 v in input) { output[fillCount] = v.X; output[fillCount + 1] = v.Y; fillCount += 2; } return output; } //used to create an array of floats from a list of Vector3 elements and indices private float[] createFloatArrayFromListOfVector3ElementsAndIndices(IList<Vector3> inputVectors, IList<uint> indices) { int arrayElementCount = inputVectors.Count * indices.Count * 3; float[] output = new float[arrayElementCount]; int fillCount = 0; foreach (int i in indices) { output[fillCount] = inputVectors[i].X; output[fillCount + 1] = inputVectors[i].Y; output[fillCount + 2] = inputVectors[i].Z; fillCount += 3; } return output; } private float[] createFloatArrayFromListOfVector2ElementsAndIndices(IList<Vector2> inputVectors, IList<uint> indices) { int arrayElementCount = inputVectors.Count * indices.Count * 2; float[] output = new float[arrayElementCount]; int fillCount = 0; foreach (int i in indices) { output[fillCount] = inputVectors[i].X; output[fillCount + 1] = inputVectors[i].Y; fillCount += 2; } return output; } } } This code will only render two triangles and they're nothing like I had in mind. I've done some searching. In some other questions I read that, if I did something wrong, I'd get no rendering at all. Clearly, something gets sent to the GFX card, but it might be that I'm not sending the right data. I've tried altering the sequence in which the triangles are rendered by swapping some of the index numbers in the vert, tc and normal index arrays, but this doesn't seem to have any effect. I'm slightly lost. What am I doing wrong?
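Two things in draw() look immediately suspect, and either one alone would mangle the output. First, LoadVBO uploads the element buffer from a uint[] (32-bit indices), but the draw call reads it as DrawElementsType.UnsignedShort (16-bit), so OpenGL walks the index data at the wrong stride. Second, BeginMode.Polygon asks for one big polygon outline rather than the 12 independent triangles the index list describes. A sketch of the corrected call, assuming everything else stays as posted:

// draw() - corrected element draw call (sketch)
GL.BindBuffer(BufferTarget.ElementArrayBuffer, ELementBufferObjectID);
GL.DrawElements(BeginMode.Triangles,           // 12 separate triangles, not one polygon
                VertexCount,                   // 36 indices
                DrawElementsType.UnsignedInt,  // matches the uint[] uploaded in LoadVBO
                0);

Beyond that, note that indexed drawing uses a single index per vertex for all attributes, so expanding the normals and texture coordinates per face index while leaving the positions un-expanded cannot line up with one element array; the expansion helpers also over-allocate (inputVectors.Count * indices.Count * N instead of indices.Count * N), which pads the buffers with trailing zeros. De-indexing the positions as well, to 36 vertices, and drawing with GL.DrawArrays(BeginMode.Triangles, 0, 36), or else building one index per unique position/normal/texcoord combination, would be the usual way out.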

    Read the article

  • Have you really fixed that problem?

    - by DavidWimbush
    The day before yesterday I saw our main live server's CPU go up to a constant 100% with just the occasional short drop to a lower level - the exact opposite of what you'd want to see. We're log shipping every 15 minutes and part of that involves calling WinRAR to compress the log backups before copying them over. (We're on SQL2005 so there's no native compression and we have bandwidth issues with the connection to our remote site.) I realised the log shipping jobs were taking about 10 minutes and that most of that was spent shipping a 'live' reporting database that is completely rebuilt every 20 minutes. (I'm just trying to keep this stuff alive until I can improve it.) We can rebuild this database in minutes if we have to fail over, so I disabled log shipping of that database. The log shipping went down to less than 2 minutes and I went off to the SQL Social evening in London feeling quite pleased with myself. It was a great evening - fun, educational and thought-provoking. Thanks to Simon Sabin & co for laying that on, and thanks too to the guests for making the effort when they must have been pretty worn out after doing DevWeek all day first. The next morning I came down to earth with a bump: CPU still at 100%. WTF? I looked in the activity monitor but it was confusing because some sessions have been running for a long time, so it's not a good guide to what's using the CPU right now. I tried the standard reports showing queries by CPU (average and total) but they only show the top 10, so they just show my big overnight archiving and data cleaning stuff. But the Profiler showed it was four queries used by our new website usage tracking system. Four simple indexes later, the CPU was back where it should be: about 20% with occasional short spikes. So the moral is: even when you're convinced you've found the cause and fixed the problem, you HAVE to go back and confirm that the problem has gone. And, yes, I have checked the CPU again today and it's still looking sweet.

    Read the article

  • Andengine: Put bullet to pool when it leaves screen

    - by Ashot
    I'm creating a bullet with a physics body. The Bullet class (which extends the Sprite class) has a die() method that unregisters the physics connector, hides the sprite and puts it in the pool: public void die() { Log.d("bulletDie", "See you in hell!"); if (this.isVisible()) { this.setVisible(false); mPhysicsWorld.unregisterPhysicsConnector(physicsConnector); physicsConnector.setUpdatePosition(false); body.setActive(false); this.setIgnoreUpdate(true); bulletsPool.recyclePoolItem(this); } } In the onUpdate method of the PhysicsConnector I execute the die method when the sprite leaves the screen: physicsConnector = new PhysicsConnector(this,body,true,false) { @Override public void onUpdate(final float pSecondsElapsed) { super.onUpdate(pSecondsElapsed); if (!camera.isRectangularShapeVisible(_bullet)) { Log.d("bulletDie","Dead?"); _bullet.die(); } } }; It works as I expected, but _bullet.die() executes TWICE. What am I doing wrong, and is this the right way to hide sprites? Here is the full code of the Bullet class (it is an inner class of the class that represents the player): private class Bullet extends Sprite implements PhysicsConstants { private final Body body; private final PhysicsConnector physicsConnector; private final Bullet _bullet; private int id; public Bullet(float x, float y, ITextureRegion texture, VertexBufferObjectManager vertexBufferObjectManager) { super(x,y,texture,vertexBufferObjectManager); _bullet = this; id = bulletId++; body = PhysicsFactory.createCircleBody(mPhysicsWorld, this, BodyDef.BodyType.DynamicBody, bulletFixture); physicsConnector = new PhysicsConnector(this,body,true,false) { @Override public void onUpdate(final float pSecondsElapsed) { super.onUpdate(pSecondsElapsed); if (!camera.isRectangularShapeVisible(_bullet)) { Log.d("bulletDie","Dead?"); Log.d("bulletDie",id+""); _bullet.die(); } } }; mPhysicsWorld.registerPhysicsConnector(physicsConnector); $this.getParent().attachChild(this); } public void reset() { final float angle = canon.getRotation(); final float x = (float) ((Math.cos(MathUtils.degToRad(angle))*radius) + centerX) / PIXEL_TO_METER_RATIO_DEFAULT; final float y = (float) ((Math.sin(MathUtils.degToRad(angle))*radius) + centerY) / PIXEL_TO_METER_RATIO_DEFAULT; this.setVisible(true); this.setIgnoreUpdate(false); body.setActive(true); mPhysicsWorld.registerPhysicsConnector(physicsConnector); body.setTransform(new Vector2(x,y),0); } public Body getBody() { return body; } public void setLinearVelocity(Vector2 velocity) { body.setLinearVelocity(velocity); } public void die() { Log.d("bulletDie", "See you in hell!"); if (this.isVisible()) { this.setVisible(false); mPhysicsWorld.unregisterPhysicsConnector(physicsConnector); physicsConnector.setUpdatePosition(false); body.setActive(false); this.setIgnoreUpdate(true); bulletsPool.recyclePoolItem(this); } } }
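    Worth noting: the Log.d("bulletDie", "See you in hell!") call in die() sits outside the isVisible() guard, so that log line can print twice even when the guarded body only runs once. A common way to make a die()-style callback explicitly safe against firing twice in the same frame is a dedicated dead flag that is checked before recycling. Below is a minimal sketch of that pattern, written in C# with hypothetical names (the code above is Java/AndEngine, so none of these identifiers come from it):

    // Hypothetical pooled-bullet guard: Die() recycles at most once per lifetime,
    // even if the off-screen check fires again before the connector is removed.
    class PooledBullet
    {
        private bool isDead;

        public void Die()
        {
            if (isDead) return;    // swallow any repeated callback
            isDead = true;
            // hide the sprite, deactivate the physics body, return to the pool...
        }

        public void Reset()
        {
            isDead = false;        // re-arm when the pool hands the bullet back out
            // reposition and reactivate the body...
        }
    }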

    Read the article

  • Custom session: Window does not capture full screen area by default. 12.04

    - by juzerali
    I am trying to create a custom session by creating a custom.desktop file in the /usr/share/xsessions folder. Remember, this is not a GNOME or some other stock session. I have created my own applications for this session, which are simple. Case 1: Chrome Browser. Contents of the custom.desktop file: [Desktop Entry] Name=Internet Kiosk Comment=This is an internet kiosk Exec=google-chrome --kiosk TryExec= Icon= Type=Application Issue: The Chrome browser starts in kiosk mode but does not capture the complete screen area. Some area is left at the bottom and right side of the screen. Case 2: Custom pyGTK app (Quickly). Contents of the custom.desktop file: [Desktop Entry] Name=Custom Kiosk Comment=This is a custom kiosk Exec=~/MyCustomPyGTKApp TryExec= Icon= Type=Application Issue: My custom pyGTK app has window.fullScreen() in the code. That means it should open in full screen without the window chrome (and it does under the normal session). But that, too, leaves lots of space around it. Need Help: Can anyone tell me what's going on here? I think it's some issue with borders, as pointed out at http://www.instructables.com/id/Setting-Up-Ubuntu-as-a-Kiosk-Web-Appliance/?ALLSTEPS in Step 8: If by chance, Google Chromium is not stretched to the edges with the --kiosk switch enabled there is a simple fix. To stretch Chromium simply log in as your regular user and edit chromeKiosk.sh to not have the --kiosk switch. Then log in as the restricted user, click the wrench and choose options. Then on the Personal Stuff tab select Hide system title bar and use compact borders. Close the options screen and stretch Chromium to fit the monitor. Then go back into the options window and set it to Use system title bar and borders. After this is done, log out of your restricted user (might need to just reboot) and log into your regular user. Edit chromeKiosk.sh back to include the --kiosk switch again and Chromium should be full screen next time you log into the restricted user. If I were to use a custom pyGTK or gtkmm app, how should I get around this issue? window.fullScreen() should occupy the complete screen area. This has to be done programmatically or in some other way that can scale; I have to deploy this on a large number of machines located in different geographical areas, and doing it manually on every machine is not possible.

    Read the article

  • How to Add a Taskbar to the Desktop in Ubuntu 14.04

    - by Lori Kaufman
    If you’ve switched to Ubuntu from Windows, it may take some time to get used to the new and different interface. However, you can easily incorporate a familiar Windows feature, the Taskbar, into Ubuntu to make the transition easier. A tool called Tint2 provides a bar at the bottom of the Ubuntu Desktop that resembles the Windows Taskbar. We will show you how to install it and make it start every time you log into Ubuntu. NOTE: When we say to type something in this article and there are quotes around the text, DO NOT type the quotes, unless we specify otherwise. Press Ctrl + Alt + T to open a Terminal window. To install Tint2, type the following line at the prompt and press Enter. sudo apt-get install tint2 Type your password at the prompt and press Enter. The progress of the installation displays and then a message displays saying how much disk space will be used. When asked if you want to continue, type a “y” and press Enter. When the installation has finished, close the Terminal window by typing “exit” at the prompt and pressing Enter. Click the Search button at the top of the Unity bar. Start typing “startup applications” in the Search box. Items that match what you type start displaying below the Search box. When the Startup Applications tool displays, click the icon to open it. On the Startup Applications Preferences window, click Add. On the Add Startup Program dialog box, enter a name for the startup application. This name displays in the list on the Startup Applications Preferences window. Type “tint2” in the Command edit box, enter a description in the Comment edit box, if desired, and click Add. Tint2 is added as a startup program and will start every time you log into Ubuntu. Click Close to close the Startup Applications Preferences window. Log out and log back in to make the Taskbar available on the desktop. You do not need to reboot the computer for this change to take effect. Now, when you minimize a program, an icon for it displays on the Taskbar at the bottom of the screen, just like the Taskbar in Windows. If you decide that you don’t want the Taskbar to display every time you log into Ubuntu, you can uncheck the Tint2 startup program on the Startup Applications Preferences window. You don’t need to delete it from the list.
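    Incidentally, the Startup Applications tool simply writes a .desktop file for you. If you prefer doing it by hand, a file like the following in ~/.config/autostart achieves the same thing (a sketch using the standard freedesktop autostart keys, not something generated by the steps above):

    # ~/.config/autostart/tint2.desktop
    [Desktop Entry]
    Type=Application
    Name=Tint2 Taskbar
    Comment=Windows-style taskbar for the Ubuntu desktop
    Exec=tint2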

    Read the article

  • 5.1 surround sound

    - by rocker9455
    OK, so I've always had trouble enabling 5.1 in Ubuntu. Running 'alsamixer' I have: Master, Headphones, PCM, Front, Front Mi, Front Mi, Surround, Center. All are at 100%. Card: HDA Intel, Chip: Realtek ALC888. (This is my onboard sound; it's a Dell Studio with 7.1 integrated sound.) Running "speaker-test -c6 -twav" I only get the front two speakers (right/left) making any noise. The others make no noise at all. I have no other sound card to use, as all my PCI slots are used up. daemon.conf: ; daemonize = no ; fail = yes ; allow-module-loading = yes ; allow-exit = yes ; use-pid-file = yes ; system-instance = no ; enable-shm = yes ; shm-size-bytes = 0 # setting this 0 will use the system-default, usually 64 MiB ; lock-memory = no ; cpu-limit = no ; high-priority = yes ; nice-level = -11 ; realtime-scheduling = yes ; realtime-priority = 5 ; exit-idle-time = 20 ; scache-idle-time = 20 ; dl-search-path = (depends on architecture) ; load-default-script-file = yes ; default-script-file = ; log-target = auto ; log-level = notice ; log-meta = no ; log-time = no ; log-backtrace = 0 resample-method = speex-float-1 ; enable-remixing = yes ; enable-lfe-remixing = no flat-volumes = no ; rlimit-fsize = -1 ; rlimit-data = -1 ; rlimit-stack = -1 ; rlimit-core = -1 ; rlimit-as = -1 ; rlimit-rss = -1 ; rlimit-nproc = -1 ; rlimit-nofile = 256 ; rlimit-memlock = -1 ; rlimit-locks = -1 ; rlimit-sigpending = -1 ; rlimit-msgqueue = -1 ; rlimit-nice = 31 ; rlimit-rtprio = 9 ; rlimit-rttime = 1000000 ; default-sample-format = s16le ; default-sample-rate = 44100 ; default-sample-channels = 6 ; default-channel-map = front-left,front-right default-fragments = 8 default-fragment-size-msec = 10
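    One thing that stands out in the daemon.conf above: default-sample-channels = 6 is still commented out (the leading semicolon), so PulseAudio falls back to its stereo default. A guess at the relevant change, not a confirmed fix (the 5.1 channel-map value below is an assumption, not taken from the poster's file):

    default-sample-channels = 6
    default-channel-map = front-left,front-right,rear-left,rear-right,front-center,lfe

    After editing, PulseAudio has to be restarted (for example with pulseaudio -k) before the change takes effect.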

    Read the article
