Search Results

Search found 16397 results on 656 pages for 'pass 2012'.


  • C# ASP.NET AJAX CascadingDropDown SelectedValue property problem

    - by Eyla
    Greetings, I have a problem to use selected value propriety of CascadingDropDown. I have 3 asp dropdown controls with ajax CascadingDropDown for each one of them. I have no problem to bind data to the 3 CascadingDropDown but my problem is to rebind CascadingDropDown. simply what I want to do is to select a record from Gridview which has the selected values for the CascadingDropDown that I want to pass then rebind the CascadingDropDown with selected value. I'm posting my code down which include: 1-ASP.NET code. 2-Code behind to handle selected record from grid view. 3- web servisice that handle binding data to the 3 CascadingDropDown. please advice how to rebind data to CascadingDropDown with selected value. by the way I used selected value proprety as showning in my code but it is not working and there is no error. Thank you, ........................ ASP.NET code ........................ <%@ Page Title="" Language="C#" MasterPageFile="~/Master.Master" AutoEventWireup="true" CodeBehind="WebForm1.aspx.cs" Inherits="IMAM_APPLICATION.WebForm1" %> <%@ Register Assembly="AjaxControlToolkit" Namespace="AjaxControlToolkit" TagPrefix="cc1" %> <asp:Content ID="Content2" ContentPlaceHolderID="ContentPlaceHolder1" runat="server"> <asp:GridView ID="GridView1" runat="server" AutoGenerateColumns="False" DataKeyNames="idcontact_info" DataSourceID="ObjectDataSource1" onselectedindexchanged="GridView1_SelectedIndexChanged"> <Columns> <asp:CommandField ShowSelectButton="True" /> <asp:BoundField DataField="idcontact_info" HeaderText="idcontact_info" InsertVisible="False" ReadOnly="True" SortExpression="idcontact_info" /> <asp:BoundField DataField="Work_Field" HeaderText="Work_Field" SortExpression="Work_Field" /> <asp:BoundField DataField="Occupation" HeaderText="Occupation" SortExpression="Occupation" /> <asp:BoundField DataField="sub_Occupation" HeaderText="sub_Occupation" SortExpression="sub_Occupation" /> </Columns> </asp:GridView> <asp:Label ID="lbl" runat="server" Text="Label"></asp:Label> <asp:ObjectDataSource ID="ObjectDataSource1" runat="server" DeleteMethod="Delete" InsertMethod="Insert" OldValuesParameterFormatString="original_{0}" SelectMethod="GetData" TypeName="IMAM_APPLICATION.DSContactTableAdapters.contact_infoTableAdapter" UpdateMethod="Update"> <DeleteParameters> <asp:Parameter Name="Original_idcontact_info" Type="Int32" /> </DeleteParameters> <UpdateParameters> <asp:Parameter Name="Work_Field" Type="String" /> <asp:Parameter Name="Occupation" Type="String" /> <asp:Parameter Name="sub_Occupation" Type="String" /> <asp:Parameter Name="Original_idcontact_info" Type="Int32" /> </UpdateParameters> <InsertParameters> <asp:Parameter Name="Work_Field" Type="String" /> <asp:Parameter Name="Occupation" Type="String" /> <asp:Parameter Name="sub_Occupation" Type="String" /> </InsertParameters> </asp:ObjectDataSource> <asp:DropDownList ID="cmbWorkField" runat="server" Style="top: 715px; left: 180px; position: absolute; height: 22px; width: 126px"> </asp:DropDownList> <asp:DropDownList runat="server" ID="cmbOccupation" Style="top: 745px; left: 180px; position: absolute; height: 22px; width: 77px"> </asp:DropDownList> <asp:DropDownList ID="cmbSubOccup" runat="server" style="position:absolute; top: 775px; left: 180px;"> </asp:DropDownList> <cc1:CascadingDropDown ID="cmbWorkField_CascadingDropDown" runat="server" TargetControlID="cmbWorkField" Category="WorkField" LoadingText="Please Wait ..." PromptText="Select Wor kField ..." 
ServiceMethod="GetWorkField" ServicePath="ServiceTags.asmx"> </cc1:CascadingDropDown> <cc1:CascadingDropDown ID="cmbOccupation_CascadingDropDown" runat="server" TargetControlID="cmbOccupation" Category="Occup" LoadingText="Please wait..." PromptText="Select Occup ..." ServiceMethod="GetOccup" ServicePath="ServiceTags.asmx" ParentControlID="cmbWorkField"> </cc1:CascadingDropDown> <cc1:CascadingDropDown ID="cmbSubOccup_CascadingDropDown" runat="server" Category="SubOccup" Enabled="True" LoadingText="Please Wait..." ParentControlID="cmbOccupation" PromptText="Select Sub Occup" ServiceMethod="GetSubOccup" ServicePath="ServiceTags.asmx" TargetControlID="cmbSubOccup"> </cc1:CascadingDropDown> </asp:Content> ...................................................... C# code behind ...................................................... protected void GridView1_SelectedIndexChanged(object sender, EventArgs e) { string strg = GridView1.SelectedDataKey["idcontact_info"].ToString(); int index = Convert.ToInt32(GridView1.SelectedDataKey["idcontact_info"].ToString()); //txtSearch.Text = GridView1.SelectedIndex.ToString(); // txtSearch.Text = GridView1.SelectedDataKey["idcontact_info"].ToString(); DSContactTableAdapters.contact_infoTableAdapter GetByIDAdapter = new DSContactTableAdapters.contact_infoTableAdapter(); DSContact.contact_infoDataTable ByID = GetByIDAdapter.GetDataByID(index); //DSSearch.contact_infoDataTable FirstName = FirstNameAdapter.GetDataByFirstNameList(prefixText); foreach (DataRow dr in ByID.Rows) { lbl.Text = dr["Work_Field"].ToString() + "....." + dr["Occupation"].ToString() + "....." + dr["sub_Occupation"].ToString(); cmbWorkField_CascadingDropDown.SelectedValue = dr["Work_Field"].ToString(); cmbOccupation_CascadingDropDown.SelectedValue = dr["Occupation"].ToString(); cmbSubOccup_CascadingDropDown.SelectedValue = dr["sub_Occupation"].ToString(); } } ....................................................... web Service ....................................................... 
[WebMethod] public CascadingDropDownNameValue[] GetWorkField(string knownCategoryValues, string category) { //dsCarsTableAdapters.CarsTableAdapter makeAdapter = new dsCarsTableAdapters.CarsTableAdapter(); //dsCars.CarsDataTable makes = makeAdapter.GetAllCars(); DSContactTableAdapters.tag_work_fieldTableAdapter GetWorkFieldAdapter = new DSContactTableAdapters.tag_work_fieldTableAdapter(); DSContact.tag_work_fieldDataTable WorkFields = GetWorkFieldAdapter.GetDataByGetWorkField(); List<CascadingDropDownNameValue> values = new List<CascadingDropDownNameValue>(); foreach (DataRow dr in WorkFields) { string Work_Field = (string)dr["work_Field_name"]; int idtag_work_field = (int)dr["idtag_work_field"]; values.Add(new CascadingDropDownNameValue(Work_Field, idtag_work_field.ToString())); } return values.ToArray(); } [WebMethod] public CascadingDropDownNameValue[] GetOccup(string knownCategoryValues, string category) { StringDictionary kv = CascadingDropDown.ParseKnownCategoryValuesString(knownCategoryValues); int idtag_work_field; if (!kv.ContainsKey("WorkField") || !Int32.TryParse(kv["WorkField"], out idtag_work_field)) { return null; } //dsCarModelsTableAdapters.CarModelsTableAdapter modelAdapter = new dsCarModelsTableAdapters.CarModelsTableAdapter(); //dsCarModels.CarModelsDataTable models = modelAdapter.GetModelsByCarId(makeId); DSContactTableAdapters.tag_OccupTableAdapter GetOccupAdapter = new DSContactTableAdapters.tag_OccupTableAdapter(); DSContact.tag_OccupDataTable Occups = GetOccupAdapter.GetByOccup_ID(idtag_work_field); // List<CascadingDropDownNameValue> values = new List<CascadingDropDownNameValue>(); foreach (DataRow dr in Occups) { values.Add(new CascadingDropDownNameValue((string)dr["Occup_Name"], dr["idtag_Occup"].ToString())); } return values.ToArray(); } [WebMethod] public CascadingDropDownNameValue[] GetSubOccup(string knownCategoryValues, string category) { StringDictionary kv = CascadingDropDown.ParseKnownCategoryValuesString(knownCategoryValues); int idtag_Occup; if (!kv.ContainsKey("Occup") || !Int32.TryParse(kv["Occup"], out idtag_Occup)) { return null; } //dsModelColorsTableAdapters.ModelColorsTableAdapter adapter = new dsModelColorsTableAdapters.ModelColorsTableAdapter(); //dsModelColors.ModelColorsDataTable colors = adapter.GetColorsByModelId(colorId); DSContactTableAdapters.tag_Sub_OccupTableAdapter GetSubOccupAdapter = new DSContactTableAdapters.tag_Sub_OccupTableAdapter(); DSContact.tag_Sub_OccupDataTable SubOccups = GetSubOccupAdapter.GetDataBy_Sub_Occup_ID(idtag_Occup); List<CascadingDropDownNameValue> values = new List<CascadingDropDownNameValue>(); foreach (DataRow dr in SubOccups) { values.Add(new CascadingDropDownNameValue((string)dr["Sub_Occup_Name"], dr["idtag_Sub_Occup"].ToString())); } return values.ToArray(); }
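    A note on the SelectedValue assignment above: the CascadingDropDown extender's SelectedValue has to match the value half of the CascadingDropDownNameValue pairs returned by the web service - in this code those are the numeric IDs (idtag_work_field, idtag_Occup, idtag_Sub_Occup) - whereas the code-behind assigns the display names stored in contact_info. Below is a minimal sketch of the handler under that assumption; the Get...IdByName helpers are hypothetical placeholders for whatever table-adapter queries map the stored names back to their tag IDs.

        // Sketch of GridView1_SelectedIndexChanged that assigns IDs, not names.
        // The Get...IdByName helpers are hypothetical; replace them with the
        // table-adapter queries that map the stored names back to their tag IDs.
        protected void GridView1_SelectedIndexChanged(object sender, EventArgs e)
        {
            int index = Convert.ToInt32(GridView1.SelectedDataKey["idcontact_info"]);

            var adapter = new DSContactTableAdapters.contact_infoTableAdapter();
            DSContact.contact_infoDataTable byId = adapter.GetDataByID(index);
            if (byId.Rows.Count == 0)
            {
                return;
            }

            DataRow dr = byId.Rows[0];

            // Map the display names back to the IDs the web service methods emit.
            string workFieldId = GetWorkFieldIdByName(dr["Work_Field"].ToString());    // hypothetical
            string occupId     = GetOccupIdByName(dr["Occupation"].ToString());        // hypothetical
            string subOccupId  = GetSubOccupIdByName(dr["sub_Occupation"].ToString()); // hypothetical

            // The extenders repopulate asynchronously and then select these values.
            cmbWorkField_CascadingDropDown.SelectedValue  = workFieldId;
            cmbOccupation_CascadingDropDown.SelectedValue = occupId;
            cmbSubOccup_CascadingDropDown.SelectedValue   = subOccupId;
        }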

    Read the article

  • A way of doing real-world test-driven development (and some thoughts about it)

    - by Thomas Weller
    Lately, I exchanged some arguments with Derick Bailey about some details of the red-green-refactor cycle of the Test-driven development process. In short, the issue revolved around the fact that it’s not enough to have a test red or green, but it’s also important to have it red or green for the right reasons. While for me, it’s sufficient to initially have a NotImplementedException in place, Derick argues that this is not totally correct (see these two posts: Red/Green/Refactor, For The Right Reasons and Red For The Right Reason: Fail By Assertion, Not By Anything Else). And he’s right. But on the other hand, I had no idea how his insights could have any practical consequence for my own individual interpretation of the red-green-refactor cycle (which is not really red-green-refactor, at least not in its pure sense, see the rest of this article). This made me think deeply for some days now. In the end I found out that the ‘right reason’ changes in my understanding depending on what development phase I’m in. To make this clear (at least I hope it becomes clear…) I started to describe my way of working in some detail, and then something strange happened: The scope of the article slightly shifted from focusing ‘only’ on the ‘right reason’ issue to something more general, which you might describe as something like  'Doing real-world TDD in .NET , with massive use of third-party add-ins’. This is because I feel that there is a more general statement about Test-driven development to make:  It’s high time to speak about the ‘How’ of TDD, not always only the ‘Why’. Much has been said about this, and me myself also contributed to that (see here: TDD is not about testing, it's about how we develop software). But always justifying what you do is very unsatisfying in the long run, it is inherently defensive, and it costs time and effort that could be used for better and more important things. And frankly: I’m somewhat sick and tired of repeating time and again that the test-driven way of software development is highly preferable for many reasons - I don’t want to spent my time exclusively on stating the obvious… So, again, let’s say it clearly: TDD is programming, and programming is TDD. Other ways of programming (code-first, sometimes called cowboy-coding) are exceptional and need justification. – I know that there are many people out there who will disagree with this radical statement, and I also know that it’s not a description of the real world but more of a mission statement or something. But nevertheless I’m absolutely sure that in some years this statement will be nothing but a platitude. Side note: Some parts of this post read as if I were paid by Jetbrains (the manufacturer of the ReSharper add-in – R#), but I swear I’m not. Rather I think that Visual Studio is just not production-complete without it, and I wouldn’t even consider to do professional work without having this add-in installed... The three parts of a software component Before I go into some details, I first should describe my understanding of what belongs to a software component (assembly, type, or method) during the production process (i.e. the coding phase). Roughly, I come up with the three parts shown below:   First, we need to have some initial sort of requirement. This can be a multi-page formal document, a vague idea in some programmer’s brain of what might be needed, or anything in between. In either way, there has to be some sort of requirement, be it explicit or not. 
– At the C# micro-level, the best way that I found to formulate that is to define interfaces for just about everything, even for internal classes, and to provide them with exhaustive xml comments. The next step then is to re-formulate these requirements in an executable form. This is specific to the respective programming language. - For C#/.NET, the Gallio framework (which includes MbUnit) in conjunction with the ReSharper add-in for Visual Studio is my toolset of choice. The third part then finally is the production code itself. It’s development is entirely driven by the requirements and their executable formulation. This is the delivery, the two other parts are ‘only’ there to make its production possible, to give it a decent quality and reliability, and to significantly reduce related costs down the maintenance timeline. So while the first two parts are not really relevant for the customer, they are very important for the developer. The customer (or in Scrum terms: the Product Owner) is not interested at all in how  the product is developed, he is only interested in the fact that it is developed as cost-effective as possible, and that it meets his functional and non-functional requirements. The rest is solely a matter of the developer’s craftsmanship, and this is what I want to talk about during the remainder of this article… An example To demonstrate my way of doing real-world TDD, I decided to show the development of a (very) simple Calculator component. The example is deliberately trivial and silly, as examples always are. I am totally aware of the fact that real life is never that simple, but I only want to show some development principles here… The requirement As already said above, I start with writing down some words on the initial requirement, and I normally use interfaces for that, even for internal classes - the typical question “intf or not” doesn’t even come to mind. I need them for my usual workflow and using them automatically produces high componentized and testable code anyway. To think about their usage in every single situation would slow down the production process unnecessarily. So this is what I begin with: namespace Calculator {     /// <summary>     /// Defines a very simple calculator component for demo purposes.     /// </summary>     public interface ICalculator     {         /// <summary>         /// Gets the result of the last successful operation.         /// </summary>         /// <value>The last result.</value>         /// <remarks>         /// Will be <see langword="null" /> before the first successful operation.         /// </remarks>         double? LastResult { get; }       } // interface ICalculator   } // namespace Calculator So, I’m not beginning with a test, but with a sort of code declaration - and still I insist on being 100% test-driven. There are three important things here: Starting this way gives me a method signature, which allows to use IntelliSense and AutoCompletion and thus eliminates the danger of typos - one of the most regular, annoying, time-consuming, and therefore expensive sources of error in the development process. In my understanding, the interface definition as a whole is more of a readable requirement document and technical documentation than anything else. So this is at least as much about documentation than about coding. The documentation must completely describe the behavior of the documented element. I normally use an IoC container or some sort of self-written provider-like model in my architecture. 
In either case, I need my components defined via service interfaces anyway. - I will use the LinFu IoC framework here, for no other reason as that is is very simple to use. The ‘Red’ (pt. 1)   First I create a folder for the project’s third-party libraries and put the LinFu.Core dll there. Then I set up a test project (via a Gallio project template), and add references to the Calculator project and the LinFu dll. Finally I’m ready to write the first test, which will look like the following: namespace Calculator.Test {     [TestFixture]     public class CalculatorTest     {         private readonly ServiceContainer container = new ServiceContainer();           [Test]         public void CalculatorLastResultIsInitiallyNull()         {             ICalculator calculator = container.GetService<ICalculator>();               Assert.IsNull(calculator.LastResult);         }       } // class CalculatorTest   } // namespace Calculator.Test       This is basically the executable formulation of what the interface definition states (part of). Side note: There’s one principle of TDD that is just plain wrong in my eyes: I’m talking about the Red is 'does not compile' thing. How could a compiler error ever be interpreted as a valid test outcome? I never understood that, it just makes no sense to me. (Or, in Derick’s terms: this reason is as wrong as a reason ever could be…) A compiler error tells me: Your code is incorrect, but nothing more.  Instead, the ‘Red’ part of the red-green-refactor cycle has a clearly defined meaning to me: It means that the test works as intended and fails only if its assumptions are not met for some reason. Back to our Calculator. When I execute the above test with R#, the Gallio plugin will give me this output: So this tells me that the test is red for the wrong reason: There’s no implementation that the IoC-container could load, of course. So let’s fix that. With R#, this is very easy: First, create an ICalculator - derived type:        Next, implement the interface members: And finally, move the new class to its own file: So far my ‘work’ was six mouse clicks long, the only thing that’s left to do manually here, is to add the Ioc-specific wiring-declaration and also to make the respective class non-public, which I regularly do to force my components to communicate exclusively via interfaces: This is what my Calculator class looks like as of now: using System; using LinFu.IoC.Configuration;   namespace Calculator {     [Implements(typeof(ICalculator))]     internal class Calculator : ICalculator     {         public double? LastResult         {             get             {                 throw new NotImplementedException();             }         }     } } Back to the test fixture, we have to put our IoC container to work: [TestFixture] public class CalculatorTest {     #region Fields       private readonly ServiceContainer container = new ServiceContainer();       #endregion // Fields       #region Setup/TearDown       [FixtureSetUp]     public void FixtureSetUp()     {        container.LoadFrom(AppDomain.CurrentDomain.BaseDirectory, "Calculator.dll");     }       ... Because I have a R# live template defined for the setup/teardown method skeleton as well, the only manual coding here again is the IoC-specific stuff: two lines, not more… The ‘Red’ (pt. 2) Now, the execution of the above test gives the following result: This time, the test outcome tells me that the method under test is called. 
And this is the point, where Derick and I seem to have somewhat different views on the subject: Of course, the test still is worthless regarding the red/green outcome (or: it’s still red for the wrong reasons, in that it gives a false negative). But as far as I am concerned, I’m not really interested in the test outcome at this point of the red-green-refactor cycle. Rather, I only want to assert that my test actually calls the right method. If that’s the case, I will happily go on to the ‘Green’ part… The ‘Green’ Making the test green is quite trivial. Just make LastResult an automatic property:     [Implements(typeof(ICalculator))]     internal class Calculator : ICalculator     {         public double? LastResult { get; private set; }     }         One more round… Now on to something slightly more demanding (cough…). Let’s state that our Calculator exposes an Add() method:         ...   /// <summary>         /// Adds the specified operands.         /// </summary>         /// <param name="operand1">The operand1.</param>         /// <param name="operand2">The operand2.</param>         /// <returns>The result of the additon.</returns>         /// <exception cref="ArgumentException">         /// Argument <paramref name="operand1"/> is &lt; 0.<br/>         /// -- or --<br/>         /// Argument <paramref name="operand2"/> is &lt; 0.         /// </exception>         double Add(double operand1, double operand2);       } // interface ICalculator A remark: I sometimes hear the complaint that xml comment stuff like the above is hard to read. That’s certainly true, but irrelevant to me, because I read xml code comments with the CR_Documentor tool window. And using that, it looks like this:   Apart from that, I’m heavily using xml code comments (see e.g. here for a detailed guide) because there is the possibility of automating help generation with nightly CI builds (using MS Sandcastle and the Sandcastle Help File Builder), and then publishing the results to some intranet location.  This way, a team always has first class, up-to-date technical documentation at hand about the current codebase. (And, also very important for speeding up things and avoiding typos: You have IntelliSense/AutoCompletion and R# support, and the comments are subject to compiler checking…).     Back to our Calculator again: Two more R# – clicks implement the Add() skeleton:         ...           public double Add(double operand1, double operand2)         {             throw new NotImplementedException();         }       } // class Calculator As we have stated in the interface definition (which actually serves as our requirement document!), the operands are not allowed to be negative. So let’s start implementing that. Here’s the test: [Test] [Row(-0.5, 2)] public void AddThrowsOnNegativeOperands(double operand1, double operand2) {     ICalculator calculator = container.GetService<ICalculator>();       Assert.Throws<ArgumentException>(() => calculator.Add(operand1, operand2)); } As you can see, I’m using a data-driven unit test method here, mainly for these two reasons: Because I know that I will have to do the same test for the second operand in a few seconds, I save myself from implementing another test method for this purpose. Rather, I only will have to add another Row attribute to the existing one. From the test report below, you can see that the argument values are explicitly printed out. 
This can be a valuable documentation feature even when everything is green: One can quickly review what values were tested exactly - the complete Gallio HTML-report (as it will be produced by the Continuous Integration runs) shows these values in a quite clear format (see below for an example). Back to our Calculator development again, this is what the test result tells us at the moment: So we’re red again, because there is not yet an implementation… Next we go on and implement the necessary parameter verification to become green again, and then we do the same thing for the second operand. To make a long story short, here’s the test and the method implementation at the end of the second cycle: // in CalculatorTest:   [Test] [Row(-0.5, 2)] [Row(295, -123)] public void AddThrowsOnNegativeOperands(double operand1, double operand2) {     ICalculator calculator = container.GetService<ICalculator>();       Assert.Throws<ArgumentException>(() => calculator.Add(operand1, operand2)); }   // in Calculator: public double Add(double operand1, double operand2) {     if (operand1 < 0.0)     {         throw new ArgumentException("Value must not be negative.", "operand1");     }     if (operand2 < 0.0)     {         throw new ArgumentException("Value must not be negative.", "operand2");     }     throw new NotImplementedException(); } So far, we have sheltered our method from unwanted input, and now we can safely operate on the parameters without further caring about their validity (this is my interpretation of the Fail Fast principle, which is regarded here in more detail). Now we can think about the method’s successful outcomes. First let’s write another test for that: [Test] [Row(1, 1, 2)] public void TestAdd(double operand1, double operand2, double expectedResult) {     ICalculator calculator = container.GetService<ICalculator>();       double result = calculator.Add(operand1, operand2);       Assert.AreEqual(expectedResult, result); } Again, I’m regularly using row based test methods for these kinds of unit tests. The above shown pattern proved to be extremely helpful for my development work, I call it the Defined-Input/Expected-Output test idiom: You define your input arguments together with the expected method result. There are two major benefits from that way of testing: In the course of refining a method, it’s very likely to come up with additional test cases. In our case, we might add tests for some edge cases like ‘one of the operands is zero’ or ‘the sum of the two operands causes an overflow’, or maybe there’s an external test protocol that has to be fulfilled (e.g. an ISO norm for medical software), and this results in the need of testing against additional values. In all these scenarios we only have to add another Row attribute to the test. Remember that the argument values are written to the test report, so as a side-effect this produces valuable documentation. (This can become especially important if the fulfillment of some sort of external requirements has to be proven). 
So your test method might look something like that in the end: [Test, Description("Arguments: operand1, operand2, expectedResult")] [Row(1, 1, 2)] [Row(0, 999999999, 999999999)] [Row(0, 0, 0)] [Row(0, double.MaxValue, double.MaxValue)] [Row(4, double.MaxValue - 2.5, double.MaxValue)] public void TestAdd(double operand1, double operand2, double expectedResult) {     ICalculator calculator = container.GetService<ICalculator>();       double result = calculator.Add(operand1, operand2);       Assert.AreEqual(expectedResult, result); } And this will produce the following HTML report (with Gallio):   Not bad for the amount of work we invested in it, huh? - There might be scenarios where reports like that can be useful for demonstration purposes during a Scrum sprint review… The last requirement to fulfill is that the LastResult property is expected to store the result of the last operation. I don’t show this here, it’s trivial enough and brings nothing new… And finally: Refactor (for the right reasons) To demonstrate my way of going through the refactoring portion of the red-green-refactor cycle, I added another method to our Calculator component, namely Subtract(). Here’s the code (tests and production): // CalculatorTest.cs:   [Test, Description("Arguments: operand1, operand2, expectedResult")] [Row(1, 1, 0)] [Row(0, 999999999, -999999999)] [Row(0, 0, 0)] [Row(0, double.MaxValue, -double.MaxValue)] [Row(4, double.MaxValue - 2.5, -double.MaxValue)] public void TestSubtract(double operand1, double operand2, double expectedResult) {     ICalculator calculator = container.GetService<ICalculator>();       double result = calculator.Subtract(operand1, operand2);       Assert.AreEqual(expectedResult, result); }   [Test, Description("Arguments: operand1, operand2, expectedResult")] [Row(1, 1, 0)] [Row(0, 999999999, -999999999)] [Row(0, 0, 0)] [Row(0, double.MaxValue, -double.MaxValue)] [Row(4, double.MaxValue - 2.5, -double.MaxValue)] public void TestSubtractGivesExpectedLastResult(double operand1, double operand2, double expectedResult) {     ICalculator calculator = container.GetService<ICalculator>();       calculator.Subtract(operand1, operand2);       Assert.AreEqual(expectedResult, calculator.LastResult); }   ...   // ICalculator.cs: /// <summary> /// Subtracts the specified operands. /// </summary> /// <param name="operand1">The operand1.</param> /// <param name="operand2">The operand2.</param> /// <returns>The result of the subtraction.</returns> /// <exception cref="ArgumentException"> /// Argument <paramref name="operand1"/> is &lt; 0.<br/> /// -- or --<br/> /// Argument <paramref name="operand2"/> is &lt; 0. /// </exception> double Subtract(double operand1, double operand2);   ...   // Calculator.cs:   public double Subtract(double operand1, double operand2) {     if (operand1 < 0.0)     {         throw new ArgumentException("Value must not be negative.", "operand1");     }       if (operand2 < 0.0)     {         throw new ArgumentException("Value must not be negative.", "operand2");     }       return (this.LastResult = operand1 - operand2).Value; }   Obviously, the argument validation stuff that was produced during the red-green part of our cycle duplicates the code from the previous Add() method. So, to avoid code duplication and minimize the number of code lines of the production code, we do an Extract Method refactoring. 
One more time, this is only a matter of a few mouse clicks (and giving the new method a name) with R#: Having done that, our production code finally looks like that: using System; using LinFu.IoC.Configuration;   namespace Calculator {     [Implements(typeof(ICalculator))]     internal class Calculator : ICalculator     {         #region ICalculator           public double? LastResult { get; private set; }           public double Add(double operand1, double operand2)         {             ThrowIfOneOperandIsInvalid(operand1, operand2);               return (this.LastResult = operand1 + operand2).Value;         }           public double Subtract(double operand1, double operand2)         {             ThrowIfOneOperandIsInvalid(operand1, operand2);               return (this.LastResult = operand1 - operand2).Value;         }           #endregion // ICalculator           #region Implementation (Helper)           private static void ThrowIfOneOperandIsInvalid(double operand1, double operand2)         {             if (operand1 < 0.0)             {                 throw new ArgumentException("Value must not be negative.", "operand1");             }               if (operand2 < 0.0)             {                 throw new ArgumentException("Value must not be negative.", "operand2");             }         }           #endregion // Implementation (Helper)       } // class Calculator   } // namespace Calculator But is the above worth the effort at all? It’s obviously trivial and not very impressive. All our tests were green (for the right reasons), and refactoring the code did not change anything. It’s not immediately clear how this refactoring work adds value to the project. Derick puts it like this: STOP! Hold on a second… before you go any further and before you even think about refactoring what you just wrote to make your test pass, you need to understand something: if your done with your requirements after making the test green, you are not required to refactor the code. I know… I’m speaking heresy, here. Toss me to the wolves, I’ve gone over to the dark side! Seriously, though… if your test is passing for the right reasons, and you do not need to write any test or any more code for you class at this point, what value does refactoring add? Derick immediately answers his own question: So why should you follow the refactor portion of red/green/refactor? When you have added code that makes the system less readable, less understandable, less expressive of the domain or concern’s intentions, less architecturally sound, less DRY, etc, then you should refactor it. I couldn’t state it more precise. From my personal perspective, I’d add the following: You have to keep in mind that real-world software systems are usually quite large and there are dozens or even hundreds of occasions where micro-refactorings like the above can be applied. It’s the sum of them all that counts. And to have a good overall quality of the system (e.g. in terms of the Code Duplication Percentage metric) you have to be pedantic on the individual, seemingly trivial cases. My job regularly requires the reading and understanding of ‘foreign’ code. 
So code quality/readability really makes a HUGE difference for me – sometimes it can be even the difference between project success and failure… Conclusions The above described development process emerged over the years, and there were mainly two things that guided its evolution (you might call it eternal principles, personal beliefs, or anything in between): Test-driven development is the normal, natural way of writing software, code-first is exceptional. So ‘doing TDD or not’ is not a question. And good, stable code can only reliably be produced by doing TDD (yes, I know: many will strongly disagree here again, but I’ve never seen high-quality code – and high-quality code is code that stood the test of time and causes low maintenance costs – that was produced code-first…) It’s the production code that pays our bills in the end. (Though I have seen customers these days who demand an acceptance test battery as part of the final delivery. Things seem to go into the right direction…). The test code serves ‘only’ to make the production code work. But it’s the number of delivered features which solely counts at the end of the day - no matter how much test code you wrote or how good it is. With these two things in mind, I tried to optimize my coding process for coding speed – or, in business terms: productivity - without sacrificing the principles of TDD (more than I’d do either way…).  As a result, I consider a ratio of about 3-5/1 for test code vs. production code as normal and desirable. In other words: roughly 60-80% of my code is test code (This might sound heavy, but that is mainly due to the fact that software development standards only begin to evolve. The entire software development profession is very young, historically seen; only at the very beginning, and there are no viable standards yet. If you think about software development as a kind of casting process, where the test code is the mold and the resulting production code is the final product, then the above ratio sounds no longer extraordinary…) Although the above might look like very much unnecessary work at first sight, it’s not. With the aid of the mentioned add-ins, doing all the above is a matter of minutes, sometimes seconds (while writing this post took hours and days…). The most important thing is to have the right tools at hand. Slow developer machines or the lack of a tool or something like that - for ‘saving’ a few 100 bucks -  is just not acceptable and a very bad decision in business terms (though I quite some times have seen and heard that…). Production of high-quality products needs the usage of high-quality tools. This is a platitude that every craftsman knows… The here described round-trip will take me about five to ten minutes in my real-world development practice. I guess it’s about 30% more time compared to developing the ‘traditional’ (code-first) way. But the so manufactured ‘product’ is of much higher quality and massively reduces maintenance costs, which is by far the single biggest cost factor, as I showed in this previous post: It's the maintenance, stupid! (or: Something is rotten in developerland.). In the end, this is a highly cost-effective way of software development… But on the other hand, there clearly is a trade-off here: coding speed vs. code quality/later maintenance costs. The here described development method might be a perfect fit for the overwhelming majority of software projects, but there certainly are some scenarios where it’s not - e.g. 
if time-to-market is crucial for a software project. So this is a business decision in the end. It’s just that you have to know what you’re doing and what consequences this might have… Some last words First, I’d like to thank Derick Bailey again. His two aforementioned posts (which I strongly recommend for reading) inspired me to think deeply about my own personal way of doing TDD and to clarify my thoughts about it. I wouldn’t have done that without this inspiration. I really enjoy that kind of discussions… I agree with him in all respects. But I don’t know (yet?) how to bring his insights into the described production process without slowing things down. The above described method proved to be very “good enough” in my practical experience. But of course, I’m open to suggestions here… My rationale for now is: If the test is initially red during the red-green-refactor cycle, the ‘right reason’ is: it actually calls the right method, but this method is not yet operational. Later on, when the cycle is finished and the tests become part of the regular, automated Continuous Integration process, ‘red’ certainly must occur for the ‘right reason’: in this phase, ‘red’ MUST mean nothing but an unfulfilled assertion - Fail By Assertion, Not By Anything Else!
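    To make that closing distinction concrete, here is a small illustrative sketch using the article's own Calculator example (MbUnit/Gallio attributes and the ServiceContainer field from the fixture): during the development cycle, 'red' is acceptable as soon as the right method is reached, even if it only throws NotImplementedException; once the tests run in Continuous Integration, 'red' may only ever mean a failed assertion.

        // Two flavours of "red", using the article's Calculator example.
        [Test]
        public void Red_DuringDevelopment_RightMethodReachedButNotImplemented()
        {
            ICalculator calculator = container.GetService<ICalculator>();

            // Red here only proves the wiring: Add() is actually called and
            // still throws NotImplementedException.
            calculator.Add(1, 1);
        }

        [Test]
        public void Red_InContinuousIntegration_MustMeanAFailedAssertion()
        {
            ICalculator calculator = container.GetService<ICalculator>();

            double result = calculator.Add(1, 1);

            // Once the cycle is finished, red may only mean that this line fails:
            // "Fail By Assertion, Not By Anything Else".
            Assert.AreEqual(2.0, result);
        }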

    Read the article

  • Understanding and Implementing a Force-based graph layout algorithm

    - by zcourts
    I'm trying to implement a force base graph layout algorithm, based on http://en.wikipedia.org/wiki/Force-based_algorithms_(graph_drawing) My first attempt didn't work so I looked at http://blog.ivank.net/force-based-graph-drawing-in-javascript.html and https://github.com/dhotson/springy I changed my implementation based on what I thought I understood from those two but I haven't managed to get it right and I'm hoping someone can help? JavaScript isn't my strong point so be gentle... If you're wondering why write my own. In reality I have no real reason to write my own I'm just trying to understand how the algorithm is implemented. Especially in my first link, that demo is brilliant. This is what I've come up with //support function.bind - https://developer.mozilla.org/en/JavaScript/Reference/Global_Objects/Function/bind#Compatibility if (!Function.prototype.bind) { Function.prototype.bind = function (oThis) { if (typeof this !== "function") { // closest thing possible to the ECMAScript 5 internal IsCallable function throw new TypeError("Function.prototype.bind - what is trying to be bound is not callable"); } var aArgs = Array.prototype.slice.call(arguments, 1), fToBind = this, fNOP = function () {}, fBound = function () { return fToBind.apply(this instanceof fNOP ? this : oThis || window, aArgs.concat(Array.prototype.slice.call(arguments))); }; fNOP.prototype = this.prototype; fBound.prototype = new fNOP(); return fBound; }; } (function() { var lastTime = 0; var vendors = ['ms', 'moz', 'webkit', 'o']; for(var x = 0; x < vendors.length && !window.requestAnimationFrame; ++x) { window.requestAnimationFrame = window[vendors[x]+'RequestAnimationFrame']; window.cancelAnimationFrame = window[vendors[x]+'CancelAnimationFrame'] || window[vendors[x]+'CancelRequestAnimationFrame']; } if (!window.requestAnimationFrame) window.requestAnimationFrame = function(callback, element) { var currTime = new Date().getTime(); var timeToCall = Math.max(0, 16 - (currTime - lastTime)); var id = window.setTimeout(function() { callback(currTime + timeToCall); }, timeToCall); lastTime = currTime + timeToCall; return id; }; if (!window.cancelAnimationFrame) window.cancelAnimationFrame = function(id) { clearTimeout(id); }; }()); function Graph(o){ this.options=o; this.vertices={}; this.edges={};//form {vertexID:{edgeID:edge}} } /** *Adds an edge to the graph. If the verticies in this edge are not already in the *graph then they are added */ Graph.prototype.addEdge=function(e){ //if vertex1 and vertex2 doesn't exist in this.vertices add them if(typeof(this.vertices[e.vertex1])==='undefined') this.vertices[e.vertex1]=new Vertex(e.vertex1); if(typeof(this.vertices[e.vertex2])==='undefined') this.vertices[e.vertex2]=new Vertex(e.vertex2); //add the edge if(typeof(this.edges[e.vertex1])==='undefined') this.edges[e.vertex1]={}; this.edges[e.vertex1][e.id]=e; } /** * Add a vertex to the graph. 
If a vertex with the same ID already exists then * the existing vertex's .data property is replaced with the @param v.data */ Graph.prototype.addVertex=function(v){ if(typeof(this.vertices[v.id])==='undefined') this.vertices[v.id]=v; else this.vertices[v.id].data=v.data; } function Vertex(id,data){ this.id=id; this.data=data?data:{}; //initialize to data.[x|y|z] or generate random number for each this.x = this.data.x?this.data.x:-100 + Math.random()*200; this.y = this.data.y?this.data.y:-100 + Math.random()*200; this.z = this.data.y?this.data.y:-100 + Math.random()*200; //set initial velocity to 0 this.velocity = new Point(0, 0, 0); this.mass=this.data.mass?this.data.mass:Math.random(); this.force=new Point(0,0,0); } function Edge(vertex1ID,vertex2ID){ vertex1ID=vertex1ID?vertex1ID:Math.random() vertex2ID=vertex2ID?vertex2ID:Math.random() this.id=vertex1ID+"->"+vertex2ID; this.vertex1=vertex1ID; this.vertex2=vertex2ID; } function Point(x, y, z) { this.x = x; this.y = y; this.z = z; } Point.prototype.plus=function(p){ this.x +=p.x this.y +=p.y this.z +=p.z } function ForceLayout(o){ this.repulsion = o.repulsion?o.repulsion:200; this.attraction = o.attraction?o.attraction:0.06; this.damping = o.damping?o.damping:0.9; this.graph = o.graph?o.graph:new Graph(); this.total_kinetic_energy =0; this.animationID=-1; } ForceLayout.prototype.draw=function(){ //vertex velocities initialized to (0,0,0) when a vertex is created //vertex positions initialized to random position when created cc=0; do{ this.total_kinetic_energy =0; //for each vertex for(var i in this.graph.vertices){ var thisNode=this.graph.vertices[i]; // running sum of total force on this particular node var netForce=new Point(0,0,0) //for each other node for(var j in this.graph.vertices){ if(thisNode!=this.graph.vertices[j]){ //net-force := net-force + Coulomb_repulsion( this_node, other_node ) netForce.plus(this.CoulombRepulsion( thisNode,this.graph.vertices[j])) } } //for each spring connected to this node for(var k in this.graph.edges[thisNode.id]){ //(this node, node its connected to) //pass id of this node and the node its connected to so hookesattraction //can update the force on both vertices and return that force to be //added to the net force this.HookesAttraction(thisNode.id, this.graph.edges[thisNode.id][k].vertex2 ) } // without damping, it moves forever // this_node.velocity := (this_node.velocity + timestep * net-force) * damping thisNode.velocity.x=(thisNode.velocity.x+thisNode.force.x)*this.damping; thisNode.velocity.y=(thisNode.velocity.y+thisNode.force.y)*this.damping; thisNode.velocity.z=(thisNode.velocity.z+thisNode.force.z)*this.damping; //this_node.position := this_node.position + timestep * this_node.velocity thisNode.x=thisNode.velocity.x; thisNode.y=thisNode.velocity.y; thisNode.z=thisNode.velocity.z; //normalize x,y,z??? 
//total_kinetic_energy := total_kinetic_energy + this_node.mass * (this_node.velocity)^2 this.total_kinetic_energy +=thisNode.mass*((thisNode.velocity.x+thisNode.velocity.y+thisNode.velocity.z)* (thisNode.velocity.x+thisNode.velocity.y+thisNode.velocity.z)) } cc+=1; }while(this.total_kinetic_energy >0.5) console.log(cc,this.total_kinetic_energy,this.graph) this.cancelAnimation(); } ForceLayout.prototype.HookesAttraction=function(v1ID,v2ID){ var a=this.graph.vertices[v1ID] var b=this.graph.vertices[v2ID] var force=new Point(this.attraction*(b.x - a.x),this.attraction*(b.y - a.y),this.attraction*(b.z - a.z)) // hook's attraction a.force.x += force.x; a.force.y += force.y; a.force.z += force.z; b.force.x += this.attraction*(a.x - b.x); b.force.y += this.attraction*(a.y - b.y); b.force.z += this.attraction*(a.z - b.z); return force; } ForceLayout.prototype.CoulombRepulsion=function(vertex1,vertex2){ //http://en.wikipedia.org/wiki/Coulomb's_law // distance squared = ((x1-x2)*(x1-x2)) + ((y1-y2)*(y1-y2)) + ((z1-z2)*(z1-z2)) var distanceSquared = ( (vertex1.x-vertex2.x)*(vertex1.x-vertex2.x)+ (vertex1.y-vertex2.y)*(vertex1.y-vertex2.y)+ (vertex1.z-vertex2.z)*(vertex1.z-vertex2.z) ); if(distanceSquared==0) distanceSquared = 0.001; var coul = this.repulsion / distanceSquared; return new Point(coul * (vertex1.x-vertex2.x),coul * (vertex1.y-vertex2.y), coul * (vertex1.z-vertex2.z)); } ForceLayout.prototype.animate=function(){ if(this.animating) this.animationID=requestAnimationFrame(this.animate.bind(this)); this.draw(); } ForceLayout.prototype.cancelAnimation=function(){ cancelAnimationFrame(this.animationID); this.animating=false; } ForceLayout.prototype.redraw=function(){ this.animating=true; this.animate(); } $(document).ready(function(){ var g= new Graph(); for(var i=0;i<=100;i++){ var v1=new Vertex(Math.random(), {}) var v2=new Vertex(Math.random(), {}) var e1= new Edge(v1.id,v2.id); g.addEdge(e1); } console.log(g); var l=new ForceLayout({ graph:g }); l.redraw(); });
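    For comparison, here is a compact C# sketch of a single relaxation step as the Wikipedia pseudocode quoted in the comments above describes it. Two details worth checking against the code above: the pseudocode advances each node with position := position + timestep * velocity (the position is incremented, not replaced), and it performs one step per animation frame rather than looping until the kinetic energy settles. The Node and Vector3 types and the constants are assumptions made for the sketch, not part of the original code.

        // One relaxation step of the force-directed layout from the Wikipedia
        // pseudocode the question references. Node, Vector3 and the constants
        // are illustrative assumptions.
        using System;
        using System.Collections.Generic;
        using System.Linq;

        record struct Vector3(double X, double Y, double Z)
        {
            public static Vector3 operator +(Vector3 a, Vector3 b) => new(a.X + b.X, a.Y + b.Y, a.Z + b.Z);
            public static Vector3 operator -(Vector3 a, Vector3 b) => new(a.X - b.X, a.Y - b.Y, a.Z - b.Z);
            public static Vector3 operator *(double s, Vector3 a) => new(s * a.X, s * a.Y, s * a.Z);
            public double LengthSquared => X * X + Y * Y + Z * Z;
        }

        class Node
        {
            public Vector3 Position;
            public Vector3 Velocity;
            public double Mass = 1.0;
            public List<Node> Neighbours = new();
        }

        static class ForceLayout
        {
            const double Repulsion = 200.0, Attraction = 0.06, Damping = 0.9, TimeStep = 0.05;

            // Returns the total kinetic energy; the caller stops animating once it is small.
            public static double Step(IList<Node> nodes)
            {
                double kineticEnergy = 0.0;

                foreach (var node in nodes)
                {
                    var netForce = new Vector3(0, 0, 0);

                    // Coulomb repulsion from every other node.
                    foreach (var other in nodes.Where(n => n != node))
                    {
                        var d = node.Position - other.Position;
                        double dist2 = Math.Max(d.LengthSquared, 0.001);
                        netForce += (Repulsion / dist2) * d;
                    }

                    // Hooke attraction along each connected edge.
                    foreach (var neighbour in node.Neighbours)
                        netForce += Attraction * (neighbour.Position - node.Position);

                    // Integrate: velocity is damped, position is *incremented*.
                    node.Velocity = Damping * (node.Velocity + TimeStep * netForce);
                    node.Position += TimeStep * node.Velocity;

                    kineticEnergy += node.Mass * node.Velocity.LengthSquared;
                }

                return kineticEnergy;
            }
        }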

    Read the article

  • ActionScript 3: Reflect class applied together with rotationY

    - by algro
    Hi, I'm using a class which applies a visual reflection-effect to defined movieclips. I use a reflection-class from here: link to source. It works like a charm except when I apply a rotation to the movieclip. In my case the reflection is still visible but only a part of it. What am I doing wrong? How could I pass/include the rotation to the Reflection-Class ? Thanks in advance! This is how you apply the Reflection Class to your movieclip: var ref_mc:MovieClip = new MoviClip(); addChild(ref_mc); var r1:Reflect = new Reflect({mc:ref_mc, alpha:50, ratio:50,distance:0, updateTime:0,reflectionDropoff:1}); Now I apply a rotation to my movieclip: ref_mc.rotationY = 30; And Here the Reflect-Class: package com.pixelfumes.reflect{ import flash.display.MovieClip; import flash.display.DisplayObject; import flash.display.BitmapData; import flash.display.Bitmap; import flash.geom.Matrix; import flash.display.GradientType; import flash.display.SpreadMethod; import flash.utils.setInterval; import flash.utils.clearInterval; public class Reflect extends MovieClip{ //Created By Ben Pritchard of Pixelfumes 2007 //Thanks to Mim, Jasper, Jason Merrill and all the others who //have contributed to the improvement of this class //static var for the version of this class private static var VERSION:String = "4.0"; //reference to the movie clip we are reflecting private var mc:MovieClip; //the BitmapData object that will hold a visual copy of the mc private var mcBMP:BitmapData; //the BitmapData object that will hold the reflected image private var reflectionBMP:Bitmap; //the clip that will act as out gradient mask private var gradientMask_mc:MovieClip; //how often the reflection should update (if it is video or animated) private var updateInt:Number; //the size the reflection is allowed to reflect within private var bounds:Object; //the distance the reflection is vertically from the mc private var distance:Number = 0; function Reflect(args:Object){ /*the args object passes in the following variables /we set the values of our internal vars to math the args*/ //the clip being reflected mc = args.mc; //the alpha level of the reflection clip var alpha:Number = args.alpha/100; //the ratio opaque color used in the gradient mask var ratio:Number = args.ratio; //update time interval var updateTime:Number = args.updateTime; //the distance at which the reflection visually drops off at var reflectionDropoff:Number = args.reflectionDropoff; //the distance the reflection starts from the bottom of the mc var distance:Number = args.distance; //store width and height of the clip var mcHeight = mc.height; var mcWidth = mc.width; //store the bounds of the reflection bounds = new Object(); bounds.width = mcWidth; bounds.height = mcHeight; //create the BitmapData that will hold a snapshot of the movie clip mcBMP = new BitmapData(bounds.width, bounds.height, true, 0xFFFFFF); mcBMP.draw(mc); //create the BitmapData the will hold the reflection reflectionBMP = new Bitmap(mcBMP); //flip the reflection upside down reflectionBMP.scaleY = -1; //move the reflection to the bottom of the movie clip reflectionBMP.y = (bounds.height*2) + distance; //add the reflection to the movie clip's Display Stack var reflectionBMPRef:DisplayObject = mc.addChild(reflectionBMP); reflectionBMPRef.name = "reflectionBMP"; //add a blank movie clip to hold our gradient mask var gradientMaskRef:DisplayObject = mc.addChild(new MovieClip()); gradientMaskRef.name = "gradientMask_mc"; //get a reference to the movie clip - cast the DisplayObject that is returned as a 
MovieClip gradientMask_mc = mc.getChildByName("gradientMask_mc") as MovieClip; //set the values for the gradient fill var fillType:String = GradientType.LINEAR; var colors:Array = [0xFFFFFF, 0xFFFFFF]; var alphas:Array = [alpha, 0]; var ratios:Array = [0, ratio]; var spreadMethod:String = SpreadMethod.PAD; //create the Matrix and create the gradient box var matr:Matrix = new Matrix(); //set the height of the Matrix used for the gradient mask var matrixHeight:Number; if (reflectionDropoff<=0) { matrixHeight = bounds.height; } else { matrixHeight = bounds.height/reflectionDropoff; } matr.createGradientBox(bounds.width, matrixHeight, (90/180)*Math.PI, 0, 0); //create the gradient fill gradientMask_mc.graphics.beginGradientFill(fillType, colors, alphas, ratios, matr, spreadMethod); gradientMask_mc.graphics.drawRect(0,0,bounds.width,bounds.height); //position the mask over the reflection clip gradientMask_mc.y = mc.getChildByName("reflectionBMP").y - mc.getChildByName("reflectionBMP").height; //cache clip as a bitmap so that the gradient mask will function gradientMask_mc.cacheAsBitmap = true; mc.getChildByName("reflectionBMP").cacheAsBitmap = true; //set the mask for the reflection as the gradient mask mc.getChildByName("reflectionBMP").mask = gradientMask_mc; //if we are updating the reflection for a video or animation do so here if(updateTime > -1){ updateInt = setInterval(update, updateTime, mc); } } public function setBounds(w:Number,h:Number):void{ //allows the user to set the area that the reflection is allowed //this is useful for clips that move within themselves bounds.width = w; bounds.height = h; gradientMask_mc.width = bounds.width; redrawBMP(mc); } public function redrawBMP(mc:MovieClip):void { // redraws the bitmap reflection - Mim Gamiet [2006] mcBMP.dispose(); mcBMP = new BitmapData(bounds.width, bounds.height, true, 0xFFFFFF); mcBMP.draw(mc); } private function update(mc):void { //updates the reflection to visually match the movie clip mcBMP = new BitmapData(bounds.width, bounds.height, true, 0xFFFFFF); mcBMP.draw(mc); reflectionBMP.bitmapData = mcBMP; } public function destroy():void{ //provides a method to remove the reflection mc.removeChild(mc.getChildByName("reflectionBMP")); reflectionBMP = null; mcBMP.dispose(); clearInterval(updateInt); mc.removeChild(mc.getChildByName("gradientMask_mc")); } } }

    Read the article

  • Why won't a JavaScript-assembled iframe load in IE6 over HTTPS, although it will over HTTP?

    - by Lauren
    The issue: The iframe won't load inside the tags on the review and submit page here: https://checkout.netsuite.com/s.nl/c.659197/sc.4/category.confirm/.f Login:[email protected] pass:test03 To produce problem: - Where it says "Your Third Party Shipper Numbers (To enter one, click here.)", click "here" to see the form that won't load in IE6. It seems to load in every other modern browser. The same form works fine on this page (you have to click on the "order sample" button to see the link to the same form): http://www.avaline.com/R3000_3 Here's the HTML: <div style="border-color: rgb(255, 221, 221);" id="itmSampl"> <div id="placeshipnum" style="display: none;"></div> <div id="sampAdd"> <strong>Your Third Party Shipper Numbers</strong> (To enter one, click <a rel="nofollow" href="javascript:;" onclick="enterShipNum()">here</a>.) <ul style="list-style: none outside none; padding-left: 20px;"> <li><span class="bold">UPS #</span>: 333333</li> <li><span class="bold">FedEx #</span>: 777888999</li> </ul> </div> </div> Upon clicking the "to enter one, click here" link this is the iframe HTML in all browsers except IE6 (in IE6, the "shipnum" div element is assembled, but that's it): <div id="placeshipnum" style="display: block;"> <div id="shipnum" style="background: none repeat scroll 0% 0% rgb(255, 255, 255);"> <div class="wrapper-x"> <a title="close window" class="linkfooter" href="javascript:;" onclick="enterShipNum()"> <img height="11" width="11" alt="close window" src="/c.659197/site/av-template/x-image-browser.gif"> </a> </div> <iframe scrolling="no" height="240" frameborder="0" width="190" src="https://forms.netsuite.com/app/site/crm/externalleadpage.nl?compid=659197&amp;formid=56&amp;h=9b260d2f9bca0fd9c300&amp;[email protected]&amp;firstname=Test&amp;lastname=Account&amp;ck=Q1BnzaRXAe_RfjhE&amp;vid=Q1BnzaRXAd3Rfik7&amp;cktime=87919&amp;cart=5257&amp;promocode=SAMPLE&amp;chrole=1014&amp;cjsid=0a0102621f435ef0d0d4b3cd49ab8b2db4e253c671eb" allowtransparency="true" border="0" onload="hideShipLoadImg()" style="display: block;"></iframe></div></div> This is the relevant Javascript: // Allow for shipper number update var shipNumDisplay=0; function enterShipNum() { if (shipNumDisplay == 0){ //odrSampl(); document.getElementById('placeshipnum').style.display="block"; document.getElementById('placeshipnum').innerHTML='<div id="shipnum"><div class="wrapper-x"> <a onclick="enterShipNum()" href="javascript:;" class="linkfooter" title="close window"> <img height="11" width="11" src="/c.659197/site/av-template/x-image-browser.gif" alt="close window" /> </a> </div><iframe onload="hideShipLoadImg()" scrolling="no" height="240" frameborder="0" width="190" border="0" allowtransparency="true" src="https://forms.netsuite.com/app/site/crm/externalleadpage.nl?compid=659197&formid=56&h=9b260d2f9bca0fd9c300&[email protected]&firstname=Test&lastname=Account&ck=Q1BnzaRXAe_RfjhE&vid=Q1BnzaRXAd3Rfik7&cktime=87919&cart=5257&promocode=SAMPLE&chrole=1014&cjsid=0a0102621f435ef0d0d4b3cd49ab8b2db4e253c671eb"></iframe></div>'; shipNumDisplay=1; } else { document.getElementById('placeshipnum').style.display="none"; document.getElementById('shipnum').parentNode.removeChild(document.getElementById('shipnum')); shipNumDisplay=0; } } function hideShipLoadImg(){ var shipiframe= document.getElementById('shipnum').getElementsByTagName('iframe')[0]; shipiframe.style.display = 'block'; shipiframe.parentNode.style.background = '#fff'; } This is most of the form inside the iframe although I don't think it's relevant: <form 
style="margin: 0pt;" onsubmit="return ( window.isinited &amp;&amp; window.isvalid &amp;&amp; save_record( true ) )" action="/app/site/crm/externalleadpage.nl" enctype="multipart/form-data" method="POST" name="main_form" id="main_form"> <div class="field name"> <label for="firstname">First Name <span class="required">*</span></label> <span class="input" id="firstname_fs"><span class="input" id="firstname_val">Test</span></span><input type="hidden" id="firstname" name="firstname" value="Test" onchange="nlapiFieldChanged(null,'firstname');"> </div> <div class="field name"> <label for="lastname">Last Name <span class="required">*</span></label> <span class="input" id="lastname_fs"><span class="input" id="lastname_val">Account</span></span><input type="hidden" id="lastname" name="lastname" value="Account" onchange="nlapiFieldChanged(null,'lastname');"> </div> <div id="ups" class="field"> <label for="custentity4">UPS # </label> <span id="custentity4_fs" style="white-space: nowrap;"><input type="text" id="custentity4" onblur="if (this.checkvalid == true) {this.isvalid=(validate_field(this,'text',false,false) &amp;&amp; nlapiValidateField(null,'custentity4'));} if (this.isvalid == false) { selectAndFocusField(this); return this.isvalid;}" name="custentity4" size="25" onfocus="if (this.isvalid == true || this.isvalid == false) this.checkvalid=true;" onchange="setWindowChanged(window, true);this.isvalid=(validate_field(this,'text',true,false) &amp;&amp; nlapiValidateField(null,'custentity4'));this.checkvalid=false;if (this.isvalid) {nlapiFieldChanged(null,'custentity4');;}if (this.isvalid) this.isvalid=validate_textfield_maxlen(this,6,true,true);if (!this.isvalid) { selectAndFocusField(this);}return this.isvalid;" class="input" maxlength="6"></span> </div> <div id="fedex" class="field"> <label for="custentity9">FedEx # </label> <span id="custentity9_fs" style="white-space: nowrap;"><input type="text" id="custentity9" onblur="if (this.checkvalid == true) {this.isvalid=(validate_field(this,'text',false,false) &amp;&amp; nlapiValidateField(null,'custentity9'));} if (this.isvalid == false) { selectAndFocusField(this); return this.isvalid;}" name="custentity9" size="25" onfocus="if (this.isvalid == true || this.isvalid == false) this.checkvalid=true;" onchange="setWindowChanged(window, true);this.isvalid=(validate_field(this,'text',true,false) &amp;&amp; nlapiValidateField(null,'custentity9'));this.checkvalid=false;if (this.isvalid) {nlapiFieldChanged(null,'custentity9');;}if (this.isvalid) this.isvalid=validate_textfield_maxlen(this,9,true,true);if (!this.isvalid) { selectAndFocusField(this);}return this.isvalid;" class="input" maxlength="9"></span> </div> <div class="field hidden"><input type="hidden" id="email" name="email" value="[email protected]"></div> <div class="field"><label class="submit" for="submitbutton"><span class="required">*</span> Indicates required fields.</label></div> <input type="submit" id="submitbutton" value="submit"> <!-- REQUIRED HIDDEN FIELDS FOR HTML ONLINE FORM --> <input type="hidden" value="659197" name="compid"><input type="hidden" value="56" name="formid"><input type="hidden" value="" name="id"><input type="hidden" value="9b260d2f9bca0fd9c300" name="h"><input type="hidden" value="-1" name="rectype"><input type="hidden" value="" name="nlapiPI"><input type="hidden" value="" name="nlapiSR"><input type="hidden" value="ShipValidateField" name="nlapiVF"><input type="hidden" value="" name="nlapiFC"><input type="hidden" 
value="/app/site/crm/externalleadpage.nl?compid=659197&amp;formid=56&amp;h=9b260d2f9bca0fd9c300&amp;[email protected]&amp;firstname=Test&amp;lastname=Account&amp;ck=Q1BnzaRXAe_RfjhE&amp;vid=Q1BnzaRXAd3Rfik7&amp;cktime=87919&amp;cart=5257&amp;promocode=SAMPLE&amp;chrole=1014&amp;cjsid=0a0102621f435ef0d0d4b3cd49ab8b2db4e253c671eb" name="whence"><input type="hidden" name="submitted"> <iframe height="0" style="visibility: hidden;" name="server_commands" id="server_commands" src="javascript:false"></iframe> <!-- END OF REQUIRED HIDDEN FIELDS FOR HTML ONLINE FORM --> </form>

    Read the article

  • How can I add headers to DualList control wpf

    - by devnet247
    Hi all I am trying to write a Dual List usercontrol in wpf. I am new to wpf and I am finding it quite difficult. This is something I have put together in a couple of hours.It's not that good but a start. I would be extremely grateful if somebody with wpf experience could improve it. The aim is to simplify the usage as much as possible I am kind of stuck. I would like the user of the DualList Control to be able to set up headers how do you do that. Do I need to expose some dependency properties in my control? At the moment when loading the user has to pass a ObservableCollection is there a better way? Could you have a look and possibly make any suggestions with some code? Thanks a lot!!!!! xaml <Grid ShowGridLines="False"> <Grid.ColumnDefinitions> <ColumnDefinition Width="*"/> <ColumnDefinition Width="25px"></ColumnDefinition> <ColumnDefinition Width="*"/> </Grid.ColumnDefinitions> <Grid.RowDefinitions> <RowDefinition Height="*"></RowDefinition> </Grid.RowDefinitions> <StackPanel Orientation="Vertical" Grid.Column="0" Grid.Row="0"> <Label Name="lblLeftTitle" Content="Available"></Label> <ListView Name="lvwLeft"> </ListView> </StackPanel> <WrapPanel Grid.Column="1" Grid.Row="0"> <Button Name="btnMoveRight" Content=">" Width="25" Margin="0,35,0,0" Click="btnMoveRight_Click" /> <Button Name="btnMoveAllRight" Content=">>" Width="25" Margin="0,05,0,0" Click="btnMoveAllRight_Click" /> <Button Name="btnMoveLeft" Content="&lt;" Width="25" Margin="0,25,0,0" Click="btnMoveLeft_Click" /> <Button Name="btnMoveAllLeft" Content="&lt;&lt;" Width="25" Margin="0,05,0,0" Click="btnMoveAllLeft_Click" /> </WrapPanel> <StackPanel Orientation="Vertical" Grid.Column="2" Grid.Row="0"> <Label Name="lblRightTitle" Content="Selected"></Label> <ListView Name="lvwRight"> </ListView> </StackPanel> </Grid> Client public partial class DualListTest { public ObservableCollection<ListViewItem> LeftList { get; set; } public ObservableCollection<ListViewItem> RightList { get; set; } public DualListTest() { InitializeComponent(); LoadCustomers(); LoadDualList(); } private void LoadDualList() { dualList1.Load(LeftList, RightList); } private void LoadCustomers() { //Pretend we are getting a list of Customers from a repository. //Some go in the left List(Good Customers) some go in the Right List(Bad Customers). 
LeftList = new ObservableCollection<ListViewItem>(); RightList = new ObservableCollection<ListViewItem>(); var customers = GetCustomers(); foreach (var customer in customers) { if (customer.Status == CustomerStatus.Good) { LeftList.Add(new ListViewItem { Content = customer }); } else { RightList.Add(new ListViewItem{Content=customer }); } } } private static IEnumerable<Customer> GetCustomers() { return new List<Customer> { new Customer {Name = "Jo Blogg", Status = CustomerStatus.Good}, new Customer {Name = "Rob Smith", Status = CustomerStatus.Good}, new Customer {Name = "Michel Platini", Status = CustomerStatus.Good}, new Customer {Name = "Roberto Baggio", Status = CustomerStatus.Good}, new Customer {Name = "Gio Surname", Status = CustomerStatus.Bad}, new Customer {Name = "Diego Maradona", Status = CustomerStatus.Bad} }; } } UserControl public partial class DualList:UserControl { public ObservableCollection<ListViewItem> LeftListCollection { get; set; } public ObservableCollection<ListViewItem> RightListCollection { get; set; } public DualList() { InitializeComponent(); } public void Load(ObservableCollection<ListViewItem> leftListCollection, ObservableCollection<ListViewItem> rightListCollection) { LeftListCollection = leftListCollection; RightListCollection = rightListCollection; lvwLeft.ItemsSource = leftListCollection; lvwRight.ItemsSource = rightListCollection; EnableButtons(); } public static DependencyProperty LeftTitleProperty = DependencyProperty.Register("LeftTitle", typeof(string), typeof(Label)); public static DependencyProperty RightTitleProperty = DependencyProperty.Register("RightTitle", typeof(string), typeof(Label)); public static DependencyProperty LeftListProperty = DependencyProperty.Register("LeftList", typeof(ListView), typeof(DualList)); public static DependencyProperty RightListProperty = DependencyProperty.Register("RightList", typeof(ListView), typeof(DualList)); public string LeftTitle { get { return (string)lblLeftTitle.Content; } set { lblLeftTitle.Content = value; } } public string RightTitle { get { return (string)lblRightTitle.Content; } set { lblRightTitle.Content = value; } } public ListView LeftList { get { return lvwLeft; } set { lvwLeft = value; } } public ListView RightList { get { return lvwRight; } set { lvwRight = value; } } private void EnableButtons() { if (lvwLeft.Items.Count > 0) { btnMoveRight.IsEnabled = true; btnMoveAllRight.IsEnabled = true; } else { btnMoveRight.IsEnabled = false; btnMoveAllRight.IsEnabled = false; } if (lvwRight.Items.Count > 0) { btnMoveLeft.IsEnabled = true; btnMoveAllLeft.IsEnabled = true; } else { btnMoveLeft.IsEnabled = false; btnMoveAllLeft.IsEnabled = false; } if (lvwLeft.Items.Count != 0 || lvwRight.Items.Count != 0) return; btnMoveLeft.IsEnabled = false; btnMoveAllLeft.IsEnabled = false; btnMoveRight.IsEnabled = false; btnMoveAllRight.IsEnabled = false; } private void MoveRight() { while (lvwLeft.SelectedItems.Count > 0) { var selectedItem = (ListViewItem)lvwLeft.SelectedItem; LeftListCollection.Remove(selectedItem); RightListCollection.Add(selectedItem); } lvwRight.ItemsSource = RightListCollection; lvwLeft.ItemsSource = LeftListCollection; EnableButtons(); } private void MoveAllRight() { while (lvwLeft.Items.Count > 0) { var item = (ListViewItem)lvwLeft.Items[lvwLeft.Items.Count - 1]; LeftListCollection.Remove(item); RightListCollection.Add(item); } lvwRight.ItemsSource = RightListCollection; lvwLeft.ItemsSource = LeftListCollection; EnableButtons(); } private void MoveAllLeft() { while (lvwRight.Items.Count > 
0) { var item = (ListViewItem)lvwRight.Items[lvwRight.Items.Count - 1]; RightListCollection.Remove(item); LeftListCollection.Add(item); } lvwRight.ItemsSource = RightListCollection; lvwLeft.ItemsSource = LeftListCollection; EnableButtons(); } private void MoveLeft() { while (lvwRight.SelectedItems.Count > 0) { var selectedCustomer = (ListViewItem)lvwRight.SelectedItem; LeftListCollection.Add(selectedCustomer); RightListCollection.Remove(selectedCustomer); } lvwRight.ItemsSource = RightListCollection; lvwLeft.ItemsSource = LeftListCollection; EnableButtons(); } private void btnMoveLeft_Click(object sender, RoutedEventArgs e) { MoveLeft(); } private void btnMoveAllLeft_Click(object sender, RoutedEventArgs e) { MoveAllLeft(); } private void btnMoveRight_Click(object sender, RoutedEventArgs e) { MoveRight(); } private void btnMoveAllRight_Click(object sender, RoutedEventArgs e) { MoveAllRight(); } }
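For what it's worth, the usual way to let a consumer of the control set the header text is to register dependency properties on the user control itself (not on Label) and push the values into the labels from a change callback. The sketch below reuses the class and control names from the question (DualList, lblLeftTitle, lblRightTitle); it is only an outline of the pattern under those assumptions, not a drop-in replacement for the posted code.

using System.Windows;
using System.Windows.Controls;

public partial class DualList : UserControl
{
    // The owner type must be the control itself so that XAML such as
    // <local:DualList LeftTitle="Good Customers" RightTitle="Bad Customers" /> works.
    public static readonly DependencyProperty LeftTitleProperty =
        DependencyProperty.Register("LeftTitle", typeof(string), typeof(DualList),
            new PropertyMetadata("Available", OnTitleChanged));

    public static readonly DependencyProperty RightTitleProperty =
        DependencyProperty.Register("RightTitle", typeof(string), typeof(DualList),
            new PropertyMetadata("Selected", OnTitleChanged));

    public string LeftTitle
    {
        get { return (string)GetValue(LeftTitleProperty); }
        set { SetValue(LeftTitleProperty, value); }
    }

    public string RightTitle
    {
        get { return (string)GetValue(RightTitleProperty); }
        set { SetValue(RightTitleProperty, value); }
    }

    private static void OnTitleChanged(DependencyObject d, DependencyPropertyChangedEventArgs e)
    {
        // Push the new titles into the labels declared in the control's XAML.
        var control = (DualList)d;
        control.lblLeftTitle.Content = control.LeftTitle;
        control.lblRightTitle.Content = control.RightTitle;
    }
}

An alternative that avoids the callback is to bind the label content in the control's XAML, for example Content="{Binding LeftTitle, RelativeSource={RelativeSource Mode=FindAncestor, AncestorType={x:Type UserControl}}}". A similar pair of dependency properties typed as IEnumerable and used as ItemsSource could also remove the need to pass ObservableCollection&lt;ListViewItem&gt; into a Load method, but that is a larger change to the posted design.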

    Read the article

  • 256 Windows Azure Worker Roles, Windows Kinect and a 90's Text-Based Ray-Tracer

    - by Alan Smith
For a couple of years I have been demoing a simple render farm hosted in Windows Azure using worker roles and the Azure Storage service. At the start of the presentation I deploy an Azure application that uses 16 worker roles to render a 1,500 frame 3D ray-traced animation. At the end of the presentation, when the animation was complete, I would play the animation and delete the Azure deployment. The standing joke with the audience was that it was a “$2 demo”, as the compute charges for running the 16 instances for an hour were $1.92; factor in the bandwidth charges and it’s a couple of dollars. The point of the demo is that it highlights one of the great benefits of cloud computing: you pay for what you use, and if you need massive compute power for a short period of time, using Windows Azure can work out very cost effective. The “$2 demo” was great for presenting at user groups and conferences in that it could be deployed to Azure, used to render an animation, and then removed in a one hour session. I have always had the idea of doing something a bit more impressive with the demo, and scaling it from a “$2 demo” to a “$30 demo”. The challenge was to create a visually appealing animation in high definition format and keep the demo time down to one hour. This article will take a run through how I achieved this.

Ray Tracing

Ray tracing, a technique for generating high quality photorealistic images, gained popularity in the 90’s with companies like Pixar creating feature length computer animations, and also the emergence of shareware text-based ray tracers that could run on a home PC. In order to render a ray traced image, the ray of light that would pass from the view point must be tracked until it intersects with an object. At the intersection, the color, reflectiveness, transparency, and refractive index of the object are used to calculate if the ray will be reflected or refracted. Each pixel may require thousands of calculations to determine what color it will be in the rendered image.

Pin-Board Toys

Having very little artistic talent and a basic understanding of maths, I decided to focus on an animation that could be modeled fairly easily and would look visually impressive. I’ve always liked the pin-board desktop toys that became popular in the 80’s, and when I was working as a 3D animator back in the 90’s I always had the idea of creating a 3D ray-traced animation of a pin-board, but never found the energy to do it. Even if I had a go at it, the render time to produce an animation that would look respectable on a 486 would have been measured in months.

PolyRay

Back in 1995 I landed my first real job, after spending three years being a beach-ski-climbing-paragliding-bum, and was employed to create 3D ray-traced animations for a CD-ROM that school kids would use to learn physics. I had got into the strange and wonderful world of text-based ray tracing, and was using a shareware ray-tracer called PolyRay. PolyRay takes a text file describing a scene as input and, after a few hours of processing on a 486, produces a high quality ray-traced image. The following is an example of a basic PolyRay scene file.
background Midnight_Blue   static define matte surface { ambient 0.1 diffuse 0.7 } define matte_white texture { matte { color white } } define matte_black texture { matte { color dark_slate_gray } } define position_cylindrical 3 define lookup_sawtooth 1 define light_wood <0.6, 0.24, 0.1> define median_wood <0.3, 0.12, 0.03> define dark_wood <0.05, 0.01, 0.005>     define wooden texture { noise surface { ambient 0.2  diffuse 0.7  specular white, 0.5 microfacet Reitz 10 position_fn position_cylindrical position_scale 1  lookup_fn lookup_sawtooth octaves 1 turbulence 1 color_map( [0.0, 0.2, light_wood, light_wood] [0.2, 0.3, light_wood, median_wood] [0.3, 0.4, median_wood, light_wood] [0.4, 0.7, light_wood, light_wood] [0.7, 0.8, light_wood, median_wood] [0.8, 0.9, median_wood, light_wood] [0.9, 1.0, light_wood, dark_wood]) } } define glass texture { surface { ambient 0 diffuse 0 specular 0.2 reflection white, 0.1 transmission white, 1, 1.5 }} define shiny surface { ambient 0.1 diffuse 0.6 specular white, 0.6 microfacet Phong 7  } define steely_blue texture { shiny { color black } } define chrome texture { surface { color white ambient 0.0 diffuse 0.2 specular 0.4 microfacet Phong 10 reflection 0.8 } }   viewpoint {     from <4.000, -1.000, 1.000> at <0.000, 0.000, 0.000> up <0, 1, 0> angle 60     resolution 640, 480 aspect 1.6 image_format 0 }       light <-10, 30, 20> light <-10, 30, -20>   object { disc <0, -2, 0>, <0, 1, 0>, 30 wooden }   object { sphere <0.000, 0.000, 0.000>, 1.00 chrome } object { cylinder <0.000, 0.000, 0.000>, <0.000, 0.000, -4.000>, 0.50 chrome }   After setting up the background and defining colors and textures, the viewpoint is specified. The “camera” is located at a point in 3D space, and it looks towards another point. The angle, image resolution, and aspect ratio are specified. Two lights are present in the image at defined coordinates. The three objects in the image are a wooden disc to represent a table top, and a sphere and cylinder that intersect to form a pin that will be used for the pin board toy in the final animation. When the image is rendered, the following image is produced. The pins are modeled with a chrome surface, so they reflect the environment around them. Note that the scale of the pin shaft is not correct, this will be fixed later. Modeling the Pin Board The frame of the pin-board is made up of three boxes, and six cylinders, the front box is modeled using a clear, slightly reflective solid, with the same refractive index of glass. The other shapes are modeled as metal. object { box <-5.5, -1.5, 1>, <5.5, 5.5, 1.2> glass } object { box <-5.5, -1.5, -0.04>, <5.5, 5.5, -0.09> steely_blue } object { box <-5.5, -1.5, -0.52>, <5.5, 5.5, -0.59> steely_blue } object { cylinder <-5.2, -1.2, 1.4>, <-5.2, -1.2, -0.74>, 0.2 steely_blue } object { cylinder <5.2, -1.2, 1.4>, <5.2, -1.2, -0.74>, 0.2 steely_blue } object { cylinder <-5.2, 5.2, 1.4>, <-5.2, 5.2, -0.74>, 0.2 steely_blue } object { cylinder <5.2, 5.2, 1.4>, <5.2, 5.2, -0.74>, 0.2 steely_blue } object { cylinder <0, -1.2, 1.4>, <0, -1.2, -0.74>, 0.2 steely_blue } object { cylinder <0, 5.2, 1.4>, <0, 5.2, -0.74>, 0.2 steely_blue }   In order to create the matrix of pins that make up the pin board I used a basic console application with a few nested loops to create two intersecting matrixes of pins, which models the layout used in the pin boards. The resulting image is shown below. The pin board contains 11,481 pins, with the scene file containing 23,709 lines of code. 
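The nested-loop console application mentioned above is not listed in the article; the sketch below shows roughly what such a generator could look like. The grid size, spacing, radii and output file name are assumptions for illustration only; the emitted lines follow the object { sphere ... } and object { cylinder ... } syntax used in the scene file above.

using System;
using System.Globalization;
using System.IO;
using System.Text;

class PinBoardSceneGenerator
{
    static void Main()
    {
        const int rows = 76;          // assumed grid dimensions, giving roughly 11,500 pins
        const int cols = 76;
        const double spacing = 0.14;  // assumed distance between pin centres
        var scene = new StringBuilder();

        // Two offset matrixes of pins approximate the staggered layout of a real pin-board toy.
        for (int layer = 0; layer < 2; layer++)
        {
            double offset = layer * spacing / 2.0;
            for (int row = 0; row < rows; row++)
            {
                for (int col = 0; col < cols; col++)
                {
                    double x = -5.2 + col * spacing + offset;
                    double y = -1.2 + row * spacing + offset;

                    // Pin head (sphere) and shaft (cylinder), using the chrome texture from the scene file.
                    scene.AppendFormat(CultureInfo.InvariantCulture,
                        "object {{ sphere <{0:0.000}, {1:0.000}, 0.000>, 0.06 chrome }}\n", x, y);
                    scene.AppendFormat(CultureInfo.InvariantCulture,
                        "object {{ cylinder <{0:0.000}, {1:0.000}, 0.000>, <{0:0.000}, {1:0.000}, -0.500>, 0.03 chrome }}\n", x, y);
                }
            }
        }

        File.WriteAllText("pinboard.pi", scene.ToString());
        Console.WriteLine("Wrote {0} object lines", rows * cols * 2 * 2);
    }
}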
For the complete animation 2,000 scene files will be created, which is over 47 million lines of code. Each pin in the pin-board will slide out a specific distance when an object is pressed into the back of the board. This is easily modeled by setting the Z coordinate of the pin to a specific value. In order to set all of the pins in the pin-board to the correct position, a bitmap image can be used. The position of the pin can be set based on the color of the pixel at the appropriate position in the image. When the Windows Azure logo is used to set the Z coordinate of the pins, the following image is generated. The challenge now was to make a cool animation. The Azure logo is fine, but it is static. Using a normal video to animate the pins would not work; the colors in the video would not be the same as the depth of the objects from the camera. In order to simulate the pin board accurately, a series of frames from a depth camera could be used.

Windows Kinect

The Kinect controllers for the X-Box 360 and Windows feature a depth camera. The Kinect SDK for Windows provides a programming interface for Kinect, providing easy access for .NET developers to the Kinect sensors. The Kinect Explorer provided with the Kinect SDK is a great starting point for exploring Kinect from a developer's perspective. Both the X-Box 360 Kinect and the Windows Kinect will work with the Kinect SDK; the Windows Kinect is required for commercial applications, but the X-Box Kinect can be used for hobby projects. The Windows Kinect has the advantage of providing a mode to allow depth capture with objects closer to the camera, which makes for a more accurate depth image for setting the pin positions.

Creating a Depth Field Animation

The depth field animation used to set the positions of the pins in the pin board was created using a modified version of the Kinect Explorer sample application. In order to simulate the pin board accurately, a small section of the depth range from the depth sensor will be used. Any part of the object in front of the depth range will result in a white pixel; anything behind the depth range will be black. Within the depth range the pixels in the image will be set to RGB values from 0,0,0 to 255,255,255. A screen shot of the modified Kinect Explorer application is shown below. The Kinect Explorer sample application was modified to include slider controls that are used to set the depth range that forms the image from the depth stream. This allows the fine tuning of the depth image that is required for simulating the position of the pins in the pin board. The Kinect Explorer was also modified to record a series of images from the depth camera and save them as a sequence of JPEG files that will be used to animate the pins in the animation; the Start and Stop buttons are used to start and stop the image recording. An example of one of the depth images is shown below. Once a series of 2,000 depth images has been captured, the task of creating the animation can begin.

Rendering a Test Frame

In order to test the creation of frames and get an approximation of the time required to render each frame, a test frame was rendered on-premise using PolyRay. The output of the rendering process is shown below. The test frame contained 23,629 primitive shapes, most of which are the spheres and cylinders that are used for the 11,800 or so pins in the pin board.
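Before the test frame statistics, here is a rough sketch of the depth-image-to-pin-position mapping described above, assuming the captured frames are greyscale bitmaps where white means "in front of the depth range". System.Drawing, the helper name and the travel distance are illustration choices, not the author's actual code, and the sign and scale of the Z value would depend on how the scene is set up.

using System.Drawing;   // add a reference to System.Drawing

class PinDepthMapper
{
    // Maximum distance (in scene units) a pin slides out for a fully white pixel; assumed for illustration.
    const double MaxPinTravel = 0.5;

    // Returns the Z coordinate for the pin at grid position (col, row),
    // sampling the depth frame at the matching pixel.
    public static double PinZ(Bitmap depthFrame, int col, int row)
    {
        Color pixel = depthFrame.GetPixel(col, row);
        double brightness = pixel.R / 255.0;   // frames are greyscale, so any single channel will do
        return -MaxPinTravel * brightness;     // white (close) pushes the pin out, black leaves it flush
    }
}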
The 1280x720 image contains 921,600 pixels, but as anti-aliasing was used the number of rays that were calculated was 4,235,777, with 3,478,754,073 object boundaries checked. The test frame of the pin board with the depth field image applied is shown below. The tracing time for the test frame was 4 minutes 27 seconds, which means rendering the 2,000 frames in the animation would take over 148 hours, or a little over 6 days. Although this is much faster than an old 486, waiting almost a week to see the results of an animation would make it challenging for animators to create, view, and refine their animations. It would be much better if the animation could be rendered in less than one hour.

Windows Azure Worker Roles

The cost of creating an on-premise render farm to render animations increases in proportion to the number of servers. The table below shows the cost of servers for creating a render farm, assuming a cost of $500 per server.

Number of Servers    Cost
1                    $500
16                   $8,000
256                  $128,000

As well as the cost of the servers, there would be additional costs for networking, racks etc. Hosting an environment of 256 servers on-premise would require a server room with cooling, and some pretty hefty power cabling. The Windows Azure compute services provide worker roles, which are ideal for performing processor intensive compute tasks. With the scalability available in Windows Azure, a job that takes 256 hours to complete could be performed using different numbers of worker roles. The time and cost of using 1, 16 or 256 worker roles is shown below.

Number of Worker Roles    Render Time    Cost
1                         256 hours      $30.72
16                        16 hours       $30.72
256                       1 hour         $30.72

Using worker roles in Windows Azure provides the same cost for the 256 hour job, irrespective of the number of worker roles used. Provided the compute task can be broken down into many small units, and the worker role compute power can be used effectively, it makes sense to scale the application so that the task is completed quickly, making the results available in a timely fashion. The task of rendering 2,000 frames in an animation is one that can easily be broken down into 2,000 individual pieces, which can be performed by a number of worker roles.

Creating a Render Farm in Windows Azure

The architecture of the render farm is shown in the following diagram. The render farm is a hybrid application with the following components:

- On-Premise
  - Windows Kinect – Used in combination with the Kinect Explorer to create a stream of depth images.
  - Animation Creator – This application uses the depth images from the Kinect sensor to create scene description files for PolyRay. These files are then uploaded to the jobs blob container, and job messages added to the jobs queue.
  - Process Monitor – This application queries the role instance lifecycle table and displays statistics about the render farm environment and render process.
  - Image Downloader – This application polls the image queue and downloads the rendered animation files once they are complete.
- Windows Azure
  - Azure Storage – Queues and blobs are used for the scene description files and completed frames. A table is used to store the statistics about the rendering environment.

The architecture of each worker role is shown below. The worker role is configured to use local storage, which provides file storage on the worker role instance that can be used by the applications to render the image and transform the format of the image.
The service definition for the worker role with the local storage configuration highlighted is shown below.

<?xml version="1.0" encoding="utf-8"?>
<ServiceDefinition name="CloudRay" >
  <WorkerRole name="CloudRayWorkerRole" vmsize="Small">
    <Imports>
    </Imports>
    <ConfigurationSettings>
      <Setting name="DataConnectionString" />
    </ConfigurationSettings>
    <LocalResources>
      <LocalStorage name="RayFolder" cleanOnRoleRecycle="true" />
    </LocalResources>
  </WorkerRole>
</ServiceDefinition>

The two executable programs, PolyRay.exe and DTA.exe, are included in the Azure project, with Copy Always set as the property. PolyRay will take the scene description file and render it to a Truevision TGA file. As the TGA format has not seen much use since the mid 90’s, it is converted to a JPG image using Dave's Targa Animator, another shareware application from the 90’s. Each worker role will use the following process to render the animation frames.

1. The worker process polls the job queue; if a job is available, the scene description file is downloaded from blob storage to local storage.
2. PolyRay.exe is started in a process with the appropriate command line arguments to render the image as a TGA file.
3. DTA.exe is started in a process with the appropriate command line arguments to convert the TGA file to a JPG file.
4. The JPG file is uploaded from local storage to the images blob container.
5. A message is placed on the images queue to indicate a new image is available for download.
6. The job message is deleted from the job queue.
7. The role instance lifecycle table is updated with statistics on the number of frames rendered by the worker role instance, and the CPU time used.

The code for this is shown below. public override void Run() { // Set environment variables string polyRayPath = Path.Combine(Environment.GetEnvironmentVariable("RoleRoot"), PolyRayLocation); string dtaPath = Path.Combine(Environment.GetEnvironmentVariable("RoleRoot"), DTALocation); LocalResource rayStorage = RoleEnvironment.GetLocalResource("RayFolder"); string localStorageRootPath = rayStorage.RootPath; JobQueue jobQueue = new JobQueue("renderjobs"); JobQueue downloadQueue = new JobQueue("renderimagedownloadjobs"); CloudRayBlob sceneBlob = new CloudRayBlob("scenes"); CloudRayBlob imageBlob = new CloudRayBlob("images"); RoleLifecycleDataSource roleLifecycleDataSource = new RoleLifecycleDataSource(); Frames = 0; while (true) { // Get the render job from the queue CloudQueueMessage jobMsg = jobQueue.Get(); if (jobMsg != null) { // Get the file details string sceneFile = jobMsg.AsString; string tgaFile = sceneFile.Replace(".pi", ".tga"); string jpgFile = sceneFile.Replace(".pi", ".jpg"); string sceneFilePath = Path.Combine(localStorageRootPath, sceneFile); string tgaFilePath = Path.Combine(localStorageRootPath, tgaFile); string jpgFilePath = Path.Combine(localStorageRootPath, jpgFile); // Copy the scene file to local storage sceneBlob.DownloadFile(sceneFilePath); // Run the ray tracer.
string polyrayArguments =                 string.Format("\"{0}\" -o \"{1}\" -a 2", sceneFilePath, tgaFilePath);             Process polyRayProcess = new Process();             polyRayProcess.StartInfo.FileName =                 Path.Combine(Environment.GetEnvironmentVariable("RoleRoot"), polyRayPath);             polyRayProcess.StartInfo.Arguments = polyrayArguments;             polyRayProcess.Start();             polyRayProcess.WaitForExit();               // Convert the image             string dtaArguments =                 string.Format(" {0} /FJ /P{1}", tgaFilePath, Path.GetDirectoryName (jpgFilePath));             Process dtaProcess = new Process();             dtaProcess.StartInfo.FileName =                 Path.Combine(Environment.GetEnvironmentVariable("RoleRoot"), dtaPath);             dtaProcess.StartInfo.Arguments = dtaArguments;             dtaProcess.Start();             dtaProcess.WaitForExit();               // Upload the image to blob storage             imageBlob.UploadFile(jpgFilePath);               // Add a download job.             downloadQueue.Add(jpgFile);               // Delete the render job message             jobQueue.Delete(jobMsg);               Frames++;         }         else         {             Thread.Sleep(1000);         }           // Log the worker role activity.         roleLifecycleDataSource.Alive             ("CloudRayWorker", RoleLifecycleDataSource.RoleLifecycleId, Frames);     } }     Monitoring Worker Role Instance Lifecycle In order to get more accurate statistics about the lifecycle of the worker role instances used to render the animation data was tracked in an Azure storage table. The following class was used to track the worker role lifecycles in Azure storage.   public class RoleLifecycle : TableServiceEntity {     public string ServerName { get; set; }     public string Status { get; set; }     public DateTime StartTime { get; set; }     public DateTime EndTime { get; set; }     public long SecondsRunning { get; set; }     public DateTime LastActiveTime { get; set; }     public int Frames { get; set; }     public string Comment { get; set; }       public RoleLifecycle()     {     }       public RoleLifecycle(string roleName)     {         PartitionKey = roleName;         RowKey = Utils.GetAscendingRowKey();         Status = "Started";         StartTime = DateTime.UtcNow;         LastActiveTime = StartTime;         EndTime = StartTime;         SecondsRunning = 0;         Frames = 0;     } }     A new instance of this class is created and added to the storage table when the role starts. It is then updated each time the worker renders a frame to record the total number of frames rendered and the total processing time. These statistics are used be the monitoring application to determine the effectiveness of use of resources in the render farm. Rendering the Animation The Azure solution was deployed to Windows Azure with the service configuration set to 16 worker role instances. This allows for the application to be tested in the cloud environment, and the performance of the application determined. When I demo the application at conferences and user groups I often start with 16 instances, and then scale up the application to the full 256 instances. The configuration to run 16 instances is shown below. 
<?xml version="1.0" encoding="utf-8"?>
<ServiceConfiguration serviceName="CloudRay" xmlns="http://schemas.microsoft.com/ServiceHosting/2008/10/ServiceConfiguration" osFamily="1" osVersion="*">
  <Role name="CloudRayWorkerRole">
    <Instances count="16" />
    <ConfigurationSettings>
      <Setting name="DataConnectionString"
        value="DefaultEndpointsProtocol=https;AccountName=cloudraydata;AccountKey=..." />
    </ConfigurationSettings>
  </Role>
</ServiceConfiguration>

About six minutes after deploying the application the first worker roles become active and start to render the first frames of the animation. The CloudRay Monitor application displays an icon for each worker role instance, with a number indicating the number of frames that the worker role has rendered. The statistics on the left show the number of active worker roles and statistics about the render process. The render time is the time since the first worker role became active; the CPU time is the total amount of processing time used by all worker role instances to render the frames. Five minutes after the first worker role became active, the last of the 16 worker roles activated. By this time the first seven worker roles had each rendered one frame of the animation. With 16 worker roles up and running, it can be seen that one hour and 45 minutes of CPU time has been used to render 32 frames, with a render time of just under 10 minutes. At this rate it would take over 10 hours to render the 2,000 frames of the full animation. In order to complete the animation in under an hour, more processing power will be required. Scaling the render farm from 16 instances to 256 instances is easy using the new management portal. The slider is set to 256 instances, and the configuration saved. We do not need to re-deploy the application, and the 16 instances that are up and running will not be affected. Alternatively, the configuration file for the Azure service could be modified to specify 256 instances.

<?xml version="1.0" encoding="utf-8"?>
<ServiceConfiguration serviceName="CloudRay" xmlns="http://schemas.microsoft.com/ServiceHosting/2008/10/ServiceConfiguration" osFamily="1" osVersion="*">
  <Role name="CloudRayWorkerRole">
    <Instances count="256" />
    <ConfigurationSettings>
      <Setting name="DataConnectionString"
        value="DefaultEndpointsProtocol=https;AccountName=cloudraydata;AccountKey=..." />
    </ConfigurationSettings>
  </Role>
</ServiceConfiguration>

Six minutes after the new configuration has been applied, 75 new worker roles have activated and are processing their first frames.
The frame counts in the server instances indicate that the use of a queue to distribute the workload has been very effective in distributing the load across the 256 worker role instances. The 16 instances that were deployed first have rendered between 11 and 13 frames each, whilst the 240 instances that were added when the application was scaled have rendered between 6 and 9 frames each.

Completed Animation

I’ve uploaded the completed animation to YouTube; a low resolution preview is shown below.
Pin Board Animation Created using Windows Kinect and 256 Windows Azure Worker Roles
The animation can be viewed in 1280x720 resolution at the following link: http://www.youtube.com/watch?v=n5jy6bvSxWc

Effective Use of Resources

According to the CloudRay monitor statistics the animation took 6 days, 7 hours and 22 minutes of CPU time to render, which works out at 152 hours of compute time, rounded up to the nearest hour. As the worker role instances are billed for the full hour, it may have been possible to render the animation using fewer than 256 worker roles. When deciding the optimal usage of resources, the time required to provision and start the worker roles must also be considered. In the demo I started with 16 worker roles, and then scaled the application to 256 worker roles. It would have been more optimal to start the application with maybe 200 worker roles, and utilize the full hour that I was being billed for. This would, however, have prevented showing the ease of scalability of the application. The new management portal displays the CPU usage across the worker roles in the deployment. The average CPU usage across all instances is 93.27%, with over 99% used when all the instances are up and running. This shows that the worker role resources are being used very effectively.

Grid Computing Scenarios

Although I am using this scenario for a hobby project, there are many scenarios where a large amount of compute power is required for a short period of time. Windows Azure provides a great platform for developing these types of grid computing applications, and can work out very cost effective.

- Windows Azure can provide massive compute power, on demand, in a matter of minutes.
- The use of queues to manage the load balancing of jobs between role instances is a simple and effective solution.
- Using a cloud-computing platform like Windows Azure allows proof-of-concept scenarios to be tested and evaluated on a very low budget.
- No charges for inbound data transfer make the uploading of large data sets to Windows Azure Storage services cost effective. (Transaction charges still apply.)

Tips for using Windows Azure for Grid Computing Scenarios

I found the implementation of a render farm using Windows Azure to be fairly simple. I was impressed by the ease of scalability that Azure provides, and by the short time that the application took to scale from 16 to 256 worker role instances. In this case it was around 13 minutes; in other tests it took between 10 and 20 minutes. The following tips may be useful when implementing a grid computing project in Windows Azure.

- Using an Azure Storage queue to load-balance the units of work across multiple worker roles is simple and very effective. The design I have used in this scenario could easily scale to many thousands of worker role instances.
- Windows Azure accounts are typically limited to 20 cores.
  If you need to use more than this, a call to support and a credit card check will be required.
- Be aware of how the billing model works. You will be charged for worker role instances for the full clock hour in which the instance is deployed. Schedule the workload to start just after the clock hour has started.
- Monitor the utilization of the resources you are provisioning, and ensure that you are not paying for worker roles that are idle.
- If you are deploying third party applications to worker roles, you may well run into licensing issues. Purchasing software licenses on a per-processor basis when using hundreds of processors for a short time period would not be cost effective.
- Third party software may also require installation onto the worker roles, which can be accomplished using start-up tasks. Bear in mind that adding a startup task and a possible re-boot will add to the time required for the worker role instance to start and activate. An alternative may be to use a prepared VM and use VM roles.
- Consider using the Windows Azure Autoscaling Application Block (WASABi) to autoscale the worker roles in your application. When using a large number of worker roles, the utilization must be carefully monitored; if the scaling algorithms are not optimal it could get very expensive! A naive illustration of such a scaling heuristic is sketched below.
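As a rough illustration of that last tip, the following sketch estimates a target worker role count from the approximate depth of the render job queue, using the throughput measured above (roughly 38 frames per minute across 256 roles, or about 8.9 frames per worker role per hour). The GetApproximateQueueLength helper and the constants are hypothetical stand-ins, not part of the original solution or of the WASABi API.

using System;

class NaiveRenderFarmScaler
{
    // Throughput observed in the article: ~38 frames/minute across 256 roles,
    // which is roughly 8.9 frames per worker role per hour. Treated here as an assumption.
    const double FramesPerRolePerHour = 8.9;
    const int MinRoles = 2;
    const int MaxRoles = 256;

    // Hypothetical helper; a real implementation would read the approximate
    // message count of the render jobs queue from Azure Storage.
    static int GetApproximateQueueLength()
    {
        return 2000;
    }

    // Number of worker roles needed to drain the queue in about one billable hour.
    static int TargetRoleCount(int pendingFrames)
    {
        int target = (int)Math.Ceiling(pendingFrames / FramesPerRolePerHour);
        return Math.Max(MinRoles, Math.Min(MaxRoles, target));
    }

    static void Main()
    {
        int pending = GetApproximateQueueLength();
        Console.WriteLine(string.Format(
            "Pending frames: {0}, target worker roles: {1}", pending, TargetRoleCount(pending)));
    }
}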

    Read the article

  • Reusing XSL template to be invoked with different relative XPaths

    - by meomaxy
    Here is my contrived example that illustrates what I am attempting to accomplish. I have an input XML file that I wish to flatten for further processing. Input file: <BICYCLES> <BICYCLE> <COLOR>BLUE</COLOR> <WHEELS> <WHEEL> <WHEEL_TYPE>FRONT</WHEEL_TYPE> <FLAT>NO</FLAT> <REFLECTORS> <REFLECTOR> <REFLECTOR_NUM>1</REFLECTOR_NUM> <COLOR>RED</COLOR> <SHAPE>SQUARE</SHAPE> </REFLECTOR> <REFLECTOR> <REFLECTOR_NUM>2</REFLECTOR_NUM> <COLOR>WHITE</COLOR> <SHAPE>ROUND</SHAPE> </REFLECTOR> </REFLECTORS> </WHEEL> <WHEEL> <WHEEL_TYPE>REAR</WHEEL_TYPE> <FLAT>NO</FLAT> </WHEEL> </WHEELS> </BICYCLE> </BICYCLES> The input is a list of <BICYCLE> nodes. Each <BICYCLE> has a <COLOR> and optionally has <WHEELS>. <WHEELS> is a list of <WHEEL> nodes, each of which has a few attributes, and optionally has <REFLECTORS>. <REFLECTORS> is a list of <REFLECTOR> nodes, each of which has a few attributes. The goal is to flatten this XML. This is the XSL I'm using: <xsl:stylesheet version="2.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform" xmlns:fo="http://www.w3.org/1999/XSL/Format" xmlns:xs="http://www.w3.org/2001/XMLSchema" xmlns:fn="http://www.w3.org/2005/xpath-functions"> <xsl:output method="xml" encoding="UTF-8" indent="yes" omit-xml-declaration="yes" xml:space="preserve"/> <xsl:template match="/"> <BICYCLES> <xsl:apply-templates/> </BICYCLES> </xsl:template> <xsl:template match="BICYCLE"> <xsl:choose> <xsl:when test="WHEELS"> <xsl:apply-templates select="WHEELS"/> </xsl:when> <xsl:otherwise> <BICYCLE> <COLOR><xsl:value-of select="COLOR"/></COLOR> <WHEEL_TYPE/> <FLAT/> <REFLECTOR_NUM/> <COLOR/> <SHAPE/> </BICYCLE> </xsl:otherwise> </xsl:choose> </xsl:template> <xsl:template match="WHEELS"> <xsl:apply-templates select="WHEEL"/> </xsl:template> <xsl:template match="WHEEL"> <xsl:choose> <xsl:when test="REFLECTORS"> <xsl:apply-templates select="REFLECTORS"/> </xsl:when> <xsl:otherwise> <BICYCLE> <COLOR><xsl:value-of select="../../COLOR"/></COLOR> <WHEEL_TYPE><xsl:value-of select="WHEEL_TYPE"/></WHEEL_TYPE> <FLAT><xsl:value-of select="FLAT"/></FLAT> <REFLECTOR_NUM/> <COLOR/> <SHAPE/> </BICYCLE> </xsl:otherwise> </xsl:choose> </xsl:template> <xsl:template match="REFLECTORS"> <xsl:apply-templates select="REFLECTOR"/> </xsl:template> <xsl:template match="REFLECTOR"> <BICYCLE> <COLOR><xsl:value-of select="../../../../COLOR"/></COLOR> <WHEEL_TYPE><xsl:value-of select="../../WHEEL_TYPE"/></WHEEL_TYPE> <FLAT><xsl:value-of select="../../FLAT"/></FLAT> <REFLECTOR_NUM><xsl:value-of select="REFLECTOR_NUM"/></REFLECTOR_NUM> <COLOR><xsl:value-of select="COLOR"/></COLOR> <SHAPE><xsl:value-of select="SHAPE"/></SHAPE> </BICYCLE> </xsl:template> </xsl:stylesheet> The output is: <BICYCLES xmlns:fn="http://www.w3.org/2005/xpath-functions" xmlns:fo="http://www.w3.org/1999/XSL/Format" xmlns:xs="http://www.w3.org/2001/XMLSchema"> <BICYCLE> <COLOR>BLUE</COLOR> <WHEEL_TYPE>FRONT</WHEEL_TYPE> <FLAT>NO</FLAT> <REFLECTOR_NUM>1</REFLECTOR_NUM> <COLOR>RED</COLOR> <SHAPE>SQUARE</SHAPE> </BICYCLE> <BICYCLE> <COLOR>BLUE</COLOR> <WHEEL_TYPE>FRONT</WHEEL_TYPE> <FLAT>NO</FLAT> <REFLECTOR_NUM>2</REFLECTOR_NUM> <COLOR>WHITE</COLOR> <SHAPE>ROUND</SHAPE> </BICYCLE> <BICYCLE> <COLOR>BLUE</COLOR> <WHEEL_TYPE>REAR</WHEEL_TYPE> <FLAT>NO</FLAT> <REFLECTOR_NUM/> <COLOR/> <SHAPE/> </BICYCLE> </BICYCLES> What I don't like about this is that I'm outputting the color attribute in several forms: <COLOR><xsl:value-of select="../../../../COLOR"/></COLOR> <COLOR><xsl:value-of select="../../COLOR"/></COLOR> <COLOR><xsl:value-of select="COLOR"/></COLOR> <COLOR/> It 
seems like there ought to be a way to make a named template and invoke it from the various places where it is needed and pass some parameter that represents the path back to the <BICYCLE> node to which it refers. Is there a way to clean this up, say with a named template for bicycle fields, for wheel fields and for reflector fields? In the real world example this is based on, there are many more attributes to a "bicycle" than just color, and I want to make this XSL easy to change to include or exclude fields without having to change the XSL in multiple places.

    Read the article

  • Popping UIView crashes app

    - by Adun
    I'm basically pushing a UIView from a UITableViewController and all it contains is a UIWebView. However when I remove the UIView to return back to the UITableView the app crashes. - (void)tableView:(UITableView *)tableView didSelectRowAtIndexPath:(NSIndexPath *)indexPath { // Navigation logic may go here. Create and push another view controller. if (indexPath.row == websiteCell) { NSString *urlPath = [NSString stringWithFormat:@"http://%@", exhibitor.website]; WebViewController *webViewController = [[WebViewController alloc] initWithURLString:urlPath]; // Pass the selected object to the new view controller. [self.parentViewController presentModalViewController:webViewController animated:YES]; [webViewController release]; } } If I comment out the [webViewController release] the app doesn't crash, but I know that this would be a leak. Below is the code for the Web Browser: #import "WebViewController.h" @implementation WebViewController @synthesize webBrowserView; @synthesize urlValue; @synthesize toolBar; @synthesize spinner; @synthesize loadUrl; -(id)initWithURLString:(NSString *)urlString { if (self = [super init]) { urlValue = urlString; } return self; } #pragma mark WebView Controls - (void)goBack { [webBrowserView goBack]; } - (void)goForward { [webBrowserView goForward]; } - (void)reload { [webBrowserView reload]; } - (void)closeBrowser { [self.parentViewController dismissModalViewControllerAnimated:YES]; } #pragma end // Implement viewDidLoad to do additional setup after loading the view, typically from a nib. - (void)viewDidLoad { [super viewDidLoad]; CGRect contentRect = self.view.bounds; //NSLog(@"%f", contentRect.size.height); float webViewHeight = contentRect.size.height - 44.0f; // navBar = 44 float toolBarHeight = contentRect.size.height - webViewHeight; // navigation bar UINavigationBar *navBar = [[[UINavigationBar alloc] initWithFrame:CGRectMake(0, 20, contentRect.size.width, 44)] autorelease]; navBar.delegate = self; UIBarButtonItem *doneButton = [[[UIBarButtonItem alloc] initWithTitle:@"Done" style:UIBarButtonItemStyleDone target:nil action:@selector(closeBrowser)] autorelease]; UINavigationItem *item = [[[UINavigationItem alloc] initWithTitle:@"CEDIA10"] autorelease]; item.leftBarButtonItem = doneButton; [navBar pushNavigationItem:item animated:NO]; [self.view addSubview:navBar]; // web browser webBrowserView = [[UIWebView alloc] initWithFrame:CGRectMake(0, 64, contentRect.size.width, webViewHeight)]; webBrowserView.delegate = self; webBrowserView.autoresizingMask = UIViewAutoresizingFlexibleWidth | UIViewAutoresizingFlexibleHeight; webBrowserView.scalesPageToFit = YES; [self.view addSubview:webBrowserView]; // buttons UIBarButtonItem *backButton = [[[UIBarButtonItem alloc] initWithImage:[UIImage imageNamed:@"arrowleft.png"] style:UIBarButtonItemStylePlain target:self action:@selector(goBack)] autorelease]; UIBarButtonItem *fwdButton = [[[UIBarButtonItem alloc] initWithImage:[UIImage imageNamed:@"arrowright.png"] style:UIBarButtonItemStylePlain target:self action:@selector(goForward)] autorelease]; UIBarButtonItem *refreshButton = [[[UIBarButtonItem alloc] initWithBarButtonSystemItem:UIBarButtonSystemItemRefresh target:self action:@selector(reload)] autorelease]; UIBarButtonItem *flexSpace = [[[UIBarButtonItem alloc] initWithBarButtonSystemItem:UIBarButtonSystemItemFlexibleSpace target:nil action:nil] autorelease]; UIBarButtonItem *fixSpace = [[[UIBarButtonItem alloc] initWithBarButtonSystemItem:UIBarButtonSystemItemFixedSpace target:nil action:nil] autorelease]; 
[fixSpace setWidth: 40.0f]; spinner = [[[UIActivityIndicatorView alloc] initWithActivityIndicatorStyle:UIActivityIndicatorViewStyleWhite] autorelease]; [spinner startAnimating]; UIBarButtonItem *loadingIcon = [[[UIBarButtonItem alloc] initWithCustomView:spinner] autorelease]; NSArray *toolBarButtons = [[NSArray alloc] initWithObjects: fixSpace, backButton, fixSpace, fwdButton, flexSpace, loadingIcon, flexSpace, refreshButton, nil]; // toolbar toolBar = [[UIToolbar alloc] initWithFrame:CGRectMake(0, webViewHeight, contentRect.size.width, toolBarHeight)]; toolBar.autoresizingMask = UIViewAutoresizingFlexibleTopMargin | UIViewAutoresizingFlexibleWidth; toolBar.items = toolBarButtons; [self.view addSubview:toolBar]; // load the request NSURL *requestString = [NSURL URLWithString:urlValue]; [webBrowserView loadRequest:[NSURLRequest requestWithURL: requestString]]; [toolBarButtons release]; } - (void)viewWillDisappear { if ([webBrowserView isLoading]) { [webBrowserView stopLoading]; webBrowserView.delegate = nil; } } #pragma mark UIWebView - (void)webViewDidStartLoad:(UIWebView*)webView { [spinner startAnimating]; } - (void)webViewDidFinishLoad:(UIWebView*)webView { [spinner stopAnimating]; } - (BOOL)webView:(UIWebView *)webView shouldStartLoadWithRequest:(NSURLRequest *)request navigationType:(UIWebViewNavigationType)navigationType { loadUrl = [[request URL] retain]; if ([[loadUrl scheme] isEqualToString: @"mailto"]) { UIAlertView *alert = [[UIAlertView alloc] initWithTitle:@"CEDIA10" message:@"Do you want to open Mail and exit AREC10?" delegate:self cancelButtonTitle:@"No" otherButtonTitles:@"Yes",nil]; [alert show]; [alert release]; return NO; } [loadUrl release]; return YES; } - (void)webView:(UIWebView *)webView didFailLoadWithError:(NSError *)error { [spinner stopAnimating]; if (error.code == -1009) { // no internet connection UIAlertView *alert = [[UIAlertView alloc] initWithTitle:@"CEDIA10" message:@"You need an active Internet connection." delegate:self cancelButtonTitle:@"OK" otherButtonTitles:nil]; [alert show]; [alert release]; } } #pragma mark UIAlertView - (void)alertView:(UIAlertView *)alertView clickedButtonAtIndex:(NSInteger)buttonIndex { if (buttonIndex == 1) { [[UIApplication sharedApplication] openURL:loadUrl]; [loadUrl release]; } } - (void)didReceiveMemoryWarning { // Releases the view if it doesn't have a superview. [super didReceiveMemoryWarning]; // Release any cached data, images, etc that aren't in use. [webBrowserView release]; [urlValue release]; [toolBar release]; [spinner release]; [loadUrl release]; webBrowserView = nil; webBrowserView.delegate = nil; urlValue = nil; toolBar = nil; spinner = nil; loadUrl = nil; } - (void)viewDidUnload { // Release any retained subviews of the main view. // e.g. 
self.myOutlet = nil; } - (void)dealloc { [webBrowserView release]; [urlValue release]; [toolBar release]; [spinner release]; [loadUrl release]; webBrowserView.delegate = nil; urlValue = nil; toolBar = nil; spinner = nil; loadUrl = nil; [super dealloc]; } @end Below this is the crash log that I am getting: Date/Time: 2010-05-13 11:58:20.023 +1000 OS Version: iPhone OS 3.1.3 (7E18) Report Version: 104 Exception Type: EXC_CRASH (SIGABRT) Exception Codes: 0x00000000, 0x00000000 Crashed Thread: 0 Thread 0 Crashed: 0 libSystem.B.dylib 0x00090b2c __kill + 8 1 libSystem.B.dylib 0x00090b1a kill + 4 2 libSystem.B.dylib 0x00090b0e raise + 10 3 libSystem.B.dylib 0x000a7e34 abort + 36 4 libstdc++.6.dylib 0x00066390 __gnu_cxx::__verbose_terminate_handler() + 588 5 libobjc.A.dylib 0x00008898 _objc_terminate + 160 6 libstdc++.6.dylib 0x00063a84 __cxxabiv1::__terminate(void (*)()) + 76 7 libstdc++.6.dylib 0x00063afc std::terminate() + 16 8 libstdc++.6.dylib 0x00063c24 __cxa_throw + 100 9 libobjc.A.dylib 0x00006e54 objc_exception_throw + 104 10 CoreFoundation 0x00095bf6 -[NSObject doesNotRecognizeSelector:] + 106 11 CoreFoundation 0x0001ab12 ___forwarding___ + 474 12 CoreFoundation 0x00011838 _CF_forwarding_prep_0 + 40 13 QuartzCore 0x0000f448 CALayerCopyRenderLayer + 24 14 QuartzCore 0x0000f048 CA::Context::commit_layer(_CALayer*, unsigned int, unsigned int, void*) + 100 15 QuartzCore 0x0000ef34 CALayerCommitIfNeeded + 336 16 QuartzCore 0x0000eedc CALayerCommitIfNeeded + 248 17 QuartzCore 0x00011ee8 CA::Context::commit_root(void*, void*) + 52 18 QuartzCore 0x00011e80 x_hash_table_foreach + 64 19 QuartzCore 0x00011e2c CA::Transaction::foreach_root(void (*)(void*, void*), void*) + 40 20 QuartzCore 0x0000bb68 CA::Context::commit_transaction(CA::Transaction*) + 1068 21 QuartzCore 0x0000b46c CA::Transaction::commit() + 276 22 QuartzCore 0x000135d4 CA::Transaction::observer_callback(__CFRunLoopObserver*, unsigned long, void*) + 84 23 CoreFoundation 0x0000f82a __CFRunLoopDoObservers + 466 24 CoreFoundation 0x00057340 CFRunLoopRunSpecific + 1812 25 CoreFoundation 0x00056c18 CFRunLoopRunInMode + 44 26 GraphicsServices 0x000041c0 GSEventRunModal + 188 27 UIKit 0x00003c28 -[UIApplication _run] + 552 28 UIKit 0x00002228 UIApplicationMain + 960 29 CEDIA10 0x00002e16 main (main.m:14) 30 CEDIA10 0x00002db8 start + 32 Any ideas on why the app is crashing?

    Read the article

  • getelementbyid does not work in firefox

    - by gaurab
    hi, this below mentioned code works perfect in internet explorer but not in firefox... i get an error in line in firefox: document.getElementById("supplier_no").value= values_array[0]; that getElementById returns null. how to solve the problem? var winName; //variable for the popup window var g_return_destination = null ; //variable to track where the data gets sent back to. // Set the value in the original pages text box. function f_set_home_value( as_Value ) { if (document.getElementById(g_return_destination[0]).name == "netbank_supplier_name_info" ) { //clear the old values for (selnum = 1; selnum <= 5; selnum++) { document.getElementById("expense_account"+selnum).value = ""; document.getElementById("expense_account_name"+selnum).value = ""; document.getElementById("expense_vat_flag"+selnum).value = "off"; document.getElementById("expense_vat_flag"+selnum).checked = ""; document.getElementById("expense_vat_amount"+selnum).value = ""; document.getElementById("expense_vat_code"+selnum).value = ""; document.getElementById("expense_period"+selnum).value = ""; document.getElementById("expense_date"+selnum).value = ""; if (selnum!=1) {//these are sometimes defaulted in, and in any case you will always have line1 document.getElementById("expense_more_dept"+selnum).value = ""; document.getElementById("expense_more_prj"+selnum).value = ""; document.getElementById("expense_more_subj"+selnum).value = ""; } document.getElementById("expense_amount"+selnum).value = ""; } var values_array = as_Value[0].split("!"); document.getElementById("supplier_no").value= values_array[0]; document.getElementById("supplier_bankAccount_no").value= values_array[1]; str = values_array[2] ; str = str.split(";sp;").join(" "); document.getElementById("default_expense_account").value= str; document.getElementById("expense_account1").value= str; document.getElementById("expense_more_sok1").disabled= false; str = values_array[3] ; str = str.split(";sp;").join(" "); document.getElementById("payment_term").value= str; strPeriod = calcPeriod(str,document.getElementById("due_date").value); document.getElementById("expense_period1").value = (strPeriod); strExpenseDate = calcExpenseDate(str,document.getElementById("due_date").value); document.getElementById("expense_date1").value = (strExpenseDate); str = values_array[4] ; str = str.split(";sp;").join(" "); document.getElementById("expense_account_name1").value= str; str = values_array[5] ; str = str.split(";sp;").join(" "); document.getElementById("expense_vat_code1").value= str; if (str == 0) { document.getElementById("expense_vat_flag1").checked= ''; document.getElementById("expense_vat_flag1").disabled= true; }else{ document.getElementById("expense_vat_flag1").checked= 'yes'; document.getElementById("expense_vat_flag1").value= 'on'; document.getElementById("expense_vat_flag1").disabled= false; } str = values_array[6] ; str = str.split(";sp;").join(" "); document.getElementById("supplier_name").value= str; var str = values_array[7]; str = str.split(";sp;").join(" "); str = str.split("&cr;").join("\r"); document.getElementById("netbank_supplier_name_info").value= str; strx = justNumberNF(document.getElementById("amount").value); document.all["expense_vat_amount1"].value = NetbankToDollarsAndCents(strx * (24/124)) ; document.getElementById("amount").value=NetbankToDollarsAndCents(strx); document.getElementById("expense_amount1").value = document.getElementById("amount").value; document.getElementById("expense_amount2").value = ''; 
document.getElementById("expense_account2").value= ''; //document.getElementById("expense_vat_flag2").value= ''; document.getElementById("expense_vat_amount2").value= ''; document.getElementById("expense_amount3").value = ''; document.getElementById("expense_account3").value= ''; //.getElementById("expense_vat_flag3").value= ''; document.getElementById("expense_vat_amount3").value= ''; document.getElementById("expense_amount4").value = ''; document.getElementById("expense_account4").value= ''; //document.getElementById("expense_vat_flag4").value= ''; document.getElementById("expense_vat_amount4").value= ''; document.getElementById("expense_amount5").value = ''; document.getElementById("expense_account5").value= ''; //document.getElementById("expense_vat_flag5").value= ''; document.getElementById("expense_vat_amount5").value= ''; str = values_array[8] ; str = str.split(";sp;").join(" "); if (str=="2"){ document.frmName.ButtonSelPeriodisering1.disabled=false; document.frmName.ButtonSelPeriodisering1.click(); } winName.close(); } } //Pass Data Back to original window function f_popup_return(as_Value) { var l_return = new Array(1); l_return[0] = as_Value; f_set_home_value(l_return); } function justNumberNF(val){ val = (val==null) ? 0 : val; // check if a number, otherwise try taking out non-number characters. if (isNaN(val)) { var newVal = parseFloat(val.replace(/[^\d\.\-]/g, '.')); // check if still not a number. Might be undefined, '', etc., so just replace with 0. return (isNaN(newVal) ? 0 : newVal); } // return 0 in place of infinite numbers. else if (!isFinite(val)) { return 0; } return val; }; function NetbankToDollarsAndCents(n) { var s = "" + Math.round(n * 100) / 100 ; var i = s.indexOf('.') ; if (i < 0) {return s + ",00" } ; var t = s.substring(0, i + 1) + s.substring(i + 1, i + 3) ; if (i + 2 == s.length) {t += "0"} ; return t.replace('.',',') ; }

    Read the article

  • Delphi hook to redirect to different ip

    - by Chris
    What is the best way to redirect ANY browser to a different ip for specific sites? For example if the user will type www.facebook.com in any browser he will be redirected to 127.0.0.1. Also the same should happen if he will type 66.220.146.11. What I have until now is this: using the winpkfilter I am able to intercept all the traffic on port 80, with type(in or out), source ip, destination ip and packet. My problem is to modify somehow the packet so the browser will be redirected. This is the code that i have right now: program Pass; {$APPTYPE CONSOLE} uses SysUtils, Windows, Winsock, winpkf, iphlp; var iIndex, counter : DWORD; hFilt : THANDLE; Adapts : TCP_AdapterList; AdapterMode : ADAPTER_MODE; Buffer, ParsedBuffer : INTERMEDIATE_BUFFER; ReadRequest : ETH_REQUEST; hEvent : THANDLE; hAdapter : THANDLE; pEtherHeader : TEtherHeaderPtr; pIPHeader : TIPHeaderPtr; pTcpHeader : TTCPHeaderPtr; pUdpHeader : TUDPHeaderPtr; SourceIP, DestIP : TInAddr; thePacket : PChar; f : TextFile; SourceIpString, DestinationIpString : string; SourceName, DestinationName : string; function IPAddrToName(IPAddr : string) : string; var SockAddrIn : TSockAddrIn; HostEnt : PHostEnt; WSAData : TWSAData; begin WSAStartup($101, WSAData); SockAddrIn.sin_addr.s_addr := inet_addr(PChar(IPAddr)); HostEnt := gethostbyaddr(@SockAddrIn.sin_addr.S_addr, 4, AF_INET); if HostEnt < nil then begin result := StrPas(Hostent^.h_name) end else begin result := ''; end; end; procedure ReleaseInterface(); begin // Restore default mode AdapterMode.dwFlags := 0; AdapterMode.hAdapterHandle := hAdapter; SetAdapterMode(hFilt, @AdapterMode); // Set NULL event to release previously set event object SetPacketEvent(hFilt, hAdapter, 0); // Close Event if hEvent < 0 then CloseHandle(hEvent); // Close driver object CloseFilterDriver(hFilt); // Release NDISAPI FreeNDISAPI(); end; begin // Check the number of parameters if ParamCount() < 2 then begin Writeln('Command line syntax:'); Writeln(' PassThru.exe index num'); Writeln(' index - network interface index.'); Writeln(' num - number or packets to filter'); Writeln('You can use ListAdapters to determine correct index.'); Exit; end; // Initialize NDISAPI InitNDISAPI(); // Create driver object hFilt := OpenFilterDriver('NDISRD'); if IsDriverLoaded(hFilt) then begin // Get parameters from command line iIndex := StrToInt(ParamStr(1)); counter := StrToInt(ParamStr(2)); // Set exit procedure ExitProcessProc := ReleaseInterface; // Get TCP/IP bound interfaces GetTcpipBoundAdaptersInfo(hFilt, @Adapts); // Check paramer values if iIndex > Adapts.m_nAdapterCount then begin Writeln('There is no network interface with such index on this system.'); Exit; end; hAdapter := Adapts.m_nAdapterHandle[iIndex]; AdapterMode.dwFlags := MSTCP_FLAG_SENT_TUNNEL or MSTCP_FLAG_RECV_TUNNEL; AdapterMode.hAdapterHandle := hAdapter; // Create notification event hEvent := CreateEvent(nil, TRUE, FALSE, nil); if hEvent <> 0 then if SetPacketEvent(hFilt, hAdapter, hEvent) <> 0 then begin // Initialize request ReadRequest.EthPacket.Buffer := @Buffer; ReadRequest.hAdapterHandle := hAdapter; SetAdapterMode(hFilt, @AdapterMode); counter := 0; //while counter <> 0 do while true do begin WaitForSingleObject(hEvent, INFINITE); while ReadPacket(hFilt, @ReadRequest) <> 0 do begin //dec(counter); pEtherHeader := TEtherHeaderPtr(@Buffer.m_IBuffer); if ntohs(pEtherHeader.h_proto) = ETH_P_IP then begin pIPHeader := TIPHeaderPtr(Integer(pEtherHeader) + SizeOf(TEtherHeader)); SourceIP.S_addr := pIPHeader.SourceIp; DestIP.S_addr := 
pIPHeader.DestIp; if pIPHeader.Protocol = IPPROTO_TCP then begin pTcpHeader := TTCPHeaderPtr(Integer(pIPHeader) + (pIPHeader.VerLen and $F) * 4); if (pTcpHeader.SourcePort = htons(80)) or (pTcpHeader.DestPort = htons(80)) then begin inc(counter); if Buffer.m_dwDeviceFlags = PACKET_FLAG_ON_SEND then Writeln(counter, ') - MSTCP --> Interface') else Writeln(counter, ') - Interface --> MSTCP'); Writeln(' Packet size = ', Buffer.m_Length); Writeln(Format(' IP %.3u.%.3u.%.3u.%.3u --> %.3u.%.3u.%.3u.%.3u PROTOCOL: %u', [byte(SourceIP.S_un_b.s_b1), byte(SourceIP.S_un_b.s_b2), byte(SourceIP.S_un_b.s_b3), byte(SourceIP.S_un_b.s_b4), byte(DestIP.S_un_b.s_b1), byte(DestIP.S_un_b.s_b2), byte(DestIP.S_un_b.s_b3), byte(DestIP.S_un_b.s_b4), byte(pIPHeader.Protocol)] )); Writeln(Format(' TCP SRC PORT: %d DST PORT: %d', [ntohs(pTcpHeader.SourcePort), ntohs(pTcpHeader.DestPort)])); //get the data thePacket := pchar(pEtherHeader) + (sizeof(TEtherHeaderPtr) + pIpHeader.VerLen * 4 + pTcpHeader.Offset * 4); { SourceIpString := IntToStr(byte(SourceIP.S_un_b.s_b1)) + '.' + IntToStr(byte(SourceIP.S_un_b.s_b2)) + '.' + IntToStr(byte(SourceIP.S_un_b.s_b3)) + '.' + IntToStr(byte(SourceIP.S_un_b.s_b4)); DestinationIpString := IntToStr(byte(DestIP.S_un_b.s_b1)) + '.' + IntToStr(byte(DestIP.S_un_b.s_b2)) + '.' + IntToStr(byte(DestIP.S_un_b.s_b3)) + '.' + IntToStr(byte(DestIP.S_un_b.s_b4)); } end; end; end; // if ntohs(pEtherHeader.h_proto) = ETH_P_RARP then // Writeln(' Reverse Addr Res packet'); // if ntohs(pEtherHeader.h_proto) = ETH_P_ARP then // Writeln(' Address Resolution packet'); //Writeln('__'); if Buffer.m_dwDeviceFlags = PACKET_FLAG_ON_SEND then // Place packet on the network interface SendPacketToAdapter(hFilt, @ReadRequest) else // Indicate packet to MSTCP SendPacketToMstcp(hFilt, @ReadRequest); { if counter = 0 then begin Writeln('Filtering complete'); readln; break; end; } end; ResetEvent(hEvent); end; end; end; end.
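    Not a full answer, but to make the "modify somehow the packet" part concrete: at the IP level a redirect of this kind usually means overwriting the destination address in the IPv4 header and recomputing the affected checksums before handing the packet back to the driver. Below is a rough sketch of just that header surgery, written in C++ rather than Delphi purely for illustration; the offsets follow the standard IPv4 header layout, and the TCP checksum (which covers a pseudo-header containing both addresses) would need the same treatment. Treat it as a sketch of the mechanics, not drop-in winpkfilter code.

    #include <cstdint>
    #include <cstring>
    #include <arpa/inet.h>   // htonl; on Windows the equivalent lives in winsock2.h

    // Standard IPv4 header checksum: one's-complement sum of the header's
    // 16-bit words, computed with the checksum field itself zeroed.
    static uint16_t ip_checksum(const uint8_t* hdr, size_t len) {
        uint32_t sum = 0;
        for (size_t i = 0; i + 1 < len; i += 2)
            sum += (hdr[i] << 8) | hdr[i + 1];
        while (sum >> 16)
            sum = (sum & 0xFFFF) + (sum >> 16);
        return static_cast<uint16_t>(~sum);
    }

    // Rewrite the destination address of a captured IPv4 packet in place.
    // `ip` points at the first byte of the IP header, i.e. just past the
    // 14-byte Ethernet header in the intercepted buffer.
    void redirect_destination(uint8_t* ip, uint32_t new_dst_host_order) {
        const size_t ihl = (ip[0] & 0x0F) * 4;          // header length in bytes
        const uint32_t dst = htonl(new_dst_host_order); // e.g. 0x7F000001 for 127.0.0.1
        std::memcpy(ip + 16, &dst, 4);                  // destination address sits at offset 16
        ip[10] = 0; ip[11] = 0;                         // zero the old header checksum
        const uint16_t sum = ip_checksum(ip, ihl);
        ip[10] = static_cast<uint8_t>(sum >> 8);
        ip[11] = static_cast<uint8_t>(sum & 0xFF);
        // Replies will now come from 127.0.0.1, so the return traffic (and the
        // TCP checksum) has to be handled as well for the browser to be fooled.
    }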

    Read the article

  • Storing session data in MySQL using PHP is not retrieving the data properly from the tables.

    - by Ronedog
    I have a problem retrieving some data from the $_SESSION using php and mysql. I've commented out the line in php.ini that tells the server to use the "file" to store the session info so my database will be used. I have a class that I use to write the information to the database and its working fine. When the user passes their credentials the class gets instantiated and the $_SESSION vars get set, then the user gets redirected to the index page. The index.php page includes the file where the db session class is, which when instantiated calles session_start() and the session variables should be in $_SESSION, but when I do var_dump($_SESSION) there is nothing in the array. However, when I look at the data in mysql, all the session information is in there. Its acting like session_start() has not been called, but by instantiating the class it is. Any idea what could be wrong? Here's the HTML: <?php include_once "classes/phpsessions_db/class.dbsession.php"; //used for sessions var_dump($_SESSION); ?> <html> . . . </html> Here's the dbsession class: <?php error_reporting(E_ALL); class dbSession { function dbSession($gc_maxlifetime = "", $gc_probability = "", $gc_divisor = "") { // if $gc_maxlifetime is specified and is an integer number if ($gc_maxlifetime != "" && is_integer($gc_maxlifetime)) { // set the new value @ini_set('session.gc_maxlifetime', $gc_maxlifetime); } // if $gc_probability is specified and is an integer number if ($gc_probability != "" && is_integer($gc_probability)) { // set the new value @ini_set('session.gc_probability', $gc_probability); } // if $gc_divisor is specified and is an integer number if ($gc_divisor != "" && is_integer($gc_divisor)) { // set the new value @ini_set('session.gc_divisor', $gc_divisor); } // get session lifetime $this->sessionLifetime = ini_get("session.gc_maxlifetime"); //Added by AARON. cancel the session's auto start,important, without this the session var's don't show up on next pg. 
session_write_close(); // register the new handler session_set_save_handler( array(&$this, 'open'), array(&$this, 'close'), array(&$this, 'read'), array(&$this, 'write'), array(&$this, 'destroy'), array(&$this, 'gc') ); register_shutdown_function('session_write_close'); // start the session @session_start(); } function stop() { $new_sess_id = $this->regenerate_id(true); session_unset(); session_destroy(); return $new_sess_id; } function regenerate_id($return_val=false) { // saves the old session's id $oldSessionID = session_id(); // regenerates the id // this function will create a new session, with a new id and containing the data from the old session // but will not delete the old session session_regenerate_id(); // because the session_regenerate_id() function does not delete the old session, // we have to delete it manually //$this->destroy($oldSessionID); //ADDED by aaron // returns the new session id if($return_val) { return session_id(); } } function open($save_path, $session_name) { // global $gf; // $gf->debug_this($gf, "GF: Opening Session"); // change the next values to match the setting of your mySQL database $mySQLHost = "localhost"; $mySQLUsername = "user"; $mySQLPassword = "pass"; $mySQLDatabase = "sessions"; $link = mysql_connect($mySQLHost, $mySQLUsername, $mySQLPassword); if (!$link) { die ("Could not connect to database!"); } $dbc = mysql_select_db($mySQLDatabase, $link); if (!$dbc) { die ("Could not select database!"); } return true; } function close() { mysql_close(); return true; } function read($session_id) { $result = @mysql_query(" SELECT session_data FROM session_data WHERE session_id = '".$session_id."' AND http_user_agent = '".$_SERVER["HTTP_USER_AGENT"]."' AND session_expire > '".time()."' "); // if anything was found if (is_resource($result) && @mysql_num_rows($result) > 0) { // return found data $fields = @mysql_fetch_assoc($result); // don't bother with the unserialization - PHP handles this automatically return unserialize($fields["session_data"]); } // if there was an error return an empty string - this HAS to be an empty string return ""; } function write($session_id, $session_data) { // global $gf; // first checks if there is a session with this id $result = @mysql_query(" SELECT * FROM session_data WHERE session_id = '".$session_id."' "); // if there is if (@mysql_num_rows($result) > 0) { // update the existing session's data // and set new expiry time $result = @mysql_query(" UPDATE session_data SET session_data = '".serialize($session_data)."', session_expire = '".(time() + $this->sessionLifetime)."' WHERE session_id = '".$session_id."' "); // if anything happened if (@mysql_affected_rows()) { // return true return true; } } else // if this session id is not in the database { // $gf->debug_this($gf, "inside dbSession, trying to write to db because session id was NOT in db"); $sql = " INSERT INTO session_data ( session_id, http_user_agent, session_data, session_expire ) VALUES ( '".serialize($session_id)."', '".$_SERVER["HTTP_USER_AGENT"]."', '".$session_data."', '".(time() + $this->sessionLifetime)."' ) "; // insert a new record $result = @mysql_query($sql); // if anything happened if (@mysql_affected_rows()) { // return an empty string return ""; } } // if something went wrong, return false return false; } function destroy($session_id) { // deletes the current session id from the database $result = @mysql_query(" DELETE FROM session_data WHERE session_id = '".$session_id."' "); // if anything happened if (@mysql_affected_rows()) { // return true 
return true; } // if something went wrong, return false return false; } function gc($maxlifetime) { // it deletes expired sessions from database $result = @mysql_query(" DELETE FROM session_data WHERE session_expire < '".(time() - $maxlifetime)."' "); } } //End of Class $session = new dbsession(); ?>

    Read the article

  • Generic Aggregation of C++ Objects by Attribute When Attribute Name is Unknown at Runtime

    - by stretch
    I'm currently implementing a system with a number of class's representing objects such as client, business, product etc. Standard business logic. As one might expect each class has a number of standard attributes. I have a long list of essentially identical requirements such as: the ability to retrieve all business' whose industry is manufacturing. the ability to retrieve all clients based in London Class business has attribute sector and client has attribute location. Clearly this a relational problem and in pseudo SQL would look something like: SELECT ALL business in business' WHERE sector == manufacturing Unfortunately plugging into a DB is not an option. What I want to do is have a single generic aggregation function whose signature would take the form: vector<generic> genericAggregation(class, attribute, value); Where class is the class of object I want to aggregate, attribute and value being the class attribute and value of interest. In my example I've put vector as return type, but this wouldn't work. Probably better to declare a vector of relevant class type and pass it as an argument. But this isn't the main problem. How can I accept arguments in string form for class, attribute and value and then map these in a generic object aggregation function? Since it's rude not to post code, below is a dummy program which creates a bunch of objects of imaginatively named classes. Included is a specific aggregation function which returns a vector of B objects whose A object is equal to an id specified at the command line e.g. .. $ ./aggregations 5 which returns all B's whose A objects 'i' attribute is equal to 5. See below: #include <iostream> #include <cstring> #include <sstream> #include <vector> using namespace std; //First imaginativly names dummy class class A { private: int i; double d; string s; public: A(){} A(int i, double d, string s) { this->i = i; this->d = d; this->s = s; } ~A(){} int getInt() {return i;} double getDouble() {return d;} string getString() {return s;} }; //second imaginativly named dummy class class B { private: int i; double d; string s; A *a; public: B(int i, double d, string s, A *a) { this->i = i; this->d = d; this->s = s; this->a = a; } ~B(){} int getInt() {return i;} double getDouble() {return d;} string getString() {return s;} A* getA() {return a;} }; //Containers for dummy class objects vector<A> a_vec (10); vector<B> b_vec;//100 //Util function, not important.. string int2string(int number) { stringstream ss; ss << number; return ss.str(); } //Example function that returns a new vector containing on B objects //whose A object i attribute is equal to 'id' vector<B> getBbyA(int id) { vector<B> result; for(int i = 0; i < b_vec.size(); i++) { if(b_vec.at(i).getA()->getInt() == id) { result.push_back(b_vec.at(i)); } } return result; } int main(int argc, char** argv) { //Create some A's and B's, each B has an A... //Each of the 10 A's are associated with 10 B's. for(int i = 0; i < 10; ++i) { A a(i, (double)i, int2string(i)); a_vec.at(i) = a; for(int j = 0; j < 10; j++) { B b((i * 10) + j, (double)j, int2string(i), &a_vec.at(i)); b_vec.push_back(b); } } //Got some objects so lets do some aggregation //Call example aggregation function to return all B objects //whose A object has i attribute equal to argv[1] vector<B> result = getBbyA(atoi(argv[1])); //If some B's were found print them, else don't... 
if(result.size() != 0) { for(int i = 0; i < result.size(); i++) { cout << result.at(i).getInt() << " " << result.at(i).getA()->getInt() << endl; } } else { cout << "No B's had A's with attribute i equal to " << argv[1] << endl; } return 0; } Compile with: g++ -o aggregations aggregations.cpp If you wish :) Instead of implementing a separate aggregation function (i.e. getBbyA() in the example) I'd like to have a single generic aggregation function which accounts for all possible class attribute pairs such that all aggregation requirements are met.. and in the event additional attributes are added later, or additional aggregation requirements, these will automatically be accounted for. So there's a few issues here but the main one I'm seeking insight into is how to map a runtime argument to a class attribute. I hope I've provided enough detail to adequately describe what I'm trying to do...
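    One common way to get the "runtime string to attribute" mapping without reflection is to register an accessor per attribute and keep a single generic filter. The sketch below assumes a C++11 compiler (std::function and lambdas); the names AttrMap and aggregate are made up for illustration, and dispatching on the class name itself would still need its own lookup, e.g. one registry per class.

    #include <functional>
    #include <map>
    #include <string>
    #include <vector>

    // Per-class registry: attribute name -> accessor that renders that attribute
    // of an object as a string, so it can be compared with the value argument.
    template <typename T>
    using AttrMap = std::map<std::string, std::function<std::string(const T&)>>;

    // One generic filter covering every registered attribute of class T.
    template <typename T>
    std::vector<T> aggregate(const std::vector<T>& items, const AttrMap<T>& attrs,
                             const std::string& attribute, const std::string& value) {
        std::vector<T> out;
        auto it = attrs.find(attribute);
        if (it == attrs.end()) return out;        // unknown attribute name
        for (const T& item : items)
            if (it->second(item) == value)
                out.push_back(item);
        return out;
    }

    // Registration for the B class from the question (accessors taken from the
    // posted code; the getters would need to be declared const for this to compile):
    //   AttrMap<B> bAttrs = {
    //       {"s",   [](const B& b) { return b.getString(); }},
    //       {"a_i", [](const B& b) { return std::to_string(b.getA()->getInt()); }},
    //   };
    //   std::vector<B> hits = aggregate(b_vec, bAttrs, "a_i", argv[1]);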

    Read the article

  • How would you debug this JavaScript problem?

    - by pedalpete
    I've been travelling and developing for the past few weeks. The site I'm developing was running well. Then, the other day, I connected to a network and the page 'looked' fine, but it turned out the JavaScript wasn't running. I checked Firebug and there were no errors. I suspected that maybe a script hadn't loaded (I'm using the Google-hosted jQuery and jQuery UI, and I also load the Google Maps API and fbconnect), but if the issue were one of those scripts failing to load I would expect an error, and yet there was nothing. Thinking maybe I hadn't connected properly, I reconnected to the network and even restarted my computer, and also tried running the local version. I got nothing. The fact that the local version didn't run either also hinted to me that it was the loading of an external JavaScript file that caused the problem. I let it pass as something strange with that one network. Unfortunately I'm now hundreds of miles away. Today my brother sent me an e-mail saying the network he was on at the airport wouldn't load my page. Same issue: everything is laid out properly, and part of the layout is set by JavaScript, so clearly some JavaScript is running, yet he too got no errors. Of course, he then got on his plane, and now that he's no longer at the airport the site works on his computer (and I haven't changed anything). How on earth would you go about figuring out what happened in this situation? That is two out of maybe 12 or so networks, and I have no idea how I would find another network that doesn't work (living in a small town, that could be difficult). Any ideas? The site is still in dev, so I'd rather not post a link just yet (but could in a few days). What I can see not working are the JavaScript functions called on load and on click, so I do think it is a JavaScript issue, even though there are no errors. This wouldn't be as HUGE an issue if I could find and sit on one of these networks, but I can't. So what would you do?
    EDIT ---------------------------------------------------------- The first function (and the ones linked from it) that doesn't get called is below. I've cut the code off at the .ajax call, as that call was never being made.
function getResultsFromForm(){ jQuery('form#filterList input.button').hide(); var searchAddress=jQuery('form#filterList input#searchTxt').val(); if(searchAddress=='' || searchAddress=='<?php echo $searchLocation; ?>'){ mapShow(20, -40, 0, 'areaMap', 2); jQuery('form#filterList input.button').show(); return; } if (GBrowserIsCompatible()) { var geo = new GClientGeocoder(); geo.setBaseCountryCode(cl.address.country); geo.getLocations(searchAddress, function (result) { if(!result.Placemark && searchAddress!='<?php echo $searchLocation; ?>'){ jQuery('span#addressNotFound').text('<?php echo $addressNotFound; ?>').slideDown('slow'); jQuery('form#filterList input.button').show(); } else { jQuery('span#addressNotFound').slideUp('slow').empty(); jQuery('span#headerLocal').text(searchAddress); var date = new Date(); date.setTime(date.getTime() + (8 * 24 * 60 * 60 * 1000)); jQuery.cookie('address', searchAddress, { expires: date}); var accuracy= result.Placemark[0].AddressDetails.Accuracy; var lat = result.Placemark[0].Point.coordinates[1]; var long = result.Placemark[0].Point.coordinates[0]; lat=parseFloat(lat); long=parseFloat(long); var getTab=jQuery('div#tabs div#active').attr('class'); jQuery('div#tabs').show(); loadForecast(lat, long, getTab, 'true', 0); var zoom=zoomLevel(); mapShow(lat, long, accuracy, 'areaMap', zoom ); } }); } } function zoomLevel(){ var zoomarray= new Array(); zoomarray=jQuery('span.viewDist').attr('id'); zoomarray=zoomarray.split("-"); var zoom=zoomarray[1]; if(zoom==''){ zoom=5; } zoom=parseFloat(zoom); return(zoom); } function loadForecast(lat, long, type, loadForecast, page){ jQuery('div#holdForecast').empty(); var date = new Date(); var d = date.getDate(); var day = (d < 10) ? '0' + d : d; var m = date.getMonth() + 1; var month = (m < 10) ? '0' + m : m; var year='2009'; toDate=year+'-'+month+'-'+day; var genre=jQuery('span.genreblock span#updateGenre').html(); var numDays=''; var numResults=''; var range=jQuery('span.viewDist').attr('id'); var dateRange = jQuery('.updateDate').attr('id'); jQuery('div#holdShows ul.showList').html('<li class="show"><div class="showData"><center><img src="../hwImages/loading.gif"/></center></div></li>'); jQuery('div#holdShows ul.'+type+'List').livequery(function(){ jQuery.ajax({ type: "GET", url: "processes/formatShows.php", data: "output=&genre="+genre+"&numResults="+numResults+"&date="+toDate+"&dateRange="+dateRange+"&range="+range+"&lat="+lat+"&long="+long+'&page='+page, success: function(response){ EDIT 2 ----------------------------------------------------------------------------- Please keep in mind that the problem is not that I can't load the site, the site works fine on most connections, but there are times when the site doesn't work, and no errors are thrown, and nothing changes. My brother couldn't run it earlier today while I had no problems, so it was something to do with his location/network. HOWEVER, the page loads, he had a connection, it was his first time visiting the site, so nothing could have been cashed. Same with when I had the issue a few days before. I didn't change anything, and I got to a different network and everything worked fine.

    Read the article

  • C++ Multithreading with pthread is blocking (including sockets)

    - by Sebastian Büttner
    I am trying to implement a multi threaded application with pthread. I did implement a thread class which looks like the following and I call it later twice (or even more), but it seems to block instead of execute the threads parallel. Here is what I got until now: The Thread Class is an abstract class which has the abstract method "exec" which should contain the thread code in a derive class (I did a sample of this, named DerivedThread) Thread.hpp #ifndef THREAD_H_ #define THREAD_H_ #include <pthread.h> class Thread { public: Thread(); void start(); void join(); virtual int exec() = 0; int exit_code(); private: static void* thread_router(void* arg); void exec_thread(); pthread_t pth_; int code_; }; #endif /* THREAD_H_ */ And Thread.cpp #include <iostream> #include "Thread.hpp" /*****************************/ using namespace std; Thread::Thread(): code_(0) { cout << "[Thread] Init" << endl; } void Thread::start() { cout << "[Thread] Created Thread" << endl; pthread_create( &pth_, NULL, Thread::thread_router, reinterpret_cast<void*>(this)); } void Thread::join() { cout << "[Thread] Join Thread" << endl; pthread_join(pth_, NULL); } int Thread::exit_code() { return code_; } void Thread::exec_thread() { cout << "[Thread] Execute" << endl; code_ = exec(); } void* Thread::thread_router(void* arg) { cout << "[Thread] exec_thread function in thread" << endl; reinterpret_cast<Thread*>(arg)->exec_thread(); return NULL; } DerivedThread.hpp #include "Thread.hpp" class DerivedThread : public Thread { public: DerivedThread(); virtual ~DerivedThread(); int exec(); void Close() = 0; DerivedThread.cpp [...] #include "DerivedThread.cpp" [...] int DerivedThread::exec() { //code to be executed do { cout << "Thread executed" << endl; usleep(1000000); } while (true); //dummy, just to let it run for a while } [...] Basically, I am calling this like the here: DerivedThread *thread; cout << "Creating Thread" << endl; thread = new DerivedThread(); cout << "Created thread, starting..." << endl; thread->start(); cout << "Started thread" << endl; cout << "Creating 2nd Thread" << endl; thread = new DerivedThread(); cout << "Created 2nd thread, starting..." << endl; thread->start(); cout << "Started 2nd thread" << endl; What is working great if I am only starting one of these Threads , but if I start multiple which should run together (not synced, only parallel) . But I discovered, that the thread is created, then as it tries to execute it (via start) the problem seems to block until the thread has closed. After that the next Thread is processed. I thought that pthread would do it unblocked for me, so what did I wrong? A sample output might be: Creating Thread [Thread] Thread Init Created thread, starting... [Thread] Created thread [Thread] exec_thread function in thread [Thread] Execute Thread executed Thread executed Thread executed Thread executed Thread executed Thread executed Thread executed .... Until Thread 1 is not terminated, a Thread 2 won't be created not executed. The process above is executed in an other class. Just for the information: I am trying to create a multi threaded server. 
The concept is like this: MultiThreadedServer Class has a main loop, like this one: ::inet::ServerSock *sock; //just a simple self made wrapper class for sockets DerivedThread *thread; for (;;) { sock = new ::inet::ServerSock(); this->Socket->accept( *sock ); cout << "Creating Thread" << endl; //Threads (according to code sample above) thread = new DerivedThread(sock); //I did not mentoine the parameter before as it was not neccesary, in fact, I pass the socket handle with the connected socket to the thread cout << "Created thread, starting..." << endl; thread->start(); cout << "Started thread" << endl; } So I thought that this would loop over and over and wait for new connections to accept. and when a new client arrives, I am creating a new thread and give the thread the connected socket as a parameter. In the DerivedThread::exec I am doing the handling for the connected client. Like: [...] do { [...] if (this-sock_-read( Buffer, sizeof(PacketStruc) ) 0) { cout << "[Handler_Base] Recv Packet" << endl; //handle the packet } else { Connected = false; } delete Buffer; } while ( Connected ); So I loop in the created thread as long as the client keeps the connection. I think, that the socket may cause the blocking behaviour. Edit: I figured out, that it is not the read() loop in the DerivedThread Class as I simply replaced it with a loop over a simple cout-usleep part. It did also only execute the first one and after first thread finished, the 2nd one was executed. Many thanks and best regards, Sebastian
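    A quick way to narrow this down, independent of the wrapper class: a minimal two-thread program built on the same pthread calls. If its output interleaves on your machine, pthread_create itself is not what serializes things, and the blocking is more likely in the surrounding server loop or socket handling. This is only a diagnostic sketch, not a fix.

    #include <pthread.h>
    #include <unistd.h>
    #include <cstdio>

    // Each worker just ticks a few times so any interleaving is visible.
    static void* worker(void* arg) {
        int id = *static_cast<int*>(arg);
        for (int i = 0; i < 5; ++i) {
            std::printf("worker %d: tick %d\n", id, i);
            usleep(200 * 1000);                               // 200 ms
        }
        return NULL;
    }

    int main() {
        int ids[2] = {0, 1};
        pthread_t threads[2];
        for (int i = 0; i < 2; ++i)
            pthread_create(&threads[i], NULL, worker, &ids[i]);  // returns immediately
        std::printf("both threads started\n");                // printed before the workers finish
        for (int i = 0; i < 2; ++i)
            pthread_join(threads[i], NULL);
        return 0;
    }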

    Read the article

  • AS3 XML Slideshow Woes

    - by Chris
    Hello All, I am hoping someone out there can possibly shed some light on a couple issues I am having with an image slideshow I have been working on a simple slideshow for a project and everything was going great/as expected up until I created a function to hide the previous images before displaying the next. On the first pass through everything seems to run fine; once the slideshow starts over, the images don't show up, yet when it goes to load the following image, it plays the tween from the hide previous function which shows the image briefly before fading out. So essentially the display is blank, then the timer calls in the next slide and its a brief flash of an image fading out, and then blank again. Another issue I am having is the text that I am loading in from the XML does not seem to want to tween in or out as they are supposed to. Here is the code: import fl.transitions.Tween; import fl.transitions.easing.*; import fl.transitions.TweenEvent; import flash.display.BitmapData; import flash.display.Bitmap; import flash.display.Sprite; import flash.utils.Timer; import flash.events.TimerEvent; var filePath:String = "photo1.xml"; var iArray:Array = new Array(); var titleArray:Array = new Array(); var dateArray:Array = new Array(); var catArray:Array = new Array(); var descArray:Array = new Array(); var rotationSpeed:Number; var totalImages:Number; var dataList:XMLList; var imagesLoaded:Number = 0; var currImage:Number = 0; var rotationTimer:Timer; var oldTween:Tween; var imageContainer:Sprite = new Sprite(); var imageHolder:Sprite = new Sprite(); var titleContainer:Sprite = new Sprite(); var dateContainer:Sprite = new Sprite(); var catContainer:Sprite = new Sprite(); var descContainer:Sprite = new Sprite(); var theMask:Sprite = new Sprite(); theMask.graphics.beginFill(0x000000); theMask.graphics.drawRect(114, 25, 323, 332); addChild(theMask); var loader:URLLoader = new URLLoader(); loader.addEventListener(Event.COMPLETE, onComplete); loader.load(new URLRequest(filePath)); function onComplete (evt:Event):void{ var pigData:XML = new XML(evt.target.data); pigData.ignoreWhitespace; dataList = pigData.pic; totalImages = dataList.length(); rotationSpeed = pigData.@speed * 1000; stage.scaleMode = StageScaleMode.NO_SCALE; loadImages(); rotationTimer = new Timer(rotationSpeed); rotationTimer.addEventListener(TimerEvent.TIMER, rotateImage); rotationTimer.start(); } function loadImages() { for (var i:Number = 0; i < totalImages; i++) { var imageURL:String = dataList[i].big; var imageLoader:Loader = new Loader(); imageLoader.load(new URLRequest(imageURL)); imageLoader.contentLoaderInfo.addEventListener(Event.COMPLETE, imageLoaded); iArray.push(imageLoader); var titleField:TextField = new TextField(); var dateField:TextField = new TextField(); var catField:TextField = new TextField(); var descField:TextField = new TextField(); titleField.text = dataList[i].title; titleField.autoSize = TextFieldAutoSize.LEFT; titleArray.push(titleField); dateField.text = dataList[i].date; dateField.autoSize = TextFieldAutoSize.LEFT; dateArray.push(dateField); catField.text = dataList[i].category; catField.autoSize = TextFieldAutoSize.LEFT; catArray.push(catField); descField.text = dataList[i].description; descField.autoSize = TextFieldAutoSize.LEFT; descArray.push(descField); } } function imageLoaded(evt:Event):void { imagesLoaded++; if (imagesLoaded == totalImages) { showImage(); } } function showImage():void { addChild(imageContainer); imageContainer.addChild(imageHolder); 
imageContainer.addChild(titleContainer); imageContainer.addChild(dateContainer); imageContainer.addChild(catContainer); imageContainer.addChild(descContainer); changeImage(); } function changeImage():void { var currentImage:Loader = Loader(iArray[currImage]); imageHolder.addChild(currentImage); currentImage.x = (stage.stageWidth - currentImage.width)/2; currentImage.y = (imageHolder.height - currentImage.height)/2; imageHolder.mask = theMask; new Tween(imageHolder, "alpha", Regular.easeOut, 0, 1, 1, true); var titleField:TextField = TextField(titleArray[currImage]); titleContainer.addChild(titleField); titleField.x = 107; titleField.y = 365; var titleTween:Tween = new Tween(titleField, "alpha", Regular.easeOut, 0, 1, 1, true); var dateField:TextField = TextField(dateArray[currImage]); dateContainer.addChild(dateField); dateField.x = 107; dateField.y = 375; var dateTween:Tween = new Tween(dateField, "alpha", Regular.easeOut, 0, 1, 1, true); var catField:TextField = TextField(catArray[currImage]); catContainer.addChild(catField); catField.x = 107; catField.y = 385; var catTween:Tween = new Tween(catField, "alpha", Regular.easeOut, 0, 1, 1, true); var descField:TextField = TextField(descArray[currImage]); descContainer.addChild(descField); descField.x = 107; descField.y = 395; var descTween:Tween = new Tween(descField, "alpha", Regular.easeOut, 0, 1, 1, true); } function rotateImage(evt:TimerEvent) { hidePrev(); currImage++; if (currImage == totalImages) { currImage = 0; } changeImage(); } function hidePrev():void{ var oldImage:Loader = Loader(imageHolder.getChildAt(0)); oldTween = new Tween(oldImage,"alpha", Regular.easeOut, 1, 0, 1, true); oldTween.addEventListener(TweenEvent.MOTION_FINISH, onFadeOut) var oldTitle:TextField = TextField(titleContainer.getChildAt(0)); var oldTitleTween:Tween = new Tween(oldTitle,"alpha", Regular.easeOut, 1, 0, 1, true); var oldDate:TextField = TextField(dateContainer.getChildAt(0)); var oldDateTween:Tween = new Tween(oldDate,"alpha",Regular.easeOut,1,0,1,true); var oldCat:TextField = TextField(catContainer.getChildAt(0)); var oldCatTween:Tween = new Tween(oldCat,"alpha",Regular.easeOut,1,0,1,true); var oldDesc:TextField = TextField(descContainer.getChildAt(0)); var oldDescTween:Tween = new Tween(oldDesc,"alpha",Regular.easeOut,1,0,1,true); } function onFadeOut(evt:TweenEvent):void{ imageHolder.removeChildAt(0); titleContainer.removeChildAt(0); dateContainer.removeChildAt(0); catContainer.removeChildAt(0); descContainer.removeChildAt(0); } I'm no flash whiz, bet can generally figure most issues out by either analyzing the code, or digging around online; however, this has me stumped. Any help would really be appreciated, and I thank you all in advance.

    Read the article

  • Codeigniter: Using URIs with forms

    - by Kevin Brown
    I'm using URIs to direct a function in a library: $id = $this->CI->session->userdata('id'); $URI = $this->CI->uri->uri_string(); $new = "new"; if(strpos($URI, $new) === FALSE){ $method = "update"; } elseif(strpos($URI, $new) !== FALSE){ $method = "create"; } So I have two if statements directing what information to if ($method === 'update') { // Modify form, first load $this->CI->db->from('be_survey'); $this->CI->db->where('user_id' , $id); $survey = $this->CI->db->get(); $user = array_merge($user->row_array(),$survey->row_array()); $this->CI->validation->set_default_value($user); // Display page $data['user'] = $user; } $this->CI->validation->set_rules($rules); if ( $this->CI->validation->run() === FALSE ) { // Output any errors $this->CI->validation->output_errors(); } else { // Submit form $this->_submit($method); } Submit function: function _submit($method) { //Submit and Update for current User $id = $this->CI->session->userdata('id'); $this->CI->db->select('users.id, users.username, users.email, profiles.firstname, profiles.manager_id'); $this->CI->db->from('be_users' . " users"); $this->CI->db->join('be_user_profiles' . " profiles",'users.id=profiles.user_id'); $this->CI->db->having('id', $id); $email_data['user'] = $this->CI->db->get(); $email_data['user'] = $email_data['user']->row(); $manager_id = $email_data['user']->manager_id; $this->CI->db->select('firstname','email')->from('be_user_profiles')->where('user_id', $manager_id); $email_data['manager'] = $this->CI->db->get(); $email_data['manager'] = $email_data['manager']->row(); // Fetch what they entered in the form for($i=1;$i<18;$i++){ $survey["a_".$i]= $this->CI->input->post('a_'.$i); } for($i=1;$i<15;$i++){ $survey["b_".$i]= $this->CI->input->post('b_'.$i); } for($i=1;$i<12;$i++){ $survey["c_".$i]= $this->CI->input->post('c_'.$i); } $profile['firstname'] = $this->CI->input->post('firstname'); $profile['lastname'] = $this->CI->input->post('lastname'); $profile['test_date'] = date ("Y-m-d H:i:s"); $profile['company_name'] = $this->CI->input->post('company_name'); $profile['company_address'] = $this->CI->input->post('company_address'); $profile['company_city'] = $this->CI->input->post('company_city'); $profile['company_phone'] = $this->CI->input->post('company_phone'); $profile['company_state'] = $this->CI->input->post('company_state'); $profile['company_zip'] = $this->CI->input->post('company_zip'); $profile['job_title'] = $this->CI->input->post('job_title'); $profile['job_type'] = $this->CI->input->post('job_type'); $profile['job_time'] = $this->CI->input->post('job_time'); $profile['department'] = $this->CI->input->post('department'); $profile['vision'] = $this->CI->input->post('vision'); $profile['height'] = $this->CI->input->post('height'); $profile['weight'] = $this->CI->input->post('weight'); $profile['hand_dominance'] = $this->CI->input->post('hand_dominance'); $profile['areas_of_fatigue'] = $this->CI->input->post('areas_of_fatigue'); $profile['job_description'] = $this->CI->input->post('job_description'); $profile['injury_review'] = $this->CI->input->post('injury_review'); $profile['job_positive'] = $this->CI->input->post('job_positive'); $profile['risk_factors'] = $this->CI->input->post('risk_factors'); $profile['job_improvement_short'] = $this->CI->input->post('job_improvement_short'); $profile['job_improvement_long'] = $this->CI->input->post('job_improvement_long'); if ($method == "update") { //Begin db transmission $this->CI->db->trans_begin(); $this->CI->home_model->update('Survey',$survey, array('user_id' => 
$id)); $this->CI->db->update('be_user_profiles',$profile, array('user_id' => $id)); if ($this->CI->db->trans_status() === FALSE) { flashMsg('error','There was a problem entering your test! Please contact an administrator.'); redirect('survey','location'); } else { //Get credits of user and subtract 1 $this->CI->db->set('credits', 'credits -1', FALSE); $this->CI->db->update('be_user_profiles',$profile, array('user_id' => $manager_id)); //Mark the form completed. $this->CI->db->set('test_complete', '1'); $this->CI->db->where('user_id', $id)->update('be_user_profiles'); // Stuff worked... $this->CI->db->trans_commit(); //Get Manager Information $this->CI->db->select('users.id, users.username, users.email, profiles.firstname'); $this->CI->db->from('be_users' . " users"); $this->CI->db->join('be_user_profiles' . " profiles",'users.id=profiles.user_id'); $this->CI->db->having('id', $email_data['user']->manager_id); $email_data['manager'] = $this->CI->db->get(); $email_data['manager'] = $email_data['manager']->row(); //Email User $this->CI->load->library('User_email'); $data_user = array( 'firstname'=>$email_data['user']->firstname, 'email'=> $email_data['user']->email, 'user_completed'=>$email_data['user']->firstname, 'site_name'=>$this->CI->preference->item('site_name'), 'site_url'=>base_url() ); //Email Manager $data_manager = array( 'firstname'=>$email_data['manager']->firstname, 'email'=> $email_data['manager']->email, 'user_completed'=>$email_data['user']->firstname, 'site_name'=>$this->CI->preference->item('site_name'), 'site_url'=>base_url() ); $this->CI->user_email->send($email_data['manager']->email,'Completed the Assessment Tool','public/email_manager_complete',$data_manager); $this->CI->user_email->send($email_data['user']->email,'Completed the Assessment Tool','public/email_user_complete',$data_user); flashMsg('success','You finished the assessment successfully!'); redirect('home','location'); } } //Create New User elseif ($method == "create") { // Build $profile['user_id'] = $id; $profile['manager_id'] = $manager_id; $profile['test_complete'] = '1'; $survey['user_id'] = $id; $this->CI->db->trans_begin(); // Add user_profile details to DB $this->CI->db->insert('be_user_profiles',$profile); $this->CI->db->insert('be_survey',$survey); if ($this->CI->db->trans_status() === FALSE) { // Registration failed $this->CI->db->trans_rollback(); flashMsg('error',$this->CI->lang->line('userlib_registration_failed')); redirect('auth/register','location'); } else { // User registered $this->CI->db->trans_commit(); flashMsg('success',$this->CI->lang->line('userlib_registration_success')); redirect($this->CI->config->item('userlib_action_register'),'location'); } } } The submit function is similar, updating the db if $method == "update", and inserting if the method == "create". The problem is, when the form is submitted, it doesn't take into account the url b/c the form submits to the function "survey", which passes data to the lib function, so things are always updated, never created. *How can I pass $method to the _submit() function correctly?!*

    Read the article

  • Casting an object using 'as' returns null: myObject = newObject as MyObject; // null

    - by John Russell
    I am trying to create a custom object in AS3 to pass information to and from a server, which in this case will be Red5. In the below screenshots you will see that I am able to send a request for an object from as3, and receive it successfully from the java server. However, when I try to cast the received object to my defined objectType using 'as', it takes the value of null. It is my understanding that that when using "as" your checking to see if your variable is a member of the specified data type. If the variable is not, then null will be returned. This screenshot illustrates that I am have successfully received my object 'o' from red5 and I am just about to cast it to the (supposedly) identical datatype testObject of LobbyData: However, when testObject = o as LobbyData; runs, it returns null. :( Below you will see my specifications both on the java server and the as3 client. I am confident that both objects are identical in every way, but for some reason flash does not think so. I have been pulling my hair out for a long time, does anyone have any thoughts? AS3 Object: import flash.utils.IDataInput; import flash.utils.IDataOutput; import flash.utils.IExternalizable; import flash.net.registerClassAlias; [Bindable] [RemoteClass(alias = "myLobbyData.LobbyData")] public class LobbyData implements IExternalizable { private var sent:int; // java sentinel private var u:String; // red5 username private var sen:int; // another sentinel? private var ui:int; // fb uid private var fn:String; // fb name private var pic:String; // fb pic private var inb:Boolean; // is in the table? private var t:int; // table number private var s:int; // seat number public function setSent(sent:int):void { this.sent = sent; } public function getSent():int { return sent; } public function setU(u:String):void { this.u = u; } public function getU():String { return u; } public function setSen(sen:int):void { this.sen = sen; } public function getSen():int { return sen; } public function setUi(ui:int):void { this.ui = ui; } public function getUi():int { return ui; } public function setFn(fn:String):void { this.fn = fn; } public function getFn():String { return fn; } public function setPic(pic:String):void { this.pic = pic; } public function getPic():String { return pic; } public function setInb(inb:Boolean):void { this.inb = inb; } public function getInb():Boolean { return inb; } public function setT(t:int):void { this.t = t; } public function getT():int { return t; } public function setS(s:int):void { this.s = s; } public function getS():int { return s; } public function readExternal(input:IDataInput):void { sent = input.readInt(); u = input.readUTF(); sen = input.readInt(); ui = input.readInt(); fn = input.readUTF(); pic = input.readUTF(); inb = input.readBoolean(); t = input.readInt(); s = input.readInt(); } public function writeExternal(output:IDataOutput):void { output.writeInt(sent); output.writeUTF(u); output.writeInt(sen); output.writeInt(ui); output.writeUTF(fn); output.writeUTF(pic); output.writeBoolean(inb); output.writeInt(t); output.writeInt(s); } } Java Object: package myLobbyData; import org.red5.io.amf3.IDataInput; import org.red5.io.amf3.IDataOutput; import org.red5.io.amf3.IExternalizable; public class LobbyData implements IExternalizable { private static final long serialVersionUID = 115280920; private int sent; // java sentinel private String u; // red5 username private int sen; // another sentinel? 
private int ui; // fb uid private String fn; // fb name private String pic; // fb pic private Boolean inb; // is in the table? private int t; // table number private int s; // seat number public void setSent(int sent) { this.sent = sent; } public int getSent() { return sent; } public void setU(String u) { this.u = u; } public String getU() { return u; } public void setSen(int sen) { this.sen = sen; } public int getSen() { return sen; } public void setUi(int ui) { this.ui = ui; } public int getUi() { return ui; } public void setFn(String fn) { this.fn = fn; } public String getFn() { return fn; } public void setPic(String pic) { this.pic = pic; } public String getPic() { return pic; } public void setInb(Boolean inb) { this.inb = inb; } public Boolean getInb() { return inb; } public void setT(int t) { this.t = t; } public int getT() { return t; } public void setS(int s) { this.s = s; } public int getS() { return s; } @Override public void readExternal(IDataInput input) { sent = input.readInt(); u = input.readUTF(); sen = input.readInt(); ui = input.readInt(); fn = input.readUTF(); pic = input.readUTF(); inb = input.readBoolean(); t = input.readInt(); s = input.readInt(); } @Override public void writeExternal(IDataOutput output) { output.writeInt(sent); output.writeUTF(u); output.writeInt(sen); output.writeInt(ui); output.writeUTF(fn); output.writeUTF(pic); output.writeBoolean(inb); output.writeInt(t); output.writeInt(s); } } AS3 Client: public function refreshRoom(event:Event) { var resp:Responder=new Responder(handleResp,null); ncLobby.call("getLobbyData", resp, null); } public function handleResp(o:Object):void { var testObject:LobbyData=new LobbyData; testObject = o as LobbyData; trace(testObject); } Java Client public LobbyData getLobbyData(String param) { LobbyData lobbyData1 = new LobbyData(); lobbyData1.setSent(5); lobbyData1.setU("lawlcats"); lobbyData1.setSen(5); lobbyData1.setUi(5); lobbyData1.setFn("lulz"); lobbyData1.setPic("lulzagain"); lobbyData1.setInb(true); lobbyData1.setT(5); lobbyData1.setS(5); return lobbyData1; }

    Read the article

  • Choose variable based on user's choice from picklist.

    - by Kelbizzle
    I have this form. Basically what I want is to send a auto-response with a different URL based on what the user picks in the "attn" picklist. I've been thinking I could have a different variable for each drop down value. It will then pass this variable on to the mail script that will choose which URL to insert inside the auto response that is sent. It gives me a headache thinking about it sometimes. What's the easiest way to accomplish this? Am I making more work for myself? I really don't know because I'm no programmer. Thanks in advance! Here is the form: <form name="contact_form" method="post" action="sendemail_reports.php" onsubmit="return validate_form ( );"> <div id='zohoWebToLead' align=center> <table width="100%" border="0" align="center" cellpadding="0" cellspacing="0" class="txt_body"> <table border=0 cellspacing=0 cellpadding=5 width=480 style='border-bottom-color: #999999; border-top-color: #999999; border-bottom-style: none; border-top-style: none; border-bottom-width: 1px; border-top-width: 2px; background-color:transparent;'> <tr> <td width='75%'><table width=480 border=0 cellpadding=5 cellspacing=0 style='border-bottom-color: #999999; border-top-color: #999999; border-bottom-style: none; border-top-style: none; border-bottom-width: 1px; border-top-width: 2px; background-color:transparent;'> <input type="hidden" name="ip" value="'.$ipi.'" /> <input type="hidden" name="httpref" value="'.$httprefi.'" /> <input type="hidden" name="httpagent" value="'.$httpagenti.'" /> <tr></tr> <tr> <td colspan='2' align='left' style='border-bottom-color: #dadada; border-bottom-style: none; border-bottom-width: 2px; color:#000000; font-family:sans-serif; font-size:14px;'><strong>Send us an Email</strong></td> </tr> <tr> <td nowrap style='font-family:sans-serif;font-size:12px;font-weight:bold' align='right' width='25%'> First Name   : </td> <td width='75%'><input name='visitorf' type='text' size="48" maxlength='40' /></td> </tr> <tr> <td nowrap style='font-family:sans-serif;font-size:12px;font-weight:bold' align='right' width='25%'>Last Name   :</td> <td width='75%'><input name='visitorfl' type='text' size="48" maxlength='80' /></td> </tr> <tr> <td nowrap style= 'font-family:sans-serif;font-size:12px;font-weight:bold' align='right' width='25%'> Email Adress  : </td> <td width='75%'><input name='visitormail' type='text' size="48" maxlength='100' /></td> </tr> <tr> <td nowrap style='font-family:sans-serif;font-size:12px;font-weight:bold' align='right' width='25%'> Phone   : </td> <td width='75%'><input name='visitorphone' type='text' size="48" maxlength='30' /></td> </tr> <td nowrap style='font-family:sans-serif;font-size:12px;font-weight:bold' align='right' width='25%'> Subject   : </td> <td width='75%'><select name="attn" size="1"> <option value=" Investment Opportunities ">Investment Opportunities </option> <option value=" Vacation Rentals ">Vacation Rentals </option> <option value=" Real Estate Offerings ">Real Estate Offerings </option> <option value=" Gatherings ">Gatherings </option> <option value=" General ">General </option> </select></td> <tr> <td nowrap style= 'font-family:sans-serif;font-size:12px;font-weight:bold' align='right' width='25%'> Message   :<br /> <em>(max 5000 char)</em></td> <td width='75%'><textarea name='notes' maxlength='5000' cols="48" rows="3"></textarea></td> </tr> <tr> <td colspan=2 align=center style=''><input name='save' type='submit' class="boton" value=Send mail /> &nbsp; &nbsp; <input type='reset' name='reset' value=Reset class="boton" /></td> </tr> 
</table></td> </tr> </table> </div> </form> Here is the mail script: <?php //the 3 variables below were changed to use the SERVER variable $ip = $_SERVER['REMOTE_ADDR']; $httpref = $_SERVER['HTTP_REFERER']; $httpagent = $_SERVER['HTTP_USER_AGENT']; $visitorf = $_POST['visitorf']; $visitorl = $_POST['visitorl']; $visitormail = $_POST['visitormail']; $visitorphone = $_POST['visitorphone']; $notes = $_POST['notes']; $attn = $_POST['attn']; //additional headers $headers = 'From: Me <[email protected]>' . "\n" ; $headers = 'Bcc: [email protected]' . "\n"; if (eregi('http:', $notes)) { die ("Do NOT try that! ! "); } if(!$visitormail == "" && (!strstr($visitormail,"@") || !strstr($visitormail,"."))) { echo "<h2>Use Back - Enter valid e-mail</h2>\n"; $badinput = "<h2>Feedback was NOT submitted</h2>\n"; echo $badinput; die ("Go back! ! "); } if(empty($visitorf) || empty($visitormail) || empty($notes )) { echo "<h2>Use Back - fill in all fields</h2>\n"; die ("Use back! ! "); } $todayis = date("l, F j, Y, g:i a") ; $subject = "I want to download the report about $attn"; $notes = stripcslashes($notes); $message = "$todayis [EST] \nAttention: $attn \nMessage: $notes \nFrom: $visitorf $visitorl ($visitormail) \nTelephone Number: $visitorphone \nAdditional Info : IP = $ip \nBrowser Info: $httpagent \nReferral : $httpref\n"; //check if the function even exists if(function_exists("mail")) { //send the email mail($_SESSION['email'], $subject, $message, $headers) or die("could not send email"); } else { die("mail function not enabled"); } header( "Location: http://www.domain.com/thanks.php" ); ?>

    Read the article

  • Dealing with external processes

    - by Jesse Aldridge
    I've been working on a gui app that needs to manage external processes. Working with external processes leads to a lot of issues that can make a programmer's life difficult. I feel like maintenence on this app is taking an unacceptably long time. I've been trying to list the things that make working with external processes difficult so that I can come up with ways of mitigating the pain. This kind of turned into a rant which I thought I'd post here in order to get some feedback and to provide some guidance to anybody thinking about sailing into these very murky waters. Here's what I've got so far: Output from the child can get mixed up with output from the parent. This can make both outputs misleading and hard to read. It can be hard to tell what came from where. It becomes harder to figure out what's going on when things are asynchronous. Here's a contrived example: import textwrap, os, time from subprocess import Popen test_path = 'test_file.py' with open(test_path, 'w') as file: file.write(textwrap.dedent(''' import time for i in range(3): print 'Hello %i' % i time.sleep(1)''')) proc = Popen('python -B "%s"' % test_path) for i in range(3): print 'Hello %i' % i time.sleep(1) os.remove(test_path) I guess I could have the child process write its output to a file. But it can be annoying to have to open up a file every time I want to see the result of a print statement. If I have code for the child process I could add a label, something like print 'child: Hello %i', but it can be annoying to do that for every print. And it adds some noise to the output. And of course I can't do it if I don't have access to the code. I could manually manage the process output. But then you open up a huge can of worms with threads and polling and stuff like that. A simple solution is to treat processes like synchronous functions, that is, no further code executes until the process completes. In other words, make the process block. But that doesn't work if you're building a gui app. Which brings me to the next problem... Blocking processes cause the gui to become unresponsive. import textwrap, sys, os from subprocess import Popen from PyQt4.QtGui import * from PyQt4.QtCore import * test_path = 'test_file.py' with open(test_path, 'w') as file: file.write(textwrap.dedent(''' import time for i in range(3): print 'Hello %i' % i time.sleep(1)''')) app = QApplication(sys.argv) button = QPushButton('Launch process') def launch_proc(): # Can't move the window until process completes proc = Popen('python -B "%s"' % test_path) proc.communicate() button.connect(button, SIGNAL('clicked()'), launch_proc) button.show() app.exec_() os.remove(test_path) Qt provides a process wrapper of its own called QProcess which can help with this. You can connect functions to signals to capture output relatively easily. This is what I'm currently using. But I'm finding that all these signals behave suspiciously like goto statements and can lead to spaghetti code. I think I want to get sort-of blocking behavior by having the 'finished' signal from QProcess call a function containing all the code that comes after the process call. I think that should work but I'm still a bit fuzzy on the details... Stack traces get interrupted when you go from the child process back to the parent process. If a normal function screws up, you get a nice complete stack trace with filenames and line numbers. If a subprocess screws up, you'll be lucky if you get any output at all. 
You end up having to do a lot more detective work every time something goes wrong.
Speaking of which, output has a way of disappearing when dealing with external processes. For example, if you run something via the Windows 'cmd' command, the console will pop up, execute the code, and then disappear before you have a chance to see the output. You have to pass the /k flag to make it stick around. Similar issues seem to crop up all the time. I suppose both problems 3 and 4 have the same root cause: no exception handling. Exception handling is meant to be used with functions; it doesn't work with processes. Maybe there's some way to get something like exception handling for processes? I guess that's what stderr is for? But dealing with two different streams can be annoying in itself. Maybe I should look into this more...
Processes can hang and stick around in the background without you realizing it. So you end up yelling at your computer because it's going so slow, until you finally bring up your task manager and see 30 instances of the same process hanging out in the background. Also, hanging background processes can interfere with other instances of the process in various fun ways, such as causing permissions errors by holding a handle to a file or something like that. It seems like an easy solution would be to have the parent process kill the child process on exit if the child didn't close itself. But if the parent process crashes, cleanup code might not get called and the child can be left hanging. Also, if the parent waits for the child to complete, and the child is in an infinite loop or something, you can end up with two hanging processes. This problem can tie in to problem 2 for extra fun, causing your gui to stop responding entirely and forcing you to kill everything with the task manager.
F***ing quotes. Parameters often need to be passed to processes. This is a headache in itself, especially if you're dealing with file paths, say 'C:/My Documents/whatever/'. If you don't have quotes, the string will often be split at the space and interpreted as two arguments. If you need nested quotes you can use ' and ". But if you need more than two layers of quotes, you have to do some nasty escaping, for example: "cmd /k 'python \'path 1\' \'path 2\''". A good solution to this problem is passing parameters as a list rather than as a single string. Subprocess allows you to do this (there's a sketch of the same argv-as-list idea after this list).
Can't easily return data from a subprocess. You can use stdout, of course. But what if you want to throw a print in there for debugging purposes? That's going to screw up the parent if it's expecting output formatted a certain way. In functions you can print one string and return another and everything works just fine.
Obscure command-line flags and a crappy terminal-based help system. These are problems I often run into when using OS-level apps. Like the /k flag I mentioned, for holding a cmd window open: whose idea was that? Unix apps don't tend to be much friendlier in this regard. Hopefully you can use Google or StackOverflow to find the answer you need. But if not, you've got a lot of boring reading and frustrating trial and error to do.
External factors. This one's kind of fuzzy. But when you leave the relatively sheltered harbor of your own scripts to deal with external processes, you find yourself having to deal with the "outside world" to a much greater extent. And that's a scary place. All sorts of things can go wrong. Just to give a random example: the cwd in which a process is run can modify its behavior.
There are probably other issues, but those are the ones I've written down so far. Any other snags you'd like to add? Any suggestions for dealing with these problems?
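    On the quoting snag: the escaping largely disappears once every argument is its own list element, which is exactly what subprocess's list form gives you. For illustration, here is the same argv-as-list idea at the POSIX level, sketched in C++ (fork/execvp, so Unix-only and purely illustrative):

    #include <sys/wait.h>
    #include <unistd.h>
    #include <cstdio>

    int main() {
        // Each argument is a separate argv entry, so a path containing spaces
        // needs no quoting or escaping at all.
        char* const argv[] = {
            const_cast<char*>("ls"),
            const_cast<char*>("-l"),
            const_cast<char*>("/tmp/path with spaces"),
            NULL
        };
        pid_t pid = fork();
        if (pid < 0) { std::perror("fork"); return 1; }
        if (pid == 0) {                     // child: replace this image with the command
            execvp(argv[0], argv);
            std::perror("execvp");          // only reached if exec failed
            _exit(127);
        }
        int status = 0;
        waitpid(pid, &status, 0);           // reap the child so it cannot linger
        if (WIFEXITED(status))
            std::printf("child exited with status %d\n", WEXITSTATUS(status));
        return 0;
    }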

    Read the article

  • Odd behavior when recursively building a return type for variadic functions

    - by Dennis Zickefoose
    This is probably going to be a really simple explanation, but I'm going to give as much backstory as possible in case I'm wrong. Advanced apologies for being so verbose. I'm using gcc4.5, and I realize the c++0x support is still somewhat experimental, but I'm going to act on the assumption that there's a non-bug related reason for the behavior I'm seeing. I'm experimenting with variadic function templates. The end goal was to build a cons-list out of std::pair. It wasn't meant to be a custom type, just a string of pair objects. The function that constructs the list would have to be in some way recursive, with the ultimate return value being dependent on the result of the recursive calls. As an added twist, successive parameters are added together before being inserted into the list. So if I pass [1, 2, 3, 4, 5, 6] the end result should be {1+2, {3+4, 5+6}}. My initial attempt was fairly naive. A function, Build, with two overloads. One took two identical parameters and simply returned their sum. The other took two parameters and a parameter pack. The return value was a pair consisting of the sum of the two set parameters, and the recursive call. In retrospect, this was obviously a flawed strategy, because the function isn't declared when I try to figure out its return type, so it has no choice but to resolve to the non-recursive version. That I understand. Where I got confused was the second iteration. I decided to make those functions static members of a template class. The function calls themselves are not parameterized, but instead the entire class is. My assumption was that when the recursive function attempts to generate its return type, it would instantiate a whole new version of the structure with its own static function, and everything would work itself out. The result was: "error: no matching function for call to BuildStruct<double, double, char, char>::Go(const char&, const char&)" The offending code: static auto Go(const Type& t0, const Type& t1, const Types&... rest) -> std::pair<Type, decltype(BuildStruct<Types...>::Go(rest...))> My confusion comes from the fact that the parameters to BuildStruct should always be the same types as the arguments sent to BuildStruct::Go, but in the error code Go is missing the initial two double parameters. What am I missing here? If my initial assumption about how the static functions would be chosen was incorrect, why is it trying to call the wrong function rather than just not finding a function at all? It seems to just be mixing types willy-nilly, and I just can't come up with an explanation as to why. If I add additional parameters to the initial call, it always burrows down to that last step before failing, so presumably the recursion itself is at least partially working. This is in direct contrast to the initial attempt, which always failed to find a function call right away. Ultimately, I've gotten past the problem, with a fairly elegant solution that hardly resembles either of the first two attempts. So I know how to do what I want to do. I'm looking for an explanation for the failure I saw. Full code to follow since I'm sure my verbal description was insufficient. First some boilerplate, if you feel compelled to execute the code and see it for yourself. Then the initial attempt, which failed reasonably, then the second attempt, which did not. 
#include <iostream>
using std::cout;
using std::endl;

#include <utility>

template<typename T1, typename T2>
std::ostream& operator <<(std::ostream& str, const std::pair<T1, T2>& p)
{
    return str << "[" << p.first << ", " << p.second << "]";
}

//Insert code here

int main()
{
    Execute(5, 6, 4.3, 2.2, 'c', 'd');
    Execute(5, 6, 4.3, 2.2);
    Execute(5, 6);
    return 0;
}

Non-struct solution:

template<typename Type>
Type BuildFunction(const Type& t0, const Type& t1)
{
    return t0 + t1;
}

template<typename Type, typename... Rest>
auto BuildFunction(const Type& t0, const Type& t1, const Rest&... rest)
    -> std::pair<Type, decltype(BuildFunction(rest...))>
{
    return std::pair<Type, decltype(BuildFunction(rest...))>
        (t0 + t1, BuildFunction(rest...));
}

template<typename... Types>
void Execute(const Types&... t)
{
    cout << BuildFunction(t...) << endl;
}

Resulting errors:

test.cpp: In function 'void Execute(const Types& ...) [with Types = {int, int, double, double, char, char}]':
test.cpp:33:35: instantiated from here
test.cpp:28:3: error: no matching function for call to 'BuildFunction(const int&, const int&, const double&, const double&, const char&, const char&)'

Struct solution:

template<typename... Types>
struct BuildStruct;

template<typename Type>
struct BuildStruct<Type, Type>
{
    static Type Go(const Type& t0, const Type& t1)
    {
        return t0 + t1;
    }
};

template<typename Type, typename... Types>
struct BuildStruct<Type, Type, Types...>
{
    static auto Go(const Type& t0, const Type& t1, const Types&... rest)
        -> std::pair<Type, decltype(BuildStruct<Types...>::Go(rest...))>
    {
        return std::pair<Type, decltype(BuildStruct<Types...>::Go(rest...))>
            (t0 + t1, BuildStruct<Types...>::Go(rest...));
    }
};

template<typename... Types>
void Execute(const Types&... t)
{
    cout << BuildStruct<Types...>::Go(t...) << endl;
}

Resulting errors:

test.cpp: In instantiation of 'BuildStruct<int, int, double, double, char, char>':
test.cpp:33:3: instantiated from 'void Execute(const Types& ...) [with Types = {int, int, double, double, char, char}]'
test.cpp:38:41: instantiated from here
test.cpp:24:15: error: no matching function for call to 'BuildStruct<double, double, char, char>::Go(const char&, const char&)'
test.cpp:24:15: note: candidate is: static std::pair<Type, decltype (BuildStruct<Types ...>::Go(BuildStruct<Type, Type, Types ...>::Go::rest ...))> BuildStruct<Type, Type, Types ...>::Go(const Type&, const Type&, const Types& ...) [with Type = double, Types = {char, char}, decltype (BuildStruct<Types ...>::Go(BuildStruct<Type, Type, Types ...>::Go::rest ...)) = char]
test.cpp: In function 'void Execute(const Types& ...) [with Types = {int, int, double, double, char, char}]':
test.cpp:38:41: instantiated from here
test.cpp:33:3: error: 'Go' is not a member of 'BuildStruct<int, int, double, double, char, char>'

    Read the article

  • Implementing a robust async stream reader for a console

    - by Jon
I recently provided an answer to this question: C# - Realtime console output redirection. As often happens, explaining stuff (here "stuff" was how I tackled a similar problem) leads you to greater understanding and/or, as is the case here, "oops" moments. I realized that my solution, as implemented, has a bug. The bug has little practical importance, but it has an extremely large importance to me as a developer: I can't rest easy knowing that my code has the potential to blow up. Squashing the bug is the purpose of this question. I apologize for the long intro, so let's get dirty.

I wanted to build a class that allows me to receive input from a Stream in an event-based manner. The stream, in my scenario, is guaranteed to be a FileStream, and there is also an associated StreamReader already present to leverage. The public interface of the class is this:

public class MyStreamManager
{
    public event EventHandler<ConsoleOutputReadEventArgs> StandardOutputRead;
    public void StartSendingEvents();
    public void StopSendingEvents();
}

Obviously this specific scenario has to do with a console's standard output. StartSendingEvents and StopSendingEvents do what they advertise; for the purposes of this discussion, we can assume that events are always being sent, without loss of generality.

The class uses these two fields internally:

protected readonly StringBuilder inputAccumulator = new StringBuilder();
protected readonly byte[] buffer = new byte[256];

The functionality of the class is implemented in the methods below. To get the ball rolling:

public void StartSendingEvents()
{
    this.stopAutomation = false;
    this.BeginReadAsync();
}

To read data out of the Stream without blocking, and also without requiring a carriage return char, BeginRead is called:

protected void BeginReadAsync()
{
    if (!this.stopAutomation)
    {
        this.StandardOutput.BaseStream.BeginRead(
            this.buffer, 0, this.buffer.Length, this.ReadHappened, null);
    }
}

The challenging part: BeginRead requires using a buffer. This means that when reading from the stream, it is possible that the bytes available to read (the "incoming chunk") are larger than the buffer. Since we are only handing off data from the stream to a consumer, and that consumer may well have inside knowledge about the size and/or format of these chunks, I want to call event subscribers exactly once for each chunk. Otherwise the abstraction breaks down and the subscribers have to buffer the incoming data and reconstruct the chunks themselves using said knowledge. This is much less convenient to the calling code, and detracts from the usefulness of my class.

Edit: There are comments below correctly stating that since the data is coming from a stream, there is absolutely nothing that the receiver can infer about the structure of the data unless it is fully prepared to parse it. What I am trying to do here is leverage the "flush the output" "structure" that the owner of the console imparts while writing on it. I am prepared to assume (better: allow my caller to have the option to assume) that the OS will pass me the data written between two flushes of the stream in exactly one piece.

To this end, if the buffer is full after EndRead, we don't send its contents to subscribers immediately but instead append them to a StringBuilder. The contents of the StringBuilder are only sent back whenever there is no more to read from the stream (thus preserving the chunks).
private void ReadHappened(IAsyncResult asyncResult)
{
    var bytesRead = this.StandardOutput.BaseStream.EndRead(asyncResult);
    if (bytesRead == 0)
    {
        this.OnAutomationStopped();
        return;
    }

    var input = this.StandardOutput.CurrentEncoding.GetString(
        this.buffer, 0, bytesRead);
    this.inputAccumulator.Append(input);

    if (bytesRead < this.buffer.Length)
    {
        this.OnInputRead(); // only send back if we're sure we got it all
    }

    this.BeginReadAsync(); // continue "looping" with BeginRead
}

After any read which is not enough to fill the buffer, all accumulated data is sent to the subscribers:

private void OnInputRead()
{
    var handler = this.StandardOutputRead;
    if (handler == null)
    {
        return;
    }

    handler(this, new ConsoleOutputReadEventArgs(this.inputAccumulator.ToString()));
    this.inputAccumulator.Clear();
}

(I know that as long as there are no subscribers the data gets accumulated forever. This is a deliberate decision.)

The good

This scheme works almost perfectly:

- Async functionality without spawning any threads
- Very convenient to the calling code (just subscribe to an event)
- Maintains the "chunkiness" of the data; this allows the calling code to use inside knowledge of the data without doing any extra work
- Is almost agnostic to the buffer size (it will work correctly with any size buffer, irrespective of the data being read)

The bad

That last almost is a very big one. Consider what happens when there is an incoming chunk with length exactly equal to the size of the buffer. The chunk will be read and buffered, but the event will not be triggered. This will be followed up by a BeginRead that expects to find more data belonging to the current chunk in order to send it back all in one piece, but... there will be no more data in the stream. In fact, as long as data is put into the stream in chunks with length exactly equal to the buffer size, the data will be buffered and the event will never be triggered. This scenario may be highly unlikely to occur in practice, especially since we can pick any number for the buffer size, but the problem is there.

Solution?

Unfortunately, after checking the available methods on FileStream and StreamReader, I can't find anything which lets me peek into the stream while also allowing async methods to be used on it. One "solution" would be to have a thread wait on a ManualResetEvent after the "buffer filled" condition is detected. If the event is not signaled (by the async callback) in a small amount of time, then more data from the stream will not be forthcoming and the data accumulated so far should be sent to subscribers. However, this introduces the need for another thread, requires thread synchronization, and is plain inelegant.

Specifying a timeout for BeginRead would also suffice (call back into my code every now and then so I can check if there's data to be sent back; most of the time there will not be anything to do, so I expect the performance hit to be negligible). But it looks like timeouts are not supported in FileStream. Since I imagine that async calls with timeouts are an option in bare Win32, another approach might be to PInvoke the hell out of the problem. But this is also undesirable, as it would introduce complexity and simply be a pain to code.

Is there an elegant way to get around the problem? Thanks for being patient enough to read all of this.
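One possible workaround, sketched below on the assumption that a short flush delay is acceptable: whenever a read exactly fills the buffer, arm a one-shot System.Threading.Timer and restart it if more data arrives before it fires; if nothing arrives within the delay, the accumulated text is flushed to subscribers. The member names reuse those from the question; FlushDelayMilliseconds is an invented tuning knob, and the timer callback runs on a thread-pool thread, so this only softens (rather than preserves) the "no extra threads" property.

// Sketch only: flush the accumulator if no further data follows a buffer-filling read.
private const int FlushDelayMilliseconds = 50;      // assumed value
private System.Threading.Timer flushTimer;          // lazily created one-shot timer
private readonly object flushLock = new object();

private void ScheduleFlush()
{
    lock (this.flushLock)
    {
        if (this.flushTimer == null)
        {
            this.flushTimer = new System.Threading.Timer(_ => this.FlushAccumulator());
        }

        // Restart the countdown; Timeout.Infinite keeps the timer from repeating.
        this.flushTimer.Change(FlushDelayMilliseconds, System.Threading.Timeout.Infinite);
    }
}

private void CancelFlush()
{
    lock (this.flushLock)
    {
        if (this.flushTimer != null)
        {
            // Disarm the pending flush because more data has arrived.
            this.flushTimer.Change(System.Threading.Timeout.Infinite, System.Threading.Timeout.Infinite);
        }
    }
}

private void FlushAccumulator()
{
    lock (this.flushLock)
    {
        if (this.inputAccumulator.Length > 0)
        {
            this.OnInputRead();
        }
    }
}

In this sketch, ReadHappened would call CancelFlush() as its first statement and call ScheduleFlush() instead of staying silent in the bytesRead == this.buffer.Length case; a real implementation would also need to synchronize access to inputAccumulator between ReadHappened and the timer callback.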

    Read the article

  • Warning: Cannot modify header information - headers already sent

    - by Pankaj Khurana
    Hi, I am using code available on http://www.forosdelweb.com/f18/zip-lib-php-archivo-zip-vacio-431133/ for creating zip file. First file-zip.lib.php <?php /* $Id: zip.lib.php,v 1.1 2004/02/14 15:21:18 anoncvs_tusedb Exp $ */ // vim: expandtab sw=4 ts=4 sts=4: /** * Zip file creation class. * Makes zip files. * * Last Modification and Extension By : * * Hasin Hayder * HomePage : www.hasinme.info * Email : [email protected] * IDE : PHP Designer 2005 * * * Originally Based on : * * http://www.zend.com/codex.php?id=535&single=1 * By Eric Mueller <[email protected]> * * http://www.zend.com/codex.php?id=470&single=1 * by Denis125 <[email protected]> * * a patch from Peter Listiak <[email protected]> for last modified * date and time of the compressed file * * Official ZIP file format: http://www.pkware.com/appnote.txt * * @access public */ class zipfile { /** * Array to store compressed data * * @var array $datasec */ var $datasec = array(); /** * Central directory * * @var array $ctrl_dir */ var $ctrl_dir = array(); /** * End of central directory record * * @var string $eof_ctrl_dir */ var $eof_ctrl_dir = "\x50\x4b\x05\x06\x00\x00\x00\x00"; /** * Last offset position * * @var integer $old_offset */ var $old_offset = 0; /** * Converts an Unix timestamp to a four byte DOS date and time format (date * in high two bytes, time in low two bytes allowing magnitude comparison). * * @param integer the current Unix timestamp * * @return integer the current date in a four byte DOS format * * @access private */ function unix2DosTime($unixtime = 0) { $timearray = ($unixtime == 0) ? getdate() : getdate($unixtime); if ($timearray['year'] < 1980) { $timearray['year'] = 1980; $timearray['mon'] = 1; $timearray['mday'] = 1; $timearray['hours'] = 0; $timearray['minutes'] = 0; $timearray['seconds'] = 0; } // end if return (($timearray['year'] - 1980) << 25) | ($timearray['mon'] << 21) | ($timearray['mday'] << 16) | ($timearray['hours'] << 11) | ($timearray['minutes'] << 5) | ($timearray['seconds'] >> 1); } // end of the 'unix2DosTime()' method /** * Adds "file" to archive * * @param string file contents * @param string name of the file in the archive (may contains the path) * @param integer the current timestamp * * @access public */ function addFile($data, $name, $time = 0) { $name = str_replace('', '/', $name); $dtime = dechex($this->unix2DosTime($time)); $hexdtime = 'x' . $dtime[6] . $dtime[7] . 'x' . $dtime[4] . $dtime[5] . 'x' . $dtime[2] . $dtime[3] . 'x' . $dtime[0] . $dtime[1]; eval('$hexdtime = "' . $hexdtime . 
'";'); $fr = "\x50\x4b\x03\x04"; $fr .= "\x14\x00"; // ver needed to extract $fr .= "\x00\x00"; // gen purpose bit flag $fr .= "\x08\x00"; // compression method $fr .= $hexdtime; // last mod time and date // "local file header" segment $unc_len = strlen($data); $crc = crc32($data); $zdata = gzcompress($data); $zdata = substr(substr($zdata, 0, strlen($zdata) - 4), 2); // fix crc bug $c_len = strlen($zdata); $fr .= pack('V', $crc); // crc32 $fr .= pack('V', $c_len); // compressed filesize $fr .= pack('V', $unc_len); // uncompressed filesize $fr .= pack('v', strlen($name)); // length of filename $fr .= pack('v', 0); // extra field length $fr .= $name; // "file data" segment $fr .= $zdata; // "data descriptor" segment (optional but necessary if archive is not // served as file) $fr .= pack('V', $crc); // crc32 $fr .= pack('V', $c_len); // compressed filesize $fr .= pack('V', $unc_len); // uncompressed filesize // add this entry to array $this -> datasec[] = $fr; // now add to central directory record $cdrec = "\x50\x4b\x01\x02"; $cdrec .= "\x00\x00"; // version made by $cdrec .= "\x14\x00"; // version needed to extract $cdrec .= "\x00\x00"; // gen purpose bit flag $cdrec .= "\x08\x00"; // compression method $cdrec .= $hexdtime; // last mod time & date $cdrec .= pack('V', $crc); // crc32 $cdrec .= pack('V', $c_len); // compressed filesize $cdrec .= pack('V', $unc_len); // uncompressed filesize $cdrec .= pack('v', strlen($name) ); // length of filename $cdrec .= pack('v', 0 ); // extra field length $cdrec .= pack('v', 0 ); // file comment length $cdrec .= pack('v', 0 ); // disk number start $cdrec .= pack('v', 0 ); // internal file attributes $cdrec .= pack('V', 32 ); // external file attributes - 'archive' bit set $cdrec .= pack('V', $this -> old_offset ); // relative offset of local header $this -> old_offset += strlen($fr); $cdrec .= $name; // optional extra field, file comment goes here // save to central directory $this -> ctrl_dir[] = $cdrec; } // end of the 'addFile()' method /** * Dumps out file * * @return string the zipped file * * @access public */ function file() { $data = implode('', $this -> datasec); $ctrldir = implode('', $this -> ctrl_dir); return $data . $ctrldir . $this -> eof_ctrl_dir . pack('v', sizeof($this -> ctrl_dir)) . // total # of entries "on this disk" pack('v', sizeof($this -> ctrl_dir)) . // total # of entries overall pack('V', strlen($ctrldir)) . // size of central dir pack('V', strlen($data)) . // offset to start of central dir "\x00\x00"; // .zip file comment length } // end of the 'file()' method /** * A Wrapper of original addFile Function * * Created By Hasin Hayder at 29th Jan, 1:29 AM * * @param array An Array of files with relative/absolute path to be added in Zip File * * @access public */ function addFiles($files /*Only Pass Array*/) { foreach($files as $file) { if (is_file($file)) //directory check { $data = implode("",file($file)); $this->addFile($data,$file); } } } /** * A Wrapper of original file Function * * Created By Hasin Hayder at 29th Jan, 1:29 AM * * @param string Output file name * * @access public */ function output($file) { $fp=fopen($file,"w"); fwrite($fp,$this->file()); fclose($fp); } } // end of the 'zipfile' class ?> My second file newzip.php <? 
include("zip.lib.php"); $ziper = new zipfile(); $ziper->addFiles(array("index.htm")); //array of files // the next three lines force an immediate download of the zip file: header("Content-type: application/octet-stream"); header("Content-disposition: attachment; filename=test.zip"); echo $ziper -> file(); ?> I am getting this warning while executing newzip.php Warning: Cannot modify header information - headers already sent by (output started at E:\xampp\htdocs\demo\zip.lib.php:233) in E:\xampp\htdocs\demo\newzip.php on line 6 I am unable to figure out the reason for the same. Please help me on this. Thanks

    Read the article

  • how to display user name in login name control

    - by user569285
    I have a master page that holds the loginview content that appears on all subsequent pages based on the master page. i have a username control also nested in the loginview to display the name of the user when they are logged in. the code for the loginview from the master page is displayed as follows: <div class="loginView"> <asp:LoginView ID="MasterLoginView" runat="server"> <LoggedInTemplate> Welcome <span class="bold"><asp:LoginName ID="HeadLoginName" runat="server" /> <asp:Label ID="userNameLabel" runat="server" Text="Label"></asp:Label></span>! [ <asp:LoginStatus ID="HeadLoginStatus" runat="server" LogoutAction="Redirect" LogoutText="Log Out" LogoutPageUrl="~/Logout.aspx"/> ] <%--Welcome: <span class="bold"><asp:LoginName ID="MasterLoginName" runat="server" /> </span>!--%> </LoggedInTemplate> <AnonymousTemplate> Welcome: Guest [ <a href="~/Account/Login.aspx" ID="HeadLoginStatus" runat="server">Log In</a> ] </AnonymousTemplate> </asp:LoginView> <%--&nbsp;&nbsp; [&nbsp;<asp:LoginStatus ID="MasterLoginStatus" runat="server" LogoutAction="Redirect" LogoutPageUrl="~/Logout.aspx" />&nbsp;]&nbsp;&nbsp;--%> </div> Since VS2010 launches with a default login page in the accounts folder, i didnt think it necessary to create a separate log in page, so i just used the same log in page. please find the code for the login control below: <asp:Login ID="LoginUser" runat="server" EnableViewState="false" RenderOuterTable="false"> <LayoutTemplate> <span class="failureNotification"> <asp:Literal ID="FailureText" runat="server"></asp:Literal> </span> <asp:ValidationSummary ID="LoginUserValidationSummary" runat="server" CssClass="failureNotification" ValidationGroup="LoginUserValidationGroup"/> <div class="accountInfo"> <fieldset class="login"> <legend style="text-align:left; font-size:1.2em; color:White;">Account Information</legend> <p style="text-align:left; font-size:1.2em; color:White;"> <asp:Label ID="UserNameLabel" runat="server" AssociatedControlID="UserName">User ID:</asp:Label> <asp:TextBox ID="UserName" runat="server" CssClass="textEntry"></asp:TextBox> <asp:RequiredFieldValidator ID="UserNameRequired" runat="server" ControlToValidate="UserName" CssClass="failureNotification" ErrorMessage="User ID is required." ToolTip="User ID field is required." ValidationGroup="LoginUserValidationGroup">*</asp:RequiredFieldValidator> </p> <p style="text-align:left; font-size:1.2em; color:White;"> <asp:Label ID="PasswordLabel" runat="server" AssociatedControlID="Password">Password:</asp:Label> <asp:TextBox ID="Password" runat="server" CssClass="passwordEntry" TextMode="Password"></asp:TextBox> <asp:RequiredFieldValidator ID="PasswordRequired" runat="server" ControlToValidate="Password" CssClass="failureNotification" ErrorMessage="Password is required." ToolTip="Password is required." ValidationGroup="LoginUserValidationGroup">*</asp:RequiredFieldValidator> </p> <p style="text-align:left; font-size:1.2em; color:White;"> <asp:CheckBox ID="RememberMe" runat="server"/> <asp:Label ID="RememberMeLabel" runat="server" AssociatedControlID="RememberMe" CssClass="inline">Keep me logged in</asp:Label> </p> </fieldset> <p class="submitButton"> <asp:Button ID="LoginButton" runat="server" CommandName="Login" Text="Log In" ValidationGroup="LoginUserValidationGroup" onclick="LoginButton_Click"/> </p> </div> </LayoutTemplate> </asp:Login> I then wrote my own code for authentication since i had my own database. 
the following displays the code in the login buttons click event.: public partial class Login : System.Web.UI.Page { //create string objects string userIDStr, pwrdStr; protected void LoginButton_Click(object sender, EventArgs e) { //assign textbox items to string objects userIDStr = LoginUser.UserName.ToString(); pwrdStr = LoginUser.Password.ToString(); //SQL connection string string strConn; strConn = WebConfigurationManager.ConnectionStrings["CMSSQL3ConnectionString"].ConnectionString; SqlConnection Conn = new SqlConnection(strConn); //SqlDataSource CSMDataSource = new SqlDataSource(); // CSMDataSource.ConnectionString = ConfigurationManager.ConnectionStrings["CMSSQL3ConnectionString"].ToString(); //SQL select statement for comparison string sqlUserData; sqlUserData = "SELECT StaffID, StaffPassword, StaffFName, StaffLName, StaffType FROM Staffs"; sqlUserData += " WHERE (StaffID ='" + userIDStr + "')"; sqlUserData += " AND (StaffPassword ='" + pwrdStr + "')"; SqlCommand com = new SqlCommand(sqlUserData, Conn); SqlDataReader rdr; string usrdesc; string lname; string fname; string staffname; try { //string CurrentData; //CurrentData = (string)com.ExecuteScalar(); Conn.Open(); rdr = com.ExecuteReader(); rdr.Read(); usrdesc = (string)rdr["StaffType"]; fname = (string)rdr["StaffFName"]; lname = (string)rdr["StaffLName"]; staffname = lname.ToString() + " " + fname.ToString(); LoginUser.UserName = staffname.ToString(); rdr.Close(); if (usrdesc.ToLower() == "administrator") { Response.Redirect("~/CaseAdmin.aspx", false); } else if (usrdesc.ToLower() == "manager") { Response.Redirect("~/CaseManager.aspx", false); } else if (usrdesc.ToLower() == "investigator") { Response.Redirect("~/Investigator.aspx", false); } else { Response.Redirect("~/Default.aspx", false); } } catch(Exception ex) { string script = "<script>alert('" + ex.Message + "');</script>"; } finally { Conn.Close(); } } My authentication works perfectly and the page gets redirected to the designated destination. However, the login view does not display the users name. i actually cant figure out how to pass the users name that i had picked from the database to the login name control to be displayed. taking a close look i also noticed the logout text that should be displayed after successful log in does not show. that leaves me wondering if the loggedin template control on the masterpage even fires at all or its still the anonymous template control that keeps displaying.? How do i get this to work as expected? Please help....
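A hedged sketch of one approach, assuming forms authentication is configured in web.config: issue the forms-authentication ticket for the validated user before redirecting. The LoginName control renders Page.User.Identity.Name, and the LoginView only switches to its LoggedInTemplate for an authenticated request, so without a ticket both stay in their anonymous state. CompleteLogin is a hypothetical helper; the display name passed in would be the staffname string already built in LoginButton_Click.

using System.Web.Security;

public partial class Login : System.Web.UI.Page
{
    // Sketch: call this once the credential check in LoginButton_Click has succeeded.
    // displayName is the "LastName FirstName" string built from the data reader;
    // redirectUrl is whichever page the StaffType branch selects.
    private void CompleteLogin(string displayName, string redirectUrl)
    {
        // Writes the forms-authentication cookie so that, on the next request,
        // Page.User.Identity.Name returns displayName. That is the value the
        // LoginName control renders, and an authenticated user is also what makes
        // the LoginView show LoggedInTemplate (with its LoginStatus link) instead
        // of AnonymousTemplate.
        FormsAuthentication.SetAuthCookie(displayName, false); // false = non-persistent cookie
        Response.Redirect(redirectUrl, false);
    }
}

If the label should show something other than the identity name, another option is to stash the value in Session and have the master page read it into userNameLabel in Page_Load; the authentication ticket is still what flips the LoginView template.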

    Read the article

  • A first look at ConfORM - Part 1

    - by thangchung
All source code for this post can be found here. Have you ever heard of ConfORM? I first read about it three months ago, when I wrote a post about NHibernate and Autofac. At that time the project had only just started and was still in beta, so I did not pay it much attention. But recently, while reading Jason Dentler's book NHibernate 3.0 Cookbook, I started to notice it again. The author mentions quite a lot of OSS projects in his book, and so I reviewed ConfORM once more. I have joined the ConfORM development group on Google and read some articles about it. Fabio Maulo has put a lot of work into this OSS project, and I hope it will become a great fit for NHibernate (he contributes to NHibernate, after all).

So what is ConfORM? The name stands for Configuration ORM, and it uses a set of heuristics to identify entities and their mappings directly from C# code. Today development is mostly Model First driven, so the first thing is to build the entity model. This is really important; you could say it is the heart of business software. Then we have to tell the database about the entities in this model. The database schema is generated from the entity model, and from that point on we are no longer interested in the database at all; we focus only on the entity model.

Fluent NHibernate is really good; I like that OSS project. Sharp Architecture has done a great job of integrating Fluent NHibernate into applications, and its multiple-database technique is truly awesome. It can take a configuration, a connection string and a DLL containing the entity model, create a SessionFactory from them, and cache it in memory. Since the number of SessionFactory instances can grow very large and fill up memory, it also provides a way of caching a SessionFactory in a file.

This post will not completely explain how to build a multiple-database model. I have simply tried to pull together a number of posts from the community and apply some of my own knowledge to build a Session management model for ConfORM. Like Fluent NHibernate, ConfORM also supports mapping on interfaces; see this to understand it.

So the first thing is to build the Entity Model. Here is the model I will use for this article: a simple model for managing news and polls. It will be too easy for some people, but I hope it keeps the complexity of this post down. A rough sketch of the entity classes is shown just below, followed by some code to build a super type for the Entity Model.
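Since the post uses these entities throughout but never lists their classes, here is a rough sketch of one possible shape for them. The property names are taken from the column customisation shown later in the post; the property types, collection properties and association directions are assumptions.

using System;
using System.Collections.Generic;

// Rough sketch only; these classes are not part of the original post.
// They derive from the EntityBase<Guid> super type defined next.
public class Category : EntityBase<Guid>
{
    public virtual string Name { get; set; }
    public virtual DateTime CreatedDate { get; set; }
    public virtual ICollection<News> NewsItems { get; set; }   // assumed one-to-many side
}

public class News : EntityBase<Guid>
{
    public virtual string Title { get; set; }
    public virtual string ShortDescription { get; set; }
    public virtual string Content { get; set; }
    public virtual Category Category { get; set; }             // assumed many-to-one side
    public virtual Poll Poll { get; set; }                     // one-to-one per the mapping below
}

public class Poll : EntityBase<Guid>
{
    public virtual int Value { get; set; }                     // assumed type
    public virtual DateTime VoteDate { get; set; }             // assumed type
    public virtual string WhoVote { get; set; }                // assumed type
}

public class User : EntityBase<Guid>
{
    public virtual string UserName { get; set; }
    public virtual string Password { get; set; }
    public virtual ICollection<Poll> Polls { get; set; }       // assumed, based on the User -> Poll cascade
}

The super type these entities derive from is defined next.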
public interface IEntity<TId>    {        TId Id { get; set; }    } public abstract class EntityBase<TId> : IEntity<TId>    {        public virtual TId Id { get; set; }         public override bool Equals(object obj)        {            return Equals(obj as EntityBase<TId>);        }         private static bool IsTransient(EntityBase<TId> obj)        {            return obj != null &&            Equals(obj.Id, default(TId));        }         private Type GetUnproxiedType()        {            return GetType();        }         public virtual bool Equals(EntityBase<TId> other)        {            if (other == null)                return false;            if (ReferenceEquals(this, other))                return true;            if (!IsTransient(this) &&            !IsTransient(other) &&            Equals(Id, other.Id))            {                var otherType = other.GetUnproxiedType();                var thisType = GetUnproxiedType();                return thisType.IsAssignableFrom(otherType) ||                otherType.IsAssignableFrom(thisType);            }            return false;        }         public override int GetHashCode()        {            if (Equals(Id, default(TId)))                return base.GetHashCode();            return Id.GetHashCode();        }    } Database schema will be created as:The next step is to build the ConORM builder to create a NHibernate Configuration. Patrick have a excellent article about it at here. Contract of it below: public interface IConfigBuilder    {        Configuration BuildConfiguration(string connectionString, string sessionFactoryName);    } The idea here is that I will pass in a connection string and a set of the DLL containing the Entity Model and it makes me a NHibernate Configuration (shame that I stole this ideas of Sharp Architecture). 
And here is its code: public abstract class ConfORMConfigBuilder : RootObject, IConfigBuilder    {        private static IConfigurator _configurator;         protected IEnumerable<Type> DomainTypes;         private readonly IEnumerable<string> _assemblies;         protected ConfORMConfigBuilder(IEnumerable<string> assemblies)            : this(new Configurator(), assemblies)        {            _assemblies = assemblies;        }         protected ConfORMConfigBuilder(IConfigurator configurator, IEnumerable<string> assemblies)        {            _configurator = configurator;            _assemblies = assemblies;        }         public abstract void GetDatabaseIntegration(IDbIntegrationConfigurationProperties dBIntegration, string connectionString);         protected abstract HbmMapping GetMapping();         public Configuration BuildConfiguration(string connectionString, string sessionFactoryName)        {            Contract.Requires(!string.IsNullOrEmpty(connectionString), "ConnectionString is null or empty");            Contract.Requires(!string.IsNullOrEmpty(sessionFactoryName), "SessionFactory name is null or empty");            Contract.Requires(_configurator != null, "Configurator is null");             return CatchExceptionHelper.TryCatchFunction(                () =>                {                    DomainTypes = GetTypeOfEntities(_assemblies);                     if (DomainTypes == null)                        throw new Exception("Type of domains is null");                     var configure = new Configuration();                    configure.SessionFactoryName(sessionFactoryName);                     configure.Proxy(p => p.ProxyFactoryFactory<ProxyFactoryFactory>());                    configure.DataBaseIntegration(db => GetDatabaseIntegration(db, connectionString));                     if (_configurator.GetAppSettingString("IsCreateNewDatabase").ConvertToBoolean())                    {                        configure.SetProperty("hbm2ddl.auto", "create-drop");                    }                     configure.Properties.Add("default_schema", _configurator.GetAppSettingString("DefaultSchema"));                    configure.AddDeserializedMapping(GetMapping(),                                                     _configurator.GetAppSettingString("DocumentFileName"));                     SchemaMetadataUpdater.QuoteTableAndColumns(configure);                     return configure;                }, Logger);        }         protected IEnumerable<Type> GetTypeOfEntities(IEnumerable<string> assemblies)        {            var type = typeof(EntityBase<Guid>);            var domainTypes = new List<Type>();             foreach (var assembly in assemblies)            {                var realAssembly = Assembly.LoadFrom(assembly);                 if (realAssembly == null)                    throw new NullReferenceException();                 domainTypes.AddRange(realAssembly.GetTypes().Where(                    t =>                    {                        if (t.BaseType != null)                            return string.Compare(t.BaseType.FullName,                                          type.FullName) == 0;                        return false;                    }));            }             return domainTypes;        }    } I do not want to dependency on any RDBMS, so I made a builder as an abstract class, and so I will create a concrete instance for SQL Server 2008 as follows: public class SqlServerConfORMConfigBuilder : ConfORMConfigBuilder    {        public 
SqlServerConfORMConfigBuilder(IEnumerable<string> assemblies)            : base(assemblies)        {        }         public override void GetDatabaseIntegration(IDbIntegrationConfigurationProperties dBIntegration, string connectionString)        {            dBIntegration.Dialect<MsSql2008Dialect>();            dBIntegration.Driver<SqlClientDriver>();            dBIntegration.KeywordsAutoImport = Hbm2DDLKeyWords.AutoQuote;            dBIntegration.IsolationLevel = IsolationLevel.ReadCommitted;            dBIntegration.ConnectionString = connectionString;            dBIntegration.LogSqlInConsole = true;            dBIntegration.Timeout = 10;            dBIntegration.LogFormatedSql = true;            dBIntegration.HqlToSqlSubstitutions = "true 1, false 0, yes 'Y', no 'N'";        }         protected override HbmMapping GetMapping()        {            var orm = new ObjectRelationalMapper();             orm.Patterns.PoidStrategies.Add(new GuidPoidPattern());             var patternsAppliers = new CoolPatternsAppliersHolder(orm);            //patternsAppliers.Merge(new DatePropertyByNameApplier()).Merge(new MsSQL2008DateTimeApplier());            patternsAppliers.Merge(new ManyToOneColumnNamingApplier());            patternsAppliers.Merge(new OneToManyKeyColumnNamingApplier(orm));             var mapper = new Mapper(orm, patternsAppliers);             var entities = new List<Type>();             DomainDefinition(orm);            Customize(mapper);             entities.AddRange(DomainTypes);             return mapper.CompileMappingFor(entities);        }         private void DomainDefinition(IObjectRelationalMapper orm)        {            orm.TablePerClassHierarchy(new[] { typeof(EntityBase<Guid>) });            orm.TablePerClass(DomainTypes);             orm.OneToOne<News, Poll>();            orm.ManyToOne<Category, News>();             orm.Cascade<Category, News>(Cascade.All);            orm.Cascade<News, Poll>(Cascade.All);            orm.Cascade<User, Poll>(Cascade.All);        }         private static void Customize(Mapper mapper)        {            CustomizeRelations(mapper);            CustomizeTables(mapper);            CustomizeColumns(mapper);        }         private static void CustomizeRelations(Mapper mapper)        {        }         private static void CustomizeTables(Mapper mapper)        {        }         private static void CustomizeColumns(Mapper mapper)        {            mapper.Class<Category>(                cm =>                {                    cm.Property(x => x.Name, m => m.NotNullable(true));                    cm.Property(x => x.CreatedDate, m => m.NotNullable(true));                });             mapper.Class<News>(                cm =>                {                    cm.Property(x => x.Title, m => m.NotNullable(true));                    cm.Property(x => x.ShortDescription, m => m.NotNullable(true));                    cm.Property(x => x.Content, m => m.NotNullable(true));                });             mapper.Class<Poll>(                cm =>                {                    cm.Property(x => x.Value, m => m.NotNullable(true));                    cm.Property(x => x.VoteDate, m => m.NotNullable(true));                    cm.Property(x => x.WhoVote, m => m.NotNullable(true));                });             mapper.Class<User>(                cm =>                {                    cm.Property(x => x.UserName, m => m.NotNullable(true));                    cm.Property(x => x.Password, m => m.NotNullable(true));                });        }    } As you 
can see that we can do so many things in this class, such as custom entity relationships, custom binding on the columns, custom table name, ... Here I only made two so-Appliers for OneToMany and ManyToOne relationships, you can refer to it here public class ManyToOneColumnNamingApplier : IPatternApplier<PropertyPath, IManyToOneMapper>    {        #region IPatternApplier<PropertyPath,IManyToOneMapper> Members         public void Apply(PropertyPath subject, IManyToOneMapper applyTo)        {            applyTo.Column(subject.ToColumnName() + "Id");        }         #endregion         #region IPattern<PropertyPath> Members         public bool Match(PropertyPath subject)        {            return subject != null;        }         #endregion    } public class OneToManyKeyColumnNamingApplier : OneToManyPattern, IPatternApplier<PropertyPath, ICollectionPropertiesMapper>    {        public OneToManyKeyColumnNamingApplier(IDomainInspector domainInspector) : base(domainInspector) { }         #region Implementation of IPattern<PropertyPath>         public bool Match(PropertyPath subject)        {            return Match(subject.LocalMember);        }         #endregion Implementation of IPattern<PropertyPath>         #region Implementation of IPatternApplier<PropertyPath,ICollectionPropertiesMapper>         public void Apply(PropertyPath subject, ICollectionPropertiesMapper applyTo)        {            applyTo.Key(km => km.Column(GetKeyColumnName(subject)));        }         #endregion Implementation of IPatternApplier<PropertyPath,ICollectionPropertiesMapper>         protected virtual string GetKeyColumnName(PropertyPath subject)        {            Type propertyType = subject.LocalMember.GetPropertyOrFieldType();            Type childType = propertyType.DetermineCollectionElementType();            var entity = subject.GetContainerEntity(DomainInspector);            var parentPropertyInChild = childType.GetFirstPropertyOfType(entity);            var baseName = parentPropertyInChild == null ? subject.PreviousPath == null ? entity.Name : entity.Name + subject.PreviousPath : parentPropertyInChild.Name;            return GetKeyColumnName(baseName);        }         protected virtual string GetKeyColumnName(string baseName)        {            return string.Format("{0}Id", baseName);        }    } Everyone also can download the ConfORM source at google code and see example inside it. Next part I will write about multiple database factory. Hope you enjoy about it. happy coding and see you next part.
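For reference, here is a hypothetical wiring-up sketch of the pieces above. The assembly file name, connection string and property values are placeholders; the builder and its BuildConfiguration signature are the ones defined in this post, and the rest is the standard NHibernate API.

// Hypothetical usage sketch; names and values are placeholders.
var builder = new SqlServerConfORMConfigBuilder(new[] { "NewsAndPolls.Domain.dll" });

var configuration = builder.BuildConfiguration(
    @"Data Source=.\SQLEXPRESS;Initial Catalog=NewsAndPolls;Integrated Security=True",
    "NewsAndPollsFactory");

// Build the (expensive) session factory once, then open cheap sessions per unit of work.
var sessionFactory = configuration.BuildSessionFactory();

using (var session = sessionFactory.OpenSession())
using (var transaction = session.BeginTransaction())
{
    session.Save(new Category { Name = "Sport", CreatedDate = DateTime.Now });
    transaction.Commit();
}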

    Read the article
