Search Results

Search found 19393 results on 776 pages for 'reference count'.


  • Model View Control Issue: Null Pointer Initialization Question [closed]

    - by David Dimalanta
    Good morning again. This is David. Please, I need an urgent help regarding control model view where I making a code that uniquely separating into groups: An Activity Java Class to Display the Interface A View and Function Java Class for Drawing Cards and Display it on the Activity Class The problem is that the result returns a Null Pointer Exception. I have initialize for the ID for Text View and Image View. Under this class "draw_deck.java". Please help me. Here's my code for draw_deck.java: package com.bodapps.inbetween.model; import android.content.Context; import android.view.View; import android.widget.ImageView; import android.widget.TextView; import com.bodapps.inbetween.R; public class draw_deck extends View { public TextView count_label; public ImageView draw_card; private int count; public draw_deck(Context context) { super(context); // TODO Auto-generated constructor stub //I have initialized two widgets for ID. I still don't get it why I got forced closed by Null Pointer Exception thing. draw_card = (ImageView) findViewById(R.id.IV_Draw_Card); count_label = (TextView) findViewById(R.id.Text_View_Count_Card); } public void draw(int s, int c, String strSuit, String strValue, Pile pile, Context context) { //super(context); //Just printing the card drawn from pile int suit, value = 1; draw_card = (ImageView) findViewById(R.id.IV_Draw_Card); count_label = (TextView) findViewById(R.id.Text_View_Count_Card); Card card; if(!pile.isEmpty()) //Setting it to IF statement displays the card one by one. { card = pile.drawFromPile(); //Need to check first if card is null. if (card != null) { //draws an extra if (card != null) { //Get suit of card to print out. suit = card.getSuit(); switch (suit) { case CardInfo.DIAMOND: strSuit = "DIAMOND"; s=0; break; case CardInfo.HEART: strSuit = "HEART"; s=1; break; case CardInfo.SPADE: strSuit = "SPADE"; s=2; break; case CardInfo.CLUB: strSuit = "CLUB"; s=3; break; } //Get value of card to print out. value = card.getValue(); switch (value) { case CardInfo.ACE: strValue = "ACE"; c=0; break; case CardInfo.TWO: c=1; break; case CardInfo.THREE: strValue = "THREE"; c=2; break; case CardInfo.FOUR: strValue = "FOUR"; c=3; break; case CardInfo.FIVE: strValue = "FIVE"; c=4; break; case CardInfo.SIX: strValue = "SIX"; c=4; break; case CardInfo.SEVEN: strValue = "SEVEN"; c=4; break; case CardInfo.EIGHT: strValue = "EIGHT"; c=4; break; case CardInfo.NINE: strValue = "NINE"; c=4; break; case CardInfo.TEN: strValue = "TEN"; c=4; break; case CardInfo.JACK: strValue = "JACK"; c=4; break; case CardInfo.QUEEN: strValue = "QUEEN"; c=4; break; case CardInfo.KING: strValue = "KING"; c=4; break; } } } }// //Below two lines of code, this is where issued the Null Pointer Exception. draw_card.setImageResource(deck[s][c]); count_label.setText(new StringBuilder(strValue).append(" of ").append(strSuit).append(String.valueOf(" " + count++)).toString()); } //Choice of Suits in a Deck public Integer[][] deck = { //Array Group 1 is [0][0] (No. of Cards: 4 - DIAMOND) { R.drawable.card_dummy_1, R.drawable.card_dummy_2, R.drawable.card_dummy_4, R.drawable.card_dummy_5, R.drawable.card_dummy_3 }, //Array Group 2 is [1][0] (No. of Cards: 4 - HEART) { R.drawable.card_dummy_1, R.drawable.card_dummy_2, R.drawable.card_dummy_4, R.drawable.card_dummy_5, R.drawable.card_dummy_3 }, //Array Group 3 is [2][0] (No. of Cards: 4 - SPADE) { R.drawable.card_dummy_1, R.drawable.card_dummy_2, R.drawable.card_dummy_4, R.drawable.card_dummy_5, R.drawable.card_dummy_3 }, //Array Group 4 is [3][0] (No. 
of Cards: 4 - CLUB) { R.drawable.card_dummy_1, R.drawable.card_dummy_2, R.drawable.card_dummy_4, R.drawable.card_dummy_5, R.drawable.card_dummy_3 }, }; } And this one of the activity class, Player_Mode_2.java: package com.bodapps.inbetween; import java.util.Random; import android.app.Activity; import android.app.Dialog; import android.content.Context; import android.os.Bundle; import android.view.View; import android.view.View.OnClickListener; import android.widget.Button; import android.widget.EditText; import android.widget.ImageView; import android.widget.TextView; import android.widget.Toast; import com.bodapps.inbetween.model.Card; import com.bodapps.inbetween.model.Pile; import com.bodapps.inbetween.model.draw_deck; /* * * Public class for Two-Player mode. * */ public class Player_Mode_2 extends Activity { //Image Views private ImageView draw_card; private ImageView player_1; private ImageView player_2; private ImageView icon; //Buttons private Button set_deck; //Edit Texts private EditText enter_no_of_decks; //text Views private TextView count_label; //Integer Data Types private int no_of_cards, count; private int card_multiplier; //Contexts final Context context = this; //Pile Model public Pile pile; //Card Model public Card card; //create View @Override public void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.play_2_player_mode); //-----[ Search for Views ]----- //Initialize for Image View draw_card = (ImageView) findViewById(R.id.IV_Draw_Card); player_1 = (ImageView) findViewById(R.id.IV_Player_1_Card); player_2 = (ImageView) findViewById(R.id.IV_Player_2_Card); //Initialize for Text view or Label count_label = (TextView) findViewById(R.id.Text_View_Count_Card); //-----[ Adding Values ]----- //Integer Values count = 0; no_of_cards = 0; //-----[ Adding Dialog ]----- //Initializing Dialog final Dialog deck_dialog = new Dialog(context); deck_dialog.setContentView(R.layout.dialog); deck_dialog.setTitle("Deck Dialog"); //-----[ Initializing Views for Dialog's Contents ]----- //Initialize for Edit Text enter_no_of_decks = (EditText) deck_dialog.findViewById(R.id.Edit_Text_Set_Number_of_Decks); //Initialize for Button set_deck = (Button) deck_dialog.findViewById(R.id.Button_Deck); //-----[ Setting onClickListener() ]----- //Set Event Listener for Image view draw_card.setOnClickListener(new Draw_Card_Model()); //Set Event Listener for Setting the Deck set_deck.setOnClickListener(new OnClickListener() { public void onClick(View v) { if(card_multiplier <= 8) { //Use "Integer.parseInt()" method to instantly convert from String to int value. card_multiplier = Integer.parseInt(enter_no_of_decks.getText().toString()); //Shuffling cards... pile = new Pile(card_multiplier); //Multiply no. of decks //Dismiss or close the dialog. deck_dialog.dismiss(); } else { Toast.makeText(getApplicationContext(), "Please choose a number from 1 to 8.", Toast.LENGTH_SHORT).show(); } } }); //Show dialog. 
deck_dialog.show(); } //Shuffling the Array public void Shuffle_Cards(Integer[][] Shuffle_Deck) { Random random = new Random(); for(int i = Shuffle_Deck[no_of_cards].length - 1; i >=0; i--) { int Index = random.nextInt(i + 1); //Simple Swapping Integer swap = Shuffle_Deck[card_multiplier-1][Index]; Shuffle_Deck[card_multiplier-1][Index] = Shuffle_Deck[card_multiplier-1][i]; Shuffle_Deck[card_multiplier-1][i] = swap; } } //Private Class for Random Card Draw private class Draw_Card_Model implements OnClickListener { public void onClick(View v) { //Just printing the card drawn from pile int suit = 0, value = 0; String strSuit = "", strValue = ""; draw_deck draw = new draw_deck(context); //This line is where issued the Null Pointer Exception. if (count == card_multiplier*52) { // A message shows up when all cards are draw out. Toast.makeText(getApplicationContext(), "All cards have been used up.", Toast.LENGTH_SHORT).show(); draw_card.setEnabled(false); } else { draw.draw(suit, value, strSuit, strValue, pile, context); count_label.setText(count); //This is where I got force closed error, although "int count" have initialized the number. This was supposed to accept in the setText() method. count++; } } } } Take note that the issues on Null Pointer Exception is the Image View and the Edit Text. I got to test it. Thanks. If you have any info about my question, let me know it frankly.

    Read the article

  • Is this an acceptable approach to undo/redo in Python?

    - by Codemonkey
    I'm making an application (wxPython) to process some data from Excel documents. I want the user to be able to undo and redo actions, even gigantic actions like processing the contents of 10,000 cells simultaneously. I Googled the topic, and all the solutions I could find involve a lot of black magic or are overly complicated. Here is how I imagine my simple undo/redo scheme. I write two classes - one called ActionStack and an abstract one called Action. Every "undoable" operation must be a subclass of Action and define the methods do and undo. The Action subclass is passed the instance of the "document", or data model, and is responsible for committing the operation and remembering how to undo the change. Now, every document is associated with an instance of the ActionStack. The ActionStack maintains a stack of actions (surprise!). Every time actions are undone and new actions are performed, all undone actions are removed forever. The ActionStack will also automatically remove the oldest Action when the stack reaches its configurable maximum size. I imagine the workflow would produce code looking something like this:

        class TableDocument(object):
            def __init__(self, table):
                self.table = table
                self.action_stack = ActionStack(history_limit=50)

            # ...

            def delete_cells(self, cells):
                self.action_stack.push(DeleteAction(self, cells))

            def add_column(self, index, name=''):
                self.action_stack.push(AddColumnAction(self, index, name))

            # ...

            def undo(self, count=1):
                self.action_stack.undo(count)

            def redo(self, count=1):
                self.action_stack.redo(count)

    Given that none of the methods I've found are this simple, I thought I'd get the experts' opinion before I go ahead with this plan. More specifically, what I'm wondering about is - are there any glaring holes in this plan that I'm not seeing?

    Read the article

  • Changes to the LINQ-to-StreamInsight Dialect

    - by Roman Schindlauer
    In previous versions of StreamInsight (1.0 through 2.0), CepStream<> represents temporal streams of many varieties: Streams with ‘open’ inputs (e.g., those defined and composed over CepStream<T>.Create(string streamName) Streams with ‘partially bound’ inputs (e.g., those defined and composed over CepStream<T>.Create(Type adapterFactory, …)) Streams with fully bound inputs (e.g., those defined and composed over To*Stream – sequences or DQC) The stream may be embedded (where Server.Create is used) The stream may be remote (where Server.Connect is used) When adding support for new programming primitives in StreamInsight 2.1, we faced a choice: Add a fourth variety (use CepStream<> to represent streams that are bound the new programming model constructs), or introduce a separate type that represents temporal streams in the new user model. We opted for the latter. Introducing a new type has the effect of reducing the number of (confusing) runtime failures due to inappropriate uses of CepStream<> instances in the incorrect context. The new types are: IStreamable<>, which logically represents a temporal stream. IQStreamable<> : IStreamable<>, which represents a queryable temporal stream. Its relationship to IStreamable<> is analogous to the relationship of IQueryable<> to IEnumerable<>. The developer can compose temporal queries over remote stream sources using this type. The syntax of temporal queries composed over IQStreamable<> is mostly consistent with the syntax of our existing CepStream<>-based LINQ provider. However, we have taken the opportunity to refine certain aspects of the language surface. Differences are outlined below. Because 2.1 introduces new types to represent temporal queries, the changes outlined in this post do no impact existing StreamInsight applications using the existing types! SelectMany StreamInsight does not support the SelectMany operator in its usual form (which is analogous to SQL’s “CROSS APPLY” operator): static IEnumerable<R> SelectMany<T, R>(this IEnumerable<T> source, Func<T, IEnumerable<R>> collectionSelector) It instead uses SelectMany as a convenient syntactic representation of an inner join. The parameter to the selector function is thus unavailable. Because the parameter isn’t supported, its type in StreamInsight 1.0 – 2.0 wasn’t carefully scrutinized. Unfortunately, the type chosen for the parameter is nonsensical to LINQ programmers: static CepStream<R> SelectMany<T, R>(this CepStream<T> source, Expression<Func<CepStream<T>, CepStream<R>>> streamSelector) Using Unit as the type for the parameter accurately reflects the StreamInsight’s capabilities: static IQStreamable<R> SelectMany<T, R>(this IQStreamable<T> source, Expression<Func<Unit, IQStreamable<R>>> streamSelector) For queries that succeed – that is, queries that do not reference the stream selector parameter – there is no difference between the code written for the two overloads: from x in xs from y in ys select f(x, y) Top-K The Take operator used in StreamInsight causes confusion for LINQ programmers because it is applied to the (unbounded) stream rather than the (bounded) window, suggesting that the query as a whole will return k rows: (from win in xs.SnapshotWindow() from x in win orderby x.A select x.B).Take(k) The use of SelectMany is also unfortunate in this context because it implies the availability of the window parameter within the remainder of the comprehension. 
The following compiles but fails at runtime: (from win in xs.SnapshotWindow() from x in win orderby x.A select win).Take(k) The Take operator in 2.1 is applied to the window rather than the stream: Before After (from win in xs.SnapshotWindow() from x in win orderby x.A select x.B).Take(k) from win in xs.SnapshotWindow() from b in     (from x in win     orderby x.A     select x.B).Take(k) select b Multicast We are introducing an explicit multicast operator in order to preserve expression identity, which is important given the semantics about moving code to and from StreamInsight. This also better matches existing LINQ dialects, such as Reactive. This pattern enables expressing multicasting in two ways: Implicit Explicit var ys = from x in xs          where x.A > 1          select x; var zs = from y1 in ys          from y2 in ys.ShiftEventTime(_ => TimeSpan.FromSeconds(1))          select y1 + y2; var ys = from x in xs          where x.A > 1          select x; var zs = ys.Multicast(ys1 =>     from y1 in ys1     from y2 in ys1.ShiftEventTime(_ => TimeSpan.FromSeconds(1))     select y1 + y2; Notice the product translates an expression using implicit multicast into an expression using the explicit multicast operator. The user does not see this translation. Default window policies Only default window policies are supported in the new surface. Other policies can be simulated by using AlterEventLifetime. Before After xs.SnapshotWindow(     WindowInputPolicy.ClipToWindow,     SnapshotWindowInputPolicy.Clip) xs.SnapshotWindow() xs.TumblingWindow(     TimeSpan.FromSeconds(1),     HoppingWindowOutputPolicy.PointAlignToWindowEnd) xs.TumblingWindow(     TimeSpan.FromSeconds(1)) xs.TumblingWindow(     TimeSpan.FromSeconds(1),     HoppingWindowOutputPolicy.ClipToWindowEnd) Not supported … LeftAntiJoin Representation of LASJ as a correlated sub-query in the LINQ surface is problematic as the StreamInsight engine does not support correlated sub-queries (see discussion of SelectMany). The current syntax requires the introduction of an otherwise unsupported ‘IsEmpty()’ operator. As a result, the pattern is not discoverable and implies capabilities not present in the server. The direct representation of LASJ is used instead: Before After from x in xs where     (from y in ys     where x.A > y.B     select y).IsEmpty() select x xs.LeftAntiJoin(ys, (x, y) => x.A > y.B) from x in xs where     (from y in ys     where x.A == y.B     select y).IsEmpty() select x xs.LeftAntiJoin(ys, x => x.A, y => y.B) ApplyWithUnion The ApplyWithUnion methods have been deprecated since their signatures are redundant given the standard SelectMany overloads: Before After xs.GroupBy(x => x.A).ApplyWithUnion(gs => from win in gs.SnapshotWindow() select win.Count()) xs.GroupBy(x => x.A).SelectMany(     gs =>     from win in gs.SnapshotWindow()     select win.Count()) xs.GroupBy(x => x.A).ApplyWithUnion(gs => from win in gs.SnapshotWindow() select win.Count(), r => new { r.Key, Count = r.Payload }) from x in xs group x by x.A into gs from win in gs.SnapshotWindow() select new { gs.Key, Count = win.Count() } Alternate UDO syntax The representation of UDOs in the StreamInsight LINQ dialect confuses cardinalities. 
Based on the semantics of user-defined operators in StreamInsight, one would expect to construct queries in the following form: from win in xs.SnapshotWindow() from y in MyUdo(win) select y Instead, the UDO proxy method is referenced within a projection, and the (many) results returned by the user code are automatically flattened into a stream: from win in xs.SnapshotWindow() select MyUdo(win) The “many-or-one” confusion is exemplified by the following example that compiles but fails at runtime: from win in xs.SnapshotWindow() select MyUdo(win) + win.Count() The above query must fail because the UDO is in fact returning many values per window while the count aggregate is returning one. Original syntax New alternate syntax from win in xs.SnapshotWindow() select win.UdoProxy(1) from win in xs.SnapshotWindow() from y in win.UserDefinedOperator(() => new Udo(1)) select y -or- from win in xs.SnapshotWindow() from y in win.UdoMacro(1) select y Notice that this formulation also sidesteps the dynamic type pitfalls of the existing “proxy method” approach to UDOs, in which the type of the UDO implementation (TInput, TOuput) and the type of its constructor arguments (TConfig) need to align in a precise and non-obvious way with the argument and return types for the corresponding proxy method. UDSO syntax UDSO currently leverages the DataContractSerializer to clone initial state for logical instances of the user operator. Initial state will instead be described by an expression in the new LINQ surface. Before After xs.Scan(new Udso()) xs.Scan(() => new Udso()) Name changes ShiftEventTime => AlterEventStartTime: The alter event lifetime overload taking a new start time value has been renamed. CountByStartTimeWindow => CountWindow

    Read the article

  • SSIS Catalog: How to use environment in every type of package execution

    - by Kevin Shyr
    Here is a good blog on how to create a SSIS Catalog and setting up environments.  http://sqlblog.com/blogs/jamie_thomson/archive/2010/11/13/ssis-server-catalogs-environments-environment-variables-in-ssis-in-denali.aspx Here I will summarize 3 ways I know so far to execute a package while using variables set up in SSIS Catalog environment. First way, we have SSIS project having reference to environment, and having one of the project parameter using a value set up in the environment called "Development".  With this set up, you are limited to calling the packages by right-clicking on the packages in the SSIS catalog list and select Execute, but you are free to choose absolute or relative path of the environment. The following screenshot shows the 2 available paths to your SSIS environments.  Personally, I use absolute path because of Option 3, just to keep everything simple for myself. The second option is to call through SQL Job.  This does require you to configure your project to already reference an environment and use its variable.  When a job step is set up, the configuration part will require you to select that reference again.  This is more useful when you want to automate the same package that needs to be run in different environments. The third option is the most important to me as I have a SSIS framework that calls hundreds of packages.  The main part of the stored procedure is in this post (http://geekswithblogs.net/LifeLongTechie/archive/2012/11/14/time-to-stop-using-ldquoexecute-package-taskrdquondash-a-way-to.aspx).  But the top part had to be modified to include the logic to use environment reference. CREATE PROCEDURE [AUDIT].[LaunchPackageExecutionInSSISCatalog] @PackageName NVARCHAR(255) , @ProjectFolder NVARCHAR(255) , @ProjectName NVARCHAR(255) , @AuditKey INT , @DisableNotification BIT , @PackageExecutionLogID INT , @EnvironmentName NVARCHAR(128) = NULL , @Use32BitRunTime BIT = FALSE AS BEGIN TRY DECLARE @execution_id BIGINT = 0; -- Create a package execution IF @EnvironmentName IS NULL BEGIN   EXEC [SSISDB].[catalog].[create_execution]     @package_name=@PackageName,     @execution_id=@execution_id OUTPUT,     @folder_name=@ProjectFolder,     @project_name=@ProjectName,     @use32bitruntime=@Use32BitRunTime; END ELSE BEGIN   DECLARE @EnvironmentID AS INT   SELECT @EnvironmentID = [reference_id]    FROM SSISDB.[internal].[environment_references] WITH(NOLOCK)    WHERE [environment_name] = @EnvironmentName     AND [environment_folder_name] = @ProjectFolder      EXEC [SSISDB].[catalog].[create_execution]     @package_name=@PackageName,     @execution_id=@execution_id OUTPUT,     @folder_name=@ProjectFolder,     @project_name=@ProjectName,     @reference_id=@EnvironmentID,     @use32bitruntime=@Use32BitRunTime; END
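
    A minimal sketch of how this wrapper procedure could be called from C# with ADO.NET is shown below. The procedure name and parameter list are taken from the definition above; the connection string and parameter values are placeholders for illustration only.

    ```csharp
    using System.Data;
    using System.Data.SqlClient;

    class LaunchPackageExample
    {
        static void Main()
        {
            // Placeholder connection string pointing at the instance hosting SSISDB.
            const string connectionString = "Server=.;Database=SSISDB;Integrated Security=true;";

            using (var connection = new SqlConnection(connectionString))
            using (var command = new SqlCommand("[AUDIT].[LaunchPackageExecutionInSSISCatalog]", connection))
            {
                command.CommandType = CommandType.StoredProcedure;

                // Parameter names follow the procedure signature shown above; values are examples.
                command.Parameters.AddWithValue("@PackageName", "LoadCustomers.dtsx");
                command.Parameters.AddWithValue("@ProjectFolder", "MyFolder");
                command.Parameters.AddWithValue("@ProjectName", "MyProject");
                command.Parameters.AddWithValue("@AuditKey", 1);
                command.Parameters.AddWithValue("@DisableNotification", false);
                command.Parameters.AddWithValue("@PackageExecutionLogID", 1);
                command.Parameters.AddWithValue("@EnvironmentName", "Development");
                command.Parameters.AddWithValue("@Use32BitRunTime", false);

                connection.Open();
                command.ExecuteNonQuery();
            }
        }
    }
    ```

    Omitting @EnvironmentName (it defaults to NULL) falls through to the branch that creates the execution without an environment reference.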

    Read the article

  • Centralizing a resource file among multiple projects in one solution (C#/WPF)

    - by MarkPearl
    One of the challenges of multi-language support in WPF arises when you have several projects in one solution (e.g. a business layer and a UI layer). Typically each project would have its own resource file – meaning if you have 3 projects in a solution you will have 3 resource files. For me this isn't an ideal solution, as you normally want to send the resource files to a translator, and the more resource files you have, the more fragmented the dictionary will be and the more complicated it will be for the translator. This can easily be overcome by creating a single project that just holds your translation resources and then exposing it to the other projects as a reference, as explained in the following steps.
    Step 1 - Add a class library to your solution that will contain just the resource files. Your solution will now have an additional project as illustrated below.
    Step 2 - Reference this project from the other projects.
    Step 3 - Move all the resources from the other resource files into the translation project's resource file.
    Step 4 - Set the translation project's resource file access modifier to public.
    Step 5 - Point all other projects at the translation resource file instead of their local resource files. To do this in XAML you need to expose the project as a namespace at the top of the XAML file… note that the example below is for a project called MaxCutLanguages – you need to put the correct project name in its place.

        xmlns:MaxCutLanguages="clr-namespace:MaxCutLanguages;assembly=MaxCutLanguages"

    And then in the actual XAML you need to replace any text with a reference to the resource file.

        <TextBlock Text="{x:Static MaxCutLanguages:Properties.Resources.HelloWorld}"/>

    End Result - You can now delete all the resource files in the other projects as you now have one centralized resource file.
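
    For completeness, the same centralized resource file can also be consumed from code-behind. The snippet below is only an illustration: MaxCutLanguages is the example project name used above, HelloWorld is a sample resource key, and the culture switch assumes a matching satellite assembly exists.

    ```csharp
    using System.Globalization;
    using System.Threading;

    public static class GreetingProvider
    {
        public static string GetGreeting()
        {
            // Optionally switch the UI culture first so the translated value is returned.
            Thread.CurrentThread.CurrentUICulture = new CultureInfo("de-DE");

            // Works because the shared resource class was made public in Step 4.
            return MaxCutLanguages.Properties.Resources.HelloWorld;
        }
    }
    ```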

    Read the article

  • Does LINQ require significantly more processing cycles and memory than lower-level data iteration techniques?

    - by Matthew Patrick Cashatt
    Background: I am currently in the process of enduring grueling tech interviews for positions that use the .NET stack, some of which include silly questions like this one, and some questions that are more valid. I recently came across an issue that may be valid but I want to check with the community here to be sure. When asked by an interviewer how I would count the frequency of words in a text document and rank the results, I answered that I would:
    1. Use a stream object to put the text file in memory as a string.
    2. Split the string into an array on spaces while ignoring punctuation.
    3. Use LINQ against the array to .GroupBy() and .Count(), then OrderBy() said count.
    I got this answer wrong for two reasons:
    1. Streaming an entire text file into memory could be disastrous. What if it was an entire encyclopedia? Instead I should stream one block at a time and begin building a hash table.
    2. LINQ is too expensive and requires too many processing cycles. I should have built a hash table instead and, for each iteration, only added a word to the hash table if it didn't already exist and then incremented its count.
    The first reason seems, well, reasonable. But the second gives me more pause. I thought that one of the selling points of LINQ is that it simply abstracts away lower-level operations like hash tables but that, under the veil, it is still the same implementation.
    Question: Aside from a few additional processing cycles to call any abstracted methods, does LINQ require significantly more processing cycles to accomplish a given data iteration task than a lower-level approach (such as building a hash table) would?
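
    To make the comparison concrete, here is a small sketch (not from the original question) of the two approaches being contrasted: the LINQ GroupBy version and the explicit Dictionary version. Both ultimately rely on a hash-based lookup, which is exactly the equivalence the question is probing.

    ```csharp
    using System;
    using System.Collections.Generic;
    using System.Linq;

    class WordFrequency
    {
        static void Main()
        {
            string text = "the quick brown fox jumps over the lazy dog the end";
            string[] words = text.Split(new[] { ' ' }, StringSplitOptions.RemoveEmptyEntries);

            // Approach 1: LINQ - GroupBy builds a hash-based lookup internally.
            var ranked = words
                .GroupBy(w => w)
                .Select(g => new { Word = g.Key, Count = g.Count() })
                .OrderByDescending(x => x.Count);

            foreach (var entry in ranked)
                Console.WriteLine("{0}: {1}", entry.Word, entry.Count);

            // Approach 2: explicit hash table, as the interviewer suggested.
            var counts = new Dictionary<string, int>();
            foreach (string word in words)
            {
                int current;
                counts[word] = counts.TryGetValue(word, out current) ? current + 1 : 1;
            }

            foreach (var pair in counts.OrderByDescending(p => p.Value))
                Console.WriteLine("{0}: {1}", pair.Key, pair.Value);
        }
    }
    ```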

    Read the article

  • XNA - Transforming children

    - by user1806687
    So, I have a Model stored in MyModel that is made from three meshes. If you loop through MyModel.Meshes, the first two are children of the third one. I was just wondering if anyone could tell me where the problem is in my code. This method is called whenever I want to programmatically change the position of a whole model:

        public void ChangePosition(Vector3 newPos)
        {
            Position = newPos;
            MyModel.Root.Transform = Matrix.CreateScale(VectorMathHelper.VectorMath(CurrentSize, DefaultSize, '/')) *
                Matrix.CreateFromAxisAngle(MyModel.Root.Transform.Up, MathHelper.ToRadians(Rotation.Y)) *
                Matrix.CreateFromAxisAngle(MyModel.Root.Transform.Right, MathHelper.ToRadians(Rotation.X)) *
                Matrix.CreateFromAxisAngle(MyModel.Root.Transform.Forward, MathHelper.ToRadians(Rotation.Z)) *
                Matrix.CreateTranslation(Position);

            Matrix[] transforms = new Matrix[MyModel.Bones.Count];
            MyModel.CopyAbsoluteBoneTransformsTo(transforms);
            int count = transforms.Length - 1;
            foreach (ModelMesh mesh in MyModel.Meshes)
            {
                mesh.ParentBone.Transform = transforms[count];
                count--;
            }
        }

    This is the draw method:

        foreach (ModelMesh mesh in MyModel.Meshes)
        {
            foreach (BasicEffect effect in mesh.Effects)
            {
                effect.View = camera.view;
                effect.Projection = camera.projection;
                effect.World = mesh.ParentBone.Transform;
                effect.EnableDefaultLighting();
            }
            mesh.Draw();
        }

    The thing is, when I call ChangePosition() the first time everything works perfectly, but as soon as I call it again and again, the first two meshes (the child meshes) start to move away from the parent mesh. Another thing I wanted to ask: if I change the scale/rotation/position of a child mesh and then call CopyAbsoluteBoneTransformsTo(), will the child meshes be positioned properly (at the proper distance), or would achieving that require more math/methods? Thanks in advance
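
    One common pattern worth comparing against (a sketch assuming standard XNA 4.0 types, not a verified fix for the code above) is to capture the model's original bone transforms once and recompute the absolute transforms from them every frame, instead of writing the results back into mesh.ParentBone.Transform on each ChangePosition() call; repeatedly overwriting the bone transforms is what makes the child offsets accumulate.

    ```csharp
    using Microsoft.Xna.Framework;
    using Microsoft.Xna.Framework.Graphics;

    public class ModelRenderer
    {
        private readonly Model model;
        private readonly Matrix[] originalBoneTransforms; // captured once, never overwritten
        private readonly Matrix[] absoluteTransforms;

        public ModelRenderer(Model model)
        {
            this.model = model;
            originalBoneTransforms = new Matrix[model.Bones.Count];
            model.CopyBoneTransformsTo(originalBoneTransforms);
            absoluteTransforms = new Matrix[model.Bones.Count];
        }

        // 'world' is the scale/rotation/translation matrix built the same way
        // ChangePosition builds it above.
        public void Draw(Matrix world, Matrix view, Matrix projection)
        {
            // Restore the untouched bone transforms, then compute absolutes from them.
            model.CopyBoneTransformsFrom(originalBoneTransforms);
            model.CopyAbsoluteBoneTransformsTo(absoluteTransforms);

            foreach (ModelMesh mesh in model.Meshes)
            {
                foreach (BasicEffect effect in mesh.Effects)
                {
                    effect.EnableDefaultLighting();
                    effect.View = view;
                    effect.Projection = projection;
                    // Combine this mesh's own bone transform with the whole-model world matrix.
                    effect.World = absoluteTransforms[mesh.ParentBone.Index] * world;
                }
                mesh.Draw();
            }
        }
    }
    ```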

    Read the article

  • Why values in my WCF data contract were suddenly wrong...

    - by mipsen
    A WCF service I provided took a very simple data contract as a parameter (containing one string and one int...) and had a very simple task to do. A .NET 3.5 client was created using the VS2008 feature "Add Service Reference". Everything worked as expected. Then a slight change came in: the client was expected to run on machines with .NET 2.0 only. So we set the Target Framework to .NET 2.0, removed the references to System.ServiceModel, System.Runtime.Serialization and the ServiceReference, and created a new reference to the service using the old "Add Web Reference". A matter of 2 minutes. When testing, the int value in the data contract arriving at the WCF service was suddenly 0, instead of 38 as we expected. What happened? When generating an old-style Web Reference against a WCF data contract, an additional boolean field called [Fieldname]Specified (e.g. AgeSpecified) is created for each value-type field, and it defaults to "false". WCF inspects these boolean fields to determine whether a value was provided for the value-type field. If the "Specified" field is "false", WCF translates that to using the default value of the value-type field. For int this is 0. So we had to add a line setting the "Specified" field for the int value to "true", and everything was fine again. That was what we forgot after setting the Framework version to 2.0...
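
    A minimal sketch of the fix, assuming the generated proxy class is called MyService and the contract type MyContract has an int member called Age (the type, member and method names are placeholders, not from the original post):

    ```csharp
    // Client code against an "Add Web Reference" (ASMX-style) proxy.
    public static void CallService()
    {
        MyContract request = new MyContract();
        request.Name = "Sample";
        request.Age = 38;
        request.AgeSpecified = true; // without this line the service receives the default value (0)

        MyService proxy = new MyService();
        proxy.Submit(request);
    }
    ```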

    Read the article

  • I want to keep the textbox values after the onchange function runs

    - by user1908045
    hello i have problem with this code..I want to keep the values of the textboxes when the pageload and didnt write again the values. I have two drop down list. the First one is the country when the country selected then the page load and appear the city to select but afte the pageload the values on textbox is empty. I want to keep the values of textbox when the page load. This is the code <head> <script type="text/javascript"> function Load_id() { var Count = document.getElementById("Count").value; var Count_txt = "?Count=" location = Count_txt + Count } </script> <meta charset="UTF-8"> </head> <body> <div class="main"> <div class="headers"> <table> <tr><td rowspan="2"><img alt="unipi" src="/Images/logo.jpeg" height="75" width="52"></td> <td>University</td></tr> <tr><td>Data</td></tr> </table> </div> <div class="form"> <h3>Personal</h3><br/><br/><br/> <form id="Page1" name="Page1" action="Form1Sub.php" method="Post"> <table style="width:520px;text-align:left;"> <tr><td><label>Number:</label></td> <td><input type="text" required="required" id="AM" name="AM" value=""/></td> </tr> <tr><td><label>Name:</label></td> <td><input type="text" required="required" name="Name"/></td> </tr> <?php $host="localhost"; $username=""; $password=""; $dbName="Database"; $connection = mysql_connect($host, $username, $password) or die("Couldn't Connect to the Server"); $db = mysql_select_db($dbName, $connection) or die("cannot select DataBase"); $Count = $_GET['Count']; echo "<tr><td><label>Country</label></td>\n"; $country = mysql_query("select DISTINCT Country FROM lut_country_city "); echo " <td><select id=\"Count\" name=\"cat\" onChange=\"Load_id(this)\">\n"; echo " `<option>Select Country</option>\n"; while($nt=mysql_fetch_array($country)){ $selected = ($nt["Country"] == $Count)? "SELECTED":""; echo"<option value=\"".$nt['Country']."\"". $selected." >".$nt['Country']."</option>"; } echo " </select></td></tr>\n"; echo"<tr><td><label>City:</label></td>\n"; $q2 = mysql_query("Select id,City,Country FROM lut_country_city WHERE Country = '$Count'"); echo"<td><select name=\"SelectCity\">\n"; while($row = mysql_fetch_array($q2)) { echo"<option value=\"".$row['id']."\">".$row['City']."</option>"; } echo " </select></td></tr>\n"; ?> </table> <p> <button type="submit" id="Next">Next</button> </form> <form id="form1" action="index.php"> <button id="Back" type="submit">Back</button> </form> </p> </div> </div> </body> </html>

    Read the article

  • Trouble linking libboost libraries to compile sslsniff on RHEL

    - by rwong48
    Trying to build sslsniff on a RHEL 5.2 system here. When compiling sslsniff on RHEL I hit the same errors when using libboost packages (from repositories like rpmforge) and compiling libboost from source (which appeared to be successful.) I tried this on a fresh system as well (no previous/failed/garbage installs of libboost etc.) # make g++ -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE=\"sslsniff\" -DVERSION=\"0.6\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -I. -ggdb -g -O2 -MT SSLConnectionManager.o -MD -MP -MF .deps/SSLConnectionManager.Tpo -c -o SSLConnectionManager.o SSLConnectionManager.cpp mv -f .deps/SSLConnectionManager.Tpo .deps/SSLConnectionManager.Po g++ -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE=\"sslsniff\" -DVERSION=\"0.6\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -I. -ggdb -g -O2 -MT FirefoxUpdater.o -MD -MP -MF .deps/FirefoxUpdater.Tpo -c -o FirefoxUpdater.o FirefoxUpdater.cpp mv -f .deps/FirefoxUpdater.Tpo .deps/FirefoxUpdater.Po g++ -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE=\"sslsniff\" -DVERSION=\"0.6\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -I. -ggdb -g -O2 -MT Logger.o -MD -MP -MF .deps/Logger.Tpo -c -o Logger.o Logger.cpp mv -f .deps/Logger.Tpo .deps/Logger.Po g++ -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE=\"sslsniff\" -DVERSION=\"0.6\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -I. -ggdb -g -O2 -MT SessionCache.o -MD -MP -MF .deps/SessionCache.Tpo -c -o SessionCache.o SessionCache.cpp mv -f .deps/SessionCache.Tpo .deps/SessionCache.Po g++ -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE=\"sslsniff\" -DVERSION=\"0.6\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -I. -ggdb -g -O2 -MT SSLBridge.o -MD -MP -MF .deps/SSLBridge.Tpo -c -o SSLBridge.o SSLBridge.cpp mv -f .deps/SSLBridge.Tpo .deps/SSLBridge.Po g++ -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE=\"sslsniff\" -DVERSION=\"0.6\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -I. 
-ggdb -g -O2 -MT HTTPSBridge.o -MD -MP -MF .deps/HTTPSBridge.Tpo -c -o HTTPSBridge.o HTTPSBridge.cpp mv -f .deps/HTTPSBridge.Tpo .deps/HTTPSBridge.Po g++ -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE=\"sslsniff\" -DVERSION=\"0.6\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -I. -ggdb -g -O2 -MT sslsniff.o -MD -MP -MF .deps/sslsniff.Tpo -c -o sslsniff.o sslsniff.cpp mv -f .deps/sslsniff.Tpo .deps/sslsniff.Po g++ -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE=\"sslsniff\" -DVERSION=\"0.6\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -I. -ggdb -g -O2 -MT FingerprintManager.o -MD -MP -MF .deps/FingerprintManager.Tpo -c -o FingerprintManager.o FingerprintManager.cpp mv -f .deps/FingerprintManager.Tpo .deps/FingerprintManager.Po g++ -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE=\"sslsniff\" -DVERSION=\"0.6\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -I. -ggdb -g -O2 -MT AuthorityCertificateManager.o -MD -MP -MF .deps/AuthorityCertificateManager.Tpo -c -o AuthorityCertificateManager.o `test -f 'certificate/AuthorityCertificateManager.cpp' || echo './'`certificate/AuthorityCertificateManager.cpp mv -f .deps/AuthorityCertificateManager.Tpo .deps/AuthorityCertificateManager.Po g++ -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE=\"sslsniff\" -DVERSION=\"0.6\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -I. -ggdb -g -O2 -MT TargetedCertificateManager.o -MD -MP -MF .deps/TargetedCertificateManager.Tpo -c -o TargetedCertificateManager.o `test -f 'certificate/TargetedCertificateManager.cpp' || echo './'`certificate/TargetedCertificateManager.cpp mv -f .deps/TargetedCertificateManager.Tpo .deps/TargetedCertificateManager.Po g++ -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE=\"sslsniff\" -DVERSION=\"0.6\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -I. -ggdb -g -O2 -MT CertificateManager.o -MD -MP -MF .deps/CertificateManager.Tpo -c -o CertificateManager.o `test -f 'certificate/CertificateManager.cpp' || echo './'`certificate/CertificateManager.cpp mv -f .deps/CertificateManager.Tpo .deps/CertificateManager.Po g++ -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE=\"sslsniff\" -DVERSION=\"0.6\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -I. 
-ggdb -g -O2 -MT HttpBridge.o -MD -MP -MF .deps/HttpBridge.Tpo -c -o HttpBridge.o `test -f 'http/HttpBridge.cpp' || echo './'`http/HttpBridge.cpp mv -f .deps/HttpBridge.Tpo .deps/HttpBridge.Po g++ -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE=\"sslsniff\" -DVERSION=\"0.6\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -I. -ggdb -g -O2 -MT HttpConnectionManager.o -MD -MP -MF .deps/HttpConnectionManager.Tpo -c -o HttpConnectionManager.o `test -f 'http/HttpConnectionManager.cpp' || echo './'`http/HttpConnectionManager.cpp mv -f .deps/HttpConnectionManager.Tpo .deps/HttpConnectionManager.Po g++ -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE=\"sslsniff\" -DVERSION=\"0.6\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -I. -ggdb -g -O2 -MT HttpHeaders.o -MD -MP -MF .deps/HttpHeaders.Tpo -c -o HttpHeaders.o `test -f 'http/HttpHeaders.cpp' || echo './'`http/HttpHeaders.cpp mv -f .deps/HttpHeaders.Tpo .deps/HttpHeaders.Po g++ -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE=\"sslsniff\" -DVERSION=\"0.6\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -I. -ggdb -g -O2 -MT UpdateManager.o -MD -MP -MF .deps/UpdateManager.Tpo -c -o UpdateManager.o UpdateManager.cpp mv -f .deps/UpdateManager.Tpo .deps/UpdateManager.Po g++ -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE=\"sslsniff\" -DVERSION=\"0.6\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -I. -ggdb -g -O2 -MT OCSPDenier.o -MD -MP -MF .deps/OCSPDenier.Tpo -c -o OCSPDenier.o `test -f 'http/OCSPDenier.cpp' || echo './'`http/OCSPDenier.cpp mv -f .deps/OCSPDenier.Tpo .deps/OCSPDenier.Po g++ -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE=\"sslsniff\" -DVERSION=\"0.6\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -I. 
-ggdb -g -O2 -MT FirefoxAddonUpdater.o -MD -MP -MF .deps/FirefoxAddonUpdater.Tpo -c -o FirefoxAddonUpdater.o FirefoxAddonUpdater.cpp mv -f .deps/FirefoxAddonUpdater.Tpo .deps/FirefoxAddonUpdater.Po g++ -ggdb -g -O2 -lssl -lboost_filesystem -lpthread -lboost_thread -llog4cpp -o sslsniff SSLConnectionManager.o FirefoxUpdater.o Logger.o SessionCache.o SSLBridge.o HTTPSBridge.o sslsniff.o FingerprintManager.o AuthorityCertificateManager.o TargetedCertificateManager.o CertificateManager.o HttpBridge.o HttpConnectionManager.o HttpHeaders.o UpdateManager.o OCSPDenier.o FirefoxAddonUpdater.o SSLConnectionManager.o: In function `__static_initialization_and_destruction_0': /usr/local/include/boost/system/error_code.hpp:208: undefined reference to `boost::system::get_system_category()' /usr/local/include/boost/system/error_code.hpp:209: undefined reference to `boost::system::get_generic_category()' /usr/local/include/boost/system/error_code.hpp:214: undefined reference to `boost::system::get_generic_category()' /usr/local/include/boost/system/error_code.hpp:215: undefined reference to `boost::system::get_generic_category()' /usr/local/include/boost/system/error_code.hpp:216: undefined reference to `boost::system::get_system_category()' There's more, but I guess there's a post length limit.. Most of them appear related to boost::system so I added -lboost_system to the linker command and got farther: # g++ -ggdb -g -O2 -lssl -lboost_filesystem -lpthread -lboost_thread -llog4cpp -o sslsniff SSLConnectionManager.o FirefoxUpdater.o Logger.o SessionCache.o SSLBridge.o HTTPSBridge.o sslsniff.o FingerprintManager.o AuthorityCertificateManager.o TargetedCertificateManager.o CertificateManager.o HttpBridge.o HttpConnectionManager.o HttpHeaders.o UpdateManager.o OCSPDenier.o FirefoxAddonUpdater.o -lboost_system SSLConnectionManager.o: In function `thread<boost::_bi::bind_t<void, boost::_mfi::mf3<void, SSLConnectionManager, boost::shared_ptr<boost::asio::basic_stream_socket<boost::asio::ip::tcp, boost::asio::stream_socket_service<boost::asio::ip::tcp> > >, boost::asio::ip::basic_endpoint<boost::asio::ip::tcp>, bool>, boost::_bi::list4<boost::_bi::value<SSLConnectionManager*>, boost::_bi::value<boost::shared_ptr<boost::asio::basic_stream_socket<boost::asio::ip::tcp, boost::asio::stream_socket_service<boost::asio::ip::tcp> > > >, boost::_bi::value<boost::asio::ip::basic_endpoint<boost::asio::ip::tcp> >, boost::_bi::value<bool> > > >': /usr/local/include/boost/thread/detail/thread.hpp:191: undefined reference to `boost::thread::start_thread()' SSLConnectionManager.o: In function `~thread_data': /usr/local/include/boost/thread/detail/thread.hpp:40: undefined reference to `boost::detail::thread_data_base::~thread_data_base()' /usr/local/include/boost/thread/detail/thread.hpp:40: undefined reference to `boost::detail::thread_data_base::~thread_data_base()' /usr/local/include/boost/thread/detail/thread.hpp:40: undefined reference to `boost::detail::thread_data_base::~thread_data_base()' /usr/local/include/boost/thread/detail/thread.hpp:40: undefined reference to `boost::detail::thread_data_base::~thread_data_base()' Now the errors are related to boost::detail and boost::filesystem::detail. I've tried using boost 1.35 and 1.42 (latest). On my own Ubuntu system, I installed the libraries from Ubuntu repositories and I was able to compile+link sslsniff just fine. Thanks in advance.

    Read the article

  • How can I add headers to a DualList control in WPF

    - by devnet247
    Hi all I am trying to write a Dual List usercontrol in wpf. I am new to wpf and I am finding it quite difficult. This is something I have put together in a couple of hours.It's not that good but a start. I would be extremely grateful if somebody with wpf experience could improve it. The aim is to simplify the usage as much as possible I am kind of stuck. I would like the user of the DualList Control to be able to set up headers how do you do that. Do I need to expose some dependency properties in my control? At the moment when loading the user has to pass a ObservableCollection is there a better way? Could you have a look and possibly make any suggestions with some code? Thanks a lot!!!!! xaml <Grid ShowGridLines="False"> <Grid.ColumnDefinitions> <ColumnDefinition Width="*"/> <ColumnDefinition Width="25px"></ColumnDefinition> <ColumnDefinition Width="*"/> </Grid.ColumnDefinitions> <Grid.RowDefinitions> <RowDefinition Height="*"></RowDefinition> </Grid.RowDefinitions> <StackPanel Orientation="Vertical" Grid.Column="0" Grid.Row="0"> <Label Name="lblLeftTitle" Content="Available"></Label> <ListView Name="lvwLeft"> </ListView> </StackPanel> <WrapPanel Grid.Column="1" Grid.Row="0"> <Button Name="btnMoveRight" Content=">" Width="25" Margin="0,35,0,0" Click="btnMoveRight_Click" /> <Button Name="btnMoveAllRight" Content=">>" Width="25" Margin="0,05,0,0" Click="btnMoveAllRight_Click" /> <Button Name="btnMoveLeft" Content="&lt;" Width="25" Margin="0,25,0,0" Click="btnMoveLeft_Click" /> <Button Name="btnMoveAllLeft" Content="&lt;&lt;" Width="25" Margin="0,05,0,0" Click="btnMoveAllLeft_Click" /> </WrapPanel> <StackPanel Orientation="Vertical" Grid.Column="2" Grid.Row="0"> <Label Name="lblRightTitle" Content="Selected"></Label> <ListView Name="lvwRight"> </ListView> </StackPanel> </Grid> Client public partial class DualListTest { public ObservableCollection<ListViewItem> LeftList { get; set; } public ObservableCollection<ListViewItem> RightList { get; set; } public DualListTest() { InitializeComponent(); LoadCustomers(); LoadDualList(); } private void LoadDualList() { dualList1.Load(LeftList, RightList); } private void LoadCustomers() { //Pretend we are getting a list of Customers from a repository. //Some go in the left List(Good Customers) some go in the Right List(Bad Customers). 
LeftList = new ObservableCollection<ListViewItem>(); RightList = new ObservableCollection<ListViewItem>(); var customers = GetCustomers(); foreach (var customer in customers) { if (customer.Status == CustomerStatus.Good) { LeftList.Add(new ListViewItem { Content = customer }); } else { RightList.Add(new ListViewItem{Content=customer }); } } } private static IEnumerable<Customer> GetCustomers() { return new List<Customer> { new Customer {Name = "Jo Blogg", Status = CustomerStatus.Good}, new Customer {Name = "Rob Smith", Status = CustomerStatus.Good}, new Customer {Name = "Michel Platini", Status = CustomerStatus.Good}, new Customer {Name = "Roberto Baggio", Status = CustomerStatus.Good}, new Customer {Name = "Gio Surname", Status = CustomerStatus.Bad}, new Customer {Name = "Diego Maradona", Status = CustomerStatus.Bad} }; } } UserControl public partial class DualList:UserControl { public ObservableCollection<ListViewItem> LeftListCollection { get; set; } public ObservableCollection<ListViewItem> RightListCollection { get; set; } public DualList() { InitializeComponent(); } public void Load(ObservableCollection<ListViewItem> leftListCollection, ObservableCollection<ListViewItem> rightListCollection) { LeftListCollection = leftListCollection; RightListCollection = rightListCollection; lvwLeft.ItemsSource = leftListCollection; lvwRight.ItemsSource = rightListCollection; EnableButtons(); } public static DependencyProperty LeftTitleProperty = DependencyProperty.Register("LeftTitle", typeof(string), typeof(Label)); public static DependencyProperty RightTitleProperty = DependencyProperty.Register("RightTitle", typeof(string), typeof(Label)); public static DependencyProperty LeftListProperty = DependencyProperty.Register("LeftList", typeof(ListView), typeof(DualList)); public static DependencyProperty RightListProperty = DependencyProperty.Register("RightList", typeof(ListView), typeof(DualList)); public string LeftTitle { get { return (string)lblLeftTitle.Content; } set { lblLeftTitle.Content = value; } } public string RightTitle { get { return (string)lblRightTitle.Content; } set { lblRightTitle.Content = value; } } public ListView LeftList { get { return lvwLeft; } set { lvwLeft = value; } } public ListView RightList { get { return lvwRight; } set { lvwRight = value; } } private void EnableButtons() { if (lvwLeft.Items.Count > 0) { btnMoveRight.IsEnabled = true; btnMoveAllRight.IsEnabled = true; } else { btnMoveRight.IsEnabled = false; btnMoveAllRight.IsEnabled = false; } if (lvwRight.Items.Count > 0) { btnMoveLeft.IsEnabled = true; btnMoveAllLeft.IsEnabled = true; } else { btnMoveLeft.IsEnabled = false; btnMoveAllLeft.IsEnabled = false; } if (lvwLeft.Items.Count != 0 || lvwRight.Items.Count != 0) return; btnMoveLeft.IsEnabled = false; btnMoveAllLeft.IsEnabled = false; btnMoveRight.IsEnabled = false; btnMoveAllRight.IsEnabled = false; } private void MoveRight() { while (lvwLeft.SelectedItems.Count > 0) { var selectedItem = (ListViewItem)lvwLeft.SelectedItem; LeftListCollection.Remove(selectedItem); RightListCollection.Add(selectedItem); } lvwRight.ItemsSource = RightListCollection; lvwLeft.ItemsSource = LeftListCollection; EnableButtons(); } private void MoveAllRight() { while (lvwLeft.Items.Count > 0) { var item = (ListViewItem)lvwLeft.Items[lvwLeft.Items.Count - 1]; LeftListCollection.Remove(item); RightListCollection.Add(item); } lvwRight.ItemsSource = RightListCollection; lvwLeft.ItemsSource = LeftListCollection; EnableButtons(); } private void MoveAllLeft() { while (lvwRight.Items.Count > 
0) { var item = (ListViewItem)lvwRight.Items[lvwRight.Items.Count - 1]; RightListCollection.Remove(item); LeftListCollection.Add(item); } lvwRight.ItemsSource = RightListCollection; lvwLeft.ItemsSource = LeftListCollection; EnableButtons(); } private void MoveLeft() { while (lvwRight.SelectedItems.Count > 0) { var selectedCustomer = (ListViewItem)lvwRight.SelectedItem; LeftListCollection.Add(selectedCustomer); RightListCollection.Remove(selectedCustomer); } lvwRight.ItemsSource = RightListCollection; lvwLeft.ItemsSource = LeftListCollection; EnableButtons(); } private void btnMoveLeft_Click(object sender, RoutedEventArgs e) { MoveLeft(); } private void btnMoveAllLeft_Click(object sender, RoutedEventArgs e) { MoveAllLeft(); } private void btnMoveRight_Click(object sender, RoutedEventArgs e) { MoveRight(); } private void btnMoveAllRight_Click(object sender, RoutedEventArgs e) { MoveAllRight(); } }
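
    One way to let callers set the headers (a sketch, not tested against the control above) is to register the title dependency properties on the DualList type itself rather than on Label, with a property-changed callback that pushes the value into lblLeftTitle/lblRightTitle. This would replace the existing LeftTitleProperty/RightTitleProperty declarations and CLR wrappers shown above.

    ```csharp
    using System.Windows;
    using System.Windows.Controls;

    public partial class DualList : UserControl
    {
        // Registered on DualList (the owner type), not on Label.
        public static readonly DependencyProperty LeftTitleProperty =
            DependencyProperty.Register("LeftTitle", typeof(string), typeof(DualList),
                new PropertyMetadata("Available", OnLeftTitleChanged));

        public static readonly DependencyProperty RightTitleProperty =
            DependencyProperty.Register("RightTitle", typeof(string), typeof(DualList),
                new PropertyMetadata("Selected", OnRightTitleChanged));

        public string LeftTitle
        {
            get { return (string)GetValue(LeftTitleProperty); }
            set { SetValue(LeftTitleProperty, value); }
        }

        public string RightTitle
        {
            get { return (string)GetValue(RightTitleProperty); }
            set { SetValue(RightTitleProperty, value); }
        }

        private static void OnLeftTitleChanged(DependencyObject d, DependencyPropertyChangedEventArgs e)
        {
            ((DualList)d).lblLeftTitle.Content = e.NewValue;
        }

        private static void OnRightTitleChanged(DependencyObject d, DependencyPropertyChangedEventArgs e)
        {
            ((DualList)d).lblRightTitle.Content = e.NewValue;
        }
    }
    ```

    Consumers could then write <local:DualList LeftTitle="Good customers" RightTitle="Bad customers" ... /> or bind the properties like any other dependency property.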

    Read the article

  • Weblogic 10.0: SAMLSignedObject.verify() failed to validate signature value

    - by joshea
    I've been having this problem for a while and it's driving me nuts. I'm trying to create a client (in C# .NET 2.0) that will use SAML 1.1 to sign on to a WebLogic 10.0 server (i.e., a Single Sign-On scenario, using browser/post profile). The client is on a WinXP machine and the WebLogic server is on a RHEL 5 box. I based my client largely on code in the example here: http://www.codeproject.com/KB/aspnet/DotNetSamlPost.aspx (the source has a section for SAML 1.1). I set up WebLogic based on instructions for SAML Destination Site from here:http://www.oracle.com/technology/pub/articles/dev2arch/2006/12/sso-with-saml4.html I created a certificate using makecert that came with VS 2005. makecert -r -pe -n "CN=whatever" -b 01/01/2010 -e 01/01/2011 -sky exchange whatever.cer -sv whatever.pvk pvk2pfx.exe -pvk whatever.pvk -spc whatever.cer -pfx whatever.pfx Then I installed the .pfx to my personal certificate directory, and installed the .cer into the WebLogic SAML Identity Asserter V2. I read on another site that formatting the response to be readable (ie, adding whitespace) to the response after signing would cause this problem, so I tried various combinations of turning on/off .Indent XMLWriterSettings and turning on/off .PreserveWhiteSpace when loading the XML document, and none of it made any difference. I've printed the SignatureValue both before the message is is encoded/sent and after it arrives/gets decoded, and they are the same. So, to be clear: the Response appears to be formed, encoded, sent, and decoded fine (I see the full Response in the WebLogic logs). WebLogic finds the certificate I want it to use, verifies that a key was supplied, gets the signed info, and then fails to validate the signature. Code: public string createResponse(Dictionary<string, string> attributes){ ResponseType response = new ResponseType(); // Create Response response.ResponseID = "_" + Guid.NewGuid().ToString(); response.MajorVersion = "1"; response.MinorVersion = "1"; response.IssueInstant = System.DateTime.UtcNow; response.Recipient = "http://theWLServer/samlacs/acs"; StatusType status = new StatusType(); status.StatusCode = new StatusCodeType(); status.StatusCode.Value = new XmlQualifiedName("Success", "urn:oasis:names:tc:SAML:1.0:protocol"); response.Status = status; // Create Assertion AssertionType assertionType = CreateSaml11Assertion(attributes); response.Assertion = new AssertionType[] {assertionType}; //Serialize XmlSerializerNamespaces ns = new XmlSerializerNamespaces(); ns.Add("samlp", "urn:oasis:names:tc:SAML:1.0:protocol"); ns.Add("saml", "urn:oasis:names:tc:SAML:1.0:assertion"); XmlSerializer responseSerializer = new XmlSerializer(response.GetType()); StringWriter stringWriter = new StringWriter(); XmlWriterSettings settings = new XmlWriterSettings(); settings.OmitXmlDeclaration = true; settings.Indent = false;//I've tried both ways, for the fun of it settings.Encoding = Encoding.UTF8; XmlWriter responseWriter = XmlTextWriter.Create(stringWriter, settings); responseSerializer.Serialize(responseWriter, response, ns); responseWriter.Close(); string samlString = stringWriter.ToString(); stringWriter.Close(); // Sign the document XmlDocument doc = new XmlDocument(); doc.PreserveWhiteSpace = true; //also tried this both ways to no avail doc.LoadXml(samlString); X509Certificate2 cert = null; X509Store store = new X509Store(StoreName.My, StoreLocation.CurrentUser); store.Open(OpenFlags.ReadOnly); X509Certificate2Collection coll = 
store.Certificates.Find(X509FindType.FindBySubjectDistinguishedName, "distName", true); if (coll.Count < 1) { throw new ArgumentException("Unable to locate certificate"); } cert = coll[0]; store.Close(); //this special SignDoc just overrides a function in SignedXml so //it knows to look for ResponseID rather than ID XmlElement signature = SamlHelper.SignDoc( doc, cert, "ResponseID", response.ResponseID); doc.DocumentElement.InsertBefore(signature, doc.DocumentElement.ChildNodes[0]); // Base64Encode and URL Encode byte[] base64EncodedBytes = Encoding.UTF8.GetBytes(doc.OuterXml); string returnValue = System.Convert.ToBase64String( base64EncodedBytes); return returnValue; } private AssertionType CreateSaml11Assertion(Dictionary<string, string> attributes){ AssertionType assertion = new AssertionType(); assertion.AssertionID = "_" + Guid.NewGuid().ToString(); assertion.Issuer = "madeUpValue"; assertion.MajorVersion = "1"; assertion.MinorVersion = "1"; assertion.IssueInstant = System.DateTime.UtcNow; //Not before, not after conditions ConditionsType conditions = new ConditionsType(); conditions.NotBefore = DateTime.UtcNow; conditions.NotBeforeSpecified = true; conditions.NotOnOrAfter = DateTime.UtcNow.AddMinutes(10); conditions.NotOnOrAfterSpecified = true; //Name Identifier to be used in Saml Subject NameIdentifierType nameIdentifier = new NameIdentifierType(); nameIdentifier.NameQualifier = domain.Trim(); nameIdentifier.Value = subject.Trim(); SubjectConfirmationType subjectConfirmation = new SubjectConfirmationType(); subjectConfirmation.ConfirmationMethod = new string[] { "urn:oasis:names:tc:SAML:1.0:cm:bearer" }; // // Create some SAML subject. SubjectType samlSubject = new SubjectType(); AttributeStatementType attrStatement = new AttributeStatementType(); AuthenticationStatementType authStatement = new AuthenticationStatementType(); authStatement.AuthenticationMethod = "urn:oasis:names:tc:SAML:1.0:am:password"; authStatement.AuthenticationInstant = System.DateTime.UtcNow; samlSubject.Items = new object[] { nameIdentifier, subjectConfirmation}; attrStatement.Subject = samlSubject; authStatement.Subject = samlSubject; IPHostEntry ipEntry = Dns.GetHostEntry(System.Environment.MachineName); SubjectLocalityType subjectLocality = new SubjectLocalityType(); subjectLocality.IPAddress = ipEntry.AddressList[0].ToString(); authStatement.SubjectLocality = subjectLocality; attrStatement.Attribute = new AttributeType[attributes.Count]; int i=0; // Create SAML attributes. foreach (KeyValuePair<string, string> attribute in attributes) { AttributeType attr = new AttributeType(); attr.AttributeName = attribute.Key; attr.AttributeNamespace= domain; attr.AttributeValue = new object[] {attribute.Value}; attrStatement.Attribute[i] = attr; i++; } assertion.Conditions = conditions; assertion.Items = new StatementAbstractType[] {authStatement, attrStatement}; return assertion; } private static XmlElement SignDoc(XmlDocument doc, X509Certificate2 cert2, string referenceId, string referenceValue) { // Use our own implementation of SignedXml SamlSignedXml sig = new SamlSignedXml(doc, referenceId); // Add the key to the SignedXml xmlDocument. sig.SigningKey = cert2.PrivateKey; // Create a reference to be signed. Reference reference = new Reference(); reference.Uri= String.Empty; reference.Uri = "#" + referenceValue; // Add an enveloped transformation to the reference. 
XmlDsigEnvelopedSignatureTransform env = new XmlDsigEnvelopedSignatureTransform(); reference.AddTransform(env); // Add the reference to the SignedXml object. sig.AddReference(reference); // Add an RSAKeyValue KeyInfo (optional; helps recipient find key to validate). KeyInfo keyInfo = new KeyInfo(); keyInfo.AddClause(new KeyInfoX509Data(cert2)); sig.KeyInfo = keyInfo; // Compute the signature. sig.ComputeSignature(); // Get the XML representation of the signature and save // it to an XmlElement object. XmlElement xmlDigitalSignature = sig.GetXml(); return xmlDigitalSignature; } To open the page in my client app, string postData = String.Format("SAMLResponse={0}&APID=ap_00001&TARGET={1}", System.Web.HttpUtility.UrlEncode(builder.buildResponse("http://theWLServer/samlacs/acs",attributes)), "http://desiredURL"); webBrowser.Navigate("http://theWLServer/samlacs/acs", "_self", Encoding.UTF8.GetBytes(postData), "Content-Type: application/x-www-form-urlencoded");
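    One debugging step worth adding, sketched below under the assumption that SamlSignedXml (the custom SignedXml subclass mentioned above) is available with the same constructor used in SignDoc: verify the freshly signed document locally before base64-encoding and posting it. If local verification fails, the problem is in the signing or serialization step on the client rather than in WebLogic's configuration. The helper name VerifyLocally is mine, not part of the original code, and it assumes the same using directives as the code above.

```csharp
// Hypothetical helper: verify the just-signed response before it is
// base64-encoded and posted. Assumes SamlSignedXml derives from SignedXml
// and resolves references by "ResponseID", as in the question's code.
private static bool VerifyLocally(XmlDocument signedDoc, X509Certificate2 cert)
{
    // Find the Signature element that SignDoc inserted.
    XmlNodeList nodes = signedDoc.GetElementsByTagName(
        "Signature", "http://www.w3.org/2000/09/xmldsig#");
    if (nodes.Count == 0)
        return false;

    SamlSignedXml verifier = new SamlSignedXml(signedDoc, "ResponseID");
    verifier.LoadXml((XmlElement)nodes[0]);

    // Check the signature against the public key of the signing certificate.
    return verifier.CheckSignature(cert, true);
}
```

    If this returns true locally but WebLogic still rejects the signature, the usual suspects are whitespace or encoding changes introduced after signing, or a reference/canonicalization mismatch between the two sides.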

    Read the article

  • problem with piping in my own implementation of shell

    - by codemax
    Hey guys, I am implementing my own shell and I want to add piping. I searched here and found some code, but it is not working. Can anyone help me? This is my code: #include <sys/types.h> #include <sys/wait.h> #include <sys/ipc.h> #include <fcntl.h> #include <unistd.h> #include <string.h> #include <iostream> #include <cstdlib> using namespace std; char temp1[81][81],temp2[81][81] ,*cmdptr1[40], *cmdptr2[40]; void process(char**,int); int arg_count, count; int arg_cnt[2]; int pip,tok; char input[81]; int fds[2]; void process( char* cmd[])//, int arg_count ) { pid_t pid; pid = fork(); //char path[81]; //getcwd(path,81); //strcat(path,"/"); //strcat(path,cmd[0]); if(pid < 0) { cout << "Fork Failed" << endl; exit(-1); } else if( pid == 0 ) { execvp( cmd[0] , cmd ); } else { wait(NULL); } } void pipe(char **cmd1, char**cmd2) { cout<<endl<<endl<<"in pipe"<<endl; for(int i=0 ; i<arg_cnt[0] ; i++) { cout<<cmdptr1[i]<<" "; } cout<<endl; for(int i=0 ; i<arg_cnt[1] ; i++) { cout<<cmdptr2[i]<<" "; } pipe(fds); if (fork() == 0 ) { dup2(fds[1], 1); close(fds[0]); close(fds[1]); process(cmd1); } if (fork() == 0) { dup2(fds[0], 0); close(fds[0]); close(fds[1]); process(cmd2); } close(fds[0]); close(fds[1]); wait(NULL); } void pipecommand(char** cmd1, char** cmd2) { cout<<endl<<endl; for(int i=0 ; i<arg_cnt[0] ; i++) { cout<<cmd1[i]<<" "; } cout<<endl; for(int i=0 ; i<arg_cnt[1] ; i++) { cout<<cmd2[i]<<" "; } int fds[2]; // file descriptors pipe(fds); // child process #1 if (fork() == 0) { // Reassign stdin to fds[0] end of pipe. dup2(fds[0], STDIN_FILENO); close(fds[1]); close(fds[0]); process(cmd2); // child process #2 if (fork() == 0) { // Reassign stdout to fds[1] end of pipe. dup2(fds[1], STDOUT_FILENO); close(fds[0]); close(fds[1]); // Execute the first command. process(cmd1); } wait(NULL); } close(fds[1]); close(fds[0]); wait(NULL); } void splitcommand1() { tok++; int k,done=0,no=0; arg_count = 0; for(int i=count ; input[i] != '\0' ; i++) { k=0; while(1) { count++; if(input[i] == ' ') { break; } if((input[i] == '\0')) { done = 1; break; } if(input[i] == '|') { pip = 1; done = 1; break; } temp1[arg_count][k++] = input[i++]; } temp1[arg_count][k++] = '\0'; arg_count++; if(done == 1) { break; } } for(int i=0 ; i<arg_count ; i++) { cmdptr1[i] = temp1[i]; } arg_cnt[tok] = arg_count; } void splitcommand2() { tok++; cout<<"count is :"<<count<<endl; int k,done=0,no=0; arg_count = 0; for(int i=count ; input[i] != '\0' ; i++) { k=0; while(1) { count++; if(input[i] == ' ') { break; } if((input[i] == '\0')) { done = 1; break; } if(input[i] == '|') { pip = 1; done = 1; cout<<"PIP"; break; } temp2[arg_count][k++] = input[i++]; } temp2[arg_count][k++] = '\0'; arg_count++; if(done == 1) { break; } } for(int i=0 ; i<arg_count ; i++) { cmdptr2[i] = temp2[i]; } arg_cnt[tok] = arg_count; } int main() { cout<<endl<<endl<<"Welcome to unique shell !!!!!!!!!!!"<<endl; tok=-1; while(1) { cout<<endl<<"***********UNIQUE**********"<<endl; cin.getline(input,81); count = 0,pip=0; splitcommand1(); if(pip == 1) { count++; splitcommand2(); } cout<<endl<<endl; if(strcmp(cmdptr1[0], "exit") == 0 ) { cout<<endl<<"EXITING UNIQUE SHELL"<<endl; exit(0); } //cout<<endl<<"Arg count is :"<<arg_count<<endl; if(pip == 1) { cout<<endl<<endl<<"in main :"; for(int i=0 ; i<arg_cnt[0] ; i++) { cout<<cmdptr1[i]<<" "; } cout<<endl; for(int i=0 ; i<arg_cnt[1] ; i++) { cout<<cmdptr2[i]<<" "; } pipe(cmdptr1, cmdptr2); } else { process (cmdptr1);//,arg_count); } } } I know it is not well coded, but please try to help me :(

    Read the article

  • mysql_query arguments in PHP

    - by Chris Wilson
    I'm currently building my first database in MySQL with an interface written in PHP and am using the 'learn-by-doing' approach. The figure below illustrates my database. Table names are at the top, and the attribute names are as they appear in the real database. I am attempting to query the values of each of these attributes using the code seen below the table. I think there is something wrong with my mysql_query() function since I am able to observe the expected behaviour when my form is successfully submitted, but no search results are returned. Can anyone see where I'm going wrong here? Update 1: I've updated the question with my enter script, minus the database login credentials. <html> <head> <title>Search</title> </head> <body> <h1>Search</h1> <!--Search form - get user input from this--> <form name = "search" action = "<?=$PHP_SELF?>" method = "get"> Search for <input type = "text" name = "find" /> in <select name = "field"> <option value = "Title">Title</option> <option value = "Description">Description</option> <option value = "City">Location</option> <option value = "Company_name">Employer</option> </select> <input type = "submit" name = "search" value = "Search" /> </form> <form name = "clearsearch" action = "Search.php"> <input type = "submit" value = "Reset search" /> </form> <?php if (isset($_GET["search"])) // Check if form has been submitted correctly { // Check for a search query if($_GET["find"] == "") { echo "<p>You did not enter a search query. Please press the 'Reset search' button and try again"; exit; } echo "<h2>Search results</h2>"; ?> <table align = "left" border = "1" cellspacing = "2" cellpadding = "2"> <tr> <th><font face="Arial, Helvetica, sans-serif">No.</font></th> <th><font face="Arial, Helvetica, sans-serif">Title</font></th> <th><font face="Arial, Helvetica, sans-serif">Employer</font></th> <th><font face="Arial, Helvetica, sans-serif">Description</font></th> <th><font face="Arial, Helvetica, sans-serif">Location</font></th> <th><font face="Arial, Helvetica, sans-serif">Date Posted</font></th> <th><font face="Arial, Helvetica, sans-serif">Application Deadline</font></th> </tr> <? // Connect to the database $username=REDACTED; $password=REDACTED; $host=REDACTED; $database=REDACTED; mysql_connect($host, $username, $password); @mysql_select_db($database) or die (mysql_error()); // Perform the search $find = mysql_real_escape_string($find); $query = "SELECT job.Title, job.Description, employer.Company_name, address.City, job.Date_posted, job.Application_deadline WHERE ( Title = '{$_GET['find']}' OR Company_name = '{$_GET['find']}' OR Date_posted = '{$_GET['find']}' OR Application_deadline = '{$_GET['find']}' ) AND job.employer_id_job = employer.employer_id AND job.address_id_job = address.address_id"; if (!$query) { die ('Invalid query:' .mysql_error()); } $result = mysql_query($query); $num = mysql_numrows($result); $count = 0; while ($count < $num) { $title = mysql_result ($result, $count, "Title"); $date_posted = mysql_result ($result, $count, "Date_posted"); $application_deadline = mysql_result ($result, $count, "Application_deadline"); $description = mysql_result ($result, $count, "Description"); $company = mysql_result ($result, $count, "Company_name"); $city = mysql_result ($result, $count, "City"); ?> <tr> <td><font face = "Arial, Helvetica, sans-serif"><? echo $count + 1; ?></font></td> <td><font face = "Arial, Helvetica, sans-serif"><? echo $title; ?></font></td> <td><font face = "Arial, Helvetica, sans-serif"><? 
echo $company; ?></font></td> <td><font face = "Arial, Helvetica, sans-serif"><? echo $description; ?></font></td> <td><font face = "Arial, Helvetica, sans-serif"><? echo $date_posted; ?></font></td> <td><font face = "Arial, Helvetica, sans-serif"><? echo $application_deadline; ?></font></td> <td><font face = "Arial, Helvetica, sans-serif"><? echo $education_level; ?></font></td> <td><font face = "Arial, Helvetica, sans-serif"><? echo $years_of_experience; ?></font></td> <? $count ++; } } ?> </body> </html>

    Read the article

  • SharePoint - Summing Calculated Columns By Groups (DVWP)

    - by Mark Rackley
    I had a problem… okay.. okay.. so I have many problems… but let’s focus on one in particular or this blog post would never end… okay? Thank you…. So, I had an electronic timesheet where users entered hours for each day of the week. It also had a “Week Total” column which was a calculated column of the sum. The calculated column looked like this: Pretty easy.. nothing spectacular. So, what’s the problem? WELL……………….. There is a row in the timesheet for each task a person worked on in a given week. So, if you worked on 4 tasks, you would have 4 rows of data, and 4 week totals for that week: This is all fine and dandy, but I want to know what the total was for the entire week. Yes.. I realize the answer is 24 from my example… I mean, I know how to add! I just want SharePoint to display it for me for the executives (we all know, they have math problems).  You may be thinking, hey genius (in a sarcastic tone of course), why don’t you just go to the view and total on the “Week Total” field? What a brilliant idea! Why didn’t I think of that… let’s go to the view and do just that…. Ohhhhhh… you can’t total on a Calculated Column.. it’s not even an option…  Yeah… I had the same moment. So, what do you do? Well… what do you think I did? 1) Googled “SharePoint total calculated column” 2) Said it couldn’t be done 3) Took a nap 4) Asked the question on Twitter. The correct answer of course is number 4… followed by number 3… although I may have told my boss number 2 so that I look more brilliant than I am. It’s safe to say I did NOT try to find the solution on my own doing step 1… that would be just WAY too easy… So, anyway, I posted the question on Twitter and it turns out several people had suggestions from using jQuery to using DVWPs. I tend to be a big fan of the DVWP except for the disgusting process of deploying them to another farm.. ugh… just shoot me…. so, that is the solution I went with. Laura Rogers (@WonderLaura) has a super duper easy to follow video on the subject over at EndUserSharePoint.com: SharePoint: Displaying Calculated Column SUMS in a View (Screencast) Laura’s video was very easy to follow and was ALMOST exactly what I needed. She does a great job walking you through every step of summing up a calculated field, which was PART of my problem. The other part was that my list is grouped by date! So, I wanted to see for a given week, the summed “Week Total” of hours. Laura got me on the right track with her video and I dug a little deeper into the DVWP to accomplish my task. So, here are the steps you follow: 1. Click on the “chevron” (I didn’t know it was actually called that until I heard Laura say it).. I always call it the “little-button-in-the-top-right-corner-with-the-greater-than-sign”.. but “chevron” is much shorter. So, click on the chevron, click on “Sort and Group”. Then add the field you want to group by, in my example it is the “Monday Date” of the timesheet entry. Make sure to check the check boxes for “Show Group Header” AND “Show Group Footer”. Click “OK”. The view now shows the count of each grouped set of data: Interesting, this looks very similar to Laura’s video… right? So, let’s take a look at the code for the Count: Count : <xsl:value-of select="count($nodeset)" /> Wow, also very similar… except in Laura’s video it looks like: Count : <xsl:value-of select="count($Rows)" /> So.. the only difference is that instead of $Rows we have $nodeset. It turns out the $nodeset will go through each Row in the group just like $Rows goes through each row in the entire view. 
So, using the exact same logic as in Laura’s blog except replacing $Rows with $nodeset we get the functionality of being able to sum up the values for a group. So, I want to replace “Count: #” with the total hours, this is done using the following changes to the above code: Week Total : <xsl:value-of select="sum($nodeset/@Monday)+sum($nodeset/@Tuesday) +sum($nodeset/@Wednesday)+sum($nodeset/@Thursday)+sum($nodeset/@Friday) +sum($nodeset/@Saturday)+sum($nodeset/@Sunday)" /> Our final output has the summed hours for each group! So… long story short… follow Laura’s blog, then group your list, then replace “$Rows” with “$nodeset”. One caveat, this will not work if you group by a person field. For some reason the person field does not go through each row in the group. I haven’t dug into this much yet. Maybe if I find some time… whatever that is… Anyway, Laura did all the work, I just took it one small step forward… as always, feel free to leave any additional insights you may have. We’re all learning here!

    Read the article

  • C#/.NET Little Wonders: Using ‘default’ to Get Default Values

    - by James Michael Hare
    Once again, in this series of posts I look at the parts of the .NET Framework that may seem trivial, but can help improve your code by making it easier to write and maintain. The index of all my past little wonders posts can be found here. Today’s little wonder is another of those small items that can help a lot in certain situations, especially when writing generics.  In particular, it is useful in determining what the default value of a given type would be. The Problem: what’s the default value for a generic type? There comes a time when you’re writing generic code where you may want to set an item of a given generic type.  Seems simple enough, right?  We’ll let’s see! Let’s say we want to query a Dictionary<TKey, TValue> for a given key and get back the value, but if the key doesn’t exist, we’d like a default value instead of throwing an exception. So, for example, we might have a the following dictionary defined: 1: var lookup = new Dictionary<int, string> 2: { 3: { 1, "Apple" }, 4: { 2, "Orange" }, 5: { 3, "Banana" }, 6: { 4, "Pear" }, 7: { 9, "Peach" } 8: }; And using those definitions, perhaps we want to do something like this: 1: // assume a default 2: string value = "Unknown"; 3:  4: // if the item exists in dictionary, get its value 5: if (lookup.ContainsKey(5)) 6: { 7: value = lookup[5]; 8: } But that’s inefficient, because then we’re double-hashing (once for ContainsKey() and once for the indexer).  Well, to avoid the double-hashing, we could use TryGetValue() instead: 1: string value; 2:  3: // if key exists, value will be put in value, if not default it 4: if (!lookup.TryGetValue(5, out value)) 5: { 6: value = "Unknown"; 7: } But the “flow” of using of TryGetValue() can get clunky at times when you just want to assign either the value or a default to a variable.  Essentially it’s 3-ish lines (depending on formatting) for 1 assignment.  So perhaps instead we’d like to write an extension method to support a cleaner interface that will return a default if the item isn’t found: 1: public static class DictionaryExtensions 2: { 3: public static TValue GetValueOrDefault<TKey, TValue>(this Dictionary<TKey, TValue> dict, 4: TKey key, TValue defaultIfNotFound) 5: { 6: TValue value; 7:  8: // value will be the result or the default for TValue 9: if (!dict.TryGetValue(key, out value)) 10: { 11: value = defaultIfNotFound; 12: } 13:  14: return value; 15: } 16: } 17:  So this creates an extension method on Dictionary<TKey, TValue> that will attempt to get a value using the given key, and will return the defaultIfNotFound as a stand-in if the key does not exist. This code compiles, fine, but what if we would like to go one step further and allow them to specify a default if not found, or accept the default for the type?  Obviously, we could overload the method to take the default or not, but that would be duplicated code and a bit heavy for just specifying a default.  It seems reasonable that we could set the not found value to be either the default for the type, or the specified value. So what if we defaulted the type to null? 1: public static TValue GetValueOrDefault<TKey, TValue>(this Dictionary<TKey, TValue> dict, 2: TKey key, TValue defaultIfNotFound = null) // ... No, this won’t work, because only reference types (and Nullable<T> wrapped types due to syntactical sugar) can be assigned to null.  So what about a calling parameterless constructor? 
1: public static TValue GetValueOrDefault<TKey, TValue>(this Dictionary<TKey, TValue> dict, 2: TKey key, TValue defaultIfNotFound = new TValue()) // ... No, this won’t work either for several reasons.  First, we’d expect a reference type to return null, not an “empty” instance.  Secondly, not all reference types have a parameter-less constructor (string for example does not).  And finally, a constructor cannot be determined at compile-time, while default values can. The Solution: default(T) – returns the default value for type T Many of us know the default keyword for its uses in switch statements as the default case.  But it has another use as well: it can return us the default value for a given type.  And since it generates the same defaults that default field initialization uses, it can be determined at compile-time as well. For example: 1: var x = default(int); // x is 0 2:  3: var y = default(bool); // y is false 4:  5: var z = default(string); // z is null 6:  7: var t = default(TimeSpan); // t is a TimeSpan with Ticks == 0 8:  9: var n = default(int?); // n is a Nullable<int> with HasValue == false Notice that for numeric types the default is 0, and for reference types the default is null.  In addition, for struct types, the value is a default-constructed struct – which simply means a struct where every field has their default value (hence 0 Ticks for TimeSpan, etc.). So using this, we could modify our code to this: 1: public static class DictionaryExtensions 2: { 3: public static TValue GetValueOrDefault<TKey, TValue>(this Dictionary<TKey, TValue> dict, 4: TKey key, TValue defaultIfNotFound = default(TValue)) 5: { 6: TValue value; 7:  8: // value will be the result or the default for TValue 9: if (!dict.TryGetValue(key, out value)) 10: { 11: value = defaultIfNotFound; 12: } 13:  14: return value; 15: } 16: } Now, if defaultIfNotFound is unspecified, it will use default(TValue) which will be the default value for whatever value type the dictionary holds.  So let’s consider how we could use this: 1: lookup.GetValueOrDefault(1); // returns “Apple” 2:  3: lookup.GetValueOrDefault(5); // returns null 4:  5: lookup.GetValueOrDefault(5, “Unknown”); // returns “Unknown” 6:  Again, do not confuse a parameter-less constructor with the default value for a type.  Remember that the default value for any type is the compile-time default for any instance of that type (0 for numeric, false for bool, null for reference types, and struct will all default fields for struct).  Consider the difference: 1: // both zero 2: int i1 = default(int); 3: int i2 = new int(); 4:  5: // both “zeroed” structs 6: var dt1 = default(DateTime); 7: var dt2 = new DateTime(); 8:  9: // sb1 is null, sb2 is an “empty” string builder 10: var sb1 = default(StringBuilder()); 11: var sb2 = new StringBuilder(); So in the above code, notice that the value types all resolve the same whether using default or parameter-less construction.  This is because a value type is never null (even Nullable<T> wrapped types are never “null” in a reference sense), they will just by default contain fields with all default values. However, for reference types, the default is null and not a constructed instance.  Also it should be noted that not all classes have parameter-less constructors (string, for instance, doesn’t have one – and doesn’t need one). Summary Whenever you need to get the default value for a type, especially a generic type, consider using the default keyword.  
This handy word will give you the default value for the given type at compile-time, which can then be used for initialization, optional parameters, etc. Technorati Tags: C#,CSharp,.NET,Little Wonders,default
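    As a small addendum (my own example, not from the article): with a value-type dictionary the same extension returns default(TValue), which for int is 0 rather than null, so no null checks are needed at the call site.

```csharp
// Assumes the GetValueOrDefault extension method defined in the article is in scope.
var wordCounts = new Dictionary<string, int>
{
    { "apple", 3 },
    { "orange", 1 }
};

int apples = wordCounts.GetValueOrDefault("apple");     // 3
int pears  = wordCounts.GetValueOrDefault("pear");      // 0, i.e. default(int)
int plums  = wordCounts.GetValueOrDefault("plum", -1);  // -1, explicit default
```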

    Read the article

  • SQL SERVER – SSIS Look Up Component – Cache Mode – Notes from the Field #028

    - by Pinal Dave
    [Notes from Pinal]: Lots of people think that SSIS is all about arranging various operations together in one logical flow. Well, the understanding is absolutely correct, but the implementation is not as easy as it seems. Similarly, most people assume the lookup component simply looks up additional information, so they pay little attention to how it is configured and eventually get very bad performance. Linchpin People are database coaches and wellness experts for a data driven world. In this 28th episode of the Notes from the Field series, database expert Tim Mitchell (partner at Linchpin People) explains how to configure the lookup component's cache mode well. In SQL Server Integration Services, the lookup component is one of the most frequently used tools for data validation and completion.  The lookup component is provided as a means to virtually join one set of data to another to validate and/or retrieve missing values.  Properly configured, it is reliable and reasonably fast. Among the many settings available on the lookup component, one of the most critical is the cache mode.  This selection will determine whether and how the distinct lookup values are cached during package execution.  It is critical to know how cache modes affect the result of the lookup and the performance of the package, as choosing the wrong setting can lead to poorly performing packages, and in some cases, incorrect results. Full Cache The full cache mode setting is the default cache mode selection in the SSIS lookup transformation.  Like the name implies, full cache mode will cause the lookup transformation to retrieve and store in SSIS cache the entire set of data from the specified lookup location.  As a result, the data flow in which the lookup transformation resides will not start processing any data buffers until all of the rows from the lookup query have been cached in SSIS. The most commonly used cache mode is the full cache setting, and for good reason.  The full cache setting has the most practical applications, and should be considered the go-to cache setting when dealing with an untested set of data. With a moderately sized set of reference data, a lookup transformation using full cache mode usually performs well.  Full cache mode does not require multiple round trips to the database, since the entire reference result set is cached prior to data flow execution. There are a few potential gotchas to be aware of when using full cache mode.  First, you can see some performance issues – memory pressure in particular – when using full cache mode against large sets of reference data.  If the table you use for the lookup is very large (either deep or wide, or perhaps both), there’s going to be a performance cost associated with retrieving and caching all of that data.  Also, keep in mind that when doing a lookup on character data, full cache mode will always do a case-sensitive (and in some cases, space-sensitive) string comparison even if your database is set to a case-insensitive collation.  This is because the in-memory lookup uses a .NET string comparison (which is case- and space-sensitive) as opposed to a database string comparison (which may be case sensitive, depending on collation).  
There’s a relatively easy workaround in which you can use the UPPER() or LOWER() function in the pipeline data and the reference data to ensure that case differences do not impact the success of your lookup operation.  Again, neither of these present a reason to avoid full cache mode, but should be used to determine whether full cache mode should be used in a given situation. Full cache mode is ideally useful when one or all of the following conditions exist: The size of the reference data set is small to moderately sized The size of the pipeline data set (the data you are comparing to the lookup table) is large, is unknown at design time, or is unpredictable Each distinct key value(s) in the pipeline data set is expected to be found multiple times in that set of data Partial Cache When using the partial cache setting, lookup values will still be cached, but only as each distinct value is encountered in the data flow.  Initially, each distinct value will be retrieved individually from the specified source, and then cached.  To be clear, this is a row-by-row lookup for each distinct key value(s). This is a less frequently used cache setting because it addresses a narrower set of scenarios.  Because each distinct key value(s) combination requires a relational round trip to the lookup source, performance can be an issue, especially with a large pipeline data set to be compared to the lookup data set.  If you have, for example, a million records from your pipeline data source, you have the potential for doing a million lookup queries against your lookup data source (depending on the number of distinct values in the key column(s)).  Therefore, one has to be keenly aware of the expected row count and value distribution of the pipeline data to safely use partial cache mode. Using partial cache mode is ideally suited for the conditions below: The size of the data in the pipeline (more specifically, the number of distinct key column) is relatively small The size of the lookup data is too large to effectively store in cache The lookup source is well indexed to allow for fast retrieval of row-by-row values No Cache As you might guess, selecting no cache mode will not add any values to the lookup cache in SSIS.  As a result, every single row in the pipeline data set will require a query against the lookup source.  Since no data is cached, it is possible to save a small amount of overhead in SSIS memory in cases where key values are not reused.  In the real world, I don’t see a lot of use of the no cache setting, but I can imagine some edge cases where it might be useful. As such, it’s critical to know your data before choosing this option.  Obviously, performance will be an issue with anything other than small sets of data, as the no cache setting requires row-by-row processing of all of the data in the pipeline. I would recommend considering the no cache mode only when all of the below conditions are true: The reference data set is too large to reasonably be loaded into SSIS memory The pipeline data set is small and is not expected to grow There are expected to be very few or no duplicates of the key values(s) in the pipeline data set (i.e., there would be no benefit from caching these values) Conclusion The cache mode, an often-overlooked setting on the SSIS lookup component, represents an important design decision in your SSIS data flow.  Choosing the right lookup cache mode directly impacts the fidelity of your results and the performance of package execution.  
Know how this selection impacts your ETL loads, and you’ll end up with more reliable, faster packages. If you want me to take a look at your server and its settings, or if your server is facing any issue we can Fix Your SQL Server. Reference: Pinal Dave (http://blog.sqlauthority.com)Filed under: Notes from the Field, PostADay, SQL, SQL Authority, SQL Query, SQL Server, SQL Tips and Tricks, T SQL Tagged: SSIS
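    To make the case-sensitivity gotcha described above concrete, here is a short C# illustration of mine (not from the article): the in-memory comparison used by the full-cache lookup behaves roughly like an ordinal .NET string comparison, while a case-insensitive SQL collation behaves more like an ignore-case comparison.

```csharp
using System;

class CacheComparisonDemo
{
    static void Main()
    {
        string pipelineValue = "ACME Corp";
        string referenceValue = "acme corp";

        // Roughly what a case-insensitive SQL collation would do: match.
        bool dbStyleMatch = string.Equals(
            pipelineValue, referenceValue, StringComparison.OrdinalIgnoreCase);

        // Roughly what the in-memory full-cache lookup does: no match.
        bool cacheStyleMatch = string.Equals(
            pipelineValue, referenceValue, StringComparison.Ordinal);

        Console.WriteLine(dbStyleMatch);    // True
        Console.WriteLine(cacheStyleMatch); // False
    }
}
```

    This is why the UPPER()/LOWER() workaround mentioned above works: normalizing the case on both sides lets the ordinal comparison match.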

    Read the article

  • XNA Xbox 360 Content Manager Thread freezing Draw Thread

    - by Alikar
    I currently have a game that takes in large images, easily bigger than 1MB, to serve as backgrounds. I know exactly when this transition is supposed to take place, so I made a loader class to handle loading these large images in the background, but when I load the images it still freezes the main thread where the drawing takes place. Since this code runs on the 360 I move the thread to the 4th hardware thread, but that doesn't seem to help. Below is the class I am using. Any thoughts as to why my new content manager which should be in its own thread is interrupting the draw in my main thread would be appreciated. namespace FileSystem { /// <summary> /// This is used to reference how many objects reference this texture. /// Everytime someone references a texture we increase the iNumberOfReferences. /// When a class calls remove on a specific texture we check to see if anything /// else is referencing the class, if it is we don't remove it. If there isn't /// anything referencing the texture its safe to dispose of. /// </summary> class TextureContainer { public uint uiNumberOfReferences = 0; public Texture2D texture; } /// <summary> /// This class loads all the files from the Content. /// </summary> static class FileManager { static Microsoft.Xna.Framework.Content.ContentManager Content; static EventWaitHandle wh = new AutoResetEvent(false); static Dictionary<string, TextureContainer> Texture2DResourceDictionary; static List<Texture2D> TexturesToDispose; static List<String> TexturesToLoad; static int iProcessor = 4; private static object threadMutex = new object(); private static object Texture2DMutex = new object(); private static object loadingMutex = new object(); private static bool bLoadingTextures = false; /// <summary> /// Returns if we are loading textures or not. /// </summary> public static bool LoadingTexture { get { lock (loadingMutex) { return bLoadingTextures; } } } /// <summary> /// Since this is an static class. This is the constructor for the file loadeder. This is the version /// for the Xbox 360. /// </summary> /// <param name="_Content"></param> public static void Initalize(IServiceProvider serviceProvider, string rootDirectory, int _iProcessor ) { Content = new Microsoft.Xna.Framework.Content.ContentManager(serviceProvider, rootDirectory); Texture2DResourceDictionary = new Dictionary<string, TextureContainer>(); TexturesToDispose = new List<Texture2D>(); iProcessor = _iProcessor; CreateThread(); } /// <summary> /// Since this is an static class. This is the constructor for the file loadeder. /// </summary> /// <param name="_Content"></param> public static void Initalize(IServiceProvider serviceProvider, string rootDirectory) { Content = new Microsoft.Xna.Framework.Content.ContentManager(serviceProvider, rootDirectory); Texture2DResourceDictionary = new Dictionary<string, TextureContainer>(); TexturesToDispose = new List<Texture2D>(); CreateThread(); } /// <summary> /// Creates the thread incase we wanted to set up some parameters /// Outside of the constructor. /// </summary> static public void CreateThread() { Thread t = new Thread(new ThreadStart(StartThread)); t.Start(); } // This is the function that we thread. static public void StartThread() { //BBSThreadClass BBSTC = (BBSThreadClass)_oData; FileManager.Execute(); } /// <summary> /// This thread shouldn't be called by the outside world. /// It allows the File Manager to loop. /// </summary> static private void Execute() { // Make sure our thread is on the correct processor on the XBox 360. 
#if WINDOWS #else Thread.CurrentThread.SetProcessorAffinity(new int[] { iProcessor }); Thread.CurrentThread.IsBackground = true; #endif // This loop will load textures into ram for us away from the main thread. while (true) { wh.WaitOne(); // Locking down our data while we process it. lock (threadMutex) { lock (loadingMutex) { bLoadingTextures = true; } bool bContainsKey = false; for (int con = 0; con < TexturesToLoad.Count; con++) { // If we have already loaded the texture into memory reference // the one in the dictionary. lock (Texture2DMutex) { bContainsKey = Texture2DResourceDictionary.ContainsKey(TexturesToLoad[con]); } if (bContainsKey) { // Do nothing } // Otherwise load it into the dictionary and then reference the // copy in the dictionary else { TextureContainer TC = new TextureContainer(); TC.uiNumberOfReferences = 1; // We start out with 1 referece. // Loading the texture into memory. try { TC.texture = Content.Load<Texture2D>(TexturesToLoad[con]); // This is passed into the dictionary, thus there is only one copy of // the texture in memory. // There is an issue with Sprite Batch and disposing textures. // This will have to wait until its figured out. lock (Texture2DMutex) { bContainsKey = Texture2DResourceDictionary.ContainsKey(TexturesToLoad[con]); Texture2DResourceDictionary.Add(TexturesToLoad[con], TC); } // We don't have the find the reference to the container since we // already have it. } // Occasionally our texture will already by loaded by another thread while // this thread is operating. This mainly happens on the first level. catch (Exception e) { // If this happens we don't worry about it since this thread only loads // texture data and if its already there we don't need to load it. } } Thread.Sleep(100); } } lock (loadingMutex) { bLoadingTextures = false; } } } static public void LoadTextureList(List<string> _textureList) { // Ensuring that we can't creating threading problems. lock (threadMutex) { TexturesToLoad = _textureList; } wh.Set(); } /// <summary> /// This loads a 2D texture which represents a 2D grid of Texels. /// </summary> /// <param name="_textureName">The name of the picture you wish to load.</param> /// <returns>Holds the image data.</returns> public static Texture2D LoadTexture2D( string _textureName ) { TextureContainer temp; lock (Texture2DMutex) { bool bContainsKey = false; // If we have already loaded the texture into memory reference // the one in the dictionary. lock (Texture2DMutex) { bContainsKey = Texture2DResourceDictionary.ContainsKey(_textureName); if (bContainsKey) { temp = Texture2DResourceDictionary[_textureName]; temp.uiNumberOfReferences++; // Incrementing the number of references } // Otherwise load it into the dictionary and then reference the // copy in the dictionary else { TextureContainer TC = new TextureContainer(); TC.uiNumberOfReferences = 1; // We start out with 1 referece. // Loading the texture into memory. try { TC.texture = Content.Load<Texture2D>(_textureName); // This is passed into the dictionary, thus there is only one copy of // the texture in memory. } // Occasionally our texture will already by loaded by another thread while // this thread is operating. This mainly happens on the first level. catch(Exception e) { temp = Texture2DResourceDictionary[_textureName]; temp.uiNumberOfReferences++; // Incrementing the number of references } // There is an issue with Sprite Batch and disposing textures. // This will have to wait until its figured out. 
Texture2DResourceDictionary.Add(_textureName, TC); // We don't have the find the reference to the container since we // already have it. temp = TC; } } } // Return a reference to the texture return temp.texture; } /// <summary> /// Go through our dictionary and remove any references to the /// texture passed in. /// </summary> /// <param name="texture">Texture to remove from texture dictionary.</param> public static void RemoveTexture2D(Texture2D texture) { foreach (KeyValuePair<string, TextureContainer> pair in Texture2DResourceDictionary) { // Do our references match? if (pair.Value.texture == texture) { // Only one object or less holds a reference to the // texture. Logically it should be safe to remove. if (pair.Value.uiNumberOfReferences <= 1) { // Grabing referenc to texture TexturesToDispose.Add(pair.Value.texture); // We are about to release the memory of the texture, // thus we make sure no one else can call this member // in the dictionary. Texture2DResourceDictionary.Remove(pair.Key); // Once we have removed the texture we don't want to create an exception. // So we will stop looking in the list since it has changed. break; } // More than one Object has a reference to this texture. // So we will not be removing it from memory and instead // simply marking down the number of references by 1. else { pair.Value.uiNumberOfReferences--; } } } } /*public static void DisposeTextures() { int Count = TexturesToDispose.Count; // If there are any textures to dispose of. if (Count > 0) { for (int con = 0; con < TexturesToDispose.Count; con++) { // =!THIS REMOVES THE TEXTURE FROM MEMORY!= // This is not like a normal dispose. This will actually // remove the object from memory. Texture2D is inherited // from GraphicsResource which removes it self from // memory on dispose. Very nice for game efficency, // but "dangerous" in managed land. Texture2D Temp = TexturesToDispose[con]; Temp.Dispose(); } // Remove textures we've already disposed of. TexturesToDispose.Clear(); } }*/ /// <summary> /// This loads a 2D texture which represnets a font. /// </summary> /// <param name="_textureName">The name of the font you wish to load.</param> /// <returns>Holds the font data.</returns> public static SpriteFont LoadFont( string _fontName ) { SpriteFont temp = Content.Load<SpriteFont>( _fontName ); return temp; } /// <summary> /// This loads an XML document. /// </summary> /// <param name="_textureName">The name of the XML document you wish to load.</param> /// <returns>Holds the XML data.</returns> public static XmlDocument LoadXML( string _fileName ) { XmlDocument temp = Content.Load<XmlDocument>( _fileName ); return temp; } /// <summary> /// This loads a sound file. /// </summary> /// <param name="_fileName"></param> /// <returns></returns> public static SoundEffect LoadSound( string _fileName ) { SoundEffect temp = Content.Load<SoundEffect>(_fileName); return temp; } } }
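    For comparison, one commonly used alternative on the 360 is to skip the loader thread entirely and time-slice the work on the main thread instead, loading at most one queued asset per Update call so that no single frame absorbs the whole cost. The sketch below is mine and is not a drop-in replacement for the FileManager class above; it assumes the standard XNA ContentManager and is only meant to show the shape of the approach. If a single image is itself too large to load within a frame, time-slicing only spreads the stalls out rather than removing them.

```csharp
using System.Collections.Generic;
using Microsoft.Xna.Framework.Content;
using Microsoft.Xna.Framework.Graphics;

// Minimal time-sliced loader: call Update() once per frame from the game's
// Update method; it loads at most one queued texture per call.
class TimeSlicedLoader
{
    private readonly ContentManager content;
    private readonly Queue<string> pending = new Queue<string>();
    private readonly Dictionary<string, Texture2D> loaded =
        new Dictionary<string, Texture2D>();

    public TimeSlicedLoader(ContentManager content)
    {
        this.content = content;
    }

    public void Queue(string assetName)
    {
        // Only queue assets that are neither loaded nor already pending.
        if (!loaded.ContainsKey(assetName) && !pending.Contains(assetName))
            pending.Enqueue(assetName);
    }

    public void Update()
    {
        // Load at most one asset per frame to bound the per-frame cost.
        if (pending.Count > 0)
        {
            string assetName = pending.Dequeue();
            loaded[assetName] = content.Load<Texture2D>(assetName);
        }
    }

    public bool TryGet(string assetName, out Texture2D texture)
    {
        return loaded.TryGetValue(assetName, out texture);
    }

    public bool IsIdle
    {
        get { return pending.Count == 0; }
    }
}
```

    The game can queue the next background a few seconds before the transition and check IsIdle (or TryGet) before switching, which keeps all ContentManager and GraphicsDevice work on the thread that owns the device.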

    Read the article

  • Modify code to change timestamp timezone in sitemap

    - by Aahan Krish
    Below is the code from a plugin I use for sitemaps. I would like to know if there's a way to "enforce" a different timezone on all date variable and functions in it. If not, how do I modify the code to change the timestamp to reflect a different timezone? Say for example, to America/New_York timezone. Please look for date to quickly get to the appropriate code blocks. Code on Pastebin. <?php /** * @package XML_Sitemaps */ class WPSEO_Sitemaps { ..... /** * Build the root sitemap -- example.com/sitemap_index.xml -- which lists sub-sitemaps * for other content types. * * @todo lastmod for sitemaps? */ function build_root_map() { global $wpdb; $options = get_wpseo_options(); $this->sitemap = '<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">' . "\n"; $base = $GLOBALS['wp_rewrite']->using_index_permalinks() ? 'index.php/' : ''; // reference post type specific sitemaps foreach ( get_post_types( array( 'public' => true ) ) as $post_type ) { if ( $post_type == 'attachment' ) continue; if ( isset( $options['post_types-' . $post_type . '-not_in_sitemap'] ) && $options['post_types-' . $post_type . '-not_in_sitemap'] ) continue; $count = $wpdb->get_var( $wpdb->prepare( "SELECT COUNT(ID) FROM $wpdb->posts WHERE post_type = %s AND post_status = 'publish' LIMIT 1", $post_type ) ); // don't include post types with no posts if ( !$count ) continue; $n = ( $count > 1000 ) ? (int) ceil( $count / 1000 ) : 1; for ( $i = 0; $i < $n; $i++ ) { $count = ( $n > 1 ) ? $i + 1 : ''; if ( empty( $count ) || $count == $n ) { $date = $this->get_last_modified( $post_type ); } else { $date = $wpdb->get_var( $wpdb->prepare( "SELECT post_modified_gmt FROM $wpdb->posts WHERE post_status = 'publish' AND post_type = %s ORDER BY post_modified_gmt ASC LIMIT 1 OFFSET %d", $post_type, $i * 1000 + 999 ) ); $date = date( 'c', strtotime( $date ) ); } $this->sitemap .= '<sitemap>' . "\n"; $this->sitemap .= '<loc>' . home_url( $base . $post_type . '-sitemap' . $count . '.xml' ) . '</loc>' . "\n"; $this->sitemap .= '<lastmod>' . htmlspecialchars( $date ) . '</lastmod>' . "\n"; $this->sitemap .= '</sitemap>' . "\n"; } } // reference taxonomy specific sitemaps foreach ( get_taxonomies( array( 'public' => true ) ) as $tax ) { if ( in_array( $tax, array( 'link_category', 'nav_menu', 'post_format' ) ) ) continue; if ( isset( $options['taxonomies-' . $tax . '-not_in_sitemap'] ) && $options['taxonomies-' . $tax . '-not_in_sitemap'] ) continue; // don't include taxonomies with no terms if ( !$wpdb->get_var( $wpdb->prepare( "SELECT term_id FROM $wpdb->term_taxonomy WHERE taxonomy = %s AND count != 0 LIMIT 1", $tax ) ) ) continue; // Retrieve the post_types that are registered to this taxonomy and then retrieve last modified date for all of those combined. $taxobj = get_taxonomy( $tax ); $date = $this->get_last_modified( $taxobj->object_type ); $this->sitemap .= '<sitemap>' . "\n"; $this->sitemap .= '<loc>' . home_url( $base . $tax . '-sitemap.xml' ) . '</loc>' . "\n"; $this->sitemap .= '<lastmod>' . htmlspecialchars( $date ) . '</lastmod>' . "\n"; $this->sitemap .= '</sitemap>' . "\n"; } // allow other plugins to add their sitemaps to the index $this->sitemap .= apply_filters( 'wpseo_sitemap_index', '' ); $this->sitemap .= '</sitemapindex>'; } /** * Build a sub-sitemap for a specific post type -- example.com/post_type-sitemap.xml * * @param string $post_type Registered post type's slug */ function build_post_type_map( $post_type ) { $options = get_wpseo_options(); ............ 
// We grab post_date, post_name, post_author and post_status too so we can throw these objects into get_permalink, which saves a get_post call for each permalink. while ( $total > $offset ) { $join_filter = apply_filters( 'wpseo_posts_join', '', $post_type ); $where_filter = apply_filters( 'wpseo_posts_where', '', $post_type ); // Optimized query per this thread: http://wordpress.org/support/topic/plugin-wordpress-seo-by-yoast-performance-suggestion // Also see http://explainextended.com/2009/10/23/mysql-order-by-limit-performance-late-row-lookups/ $posts = $wpdb->get_results( "SELECT l.ID, post_content, post_name, post_author, post_parent, post_modified_gmt, post_date, post_date_gmt FROM ( SELECT ID FROM $wpdb->posts {$join_filter} WHERE post_status = 'publish' AND post_password = '' AND post_type = '$post_type' {$where_filter} ORDER BY post_modified ASC LIMIT $steps OFFSET $offset ) o JOIN $wpdb->posts l ON l.ID = o.ID ORDER BY l.ID" ); /* $posts = $wpdb->get_results("SELECT ID, post_content, post_name, post_author, post_parent, post_modified_gmt, post_date, post_date_gmt FROM $wpdb->posts {$join_filter} WHERE post_status = 'publish' AND post_password = '' AND post_type = '$post_type' {$where_filter} ORDER BY post_modified ASC LIMIT $steps OFFSET $offset"); */ $offset = $offset + $steps; foreach ( $posts as $p ) { $p->post_type = $post_type; $p->post_status = 'publish'; $p->filter = 'sample'; if ( wpseo_get_value( 'meta-robots-noindex', $p->ID ) && wpseo_get_value( 'sitemap-include', $p->ID ) != 'always' ) continue; if ( wpseo_get_value( 'sitemap-include', $p->ID ) == 'never' ) continue; if ( wpseo_get_value( 'redirect', $p->ID ) && strlen( wpseo_get_value( 'redirect', $p->ID ) ) > 0 ) continue; $url = array(); $url['mod'] = ( isset( $p->post_modified_gmt ) && $p->post_modified_gmt != '0000-00-00 00:00:00' ) ? $p->post_modified_gmt : $p->post_date_gmt; $url['chf'] = 'weekly'; $url['loc'] = get_permalink( $p ); ............. } /** * Build a sub-sitemap for a specific taxonomy -- example.com/tax-sitemap.xml * * @param string $taxonomy Registered taxonomy's slug */ function build_tax_map( $taxonomy ) { $options = get_wpseo_options(); .......... // Grab last modified date $sql = "SELECT MAX(p.post_date) AS lastmod FROM $wpdb->posts AS p INNER JOIN $wpdb->term_relationships AS term_rel ON term_rel.object_id = p.ID INNER JOIN $wpdb->term_taxonomy AS term_tax ON term_tax.term_taxonomy_id = term_rel.term_taxonomy_id AND term_tax.taxonomy = '$c->taxonomy' AND term_tax.term_id = $c->term_id WHERE p.post_status = 'publish' AND p.post_password = ''"; $url['mod'] = $wpdb->get_var( $sql ); $url['chf'] = 'weekly'; $output .= $this->sitemap_url( $url ); } } /** * Build the <url> tag for a given URL. * * @param array $url Array of parts that make up this entry * @return string */ function sitemap_url( $url ) { if ( isset( $url['mod'] ) ) $date = mysql2date( "Y-m-d\TH:i:s+00:00", $url['mod'] ); else $date = date( 'c' ); $output = "\t<url>\n"; $output .= "\t\t<loc>" . $url['loc'] . "</loc>\n"; $output .= "\t\t<lastmod>" . $date . "</lastmod>\n"; $output .= "\t\t<changefreq>" . $url['chf'] . "</changefreq>\n"; $output .= "\t\t<priority>" . str_replace( ',', '.', $url['pri'] ) . "</priority>\n"; if ( isset( $url['images'] ) && count( $url['images'] ) > 0 ) { foreach ( $url['images'] as $img ) { $output .= "\t\t<image:image>\n"; $output .= "\t\t\t<image:loc>" . esc_html( $img['src'] ) . "</image:loc>\n"; if ( isset( $img['title'] ) ) $output .= "\t\t\t<image:title>" . 
_wp_specialchars( html_entity_decode( $img['title'], ENT_QUOTES, get_bloginfo('charset') ) ) . "</image:title>\n"; if ( isset( $img['alt'] ) ) $output .= "\t\t\t<image:caption>" . _wp_specialchars( html_entity_decode( $img['alt'], ENT_QUOTES, get_bloginfo('charset') ) ) . "</image:caption>\n"; $output .= "\t\t</image:image>\n"; } } $output .= "\t</url>\n"; return $output; } /** * Get the modification date for the last modified post in the post type: * * @param array $post_types Post types to get the last modification date for * @return string */ function get_last_modified( $post_types ) { global $wpdb; if ( !is_array( $post_types ) ) $post_types = array( $post_types ); $result = 0; foreach ( $post_types as $post_type ) { $key = 'lastpostmodified:gmt:' . $post_type; $date = wp_cache_get( $key, 'timeinfo' ); if ( !$date ) { $date = $wpdb->get_var( $wpdb->prepare( "SELECT post_modified_gmt FROM $wpdb->posts WHERE post_status = 'publish' AND post_type = %s ORDER BY post_modified_gmt DESC LIMIT 1", $post_type ) ); if ( $date ) wp_cache_set( $key, $date, 'timeinfo' ); } if ( strtotime( $date ) > $result ) $result = strtotime( $date ); } // Transform to W3C Date format. $result = date( 'c', $result ); return $result; } } global $wpseo_sitemaps; $wpseo_sitemaps = new WPSEO_Sitemaps();

    Read the article

  • How to improve this piece of code

    - by user303518
    Can anyone help me on this. It may be very frustrating for you all. But I want you guys to take a moment to look at the code below and please tell me all the things that are wrong in the below piece of code. You can copy it into your visual studio to analyze this better. You don’t have to make this code compile. My task is to get the following things: Most obvious mistakes with this code All the things that are wrong/bad practices with the code below Modify/Write an optimized version of this code. Keep in mind, the code DOES NOT need to compile. Reduce the number of lines of code You should NEVER try to implement something like below: public List<ValidationErrorDto> ProcessEQuote(string eQuoteXml, long programUniversalID) { // Get Program Info. DataTable programs = GetAllPrograms(); DataRow[] programRows = programs.Select(string.Format("ProgramUniversalID = {0}", programUniversalID)); long programID = (long)programRows[0]["ProgramID"]; string programName = (string)programRows[0]["Description"]; // Get Config file values. string fromEmail = ConfigurationManager.AppSettings["eQuotesFromEmail"]; string technicalSupportPhone = ConfigurationManager.AppSettings["TechnicalSupportPhone"]; string fromEmailDisplayName = string.IsNullOrEmpty(ConfigurationManager.AppSettings["eQuotesFromDisplayName"]) ? null : string.Format(ConfigurationManager.AppSettings["eQuotesFromDisplayName"], programName); string itEmail = !string.IsNullOrEmpty(ConfigurationManager.AppSettings["ITEmail"]) ? ConfigurationManager.AppSettings["ITEmail"] : string.Empty; string itName = !string.IsNullOrEmpty(ConfigurationManager.AppSettings["ITName"]) ? ConfigurationManager.AppSettings["ITName"] : "IT"; try { List<ValidationErrorDto> allValidationErrors = new List<ValidationErrorDto>(); List<ValidationErrorDto> validationErrors = new List<ValidationErrorDto>(); if (validationErrors.Count == 0) { validationErrors.AddRange(ValidateEQuoteXmlAgainstSchema(eQuoteXml)); if (validationErrors.Count == 0) { XmlDocument eQuoteXmlDoc = new XmlDocument(); eQuoteXmlDoc.LoadXml(eQuoteXml); XmlElement rootElement = eQuoteXmlDoc.DocumentElement; XmlNodeList quotesList = rootElement.SelectNodes("Quote"); foreach (XmlNode node in quotesList) { // Each node should be a quote node but to be safe, check if (node.Name == "Quote") { string groupName = node.SelectSingleNode("Group/GroupName").InnerText; string groupCity = node.SelectSingleNode("Group/GroupCity").InnerText; string groupPostalCode = node.SelectSingleNode("Group/GroupZipCode").InnerText; string groupSicCode = node.SelectSingleNode("Group/GroupSIC").InnerText; string generalAgencyLicenseNumber = node.SelectSingleNode("Group/GALicenseNbr").InnerText; string brokerName = node.SelectSingleNode("Group/BrokerName").InnerText; string deliverToEmailAddress = node.SelectSingleNode("Group/ReturnEmailAddress").InnerText; string brokerEmail = node.SelectSingleNode("Group/BrokerEmail").InnerText; string groupEligibleEmployeeCountString = node.SelectSingleNode("Group/GroupNbrEmployees").InnerText; string quoteEffectiveDateString = node.SelectSingleNode("Group/QuoteEffectiveDate").InnerText; string salesRepName = node.SelectSingleNode("Group/SalesRepName").InnerText; string salesRepPhone = node.SelectSingleNode("Group/SalesRepPhone").InnerText; string salesRepEmail = node.SelectSingleNode("Group/SalesRepEmail").InnerText; string brokerLicenseNumber = node.SelectSingleNode("Group/BrokerLicenseNbr").InnerText; DateTime? 
quoteEffectiveDate = null; int eligibleEmployeeCount = int.Parse(groupEligibleEmployeeCountString); DateTime quoteEffectiveDateOut; if (!DateTime.TryParse(quoteEffectiveDateString, out quoteEffectiveDateOut)) validationErrors.Add(ValidationHelper.CreateValidationError((long)QuoteField.EffectiveDate, "Quote Effective Date", ValidationErrorDto.ValueOutOfRange, false, ValidationHelper.CreateValidationContext(Entity.QuoteDetail, "Quote"))); else quoteEffectiveDate = quoteEffectiveDateOut; Dictionary<string, string> replacementCodeValues = new Dictionary<string, string>(); if (string.IsNullOrEmpty(Resources.ParameterMessageKeys.ResourceManager.GetString("GroupName"))) throw new InvalidOperationException("GroupName key is not configured"); replacementCodeValues.Add(Resources.ParameterMessageKeys.GroupName, groupName); replacementCodeValues.Add(Resources.ParameterMessageKeys.ProgramName, programName); replacementCodeValues.Add(Resources.ParameterMessageKeys.SalesRepName, salesRepName); replacementCodeValues.Add(Resources.ParameterMessageKeys.SalesRepPhone, salesRepPhone); replacementCodeValues.Add(Resources.ParameterMessageKeys.SalesRepEmail, salesRepEmail); replacementCodeValues.Add(Resources.ParameterMessageKeys.TechnicalSupportPhone, technicalSupportPhone); replacementCodeValues.Add(Resources.ParameterMessageKeys.EligibleEmployeCount, eligibleEmployeeCount.ToString()); // Retrieve the CityID and StateID long? cityID = null; long? stateID = null; DataSet citiesAndStates = Addresses.GetCitiesAndStatesFromPostalCode(groupPostalCode); DataTable cities = citiesAndStates.Tables[0]; DataTable states = citiesAndStates.Tables[1]; DataRow[] cityRows = cities.Select(string.Format("Name = '{0}'", groupCity)); if (cityRows.Length > 0) { cityID = (long)cityRows[0]["CityID"]; DataRow[] stateRows = states.Select(string.Format("CityID = {0}", cityID)); if (stateRows.Length > 0) stateID = (long)stateRows[0]["StateID"]; } // If the StateID does not exist, then we cannot get the GeneralAgency, so set a validation error and do not contine. // Else, Continue and look for an General Agency. If a GA was found, look for or create a Broker. Then look for or create a Prospect Group // Then using the info, create a quote. if (!stateID.HasValue) validationErrors.Add(ValidationHelper.CreateValidationError((long)ProspectGroupField.State, "Prospect Group State", ValidationErrorDto.RequiredFieldMissing, false, ValidationHelper.CreateValidationContext(Entity.ProspectGroup, "Prospect Group"))); bool brokerValidationError = false; SalesRepDto salesRep = GetSalesRepByEmail(salesRepEmail, ref validationErrors); if (salesRep == null) { string exceptionMessage = "Sales Rep Not found in Opportunity System. Please make sure Sales Rep is present in the system by adding the sales rep in AWP SR Add Screen." + Environment.NewLine; exceptionMessage = exceptionMessage + " Sales Rep Name: " + salesRepName + Environment.NewLine; exceptionMessage = exceptionMessage + " Sales Rep Email: " + salesRepEmail + Environment.NewLine; exceptionMessage = exceptionMessage + " Module : E-Quote Service" + Environment.NewLine; throw new Exception(exceptionMessage); } if (validationErrors.Count == 0) { // Note that StateID and EffectiveDate should be valid at this point. If it weren't there would be validation errors. // Find General Agency long? generalAgencyID; validationErrors.AddRange(GetEQuoteGeneralAgency(generalAgencyLicenseNumber, stateID.Value, out generalAgencyID)); // If GA was found, check for Broker. 
if (validationErrors.Count == 0 && generalAgencyID.HasValue) { Dictionary<string, string> brokerNameParts = ContactHelper.GetNamePartsFromFullName(brokerName); long? brokerID; validationErrors.AddRange(CreateEQuoteBroker(brokerNameParts["FirstName"], brokerNameParts["LastName"], brokerEmail, brokerLicenseNumber, stateID.Value, generalAgencyID.Value, salesRep, programID, out brokerID)); // If Broker was found but had validation errors if (validationErrors.Count > 0) { DeliverEmailMessage(programID, quoteEffectiveDate.Value, fromEmail, fromEmailDisplayName, itEmail, DocumentType.EQuoteBrokerValidationFailureMessageEmail, replacementCodeValues); brokerValidationError = true; } // If Broker was found, check for Prospect Group if (validationErrors.Count == 0 && brokerID.HasValue) { long? prospectGroupID; validationErrors.AddRange(CreateEQuoteProspectGroup(groupName, cityID, stateID, groupPostalCode, groupSicCode, brokerID.Value, out prospectGroupID)); if (validationErrors.Count == 0 && prospectGroupID.HasValue) { if (validationErrors.Count == 0) { long? quoteID; validationErrors.AddRange(CreateEQuote(programID, prospectGroupID.Value, generalAgencyID.Value, quoteEffectiveDate.Value, eligibleEmployeeCount, deliverToEmailAddress, node.SelectNodes("Employees/Employee"), deliverToEmailAddress, out quoteID)); if (validationErrors.Count == 0 && quoteID.HasValue) { QuoteFromServiceDto quoteFromService = GetQuoteByQuoteID(quoteID.Value); // Generate Pre-Message replacementCodeValues.Add(Resources.ParameterMessageKeys.QuoteNumber, string.Format("{0}.{1}", quoteFromService.QuoteNumber, quoteFromService.QuoteVersion)); replacementCodeValues.Add(Resources.ParameterMessageKeys.Name, brokerName); replacementCodeValues.Add(Resources.ParameterMessageKeys.LicenseNumbers, brokerLicenseNumber); DeliverEmailMessage(programID, quoteEffectiveDate.Value, fromEmail, fromEmailDisplayName, deliverToEmailAddress, DocumentType.EQuotePreMessageEmail, replacementCodeValues); bool quoteGenerated = false; try { quoteGenerated = GenerateAndDeliverInitialQuote(quoteID.Value); } catch (Exception exception) { TraceLogger.LogException(exception, LoggingCategory); if (replacementCodeValues.ContainsKey(Resources.ParameterMessageKeys.Name)) replacementCodeValues[Resources.ParameterMessageKeys.Name] = itName; else replacementCodeValues.Add(Resources.ParameterMessageKeys.Name, itName); if (replacementCodeValues.ContainsKey(Resources.ParameterMessageKeys.Errors)) replacementCodeValues[Resources.ParameterMessageKeys.Errors] = string.Format("Errors:\r\n:{0}", exception); else replacementCodeValues.Add(Resources.ParameterMessageKeys.Errors, string.Format("Errors:\r\n:{0}", exception)); DeliverEmailMessage(programID, quoteEffectiveDate.Value, fromEmail, fromEmailDisplayName, itEmail, DocumentType.EQuoteSystemFailureMessageEmail, replacementCodeValues); } if (!quoteGenerated) { // Generate System Failure Message if (replacementCodeValues.ContainsKey(Resources.ParameterMessageKeys.Name)) replacementCodeValues[Resources.ParameterMessageKeys.Name] = brokerName; else replacementCodeValues.Add(Resources.ParameterMessageKeys.Name, brokerName); if (replacementCodeValues.ContainsKey(Resources.ParameterMessageKeys.Errors)) replacementCodeValues[Resources.ParameterMessageKeys.Errors] = string.Empty; else replacementCodeValues.Add(Resources.ParameterMessageKeys.Errors, string.Empty); DeliverEmailMessage(programID, quoteEffectiveDate.Value, fromEmail, fromEmailDisplayName, itEmail, DocumentType.EQuoteSystemFailureMessageEmail, replacementCodeValues); } } 
} } } } } //if (validationErrors.Count > 0) // Per spec, if Broker Validation returned an error we already sent an email, don't send another generic one if (validationErrors.Count > 0 && (!brokerValidationError)) { StringBuilder errorString = new StringBuilder(); foreach (ValidationErrorDto validationError in validationErrors) errorString = errorString.AppendLine(string.Format(" - {0}", ValidationHelper.GetValidationErrorReason(validationError, true))); replacementCodeValues.Add(Resources.ParameterMessageKeys.Errors, errorString.ToString()); if (replacementCodeValues.ContainsKey(Resources.ParameterMessageKeys.Name)) replacementCodeValues[Resources.ParameterMessageKeys.Name] = brokerName; else replacementCodeValues.Add(Resources.ParameterMessageKeys.Name, brokerName); // HACK: If there is no effective date, then use Today's date. Do we care about the effecitve dat on validation message? if (quoteEffectiveDate.HasValue) DeliverEmailMessage(programID, quoteEffectiveDate.Value, fromEmail, fromEmailDisplayName, itEmail, DocumentType.EQuoteValidationFailureMessageEmail, replacementCodeValues); else DeliverEmailMessage(programID, DateTime.Now, fromEmail, fromEmailDisplayName, itEmail, DocumentType.EQuoteValidationFailureMessageEmail, replacementCodeValues); } allValidationErrors.AddRange(validationErrors); validationErrors.Clear(); } } } else { // Use todays date as the effective date. Dictionary<string, string> replacementCodeValues = new Dictionary<string, string>(); StringBuilder errorString = new StringBuilder(); foreach (ValidationErrorDto validationError in validationErrors) errorString = errorString.AppendLine(string.Format(" - {0}", ValidationHelper.GetValidationErrorReason(validationError, true))); replacementCodeValues.Add(Resources.ParameterMessageKeys.Errors, string.Format("The following validation errors occurred: \r\n{0}", errorString)); replacementCodeValues.Add(Resources.ParameterMessageKeys.ProgramName, programName); replacementCodeValues.Add(Resources.ParameterMessageKeys.GroupName, "Group"); replacementCodeValues.Add(Resources.ParameterMessageKeys.Name, itName); DeliverEmailMessage(programID, DateTime.Now, fromEmail, null, itEmail, DocumentType.EQuoteSystemFailureMessageEmail, replacementCodeValues); allValidationErrors.AddRange(validationErrors); validationErrors.Clear(); } } return allValidationErrors; } catch (Exception exception) { TraceLogger.LogException(exception, LoggingCategory); // Use todays date as the effective date. Dictionary<string, string> replacementCodeValues = new Dictionary<string, string>(); replacementCodeValues.Add(Resources.ParameterMessageKeys.ProgramName, programName); replacementCodeValues.Add(Resources.ParameterMessageKeys.GroupName, "Group"); replacementCodeValues.Add(Resources.ParameterMessageKeys.Name, itName); replacementCodeValues.Add(Resources.ParameterMessageKeys.Errors, string.Format("Errors:\r\n:{0}", exception)); DeliverEmailMessage(programID, DateTime.Now, fromEmail, null, itEmail, DocumentType.EQuoteSystemFailureMessageEmail, replacementCodeValues); throw new FaultException(exception.ToString()); } }

    Read the article

  • Get 1 array from 2 arrays (using RestKit 0.20)

    - by Reez
    I'm using RestKit and was wondering how to combine two array's into one array. I already have the data being pulled in separately from API1 and API2, but I don't know how to combine them into 1 tableView. Each API is pulling in media, and I want the combined tableView to show the most recent media (like any standard timeline does these days). I will post any extra code or help as necessary, thanks so much! Below shows API1 + API2 being pulled in correctly, but not combined into the tableView. Only data from API1 shows in the tableView. ViewController.m @interface StackOverflowViewController () @property (strong, nonatomic) NSArray *hArray; @property (strong, nonatomic) NSArray *springs; @property (strong, nonatomic) RKObjectManager *eObjectManager; @property (strong, nonatomic) NSArray *iArray; @property (strong, nonatomic) NSArray *imagesArray; @property (strong, nonatomic) RKObjectManager *iObjectManager; // Wain @property (strong, nonatomic) NSMutableArray *tableDataList; // Laarme @property (nonatomic, strong) NSMutableArray *contentArray; @property (nonatomic, strong) NSDateFormatter *dateFormatter1; // Dan @property (nonatomic, strong) NSMutableArray *combinedModel; @end @implementation StackOverflowViewController @synthesize tableView = _tableView; @synthesize spring; @synthesize leaf; @synthesize theme; @synthesize hArray; @synthesize springs; @synthesize eObjectManager; @synthesize iArray; @synthesize imagesArray; @synthesize iObjectManager; // Wain @synthesize tableDataList; // Laarme @synthesize contentArray; @synthesize dateFormatter1; // Dan @synthesize combinedModel; - (void)viewDidLoad { [super viewDidLoad]; // Do any additional setup after loading the view. [self configureRestKit]; [self loadMediaDan]; [self sortCombinedModel]; } - (void)didReceiveMemoryWarning { [super didReceiveMemoryWarning]; // Dispose of any resources that can be recreated. 
} - (void)configureRestKit { // API1 // initialize AFNetworking HTTPClient NSURL *baseURLE = [NSURL URLWithString:@"https://api.e.com"]; AFHTTPClient *clientE = [[AFHTTPClient alloc] initWithBaseURL:baseURLE]; // initialize RestKit RKObjectManager *eManager = [[RKObjectManager alloc] initWithHTTPClient:clientE]; self.eObjectManager = eManager; // setup object mappings RKObjectMapping *feedMapping = [RKObjectMapping mappingForClass:[Feed class]]; [feedMapping addAttributeMappingsFromArray:@[@"headline", @"premium", @"published", @"description"]]; RKObjectMapping *linksMapping = [RKObjectMapping mappingForClass:[Links class]]; RKObjectMapping *webMapping = [RKObjectMapping mappingForClass:[Web class]]; [webMapping addAttributeMappingsFromArray:@[@"href"]]; RKObjectMapping *mobileMapping = [RKObjectMapping mappingForClass:[Mobile class]]; [mobileMapping addAttributeMappingsFromArray:@[@"href"]]; RKObjectMapping *imagesMapping = [RKObjectMapping mappingForClass:[Images class]]; [imagesMapping addAttributeMappingsFromArray:@[@"height", @"width", @"url"]]; [feedMapping addPropertyMapping:[RKRelationshipMapping relationshipMappingFromKeyPath:@"links" toKeyPath:@"links" withMapping:linksMapping]]; [feedMapping addPropertyMapping:[RKRelationshipMapping relationshipMappingFromKeyPath:@"images" toKeyPath:@"images" withMapping:imagesMapping]]; [linksMapping addPropertyMapping:[RKRelationshipMapping relationshipMappingFromKeyPath:@"web" toKeyPath:@"web" withMapping:webMapping]]; [linksMapping addPropertyMapping:[RKRelationshipMapping relationshipMappingFromKeyPath:@"mobile" toKeyPath:@"mobile" withMapping:mobileMapping]]; // register mappings with the provider using a response descriptor RKResponseDescriptor *responseDescriptor = [RKResponseDescriptor responseDescriptorWithMapping:feedMapping method:RKRequestMethodGET pathPattern:nil keyPath:@"feed" statusCodes:[NSIndexSet indexSetWithIndex:200]]; [self.eObjectManager addResponseDescriptor:responseDescriptor]; // API2 // initialize AFNetworking HTTPClient NSURL *baseURLI = [NSURL URLWithString:@"https://api.i.com"]; AFHTTPClient *clientI = [[AFHTTPClient alloc] initWithBaseURL:baseURLI]; // initialize RestKit RKObjectManager *iManager = [[RKObjectManager alloc] initWithHTTPClient:clientI]; self.iObjectManager = iManager; // setup object mappings RKObjectMapping *dataMapping = [RKObjectMapping mappingForClass:[Data class]]; [dataMapping addAttributeMappingsFromArray:@[@"link", @"created_time"]]; RKObjectMapping *imagesMapping = [RKObjectMapping mappingForClass:[ImagesI class]]; [IMapping addAttributeMappingsFromArray:@[@""]]; RKObjectMapping *standardResolutionMapping = [RKObjectMapping mappingForClass:[StandardResolution class]]; [standardResolutionMapping addAttributeMappingsFromArray:@[@"url", @"width", @"height"]]; RKObjectMapping *captionMapping = [RKObjectMapping mappingForClass:[Caption class]]; [captionMapping addAttributeMappingsFromArray:@[@"text", @"created_time"]]; RKObjectMapping *userMapping = [RKObjectMapping mappingForClass:[User class]]; [userMapping addAttributeMappingsFromArray:@[@"username"]]; [dataMapping addPropertyMapping:[RKRelationshipMapping relationshipMappingFromKeyPath:@"images" toKeyPath:@"images" withMapping:imagesMapping]]; [imagesMapping addPropertyMapping:[RKRelationshipMapping relationshipMappingFromKeyPath:@"standard_resolution" toKeyPath:@"standard_resolution" withMapping:standardResolutionMapping]]; [dataMapping addPropertyMapping:[RKRelationshipMapping relationshipMappingFromKeyPath:@"caption" toKeyPath:@"caption" 
withMapping:captionMapping]]; [dataMapping addPropertyMapping:[RKRelationshipMapping relationshipMappingFromKeyPath:@"user" toKeyPath:@"user" withMapping:userMapping]]; // register mappings with the provider using a response descriptor RKResponseDescriptor *responseDescriptor2 = [RKResponseDescriptor responseDescriptorWithMapping:dataMapping method:RKRequestMethodGET pathPattern:nil keyPath:@"data" statusCodes:[NSIndexSet indexSetWithIndex:200]]; [self.iObjectManager addResponseDescriptor:responseDescriptor2]; } - (void)loadMedia { // Laarme contentArray = [[NSMutableArray alloc] init]; [self sortByDates]; // API1 NSString *apikey = @kCLIENTKEY; NSDictionary *queryParams = @{@"apikey" : apikey,}; [self.eObjectManager getObjectsAtPath:[NSString stringWithFormat:@"v1/n/?limit=4&leafs=%@&themes=%@", leafAbbreviation, themeID] // Changed limit to 4 for the time being parameters:queryParams success:^(RKObjectRequestOperation *operation, RKMappingResult *mappingResult) { hArray = mappingResult.array; [self.tableView reloadData]; } failure:^(RKObjectRequestOperation *operation, NSError *error) { NSLog(@"No?: %@", error); }]; // API2 [self.iObjectManager getObjectsAtPath:[NSString stringWithFormat:@"v1/u/2/m/recent/?client_id=e999"] parameters:nil success:^(RKObjectRequestOperation *operation, RKMappingResult *mappingResult) { iArray = mappingResult.array; [self.tableView reloadData]; } failure:^(RKObjectRequestOperation *operation, NSError *error) { NSLog(@"No: %@", error); }]; } // Laarme - (void)sortByDates { NSDateFormatter *dateFormatter2 = [[NSDateFormatter alloc] init]; //Do the dateFormatter settings, you may have to use 2 NSDateFormatters if the format is different for Data & Feed //The initialization of the dateFormatter is done before the block, because its alloc/init take some time, and you may have to declare it with "__block" //Since in your edit you do that and it seems it's the same format, just do @property (nonatomic, strong) NSDateFormatter dateFormatter; NSArray *sortedArray = [contentArray sortedArrayUsingComparator:^NSComparisonResult(id a, id b) { // Added Curly Braces around if else statements and used feedObject NSDate *aDate, *bDate; Feed *feedObject = (Feed *)a; Data *dataObject = (Data *)b; if ([a isKindOfClass:[Feed class]]) { //Feed *feedObject = (Feed *)a; aDate = [dateFormatter1 dateFromString:feedObject.published];} else { //if ([a isKindOfClass:[Data class]]) aDate = [dateFormatter2 dateFromString:dataObject.created_time];} if ([b isKindOfClass:[Feed class]]) { bDate = [dateFormatter1 dateFromString:feedObject.published];} else {//if ([b isKindOfClass:[Data class]]) bDate = [dateFormatter2 dateFromString:dataObject.created_time];} return [aDate compare:bDate]; }]; } #pragma mark - Table View - (NSInteger)numberOfSectionsInTableView:(UITableView *)tableView { return 1; } - (NSInteger)tableView:(UITableView *)tableView numberOfRowsInSection:(NSInteger)section { // API1 //return hArray.count; // API2 //return iArray.count; // API1 + API2 return hArray.count + iArray.count; } - (UITableViewCell *)tableView:(UITableView *)tableView cellForRowAtIndexPath:(NSIndexPath *)indexPath { UITableViewCell *cell; if(indexPath.row < hArray.count) { // Date Change NSDateFormatter *df = [[NSDateFormatter alloc] init]; [df setDateFormat:@"MMMM d, yyyy h:mma"]; // API 1 TableViewCell *api1Cell = [tableView dequeueReusableCellWithIdentifier:@"YourAPI1Cell"]; // Do everything you need to do with the api1Cell // Use the index in 'indexPath.row' to get the object from you array // API1 
Feed *feedLocal = [hArray objectAtIndex:indexPath.row]; // API1 NSString *dateString = [self timeSincePublished:feedLocal.published]; NSString *headlineText = [NSString stringWithFormat:@"%@", feedLocal.headline]; NSString *descriptionText = [NSString stringWithFormat:@"%@", feedLocal.description]; NSString *premiumText = [NSString stringWithFormat:@"%@", feedLocal.premium]; api1Cell.labelHeadline.text = [NSString stringWithFormat:@"%@", headlineText]; api1Cell.labelPublished.text = dateString; api1Cell.labelDescription.text = descriptionText; // SDWebImage API1 if ([feedLocal.images count] == 0) { // Not sure anything needed here } else { Images *imageLocal = [feedLocal.images objectAtIndex:0]; NSString *imageURL = [NSString stringWithFormat:@"%@", imageLocal.url]; NSString *imageWith = [NSString stringWithFormat:@"%@", imageLocal.width]; NSString *imageHeight = [NSString stringWithFormat:@"%@", imageLocal.height]; __weak UITableViewCell *wcell = cell; [cell.imageView setImageWithURL:[NSURL URLWithString:imageURL] placeholderImage:[UIImage imageNamed:@"X"] completed:^(UIImage *image, NSError *error, SDImageCacheType cacheType) { // Something }]; } cell = api1Cell; } else { // Date Change NSDateFormatter *df = [[NSDateFormatter alloc] init]; [df setDateFormat:@"MMMM d, yyyy h:mma"]; // API 2 MRWebListTableViewCellTwo *api2Cell = [tableView dequeueReusableCellWithIdentifier:@"YourAPI2Cell"]; // Do everything you need to do with the api2Cell // Remember to use 'indexPath.row - hArray.count' as the index for getting an object for your second array // API 2 Data *dataLocal = [iArray objectAtIndex:indexPath.row - hArray.count]; // API 2 NSString *dateStringI = [self timeSincePublished:dataLocal.created_time]; NSString *captionTextI = [NSString stringWithFormat:@"%@", dataLocal.caption.text]; NSString *usernameI = [NSString stringWithFormat:@"%@", dataLocal.user.username]; api2Cell.labelHeadline.text = usernameI; api2Cell.labelDescription.text = captionTextI; api2Cell.labelPublished.text = dateStringI; // SDWebImage API 2 if ([dataLocal.images count] == 0) { NSLog(@"Images Count: %lu", (unsigned long)dataLocal.images.count); // Not sure anything needed here } else { ImagesI *imageLocalI = [dataLocal.images objectAtIndex:0]; StandardResolutionI *standardResolutionLocal = [imageLocalI.standard_resolution objectAtIndex:0]; NSString *imageURLI = [NSString stringWithFormat:@"%@", standardResolutionLocal.url]; NSString *imageWithI = [NSString stringWithFormat:@"%@", standardResolutionLocal.width]; NSString *imageHeightI = [NSString stringWithFormat:@"%@", standardResolutionLocal.height]; // 11.2 __weak UITableViewCell *wcell = cell; [cell.imageView setImageWithURL:[NSURL URLWithString:imageURLI] placeholderImage:[UIImage imageNamed:@"X"] completed:^(UIImage *image, NSError *error, SDImageCacheType cacheType) { // Something }]; } cell = api2Cell; } return cell; } Feed.h @property (nonatomic, strong) Links *links; @property (nonatomic, strong) NSString *headline; @property (nonatomic, strong) NSString *source; @property (nonatomic, strong) NSDate *published; @property (nonatomic, strong) NSString *description; @property (nonatomic, strong) NSString *premium; @property (nonatomic, strong) NSArray *images; Data.h @property (nonatomic, strong) NSString *link; @property (nonatomic, strong) NSDate *created_time; @property (nonatomic, strong) UserI *user; @property (nonatomic, strong) NSArray *images; @property (nonatomic, strong) CaptionI *caption;

    Read the article

  • TFS 2008 Web Access Report 100 record limitation

    - by HosamKamel
    By default, TFS 2008 Web Access has a limit of 100 records when you open any query in report mode. Even if you try to export the query to Excel or PDF, only the first 100 records are exported. To overcome this, you have to reconfigure this count in the web.config file. Navigate to the Web Access files at C:\Program Files\Microsoft Visual Studio 2008 Team System Web Access\Wiwa, open web.config, and modify the maxWorkitemsInReportList count to whatever count you need. You need to modify the same configuration in the web.config located at C:\Program Files\Microsoft Visual Studio 2008 Team System Web Access\Web. A full discussion thread exists here: Team Foundation Server - Team System Web Access
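    For reference, a minimal sketch of the change is below. The article only names the maxWorkitemsInReportList setting; the surrounding element shape shown here is an assumption, so locate the existing entry in the installed web.config and change the value you find there (500 is just an example count).

        <!-- Hypothetical sketch: the parent element in your web.config may differ;
             search for the existing maxWorkitemsInReportList entry and edit its value. -->
        <add key="maxWorkitemsInReportList" value="500" />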

    Read the article

  • Install the Ajax Control Toolkit from NuGet

    - by Stephen Walther
    The Ajax Control Toolkit is now available from NuGet. This makes it super easy to add the latest version of the Ajax Control Toolkit to any Web Forms application. If you haven’t used NuGet yet, then you are missing out on a great tool which you can use with Visual Studio to add new features to an application. You can use NuGet with both ASP.NET MVC and ASP.NET Web Forms applications. NuGet is compatible with both Websites and Web Applications and it works with both C# and VB.NET applications. For example, I habitually use NuGet to add the latest version of ELMAH, Entity Framework, jQuery, jQuery UI, and jQuery Templates to applications that I create. To download NuGet, visit the NuGet website at: http://NuGet.org Imagine, for example, that you want to take advantage of the Ajax Control Toolkit RoundedCorners extender to create cross-browser compatible rounded corners in a Web Forms application. Follow these steps. Right click on your project in the Solution Explorer window and select the option Add Library Package Reference. In the Add Library Package Reference dialog, select the Online tab and enter AjaxControlToolkit in the search box: Click the Install button and the latest version of the Ajax Control Toolkit will be installed. Installing the Ajax Control Toolkit makes several modifications to your application. First, a reference to the Ajax Control Toolkit is added to your application. In a Web Application Project, you can see the new reference in the References folder: Installing the Ajax Control Toolkit NuGet package also updates your Web.config file. The tag prefix ajaxToolkit is registered so that you can easily use Ajax Control Toolkit controls within any page without adding a @Register directive to the page. <configuration> <system.web> <compilation debug="true" targetFramework="4.0" /> <pages> <controls> <add tagPrefix="ajaxToolkit" assembly="AjaxControlToolkit" namespace="AjaxControlToolkit" /> </controls> </pages> </system.web> </configuration> You should do a rebuild of your application by selecting the Visual Studio menu option Build, Rebuild Solution so that Visual Studio picks up on the new controls (You won’t get Intellisense for the Ajax Control Toolkit controls until you do a build). After you add the Ajax Control Toolkit to your application, you can start using any of the 40 Ajax Control Toolkit controls in your application (see http://www.asp.net/ajax/ajaxcontroltoolkit/samples/ for a reference for the controls). <%@ Page Language="C#" AutoEventWireup="true" CodeBehind="WebForm1.aspx.cs" Inherits="WebApplication1.WebForm1" %> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head runat="server"> <title>Rounded Corners</title> <style type="text/css"> #pnl1 { background-color: gray; width: 200px; color:White; font: 14pt Verdana; } #pnl1_contents { padding: 10px; } </style> </head> <body> <form id="form1" runat="server"> <div> <asp:Panel ID="pnl1" runat="server"> <div id="pnl1_contents"> I have rounded corners! </div> </asp:Panel> <ajaxToolkit:ToolkitScriptManager ID="sm1" runat="server" /> <ajaxToolkit:RoundedCornersExtender TargetControlID="pnl1" runat="server" /> </div> </form> </body> </html> The page contains the following three controls: Panel – The Panel control named pnl1 contains the content which appears with rounded corners. ToolkitScriptManager – Every page which uses the Ajax Control Toolkit must contain a single ToolkitScriptManager. 
The ToolkitScriptManager loads all of the JavaScript files used by the Ajax Control Toolkit. RoundedCornersExtender – This Ajax Control Toolkit extender targets the Panel control. It makes the Panel control appear with rounded corners. You can control the “roundiness” of the corners by modifying the Radius property. Notice that you get Intellisense when typing the Ajax Control Toolkit tags. As soon as you type <ajaxToolkit, all of the available Ajax Control Toolkit controls appear: When you open the page in a browser, then the contents of the Panel appears with rounded corners. The advantage of using the RoundedCorners extender is that it is cross-browser compatible. It works great with Internet Explorer, Opera, Firefox, Chrome, and Safari even though different browsers implement rounded corners in different ways. The RoundedCorners extender even works with an ancient browser such as Internet Explorer 6. Getting the Latest Version of the Ajax Control Toolkit The Ajax Control Toolkit continues to evolve at a rapid pace. We are hard at work at fixing bugs and adding new features to the project. We plan to have a new release of the Ajax Control Toolkit each month. The easiest way to get the latest version of the Ajax Control Toolkit is to use NuGet. You can open the NuGet Add Library Package Reference dialog at any time to update the Ajax Control Toolkit to the latest version.
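    If you prefer the Package Manager Console to the Add Library Package Reference dialog, the same install and update can be done with the standard NuGet console commands below; the package id is the same AjaxControlToolkit you typed into the search box.

        PM> Install-Package AjaxControlToolkit
        PM> Update-Package AjaxControlToolkit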

    Read the article

  • Using Web Services from an XNA 4.0 WP7 Game

    - by Michael Cummings
    Now that the Windows Phone 7 development tools have been out for a while, let’s talk about how you can use them. Windows Phone 7 (WP7) has two application types that you can create, either Silverlight or XNA, and you can’t really mix the two together. The development environment for WP7 is a special edition of Visual Studio 2010 called Visual Studio 2010 Express for Windows Phone. This edition will be installed with the WP7 tools, even if you have a full edition of VS2010 already installed. While you can use your full edition of VS2010 to do WP7 development, this astute developer has noticed that there are a few things that you can only do in the Express for Windows Phone edition. So let’s start by discussing WP7 networking. On the WP7 platform the only networking available is through Web Services using WCF or, if you’re really masochistic, you’ll use the WebClient to do HTTP. In Silverlight, it’s fairly easy to wire up a WCF proxy to call a web service and get some data. In the XNA projects, not so much. Create WCF Service First, we’ll create our service that will return some information that we need in our game. Open Visual Studio 2010, and create a new WCF Web Service project. We’ll use the default implementation as we only need to see how to use a service; we are not interested in creating a really cool service at this point. However, you may want to follow the instructions in the comments of Service1.svc.cs to change the name to something better; I used DataService and IDataService for the interface. You should now be able to run the project and the WCF Test Client will load and properly enumerate your service. At this point we have a functional service that can be consumed by our XNA game. Consume the WCF Service Open Visual Studio 2010 Express for Windows Phone and create a new XNA Game Studio 4.0 Windows Phone Game project. Now if you try to add a service reference to the project, you’ll notice that the option is not available. However, if you add a Silverlight application to your solution, you’ll notice that you can create a service reference there. So using the Silverlight project, we can create the service reference. Unfortunately, you can’t reference the Silverlight project from the XNA Game project, so using Windows Explorer copy the Service References folder from the Silverlight project directory to the XNA Game project directory, then add the folder to your XNA Game project. You’ll need to set the property Build Action to None for all the files, except for Reference.cs, which should be Build. Truly, we only need Reference.cs, but I find it easier to copy the whole folder. If you try to compile at this point, you’ll notice that we are missing a couple of references: System.Runtime.Serialization, System.Net and System.ServiceModel. Add these to the XNA Game project and you should build successfully. You’ll also need to copy the ServiceReference.ClientConfig file and add it to your project. The WCF infrastructure looks for this file and will complain if it can’t find it. You’ll need to set the Copy to Output Directory property to Copy if Newer. We now need to add the code to call the service and display the results on the screen. Go ahead and add a SpriteFont resource to the Content project and load it in the Game project. There’s nothing here that’s changed much from 3.1 other than that your Content project is now under the Solution node and not the Project node. While you’re at it, add a string field to store the result of the service call, and initialize it to string.Empty.
Then in the Draw method, write the string out to the screen, but only if it does not equal string.Empty. Now to wrap this up, let’s create a new field of type DataServiceClient. In the Initialize method, create a new instance of this type using its default constructor; then in LoadContent we can call the service. Since we can only call the GetData method of our service asynchronously, we need to set up a Completed event handler first. Thankfully, Visual Studio helps out a lot here: just press the Tab key to generate the handler stub it suggests. In the GetDataAsyncCompleted event handler, assign the service result (e.Result) to your string field. If you run your game, you should get something like this: Enjoy!
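    The snippet below is a minimal sketch of those Game class changes, assuming the generated proxy is called DataServiceClient and exposes a GetDataAsync method with a matching Completed event; the exact member names come from your generated Reference.cs, so adjust them to whatever yours contains.

        // Sketch only: proxy member names depend on the generated Reference.cs.
        DataServiceClient _client;                 // WCF proxy copied in via the Silverlight project
        SpriteFont _font;                          // SpriteFont added to the Content project
        string _serviceResult = string.Empty;      // holds the data returned by the service

        protected override void Initialize()
        {
            _client = new DataServiceClient();     // default constructor
            base.Initialize();
        }

        protected override void LoadContent()
        {
            spriteBatch = new SpriteBatch(GraphicsDevice);
            _font = Content.Load<SpriteFont>("GameFont");

            // Wire up the Completed handler before kicking off the async call.
            _client.GetDataCompleted += (sender, e) =>
            {
                if (e.Error == null)
                    _serviceResult = e.Result;     // picked up by Draw on the next frame
            };
            _client.GetDataAsync();
        }

        protected override void Draw(GameTime gameTime)
        {
            GraphicsDevice.Clear(Color.CornflowerBlue);

            if (_serviceResult != string.Empty)
            {
                spriteBatch.Begin();
                spriteBatch.DrawString(_font, _serviceResult, new Vector2(10, 10), Color.White);
                spriteBatch.End();
            }

            base.Draw(gameTime);
        }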

    Read the article

  • To ref or not to ref

    - by nmarun
    So the question is what is the point of passing a reference type along with the ref keyword? I have an Employee class as below: 1: public class Employee 2: { 3: public string FirstName { get; set; } 4: public string LastName { get; set; } 5:  6: public override string ToString() 7: { 8: return string.Format("{0}-{1}", FirstName, LastName); 9: } 10: } In my calling class, I say: 1: class Program 2: { 3: static void Main() 4: { 5: Employee employee = new Employee 6: { 7: FirstName = "John", 8: LastName = "Doe" 9: }; 10: Console.WriteLine(employee); 11: CallSomeMethod(employee); 12: Console.WriteLine(employee); 13: } 14:  15: private static void CallSomeMethod(Employee employee) 16: { 17: employee.FirstName = "Smith"; 18: employee.LastName = "Doe"; 19: } 20: }   After having a look at the code, you’ll probably say, Well, an instance of a class gets passed as a reference, so any changes to the instance inside the CallSomeMethod, actually modifies the original object. Hence the output will be ‘John-Doe’ on the first call and ‘Smith-Doe’ on the second. And you’re right: So the question is what’s the use of passing this Employee parameter as a ref? 1: class Program 2: { 3: static void Main() 4: { 5: Employee employee = new Employee 6: { 7: FirstName = "John", 8: LastName = "Doe" 9: }; 10: Console.WriteLine(employee); 11: CallSomeMethod(ref employee); 12: Console.WriteLine(employee); 13: } 14:  15: private static void CallSomeMethod(ref Employee employee) 16: { 17: employee.FirstName = "Smith"; 18: employee.LastName = "Doe"; 19: } 20: } The output is still the same: Ok, so is there really a need to pass a reference type using the ref keyword? I’ll remove the ‘ref’ keyword and make one more change to the CallSomeMethod method. 1: class Program 2: { 3: static void Main() 4: { 5: Employee employee = new Employee 6: { 7: FirstName = "John", 8: LastName = "Doe" 9: }; 10: Console.WriteLine(employee); 11: CallSomeMethod(employee); 12: Console.WriteLine(employee); 13: } 14:  15: private static void CallSomeMethod(Employee employee) 16: { 17: employee = new Employee 18: { 19: FirstName = "Smith", 20: LastName = "John" 21: }; 22: } 23: } In line 17 you’ll see I’ve ‘new’d up the incoming Employee parameter and then set its properties to new values. The output tells me that the original instance of the Employee class does not change. Huh? But an instance of a class gets passed by reference, so why did the values not change on the original instance or how do I keep the two instances in-sync all the times? Aah, now here’s the answer. In order to keep the objects in sync, you pass them using the ‘ref’ keyword. 1: class Program 2: { 3: static void Main() 4: { 5: Employee employee = new Employee 6: { 7: FirstName = "John", 8: LastName = "Doe" 9: }; 10: Console.WriteLine(employee); 11: CallSomeMethod(ref employee); 12: Console.WriteLine(employee); 13: } 14:  15: private static void CallSomeMethod(ref Employee employee) 16: { 17: employee = new Employee 18: { 19: FirstName = "Smith", 20: LastName = "John" 21: }; 22: } 23: } Viola! Now, to prove it beyond doubt, I said, let me try with another reference type: string. 1: class Program 2: { 3: static void Main() 4: { 5: string name = "abc"; 6: Console.WriteLine(name); 7: CallSomeMethod(ref name); 8: Console.WriteLine(name); 9: } 10:  11: private static void CallSomeMethod(ref string name) 12: { 13: name = "def"; 14: } 15: } The output was as expected, first ‘abc’ and then ‘def’ - proves the 'ref' keyword works here as well. Now, what if I remove the ‘ref’ keyword? 
The output should still be the same as the above, right, since string is a reference type? 1: class Program 2: { 3: static void Main() 4: { 5: string name = "abc"; 6: Console.WriteLine(name); 7: CallSomeMethod(name); 8: Console.WriteLine(name); 9: } 10:  11: private static void CallSomeMethod(string name) 12: { 13: name = "def"; 14: } 15: } Wrong, the output shows ‘abc’ printed twice. Wait a minute… now how could this be? This is because string is an immutable type: you can never change an existing string instance, so assigning "def" creates a brand new string and rebinds the local parameter to it, leaving the caller’s variable untouched. The effect is the same as ‘new’ing up the Employee instance inside CallSomeMethod in the absence of the ‘ref’ keyword. Verdict: the ref keyword came to the rescue and saved the planet… again!
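    As a compact recap (a sketch added here, not from the original post): without ref, reassigning the parameter only rebinds the method’s local copy of the reference, while mutating the object it points to is still visible to the caller; with ref, even the reassignment is visible.

        // Using the Employee class from the post above.
        static void Mutate(Employee e)            { e.FirstName = "Smith"; }  // caller sees the change
        static void Reassign(Employee e)          { e = new Employee { FirstName = "Smith", LastName = "John" }; }  // caller does not
        static void ReassignByRef(ref Employee e) { e = new Employee { FirstName = "Smith", LastName = "John" }; }  // caller sees the new object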

    Read the article
