Search Results

Search found 36242 results on 1450 pages for 'value converter'.


  • Binary data instead of actual image in C#

    - by acadia
    Hello, I am using the below mentioned library to create a barcode which is storing in a specified location as shown below: My question is, is there a way instead of saving it to a png file I get byte data? thanks Code39 code = new Code39("10090"); code.Paint().Save("c:/NewBARCODE.png", ImageFormat.Png); using System; using System.Collections.Generic; using System.Drawing; using System.Drawing.Imaging; using System.Diagnostics; namespace BarCode39 { public class Code39Settings { private int height = 60; public int BarCodeHeight { get { return height; } set { height = value; } } private bool drawText = true; public bool DrawText { get { return drawText; } set { drawText = value; } } private int leftMargin = 10; public int LeftMargin { get { return leftMargin; } set { leftMargin = value; } } private int rightMargin = 10; public int RightMargin { get { return rightMargin; } set { rightMargin = value; } } private int topMargin = 10; public int TopMargin { get { return topMargin; } set { topMargin = value; } } private int bottomMargin = 10; public int BottomMargin { get { return bottomMargin; } set { bottomMargin = value; } } private int interCharacterGap = 2; public int InterCharacterGap { get { return interCharacterGap; } set { interCharacterGap = value; } } private int wideWidth = 2; public int WideWidth { get { return wideWidth; } set { wideWidth = value; } } private int narrowWidth = 1; public int NarrowWidth { get { return narrowWidth; } set { narrowWidth = value; } } private Font font = new Font(FontFamily.GenericSansSerif, 12); public Font Font { get { return font; } set { font = value; } } private int codeToTextGapHeight = 10; public int BarCodeToTextGapHeight { get { return codeToTextGapHeight; } set { codeToTextGapHeight = value; } } } public class Code39 { #region Static initialization static Dictionary<char, Pattern> codes; static Code39() { object[][] chars = new object[][] { new object[] {'0', "n n n w w n w n n"}, new object[] {'1', "w n n w n n n n w"}, new object[] {'2', "n n w w n n n n w"}, new object[] {'3', "w n w w n n n n n"}, new object[] {'4', "n n n w w n n n w"}, new object[] {'5', "w n n w w n n n n"}, new object[] {'6', "n n w w w n n n n"}, new object[] {'7', "n n n w n n w n w"}, new object[] {'8', "w n n w n n w n n"}, new object[] {'9', "n n w w n n w n n"}, new object[] {'A', "w n n n n w n n w"}, new object[] {'B', "n n w n n w n n w"}, new object[] {'C', "w n w n n w n n n"}, new object[] {'D', "n n n n w w n n w"}, new object[] {'E', "w n n n w w n n n"}, new object[] {'F', "n n w n w w n n n"}, new object[] {'G', "n n n n n w w n w"}, new object[] {'H', "w n n n n w w n n"}, new object[] {'I', "n n w n n w w n n"}, new object[] {'J', "n n n n w w w n n"}, new object[] {'K', "w n n n n n n w w"}, new object[] {'L', "n n w n n n n w w"}, new object[] {'M', "w n w n n n n w n"}, new object[] {'N', "n n n n w n n w w"}, new object[] {'O', "w n n n w n n w n"}, new object[] {'P', "n n w n w n n w n"}, new object[] {'Q', "n n n n n n w w w"}, new object[] {'R', "w n n n n n w w n"}, new object[] {'S', "n n w n n n w w n"}, new object[] {'T', "n n n n w n w w n"}, new object[] {'U', "w w n n n n n n w"}, new object[] {'V', "n w w n n n n n w"}, new object[] {'W', "w w w n n n n n n"}, new object[] {'X', "n w n n w n n n w"}, new object[] {'Y', "w w n n w n n n n"}, new object[] {'Z', "n w w n w n n n n"}, new object[] {'-', "n w n n n n w n w"}, new object[] {'.', "w w n n n n w n n"}, new object[] {' ', "n w w n n n w n n"}, new object[] {'*', "n w n n w n w 
n n"}, new object[] {'$', "n w n w n w n n n"}, new object[] {'/', "n w n w n n n w n"}, new object[] {'+', "n w n n n w n w n"}, new object[] {'%', "n n n w n w n w n"} }; codes = new Dictionary<char, Pattern>(); foreach (object[] c in chars) codes.Add((char)c[0], Pattern.Parse((string)c[1])); } #endregion private static Pen pen = new Pen(Color.Black); private static Brush brush = Brushes.Black; private string code; private Code39Settings settings; public Code39(string code) : this(code, new Code39Settings()) { } public Code39(string code, Code39Settings settings) { foreach (char c in code) if (!codes.ContainsKey(c)) throw new ArgumentException("Invalid character encountered in specified code."); if (!code.StartsWith("*")) code = "*" + code; if (!code.EndsWith("*")) code = code + "*"; this.code = code; this.settings = settings; } public Bitmap Paint() { string code = this.code.Trim('*'); SizeF sizeCodeText = Graphics.FromImage(new Bitmap(1, 1)).MeasureString(code, settings.Font); int w = settings.LeftMargin + settings.RightMargin; foreach (char c in this.code) w += codes[c].GetWidth(settings) + settings.InterCharacterGap; w -= settings.InterCharacterGap; int h = settings.TopMargin + settings.BottomMargin + settings.BarCodeHeight; if (settings.DrawText) h += settings.BarCodeToTextGapHeight + (int)sizeCodeText.Height; Bitmap bmp = new Bitmap(w, h, PixelFormat.Format32bppArgb); Graphics g = Graphics.FromImage(bmp); int left = settings.LeftMargin; foreach (char c in this.code) left += codes[c].Paint(settings, g, left) + settings.InterCharacterGap; if (settings.DrawText) { int tX = settings.LeftMargin + (w - settings.LeftMargin - settings.RightMargin - (int)sizeCodeText.Width) / 2; if (tX < 0) tX = 0; int tY = settings.TopMargin + settings.BarCodeHeight + settings.BarCodeToTextGapHeight; g.DrawString(code, settings.Font, brush, tX, tY); } return bmp; } private class Pattern { private bool[] nw = new bool[9]; public static Pattern Parse(string s) { Debug.Assert(s != null); s = s.Replace(" ", "").ToLower(); Debug.Assert(s.Length == 9); Debug.Assert(s.Replace("n", "").Replace("w", "").Length == 0); Pattern p = new Pattern(); int i = 0; foreach (char c in s) p.nw[i++] = c == 'w'; return p; } public int GetWidth(Code39Settings settings) { int width = 0; for (int i = 0; i < 9; i++) width += (nw[i] ? settings.WideWidth : settings.NarrowWidth); return width; } public int Paint(Code39Settings settings, Graphics g, int left) { #if DEBUG Rectangle gray = new Rectangle(left, 0, GetWidth(settings), settings.BarCodeHeight + settings.TopMargin + settings.BottomMargin); g.FillRectangle(Brushes.Gray, gray); #endif int x = left; int w = 0; for (int i = 0; i < 9; i++) { int width = (nw[i] ? settings.WideWidth : settings.NarrowWidth); if (i % 2 == 0) { Rectangle r = new Rectangle(x, settings.TopMargin, width, settings.BarCodeHeight); g.FillRectangle(brush, r); } x += width; w += width; } return w; } } } }

    Read the article

  • Choose a variable based on the user's choice from a picklist

    - by Kelbizzle
    I have this form. Basically what I want is to send a auto-response with a different URL based on what the user picks in the "attn" picklist. I've been thinking I could have a different variable for each drop down value. It will then pass this variable on to the mail script that will choose which URL to insert inside the auto response that is sent. It gives me a headache thinking about it sometimes. What's the easiest way to accomplish this? Am I making more work for myself? I really don't know because I'm no programmer. Thanks in advance! Here is the form: <form name="contact_form" method="post" action="sendemail_reports.php" onsubmit="return validate_form ( );"> <div id='zohoWebToLead' align=center> <table width="100%" border="0" align="center" cellpadding="0" cellspacing="0" class="txt_body"> <table border=0 cellspacing=0 cellpadding=5 width=480 style='border-bottom-color: #999999; border-top-color: #999999; border-bottom-style: none; border-top-style: none; border-bottom-width: 1px; border-top-width: 2px; background-color:transparent;'> <tr> <td width='75%'><table width=480 border=0 cellpadding=5 cellspacing=0 style='border-bottom-color: #999999; border-top-color: #999999; border-bottom-style: none; border-top-style: none; border-bottom-width: 1px; border-top-width: 2px; background-color:transparent;'> <input type="hidden" name="ip" value="'.$ipi.'" /> <input type="hidden" name="httpref" value="'.$httprefi.'" /> <input type="hidden" name="httpagent" value="'.$httpagenti.'" /> <tr></tr> <tr> <td colspan='2' align='left' style='border-bottom-color: #dadada; border-bottom-style: none; border-bottom-width: 2px; color:#000000; font-family:sans-serif; font-size:14px;'><strong>Send us an Email</strong></td> </tr> <tr> <td nowrap style='font-family:sans-serif;font-size:12px;font-weight:bold' align='right' width='25%'> First Name   : </td> <td width='75%'><input name='visitorf' type='text' size="48" maxlength='40' /></td> </tr> <tr> <td nowrap style='font-family:sans-serif;font-size:12px;font-weight:bold' align='right' width='25%'>Last Name   :</td> <td width='75%'><input name='visitorfl' type='text' size="48" maxlength='80' /></td> </tr> <tr> <td nowrap style= 'font-family:sans-serif;font-size:12px;font-weight:bold' align='right' width='25%'> Email Adress  : </td> <td width='75%'><input name='visitormail' type='text' size="48" maxlength='100' /></td> </tr> <tr> <td nowrap style='font-family:sans-serif;font-size:12px;font-weight:bold' align='right' width='25%'> Phone   : </td> <td width='75%'><input name='visitorphone' type='text' size="48" maxlength='30' /></td> </tr> <td nowrap style='font-family:sans-serif;font-size:12px;font-weight:bold' align='right' width='25%'> Subject   : </td> <td width='75%'><select name="attn" size="1"> <option value=" Investment Opportunities ">Investment Opportunities </option> <option value=" Vacation Rentals ">Vacation Rentals </option> <option value=" Real Estate Offerings ">Real Estate Offerings </option> <option value=" Gatherings ">Gatherings </option> <option value=" General ">General </option> </select></td> <tr> <td nowrap style= 'font-family:sans-serif;font-size:12px;font-weight:bold' align='right' width='25%'> Message   :<br /> <em>(max 5000 char)</em></td> <td width='75%'><textarea name='notes' maxlength='5000' cols="48" rows="3"></textarea></td> </tr> <tr> <td colspan=2 align=center style=''><input name='save' type='submit' class="boton" value=Send mail /> &nbsp; &nbsp; <input type='reset' name='reset' value=Reset class="boton" /></td> </tr> 
</table></td> </tr> </table> </div> </form> Here is the mail script: <?php //the 3 variables below were changed to use the SERVER variable $ip = $_SERVER['REMOTE_ADDR']; $httpref = $_SERVER['HTTP_REFERER']; $httpagent = $_SERVER['HTTP_USER_AGENT']; $visitorf = $_POST['visitorf']; $visitorl = $_POST['visitorl']; $visitormail = $_POST['visitormail']; $visitorphone = $_POST['visitorphone']; $notes = $_POST['notes']; $attn = $_POST['attn']; //additional headers $headers = 'From: Me <[email protected]>' . "\n" ; $headers = 'Bcc: [email protected]' . "\n"; if (eregi('http:', $notes)) { die ("Do NOT try that! ! "); } if(!$visitormail == "" && (!strstr($visitormail,"@") || !strstr($visitormail,"."))) { echo "<h2>Use Back - Enter valid e-mail</h2>\n"; $badinput = "<h2>Feedback was NOT submitted</h2>\n"; echo $badinput; die ("Go back! ! "); } if(empty($visitorf) || empty($visitormail) || empty($notes )) { echo "<h2>Use Back - fill in all fields</h2>\n"; die ("Use back! ! "); } $todayis = date("l, F j, Y, g:i a") ; $subject = "I want to download the report about $attn"; $notes = stripcslashes($notes); $message = "$todayis [EST] \nAttention: $attn \nMessage: $notes \nFrom: $visitorf $visitorl ($visitormail) \nTelephone Number: $visitorphone \nAdditional Info : IP = $ip \nBrowser Info: $httpagent \nReferral : $httpref\n"; //check if the function even exists if(function_exists("mail")) { //send the email mail($_SESSION['email'], $subject, $message, $headers) or die("could not send email"); } else { die("mail function not enabled"); } header( "Location: http://www.domain.com/thanks.php" ); ?>
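
    One way to avoid a separate variable per drop-down value (offered as a sketch, not taken from the original code) is to map each "attn" value to a URL inside the mail script and append the matching link to the auto-response. The URLs below are placeholders:

    <?php
    // Map each picklist value to the report URL used in the auto-response.
    $reportUrls = array(
        'Investment Opportunities' => 'http://www.domain.com/reports/investments.pdf',
        'Vacation Rentals'         => 'http://www.domain.com/reports/rentals.pdf',
        'Real Estate Offerings'    => 'http://www.domain.com/reports/real-estate.pdf',
        'Gatherings'               => 'http://www.domain.com/reports/gatherings.pdf',
        'General'                  => 'http://www.domain.com/reports/general.pdf',
    );

    // The option values in the form are padded with spaces, so trim before the lookup.
    $attn = trim($_POST['attn']);
    $reportUrl = isset($reportUrls[$attn]) ? $reportUrls[$attn] : $reportUrls['General'];

    // Append the link to the message (placed before the mail() call).
    $message .= "\nDownload the report here: $reportUrl\n";
    ?>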

    Read the article

  • Spring's EntityManager not persisting

    - by Fernando Camargo
    Well, my project was using EJB and JPA (with Hibernate), but I had to switch to Spring. Everything was working well before that. The EJB used to inject the EntityManager, controled the transaction, etc. Ok, when I switched to Spring, I had a lot of problems because I'm new on Spring. But after everything is running, I have the problem: the data is never saved on database. I configured my Spring to control the transactions, I have spring beans used in JSF, that has spring services that do the hard work. This services have a EntityManager injected and use @Transactional REQUIRED. This services pass the EntityManager to a DAO that call entityManager.persist(bean). The selects appears to work well, the JTA transaction appears to work well to (I saw in log), but the entity is not saved! Here is the log: INFO: [Pronatec] - 04/04/2012 11:30:20 - [DEBUG] org.springframework.orm.jpa.support.OpenEntityManagerInViewFilter: doFilterInternal() (linha 136): Opening JPA EntityManager in OpenEntityManagerInViewFilter INFO: [Pronatec] - 04/04/2012 11:30:20 - [DEBUG] org.springframework.beans.factory.support.DefaultListableBeanFactory: doGetBean() (linha 245): Returning cached instance of singleton bean 'transactionManager' INFO: [Pronatec] - 04/04/2012 11:30:20 - [DEBUG] org.springframework.orm.hibernate3.HibernateTransactionManager: getTransaction() (linha 365): Creating new transaction with name [br.org.cni.pronatec.controller.service.MontanteServiceImpl.adicionarValor]: PROPAGATION_REQUIRED,ISOLATION_DEFAULT; '' INFO: [Pronatec] - 04/04/2012 11:30:20 - [DEBUG] org.springframework.orm.hibernate3.HibernateTransactionManager: doBegin() (linha 493): Opened new Session [org.hibernate.impl.SessionImpl@2b2fe2f0] for Hibernate transaction INFO: [Pronatec] - 04/04/2012 11:30:20 - [DEBUG] org.springframework.orm.hibernate3.HibernateTransactionManager: doBegin() (linha 504): Preparing JDBC Connection of Hibernate Session [org.hibernate.impl.SessionImpl@2b2fe2f0] INFO: [Pronatec] - 04/04/2012 11:30:20 - [DEBUG] org.springframework.orm.hibernate3.HibernateTransactionManager: doBegin() (linha 569): Exposing Hibernate transaction as JDBC transaction [com.sun.gjc.spi.jdbc40.ConnectionHolder40@3bcd4840] INFO: [Pronatec] - 04/04/2012 11:30:20 - [DEBUG] org.springframework.orm.jpa.ExtendedEntityManagerCreator$ExtendedEntityManagerInvocationHandler: doJoinTransaction() (linha 383): Joined JTA transaction INFO: Hibernate: select hibernate_sequence.nextval from dual INFO: [Pronatec] - 04/04/2012 11:30:20 - [DEBUG] org.springframework.orm.hibernate3.HibernateTransactionManager: processCommit() (linha 752): Initiating transaction commit INFO: [Pronatec] - 04/04/2012 11:30:20 - [DEBUG] org.springframework.orm.hibernate3.HibernateTransactionManager: doCommit() (linha 652): Committing Hibernate transaction on Session [org.hibernate.impl.SessionImpl@2b2fe2f0] INFO: [Pronatec] - 04/04/2012 11:30:20 - [DEBUG] org.springframework.orm.hibernate3.HibernateTransactionManager: doCleanupAfterCompletion() (linha 734): Closing Hibernate Session [org.hibernate.impl.SessionImpl@2b2fe2f0] after transaction INFO: [Pronatec] - 04/04/2012 11:30:20 - [DEBUG] org.springframework.orm.hibernate3.SessionFactoryUtils: closeSession() (linha 800): Closing Hibernate Session INFO: [Pronatec] - 04/04/2012 11:30:20 - [DEBUG] org.springframework.orm.jpa.support.OpenEntityManagerInViewFilter: doFilterInternal() (linha 154): Closing JPA EntityManager in OpenEntityManagerInViewFilter INFO: [Pronatec] - 04/04/2012 11:30:20 - [DEBUG] 
org.springframework.orm.jpa.EntityManagerFactoryUtils: closeEntityManager() (linha 343): Closing JPA EntityManager In the log, I see it commiting the transaction, but I don't see the insert query (the Hibernate is printing any query). I also see that the Hibernate lookup to get the next value of the sequence ID. But after that, it never really inserts. Here is the spring context configuration: <bean id="entityManagerFactory" class="org.springframework.orm.jpa.LocalContainerEntityManagerFactoryBean"> <property name="persistenceUnitName" value="PronatecPU" /> <property name="persistenceXmlLocation" value="classpath:META-INF/persistence.xml" /> <property name="loadTimeWeaver"> <bean class="org.springframework.instrument.classloading.InstrumentationLoadTimeWeaver"/> </property> <property name="jpaProperties"> <props> <prop key="hibernate.transaction.factory_class">org.hibernate.transaction.JTATransactionFactory</prop> </props> </property> </bean> <bean id="transactionManager" class="org.springframework.orm.jpa.JpaTransactionManager" > <property name="transactionManagerName" value="java:/TransactionManager" /> <property name="userTransactionName" value="UserTransaction" /> <property name="entityManagerFactory" ref="entityManagerFactory" /> </bean> <bean class="org.springframework.orm.jpa.support.PersistenceAnnotationBeanPostProcessor" /> <tx:annotation-driven transaction-manager="transactionManager" /> Here is my persistence.xml: <?xml version="1.0" encoding="UTF-8"?> <persistence version="1.0" xmlns="http://java.sun.com/xml/ns/persistence" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://java.sun.com/xml/ns/persistence http://java.sun.com/xml/ns/persistence/persistence_1_0.xsd"> <persistence-unit name="PronatecPU" transaction-type="JTA"> <provider>org.hibernate.ejb.HibernatePersistence</provider> <jta-data-source>jdbc/pronatec</jta-data-source> <class>br.org.cni.pronatec.model.bean.AgendamentoBuscaSistec</class> <class>br.org.cni.pronatec.model.bean.AgendamentoExportacaoZeus</class> <class>br.org.cni.pronatec.model.bean.AgendamentoImportacaoZeus</class> <class>br.org.cni.pronatec.model.bean.Aluno</class> <class>br.org.cni.pronatec.model.bean.Curso</class> <class>br.org.cni.pronatec.model.bean.DepartamentoRegional</class> <class>br.org.cni.pronatec.model.bean.Dof</class> <class>br.org.cni.pronatec.model.bean.Escola</class> <class>br.org.cni.pronatec.model.bean.Inconsistencia</class> <class>br.org.cni.pronatec.model.bean.Matricula</class> <class>br.org.cni.pronatec.model.bean.Montante</class> <class>br.org.cni.pronatec.model.bean.ParametrosVingentes</class> <class>br.org.cni.pronatec.model.bean.TipoCurso</class> <class>br.org.cni.pronatec.model.bean.Turma</class> <class>br.org.cni.pronatec.model.bean.UnidadeFederativa</class> <class>br.org.cni.pronatec.model.bean.ValorAssistenciaEstudantil</class> <class>br.org.cni.pronatec.model.bean.ValorHora</class> <exclude-unlisted-classes>true</exclude-unlisted-classes> <properties> <property name="current_session_context_class" value="thread"/> <property name="hibernate.show_sql" value="true"/> <property name="hibernate.format_sql" value="true"/> <property name="hibernate.dialect" value="org.hibernate.dialect.OracleDialect"/> <property name="hibernate.transaction.manager_lookup_class" value="org.hibernate.transaction.SunONETransactionManagerLookup"/> <property name="hibernate.hbm2ddl.auto" value="update"/> </properties> </persistence-unit> </persistence> Here is my service that is injected in the managed bean: @Service 
@Scope("prototype") @Transactional(propagation= Propagation.REQUIRED) public class MontanteServiceImpl { // more code @PersistenceContext(unitName="PronatecPU", type= PersistenceContextType.EXTENDED) private EntityManager entityManager; // more code // The method that is called by another public method that do something before private void salvarMontante(Montante montante) { montante.setDataTransacao(new Date()); MontanteDao montanteDao = new MontanteDao(entityManager); montanteDao.salvar(montante); } // more code } My MontanteDao inherits from a base DAO, like this: public class MontanteDao extends BaseDao<Montante> { public MontanteDao(EntityManager entityManager) { super(entityManager); } } And the method that is called in BaseDao is this: public void salvar(T bean) { entityManager.persist(bean); } Like you can see, it just pick the injected entityManager and call the persist() method. The transaction is being controlled by the Spring, like is printed in the log, but the insert query is never printed in log and it is never saved. I'm sorry about my bad english. Thanks in advance for who helps.

    Read the article

  • SQL error - Cannot convert nvarchar to decimal

    - by jakesankey
    I have a C# application that simply parses all of the txt documents within a given network directory and imports the data to a SQL server db. Everything was cruising along just fine until about the 1800th file when it happend to have a few blanks in columns that are called out as DBType.Decimal (and the value is usually zero in the files, not blank). So I got this error, "cannot convert nvarchar to decimal". I am wondering how I could tell the app to simply skip the lines that have this issue?? Perhaps I could even just change the column type to varchar even tho values are numbers (what problems could this create?) Thanks for any help! using System; using System.Data; using System.Data.SQLite; using System.IO; using System.Text.RegularExpressions; using System.Threading; using System.Collections.Generic; using System.Linq; using System.Data.SqlClient; namespace JohnDeereCMMDataParser { internal class Program { public static List<string> GetImportedFileList() { List<string> ImportedFiles = new List<string>(); using (SqlConnection connect = new SqlConnection(@"Server=FRXSQLDEV;Database=RX_CMMData;Integrated Security=YES")) { connect.Open(); using (SqlCommand fmd = connect.CreateCommand()) { fmd.CommandText = @"SELECT FileName FROM CMMData;"; fmd.CommandType = CommandType.Text; SqlDataReader r = fmd.ExecuteReader(); while (r.Read()) { ImportedFiles.Add(Convert.ToString(r["FileName"])); } } } return ImportedFiles; } private static void Main(string[] args) { Console.Title = "John Deere CMM Data Parser"; Console.WriteLine("Preparing CMM Data Parser... done"); Console.WriteLine("Scanning for new CMM data..."); Console.ForegroundColor = ConsoleColor.Gray; using (SqlConnection con = new SqlConnection(@"Server=FRXSQLDEV;Database=RX_CMMData;Integrated Security=YES")) { con.Open(); using (SqlCommand insertCommand = con.CreateCommand()) { Console.WriteLine("Connecting to SQL server..."); SqlCommand cmdd = con.CreateCommand(); string[] files = Directory.GetFiles(@"C:\Documents and Settings\js91162\Desktop\CMM WENZEL\", "*_*_*.txt", SearchOption.AllDirectories); List<string> ImportedFiles = GetImportedFileList(); insertCommand.Parameters.Add(new SqlParameter("@FeatType", DbType.String)); insertCommand.Parameters.Add(new SqlParameter("@FeatName", DbType.String)); insertCommand.Parameters.Add(new SqlParameter("@Axis", DbType.String)); insertCommand.Parameters.Add(new SqlParameter("@Actual", DbType.Decimal)); insertCommand.Parameters.Add(new SqlParameter("@Nominal", DbType.Decimal)); insertCommand.Parameters.Add(new SqlParameter("@Dev", DbType.Decimal)); insertCommand.Parameters.Add(new SqlParameter("@TolMin", DbType.Decimal)); insertCommand.Parameters.Add(new SqlParameter("@TolPlus", DbType.Decimal)); insertCommand.Parameters.Add(new SqlParameter("@OutOfTol", DbType.Decimal)); foreach (string file in files.Except(ImportedFiles)) { var FileNameExt1 = Path.GetFileName(file); cmdd.Parameters.Clear(); cmdd.Parameters.Add(new SqlParameter("@FileExt", FileNameExt1)); cmdd.CommandText = @" IF (EXISTS (SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'RX_CMMData' AND TABLE_NAME = 'CMMData')) BEGIN SELECT COUNT(*) FROM CMMData WHERE FileName = @FileExt; END"; int count = Convert.ToInt32(cmdd.ExecuteScalar()); con.Close(); con.Open(); if (count == 0) { Console.WriteLine("Preparing to parse CMM data for SQL import..."); if (file.Count(c => c == '_') > 5) continue; insertCommand.CommandText = @" INSERT INTO CMMData (FeatType, FeatName, Axis, Actual, Nominal, Dev, TolMin, TolPlus, OutOfTol, PartNumber, 
CMMNumber, Date, FileName) VALUES (@FeatType, @FeatName, @Axis, @Actual, @Nominal, @Dev, @TolMin, @TolPlus, @OutOfTol, @PartNumber, @CMMNumber, @Date, @FileName);"; string FileNameExt = Path.GetFullPath(file); string RNumber = Path.GetFileNameWithoutExtension(file); int index2 = RNumber.IndexOf("~"); Match RNumberE = Regex.Match(RNumber, @"^(R|L)\d{6}(COMP|CRIT|TEST|SU[1-9])(?=_)", RegexOptions.IgnoreCase); Match RNumberD = Regex.Match(RNumber, @"(?<=_)\d{3}[A-Z]\d{4}|\d{3}[A-Z]\d\w\w\d(?=_)", RegexOptions.IgnoreCase); Match RNumberDate = Regex.Match(RNumber, @"(?<=_)\d{8}(?=_)", RegexOptions.IgnoreCase); string RNumE = Convert.ToString(RNumberE); string RNumD = Convert.ToString(RNumberD); if (RNumberD.Value == @"") continue; if (RNumberE.Value == @"") continue; if (RNumberDate.Value == @"") continue; if (index2 != -1) continue; DateTime dateTime = DateTime.ParseExact(RNumberDate.Value, "yyyyMMdd", Thread.CurrentThread.CurrentCulture); string cmmDate = dateTime.ToString("dd-MMM-yyyy"); string[] lines = File.ReadAllLines(file); bool parse = false; foreach (string tmpLine in lines) { string line = tmpLine.Trim(); if (!parse && line.StartsWith("Feat. Type,")) { parse = true; continue; } if (!parse || string.IsNullOrEmpty(line)) { continue; } Console.WriteLine(tmpLine); foreach (SqlParameter parameter in insertCommand.Parameters) { parameter.Value = null; } string[] values = line.Split(new[] { ',' }); for (int i = 0; i < values.Length - 1; i++) { if (i = "" || i = null) continue; SqlParameter param = insertCommand.Parameters[i]; if (param.DbType == DbType.Decimal) { decimal value; param.Value = decimal.TryParse(values[i], out value) ? value : 0; } else { param.Value = values[i]; } } insertCommand.Parameters.Add(new SqlParameter("@PartNumber", RNumE)); insertCommand.Parameters.Add(new SqlParameter("@CMMNumber", RNumD)); insertCommand.Parameters.Add(new SqlParameter("@Date", cmmDate)); insertCommand.Parameters.Add(new SqlParameter("@FileName", FileNameExt)); insertCommand.ExecuteNonQuery(); insertCommand.Parameters.RemoveAt("@PartNumber"); insertCommand.Parameters.RemoveAt("@CMMNumber"); insertCommand.Parameters.RemoveAt("@Date"); insertCommand.Parameters.RemoveAt("@FileName"); } } } Console.WriteLine("CMM data successfully imported to SQL database..."); } con.Close(); } } } }
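
    To "simply skip the lines that have this issue", the numeric fields can be validated before the insert runs. A hedged sketch (not the original code) of the inner loop, where a blank decimal field either skips the line or falls back to zero:

    string[] values = line.Split(',');
    bool skipLine = false;

    for (int i = 0; i < values.Length - 1 && i < insertCommand.Parameters.Count; i++)
    {
        SqlParameter param = insertCommand.Parameters[i];
        string raw = values[i].Trim();

        if (param.DbType == DbType.Decimal)
        {
            decimal parsed;
            if (!decimal.TryParse(raw, out parsed))
            {
                skipLine = true;   // or: parsed = 0m; to default blanks to zero instead
                break;
            }
            param.Value = parsed;
        }
        else
        {
            param.Value = raw;
        }
    }

    if (skipLine)
        continue;   // move on to the next line of the file

    Changing the columns to varchar would avoid the conversion error, but it pushes the problem downstream: sorting, filtering, and arithmetic on those columns in SQL would then need casts, so parsing (or skipping) at import time is usually the safer choice.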

    Read the article

  • Problems using Hibernate and Spring in web application

    - by user628480
    Hi.I'm having NullPointerException trying to getCurrentSession() java.lang.NullPointerException servlets.ControlServlet.doPost(ControlServlet.java:46) javax.servlet.http.HttpServlet.service(HttpServlet.java:709) javax.servlet.http.HttpServlet.service(HttpServlet.java:802) I use Tomcat 5.5 index.jsp page: <%@ page language="java" contentType="text/html; charset=ISO-8859-1"pageEncoding="ISO-8859-1"%> <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> <html> <head> <meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"> <title></title> </head> <body> <%@ page import="java.util.List" %> <%@ page import="data.Singer" %> <jsp:useBean id="singer" class="data.Singer" scope="session"/> <jsp:setProperty name="singer" property="*" /> <form action="ControlServlet" method="POST"> <form method=“POST”> Name:<br /> <input type=“text” name="name" /><br /> Type:<br /> <input type=“text” name="type" /><br /> <input type="submit" name="Add song" value="Add song"> <input type="submit" name="save" value="Save" /><br><br> <input type ="submit" name="values" value="Get values" > </form> </body> </html> web.xml <?xml version="1.0" encoding="UTF-8"?> <web-app id="WebApp_ID" version="2.4" xmlns="http://java.sun.com/xml/ns/j2ee" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://java.sun.com/xml/ns/j2ee http://java.sun.com/xml/ns/j2ee/web-app_2_4.xsd"> <display-name>webproject</display-name> <context-param> <param-name>contextConfigLocation</param-name> <param-value>/WEB-INF/beans.xml, /WEB-INF/conf.xml, /WEB-INF/singers.hbm.xml, /WEB-INF/songs.hbm.xml, /WEB-INF/singerbeans.xml, /WEB-INF/songbeans.xml</param-value> </context-param> <servlet> <servlet-name>context</servlet-name> <servlet-class>org.springframework.web.context.ContextLoaderServlet</servlet-class> <load-on-startup>1</load-on-startup> </servlet> <servlet> <servlet-name>test</servlet-name> <servlet-class>org.springframework.web.servlet.DispatcherServlet</servlet-class> <load-on-startup>1</load-on-startup> </servlet> <servlet-mapping> <servlet-name>test</servlet-name> <url-pattern>*.*</url-pattern> </servlet-mapping> <servlet> <servlet-name>action</servlet-name> <servlet-class>org.apache.struts.action.ActionServlet</servlet-class> <init-param> <param-name>config</param-name> <param-value>/WEB-INF/beans.xml, /WEB-INF/conf.xml, /WEB-INF/singers.hbm.xml, /WEB-INF/songs.hbm.xml, /WEB-INF/singerbeans.xml, /WEB-INF/songbeans.xml</param-value> </init-param> <init-param> <param-name>debug</param-name> <param-value>2</param-value> </init-param> <init-param> <param-name>detail</param-name> <param-value>2</param-value> </init-param> <load-on-startup>2</load-on-startup> </servlet> <servlet> <description> </description> <display-name>ControlServlet</display-name> <servlet-name>ControlServlet</servlet-name> <servlet-class>servlets.ControlServlet</servlet-class> </servlet> <servlet-mapping> <servlet-name>action</servlet-name> <url-pattern>*.*</url-pattern> </servlet-mapping> <servlet-mapping> <servlet-name>ControlServlet</servlet-name> <url-pattern>/ControlServlet</url-pattern> </servlet-mapping> </web-app> ControlServlet.java public class ControlServlet extends HttpServlet { private static final long serialVersionUID = 1L; @Autowired private SingerDao singerdao; public SingerDao getSingerDao() { return singerdao; } public void setSingerDao(SingerDao singerdao) { this.singerdao = singerdao; } public ControlServlet() { super(); // TODO Auto-generated constructor stub } 
protected void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { // TODO Auto-generated method stub } protected void doPost(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { if (request.getParameter("values") != null) { response.getWriter().println(singerdao.getDBValues()); } } } and SingerDao.java public class SingerDao implements SingerDaoInterface { @Autowired private SessionFactory sessionFactory; public void setSessionFactory(SessionFactory sessionFactory) { this.sessionFactory = sessionFactory; } public List getDBValues() { Session session = getCurrentSession(); List<Singer> singers = session.createCriteria(Singer.class).list(); return singers; } private org.hibernate.classic.Session getCurrentSession() { return sessionFactory.getCurrentSession(); } public void updateSinger(Singer singer) { Session session = getCurrentSession(); session.update(singer); } public Singer getSinger(int id) { Singer singer = null; Session session = getCurrentSession(); singer = (Singer) session.load(Singer.class, id); return singer; } public void deleteSinger(Singer singer) { Session session = getCurrentSession(); session.delete(singer); } public void insertRow(Singer singer) { Session session = getCurrentSession(); session.save(singer); } } In simple Java Project it works fine.I think sessionFactory doesn't autowires,but why? Thanks all.
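
    A detail worth noting: the servlet container instantiates ControlServlet, not Spring, so @Autowired is never processed and singerdao stays null, which would explain the NullPointerException in doPost. Two commonly used fixes are calling SpringBeanAutowiringSupport.processInjectionBasedOnCurrentContext(this) in the servlet's init(), or looking the bean up from the root context yourself. A sketch of the latter, assuming the DAO is declared as a Spring bean named "singerDao" (the bean name is an assumption):

    // Hedged sketch - only the changed parts of ControlServlet are shown
    import javax.servlet.ServletException;
    import org.springframework.web.context.WebApplicationContext;
    import org.springframework.web.context.support.WebApplicationContextUtils;

    public class ControlServlet extends HttpServlet {

        private SingerDao singerdao;

        @Override
        public void init() throws ServletException {
            // The root context is the one created by the ContextLoader servlet declared in web.xml
            WebApplicationContext ctx =
                WebApplicationContextUtils.getRequiredWebApplicationContext(getServletContext());
            singerdao = (SingerDao) ctx.getBean("singerDao"); // bean name is an assumption
        }
        // doGet/doPost unchanged
    }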

    Read the article

  • Script to create a table and fields in SQL won't work

    - by Jacksta
    Warning this is lenghty! attack if you knowledagble. well at least more then a newb beginner like me. This script uses three files as detailed below. It is suppoed to create the database and fields from the form input. It gets to the end and shows my_contacts has been created!. But when i go into phpMyadmin the table has not been created. I have a file named show_createtable.html which is used to create a table in MySQL <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> <title>Untitled Document</title> </head> <body> <h1>Step 1: Name and Number</h1> <form method="post" action="do_showfielddef.php" /> <p><strong>Table Name:</strong><br /> <input type="text" name="table_name" size="30" /></p> <p><strong>Number of fields:</strong><br /> <input type="text" name="num_fields" size="30" /></p> <p><input type="submit" name="submit" value="go to step2" /></p> </form> </body> </html> This Form Posts to do_showfielddef.php <?php //validate important input if ((!$_POST[table_name]) || (!$_POST[num_fields])) { header( "location: show_createtable.html"); exit; } //begin creating form for display $form_block = " <form action=\"do_createtable.php\" method=\"post\"> <input name=\"table_name\" type=\"hidden\" value=\"$_POST[table_name]\"> <table cellspacing=\"5\" cellpadding=\"5\"> <tr> <th>Field Name</th><th>Field Type</th><th>Table Length</th><th>Primary Key?</th><th>Auto-Increment?</th> </tr>"; //count from 0 until you reach the number fo fields for ($i = 0; $i <$_POST[num_fields]; $i++) { $form_block .=" <tr> <td align=center><input type=\"texr\" name=\"field name[]\" size=\"30\"></td> <td align=center> <select name=\"field_type[]\"> <option value=\"char\">char</option> <option value=\"date\">date</option> <option value=\"float\">float</option> <option value=\"int\">int</option> <option value=\"text\">text</option> <option value=\"varchar\">varchar</option> </select> </td> <td align=center><input type=\"text\" name=\"field_length[]\" size=\"5\"></td> <td aligh=center><input type=\"checkbox\" name=\"primary[]\" value=\"Y\"></td> <td aligh=center><input type=\"checkbox\" name=\"auto_increment[]\" value=\"Y\"></td> </tr>"; } //finish up the form $form_block .= " <tr> <td align=center colspan=3><input type =\"submit\" value=\"create table\"> </td> </tr> </table> </form>"; ?> <html> <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> <title>Create a database table: Step 2</title> </head> <body> <h1>defnie fields for <? echo "$_POST[table_name]"; ?> </h1> <? echo "$form_block"; ?> </body> </html> Which in turn creates the table and fields with this file do_showfielddef.php //connect to database $connection = @mysql_connect("localhost", "user", "pass") or die(mysql_error()); $db = @mysql_select_db($db_name, $connection) or die(mysql_error()); //start creating the SQL statement $sql = "CREATE TABLE $_POST[table_name]("; //continue the SQL statement for each new field for ($i = 0; $i < count($_POST[field_name]); $i++) { $sql .= $_POST[field_name][$i]." 
".$_POST[field_type][$i]; if ($_POST[auto_increment][$i] =="Y") { $additional = "NOT NULL auto_increment"; } else { $additional = ""; } if ($_POST[primary][$i] =="Y") { $additional .= ", primary key (".$_POST[field_name][$i].")"; } else { $additional = ""; } if ($_POST[field_length][$i] !="") { $sql .= " (".$_POST[field_length][$i].") $additional ,"; } else { $sql .=" $additional ,"; } } //clean up the end of the string $sql = substr($sql, 0, -1); $sql .= ")"; //execute the query $result = mysql_query($sql, $connection) or die(mysql_error()); //get a giid message for display upon success if ($result) { $msg = "<p>" .$_POST[table_name]." has been created!</p>"; } ?> <html> <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> <title>Create A Database Table: Step 3</title> </head> <body> <h1>Adding table to <? echo "$db_name"; ?>...</h1> <? echo "$msg"; ?> </body> </html>

    Read the article

  • Fake ISAPI handler to serve static files with extensions that are rewritten by a URL rewriter

    - by developerit
    Introduction I often map html extention to the asp.net dll in order to use url rewritter with .html extentions. Recently, in the new version of www.nouvelair.ca, we renamed all urls to end with .html. This works great, but failed when we used FCK Editor. Static html files would not get serve because we mapped the html extension to the .NET Framework. We can we do to to use .html extension with our rewritter but still want to use IIS behavior with static html files. Analysis I thought that this could be resolve with a simple HTTP handler. We would map urls of static files in our rewriter to this handler that would read the static file and serve it, just as IIS would do. Implementation This is how I coded the class. Note that this may not be bullet proof. I only tested it once and I am sure that the logic behind IIS is more complicated that this. If you find errors or think of possible improvements, let me know. Imports System.Web Imports System.Web.Services ' Author: Nicolas Brassard ' For: Solutions Nitriques inc. http://www.nitriques.com ' Date Created: April 18, 2009 ' Last Modified: April 18, 2009 ' License: CPOL (http://www.codeproject.com/info/cpol10.aspx) ' Files: ISAPIDotNetHandler.ashx ' ISAPIDotNetHandler.ashx.vb ' Class: ISAPIDotNetHandler ' Description: Fake ISAPI handler to serve static files. ' Usefull when you want to serve static file that has a rewrited extention. ' Example: It often map html extention to the asp.net dll in order to use url rewritter with .html. ' If you want to still serve static html file, add a rewritter rule to redirect html files to this handler Public Class ISAPIDotNetHandler Implements System.Web.IHttpHandler Sub ProcessRequest(ByVal context As HttpContext) Implements IHttpHandler.ProcessRequest ' Since we are doing the job IIS normally does with html files, ' we set the content type to match html. ' You may want to customize this with your own logic, if you want to serve ' txt or xml or any other text file context.Response.ContentType = "text/html" ' We begin a try here. Any error that occurs will result in a 404 Page Not Found error. ' We replicate the behavior of IIS when it doesn't find the correspoding file. Try ' Declare a local variable containing the value of the query string Dim uri As String = context.Request("fileUri") ' If the value in the query string is null, ' throw an error to generate a 404 If String.IsNullOrEmpty(uri) Then Throw New ApplicationException("No fileUri") End If ' If the value in the query string doesn't end with .html, then block the acces ' This is a HUGE security hole since it could permit full read access to .aspx, .config, etc. If Not uri.ToLower.EndsWith(".html") Then ' throw an error to generate a 404 Throw New ApplicationException("Extention not allowed") End If ' Map the file on the server. ' If the file doesn't exists on the server, it will throw an exception and generate a 404. Dim fullPath As String = context.Server.MapPath(uri) ' Read the actual file Dim stream As IO.StreamReader = FileIO.FileSystem.OpenTextFileReader(fullPath) ' Write the file into the response context.Response.Output.Write(stream.ReadToEnd) ' Close and Dipose the stream stream.Close() stream.Dispose() stream = Nothing Catch ex As Exception ' Set the Status Code of the response context.Response.StatusCode = 404 'Page not found ' For testing and bebugging only ! 
This may cause a security leak ' context.Response.Output.Write(ex.Message) Finally ' In all cases, flush and end the response context.Response.Flush() context.Response.End() End Try End Sub ' Automaticly generated by Visual Studio ReadOnly Property IsReusable() As Boolean Implements IHttpHandler.IsReusable Get Return False End Get End Property End Class Conclusion As you see, with our static files map to this handler using query string (ex.: /ISAPIDotNetHandler.ashx?fileUri=index.html) you will have the same behavior as if you ask for the uri /index.html. Finally, test this only in IIS with the html extension map to aspnet_isapi.dll. Url rewritting will work in Casini (Internal Web Server shipped with Visual Studio) but it’s not the same as with IIS since EVERY request is handle by .NET. Versions First release
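
    As a usage note (not from the original article): the handler expects requests of the form /ISAPIDotNetHandler.ashx?fileUri=/static/page.html, so something has to rewrite the incoming .html URLs into that shape. A minimal sketch of one option, a rewrite in Global.asax; the /static/ folder restriction and file layout are assumptions for illustration:

    ' Global.asax (VB.NET) - hedged sketch
    Sub Application_BeginRequest(ByVal sender As Object, ByVal e As EventArgs)
        Dim path As String = Request.AppRelativeCurrentExecutionFilePath
        ' Only hand static html under /static/ to the fake ISAPI handler
        If path.StartsWith("~/static/", StringComparison.OrdinalIgnoreCase) _
           AndAlso path.EndsWith(".html", StringComparison.OrdinalIgnoreCase) Then
            Context.RewritePath("~/ISAPIDotNetHandler.ashx", String.Empty, _
                                "fileUri=" & Server.UrlEncode(path.TrimStart("~"c)))
        End If
    End Sub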

    Read the article

  • How To Disable Control Panel in Windows 7

    - by Mysticgeek
    If you have a shared computer that your family and friends can access, you might not want them to mess around in the Control Panel, and luckily with a simple tweak you can disable it.

    Disable Control Panel with Group Policy Note: This process uses the Local Group Policy Editor, which is not available in Home versions of Windows 7. Skip down below for the registry hack version that works on Home editions as well. First, type gpedit.msc into the Search box in the Start menu and hit Enter. When Local Group Policy Editor opens, navigate to User Configuration \ Administrative Templates, then select Control Panel in the left column. In the right column, double-click Prohibit access to the Control Panel. In the next window, select Enabled, click OK, then close out of Local Group Policy Editor. After the Control Panel is disabled, you'll notice it's no longer listed in the Start menu. If the user tries to type Control Panel into the Search box in the Start menu, they will get a message indicating it's restricted.

    Disable Control Panel with a Registry Tweak You can also tweak the Registry to disable the Control Panel. This works with all versions of Windows 7, Vista, and XP. Making changes in the Registry is not recommended for beginners, and you should create a Restore Point or back up the Registry before making any changes. Type regedit into the Search box in the Start menu and hit Enter. In Registry Editor, navigate to HKEY_CURRENT_USER\Software\Microsoft\Windows\CurrentVersion\Policies\Explorer. Right-click in the right pane and create a new DWORD (32-bit) Value. Name the value NoControlPanel. Then right-click the new value and click Modify. In the Value data field, change the value to "1" and click OK. Close out of Registry Editor and restart the machine to complete the process. When you get back from the reboot, you'll notice Control Panel is no longer listed in the Start menu. If a user tries to access it by typing Control Panel into the Search box in the Start menu, they will get a message indicating it is restricted, just as if you had disabled it via Group Policy. If you want to re-enable the Control Panel, go back into the Registry, change the NoControlPanel value back to "0", then reboot the computer. This comes in handy if you have inexperienced users working on your machine and don't want them messing with Control Panel settings.
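
    For reference, the same registry tweak can be applied without clicking through Registry Editor. The snippet below is a sketch based on the key and value named in the article; the .reg file must be imported while logged in as the user you want to restrict.

    Windows Registry Editor Version 5.00

    ; Sets the NoControlPanel value described above for the current user
    [HKEY_CURRENT_USER\Software\Microsoft\Windows\CurrentVersion\Policies\Explorer]
    "NoControlPanel"=dword:00000001

    The same change from a command prompt:

    reg add "HKCU\Software\Microsoft\Windows\CurrentVersion\Policies\Explorer" /v NoControlPanel /t REG_DWORD /d 1 /f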

    Read the article

  • JavaScript: this

    - by bdukes
    JavaScript is a language steeped in juxtaposition.  It was made to “look like Java,” yet is dynamic and classless.  From this origin, we get the new operator and the this keyword.  You are probably used to this referring to the current instance of a class, so what could it mean in a language without classes? In JavaScript, this refers to the object off of which a function is referenced when it is invoked (unless it is invoked via call or apply). What this means is that this is not bound to your function, and can change depending on how your function is invoked. It also means that this changes when declaring a function inside another function (i.e. each function has its own this), such as when writing a callback. Let's see some of this in action: var obj = { count: 0, increment: function () { this.count += 1; }, logAfterTimeout = function () { setTimeout(function () { console.log(this.count); }, 1); } }; obj.increment(); console.log(obj.count); // 1 var increment = obj.increment; window.count = 'global count value: '; increment(); console.log(obj.count); // 1 console.log(window.count); // global count value: 1 var newObj = {count:50}; increment.call(newObj); console.log(newObj.count); // 51 obj.logAfterTimeout();// global count value: 1 obj.logAfterTimeout = function () { var proxiedFunction = $.proxy(function () { console.log(this.count); }, this); setTimeout(proxiedFunction, 1); }; obj.logAfterTimeout(); // 1 obj.logAfterTimeout = function () { var that = this; setTimeout(function () { console.log(that.count); }, 1); }; obj.logAfterTimeout(); // 1 The last couple of examples here demonstrate some methods for making sure you get the values you expect.  The first time logAfterTimeout is redefined, we use jQuery.proxy to create a new function which has its this permanently set to the passed in value (in this case, the current this).  The second time logAfterTimeout is redefined, we save the value of this in a variable (named that in this case, also often named self) and use the new variable in place of this. Now, all of this is to clarify what’s going on when you use this.  However, it’s pretty easy to avoid using this altogether in your code (especially in the way I’ve demonstrated above).  Instead of using this.count all over the place, it would have been much easier if I’d made count a variable instead of a property, and then I wouldn’t have to use this to refer to it.  var obj = (function () { var count = 0; return { increment: function () { count += 1; }, logAfterTimeout = function () { setTimeout(function () { console.log(count); }, 1); }, getCount: function () { return count; } }; }()); If you’re writing your code in this way, the main place you’ll run into issues with this is when handling DOM events (where this is the element on which the event occurred).  In that case, just be careful when using a callback within that event handler, that you’re not expecting this to still refer to the element (and use proxy or that/self if you need to refer to it). Finally, as demonstrated in the example, you can use call or apply on a function to set its this value.  This isn’t often needed, but you may also want to know that you can use apply to pass in an array of arguments to a function (e.g. console.log.apply(console, [1, 2, 3, 4])).

    Read the article

  • Silverlight Recruiting Application Part 5 - Jobs Module / View

    Now we starting getting into a more code-heavy portion of this series, thankfully though this means the groundwork is all set for the most part and after adding the modules we will have a complete application that can be provided with full source. The Jobs module will have two concerns- adding and maintaining jobs that can then be broadcast out to the website. How they are displayed on the site will be handled by our admin system (which will just poll from this common database), so we aren't too concerned with that, but rather with getting the information into the system and allowing the backend administration/HR users to keep things up to date. Since there is a fair bit of information that we want to display, we're going to move editing to a separate view so we can get all that information in an easy-to-use spot. With all the files created for this module, the project looks something like this: And now... on to the code. XAML for the Job Posting View All we really need for the Job Posting View is a RadGridView and a few buttons. This will let us both show off records and perform operations on the records without much hassle. That XAML is going to look something like this: 01.<Grid x:Name="LayoutRoot" 02.Background="White"> 03.<Grid.RowDefinitions> 04.<RowDefinition Height="30" /> 05.<RowDefinition /> 06.</Grid.RowDefinitions> 07.<StackPanel Orientation="Horizontal"> 08.<Button x:Name="xAddRecordButton" 09.Content="Add Job" 10.Width="120" 11.cal:Click.Command="{Binding AddRecord}" 12.telerik:StyleManager.Theme="Windows7" /> 13.<Button x:Name="xEditRecordButton" 14.Content="Edit Job" 15.Width="120" 16.cal:Click.Command="{Binding EditRecord}" 17.telerik:StyleManager.Theme="Windows7" /> 18.</StackPanel> 19.<telerikGrid:RadGridView x:Name="xJobsGrid" 20.Grid.Row="1" 21.IsReadOnly="True" 22.AutoGenerateColumns="False" 23.ColumnWidth="*" 24.RowDetailsVisibilityMode="VisibleWhenSelected" 25.ItemsSource="{Binding MyJobs}" 26.SelectedItem="{Binding SelectedJob, Mode=TwoWay}" 27.command:SelectedItemChangedEventClass.Command="{Binding SelectedItemChanged}"> 28.<telerikGrid:RadGridView.Columns> 29.<telerikGrid:GridViewDataColumn Header="Job Title" 30.DataMemberBinding="{Binding JobTitle}" 31.UniqueName="JobTitle" /> 32.<telerikGrid:GridViewDataColumn Header="Location" 33.DataMemberBinding="{Binding Location}" 34.UniqueName="Location" /> 35.<telerikGrid:GridViewDataColumn Header="Resume Required" 36.DataMemberBinding="{Binding NeedsResume}" 37.UniqueName="NeedsResume" /> 38.<telerikGrid:GridViewDataColumn Header="CV Required" 39.DataMemberBinding="{Binding NeedsCV}" 40.UniqueName="NeedsCV" /> 41.<telerikGrid:GridViewDataColumn Header="Overview Required" 42.DataMemberBinding="{Binding NeedsOverview}" 43.UniqueName="NeedsOverview" /> 44.<telerikGrid:GridViewDataColumn Header="Active" 45.DataMemberBinding="{Binding IsActive}" 46.UniqueName="IsActive" /> 47.</telerikGrid:RadGridView.Columns> 48.</telerikGrid:RadGridView> 49.</Grid> I'll explain what's happening here by line numbers: Lines 11 and 16: Using the same type of click commands as we saw in the Menu module, we tie the button clicks to delegate commands in the viewmodel. Line 25: The source for the jobs will be a collection in the viewmodel. Line 26: We also bind the selected item to a public property from the viewmodel for use in code. Line 27: We've turned the event into a command so we can handle it via code in the viewmodel. 
So those first three probably make sense to you as far as Silverlight/WPF binding magic is concerned, but for line 27... This actually comes from something I read onDamien Schenkelman's blog back in the day for creating an attached behavior from any event. So, any time you see me using command:Whatever.Command, the backing for it is actually something like this: SelectedItemChangedEventBehavior.cs: 01.public class SelectedItemChangedEventBehavior : CommandBehaviorBase<Telerik.Windows.Controls.DataControl> 02.{ 03.public SelectedItemChangedEventBehavior(DataControl element) 04.: base(element) 05.{ 06.element.SelectionChanged += new EventHandler<SelectionChangeEventArgs>(element_SelectionChanged); 07.} 08.void element_SelectionChanged(object sender, SelectionChangeEventArgs e) 09.{ 10.// We'll only ever allow single selection, so will only need item index 0 11.base.CommandParameter = e.AddedItems[0]; 12.base.ExecuteCommand(); 13.} 14.} SelectedItemChangedEventClass.cs: 01.public class SelectedItemChangedEventClass 02.{ 03.#region The Command Stuff 04.public static ICommand GetCommand(DependencyObject obj) 05.{ 06.return (ICommand)obj.GetValue(CommandProperty); 07.} 08.public static void SetCommand(DependencyObject obj, ICommand value) 09.{ 10.obj.SetValue(CommandProperty, value); 11.} 12.public static readonly DependencyProperty CommandProperty = 13.DependencyProperty.RegisterAttached("Command", typeof(ICommand), 14.typeof(SelectedItemChangedEventClass), new PropertyMetadata(OnSetCommandCallback)); 15.public static void OnSetCommandCallback(DependencyObject dependencyObject, DependencyPropertyChangedEventArgs e) 16.{ 17.DataControl element = dependencyObject as DataControl; 18.if (element != null) 19.{ 20.SelectedItemChangedEventBehavior behavior = GetOrCreateBehavior(element); 21.behavior.Command = e.NewValue as ICommand; 22.} 23.} 24.#endregion 25.public static SelectedItemChangedEventBehavior GetOrCreateBehavior(DataControl element) 26.{ 27.SelectedItemChangedEventBehavior behavior = element.GetValue(SelectedItemChangedEventBehaviorProperty) as SelectedItemChangedEventBehavior; 28.if (behavior == null) 29.{ 30.behavior = new SelectedItemChangedEventBehavior(element); 31.element.SetValue(SelectedItemChangedEventBehaviorProperty, behavior); 32.} 33.return behavior; 34.} 35.public static SelectedItemChangedEventBehavior GetSelectedItemChangedEventBehavior(DependencyObject obj) 36.{ 37.return (SelectedItemChangedEventBehavior)obj.GetValue(SelectedItemChangedEventBehaviorProperty); 38.} 39.public static void SetSelectedItemChangedEventBehavior(DependencyObject obj, SelectedItemChangedEventBehavior value) 40.{ 41.obj.SetValue(SelectedItemChangedEventBehaviorProperty, value); 42.} 43.public static readonly DependencyProperty SelectedItemChangedEventBehaviorProperty = 44.DependencyProperty.RegisterAttached("SelectedItemChangedEventBehavior", 45.typeof(SelectedItemChangedEventBehavior), typeof(SelectedItemChangedEventClass), null); 46.} These end up looking very similar from command to command, but in a nutshell you create a command based on any event, determine what the parameter for it will be, then execute. It attaches via XAML and ties to a DelegateCommand in the viewmodel, so you get the full event experience (since some controls get a bit event-rich for added functionality). Simple enough, right? 
Viewmodel for the Job Posting View The Viewmodel is going to need to handle all events going back and forth, maintaining interactions with the data we are using, and both publishing and subscribing to events. Rather than breaking this into tons of little pieces, I'll give you a nice view of the entire viewmodel and then hit up the important points line-by-line: 001.public class JobPostingViewModel : ViewModelBase 002.{ 003.private readonly IEventAggregator eventAggregator; 004.private readonly IRegionManager regionManager; 005.public DelegateCommand<object> AddRecord { get; set; } 006.public DelegateCommand<object> EditRecord { get; set; } 007.public DelegateCommand<object> SelectedItemChanged { get; set; } 008.public RecruitingContext context; 009.private QueryableCollectionView _myJobs; 010.public QueryableCollectionView MyJobs 011.{ 012.get { return _myJobs; } 013.} 014.private QueryableCollectionView _selectionJobActionHistory; 015.public QueryableCollectionView SelectedJobActionHistory 016.{ 017.get { return _selectionJobActionHistory; } 018.} 019.private JobPosting _selectedJob; 020.public JobPosting SelectedJob 021.{ 022.get { return _selectedJob; } 023.set 024.{ 025.if (value != _selectedJob) 026.{ 027._selectedJob = value; 028.NotifyChanged("SelectedJob"); 029.} 030.} 031.} 032.public SubscriptionToken editToken = new SubscriptionToken(); 033.public SubscriptionToken addToken = new SubscriptionToken(); 034.public JobPostingViewModel(IEventAggregator eventAgg, IRegionManager regionmanager) 035.{ 036.// set Unity items 037.this.eventAggregator = eventAgg; 038.this.regionManager = regionmanager; 039.// load our context 040.context = new RecruitingContext(); 041.this._myJobs = new QueryableCollectionView(context.JobPostings); 042.context.Load(context.GetJobPostingsQuery()); 043.// set command events 044.this.AddRecord = new DelegateCommand<object>(this.AddNewRecord); 045.this.EditRecord = new DelegateCommand<object>(this.EditExistingRecord); 046.this.SelectedItemChanged = new DelegateCommand<object>(this.SelectedRecordChanged); 047.SetSubscriptions(); 048.} 049.#region DelegateCommands from View 050.public void AddNewRecord(object obj) 051.{ 052.this.eventAggregator.GetEvent<AddJobEvent>().Publish(true); 053.} 054.public void EditExistingRecord(object obj) 055.{ 056.if (_selectedJob == null) 057.{ 058.this.eventAggregator.GetEvent<NotifyUserEvent>().Publish("No job selected."); 059.} 060.else 061.{ 062.this._myJobs.EditItem(this._selectedJob); 063.this.eventAggregator.GetEvent<EditJobEvent>().Publish(this._selectedJob); 064.} 065.} 066.public void SelectedRecordChanged(object obj) 067.{ 068.if (obj.GetType() == typeof(ActionHistory)) 069.{ 070.// event bubbles up so we don't catch items from the ActionHistory grid 071.} 072.else 073.{ 074.JobPosting job = obj as JobPosting; 075.GrabHistory(job.PostingID); 076.} 077.} 078.#endregion 079.#region Subscription Declaration and Events 080.public void SetSubscriptions() 081.{ 082.EditJobCompleteEvent editComplete = eventAggregator.GetEvent<EditJobCompleteEvent>(); 083.if (editToken != null) 084.editComplete.Unsubscribe(editToken); 085.editToken = editComplete.Subscribe(this.EditCompleteEventHandler); 086.AddJobCompleteEvent addComplete = eventAggregator.GetEvent<AddJobCompleteEvent>(); 087.if (addToken != null) 088.addComplete.Unsubscribe(addToken); 089.addToken = addComplete.Subscribe(this.AddCompleteEventHandler); 090.} 091.public void EditCompleteEventHandler(bool complete) 092.{ 093.if (complete) 094.{ 095.JobPosting thisJob = 
_myJobs.CurrentEditItem as JobPosting; 096.this._myJobs.CommitEdit(); 097.this.context.SubmitChanges((s) => 098.{ 099.ActionHistory myAction = new ActionHistory(); 100.myAction.PostingID = thisJob.PostingID; 101.myAction.Description = String.Format("Job '{0}' has been edited by {1}", thisJob.JobTitle, "default user"); 102.myAction.TimeStamp = DateTime.Now; 103.eventAggregator.GetEvent<AddActionEvent>().Publish(myAction); 104.} 105., null); 106.} 107.else 108.{ 109.this._myJobs.CancelEdit(); 110.} 111.this.MakeMeActive(this.regionManager, "MainRegion", "JobPostingsView"); 112.} 113.public void AddCompleteEventHandler(JobPosting job) 114.{ 115.if (job == null) 116.{ 117.// do nothing, new job add cancelled 118.} 119.else 120.{ 121.this.context.JobPostings.Add(job); 122.this.context.SubmitChanges((s) => 123.{ 124.ActionHistory myAction = new ActionHistory(); 125.myAction.PostingID = job.PostingID; 126.myAction.Description = String.Format("Job '{0}' has been added by {1}", job.JobTitle, "default user"); 127.myAction.TimeStamp = DateTime.Now; 128.eventAggregator.GetEvent<AddActionEvent>().Publish(myAction); 129.} 130., null); 131.} 132.this.MakeMeActive(this.regionManager, "MainRegion", "JobPostingsView"); 133.} 134.#endregion 135.public void GrabHistory(int postID) 136.{ 137.context.ActionHistories.Clear(); 138._selectionJobActionHistory = new QueryableCollectionView(context.ActionHistories); 139.context.Load(context.GetHistoryForJobQuery(postID)); 140.} Taking it from the top, we're injecting an Event Aggregator and Region Manager for use down the road and also have the public DelegateCommands (just like in the Menu module). We also grab a reference to our context, which we'll obviously need for data, then set up a few fields with public properties tied to them. We're also setting subscription tokens, which we have not yet seen but I will get into below. The AddNewRecord (50) and EditExistingRecord (54) methods should speak for themselves for functionality, the one thing of note is we're sending events off to the Event Aggregator which some module, somewhere will take care of. Since these aren't entirely relying on one another, the Jobs View doesn't care if anyone is listening, but it will publish AddJobEvent (52), NotifyUserEvent (58) and EditJobEvent (63)regardless. Don't mind the GrabHistory() method so much, that is just grabbing history items (visibly being created in the SubmitChanges callbacks), and adding them to the database. Every action will trigger a history event, so we'll know who modified what and when, just in case. ;) So where are we at? Well, if we click to Add a job, we publish an event, if we edit a job, we publish an event with the selected record (attained through the magic of binding). Where is this all going though? To the Viewmodel, of course! 
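One piece this excerpt never shows is the events themselves. In Prism (the Composite Application Library this series uses), they are typically nothing more than empty CompositePresentationEvent&lt;TPayload&gt; subclasses living in the shared Infrastructure project so every module can reference them. A minimal sketch of what they might look like, with the payload types inferred from the Publish and Subscribe calls above (my assumption, not the author's actual file):

using System.Collections.Generic;
using Microsoft.Practices.Composite.Presentation.Events;

// Payload types are inferred from how each event is published/subscribed in the viewmodels above.
public class AddJobEvent : CompositePresentationEvent<bool> { }                  // Publish(true) asks for the add/edit view in "add" mode
public class EditJobEvent : CompositePresentationEvent<JobPosting> { }           // carries the job selected for editing
public class AddJobCompleteEvent : CompositePresentationEvent<JobPosting> { }    // null payload means the add was cancelled
public class EditJobCompleteEvent : CompositePresentationEvent<bool> { }         // true = commit the edit, false = cancel
public class NotifyUserEvent : CompositePresentationEvent<string> { }            // simple message for the notification module
public class AddActionEvent : CompositePresentationEvent<ActionHistory> { }      // history entries raised after SubmitChanges
public class DisplayValidationErrorsEvent : CompositePresentationEvent<List<string>> { } // validation messages to display

Because publishers and subscribers only ever share these little classes (plus the entity types), the modules stay decoupled: the Jobs view can publish AddJobEvent whether or not anything is currently listening, which is exactly the behavior described above.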
XAML for the AddEditJobView This is pretty straightforward except for one thing, noted below: 001.<Grid x:Name="LayoutRoot" 002.Background="White"> 003.<Grid x:Name="xEditGrid" 004.Margin="10" 005.validationHelper:ValidationScope.Errors="{Binding Errors}"> 006.<Grid.Background> 007.<LinearGradientBrush EndPoint="0.5,1" 008.StartPoint="0.5,0"> 009.<GradientStop Color="#FFC7C7C7" 010.Offset="0" /> 011.<GradientStop Color="#FFF6F3F3" 012.Offset="1" /> 013.</LinearGradientBrush> 014.</Grid.Background> 015.<Grid.RowDefinitions> 016.<RowDefinition Height="40" /> 017.<RowDefinition Height="40" /> 018.<RowDefinition Height="40" /> 019.<RowDefinition Height="100" /> 020.<RowDefinition Height="100" /> 021.<RowDefinition Height="100" /> 022.<RowDefinition Height="40" /> 023.<RowDefinition Height="40" /> 024.<RowDefinition Height="40" /> 025.</Grid.RowDefinitions> 026.<Grid.ColumnDefinitions> 027.<ColumnDefinition Width="150" /> 028.<ColumnDefinition Width="150" /> 029.<ColumnDefinition Width="300" /> 030.<ColumnDefinition Width="100" /> 031.</Grid.ColumnDefinitions> 032.<!-- Title --> 033.<TextBlock Margin="8" 034.Text="{Binding AddEditString}" 035.TextWrapping="Wrap" 036.Grid.Column="1" 037.Grid.ColumnSpan="2" 038.FontSize="16" /> 039.<!-- Data entry area--> 040. 041.<TextBlock Margin="8,0,0,0" 042.Style="{StaticResource LabelTxb}" 043.Grid.Row="1" 044.Text="Job Title" 045.VerticalAlignment="Center" /> 046.<TextBox x:Name="xJobTitleTB" 047.Margin="0,8" 048.Grid.Column="1" 049.Grid.Row="1" 050.Text="{Binding activeJob.JobTitle, Mode=TwoWay, NotifyOnValidationError=True, ValidatesOnExceptions=True}" 051.Grid.ColumnSpan="2" /> 052.<TextBlock Margin="8,0,0,0" 053.Grid.Row="2" 054.Text="Location" 055.d:LayoutOverrides="Height" 056.VerticalAlignment="Center" /> 057.<TextBox x:Name="xLocationTB" 058.Margin="0,8" 059.Grid.Column="1" 060.Grid.Row="2" 061.Text="{Binding activeJob.Location, Mode=TwoWay, NotifyOnValidationError=True, ValidatesOnExceptions=True}" 062.Grid.ColumnSpan="2" /> 063. 064.<TextBlock Margin="8,11,8,0" 065.Grid.Row="3" 066.Text="Description" 067.TextWrapping="Wrap" 068.VerticalAlignment="Top" /> 069. 070.<TextBox x:Name="xDescriptionTB" 071.Height="84" 072.TextWrapping="Wrap" 073.ScrollViewer.VerticalScrollBarVisibility="Auto" 074.Grid.Column="1" 075.Grid.Row="3" 076.Text="{Binding activeJob.Description, Mode=TwoWay, NotifyOnValidationError=True, ValidatesOnExceptions=True}" 077.Grid.ColumnSpan="2" /> 078.<TextBlock Margin="8,11,8,0" 079.Grid.Row="4" 080.Text="Requirements" 081.TextWrapping="Wrap" 082.VerticalAlignment="Top" /> 083. 084.<TextBox x:Name="xRequirementsTB" 085.Height="84" 086.TextWrapping="Wrap" 087.ScrollViewer.VerticalScrollBarVisibility="Auto" 088.Grid.Column="1" 089.Grid.Row="4" 090.Text="{Binding activeJob.Requirements, Mode=TwoWay, NotifyOnValidationError=True, ValidatesOnExceptions=True}" 091.Grid.ColumnSpan="2" /> 092.<TextBlock Margin="8,11,8,0" 093.Grid.Row="5" 094.Text="Qualifications" 095.TextWrapping="Wrap" 096.VerticalAlignment="Top" /> 097. 098.<TextBox x:Name="xQualificationsTB" 099.Height="84" 100.TextWrapping="Wrap" 101.ScrollViewer.VerticalScrollBarVisibility="Auto" 102.Grid.Column="1" 103.Grid.Row="5" 104.Text="{Binding activeJob.Qualifications, Mode=TwoWay, NotifyOnValidationError=True, ValidatesOnExceptions=True}" 105.Grid.ColumnSpan="2" /> 106.<!-- Requirements Checkboxes--> 107. 
108.<CheckBox x:Name="xResumeRequiredCB" Margin="8,8,8,15" 109.Content="Resume Required" 110.Grid.Row="6" 111.Grid.ColumnSpan="2" 112.IsChecked="{Binding activeJob.NeedsResume, Mode=TwoWay, NotifyOnValidationError=True, ValidatesOnExceptions=True}"/> 113. 114.<CheckBox x:Name="xCoverletterRequiredCB" Margin="8,8,8,15" 115.Content="Cover Letter Required" 116.Grid.Column="2" 117.Grid.Row="6" 118.IsChecked="{Binding activeJob.NeedsCV, Mode=TwoWay, NotifyOnValidationError=True, ValidatesOnExceptions=True}"/> 119. 120.<CheckBox x:Name="xOverviewRequiredCB" Margin="8,8,8,15" 121.Content="Overview Required" 122.Grid.Row="7" 123.Grid.ColumnSpan="2" 124.IsChecked="{Binding activeJob.NeedsOverview, Mode=TwoWay, NotifyOnValidationError=True, ValidatesOnExceptions=True}"/> 125. 126.<CheckBox x:Name="xJobActiveCB" Margin="8,8,8,15" 127.Content="Job is Active" 128.Grid.Column="2" 129.Grid.Row="7" 130.IsChecked="{Binding activeJob.IsActive, Mode=TwoWay, NotifyOnValidationError=True, ValidatesOnExceptions=True}"/> 131. 132.<!-- Buttons --> 133. 134.<Button x:Name="xAddEditButton" Margin="8,8,0,10" 135.Content="{Binding AddEditButtonString}" 136.cal:Click.Command="{Binding AddEditCommand}" 137.Grid.Column="2" 138.Grid.Row="8" 139.HorizontalAlignment="Left" 140.Width="125" 141.telerik:StyleManager.Theme="Windows7" /> 142. 143.<Button x:Name="xCancelButton" HorizontalAlignment="Right" 144.Content="Cancel" 145.cal:Click.Command="{Binding CancelCommand}" 146.Margin="0,8,8,10" 147.Width="125" 148.Grid.Column="2" 149.Grid.Row="8" 150.telerik:StyleManager.Theme="Windows7" /> 151.</Grid> 152.</Grid> The 'validationHelper:ValidationScope' line may seem odd. This is a handy little trick for catching current and would-be validation errors when working in this whole setup. This all comes from an approach found on theJoy Of Code blog, although it looks like the story for this will be changing slightly with new advances in SL4/WCF RIA Services, so this section can definitely get an overhaul a little down the road. The code is the fun part of all this, so let us see what's happening under the hood. Viewmodel for the AddEditJobView We are going to see some of the same things happening here, so I'll skip over the repeat info and get right to the good stuff: 001.public class AddEditJobViewModel : ViewModelBase 002.{ 003.private readonly IEventAggregator eventAggregator; 004.private readonly IRegionManager regionManager; 005. 006.public RecruitingContext context; 007. 008.private JobPosting _activeJob; 009.public JobPosting activeJob 010.{ 011.get { return _activeJob; } 012.set 013.{ 014.if (_activeJob != value) 015.{ 016._activeJob = value; 017.NotifyChanged("activeJob"); 018.} 019.} 020.} 021. 022.public bool isNewJob; 023. 024.private string _addEditString; 025.public string AddEditString 026.{ 027.get { return _addEditString; } 028.set 029.{ 030.if (_addEditString != value) 031.{ 032._addEditString = value; 033.NotifyChanged("AddEditString"); 034.} 035.} 036.} 037. 038.private string _addEditButtonString; 039.public string AddEditButtonString 040.{ 041.get { return _addEditButtonString; } 042.set 043.{ 044.if (_addEditButtonString != value) 045.{ 046._addEditButtonString = value; 047.NotifyChanged("AddEditButtonString"); 048.} 049.} 050.} 051. 052.public SubscriptionToken addJobToken = new SubscriptionToken(); 053.public SubscriptionToken editJobToken = new SubscriptionToken(); 054. 055.public DelegateCommand<object> AddEditCommand { get; set; } 056.public DelegateCommand<object> CancelCommand { get; set; } 057. 
058.private ObservableCollection<ValidationError> _errors = new ObservableCollection<ValidationError>(); 059.public ObservableCollection<ValidationError> Errors 060.{ 061.get { return _errors; } 062.} 063. 064.private ObservableCollection<ValidationResult> _valResults = new ObservableCollection<ValidationResult>(); 065.public ObservableCollection<ValidationResult> ValResults 066.{ 067.get { return this._valResults; } 068.} 069. 070.public AddEditJobViewModel(IEventAggregator eventAgg, IRegionManager regionmanager) 071.{ 072.// set Unity items 073.this.eventAggregator = eventAgg; 074.this.regionManager = regionmanager; 075. 076.context = new RecruitingContext(); 077. 078.AddEditCommand = new DelegateCommand<object>(this.AddEditJobCommand); 079.CancelCommand = new DelegateCommand<object>(this.CancelAddEditCommand); 080. 081.SetSubscriptions(); 082.} 083. 084.#region Subscription Declaration and Events 085. 086.public void SetSubscriptions() 087.{ 088.AddJobEvent addJob = this.eventAggregator.GetEvent<AddJobEvent>(); 089. 090.if (addJobToken != null) 091.addJob.Unsubscribe(addJobToken); 092. 093.addJobToken = addJob.Subscribe(this.AddJobEventHandler); 094. 095.EditJobEvent editJob = this.eventAggregator.GetEvent<EditJobEvent>(); 096. 097.if (editJobToken != null) 098.editJob.Unsubscribe(editJobToken); 099. 100.editJobToken = editJob.Subscribe(this.EditJobEventHandler); 101.} 102. 103.public void AddJobEventHandler(bool isNew) 104.{ 105.this.activeJob = null; 106.this.activeJob = new JobPosting(); 107.this.activeJob.IsActive = true; // We assume that we want a new job to go up immediately 108.this.isNewJob = true; 109.this.AddEditString = "Add New Job Posting"; 110.this.AddEditButtonString = "Add Job"; 111. 112.MakeMeActive(this.regionManager, "MainRegion", "AddEditJobView"); 113.} 114. 115.public void EditJobEventHandler(JobPosting editJob) 116.{ 117.this.activeJob = null; 118.this.activeJob = editJob; 119.this.isNewJob = false; 120.this.AddEditString = "Edit Job Posting"; 121.this.AddEditButtonString = "Edit Job"; 122. 123.MakeMeActive(this.regionManager, "MainRegion", "AddEditJobView"); 124.} 125. 126.#endregion 127. 128.#region DelegateCommands from View 129. 130.public void AddEditJobCommand(object obj) 131.{ 132.if (this.Errors.Count > 0) 133.{ 134.List<string> errorMessages = new List<string>(); 135. 136.foreach (var valR in this.Errors) 137.{ 138.errorMessages.Add(valR.Exception.Message); 139.} 140. 141.this.eventAggregator.GetEvent<DisplayValidationErrorsEvent>().Publish(errorMessages); 142. 143.} 144.else if (!Validator.TryValidateObject(this.activeJob, new ValidationContext(this.activeJob, null, null), _valResults, true)) 145.{ 146.List<string> errorMessages = new List<string>(); 147. 148.foreach (var valR in this._valResults) 149.{ 150.errorMessages.Add(valR.ErrorMessage); 151.} 152. 153.this._valResults.Clear(); 154. 155.this.eventAggregator.GetEvent<DisplayValidationErrorsEvent>().Publish(errorMessages); 156.} 157.else 158.{ 159.if (this.isNewJob) 160.{ 161.this.eventAggregator.GetEvent<AddJobCompleteEvent>().Publish(this.activeJob); 162.} 163.else 164.{ 165.this.eventAggregator.GetEvent<EditJobCompleteEvent>().Publish(true); 166.} 167.} 168.} 169. 170.public void CancelAddEditCommand(object obj) 171.{ 172.if (this.isNewJob) 173.{ 174.this.eventAggregator.GetEvent<AddJobCompleteEvent>().Publish(null); 175.} 176.else 177.{ 178.this.eventAggregator.GetEvent<EditJobCompleteEvent>().Publish(false); 179.} 180.} 181. 
182.#endregion 183.} 184.} We start seeing something new on line 103 - the AddJobEventHandler will create a new job and set that to the activeJob item on the ViewModel. When this is all set, the view calls that familiar MakeMeActive method to activate itself. I made a bit of a management call on making views self-activate like this, but I figured it works for one reason. As I create this application, views may not exist that I have in mind, so after a view receives its 'ping' from being subscribed to an event, it prepares whatever it needs to do and then goes active. This way if I don't have 'edit' hooked up, I can click as the day is long on the main view and won't get lost in an empty region. Total personal preference here. :) Everything else should again be pretty straightforward, although I do a bit of validation checking in the AddEditJobCommand, which can either fire off an event back to the main view/viewmodel if everything is a success or send a list of errors to our notification module, which pops open a RadWindow with the alerts if any exist. As a bonus side note, here's what my WCF RIA Services metadata looks like for handling all of the validation: private JobPostingMetadata() { } [StringLength(2500, ErrorMessage = "Description should be more than one and less than 2500 characters.", MinimumLength = 1)] [Required(ErrorMessage = "Description is required.")] public string Description; [Required(ErrorMessage="Active Status is Required")] public bool IsActive; [StringLength(100, ErrorMessage = "Posting title must be more than 3 but less than 100 characters.", MinimumLength = 3)] [Required(ErrorMessage = "Job Title is required.")] public string JobTitle; [Required] public string Location; public bool NeedsCV; public bool NeedsOverview; public bool NeedsResume; public int PostingID; [Required(ErrorMessage="Qualifications are required.")] [StringLength(2500, ErrorMessage="Qualifications should be more than one and less than 2500 characters.", MinimumLength=1)] public string Qualifications; [StringLength(2500, ErrorMessage = "Requirements should be more than one and less than 2500 characters.", MinimumLength = 1)] [Required(ErrorMessage="Requirements are required.")] public string Requirements; The RecruitCB Alternative See all that Xaml I pasted above? Those are now two pieces sitting in the JobsView.xaml file. The only real difference is that the xEditGrid now sits in the same place as xJobsGrid, with visibility swapping out between the two for a quick switch. I also took out all the cal: and command: command references, replaced the Button commands with Click events, and replaced the Grid selection command with a SelectedItemChanged event. Also, at the bottom of the xEditGrid after the last button, I added a ValidationSummary (with Visibility=Collapsed) to catch any errors that are popping up.
Simple as can be, and leads to this being the single code-behind file: 001.public partial class JobsView : UserControl 002.{ 003.public RecruitingContext context; 004.public JobPosting activeJob; 005.public bool isNew; 006.private ObservableCollection<ValidationResult> _valResults = new ObservableCollection<ValidationResult>(); 007.public ObservableCollection<ValidationResult> ValResults 008.{ 009.get { return this._valResults; } 010.} 011.public JobsView() 012.{ 013.InitializeComponent(); 014.this.Loaded += new RoutedEventHandler(JobsView_Loaded); 015.} 016.void JobsView_Loaded(object sender, RoutedEventArgs e) 017.{ 018.context = new RecruitingContext(); 019.xJobsGrid.ItemsSource = context.JobPostings; 020.context.Load(context.GetJobPostingsQuery()); 021.} 022.private void xAddRecordButton_Click(object sender, RoutedEventArgs e) 023.{ 024.activeJob = new JobPosting(); 025.isNew = true; 026.xAddEditTitle.Text = "Add a Job Posting"; 027.xAddEditButton.Content = "Add"; 028.xEditGrid.DataContext = activeJob; 029.HideJobsGrid(); 030.} 031.private void xEditRecordButton_Click(object sender, RoutedEventArgs e) 032.{ 033.activeJob = xJobsGrid.SelectedItem as JobPosting; 034.isNew = false; 035.xAddEditTitle.Text = "Edit a Job Posting"; 036.xAddEditButton.Content = "Edit"; 037.xEditGrid.DataContext = activeJob; 038.HideJobsGrid(); 039.} 040.private void xAddEditButton_Click(object sender, RoutedEventArgs e) 041.{ 042.if (!Validator.TryValidateObject(this.activeJob, new ValidationContext(this.activeJob, null, null), _valResults, true)) 043.{ 044.List<string> errorMessages = new List<string>(); 045.foreach (var valR in this._valResults) 046.{ 047.errorMessages.Add(valR.ErrorMessage); 048.} 049.this._valResults.Clear(); 050.ShowErrors(errorMessages); 051.} 052.else if (xSummary.Errors.Count > 0) 053.{ 054.List<string> errorMessages = new List<string>(); 055.foreach (var err in xSummary.Errors) 056.{ 057.errorMessages.Add(err.Message); 058.} 059.ShowErrors(errorMessages); 060.} 061.else 062.{ 063.if (this.isNew) 064.{ 065.context.JobPostings.Add(activeJob); 066.context.SubmitChanges((s) => 067.{ 068.ActionHistory thisAction = new ActionHistory(); 069.thisAction.PostingID = activeJob.PostingID; 070.thisAction.Description = String.Format("Job '{0}' has been added by {1}", activeJob.JobTitle, "default user"); 071.thisAction.TimeStamp = DateTime.Now; 072.context.ActionHistories.Add(thisAction); 073.context.SubmitChanges(); 074.}, null); 075.} 076.else 077.{ 078.context.SubmitChanges((s) => 079.{ 080.ActionHistory thisAction = new ActionHistory(); 081.thisAction.PostingID = activeJob.PostingID; 082.thisAction.Description = String.Format("Job '{0}' has been edited by {1}", activeJob.JobTitle, "default user"); 083.thisAction.TimeStamp = DateTime.Now; 084.context.ActionHistories.Add(thisAction); 085.context.SubmitChanges(); 086.}, null); 087.} 088.ShowJobsGrid(); 089.} 090.} 091.private void xCancelButton_Click(object sender, RoutedEventArgs e) 092.{ 093.ShowJobsGrid(); 094.} 095.private void ShowJobsGrid() 096.{ 097.xAddEditRecordButtonPanel.Visibility = Visibility.Visible; 098.xEditGrid.Visibility = Visibility.Collapsed; 099.xJobsGrid.Visibility = Visibility.Visible; 100.} 101.private void HideJobsGrid() 102.{ 103.xAddEditRecordButtonPanel.Visibility = Visibility.Collapsed; 104.xJobsGrid.Visibility = Visibility.Collapsed; 105.xEditGrid.Visibility = Visibility.Visible; 106.} 107.private void ShowErrors(List<string> errorList) 108.{ 109.string nm = "Errors received: \n"; 110.foreach (string anerror in
errorList) 111.nm += anerror + "\n"; 112.RadWindow.Alert(nm); 113.} 114.} The first 39 lines should be pretty familiar, not doing anything too unorthodox to get this up and running. Once we hit the xAddEditButton_Click on line 40, we're still doing pretty much the same things except instead of checking the ValidationHelper errors, we both run a check on the current activeJob object as well as check the ValidationSummary errors list. Once that is set, we again use the callback of context.SubmitChanges (lines 68 and 78) to create an ActionHistory which we will use to track these items down the line. That's all? Essentially... yes. If you look back through this post, most of the code and adventures we have taken were just to get things working in the MVVM/Prism setup. Since I have the whole 'module' self-contained in a single JobsView+code-behind setup, I don't have to worry about things like sending events off into space for someone to pick up, communicating through an Infrastructure project, or even re-inventing events to be used with attached behaviors. Everything just kinda works, and again with much less code. Here's a picture of the MVVM and Code-behind versions on the Jobs and AddEdit views, but since the functionality is the same in both apps you still cannot tell them apart (strike two): Looking ahead, the Applicants module is effectively the same thing as the Jobs module, so most of the code is being cut-and-pasted back and forth with minor tweaks here and there. So that one is being taken care of by me behind the scenes. Next time, we get into a new world of fun - the interview scheduling module, which will pull from available jobs and applicants for each interview being scheduled, tying everything together with RadScheduler to the rescue.
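For completeness: both viewmodels above derive from ViewModelBase and call NotifyChanged and MakeMeActive, but that base class never appears in this excerpt. A rough sketch of what it might look like, guessed purely from how it is used here (the region lookup in particular is an assumption on my part, not the author's actual code):

using System.ComponentModel;
using Microsoft.Practices.Composite.Regions;

public abstract class ViewModelBase : INotifyPropertyChanged
{
    public event PropertyChangedEventHandler PropertyChanged;

    // Called as NotifyChanged("SelectedJob") etc. from the property setters above.
    protected void NotifyChanged(string propertyName)
    {
        var handler = PropertyChanged;
        if (handler != null)
            handler(this, new PropertyChangedEventArgs(propertyName));
    }

    // Lets a viewmodel bring its own view forward in a named region once it has handled an event.
    public void MakeMeActive(IRegionManager regionManager, string regionName, string viewName)
    {
        var region = regionManager.Regions[regionName];
        var view = region.GetView(viewName);
        if (view != null)
            region.Activate(view);
    }
}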

    Read the article

  • Why Haven’t NFC Payments Taken Off?

    - by David Dorf
    With the EMV 2015 milestone approaching rapidly, there’s been renewed interest in smartcards, those credit cards with an embedded computer chip.  Back in 1996 I was working for a vendor helping Visa introduce a stored-value smartcard to the US.  Visa Cash was debuted at the 1996 Olympics in Atlanta, and I firmly believed it was the beginning of a cashless society.  (I later worked on MasterCard’s system called Mondex, from the UK, which debuted the following year in Manhattan). But since you don’t have a Visa Cash card in your wallet, it’s obvious the project never took off.  It was convenient for consumers, faster for merchants, and more cost-effective for banks, so why did it fail?  All emerging payment systems suffer from the chicken-and-egg dilemma.  Consumers won’t carry the cards if few merchants accept them, and merchants won’t install the terminals if few consumers have cards. Today’s emerging payment providers are in a similar pickle.  There has to be enough value for all three constituents – consumers, merchants, banks – to change the status quo.  And it’s not enough to exceed the value, it’s got to be a leap in value, because people generally resist change.  ATMs and transit cards are great examples of this, and airline kiosks and self-checkout systems are to a lesser extent. Although Google Wallet and ISIS, the two leading NFC payment platforms in the US, have shown strong commitment, there’s been very little traction.  Yes, I can load my credit card number into my phone then tap to pay, but what was the incremental value over swiping my old card?  For it to be a leap in value, it has to offer more than just payment, which I can do very easily today.  The other two ingredients are thought to be loyalty programs and digital coupons, but neither Google nor ISIS really did them well. Of course a large portion of the mobile phone market doesn’t even support NFC thanks to Apple, and since it’s not in their best interest that situation is unlikely to change.  Another issue is getting access to the “secure element,” the chip inside the phone where accounts numbers can be held securely.  Telco providers and handset manufacturers own that area, and they’re not willing to share with banks.  (Host Card Emulation, which has been endorsed by MasterCard and Visa, might be a solution.) Square recently gave up on its wallet, and MCX (the group of retailers trying to create a mobile payment platform) is very slow out of the gate.  That leaves PayPal and a slew of smaller companies trying to introduce easier ways to pay. But is it really so cumbersome to carry and swipe (soon to insert) a credit card?  Aren’t there more important problems to solve in the retail customer experience?  Maybe Apple will come up with some novel way to use iBeacons and fingerprint identification to make payments, but for now I think we need to focus on upgrading to Chip-and-PIN and tightening security.  In the meantime, NFC payments will continue to struggle.

    Read the article

  • How John Got 15x Improvement Without Really Trying

    - by rchrd
    The following article was published on a Sun Microsystems website a number of years ago by John Feo. It is still useful and worth preserving. So I'm republishing it here.  How I Got 15x Improvement Without Really Trying John Feo, Sun Microsystems Taking ten "personal" program codes used in scientific and engineering research, the author was able to get from 2 to 15 times performance improvement easily by applying some simple general optimization techniques. Introduction Scientific research based on computer simulation depends on the simulation for advancement. The research can advance only as fast as the computational codes can execute. The codes' efficiency determines both the rate and quality of results. In the same amount of time, a faster program can generate more results and can carry out a more detailed simulation of physical phenomena than a slower program. Highly optimized programs help science advance quickly and insure that monies supporting scientific research are used as effectively as possible. Scientific computer codes divide into three broad categories: ISV, community, and personal. ISV codes are large, mature production codes developed and sold commercially. The codes improve slowly over time both in methods and capabilities, and they are well tuned for most vendor platforms. Since the codes are mature and complex, there are few opportunities to improve their performance solely through code optimization. Improvements of 10% to 15% are typical. Examples of ISV codes are DYNA3D, Gaussian, and Nastran. Community codes are non-commercial production codes used by a particular research field. Generally, they are developed and distributed by a single academic or research institution with assistance from the community. Most users just run the codes, but some develop new methods and extensions that feed back into the general release. The codes are available on most vendor platforms. Since these codes are younger than ISV codes, there are more opportunities to optimize the source code. Improvements of 50% are not unusual. Examples of community codes are AMBER, CHARM, BLAST, and FASTA. Personal codes are those written by single users or small research groups for their own use. These codes are not distributed, but may be passed from professor-to-student or student-to-student over several years. They form the primordial ocean of applications from which community and ISV codes emerge. Government research grants pay for the development of most personal codes. This paper reports on the nature and performance of this class of codes. Over the last year, I have looked at over two dozen personal codes from more than a dozen research institutions. The codes cover a variety of scientific fields, including astronomy, atmospheric sciences, bioinformatics, biology, chemistry, geology, and physics. The sources range from a few hundred lines to more than ten thousand lines, and are written in Fortran, Fortran 90, C, and C++. For the most part, the codes are modular, documented, and written in a clear, straightforward manner. They do not use complex language features, advanced data structures, programming tricks, or libraries. I had little trouble understanding what the codes did or how data structures were used. Most came with a makefile. Surprisingly, only one of the applications is parallel. All developers have access to parallel machines, so availability is not an issue. Several tried to parallelize their applications, but stopped after encountering difficulties. 
Lack of education and a perception that parallelism is difficult prevented most from trying. I parallelized several of the codes using OpenMP, and did not judge any of the codes as difficult to parallelize. Even more surprising than the lack of parallelism is the inefficiency of the codes. I was able to get large improvements in performance in a matter of a few days applying simple optimization techniques. Table 1 lists ten representative codes [names and affiliation are omitted to preserve anonymity]. Improvements on one processor range from 2x to 15.5x with a simple average of 4.75x. I did not use sophisticated performance tools or drill deep into the program's execution character as one would do when tuning ISV or community codes. Using only a profiler and source line timers, I identified inefficient sections of code and improved their performance by inspection. The changes were at a high level. I am sure there is another factor of 2 or 3 in each code, and more if the codes are parallelized. The study’s results show that personal scientific codes are running many times slower than they should and that the problem is pervasive. Computational scientists are not sloppy programmers; however, few are trained in the art of computer programming or code optimization. I found that most have a working knowledge of some programming language and standard software engineering practices; but they do not know, or think about, how to make their programs run faster. They simply do not know the standard techniques used to make codes run faster. In fact, they do not even perceive that such techniques exist. The case studies described in this paper show that applying simple, well known techniques can significantly increase the performance of personal codes. It is important that the scientific community and the Government agencies that support scientific research find ways to better educate academic scientific programmers. The inefficiency of their codes is so bad that it is retarding both the quality and progress of scientific research. # cacheperformance redundantoperations loopstructures performanceimprovement 1 x x 15.5 2 x 2.8 3 x x 2.5 4 x 2.1 5 x x 2.0 6 x 5.0 7 x 5.8 8 x 6.3 9 2.2 10 x x 3.3 Table 1 — Area of improvement and performance gains of 10 codes The remainder of the paper is organized as follows: sections 2, 3, and 4 discuss the three most common sources of inefficiencies in the codes studied. These are cache performance, redundant operations, and loop structures. Each section includes several examples. The last section summaries the work and suggests a possible solution to the issues raised. Optimizing cache performance Commodity microprocessor systems use caches to increase memory bandwidth and reduce memory latencies. Typical latencies from processor to L1, L2, local, and remote memory are 3, 10, 50, and 200 cycles, respectively. Moreover, bandwidth falls off dramatically as memory distances increase. Programs that do not use cache effectively run many times slower than programs that do. When optimizing for cache, the biggest performance gains are achieved by accessing data in cache order and reusing data to amortize the overhead of cache misses. Secondary considerations are prefetching, associativity, and replacement; however, the understanding and analysis required to optimize for the latter are probably beyond the capabilities of the non-expert. Much can be gained simply by accessing data in the correct order and maximizing data reuse. 
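Before looking at the individual codes, a small illustration of my own (not taken from the study) of what "accessing data in cache order" means. In C a two-dimensional array is stored row by row, so the inner loop should run over the last index; walking the other way touches a different cache line on nearly every access:

#include <stdio.h>

#define N 1024

static double a[N][N];

/* Cache-friendly: the inner loop walks j, the contiguous (row) dimension. */
double sum_row_order(void)
{
    double sum = 0.0;
    for (int i = 0; i < N; i++)
        for (int j = 0; j < N; j++)
            sum += a[i][j];
    return sum;
}

/* Cache-hostile: the inner loop walks i, so consecutive accesses are N * sizeof(double) bytes apart. */
double sum_column_order(void)
{
    double sum = 0.0;
    for (int j = 0; j < N; j++)
        for (int i = 0; i < N; i++)
            sum += a[i][j];
    return sum;
}

int main(void)
{
    printf("%f %f\n", sum_row_order(), sum_column_order());
    return 0;
}

Fortran stores arrays in the opposite (column-major) order, which is why the paper talks about accessing Fortran elements in column order and C elements in row order.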
6 out of the 10 codes studied here benefited from such high level optimizations. Array Accesses The most important cache optimization is the most basic: accessing Fortran array elements in column order and C array elements in row order. Four of the ten codes—1, 2, 4, and 10—got it wrong. Compilers will restructure nested loops to optimize cache performance, but may not do so if the loop structure is too complex, or the loop body includes conditionals, complex addressing, or function calls. In code 1, the compiler failed to invert a key loop because of complex addressing do I = 0, 1010, delta_x IM = I - delta_x IP = I + delta_x do J = 5, 995, delta_x JM = J - delta_x JP = J + delta_x T1 = CA1(IP, J) + CA1(I, JP) T2 = CA1(IM, J) + CA1(I, JM) S1 = T1 + T2 - 4 * CA1(I, J) CA(I, J) = CA1(I, J) + D * S1 end do end do In code 2, the culprit is conditionals do I = 1, N do J = 1, N If (IFLAG(I,J) .EQ. 0) then T1 = Value(I, J-1) T2 = Value(I-1, J) T3 = Value(I, J) T4 = Value(I+1, J) T5 = Value(I, J+1) Value(I,J) = 0.25 * (T1 + T2 + T5 + T4) Delta = ABS(T3 - Value(I,J)) If (Delta .GT. MaxDelta) MaxDelta = Delta endif enddo enddo I fixed both programs by inverting the loops by hand. Code 10 has three-dimensional arrays and triply nested loops. The structure of the most computationally intensive loops is too complex to invert automatically or by hand. The only practical solution is to transpose the arrays so that the dimension accessed by the innermost loop is in cache order. The arrays can be transposed at construction or prior to entering a computationally intensive section of code. The former requires all array references to be modified, while the latter is cost effective only if the cost of the transpose is amortized over many accesses. I used the second approach to optimize code 10. Code 5 has four-dimensional arrays and loops are nested four deep. For all of the reasons cited above the compiler is not able to restructure three key loops. Assume C arrays and let the four dimensions of the arrays be i, j, k, and l. In the original code, the index structure of the three loops is L1: for i L2: for i L3: for i for l for l for j for k for j for k for j for k for l So only L3 accesses array elements in cache order. L1 is a very complex loop—much too complex to invert. I brought the loop into cache alignment by transposing the second and fourth dimensions of the arrays. Since the code uses a macro to compute all array indexes, I effected the transpose at construction and changed the macro appropriately. The dimensions of the new arrays are now: i, l, k, and j. L3 is a simple loop and easily inverted. L2 has a loop-carried scalar dependence in k. By promoting the scalar name that carries the dependence to an array, I was able to invert the third and fourth subloops aligning the loop with cache. Code 5 is by far the most difficult of the four codes to optimize for array accesses; but the knowledge required to fix the problems is no more than that required for the other codes. I would judge this code at the limits of, but not beyond, the capabilities of appropriately trained computational scientists. Array Strides When a cache miss occurs, a line (64 bytes) rather than just one word is loaded into the cache. If data is accessed stride 1, than the cost of the miss is amortized over 8 words. Any stride other than one reduces the cost savings. Two of the ten codes studied suffered from non-unit strides. The codes represent two important classes of "strided" codes. 
Code 1 employs a multi-grid algorithm to reduce time to convergence. The grids are every tenth, fifth, second, and unit element. Since time to convergence is inversely proportional to the distance between elements, coarse grids converge quickly providing good starting values for finer grids. The better starting values further reduce the time to convergence. The downside is that grids of every nth element, n > 1, introduce non-unit strides into the computation. In the original code, much of the savings of the multi-grid algorithm were lost due to this problem. I eliminated the problem by compressing (copying) coarse grids into continuous memory, and rewriting the computation as a function of the compressed grid. On convergence, I copied the final values of the compressed grid back to the original grid. The savings gained from unit stride access of the compressed grid more than paid for the cost of copying. Using compressed grids, the loop from code 1 included in the previous section becomes do j = 1, GZ do i = 1, GZ T1 = CA(i+0, j-1) + CA(i-1, j+0) T4 = CA1(i+1, j+0) + CA1(i+0, j+1) S1 = T1 + T4 - 4 * CA1(i+0, j+0) CA(i+0, j+0) = CA1(i+0, j+0) + DD * S1 enddo enddo where CA and CA1 are compressed arrays of size GZ. Code 7 traverses a list of objects selecting objects for later processing. The labels of the selected objects are stored in an array. The selection step has unit stride, but the processing steps have irregular stride. A fix is to save the parameters of the selected objects in temporary arrays as they are selected, and pass the temporary arrays to the processing functions. The fix is practical if the same parameters are used in selection as in processing, or if processing comprises a series of distinct steps which use overlapping subsets of the parameters. Both conditions are true for code 7, so I achieved significant improvement by copying parameters to temporary arrays during selection. Data reuse In the previous sections, we optimized for spatial locality. It is also important to optimize for temporal locality. Once read, a datum should be used as much as possible before it is forced from cache. Loop fusion and loop unrolling are two techniques that increase temporal locality. Unfortunately, both techniques increase register pressure—as loop bodies become larger, the number of registers required to hold temporary values grows. Once register spilling occurs, any gains evaporate quickly. For multiprocessors with small register sets or small caches, the sweet spot can be very small. In the ten codes presented here, I found no opportunities for loop fusion and only two opportunities for loop unrolling (codes 1 and 3). In code 1, unrolling the outer and inner loop one iteration increases the number of result values computed by the loop body from 1 to 4, do J = 1, GZ-2, 2 do I = 1, GZ-2, 2 T1 = CA1(i+0, j-1) + CA1(i-1, j+0) T2 = CA1(i+1, j-1) + CA1(i+0, j+0) T3 = CA1(i+0, j+0) + CA1(i-1, j+1) T4 = CA1(i+1, j+0) + CA1(i+0, j+1) T5 = CA1(i+2, j+0) + CA1(i+1, j+1) T6 = CA1(i+1, j+1) + CA1(i+0, j+2) T7 = CA1(i+2, j+1) + CA1(i+1, j+2) S1 = T1 + T4 - 4 * CA1(i+0, j+0) S2 = T2 + T5 - 4 * CA1(i+1, j+0) S3 = T3 + T6 - 4 * CA1(i+0, j+1) S4 = T4 + T7 - 4 * CA1(i+1, j+1) CA(i+0, j+0) = CA1(i+0, j+0) + DD * S1 CA(i+1, j+0) = CA1(i+1, j+0) + DD * S2 CA(i+0, j+1) = CA1(i+0, j+1) + DD * S3 CA(i+1, j+1) = CA1(i+1, j+1) + DD * S4 enddo enddo The loop body executes 12 reads, whereas as the rolled loop shown in the previous section executes 20 reads to compute the same four values. 
In code 3, two loops are unrolled 8 times and one loop is unrolled 4 times. Here is the before for (k = 0; k < NK[u]; k++) { sum = 0.0; for (y = 0; y < NY; y++) { sum += W[y][u][k] * delta[y]; } backprop[i++]=sum; } and after code for (k = 0; k < KK - 8; k+=8) { sum0 = 0.0; sum1 = 0.0; sum2 = 0.0; sum3 = 0.0; sum4 = 0.0; sum5 = 0.0; sum6 = 0.0; sum7 = 0.0; for (y = 0; y < NY; y++) { sum0 += W[y][0][k+0] * delta[y]; sum1 += W[y][0][k+1] * delta[y]; sum2 += W[y][0][k+2] * delta[y]; sum3 += W[y][0][k+3] * delta[y]; sum4 += W[y][0][k+4] * delta[y]; sum5 += W[y][0][k+5] * delta[y]; sum6 += W[y][0][k+6] * delta[y]; sum7 += W[y][0][k+7] * delta[y]; } backprop[k+0] = sum0; backprop[k+1] = sum1; backprop[k+2] = sum2; backprop[k+3] = sum3; backprop[k+4] = sum4; backprop[k+5] = sum5; backprop[k+6] = sum6; backprop[k+7] = sum7; } for one of the loops unrolled 8 times. Optimizing for temporal locality is the most difficult optimization considered in this paper. The concepts are not difficult, but the sweet spot is small. Identifying where the program can benefit from loop unrolling or loop fusion is not trivial. Moreover, it takes some effort to get it right. Still, educating scientific programmers about temporal locality and teaching them how to optimize for it will pay dividends. Reducing instruction count Execution time is a function of instruction count. Reduce the count and you usually reduce the time. The best solution is to use a more efficient algorithm; that is, an algorithm whose order of complexity is smaller, that converges quicker, or is more accurate. Optimizing source code without changing the algorithm yields smaller, but still significant, gains. This paper considers only the latter because the intent is to study how much better codes can run if written by programmers schooled in basic code optimization techniques. The ten codes studied benefited from three types of "instruction reducing" optimizations. The two most prevalent were hoisting invariant memory and data operations out of inner loops. The third was eliminating unnecessary data copying. The nature of these inefficiencies is language dependent. Memory operations The semantics of C make it difficult for the compiler to determine all the invariant memory operations in a loop. The problem is particularly acute for loops in functions since the compiler may not know the values of the function's parameters at every call site when compiling the function. Most compilers support pragmas to help resolve ambiguities; however, these pragmas are not comprehensive and there is no standard syntax. To guarantee that invariant memory operations are not executed repetitively, the user has little choice but to hoist the operations by hand. The problem is not as severe in Fortran programs because in the absence of equivalence statements, it is a violation of the language's semantics for two names to share memory. Codes 3 and 5 are C programs. In both cases, the compiler did not hoist all invariant memory operations from inner loops. Consider the following loop from code 3 for (y = 0; y < NY; y++) { i = 0; for (u = 0; u < NU; u++) { for (k = 0; k < NK[u]; k++) { dW[y][u][k] += delta[y] * I1[i++]; } } } Since dW[y][u] can point to the same memory space as delta for one or more values of y and u, assignment to dW[y][u][k] may change the value of delta[y]. 
In reality, dW and delta do not overlap in memory, so I rewrote the loop as for (y = 0; y < NY; y++) { i = 0; Dy = delta[y]; for (u = 0; u < NU; u++) { for (k = 0; k < NK[u]; k++) { dW[y][u][k] += Dy * I1[i++]; } } } Failure to hoist invariant memory operations may be due to complex address calculations. If the compiler can not determine that the address calculation is invariant, then it can hoist neither the calculation nor the associated memory operations. As noted above, code 5 uses a macro to address four-dimensional arrays #define MAT4D(a,q,i,j,k) (double *)((a)->data + (q)*(a)->strides[0] + (i)*(a)->strides[3] + (j)*(a)->strides[2] + (k)*(a)->strides[1]) The macro is too complex for the compiler to understand and so, it does not identify any subexpressions as loop invariant. The simplest way to eliminate the address calculation from the innermost loop (over i) is to define a0 = MAT4D(a,q,0,j,k) before the loop and then replace all instances of *MAT4D(a,q,i,j,k) in the loop with a0[i] A similar problem appears in code 6, a Fortran program. The key loop in this program is do n1 = 1, nh nx1 = (n1 - 1) / nz + 1 nz1 = n1 - nz * (nx1 - 1) do n2 = 1, nh nx2 = (n2 - 1) / nz + 1 nz2 = n2 - nz * (nx2 - 1) ndx = nx2 - nx1 ndy = nz2 - nz1 gxx = grn(1,ndx,ndy) gyy = grn(2,ndx,ndy) gxy = grn(3,ndx,ndy) balance(n1,1) = balance(n1,1) + (force(n2,1) * gxx + force(n2,2) * gxy) * h1 balance(n1,2) = balance(n1,2) + (force(n2,1) * gxy + force(n2,2) * gyy)*h1 end do end do The programmer has written this loop well—there are no loop invariant operations with respect to n1 and n2. However, the loop resides within an iterative loop over time and the index calculations are independent with respect to time. Trading space for time, I precomputed the index values prior to the entering the time loop and stored the values in two arrays. I then replaced the index calculations with reads of the arrays. Data operations Ways to reduce data operations can appear in many forms. Implementing a more efficient algorithm produces the biggest gains. The closest I came to an algorithm change was in code 4. This code computes the inner product of K-vectors A(i) and B(j), 0 = i < N, 0 = j < M, for most values of i and j. Since the program computes most of the NM possible inner products, it is more efficient to compute all the inner products in one triply-nested loop rather than one at a time when needed. The savings accrue from reading A(i) once for all B(j) vectors and from loop unrolling. for (i = 0; i < N; i+=8) { for (j = 0; j < M; j++) { sum0 = 0.0; sum1 = 0.0; sum2 = 0.0; sum3 = 0.0; sum4 = 0.0; sum5 = 0.0; sum6 = 0.0; sum7 = 0.0; for (k = 0; k < K; k++) { sum0 += A[i+0][k] * B[j][k]; sum1 += A[i+1][k] * B[j][k]; sum2 += A[i+2][k] * B[j][k]; sum3 += A[i+3][k] * B[j][k]; sum4 += A[i+4][k] * B[j][k]; sum5 += A[i+5][k] * B[j][k]; sum6 += A[i+6][k] * B[j][k]; sum7 += A[i+7][k] * B[j][k]; } C[i+0][j] = sum0; C[i+1][j] = sum1; C[i+2][j] = sum2; C[i+3][j] = sum3; C[i+4][j] = sum4; C[i+5][j] = sum5; C[i+6][j] = sum6; C[i+7][j] = sum7; }} This change requires knowledge of a typical run; i.e., that most inner products are computed. The reasons for the change, however, derive from basic optimization concepts. It is the type of change easily made at development time by a knowledgeable programmer. In code 5, we have the data version of the index optimization in code 6. 
Here a very expensive computation is a function of the loop indices and so cannot be hoisted out of the loop; however, the computation is invariant with respect to an outer iterative loop over time. We can compute its value for each iteration of the computation loop prior to entering the time loop and save the values in an array. The increase in memory required to store the values is small in comparison to the large savings in time. The main loop in Code 8 is doubly nested. The inner loop includes a series of guarded computations; some are a function of the inner loop index but not the outer loop index while others are a function of the outer loop index but not the inner loop index for (j = 0; j < N; j++) { for (i = 0; i < M; i++) { r = i * hrmax; R = A[j]; temp = (PRM[3] == 0.0) ? 1.0 : pow(r, PRM[3]); high = temp * kcoeff * B[j] * PRM[2] * PRM[4]; low = high * PRM[6] * PRM[6] / (1.0 + pow(PRM[4] * PRM[6], 2.0)); kap = (R > PRM[6]) ? high * R * R / (1.0 + pow(PRM[4]*r, 2.0) : low * pow(R/PRM[6], PRM[5]); < rest of loop omitted > }} Note that the value of temp is invariant to j. Thus, we can hoist the computation for temp out of the loop and save its values in an array. for (i = 0; i < M; i++) { r = i * hrmax; TEMP[i] = pow(r, PRM[3]); } [N.B. – the case for PRM[3] = 0 is omitted and will be reintroduced later.] We now hoist out of the inner loop the computations invariant to i. Since the conditional guarding the value of kap is invariant to i, it behooves us to hoist the computation out of the inner loop, thereby executing the guard once rather than M times. The final version of the code is for (j = 0; j < N; j++) { R = rig[j] / 1000.; tmp1 = kcoeff * par[2] * beta[j] * par[4]; tmp2 = 1.0 + (par[4] * par[4] * par[6] * par[6]); tmp3 = 1.0 + (par[4] * par[4] * R * R); tmp4 = par[6] * par[6] / tmp2; tmp5 = R * R / tmp3; tmp6 = pow(R / par[6], par[5]); if ((par[3] == 0.0) && (R > par[6])) { for (i = 1; i <= imax1; i++) KAP[i] = tmp1 * tmp5; } else if ((par[3] == 0.0) && (R <= par[6])) { for (i = 1; i <= imax1; i++) KAP[i] = tmp1 * tmp4 * tmp6; } else if ((par[3] != 0.0) && (R > par[6])) { for (i = 1; i <= imax1; i++) KAP[i] = tmp1 * TEMP[i] * tmp5; } else if ((par[3] != 0.0) && (R <= par[6])) { for (i = 1; i <= imax1; i++) KAP[i] = tmp1 * TEMP[i] * tmp4 * tmp6; } for (i = 0; i < M; i++) { kap = KAP[i]; r = i * hrmax; < rest of loop omitted > } } Maybe not the prettiest piece of code, but certainly much more efficient than the original loop, Copy operations Several programs unnecessarily copy data from one data structure to another. This problem occurs in both Fortran and C programs, although it manifests itself differently in the two languages. Code 1 declares two arrays—one for old values and one for new values. At the end of each iteration, the array of new values is copied to the array of old values to reset the data structures for the next iteration. This problem occurs in Fortran programs not included in this study and in both Fortran 77 and Fortran 90 code. Introducing pointers to the arrays and swapping pointer values is an obvious way to eliminate the copying; but pointers is not a feature that many Fortran programmers know well or are comfortable using. An easy solution not involving pointers is to extend the dimension of the value array by 1 and use the last dimension to differentiate between arrays at different times. For example, if the data space is N x N, declare the array (N, N, 2). 
Then store the problem’s initial values in (_, _, 2) and define the scalar names new = 2 and old = 1. At the start of each iteration, swap old and new to reset the arrays. The old–new copy problem did not appear in any C program. In programs that had new and old values, the code swapped pointers to reset data structures. Where unnecessary copying did occur is in structure assignment and parameter passing. Structures in C are handled much like scalars. Assignment causes the data space of the right-hand name to be copied to the data space of the left-hand name. Similarly, when a structure is passed to a function, the data space of the actual parameter is copied to the data space of the formal parameter. If the structure is large and the assignment or function call is in an inner loop, then copying costs can grow quite large. While none of the ten programs considered here manifested this problem, it did occur in programs not included in the study. A simple fix is always to refer to structures via pointers. Optimizing loop structures Since scientific programs spend almost all their time in loops, efficient loops are the key to good performance. Conditionals, function calls, little instruction level parallelism, and large numbers of temporary values make it difficult for the compiler to generate tightly packed, highly efficient code. Conditionals and function calls introduce jumps that disrupt code flow. Users should eliminate or isolate conditionals to their own loops as much as possible. Often logical expressions can be substituted for if-then-else statements. For example, code 2 includes the following snippet MaxDelta = 0.0 do J = 1, N do I = 1, M < code omitted > Delta = abs(OldValue - NewValue) if (Delta > MaxDelta) MaxDelta = Delta enddo enddo if (MaxDelta .gt. 0.001) goto 200 Since the only use of MaxDelta is to control the jump to 200 and all that matters is whether or not it is greater than 0.001, I made MaxDelta a boolean and rewrote the snippet as MaxDelta = .false. do J = 1, N do I = 1, M < code omitted > Delta = abs(OldValue - NewValue) MaxDelta = MaxDelta .or. (Delta .gt. 0.001) enddo enddo if (MaxDelta) goto 200 thereby eliminating the conditional expression from the inner loop. A microprocessor can execute many instructions per instruction cycle. Typically, it can execute one or more memory, floating point, integer, and jump operations. To be executed simultaneously, the operations must be independent. Thick loops tend to have more instruction level parallelism than thin loops. Moreover, they reduce memory traffic by maximizing data reuse. Loop unrolling and loop fusion are two techniques to increase the size of loop bodies. Several of the codes studied benefitted from loop unrolling, but none benefitted from loop fusion. This observation is not too surprising since it is the general tendency of programmers to write thick loops. As loops become thicker, the number of temporary values grows, increasing register pressure. If registers spill, then memory traffic increases and code flow is disrupted. A thick loop with many temporary values may execute slower than an equivalent series of thin loops. The biggest gain will be achieved if the thick loop can be split into a series of independent loops eliminating the need to write and read temporary arrays.
I found such an occasion in code 10 where I split the loop do i = 1, n do j = 1, m A24(j,i)= S24(j,i) * T24(j,i) + S25(j,i) * U25(j,i) B24(j,i)= S24(j,i) * T25(j,i) + S25(j,i) * U24(j,i) A25(j,i)= S24(j,i) * C24(j,i) + S25(j,i) * V24(j,i) B25(j,i)= S24(j,i) * U25(j,i) + S25(j,i) * V25(j,i) C24(j,i)= S26(j,i) * T26(j,i) + S27(j,i) * U26(j,i) D24(j,i)= S26(j,i) * T27(j,i) + S27(j,i) * V26(j,i) C25(j,i)= S27(j,i) * S28(j,i) + S26(j,i) * U28(j,i) D25(j,i)= S27(j,i) * T28(j,i) + S26(j,i) * V28(j,i) end do end do into two disjoint loops do i = 1, n do j = 1, m A24(j,i)= S24(j,i) * T24(j,i) + S25(j,i) * U25(j,i) B24(j,i)= S24(j,i) * T25(j,i) + S25(j,i) * U24(j,i) A25(j,i)= S24(j,i) * C24(j,i) + S25(j,i) * V24(j,i) B25(j,i)= S24(j,i) * U25(j,i) + S25(j,i) * V25(j,i) end do end do do i = 1, n do j = 1, m C24(j,i)= S26(j,i) * T26(j,i) + S27(j,i) * U26(j,i) D24(j,i)= S26(j,i) * T27(j,i) + S27(j,i) * V26(j,i) C25(j,i)= S27(j,i) * S28(j,i) + S26(j,i) * U28(j,i) D25(j,i)= S27(j,i) * T28(j,i) + S26(j,i) * V28(j,i) end do end do Conclusions Over the course of the last year, I have had the opportunity to work with over two dozen academic scientific programmers at leading research universities. Their research interests span a broad range of scientific fields. Except for two programs that relied almost exclusively on library routines (matrix multiply and fast Fourier transform), I was able to improve significantly the single processor performance of all codes. Improvements range from 2x to 15.5x with a simple average of 4.75x. Changes to the source code were at a very high level. I did not use sophisticated techniques or programming tools to discover inefficiencies or effect the changes. Only one code was parallel despite the availability of parallel systems to all developers. Clearly, we have a problem—personal scientific research codes are highly inefficient and not running parallel. The developers are unaware of simple optimization techniques to make programs run faster. They lack education in the art of code optimization and parallel programming. I do not believe we can fix the problem by publishing additional books or training manuals. To date, the developers in questions have not studied the books or manual available, and are unlikely to do so in the future. Short courses are a possible solution, but I believe they are too concentrated to be much use. The general concepts can be taught in a three or four day course, but that is not enough time for students to practice what they learn and acquire the experience to apply and extend the concepts to their codes. Practice is the key to becoming proficient at optimization. I recommend that graduate students be required to take a semester length course in optimization and parallel programming. We would never give someone access to state-of-the-art scientific equipment costing hundreds of thousands of dollars without first requiring them to demonstrate that they know how to use the equipment. Yet the criterion for time on state-of-the-art supercomputers is at most an interesting project. Requestors are never asked to demonstrate that they know how to use the system, or can use the system effectively. A semester course would teach them the required skills. Government agencies that fund academic scientific research pay for most of the computer systems supporting scientific research as well as the development of most personal scientific codes. 
These agencies should require graduate schools to offer a course in optimization and parallel programming as a requirement for funding. About the Author John Feo received his Ph.D. in Computer Science from The University of Texas at Austin in 1986. After graduate school, Dr. Feo worked at Lawrence Livermore National Laboratory where he was the Group Leader of the Computer Research Group and principal investigator of the Sisal Language Project. In 1997, Dr. Feo joined Tera Computer Company where he was project manager for the MTA, and oversaw the programming and evaluation of the MTA at the San Diego Supercomputer Center. In 2000, Dr. Feo joined Sun Microsystems as an HPC application specialist. He works with university research groups to optimize and parallelize scientific codes. Dr. Feo has published over two dozen research articles in the areas of parallel programming, parallel programming languages, and application performance.

    Read the article

  • SQL SERVER – Introduction to LEAD and LAG – Analytic Functions Introduced in SQL Server 2012

    - by pinaldave
    SQL Server 2012 introduces the new analytical functions LEAD() and LAG(). These functions access data from a subsequent row (for LEAD) and a previous row (for LAG) in the same result set without the use of a self-join. It is difficult to explain this in words alone, so I will use a small example to explain these functions. Instead of creating a new table, I will be using the AdventureWorks sample database, as most developers use it for experiments. Let us run the following query.

    USE AdventureWorks
    GO
    SELECT s.SalesOrderID, s.SalesOrderDetailID, s.OrderQty,
        LEAD(SalesOrderDetailID) OVER (ORDER BY SalesOrderDetailID) LeadValue,
        LAG(SalesOrderDetailID) OVER (ORDER BY SalesOrderDetailID) LagValue
    FROM Sales.SalesOrderDetail s
    WHERE SalesOrderID IN (43670, 43669, 43667, 43663)
    ORDER BY s.SalesOrderID, s.SalesOrderDetailID, s.OrderQty
    GO

    The above query gives us the following result. When we look at the result set it is very clear that the LEAD function gives us the value that comes in the next row and the LAG function gives us the value that was encountered in the previous row. If we had to generate the same result without these functions, we would have to use a self-join; in a future blog post we will see the same. Let us explore these functions a bit more. They not only provide the previous or next row, they can also access any row before or after the current one using an offset. Let us run the following query, where the LEAD and LAG functions access rows with an offset of 2.

    USE AdventureWorks
    GO
    SELECT s.SalesOrderID, s.SalesOrderDetailID, s.OrderQty,
        LEAD(SalesOrderDetailID, 2) OVER (ORDER BY SalesOrderDetailID) LeadValue,
        LAG(SalesOrderDetailID, 2) OVER (ORDER BY SalesOrderDetailID) LagValue
    FROM Sales.SalesOrderDetail s
    WHERE SalesOrderID IN (43670, 43669, 43667, 43663)
    ORDER BY s.SalesOrderID, s.SalesOrderDetailID, s.OrderQty
    GO

    The above query gives us the following result. You can see that the LEAD and LAG functions now have an interval of two rows when returning results. Because of that two-row offset, the last two rows for the LEAD function and the first two rows for the LAG function return a NULL value. You can easily replace this NULL value with any other default value by passing a third parameter to LEAD and LAG. Let us run the following query.

    USE AdventureWorks
    GO
    SELECT s.SalesOrderID, s.SalesOrderDetailID, s.OrderQty,
        LEAD(SalesOrderDetailID, 2, 0) OVER (ORDER BY SalesOrderDetailID) LeadValue,
        LAG(SalesOrderDetailID, 2, 0) OVER (ORDER BY SalesOrderDetailID) LagValue
    FROM Sales.SalesOrderDetail s
    WHERE SalesOrderID IN (43670, 43669, 43667, 43663)
    ORDER BY s.SalesOrderID, s.SalesOrderDetailID, s.OrderQty
    GO

    The above query gives us the following result, where the NULLs are now replaced with the value 0. Just like any other analytic function, we can easily partition this function as well. Let us see the use of PARTITION BY in this clause.

    USE AdventureWorks
    GO
    SELECT s.SalesOrderID, s.SalesOrderDetailID, s.OrderQty,
        LEAD(SalesOrderDetailID) OVER (PARTITION BY SalesOrderID ORDER BY SalesOrderDetailID) LeadValue,
        LAG(SalesOrderDetailID) OVER (PARTITION BY SalesOrderID ORDER BY SalesOrderDetailID) LagValue
    FROM Sales.SalesOrderDetail s
    WHERE SalesOrderID IN (43670, 43669, 43667, 43663)
    ORDER BY s.SalesOrderID, s.SalesOrderDetailID, s.OrderQty
    GO

    The above query gives us the following result, where the data is now partitioned by SalesOrderID and the LEAD and LAG functions return the appropriate result within each window. As there are now smaller partitions in the query, you will see a higher presence of NULLs.
In a future blog post we will see how these functions compare to a SELF JOIN. Reference: Pinal Dave (http://blog.SQLAuthority.com) Filed under: Pinal Dave, PostADay, SQL, SQL Authority, SQL Function, SQL Query, SQL Scripts, SQL Server, SQL Tips and Tricks, T SQL, Technology
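    Editorial note, not part of the original post: to make the previous/next-row-by-offset idea concrete outside of T-SQL, here is an illustrative C# sketch that applies the same logic to an ordered in-memory sequence. The sample values and names are made up for illustration only.

    using System;
    using System.Linq;

    class LeadLagSketch
    {
        static void Main()
        {
            // Any ordered sequence stands in for the ORDER BY column.
            int[] ids = { 5, 8, 13, 21, 34 };

            var rows = ids.Select((id, i) => new
            {
                Value = id,
                Lead = i + 1 < ids.Length ? ids[i + 1] : (int?)null, // next row; null at the end
                Lag  = i - 1 >= 0 ? ids[i - 1] : (int?)null          // previous row; null at the start
            });

            foreach (var r in rows)
                Console.WriteLine("{0}: lead={1}, lag={2}", r.Value, r.Lead, r.Lag);
        }
    }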

    Read the article

  • Required Parameters [SSIS Denali]

    - by jamiet
    SQL Server Integration Services (SSIS) in its 2005 and 2008 incarnations expects you to set a property values within your package at runtime using Configurations. SSIS developers tend to have rather a lot of issues with SSIS configurations; in this blog post I am going to highlight one of those problems and how it has been alleviated in SQL Server code-named Denali.   A configuration is a property path/value pair that exists outside of a package, typically within SQL Server or in a collection of one or more configurations in a file called a .dtsConfig file. Within the package one defines a pointer to a configuration that says to the package “When you execute, go and get a configuration value from this location” and if all goes well the package will fetch that configuration value as it starts to execute and you will see something like the following in your output log: Information: 0x40016041 at Package: The package is attempting to configure from the XML file "C:\Configs\MyConfig.dtsConfig". Unfortunately things DON’T always go well, perhaps the .dtsConfig file is unreachable or the name of the SQL Sever holding the configuration value has been defined incorrectly – any one of a number of things can go wrong. In this circumstance you might see something like the following in your log output instead: Warning: 0x80012014 at Package: The configuration file "C:\Configs\MyConfig.dtsConfig" cannot be found. Check the directory and file name. The problem that I want to draw attention to here though is that your package will ignore the fact it can’t find the configuration and executes anyway. This is really really bad because the package will not be doing what it is supposed to do and worse, if you have not isolated your environments you might not even know about it. Can you imagine a package executing for months and all the while inserting data into the wrong server? Sounds ridiculous but I have absolutely seen this happen and the root cause was that no-one picked up on configuration warnings like the one above. Happily in SSIS code-named Denali this problem has gone away as configurations have been replaced with parameters. Each parameter has a property called ‘Required’: Any parameter with Required=True must have a value passed to it when the package executes. Any attempt to execute the package will result in an error. Here we see that error when attempting to execute using the SSMS UI: and similarly when executing using T-SQL: Error is: Msg 27184, Level 16, State 1, Procedure prepare_execution, Line 112 In order to execute this package, you need to specify values for the required parameters.   As you can see, SSIS code-named Denali has mechanisms built-in to prevent the problem I described at the top of this blog post. Specifying a Parameter required means that any packages in that project cannot execute until a value for the parameter has been supplied. This is a very good thing. I am loathe to make recommendations so early in the development cycle but right now I’m thinking that all Project Parameters should have Required=True, certainly any that are used to define external locations should be anyway. @Jamiet

    Read the article

  • SQL Server Transaction Marks: Restoring multiple databases to a common relative point

    - by Mladen Prajdic
    We're all familiar with the ability to restore a database to a point in time using the RESTORE WITH STOPAT statement. But what if we have multiple databases that are accessed from one application or are modifying each other? And over multiple instances? And all the databases have different workloads? And we want to restore all of the databases to some known common relative point? The catch here is that this common relative point isn't the same point in time for all databases. This common relative point in time might be now in DB1, now minus 1 hour in DB2 and yesterday in DB3. And we don't know the exact times. Let me introduce you to transaction marks. When we run a marked transaction using the WITH MARK option, a flag is set in the transaction log and a row is added to the msdb..logmarkhistory table. When restoring a transaction log backup we can restore to either before or after that marked transaction. The best thing is that we don't even need to have one database modifying another database. All we have to do is use a marked transaction with the same name in the different databases. Let's see how this works with an example. The code comments say what's going on.

    USE master
    GO
    CREATE DATABASE TestTxMark1
    GO
    USE TestTxMark1
    GO
    CREATE TABLE TestTable1 (ID INT, VALUE UNIQUEIDENTIFIER)
    -- insert some data into the table so we can have a starting point
    INSERT INTO TestTable1
    SELECT ROW_NUMBER() OVER(ORDER BY number) AS RN, NULL
    FROM master..spt_values
    ORDER BY RN
    SELECT * FROM TestTable1
    GO
    -- TAKE A FULL BACKUP of the database
    BACKUP DATABASE TestTxMark1 TO DISK = 'c:\TestTxMark1.bak'
    GO

    USE master
    GO
    CREATE DATABASE TestTxMark2
    GO
    USE TestTxMark2
    GO
    CREATE TABLE TestTable2 (ID INT, VALUE UNIQUEIDENTIFIER)
    -- insert some data into the table so we can have a starting point
    INSERT INTO TestTable2
    SELECT ROW_NUMBER() OVER(ORDER BY number) AS RN, NEWID()
    FROM master..spt_values
    ORDER BY RN
    SELECT * FROM TestTable2
    GO
    -- TAKE A FULL BACKUP of our database
    BACKUP DATABASE TestTxMark2 TO DISK = 'c:\TestTxMark2.bak'
    GO

    -- start a marked transaction that modifies both databases
    BEGIN TRAN TxDb WITH MARK
        -- update values from NULL to random value
        UPDATE TestTable1
        SET VALUE = NEWID();
        -- update first 100 values from random value
        -- to NULL in different DB
        UPDATE TestTxMark2.dbo.TestTable2
        SET VALUE = NULL
        WHERE ID <= 100;
    COMMIT
    GO

    -- some time goes by here
    -- with various database activity...

    -- We see two entries for marks in each database.
    -- This is just informational and has no bearing on the restore itself.
    SELECT * FROM msdb..logmarkhistory

    USE master
    GO
    -- create a log backup to restore to mark point
    BACKUP LOG TestTxMark1 TO DISK = 'c:\TestTxMark1.trn'
    GO
    -- drop the database so we can restore it back
    DROP DATABASE TestTxMark1
    GO

    USE master
    GO
    -- create a log backup to restore to mark point
    BACKUP LOG TestTxMark2 TO DISK = 'c:\TestTxMark2.trn'
    GO
    -- drop the database so we can restore it back
    DROP DATABASE TestTxMark2
    GO

    -- RESTORE THE DATABASE BACK BEFORE OUR TRANSACTION
    -- restore the full backup
    RESTORE DATABASE TestTxMark1 FROM DISK = 'c:\TestTxMark1.bak' WITH NORECOVERY;
    -- restore the log backup to the transaction mark
    RESTORE LOG TestTxMark1 FROM DISK = 'c:\TestTxMark1.trn'
    WITH RECOVERY,
         -- recover to state before the transaction
         STOPBEFOREMARK = 'TxDb';
         -- recover to state after the transaction
         -- STOPATMARK = 'TxDb';
    GO

    -- RESTORE THE DATABASE BACK BEFORE OUR TRANSACTION
    -- restore the full backup
    RESTORE DATABASE TestTxMark2 FROM DISK = 'c:\TestTxMark2.bak' WITH NORECOVERY;
    -- restore the log backup to the transaction mark
    RESTORE LOG TestTxMark2 FROM DISK = 'c:\TestTxMark2.trn'
    WITH RECOVERY,
         -- recover to state before the transaction
         STOPBEFOREMARK = 'TxDb';
         -- recover to state after the transaction
         -- STOPATMARK = 'TxDb';
    GO

    USE TestTxMark1
    -- we restored to time before the transaction
    -- so we have NULL values in our table
    SELECT * FROM TestTable1

    USE TestTxMark2
    -- we restored to time before the transaction
    -- so we DON'T have NULL values in our table
    SELECT * FROM TestTable2

    Transaction marks can be used like a crude sync mechanism for cross-database operations. With them we can mark our databases with a common "restore to" point, so we know we have a valid state between all databases to restore to.

    Read the article

  • WPF Databinding- Part 2 of 3

    - by Shervin Shakibi
    This is a follow-up to my previous post, WPF Databinding - Not Your Father's Databinding, Part 1 of 3. You can download the source code here: http://ssccinc.com/wpfdatabinding.zip

    Example 04

    In this example we demonstrate the use of default properties and also binding to an instance of an object which is part of a collection bound to its container. This is actually not as complicated as it sounds. First of all, let's take a look at our Employee class; notice we have overridden the ToString method, which will return the employee's first name, last name and employee number in parentheses:

    public override string ToString()
    {
        return String.Format("{0} {1} ({2})", FirstName, LastName, EmployeeNumber);
    }

    In our XAML we have set the ItemsSource of the list box to just "Binding", and the Grid that contains it has its DataContext set to a collection of our Employee objects.

    DataContext="{StaticResource myEmployeeList}">
    .....
    <ListBox Name="employeeListBox" ItemsSource="{Binding }" Grid.Row="0" />

    The ToString method for each instance will get executed, and the following is the result of it. If we did not have a ToString, the list box would look like this: Now let's take a look at the grid that will display the details when someone clicks on an item. The Grid has the following DataContext:

    DataContext="{Binding ElementName=employeeListBox, Path=SelectedItem}">

    which means it is bound to a specific instance of the Employee object, and within the grid we have textboxes that are bound to different properties of our class.

    <TextBox Grid.Row="0" Grid.Column="1" Text="{Binding Path=FirstName}" />
    <TextBox Grid.Row="1" Grid.Column="1" Text="{Binding Path=LastName}" />
    <TextBox Grid.Row="2" Grid.Column="1" Text="{Binding Path=Title}" />
    <TextBox Grid.Row="3" Grid.Column="1" Text="{Binding Path=Department}" />

    Example 05

    This project demonstrates the use of ObservableCollection and the INotifyPropertyChanged interface. Let's take a look at Employee.cs first; notice it implements the INotifyPropertyChanged interface. Now scroll down and notice that for each setter there is a call to the OnPropertyChanged method, which will fire the event notifying that the value of that specific property has been changed. Next, EmployeeList.cs: notice it is an ObservableCollection. Go ahead and set the startup project to example 05 and run it. Click on "Add a new employee" and the new employee should appear in the list box.

    Example 06

    This is a great example of IValueConverter; it's actually a two-for-one deal. Like most of my presentation demos I found this by "Binging" (formerly known as g---ing); unfortunately I can't find the original author now to give him or her the credit they deserve. Before we look at the code, let's run the app and look at the finished product: put 0 in Celsius and you should see the Fahrenheit textbox displaying 32 degrees (I know this is calculating correctly from my elementary school science class); also note the color changed to blue. Now put 100 in Celsius, which should give us 212 Fahrenheit, but now the color is red, indicating it is hot. Finally, put in 75 Fahrenheit and you should see 23.88 for Celsius, and the color now should be black. Basically IValueConverter allows different types to be bound; I'm sure you have had problems in the past trying to bind to Date values. First look at FahrenheitToCelciusConverter.cs and notice it implements IValueConverter. IValueConverter has two methods, Convert and ConvertBack. In each method we have the code for converting Fahrenheit to Celsius and vice versa. In our XAML, after we set a reference in our Window.Resources section, for txtCelsius we set the path to txtFahrenheit and the converter to an instance of our FahrenheitToCelciusConverter converter. There is no need to repeat this for txtFahrenheit since we have both a Convert and a ConvertBack.

    Text="{Binding UpdateSourceTrigger=PropertyChanged, Path=Text, ElementName=txtFahrenheit, Converter={StaticResource myTemperatureConverter}}"

    As mentioned earlier this is a twofer demo: in the second demo we are basically converting a double data type to a brush. Let's take a look at TemperatureToColorConverter and notice that in our Convert method, if the value is less than our cold temperature threshold we return a blue brush, and if it is higher than our hot temperature threshold we return a red brush. Since we don't have to convert a brush back to a double value in our example, ConvertBack is not implemented. Take time to go through these three examples and I hope you have a better understanding of databinding, ObservableCollection and IValueConverter. In the next blog posting we will talk about ValidationRule, DataTemplates and DataTemplate triggers.
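    Editorial illustration, not the author's source: the post refers to FahrenheitToCelciusConverter.cs without reproducing its body in this excerpt, so the following is only a guess at the shape such a converter might take. A minimal C# IValueConverter along these lines could look like:

    using System;
    using System.Globalization;
    using System.Windows.Data;

    // Hypothetical sketch of a Fahrenheit/Celsius converter; the parsing
    // and rounding choices here are assumptions, not the original code.
    public class FahrenheitToCelciusConverter : IValueConverter
    {
        // Converts the bound Fahrenheit text (the source) to a Celsius value.
        public object Convert(object value, Type targetType, object parameter, CultureInfo culture)
        {
            double fahrenheit;
            if (double.TryParse(value as string, NumberStyles.Any, culture, out fahrenheit))
                return ((fahrenheit - 32) * 5.0 / 9.0).ToString("0.##", culture);
            return value; // leave unparsable input untouched
        }

        // Converts the Celsius text (the target) back to Fahrenheit.
        public object ConvertBack(object value, Type targetType, object parameter, CultureInfo culture)
        {
            double celsius;
            if (double.TryParse(value as string, NumberStyles.Any, culture, out celsius))
                return (celsius * 9.0 / 5.0 + 32).ToString("0.##", culture);
            return value;
        }
    }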

    Read the article

  • CacheAdapter 2.4 – Bug fixes and minor functional update

    - by Glav
    Note: If you are unfamiliar with the CacheAdapter library and what it does, you can read all about its awesome ability to utilise memory, Asp.Net Web, Windows Azure AppFabric and memcached caching implementations via a single unified, simple-to-use API from here and here. The CacheAdapter library is receiving an update to version 2.4 and is currently available on Nuget here.

    Update: The CacheAdapter has actually just had a minor revision to 2.4.1. This significantly increases the performance and reliability of the memcached scenario under more extreme loads. General to moderate usage won't see any noticeable difference though.

    Bugs

    This latest version fixes a bug that is only present in the memcached implementation and is only seen in rare, intermittent circumstances (making it particularly hard to find). The bug caused a cache node to be removed from the farm when deserialization errors occurred on cached objects because the serialised data was not read from the stream in its entirety. The code also contains enhancements to better surface serialization exceptions to aid in the debugging process. This is also specifically targeted at the memcached implementation, which is important when moving from something like the memory or Asp.Net Web caching mechanisms to memcached, where the serialization rules are not as lenient. There are a few other minor bug fixes, code cleanup and a little refactoring.

    Minor feature addition

    In addition to this bug fix, many people have asked for a single setting to either enable or disable the cache. In this version, you can disable the cache by setting the IsCacheEnabled flag to false in the application configuration file, something like the example below:

    <Glav.CacheAdapter.MainConfig>
      <setting name="CacheToUse" serializeAs="String">
        <value>memcached</value>
      </setting>
      <setting name="DistributedCacheServers" serializeAs="String">
        <value>localhost:11211</value>
      </setting>
      <setting name="IsCacheEnabled" serializeAs="String">
        <value>False</value>
      </setting>
    </Glav.CacheAdapter.MainConfig>

    Your reasons to use this feature may vary (perhaps some performance testing or problem diagnosis). At any rate, disabling the cache will cause every attempt to retrieve data from the cache to result in a cache miss and return null. If you are using the ICacheProvider with the delegate/Func<T> syntax to populate the cache, this delegate method will get executed every single time. For example, when the cache is disabled, the following delegate/Func<T> code will be executed every time:

    var data1 = cacheProvider.Get<SomeData>("cache-key", DateTime.Now.AddHours(1), () =>
    {
        // With the cache disabled, this data access code is executed every attempt to
        // get this data via the CacheProvider.
        var someData = new SomeData() { SomeText = "cache example1", SomeNumber = 1 };
        return someData;
    });

    One final note: if you access the cache directly via the ICache instance, instead of the higher-level ICacheProvider API, you bypass this setting and still access the underlying cache implementation. Only the ICacheProvider instance observes the IsCacheEnabled setting. Thanks to those individuals who have used this library and provided feedback. If you have any suggestions or ideas, please submit them to the issue register on bitbucket (which is where you can grab all the source code from too).

    Read the article

  • Smarty Tag help [closed]

    - by ntechi
    I am using an engine, where for forums Vbulletin is used and for Other thiiings social engine 3 is used, This was done by some professionals In one of the page I am having popular tags(In Social Engine) Its too long I want shortened it, How can I do it? here is my code {* SHOW POPULAR TAGS START *} <tr> <td align="left" valign="top"><img src="images/spacer.gif" alt="" width="1" height="10" /></td> </tr> <tr> <td align="left" valign="top"><table width="220" border="0" align="left" cellpadding="0" cellspacing="0"> <tr> <td width="12" align="left" valign="top"><img src="images/blog_belowtl.jpg" alt="" width="12" height="10" /></td> <td width="196" align="left" valign="top" background="images/blog_belowtbg.jpg"><img src="images/spacer.gif" width="1" height="10" /></td> <td width="12" align="right" valign="top"><img src="images/blog_belowtr.jpg" alt="" width="12" height="10" /></td> </tr> <tr> <td align="left" valign="top" background="images/blog_belowlbg.jpg" style="background-repeat: repeat-y;">&nbsp;</td> <td align="left" valign="top"><table width="196" border="0" align="left" cellpadding="0" cellspacing="0"> <tr> <td align="left" valign="top" > {*<div class="articletags" > {foreach from=$popular_tags item=poptag} <a class="tag{$poptag.class}" href="articles.php?tag={$poptag.name}">{$poptag.name}</a> {/foreach} </div>*} {foreach from=$popular_tags item=poptag} {if $poptag.count == '1' } {assign var=cssClass value='tahoma15_cloud'} {elseif $poptag.count == '2' } {assign var=cssClass value='tahoma12bold_cloud'} {elseif $poptag.count == '3'} {assign var=cssClass value='tahoma13bold_cloud'} {elseif $poptag.count == '4'} {assign var=cssClass value='tahoma14bold_cloud'} {elseif $poptag.count == '5'} {assign var=cssClass value='tahoma15_cloud'} {else} {assign var=cssClass value='tahoma18bold_cloud'} {/if} <span class="{$cssClass}"><a href="articles.php?tag={$poptag.name}" class="{$cssClass}">{$poptag.name}</a></span> {/foreach} </td> </tr> </table></td> <td align="left" valign="top" background="images/blog_belowrbg.jpg" style="background-repeat: repeat-y;">&nbsp;</td> </tr> <tr> <td align="left" valign="top"><img src="images/blog_belowbl.jpg" alt="" width="12" height="10" /></td> <td align="left" valign="top" background="images/blog_belowbbg.jpg"><img src="images/spacer.gif" alt="" width="1" height="10" /></td> <td align="right" valign="top"><img src="images/blog_belowbr.jpg" alt="" width="12" height="10" /></td> </tr> </table></td> </tr> {* SHOW POPULAR TAGS END *} You can check the lenght of this tag, on this link http://www.managementparadise.com/articles.php Currently it is displaying very long on the widget area I am new on this, so any help will be appreciated :)

    Read the article

  • How to layout class definition when inheriting from multiple interfaces

    - by gabr
    Given two interface definitions ... IOmniWorkItem = interface ['{3CE2762F-B7A3-4490-BF22-2109C042EAD1}'] function GetData: TOmniValue; function GetResult: TOmniValue; function GetUniqueID: int64; procedure SetResult(const value: TOmniValue); // procedure Cancel; function DetachException: Exception; function FatalException: Exception; function IsCanceled: boolean; function IsExceptional: boolean; property Data: TOmniValue read GetData; property Result: TOmniValue read GetResult write SetResult; property UniqueID: int64 read GetUniqueID; end; IOmniWorkItemEx = interface ['{3B48D012-CF1C-4B47-A4A0-3072A9067A3E}'] function GetOnWorkItemDone: TOmniWorkItemDoneDelegate; function GetOnWorkItemDone_Asy: TOmniWorkItemDoneDelegate; procedure SetOnWorkItemDone(const Value: TOmniWorkItemDoneDelegate); procedure SetOnWorkItemDone_Asy(const Value: TOmniWorkItemDoneDelegate); // property OnWorkItemDone: TOmniWorkItemDoneDelegate read GetOnWorkItemDone write SetOnWorkItemDone; property OnWorkItemDone_Asy: TOmniWorkItemDoneDelegate read GetOnWorkItemDone_Asy write SetOnWorkItemDone_Asy; end; ... what are your ideas of laying out class declaration that inherits from both of them? My current idea (but I don't know if I'm happy with it): TOmniWorkItem = class(TInterfacedObject, IOmniWorkItem, IOmniWorkItemEx) strict private FData : TOmniValue; FOnWorkItemDone : TOmniWorkItemDoneDelegate; FOnWorkItemDone_Asy: TOmniWorkItemDoneDelegate; FResult : TOmniValue; FUniqueID : int64; strict protected procedure FreeException; protected //IOmniWorkItem function GetData: TOmniValue; function GetResult: TOmniValue; function GetUniqueID: int64; procedure SetResult(const value: TOmniValue); protected //IOmniWorkItemEx function GetOnWorkItemDone: TOmniWorkItemDoneDelegate; function GetOnWorkItemDone_Asy: TOmniWorkItemDoneDelegate; procedure SetOnWorkItemDone(const Value: TOmniWorkItemDoneDelegate); procedure SetOnWorkItemDone_Asy(const Value: TOmniWorkItemDoneDelegate); public constructor Create(const data: TOmniValue; uniqueID: int64); destructor Destroy; override; public //IOmniWorkItem procedure Cancel; function DetachException: Exception; function FatalException: Exception; function IsCanceled: boolean; function IsExceptional: boolean; property Data: TOmniValue read GetData; property Result: TOmniValue read GetResult write SetResult; property UniqueID: int64 read GetUniqueID; public //IOmniWorkItemEx property OnWorkItemDone: TOmniWorkItemDoneDelegate read GetOnWorkItemDone write SetOnWorkItemDone; property OnWorkItemDone_Asy: TOmniWorkItemDoneDelegate read GetOnWorkItemDone_Asy write SetOnWorkItemDone_Asy; end; As noted in answers, composition is a good approach for this example but I'm not sure it applies in all cases. Sometimes I'm using multiple inheritance just to split read and write access to some property into public (typically read-only) and private (typically write-only) part. Does composition still apply here? I'm not really sure as I would have to move the property in question out from the main class and I'm not sure that's the correct way to do it. 
Example: // public part of the interface interface IOmniWorkItemConfig = interface function OnExecute(const aTask: TOmniBackgroundWorkerDelegate): IOmniWorkItemConfig; function OnRequestDone(const aTask: TOmniWorkItemDoneDelegate): IOmniWorkItemConfig; function OnRequestDone_Asy(const aTask: TOmniWorkItemDoneDelegate): IOmniWorkItemConfig; end; // private part of the interface IOmniWorkItemConfigEx = interface ['{42CEC5CB-404F-4868-AE81-6A13AD7E3C6B}'] function GetOnExecute: TOmniBackgroundWorkerDelegate; function GetOnRequestDone: TOmniWorkItemDoneDelegate; function GetOnRequestDone_Asy: TOmniWorkItemDoneDelegate; end; // implementing class TOmniWorkItemConfig = class(TInterfacedObject, IOmniWorkItemConfig, IOmniWorkItemConfigEx) strict private FOnExecute : TOmniBackgroundWorkerDelegate; FOnRequestDone : TOmniWorkItemDoneDelegate; FOnRequestDone_Asy: TOmniWorkItemDoneDelegate; public constructor Create(defaults: IOmniWorkItemConfig = nil); public //IOmniWorkItemConfig function OnExecute(const aTask: TOmniBackgroundWorkerDelegate): IOmniWorkItemConfig; function OnRequestDone(const aTask: TOmniWorkItemDoneDelegate): IOmniWorkItemConfig; function OnRequestDone_Asy(const aTask: TOmniWorkItemDoneDelegate): IOmniWorkItemConfig; public //IOmniWorkItemConfigEx function GetOnExecute: TOmniBackgroundWorkerDelegate; function GetOnRequestDone: TOmniWorkItemDoneDelegate; function GetOnRequestDone_Asy: TOmniWorkItemDoneDelegate; end;
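    Editorial aside, not from the question or its answers: for readers more comfortable outside Delphi, here is a purely conceptual C# sketch of the composition idea mentioned above, with made-up interface and class names. The outer class implements both interfaces but forwards one of them to a small composed part, so each declaration stays focused.

    using System;

    public interface IWorkItem
    {
        object Data { get; }
    }

    public interface IWorkItemEx
    {
        void SetDoneHandler(Action handler);
    }

    // Implements only the "extension" interface on its own.
    class WorkItemExPart : IWorkItemEx
    {
        private Action done;
        public void SetDoneHandler(Action handler) { done = handler; }
        public void RaiseDone() { if (done != null) done(); }
    }

    // Implements both interfaces, but keeps its own declaration short by
    // forwarding every IWorkItemEx member to the composed part.
    class WorkItem : IWorkItem, IWorkItemEx
    {
        private readonly WorkItemExPart ex = new WorkItemExPart();

        public object Data { get; private set; }

        public void SetDoneHandler(Action handler)
        {
            ex.SetDoneHandler(handler);
        }
    }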

    Read the article

  • Why I lose certain values from array?

    - by blankon91
    I've the code to input the values dynamically, when I use it to add the values for the first time, it's fine, but when I want to edit it, the old values didn't inserted on sql query but the new values inserted Here is the example: http://i.stack.imgur.com/dwugS.jpg here is the code: ============the function================= sub ShowItemfgEdit(query,selItemName,defValue,num,cdisable) response.write "<select " & cdisable & " num=""" & num & """ id=""itemCombo"" name=""" & selItemName & """ class=""label"" onchange=""varUsage.ChangeSatuanDt(this)"">" if NOT query.BOF then query.moveFirst WHILE NOT query.EOF tulis = "" if trim(defValue) = trim(query("ckdbarang")) then tulis = "selected" end if response.write "<option value=""" & trim(query("ckdbarang")) & """" & tulis & ">" & trim(query("ckdbarang")) & " - " & trim(query("vnamabarang")) query.moveNext WEND end if response.write "</select>" end sub ============calling the function================ <td class="rb" align="left"><% call ShowItemfgEdit(qGetItemfgGrp,"fitem",qGetUsageDt("ckdfg"),countLine,readonlyfg) %></td> ==============post the value====================== <input type="hidden" name="fitem" value=""> ================get the value=================== for i = 1 to request.form("hdnOrderNum") if request.form("selOrdItem_" & i) <> "" then 'bla...blaa...blaa... ckdfg = trim(request.form("fitem_" & i)) '<==here is the problem objCommand.commandText = "INSERT INTO IcTrPakaiDt " &_ "(id, id_h, ckdunitkey, cnopakai, dtglpakai, ckdbarang, ckdgudang, nqty1, nqty2, csatuan1, csatuan2, nqtypakai, csatuanpakai, vketerangan, cJnsPakai, ckdprodkey, ckdfg, ncountstart, ncountstop, ncounttotal) " &_ " VALUES " &_ " (" & idDt & ",'" & idHd & "','" & selLoc & "','" & nopakai & "','" & cDate(request.form("hdnUsageDate")) & "','" & trim(ckdbarang) & "','" & trim(ckdgudang) & "'," & nqty1 & "," & nqty2 & ",'" & trim(csatuan1) & "','" & trim(csatuan2) & "'," & nqtypakai & ",'" & csatuanpakai & "','" & trim(keteranganItem) & "','" & trim(cjnspakai) & "','" & ckdprodkey & "','" &ckdfg& "'," & cnt1 & "," & cnt2 & "," & totalcnt & ")" set qInsertPakaiDt = objCommand.Execute end if next problem: old value of ckdfg didn't inserted to query, but the new value inserted. How to fix this bug?

    Read the article

  • WPF TreeViewItem + Change the Highlight Color

    - by flurreh
    Hello, I have got a TreeView with a HierarchicalDataTemplate. <HierarchicalDataTemplate x:Key="treeViewItemTemplate" ItemsSource="{Binding GetChildren}"> <DockPanel Margin="0,8,8,0"> <Image Source="{Binding GetImage}" Width="16" Height="16" /> <local:MonitorTriStateCheckBox Margin="4,0,0,0" IsChecked="{Binding IsChecked}" Click="CheckBox_Clicked" Tag="{Binding UniqueKey}" Style="{DynamicResource CheckBox}"></local:MonitorTriStateCheckBox> <TextBlock Margin="4,0,0,0" Text="{Binding Name}" Style="{DynamicResource TextBlock}"> </TextBlock> </DockPanel> <HierarchicalDataTemplate.Triggers> <Trigger Property="TreeViewItem.IsSelected" Value="True"> <Setter Property="TreeViewItem.Background" Value="Orange" /> </Trigger> </HierarchicalDataTemplate.Triggers> </HierarchicalDataTemplate> As you can see in the code, i set the is selected Trigger of the TreeViewItem, but this has no effect. I alos tried this: <TreeView.ItemContainerStyle> <Style TargetType="{x:Type TreeViewItem}"> <Setter Property="IsExpanded" Value="{Binding IsExpanded, Mode=TwoWay}" /> <Setter Property="Visibility" Value="{Binding IsVisible, Mode=TwoWay}" /> <Style.Triggers> <Trigger Property="IsSelected" Value="True"> <Setter Property="Background" Value="Orange" /> </Trigger> </Style.Triggers> </Style> </TreeView.ItemContainerStyle> But that had no effect either. Has anyone got an idea what to do, to change the hightlight color of a TreeViewItem?
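    Editorial aside, not part of the original question: with the default Windows theme template, the TreeViewItem paints its selection using the system highlight brushes looked up as dynamic resources, which is one reason setting Background from a template trigger often has no visible effect. A hedged C# sketch of one commonly suggested workaround, assuming the TreeView instance is reachable from code-behind (the variable name is a placeholder):

    using System.Windows;
    using System.Windows.Media;

    // Override the brushes the default TreeViewItem template reads for the
    // selected state; scoping them to this TreeView's resources keeps the
    // rest of the application unaffected.
    myTreeView.Resources[SystemColors.HighlightBrushKey] =
        new SolidColorBrush(Colors.Orange);
    myTreeView.Resources[SystemColors.HighlightTextBrushKey] =
        new SolidColorBrush(Colors.Black);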

    Read the article

  • delete element from xml using LINQ

    - by Shishir
    Hello, I have an XML file like:

    <starting>
      <start>
        <site>mushfiq.com</site>
        <site>mee.con</site>
        <site>ttttt.co</site>
        <site>jkjhkhjkh</site>
        <site>jhkhjkjhkhjkhjkjhkh</site>
        <site>dasdasdasdasdasdas</site>
      </start>
    </starting>

    Now I need to delete any <site> element whose value is given from a textbox. Here is my code:

    XDocument doc = XDocument.Load(@"AddedSites.xml");
    var deleteQuery = from r in doc.Descendants("start")
                      where r.Element("site").Value == txt.Text.Trim()
                      select r;
    foreach (var qry in deleteQuery)
    {
        qry.Element("site").Remove();
    }
    doc.Save(@"AddedSites.xml");

    If I put the value of the first element in the textbox it can delete it, but if I put in any element's value except the first one it is not able to delete it! I need to be able to put in the value of any element, whether it is the 2nd, 3rd, 4th and so on. Can anyone help me out? Thanks in advance!
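    Editorial note, not part of the original question: a likely cause is that r.Element("site") only ever returns the first <site> child of each <start>, so only the first value can match and be removed. A minimal C# sketch of an alternative query, assuming the same file and textbox names used above:

    using System.Linq;
    using System.Xml.Linq;

    XDocument doc = XDocument.Load(@"AddedSites.xml");

    // Select every <site> element whose value matches the textbox,
    // materialize the matches first, then remove them from the tree.
    var matches = doc.Descendants("start")
                     .Elements("site")
                     .Where(s => s.Value.Trim() == txt.Text.Trim())
                     .ToList();

    foreach (var site in matches)
    {
        site.Remove();
    }

    doc.Save(@"AddedSites.xml");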

    Read the article

  • JAVA: ICEFACES: component <ice:selectInputDate> mapped on a "java.util.Calendar" field

    - by blummihaela
    Does anybody know how the component <ice:selectInputDate> can be mapped to a java.util.Calendar field instead of java.util.Date? I am using the <ice:selectInputDate> component from IceFaces version 1.8.2. This component requires being bound to a java.util.Date property. For example, with value="#{bean.myDate}", the myDate field must be of type java.util.Date. But I need my date field to be of type java.util.Calendar.

    My trials: I have tried to use a standard converter and a custom one.

    Standard one: <f:convertDateTime pattern="dd/MM/yyyy" /> formats the value correctly in the GUI, but when it is set on the property bean.myDate of type Calendar I get the following error message:

    [5/3/10 12:09:18:398 EEST] 00000021 lifecycle I WARNING: FacesMessage(s) have been enqueued, but may not have been displayed. sourceId=j_id12:j_id189:myDate[severity=(ERROR 2), summary=(/WEB-INF/xhtml............file.xhtml @507,51 value="#{bean.myDate}": Can't set property 'myDate' on class 'bean' to value '5/11/10 3:00 AM'.), detail=(/WEB-INF/xhtml........file.xhtml @507,51 value="#{bean.myDate}": Can't set property 'myDate' on class '...bean...' to value '5/11/10 3:00 AM'.)]

    Custom one: <f:converter converterId="c2d"/>, where getAsObject returns the java.util.Calendar object built from the submitted String, and getAsString receives an Object and returns the formatted String. NOTE: getAsString was hacked, so that instead of expecting java.util.Calendar (which would make it complementary to getAsObject), it expects a java.util.Date provided as a parameter by ice:selectInputDate and returns the formatted String. But still an error message occurs:

    [5/3/10 12:55:34:299 EEST] 0000001f D2DFaceletVie E com.icesoft.faces.facelets.D2DFaceletViewHandler renderResponse Problem in renderResponse: java.util.GregorianCalendar incompatible with java.util.Date java.lang.ClassCastException: java.util.GregorianCalendar incompatible with java.util.Date at com.icesoft.faces.component.selectinputdate.SelectInputDate.getTextToRender(SelectInputDate.java:252)

    Any hint is very useful! Thanks, Mihaela

    Read the article

  • Wpf combobox selection change breaks Datatrigger

    - by biju
    Hi, I am trying to set the selected value of a combobox from a style trigger. It works as long as we don't manually change any value in the combobox, but it stops working altogether after manually changing the selection. How can I solve this? A code sample is attached. Please do help.

    <Window x:Class="InputGesture.Window2"
        xmlns="http://schemas.microsoft.com/winfx/2006/xaml/presentation"
        xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml"
        xmlns:local="clr-namespace:InputGesture"
        Title="Window2" Height="300" Width="300" Name="Sample">
      <Window.Resources>
        <Style TargetType="{x:Type ComboBox}">
          <Setter Property="ComboBox.SelectedValue" Value="1"/>
          <Style.Triggers>
            <DataTrigger Binding="{Binding ElementName=chk,Path=IsChecked}" Value="True">
              <Setter Property="ComboBox.IsEnabled" Value="False"/>
              <Setter Property="ComboBox.SelectedValue" Value="2"/>
            </DataTrigger>
          </Style.Triggers>
        </Style>
      </Window.Resources>
      <StackPanel>
        <CheckBox Name="chk" Height="23"/>
        <ComboBox Name="cmb" Height="23" DisplayMemberPath="Name" SelectedValuePath="Id"
                  ItemsSource="{Binding ElementName=Sample,Path=DT}">
        </ComboBox>
        <Button Height="23" Click="Button_Click"/>
      </StackPanel>
    </Window>
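    Editorial note, not part of the original question: in WPF's dependency property value precedence, a locally set value (which is what a manual selection writes to SelectedValue) outranks style setters and style triggers, so once the user picks an item the trigger's setter can no longer take effect. A hedged C# sketch of one possible workaround, where the choice of when to call ClearValue is an assumption for illustration:

    using System.Windows.Controls.Primitives;

    // Clearing the locally set value lets the style trigger drive
    // ComboBox.SelectedValue again; call this before the trigger is
    // expected to apply (for example, before checking the CheckBox).
    cmb.ClearValue(Selector.SelectedValueProperty);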

    Read the article

  • feedparser - various errors

    - by Eiriks
    I need feedparser (see http://www.feedparser.org) for a project, and want to keep third-party modules in a separate folder. I did this by adding a folder to my Python path and putting the relevant modules there, among them feedparser. The first attempt to import feedparser resulted in:

    import feedparser
    Traceback (most recent call last):
      File "", line 1, in
      File "/home/users/me/modules/feedparser.py", line 1
        ed socket timeout; added support for chardet library
        ^
    SyntaxError: invalid syntax

    I found the text "socket timeout; added..." in the comments at the bottom of the file, removed these comments, and tried again:

    import feedparser
    Traceback (most recent call last):
      File "", line 1, in
      File "/home/users/me/modules/feedparser.py", line 1
        = [(key, value) for key, value in attrs if key in self.acceptable_attributes]
        ^
    IndentationError: unexpected indent

    Ok, so some indent error. I made sure the indents in the function in question were ok (moved some line breaks down to no indent). And tried again:

    import feedparser
    Traceback (most recent call last):
      File "", line 1, in
      File "/home/users/me/modules/feedparser.py", line 1
        , value) for key, value in attrs if key in self.acceptable_attributes]
        ^
    SyntaxError: invalid syntax

    As much as I google, I cannot find anything wrong with the syntax:

    def unknown_starttag(self, tag, attrs):
        if not tag in self.acceptable_elements:
            if tag in self.unacceptable_elements_with_end_tag:
                self.unacceptablestack += 1
            return
        attrs = self.normalize_attrs(attrs)
        attrs = [(key, value) for key, value in attrs if key in self.acceptable_attributes]
        _BaseHTMLProcessor.unknown_starttag(self, tag, attrs)

    Now what? Is my approach all wrong? Why do I keep producing these errors in a module that seems so well tested and trusted?

    Read the article

< Previous Page | 281 282 283 284 285 286 287 288 289 290 291 292  | Next Page >