Search Results

Search found 39751 results on 1591 pages for 'add'.


  • Bridge or Factory and How

    - by Chris
    I'm trying to learn patterns and I've got a job that is screaming for a pattern, I just know it but I can't figure it out. I know the filter type is something that can be abstracted and possibly bridged. I'M NOT LOOKING FOR A CODE REWRITE JUST SUGGESTIONS. I'm not looking for someone to do my job. I would like to know how patterns could be applied to this example. using System; using System.Collections.Generic; using System.Linq; using System.Text; using System.Data; using System.IO; using System.Xml; using System.Text.RegularExpressions; namespace CopyTool { class CopyJob { public enum FilterType { TextFilter, RegExFilter, NoFilter } public FilterType JobFilterType { get; set; } private string _jobName; public string JobName { get { return _jobName; } set { _jobName = value; } } private int currentIndex; public int CurrentIndex { get { return currentIndex; } } private DataSet ds; public int MaxJobs { get { return ds.Tables["Job"].Rows.Count; } } private string _filter; public string Filter { get { return _filter; } set { _filter = value; } } private string _fromFolder; public string FromFolder { get { return _fromFolder; } set { if (Directory.Exists(value)) { _fromFolder = value; } else { throw new DirectoryNotFoundException(String.Format("Folder not found: {0}", value)); } } } private List<string> _toFolders; public List<string> ToFolders { get { return _toFolders; } } public CopyJob() { Initialize(); } private void Initialize() { if (ds == null) { ds = new DataSet(); } ds.ReadXml(Properties.Settings.Default.ConfigLocation); LoadValues(0); } public void Execute() { ExecuteJob(FromFolder, _toFolders, Filter, JobFilterType); } public void ExecuteAll() { string OrigPath; List<string> DestPaths; string FilterText; FilterType FilterWay; foreach (DataRow rw in ds.Tables["Job"].Rows) { OrigPath = rw["FromFolder"].ToString(); FilterText = rw["FilterText"].ToString(); switch (rw["FilterType"].ToString()) { case "TextFilter": FilterWay = FilterType.TextFilter; break; case "RegExFilter": FilterWay = FilterType.RegExFilter; break; default: FilterWay = FilterType.NoFilter; break; } DestPaths = new List<string>(); foreach (DataRow crw in rw.GetChildRows("Job_ToFolder")) { DestPaths.Add(crw["FolderPath"].ToString()); } ExecuteJob(OrigPath, DestPaths, FilterText, FilterWay); } } private void ExecuteJob(string OrigPath, List<string> DestPaths, string FilterText, FilterType FilterWay) { FileInfo[] files; switch (FilterWay) { case FilterType.RegExFilter: files = GetFilesByRegEx(new Regex(FilterText), OrigPath); break; case FilterType.TextFilter: files = GetFilesByFilter(FilterText, OrigPath); break; default: files = new DirectoryInfo(OrigPath).GetFiles(); break; } foreach (string fld in DestPaths) { CopyFiles(files, fld); } } public void MoveToJob(int RecordNumber) { Save(); LoadValues(RecordNumber - 1); } public void AddToFolder(string folderPath) { if (Directory.Exists(folderPath)) { _toFolders.Add(folderPath); } else { throw new DirectoryNotFoundException(String.Format("Folder not found: {0}", folderPath)); } } public void DeleteToFolder(int index) { _toFolders.RemoveAt(index); } public void Save() { DataRow rw = ds.Tables["Job"].Rows[currentIndex]; rw["JobName"] = _jobName; rw["FromFolder"] = _fromFolder; rw["FilterText"] = _filter; switch (JobFilterType) { case FilterType.RegExFilter: rw["FilterType"] = "RegExFilter"; break; case FilterType.TextFilter: rw["FilterType"] = "TextFilter"; break; default: rw["FilterType"] = "NoFilter"; break; } DataRow[] ToFolderRows = 
ds.Tables["Job"].Rows[currentIndex].GetChildRows("Job_ToFolder"); for (int i = 0; i <= ToFolderRows.GetUpperBound(0); i++) { ToFolderRows[i].Delete(); } foreach (string fld in _toFolders) { DataRow ToFolderRow = ds.Tables["ToFolder"].NewRow(); ToFolderRow["JobId"] = ds.Tables["Job"].Rows[currentIndex]["JobId"]; ToFolderRow["Job_Id"] = ds.Tables["Job"].Rows[currentIndex]["Job_Id"]; ToFolderRow["FolderPath"] = fld; ds.Tables["ToFolder"].Rows.Add(ToFolderRow); } } public void Delete() { ds.Tables["Job"].Rows.RemoveAt(currentIndex); LoadValues(currentIndex++); } public void MoveNext() { Save(); currentIndex++; LoadValues(currentIndex); } public void MovePrevious() { Save(); currentIndex--; LoadValues(currentIndex); } public void MoveFirst() { Save(); LoadValues(0); } public void MoveLast() { Save(); LoadValues(ds.Tables["Job"].Rows.Count - 1); } public void CreateNew() { Save(); int MaxJobId = 0; Int32.TryParse(ds.Tables["Job"].Compute("Max(JobId)", "").ToString(), out MaxJobId); DataRow rw = ds.Tables["Job"].NewRow(); rw["JobId"] = MaxJobId + 1; ds.Tables["Job"].Rows.Add(rw); LoadValues(ds.Tables["Job"].Rows.IndexOf(rw)); } public void Commit() { Save(); ds.WriteXml(Properties.Settings.Default.ConfigLocation); } private void LoadValues(int index) { if (index > ds.Tables["Job"].Rows.Count - 1) { currentIndex = ds.Tables["Job"].Rows.Count - 1; } else if (index < 0) { currentIndex = 0; } else { currentIndex = index; } DataRow rw = ds.Tables["Job"].Rows[currentIndex]; _jobName = rw["JobName"].ToString(); _fromFolder = rw["FromFolder"].ToString(); _filter = rw["FilterText"].ToString(); switch (rw["FilterType"].ToString()) { case "TextFilter": JobFilterType = FilterType.TextFilter; break; case "RegExFilter": JobFilterType = FilterType.RegExFilter; break; default: JobFilterType = FilterType.NoFilter; break; } if (_toFolders == null) _toFolders = new List<string>(); _toFolders.Clear(); foreach (DataRow crw in rw.GetChildRows("Job_ToFolder")) { AddToFolder(crw["FolderPath"].ToString()); } } private static FileInfo[] GetFilesByRegEx(Regex rgx, string locPath) { DirectoryInfo d = new DirectoryInfo(locPath); FileInfo[] fullFileList = d.GetFiles(); List<FileInfo> filteredList = new List<FileInfo>(); foreach (FileInfo fi in fullFileList) { if (rgx.IsMatch(fi.Name)) { filteredList.Add(fi); } } return filteredList.ToArray(); } private static FileInfo[] GetFilesByFilter(string filter, string locPath) { DirectoryInfo d = new DirectoryInfo(locPath); FileInfo[] fi = d.GetFiles(filter); return fi; } private void CopyFiles(FileInfo[] files, string destPath) { foreach (FileInfo fi in files) { bool success = false; int i = 0; string copyToName = fi.Name; string copyToExt = fi.Extension; string copyToNameWithoutExt = Path.GetFileNameWithoutExtension(fi.FullName); while (!success && i < 100) { i++; try { if (File.Exists(Path.Combine(destPath, copyToName))) throw new CopyFileExistsException(); File.Copy(fi.FullName, Path.Combine(destPath, copyToName)); success = true; } catch (CopyFileExistsException ex) { copyToName = String.Format("{0} ({1}){2}", copyToNameWithoutExt, i, copyToExt); } } } } } public class CopyFileExistsException : Exception { public string Message; } }
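
One pattern that maps naturally onto the repeated FilterType switch statements above is Strategy, fronted by a simple factory: each filter becomes a small class behind a common interface, and only the factory still knows about the enum. The sketch below is just an illustration of that idea against the same FileInfo/Regex types the question already uses — the IFileFilter interface and the class names are invented for the example, and it is not meant as a rewrite of CopyJob.

    using System.IO;
    using System.Linq;
    using System.Text.RegularExpressions;

    // Strategy: one interface for "give me the files that pass this filter".
    public interface IFileFilter
    {
        FileInfo[] GetFiles(string path);
    }

    public class NoFilter : IFileFilter
    {
        public FileInfo[] GetFiles(string path)
        {
            return new DirectoryInfo(path).GetFiles();
        }
    }

    public class TextFilter : IFileFilter
    {
        private readonly string _pattern;
        public TextFilter(string pattern) { _pattern = pattern; }

        public FileInfo[] GetFiles(string path)
        {
            // Same behavior as GetFilesByFilter: let GetFiles apply the wildcard pattern.
            return new DirectoryInfo(path).GetFiles(_pattern);
        }
    }

    public class RegexFilter : IFileFilter
    {
        private readonly Regex _regex;
        public RegexFilter(string pattern) { _regex = new Regex(pattern); }

        public FileInfo[] GetFiles(string path)
        {
            // Same behavior as GetFilesByRegEx: keep only names the expression matches.
            return new DirectoryInfo(path).GetFiles()
                .Where(f => _regex.IsMatch(f.Name)).ToArray();
        }
    }

    // Simple factory: the only place that still switches on the FilterType enum from CopyJob.
    public static class FileFilterFactory
    {
        public static IFileFilter Create(CopyJob.FilterType type, string filterText)
        {
            switch (type)
            {
                case CopyJob.FilterType.RegExFilter: return new RegexFilter(filterText);
                case CopyJob.FilterType.TextFilter: return new TextFilter(filterText);
                default: return new NoFilter();
            }
        }
    }

With something like that in place, ExecuteJob shrinks to one line for the file selection — roughly FileFilterFactory.Create(FilterWay, FilterText).GetFiles(OrigPath) — and adding a new filter type no longer touches the copy logic.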

    Read the article

  • How to set message when I get Exception

    - by user1748932
    public class XMLParser { // constructor public XMLParser() { } public String getXmlFromUrl(String url) { String responseBody = null; getset d1 = new getset(); String d = d1.getData(); // text String y = d1.getYear(); // year String c = d1.getCircular(); String p = d1.getPage(); List<NameValuePair> nameValuePairs = new ArrayList<NameValuePair>(); nameValuePairs.add(new BasicNameValuePair("YearID", y)); nameValuePairs.add(new BasicNameValuePair("CircularNo", c)); nameValuePairs.add(new BasicNameValuePair("SearchText", d)); nameValuePairs.add(new BasicNameValuePair("pagenumber", p)); try { HttpClient httpclient = new DefaultHttpClient(); HttpPost httppost = new HttpPost(url); httppost.setEntity(new UrlEncodedFormEntity(nameValuePairs)); HttpResponse response = httpclient.execute(httppost); HttpEntity entity = response.getEntity(); responseBody = EntityUtils.toString(entity); } catch (UnsupportedEncodingException e) { e.printStackTrace(); } catch (ClientProtocolException e) { e.printStackTrace(); } catch (IOException e) { e.printStackTrace(); } // return XML return responseBody; } public Document getDomElement(String xml) { Document doc = null; DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance(); try { DocumentBuilder db = dbf.newDocumentBuilder(); InputSource is = new InputSource(); is.setCharacterStream(new StringReader(xml)); doc = db.parse(is); } catch (ParserConfigurationException e) { Log.e("Error: ", e.getMessage()); return null; } catch (SAXException e) { Log.e("Error: ", e.getMessage()); // i m getting Exception here return null; } catch (IOException e) { Log.e("Error: ", e.getMessage()); return null; } return doc; } /** * Getting node value * * @param elem * element */ public final String getElementValue(Node elem) { Node child; if (elem != null) { if (elem.hasChildNodes()) { for (child = elem.getFirstChild(); child != null; child = child .getNextSibling()) { if (child.getNodeType() == Node.TEXT_NODE) { return child.getNodeValue(); } } } } return ""; } /** * Getting node value * * @param Element * node * @param key * string * */ public String getValue(Element item, String str) { NodeList n = item.getElementsByTagName(str); return this.getElementValue(n.item(0)); } } I am getting Exception in this class for parsing data. I want print this message in another class which extends from Activity. Can you please tell me how? I tried much but not able to do.. 
public class AndroidXMLParsingActivity extends Activity { public int currentPage = 1; public ListView lisView1; static final String KEY_ITEM = "docdetails"; static final String KEY_NAME = "heading"; public Button btnNext; public Button btnPre; public static String url = "http://dev.taxmann.com/TaxmannService/TaxmannService.asmx/GetNotificationList"; @Override public void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.main); // listView1 lisView1 = (ListView) findViewById(R.id.listView1); // Next btnNext = (Button) findViewById(R.id.btnNext); // Perform action on click btnNext.setOnClickListener(new View.OnClickListener() { public void onClick(View v) { currentPage = currentPage + 1; ShowData(); } }); // Previous btnPre = (Button) findViewById(R.id.btnPre); // Perform action on click btnPre.setOnClickListener(new View.OnClickListener() { public void onClick(View v) { currentPage = currentPage - 1; ShowData(); } }); ShowData(); } public void ShowData() { XMLParser parser = new XMLParser(); String xml = parser.getXmlFromUrl(url); // getting XML Document doc = parser.getDomElement(xml); // getting DOM element NodeList nl = doc.getElementsByTagName(KEY_ITEM); int displayPerPage = 5; // Per Page int TotalRows = nl.getLength(); int indexRowStart = ((displayPerPage * currentPage) - displayPerPage); int TotalPage = 0; if (TotalRows <= displayPerPage) { TotalPage = 1; } else if ((TotalRows % displayPerPage) == 0) { TotalPage = (TotalRows / displayPerPage); } else { TotalPage = (TotalRows / displayPerPage) + 1; // 7 TotalPage = (int) TotalPage; // 7 } int indexRowEnd = displayPerPage * currentPage; // 5 if (indexRowEnd > TotalRows) { indexRowEnd = TotalRows; } // Disabled Button Next if (currentPage >= TotalPage) { btnNext.setEnabled(false); } else { btnNext.setEnabled(true); } // Disabled Button Previos if (currentPage <= 1) { btnPre.setEnabled(false); } else { btnPre.setEnabled(true); } // Load Data from Index int RowID = 1; ArrayList<HashMap<String, String>> menuItems = new ArrayList<HashMap<String, String>>(); HashMap<String, String> map; // RowID if (currentPage > 1) { RowID = (displayPerPage * (currentPage - 1)) + 1; } for (int i = indexRowStart; i < indexRowEnd; i++) { Element e = (Element) nl.item(i); // adding each child node to HashMap key => value map = new HashMap<String, String>(); map.put("RowID", String.valueOf(RowID)); map.put(KEY_NAME, parser.getValue(e, KEY_NAME)); // adding HashList to ArrayList menuItems.add(map); RowID = RowID + 1; } SimpleAdapter sAdap; sAdap = new SimpleAdapter(AndroidXMLParsingActivity.this, menuItems, R.layout.list_item, new String[] { "RowID", KEY_NAME }, new int[] { R.id.ColRowID, R.id.ColName }); lisView1.setAdapter(sAdap); } } This my class where I want to Print that message

    Read the article

  • How To Switch Back to Outlook 2007 After the 2010 Beta Ends

    - by Matthew Guay
    Are you switching back to Outlook 2007 after trying out Office 2010 beta?  Here’s how you can restore your Outlook data and keep everything working fine after the switch. Whenever you install a newer version of Outlook, it will convert your profile and data files to the latest format.  This makes them work the best in the newer version of Outlook, but may cause problems if you decide to revert to an older version.  If you installed Outlook 2010 beta, it automatically imported and converted your profile from Outlook 2007.  When the beta expires, you will either have to reinstall Office 2007 or purchase a copy of Office 2010. If you choose to reinstall Office 2007, you may notice an error message each time you open Outlook. Outlook will still work fine and all of your data will be saved, but this error message can get annoying.  Here’s how you can create a new profile, import all of your old data, and get rid of this error message. Banish the Error Message with a New Profile To get rid of this error message, we need to create a new Outlook profile.  First, make sure your Outlook data files are backed up.  Your messages, contacts, calendar, and more are stored in a .pst file in your appdata folder.  Enter the following in the address bar of an Explorer window to open your Outlook data folder, and replace username with your user name: C:\Users\username\AppData\Local\Microsoft\Outlook Copy the Outlook Personal Folders (.pst) files that contain your data. Its name is usually your email address, though it may have a different name.  If in doubt, select all of the Outlook Personal Folders files, copy them, and save them in another safe place (such as your Documents folder). Now, let’s remove your old profile.  Open Control Panel, and select Mail.  In Windows Vista or 7, simply enter “Mail” in the search box and select the first entry. Click the “Show Profiles…” button. Now, select your Outlook profile, and click Remove.  This will not delete your data files, but will remove them from Outlook. Press Yes to confirm that you wish to remove this profile. Open Outlook, and you will be asked to create a new profile.  Enter a name for your new profile, and press Ok. Now enter your email account information to setup Outlook as normal. Outlook will attempt to automatically configure your account settings.  This usually works for accounts with popular email systems, but if it fails to find your information you can enter it manually.  Press finish when everything’s done. Outlook will now go ahead and download messages from your email account.  In our test, we used a Gmail account that still had all of our old messages online.  Those files are backed up in our old Outlook data files, so we can save time and not download them.  Click the Send/Receive button on the bottom of the window, and select “Cancel Send/Receive”. Restore Your Old Outlook Data Let’s add our old Outlook file back to Outlook 2007.  Exit Outlook, and then go back to Control Panel, and select Mail as above.  This time, click the Data Files button. Click the Add button on the top left. Select “Office Outlook Personal Folders File (.pst)”, and click Ok. Now, select your old Outlook data file.  It should be in the folder that opens by default; if not, browse to the backup copy we saved earlier, and select it. Press Ok at the next dialog to accept the default settings. Now, select the data file we just imported, and click “Set as Default”. 
Now, all of your old messages, appointments, contacts, and everything else will be right in Outlook ready for you.  Click Ok, and then open Outlook to see the change. All of the data that was in Outlook 2010 is now ready to use in Outlook 2007.  You won’t have to wait to re-download all of your emails from the server since everything’s still here ready to be used.  And when you open Outlook, you won’t see any error messages, either! Conclusion Migrating your Outlook profile back to Outlook 2007 is fairly easy, and with these steps, you can avoid seeing an error message every time you open Outlook.  With all your data intact, you’re ready to get back to work instead of getting frustrated with Outlook.  Many of us use webmail and keep all of our messages in the cloud, but even on broadband connections it can take a long time to download several gigabytes of emails.
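
If you would rather script the backup step than copy the .pst files by hand, a small console program along these lines would do it. It is only a sketch: the destination folder is an arbitrary choice, the source path is the default Vista/7 location mentioned above, and Outlook must be closed so the data files are not locked.

    using System;
    using System.IO;

    class BackupOutlookPst
    {
        static void Main()
        {
            // Default Outlook data folder on Windows Vista/7 (the AppData\Local path shown above).
            string source = Path.Combine(
                Environment.GetFolderPath(Environment.SpecialFolder.LocalApplicationData),
                @"Microsoft\Outlook");

            // Any safe location works; Documents\OutlookBackup is just an example.
            string destination = Path.Combine(
                Environment.GetFolderPath(Environment.SpecialFolder.MyDocuments),
                "OutlookBackup");
            Directory.CreateDirectory(destination);

            foreach (string pst in Directory.GetFiles(source, "*.pst"))
            {
                string target = Path.Combine(destination, Path.GetFileName(pst));
                File.Copy(pst, target, true); // overwrite an older backup if one exists
                Console.WriteLine("Backed up " + target);
            }
        }
    }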

    Read the article

  • Adding A Custom Dropdown in RCDC for Forefront Identity Manager 2010

    - by Daniel Lackey
    My latest exploration has been FIM 2010 for Identity Management. The following is a post on how to add a custom dropdown to the FIM Portal. I have decided to document this as I cannot find documentation on how to do this anywhere else. I hope others find it useful. For starters, this was not an easy task for me to figure out. I really would like to know why it is so cumbersome to do something that seems like a lot of people would need to do, but that’s for another day. The dropdown I wanted to add was for ‘Account Status’, which would display whether the account is ‘Enabled’ or ‘Disabled’ in the data source Active Directory. This option would also allow helpdesk users or admins to administer the userAccountControl attribute in AD from the FIM Portal interface. The first thing I had to do was create the attribute itself. This is done by going to Administration → Schema Management from the FIM 2010 portal. Once here, you click on All Attributes. What is listed here are all attributes and their associated Resource Types in FIM. To create the ‘AccountStatus’ attribute, click on New. As shown below, enter ‘AccountStatus’ with no spaces for the System Name and ‘Account Status’ for the Display Name. The Data Type is going to be ‘Indexed String’. Click Next. Leave everything on the Localization tab default and click Next. On the Validation tab as shown below, we will enter the regex expression ^(Enabled|Disabled)?$ with our two desired string values ‘Enabled’ and ‘Disabled’. Click on Finish and then Submit to complete adding the attribute. The next step involves associating the attribute with a resource type. This is called ‘Binding’ the attribute. From the Schema Management page, click on All Bindings. From the page that comes up, click on New. As shown below, enter ‘User’ for the Resource Type and ‘Account Status’ for the Attribute Type. This is essentially binding the Account Status attribute to the ‘User’ Resource Type. Click Next. On the ‘Attribute Override’ tab, type in ‘Account Status’ for the Display Name field. Click Next. On the ‘Localization’ tab, click Next. On the ‘Validation’ tab, enter the regex expression ^(Enabled|Disabled)?$ we entered previously for the attribute. Click Finish and then Submit to complete. Now that the Attribute and the Binding are complete, you have to give users permission to see the attribute on the User Edit page. Go to Administration → Management Policy Rules. Look for the rule named Administration: Administrators can read and update Users and click on it. Once it opens, click on the ‘Target Resources’ tab and look at the section named Resource Attributes. Type in the ‘Account Status’ attribute at the end and check it with the validator. Once done, click on OK to save the changes. Lastly, we need to add the actual dropdown control to the RCDC (Resource Control Display Configuration) for User Editing. Go to Administration → Resource Control Display Configuration. From here, navigate until you find the RCDC named Configuration for User Editing RCDC and click on it. The following is what you will see: First step is to export the Configuration Data file. Click on the Export configuration link and save the file to your desktop or other folder. Find the file you just exported and open the file in your XML editor of choice. I use Notepad but anything will work. Since we are adding a dropdown control, first find another control in the existing file that is already a dropdown in FIM.
I used EmployeeType as my example. Copy the control from the beginning tag named <my:Control… to the ending tag </my:Control>. Now take what you copied and paste it in whatever location you desire within the form between two other controls. I chose to place the ‘Account Status’ field after the ‘Account Name’ field. After you paste the control you will need to modify so it looks like this:       Notice where you specify what attribute you are dealing with where it has AccountStatus in the XML. Once you are complete with modifying this, save the file and make sure it is a .xml file.   Now go back to the Configuration for User Editing screen and look at the section named ‘Configuration Data’. Click the ‘Browse’ button and find the XML file you just modified and choose it. Click OK on the bottom of the window and you are done!   Now when you click on a user’s name in the FIM Portal, you should see the newly added dropdown box as below:       Later I will post more about this drop down, specifically on how to automate actually ‘Disabling’ the account in the data source through the FIM Workflows and MAs.   <my:Control my:Name="AccountStatus" my:TypeName="UocDropDownList" my:Caption="{Binding Source=schema, Path=AccountStatus.DisplayName}" my:Description="{Binding Source=schema, Path=AccountStatus.Description}" my:RightsLevel="{Binding Source=rights, Path=AccountStatus}"> <my:Properties> <my:Property my:Name="ValuePath" my:Value="Value"/> <my:Property my:Name="CaptionPath" my:Value="Caption"/> <my:Property my:Name="HintPath" my:Value="Hint"/> <my:Property my:Name="ItemSource" my:Value="{Binding Source=schema, Path=AccountStatus.LocalizedAllowedValues}"/> <my:Property my:Name="SelectedValue" my:Value="{Binding Source=object, Path=AccountStatus, Mode=TwoWay}"/> </my:Properties> </my:Control>

    Read the article

  • SQL SERVER – Enumerations in Relational Database – Best Practice

    - by pinaldave
    Marko Parkkola This article has been submitted by Marko Parkkola, Data systems designer at Saarionen Oy, Finland. Marko is excellent developer and always thinking at next level. You can read his earlier comment which created very interesting discussion here: SQL SERVER- IF EXISTS(Select null from table) vs IF EXISTS(Select 1 from table). I must express my special thanks to Marko for sending this best practice for Enumerations in Relational Database. He has really wrote excellent piece here and welcome comments here. Enumerations in Relational Database This is a subject which is very basic thing in relational databases but often not very well understood and sometimes badly implemented. There are of course many ways to do this but I concentrate only two cases, one which is “the right way” and one which is definitely wrong way. The concept Let’s say we have table Person in our database. Person has properties/fields like Firstname, Lastname, Birthday and so on. Then there’s a field that tells person’s marital status and let’s name it the same way; MaritalStatus. Now MaritalStatus is an enumeration. In C# I would definitely make it an enumeration with values likes Single, InRelationship, Married, Divorced. Now here comes the problem, SQL doesn’t have enumerations. The wrong way This is, in my opinion, absolutely the wrong way to do this. It has one upside though; you’ll see the enumeration’s description instantly when you do simple SELECT query and you don’t have to deal with mysterious values. There’s plenty of downsides too and one would be database fragmentation. Consider this (I’ve left all indexes and constraints out of the query on purpose). CREATE TABLE [dbo].[Person] ( [Firstname] NVARCHAR(100), [Lastname] NVARCHAR(100), [Birthday] datetime, [MaritalStatus] NVARCHAR(10) ) You have nvarchar(20) field in the table that tells the marital status. Obvious problem with this is that what if you create a new value which doesn’t fit into 20 characters? You’ll have to come and alter the table. There are other problems also but I’ll leave those for the reader to think about. The correct way Here’s how I’ve done this in many projects. This model still has one problem but it can be alleviated in the application layer or with CHECK constraints if you like. First I will create a namespace table which tells the name of the enumeration. I will add one row to it too. I’ll write all the indexes and constraints here too. CREATE TABLE [CodeNamespace] ( [Id] INT IDENTITY(1, 1), [Name] NVARCHAR(100) NOT NULL, CONSTRAINT [PK_CodeNamespace] PRIMARY KEY ([Id]), CONSTRAINT [IXQ_CodeNamespace_Name] UNIQUE NONCLUSTERED ([Name]) ) GO INSERT INTO [CodeNamespace] SELECT 'MaritalStatus' GO Then I create a table that holds the actual values and which reference to namespace table in order to group the values under different namespaces. I’ll add couple of rows here too. CREATE TABLE [CodeValue] ( [CodeNamespaceId] INT NOT NULL, [Value] INT NOT NULL, [Description] NVARCHAR(100) NOT NULL, [OrderBy] INT, CONSTRAINT [PK_CodeValue] PRIMARY KEY CLUSTERED ([CodeNamespaceId], [Value]), CONSTRAINT [FK_CodeValue_CodeNamespace] FOREIGN KEY ([CodeNamespaceId]) REFERENCES [CodeNamespace] ([Id]) ) GO -- 1 is the 'MaritalStatus' namespace INSERT INTO [CodeValue] SELECT 1, 1, 'Single', 1 INSERT INTO [CodeValue] SELECT 1, 2, 'In relationship', 2 INSERT INTO [CodeValue] SELECT 1, 3, 'Married', 3 INSERT INTO [CodeValue] SELECT 1, 4, 'Divorced', 4 GO Now there’s four columns in CodeValue table. 
CodeNamespaceId tells under which namespace values belongs to. Value tells the enumeration value which is used in Person table (I’ll show how this is done below). Description tells what the value means. You can use this, for example, column in UI’s combo box. OrderBy tells if the values needs to be ordered in some way when displayed in the UI. And here’s the Person table again now with correct columns. I’ll add one row here to show how enumerations are to be used. CREATE TABLE [dbo].[Person] ( [Firstname] NVARCHAR(100), [Lastname] NVARCHAR(100), [Birthday] datetime, [MaritalStatus] INT ) GO INSERT INTO [Person] SELECT 'Marko', 'Parkkola', '1977-03-04', 3 GO Now I said earlier that there is one problem with this. MaritalStatus column doesn’t have any database enforced relationship to the CodeValue table so you can enter any value you like into this field. I’ve solved this problem in the application layer by selecting all the values from the CodeValue table and put them into a combobox / dropdownlist (with Value field as value and Description as text) so the end user can’t enter any illegal values; and of course I’ll check the entered value in data access layer also. I said in the “The wrong way” section that there is one benefit to it. In fact, you can have the same benefit here by using a simple view, which I schema bound so you can even index it if you like. CREATE VIEW [dbo].[Person_v] WITH SCHEMABINDING AS SELECT p.[Firstname], p.[Lastname], p.[BirthDay], c.[Description] MaritalStatus FROM [dbo].[Person] p JOIN [dbo].[CodeValue] c ON p.[MaritalStatus] = c.[Value] JOIN [dbo].[CodeNamespace] n ON n.[Id] = c.[CodeNamespaceId] AND n.[Name] = 'MaritalStatus' GO -- Select from View SELECT * FROM [dbo].[Person_v] GO This is excellent write up byMarko Parkkola. Do you have this kind of design setup at your organization? Let us know your opinion. Reference: Pinal Dave (http://blog.SQLAuthority.com) Filed under: Best Practices, Database, DBA, Readers Contribution, Software Development, SQL, SQL Authority, SQL Documentation, SQL Query, SQL Server, SQL Tips and Tricks, T SQL, Technology
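
As a side note on Marko's application-layer point above — filling a combo box from CodeValue so end users cannot enter illegal values — a rough ADO.NET/WinForms sketch of that idea might look like the following. The connection string and method name are placeholders; the query simply mirrors the CodeNamespace/CodeValue tables defined in the article.

    using System.Data;
    using System.Data.SqlClient;
    using System.Windows.Forms;

    public static class MaritalStatusLoader
    {
        // Fills a ComboBox with the allowed values for the 'MaritalStatus' namespace.
        public static void Load(ComboBox comboBox, string connectionString)
        {
            const string sql = @"
                SELECT v.[Value], v.[Description]
                FROM [CodeValue] v
                JOIN [CodeNamespace] n ON n.[Id] = v.[CodeNamespaceId]
                WHERE n.[Name] = 'MaritalStatus'
                ORDER BY v.[OrderBy]";

            var allowedValues = new DataTable();
            using (var connection = new SqlConnection(connectionString))
            using (var adapter = new SqlDataAdapter(sql, connection))
            {
                adapter.Fill(allowedValues); // Fill opens and closes the connection itself
            }

            comboBox.DataSource = allowedValues;
            comboBox.DisplayMember = "Description"; // what the user sees
            comboBox.ValueMember = "Value";         // the INT that goes into Person.MaritalStatus
        }
    }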

    Read the article

  • Ask How-To Geek: Dropbox in the Start Menu, Understanding Symlinks, and Ripping TV Series DVDs

    - by Jason Fitzpatrick
    This week we take a look at how to incorporate Dropbox into your Windows Start Menu, understanding and using symbolic links, and how to rip your TV series DVDs right to unique and high-quality episode files. Once a week we dip into our reader mailbag and help readers solve their problems, sharing the useful solutions with you in the process. Read on to see our fixes for this week’s reader dilemmas. Add Drobox to Your Start Menu Dear How-To Geek, I use Dropbox all the time and would like to add it right onto my start menu along side the other major shortcuts like Documents, Pictures, etc. It seems like adding Dropbox into the menu should be part of the Dropbox installation package! Sincerely, Dropboxing in Des Moines Dear Dropboxing, We agree, it would be a nice installation option. As it stands you’re going to have to do a little simple hacking to get Dropbox nestled neatly into your start menu. The hack isn’t super elegant but when you’re done you’ll have the link you want and it’ll look like it was there all along. Check out this step-by-step guide here in order to take an existing Library shortcut and rework it to be a Dropbox link. Understanding and Using Symbolic Links Dear How-To Geek, I was talking to a coworker the other day about an issue I’d been having with a media center application I’m running. He suggested using symbolic links to better organize my media and make it easier for the application to access my collection. I had no idea what he was talking about and never got a chance to bug him about it later. Can you clear up this whole symbolic links business for me? I’ve been using computers for years and I’ve never even heard of it! Sincerely, Symbolic Who? Dear Symbolic, Symbolic links aren’t commonly used by many Windows users which is why you likely haven’t run into the concept. Symbolic links are essentially supercharged shortcuts—the newly introduced Windows library system is really just a type of symbolic link system. You can use symbolic links to do all sorts of neat stuff like link folders to your Dropbox folder, organize media, and more. The concept of symbolic links is pretty simple but the execution can be really tricky. We’d suggest reading over our guide to creating symbolic links in Windows 7, Windows XP, and Ubunutu to get a clearer idea what you’re getting into. Rip Your TV DVDs into Handy Episode Files Dear How-To Geek, My wife got me an iPod for Christmas and I still haven’t got around to filling it up. I have tons of entire TV show seasons on DVD and would like to get them on the iPod but I have absolutely no idea where to start. How do I get the shows off the discs? I thought it would be as easy to import the TV shows into iTunes as it is to import tracks off a CD but I was totally wrong. I tried downloading some applications to rip them but those didn’t work at all. Very frustrating! Surely there is an easy and/or automated way to do this, right? Sincerely, Free My DVDs Dear DVDs, Oh man is this a frustration we can relate to. It’s inordinately difficult to get movies and TV shows off physical media and into digital (and portable media player-friendly) formats. There are a multitude of ways to rip DVDs and quite a few applications out there (some good, some mediocre, and some outright malware). We’d recommend a two-part punch to solve your ripping woes. You’ll need a copy of DVDFab to strip away the protections on the discs and rip the disc and Handbrake to load the disc image and convert the files. 
It’s not quite as smooth as the CD-to-iTunes workflow but it’s still pretty easy. Check out all the steps and settings you’ll want to toggle here. Have a question you want to put before the How-To Geek staff? Shoot us an email at [email protected] and then keep an eye out for a solution in the Ask How-To Geek column.
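
Returning to the symbolic-links question for a moment: the mklink command covered in the guides linked above is a thin wrapper over the Win32 CreateSymbolicLink function, so links can also be created from your own code. The following is a minimal sketch for Windows Vista/7 — the paths are hypothetical, and the process needs the create-symbolic-link privilege, which in practice usually means running elevated.

    using System;
    using System.Runtime.InteropServices;

    class SymlinkDemo
    {
        // 0 = the link points at a file, 1 = it points at a directory.
        private const int SYMBOLIC_LINK_FLAG_DIRECTORY = 1;

        [DllImport("kernel32.dll", CharSet = CharSet.Unicode, SetLastError = true)]
        [return: MarshalAs(UnmanagedType.I1)]
        private static extern bool CreateSymbolicLink(
            string lpSymlinkFileName, string lpTargetFileName, int dwFlags);

        static void Main()
        {
            // Example: expose an archived TV folder inside a media center's watched folder.
            bool created = CreateSymbolicLink(
                @"C:\Users\Public\Videos\Series",   // the link to create (hypothetical)
                @"D:\Archive\TV Series",            // the real location (hypothetical)
                SYMBOLIC_LINK_FLAG_DIRECTORY);

            Console.WriteLine(created
                ? "Link created."
                : "Failed, Win32 error " + Marshal.GetLastWin32Error());
        }
    }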

    Read the article

  • Announcing SonicAgile – An Agile Project Management Solution

    - by Stephen.Walther
    I’m happy to announce the public release of SonicAgile – an online tool for managing software projects. You can register for SonicAgile at www.SonicAgile.com and start using it with your team today. SonicAgile is an agile project management solution which is designed to help teams of developers coordinate their work on software projects. SonicAgile supports creating backlogs, scrumboards, and burndown charts. It includes support for acceptance criteria, story estimation, calculating team velocity, and email integration. In short, SonicAgile includes all of the tools that you need to coordinate work on a software project, get stuff done, and build great software. Let me discuss each of the features of SonicAgile in more detail. SonicAgile Backlog You use the backlog to create a prioritized list of user stories such as features, bugs, and change requests. Basically, all future work planned for a product should be captured in the backlog. We focused our attention on designing the user interface for the backlog. Because the main function of the backlog is to prioritize stories, we made it easy to prioritize a story by just drag and dropping the story from one location to another. We also wanted to make it easy to add stories from the product backlog to a sprint backlog. A sprint backlog contains the stories that you plan to complete during a particular sprint. To add a story to a sprint, you just drag the story from the product backlog to the sprint backlog. Finally, we made it easy to track team velocity — the average amount of work that your team completes in each sprint. Your team’s average velocity is displayed in the backlog. When you add too many stories to a sprint – in other words, you attempt to take on too much work – you are warned automatically: SonicAgile Scrumboard Every workday, your team meets to have their daily scrum. During the daily scrum, you can use the SonicAgile Scrumboard to see (at a glance) what everyone on the team is working on. For example, the following scrumboard shows that Stephen is working on the Fix Gravatar Bug story and Pete and Jane have finished working on the Product Details Page story: Every story can be broken into tasks. For example, to create the Product Details Page, you might need to create database objects, do page design, and create an MVC controller. You can use the Scrumboard to track the state of each task. A story can have acceptance criteria which clarify the requirements for the story to be done. For example, here is how you can specify the acceptance criteria for the Product Details Page story: You cannot close a story — and remove the story from the list of active stories on the scrumboard — until all tasks and acceptance criteria associated with the story are done. SonicAgile Burndown Charts You can use Burndown charts to track your team’s progress. SonicAgile supports Release Burndown, Sprint Burndown by Task Estimates, and Sprint Burndown by Story Points charts. For example, here’s a sample of a Sprint Burndown by Story Points chart: The downward slope shows the progress of the team when closing stories. The vertical axis represents story points and the horizontal axis represents time. Email Integration SonicAgile was designed to improve your team’s communication and collaboration. Most stories and tasks require discussion to nail down exactly what work needs to be done. The most natural way to discuss stories and tasks is through email. However, you don’t want these discussions to get lost. 
When you use SonicAgile, all email discussions concerning a story or a task (including all email attachments) are captured automatically. At any time in the future, you can view all of the email discussion concerning a story or a task by opening the Story Details dialog: Why We Built SonicAgile We built SonicAgile because we needed it for our team. Our consulting company, Superexpert, builds websites for financial services, startups, and large corporations. We have multiple teams working on multiple projects. Keeping on top of all of the work that needs to be done to complete a software project is challenging. You need a good sense of what needs to be done, who is doing it, and when the work will be done. We built SonicAgile because we wanted a lightweight project management tool which we could use to coordinate the work that our team performs on software projects. How We Built SonicAgile We wanted SonicAgile to be easy to use, highly scalable, and have a highly interactive client interface. SonicAgile is very close to being a pure Ajax application. We built SonicAgile using ASP.NET MVC 3, jQuery, and Knockout. We would not have been able to build such a complex Ajax application without these technologies. Almost all of our MVC controller actions return JSON results (While developing SonicAgile, I would have given my left arm to be able to use the new ASP.NET Web API). The controller actions are invoked from jQuery Ajax calls from the browser. We built SonicAgile on Windows Azure. We are taking advantage of SQL Azure, Table Storage, and Blob Storage. Windows Azure enables us to scale very quickly to handle whatever demand is thrown at us. Summary I hope that you will try SonicAgile. You can register at www.SonicAgile.com (there’s a free 30-day trial). The goal of SonicAgile is to make it easier for teams to get more stuff done, work better together, and build amazing software. Let us know what you think!
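
As a small illustration of the architecture described under “How We Built SonicAgile” — controller actions that return JSON, consumed from the browser by jQuery and Knockout — the shape of such an action looks roughly like the sketch below. This is not SonicAgile’s actual code; the controller, route, and story fields are invented for the example.

    using System.Web.Mvc;

    public class StoriesController : Controller
    {
        // GET /Stories/Sprint/3 — returns the stories in a sprint as JSON.
        [HttpGet]
        public JsonResult Sprint(int id)
        {
            var stories = new[]
            {
                new { Id = 1, Title = "Fix Gravatar Bug",     Points = 3, Done = false },
                new { Id = 2, Title = "Product Details Page", Points = 5, Done = true  }
            };
            return Json(stories, JsonRequestBehavior.AllowGet);
        }
    }

On the client, a jQuery call such as $.getJSON("/Stories/Sprint/3", ...) would fetch the result and hand it to a Knockout view model for binding to the scrumboard markup.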

    Read the article

  • How to Play FLAC Files in Windows 7 Media Center & Player

    - by Mysticgeek
    An annoyance for music lovers who enjoy FLAC format, is there’s no native support for WMP or WMC. If you’re a music enthusiast who prefers FLAC format, we’ll look at adding support to Windows 7 Media Center and Player. For the following article we are using Windows 7 Home Premium 32-bit edition. Download and Install madFLAC v1.8 The first thing we need to do is download and install the madFLAC v1.8 decoder (link below). Just unzip the file and run install.bat… You’ll get a message that it has been successfully registered, click Ok. To verify everything is working, open up one of your FLAC files with WMP, and you’ll get the following message. Check the box Don’t ask me again for this extension and click Yes. Now Media Player should play the track you’ve chosen.   Delete Current Music Library But what if you want to add your entire collection of FLAC files to the Library? If you already have it set up as your default music player, unfortunately we need to remove the current library and delete the database. The best way to manage the music library in Windows 7 is via WMP 12. Since we don’t want to delete songs from the computer we need to Open WMP, press “Alt+T” and navigate to Tools \ Options \ Library.   Now uncheck the box Delete files from computer when deleted from library and click Ok. Now in your Library click “Ctrl + A” to highlight all of the songs in the Library, then hit the “Delete” key. If you have a lot of songs in your library (like on our system) you’ll see the following dialog box while it collects all of the information.   After all of the data is collected, make sure the radio button next to Delete from library only is marked and click Ok. Again you’ll see the Working progress window while the songs are deleted. Deleting Current Database Now we need to make sure we’re starting out fresh. Close out of Media Player, then we’ll basically follow the same directions The Geek pointed out for fixing the WMP Library. Click on Start and type in services.msc into the search box and hit Enter. Now scroll down and stop the service named Windows Media Player Network Sharing Service. Now, navigate to the following directory and the main file to delete CurrentDatabase_372.wmdb %USERPROFILE%\Local Settings\Application Data\Microsoft\Media Player\ Again, the main file to delete is CurrentDatabase_372.wmdb, though if you want, you can delete them all. If you’re uneasy about deleting these files, make sure to back them up first. Now after you restart WMP you can begin adding your FLAC files. For those of us with large collections, it’s extremely annoying to see WMP try to pick up all of your media by default. To delete the other directories go to Organize \ Manage Libraries then open the directories you want to remove. For example here we’re removing the default libraries it tries to check for music. Remove the directories you don’t want it to gather contents from in each of the categories. We removed all of the other collections and only added the FLAC music directory from our home server. SoftPointer Tag Support Plugin Even though we were able to get FLAC files to play in WMP and WMC at this point, there’s another utility from SoftPointer to add. It enables FLAC (and other file formats) to be picked up in the library much easier. It has a long name but is effective –M4a/FLAC/Ogg/Ape/Mpc Tag Support Plugin for Media Player and Media Center (link below). Just install it by accepting the defaults, and you’ll be glad you did. 
After installing it, and re-launching Media Player, give it some time to collect all of the data from your FLAC directory…it can take a while. In fact, if your collection is huge, just walk away and let it do its thing. If you try to use it right away, WMP slows down considerably while updating the library.   Once the library is set up you’ll be able to play your FLAC tunes in Windows 7 Media Center as well as Windows Media Player 12.   Album Art One caveat is that some of our albums didn’t show any cover art. But we were usually able to get it by right-clicking the album and selecting Find album info.   Then confirming the album information is correct…   Conclusion Although this seems like several steps to go through to play FLAC files in Windows 7 Media Center and Player, it seems to work really well after it’s set up. We haven’t tried this with a 64-bit machine, but the process should be similar, though you might want to make sure the codecs you use are 64-bit. We’re sure there are other methods out there that some of you use, and if so leave us a comment and tell us about it. Download madFlac V1.8  M4a/FLAC/Ogg/Ape/Mpc Tag Support Plugin for Media Player and Media Center from SoftPointer
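
If you end up resetting the library more than once while experimenting, the service-stop-and-delete steps above can be scripted. The sketch below assumes the sharing service is registered under its usual short name WMPNetworkSvc and uses the Windows 7 location of the Media Player data folder (%LOCALAPPDATA%\Microsoft\Media Player, the modern equivalent of the Local Settings path quoted earlier); run it elevated and back the folder up first.

    using System;
    using System.IO;
    using System.ServiceProcess; // add a reference to System.ServiceProcess.dll

    class ResetWmpLibrary
    {
        static void Main()
        {
            // "Windows Media Player Network Sharing Service"
            using (var sharing = new ServiceController("WMPNetworkSvc"))
            {
                if (sharing.Status == ServiceControllerStatus.Running)
                {
                    sharing.Stop();
                    sharing.WaitForStatus(ServiceControllerStatus.Stopped, TimeSpan.FromSeconds(30));
                }
            }

            string mediaPlayerFolder = Path.Combine(
                Environment.GetFolderPath(Environment.SpecialFolder.LocalApplicationData),
                @"Microsoft\Media Player");

            string database = Path.Combine(mediaPlayerFolder, "CurrentDatabase_372.wmdb");
            if (File.Exists(database))
            {
                File.Delete(database);
                Console.WriteLine("Deleted " + database);
            }
        }
    }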

    Read the article

  • How to customise search core results web part Part1

    - by ybbest
    In this post, I’d like to show you how to customise search core results web part. It is a quite simple, most of the times what you need to do is to change the xslt to perform the changes. Here are the steps: 1. You need to change the xslt to the following, so that you can see the raw xml. <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform" > <xsl:output method="xml" version="1.0" encoding="UTF-8" indent="yes" /> <xsl:template match="/"> <xmp><xsl:copy-of select="*"/></xmp> </xsl:template> </xsl:stylesheet> a. To do so , you need to go to edit page>>Edit search core results web part >>Display Properties and then untick use Location Visualization b. Open the xslt editor and copy the existing XSLT code to your preferred xslt editor so that you can customise it. c. Now you can paste in the XSLT code above. 2.Perform the search after you have completed step1 and you will see the search results returned in raw xml <All_Results> <Result> <id>1</id> <workid>678</workid> <rank>100000000</rank> <title>Ybbest</title> <author></author> <size>137531</size> <url>http://ybbest</url> <urlEncoded>http%3A%2F%2Fybbest</urlEncoded> <description>Ybbest test site</description> <write>3/17/2012</write> <sitename>http://ybbest</sitename> <collapsingstatus>0</collapsingstatus> <hithighlightedsummary> <c0>Ybbest</c0> test site <ddd /> Add a new image, change this welcome text or add new lists to this page by clicking the edit button above. You can click on Shared Documents to add files or on the <ddd /> </hithighlightedsummary> <hithighlightedproperties> <HHTitle> <c0>Ybbest</c0> </HHTitle> <HHUrl>http://<c0>ybbest</c0></HHUrl> </hithighlightedproperties> <contentclass>STS_Site</contentclass> <isdocument>False</isdocument> <picturethumbnailurl></picturethumbnailurl> <popularsocialtags /> <picturewidth>0</picturewidth> <pictureheight>0</pictureheight> <datepicturetaken></datepicturetaken> <serverredirectedurl></serverredirectedurl> <fileextension></fileextension> <ows_metadatafacetinfo></ows_metadatafacetinfo> <imageurl imageurldescription="SharePoint Site Collection">/_layouts/images/siteicon_16x16.png</imageurl> </Result> <TotalResults>69</TotalResults> <NumberOfResults>50</NumberOfResults> </All_Results> 3. Then you can read what has been returned in the raw xml and start modifying the xslt to customise your search results page. 4.You can also link an external xslt to the web part.It can be set in the Miscellaneous of Web Part section. You can also set it pragmatically using a feature receiver , you can download the source code to do so here. References: http://stackoverflow.com/questions/6548104/change-xslt-of-the-searchresultwebpart-during-the-featureactivated http://www.dotnetmafia.com/blogs/dotnettipoftheday/archive/2010/04/05/a-quick-guide-to-coreresultswebpart-configuration-changes-in-sharepoint-2010.aspx http://www.tonytestasworld.com/post/2011/01/30/HowTo-display-SharePoint-Search-results-as-raw-XML.aspx
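
For step 4 — setting the external XSLT programmatically from a feature receiver, as the posts in the references describe — a sketch might look like the following. Treat it purely as an outline: the results page URL and the XSL file path are hypothetical, it assumes a site-collection-scoped feature, and it assumes the CoreResultsWebPart exposes the UseLocationVisualization and XslLink properties the referenced posts rely on.

    using System.Web.UI.WebControls.WebParts;
    using Microsoft.Office.Server.Search.WebControls;
    using Microsoft.SharePoint;
    using Microsoft.SharePoint.WebPartPages;

    public class SearchXslFeatureReceiver : SPFeatureReceiver
    {
        public override void FeatureActivated(SPFeatureReceiverProperties properties)
        {
            var site = (SPSite)properties.Feature.Parent; // site-collection-scoped feature assumed
            using (SPWeb web = site.OpenWeb())
            {
                // Hypothetical search results page; adjust to wherever your core results web part lives.
                SPLimitedWebPartManager manager =
                    web.GetLimitedWebPartManager("Pages/results.aspx", PersonalizationScope.Shared);

                foreach (object part in manager.WebParts)
                {
                    var coreResults = part as CoreResultsWebPart;
                    if (coreResults == null) continue;

                    coreResults.UseLocationVisualization = false;  // same as unticking it in the UI
                    coreResults.XslLink = "/Style Library/Search/CustomResults.xsl"; // external XSLT
                    manager.SaveChanges(coreResults);
                }
            }
        }
    }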

    Read the article

  • SQL SERVER – Advanced Data Quality Services with Melissa Data – Azure Data Market

    - by pinaldave
    There has been much fanfare over the new SQL Server 2012, and especially around its new companion product Data Quality Services (DQS). Among the many new features is the addition of this integrated knowledge-driven product that enables data stewards everywhere to profile, match, and cleanse data. In addition to the homegrown rules that data stewards can design and implement, there are also connectors to third party providers that are hosted in the Azure Datamarket marketplace.  In this review, I leverage SQL Server 2012 Data Quality Services, and proceed to subscribe to a third party data cleansing product through the Datamarket to showcase this unique capability. Crucial Questions For the purposes of the review, I used a database I had in an Excel spreadsheet with name and address information. Upon a cursory inspection, there are miscellaneous problems with these records; some addresses are missing ZIP codes, others missing a city, and some records are slightly misspelled or have unparsed suites. With DQS, I can easily add a knowledge base to help standardize my values, such as for state abbreviations. But how do I know that my address is correct? And if my address is not correct, what should it be corrected to? The answer lies in a third party knowledge base by the acknowledged USPS certified address accuracy experts at Melissa Data. Reference Data Services Within DQS there is a handy feature to actually add reference data from many different third-party Reference Data Services (RDS) vendors. DQS simplifies the processes of cleansing, standardizing, and enriching data through custom rules and through service providers from the Azure Datamarket. A quick jump over to the Datamarket site shows me that there are a handful of providers that offer data directly through Data Quality Services. Upon subscribing to these services, one can attach a DQS domain or composite domain (fields in a record) to a reference data service provider, and begin using it to cleanse, standardize, and enrich that data. Besides what I am looking for (address correction and enrichment), it is possible to subscribe to a host of other services including geocoding, IP address reference, phone checking and enrichment, as well as name parsing, standardization, and genderization.  These capabilities extend the data quality that DQS has natively by quite a bit. For my current address correction review, I needed to first sign up to a reference data provider on the Azure Data Market site. For this example, I used Melissa Data’s Address Check Service. They offer free one-month trials, so if you wish to follow along, or need to add address quality to your own data, I encourage you to sign up with them. Once I subscribed to the desired Reference Data Provider, I navigated my browser to the Account Keys within My Account to view the generated account key, which I then inserted into the DQS Client – Configuration under the Administration area. Step by Step to Guide That was all it took to hook in the subscribed provider -Melissa Data- directly to my DQS Client. The next step was for me to attach and map in my Reference Data from the newly acquired reference data provider, to a domain in my knowledge base. On the DQS Client home screen, I selected “New Knowledge Base” under Knowledge Base Management on the left-hand side of the home screen. Under New Knowledge Base, I typed a Name and description of my new knowledge base, then proceeded to the Domain Management screen. 
Here I established a series of domains (fields) and then linked them all together as a composite domain (record set). Using the Create Domain button, I created the following domains according to the fields in my incoming data: Name Address Suite City State Zip I added a Suite column in my domain because Melissa Data has the ability to return missing Suites based on last name or company. And that’s a great benefit of using these third party providers, as they have data that the data steward would not normally have access to. The bottom line is, with these third party data providers, I can actually improve my data. Next, I created a composite domain (fulladdress) and added the (field) domains into the composite domain. This essentially groups our address fields together in a record to facilitate the full address cleansing they perform. I then selected my newly created composite domain and under the Reference Data tab, added my third party reference data provider –Melissa Data’s Address Check- and mapped in each domain that I had to the provider’s Schema. Now that my composite domain has been married to the Reference Data service, I can take the newly published knowledge base and create a project to cleanse and enrich my data. My next task was to create a new Data Quality project, mapping in my data source and matching it to the appropriate domain column, and then kick off the verification process. It took just a few minutes with some progress indicators indicating that it was working. When the process concluded, there was a helpful set of tabs that place the response records into categories: suggested; new; invalid; corrected (automatically); and correct. Accepting the suggestions provided by  Melissa Data allowed me to clean up all the records and flag the invalid ones. It is very apparent that DQS makes address data quality simplistic for any IT professional. Final Note As I have shown, DQS makes data quality very easy. Within minutes I was able to set up a data cleansing and enrichment routine within my data quality project, and ensure that my address data was clean, verified, and standardized against real reference data. As reviewed here, it’s easy to see how both SQL Server 2012 and DQS work to take what used to require a highly skilled developer, and empower an average business or database person to consume external services and clean data. Reference: Pinal Dave (http://blog.sqlauthority.com) Filed under: PostADay, SQL, SQL Authority, SQL Query, SQL Server, SQL Tips and Tricks, SQL Utility, T SQL, Technology Tagged: DQS

    Read the article

  • How to Sync Specific Folders With Dropbox

    - by Matthew Guay
    Would you like to sync specific folders with Dropbox instead of automatically syncing all of your folders to all of your computers?  Here’s how using Selective Sync available in the latest Beta version of Dropbox. Dropbox is a great tool for keeping your important files synced between your computers, and we have covered many interesting things you can do with your Dropbox account.  But until now, there was no way to only sync certain folders with each computer; it was all or nothing.  This could be frustrating if you wanted to store large files from one computer but didn’t want them on a computer with a smaller hard drive.  The latest Beta version of Dropbox allows you to selectively choose which folders to sync between computers. Please Note: This feature is currently only available in the 0.8 beta version of Dropbox. Setup the new Beta Download the new beta version of Dropbox 0.8 (link below); choose the correct download for your system.  Run the installer as normal. It only took a couple seconds to install, though it made the taskbar disappear briefly at the end of the installation on our tests.  Strangely, the installer doesn’t let you know it’s finished installing; if you already had a previous version of Dropbox installed, it will simply start working from your system tray as before.  If this is a new installation of Dropbox, you will be asked to enter your Dropbox account info or create a new account.   Selectively Sync Folders By default, Dropbox will still sync all of your Dropbox folders to all of your computers.  Once this beta is installed, you can choose individual folders or subfolders you don’t want to sync.  Right-click the Dropbox icon in your system tray and select Preferences. Click the Advanced tab on the top, and then click the new Selective Sync button. Now uncheck any folders you don’t want to sync to this computer.  These folders will still exist on your other machines and in the Dropbox web interface, but they will not be downloaded to this computer. The default view only shows your top-level folders in your Dropbox account.  If you wish to sync certain folders but exclude their subfolders, click the Switch to Advanced View button.   Expand any folder and uncheck any subfolders you don’t want to sync.  Notice that the parent folder’s check box is filled now, showing that it is partially synced. Click OK when you’ve made the changes you want.  Dropbox will then make sure you know these folders will stop syncing to this computer; click OK again if you’re sure you don’t want to sync these folders.   Dropbox will cleanup your folder and remove the files and folders you don’t want synced.   Next time you open your Dropbox folder, you’ll notice that the folders we unchecked are no longer in this computer’s Dropbox folder.  They are still in our Dropbox online account, and on any other computers we’re syncing with. If you add a new folder with the same name as a folder you stopped syncing, you’ll notice a grey minus icon over the folder.  This folder will not sync with your other computers or your online Dropbox account. If you want to add these folders back to this computer’s Dropbox, just repeat the steps, this time checking the folders you want to sync.  If you have any folders that were not syncing before, their names will have (Selective Sync Conflict) added to the end, and will sync with all of your computers. Conclusion We’re excited that we can now choose exactly which folders we want synced on each computer.  
Since everything is still synced with the online Dropbox, we can still access any of the folders from anywhere.  This makes your Dropbox much more versatile, and can help you keep the folders synced exactly the way you want. Links Download the new Dropbox 0.8.64 beta Signup for Dropbox

    Read the article

  • ASP.NET, HTTP 404 and SEO

    - by paxer
The other day our SEO Manager told me that he is not happy about the way an ASP.NET application returns HTTP response codes in a Page Not Found (404) situation. I started researching and found interesting things which could probably help others in a similar situation.  1) By default an ASP.NET application handles a 404 error using the following web.config settings           <customErrors defaultRedirect="GenericError.htm" mode="On">             <error statusCode="404" redirect="404.html"/>           </customErrors> However this approach has a problem, and this is actually what our SEO manager was talking about. This is what HTTP returns to the request in a Page Not Found situation: first of all it returns an HTTP 302 Redirect code and then an HTTP 200 - OK code. The problem: we need to have an HTTP 404 response code at the end of the response for SEO purposes.  Solution 1 Let's change our web.config settings a bit to handle the 404 error on an .aspx page instead of a static html page      <customErrors defaultRedirect="GenericError.htm" mode="On">             <error statusCode="404" redirect="404.aspx"/>           </customErrors> And now let's add the following lines to the Page_Load event of the 404.aspx page     protected void Page_Load(object sender, EventArgs e)             {                 Response.StatusCode = 404;             } Now let's run our test again. It has got better: the last HTTP response code is 404, but my SEO manager still was not happy, because we still have a 302 code before it, and as he said this is bad for Google search optimization. So we need to have only the 404 HTTP code, alone. Solution 2 Let's comment out our web.config settings     <!--<customErrors defaultRedirect="GenericError.htm" mode="On">             <error statusCode="404" redirect="404.html"/>           </customErrors>--> Now, let's open our Global.asax file, or if it does not exist in your project - add it. Then we need to add the following logic which will detect if the server error code is 404 (Page Not Found) and handle it.       protected void Application_Error(object sender, EventArgs e)             {                 Exception ex = Server.GetLastError();                 if (ex is HttpException)                 {                     if (((HttpException)(ex)).GetHttpCode() == 404)                         Server.Transfer("~/404.html");                 }                 // Code that runs when an unhandled error occurs                 Server.Transfer("~/GenericError.htm");             } Cool, now let's start our test again... Yehaa, looks like now we have only the 404 HTTP response code. The SEO manager and Google are happy and so am I :) Hope this helps!
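As a small variation on Solution 2, the transferred page still has to be the one emitting the 404 status. A minimal sketch of a handler that sets the status code directly in Application_Error is shown below; the Server.ClearError and TrySkipIisCustomErrors calls are my own additions (the latter applies when running under the IIS 7 integrated pipeline), not part of the original post.

protected void Application_Error(object sender, EventArgs e)
{
    Exception ex = Server.GetLastError();
    HttpException httpEx = ex as HttpException;

    if (httpEx != null && httpEx.GetHttpCode() == 404)
    {
        Server.ClearError();                      // we are handling this error ourselves
        Response.StatusCode = 404;                // single response, single status code
        Response.TrySkipIisCustomErrors = true;   // keep IIS from swapping in its own error page
        Server.Transfer("~/404.aspx");            // 404.aspx renders the friendly message (it can also set StatusCode again, as in Solution 1)
        return;
    }

    Server.Transfer("~/GenericError.htm");
}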

    Read the article

  • ASP.NET MVC 3 Hosting :: ASP.NET MVC 3 First Look

    - by mbridge
MVC 3 View Enhancements MVC 3 introduces two improvements to the MVC view engine: - Ability to select the view engine to use. MVC 3 allows you to select from any of your installed view engines from Visual Studio by selecting Add > View (including the newly introduced ASP.NET “Razor” engine): - Support for the new ASP.NET “Razor” syntax. The newly previewed Razor syntax is a concise, lightweight syntax. MVC 3 Control Enhancements - Global Filters: ASP.NET MVC 3 allows you to specify a filter which applies globally to all Controllers within an app by adding it to the GlobalFilters collection.  The RegisterGlobalFilters() method is now included in the default Global.asax class template and so provides a convenient place to do this, since it will then be called by the Application_Start() method: void RegisterGlobalFilters(GlobalFilterCollection filters) { filters.Add(new HandleLoggingAttribute()); filters.Add(new HandleErrorAttribute()); } void Application_Start() { RegisterGlobalFilters(GlobalFilters.Filters); } - Dynamic ViewModel Property : MVC 3 augments the ViewData API with a new “ViewModel” property on Controller which is of type “dynamic” – and therefore enables you to use the new dynamic language support in C# and VB to pass ViewData items using a cleaner syntax than the current dictionary API. public ActionResult Index() { ViewModel.Message = "Hello World"; return View(); } - New ActionResult Types : MVC 3 includes three new ActionResult types and helper methods: 1. HttpNotFoundResult – indicates that a resource which was requested by the current URL was not found. HttpNotFoundResult will return a 404 HTTP status code to the calling client. 2. PermanentRedirects – The HttpRedirectResult class contains a new Boolean “Permanent” property which is used to indicate that a permanent redirect should be done. Permanent redirects use an HTTP 301 status code.  The Controller class includes three new methods for performing these permanent redirects: RedirectPermanent(), RedirectToRoutePermanent(), and RedirectToActionPermanent(). All of these methods will return an instance of the HttpRedirectResult object with the Permanent property set to true. 3. HttpStatusCodeResult – used for setting an explicit response status code and its associated description. MVC 3 AJAX and JavaScript Enhancements MVC 3 ships with built-in JSON binding support which enables action methods to receive JSON-encoded data and then model-bind it to action method parameters. For example a jQuery client-side script could define a “save” event handler which will be invoked when the save button is clicked on the client. The code in the event handler then constructs a client-side JavaScript “product” object whose fields are populated from HTML input elements. Finally, it uses jQuery’s .ajax() method to POST a JSON based request which contains the product to a /theStore/UpdateProduct URL on the server: $('#save').click(function () { var product = { ProdName: $('#Name').val(), Price: $('#Price').val() }; $.ajax({ url: '/theStore/UpdateProduct', type: "POST", data: JSON.stringify(product), dataType: "json", contentType: "application/json; charset=utf-8", success: function () { $('#message').html('Saved').fadeIn(); }, error: function () { $('#message').html('Error').fadeIn(); } }); return false; }); MVC will allow you to implement the /theStore/UpdateProduct URL on the server by using an action method as below. The UpdateProduct() action method will accept a strongly-typed Product object for a parameter.
MVC 3 can now automatically bind an incoming JSON post value to the .NET Product type on the server without having to write any custom binding. [HttpPost] public ActionResult UpdateProduct(Product product) { // save logic here return null; } MVC 3 Model Validation Enhancements MVC 3 builds on the MVC 2 model validation improvements by adding support for several of the new validation features within the System.ComponentModel.DataAnnotations namespace in .NET 4.0: - Support for the new DataAnnotations metadata attributes like DisplayAttribute. - Support for the improvements made to the ValidationAttribute class which now supports a new IsValid overload that provides more info on the current validation context, like what object is being validated. - Support for the new IValidatableObject interface which enables you to perform model-level validation and also provide validation error messages which are specific to the state of the overall model (a short sketch of this follows at the end of this summary). MVC 3 Dependency Injection Enhancements MVC 3 includes better support for applying Dependency Injection (DI) and also integrating with Dependency Injection/IOC containers. Currently MVC 3 Preview 1 has support for DI in the following places: - Controllers (registering & injecting controller factories and injecting controllers) - Views (registering & injecting view engines, also for injecting dependencies into view pages) - Action Filters (locating and injecting filters) And this is another important blog about Microsoft .NET and technology: - Windows 2008 Blog - SharePoint 2010 Blog - .NET 4 Blog And you can visit here if you're looking for ASP.NET MVC 3 hosting
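As referenced above, here is a minimal sketch of what an IValidatableObject implementation might look like. The Product class shown here and its Price/SalePrice properties are illustrative assumptions for the example, not code from the original article.

using System.Collections.Generic;
using System.ComponentModel.DataAnnotations;

public class Product : IValidatableObject
{
    public string ProdName { get; set; }
    public decimal Price { get; set; }
    public decimal SalePrice { get; set; }

    // model-level rule that spans more than one property
    public IEnumerable<ValidationResult> Validate(ValidationContext validationContext)
    {
        if (SalePrice > Price)
        {
            yield return new ValidationResult(
                "Sale price cannot be higher than the regular price.",
                new[] { "SalePrice" });
        }
    }
}

Validate() runs after the property-level attributes pass, so it is the place for rules that depend on the state of the whole model.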

    Read the article

  • RIDC Accelerator for Portal

    - by Stefan Krantz
What is RIDC? Remote IntraDoc Client is a Java-enabled API that leverages simple transport protocols like Socket, HTTP and JAX/WS to execute content service operations in WebCenter Content Server. Each operation by design in the Content Server executes stateless and returns a complete result for the request. Each request object simply specifies, in a Map format (key and value pairs), what service to call and what parameter settings to apply. The response is built on the same Map format (key and value pairs). The possibilities with RIDC are endless since you can consume any available service (even custom made ones), and RIDC can be executed from any Java SE application that has WebCenter Content Services needs. WebCenter Portal and the example Accelerator RIDC adapter framework WebCenter Portal currently integrates and leverages WebCenter Content Services to enable available use cases in the portal today, like Content Presenter and Doc Lib. However the current use cases only cover a few of the scenarios that the Content Server has to offer; in addition to the existing use cases it is not rare that customer requirements require additional steps and functionality that is provided by WebCenter Content but not part of the use cases from the WebCenter Portal. The good news is RIDC; the second piece of good news is that WebCenter Portal already leverages RIDC and has a connection management framework in place. The million dollar question here is: how can I leverage this infrastructure for my custom use cases? Oracle A-Team has during its interactions produced an accelerator adapter framework that will reuse and leverage the existing connections provisioned in the WebCenter Portal application (works for WebCenter Spaces as well), as well as a very comprehensive design pattern to minimize the work involved when exposing functionality. Let me introduce the RIDCCommon framework for accelerating WebCenter Content consumption from WebCenter Portal, including Spaces. How do I get started? Through a few easy steps you will be on your way: Extract the zip file RIDCCommon.zip to the WebCenter Portal Application file structure (PortalApp) Open your Portal Application in JDeveloper (PS4/PS5) and select to open the project in your application - this will add the project as a member of the application Update the Portal project dependencies to include the new RIDCCommon project Make sure that your WebCenter Content Server connection is marked as primary (a checkbox at the top of the connection properties form) You should by this stage have a similar structure in your JDeveloper Application: Project Portal Project PortalWebAssets Project RIDCCommon Since the API comes with some example operations that have already been exposed as DataControl actions, if you open the Data Controls accordion you should see the following: How do I implement my own operation?
Create a new Java Class in for example com.oracle.ateam.portal.ridc.operation call it (GetDocInfoOperation) Extend the abstract class com.oracle.ateam.portal.ridc.operation.RIDCAbstractOperation and implement the interface com.oracle.ateam.portal.ridc.operation.IRIDCOperation The only method you actually are required to implement is execute(RIDCManager, IdcClient, IdcContext) The best practice to set object references for the operation is through the Constructor, example below public GetDocInfoOperation(String dDocName)By leveraging the constructor you can easily force the implementing class to pass right information, you can also overload the Constructor with more or less parameters as required Implement the execute method, the work you supposed to execute here is creating a new request binder and retrieve a response binder with the information in the request binder.In this case the dDocName for which we want the DocInfo Secondly you have to process the response binder by extracting the information you need from the request and restore this information in a simple POJO Java BeanIn the example below we do this in private void processResult(DataBinder responseData) - the new SearchDataObject is a Member of the GetDocInfoOperation so we can return this from a access method. Since the RIDCCommon API leverage template pattern for the operations you are now required to add a method that will enable access to the result after the execution of the operationIn the example below we added the method public SearchDataObject getDataObject() - this method returns the pre processed SearchDataObject from the execute method  This is it, as you can see on the code below you do not need more than 32 lines of very simple code 1: public class GetDocInfoOperation extends RIDCAbstractOperation implements IRIDCOperation { 2: private static final String DOC_INFO_BY_NAME = "DOC_INFO_BY_NAME"; 3: private String dDocName = null; 4: private SearchDataObject sdo = null; 5: 6: public GetDocInfoOperation(String dDocName) { 7: super(); 8: this.dDocName = dDocName; 9: } 10:   11: public boolean execute(RIDCManager manager, IdcClient client, 12: IdcContext userContext) throws Exception { 13: DataBinder dataBinder = createNewRequestBinder(DOC_INFO_BY_NAME); 14: dataBinder.putLocal(DocumentAttributeDef.NAME.getName(), dDocName); 15: 16: DataBinder responseData = getResponseBinder(dataBinder); 17: processResult(responseData); 18: return true; 19: } 20: 21: private void processResult(DataBinder responseData) { 22: DataResultSet rs = responseData.getResultSet("DOC_INFO"); 23: for(DataObject dobj : rs.getRows()) { 24: this.sdo = new SearchDataObject(dobj); 25: } 26: super.setMessage(responseData.getLocal(ATTR_MESSAGE)); 27: } 28: 29: public SearchDataObject getDataObject() { 30: return this.sdo; 31: } 32: } How do I execute my operation? 
In the previous section we described how to create a operation, so by now you should be ready to execute the operation Step one either add a method to the class  com.oracle.ateam.portal.datacontrol.ContentServicesDC or a class of your own choiceRemember the RIDCManager is a very light object and can be created where needed Create a method signature look like this public SearchDataObject getDocInfo(String dDocName) throws Exception In the method body - create a new instance of GetDocInfoOperation and meet the constructor requirements by passing the dDocNameGetDocInfoOperation docInfo = new GetDocInfoOperation(dDocName) Execute the operation via the RIDCManager instance rMgr.executeOperation(docInfo) Return the result by accessing it from the executed operationreturn docInfo.getDataObject() 1: private RIDCManager rMgr = null; 2: private String lastOperationMessage = null; 3:   4: public ContentServicesDC() { 5: super(); 6: this.rMgr = new RIDCManager(); 7: } 8: .... 9: public SearchDataObject getDocInfo(String dDocName) throws Exception { 10: GetDocInfoOperation docInfo = new GetDocInfoOperation(dDocName); 11: boolean boolVal = rMgr.executeOperation(docInfo); 12: lastOperationMessage = docInfo.getMessage(); 13: return docInfo.getDataObject(); 14: }   Get the binaries! The enclosed code in a example that can be used as a reference on how to consume and leverage similar use cases, user has to guarantee appropriate quality and support.  Download link: https://blogs.oracle.com/ATEAM_WEBCENTER/resource/stefan.krantz/RIDCCommon.zip RIDC API Referencehttp://docs.oracle.com/cd/E23943_01/apirefs.1111/e17274/toc.htm

    Read the article

  • Some New .NET Toys (Repost)

    - by Kevin Grossnicklaus
    Last week I was fortunate enough to spend time in Redmond on Microsoft’s campus for the 2011 Microsoft MVP Summit. It was great to hang out with a number of old friends and get the opportunity to talk tech with the various product teams up at Microsoft. The weather wasn’t exactly sunny but Microsoft always does a great job with the Summit and everyone had a blast (heck, I even got to run the bases at SafeCo field) While much of what we saw is covered under NDA, there a ton of great things in the pipeline from Microsoft and many things that are already available (or just became so) that I wasn’t necessarily aware of. The purpose of this post is to share some of the info I learned on resources and tools available to .NET developers today. Please let me know if you have any questions (or if you know of something else cool which might benefit others). Enjoy! Visual Studio 2010 SP1 Microsoft has issued the RTM release of Visual Studio 2010 SP1. You can download the full SP1 on MSDN as of today (March 10th to the general public) and take advantage of such things as: Silverlight 4 is included in the box (as opposed to a separate install) Silverlight 4 Profiling WCF RIA Services SP1 Intellitrace for 64-bit and SharePoint ASP.NET now easily supports IIS Express and SQL CE Want a description of all that’s new beyond the above biased list (which arguably only contains items I think are important)? Check out this KB article. Portable Library Tools CTP Without much fanfare Microsoft has released a CTP of a new add-in to Visual Studio 2010 which simplifies code sharing between projects targeting different runtimes (i.e. Silverlight, WPF, Win7 Phone, XBox). With this Add-In installed you can add a new project of type “Portable Library” and specify which platforms you wish to target. Once that is done, any code added to this library will be limited to use only features which are common to all selected frameworks. Other projects can now reference this portable library and be provided assemblies custom built to their environment. This greatly simplifies the current process of sharing linked files between platforms like WPF and Silverlight. You can find out more about this CTP and how it works on this great blog post. Visual Studio Async CTP Microsoft has also released a CTP of a set of language and framework enhancements to provide a much more powerful asynchronous programming model. Due to the focus on async programming in all types of platforms (and it being the ONLY option in Silverlight and Win7 phone) a move towards a simpler and more understandable model is always a good thing. This CTP (called Visual Studio Async CTP) can be downloaded here. You can read more about this CTP on this blog post. MSDN Code Samples Gallery Microsoft has also launched new code samples gallery on their MSDN site: http://code.msdn.microsoft.com/. This site allows you to easily search for small samples of code related to a particular technology or platform. If a sample of code you are looking for is not found, you can request one via the site and other developers can see your request and provide a sample to the site to suit your needs. You can also peruse requested samples and, if you find a scenario where you can provide value, upload your own sample for the benefit of others. Samples are packaged into the VS .vsix format and include any necessary references/dependencies. 
By using .vsix as the deployment mechanism, as samples are installed from the site they are kept in your Visual Studio 2010 Samples Gallery for your future reference. If you get a chance, check out the site and see how it is done. Although a somewhat simple concept, I was very impressed with their implementation and the way they went about trying to suit a need. I’ll definitely be looking there in the future as I need something or want to share something. MSDN Search Capabilities Another item I learned recently and was not aware of (that might seem trivial to some) is the power of the MSDN site’s search capabilities. Between the Code Samples Gallery described above and the search enhancements on MSDN, Microsoft is definitely investing in their platform to help provide developers of all skill levels the tools and resources they need to be successful. What do I mean by the MSDN search capability and why should you care? If you go to the MSDN home page (http://msdn.microsoft.com) and use the “Search MSDN with Bing” box at the very top of the page you will see some very interesting results. First, the search doesn’t just search the MSDN Library; it searches: MSDN Library All Microsoft Blogs CodePlex StackOverflow Downloads MSDN Magazine Support Knowledgebase (I’m not sure it even ends there but the above are all I know of) Beyond just searching all the above locations, the results are formatted very nicely to give some contextual information based on where the result came from. For example, if a keyword search returned results from CodePlex, each row in the search results screen would include a large amount of information specific to CodePlex such as: Looking at the above results immediately tells you everything from the page views to the CodePlex ratings. All in all, knowing that this much information is indexed and available from a single search location will lead me to utilize this as one of my initial searches for development information.

    Read the article

  • How to Increase the VMWare Boot Screen Delay

    - by Trevor Bekolay
    If you’ve wanted to try out a bootable CD or USB flash drive in a virtual machine environment, you’ve probably noticed that VMWare’s offerings make it difficult to change the boot device. We’ll show you how to change these options. You can do this either for one boot, or permanently for a particular virtual machine. Even experienced users of VMWare Player or Workstation may not recognize the screen above – it’s the virtual machine’s BIOS, which in most cases flashes by in the blink of an eye. If you want to boot up the virtual machine with a CD or USB key instead of the hard drive, then you’ll need more than an eye’s-blink to press Escape and bring up the Boot Menu. Fortunately, there is a way to introduce a boot delay that isn’t exposed in VMWare’s graphical interface – you have to edit the virtual machine’s settings file (a .vmx file) manually. Editing the Virtual Machine’s .vmx Find the .vmx file that contains the settings for your virtual machine. You chose a location for this when you created the virtual machine – in Windows, the default location is a folder called My Virtual Machines in your My Documents folder. In VMWare Workstation, the location of the .vmx file is listed on the virtual machine’s tab. If in doubt, search your hard drive for .vmx files. If you don’t want to use Windows default search, an awesome utility that locates files instantly is Everything. Open the .vmx file with any text editor. Somewhere in this file, enter in the following line… save the file, then close out of the text editor: bios.bootdelay = 20000 This will introduce a 20 second delay when the virtual machine loads up, giving you plenty of time to press the Escape button and access the boot menu. The number in this line is just a value in milliseconds, so for a five second boot delay, enter 5000, and so on. Change Boot Options Temporarily Now, when you boot up your virtual machine, you’ll have plenty of time to enter one of the keystrokes listed at the bottom of the BIOS screen on boot-up. Press Escape to bring up the Boot Menu. This allows you to select a different device to boot from – like a CD drive. Your selection will be forgotten the next time you boot up this virtual machine. Change Boot Options Permanently When the BIOS screen comes up, press F2 to enter the BIOS Setup menu. Switch to the Boot tab, and change the ordering of the items by pressing the “+” key to move items up on the list, and the “-” key to move items down the list. We’ve switched the order so that the CD-ROM Drive boots first. Once you make this change permanent, you may want to re-edit the .vmx file to remove the boot delay. Boot from a USB Flash Drive One thing that is noticeably missing from the list of boot options is a USB device. VMWare’s BIOS just does not allow this, but we can get around that limitation using the PLoP Boot Manager that we’ve previously written about. And as a bonus, since everything is virtual anyway, there’s no need to actually burn PLoP to a CD. Open the settings for the virtual machine you want to boot with a USB drive. Click on Add… at the bottom of the settings screen, and select CD/DVD Drive. Click Next. Click the Use ISO Image radio button, and click Next. Browse to find plpbt.iso or plpbtnoemul.iso from the PLoP zip file. Ensure that Connect at power on is checked, and then click Finish. Click OK on the main Virtual Machine Settings page. Now, if you use the steps above to boot using that CD/DVD drive, PLoP will load, allowing you to boot from a USB drive! 
Conclusion We’re big fans of VMWare Player and Workstation, as they let us try out a ton of geeky things without worrying about harming our systems. By introducing a boot delay, we can add bootable CDs and USB drives to the list of geeky things we can try out. Download PLoP Boot Manager

    Read the article

  • Integrate Microsoft Translator into your ASP.Net application

    - by sreejukg
In this article I am going to explain how easily you can integrate the Microsoft Translator API into your ASP.Net application. Why do we need a translation API? Once you publish a website, you are opening a channel to a global audience, so making the web content available in only one language doesn’t cover all of your audience. Especially when you are offering products/services it is important to provide content in multiple languages. Users will be more comfortable when they see the content in their native language. How to achieve this? Hiring translators to translate the content into all your users’ languages will cost you a lot of money, and it is not a one-time job; you need to translate the content on the go. What is the alternative? We need to look at machine translation. Thankfully there are some translation engines available that give you API-level access, so that you can automatically translate the content and display it to the user. The Microsoft Translator API is an excellent set of web service APIs that allows developers to use machine translation technology in their own applications. The Microsoft Translator API is offered through the Windows Azure Marketplace. In order to access the data services published in the Windows Azure Marketplace, you need to have an account. The registration process is simple, and it is common for all the services offered through the marketplace. Last year I had written an article about the Bing Search API, where I covered the registration process. You can refer to the article here. http://weblogs.asp.net/sreejukg/archive/2012/07/04/integrate-bing-search-api-to-asp-net-application.aspx Once you have registered with the Windows Azure Marketplace, you will get your APP ID. Now you can visit the Microsoft Translator page and click on the sign up button. http://datamarket.azure.com/dataset/bing/microsofttranslator As you can see, there are several options available for you to subscribe to. There is a free version available, great. Click on the sign up button under the package that suits you. Clicking on the sign up button will bring up the sign up form, where you need to agree to the terms and conditions and go ahead. You need to have a Windows Live account in order to sign up for any service available in the Windows Azure Marketplace. Once you have signed up successfully, you will receive the thank you page. You can download the C# class library from here so that the integration can be made without writing much code. The C# file name is TranslatorContainer.cs. At any point of time, you can visit https://datamarket.azure.com/account/datasets to see the applications you are subscribed to. Clicking on the Use link next to each service will give you the details of the application. You need to note the primary account key and URL of the service to use in your application. Now let us start our ASP.Net project. I have created an empty ASP.Net web application using Visual Studio 2010 and named it Translator Sample; any name could work. By default, the web application in solution explorer looks as follows. Now right click the project and select Add -> Existing Item and then browse to the TranslatorContainer.cs. Now let us create a page where the user enters some data and performs the translation. I have added a new web form to the project with the name Translate.aspx. I have placed one textbox control for the user to type the text to translate, a dropdown list to select the target language, a label to display the translated text and a button to perform the translation.
For the dropdown list I have selected some languages supported by Microsoft translator. You can get all the supported languages with their codes from the below link. http://msdn.microsoft.com/en-us/library/hh456380.aspx The form looks as below in the design surface of Visual Studio. All the class libraries in the windows market place requires reference to System.Data.Services.Client, let us add the reference. You can find the documentation of how to use the downloaded class library from the below link. http://msdn.microsoft.com/en-us/library/gg312154.aspx Let us evaluate the translatorContainer.cs file. You can refer the code and it is self-explanatory. Note the namespace name used (Microsoft), you need to add the namespace reference to your page. I have added the following event for the translate button. The code is self-explanatory. You are creating an object of TranslatorContainer class by passing the translation service URL. Now you need to set credentials for your Translator container object, which will be your account key. The TranslatorContainer support a method that accept a text input, source language and destination language and returns DataServiceQuery<Translation>. Let us see this working, I just ran the application and entered Good Morning in the Textbox. Selected target language and see the output as follows. It is easy to build great translator applications using Microsoft translator API, and there is a reasonable amount of translation you can perform in your application for free. For enterprises, you can subscribe to the appropriate package and make your application multi-lingual.
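The event handler for the translate button is described above but its code is not reproduced in the text, so here is a minimal sketch of what it might look like. The control names (txtSource, ddlLanguage, lblResult), the accountKey field and the service URI are my own placeholders, and the Translate(text, to, from) method returning DataServiceQuery<Translation> is assumed from the downloaded TranslatorContainer.cs file – check your copy for the exact member names.

protected void btnTranslate_Click(object sender, EventArgs e)
{
    // the generated container class lives in the Microsoft namespace
    var service = new Microsoft.TranslatorContainer(
        new Uri("https://api.datamarket.azure.com/Bing/MicrosoftTranslator/"));

    // DataMarket services accept the account key as both user name and password
    service.Credentials = new System.Net.NetworkCredential(accountKey, accountKey);

    // translate the entered text from English to the selected target language
    var query = service.Translate(txtSource.Text, ddlLanguage.SelectedValue, "en");
    var translation = query.Execute().FirstOrDefault();   // requires using System.Linq

    if (translation != null)
    {
        lblResult.Text = translation.Text;
    }
}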

    Read the article

  • Creating Visual Studio projects that only contain static files

    - by Eilon
    Have you ever wanted to create a Visual Studio project that only contained static files and didn’t contain any code? While working on ASP.NET MVC we had a need for exactly this type of project. Most of the projects in the ASP.NET MVC solution contain code, such as managed code (C#), unit test libraries (C#), and Script# code for generating our JavaScript code. However, one of the projects, MvcFuturesFiles, contains no code at all. It only contains static files that get copied to the build output folder: As you may well know, adding static files to an existing Visual Studio project is easy. Just add the file to the project and in the property grid set its Build Action to “Content” and the Copy to Output Directory to “Copy if newer.” This works great if you have just a few static files that go along with other code that gets compiled into an executable (EXE, DLL, etc.). But this solution does not work well if the projects only contains static files and has no compiled code. If you create a new project in Visual Studio and add static files to it you’ll still get an EXE or DLL copied to the output folder, despite not having any actual code. We wanted to avoid having a teeny little DLL generated in the output folder. In ASP.NET MVC 2 we came up with a simple solution to this problem. We started out with a regular C# Class Library project but then edited the project file to alter how it gets built. The critical part to get this to work is to define the MSBuild targets for Build, Clean, and Rebuild to perform custom tasks instead of running the compiler. The Build, Clean, and Rebuild targets are the three main targets that Visual Studio requires in every project so that the normal UI functions properly. If they are not defined then running certain commands in Visual Studio’s Build menu will cause errors. 
Once you create the class library project there are a few easy steps to change it into a static file project: The first step in editing the csproj file is to remove the reference to the Microsoft.CSharp.targets file because the project doesn’t contain any C# code: <Import Project="$(MSBuildToolsPath)\Microsoft.CSharp.targets" /> The second step is to define the new Build, Clean, and Rebuild targets to delete and then copy the content files: <Target Name="Build"> <Copy SourceFiles="@(Content)" DestinationFiles="@(Content->'$(OutputPath)%(RelativeDir)%(Filename)%(Extension)')" /> </Target> <Target Name="Clean"> <Exec Command="rd /s /q $(OutputPath)" Condition="Exists($(OutputPath))" /> </Target> <Target Name="Rebuild" DependsOnTargets="Clean;Build"> </Target> The third and last step is to add all the files to the project as normal Content files (as you would do in any project type). To see how we did this in the ASP.NET MVC 2 project you can download the source code and inspect the MvcFuturesFiles.csproj project file. If you’re working on a project that contains many static files I hope this solution helps you!
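Pulling those three steps together, a static-files-only project file ends up looking roughly like the sketch below. This is an illustrative skeleton rather than the actual MvcFuturesFiles.csproj: the file names are invented, and properties Visual Studio normally adds (ProjectGuid, target framework, configurations) are omitted for brevity.

<Project ToolsVersion="4.0" DefaultTargets="Build"
         xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
  <PropertyGroup>
    <OutputPath>bin\</OutputPath>
  </PropertyGroup>
  <ItemGroup>
    <!-- every static file is included as Content -->
    <Content Include="Scripts\SomeScript.js" />
    <Content Include="Styles\SomeStyles.css" />
  </ItemGroup>
  <!-- no Microsoft.CSharp.targets import; Build/Clean/Rebuild are defined by hand -->
  <Target Name="Build">
    <Copy SourceFiles="@(Content)"
          DestinationFiles="@(Content->'$(OutputPath)%(RelativeDir)%(Filename)%(Extension)')" />
  </Target>
  <Target Name="Clean">
    <Exec Command="rd /s /q $(OutputPath)" Condition="Exists($(OutputPath))" />
  </Target>
  <Target Name="Rebuild" DependsOnTargets="Clean;Build" />
</Project>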

    Read the article

  • UserAppDataPath in WPF

    - by psheriff
In Windows Forms applications you were able to get to your user's roaming profile directory very easily using the Application.UserAppDataPath property. This folder allows you to store information for your program in a custom folder specifically for your program. The format of this directory looks like this: C:\Users\YOUR NAME\AppData\Roaming\COMPANY NAME\APPLICATION NAME\APPLICATION VERSION For example, on my Windows 7 64-bit system, this folder would look like this for a Windows Forms Application: C:\Users\PSheriff\AppData\Roaming\PDSA, Inc.\WindowsFormsApplication1\1.0.0.0 For some reason Microsoft did not expose this property from the Application object of WPF applications. I guess they think that we don't need this property in WPF? Well, sometimes we still do need to get at this folder. You have two choices on how to retrieve this property: add a reference to System.Windows.Forms.dll to your WPF application and use this property directly, or write your own method to build the same path. If you add a reference to System.Windows.Forms.dll you will need to use System.Windows.Forms.Application.UserAppDataPath to access this property. Create a GetUserAppDataPath Method in WPF If you want to build this path you can do so with just a few method calls in WPF using Reflection. The code below shows this fairly simple method to retrieve the same folder as shown above.

C#

using System;
using System.Reflection;

public string GetUserAppDataPath()
{
  string path = string.Empty;
  Assembly assm;
  Type at;
  object[] r;

  // Get the .EXE assembly
  assm = Assembly.GetEntryAssembly();
  // Get a 'Type' of the AssemblyCompanyAttribute
  at = typeof(AssemblyCompanyAttribute);
  // Get a collection of custom attributes from the .EXE assembly
  r = assm.GetCustomAttributes(at, false);
  // Get the Company Attribute
  AssemblyCompanyAttribute ct = ((AssemblyCompanyAttribute)(r[0]));

  // Build the User App Data Path: Company\Application\Version
  path = Environment.GetFolderPath(Environment.SpecialFolder.ApplicationData);
  path += @"\" + ct.Company;
  path += @"\" + assm.GetName().Name;      // application (assembly) name
  path += @"\" + assm.GetName().Version.ToString();

  return path;
}

Visual Basic

Public Function GetUserAppDataPath() As String
  Dim path As String = String.Empty
  Dim assm As Assembly
  Dim at As Type
  Dim r As Object()

  ' Get the .EXE assembly
  assm = Assembly.GetEntryAssembly()
  ' Get a 'Type' of the AssemblyCompanyAttribute
  at = GetType(AssemblyCompanyAttribute)
  ' Get a collection of custom attributes from the .EXE assembly
  r = assm.GetCustomAttributes(at, False)
  ' Get the Company Attribute
  Dim ct As AssemblyCompanyAttribute = DirectCast(r(0), AssemblyCompanyAttribute)

  ' Build the User App Data Path: Company\Application\Version
  path = Environment.GetFolderPath(Environment.SpecialFolder.ApplicationData)
  path &= "\" & ct.Company
  path &= "\" & assm.GetName().Name      ' application (assembly) name
  path &= "\" & assm.GetName().Version.ToString()

  Return path
End Function

Summary Getting the User Application Data Path folder in WPF is fairly simple with just a few method calls using Reflection. Of course, there is absolutely no reason you cannot just add a reference to the System.Windows.Forms.dll to your WPF application and use that Application object. After all, System.Windows.Forms.dll is a part of the .NET Framework and can be used from WPF with no issues at all. NOTE: Visit http://www.pdsa.com/downloads to get more tips and tricks like this one. Good Luck with your Coding, Paul Sheriff
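As a quick usage sketch (my own addition – the Settings.xml file name is just an example), the returned folder is typically created on first use before writing to it:

// requires using System.IO;
string settingsFolder = GetUserAppDataPath();
Directory.CreateDirectory(settingsFolder);   // no-op if the folder already exists
string settingsFile = Path.Combine(settingsFolder, "Settings.xml");
File.WriteAllText(settingsFile, "<settings />");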

    Read the article

  • SQL SERVER – The Story of a Lesser Known Startup Parameter in SQL Server – Guest Post by Balmukund Lakhani

    - by Pinal Dave
This is a fantastic blog post from my dear friend Balmukund ( blog | twitter | facebook ). He had presented a fantastic session in our last UG and there were lots of requests from attendees that he blog about it. Well, here is the blog post about that very popular UG session. Let us read the entire blog post in the voice of Balmukund himself. During my last session at the SQL Bangalore User Group (Facebook) meeting, I was lucky enough to deliver a session on SQL Server startup issues. The name of the session was “SQL Engine Starting Trouble – How to start?” From the feedback, I realized that one of the “not well known” startup parameters is “-m”. Okay, you might say “I know that this is used to start SQL in single user mode”. But what you might not know is that you can pass a string with -m which has special meaning and use. I have used this parameter in my blog here but it looks like not many of you have seen that. It happens most of the time that when we want to start SQL Server in single user mode, someone else makes a connection before we can. The only choice we have is to repeat the same process again till we succeed. Some smart DBAs may disable the remote network protocols (TCP/IP and Named Pipes) of the SQL instance and allow only local connections to SQL. Once the activity is complete, our dear smart DBA has to remember to re-enable the network protocols. Sometimes, it may be a local service or application getting a connection to SQL before we can. There is a better way to deal with it. Yes, you have guessed it correctly: the -m parameter with a string. Since I work with the SQL Product Support team, I may know a few more undocumented commands and parameters, but this is not undocumented stuff. It’s already documented in Books Online. So in this blog, I am going to show a demo of its usage. As the documentation says, “Do not use this option as a security feature.” So please read this blog as a knowledge enhancer and troubleshooting aid, not a security feature. On my laptop, I have a default instance of SQL Server 2012 and here is what we would see in Configuration Manager. Now, I would go ahead and stop the SQL Service by selecting SQL Server (MSSQLServer) > Right Click > Stop. There are multiple ways to start SQL with a startup parameter. 1) Use the Net Start command from the command prompt: Net Start MSSQLServer /mSQLCMD The above command is the simplest way to add a startup parameter to SQL. This parameter would be cleared once we stop and start SQL. 2) Add the startup parameter via Configuration Manager. The steps are already listed here. We need to add -mSQLCMD. If we compare 1 and 2, it’s clear that unless we modify the startup parameter and remove -m, it would remain in effect. 3) Start the SQL Service via the command line: SQLServr.exe –mSQLCMD –s<InstanceName> Wait, what does SQLCMD mean with /m? It’s the instruction to start SQL Server in single user mode and allow only the application which identifies itself as SQLCMD. Any other application would fail with a Login Failed for User error message. It is important to note that the string is case sensitive. This value should be picked from the program_name column in sys.dm_exec_sessions. I have made a connection using SQLCMD and as we can see it comes up as upper case “SQLCMD”. If we want only Management Studio query windows to connect then we need to give -m"Microsoft SQL Server Management Studio - Query" as the startup parameter. In the example below, I have given it as SQLCMd (lower case d at the end) and we notice that we are not able to connect to the SQL instance.
The above proves that the parameter works as expected and that it’s case sensitive. The error log would show the information below. How to get the error log location? I have already blogged about it. Hope you have learned something new. Reference: Pinal Dave (http://blog.sqlauthority.com) Filed under: PostADay, SQL, SQL Authority, SQL Query, SQL Server, SQL Server Management Studio, SQL Tips and Tricks, T SQL, Technology
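If you want to check exactly which application name your tool reports (and therefore which string to pass with -m), a quick query against the DMV mentioned above will show it; sys.dm_exec_sessions exposes the value in its program_name column:

-- run this from the tool you plan to allow (SQLCMD, an SSMS query window, etc.)
SELECT session_id, login_name, program_name
FROM   sys.dm_exec_sessions
WHERE  session_id = @@SPID;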

    Read the article

  • [Windows 8] Application bar popup button

    - by Benjamin Roux
Here is a small control to create an application bar button which will display content in a popup when the button is clicked. Visually it gives this So how to create this? First you have to use the AppBarPopupButton control below.   namespace Indeed.Controls { public class AppBarPopupButton : Button { public FrameworkElement PopupContent { get { return (FrameworkElement)GetValue(PopupContentProperty); } set { SetValue(PopupContentProperty, value); } } public static readonly DependencyProperty PopupContentProperty = DependencyProperty.Register("PopupContent", typeof(FrameworkElement), typeof(AppBarPopupButton), new PropertyMetadata(null, (o, e) => (o as AppBarPopupButton).CreatePopup())); private Popup popup; private SerialDisposable sizeChanged = new SerialDisposable(); protected override void OnTapped(Windows.UI.Xaml.Input.TappedRoutedEventArgs e) { base.OnTapped(e); if (popup != null) { var transform = this.TransformToVisual(Window.Current.Content); var offset = transform.TransformPoint(default(Point)); sizeChanged.Disposable = PopupContent.ObserveSizeChanged().Do(_ => popup.VerticalOffset = offset.Y - (PopupContent.ActualHeight + 20)).Subscribe(); popup.HorizontalOffset = offset.X + 24; popup.DataContext = this.DataContext; popup.IsOpen = true; } } private void CreatePopup() { popup = new Popup { IsLightDismissEnabled = true }; popup.Closed += (o, e) => this.GetParentOfType<AppBar>().IsOpen = false; popup.ChildTransitions = new Windows.UI.Xaml.Media.Animation.TransitionCollection(); popup.ChildTransitions.Add(new Windows.UI.Xaml.Media.Animation.PopupThemeTransition()); var container = new Grid(); container.Children.Add(PopupContent); popup.Child = container; } } } The ObserveSizeChanged method is just an extension method which observes the SizeChanged event (using Reactive Extensions - the Rx-Metro package on NuGet). If you’re not familiar with Rx, you can replace this line (and the SerialDisposable stuff) by a simple subscription to the SizeChanged event (using +=) but don’t forget to unsubscribe from it!
public static IObservable<Unit> ObserveSizeChanged(this FrameworkElement element) { return Observable.FromEventPattern<SizeChangedEventHandler, SizeChangedEventArgs>( o => element.SizeChanged += o, o => element.SizeChanged -= o) .Select(_ => Unit.Default); } The GetParentOfType extension method just retrieves the first parent of the given type (it’s a common extension method that every Windows 8 developer should have created! – see the sketch at the end of this post). You can of course tweak the control (for example if you want to center the content to the button or anything else) to fit your needs. How to use this control? It’s very simple, in an AppBar control just add it and define the PopupContent property. <ic:AppBarPopupButton Style="{StaticResource RefreshAppBarButtonStyle}" HorizontalAlignment="Left"> <ic:AppBarPopupButton.PopupContent> <Grid> [...] </Grid> </ic:AppBarPopupButton.PopupContent> </ic:AppBarPopupButton> When the button is clicked the popup is displayed. When the popup is closed, the app bar is closed too. I hope this will help you !
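Since the GetParentOfType helper is referenced but not shown above, here is a minimal sketch of what it might look like – my own version, walking up the visual tree with VisualTreeHelper (it assumes using Windows.UI.Xaml and Windows.UI.Xaml.Media):

public static class VisualTreeExtensions
{
    public static T GetParentOfType<T>(this DependencyObject element) where T : DependencyObject
    {
        // walk up the visual tree until a parent of the requested type is found
        DependencyObject parent = VisualTreeHelper.GetParent(element);
        while (parent != null && !(parent is T))
        {
            parent = VisualTreeHelper.GetParent(parent);
        }
        return parent as T;
    }
}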

    Read the article

  • Creating a Reverse Proxy with URL Rewrite for IIS

    - by OWScott
    There are times when you need to reverse proxy through a server. The most common example is when you have an internal web server that isn’t exposed to the internet, and you have a public web server accessible to the internet. If you want to serve up traffic from the internal web server, you can do this through the public web server by creating a tunnel (aka reverse proxy). Essentially, you can front the internal web server with a friendly URL, even hiding custom ports. For example, consider an internal web server with a URL of http://10.10.0.50:8111. You can make that available through a public URL like http://tools.mysite.com/ as seen in the following image. The URL can be made public or it can be used for your internal staff and have it password protected and/or locked down by IP address. This is easy to do with URL Rewrite and IIS. You will also need Application Request Routing (ARR) installed even though for a simple reverse proxy you won’t use most of ARR’s functionality. If you don’t already have URL Rewrite and ARR installed you can do so easily with the Web Platform Installer. A lot can be said about reverse proxies and many different situations and ways to route the traffic and handle different URL patterns. However, my goal here is to get you up and going in the easiest way possible. Then you can dig in deeper after you get the base configuration in place. URL Rewrite makes a reverse proxy very easy to set up. Note that the URL Rewrite Add Rules template doesn’t include Reverse Proxy at the server level. That’s not to say that you can’t create a server-level reverse proxy, but the URL Rewrite rules template doesn’t help you with that. Getting Started First you must create a website on your public web server that has the public bindings that you need. Alternately, you can use an existing site and route using conditions for certain traffic. After you’ve created your site then open up URL Rewrite at the site level. Using the “Add Rule(s)…” template that is opened from the right-hand actions pane, create a new Reverse Proxy rule. If you receive a prompt (the first time) that the proxy functionality needs to be enabled, select OK. This is telling you that a proxy can route traffic outside of your web server, which happens to be our goal in this case. Be aware that reverse proxy rules can be dangerous if you open sites from inside you network to the world, so just be aware of what you’re doing and why. The next and final step of the template asks a few questions. The first textbox asks the name of the internal web server. In our example, it’s 10.10.0.50:8111. This can be any URL, including a subfolder like internal.mysite.com/blog. Don’t include the http or https here. The template assumes that it’s not entered. You can choose whether to perform SSL Offloading or not. If you leave this checked then all requests to the internal server will be over HTTP regardless of the original web request. This can help with performance and SSL bindings if all requests are within a trusted network. If the network path between the two web servers is not completely trusted and safe then uncheck this. Next, the template enables you to create an outbound rule. This is used to rewrite links in the page to look like your public domain name rather than the internal domain name. Outbound rules have a lot of CPU overhead because the entire web content needs to be parsed and updated. However, if you need it, then it’s well worth the extra CPU hit on the web server. 
If you check the “Rewrite the domain names of the links in HTTP responses” checkbox then the From textbox will be filled in with what you entered for the inbound rule. You can enter your friendly public URL for the outbound rule. This will essentially replace any reference to 10.10.0.50:8111 (or whatever you enter) with tools.mysite.com in all <a>, <form>, and <img> tags on your site. That’s it! Well, there is a lot more that you can do, but this will give you the base configuration. You can now visit tools.mysite.com on your public web server and it will serve up the site from your internal web server. You should see two rules show up; one inbound and one outbound. You can edit these, add conditions, and tweak them further as needed. One common issue that can occur without outbound rules has to do with compression. If you run into errors with the new proxied site, try turning off compression to confirm if that’s the issue. Here’s a link with details on how to deal with compression and outbound rules. I hope this was helpful to get started and to see how easy it is to create a simple reverse proxy using URL Rewrite for IIS.
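For reference, the rules the template writes into the site's web.config end up looking roughly like the sketch below. This is an illustrative reconstruction using the example addresses from this article, not a copy of any generated file – rule names, the precondition, and the exact patterns can differ on your system.

<system.webServer>
  <rewrite>
    <rules>
      <!-- inbound: forward every request to the internal web server -->
      <rule name="ReverseProxyInboundRule1" stopProcessing="true">
        <match url="(.*)" />
        <action type="Rewrite" url="http://10.10.0.50:8111/{R:1}" />
      </rule>
    </rules>
    <outboundRules>
      <!-- outbound: rewrite links in HTML responses back to the public host name -->
      <rule name="ReverseProxyOutboundRule1" preCondition="ResponseIsHtml1">
        <match filterByTags="A, Form, Img" pattern="^http(s)?://10.10.0.50:8111/(.*)" />
        <action type="Rewrite" value="http{R:1}://tools.mysite.com/{R:2}" />
      </rule>
      <preConditions>
        <preCondition name="ResponseIsHtml1">
          <add input="{RESPONSE_CONTENT_TYPE}" pattern="^text/html" />
        </preCondition>
      </preConditions>
    </outboundRules>
  </rewrite>
</system.webServer>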

    Read the article

  • Demystified - BI in SharePoint 2010

    - by Sahil Malik
    Frequently, my clients ask me if there is a good guide on deciphering the seemingly daunting choice of products from Microsoft when it comes to business intelligence offerings in a SharePoint 2010 world. These are all described in detail in my book, but here is a one (well, maybe two) page executive overview.

    Microsoft Excel: Yes, Microsoft Excel! It isn’t a database by strict technical definition, but it is the most commonly used ‘database’ in the world. You will find many business users craft up very compelling Excel sheets with tonnes of logic inside them. Good for: quick ad-hoc reports. Excel 64-bit allows the possibility of very large datasheets (also see 32-bit vs 64-bit Office, and the PowerPivot add-in below). Audience: end business users can build such solutions. Related technologies: PowerPivot, Excel Services.

    Microsoft Excel with PowerPivot Add-In: The PowerPivot add-in is an extension to Excel that adds support for large-scale data. Think of this as Excel with the ability to deal with very large amounts of data; under the covers it uses an in-memory Analysis Services data store. Good for: ad-hoc reporting and logic with very large amounts of data. Audience: end business users can build such solutions. Related technologies: Excel, Excel Services.

    Excel Services: Excel Services is a Microsoft SharePoint Server 2010 shared service that brings the power of Excel to SharePoint Server by providing server-side calculation and browser-based rendering of Excel workbooks. Thus, Excel sheets can be created by end users and published to the SharePoint server, where they are rendered right through the browser in read-only or parameterized read-only modes. They can also be accessed by other software via SOAP or REST based APIs (a minimal REST example follows this overview). Good for: sharing Excel sheets with a larger number of people while maintaining control/version control, and sharing logic embedded in Excel sheets with other software across the organization via REST/SOAP interfaces. Audience: end business users can build such solutions once your tech staff has set up Excel Services on a SharePoint server instance; programmers can write software consuming functionality/complex formulae contained in your sheets. Related technologies: PerformancePoint Services, Excel, PowerPivot.

    Visio Services: Visio Services is a shared service on the Microsoft SharePoint Server 2010 platform that allows users to share and view Visio diagrams that may or may not have data connected to them. Connected data can update these diagrams, allowing a visual/graphical view into the data. The diagrams are viewable through the browser; they are rendered in Silverlight but will automatically down-convert to .png format. Good for: showing data as diagrams with live updating; comes with a developer story. Audience: end business users can build such solutions once your tech staff has set up Visio Services on a SharePoint server instance; developers can enhance the visualizations. Related technologies: Visio Services can be used to render workflow visualizations in SP2010.

    Reporting Services: SQL Server Reporting Services can integrate with SharePoint, allowing you to store reports and data sources in SharePoint document libraries and render these reports and associated functionality, such as subscriptions, through a SharePoint site. In SharePoint 2010, you can also write reports against SharePoint lists (Access Services uses this technique). Good for: showing complex reports running in an industry-standard data store such as SQL Server. Audience: this is definitely developer land; don’t expect end users to craft up reports unless a report model has previously been published. Related technologies: PerformancePoint Services.

    PerformancePoint Services: PerformancePoint Services in SharePoint 2010 is now fully integrated with SharePoint and comes with features that can either be used in the BI Center site definition or on their own as activated features in existing site collections. PerformancePoint Services allows you to build reports and dashboards that target a variety of back-end data sources including SQL Server Reporting Services, SQL Server Analysis Services, SharePoint lists, Excel Services, simple tables, etc. Using these you have the ability to create dashboards, scorecards/KPIs, and simple reports. You can also create reports targeting hierarchical multidimensional data sources; the visual decomposition tree is a new report type that lets you quickly break down multi-dimensional data. Good for: mostly everything :), except your wallet – it’s not free! But this is the most comprehensive offering; if you have SharePoint Server, forget everything else and go with PerformancePoint. Audience: developers need to set up the back-end sources and the manageability story; DBAs need to set up data warehouses with cubes; moderately sophisticated business users or developers can craft up reports using Dashboard Designer, which is a ClickOnce app that deploys with PerformancePoint. Related technologies: Excel Services, Reporting Services, etc.

    Other relevant technologies to know about:

    Business Connectivity Services: Allows for consumption of external data in SharePoint as columns or external lists. This can be paired with one or more of the above BI offerings, allowing insight into such data.

    Access Services: Allows the representation/publishing of an Access database as a SharePoint 2010 site, leveraging many SharePoint features. Reporting Services is used by Access Services.

    Secure Store Service: The SP2010 Secure Store Service is a replacement for the SP2007 single sign-on feature. It acts as a credential policeman, providing credentials to various applications running within SharePoint. BCS, PerformancePoint Services, Excel Services, and many other apps use the SSS (Secure Store Service) for credential control.
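    Since Excel Services workbooks are reachable over REST as mentioned above, a plain HTTP GET is all it takes to pull a range out of a published workbook. Below is a minimal C# sketch (not from the article); the server URL, document library, workbook name, and range are placeholder assumptions.

    using System;
    using System.Net;

    class ExcelServicesRestSample
    {
        static void Main()
        {
            // Hypothetical workbook published to a SharePoint 2010 document library.
            // ExcelRest.aspx is the Excel Services REST endpoint; $format=html returns
            // the requested range as an HTML fragment ('%7C' is the encoded '|' range separator).
            string url = "http://sharepoint/sites/bi/_vti_bin/ExcelRest.aspx" +
                         "/Shared%20Documents/Sales.xlsx/Model/Ranges('Summary!A1%7CD10')?$format=html";

            using (var client = new WebClient())
            {
                client.UseDefaultCredentials = true;      // authenticate as the current Windows user
                string html = client.DownloadString(url); // fetch the rendered range
                Console.WriteLine(html);
            }
        }
    }

    Swapping $format=html for $format=atom should return the same range as an Atom feed that other applications can consume programmatically.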

    Read the article

  • TFS 2010 SDK: Integrating Twitter with TFS Programmatically

    - by Tarun Arora
    Technorati Tags: Team Foundation Server 2010, TFS API, Integrate Twitter TFS, TFS Programming, ALM, TweetSharp

    Friends at ‘TweetSharp’ have created a wonderful .NET API for Twitter. With this blog post I will try to show you a basic TFS - Twitter integration scenario where I will retrieve the Team Project details programmatically and then publish these details on my Twitter page. In future blogs I will be demonstrating how to create a Windows service to capture the events raised by TFS and then publish them in your social eco-system. Download working demo: Integrate Twitter - Tfs Programmatically

    1. Setting up the Twitter API

    Download TweetSharp from => https://github.com/danielcrenna/tweetsharp. Before you can start playing around with this, you will need to register an application on Twitter. This is because Twitter uses the OAuth authentication protocol and will not issue an access token unless your application is registered with them. Go to https://dev.twitter.com/ and register your application. Once you have registered your application, you will need the ‘Consumer Key’, ‘Consumer Secret’, ‘Access Token’, and ‘Access Token Secret’.

    2. Connecting to Twitter using the TweetSharp API

    Create a new C# Windows Forms project and add references to ‘Hammock.ClientProfile’, ‘Newtonsoft.Json’, and ‘TweetSharp’. Add the ConsumerKey, ConsumerSecret, AccessToken, and AccessTokenSecret keys to the App.config (Note – the values used in the sample are not valid; if you try to connect using them you will get an authorization failure error). Add a new class ‘TwitterProxy’ and use the following code to connect to the TwitterService (read more about OAuth authentication at http://dev.twitter.com/pages/auth):

    using System;
    using System.Configuration;
    using TweetSharp;

    namespace WindowsFormsApplication2
    {
        public class TwitterProxy
        {
            private static string _consumerKey;
            private static string _consumerSecret;
            private static string _accessToken;
            private static string _accessTokenSecret;

            // Reads the OAuth credentials from App.config and returns an authenticated TwitterService
            public static TwitterService ConnectToTwitter()
            {
                _consumerKey = ConfigurationManager.AppSettings["ConsumerKey"];
                _consumerSecret = ConfigurationManager.AppSettings["ConsumerSecret"];
                _accessToken = ConfigurationManager.AppSettings["AccessToken"];
                _accessTokenSecret = ConfigurationManager.AppSettings["AccessTokenSecret"];

                return new TwitterService(_consumerKey, _consumerSecret, _accessToken, _accessTokenSecret);
            }
        }
    }

    Time to tweet!

    _twitterService = Proxy.TwitterProxy.ConnectToTwitter();
    _twitterService.SendTweet("Hello World");

    SendTweet will return the TweetStatus. If you do not get a 200 OK status that means you have failed authentication; please revisit the access tokens.

    --RESPONSE: https://api.twitter.com/1/statuses/update.json
    HTTP/1.1 200 OK
    X-Transaction: 1308476106-69292-41752
    X-Frame-Options: SAMEORIGIN
    X-Runtime: 0.03040
    X-Transaction-Mask: a6183ffa5f44ef11425211f25
    Pragma: no-cache
    X-Access-Level: read-write
    X-Revision: DEV
    X-MID: bd8aa0abeccb6efba38bc0a391a73fab98e983ea
    Cache-Control: no-cache, no-store, must-revalidate, pre-check=0, post-check=0
    Content-Type: application/json; charset=utf-8
    Date: Sun, 19 Jun 2011 09:35:06 GMT
    Expires: Tue, 31 Mar 1981 05:00:00 GMT
    Last-Modified: Sun, 19 Jun 2011 09:35:06 GMT
    Server: hi
    Vary: Accept-Encoding
    Content-Encoding:
    Keep-Alive: timeout=15, max=100
    Connection: Keep-Alive
    Transfer-Encoding: chunked
    3. Integrate with TFS

    In my blog post ‘Connect to TFS Programmatically’ I have demonstrated in depth how to connect to TFS using the TFS API.

    // Update the App.config with the URI of the Team Foundation Server you want to connect to.
    // Make sure you have 'View Team Project Collection Details' permissions on the server.
    // Assumes the usual TFS client namespaces (Microsoft.TeamFoundation.Client,
    // Microsoft.TeamFoundation.Framework.Client/Common) are referenced by the project.
    private static string _myUri = ConfigurationManager.AppSettings["TfsUri"];
    private static TwitterService _twitterService = null;

    private void button1_Click(object sender, EventArgs e)
    {
        lblNotes.Text = string.Empty;

        try
        {
            StringBuilder notes = new StringBuilder();

            _twitterService = Proxy.TwitterProxy.ConnectToTwitter();
            _twitterService.SendTweet("Hello World");

            TfsConfigurationServer configurationServer =
                TfsConfigurationServerFactory.GetConfigurationServer(new Uri(_myUri));

            CatalogNode catalogNode = configurationServer.CatalogNode;

            ReadOnlyCollection<CatalogNode> tpcNodes = catalogNode.QueryChildren(
                new Guid[] { CatalogResourceTypes.ProjectCollection },
                false, CatalogQueryOptions.None);

            // tpc = Team Project Collection
            foreach (CatalogNode tpcNode in tpcNodes)
            {
                Guid tpcId = new Guid(tpcNode.Resource.Properties["InstanceId"]);
                TfsTeamProjectCollection tpc = configurationServer.GetTeamProjectCollection(tpcId);

                notes.AppendFormat("{0} Team Project Collection : {1}{0}", Environment.NewLine, tpc.Name);
                _twitterService.SendTweet(String.Format("http://Lunartech.codeplex.com - Connecting to Team Project Collection : {0} ", tpc.Name));

                // Get the catalog of tp = 'Team Projects' for the tpc = 'Team Project Collection'
                var tpNodes = tpcNode.QueryChildren(
                    new Guid[] { CatalogResourceTypes.TeamProject },
                    false, CatalogQueryOptions.None);

                foreach (var p in tpNodes)
                {
                    notes.AppendFormat("{0} Team Project : {1} - {2}{0}", Environment.NewLine, p.Resource.DisplayName, "This is an open source project hosted on codeplex");
                    _twitterService.SendTweet(String.Format(" Connected to Team Project: '{0}' – '{1}' ", p.Resource.DisplayName, "This is an open source project hosted on codeplex"));
                }
            }

            notes.AppendFormat("{0} Updates posted on Twitter : {1} {0}", Environment.NewLine, @"http://twitter.com/lunartech1");
            lblNotes.Text = notes.ToString();
        }
        catch (Exception ex)
        {
            lblError.Text = " Message : " + ex.Message + (ex.InnerException != null ? " Inner Exception : " + ex.InnerException : string.Empty);
        }
    }

    The extensions you can build integrating TFS and Twitter are incredible!

    Read the article

  • Backup Your Windows Home Server Off-Site with Asus Webstorage

    - by Mysticgeek
    Windows Home Server lets you back up machines on your network easily, but what about backing up the server data itself? Today we take a look at ASUS WebStorage for Windows Home Server, which provides you with secure off-site backup for WHS. To use the ASUS WebStorage service you’ll need to sign up for a free account. It offers 1GB of free storage, and you can then purchase an unlimited backup package for $39.99 for a one-year subscription. Note: they also offer online storage for individual PCs as well.

    Install ASUS WebStorage for WHS: Browse to your shared folders on the server, open the Add-Ins folder, and copy over the WHSConnectorSetup2.2.4.088.msi file (link below), then close out of the folder. Now launch the Windows Home Server Console from one of the computers on your network, click Settings, then Add-ins. Under Available Add-ins click the Available tab and you’ll see the ASUS WebStorage installer file we just copied over. Click the Install button. Installation kicks off, and when it’s complete you’ll need to close out of the console and reconnect.

    Using the ASUS WebStorage WHS Connector: When you reconnect to the WHS Console, scroll over to the ASUS WebStorage icon and click on Settings. Now log into your ASUS account, then select the folders you want to back up to the WebStorage service. Select the radio button next to Enable to initialize the backup process, and the backup begins. You can change which folders are backed up simply by disabling the backup process, unchecking the folder(s), then enabling the backup again.

    ASUS WebStorage Site: After you have files backed up to the ASUS site, log into your account and you’re presented with an overview of the amount of storage you’re using. It also shows what types of files are taking up space. You can browse through your backed up files and folders, and it allows you to share and sync backed up data as well. Navigate to the file you want and you can easily download it by clicking on it, or share it out by clicking the share link below it. If you choose to share it, you’re provided with a link to the file to send out to other users.

    Conclusion: Users of Windows Home Server have been looking for an inexpensive cloud backup solution for quite some time. There are services such as JungleDisk, KeepVault, Wuala, etc. These services probably do a better job, but can start getting expensive once you start uploading GBs of data. Another disappointment of ASUS WebStorage is that you can only back up your WHS shares (from what we’ve been able to determine); it’s an “all or nothing” type of thing, and you cannot go in and select individual files and folders. The initial upload speeds can be a bit slow as well, although that might have something to do with the limited upload speed of the DSL connection we used to test it. Retrieving your data from the ASUS site is a breeze though, and all the data files are organized quite well. The WHS add-in is very easy to install and use. If you’re looking for an off-site solution to back up your WHS data, you can test out ASUS WebStorage for free with a 1GB limit. This is good for testing the service, and it might be exactly what you’re looking for. Other users may want a more advanced solution like KeepVault or CloudBerry, which is a front end for Amazon S3 storage.
    Download ASUS WebStorage WHS Addin | Other WHS Offsite Backup Solutions: CloudBerry, JungleDisk, KeepVault, Wuala

    Read the article

< Previous Page | 394 395 396 397 398 399 400 401 402 403 404 405  | Next Page >