Search Results

Search found 3309 results on 133 pages for 'relative positioning'.


  • how to generate tinymce to ajax generated textarea

    - by Jai_pans
    Hi, I have a multi-image uploader script in which each uploaded item is previewed before it is submitted. Each image gets its own textarea, also generated by JavaScript, and my problem is that I want to use the TinyMCE editor on each textarea generated by the Ajax code. Any help will be appreciated. Here is my script:
    function fileQueueError(file, errorCode, message) { try { var imageName = "error.gif"; var errorName = ""; if (errorCode === SWFUpload.errorCode_QUEUE_LIMIT_EXCEEDED) { errorName = "You have attempted to queue too many files."; } if (errorName !== "") { alert(errorName); return; } switch (errorCode) { case SWFUpload.QUEUE_ERROR.ZERO_BYTE_FILE: imageName = "zerobyte.gif"; break; case SWFUpload.QUEUE_ERROR.FILE_EXCEEDS_SIZE_LIMIT: imageName = "toobig.gif"; break; case SWFUpload.QUEUE_ERROR.INVALID_FILETYPE: default: alert(message); break; } addImage("images/" + imageName); } catch (ex) { this.debug(ex); } }
    function fileDialogComplete(numFilesSelected, numFilesQueued) { try { if (numFilesQueued > 0) { this.startUpload(); } } catch (ex) { this.debug(ex); } }
    function uploadProgress(file, bytesLoaded) { try { var percent = Math.ceil((bytesLoaded / file.size) * 100); var progress = new FileProgress(file, this.customSettings.upload_target); progress.setProgress(percent); if (percent === 100) { progress.setStatus("Creating thumbnail..."); progress.toggleCancel(false, this); } else { progress.setStatus("Uploading..."); progress.toggleCancel(true, this); } } catch (ex) { this.debug(ex); } }
    function uploadSuccess(file, serverData) { try { var progress = new FileProgress(file, this.customSettings.upload_target); if (serverData.substring(0, 7) === "FILEID:") { addRow("tableID","thumbnail.php?id=" + serverData.substring(7),file.name);
    //setup();
    //generateTinyMCE('itemdescription[]');
    progress.setStatus("Thumbnail Created."); progress.toggleCancel(false); } else { addImage("images/error.gif"); progress.setStatus("Error."); progress.toggleCancel(false); alert(serverData); } } catch (ex) { this.debug(ex); } }
    function uploadComplete(file) { try { /* I want the next upload to continue automatically so I'll call startUpload here */ if (this.getStats().files_queued > 0) { this.startUpload(); } else { var progress = new FileProgress(file, this.customSettings.upload_target); progress.setComplete(); progress.setStatus("All images received."); progress.toggleCancel(false); } } catch (ex) { this.debug(ex); } }
    function uploadError(file, errorCode, message) { var imageName = "error.gif"; var progress; try { switch (errorCode) { case SWFUpload.UPLOAD_ERROR.FILE_CANCELLED: try { progress = new FileProgress(file, this.customSettings.upload_target); progress.setCancelled(); progress.setStatus("Cancelled"); progress.toggleCancel(false); } catch (ex1) { this.debug(ex1); } break; case SWFUpload.UPLOAD_ERROR.UPLOAD_STOPPED: try { progress = new FileProgress(file, this.customSettings.upload_target); progress.setCancelled(); progress.setStatus("Stopped"); progress.toggleCancel(true); } catch (ex2) { this.debug(ex2); } break; case SWFUpload.UPLOAD_ERROR.UPLOAD_LIMIT_EXCEEDED: imageName = "uploadlimit.gif"; break; default: alert(message); break; } addImage("images/" + imageName); } catch (ex3) { this.debug(ex3); } }
    function addRow(tableID,src,filename) { var table = document.getElementById(tableID); var rowCount = table.rows.length; var row = table.insertRow(rowCount); row.id = "row"+rowCount; var cell0 = row.insertCell(0); cell0.innerHTML = rowCount;
    cell0.style.background = "#FFFFFF"; var cell1 = row.insertCell(1); cell1.align = "center"; cell1.style.background = "#FFFFFF"; var imahe = document.createElement("img"); imahe.setAttribute("src",src); var hidden = document.createElement("input"); hidden.setAttribute("type","hidden"); hidden.setAttribute("name","filename[]"); hidden.setAttribute("value",filename);
    /*var hidden2 = document.createElement("input"); hidden2.setAttribute("type","hidden"); hidden2.setAttribute("name","filename[]"); hidden2.setAttribute("value",filename); cell1.appendChild(hidden2);*/
    cell1.appendChild(hidden); cell1.appendChild(imahe); var cell2 = row.insertCell(2); cell2.align = "left"; cell2.valign = "top"; cell2.style.background = "#FFFFFF";
    //tr1.appendChild(td1);
    var div2 = document.createElement("div"); div2.style.padding ="0 0 0 10px"; div2.style.width = "400px"; var alink = document.createElement("a");
    //alink.style.margin="40px 0 0 0";
    alink.href ="#"; alink.innerHTML ="Cancel"; alink.onclick= function () { document.getElementById(row.id).style.display='none'; document.getElementById(textfield.id).disabled='disabled'; }; var div = document.createElement("div"); div.style.margin="10px 0"; div.appendChild(alink); var textfield = document.createElement("input"); textfield.id = "file"+rowCount; textfield.type = "text"; textfield.name = "itemname[]"; textfield.style.margin = "10px 0"; textfield.style.width = "400px"; textfield.value = "Item Name"; textfield.onclick= function(){
    //textfield.value="";
    if(textfield.value=="Item Name") textfield.value=""; if(desc.innerHTML=="") desc.innerHTML ="Item Description"; if(price.value=="") price.value="Item Price"; } var desc = document.createElement("textarea"); desc.name = "itemdescription[]"; desc.cols = "80"; desc.rows = "4"; desc.innerHTML = "Item Description"; desc.onclick = function(){ if(desc.innerHTML== "Item Description") desc.innerHTML = ""; if(textfield.value=="Item Name" || textfield.value=="") textfield.value="Item Name"; if(price.value=="") price.value="Item Price"; } var price = document.createElement("input"); price.id = "file"+rowCount; price.type = "text"; price.name = "itemprice[]"; price.style.margin = "10px 0"; price.style.width = "400px"; price.value = "Item Price"; price.onclick= function(){ if(price.value=="Item Price") price.value=""; if(desc.innerHTML=="") desc.innerHTML ="Item Description"; if(textfield.value=="") textfield.value="Item Name"; } var span = document.createElement("span"); span.innerHTML = "View"; span.style.width = "auto"; span.style.padding = "10px 0"; var view = document.createElement("input"); view.id = "file"+rowCount; view.type = "checkbox"; view.name = "publicview[]"; view.value = "y"; view.checked = "checked"; var div3 = document.createElement("div"); div3.appendChild(span); div3.appendChild(view); var div4 = document.createElement("div"); div4.style.padding = "10px 0"; var span2 = document.createElement("span"); span2.innerHTML = "Default Display"; span2.style.width = "auto"; span2.style.padding = "10px 0"; var radio = document.createElement("input"); radio.type = "radio"; radio.name = "setdefault"; radio.value = "y"; div4.appendChild(span2); div4.appendChild(radio); div2.appendChild(div);
    //div2.appendChild(label);
    //div2.appendChild(table);
    div2.appendChild(textfield); div2.appendChild(desc); div2.appendChild(price); div2.appendChild(div3); div2.appendChild(div4); cell2.appendChild(div2); }
    function addImage(src,val_id) { var newImg = document.createElement("img"); newImg.style.margin = "5px 50px 5px 5px";
    newImg.style.display= "inline"; newImg.id=val_id; document.getElementById("thumbnails").appendChild(newImg); if (newImg.filters) { try { newImg.filters.item("DXImageTransform.Microsoft.Alpha").opacity = 0; } catch (e) { // If it is not set initially, the browser will throw an error. This will set it if it is not set yet.
    newImg.style.filter = 'progid:DXImageTransform.Microsoft.Alpha(opacity=' + 0 + ')'; } } else { newImg.style.opacity = 0; } newImg.onload = function () { fadeIn(newImg, 0); }; newImg.src = src; }
    function fadeIn(element, opacity) { var reduceOpacityBy = 5; var rate = 30; // 15 fps
    if (opacity < 100) { opacity += reduceOpacityBy; if (opacity > 100) { opacity = 100; } if (element.filters) { try { element.filters.item("DXImageTransform.Microsoft.Alpha").opacity = opacity; } catch (e) { // If it is not set initially, the browser will throw an error. This will set it if it is not set yet.
    element.style.filter = 'progid:DXImageTransform.Microsoft.Alpha(opacity=' + opacity + ')'; } } else { element.style.opacity = opacity / 100; } } if (opacity < 100) { setTimeout(function () { fadeIn(element, opacity); }, rate); } }
    /* ************************************** * FileProgress Object * Control object for displaying file info * ************************************** */
    function FileProgress(file, targetID) { this.fileProgressID = "divFileProgress"; this.fileProgressWrapper = document.getElementById(this.fileProgressID); if (!this.fileProgressWrapper) { this.fileProgressWrapper = document.createElement("div"); this.fileProgressWrapper.className = "progressWrapper"; this.fileProgressWrapper.id = this.fileProgressID; this.fileProgressElement = document.createElement("div"); this.fileProgressElement.className = "progressContainer"; var progressCancel = document.createElement("a"); progressCancel.className = "progressCancel"; progressCancel.href = "#"; progressCancel.style.visibility = "hidden"; progressCancel.appendChild(document.createTextNode(" ")); var progressText = document.createElement("div"); progressText.className = "progressName"; progressText.appendChild(document.createTextNode(file.name)); var progressBar = document.createElement("div"); progressBar.className = "progressBarInProgress"; var progressStatus = document.createElement("div"); progressStatus.className = "progressBarStatus"; progressStatus.innerHTML = "&nbsp;"; this.fileProgressElement.appendChild(progressCancel); this.fileProgressElement.appendChild(progressText); this.fileProgressElement.appendChild(progressStatus); this.fileProgressElement.appendChild(progressBar); this.fileProgressWrapper.appendChild(this.fileProgressElement); document.getElementById(targetID).appendChild(this.fileProgressWrapper); fadeIn(this.fileProgressWrapper, 0); } else { this.fileProgressElement = this.fileProgressWrapper.firstChild; this.fileProgressElement.childNodes[1].firstChild.nodeValue = file.name; } this.height = this.fileProgressWrapper.offsetHeight; }
    FileProgress.prototype.setProgress = function (percentage) { this.fileProgressElement.className = "progressContainer green"; this.fileProgressElement.childNodes[3].className = "progressBarInProgress"; this.fileProgressElement.childNodes[3].style.width = percentage + "%"; };
    FileProgress.prototype.setComplete = function () { this.fileProgressElement.className = "progressContainer blue"; this.fileProgressElement.childNodes[3].className = "progressBarComplete"; this.fileProgressElement.childNodes[3].style.width = ""; };
    FileProgress.prototype.setError = function () {
    this.fileProgressElement.className = "progressContainer red"; this.fileProgressElement.childNodes[3].className = "progressBarError"; this.fileProgressElement.childNodes[3].style.width = ""; };
    FileProgress.prototype.setCancelled = function () { this.fileProgressElement.className = "progressContainer"; this.fileProgressElement.childNodes[3].className = "progressBarError"; this.fileProgressElement.childNodes[3].style.width = ""; };
    FileProgress.prototype.setStatus = function (status) { this.fileProgressElement.childNodes[2].innerHTML = status; };
    FileProgress.prototype.toggleCancel = function (show, swfuploadInstance) { this.fileProgressElement.childNodes[0].style.visibility = show ? "visible" : "hidden"; if (swfuploadInstance) { var fileID = this.fileProgressID; this.fileProgressElement.childNodes[0].onclick = function () { swfuploadInstance.cancelUpload(fileID); return false; }; } };
    I am using SWFUpload, and I just added the input fields and a textarea to the preview of the images that are ready to be uploaded. In my HTML I have this script:
    var swfu;
    window.onload = function () {
        swfu = new SWFUpload({
            // Backend Settings
            upload_url: "../we_modules/upload.php", // Relative to the SWF file or absolute
            post_params: {"PHPSESSID": ""},
            // File Upload Settings
            file_size_limit : "20 MB", // 2MB
            file_types : "*.*",
            //file_types : "",
            file_types_description : "jpg",
            file_upload_limit : "0",
            file_queue_limit : "0",
            // Event Handler Settings - these functions as defined in Handlers.js
            // The handlers are not part of SWFUpload but are part of my website and control how
            // my website reacts to the SWFUpload events.
            //file_queued_handler : fileQueued,
            file_queue_error_handler : fileQueueError,
            file_dialog_complete_handler : fileDialogComplete,
            upload_progress_handler : uploadProgress,
            upload_error_handler : uploadError,
            upload_success_handler : uploadSuccess,
            upload_complete_handler : uploadComplete,
            // Button Settings
            button_image_url : "../we_modules/images/SmallSpyGlassWithTransperancy_17x18.png", // Relative to the SWF file
            button_placeholder_id : "spanButtonPlaceholder",
            button_width: 180,
            button_height: 18,
            button_text : 'Select Files(2 MB Max)',
            button_text_style : '.button { font-family: Helvetica, Arial, sans-serif; font-size: 12pt;cursor:pointer } .buttonSmall { font-size: 10pt; }',
            button_text_top_padding: 0,
            button_text_left_padding: 18,
            button_window_mode: SWFUpload.WINDOW_MODE.TRANSPARENT,
            button_cursor: SWFUpload.CURSOR.HAND,
            // Flash Settings
            flash_url : "../swfupload/swfupload.swf",
            custom_settings : {
                upload_target : "divFileProgressContainer"
            },
            // Debug Settings
            debug: false
        });
    };
    Where should I put the TinyMCE function you mentioned below?
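    One way to handle this (a minimal sketch, assuming the TinyMCE 3.x API; the textarea id used here is invented, since the generated desc textarea in addRow has no id in the script above):

        // At page load: initialize TinyMCE without converting any textareas yet.
        tinyMCE.init({
            mode : "none",
            theme : "simple"
        });

        // In addRow, after div2 (and therefore desc) has been appended to the DOM:
        desc.id = "itemdescription" + rowCount; // hypothetical unique id
        tinyMCE.execCommand("mceAddControl", false, desc.id); // attach an editor to that textarea

    Before the form is submitted, calling tinyMCE.triggerSave() copies each editor's content back into its underlying textarea so the itemdescription[] values post correctly.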

    Read the article

  • CodePlex Daily Summary for Thursday, February 25, 2010

    CodePlex Daily Summary for Thursday, February 25, 2010

    New Projects
    AptusSoftware.Threading: AptusSoftware.Threading is a class library designed primarily to assist in the development of multi-threaded WinForm applications, although there i...
    AxiomGameDesigner: It is going to be a universal scene editor for the Axiom 3D game engine. It is in pure C# and will be kept portable to MONO for compatibility with linu...
    Badger - Unity Productivity Extensions: A set of Microsoft Unity Extensions. Why Badger? Because I love badgers.
    Business & System Analysis Templates and Best Practices for Russian: http://saway.codeplex.com/
    Conectayas: Conectayas is an open source "Connect Four" alike game but transformable to "Tic-Tac-Toe" and to a lot of similar games that use the mouse. Written in...
    FastCode: .NET 3.5 Extensions set to increase coding speed.
    Hundiyas: Hundiyas is an open source "Battleship" alike game totally written in DHTML (JavaScript, CSS and HTML) that uses the mouse. This cross-platform and cro...
    Icelandic Online Banking: Icelandic Online Banking is a project defining a web service interface for online banking.
    IE8 AddOns XML Creator: Application that helps on creating the xml files for IE8 Accelerators, Search Providers and the markup for Web Slices.
    iKnowledge: an asp.net mvc demo
    Learn ASP.NET MVC: Learn ASP.NET MVC is a project for the members of the Peer Learning group in Silicon Valley. It contains the SportsStore solution from the Pro ASP...
    Live at Education Meta Web-Service: Live at Education Meta Web-Service is intended to abstract from several technologies that are included in the Live@edu set of services. This web-ser...
    Low level wave sound output for VB.NET: Low level sound output class for VB.NET using platform invocation services to call winmm.dll
    MailQ: MailQ makes it easier for developers to send mail messages from an application. The system sends mails based on a database queue system (store, se...
    Managed DXGI: Managed DXGI library is a fully managed wrapper written in C# for DXGI 1.0 and 1.1 technology. It makes it easier to support DXGI in managed application....
    Multivalue AutoComplete WinForms TextBox in C#: This project is a sample application that demonstrates how to create a multivalue WinForms textbox in C# using .NET Framework 3.5.
    Nifty CSharp Tools: Nifty CSharp Tools will contain various tools and snippets: IRCBot, splashscreens, linq, World of Warcraft log parsing, screenshot uploaders, twi...
    PHP MPQ: A port of StormLib to PHP for handling Blizzard MPQ files.
    RedDevils strategy - Project Hoshimi Programming Battle: Source code of the RedDevils strategy. Imagine Cup 2008 - Project Hoshimi Programming Battle.
    RNUNIT: rNunit is a distributed Nunit project. Many applications these days are client-server or distributed applications, and regular unit testing ...
    Samar Solution: Samar Solutions is a business system for office automation.
    Silverlight OOMRPG Game Engine: Silverlight OOMRPG Game Engine
    Simulator: GPSSimulator
    SLARToolkit - Silverlight Augmented Reality Toolkit: SLARToolkit is a flexible Augmented Reality library for Silverlight with the aim to make real time Augmented Reality applications with Silverlight ...
    Spiral Architecture Driven Development (SADD) for Russian: This is the Russian version of the sadd.codeplex.com site.
    SQLSnapshotManager: Easily manage SQL Server database snapshots in an easy to use visual interface.
    Twilio with VB.NET MVC: Twilio with VB.NET MVC is a sample application for developing with Twilio's REST based telephony API. It includes an XML Schema of the TwiML respon...
    Ultra Speed Dial: UltraSpeedDial.com - Online Speed Dial Page.
    Visual HTML Editor justHTML: justHTML is a simple Windows WYSIWYG editor that allows everyone - without any knowledge of HTML - to create and edit web-pages. It supp...
    WinMTR.NET: .NET clone of the popular Windows clone of the popular Linux Matt's Traceroute
    WPF Dialogs: "WPF Dialogs" is a library for different Dialogs in WPF (e.g. FolderBrowseDialog, SaveFileDialog, OpenFileDialog etc.). These Dialogs are written i...
    WPFLogin: A small Login window in WPF and C#
    XNA PerformanceTimers: CPU timers for Windows and Xbox 360. Can track multiple threads, and presents output as a log on-screen.

    New Releases
    AptusSoftware.Threading: 2.0.0: First public release. This release is in production as part of several commercial applications and is stable. The source code download includes a...
    BizTalk Software Factory: BizTalk Software Factory v2.1: This is a service release for the BizTalk Software Factory for BizTalk Server 2009, containing so far: Fix for x64: the SN.EXE tool is now locate...
    Business & System Analysis Templates and Best Practices for Russian: R00 The Place reserver: Just to reserve the place. Will be filled out soon.
    Chronos WPF: Chronos v1.0 Beta 2: Added a new SplashScreen; added a new Login View and implemented Log Off; added a new PasswordBoxHelper (http://www.codeproject.com/Articles/371...
    dotNetTips: dotNetTips.Utility 3.5 R2: This is a new release (version 3.5.0.3) compatible with .NET 3.5. Lots of new classes/features!! Requires SP1 if using the Entity Framework extensi...
    fleXdoc - template-based server-side document generator (docx): fleXdoc 1.0 beta 3: The third and final beta of fleXdoc. fleXdoc consists of a webservice and a (test)client for the service. Make sure you also download the testclien...
    FluentPS: FluentPS v1.0: FluentPS is moved from the ASMX to the WCF interface of the Project Server Interface (PSI); impersonation changes to work in compliance with the WCF interfa...
    FolderSize: FolderSize.Win32.1.0.4.0: A simple utility intended to be used to scan harddrives for the folders that take most place and display this to the user...
    iTuner - The iTunes Companion: iTuner 1.1.3707 Beta 3: As promised, the iTuner Automated Librarian is now available. This automatically cleans an entire album of dead tracks and duplicates as tracks ar...
    Live at Education Meta Web-Service: LAEMWS v 1.0 beta: Release Candidate for LAEMWS.
    Macaw Reusable Code Library: LanguageConfigurationSolution: This solution helps developing a multi-language publishing web site.
    Managed DXGI: Initial Release: Base declaration of interfaces, most of them untested yet.
    Math.NET Numerics: 2010.2.24.667 Build: Latest alpha build
    MiniTwitter: 1.08.1: MiniTwitter 1.08.1 changes: incremental search is now case-insensitive; the client name display was changed from "from" to "via" to match the official service. Fixes: fixed a bug where, on an official RT, the status was displayed at the top or shown twice; when replying to yourself...
    Multivalue AutoComplete WinForms TextBox in C#: 1.0 First public release: The multivalue autocomplete textbox control and host application in this release are released in a single Visual Studio 2008 project. See my related b...
    NMock3: NMock3 - Beta3, .NET 3.5: This release has some exciting new features. Please start providing feedback on the tutorials. The first several are complete and the rest are no...
    nxAjax - an asp.net ajax library using jQuery: nxAjax v3 codeplex 7: nxAjax v3 codeplex 7 binary and test website. Bug fixed: ajax:Form control. Added: drag and drop. Rewritten: DragnDropManager DragPanel DropPan...
    Office Apps: 0.8.7: What's new? Document.Editor and Document.Viewer now support FlowDocument (.xaml) files; bug fixes
    PDF Rider: PDF Rider 0.3: Application prerequisites: Microsoft Windows operating systems (XP (tested) - Vista - 7); Microsoft .NET Framework 3.5 runtime; a PDF rendering sof...
    ShellLight: ShellLight 0.1.0.1 Src: Codeplex project released. This is only a preview of the product. Until the first final release there will be many improvements.
    Silverlight OOMRPG Game Engine: SilverlightGameTutorialSolution v1.01: Please visit my blog for the Silverlight OOMRPG Game Tutorial: http://www.cnblogs.com/Jax/archive/2010/02/24/1673053.html.
    Simple Savant: Simple Savant v0.4: Added support for full-text indexing (see Full-Text Indexing); added support for attribute spanning and compression for property values larger tha...
    Spiral Architecture Driven Development (SADD) for Russian: R00: R00 to reserve the site name
    TeamReview - TFS Code Review: Release 1.1.3: Release features: new expanded product positioning for capturing any targeted coding work as a trackable, assignable, reportable Work Item for any r...
    Text Designer Outline Text Library: 10th minor release: Version 0.3.1 (10th minor release): fixed the gradient brush being too big for the text, resulting in not much gradient shown in the text. Gradient...
    TFS Workflow Control: TeamExplorer and TSWA control 1.0 for TFS 2010 RC: This is a special version for TFS 2010 RC. Use the RC version of the power tools to modify the layout of your work items (http://visualstudiogaller...
    thinktecture WSCF.blue: WSCF.blue V1 Update (1.0.7) - VS2010 RC Support: This update adds support for Visual Studio 2010 RC in addition to Visual Studio 2008. Please note that Visual Studio 2010 Beta 2 is NOT supported a...
    Tumblen3: tumblen3 Version 25Feb2010: ready for Twitter's xAuth
    UMD文本编辑器 (UMD Text Editor): UMDEditor V2.1.0: 2.1.0 (2010-02-24): added the ability to search for specified text within a chapter. 2.0.4 (2010-02-06): added a right-click menu with basic text-editing operations to the chapter content box. Run reg.bat ...
    VCC: Latest build, v2.1.30224.0: Automatic drop of latest build
    Visual HTML Editor justHTML: Latest binary: Latest build here. Executable and mshtml.dll included in this archive. Ready to use ;)
    Visual HTML Editor justHTML: Source code for version 2.5: Visual Studio 2008 project with full source code.
    VOB2MKV: vob2mkv-1.0.2: The release vob2mkv-1.0.2 is a feature update of the VOB2MKV project. It now includes a DirectShow source filter, MKVSOURCE. A source filter allo...
    WinMTR.NET: V 1.0: V 1.0
    WPF Dialogs: Version 0.1.0: FolderBrowseDialog is implemented; for more information look here: Version 0.1.0 (German: Version 0.1.0 - Deutsch).
    WPF Dialogs: Version 0.1.1: Features: FolderBrowseDialog was extended (German: FolderBrowseDialog wurde erweitert).
    XNA PerformanceTimers: XNA PerformanceTimers 0.1: Initial release.
    Zeta Resource Editor: Release 2010-02-24: Added HTTP proxy server support.

    Most Popular Projects
    ASP.NET Ajax Library
    Managed Extensibility Framework
    Windows 7 USB/DVD Download Tool
    DotNetZip Library
    MDownloader
    Virtual Router - Wifi Hot Spot for Windows 7 / 2008 R2
    MFCMAPI
    Droid Explorer
    Useful Sharepoint Designer Custom Workflow Activities
    Oxite

    Most Active Projects
    DinnerNow.net
    BlogEngine.NET
    Rawr
    InfoService
    SLARToolkit - Silverlight Augmented Reality Toolkit
    NB_Store - Free DotNetNuke Ecommerce Catalog Module
    SharpMap - Geospatial Application Framework for the CLR
    jQuery Library for SharePoint Web Services
    Rapid Entity Framework (ORM) CTP 2
    Common Context Adapters

    Read the article

  • Silverlight 4 Training Kit

    - by ScottGu
    We recently released a new free Silverlight 4 Training Kit that walks you through building business applications with Silverlight 4.  You can browse the training kit online or alternatively download an entire offline version of the training kit.  The training material is structured around teaching how to use the new Silverlight 4 features to build an end-to-end business application. The training kit includes 8 modules, 25 videos, and several hands-on labs. Below is a breakdown and links to all of the content. [In addition to blogging, I am also now using Twitter for quick updates and to share links. Follow me at: twitter.com/scottgu]

    Module 1: Introduction
    Click here to watch this module. In this video John Papa and Ian Griffiths discuss the key areas that the Building Business Applications with Silverlight 4 course focuses on. This module is the overview of the course and covers many key scenarios that are faced when building business applications, and how Silverlight can help address them.

    Module 2: WCF RIA Services
    Click here to explore this module. In this lab, you will create a web site for managing conferences that will be the basis for the other labs in this course. Don’t worry if you don’t complete a particular lab in the series – all lab manual instructions are accompanied by completed solutions, so you can either build your own solution from start to finish, or dive straight in at any point using the solutions provided as a starting point. In this lab you will learn how to set up WCF RIA Services, create bindings to the domain context, filter using the domain data source, and create domain service queries. Online Link Download Source Download Lab Document Videos

    Module 2.1 - WCF RIA Services
    Ian Griffiths sets up the Entity Framework and WCF RIA Services for the sample Event Manager application for the course. He covers how to set up the services, how the Domain Services work and the role that the DomainContext plays in the sample application. He also reviews the metadata classes and integrating the navigation framework.

    Module 2.2 – Using WCF RIA Services to Edit Entities
    Ian Griffiths discusses how he adds the ability to edit and create individual entities in the sample Event Manager application using the features built into WCF RIA Services. He covers data binding fundamentals, IQueryable, LINQ, the DomainDataSource, navigation to a single entity using the navigation framework, and how to use the Visual Studio designer to do much of the work.

    Module 2.3 – Showing Master/Details Records Using WCF RIA Services
    Ian Griffiths reviews how to display master/detail records for the sample Event Manager application using WCF RIA Services. He covers how to use the Include attribute to indicate which elements to serialize back to the client. Ian also demonstrates how to use the Data Sources window in the designer to add and bind controls to specific data elements. He wraps up by showing how to add custom methods to the Domain Services.

    Module 3 – Authentication, Validation, MVVM, Commands, Implicit Styles and RichTextBox
    Click here to visit this module. This lab demonstrates how to build a login screen, integrate ASP.NET authentication, and perform validation on data elements. Model-View-ViewModel (MVVM) is introduced and used in this lab as a pattern to help separate the UI and business logic. You will also learn how to use implicit styling and the new RichTextBox control.
    Online Link Download Source Download Lab Document Videos

    Module 3.1 – Authentication
    Ian Griffiths covers how to integrate a login screen and authentication into the sample Event Manager application. Ian shows how to use the ASP.NET authentication and integrate it into WCF RIA Services and the Silverlight presentation layer.

    Module 3.2 – MVVM
    Ian Griffiths covers how to integrate the Model-View-ViewModel (MVVM) pattern into the sample Event Manager application. He discusses why MVVM exists, what separated presentation means, and why it is important. He shows how to connect the View to the ViewModel, why data binding is important in this symbiosis, and how everything fits together in the overall application.

    Module 3.3 – Validation
    Ian Griffiths discusses how validation of user input can be integrated into the sample Event Manager application. He demonstrates how to use the DataAnnotations, the INotifyDataErrorInfo interface, binding markup extensions, and WCF RIA Services in concert to achieve great validation in the sample application. He discusses how this technique allows for property level validation, entity level validation, and asynchronous server side validation.

    Module 3.4 – Implicit Styles
    Ian Griffiths discusses why implicit styles are important and how they can be integrated into the sample Event Manager application. He shows how implicit styles defined in a resource dictionary can be applied to all elements of a particular kind throughout the application.

    Module 3.5 – RichTextBox
    Ian Griffiths discusses the new RichTextBox control and how it can be integrated into the sample Event Manager application. He demonstrates how the RichTextBox can provide editing for the event information and how it can display the rich text for selection and copying.

    Module 4 – User Profiles, Drop Targets, Webcam and Clipboard
    Click here to visit this module. This lab builds new features into the sample application to take the user's photo. It teaches you how to use the webcam to capture an image, use Silverlight as a drop target, and take advantage of programmatic access to the clipboard. Link Download Source Download Lab Document Videos

    Module 4.1 – Webcam
    Ian Griffiths demonstrates how the webcam adds value to the sample Event Manager application by capturing an image of the attendee. He discusses the VideoCaptureDevice, the CaptureDeviceConfiguration, and the CaptureSource classes and how they allow audio and video to be captured so you can grab an image from the capture device and save it.

    Module 4.2 - Drag and Drop in Silverlight
    Ian Griffiths demonstrates how to capture and handle the Drop in the sample Event Manager application so the user can drag a photo from a file and drop it into the application. Ian reviews the AllowDrop property, the Drop event, how to access the file that can be dropped, and the other drag related events. He also reviews how to make this work across browsers and the challenges for this.

    Module 5 – Schedule Planner and Right Mouse Click
    Click here to visit this module. This lab builds on the application to allow grouping in the DataGrid and implement right mouse click features to add context menu support. Link Download Source Download Lab Document Videos

    Module 5.1 – Grouping and Binding
    Ian Griffiths demonstrates how to use the grouping features for data binding in the DataGrid and how it applies to the sample Event Manager application.
    He reviews the role of the CollectionViewSource in grouping, customizing the templates for headers, and how to work with grouping with ItemsControls.

    Module 5.2 – Layout Visual States
    Ian Griffiths demonstrates how to use the Fluid UI animation support for visual states in the ListBox control and how it applies to the sample Event Manager application. He reviews the 3 visual states of BeforeLoaded, AfterLoaded, and BeforeUnloaded.

    Module 5.3 – Right Mouse Click
    Ian Griffiths demonstrates how to add support for handling the right mouse button click event to display a context menu for the Event Manager application. He demonstrates how to handle the event, show a custom context menu control, and integrate it into the scheduling portion of the application.

    Module 6 – Printing the Schedule
    Click here to visit this module. This lab teaches how to use the new printing features in Silverlight 4. The lab walks through the PrintDocument class and the ViewBox control, while showing how to print multiple pages of content using them. Link Download Source Download Lab Document Videos

    Module 6.1 – Printing and the Viewbox
    Ian Griffiths demonstrates how to add the ability to print the schedule to the sample Event Manager application. He walks through the importance of the PrintDocument class and its members. He also shows how to handle printing the visual tree and how the ViewBox control can help.

    Module 6.2 – Multi Page Printing
    Ian Griffiths expands on his printing discussion by showing how to handle printing multiple pages of content for the sample Event Manager application. He shows how to paginate the content and points out various tips to keep in mind when determining the printable area.

    Module 7 – Running the Event Dashboard Out of Browser
    Click here to visit this module. This lab builds a dashboard for the sample application while explaining the fundamentals of the out of browser features, how to handle authentication, displaying notifications (toasts), and how to use native integration to use COM Interop with Silverlight. Link Download Source Download Lab Document Videos

    Module 7.1 – Out of Browser
    Ian Griffiths discusses the role of an Out of Browser application for administrators to manage the events and users in the sample Event Manager application. He discusses several reasons why out of browser applications may better suit your needs, including custom chrome, toasts, window placement, cross domain access, and file access. He demonstrates the basic technique to take your application and make it work out of browser using the tools.

    Module 7.2 – NotificationWindow (Toasts) for Elevated Trust Out of Browser Applications
    Ian Griffiths discusses how toasts can be used in the sample Event Manager application to show information that may require the user's attention. Ian covers how to create a toast using the NotificationWindow, security implications, and how to make the toast appear as needed.

    Module 7.3 – Out of Browser Window Placement
    Ian Griffiths discusses how to manage the window positioning when building an out of browser application, handling the window state, and controlling and handling activation of the window.

    Module 7.4 – Out of Browser Elevated Trust Application Overview
    Ian Griffiths discusses the implications of creating a trusted out of browser application for the Event Manager sample application. He reviews why you might want to use elevated trust, what features it opens to you, and how to take advantage of them.
    Topics Ian covers include the dynamic keyword in C# 4, the AutomationFactory class, the API to check if you are in a trusted application, and communicating with Excel.

    Module 8 – Advanced Out of Browser and MEF
    Click here to visit this module. This hands-on lab walks through the creation of a trusted out of browser application and the new functionality that comes with that. You will learn to use COM Automation, handle the window closing event, set custom window chrome, digitally sign your Silverlight out of browser trusted application, create a silent install option, and take advantage of MEF. Link Download Source Download Lab Document Videos

    Module 8.1 – Custom Window Chrome for Elevated Trust Out of Browser Applications
    Ian Griffiths discusses how to replace the standard operating system window chrome with customized chrome for an elevated trusted out of browser application. He covers why it is important to handle close, resize, minimize, and maximize events. Ian mentions that the tooling was not ready when he shot this video, but the good news is that the tooling now supports setting the custom chrome directly from the property page for the Silverlight application.

    Module 8.2 – Window Closing Event for Out of Browser Applications
    Ian Griffiths discusses the WindowClosing event and how to handle and optionally cancel the event.

    Module 8.3 – Silent Install of Out of Browser Applications
    Ian Griffiths discusses how to use the SLLauncher executable to install an out of browser application. He discusses the optional command line switches that can be set, including how the emulate switch can help you emulate the install process. Ian also shows how to set up a shortcut for the application and tell the application where it should look for future updates online.

    Module 8.4 – Digitally Signing Out of Browser Applications
    Ian Griffiths discusses how and why to digitally sign an out of browser application using the signtool program. He covers what trusted certificates are, the implications of signing (or not signing), and the effect on the user experience.

    Module 8.5 – The Value of MEF with Silverlight
    Ian Griffiths discusses what MEF is, how your application can benefit from it, and the fundamental features it puts at your disposal. He covers the 3-step import, export and compose process as well as how to dynamically import XAP files using MEF.

    Summary
    As you can probably tell from the long list above – this series contains a ton of great content, and hopefully provides a nice end-to-end walkthrough that helps explain how to take advantage of Silverlight 4 (and all its new features).  Hope this helps, Scott

    Read the article

  • Building a database installer with WiX, datadude and Visual Studio 2010

    - by jamiet
    Today I have been using Windows Installer XML (WiX) to build an installer (.msi file) that would install a SQL Server database on a server of my choosing; the source code for that database lives in datadude (a tool which you may know by one of quite a few other names). The basis for this work was a most excellent blog post by Duke Kamstra entitled Implementing a WIX installer that calls the GDR version of VSDBCMD.EXE, which covers the delicate intricacies of doing this – particularly how to call Vsdbcmd.exe in a CustomAction. Unfortunately there are a couple of things wrong with Duke’s post:

    Searching for “datadude wix” didn’t turn it up in the first page of search results, and hence it took me a long time to find it – and I knew that it existed. If someone else were after a post on using WiX with datadude it’s likely that they would never have come across Duke’s post, and that would be a great shame because it’s the definitive post on the matter.
    It was written in October 2009 and had not been updated for Visual Studio 2010.

    Well, this blog post is an attempt to solve those problems. Hopefully I’ve solved the first one just by following a few of my blogging SEO tips while writing this blog post; in the rest of it I will explain how I took Duke’s code and updated it to work in Visual Studio 2010. If you need to build a database installer using WiX, datadude and Visual Studio 2010 then you still need to follow Duke’s blog post, so go and do that now. Below are the amendments that I made that enabled the project to get built in Visual Studio 2010:

    In VS2010, datadude’s output files have changed from being called Database.<suffix> to <ProjectName>_Database.<suffix>. Duke’s code was referencing the old file name formats.
    Duke used $(var.SolutionDir) and relative paths to point to datadude artefacts. I have replaced these with Votive project references: http://wix.sourceforge.net/manual-wix3/votive_project_references.htm
    I commented out all references to MicrosoftSqlTypesDbschema in DatabaseArtifacts.wxi. I don't think this is produced in VS2010 (I may be wrong about that, but it wasn't in the output from my project).
    Similarly, I commented out the component MicrosoftSqlTypesDbschema in VsdbcmdArtifacts.wxi. It wasn't where Duke's code said it should have been, so I am assuming/hoping it isn't needed.
    Duke's ?define block to work out the appropriate SrcArchPath actually wasn't working for me (i.e. <?if $(var.Platform)=x64 ?> was evaluating to false), so I just took out the conditional stuff and declared the path explicitly to the “Program Files (x86)” path. The old code is still there though if you need to put it back.
    None of the <RegistrySearch> stuff is needed for VS2010 – so I commented it all out!
    Changed to use the /manifest option rather than the /model option on the vsdbcmd.exe command line. Personal preference is all!
    Added a new component in order to bundle along the vsdbcmd.exe.config file.
    Made the install of the Custom Action dependent on the relevant feature being selected for install. This one is actually really important – deselecting the database feature for installation does not, by default, stop the CustomAction from executing and so would cause an error, so that scenario needs to be catered for.

    I have made my amended solution available for download at: http://cid-550f681dad532637.office.live.com/self.aspx/Public/BlogShare/20110210/InstallMyDatabase.zip It contains two projects: the WiX project and the datadude project that is the source to be deployed (for demo purposes it only contains one table).
    I have also made the .msi available, although in order that it gets through file blockers I changed the name from InstallMyDatabase.msi to InstallMyDatabase.ms_ – simply rename the file back once you have downloaded it from: http://cid-550f681dad532637.office.live.com/self.aspx/Public/BlogShare/20110210/InstallMyDatabase.ms%5E_ . You can try it out for yourself – the only thing it does is dump the files into %Program Files%\MyDatabase and uses them to install a database onto a server of your choosing with a name of your choosing – no damaging side-effects. I will caveat this by saying “it works on my machine” and, not having access to a plethora of different machines, I haven’t tested it anywhere else. One potential issue that I know of is that Vsdbcmd.exe has a dependency on SQL Server CE, although if you have SQL Server tools or Visual Studio installed you should be fine. Unfortunately it’s not possible to bundle along the SQL Server CE installer in the .msi because Windows will not allow you to call one installer from inside another – the recommended way to get around this problem is to build a bootstrapper to bundle the whole lot together, but doing that is outside the scope of this blog post. If you discover any other issues then please let me know. Here are the screenshots from the installer: And once installed…. Hope this is useful! @jamiet

    Read the article

  • Exam 70-480 Study Material: Programming in HTML5 with JavaScript and CSS3

    - by Stacy Vicknair
    Here’s a list of sources of information for the different elements that comprise the 70-480 exam:

    General Resources
    http://www.w3schools.com (As pointed out in David Pallmann’s blog some of this content is unverified, but it is a decent source of information. For more about when it isn’t decent, see http://www.w3fools.com )
    http://www.bloggedbychris.com/2012/09/19/microsoft-exam-70-480-study-guide/ (A guy who did a lot of what I did already; sadly I found this halfway through finishing my resources list. This list is expertly put together, so I would recommend checking it out.)
    http://davidpallmann.blogspot.com/2012/08/microsoft-certification-exam-70-480.html
    http://pluralsight.com/training/Courses (Yes, this isn’t free, but if you look at the course listing there is an entire section on HTML5, CSS3 and Javascript. You can always try the trial!)

    Some of the links I put below will overlap with the other resources above, but I tried to find explanations that looked beneficial to me on links outside those already mentioned.

    Test Breakdown

    Implement and Manipulate Document Structures and Objects (24%)

    Create the document structure.
    o This objective may include but is not limited to: structure the UI by using semantic markup, including for search engines and screen readers (Section, Article, Nav, Header, Footer, and Aside); create a layout container in HTML
    http://www.w3schools.com/html/html5_new_elements.asp

    Write code that interacts with UI controls.
    o This objective may include but is not limited to: programmatically add and modify HTML elements; implement media controls; implement HTML5 canvas and SVG graphics
    http://www.w3schools.com/html/html5_canvas.asp
    http://www.w3schools.com/html/html5_svg.asp

    Apply styling to HTML elements programmatically.
    o This objective may include but is not limited to: change the location of an element; apply a transform; show and hide elements

    Implement HTML5 APIs.
    o This objective may include but is not limited to: implement storage APIs, AppCache API, and Geolocation API
    http://www.w3schools.com/html/html5_geolocation.asp
    http://www.w3schools.com/html/html5_webstorage.asp
    http://www.w3schools.com/html/html5_app_cache.asp

    Establish the scope of objects and variables.
    o This objective may include but is not limited to: define the lifetime of variables; keep objects out of the global namespace; use the “this” keyword to reference an object that fired an event; scope variables locally and globally
    http://robertnyman.com/2008/10/09/explaining-javascript-scope-and-closures/
    http://www.quirksmode.org/js/this.html

    Create and implement objects and methods.
    o This objective may include but is not limited to: implement native objects; create custom objects and custom properties for native objects using prototypes and functions; inherit from an object; implement native methods and create custom methods
    http://www.javascriptkit.com/javatutors/object.shtml
    http://www.crockford.com/javascript/inheritance.html
    http://stackoverflow.com/questions/1635116/javascript-class-method-vs-class-prototype-method
    http://www.javascriptkit.com/javatutors/proto.shtml
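    As a quick illustration of the scoping and prototype objectives above, here is a small sketch (all of the names in it are invented for the example):

        // A constructor with a shared method on its prototype:
        // every instance uses the same copy of increment.
        function Counter(start) {
            this.count = start;
        }
        Counter.prototype.increment = function () {
            this.count += 1;
            return this.count;
        };

        // A closure keeps 'total' out of the global namespace
        // while the returned function can still see it.
        function makeAdder() {
            var total = 0; // local; invisible to other code
            return function (n) {
                total += n;
                return total;
            };
        }

        var add = makeAdder();
        add(2); // 2
        add(3); // 5: 'total' persisted between calls inside the closure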
    Implement Program Flow (25%)

    Implement program flow.
    o This objective may include but is not limited to: iterate across collections and array items; manage program decisions by using switch statements, if/then, and operators; evaluate expressions
    http://www.javascriptkit.com/jsref/looping.shtml
    http://www.javascriptkit.com/javatutors/varshort.shtml
    http://www.javascriptkit.com/javatutors/switch.shtml

    Raise and handle an event.
    o This objective may include but is not limited to: handle common events exposed by DOM (OnBlur, OnFocus, OnClick); declare and handle bubbled events; handle an event by using an anonymous function
    http://dev.w3.org/2006/webapi/DOM-Level-3-Events/html/DOM3-Events.html
    http://javascript.info/tutorial/bubbling-and-capturing

    Implement exception handling.
    o This objective may include but is not limited to: set and respond to error codes; throw an exception; request for null checks; implement try-catch-finally blocks
    http://www.javascriptkit.com/javatutors/trycatch.shtml

    Implement a callback.
    o This objective may include but is not limited to: receive messages from the HTML5 WebSocket API; use jQuery to make an AJAX call; wire up an event; implement a callback by using anonymous functions; handle the “this” pointer
    http://www.w3.org/TR/2011/WD-websockets-20110419/
    http://www.html5rocks.com/en/tutorials/websockets/basics/
    http://api.jquery.com/jQuery.ajax/

    Create a web worker process.
    o This objective may include but is not limited to: start and stop a web worker; pass data to a web worker; configure timeouts and intervals on the web worker; register an event listener for the web worker; limitations of a web worker
    https://developer.mozilla.org/en-US/docs/DOM/Using_web_workers
    http://www.html5rocks.com/en/tutorials/workers/basics/

    Access and Secure Data (26%)

    Validate user input by using HTML5 elements.
    o This objective may include but is not limited to: choose the appropriate controls based on requirements; implement HTML input types and content attributes (for example, required) to collect user input
    http://diveintohtml5.info/forms.html

    Validate user input by using JavaScript.
    o This objective may include but is not limited to: evaluate a regular expression to validate the input format; validate that you are getting the right kind of data type by using built-in functions; prevent code injection
    http://www.regular-expressions.info/javascript.html
    http://msdn.microsoft.com/en-us/library/66ztdbe6(v=vs.94).aspx
    https://developer.mozilla.org/en-US/docs/JavaScript/Reference/Operators/typeof
    http://blog.stackoverflow.com/2008/06/safe-html-and-xss/
    http://stackoverflow.com/questions/942011/how-to-prevent-javascript-injection-attacks-within-user-generated-html

    Consume data.
    o This objective may include but is not limited to: consume JSON and XML data; retrieve data by using web services; load data or get data from other sources by using XMLHTTPRequest
    http://www.erichynds.com/jquery/working-with-xml-jquery-and-javascript/
    http://www.webdevstuff.com/86/javascript-xmlhttprequest-object.html
    http://www.json.org/
    http://stackoverflow.com/questions/4935632/how-to-parse-json-in-javascript
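    For the "Consume data" objective just above, a minimal XMLHttpRequest sketch (the URL is a made-up placeholder):

        // Request JSON from a web service and parse it when it arrives.
        var xhr = new XMLHttpRequest();
        xhr.open("GET", "/api/sessions.json", true); // hypothetical endpoint
        xhr.onreadystatechange = function () {
            if (xhr.readyState === 4 && xhr.status === 200) {
                var sessions = JSON.parse(xhr.responseText); // throws if the payload is not valid JSON
                alert("Loaded " + sessions.length + " sessions");
            }
        };
        xhr.send();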
    Serialize, deserialize, and transmit data.
    o This objective may include but is not limited to: binary data; text data (JSON, XML); implement the jQuery serialize method; Form.Submit; parse data; send data by using XMLHTTPRequest; sanitize input by using URI/form encoding
    http://api.jquery.com/serialize/
    http://www.javascript-coder.com/javascript-form/javascript-form-submit.phtml
    http://stackoverflow.com/questions/327685/is-there-a-way-to-read-binary-data-into-javascript
    https://developer.mozilla.org/en-US/docs/JavaScript/Reference/Global_Objects/encodeURI

    Use CSS3 in Applications (25%)

    Style HTML text properties.
    o This objective may include but is not limited to: apply styles to text appearance (color, bold, italics); apply styles to text font (WOFF and @font-face, size); apply styles to text alignment, spacing, and indentation; apply styles to text hyphenation; apply styles for a text drop shadow
    http://www.w3schools.com/css/css_text.asp
    http://www.w3schools.com/css/css_font.asp
    http://nicewebtype.com/notes/2009/10/30/how-to-use-css-font-face/
    http://webdesign.about.com/od/beginningcss/p/aacss5text.htm
    http://www.w3.org/TR/css3-text/
    http://www.css3.info/preview/box-shadow/

    Style HTML box properties.
    o This objective may include but is not limited to: apply styles to alter appearance attributes (size, border and rounding border corners, outline, padding, margin); apply styles to alter graphic effects (transparency, opacity, background image, gradients, shadow, clipping); apply styles to establish and change an element’s position (static, relative, absolute, fixed)
    http://net.tutsplus.com/tutorials/html-css-techniques/10-css3-properties-you-need-to-be-familiar-with/
    http://www.w3schools.com/css/css_image_transparency.asp
    http://www.w3schools.com/cssref/pr_background-image.asp
    http://ie.microsoft.com/testdrive/graphics/cssgradientbackgroundmaker/default.html
    http://www.w3.org/TR/CSS21/visufx.html
    http://www.barelyfitz.com/screencast/html-training/css/positioning/
    http://davidwalsh.name/css-fixed-position

    Create a flexible content layout.
    o This objective may include but is not limited to: implement a layout using a flexible box model; implement a layout using multi-column; implement a layout using position floating and exclusions; implement a layout using grid alignment; implement a layout using regions, grouping, and nesting
    http://www.html5rocks.com/en/tutorials/flexbox/quick/
    http://www.css3.info/preview/multi-column-layout/
    http://msdn.microsoft.com/en-us/library/ie/hh673558(v=vs.85).aspx
    http://dev.w3.org/csswg/css3-grid-layout/
    http://dev.w3.org/csswg/css3-regions/

    Create an animated and adaptive UI.
    o This objective may include but is not limited to: animate objects by applying CSS transitions; apply 3-D and 2-D transformations; adjust UI based on media queries (device adaptations for output formats, displays, and representations); hide or disable controls
    http://www.bloggedbychris.com/2012/09/19/microsoft-exam-70-480-study-guide/

    Find elements by using CSS selectors and jQuery.
    o This objective may include but is not limited to: choose the correct selector to reference an element; define element, style, and attribute selectors; find elements by using pseudo-elements and pseudo-classes (for example, :before, :first-line, :first-letter, :target, :lang, :checked, :first-child)
    http://www.bloggedbychris.com/2012/09/19/microsoft-exam-70-480-study-guide/
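    For the selector objective just above, a short jQuery sketch (it assumes jQuery is loaded; the class names are invented for the example):

        // Element, class, and attribute selectors.
        $("li").addClass("item");                          // every <li>
        $(".highlight").hide();                            // every element with class="highlight"
        $("input[type='checkbox']").prop("checked", true); // attribute selector

        // Pseudo-classes work through jQuery's selector engine too.
        $("ul li:first-child").css("font-weight", "bold");
        $("input:checked").parent().addClass("selected");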
    Structure a CSS file by using CSS selectors.
    o This objective may include but is not limited to: reference elements correctly; implement inheritance; override inheritance by using !important; style an element based on pseudo-elements and pseudo-classes (for example, :before, :first-line, :first-letter, :target, :lang, :checked, :first-child)
    http://www.bloggedbychris.com/2012/09/19/microsoft-exam-70-480-study-guide/

    Technorati Tags: 70-480,CSS3,HTML5,HTML,CSS,JavaScript,Certification

    Read the article

  • Oracle WebCenter Partner Program

    - by kellsey.ruppel
    In competitive marketplaces, your company needs to respond quickly to changes and new trends in order to open opportunities and build long-term growth. Oracle has a variety of next-generation services, solutions and resources that will leverage the differentiators in your offerings. Name your partnering needs: Oracle has the answer. This week we’d like to focus on partners and the value your organization can gain from working with the Oracle PartnerNetwork. The Oracle PartnerNetwork will empower your company with exceptional resources to distinguish your offerings from the competition, seize opportunities, and increase your sales. We’re happy to welcome Christine Kungl and Brian Buzzell, from Oracle’s World Wide Alliances & Channels (WWA&C) WebCenter Partner Enablement team, as today’s guests on the Oracle WebCenter blog.

    Q: What is the Oracle PartnerNetwork (OPN)?
    A: Christine: Oracle’s PartnerNetwork (OPN) is a collaborative partnership which gives registered companies specific added-value resources to help differentiate themselves from their competition. Through its programs, OPN provides companies the ability to target and seize opportunities, educate and train their teams, and leverage the unparalleled opportunity that comes with Oracle’s large market footprint. OPN’s multi-level programs allow companies to grow and evolve with Oracle based on their business needs. As part of their OPN membership, partners are encouraged to become OPN Specialized, which gives them additional differentiation in Oracle’s partner community.

    Q: What is an OPN Specialization and what resources are available for Specialized Partners?
    A: Brian: Oracle wanted a better way for our partners to differentiate their special skills and expertise, as well as a more effective way to communicate that difference to customers. Oracle’s expanding product portfolio demanded that we be able to identify partners with significant product knowledge: those who had made an investment in Oracle and a continuing commitment to deliver Oracle solutions. And with more than 30,000 Oracle partners around the world, Oracle needed a way for our customers to choose the right partner for their business. So how did Oracle meet this need? With the new partner program: Oracle PartnerNetwork (OPN) Specialized. In this new program, Oracle partners are:
    Specialized: differentiating themselves from the competition with expertise that sets them apart
    Recognized: being acknowledged for investing in becoming Oracle experts in specialized areas
    Preferred: connecting with potential customers who are seeking value-added solutions for their business
    OPN Specialized provides all partners with educational opportunities, training, and tools specially designed to build competency and grow business. Partners can serve their customers better through these key resources:
    OPN Specialized Knowledge Zones - located on the updated and enhanced OPN portal, these provide a single point of entry for all education and training information for Oracle partners.
    Enablement 2.0 Resources - Enablement 2.0 helps Oracle partners build their competencies and skills through a variety of educational opportunities and expanded training choices. These resources include:
    Enablement 2.0 "boot camps", which provide three-tiered learning levels that help jump-start partner training; the role-based training covers Oracle’s application and technology products and offers a combination of classroom lectures, hands-on lab exercises, and case studies.
    Enablement 2.0 interactive guided learning paths (GLPs) with recommendations on how to achieve specialization
    Upgraded partner solution kits
    Enhanced, specialized business centers available 24/7 around the globe on the OPN portal
    OPN Competency Center - tracking progress: the OPN Competency Center keeps track as a partner applies for and achieves specialization in selected areas. You start with an assessment that compares your organization’s current skills and experience with the requirements for specialization in the area you have chosen. The OPN Competency Center then provides a roadmap that itemizes the skills and the knowledge you need to earn specialized status.
    In summary, OPN Specialization not only includes key training resources but also a way to track and show progression for your partner organization.

    Q: What are the OPN Membership Levels and what are the benefits?
    A: Christine: The base OPN membership levels are:
    Remarketer: At the Remarketer level, retailers can choose to resell select Oracle products with the backing of authorized, regionally located, value-added distributors (VADs). The Remarketer level has no fees and no partner agreement with Oracle, but does offer online training and sales tools through the OPN portal. Program Details: Remarketer
    Silver Level: The Silver level is for Oracle partners who are focused on reselling and developing business with products ordered through the Oracle 1-Click Ordering Program. The Silver level provides a cost-effective, yet scalable way for partners to start an OPN Specialized membership and offers a substantial set of benefits that lets partners increase their competitive positioning. Program Details: Silver
    Gold Level: Gold-level partners have the ability to specialize, helping them grow their business and create differentiation in the marketplace. Oracle partners at the Gold level can develop, sell, or implement the full stack of Oracle solutions and can apply to resell Oracle Applications. Program Details: Gold
    Platinum Level: The Platinum level is for Oracle partners who want the highest level of benefits and are committed to reaching a minimum of five specializations. Platinum partners are recognized for their expertise in a broad range of products and technology, and receive dedicated support from Oracle. Program Details: Platinum
    In addition we recently introduced a new level:
    Diamond Level: This level is the most prestigious level of OPN Specialized. It allows companies to differentiate further because of the focused depth and breadth of their expertise. Program Details: Diamond
    So as you can see, there are various cost-effective ways that partners can gain assistance and differentiation through OPN membership.

    Q: What role do Oracle's World Wide Alliances & Channels (WWA&C) and Partner Enablement teams and the WebCenter Community play?
    A: Brian: Oracle’s WWA&C teams are responsible for managing relationships, educating partner teams, creating go-to-market solutions and fostering communities for Oracle partners worldwide. The WebCenter Partner Enablement Middleware Team is tasked with creating, managing and distributing Specialization resources for the WebCenter Partner community.
Q: What WebCenter Specializations are currently available? A: Christine: Here are the current WebCenter Specializations and their availability: Oracle WebCenter Portal Specialization (Oracle WebCenter Portal): Available now. The Oracle WebCenter Specialization provides insight into the following products: WebCenter Services, WebCenter Spaces, and WebLogic Portal. Oracle WebCenter Specialized Partners can efficiently use Oracle WebCenter products to create social applications, enterprise portals, communities, composite applications, and Internet or intranet Web sites on a standards-based, service-oriented architecture (SOA). The suite combines the development of rich internet applications; a multi-channel portal framework; and a suite of horizontal WebCenter applications, which provide content, presence, and social networking capabilities to create a highly interactive user experience. Oracle WebCenter Content Specialization: Available now. The Oracle WebCenter Content Specialization provides insight into the following products: Universal Content Management, WebCenter Records Management, WebCenter Imaging, WebCenter Distributed Capture, and WebCenter Capture. Oracle WebCenter Content Specialized Partners can efficiently build content-rich business applications, reuse content, and integrate hundreds of content services with other business applications. This allows our customers to decrease costs, automate processes, reduce resource bottlenecks, share content effectively, minimize the number of lost documents, and better manage risk. Oracle WebCenter Sites Specialization: Available Q1 2012. Oracle WebCenter Sites is part of the broader Oracle WebCenter platform that provides organizations with a complete customer experience management solution. Partners that align with the new Oracle WebCenter Sites platform allow their customers’ organizations to: Leverage customer information from all channels and systems. Manage interactions across all channels. Unify commerce, merchandising, marketing, and service across all channels. Provide personalized, choreographed consumer journeys across all channels. Integrate order orchestration, supply chain management and order fulfillment. Q: What criteria does the partner organization need to achieve Specialization? What about individual Sales, Pre-Sales and Implementation Specialist/Technical consultants? A: Brian: Each Oracle WebCenter Specialization has unique business criteria that must be met in order to achieve that Specialization. This includes a unique number of transactions (co-sell, re-sell, and referral), customer references, and a unique number of specialists on the partner’s team (Sales, Pre-Sales, Implementation, and Support). Each WebCenter Specialization provides training resources (GLPs, boot camps, assessments and exams) for individuals on a partner’s staff to fulfill those requirements. The criteria for each Specialization can be found on the Specialize tab of the corresponding WebCenter Knowledge Zone. Here are sample criteria, recommended courses and exams for the WebCenter Portal Specialization: WebCenter Portal Specialization Criteria. Q: Do you have any suggestions on the best way for partners to get started if they would like to know more? A: Christine: The best way for partners to start is to look at their business and core Oracle team focus, and then look to become specialized in one or more areas. Once you have selected the Specializations that are right for your business, you need to follow the first three key steps described below.
The fourth step outlines the additional process to follow if you meet the criteria to be Advanced Specialized. Note that Step 4 may not be done without first following Steps 1-3. 1. Join the Knowledge Zone(s) where you want to achieve Specialized status: Go to the Knowledge Zone, click on the "Why Partner" tab, then click on the "Join Knowledge Zone" link. 2. Meet the Specialization criteria: Define and implement plans in your organization to achieve the competency and business criteria targets of the Specialization. (Note: Worldwide OPN members at the Gold, Platinum, or Diamond level and their Associates at the Gold, Platinum, or Diamond level may count their collective resources to meet the business and competency criteria required for specialization in this area.) 3. Apply for Specialization: When you have met the business and competency criteria required, inform Oracle by completing the following steps: Click on the "Specialize" tab in the Knowledge Zone, click on the "Apply Now" button, and complete the online application form. Oracle will validate the information provided, and once approved, you will receive notification from Oracle of your awarded Specialized status. Need more information? Access our Step by Step Guide (PDF). 4. Apply for Advanced Specialization (optional): If your company has on staff 50 unique Certified Implementation Specialists in your company's approved Specialization's product set, let Oracle know by following these steps: Ensure that you have 50 or more unique individuals that are Certified Implementation Specialists in the specific Specialization awarded to your company. If you are pooling resources from another Associate or Worldwide entity, ensure you know that company’s name and country. Have your Oracle PRM Administrator complete the online Advanced Specialization Application. Oracle will validate the information provided, and once approved, you will receive notification from Oracle of your awarded Advanced Specialized status. There are additional resources on OPN as well as in the broader WebCenter community.

    Read the article

  • Performance considerations for common SQL queries

    - by Jim Giercyk
Originally posted on: http://geekswithblogs.net/NibblesAndBits/archive/2013/10/16/performance-considerations-for-common-sql-queries.aspx

SQL offers many different methods to produce the same results. There is a never-ending debate among SQL developers as to the “best way” or the “most efficient way” to render a result set. Sometimes these disputes even come to blows… well, I am a lover, not a fighter, so I decided to collect some data that will prove which way is the best and most efficient. For the queries below, I downloaded the test database from SQLSkills: http://www.sqlskills.com/sql-server-resources/sql-server-demos/. There isn’t a lot of data, but enough to prove my point: dbo.member has 10,000 records, and dbo.payment has 15,554. Our result set contains 6,706 records. The following queries produce an identical result set; the result set contains aggregate payment information for each member who has made more than 1 payment from the dbo.payment table, and the first and last name of the member from the dbo.member table.

/*************/
/* Sub Query */
/*************/
SELECT  a.[Member Number] ,
        m.lastname ,
        m.firstname ,
        a.[Number Of Payments] ,
        a.[Average Payment] ,
        a.[Total Paid]
FROM    ( SELECT    member_no 'Member Number' ,
                    AVG(payment_amt) 'Average Payment' ,
                    SUM(payment_amt) 'Total Paid' ,
                    COUNT(Payment_No) 'Number Of Payments'
          FROM      dbo.payment
          GROUP BY  member_no
          HAVING    COUNT(Payment_No) > 1
        ) a
        JOIN dbo.member m ON a.[Member Number] = m.member_no

/***************/
/* Cross Apply */
/***************/
SELECT  ca.[Member Number] ,
        m.lastname ,
        m.firstname ,
        ca.[Number Of Payments] ,
        ca.[Average Payment] ,
        ca.[Total Paid]
FROM    dbo.member m
        CROSS APPLY ( SELECT    member_no 'Member Number' ,
                                AVG(payment_amt) 'Average Payment' ,
                                SUM(payment_amt) 'Total Paid' ,
                                COUNT(Payment_No) 'Number Of Payments'
                      FROM      dbo.payment
                      WHERE     member_no = m.member_no
                      GROUP BY  member_no
                      HAVING    COUNT(Payment_No) > 1
                    ) ca

/********/
/* CTEs */
/********/
; WITH  Payments
          AS ( SELECT   member_no 'Member Number' ,
                        AVG(payment_amt) 'Average Payment' ,
                        SUM(payment_amt) 'Total Paid' ,
                        COUNT(Payment_No) 'Number Of Payments'
               FROM     dbo.payment
               GROUP BY member_no
               HAVING   COUNT(Payment_No) > 1
             ),
        MemberInfo
          AS ( SELECT   p.[Member Number] ,
                        m.lastname ,
                        m.firstname ,
                        p.[Number Of Payments] ,
                        p.[Average Payment] ,
                        p.[Total Paid]
               FROM     dbo.member m
                        JOIN Payments p ON m.member_no = p.[Member Number]
             )
    SELECT  *
    FROM    MemberInfo

/************************/
/* SELECT with Grouping */
/************************/
SELECT  p.member_no 'Member Number' ,
        m.lastname ,
        m.firstname ,
        COUNT(Payment_No) 'Number Of Payments' ,
        AVG(payment_amt) 'Average Payment' ,
        SUM(payment_amt) 'Total Paid'
FROM    dbo.payment p
        JOIN dbo.member m ON m.member_no = p.member_no
GROUP BY p.member_no ,
        m.lastname ,
        m.firstname
HAVING  COUNT(Payment_No) > 1

We can see what is going on in SQL’s brain by looking at the execution plan. The Execution Plan will show which steps SQL executes, in what order, and what percentage of the batch time each query takes. So, if I execute all 4 of these queries in a single batch, I will get an idea of the relative time SQL takes to execute them, and how it renders the Execution Plan. We can settle this once and for all. Here is what SQL did with these queries: Not only did the queries take the same amount of time to execute, SQL generated the same Execution Plan for each of them. Everybody is right… I guess we can all finally go to lunch together! But wait a second, I may not be a fighter, but I AM an instigator. Let’s see how a table variable stacks up. Here is the code I executed:

/********************/
/*  Table Variable  */
/********************/
DECLARE @AggregateTable TABLE
    (
      member_no INT ,
      AveragePayment MONEY ,
      TotalPaid MONEY ,
      NumberOfPayments MONEY
    )
INSERT  @AggregateTable
        SELECT  member_no 'Member Number' ,
                AVG(payment_amt) 'Average Payment' ,
                SUM(payment_amt) 'Total Paid' ,
                COUNT(Payment_No) 'Number Of Payments'
        FROM    dbo.payment
        GROUP BY member_no
        HAVING  COUNT(Payment_No) > 1

SELECT  at.member_no 'Member Number' ,
        m.lastname ,
        m.firstname ,
        at.NumberOfPayments 'Number Of Payments' ,
        at.AveragePayment 'Average Payment' ,
        at.TotalPaid 'Total Paid'
FROM    @AggregateTable at
        JOIN dbo.member m ON m.member_no = at.member_no

In the interest of keeping things in groupings of 4, I removed the last query from the previous batch and added the table variable query. Here’s what I got: Since we first insert into the table variable, then read from it, the Execution Plan renders 2 steps. BUT, the combination of the 2 steps is only 22% of the batch. It is actually faster than the other methods even though it is treated as 2 separate queries in the Execution Plan. The argument I often hear against table variables is that SQL only estimates 1 row for the table size in the Execution Plan. While this is true, the estimate does not come into play until you read from the table variable. In this case, the table variable had 6,706 rows, but it still outperformed the other queries. People argue that table variables should only be used for hash or lookup tables. The fact is, you have control of what you put INTO the variable, so as long as you keep it within reason, these results suggest that a table variable is a viable alternative to sub-queries. If anyone does volume testing on this theory, I would be interested in the results. My suspicion is that there is a breaking point where efficiency goes down the tubes immediately, and it would be interesting to see where that threshold is. Coding SQL is a matter of style. If you’ve been around since they introduced DB2, you were probably taught a little differently than a recent computer science graduate. If you have a company standard, I strongly recommend you follow it. If you do not have a standard, generally speaking, there is no right or wrong answer when talking about the efficiency of these types of queries, and certainly no hard-and-fast rule.
Volume and infrastructure will dictate a lot when it comes to performance, so your results may vary in your environment.  Download the database and try it!
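If you want raw numbers rather than the plan’s relative percentages, SQL Server can report CPU time, elapsed time and I/O per statement. Here is a minimal sketch you can wrap around any of the queries above (the grouped SELECT from the batch is reused here; the timing output appears on the Messages tab):

-- Turn on per-statement timing and I/O reporting before running the query under test.
SET STATISTICS TIME ON;
SET STATISTICS IO ON;

SELECT  p.member_no 'Member Number' ,
        m.lastname ,
        m.firstname ,
        COUNT(Payment_No) 'Number Of Payments' ,
        AVG(payment_amt) 'Average Payment' ,
        SUM(payment_amt) 'Total Paid'
FROM    dbo.payment p
        JOIN dbo.member m ON m.member_no = p.member_no
GROUP BY p.member_no , m.lastname , m.firstname
HAVING  COUNT(Payment_No) > 1;

SET STATISTICS TIME OFF;
SET STATISTICS IO OFF;

The STATISTICS IO output (logical reads per table) is particularly useful, since it tends to be a steadier measure than elapsed time on a busy machine.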

    Read the article

  • Unique Business Value vs. Unique IT

    - by barry.perkins
When the age of computing started, technology was new, exciting, full of potential and had a long way to grow. Vendor architectures were proprietary and limited in function at first, growing in capability and complexity over time. There were few if any "standards", let alone "open standards", and the concepts of "open systems" and "open architectures" were far in the future. Companies employed intelligent, talented and creative people to implement the best possible solutions for their company. At first, those solutions were "unique" to each company. As time progressed, standards emerged, companies shared knowledge, business capability supplied by technology grew, and companies continued to expand their use of technology. Taking advantage of change required companies to endure periodic "revolutionary" change cycles: costly changes that were fraught with risk, resulted in solutions with an increasingly shorter half-life, and frequently required altering existing business processes and retraining employees and partner businesses. The pace of technological invention and implementation grew at an ever-increasing rate, making the "revolutionary" approach based upon "proprietary" or "closed" architectures or technologies no longer viable. Concurrent with the advancement of technology, the rate of change in business increased, leading us to the incredibly fast-paced, highly charged, and competitive global economy that we have today, where the most successful companies are those that are good at implementing, leveraging and exploiting change. Fast forward to today, a world where dramatic changes in business and technology happen continually, a world where "evolutionary" change is crucial. Companies can no longer afford to build "unique IT", nor can they afford regular intervals of "revolutionary" change, with the associated costs and risks. Human ingenuity was once again up to the task, turning technology into a platform supporting business through evolutionary change, by employing "open": open standards, open systems, open architectures, and open solutions. Employing "open" enables companies to implement systems based upon technology, capability and standards that will evolve over time, providing a solid platform upon which a company can drive business needs, requirements, functions, and processes down into the technology, rather than exposing technology to the business. This allows companies to focus on providing "unique business value" rather than "unique IT". The big question: does moving from "older" technology that no longer meets the needs of today's business to new "open" technology require yet another "revolutionary" change? A "revolutionary" change with a short half-life, camouflaging reality with great marketing? The answer is "perhaps". With the endless options available to choose from, it is entirely possible to implement a solution that may work well today, but in five years' time will become yet another albatross for the company to bear. Some solutions may look good today, solving a budget challenge by reducing cost, or solving a specific tactical challenge, but result in highly complex environments that may be difficult to manage and maintain and that limit the future potential of your business. Put differently, some solutions might push today's challenge into the future, resulting in a more complex and expensive solution. There is no such thing as a "one size fits all" IT solution for business.
If all companies implemented business solutions based upon technology that required, or forced, the same business processes across all businesses in an industry, it would be extremely difficult to show competitive advantage through "unique business value". It would be equally difficult to "evolve" to meet or exceed business needs and keep up with today's rapid pace of change. How does one ensure that they do not jump from one trap directly into another? Or, to put it positively, there are solutions available today that can address these challenges and issues. How does one ensure that the buying decision of today will serve the business well for years into the future? Intelligent and informed decisions - "buying right". In a previous blog entry, we discussed the value of linking tactical to strategic. The key is driving the focus to what is best for your business: handling today's tactical issues while also aligning with a roadmap/strategy that is tightly aligned with your strategic business objectives. When considering the plethora of possible options that provide various approaches to solving today's complex business problems, it is extremely important to ensure that the vendors supplying those options focus on what is best for your business, supply sufficient information, provide adequate answers to questions, address challenges, issues, concerns and objections honestly and openly, and focus on supplying solutions that are tailored for, and deliver the most business value possible to, your business. Here are a few questions to consider relative to the proposed options that should help ensure that today's solution doesn't become tomorrow's problem. Do the proposed solutions: Solve the problem(s) you are trying to address? Provide a solid foundation upon which to grow/enhance your business? Provide tactical gains that align with and enable your strategic business goals/objectives? Provide an infrastructure that can be leveraged with subsequent projects? Solve problems for the business overall and the lines of business, or just for IT? Simplify your current environment? Provide the basis for business efficiency, agility and clarity; for governance, risk and compliance; and for real-time business visibility and trend analysis? Does your IT staff have the knowledge/experience to successfully manage the proposed systems once they are deployed in production? Done well, you will be presented with options tailored to your business that enable you to drive the "unique business value" necessary to help your business stand out from others, creating a distinct competitive advantage, delivering what your customers need, when they need it, so you can attract new customers and new business and grow top-line revenue, all at a cost that provides a strong Return on Investment/Return on Assets. The net result is growth with managed cost, providing significantly improved profit margin and shareholder value.

    Read the article

  • AWS: setting up auto-scale for EC2 instances

    - by Elton Stoneman
Originally posted on: http://geekswithblogs.net/EltonStoneman/archive/2013/10/16/aws-setting-up-auto-scale-for-ec2-instances.aspx

With Amazon Web Services, there’s no direct equivalent to Azure Worker Roles – no Elastic Beanstalk-style application for .NET background workers. But you can get the auto-scale part by configuring an auto-scaling group for your EC2 instance. This is a step-by-step guide that shows you how to create the auto-scaling configuration, which for EC2 you need to do with the command line, and then link your scaling policies to CloudWatch alarms in the Web console. I’m using queue size as my metric for CloudWatch, which is a good fit if your background workers are pulling messages from a queue and processing them. If the queue is getting too big, the “high” alarm will fire and spin up a new instance to share the workload. If the queue is draining down, the “low” alarm will fire and shut down one of the instances. To start with, you need to manually set up your app in an EC2 VM; for a background worker that would mean hosting your code in a Windows Service (I always use Topshelf). If you’re dual-running Azure and AWS, then you can isolate your logic in one library with a generic entry point that has Start() and Stop() functions, so your Worker Role and Windows Service are essentially using the same code. When you have your instance set up with the Windows Service running automatically, and you’ve tested it starts up and works properly from a reboot, shut the machine down and take an image of the VM, using Create Image (EBS AMI) from the Web Console. When that completes, you’ll have your own AMI which you can use to spin up new instances, and you’re ready to create your auto-scaling group. You need to dip into the command-line tools for this, so follow this guide to set up the AWS autoscale command line tool. Now we’re ready to go.

1. Create a launch configuration

This launch configuration tells AWS what to do when a new instance needs to be spun up. You create it with the as-create-launch-config command, which looks like this:

as-create-launch-config sc-xyz-launcher # name of the launch config
--image-id ami-7b9e9f12 # id of the AMI you extracted from your VM
--region eu-west-1 # which region the new instance gets created in
--instance-type t1.micro # size of the instance to create
--group quicklaunch-1 # security group for the new instance

2. Create an auto-scaling group

The auto-scaling group links to the launch config, and defines the overall configuration of the collection of instances:

as-create-auto-scaling-group sc-xyz-asg # auto-scaling group name
--region eu-west-1 # region to create in
--launch-configuration sc-xyz-launcher # name of the launch config to invoke for new instances
--min-size 1 # minimum number of nodes in the group
--max-size 5 # maximum number of nodes in the group
--default-cooldown 300 # period to wait (in seconds) after each scaling event, before checking if another scaling event is required
--availability-zones eu-west-1a eu-west-1b eu-west-1c # which availability zones you want your instances to be allocated in – multiple entries means EC2 will use any of them

3. Create a scale-up policy

The policy dictates what will happen in response to a scaling event being triggered from a “high” alarm being breached.
It links to the auto-scaling group; this sample results in one additional node being spun up:

as-put-scaling-policy scale-up-policy # policy name
-g sc-psod-woker-asg # auto-scaling group the policy works with
--adjustment 1 # size of the adjustment
--region eu-west-1 # region
--type ChangeInCapacity # type of adjustment; this specifies a fixed number of nodes, but you can use PercentChangeInCapacity to make an adjustment relative to the current number of nodes, e.g. increasing by 50%

4. Create a scale-down policy

The policy dictates what will happen in response to a scaling event being triggered from a “low” alarm being breached. It links to the auto-scaling group; this sample results in one node from the group being taken offline:

as-put-scaling-policy scale-down-policy
-g sc-psod-woker-asg
"--adjustment=-1" # in Windows, use double-quotes to surround a negative adjustment value
--type ChangeInCapacity
--region eu-west-1

5. Create a “high” CloudWatch alarm

We’re done with the command line now. In the Web Console, open up the CloudWatch view and create a new alarm. This alarm will monitor your metrics and invoke the scale-up policy from your auto-scaling group when the group is working too hard. Configure your metric – this example will fire the alarm if there are more than 10 messages in my queue for over a minute. Then link the alarm to the scale-up policy in your group.

6. Create a “low” CloudWatch alarm

The opposite of step 5, this alarm will trigger when the instances in your group don’t have enough work to do (e.g. fewer than 2 messages in the queue for 1 minute), and will invoke the scale-down policy. And that’s it. You don’t need your original VM, as the auto-scale group has a minimum number of nodes connected. You can test out the scaling by flexing your CloudWatch metric – in this example, filling up a queue from a stub publisher – and watching AWS create new nodes as required, then stopping the publisher and watching AWS kill off the spare nodes.
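As a footnote to steps 5 and 6: if you would rather script the alarms than click through the console, the CloudWatch command line tools of the same vintage as the as-* tools expose the same operation. This is only a sketch under assumptions: the alarm name, queue name and policy ARN below are placeholders, and the mon-put-metric-alarm flag names should be checked against your installed tool version:

mon-put-metric-alarm HighQueueDepth # alarm name (placeholder)
--metric-name ApproximateNumberOfMessagesVisible # SQS queue-depth metric
--namespace "AWS/SQS"
--dimensions "QueueName=my-worker-queue" # placeholder queue name
--statistic Average
--period 60 # one-minute samples
--evaluation-periods 1
--threshold 10 # fire when more than 10 messages are waiting
--comparison-operator GreaterThanThreshold
--alarm-actions <scale-up-policy-arn> # placeholder: the ARN returned by as-put-scaling-policy
--region eu-west-1

A mirror-image command with LessThanThreshold and the scale-down policy ARN would cover the “low” alarm.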

    Read the article

  • CodePlex Daily Summary for Tuesday, April 13, 2010

    CodePlex Daily Summary for Tuesday, April 13, 2010New ProjectsChat Neo: Video chatDev-wow HappyFuwa: Silverlight + Asp.net + Ajax 实现的以北京奥运为题材,福娃在线聊天互动系统 登录系统后,你可以和线上的朋友即时互动,走动 聊天 动作等都会呈现给其他的在线用户Dynamic Configuration Manager: Dynamic Configuration Manager GameHelper: the project of myselfGeotron: Geotron is a C# geolocation library to resolve postcodes and addresses to co-ordinates, to assist developers in creating location-aware applications. InfoPath Forms Services 2010 Web Testing Toolkit: This project has the tools and information needed to write Visual Studio web tests for InfoPath Forms Services 2010.IronBrainFuck, SimpleBrainFuck: IronBrainFuck and SimpleBrainFuck makes it easier for BrainFuck programmers to develop BrainFuck-compatible programs. It's developed in C#.Runtime Intelligence API: The Runtime Intelligence API library and samples provided by PreEmptive Solutions.SilverVNC 1.0: This project is a Silverlight VNC Viewer. It requires Silverlight 4.0 and works in Out of Browser with full-trust.Snippet Creator: Yet another Visual Studio plugin for creating code snippets.Software Codex: Software Codex is a collection of projects developed in .net to provide a set of libraries and functionalities for developers. It is divided into m...TestCrm: Let go!Make our CrmVidCoder: VidCoder is a DVD ripping and video transcoding application. It uses HandBrake for the encoding engine, but has a revamped and easy to use UI writt...WPF Data Virtualization: Component for displaying and interacting a large data set in WPF application.WPF Gantt chart: Gantt chart control for WPFNew ReleasesAJAX Control Toolkit: 40412: AJAX Control Toolkit Release Notes - April 12th 2010 Release Version 40412April 12, 2010 release of the AJAX Control Toolkit. AJAX Control Toolkit...ASP.NET MVC | SCAFFOLD: ASP.NET MVC SCAFFOLD 1.0 PREVIEW: Primeiro release do ASP.NET MVC SCAFFOLD.Autenticar no OpenLDAP utilizando pGIna: LDAPAuth plugin: Release: DLL LDAPAuth Brief: pGina pluginBluetooth Radar: Version 1.8: Add position helper class to test whether a given point is on the interior of a circle. Random set of Devices on the radar + Zindex changes on Mous...Database Searcher: DB-Searcher Binaries v0.1: First beta version containing following features: Search exact database values via .NET DB-Provider Microsoft SQL MySQL .NET Connector (no .NET t...DBSourceTools: DBSourceTools_1.2.0.7: Release 1.2.0.7 Extended search engine from (pegas)'s patch. Fixed Script Data bug with reserved word (eripsni). Write Targets can now create targe...ESB Toolkit Extensions: Tellago SOA ESB Extenstions v0.4: Windows Installer file that installs Library on a BizTalk ESB 2.0 system. This Install automatically configures the esb.config to use the new compo...Fluent Assertions: Release 1.2: See this blog post for more details on this release: http://www.dennisdoomen.net/2010/04/fluent-assertions-12-has-been-released.htmlFNA Fractal Numerical Algorithm for a new encryption technology: FNA: This is a latest distribution ( 0.04 at the moment). Is a Perl package (.pm). 
More information on: http://search.cpan.org/~anak/Free Silverlight & WPF Chart Control - Visifire: Visifire SL and WPF Charts 3.0.6 Released!: Hi, Today we have released the final version of Visifire v3.0.6 which contains the following major features: * Zoom using interactive ZoomRec...HD-Trailers.NET Downloader: HD-Trailers.NET_Downloader_v.91_BETA: - added configuration option 'FeedAddress' to specify the URL of the RSS feed to consume - implemented fix for workitem4260: AddDate = false; will ...HobbyBrew Mobile: Beta 1: First public BetaHome Access Plus+: v3.2.6.1: v3.2.6.1 Fixed the wrong date in the iCal Generator Fixed the admin booking posting logging it as being booked by the admin Fixed the problem o...HTML Ruby: 6.21.1: Added back the space ruby text option More consistent ruby text positioning regardless to the page's stylesInfoPath Forms Services 2010 Web Testing Toolkit: IPFS 2010 Web Test Toolkit 20100412 for VS2008: The ExtractAndSubstituteDynamicInfoPathData web test plugin. To use it, simply add the plugin to your web test. It automatically recognizes the inf...IronPython: 2.6.1: Hello Python Community, We’re pleased to announce the final release of IronPython 2.6.1. This version of IronPython makes great strides in stabili...IronRuby: 1.0: IronRuby 1.0 is the first stable version of IronRuby, targeting Ruby 1.8.6 compatibility. For a high-level compatibility report solely based on Rub...METAR.NET Decoder: 0.3.x beta: First public release. Main of the application is working. Metar can be downloaded, decoded, updated and encoded back to metar string. Release incl...MiniTwitter: 1.11: MiniTwitter 1.11 更新内容 修正 設定ファイルを自動でバックアップして、破損したときは出来るだけ修正するように。 初回起動時にタイムラインを更新しようとすると落ちるバグを修正。MSBuild Mercurial Tasks: 1.0.1 Stable: Ready for Production release. This version integrates all the basic functionalities of Mercurial as defined in the Use Case 1.Multiplayer Quiz: Release 1_7_0_0: Latest Version Strongly recommended to use .NET 4.0 now that it is in RC It can be downloaded from hereMVC Foolproof Validation: Beta 0.9.3754: First Beta release. Addressed several bugs from alpha along with some considerable class refactoring. ModelAwareValidationAttribute will make creat...NodeXL: Network Overview, Discovery and Exploration for Excel: NodeXL Excel 2007 Template, version 1.0.1.121: The NodeXL Excel 2007 template displays a network graph using edge and vertex lists stored in an Excel 2007 workbook. What's NewThis version allow...Proxi [Proxy Interface]: Proxi Release 1.0.0.412: Proxi Release 1.0.0.412QueryUnit: QueryUnitPOC v. 0.0.0.8: This version add support for AreNotEqual, Greater, Less and fix some problems with "format" attribute used in conjunction with the string data type...Rainier: Trabalhos de orçamento empresarial: Estão disponíveis os arquivos de exemplo sobre o planejamento para orçamento empresarial. A resolução é referente aos exercícios explicados em sal...Runtime Intelligence API: Initial release: The initial release of the WCF contract & proxy assembly. AuthentiCode signed library.SharePoint Accelerators: Central Admin - Command Search: This web part allows you to search for a SharePoint 2010 Central Admin commands. 
This web part can come handy when you are demostrating SharePoint ...SharePoint Labs: SPLab5013A-FRA-Level100: SPLab5013A-FRA-Level100* This SharePoint Lab will teach you how to provision a computed site column that shows a customized view of an existing hid...SilverVNC 1.0: SilverVNC 1.0.3755.0: This download is the first release of the project published on www.silverlightplayground.org. For a detailed explanation please refer to http://www...Snippet Creator: SnippetCreator.Setup: This is the first and (I hope) final release.SQL Server Health & History (SQLH2): SQLH2 v.2.2.001: New Features Updated to use .Net 3.5 Job and Job history information implemented Last dif and log backup columns added Logical Disk implemented Dis...SQL Server Health & History (SQLH2): SQLH2PerfCollector v.2.1.003: Updated to run on .Net 3.5 Now installs to correct registry path on x64Star Trooper for XNA 2D Tutorial: Lesson one content: Here is Lesson one original content for the StarTrooper 2D XNA tutorial. The blog tutorial has now started over on http://xna-uk.net/blogs/darkgen...SysPad: SysPad 4.10.7.1: A folder management and scratchpad utility; especially useful in a business network setting that utilizes numerous, commonly used folders. The pro...TaskUnZip for SSIS: TaskUnZip for SSIS 1.1.0.0: Add: recursive compress. Add: filter option for exstract e compress file. (Tnx to: Kevin Wendler)TCP Wrapper: TCP Wrapper 1.0.0.3: Adding Client Accessor to CommingDataAvailableEventArgs ...UCD: Architecture: UCDArch 1.0: Production release of UCDArch 1.0 (for ASP.NET MVC 1.0). New Features including the ability to modify the NHibernate FlushMode, URL convention help...VCC: Latest build, v2.1.30412.0: Automatic drop of latest buildVidCoder: 0.1.0: First VidCoder beta release. It's missing a few features that will be added before release: Advanced x264 options In-GUI encode log Additiona...WatchersNET CKEditor™ Provider for DotNetNuke: CKEditor Provider 1.10.01: Whats New changesBrowser: Removed extra "\" sign from Current folder name selecting the root folder Browser: Fixed Folder Rendering Browser Fix...WPF Gantt chart: gantt: first, alpha version, of gantt chart for wpfxvanneste: Coverflow et thumbnail sharepoint: Code du coverflow silverlight du webcast sur les thumbnails sharepointMost Popular ProjectsWBFS ManagerRawrMicrosoft SQL Server Product Samples: DatabaseAJAX Control ToolkitSilverlight ToolkitWindows Presentation Foundation (WPF)ASP.NETMicrosoft SQL Server Community & SamplesFacebook Developer ToolkitPHPExcelMost Active ProjectsRawrnopCommerce. Open Source online shop e-commerce solution.AutoPocopatterns & practices – Enterprise LibraryShweet: SharePoint 2010 Team Messaging built with PexFarseer Physics EngineNB_Store - Free DotNetNuke Ecommerce Catalog ModuleIonics Isapi Rewrite FilterBlogEngine.NETBeanProxy

    Read the article

  • Oracle Fusion Middleware Innovation Award Winners 2012: ADF & Fusion Development

    - by Dana Singleterry
Oracle Fusion Middleware Innovation Awards honor customers for their cutting-edge solutions using Oracle Fusion Middleware. Winners are selected based on the uniqueness of their business case, business benefits, level of impact relative to the size of the organization, complexity and magnitude of implementation, and the originality of architecture. The awards were presented during Oracle OpenWorld 2012, and the following winners are in the category of ADF & Fusion Development. Micros – an OPN Platinum partner – has been working closely with Oracle product management teams in applying industry best practices in the development of their solutions. Their current application suite for the hospitality industry was built on Oracle Forms and the Oracle database running on MS Windows. The next generation of this suite is being developed and released in modules that are now based on Oracle FMW (including ADF) 11g technologies and Oracle Database 11g, all running on Oracle Linux. The primary driver was modernization, and hence Oracle ADF was selected to provide a rich UI for business processes that could be served up through traditional methods or through mobile devices globally. SOA Suite and ADF allowed for loosely-coupled services that could evolve with the needs of the business. Micros's application innovations include the use of business application portlets that have been published from ADF Faces Task Flows, generated using WebCenter portlet libraries and Oracle Metadata Services (MDS), with multi-layered customizations using Oracle WebCenter Composer. PCS (Marfin Egnatia Bank of Greece) – PCS Wealth Management is a WM Software Solution which captures and automates the WM business processes, allowing Service Providers to allocate enough time and effort to Customer Service and Investment Strategies, under Advisory or Execution-Only Services. The Product is built upon the latest Web Technologies and ensures Best Practices covering all functional expectations, meeting local regulatory requirements and discovering successful opportunities for the WM Customers' Portfolios. The new unified Wealth Management system offers an unparalleled User Interface, taking full advantage of the user-friendly ADF Faces Components to a great extent, all serving Private Banking purposes. The application offers a true Account Officer Cockpit with shallow navigation, one-click access to informed decisions and perfect customer service. ADF Grids and Pivots, the Data Visualization Components, as well as the Calendar and Map Components, are cleverly used to help the user eliminate the usage of Excel, Outlook and other systems.
PCS's application is unique in the way it leverages the ADF Faces data visualization components to create a truly attractive and insightful dashboard. PCS Wealth Management Demo. Qualcomm – Qualcomm, a $17B-per-year company, designs and sells semiconductor products for wireless telecommunications, mobile and computing markets. In addition, Qualcomm companies provide various hardware and software products to facilitate the design, development and deployment of phones and the applications that run on them. Qualcomm’s challenge has been not only to develop and deploy new business system functions to keep pace with customer demand, but also to provide a customer collaboration capability that is sufficiently robust, easy to use, and flexible to meet emerging and future needs. Qualcomm has taken successful steps in building and deploying its customer engagement platform, leveraging various Oracle technologies including Fusion Middleware (ADF, SOA, OBIEE) and its proven ERP foundation of EBS and 11g databases. The new platform delivers a more unified and “seamless” business solution with a consistent, modern “look and feel”, all based on standard business processes which facilitate efficient collaboration between Qualcomm and its customers. The look and feel leverages ADF in innovative ways and includes hover-over navigation, custom pagination components, and skinning. Qualcomm has exposed a services layer that provides significant functionality including order-to-ship, quote-to-order, customer on-boarding and contract validation. Qualcomm's creative designs leverage Oracle's SOA Suite to integrate Oracle EBS and disparate applications, providing a rich user interface through the use of Oracle ADF Faces Rich Client Components and a self-service solution for their customers.

    Read the article

  • Create a Social Community of Trust Along With Your Federal Digital Services Governance

    - by TedMcLaughlan
The Digital Services Governance Recommendations were recently released, supporting the US Federal Government's Digital Government Strategy Milestone Action #4.2 to establish agency-wide governance structures for developing and delivering digital services. Figure 1 - From: "Digital Services Governance Recommendations". While extremely important from a policy and procedure perspective within an Agency's information management and communications enterprise, these recommendations only very lightly reference perhaps the most important success enabler - the "Trusted Community" required for the ultimate usefulness of the services delivered. By "ultimate usefulness", I mean the collection of public, transparent properties around government information and digital services that includes social trust and validation, social reach, expert respect, and comparative, standard measures of relative value. In other words, do the digital services meet the expectations of the public, social media ecosystem (people AND machines)? A rigid governance framework, controlling the creation and dissemination of digital services through rules, policies and roles, may meet the expectations of direct end-users and most stakeholders - including the agency information stewards and security officers. All others who may share comments about the services, write about them, swap or review extracts, repackage, visualize or otherwise repurpose the output for use in entirely unanticipated, social ways - these "stakeholders" will not be governed, but may observe guidance generated by a "Trusted Community". As recognized members of the trusted community, these stakeholders may ultimately define the right scope and detail of governance that all other users might observe, promoting and refining the usefulness of the government product as the social ecosystem expects. So, as part of an agency-centric governance framework, it's advised that a flexible governance model be created for stewarding a "Community of Trust" around the digital services. The first steps follow the approach outlined in the Recommendations: Step 1: Gather a Core Team. In addition to the roles and responsibilities described, a set of characteristics and responsibilities can perhaps be developed for the "Trusted Community Steward/Advocate" - i.e. a person or team that (a) is entirely cognizant of and respected within the external social media communities, and (b) is trusted both within the agency and outside as a practical, responsible, non-partisan communicator of useful information. This may seem like a standard Agency PR/Outreach team role - but often an agency or stakeholder subject matter expert with a public, active social persona works even better. Step 2: Assess What You Have. In addition to existing agency or stakeholder decision-making bodies and assets, it's important to take a PR/Marketing view of the social ecosystem. How visible are the services across the social channels utilized by current or desired constituents of your agency? What's the online reputation of your agency and perhaps the service(s)? Is Search Engine Optimization (SEO) a facet of external communications/publishing lifecycles? Who are the public champions, instigators, value-adders for the digital services, or perhaps just influential "communicators" (i.e. with no stake in the game)? You're essentially assessing your market and social presence, and identifying the actors (including your own agency employees) in the existing community of trust.
Step 3: Determine What You Want. The evolving Community of Trust will most readily absorb, support and provide feedback regarding "Core Principles" (Element B of the "six essential elements of a digital services governance structure") shared by your Agency, and will obviously play a large, though probably very unstructured, part in Element D, "Stakeholder Input and Participation". Plan for this, and seek input from the social media community with respect to performance metrics - these should be geared around the outcome and growth of the trusted community's actions. How big and active is this community? What's the influential reach of this community with respect to particular messaging or campaigns generated by the Agency? What's the referral rate TO your digital services, FROM channels owned or operated by members of this community? (This requires governance with respect to content generation, inclusive of "markers" or "tags".) At this point, while your Agency proceeds with steps 4 ("Build/Validate the Governance Structure") and 5 ("Share, Review, Upgrade"), the Community of Trust might as well just get going, and start adding value and usefulness to the existing conversations and existing data services - loosely though directionally stewarded by your trusted advocate(s). Why is this an "Enterprise Architecture" topic? Because it's increasingly apparent that a Public Service "Enterprise" is not wholly contained within Agency facilities, firewalls and job titles - it's also manifested in actual, perceived or representative forms outside the walls, on the social Internet. An Agency's EA model and resulting investments both facilitate and are impacted by the "Social Enterprise". At Oracle, we're very active both within our Enterprise and outside, helping foster social architectures that enable truly useful public services, digital or otherwise.

    Read the article

  • The Red Gate and .NET Reflector Debacle

    - by Rick Strahl
About a month ago Red Gate – the company that owns the .NET Reflector tool most .NET devs use at one point or another – decided to change their business model for Reflector and take the product from free to a fully paid-for license model. As a bit of history: .NET Reflector was originally created by Lutz Roeder as a free community tool to inspect .NET assemblies. Using Reflector you can examine the types in an assembly, drill into type signatures and quickly disassemble code to see how a particular method works. In case you’ve been living under a rock and you’ve never looked at Reflector, here’s what it looks like drilled into an assembly from disk with some disassembled source code showing: Note that you get tons of information about each element in the tree, and almost all related types and members are clickable both in the list and source view, so it’s extremely easy to navigate and follow the code flow even in this static assembly-only view. For many years Lutz kept the tool up to date and added more features, gradually improving an already amazing tool and making it better. Then about two and a half years ago Red Gate bought the tool from Lutz. A lot of ruckus and noise ensued in the community back then about what would happen with the tool and… for the most part very little did. Other than the incessant update notices with a prominent Red Gate promo on them, life with Reflector went on. The product didn’t die and it didn’t go commercial or to a charge model. When .NET 4.0 came out it still continued to work, mostly because the .NET feature set doesn’t drastically change how types behave. Then a month back Red Gate started making noise about a new version, Version 7, which would be commercial. No more free version - and a shit storm broke out in the community. Now normally I’m not one to be critical of companies trying to make money from a product, much less for a product that’s as incredibly useful as Reflector. There isn’t a day in .NET development that goes by for me where I don’t fire up Reflector. Whether it’s for examining the innards of the .NET Framework, checking out third party code, or verifying some of my own code and resources. Even more so recently I’ve been doing a lot of Interop work with a non-.NET application that needs to access .NET components, and Reflector has been immensely valuable to me (and my clients) in figuring out the exact type signatures required to call .NET components in assemblies. In short, Reflector is an invaluable tool to me. Ok, so what’s the problem? Why all the fuss? Certainly the $39 Red Gate is trying to charge isn’t going to kill any developer. If there’s any tool in .NET that’s worth $39 it’s Reflector, right? Right, but that’s not the problem here. The problem is how Red Gate went about moving the product to commercial, which borders on the downright bizarre. It’s almost as if somebody in management wrote a slogan: “How can we piss off the .NET community in the most painful way we can?” And at that, it seems, Red Gate has utterly succeeded. People are rabid, and for once I think that this outrage isn’t exactly misplaced. Take a look at the message thread that Red Gate dedicated from a link off the download page. Not only is Version 7 going to be a paid commercial tool, but the older versions of Reflector won’t be available any longer. Not only that, but older versions that are already in use will also continually try to update themselves to the new paid version – which, when installed, will then expire unless registered properly.
There have also been reports of Version 6 installs shutting themselves down and failing to work if the update is refused (I haven’t seen that myself, so I’m not sure if that’s true). In other words, Red Gate is trying to make damn sure they’re getting your money if you attempt to use Reflector. There’s a lot of temptation there. Think about the millions of .NET developers out there and all of them possibly upgrading – that’s a nice chunk of change that Red Gate’s sitting on. Even with all the community backlash, these guys are probably making some bank right now, just because people need to move on. Red Gate also put up a Feedback link on the download page – which, not surprisingly, is chock full of hate mail condemning the move. Oddly there’s not a single response to any of those messages by the Red Gate folks, except when it concerns license questions for the full version. It puzzles me what purpose that link serves other than as yet another complete example of failure to understand how to handle customer relations. There’s no doubt that all of this has caused some serious outrage in the community. The sad part though is that this could have been handled so much less arrogantly, without pissing off the entire community and causing so much ill will. People are pissed off and I have no doubt that this negative publicity will show up in the sales numbers for their other products. I certainly hope so. Stupidity ought to be painful! Why do companies do boneheaded stuff like this? Red Gate’s original decision to buy Reflector was hotly debated, but at the time most of what would happen was speculation. But I thought it was a smart move for any company in need of spreading its marketing message and corporate image as a vendor in the .NET space. Where else do you get to flash your corporate logo to hordes of .NET developers on a regular basis? Exploiting that marketing with the goodwill of providing a free tool breeds positive feedback that hopefully has a good effect on the company’s visibility and the products it sells. Instead Red Gate seems to have taken exactly the opposite tack of corporate bullying to try to make a quick buck – and in the process ruined any community goodwill that might have come from providing a service to the community for free while still getting valuable marketing. What’s so puzzling about this boneheaded escapade is that the company doesn’t need to resort to underhanded tactics like what they are trying with Reflector 7. The tools the company makes are very good. I personally use SQL Compare, SQL Data Compare and ANTS Profiler on a regular basis, and all of these tools are essential in my toolbox. They certainly work much better than the tools that are in the box with Visual Studio. Chances are that if Reflector 7 added useful features I would have been more than happy to shell out my $39 to upgrade when the time was right. It’s expensive to give away stuff for free. At the same time, this episode shows some of the big problems that come with ‘free’ tools. A lot of organizations are realizing that giving stuff away for free is actually quite expensive, and the payback is often very intangible, if there is any at all. Those that rely on donations or other voluntary compensation find that the amount contributed is so minuscule as to not matter at all.
Yet at the same time I bet most of those clamouring the loudest on that Red Gate Reflector feedback page about Reflector no longer being free have probably NEVER made a donation to any open source project or free tool, ever. The expectation of free these days is just too great – which is a shame, I think. There’s a lot to be said for paid software and having somebody to hold responsible because you gave them some money. There’s an incentive –> payback –> responsibility model that seems to be missing from free software (not all of it, but a lot of it). While there certainly are plenty of bad apples in paid software as well, money tends to be a good motivator for people to continue working on and improving products. Reasons for giving away stuff are many, but often it’s a naïve desire to share things while things are simple. At first it might be no problem to volunteer time and effort, but as products mature the fun goes out of it, and as the reality of product maintenance kicks in, developers want to get something back for the time and effort they’re putting into non-glamorous work. It’s then that products die or languish, and this is painful for all to watch. For Red Gate however, I think there was always a pretty good payback from the Reflector acquisition in terms of marketing: visibility and possible positioning of their products, although they seem to have mostly ignored that option. On the other hand, they started this off pretty badly even two and a half years back when they acquired Reflector from Lutz, with the same arrogant attitude that is evident in the latest episode. You really gotta wonder what folks in management are thinking – the sad part is, from advance emails that were circulating, they were fully aware of the shit storm they were inciting with this, and I suspect they are banking on the sheer numbers of .NET developers to still make them a tidy chunk of change from upgrades… Alternatives are coming. For me personally the single license isn’t a problem, but I actually have a tool that I sell (an interop Web Service proxy generation tool) to customers, and one of the things I recommend using with it has been Reflector, to view assembly information and to find which Interop classes to instantiate from the non-.NET environment. It’s been nice to use Reflector for this, with its small footprint and zero-configuration installation. But now, with V7 becoming a paid tool, that option is not going to be available anymore. Luckily it looks like the .NET community is jumping to it and trying to fill the void. Amidst the Red Gate outrage a new library called ILSpy has sprung up, providing at least some of the core functionality of Reflector as an open source library. It looks promising going forward, and I suspect there will be a lot more support and interest in this project now that Reflector has gone over to the ‘dark side’… © Rick Strahl, West Wind Technologies, 2005-2011

    Read the article

  • Performing a clean database creation using msbuild

    - by Robert May
    So I’m taking a break from writing about other Agile stuff for a post. :)  I’m still going to get back to the other subjects, but this is fun too. Something I’ve done quite a bit of is MSBuild and CI work.  I’m experimenting with ways to improve what I’ve done in the past, particularly around database CI. Today, I developed a mechanism for starting from scratch with your database.  By scratch, I mean blowing away the existing database and creating it again from a single command line call.  I’m a firm believer that developers should be able to get to a known clean state at the database level with a single command and that they should be operating off of their own isolated database to improve productivity.  These scripts will help that. Here’s how I did it.  First, we have to disconnect users.  I did so using the help of a script from sql server central.  Note that I’m using sqlcmd variable replacement. -- kills all the users in a particular database -- dlhatheway/3M, 11-Jun-2000 declare @arg_dbname sysname declare @a_spid smallint declare @msg varchar(255) declare @a_dbid int set @arg_dbname = '$(DatabaseName)' select @a_dbid = sdb.dbid from master..sysdatabases sdb where sdb.name = @arg_dbname declare db_users insensitive cursor for select sp.spid from master..sysprocesses sp where sp.dbid = @a_dbid open db_users fetch next from db_users into @a_spid while @@fetch_status = 0 begin select @msg = 'kill '+convert(char(5),@a_spid) print @msg execute (@msg) fetch next from db_users into @a_spid end close db_users deallocate db_users GO Once all users are booted from the database, we can commence with recreating the database.  I generated the script that is used to create a database from SQL Server management studio, so I’m only going to show the bits that weren’t generated that are important.  There are a bunch of Alter Database statements that aren’t shown. First, I had to find the default location of the database files in the install, since they can be in many different locations.  I used Method 1 from a technet blog and then modified it a bit to do what I needed to do.  I ended up using dynamic SQL because for the life of me, I couldn’t get the “Filename” property to not return an error when I used anything besides a string.  I’m dropping the database first, if it exists.  Here’s the code:   IF EXISTS(SELECT 1 FROM [master].[sys].[databases] WHERE [name] = N'$(DatabaseName)') BEGIN drop database $(DatabaseName) END; go IF EXISTS(SELECT 1 FROM [master].[sys].[databases] WHERE [name] = 'zzTempDBForDefaultPath') BEGIN DROP DATABASE zzTempDBForDefaultPath END; -- Create temp database. Because no options are given, the default data and --- log path locations are used CREATE DATABASE zzTempDBForDefaultPath; DECLARE @Default_Data_Path VARCHAR(512), @Default_Log_Path VARCHAR(512); --Get the default data path SELECT @Default_Data_Path = ( SELECT LEFT(physical_name,LEN(physical_name)-CHARINDEX('\',REVERSE(physical_name))+1) FROM sys.master_files mf INNER JOIN sys.[databases] d ON mf.[database_id] = d.[database_id] WHERE d.[name] = 'zzTempDBForDefaultPath' AND type = 0); --Get the default Log path SELECT @Default_Log_Path = ( SELECT LEFT(physical_name,LEN(physical_name)-CHARINDEX('\',REVERSE(physical_name))+1) FROM sys.master_files mf INNER JOIN sys.[databases] d ON mf.[database_id] = d.[database_id] WHERE d.[name] = 'zzTempDBForDefaultPath' AND type = 1); --Clean up. 
IF EXISTS(SELECT 1 FROM [master].[sys].[databases] WHERE [name] = 'zzTempDBForDefaultPath') BEGIN DROP DATABASE zzTempDBForDefaultPath END; DECLARE @SQL nvarchar(max) SET @SQL= 'CREATE DATABASE $(DatabaseName) ON PRIMARY ( NAME = N''$(DatabaseName)'', FILENAME = N''' + @Default_Data_Path + N'$(DatabaseName)' + '.mdf' + ''', SIZE = 2048KB , FILEGROWTH = 1024KB ) LOG ON ( NAME = N''$(DatabaseName)Log'', FILENAME = N''' + @Default_Log_Path + N'$(DatabaseName)' + '.ldf' + ''', SIZE = 1024KB , FILEGROWTH = 10%) ' exec (@SQL) GO And with that, your database is created.  You can run these scripts on any server and on any database name.  To do that, I created an MSBuild script that looks like this: <Project xmlns="http://schemas.microsoft.com/developer/msbuild/2003" ToolsVersion="4.0"> <PropertyGroup> <DatabaseName>MyDatabase</DatabaseName> <Server>localhost</Server> <SqlCmd>sqlcmd -v DatabaseName=$(DatabaseName) -S $(Server) -i </SqlCmd> <ScriptDirectory>.\Scripts</ScriptDirectory> </PropertyGroup> <Target Name ="Rebuild"> <ItemGroup> <ScriptFiles Include="$(ScriptDirectory)\*.sql"/> </ItemGroup> <Exec Command="$(SqlCmd) &quot;%(ScriptFiles.Identity)&quot;" ContinueOnError="false"/> </Target> </Project> Note that the Scripts directory is underneath the directory where I’m running the msbuild command and is relative to that directory.  Note also that the target is using batching to run each script in the scripts subdirectory, one after the other.  Each script is passed to the sqlcmd command line execution using the .Identity property on the itemgroup that is created.  This target file is saved in the file “Database.target”. To make this work, you’ll need msbuild in your path, and then run the following command: msbuild database.target /target:Rebuild Once you’ve got your virgin database setup, you’d then need to use a tool like dbdeploy.net to determine that it was a virgin database, build a change script based on the change scripts, and then you’d want another sqlcmd call to update the database with the appropriate scripts.  I’m doing that next, so I’ll post a blog update when I’ve got it working. Technorati Tags: MSBuild,Agile,CI,Database
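    Since DatabaseName and Server are declared as ordinary MSBuild properties, they should also be overridable per invocation with MSBuild's standard /p: switch, rather than by editing the target file – a usage sketch (the database and server names here are made up for illustration):

        msbuild database.target /target:Rebuild /p:DatabaseName=IsolatedDevDb /p:Server=localhost\SQLEXPRESS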

    Read the article

  • NDepend 4 – First Steps

    - by Ricardo Peres
    Introduction Thanks to Patrick Smacchia I had the chance to test NDepend 4. I can only say: awesome! This will be the first of a series of posts on NDepend, where I will talk about my discoveries. Keep in mind that I am just starting to use it, so more experienced users may find these too basic; I just hope I don’t say anything foolish! I must say that I am in no way affiliated with NDepend and I never actually met Patrick. Installation No installation program – a curious decision, though I’m not against it – just unzip the files to a folder and run the executable. It will optionally register itself with Visual Studio 2008, 2010 and 11 as well as RedGate’s Reflector; also, it automatically looks for updates. NDepend can either be used as a stand-alone program (with or without a GUI) or from within Visual Studio or Reflector. Getting Started One thing that really pleases me is the Getting Started section of the stand-alone program, with links to pages on NDepend’s web site featuring detailed explanations, which usually include screenshots and small videos (<5 minutes). There’s also a How do I section with hierarchical navigation that guides us through the major features so that we can easily find what we want. Usage There are two basic ways to use NDepend: analyze .NET solutions, projects or assemblies; or compare two versions of the same assembly. I have so far not used NDepend to compare assemblies, so I will first talk about the first option. After selecting a solution and some of its projects, it generates a single HTML page with a highly detailed report of the analysis it produced. This includes metrics such as the number of lines of code, IL instructions, comments, types, methods and properties, the calculation of cyclomatic complexity, coupling and lots of other indicators, typically grouped by type, namespace and assembly. The HTML also includes some nice diagrams depicting assembly dependencies, type and method relative proportions (according to the number of IL instructions, I guess) and assembly analysis relating to abstractness and stability. Useful, I would say. Then there are the rules; NDepend tests the target assemblies against a set of more than 120 rules, grouped into the categories Code Quality, Object Oriented Design, Design, Architecture and Layering, Dead Code, Visibility, Naming Conventions, Source Files Organization and .NET Framework Usage. The full list can be configured in the application, and an explanation of each rule can be found on the web site. Rules can pass, be violated, or be violated in a critical manner, and the HTML will contain the violated rules, their queries – more on this later – and results. The HTML uses some nice JavaScript effects, which allow paging and sorting of tables, so it’s nice to use. Similar to the rules, there are queries that display results for a number of questions (about 200) grouped as Object Oriented Design, API Breaking Changes (for assembly version comparison), Code Diff Summary (also for version comparison) and Dead Code. The difference between queries and rules is that queries are not classified as passed, violated or critically violated; they just present results. The queries and rules are expressed through CQLinq, which is a very powerful LINQ derivative specific to code analysis. All of the included rules and queries can be enabled or disabled and new ones can be added, with IntelliSense to help. 
Besides the HTML report file, the NDepend application can be used to explore all analysis results, compare different versions of analysis reports and run custom queries. Comparison to Other Analysis Tools Unlike StyleCop, NDepend only works with assemblies, not source code, so you can’t expect it to be able to enforce bracket placement, for example. It is more similar to FxCop, but you don’t have the option to analyze at the IL level, that is, other than the number of IL instructions and the complexity. What’s Next In the coming days I’ll continue my exploration with a real-life test case. References The NDepend web site is http://www.ndepend.com/. Patrick keeps an updated blog on http://codebetter.com/patricksmacchia/ and he regularly monitors StackOverflow for questions tagged NDepend, which you can find on http://stackoverflow.com/questions/tagged/ndepend. The default list of CQLinq rules, queries and statistics can be found at http://www.ndepend.com/DefaultRules/webframe.html. The syntax itself is described at http://www.ndepend.com/Doc_CQLinq_Syntax.aspx and its features at http://www.ndepend.com/Doc_CQLinq_Features.aspx.
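    To give a flavour of what such a rule looks like, here is a minimal CQLinq sketch following the documented syntax linked above – the rule and its threshold are illustrative, not one of NDepend's shipped defaults:

        // Illustrative CQLinq rule (not an NDepend default): flag methods
        // whose cyclomatic complexity exceeds an arbitrary threshold.
        warnif count > 0
        from m in JustMyCode.Methods
        where m.CyclomaticComplexity > 20
        orderby m.CyclomaticComplexity descending
        select new { m, m.CyclomaticComplexity }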

    Read the article

  • How to UEFI install Ubuntu 12.10?

    - by Geezanansa
    Running a newer FM1 motherboard which is using an AMD 3870k APU with a new 1TB HDD. Following the advice in the motherboard manual and https://help.ubuntu.com/community/UEFI I have now got to the grub option screen for a UEFI install; see http://imgur.com/VW5vz The dvd.iso being used is Ubuntu 12.10 desktop amd64 from ubuntu.com. The hdd has had a gpt partition table made for it by using gparted in a live desktop session when booted in bios mode. (*edit/update: Although the old cd updates on running, it uses an old kernel; it did make a gpt, but that version of gparted uses fdisk, whereas gdisk is required to make a gpt. Think I am going to have to spend more time here http://www.dedoimedo.com/computers/gparted.html lol. Using the gparted from the 12.10 live session to make partitions, following the guidance regarding this at https://help.ubuntu.com/community/UEFI#Creating_an_EFI_partition, but I can only boot to the grub option screen http://imgur.com/VW5vz – when the 12.10 options to "try ubuntu" or "install ubuntu" are selected they give errors as described below*) but after making the gpt I decided to leave it as unformatted/unallocated space with the intention of using the installer to set up partitions. update: that was the original plan, but gparted now sees the hdd as http://imgur.com/hFIvm as described above. *Booting the live dvd in EFI mode gives "Secure Boot not installed" just before the grub kernel option list with the option to "install ubuntu", but I get "can not read cd/0" and "the kernel must be loaded first" errors when that option is selected. Any pointers on how to get the installer going for a UEFI install would be good. Thanks in advance. update: Hopefully these screenshots can help better highlight where I am going wrong or whether there is something else going on: http://imgur.com/g30RB, http://imgur.com/VW5vz, http://imgur.com/31E0q, http://imgur.com/bnuaG, http://imgur.com/y4KGu, http://imgur.com/3u2QE, http://imgur.com/n9lN3, http://imgur.com/FEKvz, http://imgur.com/hFIvm update: Thank you fernando garcia for pointing me in the right direction to start the process of elimination. What I have done since asking the question is a little homework, starting here http://askubuntu.com/faq#bounty and here http://askubuntu.com/questions/how-to-ask. Looking at other similar questions was good fun and I found this 12.10 UEFI Secure Boot install the most relevant in helping to get ubuntu to UEFI install on my system. In response to wolverine's question this article was referred to: http://web.dodds.net/~vorlon/wiki/blog/SecureBoot_in_Ubuntu_12.10/ This article in the first sentence gives a link to http://www.ubuntu.com/download which is where I downloaded the 12.10 desktop amd64 .iso (and others), but I have been unable to do an EFI install of ubuntu on this system, and as this is a new system I have ended up just going with the bios installer, which at least puts my mind at ease that I have not bricked my new mobo (had to do a clrcmos and flash to the latest bios version). So it could possibly be the bios settings or the bios version being used that is the problem. Trying to eliminate the bios version is hard because I can not get to the POST screen in order to id the bios version being used. Pressing tab to show POST instead of the logo and trying to press pause/break to catch POST is proving difficult. If the logo screen in bios is disabled I just get a black screen – no POST shown – and pressing tab does not show POST. 
I appreciate that using the appropriate bios settings and the latest 12.10 release should simply get the UEFI installer running when selected from the grub list (there are nice graphic details in the "Identifying if the computer boots the CD in EFI mode" section at https://help.ubuntu.com/community/UEFI#Identifying_if_the_computer_boots_the_CD_in_EFI_mode). And to confirm whether the hdd is booting in EFI mode (https://help.ubuntu.com/community/UEFI#Identifying_if_the_computer_boots_the_HDD_in_EFI_mode), running the command [ -d /sys/firmware/efi ] && echo "EFI boot on HDD" || echo "Legacy boot on HDD" gave Legacy boot on HDD. This is as expected, because I allowed the bios installer (which was 12.04 desktop amd64, after trying 12.10 desktop amd64 in EFI mode) to run to get a working installation. Which is not what was intended or wished for, but I wanted to get a working OS to bench test the new mobo, i.e. prove it is working. There are other options, as in installing other boot managers/loaders, but I do not wish to do so, as shim should get grub2 going – that is, after Secure Boot has been signed. (I now have a rough idea of what should happen, it just ain't happening. Is it possible AHCI drivers are required?) Will post the boot info script url of the updated config/setup. The original question asked seems irrelevant to what is being said in this update, but as the problem is not resolved I will keep on trying to EFI install! i.e. the problem is the same as when the question was asked; I am just trying to update. Have tried to edit and update the best I can!
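    As a hedged aside that is not part of the original question: another way to cross-check the firmware side from a live session is the efibootmgr utility, which reads the firmware's boot entries and only runs when the session itself was booted in EFI mode – so whether it runs at all is a diagnostic in its own right:

        sudo efibootmgr -v

    If the session was booted in BIOS/legacy mode, efibootmgr will refuse to run and complain that EFI variables are not supported, instead of listing the firmware boot entries.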

    Read the article

  • Day 6 - Game Menuing Woes and Future Screen Sneak Peeks

    - by dapostolov
    So, after my last post on Day 5 I dabbled with my game class design. I took the approach where each game objects is tightly coupled with a graphic. The good news is I got the menu working but not without some hard knocks and game growing pains. I'll explain later, but for now...here is a class diagram of my first stab at my class structure and some code...   Ok, there are few mistakes, however, I'm going to leave it as is for now... As you can see I created an inital abstract base class called GameSprite. This class when inherited will provide a simple virtual default draw method:        public virtual void DrawSprite(SpriteBatch spriteBatch)         {             spriteBatch.Draw(Sprite, Position, Color.White);         } The benefits of coding it this way allows me to inherit the class and utilise the method in the screen draw method...So regardless of what the graphic object type is it will now have the ability to render a static image on the screen. Example: public class MyStaticTreasureChest : GameSprite {} If you remember the window draw method from Day 3's post, we could use the above code as follows...         protected override void Draw(GameTime gameTime)         {             GraphicsDevice.Clear(Color.CornflowerBlue);             spriteBatch.Begin(SpriteBlendMode.AlphaBlend);             foreach(var gameSprite in ListOfGameObjects)            {                 gameSprite.DrawSprite(spriteBatch);            }             spriteBatch.End();             base.Draw(gameTime);         } I have to admit the GameSprite object is pretty plain as with its DrawSprite method... But ... we now have the ability to render 3 static menu items on the screen ... BORING! I want those menu items to do something exciting, which of course involves animation... So, let's have a peek at AnimatedGameSprite in the above game diagram. The idea with the AnimatedGameSprite is that it has an image to animate...such as ... characters, fireballs, and... menus! So after inheriting from GameSprite class, I added a few more options such as UpdateSprite...         public virtual void UpdateSprite(float elapsed)         {             _totalElapsed += elapsed;             if (_totalElapsed > _timePerFrame)             {                 _frame++;                 _frame = _frame % _framecount;                 _totalElapsed -= _timePerFrame;             }         }  And an overidden DrawSprite...         public override void DrawSprite(SpriteBatch spriteBatch)         {             int FrameWidth = Sprite.Width / _framecount;             Rectangle sourcerect = new Rectangle(FrameWidth * _frame, 0, FrameWidth, Sprite.Height);             spriteBatch.Draw(Sprite, Position, sourcerect, Color.White, _rotation, _origin, _scale, SpriteEffects.None, _depth);         } With these two methods...I can animate and image, all I had to do was add a few more lines to the screens Update Method (From Day 3), like such:             float elapsed = (float) gameTime.ElapsedGameTime.TotalSeconds;             foreach (var item in ListOfAnimatedGameObjects)             {                 item.UpdateSprite(elapsed);             } And voila! My images begin to animate in one spot, on the screen... Hmm, but how do I interact with the menu items using a mouse...well the mouse cursor was easy enough... this.IsMouseVisible = true; But, to have it "interact" with an image was a bit more tricky...I had to perform collision detection!             
mouseStateCurrent = Mouse.GetState(); var uiEnabledSprites = (from s in menuItems where s.IsEnabled select s).ToList(); foreach (var item in uiEnabledSprites) { var r = new Rectangle((int)item.Position.X, (int)item.Position.Y, item.Sprite.Width, item.Sprite.Height); item.MenuState = MenuState.Normal; if (r.Intersects(new Rectangle(mouseStateCurrent.X, mouseStateCurrent.Y, 0, 0))) { item.MenuState = MenuState.Hover; if (mouseStatePrevious.LeftButton == ButtonState.Pressed && mouseStateCurrent.LeftButton == ButtonState.Released) { item.MenuState = MenuState.Pressed; } } } mouseStatePrevious = mouseStateCurrent; So, basically, what the code above does is iterate through all my interactive objects, detect a rectangle collision between the mouse and each object, and play the appropriate state animation (or static image).  Lessons Learned, Time Burned... So, I think I did well to start, but after I hammered out my prototype...well...things got sloppy and I began to realise some design flaws... At the time: I couldn't seem to figure out how to open another window, such as the character creation screen; input was not event based and it was bugging me; my menu design relied heavily on mouse input and I couldn't use the keyboard; mouse input is tightly bound to graphic rendering / positioning, so its logic will have to be in each scene; menu animations would stop mid-frame, then continue when the action occurred again. This is bad, because...what if I had a sword sliding on the screen? Then it would slide a quarter of the way, then stop due to another action, then render again mid-slide... it just looked sloppy. Menu, Solved!? To solve the above problems I did a little research and I found some great code in the XNA forums. The one worth mentioning was the GameStateManagementSample. With this sample, you can create a basic "text based" menu system which allows you to swap screens, pop up screens, play the game, and quit....basic game state management... In my next post I'm going to delve a bit more into this code and adapt it to my code from this prototype. Text based menus just won't cut it for me, for now...however, I'm still going to stick with my animated menu item idea. A sneak peek using the Game State Management Sample...with no changes made... Cool Things to Mention: At work ... I tend to break out in random conversations every-so-often and I get talking about some of my challenges with this game (or some stupid observation about something... stupid). During one conversation I was discussing how I should animate my images; I explained that I knew I had to use the Update method provided, but I didn't know how (at the time) to render an image at an appropriate "pace" and how many frames to use, etc. I also got thinking that if a machine rendered my images faster / slower, that was surely going to f-up my animations. To which a friend, Sheldon, answered: surely the Draw method is like a camera taking a snapshot of a scene in time. Then it clicked...I understood the big picture of the game engine... After some research I discovered that the Draw method attempts to keep a framerate of 60 fps. 
From what I understand, the game engine will even leave out a few calls to the draw method if it begins to slow down. This is why we want to put our sprite updates in the update method. Then, using a game timer (provided by the engine), we want to render the scene based on real time passed, not framerate. So even if the engine renders at 20 fps, the animations will still animate at the same real-time speed! Which brings up another point. Why 60 fps? I'm speculating that Microsoft capped it because LCDs don't refresh faster than 60 fps. On another note, if the game engine knows it's falling behind in rendering...then surely we can harness this to speed up our games. Maybe I can find some flag which tells me if the game is lagging, and what the current framerate is, etc... (instead of coding it like I did last time). Sheldon suggested maybe I can render like WoW does, in prioritised layers...I think he's onto something, however I don't think I'll have that many graphics to worry about such a problem of graphic latency. We'll see. People to Mention: Well, as you are aware I hadn't posted in a couple of days, and I was surprised to see a few emails and messenger queries about my game progress (and some concern as to why I stopped). I want to thank everyone for their kind words of support and put everyone at ease by stating that I do intend on completing this project. Granted I only have a few hours each night, but, I'll do it. Thank you to Garth for mailing in my next screen! That was a nice surprise! The Sneak Peek you've been waiting for... Garth has also volunteered to render me some wizard images. He was a bit shocked when I asked for them in 2D animated strips. He said I was going backward (and that I have really bad Game Development Lingo). But, I advised Garth that I will use 3D images later...for now...2D images. Garth also had some great game design ideas to add on. I advised him that I will save his ideas and include them in the future design document (for the 3D version?). Lastly, my best friend Alek is going to join me in developing this game. This was a project we started eons ago but never completed because of our careers. Now, priorities change and we have some spare time on our hands. Let's see what trouble Alek and I can get into! Tonight I'll be uploading my prototypes and base game to source control for both of us to work off of. D.
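    As it happens, XNA does expose such a flag – GameTime.IsRunningSlowly. Here is a minimal sketch of the time-based update described above; UpdateSprite and ListOfAnimatedGameObjects are the members from this post, the rest is illustrative:

        protected override void Update(GameTime gameTime)
        {
            // Real seconds elapsed since the last update, rather than a frame
            // count, so animation speed is independent of the achieved framerate.
            float elapsed = (float)gameTime.ElapsedGameTime.TotalSeconds;

            foreach (var item in ListOfAnimatedGameObjects)
            {
                item.UpdateSprite(elapsed);
            }

            // Set by the engine when Draw calls are being skipped to catch up;
            // a natural hook for shedding visual detail under load.
            if (gameTime.IsRunningSlowly)
            {
                // e.g. skip particle effects or drop animation fidelity
            }

            base.Update(gameTime);
        }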

    Read the article

  • Windows Azure ASP.NET MVC 2 Role with Silverlight

    - by GeekAgilistMercenary
    I was working through some scenarios recently with Azure and Silverlight.  I immediately decided a quick walkthrough for setting up a Silverlight Application running in an ASP.NET MVC 2 Application would be a cool project. For this walkthrough I have Visual Studio 2010, Silverlight 4, and the Azure SDK all installed.  If you need to download any of those, go get 'em now. Launch Visual Studio 2010 and start a new project.  Click on the section for cloud templates as shown below. After you name the project, the dialog for what type of Windows Azure Cloud Service Role will display.  I selected ASP.NET MVC 2 Web Role, which adds the MvcWebRole1 Project to the Cloud Service Solution. Since I selected the ASP.NET MVC 2 Project type, it immediately prompts for a unit test project.  Because I just want to get everything running first, I will probably be unit testing the Silverlight and just using the MVC Project as a host for the Silverlight for now, and because I would prefer to just add the unit test project later, I am going to select no here. Once you've created the ASP.NET MVC 2 project to host the Silverlight, create another new project.  Select the Silverlight section under the Installed Templates in the Add New Project dialog.  Then select Silverlight Application. The next dialog that comes up will inquire about using the existing ASP.NET MVC Application I just created, which I do want it to use, so I leave it checked.  In the options section, however, I do not want to check RIA Web Services, I do not want a test page added to the project, and I do want Silverlight debugging enabled, so I leave that checked.  Once those options are appropriately set, just click on OK and the Silverlight Project will be added to the overall solution. The next step is to get the Silverlight object appropriately embedded in the web page.  First open up the Site.Master file in the ASP.NET MVC 2 Project located under the Views/Shared/ location.  After you open the file, review the content of the <head></head> section.  In that section add another <contentplaceholder></contentplaceholder> tag as shown in the code snippet below. <head runat="server"> <title> <asp:ContentPlaceHolder ID="TitleContent" runat="server" /> </title> <link href="../../Content/Site.css" rel="stylesheet" type="text/css" /> <asp:ContentPlaceHolder ID="HeaderContent" runat="server" /> </head> I usually put it toward the bottom of the head section.  It just seems the <title></title> should be at the top of the section and I like to keep it that way. Now open up the Index.aspx page under the ASP.NET MVC 2 Project located in the Views/Home/ directory.  When you open up that file, add an <asp:Content></asp:Content> tag as shown in the next snippet. <asp:Content ID="Content1" ContentPlaceHolderID="TitleContent" runat="server"> Home Page </asp:Content>   <asp:Content ID=headerContent ContentPlaceHolderID=HeaderContent runat=server>   </asp:Content>   <asp:Content ID="Content2" ContentPlaceHolderID="MainContent" runat="server"> <h2><%= Html.Encode(ViewData["Message"]) %></h2> <p> To learn more about ASP.NET MVC visit <a href="http://asp.net/mvc" title="ASP.NET MVC Website">http://asp.net/mvc</a>. </p> </asp:Content> In that center tag, I am now going to add what is needed to appropriately embed the Silverlight object into the page.  The first thing I need is a reference to the Silverlight.js file. <script type="text/javascript" src="Silverlight.js"></script> After that comes a bit of nitty gritty Javascript.  
I create another tag (and for those in the know, this is exactly like the generated code that is dumped into the *.html page generated with any Silverlight Project if you select to "add a test page that references the application".  The complete Javascript is below. function onSilverlightError(sender, args) { var appSource = ""; if (sender != null && sender != 0) { appSource = sender.getHost().Source; }   var errorType = args.ErrorType; var iErrorCode = args.ErrorCode;   if (errorType == "ImageError" || errorType == "MediaError") { return; }   var errMsg = "Unhandled Error in Silverlight Application " + appSource + "\n";   errMsg += "Code: " + iErrorCode + " \n"; errMsg += "Category: " + errorType + " \n"; errMsg += "Message: " + args.ErrorMessage + " \n";   if (errorType == "ParserError") { errMsg += "File: " + args.xamlFile + " \n"; errMsg += "Line: " + args.lineNumber + " \n"; errMsg += "Position: " + args.charPosition + " \n"; } else if (errorType == "RuntimeError") { if (args.lineNumber != 0) { errMsg += "Line: " + args.lineNumber + " \n"; errMsg += "Position: " + args.charPosition + " \n"; } errMsg += "MethodName: " + args.methodName + " \n"; }   throw new Error(errMsg); } I literally, since it seems to work fine, just use what is populated in the automatically generated page.  After getting the appropriate Javascript into place I put the actual Silverlight Object Embed code into the HTML itself.  Just so I know the positioning and for final verification when running the application I insert the embed code just below the Index.aspx page message.  As shown below. <asp:Content ID="Content2" ContentPlaceHolderID="MainContent" runat="server"> <h2> <%= Html.Encode(ViewData["Message"]) %></h2> <p> To learn more about ASP.NET MVC visit <a href="http://asp.net/mvc" title="ASP.NET MVC Website"> http://asp.net/mvc</a>. </p> <div id="silverlightControlHost"> <object data="data:application/x-silverlight-2," type="application/x-silverlight-2" width="100%" height="100%"> <param name="source" value="ClientBin/CloudySilverlight.xap" /> <param name="onError" value="onSilverlightError" /> <param name="background" value="white" /> <param name="minRuntimeVersion" value="4.0.50401.0" /> <param name="autoUpgrade" value="true" /> <a href="http://go.microsoft.com/fwlink/?LinkID=149156&v=4.0.50401.0" style="text-decoration: none"> <img src="http://go.microsoft.com/fwlink/?LinkId=161376" alt="Get Microsoft Silverlight" style="border-style: none" /> </a> </object> <iframe id="_sl_historyFrame" style="visibility: hidden; height: 0px; width: 0px; border: 0px"></iframe> </div> </asp:Content> I then open up the Silverlight Project MainPage.xaml.  Just to make it visibly obvious that the Silverlight Application is running in the page, I added a button as shown below. <UserControl x:Class="CloudySilverlight.MainPage" xmlns="http://schemas.microsoft.com/winfx/2006/xaml/presentation" xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml" xmlns:d="http://schemas.microsoft.com/expression/blend/2008" xmlns:mc="http://schemas.openxmlformats.org/markup-compatibility/2006" mc:Ignorable="d" d:DesignHeight="300" d:DesignWidth="400">   <Grid x:Name="LayoutRoot" Background="White"> <Button Content="Button" Height="23" HorizontalAlignment="Left" Margin="48,40,0,0" Name="button1" VerticalAlignment="Top" Width="75" Click="button1_Click" /> </Grid> </UserControl> Just for kicks, I added a message box that would popup, just to show executing functionality also. 
private void button1_Click(object sender, RoutedEventArgs e) { MessageBox.Show("It runs in the cloud!"); } I then ran the ASP.NET MVC 2 application and could see the Silverlight application in the page.  With a quick click of the button, I got a message box.  Success! Now the next step is getting the ASP.NET MVC 2 Project and the Silverlight published to the cloud.  As of Visual Studio 2010, Silverlight 4, and the latest Azure SDK, this is actually a ridiculously easy process. Navigate to the Azure Cloud Services web site. Once that is open, go back into Visual Studio, right click on the cloud project and select publish. This will publish two files into a directory.  Copy that directory path so you can easily paste it into the Azure Cloud Services web site.  You'll have to click on the application role in the cloud (I will have another blog entry soon about where, how, and best practices in the cloud). In the text boxes shown, select the application package file and the configuration file and place them in the appropriate text boxes.  This is the part where it comes in handy to have copied the directory path of the file location.  That way, when you click on browse you can just paste it in, then hit enter.  The two files will be listed and you can select the appropriate file. Once that is done, name the service deployment.  Then click on publish.  After a minute or so you will see the following screen. Now click on run.  Once the MvcWebRole1 goes green (the little light symbol to the left of the status), click on the Web Site URL.  Be patient during this process too; it could take a minute or two.  The Silverlight application should again come up just like when you ran it on your local machine. Once staging is up and running, click on the circular icon with two arrows to move staging to production.  Once you are done, make sure the green light is again go for the production deploy, then click on the Web Site URL to verify the site is working.  At this point I had a successful development, staging, and production deployment. Thanks for reading, hope this was helpful.  I have more Windows Azure and other cloud related material coming, so stay tuned. Original Entry

    Read the article

  • Weird y offset when using custom frag shader (Cocos2d-x)

    - by Mister Guacamole
    I'm trying to mask a sprite so I wrote a simple fragment shader that renders only the pixels that are not hidden under another texture (the mask). The problem is that it seems my texture has its y-coordinate offset after passing through the shader. This is the init method of the sprite (GroundZone) I want to mask: bool GroundZone::initWithSize(Size size) { // [...] // Setup the mask of the sprite m_mask = RenderTexture::create(textureWidth, textureHeight); m_mask->retain(); m_mask->setKeepMatrix(true); Texture2D *maskTexture = m_mask->getSprite()->getTexture(); maskTexture->setAliasTexParameters(); // Disable linear interpolation on the mask // Load the custom frag shader with a default vert shader as the sprite’s program FileUtils *fileUtils = FileUtils::getInstance(); string vertexSource = ccPositionTextureA8Color_vert; string fragmentSource = fileUtils->getStringFromFile( fileUtils->fullPathForFilename("CustomShader_AlphaMask_frag.fsh")); GLProgram *shader = new GLProgram; shader->initWithByteArrays(vertexSource.c_str(), fragmentSource.c_str()); shader->bindAttribLocation(GLProgram::ATTRIBUTE_NAME_POSITION, GLProgram::VERTEX_ATTRIB_POSITION); shader->bindAttribLocation(GLProgram::ATTRIBUTE_NAME_TEX_COORD, GLProgram::VERTEX_ATTRIB_TEX_COORDS); shader->link(); CHECK_GL_ERROR_DEBUG(); shader->updateUniforms(); CHECK_GL_ERROR_DEBUG(); int maskTexUniformLoc = shader->getUniformLocationForName("u_alphaMaskTexture"); shader->setUniformLocationWith1i(maskTexUniformLoc, 1); this->setShaderProgram(shader); shader->release(); // [...] } These are the custom drawing methods for actually drawing the mask over the sprite: You need to know that m_mask is modified externally by another class, the onDraw() method only render it. void GroundZone::draw(Renderer *renderer, const kmMat4 &transform, bool transformUpdated) { m_renderCommand.init(_globalZOrder); m_renderCommand.func = CC_CALLBACK_0(GroundZone::onDraw, this, transform, transformUpdated); renderer->addCommand(&m_renderCommand); Sprite::draw(renderer, transform, transformUpdated); } void GroundZone::onDraw(const kmMat4 &transform, bool transformUpdated) { GLProgram *shader = this->getShaderProgram(); shader->use(); glActiveTexture(GL_TEXTURE1); glBindTexture(GL_TEXTURE_2D, m_mask->getSprite()->getTexture()->getName()); glActiveTexture(GL_TEXTURE0); } Below is the method (located in another class, GroundLayer) that modify the mask by drawing a line from point start to point end. Both points are in Cocos2d coordinates (Point (0,0) is down-left). void GroundLayer::drawTunnel(Point start, Point end) { // To dig a line, we need first to get the texture of the zone we will be digging into. Then we get the // relative position of the start and end point in the zone's node space. Finally we use the custom shader to // draw a mask over the existing texture. for (auto it = _children.begin(); it != _children.end(); it++) { GroundZone *zone = static_cast<GroundZone *>(*it); Point nodeStart = zone->convertToNodeSpace(start); Point nodeEnd = zone->convertToNodeSpace(end); // Now that we have our two points converted to node space, it's easy to draw a mask that contains a line // going from the start point to the end point and that is then applied over the current texture. 
Size groundZoneSize = zone->getContentSize(); RenderTexture *rt = zone->getMask(); rt->begin(); { // Draw a line going from start and going to end in the texture, the line will act as a mask over the // existing texture DrawNode *line = DrawNode::create(); line->retain(); line->drawSegment(nodeStart, nodeEnd, 20, Color4F::RED); line->visit(); } rt->end(); } } Finally, here's the custom shader I wrote. #ifdef GL_ES precision mediump float; #endif varying vec2 v_texCoord; uniform sampler2D u_texture; uniform sampler2D u_alphaMaskTexture; void main() { float maskAlpha = texture2D(u_alphaMaskTexture, v_texCoord).a; float texAlpha = texture2D(u_texture, v_texCoord).a; float blendAlpha = (1.0 - maskAlpha) * texAlpha; // Show only where mask is invisible vec3 texColor = texture2D(u_texture, v_texCoord).rgb; gl_FragColor = vec4(texColor, blendAlpha); return; } I got a problem with the y coordinates. Indeed, it seems that once it has passed through my custom shader, the sprite's texture is not at the right place: Without custom shader (the sprite is the brown thing): With custom shader: What's going on here? Thanks :) EDIT It looks like after passing through the shader when I set the position of the sprite I set it in points, with (0,0) being in the top-right. Indeed, when I do sprite->setPosition(320, 480), the sprite is perfectly placed at the top of the screen.

    Read the article

  • iPack -The iOS Application Packager

    - by user13277780
    iOS applications are distributed in .ipa archive files. These files are regular zip files which contain application resources and executables. To protect them from unauthorized modifications and to provide identification of their sources, the content of the archives is signed. The signature is included in the application executable of an .ipa archive and protects the executable file itself and the associated resource files. Apple provides native Mac OS tools for signing iOS executables (which are actually generic Mach-O code signing tools), but these tools are not generally available on other platforms. To provide a multi-platform development environment for JavaFX based iOS applications, we ported iOS signing and packaging to Java and created a dedicated ipack tool for it. The iPack tool can be used as the last step of creating an .ipa package on various operating systems. The prototype has been tested by creating a final distributable for a JavaFX application that runs on iPad, all done on Windows 7. Source Code The source code of the iPack tool is in the OpenJFX project repository. You can find it in: <openjfx root>/rt/tools/ios/Maven/ipack To build the iPack tool use: rt/tools/ios/Maven/ipack$ mvn package After building, you can run the tool: java -jar <path to ipack.jar> <arguments>  Signing keystore The tool uses a Java key store to read the signing certificate and the associated private key. To prepare such a keystore, users can use keytool from the JDK. One possible scenario is to import an existing private key and the certificate from a key store used on Mac OS: To list the content of an existing key store and identify the source alias: keytool -list -keystore <src keystore>.p12 -storetype pkcs12 -storepass <src keystore password> To create a Java key store and import the private key with its certificate into the key store: keytool -importkeystore \ -destkeystore <dst keystore> -deststorepass <dst keystore password> \ -srckeystore <src keystore>.p12 -srcstorepass <src keystore password> -srcstoretype pkcs12 \ -srcalias <src alias> -destalias <dst alias> -destkeypass <dst key password> Another scenario would be to generate a private / public key pair directly in a Java key store and create a certificate request from it. After sending the request to Apple one can then import the certificate response back into the Java key store and complete the signing certificate entry. In both scenarios the resulting alias in the Java key store will contain only a single (leaf) certificate. This can be verified with the following command: keytool -list -v -keystore <ipack keystore> -storepass <keystore password> When looking at the Certificate chain length entry, the number next to it is 1. When an executable file is signed on Mac OS, the resulting signature (in CMS format) includes the whole certificate chain up to the Apple Root CA. The ipack tool includes only the chain which is stored under the alias specified on the command line. So to have the whole chain in the signature we first need to replace the single certificate entry under the alias with the corresponding full certificate chain. To do that we first need to create the chain in a separate file. It is easy to create such a chain when working with certificates in Base-64 encoded PEM format. A certificate chain can be created by concatenating PEM certificates, which should form the chain, into a single file. 
For iOS signing we need the following certificates in our chain: Apple Root CA Apple Worldwide Developer Relations CA Our signing leaf certificate To convert a certificate from the binary DER format (.der, .cer) to PEM format: keytool -importcert -noprompt -keystore temp.ks -storepass temppwd -alias tempcert -file <certificate>.cer keytool -exportcert -keystore temp.ks -storepass temppwd -alias tempcert -rfc -file <certificate>.pem To export the signing certificate into PEM format: keytool -exportcert -keystore <ipack keystore> -storepass <keystore password> -alias <signing alias> -rfc -file SigningCert.pem After constructing a chain from AppleIncRootCertificate.pem, AppleWWDRCA.pem and SigningCert.pem, it can be imported back into the keystore with: keytool -importcert -noprompt -keystore <ipack keystore> -storepass <keystore password> -alias <signing alias> -keypass <key password> -file SigningCertChain.pem To summarize, the following example shows the full certificate chain replacement process: keytool -importcert -noprompt -keystore temp.ks -storepass temppwd -alias tempcert1 -file AppleIncRootCertificate.cer keytool -exportcert -keystore temp.ks -storepass temppwd -alias tempcert1 -rfc -file AppleIncRootCertificate.pem keytool -importcert -noprompt -keystore temp.ks -storepass temppwd -alias tempcert2 -file AppleWWDRCA.cer keytool -exportcert -keystore temp.ks -storepass temppwd -alias tempcert2 -rfc -file AppleWWDRCA.pem keytool -exportcert -keystore ipack.ks -storepass keystorepwd -alias mycert -rfc -file SigningCert.pem cat SigningCert.pem AppleWWDRCA.pem AppleIncRootCertificate.pem >SigningCertChain.pem keytool -importcert -noprompt -keystore ipack.ks -storepass keystorepwd -alias mycert -keypass keypwd -file SigningCertChain.pem keytool -list -v -keystore ipack.ks -storepass keystorepwd Usage When the ipack tool is started with no arguments it prints the following usage information: Usage: ipack <archive> <signing opts> <application opts> [ <application opts> ... ] Signing options: -keystore <keystore> keystore to use for signing -storepass <password> keystore password -alias <alias> alias for the signing certificate chain and the associated private key -keypass <password> password for the private key Application options: -basedir <directory> base directory from which to derive relative paths -appdir <directory> directory with the application executable and resources -appname <file> name of the application executable -appid <id> application identifier Example: ipack MyApplication.ipa -keystore ipack.ks -storepass keystorepwd -alias mycert -keypass keypwd -basedir mysources/MyApplication/dist -appdir Payload/MyApplication.app -appname MyApplication -appid com.myorg.MyApplication    

    Read the article

  • Problems with opening CHM Help files from Network or Internet

    - by Rick Strahl
    As a publisher of a Help Creation tool called Html Help Builder, I’ve seen a lot of problems with help files that won't properly display actual topic content and display an error message for topics instead. Here’s the scenario: You go ahead and happily build your fancy, schmanzy Help File for your application and deploy it to your customer. Or alternately you've created a help file and you let your customers download it off the Internet directly or in a zip file. The customer downloads the file, opens the zip file and copies the help file contained in the zip file to disk. She then opens the help file and finds the following unfortunate result: The help file comes up with all topics in the tree on the left, but a Navigation to the WebPage was cancelled or Operation Aborted error in the Help Viewer's content window whenever you try to open a topic. The CHM file obviously opened since the topic list is there, but the Help Viewer refuses to display the content. Looks like a broken help file, right? But it's not - it's merely a Windows security 'feature' that tries to be overly helpful in protecting you. The reason this happens is that files downloaded off the Internet - including ZIP files and CHM files contained in those zip files - are marked as coming from the Internet and so can potentially be malicious, so they do not get browsing rights on the local machine – they can’t access local Web content, which is exactly what help topics are. If you look at the URL of a help topic you see something like this: mk:@MSITStore:C:\wwapps\wwIPStuff\wwipstuff.chm::/indexpage.htm which points at a special Microsoft Url Moniker that in turn points at the CHM file and a relative path within that HTML help file. Try pasting a URL like this into Internet Explorer and you'll see the help topic pop up in your browser (along with a warning most likely). Although the URL looks weird, this still equates to a call to the local computer zone, the same as if you had navigated to a local file in IE, which by default is not allowed. Unfortunately, unlike Internet Explorer where you have the option of clicking a security toolbar, the CHM viewer simply refuses to load the page and you get an error page as shown above. How to Fix This - Unblock the Help File There's a workaround that lets you explicitly 'unblock' a CHM help file. To do this: open Windows Explorer, find your CHM file, right click and select Properties, then click the Unblock button on the General tab. Here's what the dialog looks like: Clicking the Unblock button basically tells Windows that you approve this Help File and allows topics to be viewed. Is this insecure? Not unless you're running a really old version of Windows (XP pre-SP1). In recent versions of Windows, Internet Explorer pops up various security dialogs or fires script errors when potentially malicious operations are accessed (like loading ActiveX controls), so it's relatively safe to run local content in the CHM viewer. Since most help files contain no script, or only script that runs pure JavaScript or accesses web resources, this works fine without issues. How to avoid this Problem As an application developer there's a simple solution around this problem: Always install your Help Files with an Installer. The above security warnings pop up because Windows can't validate the source of the CHM file. However, if the help file is installed as part of an installation, the installation and all files associated with that installation, including the help file, are trusted. 
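    As an aside that is not from the original article: once the help file is installed alongside the application, a WinForms application can launch it through the built-in Help class, which drives the same HTML Help viewer discussed above. A minimal sketch – the file name and topic here are invented for illustration:

        using System.IO;
        using System.Windows.Forms;

        static class HelpLauncher
        {
            // Opens a topic from a CHM that was installed next to the EXE,
            // so it carries no "downloaded from the Internet" mark.
            public static void ShowTopic(Control parent)
            {
                string chmPath = Path.Combine(
                    Application.StartupPath, "MyApp.chm");

                // Help.ShowHelp starts the standard HTML Help viewer; the last
                // two arguments navigate straight to a specific topic page.
                Help.ShowHelp(parent, chmPath, HelpNavigator.Topic, "indexpage.htm");
            }
        }

    Because the file was laid down by the installer rather than downloaded, the viewer loads topics without the zone check getting in the way.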
A fully installed Help File of an application works just fine because it is trusted by Windows. Summary It's annoying as all hell that this sort of obtrusive marking is necessary, but it's admittedly a necessary evil because of Microsoft's use of the insecure Internet Explorer engine that drives the CHM Html Engine's topic viewer. Because help files are viewing local content and script is allowed to execute in CHM files there's potential for malicious code hiding in CHM files and the above precautions are supposed to avoid any issues. © Rick Strahl, West Wind Technologies, 2005-2012

    Read the article

  • CodePlex Daily Summary for Saturday, November 27, 2010

    CodePlex Daily Summary for Saturday, November 27, 2010

    Popular Releases
    MahTweets for Windows Phone: Nightly 69: Latest nightly for build 69.
    XamlQuery/WPF - The Write Less, Do More, WPF Library: XamlQuery-WPF v1.2 (Runtime, Source): This is the first release of the popular XamlQuery library for WPF. XamlQuery has already gained recognition among Silverlight developers.
    Math.NET Numerics: Beta 1: First beta of Math.NET Numerics. Only contains the managed linear algebra provider. Beta 2 will include the native linear algebra providers along with better documentation and examples.
    WatchersNET.SiteMap: WatchersNET.SiteMap 01.03.02: What's New: New Tax Filter, you can now select which Terms you want to use.
    Minecraft GPS: Minecraft GPS 1.1: 1.1 Release. New Features: Compass! New style. Set opacity on main window to allow overlay of Minecraft.
    Microsoft All-In-One Code Framework: Visual Studio 2010 Code Samples 2010-11-25: Code samples for Visual Studio 2010.
    Typps (formerly jiffycms) wysiwyg rich text HTML editor for ASP.NET AJAX: Typps 2.9: When uploading files (not images) through the file uploader and the multi-file uploader, FileUploaded and MultiFileUploaded event handlers were reporting an empty event argument; this is fixed now. Also fixed the url field not updating when uploading a file (not an image).
    Wii Backup Fusion: Wii Backup Fusion 0.8.5 Beta: WBFS repair (default) options fixed - Transfer to image fixed - Settings ui widget names fixed - Some little bug fixes. You need to reset the settings! Delete WiiBaFu's config file or registry entries: Linux: ~/.config/WiiBaFu/wiibafu.conf Windows: HKEY_CURRENT_USER\Software\WiiBaFu\wiibafu Mac OS X: ~/Library/Preferences/com.wiibafu.wiibafu.plist Caution: This is a BETA version! Errors, crashes and data loss not impossible! Use in test environments only, not on productive systems...
    Minemapper: Minemapper v0.1.3: Added process count and world size calculation progress to the status bar. Added View->'Status Bar' menu item to show/hide the status bar. Status bar is automatically shown when loading a world. Added a prompt, when loading a world, to use or clear cached images.
    SQL Monitor: SQL Monitor 1.4: 1. added automatic loading of sql server instances 2. added friendly wait cursor 3. fixed problem with 4.0 fx 4. added exception handling.
    Sexy Select: sexy select v0.4: Changes in v0.4: Added method: elements. This returns all the option elements that are currently added to the select list. Added method: selectOption. This method accepts two values, the element to be modified and the selected state (true/false).
    Deep Zoom for WPF: First Release: This first release of the Deep Zoom control has the same source code, binaries and demos as the CodeProject article (http://www.codeproject.com/KB/WPF/DeepZoom.aspx).
    Simple Service Locator: Simple Service Locator v0.12: The Simple Service Locator is an easy-to-use Inversion of Control library that is a complete implementation of the Common Service Locator interface. It solely supports code-based configuration and is an ideal starting point for developers unfamiliar with larger IoC / DI libraries. New features in this release: Collections that are registered using RegisterAll<T> can now be injected using automatic constructor injection. A new RegisterAll<T>(params T[]) method overload is added that allows ea...
    BlogEngine.NET: BlogEngine.NET 2.0 RC: This is a Release Candidate version for BlogEngine.NET 2.0. The most current, stable version of BlogEngine.NET is version 1.6. Find out more about the BlogEngine.NET 2.0 RC here. If you want to extend or modify BlogEngine.NET, you should download the source code. To get started, be sure to check out our installation documentation and the installation screencast. If you are upgrading from a previous version, please take a look at the Upgrading to BlogEngine.NET 2.0 instructions. As this ...
    NodeXL: Network Overview, Discovery and Exploration for Excel: NodeXL Excel Template, version 1.0.1.156: The NodeXL Excel template displays a network graph using edge and vertex lists stored in an Excel 2007 or Excel 2010 workbook. What's New: This release adds a feature for aggregating the overall metrics in a folder full of NodeXL workbooks, adds geographical coordinates to the Twitter import features, and fixes a memory-related bug. See the Complete NodeXL Release History for details. Please Note: There is a new option in the setup program to install for "Just Me" or "Everyone." Most people...
    VFPX: FoxBarcode v.0.11: FoxBarcode v.0.11 - Released 2010.11.22. FoxBarcode is a 100% Visual FoxPro class that provides a tool for generating images with different bar code symbologies to be used in VFP forms and reports, or exported to other applications. Its use and distribution is free for all the Visual FoxPro Community. What is new? Added a third parameter to the BarcodeImage() method. Fixed some minor bugs. History: FoxBarcode v.0.10 - Released 2010.11.19 - 85 Downloads. Project page: FoxBarcode
    DotNetAge - a lightweight Mvc jQuery CMS: DotNetAge 1.1.0.5: What is new in DotNetAge 1.1.0.5? Document Library features and template added. Resolved issues with templates. Improved publishing service performance. Opml support added. What is new in DotNetAge 1.1? D.N.A Core updates: improved runtime performance, more stable. The DNA core objects model added. Personalization features added that allow users to create a personal website, manage their resources, and store personal data. DynamicUI: fixed the bug where the PageManager could not move a page node. ...
    ASP.NET MVC Project Awesome (jQuery Ajax helpers): 1.3.1 and demos: A rich set of helpers (controls) that you can use to build highly responsive and interactive Ajax-enabled Web applications. These helpers include Autocomplete, AjaxDropdown, Lookup, Confirm Dialog, Popup Form and Pager, tested on mozilla, safari, chrome, opera, ie 9b/8/7/6.
    MDownloader: MDownloader-0.15.24.6966: Fixed Updater; Fixed minor bugs.
    WPF Application Framework (WAF): WPF Application Framework (WAF) 2.0.0.1: Version: 2.0.0.1 (Milestone 1): This release contains the source code of the WPF Application Framework (WAF) and the sample applications. Requirements: .NET Framework 4.0 (the package contains a solution file for Visual Studio 2010); the unit test projects require Visual Studio 2010 Professional. Remark: The sample applications are using Microsoft's IoC container MEF. However, the WPF Application Framework (WAF) doesn't force you to use the same IoC container in your application. You can use ...

    New Projects
    Community Megaphone for Windows Phone: The official CommunityMegaphone application for Windows Phone. Releases are in XAP format, allowing you to upload the compiled application into a device using the Windows Phone Application Deployment tool. Download the source code to learn how to build Windows Phone applications.
    Group positioning GPS: This is a project for a university subject in geographic information systems; it's about a group of gadgets that can know the position of the other devices in the same group.
    IIS HTTPS Binder: On IIS 7 you cannot create more than one HTTPS binding, even though you have more than one SSL certificate and you need HTTPS bindings on different hosted websites. If you have one IP address you can bind only one SSL certificate to a chosen website. This small application can fix this.
    Inspired Faith Now Playing: A ClickOnce-deployed desktop application that sits in your application tray and notifies you when any of your favorite Messianic Jewish and Christian artists or songs play on the Inspired Faith online radio station (http://inspiredfaith.org).
    MA Manager: The goal of this software is to help manage a martial arts school through multiple clients and platforms. This allows instructors to easily track students' progress over time.
    MathModels: This project contains commonly used algorithm implementations in C# .net.
    MyFlickr API: MyFlickr API is a library for developers that allows them to call the Flickr API from any .Net application.
    NerdOnRails.DynamicProxy: A small dynamic-proxy implementation using the new dynamic object that was introduced with .Net 4.
    NerdOnRails.Injector: A small dependency injection framework for .net.
    NHyperV: NHyperV is a C# Hyper-V programming model.
    Power Scheme Switcher: This is a very simple utility that exposes an icon in the system tray and allows you to quickly change the Power Plan Scheme from there. It is developed in C# with VS 2010 (WinForm).
    rcforms: Custom SharePoint forms example.
    Robo Commander: This is a Lego NXT commander developed under Visual Studio. This console will have basic commands and uses the NXT++ libraries.
    Shared Genomics Project - Workbench Codebase: The Shared Genomics workbench enables a diverse user group of researchers to explore the associations between genetic and other factors in their datasets. It provides a graphical user interface to the analysis functions published in a sister Codeplex project, i.e. the MPI Codebase.
    SharePoint 2010 PowerShell v2 reference: This project hosts a complete PowerShell v2 reference for SharePoint 2010.
    Social PowerFlow: Experiments with PowerWF and the social network - integrating Windows Powershell and Windows Workflow Foundation with Facebook and Twitter.
    TweetSayings: Twitter Sayings.
    XamlQuery/WPF - The Write Less, Do More, WPF Library: XamlQuery/WPF is a lightweight yet powerful library that enables rapid development in WPF. It simplifies several tasks like page/window traversing; finding controls by name, type, style, property value or position in control tree; event handling; animating and much more.
    XmlDSigEx XML Digital Signature Library: The XmlDSigEx library is an alternative to using the SignedXml classes in the .Net Framework. It addresses some of the shortcomings of the standard types, particularly in regards to canonicalisation and the enveloped transform. It is currently under development.
    Yoi's Online Project: This is online storage for Yovi's project.

    Read the article

  • Silverlight Cream Monday WP7 App Review # 1

    - by Dave Campbell
    I'm going to try something here... if it seems useful, I'll continue, and if it doesn't, I'll stop... so give me feedback! There are *lots* of Apps in the WP7 Marketplace, and heaven help me, but the Marketplace sucks for finding stuff. I won't rehash what's already been said in the blogs, but I agree with one and all. I went out last Saturday to find 2 apps that I knew were released, and couldn't find them on my device. Even in the Zune app, it took quite a while to find them... ok, I'll back off a bit, because I just found out I can do 'Search' now if I know the name... I didn't think that was working before. So my thought is that on Mondays (like today), I will post a review of 5 apps/games I either use or have played with on my device. These are strictly my opinions, you understand, but hey... it's better than a poke in the eye with an iPhone! A few disclaimers:
    1. Feel free to write me about your app and tell me about it.
    2. While it would be very cool to receive a whole bunch of xap files to review, at this point, for technical reasons, I'm unable to side-load my device.
    3. Since I plan on only doing this one day a week, and only 5 at a time, I may never get caught up, so if you send me some info, be patient.
    4. Re: games... remember I'm old... I'm from the era of Colossal Cave and Zork. Duke-Nukem 2D and Captain Comic were awesome. I don't own an XBOX or any other game system, so take game reviews from my perspective -- who knows, it may be refreshing :)
    5. I won't pay for an app or game just to try it. If you expect me to test-drive your app, it's going to have to have a Free Trial.
    In this Issue:
    Jingo! is the first app I bought, just to see what the experience was like. It's very much like a game we used to play in school in the Army in 1971, on paper we passed around. Sort of a cross between Hangman and Mastermind, you try to figure out the hidden word in 5 tries. You get really good at 5-letter words after a while. I like this because you have to think, and you're not pressured by a clock. Jingo! is by James Furdell and is $1.99.
    I reviewed René Schulte's Pictures Lab a while back, and have not changed my mind. This is an excellent app for playing with any photo on your device... one you've just taken, one you've synced from your PC, or one you've saved from email. I like this because you can get some cool effects for your photos, and it just works. Pictures Lab is by Schulte Software Development and is $1.99.
    Since I work as a consultant, and from home, I wanted something I could track my time with. I've test-driven all the contenders I could find on the phone, and so far I like ONTRACK! the best. If asked, I have some suggestions, but it's probably just the way I work or think. What I do like is that I can tap a project to start/stop/restart a counter, and at the end of the day it shows me how much time I've been working. If there's a way to make an adjustment in case you forget to tap the counter, I don't know how to do it, and that's my biggest complaint. I like this because you can get a daily readout, which you can also email as a spreadsheet. The daily results display is very good. ONTRACK! is by Qmino and is $2.99.
    Remember Item 4 above... I've been playing guitar for 48 years... obviously since before the invention of 'tuners', so I'm not as dependent upon these as some folks are. I've tried some in the past and have always felt I can do just as well by ear (I have perfect relative pitch). So, I gave this app by András Velvárt a spin just to see how it works, and it is surprisingly good. If you're used to one of the stage tuners, this may take a little getting used to, but it does the job. The difference with this one is that there is no real 'null' point inside which you can think your guitar is in tune. The soundwave stays visible on the device: if it's moving to the right, your string is flat; if it's moving to the left, your string is sharp. Getting it exact might be tricky, but it is exact! If you need to rely on a tuner, this is a good choice in my opinion, exactly because of the sensitivity... tune up with this and you're dead-on. Guitar Tuner is by Kinabalu Innovation Limited and is $0.99.
    Popper 2 is the WP7 version of Dr. Popper, a wildly popular game by Bill Reiss. You can get a trial, or you can now get a free lite version of the game. Popper 2 is a fast-paced bubble-breaker game. I find it something fun to play when I just want to buzz out, but maybe the best review is that my daughter didn't want to give my phone back when I showed it to her, and always wants to grab my phone to play 'that game'. A fun distraction with great graphics and a great price. Popper 2 is by Blue Rose Systems, LLC and is $1.29.
    Let me know what you think of the idea of doing reviews, or the layout/whatever, and Stay in the 'Light!

    Read the article

  • The Arab HEUG is now a reality, and other random thoughts

    - by user9147039
    I just returned from Doha, Qatar, where the first-of-its-kind HEUG (Higher Education User Group) meeting for institutions in the Middle East and North Africa was held at Qatar University and jointly hosted by Dammam University from Saudi Arabia. Over 80 delegates attended, including representation from education institutions in Oman, Saudi Arabia, Lebanon, and Qatar. There are many other regional HEUG organizations in place (in Australia/New Zealand, APAC, and EMEA, as well as smaller regional HEUGs in the Netherlands, South Africa, and in regions of the US), but it was truly an accomplishment to see this Middle East/North Africa group organize and launch their chapter with a meeting of this quality. The group will be known as the Arab HEUG going forward, and I am excited about the prospects for sharing between the institutions and for the growth of Oracle solutions in the region. In particular, the hosts for the event (Qatar University) did a masterful job with logistics and organization, and the quality of the event was a testament to their capabilities. Among the more interesting and enlightening presentations I attended were one from Dammam University on the lessons learned from their implementation of Campus Solutions and transition off of Banner, and another on Qatar University's use of E-Business Suite for grants management (both pre- and post-award). The most notable fact coming from the latter presentation was the fit (89%) of E-Business Suite Grants to the university's requirements. In a few weeks' time we will be convening the 5th meeting of the Oracle Education & Research Industry Strategy Council in Redwood Shores (the 5th since my advent into my current role). The main topics of discussion will be around our Higher Education Applications Strategy for the future, including cloud approaches to ERP (HCM, Finance, and Student Information Systems), as well as some case studies on the benefits of leveraging delivered functionality and extensibility in the software (versus customization). On the second day of the event we will turn our attention to Oracle in Research, and also to budgeting and planning in higher education. Both of these sessions will include significant participation from council members in the form of panel discussions. Our EVPs for Systems (John Fowler) and for Global Cloud Services and North America application sales (Joanne Olson) will join us for the discussion. I recently read a couple of articles that were surprising to me. The first was from Inside Higher Ed on October 15, entitled "As colleges prepare for major software upgrades, Kuali tries to woo them from corporate vendors." It continues to disappoint me that after all this time we are still debating whether it is better to build enterprise software through open- or community-source initiatives when fully functional, flexible, supported, and widely adopted options exist in the marketplace. A decade or more ago, when these solutions were relatively immature and there was a great deal of turnover in the market, I could appreciate initiatives like Kuali. But let's not kid ourselves – the real objective of this movement is to counter a perceived predatory commercial software industry. Again, when commercial solutions are deployed as written, without significant customization, and standard business processes are adopted, the cost of these solutions (relative to the value delivered) is quite low, and certainly much lower than the massive investment (and risk) in in-house developers to support a bespoke community-source system.
    In this era of cost pressures in education and the need to refocus resources on teaching, learning, and research, I believe it's bordering on irresponsible to continue to pursue open-source ERP. Many adopters' total costs are staggering, with little to show for the effort and expended resources. The second article was recently in the Chronicle of Higher Education, entitled "'Big Data' Is Bunk, Obama Campaign's Tech Guru Tells University Leaders." This one was so outrageous I almost don't want to legitimize it by referencing it here. In the article the writer relays statements made by Harper Reed, President Obama's former CTO for his 2012 re-election campaign, that big data solutions in education have no relevance and are akin to snake oil. He goes on to state that while he's a fan of data-driven decision making in education, most of the necessary analysis can be accomplished in Excel spreadsheets. Yeah... right. This is exactly what ails education (higher education in particular): dozens of shadow and siloed systems running on spreadsheets, with limited-to-no enterprise-wide initiatives to harness the data-rich environment that is a higher-ed institution and transform that data into usable information. I'll grant Mr. Reed that "Big Data" is overused and hackneyed, but imperatives like improving student success in higher education are classic big-data problems that data mining and predictive analytics can address. Further, higher ed needs to produce far more data scientists and analysts than are currently in the pipeline, to advance this discipline and to apply these tools to many, many other problems across multiple industries.

    Read the article

  • SQL Table stored as a Heap - the dangers within

    - by MikeD
    Nearly all of the time I create a table, I include a primary key, and often that PK is implemented as a clustered index. Those two don't always have to go together, but in my world they almost always do. On a recent project, I was working on a data warehouse and a set of SSIS packages to import data from an OLTP database into the warehouse. The data I was importing from the business database was mostly new rows, sometimes updates to existing rows, and sometimes deletes. I decided to use the MERGE statement to implement the inserts, updates, and deletes in the data warehouse. I found it quite performant to have a stored procedure that extracted all the new, updated, and deleted rows from the source database and dumped them into a working table in my data warehouse, then to run a stored proc in the warehouse that used a MERGE statement to take the rows from the working table and update the real fact table:

        USE Warehouse
        GO

        CREATE TABLE Integration.MergePolicy (
            PolicyId int,
            PolicyTypeKey int,
            Premium money,
            Deductible money,
            EffectiveDate date,
            Operation varchar(5)
        )
        GO

        CREATE TABLE fact.Policy (
            PolicyKey int identity primary key,
            PolicyId int,
            PolicyTypeKey int,
            Premium money,
            Deductible money,
            EffectiveDate date
        )
        GO

        -- Proc renamed here: a proc and a table can't share the name Integration.MergePolicy.
        CREATE PROC Integration.MergePolicyProc
        AS
        BEGIN
            BEGIN TRAN

            MERGE fact.Policy AS tgt
            USING Integration.MergePolicy AS src
               ON (tgt.PolicyId = src.PolicyId)
            WHEN NOT MATCHED BY TARGET THEN
                INSERT (PolicyId, PolicyTypeKey, Premium, Deductible, EffectiveDate)
                VALUES (src.PolicyId, src.PolicyTypeKey, src.Premium, src.Deductible, src.EffectiveDate)
            WHEN MATCHED AND src.Operation = 'U' THEN
                UPDATE SET PolicyTypeKey = src.PolicyTypeKey,
                           Premium = src.Premium,
                           Deductible = src.Deductible,
                           EffectiveDate = src.EffectiveDate
            WHEN MATCHED AND src.Operation = 'D' THEN
                DELETE
            ;

            -- Clear the working table for the next extraction run.
            DELETE FROM Integration.MergePolicy

            COMMIT
        END
        GO

    Notice that my work table (Integration.MergePolicy) doesn't have any primary key or clustered index. I didn't think this would be a problem, since it was a relatively small table and was empty after each run of the stored proc. During the initial loads of the warehouse, one of the work tables was getting about 1.5 million rows inserted, processed, then deleted. Also, because of a bug in the extraction process, the same 1.5 million rows (plus a few hundred more each time) were getting inserted, processed, and deleted repeatedly. This was being done on a fairly hefty server that was otherwise unused, and no one was paying any attention to the time it was taking. This week I received a backup of this database and loaded it on my laptop to troubleshoot the problem, and of course it took a good ten minutes or more to run the process. However, what seemed strange to me was that after I fixed the problem and happened to run the merge sproc when the work table was completely empty, it still took almost ten minutes to complete. I immediately looked back at the MERGE statement to see if I had some sort of outer join that meant it would be scanning the target table (which had about 2 million rows in it), then turned on the execution plan output to see what was happening under the hood. Running the stored procedure again took a long time, and the plan output didn't show me much: 55% on the MERGE statement, 45% on the DELETE statement, and table scans on the work table in both places. I was surprised at the relative cost of the DELETE statement, because there were really 0 rows to delete, but I was expecting to see the table scans.
    (I was beginning to suspect that my problem was the work table being stored as a heap.) Then I turned on STATISTICS IO and ran the sproc again. The output was quite interesting:

        Table 'Worktable'. Scan count 0, logical reads 0, physical reads 0, read-ahead reads 0, lob logical reads 0, lob physical reads 0, lob read-ahead reads 0.
        Table 'Policy'. Scan count 0, logical reads 0, physical reads 0, read-ahead reads 0, lob logical reads 0, lob physical reads 0, lob read-ahead reads 0.
        Table 'MergePolicy'. Scan count 1, logical reads 433276, physical reads 60, read-ahead reads 0, lob logical reads 0, lob physical reads 0, lob read-ahead reads 0.

    I've reproduced the above from memory, so the details aren't exact, but the essential bit was the very high number of logical reads on the table stored as a heap. Even just doing a SELECT COUNT(*) FROM Integration.MergePolicy incurred that sort of output, even though the result was always 0. I suppose I should research more on the allocation and deallocation of pages for tables stored as heaps, but I haven't, and my original assumption, that a table stored as a heap with no rows would only need to read one page to answer any query, was definitely proven wrong. It's likely that some sort of physical defragmentation of the table would have cleaned that up, but it seemed that the easiest answer was to put a clustered index on the table. After doing so, the execution plan showed a clustered index scan, and the IO stats showed only a single page read. (I aborted my first attempt at adding a clustered index to the table because it was taking too long; instead, I ran TRUNCATE TABLE Integration.MergePolicy first and then added the clustered index, both of which took very little time.) I suspect I may not have noticed this if I had used TRUNCATE TABLE Integration.MergePolicy instead of DELETE FROM Integration.MergePolicy in the proc, since I'm guessing that the truncate operation releases the pages allocated to the heap rather quickly. In the future, I will be much more careful to have a clustered index on every table I use, even the working tables. Mike
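    PS: If you want to apply the same fix, here's a minimal sketch; the index name and clustering key are illustrative guesses on my part, not pulled from the original system:

        -- Empty the heap first so the index build has nothing to sort.
        -- TRUNCATE deallocates the heap's pages immediately, unlike DELETE.
        TRUNCATE TABLE Integration.MergePolicy
        GO

        -- Illustrative index name and key; any reasonable clustering key would do here.
        CREATE CLUSTERED INDEX CIX_MergePolicy_PolicyId
            ON Integration.MergePolicy (PolicyId)
        GO

        -- Sanity check: with the clustered index in place and the table empty,
        -- STATISTICS IO should report only a page read or two for this query.
        SET STATISTICS IO ON
        SELECT COUNT(*) FROM Integration.MergePolicy
        GO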

    Read the article

< Previous Page | 111 112 113 114 115 116 117 118 119 120 121 122  | Next Page >