Search Results

Search found 3481 results on 140 pages for 'dvd region'.


  • Invalid algorithm specified on Windows 2003 Server only

    - by JL
    I am decoding a file using the following method: string outFileName = zfoFileName.Replace(".zfo", "_tmp.zfo"); FileStream inFile = null; FileStream outFile = null; inFile = File.Open(zfoFileName, FileMode.Open); outFile = File.Create(outFileName); LargeCMS.CMS cms = new LargeCMS.CMS(); cms.Decode(inFile, outFile); This is working fine on my Win 7 dev machine, but on a Windows 2003 server production machine it fails with the following exception: Exception: System.Exception: CryptMsgUpdate error #-2146893816 --- System.ComponentModel.Win32Exception: Invalid algorithm specified --- End of inner exception stack trace --- at LargeCMS.CMS.Decode(FileStream inFile, FileStream outFile) Here are the classes below which I call to do the decoding, if needed I can upload a sample file for decoding, its just strange it works on Win 7, and not on Win2k3 server: using System; using System.Collections.Generic; using System.Linq; using System.Text; using System.IO; using System.Security.Cryptography; using System.Security.Cryptography.X509Certificates; using System.Runtime.InteropServices; using System.ComponentModel; namespace LargeCMS { class CMS { // File stream to use in callback function private FileStream m_callbackFile; // Streaming callback function for encoding private Boolean StreamOutputCallback(IntPtr pvArg, IntPtr pbData, int cbData, Boolean fFinal) { // Write all bytes to encoded file Byte[] bytes = new Byte[cbData]; Marshal.Copy(pbData, bytes, 0, cbData); m_callbackFile.Write(bytes, 0, cbData); if (fFinal) { // This is the last piece. Close the file m_callbackFile.Flush(); m_callbackFile.Close(); m_callbackFile = null; } return true; } // Encode CMS with streaming to support large data public void Encode(X509Certificate2 cert, FileStream inFile, FileStream outFile) { // Variables Win32.CMSG_SIGNER_ENCODE_INFO SignerInfo; Win32.CMSG_SIGNED_ENCODE_INFO SignedInfo; Win32.CMSG_STREAM_INFO StreamInfo; Win32.CERT_CONTEXT[] CertContexts = null; Win32.BLOB[] CertBlobs; X509Chain chain = null; X509ChainElement[] chainElements = null; X509Certificate2[] certs = null; RSACryptoServiceProvider key = null; BinaryReader stream = null; GCHandle gchandle = new GCHandle(); IntPtr hProv = IntPtr.Zero; IntPtr SignerInfoPtr = IntPtr.Zero; IntPtr CertBlobsPtr = IntPtr.Zero; IntPtr hMsg = IntPtr.Zero; IntPtr pbPtr = IntPtr.Zero; Byte[] pbData; int dwFileSize; int dwRemaining; int dwSize; Boolean bResult = false; try { // Get data to encode dwFileSize = (int)inFile.Length; stream = new BinaryReader(inFile); pbData = stream.ReadBytes(dwFileSize); // Prepare stream for encoded info m_callbackFile = outFile; // Get cert chain chain = new X509Chain(); chain.Build(cert); chainElements = new X509ChainElement[chain.ChainElements.Count]; chain.ChainElements.CopyTo(chainElements, 0); // Get certs in chain certs = new X509Certificate2[chainElements.Length]; for (int i = 0; i < chainElements.Length; i++) { certs[i] = chainElements[i].Certificate; } // Get context of all certs in chain CertContexts = new Win32.CERT_CONTEXT[certs.Length]; for (int i = 0; i < certs.Length; i++) { CertContexts[i] = (Win32.CERT_CONTEXT)Marshal.PtrToStructure(certs[i].Handle, typeof(Win32.CERT_CONTEXT)); } // Get cert blob of all certs CertBlobs = new Win32.BLOB[CertContexts.Length]; for (int i = 0; i < CertContexts.Length; i++) { CertBlobs[i].cbData = CertContexts[i].cbCertEncoded; CertBlobs[i].pbData = CertContexts[i].pbCertEncoded; } // Get CSP of client certificate key = (RSACryptoServiceProvider)certs[0].PrivateKey; bResult = 
Win32.CryptAcquireContext( ref hProv, key.CspKeyContainerInfo.KeyContainerName, key.CspKeyContainerInfo.ProviderName, key.CspKeyContainerInfo.ProviderType, 0 ); if (!bResult) { throw new Exception("CryptAcquireContext error #" + Marshal.GetLastWin32Error().ToString(), new Win32Exception(Marshal.GetLastWin32Error())); } // Populate Signer Info struct SignerInfo = new Win32.CMSG_SIGNER_ENCODE_INFO(); SignerInfo.cbSize = Marshal.SizeOf(SignerInfo); SignerInfo.pCertInfo = CertContexts[0].pCertInfo; SignerInfo.hCryptProvOrhNCryptKey = hProv; SignerInfo.dwKeySpec = (int)key.CspKeyContainerInfo.KeyNumber; SignerInfo.HashAlgorithm.pszObjId = Win32.szOID_OIWSEC_sha1; // Populate Signed Info struct SignedInfo = new Win32.CMSG_SIGNED_ENCODE_INFO(); SignedInfo.cbSize = Marshal.SizeOf(SignedInfo); SignedInfo.cSigners = 1; SignerInfoPtr = Marshal.AllocHGlobal(Marshal.SizeOf(SignerInfo)); Marshal.StructureToPtr(SignerInfo, SignerInfoPtr, false); SignedInfo.rgSigners = SignerInfoPtr; SignedInfo.cCertEncoded = CertBlobs.Length; CertBlobsPtr = Marshal.AllocHGlobal(Marshal.SizeOf(CertBlobs[0]) * CertBlobs.Length); for (int i = 0; i < CertBlobs.Length; i++) { Marshal.StructureToPtr(CertBlobs[i], new IntPtr(CertBlobsPtr.ToInt64() + (Marshal.SizeOf(CertBlobs[i]) * i)), false); } SignedInfo.rgCertEncoded = CertBlobsPtr; // Populate Stream Info struct StreamInfo = new Win32.CMSG_STREAM_INFO(); StreamInfo.cbContent = dwFileSize; StreamInfo.pfnStreamOutput = new Win32.StreamOutputCallbackDelegate(StreamOutputCallback); // TODO: CMSG_DETACHED_FLAG // Open message to encode hMsg = Win32.CryptMsgOpenToEncode( Win32.X509_ASN_ENCODING | Win32.PKCS_7_ASN_ENCODING, 0, Win32.CMSG_SIGNED, ref SignedInfo, null, ref StreamInfo ); if (hMsg.Equals(IntPtr.Zero)) { throw new Exception("CryptMsgOpenToEncode error #" + Marshal.GetLastWin32Error().ToString(), new Win32Exception(Marshal.GetLastWin32Error())); } // Process the whole message gchandle = GCHandle.Alloc(pbData, GCHandleType.Pinned); pbPtr = gchandle.AddrOfPinnedObject(); dwRemaining = dwFileSize; dwSize = (dwFileSize < 1024 * 1000 * 100) ? dwFileSize : 1024 * 1000 * 100; while (dwRemaining > 0) { // Update message piece by piece bResult = Win32.CryptMsgUpdate( hMsg, pbPtr, dwSize, (dwRemaining <= dwSize) ? 
true : false ); if (!bResult) { throw new Exception("CryptMsgUpdate error #" + Marshal.GetLastWin32Error().ToString(), new Win32Exception(Marshal.GetLastWin32Error())); } // Move to the next piece pbPtr = new IntPtr(pbPtr.ToInt64() + dwSize); dwRemaining -= dwSize; if (dwRemaining < dwSize) { dwSize = dwRemaining; } } } finally { // Clean up if (gchandle.IsAllocated) { gchandle.Free(); } if (stream != null) { stream.Close(); } if (m_callbackFile != null) { m_callbackFile.Close(); } if (!CertBlobsPtr.Equals(IntPtr.Zero)) { Marshal.FreeHGlobal(CertBlobsPtr); } if (!SignerInfoPtr.Equals(IntPtr.Zero)) { Marshal.FreeHGlobal(SignerInfoPtr); } if (!hProv.Equals(IntPtr.Zero)) { Win32.CryptReleaseContext(hProv, 0); } if (!hMsg.Equals(IntPtr.Zero)) { Win32.CryptMsgClose(hMsg); } } } // Decode CMS with streaming to support large data public void Decode(FileStream inFile, FileStream outFile) { // Variables Win32.CMSG_STREAM_INFO StreamInfo; Win32.CERT_CONTEXT SignerCertContext; BinaryReader stream = null; GCHandle gchandle = new GCHandle(); IntPtr hMsg = IntPtr.Zero; IntPtr pSignerCertInfo = IntPtr.Zero; IntPtr pSignerCertContext = IntPtr.Zero; IntPtr pbPtr = IntPtr.Zero; IntPtr hStore = IntPtr.Zero; Byte[] pbData; Boolean bResult = false; int dwFileSize; int dwRemaining; int dwSize; int cbSignerCertInfo; try { // Get data to decode dwFileSize = (int)inFile.Length; stream = new BinaryReader(inFile); pbData = stream.ReadBytes(dwFileSize); // Prepare stream for decoded info m_callbackFile = outFile; // Populate Stream Info struct StreamInfo = new Win32.CMSG_STREAM_INFO(); StreamInfo.cbContent = dwFileSize; StreamInfo.pfnStreamOutput = new Win32.StreamOutputCallbackDelegate(StreamOutputCallback); // Open message to decode hMsg = Win32.CryptMsgOpenToDecode( Win32.X509_ASN_ENCODING | Win32.PKCS_7_ASN_ENCODING, 0, 0, IntPtr.Zero, IntPtr.Zero, ref StreamInfo ); if (hMsg.Equals(IntPtr.Zero)) { throw new Exception("CryptMsgOpenToDecode error #" + Marshal.GetLastWin32Error().ToString(), new Win32Exception(Marshal.GetLastWin32Error())); } // Process the whole message gchandle = GCHandle.Alloc(pbData, GCHandleType.Pinned); pbPtr = gchandle.AddrOfPinnedObject(); dwRemaining = dwFileSize; dwSize = (dwFileSize < 1024 * 1000 * 100) ? dwFileSize : 1024 * 1000 * 100; while (dwRemaining > 0) { // Update message piece by piece bResult = Win32.CryptMsgUpdate( hMsg, pbPtr, dwSize, (dwRemaining <= dwSize) ? 
true : false ); if (!bResult) { throw new Exception("CryptMsgUpdate error #" + Marshal.GetLastWin32Error().ToString(), new Win32Exception(Marshal.GetLastWin32Error())); } // Move to the next piece pbPtr = new IntPtr(pbPtr.ToInt64() + dwSize); dwRemaining -= dwSize; if (dwRemaining < dwSize) { dwSize = dwRemaining; } } // Get signer certificate info cbSignerCertInfo = 0; bResult = Win32.CryptMsgGetParam( hMsg, Win32.CMSG_SIGNER_CERT_INFO_PARAM, 0, IntPtr.Zero, ref cbSignerCertInfo ); if (!bResult) { throw new Exception("CryptMsgGetParam error #" + Marshal.GetLastWin32Error().ToString(), new Win32Exception(Marshal.GetLastWin32Error())); } pSignerCertInfo = Marshal.AllocHGlobal(cbSignerCertInfo); bResult = Win32.CryptMsgGetParam( hMsg, Win32.CMSG_SIGNER_CERT_INFO_PARAM, 0, pSignerCertInfo, ref cbSignerCertInfo ); if (!bResult) { throw new Exception("CryptMsgGetParam error #" + Marshal.GetLastWin32Error().ToString(), new Win32Exception(Marshal.GetLastWin32Error())); } // Open a cert store in memory with the certs from the message hStore = Win32.CertOpenStore( Win32.CERT_STORE_PROV_MSG, Win32.X509_ASN_ENCODING | Win32.PKCS_7_ASN_ENCODING, IntPtr.Zero, 0, hMsg ); if (hStore.Equals(IntPtr.Zero)) { throw new Exception("CertOpenStore error #" + Marshal.GetLastWin32Error().ToString(), new Win32Exception(Marshal.GetLastWin32Error())); } // Find the signer's cert in the store pSignerCertContext = Win32.CertGetSubjectCertificateFromStore( hStore, Win32.X509_ASN_ENCODING | Win32.PKCS_7_ASN_ENCODING, pSignerCertInfo ); if (pSignerCertContext.Equals(IntPtr.Zero)) { throw new Exception("CertGetSubjectCertificateFromStore error #" + Marshal.GetLastWin32Error().ToString(), new Win32Exception(Marshal.GetLastWin32Error())); } // Set message for verifying SignerCertContext = (Win32.CERT_CONTEXT)Marshal.PtrToStructure(pSignerCertContext, typeof(Win32.CERT_CONTEXT)); bResult = Win32.CryptMsgControl( hMsg, 0, Win32.CMSG_CTRL_VERIFY_SIGNATURE, SignerCertContext.pCertInfo ); if (!bResult) { throw new Exception("CryptMsgControl error #" + Marshal.GetLastWin32Error().ToString(), new Win32Exception(Marshal.GetLastWin32Error())); } } finally { // Clean up if (gchandle.IsAllocated) { gchandle.Free(); } if (!pSignerCertContext.Equals(IntPtr.Zero)) { Win32.CertFreeCertificateContext(pSignerCertContext); } if (!pSignerCertInfo.Equals(IntPtr.Zero)) { Marshal.FreeHGlobal(pSignerCertInfo); } if (!hStore.Equals(IntPtr.Zero)) { Win32.CertCloseStore(hStore, Win32.CERT_CLOSE_STORE_FORCE_FLAG); } if (stream != null) { stream.Close(); } if (m_callbackFile != null) { m_callbackFile.Close(); } if (!hMsg.Equals(IntPtr.Zero)) { Win32.CryptMsgClose(hMsg); } } } } } and using System; using System.Collections.Generic; using System.Linq; using System.Text; using System.Runtime.InteropServices; using System.Security.Cryptography.X509Certificates; using System.ComponentModel; using System.Security.Cryptography; namespace LargeCMS { class Win32 { #region "CONSTS" public const int X509_ASN_ENCODING = 0x00000001; public const int PKCS_7_ASN_ENCODING = 0x00010000; public const int CMSG_SIGNED = 2; public const int CMSG_DETACHED_FLAG = 0x00000004; public const int AT_KEYEXCHANGE = 1; public const int AT_SIGNATURE = 2; public const String szOID_OIWSEC_sha1 = "1.3.14.3.2.26"; public const int CMSG_CTRL_VERIFY_SIGNATURE = 1; public const int CMSG_CERT_PARAM = 12; public const int CMSG_SIGNER_CERT_INFO_PARAM = 7; public const int CERT_STORE_PROV_MSG = 1; public const int CERT_CLOSE_STORE_FORCE_FLAG = 1; #endregion #region "STRUCTS" 
[StructLayout(LayoutKind.Sequential)] public struct CRYPT_ALGORITHM_IDENTIFIER { public String pszObjId; BLOB Parameters; } [StructLayout(LayoutKind.Sequential)] public struct CERT_ID { public int dwIdChoice; public BLOB IssuerSerialNumberOrKeyIdOrHashId; } [StructLayout(LayoutKind.Sequential)] public struct CMSG_SIGNER_ENCODE_INFO { public int cbSize; public IntPtr pCertInfo; public IntPtr hCryptProvOrhNCryptKey; public int dwKeySpec; public CRYPT_ALGORITHM_IDENTIFIER HashAlgorithm; public IntPtr pvHashAuxInfo; public int cAuthAttr; public IntPtr rgAuthAttr; public int cUnauthAttr; public IntPtr rgUnauthAttr; public CERT_ID SignerId; public CRYPT_ALGORITHM_IDENTIFIER HashEncryptionAlgorithm; public IntPtr pvHashEncryptionAuxInfo; } [StructLayout(LayoutKind.Sequential)] public struct CERT_CONTEXT { public int dwCertEncodingType; public IntPtr pbCertEncoded; public int cbCertEncoded; public IntPtr pCertInfo; public IntPtr hCertStore; } [StructLayout(LayoutKind.Sequential)] public struct BLOB { public int cbData; public IntPtr pbData; } [StructLayout(LayoutKind.Sequential)] public struct CMSG_SIGNED_ENCODE_INFO { public int cbSize; public int cSigners; public IntPtr rgSigners; public int cCertEncoded; public IntPtr rgCertEncoded; public int cCrlEncoded; public IntPtr rgCrlEncoded; public int cAttrCertEncoded; public IntPtr rgAttrCertEncoded; } [StructLayout(LayoutKind.Sequential)] public struct CMSG_STREAM_INFO { public int cbContent; public StreamOutputCallbackDelegate pfnStreamOutput; public IntPtr pvArg; } #endregion #region "DELEGATES" public delegate Boolean StreamOutputCallbackDelegate(IntPtr pvArg, IntPtr pbData, int cbData, Boolean fFinal); #endregion #region "API" [DllImport("advapi32.dll", CharSet = CharSet.Auto, SetLastError = true)] public static extern Boolean CryptAcquireContext( ref IntPtr hProv, String pszContainer, String pszProvider, int dwProvType, int dwFlags ); [DllImport("Crypt32.dll", SetLastError = true)] public static extern IntPtr CryptMsgOpenToEncode( int dwMsgEncodingType, int dwFlags, int dwMsgType, ref CMSG_SIGNED_ENCODE_INFO pvMsgEncodeInfo, String pszInnerContentObjID, ref CMSG_STREAM_INFO pStreamInfo ); [DllImport("Crypt32.dll", SetLastError = true)] public static extern IntPtr CryptMsgOpenToDecode( int dwMsgEncodingType, int dwFlags, int dwMsgType, IntPtr hCryptProv, IntPtr pRecipientInfo, ref CMSG_STREAM_INFO pStreamInfo ); [DllImport("Crypt32.dll", SetLastError = true)] public static extern Boolean CryptMsgClose( IntPtr hCryptMsg ); [DllImport("Crypt32.dll", SetLastError = true)] public static extern Boolean CryptMsgUpdate( IntPtr hCryptMsg, Byte[] pbData, int cbData, Boolean fFinal ); [DllImport("Crypt32.dll", SetLastError = true)] public static extern Boolean CryptMsgUpdate( IntPtr hCryptMsg, IntPtr pbData, int cbData, Boolean fFinal ); [DllImport("Crypt32.dll", SetLastError = true)] public static extern Boolean CryptMsgGetParam( IntPtr hCryptMsg, int dwParamType, int dwIndex, IntPtr pvData, ref int pcbData ); [DllImport("Crypt32.dll", SetLastError = true)] public static extern Boolean CryptMsgControl( IntPtr hCryptMsg, int dwFlags, int dwCtrlType, IntPtr pvCtrlPara ); [DllImport("advapi32.dll", SetLastError = true)] public static extern Boolean CryptReleaseContext( IntPtr hProv, int dwFlags ); [DllImport("Crypt32.dll", SetLastError = true)] public static extern IntPtr CertCreateCertificateContext( int dwCertEncodingType, IntPtr pbCertEncoded, int cbCertEncoded ); [DllImport("Crypt32.dll", SetLastError = true)] public static extern Boolean 
CertFreeCertificateContext( IntPtr pCertContext ); [DllImport("Crypt32.dll", SetLastError = true)] public static extern IntPtr CertOpenStore( int lpszStoreProvider, int dwMsgAndCertEncodingType, IntPtr hCryptProv, int dwFlags, IntPtr pvPara ); [DllImport("Crypt32.dll", SetLastError = true)] public static extern IntPtr CertGetSubjectCertificateFromStore( IntPtr hCertStore, int dwCertEncodingType, IntPtr pCertId ); [DllImport("Crypt32.dll", SetLastError = true)] public static extern IntPtr CertCloseStore( IntPtr hCertStore, int dwFlags ); #endregion } }
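
    For reference, the raw number in "CryptMsgUpdate error #-2146893816" is simply a CryptoAPI HRESULT printed in decimal. A small, hypothetical C# sketch (not part of the classes above) that turns it back into something you can look up:

    using System;
    using System.ComponentModel;

    class ErrorLookup
    {
        static void Main()
        {
            // Raw value from the exception message above.
            int error = -2146893816;

            // Printed as an HRESULT this is 0x80090008 (NTE_BAD_ALGID),
            // the CryptoAPI status behind "Invalid algorithm specified".
            Console.WriteLine("0x{0:X8}", error);
            Console.WriteLine(new Win32Exception(error).Message);
        }
    }

    Seeing that the failure is NTE_BAD_ALGID suggests the difference lies in the cryptographic providers and algorithms available on the Windows 2003 machine versus the Windows 7 machine, rather than in the P/Invoke plumbing itself.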


  • Apache FOP - Table top and bottom borders missing at page break inside table

    - by Thomas
    I am using Apache FOP to generate a PDF from a XLS FO document. I have created a test XLS FO document that contains a table with collapsed borders that with several tall rows. One of the rows starts on one page and ends on the next and this works as expected. The problem is that the bottom border of the table on the first page is missing and the top border of the table on the second pages is also missing. Below is the sample XLS FO document. <?xml version="1.0" encoding="utf-8"?> <fo:root xmlns:fo="http://www.w3.org/1999/XSL/Format" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"> <!-- defines the layout master --> <fo:layout-master-set> <fo:simple-page-master master-name="first" page-height="29.7cm" page-width="21cm" margin-top="1cm" margin-bottom="2cm" margin-left="2.5cm" margin-right="2.5cm"> <fo:region-body margin-top="3cm"/> <fo:region-before extent="3cm"/> <fo:region-after extent="1.5cm"/> </fo:simple-page-master> </fo:layout-master-set> <!-- starts actual layout --> <fo:page-sequence master-reference="first"> <fo:title>Sample Doc</fo:title> <fo:flow flow-name="xsl-region-body" font-size="x-small" font="Times New Roman"> <!-- table start --> <fo:table table-layout="fixed" width="100%" border-collapse="collapse"> <fo:table-column column-width="35mm"/> <fo:table-column column-width="100mm"/> <fo:table-column column-width="20mm"/> <fo:table-body> <fo:table-row> <fo:table-cell border-width="0.5mm" border-style="solid"> <fo:block>Column 1</fo:block> </fo:table-cell> <fo:table-cell border-width="0.5mm" border-style="solid"> <fo:block>Columns 2</fo:block> </fo:table-cell> <fo:table-cell border-width="0.5mm" border-style="solid"> <fo:block>Column 3</fo:block> </fo:table-cell> </fo:table-row> <fo:table-row> <fo:table-cell border-width="0.5mm" border-style="solid"> <fo:block>Row 1</fo:block> </fo:table-cell> <fo:table-cell border-width="0.5mm" border-style="solid"> <fo:block>Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book. It has survived not only five centuries, but also the leap into electronic typesetting, remaining essentially unchanged. 
It was popularised in the 1960s with the release of Letraset sheets containing Lorem Ipsum passages, and more recently with desktop publishing software like Aldus PageMaker including versions of Lorem Ipsum.</fo:block> <fo:block>Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum</fo:block> <fo:block>Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum</fo:block> <fo:block>Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum</fo:block> <fo:block>It is a long established fact that a reader will be distracted by the readable content of a page when looking at its layout.</fo:block> <fo:block>Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum</fo:block> <fo:block>Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum</fo:block> <fo:block>Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum</fo:block> <fo:block>Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum</fo:block> <fo:block>Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum</fo:block> </fo:table-cell> <fo:table-cell border-width="0.5mm" border-style="solid"> <fo:block>Some text</fo:block> </fo:table-cell> </fo:table-row> <fo:table-row> <fo:table-cell border-width="0.5mm" border-style="solid"> <fo:block>Row 2</fo:block> </fo:table-cell> <fo:table-cell border-width="0.5mm" border-style="solid"> <fo:block>Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book. It has survived not only five centuries, but also the leap into electronic typesetting, remaining essentially unchanged. It was popularised in the 1960s with the release of Letraset sheets containing Lorem Ipsum passages, and more recently with desktop publishing software like Aldus PageMaker including versions of Lorem Ipsum.</fo:block> <fo:block>Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum</fo:block> <fo:block>Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum</fo:block> <fo:block>Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum</fo:block> <fo:block>It is a long established fact that a reader will be distracted by the readable content of a page when looking at its layout.</fo:block> <fo:block>Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum</fo:block> <fo:block>Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum</fo:block> <fo:block>Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum</fo:block> <fo:block>Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum</fo:block> <fo:block>Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum</fo:block> </fo:table-cell> <fo:table-cell border-width="0.5mm" border-style="solid"> <fo:block>Some text</fo:block> </fo:table-cell> </fo:table-row> <fo:table-row> <fo:table-cell border-width="0.5mm" border-style="solid"> <fo:block>Row 3</fo:block> </fo:table-cell> <fo:table-cell border-width="0.5mm" border-style="solid"> <fo:block>Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book. It has survived not only five centuries, but also the leap into electronic typesetting, remaining essentially unchanged. 
It was popularised in the 1960s with the release of Letraset sheets containing Lorem Ipsum passages, and more recently with desktop publishing software like Aldus PageMaker including versions of Lorem Ipsum.</fo:block> <fo:block>Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum</fo:block> <fo:block>Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum</fo:block> <fo:block>Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum</fo:block> <fo:block>It is a long established fact that a reader will be distracted by the readable content of a page when looking at its layout.</fo:block> <fo:block>Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum</fo:block> <fo:block>Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum</fo:block> <fo:block>Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum</fo:block> <fo:block>Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum</fo:block> <fo:block>Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum</fo:block> </fo:table-cell> <fo:table-cell border-width="0.5mm" border-style="solid"> <fo:block>Some text</fo:block> </fo:table-cell> </fo:table-row> <fo:table-row> <fo:table-cell border-width="0.5mm" border-style="solid"> <fo:block>Row 4</fo:block> </fo:table-cell> <fo:table-cell border-width="0.5mm" border-style="solid"> <fo:block>Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book. It has survived not only five centuries, but also the leap into electronic typesetting, remaining essentially unchanged. It was popularised in the 1960s with the release of Letraset sheets containing Lorem Ipsum passages, and more recently with desktop publishing software like Aldus PageMaker including versions of Lorem Ipsum.</fo:block> <fo:block>Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum</fo:block> <fo:block>Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum</fo:block> <fo:block>Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum</fo:block> <fo:block>It is a long established fact that a reader will be distracted by the readable content of a page when looking at its layout.</fo:block> <fo:block>Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum</fo:block> <fo:block>Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum</fo:block> <fo:block>Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum</fo:block> <fo:block>Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum</fo:block> <fo:block>Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum</fo:block> </fo:table-cell> <fo:table-cell border-width="0.5mm" border-style="solid"> <fo:block>Some text</fo:block> </fo:table-cell> </fo:table-row> <fo:table-row> <fo:table-cell border-width="0.5mm" border-style="solid"> <fo:block>Row 5</fo:block> </fo:table-cell> <fo:table-cell border-width="0.5mm" border-style="solid"> <fo:block>Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book. It has survived not only five centuries, but also the leap into electronic typesetting, remaining essentially unchanged. 
It was popularised in the 1960s with the release of Letraset sheets containing Lorem Ipsum passages, and more recently with desktop publishing software like Aldus PageMaker including versions of Lorem Ipsum.</fo:block> <fo:block>Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum</fo:block> <fo:block>Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum</fo:block> <fo:block>Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum</fo:block> <fo:block>It is a long established fact that a reader will be distracted by the readable content of a page when looking at its layout.</fo:block> <fo:block>Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum</fo:block> <fo:block>Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum</fo:block> <fo:block>Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum</fo:block> <fo:block>Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum</fo:block> <fo:block>Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum Lorem Ipsum</fo:block> </fo:table-cell> <fo:table-cell border-width="0.5mm" border-style="solid"> <fo:block>Some text</fo:block> </fo:table-cell> </fo:table-row> </fo:table-body> </fo:table> <!-- table end --> </fo:flow> </fo:page-sequence> </fo:root> This image shows the bottom border on page 1 missing and the top border on page 2 missing, but all of the text seems to be there. Please note that I have already experimented with using an empty header and footer with borders, for example. This works, but I need to use those features for other things than fixing this issue, so what I need to know is whether there is another solution to the problem.
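
    For readers, this is roughly what the empty header/footer workaround mentioned above looks like: a fo:table-header and fo:table-footer, placed before the fo:table-body, whose only content is an empty block carrying the border so that the rule is redrawn at every page break. The values here are illustrative (the "3" matches the three columns in the sample), and as noted, this approach is not an option for the poster:

    <fo:table-header>
      <fo:table-row>
        <fo:table-cell number-columns-spanned="3" border-bottom="0.5mm solid black">
          <fo:block/>
        </fo:table-cell>
      </fo:table-row>
    </fo:table-header>
    <fo:table-footer>
      <fo:table-row>
        <fo:table-cell number-columns-spanned="3" border-top="0.5mm solid black">
          <fo:block/>
        </fo:table-cell>
      </fo:table-row>
    </fo:table-footer>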


  • Choosing a VS project type (C++)

    - by typoknig
    Hi all, I do not use C++ much (I try to stick to the easier stuff like Java and VB.NET), but the lately I have not had a choice. When I am picking a project type in VS for some C++ source I download, what project type should I pick? I had just been sticking with Win32 Console Applications, but I just downloaded some code (below) that will not work right even when it compiles with out errors. I have tried to use a CLR Console Application and an empty project too, and have changed many variables along the way, but I cannot get this code to work. I noticed that this code does not have "int main()" at its beginning, does that have something to do with it? Anyways, here is the code, got it from here: /* Demo of modified Lucas-Kanade optical flow algorithm. See the printf below */ #ifdef _CH_ #pragma package <opencv> #endif #ifndef _EiC #include "cv.h" #include "highgui.h" #include <stdio.h> #include <ctype.h> #endif #include <windows.h> #define FULL_IMAGE_AS_OUTPUT_FILE #define cvMirror cvFlip //IplImage *image = 0, *grey = 0, *prev_grey = 0, *pyramid = 0, *prev_pyramid = 0, *swap_temp; IplImage **buf = 0; IplImage *image1 = 0; IplImage *imageCopy=0; IplImage *image = 0; int win_size = 10; const int MAX_COUNT = 500; CvPoint2D32f* points[2] = {0,0}, *swap_points; char* status = 0; //int count = 0; //int need_to_init = 0; //int night_mode = 0; int flags = 0; //int add_remove_pt = 0; bool bLButtonDown = false; //bool bstopLoop = false; CvPoint pt, pt1,pt2; //IplImage* img1; FILE* FileDest; char* strImageDir = "E:\\Projects\\TSCreator\\Images"; char* strItemName = "b"; int imageCount=0; int bFirstFace = 1; // flag for first face int mode = 1; // Mode 1 - Haar Traing Sample Creation, 2 - HMM sample creation, Mode = 3 - Both Harr and HMM. //int startImgeNo = 1; bool isEqualRation = false; //Weidth to height ratio is equal //Selected Image data IplImage *selectedImage = 0; int selectedX = 0, selectedY = 0, currentImageNo = 0, selectedWidth = 0, selectedHeight= 0; CvRect selectedROI; void saveFroHarrTraining(IplImage *src, int x, int y, int width, int height, int imageCount); void saveForHMMTraining(IplImage *src, CvRect roi,int imageCount); // Code for draw ROI Cropping Image void on_mouse( int event, int x, int y, int flags, void* param ) { char f[200]; CvRect reg; if( !image ) return; if( event == CV_EVENT_LBUTTONDOWN ) { bLButtonDown = true; pt1.x = x; pt1.y = y; } else if ( event == CV_EVENT_MOUSEMOVE ) //Draw the selected area rectangle { pt2.x = x; pt2.y = y; if(bLButtonDown) { if( !image1 ) { /* allocate all the buffers */ image1 = cvCreateImage( cvGetSize(image), 8, 3 ); image1->origin = image->origin; points[0] = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(points[0][0])); points[1] = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(points[0][0])); status = (char*)cvAlloc(MAX_COUNT); flags = 0; } cvCopy( image, image1, 0 ); //Equal Weight-Height Ratio if(isEqualRation) { pt2.y = pt1.y + (pt2.x-pt1.x); } //Max Height and Width is the image width and height if(pt2.x>image->width) { pt2.x = image->width; } if(pt2.y>image->height) { pt2.y = image->height; } CvPoint InnerPt1 = pt1; CvPoint InnerPt2 = pt2; if ( InnerPt1.x > InnerPt2.x) { int tempX = InnerPt1.x; InnerPt1.x = InnerPt2.x; InnerPt2.x = tempX; } if ( pt2.y < InnerPt1.y ) { int tempY = InnerPt1.y; InnerPt1.y = InnerPt2.y; InnerPt2.y = tempY; } InnerPt1.y = image->height - InnerPt1.y; InnerPt2.y = image->height - InnerPt2.y; CvFont font; double hScale=1.0; double vScale=1.0; int lineWidth=1; cvInitFont(&font,CV_FONT_HERSHEY_SIMPLEX|CV_FONT_ITALIC, 
hScale,vScale,0,lineWidth); char size [200]; reg.x = pt1.x; reg.y = image->height - pt2.y; reg.height = abs (pt2.y - pt1.y); reg.width = InnerPt2.x -InnerPt1.x; //print width and heght of the selected reagion sprintf(size, "(%dx%d)",reg.width, reg.height); cvPutText (image1,size,cvPoint(10,10), &font, cvScalar(255,255,0)); cvRectangle(image1, InnerPt1, InnerPt2, CV_RGB(255,0,0), 1); //Mark Selected Reagion selectedImage = image; selectedX = pt1.x; selectedY = pt1.y; selectedWidth = reg.width; selectedHeight = reg.height; selectedROI = reg; //Show the modified image cvShowImage("HMM-Harr Positive Image Creator",image1); } } else if ( event == CV_EVENT_LBUTTONUP ) { bLButtonDown = false; // pt2.x = x; // pt2.y = y; // // if ( pt1.x > pt2.x) // { // int tempX = pt1.x; // pt1.x = pt2.x; // pt2.x = tempX; // } // // if ( pt2.y < pt1.y ) // { // int tempY = pt1.y; // pt1.y = pt2.y; // pt2.y = tempY; // // } // //reg.x = pt1.x; //reg.y = image->height - pt2.y; // //reg.height = abs (pt2.y - pt1.y); ////reg.width = reg.height/3; //reg.width = pt2.x -pt1.x; ////reg.height = (2 * reg.width)/3; #ifdef FULL_IMAGE_AS_OUTPUT_FILE CvRect FullImageRect; FullImageRect.x = 0; FullImageRect.y = 0; FullImageRect.width = image->width; FullImageRect.height = image->height; IplImage *regionFullImage =0; regionFullImage = cvCreateImage(cvSize (FullImageRect.width, FullImageRect.height), image->depth, image->nChannels); image->roi = NULL; //cvSetImageROI (image, FullImageRect); //cvCopy (image, regionFullImage, 0); #else IplImage *region =0; region = cvCreateImage(cvSize (reg.width, reg.height), image1->depth, image1->nChannels); image->roi = NULL; cvSetImageROI (image1, reg); cvCopy (image1, region, 0); #endif //cvNamedWindow("Result", CV_WINDOW_AUTOSIZE); //selectedImage = image; //selectedX = pt1.x; //selectedY = pt1.y; //selectedWidth = reg.width; //selectedHeight = reg.height; ////currentImageNo = startImgeNo; //selectedROI = reg; /*if(mode == 1) { saveFroHarrTraining(image,pt1.x,pt1.y,reg.width,reg.height,startImgeNo); } else if(mode == 2) { saveForHMMTraining(image,reg,startImgeNo); } else if(mode ==3) { saveFroHarrTraining(image,pt1.x,pt1.y,reg.width,reg.height,startImgeNo); saveForHMMTraining(image,reg,startImgeNo); } else { printf("Invalid mode."); } startImgeNo++;*/ } } /* Save popsitive samples for Harr Training. Also add an entry to the PositiveSample.txt with the location of the item of interest. */ void saveFroHarrTraining(IplImage *src, int x, int y, int width, int height, int imageCount) { char f[255] ; sprintf(f,"%s\\%s\\harr_%s%d%d.jpg",strImageDir,strItemName,strItemName,imageCount/10, imageCount%10); cvNamedWindow("Harr", CV_WINDOW_AUTOSIZE); cvShowImage("Harr", src); cvSaveImage(f, src); printf("output%d%d \t ", imageCount/10, imageCount%10); printf("width %d \t", width); printf("height %d \t", height); printf("x1 %d \t", x); printf("y1 %d \t\n", y); char f1[255]; sprintf(f1,"%s\\PositiveSample.txt",strImageDir); FileDest = fopen(f1, "a"); fprintf(FileDest, "%s\\harr_%s%d.jpg 1 %d %d %d %d \n",strItemName,strItemName, imageCount, x, y, width, height); fclose(FileDest); } /* Create Sample Images for HMM recognition algorythm trai ning. 
*/ void saveForHMMTraining(IplImage *src, CvRect roi,int imageCount) { char f[255] ; printf("x=%d, y=%d, w= %d, h= %d\n",roi.x,roi.y,roi.width,roi.height); //Create the file name sprintf(f,"%s\\%s\\hmm_%s%d.pgm",strImageDir,strItemName,strItemName, imageCount); //Create storage for grayscale image IplImage* gray = cvCreateImage(cvSize(roi.width,roi.height), 8, 1); //Create storage for croped reagon IplImage* regionFullImage = cvCreateImage(cvSize(roi.width,roi.height),8,3); //Croped marked region cvSetImageROI(src,roi); cvCopy(src,regionFullImage); cvResetImageROI(src); //Flip croped image - otherwise it will saved upside down cvConvertImage(regionFullImage, regionFullImage, CV_CVTIMG_FLIP); //Convert croped image to gray scale cvCvtColor(regionFullImage,gray, CV_BGR2GRAY); //Show final grayscale image cvNamedWindow("HMM", CV_WINDOW_AUTOSIZE); cvShowImage("HMM", gray); //Save final grayscale image cvSaveImage(f, gray); } int maina( int argc, char** argv ) { CvCapture* capture = 0; //if( argc == 1 || (argc == 2 && strlen(argv[1]) == 1 && isdigit(argv[1][0]))) // capture = cvCaptureFromCAM( argc == 2 ? argv[1][0] - '0' : 0 ); //else if( argc == 2 ) // capture = cvCaptureFromAVI( argv[1] ); char* video; if(argc ==7) { mode = atoi(argv[1]); strImageDir = argv[2]; strItemName = argv[3]; video = argv[4]; currentImageNo = atoi(argv[5]); int a = atoi(argv[6]); if(a==1) { isEqualRation = true; } else { isEqualRation = false; } } else { printf("\nUsage: TSCreator.exe <Mode> <Sample Image Save Path> <Sample Image Save Directory> <Video File Location> <Start Image No> <Is Equal Ratio>\n"); printf("Mode = 1 - Haar Traing Sample Creation. \nMode = 2 - HMM sample creation.\nMode = 3 - Both Harr and HMM\n"); printf("Is Equal Ratio = 0 or 1. 1 - Equal weidth and height, 0 - custom."); printf("Note: You have to create the image save directory in correct path first.\n"); printf("Eg: TSCreator.exe 1 E:\Projects\TSCreator\Images A 11.avi 1 1\n\n"); return 0; } capture = cvCaptureFromAVI(video); if( !capture ) { fprintf(stderr,"Could not initialize capturing...\n"); return -1; } cvNamedWindow("HMM-Harr Positive Image Creator", CV_WINDOW_AUTOSIZE); cvSetMouseCallback("HMM-Harr Positive Image Creator", on_mouse, 0); //cvShowImage("Test", image1); for(;;) { IplImage* frame = 0; int i, k, c; frame = cvQueryFrame( capture ); if( !frame ) break; if( !image ) { /* allocate all the buffers */ image = cvCreateImage( cvGetSize(frame), 8, 3 ); image->origin = frame->origin; //grey = cvCreateImage( cvGetSize(frame), 8, 1 ); //prev_grey = cvCreateImage( cvGetSize(frame), 8, 1 ); //pyramid = cvCreateImage( cvGetSize(frame), 8, 1 ); // prev_pyramid = cvCreateImage( cvGetSize(frame), 8, 1 ); points[0] = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(points[0][0])); points[1] = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(points[0][0])); status = (char*)cvAlloc(MAX_COUNT); flags = 0; } cvCopy( frame, image, 0 ); // cvCvtColor( image, grey, CV_BGR2GRAY ); cvShowImage("HMM-Harr Positive Image Creator", image); cvSetMouseCallback("HMM-Harr Positive Image Creator", on_mouse, 0); c = cvWaitKey(0); if((char)c == 's') { //Save selected reagion as training data if(selectedImage) { printf("Selected Reagion Saved\n"); if(mode == 1) { saveFroHarrTraining(selectedImage,selectedX,selectedY,selectedWidth,selectedHeight,currentImageNo); } else if(mode == 2) { saveForHMMTraining(selectedImage,selectedROI,currentImageNo); } else if(mode ==3) { saveFroHarrTraining(selectedImage,selectedX,selectedY,selectedWidth,selectedHeight,currentImageNo); 
saveForHMMTraining(selectedImage,selectedROI,currentImageNo); } else { printf("Invalid mode."); } currentImageNo++; } } } cvReleaseCapture( &capture ); //cvDestroyWindow("HMM-Harr Positive Image Creator"); cvDestroyAllWindows(); return 0; } #ifdef _EiC main(1,"lkdemo.c"); #endif If I put... #include "stdafx.h" int _tmain(int argc, _TCHAR* argv[]) { return 0; } ... before the previous code (and link it to the correct OpenCV .lib files) it compiles without errors, but does nothing at the command line. How do I make it work?
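
    One detail worth spelling out: the downloaded demo defines its entry point as maina(int, char**) rather than main(), so the empty _tmain shown above compiles, starts, and exits without ever reaching the demo code. A minimal fix is to replace the empty _tmain with an entry point that forwards to it; a sketch, assuming the demo source is compiled into the same Win32 Console Application project and the OpenCV libraries are linked:

    // Entry point defined near the bottom of the downloaded demo source.
    int maina(int argc, char** argv);

    // Forward the command-line arguments straight to the demo.
    int main(int argc, char** argv)
    {
        return maina(argc, argv);
    }

    The program then still needs the six command-line arguments described in its usage message (mode, image directory, item name, video file, start image number, and the equal-ratio flag) before it will open the video and show the capture window.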


  • Camera for 2.5D Game

    - by me--
    I'm hoping someone can explain this to me like I'm 5, because I've been struggling with this for hours and simply cannot understand what I'm doing wrong. I've written a Camera class for my 2.5D game. The intention is to support world and screen spaces like this: The camera is the black thing on the right. The +Z axis is upwards in that image, with -Z heading downwards. As you can see, both world space and screen space have (0, 0) at their top-left. I started writing some unit tests to prove that my camera was working as expected, and that's where things started getting...strange. My tests plot coordinates in world, view, and screen spaces. Eventually I will use image comparison to assert that they are correct, but for now my test just displays the result. The render logic uses Camera.ViewMatrix to transform world space to view space, and Camera.WorldPointToScreen to transform world space to screen space. Here is an example test: [Fact] public void foo() { var camera = new Camera(new Viewport(0, 0, 250, 100)); DrawingVisual worldRender; DrawingVisual viewRender; DrawingVisual screenRender; this.Render(camera, out worldRender, out viewRender, out screenRender, new Vector3(30, 0, 0), new Vector3(30, 40, 0)); this.ShowRenders(camera, worldRender, viewRender, screenRender); } And here's what pops up when I run this test: World space looks OK, although I suspect the z axis is going into the screen instead of towards the viewer. View space has me completely baffled. I was expecting the camera to be sitting above (0, 0) and looking towards the center of the scene. Instead, the z axis seems to be the wrong way around, and the camera is positioned in the opposite corner to what I expect! I suspect screen space will be another thing altogether, but can anyone explain what I'm doing wrong in my Camera class? UPDATE I made some progress in terms of getting things to look visually as I expect, but only through intuition: not an actual understanding of what I'm doing. Any enlightenment would be greatly appreciated. I realized that my view space was flipped both vertically and horizontally compared to what I expected, so I changed my view matrix to scale accordingly: this.viewMatrix = Matrix.CreateLookAt(this.location, this.target, this.up) * Matrix.CreateScale(this.zoom, this.zoom, 1) * Matrix.CreateScale(-1, -1, 1); I could combine the two CreateScale calls, but have left them separate for clarity. Again, I have no idea why this is necessary, but it fixed my view space: But now my screen space needs to be flipped vertically, so I modified my projection matrix accordingly: this.projectionMatrix = Matrix.CreatePerspectiveFieldOfView(0.7853982f, viewport.AspectRatio, 1, 2) * Matrix.CreateScale(1, -1, 1); And this results in what I was expecting from my first attempt: I have also just tried using Camera to render sprites via a SpriteBatch to make sure everything works there too, and it does. But the question remains: why do I need to do all this flipping of axes to get the space coordinates the way I expect? UPDATE 2 I've since improved my rendering logic in my test suite so that it supports geometries and so that lines get lighter the further away they are from the camera. I wanted to do this to avoid optical illusions and to further prove to myself that I'm looking at what I think I am. Here is an example: In this case, I have 3 geometries: a cube, a sphere, and a polyline on the top face of the cube. 
Notice how the darkening and lightening of the lines correctly identifies those portions of the geometries closer to the camera. If I remove the negative scaling I had to put in, I see: So you can see I'm still in the same boat - I still need those vertical and horizontal flips in my matrices to get things to appear correctly. In the interests of giving people a repro to play with, here is the complete code needed to generate the above. If you want to run via the test harness, just install the xunit package: Camera.cs: using Microsoft.Xna.Framework; using Microsoft.Xna.Framework.Graphics; using System.Diagnostics; public sealed class Camera { private readonly Viewport viewport; private readonly Matrix projectionMatrix; private Matrix? viewMatrix; private Vector3 location; private Vector3 target; private Vector3 up; private float zoom; public Camera(Viewport viewport) { this.viewport = viewport; // for an explanation of the negative scaling, see: http://gamedev.stackexchange.com/questions/63409/ this.projectionMatrix = Matrix.CreatePerspectiveFieldOfView(0.7853982f, viewport.AspectRatio, 1, 2) * Matrix.CreateScale(1, -1, 1); // defaults this.location = new Vector3(this.viewport.Width / 2, this.viewport.Height, 100); this.target = new Vector3(this.viewport.Width / 2, this.viewport.Height / 2, 0); this.up = new Vector3(0, 0, 1); this.zoom = 1; } public Viewport Viewport { get { return this.viewport; } } public Vector3 Location { get { return this.location; } set { this.location = value; this.viewMatrix = null; } } public Vector3 Target { get { return this.target; } set { this.target = value; this.viewMatrix = null; } } public Vector3 Up { get { return this.up; } set { this.up = value; this.viewMatrix = null; } } public float Zoom { get { return this.zoom; } set { this.zoom = value; this.viewMatrix = null; } } public Matrix ProjectionMatrix { get { return this.projectionMatrix; } } public Matrix ViewMatrix { get { if (this.viewMatrix == null) { // for an explanation of the negative scaling, see: http://gamedev.stackexchange.com/questions/63409/ this.viewMatrix = Matrix.CreateLookAt(this.location, this.target, this.up) * Matrix.CreateScale(this.zoom) * Matrix.CreateScale(-1, -1, 1); } return this.viewMatrix.Value; } } public Vector2 WorldPointToScreen(Vector3 point) { var result = viewport.Project(point, this.ProjectionMatrix, this.ViewMatrix, Matrix.Identity); return new Vector2(result.X, result.Y); } public void WorldPointsToScreen(Vector3[] points, Vector2[] destination) { Debug.Assert(points != null); Debug.Assert(destination != null); Debug.Assert(points.Length == destination.Length); for (var i = 0; i < points.Length; ++i) { destination[i] = this.WorldPointToScreen(points[i]); } } } CameraFixture.cs: using Microsoft.Xna.Framework.Graphics; using System; using System.Collections.Generic; using System.Linq; using System.Windows; using System.Windows.Controls; using System.Windows.Media; using Xunit; using XNA = Microsoft.Xna.Framework; public sealed class CameraFixture { [Fact] public void foo() { var camera = new Camera(new Viewport(0, 0, 250, 100)); DrawingVisual worldRender; DrawingVisual viewRender; DrawingVisual screenRender; this.Render( camera, out worldRender, out viewRender, out screenRender, new Sphere(30, 15) { WorldMatrix = XNA.Matrix.CreateTranslation(155, 50, 0) }, new Cube(30) { WorldMatrix = XNA.Matrix.CreateTranslation(75, 60, 15) }, new PolyLine(new XNA.Vector3(0, 0, 0), new XNA.Vector3(10, 10, 0), new XNA.Vector3(20, 0, 0), new XNA.Vector3(0, 0, 0)) { WorldMatrix = 
XNA.Matrix.CreateTranslation(65, 55, 30) }); this.ShowRenders(worldRender, viewRender, screenRender); } #region Supporting Fields private static readonly Pen xAxisPen = new Pen(Brushes.Red, 2); private static readonly Pen yAxisPen = new Pen(Brushes.Green, 2); private static readonly Pen zAxisPen = new Pen(Brushes.Blue, 2); private static readonly Pen viewportPen = new Pen(Brushes.Gray, 1); private static readonly Pen nonScreenSpacePen = new Pen(Brushes.Black, 0.5); private static readonly Color geometryBaseColor = Colors.Black; #endregion #region Supporting Methods private void Render(Camera camera, out DrawingVisual worldRender, out DrawingVisual viewRender, out DrawingVisual screenRender, params Geometry[] geometries) { var worldDrawingVisual = new DrawingVisual(); var viewDrawingVisual = new DrawingVisual(); var screenDrawingVisual = new DrawingVisual(); const int axisLength = 15; using (var worldDrawingContext = worldDrawingVisual.RenderOpen()) using (var viewDrawingContext = viewDrawingVisual.RenderOpen()) using (var screenDrawingContext = screenDrawingVisual.RenderOpen()) { // draw lines around the camera's viewport var viewportBounds = camera.Viewport.Bounds; var viewportLines = new Tuple<int, int, int, int>[] { Tuple.Create(viewportBounds.Left, viewportBounds.Bottom, viewportBounds.Left, viewportBounds.Top), Tuple.Create(viewportBounds.Left, viewportBounds.Top, viewportBounds.Right, viewportBounds.Top), Tuple.Create(viewportBounds.Right, viewportBounds.Top, viewportBounds.Right, viewportBounds.Bottom), Tuple.Create(viewportBounds.Right, viewportBounds.Bottom, viewportBounds.Left, viewportBounds.Bottom) }; foreach (var viewportLine in viewportLines) { var viewStart = XNA.Vector3.Transform(new XNA.Vector3(viewportLine.Item1, viewportLine.Item2, 0), camera.ViewMatrix); var viewEnd = XNA.Vector3.Transform(new XNA.Vector3(viewportLine.Item3, viewportLine.Item4, 0), camera.ViewMatrix); var screenStart = camera.WorldPointToScreen(new XNA.Vector3(viewportLine.Item1, viewportLine.Item2, 0)); var screenEnd = camera.WorldPointToScreen(new XNA.Vector3(viewportLine.Item3, viewportLine.Item4, 0)); worldDrawingContext.DrawLine(viewportPen, new Point(viewportLine.Item1, viewportLine.Item2), new Point(viewportLine.Item3, viewportLine.Item4)); viewDrawingContext.DrawLine(viewportPen, new Point(viewStart.X, viewStart.Y), new Point(viewEnd.X, viewEnd.Y)); screenDrawingContext.DrawLine(viewportPen, new Point(screenStart.X, screenStart.Y), new Point(screenEnd.X, screenEnd.Y)); } // draw axes var axisLines = new Tuple<int, int, int, int, int, int, Pen>[] { Tuple.Create(0, 0, 0, axisLength, 0, 0, xAxisPen), Tuple.Create(0, 0, 0, 0, axisLength, 0, yAxisPen), Tuple.Create(0, 0, 0, 0, 0, axisLength, zAxisPen) }; foreach (var axisLine in axisLines) { var viewStart = XNA.Vector3.Transform(new XNA.Vector3(axisLine.Item1, axisLine.Item2, axisLine.Item3), camera.ViewMatrix); var viewEnd = XNA.Vector3.Transform(new XNA.Vector3(axisLine.Item4, axisLine.Item5, axisLine.Item6), camera.ViewMatrix); var screenStart = camera.WorldPointToScreen(new XNA.Vector3(axisLine.Item1, axisLine.Item2, axisLine.Item3)); var screenEnd = camera.WorldPointToScreen(new XNA.Vector3(axisLine.Item4, axisLine.Item5, axisLine.Item6)); worldDrawingContext.DrawLine(axisLine.Item7, new Point(axisLine.Item1, axisLine.Item2), new Point(axisLine.Item4, axisLine.Item5)); viewDrawingContext.DrawLine(axisLine.Item7, new Point(viewStart.X, viewStart.Y), new Point(viewEnd.X, viewEnd.Y)); screenDrawingContext.DrawLine(axisLine.Item7, new 
Point(screenStart.X, screenStart.Y), new Point(screenEnd.X, screenEnd.Y)); } // for all points in all geometries to be rendered, find the closest and furthest away from the camera so we can lighten lines that are further away var distancesToAllGeometrySections = from geometry in geometries let geometryViewMatrix = geometry.WorldMatrix * camera.ViewMatrix from section in geometry.Sections from point in new XNA.Vector3[] { section.Item1, section.Item2 } let viewPoint = XNA.Vector3.Transform(point, geometryViewMatrix) select viewPoint.Length(); var furthestDistance = distancesToAllGeometrySections.Max(); var closestDistance = distancesToAllGeometrySections.Min(); var deltaDistance = Math.Max(0.000001f, furthestDistance - closestDistance); // draw each geometry for (var i = 0; i < geometries.Length; ++i) { var geometry = geometries[i]; // there's probably a more correct name for this, but basically this gets the geometry relative to the camera so we can check how far away each point is from the camera var geometryViewMatrix = geometry.WorldMatrix * camera.ViewMatrix; // we order roughly by those sections furthest from the camera to those closest, so that the closer ones "overwrite" the ones further away var orderedSections = from section in geometry.Sections let startPointRelativeToCamera = XNA.Vector3.Transform(section.Item1, geometryViewMatrix) let endPointRelativeToCamera = XNA.Vector3.Transform(section.Item2, geometryViewMatrix) let startPointDistance = startPointRelativeToCamera.Length() let endPointDistance = endPointRelativeToCamera.Length() orderby (startPointDistance + endPointDistance) descending select new { Section = section, DistanceToStart = startPointDistance, DistanceToEnd = endPointDistance }; foreach (var orderedSection in orderedSections) { var start = XNA.Vector3.Transform(orderedSection.Section.Item1, geometry.WorldMatrix); var end = XNA.Vector3.Transform(orderedSection.Section.Item2, geometry.WorldMatrix); var viewStart = XNA.Vector3.Transform(start, camera.ViewMatrix); var viewEnd = XNA.Vector3.Transform(end, camera.ViewMatrix); worldDrawingContext.DrawLine(nonScreenSpacePen, new Point(start.X, start.Y), new Point(end.X, end.Y)); viewDrawingContext.DrawLine(nonScreenSpacePen, new Point(viewStart.X, viewStart.Y), new Point(viewEnd.X, viewEnd.Y)); // screen rendering is more complicated purely because I wanted geometry to fade the further away it is from the camera // otherwise, it's very hard to tell whether the rendering is actually correct or not var startDistanceRatio = (orderedSection.DistanceToStart - closestDistance) / deltaDistance; var endDistanceRatio = (orderedSection.DistanceToEnd - closestDistance) / deltaDistance; // lerp towards white based on distance from camera, but only to a maximum of 90% var startColor = Lerp(geometryBaseColor, Colors.White, startDistanceRatio * 0.9f); var endColor = Lerp(geometryBaseColor, Colors.White, endDistanceRatio * 0.9f); var screenStart = camera.WorldPointToScreen(start); var screenEnd = camera.WorldPointToScreen(end); var brush = new LinearGradientBrush { StartPoint = new Point(screenStart.X, screenStart.Y), EndPoint = new Point(screenEnd.X, screenEnd.Y), MappingMode = BrushMappingMode.Absolute }; brush.GradientStops.Add(new GradientStop(startColor, 0)); brush.GradientStops.Add(new GradientStop(endColor, 1)); var pen = new Pen(brush, 1); brush.Freeze(); pen.Freeze(); screenDrawingContext.DrawLine(pen, new Point(screenStart.X, screenStart.Y), new Point(screenEnd.X, screenEnd.Y)); } } } worldRender = worldDrawingVisual; 
viewRender = viewDrawingVisual; screenRender = screenDrawingVisual; } private static float Lerp(float start, float end, float amount) { var difference = end - start; var adjusted = difference * amount; return start + adjusted; } private static Color Lerp(Color color, Color to, float amount) { var sr = color.R; var sg = color.G; var sb = color.B; var er = to.R; var eg = to.G; var eb = to.B; var r = (byte)Lerp(sr, er, amount); var g = (byte)Lerp(sg, eg, amount); var b = (byte)Lerp(sb, eb, amount); return Color.FromArgb(255, r, g, b); } private void ShowRenders(DrawingVisual worldRender, DrawingVisual viewRender, DrawingVisual screenRender) { var itemsControl = new ItemsControl(); itemsControl.Items.Add(new HeaderedContentControl { Header = "World", Content = new DrawingVisualHost(worldRender)}); itemsControl.Items.Add(new HeaderedContentControl { Header = "View", Content = new DrawingVisualHost(viewRender) }); itemsControl.Items.Add(new HeaderedContentControl { Header = "Screen", Content = new DrawingVisualHost(screenRender) }); var window = new Window { Title = "Renders", Content = itemsControl, ShowInTaskbar = true, SizeToContent = SizeToContent.WidthAndHeight }; window.ShowDialog(); } #endregion #region Supporting Types // stupidly simple 3D geometry class, consisting of a series of sections that will be connected by lines private abstract class Geometry { public abstract IEnumerable<Tuple<XNA.Vector3, XNA.Vector3>> Sections { get; } public XNA.Matrix WorldMatrix { get; set; } } private sealed class Line : Geometry { private readonly XNA.Vector3 magnitude; public Line(XNA.Vector3 magnitude) { this.magnitude = magnitude; } public override IEnumerable<Tuple<XNA.Vector3, XNA.Vector3>> Sections { get { yield return Tuple.Create(XNA.Vector3.Zero, this.magnitude); } } } private sealed class PolyLine : Geometry { private readonly XNA.Vector3[] points; public PolyLine(params XNA.Vector3[] points) { this.points = points; } public override IEnumerable<Tuple<XNA.Vector3, XNA.Vector3>> Sections { get { if (this.points.Length < 2) { yield break; } var end = this.points[0]; for (var i = 1; i < this.points.Length; ++i) { var start = end; end = this.points[i]; yield return Tuple.Create(start, end); } } } } private sealed class Cube : Geometry { private readonly float size; public Cube(float size) { this.size = size; } public override IEnumerable<Tuple<XNA.Vector3, XNA.Vector3>> Sections { get { var halfSize = this.size / 2; var frontBottomLeft = new XNA.Vector3(-halfSize, halfSize, -halfSize); var frontBottomRight = new XNA.Vector3(halfSize, halfSize, -halfSize); var frontTopLeft = new XNA.Vector3(-halfSize, halfSize, halfSize); var frontTopRight = new XNA.Vector3(halfSize, halfSize, halfSize); var backBottomLeft = new XNA.Vector3(-halfSize, -halfSize, -halfSize); var backBottomRight = new XNA.Vector3(halfSize, -halfSize, -halfSize); var backTopLeft = new XNA.Vector3(-halfSize, -halfSize, halfSize); var backTopRight = new XNA.Vector3(halfSize, -halfSize, halfSize); // front face yield return Tuple.Create(frontBottomLeft, frontBottomRight); yield return Tuple.Create(frontBottomLeft, frontTopLeft); yield return Tuple.Create(frontTopLeft, frontTopRight); yield return Tuple.Create(frontTopRight, frontBottomRight); // left face yield return Tuple.Create(frontTopLeft, backTopLeft); yield return Tuple.Create(backTopLeft, backBottomLeft); yield return Tuple.Create(backBottomLeft, frontBottomLeft); // right face yield return Tuple.Create(frontTopRight, backTopRight); yield return Tuple.Create(backTopRight, 
backBottomRight); yield return Tuple.Create(backBottomRight, frontBottomRight); // back face yield return Tuple.Create(backBottomLeft, backBottomRight); yield return Tuple.Create(backTopLeft, backTopRight); } } } private sealed class Sphere : Geometry { private readonly float radius; private readonly int subsections; public Sphere(float radius, int subsections) { this.radius = radius; this.subsections = subsections; } public override IEnumerable<Tuple<XNA.Vector3, XNA.Vector3>> Sections { get { var latitudeLines = this.subsections; var longitudeLines = this.subsections; // see http://stackoverflow.com/a/4082020/5380 var results = from latitudeLine in Enumerable.Range(0, latitudeLines) from longitudeLine in Enumerable.Range(0, longitudeLines) let latitudeRatio = latitudeLine / (float)latitudeLines let longitudeRatio = longitudeLine / (float)longitudeLines let nextLatitudeRatio = (latitudeLine + 1) / (float)latitudeLines let nextLongitudeRatio = (longitudeLine + 1) / (float)longitudeLines let z1 = Math.Cos(Math.PI * latitudeRatio) let z2 = Math.Cos(Math.PI * nextLatitudeRatio) let x1 = Math.Sin(Math.PI * latitudeRatio) * Math.Cos(Math.PI * 2 * longitudeRatio) let y1 = Math.Sin(Math.PI * latitudeRatio) * Math.Sin(Math.PI * 2 * longitudeRatio) let x2 = Math.Sin(Math.PI * nextLatitudeRatio) * Math.Cos(Math.PI * 2 * longitudeRatio) let y2 = Math.Sin(Math.PI * nextLatitudeRatio) * Math.Sin(Math.PI * 2 * longitudeRatio) let x3 = Math.Sin(Math.PI * latitudeRatio) * Math.Cos(Math.PI * 2 * nextLongitudeRatio) let y3 = Math.Sin(Math.PI * latitudeRatio) * Math.Sin(Math.PI * 2 * nextLongitudeRatio) let start = new XNA.Vector3((float)x1 * radius, (float)y1 * radius, (float)z1 * radius) let firstEnd = new XNA.Vector3((float)x2 * radius, (float)y2 * radius, (float)z2 * radius) let secondEnd = new XNA.Vector3((float)x3 * radius, (float)y3 * radius, (float)z1 * radius) select new { First = Tuple.Create(start, firstEnd), Second = Tuple.Create(start, secondEnd) }; foreach (var result in results) { yield return result.First; yield return result.Second; } } } } #endregion }
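
    As a quick, WPF-free way to sanity-check the spaces, the Camera class above can also be driven from a tiny console program. This is only a sketch (it assumes the same XNA references used by Camera.cs, and the chosen points are illustrative):

    using System;
    using Microsoft.Xna.Framework;
    using Microsoft.Xna.Framework.Graphics;

    static class CameraCheck
    {
        static void Main()
        {
            var camera = new Camera(new Viewport(0, 0, 250, 100));

            // With both spaces using a top-left origin, the world origin should
            // project toward the top-left of the screen and the opposite corner
            // toward the bottom-right; a mirrored result means a flip is still
            // missing in the view or projection matrix.
            Console.WriteLine(camera.WorldPointToScreen(new Vector3(0, 0, 0)));
            Console.WriteLine(camera.WorldPointToScreen(new Vector3(250, 100, 0)));
        }
    }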

    Read the article

  • UCM 11g is 4 days old!

    - by kyle.hatlestad
    Ok...so I missed posting a blog entry when UCM 11g and the entire ECM suite were released on Tuesday. Hopefully you've already seen the announcements on any number of the Oracle ECM blogs out there such as ECM Alerts, Fusion ECM, bex huff, or C4. So I won't bore you with the same talking points like 179 million check-ins per day or 124 web site page hits per second. Instead, I thought I'd show some screenshots of the new features in UCM and URM 11g. WebLogic Server and Enterprise Manager So probably the biggest change in 11g is that UCM and URM now run on top of the WebLogic Server application server. This is a huge step as ECM is now on a standard platform with the rest of Oracle Fusion Middleware, which makes installation, configuration, and integration consistent among all the products. From a feature perspective, it's also beneficial because it's now integrated with Oracle Enterprise Manager. Enterprise Manager provides a lot of provisioning control over servers as well as performance monitoring and access to logs and debugging information. Desktop Integration Suite Desktop Integration Suite got a complete overhaul for 11g. It exposes a lot more features within Windows Explorer such as saved searches, workflow queue, and checked-out items. It also now supports metadata pop-up screens to let users fill in additional metadata when they drag-n-drop files in! And the integration within Office applications has changed significantly by introducing a dedicated UCM menu to do open, save, compare, etc. Site Studio for External Applications In UCM Site Studio 10gR4, a major architectural shift was introduced which brought several new objects such as elements, region definitions, region templates, and placeholder definitions. This truly separated the content from the display and from the definition. It also allowed separation of the content from needing to be rendered on a complete Site Studio page. Well, the new Site Studio for External Applications takes advantage of that architecture and introduces pre-built tags and plug-ins to JDeveloper that allow you to go from simply adding a content area to your web application page to building an entire web site, just like you would have done in Site Studio Designer. In addition to these changes, enhancements to the core Site Studio have been added as well. One of the big ones is called Designer Mode, which allows power-users to bypass the standard rules defined by the placeholder definition or template and perform any number of additional actions. This reduces the need to go back to Site Studio Designer or JDeveloper to make more advanced changes to the site. Dashboards As part of the updated records management functionality in both UCM and URM, users can now set a dashboard view on their home page to surface common functions in a single view. It has pre-built "portlets" users can choose from to display and organize the way they want. Behind the scenes, these dashboards are stored as Content Folios. So the dashboards themselves are content items that can be revisioned and shared between users. And new dashboard portlets can be easily added (like the User Profile one in the screenshots) by getting a copy of an existing one, modifying the display, and then checking it in as a new one to select from. URM Interface Enhancements URM includes several new UI and usability enhancements in 11g. There is a new view for physical records, a place to configure "favorite" items to quickly get to, and new placement of the records management menu. 
BI Publisher Reports Records management in UCM and URM now offer reports generated through embedded BI Publisher. Templates are controlled by rich text files checked directly into the repository, so they can be easily modified. Other Features A new Inbound Refinery conversion option is available that does native Microsoft Office HTML conversion. If your IBR is on Windows and you have the native applications loaded, the IBR can use them to produce HTML. A new GUI template editor for Dynamic Converter is available. It's written in Java so is available through all the supported browsers and platforms. The original ActiveX based editor is also still available. The Component Manager interface has changed to help provide an easier and more descriptive way to enable core components that are installed along with UCM. All of the supported components are immediately available to turn on and do not have to be installed separately as in previous versions. My Downloads is located in the My Content Server menu and provides for easy download of client installs including Desktop Integration Suite and Site Studio Designer. Well, hopefully that gives you a taste for some of the new things in 11g. We're all pretty excited here at Oracle about all the new changes and enhancements. Over the next few months I hope to highlight some of these features more in-depth, so keep your eye out for those posts.

    Read the article

  • ASP.NET MVC 3: Razor’s @: and <text> syntax

    - by ScottGu
    This is another in a series of posts I’m doing that cover some of the new ASP.NET MVC 3 features: New @model keyword in Razor (Oct 19th) Layouts with Razor (Oct 22nd) Server-Side Comments with Razor (Nov 12th) Razor’s @: and <text> syntax (today) In today’s post I’m going to discuss two useful syntactical features of the new Razor view-engine – the @: and <text> syntax support. Fluid Coding with Razor ASP.NET MVC 3 ships with a new view-engine option called “Razor” (in addition to the existing .aspx view engine).  You can learn more about Razor, why we are introducing it, and the syntax it supports from my Introducing Razor blog post.  Razor minimizes the number of characters and keystrokes required when writing a view template, and enables a fast, fluid coding workflow. Unlike most template syntaxes, you do not need to interrupt your coding to explicitly denote the start and end of server blocks within your HTML. The Razor parser is smart enough to infer this from your code. This enables a compact and expressive syntax which is clean, fast and fun to type. For example, the Razor snippet below can be used to iterate a list of products: When run, it generates output like:   One of the techniques that Razor uses to implicitly identify when a code block ends is to look for tag/element content to denote the beginning of a content region.  For example, in the code snippet above Razor automatically treated the inner <li></li> block within our foreach loop as an HTML content block because it saw the opening <li> tag sequence and knew that it couldn’t be valid C#.  This particular technique – using tags to identify content blocks within code – is one of the key ingredients that makes Razor so clean and productive with scenarios involving HTML creation. Using @: to explicitly indicate the start of content Not all content container blocks start with a tag element tag, though, and there are scenarios where the Razor parser can’t implicitly detect a content block. Razor addresses this by enabling you to explicitly indicate the beginning of a line of content by using the @: character sequence within a code block.  The @: sequence indicates that the line of content that follows should be treated as a content block: As a more practical example, the below snippet demonstrates how we could output a “(Out of Stock!)” message next to our product name if the product is out of stock: Because I am not wrapping the (Out of Stock!) message in an HTML tag element, Razor can’t implicitly determine that the content within the @if block is the start of a content block.  We are using the @: character sequence to explicitly indicate that this line within our code block should be treated as content. Using Code Nuggets within @: content blocks In addition to outputting static content, you can also have code nuggets embedded within a content block that is initiated using a @: character sequence.  For example, we have two @: sequences in the code snippet below: Notice how within the second @: sequence we are emitting the number of units left within the content block (e.g. - “(Only 3 left!”). We are doing this by embedding a @p.UnitsInStock code nugget within the line of content. Multiple Lines of Content Razor makes it easy to have multiple lines of content wrapped in an HTML element.  
For example, below the inner content of our @if container is wrapped in an HTML <p> element – which will cause Razor to treat it as content: For scenarios where the multiple lines of content are not wrapped by an outer HTML element, you can use multiple @: sequences: Alternatively, Razor also allows you to use a <text> element to explicitly identify content: The <text> tag is an element that is treated specially by Razor. It causes Razor to interpret the inner contents of the <text> block as content, and to not render the containing <text> tag element (meaning only the inner contents of the <text> element will be rendered – the tag itself will not).  This makes it convenient when you want to render multi-line content blocks that are not wrapped by an HTML element.  The <text> element can also optionally be used to denote single-lines of content, if you prefer it to the more concise @: sequence: The above code will render the same output as the @: version we looked at earlier.  Razor will automatically omit the <text> wrapping element from the output and just render the content within it.  Summary Razor enables a clean and concise templating syntax that enables a very fluid coding workflow.  Razor’s smart detection of <tag> elements to identify the beginning of content regions is one of the reasons that the Razor approach works so well with HTML generation scenarios, and it enables you to avoid having to explicitly mark the beginning/ending of content regions in about 95% of if/else and foreach scenarios. Razor’s @: and <text> syntax can then be used for scenarios where you want to avoid using an HTML element within a code container block, and need to more explicitly denote a content region. Hope this helps, Scott P.S. In addition to blogging, I am also now using Twitter for quick updates and to share links. Follow me at: twitter.com/scottgu
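    The code screenshots from the original post are not reproduced in this excerpt, so the following is a rough sketch of the two techniques; the Model.Products collection and its Name/UnitsInStock properties are illustrative and not from the original post:

        @foreach (var p in Model.Products) {
            if (p.UnitsInStock == 0) {
                @:@p.Name (Out of Stock!)
            }
            else {
                <text>
                    @p.Name
                    (Only @p.UnitsInStock left!)
                </text>
            }
        }

    The @: line and the <text> block both tell the Razor parser "this is content, not C#", which is exactly the escape hatch the post describes for content lines that do not start with an HTML tag.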

    Read the article

  • The Dark Knight and Team Fortress 2 Mashup Movie Trailer [Video]

    - by Asian Angel
    If you are a Batman fan then you will find this video mashup interesting to watch. YouTube user TrueOneMoreUser has created a unique Dark Knight Trailer using characters from Team Fortress 2 and select scenes from The Dark Knight movie itself. Team Fortress 2 – The Demo Knight [via Geeks are Sexy]

    Read the article

  • How do I resize partitions using the simple installation wizard (installing a second Ubuntu)?

    - by d3vid
    I'm running 11.10 and installing 12.04 LTS Beta 1 off a DVD. Using the installation wizard, I picked "Install 12.04 LTS alongside 11.10". I am presented with a slider with approx 240GB on the left side and 60GB on the right. No other labels are present. I don't want to use the advanced partitioning tool. Which side is which Ubuntu? If it's relevant: I am installing only for testing purposes (I've been caught by kernel regressions before), so I want to give 12.04 the minimal amount of space required. Once the final release is made, and I've tested that too, my plan is to remove the second partition and upgrade 11.10 to 12.04.

    Read the article

  • This Week In Geek History: The Hitchhiker’s Guide, Compact Discs, and Whirlwind Foreshadows Operating Systems

    - by Jason Fitzpatrick
    Every week we look at fascinating facts and trivia from the history of Geekdom. This week we’re taking a look at The Hitchhiker’s Guide to the Galaxy, Compact Discs, and Whirlwind, the first computer to foreshadow modern operating systems.

    Read the article

  • juju spends bootstrap-timeout with a final message it cannot find /var/lib/juju/nonce.txt

    - by user285199
    I built two VMware machines: the first one with MAAS, the second one with a fresh installation from MAAS. The region controller was installed with the Ubuntu 12.04 distribution and then upgraded. The compute node was installed from MAAS with Quantal 12.10. Juju was installed and upgraded to 1.18 (from the ppa:juju/stable repository). MAAS was upgraded from the cloud-archive:tools repository. In debug mode, I saw how Juju connects to the node. Then I ran the same command: ssh -o "StrictHostKeyChecking no" -o "PasswordAuthentication no" -i /home/lliurex/.juju/ssh/juju_id_rsa -i /home/lliurex/.ssh/id_rsa [email protected] /bin/bash It worked (with and without /bin/bash). When Juju exhausts the bootstrap-timeout, it reports that it could not find the /var/lib/juju/nonce.txt file. It's true, that file doesn't exist. It doesn't matter whether you set a timeout of 1800, 3600 or 72000; it always ends the same way.

    Read the article

  • Installing SharePoint 2013 on Windows 2012- standalone installation

    - by sreejukg
    In this article, I am going to share my experience while installing SharePoint 2013 on Windows 2012. This was the first time I tried SharePoint 2013. So I thought sharing the same will benefit somebody who would like to install SharePoint 2013 as a standalone installation. Standalone installation is meant for evaluation/development purposes. For production environments, you need to follow the best practices and create required service accounts. Microsoft has published the deployment guide for SharePoint 2013, you can download this from the below link. http://www.microsoft.com/en-us/download/details.aspx?id=30384 Since this is for development environment, I am not going to create any service account, I logged in to Windows 2012 as an administrator and just placed my installation DVD on the drive. When I run the setup from the DVD, the below splash screen appears. This reflects the new UI changes happening with all Microsoft based applications; the interface matches the metro style applications (Windows 8 style). As you can see the options are same as that of the SharePoint 2010 installation screen. Click on the “install software prerequisites” link to get all the prerequisites get installed. You need a valid internet connection to do this. Clicking on the install software prerequisites will bring the following dialog. Click Next, you will see the terms and conditions. Select I accept check box and click Next. The installation will start immediately. For any reason, if you stop the installation and start it later, the product preparation tool will check whether a particular component is installed and if yes, then the installation of that particular component will be skipped. If you do not have internet connection, you will face the download error as follows. At any point of failure, the error log will be available for you to review. If all OK, you will reach the below dialog, this means some components will be installed once the PC is rebooted. Be noted that the clicking on finish will not ask you for further confirmation. So make sure to save all your work before clicking on finish button. Once the server is restarted, the product preparation tool will start automatically and you will see the following dialog. Now go to the SharePoint 2013 splash page and click on “Install SharePoint Server” link. You need to enter the product key here. Enter the product key as you received and click continue. Select the Checkbox for the license agreement and click on continue button. Now you need to select the installation type. Select Stand-alone and click on “Install Now” button. A dialog will pop up that updates you with the process and progress. The installation took around 15-20 minutes with 2 GB or Ram installed in the server, seems fair. Once the installation is over, you will see the following Dialog. Make sure you select the Run the products and configuration wizard. If you miss to select the check box, you can find the products and configuration wizard from the start tiles. The products and configuration wizard will start. If you get any dialog saying some of the services will be stopped, you just accept it. Since we selected standalone installation, it will not ask for any user input, as it already knows the database to be configured. Once the configuration is over without any problems you will see the configuration successful message. Also you can find the link to central administration on the Start Screen.     Troubleshooting During my first setup process, I got the below error. 
System.ArgumentException: The SDDL string contains an invalid sid or a sid that cannot be translated. Parameter name: sddlForm at System.Security.AccessControl.RawSecurityDescriptor.BinaryFormFromSddlForm(String sddlForm) at System.Security.AccessControl.RawSecurityDescriptor..ctor(String sddlForm) at Microsoft.SharePoint.Win32.SPNetApi32.CreateShareSecurityDescriptor(String[] readNames, String[] changeNames, String[] fullControlNames, String& sddl) at Microsoft.SharePoint.Win32.SPNetApi32.CreateFileShare(String name, String description, String path) at Microsoft.SharePoint.Administration.SPServer.CreateFileShare(String name, String description, String path) at Microsoft.Office.Server.Search.Administration.AnalyticsAdministration.CreateAnalyticsUNCShare(String dirParentLocation, String shareName) at Microsoft.Office.Server.Search.Administration.AnalyticsAdministration.ProvisionAnalyticsShare(SearchServiceApplication serviceApplication) ………………………………………… ………………………………………… The configuration wizard displayed the error as below. The error occurred in step 8 of the configuration wizard, and by that point Central Administration had already been provisioned. So from the start I was able to open the Central Administration website, but the Search service application was showing an error. I found a good blog post that explains the reason for the error: http://kbdump.com/sharepoint2013-standalone-config-error-create-sample-data/ The workaround specified in the blog works fine. I think SharePoint must be provisioning Search using the Network Service account, so instead of giving permission to everyone, you could try giving permission to the Network Service account (I didn't try this yet, but you could try it and post your feedback here). In a production environment you will have specific accounts with access rights as recommended by Microsoft guidelines. Installation of SharePoint 2013 is pretty straightforward. Hope you enjoyed the article!

    Read the article

  • System locking up with suspicious messages about hard disk

    - by Chris Conway
    My system has started behaving strangely, intermittently locking up. I see messages like the following in syslog: Nov 18 22:22:00 claypool kernel: [ 3428.078156] ata3.00: exception Emask 0x0 SAct 0x0 SErr 0x0 action 0x0 Nov 18 22:22:00 claypool kernel: [ 3428.078163] ata3.00: irq_stat 0x40000000 Nov 18 22:22:00 claypool kernel: [ 3428.078167] sr 2:0:0:0: CDB: Test Unit Ready: 00 00 00 00 00 00 Nov 18 22:22:00 claypool kernel: [ 3428.078182] ata3.00: cmd a0/00:00:00:00:00/00:00:00:00:00/a0 tag 0 Nov 18 22:22:00 claypool kernel: [ 3428.078184] res 50/00:03:00:00:00/00:00:00:00:00/a0 Emask 0x1 (device error) Nov 18 22:22:00 claypool kernel: [ 3428.078188] ata3.00: status: { DRDY } Nov 18 22:22:00 claypool kernel: [ 3428.080887] ata3.00: exception Emask 0x0 SAct 0x0 SErr 0x0 action 0x0 Nov 18 22:22:00 claypool kernel: [ 3428.080890] ata3.00: irq_stat 0x40000000 Nov 18 22:22:00 claypool kernel: [ 3428.080893] sr 2:0:0:0: CDB: Test Unit Ready: 00 00 00 00 00 00 Nov 18 22:22:00 claypool kernel: [ 3428.080905] ata3.00: cmd a0/00:00:00:00:00/00:00:00:00:00/a0 tag 0 Nov 18 22:22:00 claypool kernel: [ 3428.080906] res 50/00:03:00:00:00/00:00:00:00:00/a0 Emask 0x1 (device error) Nov 18 22:22:00 claypool kernel: [ 3428.080910] ata3.00: status: { DRDY } And then this: Nov 18 23:13:56 claypool kernel: [ 6544.000798] ata1.00: exception Emask 0x0 SAct 0x0 SErr 0x0 action 0x6 frozen Nov 18 23:13:56 claypool kernel: [ 6544.000804] ata1.00: failed command: FLUSH CACHE EXT Nov 18 23:13:56 claypool kernel: [ 6544.000814] ata1.00: cmd ea/00:00:00:00:00/00:00:00:00:00/a0 tag 0 Nov 18 23:13:56 claypool kernel: [ 6544.000815] res 40/00:00:00:4f:c2/00:00:00:00:00/40 Emask 0x4 (timeout) Nov 18 23:13:56 claypool kernel: [ 6544.000819] ata1.00: status: { DRDY } Nov 18 23:13:56 claypool kernel: [ 6544.000825] ata1: hard resetting link Nov 18 23:14:01 claypool kernel: [ 6549.360324] ata1: link is slow to respond, please be patient (ready=0) Nov 18 23:14:06 claypool kernel: [ 6554.008091] ata1: COMRESET failed (errno=-16) Nov 18 23:14:06 claypool kernel: [ 6554.008103] ata1: hard resetting link Nov 18 23:14:11 claypool kernel: [ 6559.372246] ata1: link is slow to respond, please be patient (ready=0) Nov 18 23:14:16 claypool kernel: [ 6564.020228] ata1: COMRESET failed (errno=-16) Nov 18 23:14:16 claypool kernel: [ 6564.020235] ata1: hard resetting link Nov 18 23:14:21 claypool kernel: [ 6569.380109] ata1: link is slow to respond, please be patient (ready=0) Nov 18 23:14:31 claypool kernel: [ 6579.460243] ata1: SATA link up 3.0 Gbps (SStatus 123 SControl 300) Nov 18 23:14:31 claypool kernel: [ 6579.486595] ata1.00: configured for UDMA/133 Nov 18 23:14:31 claypool kernel: [ 6579.486601] ata1.00: retrying FLUSH 0xea Emask 0x4 Nov 18 23:14:31 claypool kernel: [ 6579.486939] ata1.00: device reported invalid CHS sector 0 Nov 18 23:14:31 claypool kernel: [ 6579.486952] ata1: EH complete Nov 18 23:17:01 claypool CRON[3910]: (root) CMD ( cd / && run-parts --report /etc/cron.hourly) Nov 18 23:17:01 claypool CRON[3908]: (CRON) error (grandchild #3910 failed with exit status 1) Nov 18 23:17:01 claypool postfix/sendmail[3925]: fatal: open /etc/postfix/main.cf: No such file or directory Nov 18 23:17:01 claypool CRON[3908]: (root) MAIL (mailed 1 byte of output; but got status 0x004b, #012) Nov 18 23:39:01 claypool CRON[4200]: (root) CMD ( [ -x /usr/lib/php5/maxlifetime ] && [ -d /var/lib/php5 ] && find /var/lib/php5/ -type f -cmin +$(/usr/lib/php5/maxlifetime) -print0 | xargs -n 200 -r -0 rm) There are no messages marked after 
23:39. When I next tried to use the machine, it would not return from the screensaver (blank screen), nor switch to another terminal, and I had to hard reboot it. [UPDATE] The output of smartctl is here. I had trouble getting this, because / is being mounted read-only (?!), which prevents most applications from running. Also, it may not be related, but I have the following worrying messages in dmesg: [ 10.084596] k8temp 0000:00:18.3: Temperature readouts might be wrong - check erratum #141 [ 10.098477] i2c i2c-0: nForce2 SMBus adapter at 0x600 [ 10.098483] ACPI: resource nForce2_smbus [io 0x0700-0x073f] conflicts with ACPI region SM00 [??? 0x00000700-0x0000073f flags 0x30] [ 10.098486] ACPI: This conflict may cause random problems and system instability [ 10.098487] ACPI: If an ACPI driver is available for this device, you should use it instead of the native driver [ 10.098509] i2c i2c-1: nForce2 SMBus adapter at 0x700 [ 10.112570] Linux agpgart interface v0.103 [ 10.155329] atk: Resources not safely usable due to acpi_enforce_resources kernel parameter [ 10.161506] it87: Found IT8712F chip at 0x290, revision 8 [ 10.161517] it87: VID is disabled (pins used for GPIO) [ 10.161527] it87: in3 is VCC (+5V) [ 10.161528] it87: in7 is VCCH (+5V Stand-By) [ 10.161560] ACPI: resource it87 [io 0x0295-0x0296] conflicts with ACPI region ECRE [??? 0x00000290-0x000002af flags 0x45] [ 10.161562] ACPI: This conflict may cause random problems and system instability [ 10.161564] ACPI: If an ACPI driver is available for this device, you should use it instead of the native driver [UPDATE 2] I swapped in a new SATA cable, per Phil's suggestion. The current output of smartctl is here, if it helps. [UPDATE 3] I don't think the cable fixed it. The system hasn't locked up yet, but my media player crashed a few minutes ago and I have the following in the syslog: Nov 20 16:07:17 claypool kernel: [ 2294.400033] ata1: link is slow to respond, please be patient (ready=0) Nov 20 16:07:47 claypool kernel: [ 2324.084581] ata1: COMRESET failed (errno=-16) Nov 20 16:07:47 claypool kernel: [ 2324.084588] ata1: limiting SATA link speed to 1.5 Gbps Nov 20 16:07:47 claypool kernel: [ 2324.084592] ata1: hard resetting link I get the following response from smartctl: $ sudo smartctl -a /dev/sda [sudo] password for chris: sudo: Can't open /var/lib/sudo/chris/0: Read-only file system smartctl 5.40 2010-03-16 r3077 [i686-pc-linux-gnu] (local build) Copyright (C) 2002-10 by Bruce Allen, http://smartmontools.sourceforge.net Device: /0:0:0:0 Version: scsiModePageOffset: response length too short, resp_len=47 offset=50 bd_len=46 >> Terminate command early due to bad response to IEC mode page A mandatory SMART command failed: exiting. To continue, add one or more '-T permissive' options.
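    As that last error message suggests, one possible next step (assuming the disk is still reachable at /dev/sda once the file system is remounted read-write) would be to re-run smartctl with permissive checks, forcing the ATA device type since the drive appears to be probed as SCSI:

        sudo smartctl -a -T permissive -d ata /dev/sda

    The -d ata flag is only a guess based on the "Device: /0:0:0:0 Version: scsi..." line above; on some controllers -d sat is the right choice instead.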

    Read the article

  • How to Increase the VMWare Boot Screen Delay

    - by Trevor Bekolay
    If you’ve wanted to try out a bootable CD or USB flash drive in a virtual machine environment, you’ve probably noticed that VMWare’s offerings make it difficult to change the boot device. We’ll show you how to change these options. You can do this either for one boot, or permanently for a particular virtual machine. Even experienced users of VMWare Player or Workstation may not recognize the screen above – it’s the virtual machine’s BIOS, which in most cases flashes by in the blink of an eye. If you want to boot up the virtual machine with a CD or USB key instead of the hard drive, then you’ll need more than an eye’s-blink to press Escape and bring up the Boot Menu. Fortunately, there is a way to introduce a boot delay that isn’t exposed in VMWare’s graphical interface – you have to edit the virtual machine’s settings file (a .vmx file) manually. Editing the Virtual Machine’s .vmx Find the .vmx file that contains the settings for your virtual machine. You chose a location for this when you created the virtual machine – in Windows, the default location is a folder called My Virtual Machines in your My Documents folder. In VMWare Workstation, the location of the .vmx file is listed on the virtual machine’s tab. If in doubt, search your hard drive for .vmx files. If you don’t want to use Windows default search, an awesome utility that locates files instantly is Everything. Open the .vmx file with any text editor. Somewhere in this file, enter in the following line… save the file, then close out of the text editor: bios.bootdelay = 20000 This will introduce a 20 second delay when the virtual machine loads up, giving you plenty of time to press the Escape button and access the boot menu. The number in this line is just a value in milliseconds, so for a five second boot delay, enter 5000, and so on. Change Boot Options Temporarily Now, when you boot up your virtual machine, you’ll have plenty of time to enter one of the keystrokes listed at the bottom of the BIOS screen on boot-up. Press Escape to bring up the Boot Menu. This allows you to select a different device to boot from – like a CD drive. Your selection will be forgotten the next time you boot up this virtual machine. Change Boot Options Permanently When the BIOS screen comes up, press F2 to enter the BIOS Setup menu. Switch to the Boot tab, and change the ordering of the items by pressing the “+” key to move items up on the list, and the “-” key to move items down the list. We’ve switched the order so that the CD-ROM Drive boots first. Once you make this change permanent, you may want to re-edit the .vmx file to remove the boot delay. Boot from a USB Flash Drive One thing that is noticeably missing from the list of boot options is a USB device. VMWare’s BIOS just does not allow this, but we can get around that limitation using the PLoP Boot Manager that we’ve previously written about. And as a bonus, since everything is virtual anyway, there’s no need to actually burn PLoP to a CD. Open the settings for the virtual machine you want to boot with a USB drive. Click on Add… at the bottom of the settings screen, and select CD/DVD Drive. Click Next. Click the Use ISO Image radio button, and click Next. Browse to find plpbt.iso or plpbtnoemul.iso from the PLoP zip file. Ensure that Connect at power on is checked, and then click Finish. Click OK on the main Virtual Machine Settings page. Now, if you use the steps above to boot using that CD/DVD drive, PLoP will load, allowing you to boot from a USB drive! 
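    For reference, the relevant portion of an edited .vmx might end up looking roughly like this; the surrounding keys and the ISO path are illustrative and will differ for your virtual machine, and only the bios.bootdelay line is the addition described above:

        displayName = "Ubuntu test VM"
        bios.bootdelay = 20000
        ide1:0.present = "TRUE"
        ide1:0.deviceType = "cdrom-image"
        ide1:0.fileName = "C:\ISOs\plpbt.iso"

    The 20000 value gives the 20 second window mentioned earlier; lower it once you no longer need the delay.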
Conclusion We’re big fans of VMWare Player and Workstation, as they let us try out a ton of geeky things without worrying about harming our systems. By introducing a boot delay, we can add bootable CDs and USB drives to the list of geeky things we can try out. Download PLoP Boot Manager

    Read the article

  • Skinny controller in ASP.NET MVC 4

    - by thangchung
    Rails community are always inspire a lot of best ideas. I really love this community by the time. One of these is "Fat models and skinny controllers". I have spent a lot of time on ASP.NET MVC, and really I did some miss-takes, because I made the controller so fat. That make controller is really dirty and very hard to maintain in the future. It is violate seriously SRP principle and KISS as well. But how can we achieve that in ASP.NET MVC? That question is really clear after I read "Manning ASP.NET MVC 4 in Action". It is simple that we can separate it into ActionResult, and try to implementing logic and persistence data inside this. In last 2 years, I have read this from Jimmy Bogard blog, but in that time I never had a consideration about it. That's enough for talking now. I just published a sample on ASP.NET MVC 4, implemented on Visual Studio 2012 RC at here. I used EF framework at here for implementing persistence layer, and also use 2 free templates from internet to make the UI for this sample. In this sample, I try to implementing the simple magazine website that managing all articles, categories and news. It is not finished at all in this time, but no problems, because I just show you about how can we make the controller skinny. And I wanna hear more about your ideas. The first thing, I am abstract the base ActionResult class like this:    public abstract class MyActionResult : ActionResult, IEnsureNotNull     {         public abstract void EnsureAllInjectInstanceNotNull();     }     public abstract class ActionResultBase<TController> : MyActionResult where TController : Controller     {         protected readonly Expression<Func<TController, ActionResult>> ViewNameExpression;         protected readonly IExConfigurationManager ConfigurationManager;         protected ActionResultBase (Expression<Func<TController, ActionResult>> expr)             : this(DependencyResolver.Current.GetService<IExConfigurationManager>(), expr)         {         }         protected ActionResultBase(             IExConfigurationManager configurationManager,             Expression<Func<TController, ActionResult>> expr)         {             Guard.ArgumentNotNull(expr, "ViewNameExpression");             Guard.ArgumentNotNull(configurationManager, "ConfigurationManager");             ViewNameExpression = expr;             ConfigurationManager = configurationManager;         }         protected ViewResult GetViewResult<TViewModel>(TViewModel viewModel)         {             var m = (MethodCallExpression)ViewNameExpression.Body;             if (m.Method.ReturnType != typeof(ActionResult))             {                 throw new ArgumentException("ControllerAction method '" + m.Method.Name + "' does not return type ActionResult");             }             var result = new ViewResult             {                 ViewName = m.Method.Name             };             result.ViewData.Model = viewModel;             return result;         }         public override void ExecuteResult(ControllerContext context)         {             EnsureAllInjectInstanceNotNull();         }     } I also have an interface for validation all inject objects. This interface make sure all inject objects that I inject using Autofac container are not null. 
The implementation of this as below public interface IEnsureNotNull     {         void EnsureAllInjectInstanceNotNull();     } Afterwards, I am just simple implementing the HomePageViewModelActionResult class like this public class HomePageViewModelActionResult<TController> : ActionResultBase<TController> where TController : Controller     {         #region variables & ctors         private readonly ICategoryRepository _categoryRepository;         private readonly IItemRepository _itemRepository;         private readonly int _numOfPage;         public HomePageViewModelActionResult(Expression<Func<TController, ActionResult>> viewNameExpression)             : this(viewNameExpression,                    DependencyResolver.Current.GetService<ICategoryRepository>(),                    DependencyResolver.Current.GetService<IItemRepository>())         {         }         public HomePageViewModelActionResult(             Expression<Func<TController, ActionResult>> viewNameExpression,             ICategoryRepository categoryRepository,             IItemRepository itemRepository)             : base(viewNameExpression)         {             _categoryRepository = categoryRepository;             _itemRepository = itemRepository;             _numOfPage = ConfigurationManager.GetAppConfigBy("NumOfPage").ToInteger();         }         #endregion         #region implementation         public override void ExecuteResult(ControllerContext context)         {             base.ExecuteResult(context);             var cats = _categoryRepository.GetCategories();             var mainViewModel = new HomePageViewModel();             var headerViewModel = new HeaderViewModel();             var footerViewModel = new FooterViewModel();             var mainPageViewModel = new MainPageViewModel();             headerViewModel.SiteTitle = "Magazine Website";             if (cats != null && cats.Any())             {                 headerViewModel.Categories = cats.ToList();                 footerViewModel.Categories = cats.ToList();             }             mainPageViewModel.LeftColumn = BindingDataForMainPageLeftColumnViewModel();             mainPageViewModel.RightColumn = BindingDataForMainPageRightColumnViewModel();             mainViewModel.Header = headerViewModel;             mainViewModel.DashBoard = new DashboardViewModel();             mainViewModel.Footer = footerViewModel;             mainViewModel.MainPage = mainPageViewModel;             GetViewResult(mainViewModel).ExecuteResult(context);         }         public override void EnsureAllInjectInstanceNotNull()         {             Guard.ArgumentNotNull(_categoryRepository, "CategoryRepository");             Guard.ArgumentNotNull(_itemRepository, "ItemRepository");             Guard.ArgumentMustMoreThanZero(_numOfPage, "NumOfPage");         }         #endregion         #region private functions         private MainPageRightColumnViewModel BindingDataForMainPageRightColumnViewModel()         {             var mainPageRightCol = new MainPageRightColumnViewModel();             mainPageRightCol.LatestNews = _itemRepository.GetNewestItem(_numOfPage).ToList();             mainPageRightCol.MostViews = _itemRepository.GetMostViews(_numOfPage).ToList();             return mainPageRightCol;         }         private MainPageLeftColumnViewModel BindingDataForMainPageLeftColumnViewModel()         {             var mainPageLeftCol = new MainPageLeftColumnViewModel();             var items = _itemRepository.GetNewestItem(_numOfPage);             if (items != null && 
items.Any())             {                 var firstItem = items.First();                 if (firstItem == null)                     throw new NoNullAllowedException("First Item".ToNotNullErrorMessage());                 if (firstItem.ItemContent == null)                     throw new NoNullAllowedException("First ItemContent".ToNotNullErrorMessage());                 mainPageLeftCol.FirstItem = firstItem;                 if (items.Count() > 1)                 {                     mainPageLeftCol.RemainItems = items.Where(x => x.ItemContent != null && x.Id != mainPageLeftCol.FirstItem.Id).ToList();                 }             }             return mainPageLeftCol;         }         #endregion     }  As the final step, I go into HomeController and add a few lines of code like this: [Authorize]     public class HomeController : BaseController     {         [AllowAnonymous]         public ActionResult Index()         {             return new HomePageViewModelActionResult<HomeController>(x=>x.Index());         }         [AllowAnonymous]         public ActionResult Details(int id)         {             return new DetailsViewModelActionResult<HomeController>(x => x.Details(id), id);         }         [AllowAnonymous]         public ActionResult Category(int id)         {             return new CategoryViewModelActionResult<HomeController>(x => x.Category(id), id);         }     } As you can see, the code in the controller is really skinny, and all of the logic has moved into the custom ActionResult class. Some people have said that this just moves the code out of the controller and into another class, so it is still hard to maintain; it can look like we have merely moved the complicated code from one place to another. But if you look at it and think it through in detail, the idea is that logic related to the HttpContext (or similar concerns) can stay in the controller, while everything else (such as business rules and data persistence) is delegated to the custom ActionResult class. Tell me more about what you think; I am really keen to hear it all from you guys. All source code can be found here.

    Read the article

  • Beginner Geek: How To Change the Boot Order in Your Computer’s BIOS

    - by Chris Hoffman
    The boot order in your computer’s BIOS controls which device it loads the operating system from. Modify your boot order to force your computer to boot from a USB drive, CD or DVD drive, or another hard drive. You may need to change this setting when booting from another device, whether you’re running an operating system from a live USB drive or installing a new operating system from a disc. Note: This process will look different on each computer. The instructions here will guide you through the process, but the screenshots won’t look exactly the same.

    Read the article

  • How do I use with LTSP with a Dell FX170 thin client?

    - by v4169sgr
    Just got a reconditioned DELL Optiplex FX170 thin client delivered, without an image. On power-up, I see the message: DISK BOOT FAILURE, INSERT SYSTEM DISK AND PRESS ENTER I am very sure all the wires etc are connected properly. I would like it to PXE boot and find my LTSP installation. I have an HP T5525 with the same connectivity that works fine. On F12 there's a BIOS boot order menu. The only options available though are LS120, the HDD, CD / DVD [via USB presumably], and various USB options. I do not see 'PXE' or any network card. And I don't know how to change the boot order :( Any help appreciated!

    Read the article

  • AJI Report 14 &ndash; Brian Lagunas on XAML and Windows 8

    - by Jeff Julian
    We sat down with Brian at the Iowa Code Camp to talk about his sessions, WPF, Application Design, and what Infragistics has to offer developers. Infragistics is a huge supporter of regional events like Iowa Code Camp and we want to thank them for their support of the Midwest region. Brian is a sharp guy and it was great to meet him and learn more about what makes him tick. Brian Lagunas is an INETA Community Speaker, co-leader of the Boise .Net Developers User Group (NETDUG), and original author of the Extended WPF Toolkit. He is a multi-recipient of the Microsoft Community Contributor Award and can be found speaking at a variety of user groups and code camps around the nation. Brian currently works at Infragistics as a Product Manager for the award winning NetAdvantage for WPF and Silverlight components. Before geeking out, Brian served his country in the United States Army as an infantryman and later served his local community as a deputy sheriff.   Listen to the Show   Site: http://brianlagunas.com Twitter: @BrianLagunas

    Read the article

  • Windows 7 restarts PC when selected from GRUB Menu

    - by Dan Still
    I installed Windows 7 on a RAID 5 array (2 @ 160GB SATA + 1 @ 160GB SATA for RAID 5). I then proceeded to install Ubuntu 11.10 using the Live CD and chose the "Install alongside Windows 7" option. Upon boot, GRUB appears normally and I can select and run Ubuntu with no difficulties. When I select Windows 7 from GRUB, the PC restarts and consequently goes back to GRUB. I have attempted to use the Windows 7 DVD to repair the installation, but to no avail. The wizard ran twice, as it said it might; after the second attempt it came back with an '...inability to repair...' error. I am sure there is an answer to this somewhere, but I have yet to be able to find it (2 weeks of countless attempts and searches before posting this question). Although I am happy to use Ubuntu alone, my wife likes to watch Netflix, and that requires the Windows 7 installation. Any answers are appreciated and welcomed. Thanks in advance. Dan Still

    Read the article

  • How to boot Ubuntu 12.04-64bit from a USB from Compaq CQ58

    - by user208092
    I am trying to boot Ubuntu 12.04 64-bit on my Compaq CQ58 laptop from a USB drive, but it is not working. I've correctly installed Ubuntu on my pen drive following the instructions on the Ubuntu website (http://www.ubuntu.com/download/desktop/create-a-usb-stick-on-windows). These are my BIOS settings: Post Hotkey Delay (sec) <0 CD-ROM Boot Internal Network Adapter Boot Network Boot Protocol Legacy Support Secure Boot Platform Key Enrolled Pending Action None Clear All Secure Boot UEFI Boot Order: USB Diskette on Key/USB Hard Disk OS Boot Manager Internal CD/DVD ROM Drive ! Network Adapter With these settings, when I restart my computer what shows up is: Boot Device Not Found. This is what I get in the Boot Manager: Boot Option Menu OS boot Manager Boot From EFI File (Arrow Up) and (Arrow Down) to change option, ENTER to select an option. Press F10 to BIOS Setup Options, ESC to exit. PLEASE HELP... P.S. My laptop has Windows 8

    Read the article

  • Simple Preferred time control using silverlight 3.

    - by mohanbrij
    Here I am going to show you a simple preferred time control, where you can select the day of the week and the time of the day. This can be used in lots of place where you may need to display the users preferred times. Sample screenshot is attached below. This control is developed using Silverlight 3 and VS2008, I am also attaching the source code with this post. This is a very basic example. You can download and customize if further for your requirement if you want. I am trying to explain in few words how this control works and what are the different ways in which you can customize it further. File: PreferredTimeControl.xaml, in this file I have just hardcoded the controls and their positions which you can see in the screenshot above. In this example, to change the start day of the week and time, you will have to go and change the design in XAML file, its not controlled by your properties or implementation classes. You can also customize it to change the start day of the week, Language, Display format, styles, etc, etc. File: PreferredTimeControl.xaml.cs, In this control using the code below, first I am taking all the checkbox from my form and store it in the Global Variable, which I can use across my page. List<CheckBox> checkBoxList; #region Constructor public PreferredTimeControl() { InitializeComponent(); GetCheckboxes();//Keep all the checkbox in List in the Load itself } #endregion #region Helper Methods private List<CheckBox> GetCheckboxes() { //Get all the CheckBoxes in the Form checkBoxList = new List<CheckBox>(); foreach (UIElement element in LayoutRoot.Children) { if (element.GetType().ToString() == "System.Windows.Controls.CheckBox") { checkBoxList.Add(element as CheckBox); } } return checkBoxList; } Then I am exposing the two methods which you can use in the container form to get and set the values in this controls. /// <summary> /// Set the Availability on the Form, with the Provided Timings /// </summary> /// <param name="selectedTimings">Provided timings comes from the DB in the form 11,12,13....37 /// Where 11 refers to Monday Morning, 12 Tuesday Morning, etc /// Here 1, 2, 3 is for Morning, Afternoon and Evening respectively, and for weekdays /// 1,2,3,4,5,6,7 where 1 is for Monday, Tuesday, Wednesday, Thrusday, Friday, Saturday and Sunday respectively /// So if we want Monday Morning, we can can denote it as 11, similarly for Saturday Evening we can write 36, etc /// </param> public void SetAvailibility(string selectedTimings) { foreach (CheckBox chk in checkBoxList) { chk.IsChecked = false; } if (!String.IsNullOrEmpty(selectedTimings)) { string[] selectedString = selectedTimings.Split(','); foreach (string selected in selectedString) { foreach (CheckBox chk in checkBoxList) { if (chk.Tag.ToString() == selected) { chk.IsChecked = true; } } } } } /// <summary> /// Gets the Availibility from the selected checkboxes /// </summary> /// <returns>String in the format of 11,12,13...41,42...31,32...37</returns> public string GetAvailibility() { string selectedText = string.Empty; foreach (CheckBox chk in GetCheckboxes()) { if (chk.IsChecked == true) { selectedText = chk.Tag.ToString() + "," + selectedText; } } return selectedText; }   In my example I am using the matrix format for Day and Time, for example Monday=1, Tuesday=2, Wednesday=3, Thursday = 4, Friday = 5, Saturday = 6, Sunday=7. And Morning = 1, Afternoon =2, Evening = 3. So if I want to represent Morning-Monday I will have to represent it as 11, Afternoon-Tuesday as 22, Morning-Wednesday as 13, etc. 
Going the other way, to set values into the control, I pass them in the same format: preferredTimeControl.SetAvailibility("11,12,13,16,23,22"); This will check the boxes for Morning-Monday, Morning-Tuesday, Morning-Wednesday, Morning-Saturday, Afternoon-Tuesday and Afternoon-Wednesday. To use this control, first import it as an xmlns namespace with xmlns:controls="clr-namespace:PreferredTimeControlApp" and then put it in your page wherever you want: <Grid x:Name="LayoutRoot" Style="{StaticResource LayoutRootGridStyle}"> <Border x:Name="ContentBorder" Style="{StaticResource ContentBorderStyle}"> <controls:PreferredTimeControl x:Name="preferredTimeControl"></controls:PreferredTimeControl> </Border> </Grid> And in the code-behind you can just include this code: private void InitializeControl() { preferredTimeControl.SetAvailibility("11,12,13,16,23,22"); } And you are ready to go. For more details you can refer to the attached code. I know there may be an even simpler and better way to do this; let me know if you have other ideas. Sorry guys, I have still used Silverlight 3 and VS2008, as the system I am uploading from has not yet been upgraded, but you can use the same code with Silverlight 4 and VS2010 without any changes. It may just ask you to upgrade your project, which will take care of the rest. Download Source Code.   Thanks ~Brij
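    As a quick footnote on the encoding described above, a tiny hypothetical helper (not part of the attached sample) shows how the two-digit codes are put together:

        // Hypothetical helper, not part of the attached sample.
        // timeOfDay: 1 = Morning, 2 = Afternoon, 3 = Evening
        // dayOfWeek: 1 = Monday ... 7 = Sunday
        public static string ToSlotCode(int timeOfDay, int dayOfWeek)
        {
            return string.Format("{0}{1}", timeOfDay, dayOfWeek);
        }

        // Usage: Morning-Monday and Afternoon-Tuesday
        // preferredTimeControl.SetAvailibility(ToSlotCode(1, 1) + "," + ToSlotCode(2, 2));  // "11,22"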

    Read the article

  • Register for Cloud Computing Bootcamp: Free Technical Training on Developing for Windows Azure

    This two-day workshop will help you prepare to deliver solutions on the Windows Azure Platform. We've worked to bring the region's best Azure experts together to teach you how to work in the cloud. Each day will be filled with training, discussion, reviewing real scenarios, and hands-on labs. It's more than just a training class, it's also an event-in-a-box. If you don't see a class near you, then throw your own.

    Read the article

  • Latest AutoVue Podcast - Customer Success at Ringhals/Vattenfall

    - by pam.petropoulos(at)oracle.com
    Ringhals, a Swedish nuclear power plant, part of the Vattenfall Group, produces 20% of the country's electricity and is the largest power station in the Nordic region. Ringhals has standardized on AutoVue for most of their engineering and asset document visualization requirements throughout their plant maintenance, design and engineering operations. This audio interview, hosted by Folia Grace, Oracle Vice President of Application Product Marketing, features Harald Carlsson, Documentation Administrator at Ringhals/Vattenfall. Hear Harald describe how they have cut IT maintenance costs, increased productivity, and improved maintenance operations throughout their facility. Click here to listen to the podcast

    Read the article

  • Hack Your Lights for Remote Control

    - by Jason Fitzpatrick
    This clever hack combines a modified wall switch with unused buttons on a universal remote to create one-touch wireless control of the lighting in a media room. Andrew, the tinkerer behind this home theater hack, writes: I really liked the idea of controlling my “Home Theater” lights with a remote (TV or other); this would save me the exhausting task of heaving myself off the couch to turn the lights on or off. I found one of my remotes has a spare power button; it’s one of those stupid “universal” remotes that come with DVD players or TVs but only work if you have all the same brand equipment, which I don’t, so this made a good option for a light switch. Hit up the link below to check out more photos of his project and download the source code.

    Read the article

  • How to UEFI install Ubuntu 12.10?

    - by Geezanansa
    Running a newer FM1 motherboard which is using an AMD 3870k APU with a new 1TB HDD. Following the advice in the motherboard manual and https://help.ubuntu.com/community/UEFI have now got to grub option screen for UEFI install. see http://imgur.com/VW5vz The dvd.iso being used is Ubuntu 12.10 desktop amd64 from ubuntu .com. The hdd has had a gpt partition table made for, by using gparted when in a live desktop session when booted in bios mode. (*edit/update: Although the old cd updates on running it is an old kernel and it did make a gpt but that version of gparted uses fdisk whereas gdisk is required to make gpt. Think am going to have to spend more time here http://www.dedoimedo.com/computers/gparted.html lol Using the gparted from 12.10 live session to make partitions; following the guidance regarding this at https://help.ubuntu.com/community/UEFI#Creating_an_EFI_partition, but can only boot to grub option screen http://imgur.com/VW5vz when 12.10 options to "try ubuntu" or "install ubuntu" are selected they give errors as described below*) but after making the gpt decided to leave it unformatted/unallocated space with the intention of using installer to set up partitions. update-originally but gparted now sees hdd as http://imgur.com/hFIvm as described above. *Booting live dvd in EFI mode gives "Secure Boot not installed" just before grub kernel option list with the option to "install ubuntu" but get "can not read cd/0" and "the kernel must be loaded first" errors; when that option is selected. Any pointers on how to get installer going for UEFI install would be good. Thanks in advance. update: Hopefully these screenshots can help better highlight where i am going wrong or if there is something else going on http://imgur.com/g30RB, http://imgur.com/VW5vz, http://imgur.com/31E0q, http://imgur.com/bnuaG, http://imgur.com/y4KGu, http://imgur.com/3u2QE, http://imgur.com/n9lN3, http://imgur.com/FEKvz, http://imgur.com/hFIvm, update: Thank you fernando garcia for pointing me in the right direction to start the process of elimantion. What i have done since asking question is a little home work starting here http://askubuntu.com/faq#bounty and here http://askubuntu.com/questions/how-to-ask. Looking at other similar questions was good fun and found this 12.10 UEFI Secure Boot install the most relative in helping getting ubuntu to uefi install on my system. In response to wolverine's question this article was referred to http://web.dodds.net/~vorlon/wiki/blog/SecureBoot_in_Ubuntu_12.10/ This article in the first sentence gives a link to http://www.ubuntu.com/download which is where i downloaded the 12.10 desktop amd64 .iso(and others) but have been unable to do a efi install of ubuntu on this system and as this is a new system have ended up just going with bios installer running which at least puts my mind at ease that i have not bricked my new mobo.(had to do a clrcmos and flash to latest bios version) So it possibly could be the bios settings or the bios version being used that is problem. To try and eliminate bios version i can not get to post screen in order to id bios version being used. Pressing tab to show post instead of logo and trying to pausebreak to catch post is proving difficult. If logo screen in bios is disabled just get black screen no post shown and pressing tab does not show post. 
Appreciate using appropriate bios settings and latest 12.10 release should simply get uefi installer running when selected from the grub list (nice graphic details in Identifying if computer boots the cd in efi mode section at https://help.ubuntu.com/community/UEFI#Identifying_if_the_computer_boots_the_CD_in_EFI_mode) And to confirm the hdd is booting in efi mode https://help.ubuntu.com/community/UEFI#Identifying_if_the_computer_boots_the_HDD_in_EFI_mode running the command [ -d /sys/firmware/efi ] && echo "EFI boot on HDD" || echo "Legacy boot on HDD" gave Legacy boot on HDD This is as expected because i allowed the bios installer (which was 12.04 desktop amd64 after trying 12.10 desktop amd64 in efi mode) to run to get a working installation. Which is not what was intended or wished for but wanted to get a working os to bench test new mobo i.e. prove it is working. There are other options as in installing other bootmanagers/loaders but do not wish to do so as shim should get grub2 going that is after secure boot has been signed.(Now got rough idea what should happen just it aint happening. Is it possible ahci drivers are required?) Will post boot info script url of the updated config/setup. The original question asked seems irrelevant to what is being said in this update but as the problem is not resolved will keep on trying efi installing! i.e the problem is same as when question asked just trying to update. Have tried to edit and update the best i can!

    Read the article

  • How to control in the vertex shader where pixel ends up in the renderTarget?

    - by cubrman
    What if I have an arbitrary renderTarget that is smaller than the screen (say it is 1x1 pixel) and I want to make sure in the VertexShaderFunction that all my pixels end up exactly in that 1-pixel region? No matter what I do, they all seem to get culled at some point, though GraphicsDevice.Clear() works OK. Where is the top-left corner of the renderTarget, vertex-shader-wise? I tried output.Position = (0,0,0,0)/(0,0,0,1)/(1,1,1,1)/(-0.5,0.5,0,1); NOTHING works! A fullscreen quad is not an option because I actually need to process geometry in the shaders to get the results I need.

    Read the article

< Previous Page | 66 67 68 69 70 71 72 73 74 75 76 77  | Next Page >