Search Results

Search found 3443 results on 138 pages for 'byte'.


  • Is it possible to pass a structure of delegates from managed to native?

    - by Veiva
    I am writing a wrapper for the game programming library "Allegro" and its less stable 4.9 branch. Now, I have done good insofar, except for when it comes to wrapping a structure of function pointers. Basically, I can't change the original code, despite having access to it, because that would require me to fork it in some manner. I need to know how I can somehow pass a structure of delegates from managed to native without causing an AccessViolationException that has occurred so far. Now, for the code. Here is the Allegro definition of the structure: typedef struct ALLEGRO_FILE_INTERFACE { AL_METHOD(ALLEGRO_FILE*, fi_fopen, (const char *path, const char *mode)); AL_METHOD(void, fi_fclose, (ALLEGRO_FILE *handle)); AL_METHOD(size_t, fi_fread, (ALLEGRO_FILE *f, void *ptr, size_t size)); AL_METHOD(size_t, fi_fwrite, (ALLEGRO_FILE *f, const void *ptr, size_t size)); AL_METHOD(bool, fi_fflush, (ALLEGRO_FILE *f)); AL_METHOD(int64_t, fi_ftell, (ALLEGRO_FILE *f)); AL_METHOD(bool, fi_fseek, (ALLEGRO_FILE *f, int64_t offset, int whence)); AL_METHOD(bool, fi_feof, (ALLEGRO_FILE *f)); AL_METHOD(bool, fi_ferror, (ALLEGRO_FILE *f)); AL_METHOD(int, fi_fungetc, (ALLEGRO_FILE *f, int c)); AL_METHOD(off_t, fi_fsize, (ALLEGRO_FILE *f)); } ALLEGRO_FILE_INTERFACE; My simple attempt at wrapping it: public delegate IntPtr AllegroInternalOpenFileDelegate(string path, string mode); public delegate void AllegroInternalCloseFileDelegate(IntPtr file); public delegate int AllegroInternalReadFileDelegate(IntPtr file, IntPtr data, int size); public delegate int AllegroInternalWriteFileDelegate(IntPtr file, IntPtr data, int size); public delegate bool AllegroInternalFlushFileDelegate(IntPtr file); public delegate long AllegroInternalTellFileDelegate(IntPtr file); public delegate bool AllegroInternalSeekFileDelegate(IntPtr file, long offset, int where); public delegate bool AllegroInternalIsEndOfFileDelegate(IntPtr file); public delegate bool AllegroInternalIsErrorFileDelegate(IntPtr file); public delegate int AllegroInternalUngetCharFileDelegate(IntPtr file, int c); public delegate long AllegroInternalFileSizeDelegate(IntPtr file); [StructLayout(LayoutKind.Sequential, Pack = 0)] public struct AllegroInternalFileInterface { [MarshalAs(UnmanagedType.FunctionPtr)] public AllegroInternalOpenFileDelegate fi_fopen; [MarshalAs(UnmanagedType.FunctionPtr)] public AllegroInternalCloseFileDelegate fi_fclose; [MarshalAs(UnmanagedType.FunctionPtr)] public AllegroInternalReadFileDelegate fi_fread; [MarshalAs(UnmanagedType.FunctionPtr)] public AllegroInternalWriteFileDelegate fi_fwrite; [MarshalAs(UnmanagedType.FunctionPtr)] public AllegroInternalFlushFileDelegate fi_fflush; [MarshalAs(UnmanagedType.FunctionPtr)] public AllegroInternalTellFileDelegate fi_ftell; [MarshalAs(UnmanagedType.FunctionPtr)] public AllegroInternalSeekFileDelegate fi_fseek; [MarshalAs(UnmanagedType.FunctionPtr)] public AllegroInternalIsEndOfFileDelegate fi_feof; [MarshalAs(UnmanagedType.FunctionPtr)] public AllegroInternalIsErrorFileDelegate fi_ferror; [MarshalAs(UnmanagedType.FunctionPtr)] public AllegroInternalUngetCharFileDelegate fi_fungetc; [MarshalAs(UnmanagedType.FunctionPtr)] public AllegroInternalFileSizeDelegate fi_fsize; } I have a simple auxiliary wrapper that turns an ALLEGRO_FILE_INTERFACE into an ALLEGRO_FILE, like so: #define ALLEGRO_NO_MAGIC_MAIN #include <allegro5/allegro5.h> #include <stdlib.h> #include <string.h> #include <assert.h> __declspec(dllexport) ALLEGRO_FILE * al_aux_create_file(ALLEGRO_FILE_INTERFACE * fi) { ALLEGRO_FILE * file; 
assert(fi && "`fi' null"); file = (ALLEGRO_FILE *)malloc(sizeof(ALLEGRO_FILE)); if (!file) return NULL; file->vtable = (ALLEGRO_FILE_INTERFACE *)malloc(sizeof(ALLEGRO_FILE_INTERFACE)); if (!(file->vtable)) { free(file); return NULL; } memcpy(file->vtable, fi, sizeof(ALLEGRO_FILE_INTERFACE)); return file; } __declspec(dllexport) void al_aux_destroy_file(ALLEGRO_FILE * f) { assert(f && "`f' null"); assert(f->vtable && "`f->vtable' null"); free(f->vtable); free(f); } Lastly, I have a class that accepts a Stream and provides the proper methods to interact with the stream. Just to make sure, here it is: /// <summary> /// A semi-opaque data type that allows one to load fonts, etc from a stream. /// </summary> public class AllegroFile : AllegroResource, IDisposable { AllegroInternalFileInterface fileInterface; Stream fileStream; /// <summary> /// Gets the file interface. /// </summary> internal AllegroInternalFileInterface FileInterface { get { return fileInterface; } } /// <summary> /// Constructs an Allegro file from the stream provided. /// </summary> /// <param name="stream">The stream to use.</param> public AllegroFile(Stream stream) { fileStream = stream; fileInterface = new AllegroInternalFileInterface(); fileInterface.fi_fopen = Open; fileInterface.fi_fclose = Close; fileInterface.fi_fread = Read; fileInterface.fi_fwrite = Write; fileInterface.fi_fflush = Flush; fileInterface.fi_ftell = GetPosition; fileInterface.fi_fseek = Seek; fileInterface.fi_feof = GetIsEndOfFile; fileInterface.fi_ferror = GetIsError; fileInterface.fi_fungetc = UngetCharacter; fileInterface.fi_fsize = GetLength; Resource = AllegroFunctions.al_aux_create_file(ref fileInterface); if (!IsValid) throw new AllegroException("Unable to create file"); } /// <summary> /// Disposes of all resources. /// </summary> ~AllegroFile() { Dispose(); } /// <summary> /// Disposes of all resources used. /// </summary> public void Dispose() { if (IsValid) { Resource = IntPtr.Zero; // Should call AllegroFunctions.al_aux_destroy_file fileStream.Dispose(); } } IntPtr Open(string path, string mode) { return IntPtr.Zero; } void Close(IntPtr file) { fileStream.Close(); } int Read(IntPtr file, IntPtr data, int size) { byte[] d = new byte[size]; int read = fileStream.Read(d, 0, size); Marshal.Copy(d, 0, data, size); return read; } int Write(IntPtr file, IntPtr data, int size) { byte[] d = new byte[size]; Marshal.Copy(data, d, 0, size); fileStream.Write(d, 0, size); return size; } bool Flush(IntPtr file) { fileStream.Flush(); return true; } long GetPosition(IntPtr file) { return fileStream.Position; } bool Seek(IntPtr file, long offset, int whence) { SeekOrigin origin = SeekOrigin.Begin; if (whence == 1) origin = SeekOrigin.Current; else if (whence == 2) origin = SeekOrigin.End; fileStream.Seek(offset, origin); return true; } bool GetIsEndOfFile(IntPtr file) { return fileStream.Position == fileStream.Length; } bool GetIsError(IntPtr file) { return false; } int UngetCharacter(IntPtr file, int character) { return -1; } long GetLength(IntPtr file) { return fileStream.Length; } } Now, when I do something like this: AllegroFile file = new AllegroFile(new FileStream("Test.bmp", FileMode.Create, FileAccess.ReadWrite)); bitmap.SaveToFile(file, ".bmp"); ...I get an AccessViolationException. I think I understand why (the garbage collector can relocate structs and classes whenever), but I'd think that the method stub that is created by the framework would take this into consideration and route the calls to the valid classes. 
However, it seems obvious that I'm wrong. So basically, is there any way I can successfully wrap that structure? (And I'm sorry for all the code! I hope it's not too much...)
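    Two things are worth checking with a callback struct like this. First, Allegro is a C library, so if its AL_METHOD callbacks use the cdecl convention, each delegate type needs an explicit [UnmanagedFunctionPointer(CallingConvention.Cdecl)]; the default stdcall marshalling would unbalance the stack on every callback. Second, the delegate instances have to stay referenced (rooted) for as long as native code might call back through them. A minimal sketch of that idea follows; only AllegroInternalFileInterface, al_aux_create_file and al_aux_destroy_file come from the question, while the DLL name and the wrapper class are hypothetical.

        // Sketch only: assumes the auxiliary exports live in "allegro_aux.dll"
        // and that Allegro's callbacks are cdecl.
        using System;
        using System.Runtime.InteropServices;

        // Each of the existing delegate types would be decorated like this one:
        [UnmanagedFunctionPointer(CallingConvention.Cdecl)]
        public delegate long AllegroInternalFileSizeDelegate(IntPtr file);

        public sealed class AllegroFileHandle : IDisposable
        {
            // Keeping the struct in a field keeps the delegates inside it reachable,
            // so the GC cannot collect the thunks native code will call into.
            private AllegroInternalFileInterface _iface;
            private IntPtr _nativeFile;

            [DllImport("allegro_aux.dll", CallingConvention = CallingConvention.Cdecl)]
            private static extern IntPtr al_aux_create_file(ref AllegroInternalFileInterface fi);

            [DllImport("allegro_aux.dll", CallingConvention = CallingConvention.Cdecl)]
            private static extern void al_aux_destroy_file(IntPtr file);

            public AllegroFileHandle(AllegroInternalFileInterface iface)
            {
                _iface = iface;
                _nativeFile = al_aux_create_file(ref _iface);
                if (_nativeFile == IntPtr.Zero)
                    throw new InvalidOperationException("al_aux_create_file failed");
            }

            public IntPtr NativeFile { get { return _nativeFile; } }

            public void Dispose()
            {
                if (_nativeFile != IntPtr.Zero)
                {
                    al_aux_destroy_file(_nativeFile);
                    _nativeFile = IntPtr.Zero;
                }
            }
        }

    On the calling side, something like GC.KeepAlive(file) placed after bitmap.SaveToFile(file, ".bmp") guarantees the wrapper, and with it the delegates, stays reachable for the whole duration of the native call.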

    Read the article

  • "Content is not allowed in prolog" when parsing perfectly valid XML on GAE

    - by Adrian Petrescu
    Hey guys, I've been beating my head against this absolutely infuriating bug for the last 48 hours, so I thought I'd finally throw in the towel and try asking here before I throw my laptop out the window. I'm trying to parse the response XML from a call I made to AWS SimpleDB. The response is coming back on the wire just fine; for example, it may look like: <?xml version="1.0" encoding="utf-8"?> <ListDomainsResponse xmlns="http://sdb.amazonaws.com/doc/2009-04-15/"> <ListDomainsResult> <DomainName>Audio</DomainName> <DomainName>Course</DomainName> <DomainName>DocumentContents</DomainName> <DomainName>LectureSet</DomainName> <DomainName>MetaData</DomainName> <DomainName>Professors</DomainName> <DomainName>Tag</DomainName> </ListDomainsResult> <ResponseMetadata> <RequestId>42330b4a-e134-6aec-e62a-5869ac2b4575</RequestId> <BoxUsage>0.0000071759</BoxUsage> </ResponseMetadata> </ListDomainsResponse> I pass in this XML to a parser with XMLEventReader eventReader = xmlInputFactory.createXMLEventReader(response.getContent()); and call eventReader.nextEvent(); a bunch of times to get the data I want. Here's the bizarre part -- it works great inside the local server. The response comes in, I parse it, everyone's happy. The problem is that when I deploy the code to Google App Engine, the outgoing request still works, and the response XML seems 100% identical and correct to me, but the response fails to parse with the following exception: com.amazonaws.http.HttpClient handleResponse: Unable to unmarshall response (ParseError at [row,col]:[1,1] Message: Content is not allowed in prolog.): <?xml version="1.0" encoding="utf-8"?> <ListDomainsResponse xmlns="http://sdb.amazonaws.com/doc/2009-04-15/"><ListDomainsResult><DomainName>Audio</DomainName><DomainName>Course</DomainName><DomainName>DocumentContents</DomainName><DomainName>LectureSet</DomainName><DomainName>MetaData</DomainName><DomainName>Professors</DomainName><DomainName>Tag</DomainName></ListDomainsResult><ResponseMetadata><RequestId>42330b4a-e134-6aec-e62a-5869ac2b4575</RequestId><BoxUsage>0.0000071759</BoxUsage></ResponseMetadata></ListDomainsResponse> javax.xml.stream.XMLStreamException: ParseError at [row,col]:[1,1] Message: Content is not allowed in prolog. at com.sun.org.apache.xerces.internal.impl.XMLStreamReaderImpl.next(Unknown Source) at com.sun.xml.internal.stream.XMLEventReaderImpl.nextEvent(Unknown Source) at com.amazonaws.transform.StaxUnmarshallerContext.nextEvent(StaxUnmarshallerContext.java:153) ... (rest of lines omitted) I have double, triple, quadruple checked this XML for 'invisible characters' or non-UTF8 encoded characters, etc. I looked at it byte-by-byte in an array for byte-order-marks or something of that nature. Nothing; it passes every validation test I could throw at it. Even stranger, it happens if I use a Saxon-based parser as well -- but ONLY on GAE, it always works fine in my local environment. It makes it very hard to trace the code for problems when I can only run the debugger on an environment that works perfectly (I haven't found any good way to remotely debug on GAE). Nevertheless, using the primitive means I have, I've tried a million approaches including: XML with and without the prolog With and without newlines With and without the "encoding=" attribute in the prolog Both newline styles With and without the chunking information present in the HTTP stream And I've tried most of these in multiple combinations where it made sense they would interact -- nothing! I'm at my wit's end. 
Has anyone seen an issue like this before and can hopefully shed some light on it? Thanks!

    Read the article

  • How do I access XHR responseBody from JavaScript?

    - by Cheeso
    I've got a web page that uses XMLHttpRequest to download a binary resource. Because it's binary I'm trying to use xhr.responseBody to access the bytes. I've seen a few posts suggesting that it's impossible to access the bytes directly from Javascript. This sounds crazy to me. Weirdly, xhr.responseBody is accessible from VBScript, so the suggestion is that I must define a method in VBScript in the webpage, and then call that method from Javascript. See jsdap for one example. var IE_HACK = (/msie/i.test(navigator.userAgent) && !/opera/i.test(navigator.userAgent)); if (IE_HACK) document.write('<script type="text/vbscript">\n\ Function BinaryToArray(Binary)\n\ Dim i\n\ ReDim byteArray(LenB(Binary))\n\ For i = 1 To LenB(Binary)\n\ byteArray(i-1) = AscB(MidB(Binary, i, 1))\n\ Next\n\ BinaryToArray = byteArray\n\ End Function\n\ </script>'); var xml = (window.XMLHttpRequest) ? new XMLHttpRequest() // Mozilla/Safari/IE7+ : (window.ActiveXObject) ? new ActiveXObject("MSXML2.XMLHTTP") // IE6 : null; // Commodore 64? xml.open("GET", url, true); if (xml.overrideMimeType) { xml.overrideMimeType('text/plain; charset=x-user-defined'); } else { xml.setRequestHeader('Accept-Charset', 'x-user-defined'); } xml.onreadystatechange = function() { if (xml.readyState == 4) { if (!binary) { callback(xml.responseText); } else if (IE_HACK) { // call a VBScript method to copy every single byte callback(BinaryToArray(xml.responseBody).toArray()); } else { callback(getBuffer(xml.responseText)); } } }; xml.send(''); Is this really true? The best way? copying every byte? For a large binary stream that's not gonna be very efficient. There is also a possible technique using ADODB.Stream, which is a COM equivalent of a MemoryStream. See here for an example. It does not require VBScript but does require a separate COM object. if (typeof (ActiveXObject) != "undefined" && typeof (httpRequest.responseBody) != "undefined") { // Convert httpRequest.responseBody byte stream to shift_jis encoded string var stream = new ActiveXObject("ADODB.Stream"); stream.Type = 1; // adTypeBinary stream.Open (); stream.Write (httpRequest.responseBody); stream.Position = 0; stream.Type = 1; // adTypeBinary; stream.Read.... /// ???? what here } I don't think that's gonna work - ADODB.Stream is disabled on most machines these days. In The IE8 developer tools - the IE equivalent of Firebug - I can see the responseBody is an array of bytes and I can even see the bytes themselves. The data is right there. I don't understand why I can't get to it. Is it possible for me to read it with responseText? hints? (other than defining a VBScript method)

    Read the article

  • What would cause ANY .NET application to crash immediately... except a project I create and Debug in

    - by blak3r
    My software recently got deployed to a customer who said that the application was crashing immediately after it started. After some initial debugging, the customer provided me remote access to one of the computers which was unable to run the application. I found that the crash wasn't specific to my application. Any application which depended on the .NET framework crashed immediately. Conveniently, Visual Studio 2008 was installed so I created a quick hello world application on it and clicked Debug. The application worked fine. But, then when I tried to execute the generated binaries in the /bin/Debug/HelloWorld.exe directory outside of visual studio it crashed. List of things i've tried (UPDATED): I checked that "Everyone" has Read&Execute permissions for c:\Windows. To test that the problem was with the .NET Framework (and not my application), I attempted to download Paint .NET on to the computers. The setup frontend crashed in the same manner. Performed a repair of the .NET framework as outlined in http://support.microsoft.com/kb/908077 (Boy was this fun and time consuming). No luck. Installed .NET 3.5 SP1 (before it just had .NET 3.5) Note: my application targets 2.0 so I did this more as a long shot... but i learned in the process that .NET 3.5 SP1 also updates the underlying frameworks. Ran Aaron Stebner's .NET Setup Verification Tool. This tool indicated that .NET was successfully installed. (I forget if i checked all the versions but at least 2.0 worked). Tested some mini hello world applications which were targeted for .NET 2.0 and .NET 3.5 and both crashed in the same way. Tried launching .NET apps via windbg cmd line. Doing this did allow me invoke my simple hello world applications. So, simple .NET hello world works when invoked by windbg or by launching via debug in visual studio... but doesn't if i try to execute it standalone. I created a dump file using WinDbg. It wasn't all that revealing to me. FAULTING_IP: mscorwks!PEImage::GetEntryPointToken+21 79f4ff9d f6401010 test byte ptr [eax+10h],10h EXCEPTION_RECORD: 0012f710 -- (.exr 0x12f710) ExceptionAddress: 79f4ff9d (mscorwks!PEImage::GetEntryPointToken+0x00000021) ExceptionCode: c0000005 (Access violation) ExceptionFlags: 00000000 NumberParameters: 2 Parameter[0]: 00000000 Parameter[1]: 00000010 Attempt to read from address 00000010 FAULTING_THREAD: 00000b44 PROCESS_NAME: MyProcess.exe ERROR_CODE: (NTSTATUS) 0x80000003 - {EXCEPTION} Breakpoint A breakpoint has been reached. EXCEPTION_CODE: (HRESULT) 0x80000003 (2147483651) - One or more arguments are invalid DETOURED_IMAGE: 1 NTGLOBALFLAG: 0 APPLICATION_VERIFIER_FLAGS: 0 MANAGED_STACK: !dumpstack -EE OS Thread Id: 0xb44 (0) Current frame: ChildEBP RetAddr Caller,Callee EXCEPTION_OBJECT: !pe cb10b4 Exception object: 00cb10b4 Exception type: System.ExecutionEngineException Message: <none> InnerException: <none> StackTrace (generated): <none> StackTraceString: <none> HResult: 80131506 MANAGED_OBJECT_NAME: System.ExecutionEngineException CONTEXT: 0012f72c -- (.cxr 0x12f72c) eax=00000000 ebx=00000000 ecx=00000000 edx=0000000e esi=001a1490 edi=00000001 eip=79f4ff9d esp=0012f9f8 ebp=0012fa1c iopl=0 nv up ei pl zr na pe nc cs=001b ss=0023 ds=0023 es=0023 fs=003b gs=0000 efl=00010246 mscorwks!PEImage::GetEntryPointToken+0x21: 79f4ff9d f6401010 test byte ptr [eax+10h],10h ds:0023:00000010=?? 
Resetting default scope READ_ADDRESS: 00000010 FOLLOWUP_IP: mscorwks!PEImage::GetEntryPointToken+21 79f4ff9d f6401010 test byte ptr [eax+10h],10h BUGCHECK_STR: APPLICATION_FAULT_NULL_CLASS_PTR_DEREFERENCE_SHUTDOWN PRIMARY_PROBLEM_CLASS: NULL_CLASS_PTR_DEREFERENCE_SHUTDOWN DEFAULT_BUCKET_ID: NULL_CLASS_PTR_DEREFERENCE_SHUTDOWN LAST_CONTROL_TRANSFER: from 79ef02b5 to 79f4ff9d STACK_TEXT: 79f4ff9d mscorwks!PEImage::GetEntryPointToken+0x21 79ef02b5 mscorwks!PEFile::GetEntryPointToken+0xa0 79eefeaf mscorwks!SystemDomain::ExecuteMainMethod+0xd4 79fb9793 mscorwks!ExecuteEXE+0x59 79fb96df mscorwks!_CorExeMain+0x15c 7900b1b3 mscoree!_CorExeMain+0x2c 7c817077 kernel32!BaseProcessStart+0x23 SYMBOL_STACK_INDEX: 0 SYMBOL_NAME: mscorwks!PEImage::GetEntryPointToken+21 FOLLOWUP_NAME: MachineOwner MODULE_NAME: mscorwks IMAGE_NAME: mscorwks.dll DEBUG_FLR_IMAGE_TIMESTAMP: 471ef729 STACK_COMMAND: .cxr 0012F72C ; kb ; dds 12f9f8 ; kb FAILURE_BUCKET_ID: NULL_CLASS_PTR_DEREFERENCE_SHUTDOWN_80000003_mscorwks.dll!PEImage::GetEntryPointToken BUCKET_ID: APPLICATION_FAULT_NULL_CLASS_PTR_DEREFERENCE_SHUTDOWN_DETOURED_mscorwks!PEImage::GetEntryPointToken+21 WATSON_STAGEONE_URL: http://watson.microsoft.com/StageOne/MyProcess_exe/2_4_4_39/4a8a192c/unknown/0_0_0_0/bbbbbbb4/80000003/00000000.htm?Retriage=1 Followup: MachineOwner Edit 1:The event log details for this error say it's a .NET Runtime version 2.0.50727.3053 - Fatal Execution Engine Error (7A097706)(80131506). Edit 2 (10-7-09): This issue is still active. Edit 3 (3-29-10): This update is to let everyone know that I never did successfully solve the problem. The customer who's machine this was on lost interest in solving it and just reimaged the machine :(. Thanks for all the contributions though.

    Read the article

  • Reading off a socket until end of line C#?

    - by Omar Kooheji
    I'm trying to write a service that listens to a TCP Socket on a given port until an end of line is recived and then based on the "line" that was received executes a command. I've followed a basic socket programming tutorial for c# and have come up with the following code to listen to a socket: public void StartListening() { _log.Debug("Creating Maing TCP Listen Socket"); _mainSocket = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp); IPEndPoint ipLocal = new IPEndPoint(IPAddress.Any, _port); _log.Debug("Binding to local IP Address"); _mainSocket.Bind(ipLocal); _log.DebugFormat("Listening to port {0}",_port); _mainSocket.Listen(10); _log.Debug("Creating Asynchronous callback for client connections"); _mainSocket.BeginAccept(new AsyncCallback(OnClientConnect), null); } public void OnClientConnect(IAsyncResult asyn) { try { _log.Debug("OnClientConnect Creating worker socket"); Socket workerSocket = _mainSocket.EndAccept(asyn); _log.Debug("Adding worker socket to list"); _workerSockets.Add(workerSocket); _log.Debug("Waiting For Data"); WaitForData(workerSocket); _log.DebugFormat("Clients Connected [{0}]", _workerSockets.Count); _mainSocket.BeginAccept(new AsyncCallback(OnClientConnect), null); } catch (ObjectDisposedException) { _log.Error("OnClientConnection: Socket has been closed\n"); } catch (SocketException se) { _log.Error("Socket Exception", se); } } public class SocketPacket { private System.Net.Sockets.Socket _currentSocket; public System.Net.Sockets.Socket CurrentSocket { get { return _currentSocket; } set { _currentSocket = value; } } private byte[] _dataBuffer = new byte[1]; public byte[] DataBuffer { get { return _dataBuffer; } set { _dataBuffer = value; } } } private void WaitForData(Socket workerSocket) { _log.Debug("Entering WaitForData"); try { lock (this) { if (_workerCallback == null) { _log.Debug("Initializing worker callback to OnDataRecieved"); _workerCallback = new AsyncCallback(OnDataRecieved); } } SocketPacket socketPacket = new SocketPacket(); socketPacket.CurrentSocket = workerSocket; workerSocket.BeginReceive(socketPacket.DataBuffer, 0, socketPacket.DataBuffer.Length, SocketFlags.None, _workerCallback, socketPacket); } catch (SocketException se) { _log.Error("Socket Exception", se); } } public void OnDataRecieved(IAsyncResult asyn) { SocketPacket socketData = (SocketPacket)asyn.AsyncState; try { int iRx = socketData.CurrentSocket.EndReceive(asyn); char[] chars = new char[iRx + 1]; _log.DebugFormat("Created Char array to hold incomming data. [{0}]",iRx+1); System.Text.Decoder decoder = System.Text.Encoding.UTF8.GetDecoder(); int charLength = decoder.GetChars(socketData.DataBuffer, 0, iRx, chars, 0); _log.DebugFormat("Read [{0}] characters",charLength); String data = new String(chars); _log.DebugFormat("Read in String \"{0}\"",data); WaitForData(socketData.CurrentSocket); } catch (ObjectDisposedException) { _log.Error("OnDataReceived: Socket has been closed. Removing Socket"); _workerSockets.Remove(socketData.CurrentSocket); } catch (SocketException se) { _log.Error("SocketException:",se); _workerSockets.Remove(socketData.CurrentSocket); } } This I thought was going to be a good basis for what I wanted to do, but the code I have appended the incoming characters to a text box one by one and didn't do anything with it. Which doesn't really work for what I want to do. My main issue is the decoupling of the OnDataReceived method from the Wait for data method. 
which means I'm having issues building a string (I would use a StringBuilder, but I can accept multiple connections, so that doesn't really work). Ideally I'd like to loop while listening to a socket until I see an end-of-line character and then call a method with the resulting string as a parameter. What's the best way to go about doing this?
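    One common pattern is to give every connection its own line buffer by hanging it off the per-connection state object (the SocketPacket above), appending decoded characters as they arrive and firing a callback whenever a newline shows up. Below is a minimal sketch of that idea; the type and method names are made up rather than taken from the service.

        // Sketch only: ConnectionState stands in for the per-connection SocketPacket.
        using System;
        using System.Net.Sockets;
        using System.Text;

        class ConnectionState
        {
            public Socket Socket;
            public byte[] Buffer = new byte[1024];
            public StringBuilder Line = new StringBuilder();   // partial line lives here
        }

        static class LineReader
        {
            // Call this with however many bytes EndReceive returned for this connection.
            public static void AppendAndDispatch(ConnectionState state, int bytesRead,
                                                 Action<string> onLine)
            {
                // Note: for non-ASCII input, keep a per-connection System.Text.Decoder
                // instead, so multi-byte sequences split across reads survive.
                string chunk = Encoding.UTF8.GetString(state.Buffer, 0, bytesRead);

                foreach (char c in chunk)
                {
                    if (c == '\n')
                    {
                        // A full command has arrived; hand it off and start a new line.
                        onLine(state.Line.ToString().TrimEnd('\r'));
                        state.Line.Length = 0;
                    }
                    else
                    {
                        state.Line.Append(c);
                    }
                }
            }
        }

    OnDataRecieved would then call something like LineReader.AppendAndDispatch(socketData, iRx, HandleCommand) before posting the next BeginReceive, where HandleCommand is whatever method should run per completed line.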

    Read the article

  • Getting EOFException while trying to read from SSLSocket

    - by Isac
    Hi, I am developing a SSL client that will do a simple request to a SSL server and wait for the response. The SSL handshake and the writing goes OK but I can't READ data from the socket. I turned on the debug of java.net.ssl and got the following: [..] main, READ: TLSv1 Change Cipher Spec, length = 1 [Raw read]: length = 5 0000: 16 03 01 00 20 .... [Raw read]: length = 32 [..] main, READ: TLSv1 Handshake, length = 32 Padded plaintext after DECRYPTION: len = 32 [..] * Finished verify_data: { 29, 1, 139, 226, 25, 1, 96, 254, 176, 51, 206, 35 } %% Didn't cache non-resumable client session: [Session-1, SSL_RSA_WITH_RC4_128_MD5] [read] MD5 and SHA1 hashes: len = 16 0000: 14 00 00 0C 1D 01 8B E2 19 01 60 FE B0 33 CE 23 ..........`..3.# Padded plaintext before ENCRYPTION: len = 70 [..] a.j.y. main, WRITE: TLSv1 Application Data, length = 70 [Raw write]: length = 75 [..] Padded plaintext before ENCRYPTION: len = 70 [..] main, WRITE: TLSv1 Application Data, length = 70 [Raw write]: length = 75 [..] main, received EOFException: ignored main, called closeInternal(false) main, SEND TLSv1 ALERT: warning, description = close_notify Padded plaintext before ENCRYPTION: len = 18 [..] main, WRITE: TLSv1 Alert, length = 18 [Raw write]: length = 23 [..] main, called close() main, called closeInternal(true) main, called close() main, called closeInternal(true) The [..] are the certificate chain. Here is a code snippet: try { System.setProperty("javax.net.debug","all"); /* * Set up a key manager for client authentication * if asked by the server. Use the implementation's * default TrustStore and secureRandom routines. */ SSLSocketFactory factory = null; try { SSLContext ctx; KeyManagerFactory kmf; KeyStore ks; char[] passphrase = "importkey".toCharArray(); ctx = SSLContext.getInstance("TLS"); kmf = KeyManagerFactory.getInstance("SunX509"); ks = KeyStore.getInstance("JKS"); ks.load(new FileInputStream("keystore.jks"), passphrase); kmf.init(ks, passphrase); ctx.init(kmf.getKeyManagers(), null, null); factory = ctx.getSocketFactory(); } catch (Exception e) { throw new IOException(e.getMessage()); } SSLSocket socket = (SSLSocket)factory.createSocket("server ip", 9999); /* * send http request * * See SSLSocketClient.java for more information about why * there is a forced handshake here when using PrintWriters. */ SSLSession session = socket.getSession(); [build query] byte[] buff = query.toWire(); out.write(buff); out.flush(); InputStream input = socket.getInputStream(); int readBytes = -1; int randomLength = 1024; byte[] buffer = new byte[randomLength]; while((readBytes = input.read(buffer, 0, randomLength)) != -1) { LOG.debug("Read: " + new String(buffer)); } input.close(); socket.close(); } catch (Exception e) { e.printStackTrace(); } I can write multiple times and I don't get any error but the EOFException happens on the first read. Am I doing something wrong with the socket or with the SSL authentication? Thank you.

    Read the article

  • How do I handle calls to AudioTrack from jni without crashing?

    - by icecream
    I was trying to write to an AudioTrack from a jni callback, and I get a signal 7 (SIGBUS), fault addr 00000000. I have looked at the Wolf3D example for odroid and they seem to use a android.os.Handler to post a Runnable that will do an update in the correct thread context. I have also tried AttachCurrentThread, but I fail in this case also. It works to play the sound when running from the constructor even if I wrap it in a thread and then post it to the handler. When I do the "same" via a callback from jni it fails. I assume I am braeaking some rules, but I haven't been able to figure out what they are. So far, I haven't found the answer here on SO. So I wonder if anyone knows how this should be done. EDIT: Answered below. The following code is to illustrate the problem. Java: package com.example.jniaudiotrack; import android.app.Activity; import android.media.AudioFormat; import android.media.AudioManager; import android.media.AudioTrack; import android.os.Bundle; import android.os.Handler; import android.util.Log; public class JniAudioTrackActivity extends Activity { AudioTrack mAudioTrack; byte[] mArr; public static final Handler mHandler = new Handler(); /** Called when the activity is first created. */ @Override public void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.main); mArr = new byte[2048]; for (int i = 0; i < 2048; i++) { mArr[i] = (byte) (Math.sin(i) * 128); } mAudioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, 11025, AudioFormat.CHANNEL_CONFIGURATION_MONO, AudioFormat.ENCODING_PCM_8BIT, 2048, AudioTrack.MODE_STREAM); mAudioTrack.play(); new Thread(new Runnable() { public void run() { mHandler.post(new Runnable() { public void run() { mAudioTrack.write(mArr, 0, 2048); Log.i(TAG, "*** Handler from constructor ***"); } }); } }).start(); new Thread(new Runnable() { public void run() { audioFunc(); } }).start(); } public native void audioFunc(); @SuppressWarnings("unused") private void audioCB() { mHandler.post(new Runnable() { public void run() { mAudioTrack.write(mArr, 0, 2048); Log.i(TAG, "*** audioCB called ***"); } }); } private static final String TAG = "JniAudioTrackActivity"; static { System.loadLibrary("jni_audiotrack"); } } cpp: #include <jni.h> extern "C" { JNIEXPORT void Java_com_example_jniaudiotrack_JniAudioTrackActivity_audioFunc(JNIEnv* env, jobject obj); } JNIEXPORT void Java_com_example_jniaudiotrack_JniAudioTrackActivity_audioFunc(JNIEnv* env, jobject obj) { JNIEnv* jniEnv; JavaVM* vm; env->GetJavaVM(&vm); vm->AttachCurrentThread(&jniEnv, 0); jclass cls = env->GetObjectClass(obj); jmethodID audioCBID = env->GetMethodID(cls, "audioCB", "()V"); if (!audioCBID) { return; } env->CallVoidMethod(cls, audioCBID); } Trace snippet: I/DEBUG ( 1653): pid: 9811, tid: 9811 >>> com.example.jniaudiotrack <<< I/DEBUG ( 1653): signal 7 (SIGBUS), fault addr 00000000 I/DEBUG ( 1653): r0 00000800 r1 00000026 r2 00000001 r3 00000000 I/DEBUG ( 1653): r4 42385726 r5 41049e54 r6 bee25570 r7 ad00e540 I/DEBUG ( 1653): r8 000040f8 r9 41048200 10 41049e44 fp 00000000 I/DEBUG ( 1653): ip 000000f8 sp bee25530 lr ad02dbb5 pc ad012358 cpsr 20000010 I/DEBUG ( 1653): #00 pc 00012358 /system/lib/libdvm.so

    Read the article

  • DataTable to JSON

    - by Joel Coehoorn
    I recently needed to serialize a datatable to JSON. Where I'm at we're still on .Net 2.0, so I can't use the JSON serializer in .Net 3.5. I figured this must have been done before, so I went looking online and found a number of different options. Some of them depend on an additional library, which I would have a hard time pushing through here. Others require first converting to List<Dictionary<>>, which seemed a little awkward and needless. Another treated all values like a string. For one reason or another I couldn't really get behind any of them, so I decided to roll my own, which is posted below. As you can see from reading the //TODO comments, it's incomplete in a few places. This code is already in production here, so it does "work" in the basic sense. The places where it's incomplete are places where we know our production data won't currently hit it (no timespans or byte arrays in the db). The reason I'm posting here is that I feel like this can be a little better, and I'd like help finishing and improving this code. Any input welcome. public static class JSONHelper { public static string FromDataTable(DataTable dt) { string rowDelimiter = ""; StringBuilder result = new StringBuilder("["); foreach (DataRow row in dt.Rows) { result.Append(rowDelimiter); result.Append(FromDataRow(row)); rowDelimiter = ","; } result.Append("]"); return result.ToString(); } public static string FromDataRow(DataRow row) { DataColumnCollection cols = row.Table.Columns; string colDelimiter = ""; StringBuilder result = new StringBuilder("{"); for (int i = 0; i < cols.Count; i++) { // use index rather than foreach, so we can use the index for both the row and cols collection result.Append(colDelimiter).Append("\"") .Append(cols[i].ColumnName).Append("\":") .Append(JSONValueFromDataRowObject(row[i], cols[i].DataType)); colDelimiter = ","; } result.Append("}"); return result.ToString(); } // possible types: // http://msdn.microsoft.com/en-us/library/system.data.datacolumn.datatype(VS.80).aspx private static Type[] numeric = new Type[] {typeof(byte), typeof(decimal), typeof(double), typeof(Int16), typeof(Int32), typeof(SByte), typeof(Single), typeof(UInt16), typeof(UInt32), typeof(UInt64)}; // I don't want to rebuild this value for every date cell in the table private static long EpochTicks = new DateTime(1970, 1, 1).Ticks; private static string JSONValueFromDataRowObject(object value, Type DataType) { // null if (value == DBNull.Value) return "null"; // numeric if (Array.IndexOf(numeric, DataType) > -1) return value.ToString(); // TODO: eventually want to use a stricter format // boolean if (DataType == typeof(bool)) return ((bool)value) ? "true" : "false"; // date -- see http://weblogs.asp.net/bleroy/archive/2008/01/18/dates-and-json.aspx if (DataType == typeof(DateTime)) return "\"\\/Date(" + new TimeSpan(((DateTime)value).ToUniversalTime().Ticks - EpochTicks).TotalMilliseconds.ToString() + ")\\/\""; // TODO: add Timespan support // TODO: add Byte[] support //TODO: this would be _much_ faster with a state machine // string/char return "\"" + value.ToString().Replace(@"\", @"\\").Replace(Environment.NewLine, @"\n").Replace("\"", @"\""") + "\""; } }
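    For what it's worth, here is a tiny usage sketch with made-up table and column names, just to show the shape of the output (and a convenient place to exercise the TimeSpan/byte[] branches once they exist):

        using System;
        using System.Data;

        class JSONHelperDemo
        {
            static void Main()
            {
                DataTable dt = new DataTable("People");
                dt.Columns.Add("Id", typeof(int));
                dt.Columns.Add("Name", typeof(string));
                dt.Columns.Add("Born", typeof(DateTime));

                dt.Rows.Add(1, "Ada \"the first\"", new DateTime(1815, 12, 10));
                dt.Rows.Add(2, DBNull.Value, new DateTime(1912, 6, 23));

                // Expected shape (dates use the \/Date(ms)\/ convention the helper emits):
                // [{"Id":1,"Name":"Ada \"the first\"","Born":"\/Date(...)\/"},
                //  {"Id":2,"Name":null,"Born":"\/Date(...)\/"}]
                Console.WriteLine(JSONHelper.FromDataTable(dt));
            }
        }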

    Read the article

  • Prevent lazy loading in NHibernate

    - by Ciaran
    Hi, I'm storing some blobs in my database, so I have a Document table and a DocumentContent table. Document contains a filename, description etc and has a DocumentContent property. I have a Silverlight client, so I don't want to load up and send the DocumentContent to the client unless I explicity ask for it, but I'm having trouble doing this. I've read the blog post by Davy Brion. I have tried placing lazy=false in my config and removing the virtual access modifier but have had no luck with it as yet. Every time I do a Session.Get(id), the DocumentContent is retrieved via an outer join. I only want this property to be populated when I explicity join onto this table and ask for it. Any help is appreciated. My NHibernate mapping is as follows: <?xml version="1.0" encoding="utf-8" ?> <hibernate-mapping xmlns="urn:nhibernate-mapping-2.2" assembly="Jrm.Model" namespace="Jrm.Model"> <class name="JrmDocument" lazy="false"> <id name="JrmDocumentID"> <generator class="native" /> </id> <property name="FileName"/> <property name="Description"/> <many-to-one name="DocumentContent" class="JrmDocumentContent" unique="true" column="JrmDocumentContentID" lazy="false"/> </class> <class name="JrmDocumentContent" lazy="false"> <id name="JrmDocumentContentID"> <generator class="native" /> </id> <property name="Content" type="BinaryBlob" lazy="false"> <column name="FileBytes" sql-type="varbinary(max)"/> </property> </class> </hibernate-mapping> and my classes are: [DataContract] public class JrmDocument : ModelBase { private int jrmDocumentID; private JrmDocumentContent documentContent; private long maxFileSize; private string fileName; private string description; public JrmDocument() { } public JrmDocument(string fileName, long maxFileSize) { DocumentContent = new JrmDocumentContent(File.ReadAllBytes(fileName)); FileName = new FileInfo(fileName).Name; } [DataMember] public virtual int JrmDocumentID { get { return jrmDocumentID; } set { jrmDocumentID = value; OnPropertyChanged("JrmDocumentID"); } } [DataMember] public JrmDocumentContent DocumentContent { get { return documentContent; } set { documentContent = value; OnPropertyChanged("DocumentContent"); } } [DataMember] public virtual long MaxFileSize { get { return maxFileSize; } set { maxFileSize = value; OnPropertyChanged("MaxFileSize"); } } [DataMember] public virtual string FileName { get { return fileName; } set { fileName = value; OnPropertyChanged("FileName"); } } [DataMember] public virtual string Description { get { return description; } set { description = value; OnPropertyChanged("Description"); } } } [DataContract] public class JrmDocumentContent : ModelBase { private int jrmDocumentContentID; private byte[] content; public JrmDocumentContent() { } public JrmDocumentContent(byte[] bytes) { Content = bytes; } [DataMember] public int JrmDocumentContentID { get { return jrmDocumentContentID; } set { jrmDocumentContentID = value; OnPropertyChanged("JrmDocumentContentID"); } } [DataMember] public byte[] Content { get { return content; } set { content = value; OnPropertyChanged("Content"); } } }
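    One direction worth trying, sketched below: leave lazy at its default instead of forcing lazy="false" on the class and the many-to-one, map the association with fetch="select", and make the members NHibernate intercepts virtual so it can build a proxy. The class is just the question's JrmDocumentContent with those changes applied; whether this fits depends on the rest of the mapping, so treat it as an assumption rather than a confirmed fix.

        // Sketch only: assumes the mapping drops lazy="false" (class and many-to-one)
        // and uses fetch="select", so the blob is only selected when Content is touched.
        using System.Runtime.Serialization;

        [DataContract]
        public class JrmDocumentContent : ModelBase
        {
            private int jrmDocumentContentID;
            private byte[] content;

            [DataMember]
            public virtual int JrmDocumentContentID
            {
                get { return jrmDocumentContentID; }
                set { jrmDocumentContentID = value; OnPropertyChanged("JrmDocumentContentID"); }
            }

            // virtual lets the runtime proxy defer loading the blob until first access.
            [DataMember]
            public virtual byte[] Content
            {
                get { return content; }
                set { content = value; OnPropertyChanged("Content"); }
            }
        }

    Note that serializing the parent JrmDocument for the Silverlight client will still touch the DocumentContent property and pull the blob in, so the service layer has to avoid exposing that property when only the metadata is wanted.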

    Read the article

  • Web Services C#, sending notifications to clients

    - by Diode
    I want to send a notification (say a string) to subscribers(subscribers ip addresses are in a database on the server side) by calling another method. when ever I call that method the output becomes error-some. [WebMethod] public string GetGroupPath(string emailAddress, string password, string ipAddress) { //SqlDataAdapter dbadapter = null; DataSet returnDS = new DataSet(); string groupName = null; string groupPath = null; SqlConnection dbconn = new SqlConnection("Server = localhost;Database=server;User ID = admin;Password = password;Trusted_Connection=false;"); dbconn.Open(); SqlCommand cmd = new SqlCommand(); string getGroupName = "select users.groupname from users where emailaddress = "+"'"+ emailAddress+"'"+ " and "+ "users.password = " + "'" +password+"'"; cmd.CommandText = getGroupName; cmd.Connection = dbconn; SqlDataReader reader = null; try { reader = cmd.ExecuteReader(); while (reader.Read()) { groupName = reader["groupname"].ToString(); } } catch (Exception) { groupPath = "Invalied"; } dbconn.Close(); dbconn.Open(); if (groupName != null) { string getPath = "select groups.grouppath from groups where groupname = " + "'" + groupName + "'"; cmd.CommandText = getPath; cmd.Connection = dbconn; try { reader = cmd.ExecuteReader(); while (reader.Read()) { groupPath = reader["grouppath"].ToString(); } } catch { groupPath = "Invalied"; } } else groupPath = "Invalied"; dbconn.Close(); if (groupPath != "Invalied") { dbconn.Open(); string getPath = "update users set users.ipaddress = "+"'"+ipAddress+"'"+" where users.emailaddress = " + "'" + emailAddress + "'"; cmd.CommandText = getPath; cmd.Connection = dbconn; cmd.ExecuteNonQuery(); dbconn.Close(); } NotifyUsers(); //NotifyUsers nu = new NotifyUsers(); //List<string> ipList = new List<string>(); //ipList.Add("192.168.56.1"); //nu.Notify(); return groupPath; } private void NotifyUsers() { Socket sock = new Socket(AddressFamily.InterNetwork, SocketType.Dgram, ProtocolType.Udp); byte[] ipb = Encoding.ASCII.GetBytes("255.255.255.255"); IPAddress ipAddress = new IPAddress(ipb); IPEndPoint endPoint = new IPEndPoint(ipAddress, 15000); string notification = "new_update"; byte[] sendBuffer = Encoding.ASCII.GetBytes(notification); sock.SendTo(sendBuffer, endPoint); sock.Close(); } This is what has to be basically done. in the server side I have a listening thread and it gets notification when the server sends data( assume for now the database contains client ip address). then ever I call the web method it gives a error "invalid IPAddress" atline byte[] ipb = Encoding.ASCII.GetBytes("255.255.255.255"); thank you :) since this is my first ever post please be kind enough to give me a better feedback too :) thaks
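    The "invalid IPAddress" error is thrown by the IPAddress(byte[]) constructor, which expects the raw 4- or 16-byte address rather than the ASCII text "255.255.255.255". A minimal sketch of the sender is below; the port and payload come from the question, the class name is made up, and whether limited broadcast is the right delivery mechanism at all is left as an assumption.

        using System.Net;
        using System.Net.Sockets;
        using System.Text;

        static class Notifier
        {
            public static void NotifyUsers()
            {
                using (Socket sock = new Socket(AddressFamily.InterNetwork,
                                                SocketType.Dgram, ProtocolType.Udp))
                {
                    // 255.255.255.255 is the limited-broadcast address; the socket must
                    // be told explicitly that broadcasting is allowed.
                    sock.SetSocketOption(SocketOptionLevel.Socket,
                                         SocketOptionName.Broadcast, true);

                    IPEndPoint endPoint = new IPEndPoint(IPAddress.Broadcast, 15000);
                    byte[] sendBuffer = Encoding.ASCII.GetBytes("new_update");
                    sock.SendTo(sendBuffer, endPoint);
                }
            }
        }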

    Read the article

  • e2fsck extremely slow, although enough memory exists

    - by kaefert
    I've got this external USB-Disk: kaefert@blechmobil:~$ lsusb -s 2:3 Bus 002 Device 003: ID 0bc2:3320 Seagate RSS LLC As can be seen in this dmesg output, there are some problems that prevents that disk from beeing mounted: kaefert@blechmobil:~$ dmesg | grep sdb [ 114.474342] sd 5:0:0:0: [sdb] 732566645 4096-byte logical blocks: (3.00 TB/2.72 TiB) [ 114.475089] sd 5:0:0:0: [sdb] Write Protect is off [ 114.475092] sd 5:0:0:0: [sdb] Mode Sense: 43 00 00 00 [ 114.475959] sd 5:0:0:0: [sdb] Write cache: enabled, read cache: enabled, doesn't support DPO or FUA [ 114.477093] sd 5:0:0:0: [sdb] 732566645 4096-byte logical blocks: (3.00 TB/2.72 TiB) [ 114.501649] sdb: sdb1 [ 114.502717] sd 5:0:0:0: [sdb] 732566645 4096-byte logical blocks: (3.00 TB/2.72 TiB) [ 114.504354] sd 5:0:0:0: [sdb] Attached SCSI disk [ 116.804408] EXT4-fs (sdb1): ext4_check_descriptors: Checksum for group 3976 failed (47397!=61519) [ 116.804413] EXT4-fs (sdb1): group descriptors corrupted! So I went and fired up my favorite partition manager - gparted, and told it to verify and repair the partition sdb1. This made gparted call e2fsck (version 1.42.4 (12-Jun-2012)) e2fsck -f -y -v /dev/sdb1 Although gparted called e2fsck with the "-v" option, sadly it doesn't show me the output of my e2fsck process (bugreport https://bugzilla.gnome.org/show_bug.cgi?id=467925 ) I started this whole thing on Sunday (2012-11-04_2200) evening, so about 48 hours ago, this is what htop says about it now (2012-11-06-1900): PID USER PRI NI VIRT RES SHR S CPU% MEM% TIME+ Command 3704 root 39 19 1560M 1166M 768 R 98.0 19.5 42h56:43 e2fsck -f -y -v /dev/sdb1 Now I found a few posts on the internet that discuss e2fsck running slow, for example: http://gparted-forum.surf4.info/viewtopic.php?id=13613 where they write that its a good idea to see if the disk is just that slow because maybe its damaged, and I think these outputs tell me that this is not the case in my case: kaefert@blechmobil:~$ sudo hdparm -tT /dev/sdb /dev/sdb: Timing cached reads: 3562 MB in 2.00 seconds = 1783.29 MB/sec Timing buffered disk reads: 82 MB in 3.01 seconds = 27.26 MB/sec kaefert@blechmobil:~$ sudo hdparm /dev/sdb /dev/sdb: multcount = 0 (off) readonly = 0 (off) readahead = 256 (on) geometry = 364801/255/63, sectors = 5860533160, start = 0 However, although I can read quickly from that disk, this disk speed doesn't seem to be used by e2fsck, considering tools like gkrellm or iotop or this: kaefert@blechmobil:~$ iostat -x Linux 3.2.0-2-amd64 (blechmobil) 2012-11-06 _x86_64_ (2 CPU) avg-cpu: %user %nice %system %iowait %steal %idle 14,24 47,81 14,63 0,95 0,00 22,37 Device: rrqm/s wrqm/s r/s w/s rkB/s wkB/s avgrq-sz avgqu-sz await r_await w_await svctm %util sda 0,59 8,29 2,42 5,14 43,17 160,17 53,75 0,30 39,80 8,72 54,42 3,95 2,99 sdb 137,54 5,48 9,23 0,20 587,07 22,73 129,35 0,07 7,70 7,51 16,18 2,17 2,04 Now I researched a little bit on how to find out what e2fsck is doing with all that processor time, and I found the tool strace, which gives me this: kaefert@blechmobil:~$ sudo strace -p3704 lseek(4, 41026998272, SEEK_SET) = 41026998272 write(4, "\212\354K[_\361\3nl\212\245\352\255jR\303\354\312Yv\334p\253r\217\265\3567\325\257\3766"..., 4096) = 4096 lseek(4, 48404766720, SEEK_SET) = 48404766720 read(4, "\7t\260\366\346\337\304\210\33\267j\35\377'\31f\372\252\ffU\317.y\211\360\36\240c\30`\34"..., 4096) = 4096 lseek(4, 41027002368, SEEK_SET) = 41027002368 write(4, "\232]7Ws\321\352\t\1@[+5\263\334\276{\343zZx\352\21\316`1\271[\202\350R`"..., 4096) = 4096 lseek(4, 
48404770816, SEEK_SET) = 48404770816 read(4, "\17\362r\230\327\25\346//\210H\v\311\3237\323K\304\306\361a\223\311\324\272?\213\tq \370\24"..., 4096) = 4096 lseek(4, 41027006464, SEEK_SET) = 41027006464 write(4, "\367yy>x\216?=\324Z\305\351\376&\25\244\210\271\22\306}\276\237\370(\214\205G\262\360\257#"..., 4096) = 4096 lseek(4, 48404774912, SEEK_SET) = 48404774912 read(4, "\365\25\0\21|T\0\21}3t_\272\373\222k\r\177\303\1\201\261\221$\261B\232\3142\21U\316"..., 4096) = 4096 ^CProcess 3704 detached around 16 of these lines every second, so 4 read and 4 write operations every second, which I don't consider to be a lot.. And finally, my question: Will this process ever finish? If those numbers from fseek (48404774912) represent bytes, that would be something like 45 gigabytes, with this beeing a 3 terrabyte disk, which would give me 134 days to go, if the speed stays constant, and he scans the disk like this completly and only once. Do you have some advice for me? I have most of the data on that disk elsewhere, but I've put a lot of hours into sorting and merging it to this disk, so I would prefer to getting this disk up and running again, without formatting it anew. I don't think that the hardware is damaged since the disk is only a few months and since I can't see any I/O errors in the dmesg output. UPDATE: I just looked at the strace output again (2012-11-06_2300), now it looks like this: lseek(4, 1419860611072, SEEK_SET) = 1419860611072 read(4, "3#\f\2447\335\0\22A\355\374\276j\204'\207|\217V|\23\245[\7VP\251\242\276\207\317:"..., 4096) = 4096 lseek(4, 43018145792, SEEK_SET) = 43018145792 write(4, "]\206\231\342Y\204-2I\362\242\344\6R\205\361\324\177\265\317C\334V\324\260\334\275t=\10F."..., 4096) = 4096 lseek(4, 1419860615168, SEEK_SET) = 1419860615168 read(4, "\262\305\314Y\367\37x\326\245\226\226\320N\333$s\34\204\311\222\7\315\236\336\300TK\337\264\236\211n"..., 4096) = 4096 lseek(4, 43018149888, SEEK_SET) = 43018149888 write(4, "\271\224m\311\224\25!I\376\16;\377\0\223H\25Yd\201Y\342\r\203\271\24eG<\202{\373V"..., 4096) = 4096 lseek(4, 1419860619264, SEEK_SET) = 1419860619264 read(4, ";d\360\177\n\346\253\210\222|\250\352T\335M\33\260\320\261\7g\222P\344H?t\240\20\2548\310"..., 4096) = 4096 lseek(4, 43018153984, SEEK_SET) = 43018153984 write(4, "\360\252j\317\310\251G\227\335{\214`\341\267\31Y\202\360\v\374\307oq\3063\217Z\223\313\36D\211"..., 4096) = 4096 So this number of the lseeks before the reads, like 1419860619264 are already a lot bigger, standing for 1.29 terabytes if the numbers are bytes, so it doesn't seem to be a linear progress on a big scale, maybe there are only some areas that need work, that have big gaps in between them. (times are in CET)

    Read the article

  • nginx bad gateway 502 with mono fastcgi

    - by Bradley Lederholz Leatherwood
    Hello so I have been trying to get my website to run on mono (on ubuntu server) and I have followed these tutorials almost to the letter: However when my directory is not blank fastcgi logs reveal this: Notice Beginning to receive records on connection. Error Failed to process connection. Reason: Exception has been thrown by the target of an invocation. I am not really sure what this means, and depending on what I do I can get another error that tells me the resource cannot be found: The resource cannot be found. Description: HTTP 404. The resource you are looking for (or one of its dependencies) could have been removed, had its name changed, or is temporarily unavailable. Please review the following URL and make sure that it is spelled correctly. Requested URL: /Default.aspx/ Version information: Mono Runtime Version: 2.10.8 (tarball Thu Aug 16 23:46:03 UTC 2012) ASP.NET Version: 4.0.30319.1 If I should provide some more information please let me know. Edit: I am now getting a nginx gateway error. My nginx configuration file looks like this: server { listen 2194; server_name localhost; access_log $HOME/WWW/nginx.log; location / { root $HOME/WWW/dev/; index index.html index.html default.aspx Default.aspx Index.cshtml; fastcgi_index Views/Home/; fastcgi_pass 127.0.0.1:8000; include /etc/nginx/fastcgi_params; } } Running the entire thing with xsp4 I have discovered what the "Exception has been thrown by the target of an invocation." Handling exception type TargetInvocationException Message is Exception has been thrown by the target of an invocation. IsTerminating is set to True System.Reflection.TargetInvocationException: Exception has been thrown by the target of an invocation. Server stack trace: at System.Reflection.MonoCMethod.Invoke (System.Object obj, BindingFlags invokeAttr, System.Reflection.Binder binder, System.Object[] parameters, System.Globalization.CultureInfo culture) [0x00000] in :0 at System.Reflection.MethodBase.Invoke (System.Object obj, System.Object[] parameters) [0x00000] in :0 at System.Runtime.Serialization.ObjectRecord.LoadData (System.Runtime.Serialization.ObjectManager manager, ISurrogateSelector selector, StreamingContext context) [0x00000] in :0 at System.Runtime.Serialization.ObjectManager.DoFixups () [0x00000] in :0 at System.Runtime.Serialization.Formatters.Binary.ObjectReader.ReadNextObject (System.IO.BinaryReader reader) [0x00000] in :0 at System.Runtime.Serialization.Formatters.Binary.ObjectReader.ReadObjectGraph (BinaryElement elem, System.IO.BinaryReader reader, Boolean readHeaders, System.Object& result, System.Runtime.Remoting.Messaging.Header[]& headers) [0x00000] in :0 at System.Runtime.Serialization.Formatters.Binary.BinaryFormatter.NoCheckDeserialize (System.IO.Stream serializationStream, System.Runtime.Remoting.Messaging.HeaderHandler handler) [0x00000] in :0 at System.Runtime.Serialization.Formatters.Binary.BinaryFormatter.Deserialize (System.IO.Stream serializationStream) [0x00000] in :0 at System.Runtime.Remoting.RemotingServices.DeserializeCallData (System.Byte[] array) [0x00000] in :0 at (wrapper xdomain-dispatch) System.AppDomain:DoCallBack (object,byte[]&,byte[]&) Exception rethrown at [0]: --- System.ArgumentException: Couldn't bind to method 'SetHostingEnvironment'. 
at System.Delegate.GetCandidateMethod (System.Type type, System.Type target, System.String method, BindingFlags bflags, Boolean ignoreCase, Boolean throwOnBindFailure) [0x00000] in :0 at System.Delegate.CreateDelegate (System.Type type, System.Type target, System.String method, Boolean ignoreCase, Boolean throwOnBindFailure) [0x00000] in :0 at System.Delegate.CreateDelegate (System.Type type, System.Type target, System.String method) [0x00000] in :0 at System.DelegateSerializationHolder+DelegateEntry.DeserializeDelegate (System.Runtime.Serialization.SerializationInfo info) [0x00000] in :0 at System.DelegateSerializationHolder..ctor (System.Runtime.Serialization.SerializationInfo info, StreamingContext ctx) [0x00000] in :0 at (wrapper managed-to-native) System.Reflection.MonoCMethod:InternalInvoke (System.Reflection.MonoCMethod,object,object[],System.Exception&) at System.Reflection.MonoCMethod.Invoke (System.Object obj, BindingFlags invokeAttr, System.Reflection.Binder binder, System.Object[] parameters, System.Globalization.CultureInfo culture) [0x00000] in :0 --- End of inner exception stack trace --- at (wrapper xdomain-invoke) System.AppDomain:DoCallBack (System.CrossAppDomainDelegate) at (wrapper remoting-invoke-with-check) System.AppDomain:DoCallBack (System.CrossAppDomainDelegate) at System.Web.Hosting.ApplicationHost.CreateApplicationHost (System.Type hostType, System.String virtualDir, System.String physicalDir) [0x00000] in :0 at Mono.WebServer.VPathToHost.CreateHost (Mono.WebServer.ApplicationServer server, Mono.WebServer.WebSource webSource) [0x00000] in :0 at Mono.WebServer.XSP.Server.RealMain (System.String[] args, Boolean root, IApplicationHost ext_apphost, Boolean quiet) [0x00000] in :0 at Mono.WebServer.XSP.Server.Main (System.String[] args) [0x00000] in :0

    Read the article

  • .NET GDI+ image size - file codec limitations

    - by roygbiv
    Is there a limit on the size of image that can be encoded using the image file codecs available from .NET? I'm trying to encode images 4GB in size, but it simply does not work (or does not work properly i.e. writes out an unreadable file) with .bmp, .jpg, .png or the .tif encoders. When I lower the image size to < 2GB it does work with the .jpg but not the .bmp, .tif or .png. My next attempt would be to try libtiff because I know tiff files are meant for large images. What is a good file format for large images? or am I just hitting the file format limitations? Random r = new Random((int)DateTime.Now.Ticks); int width = 64000; int height = 64000; int stride = (width % 4) > 0 ? width + (width % 4) : width; UIntPtr dataSize = new UIntPtr((ulong)stride * (ulong)height); IntPtr p = Program.VirtualAlloc(IntPtr.Zero, dataSize, Program.AllocationType.COMMIT | Program.AllocationType.RESERVE, Program.MemoryProtection.READWRITE); Bitmap bmp = new Bitmap(width, height, stride, PixelFormat.Format8bppIndexed, p); BitmapData bd = bmp.LockBits(new Rectangle(0, 0, bmp.Width, bmp.Height), ImageLockMode.ReadWrite, bmp.PixelFormat); ColorPalette cp = bmp.Palette; for (int i = 0; i < cp.Entries.Length; i++) { cp.Entries[i] = Color.FromArgb(i, i, i); } bmp.Palette = cp; unsafe { for (int y = 0; y < bd.Height; y++) { byte* row = (byte*)bd.Scan0.ToPointer() + (y * bd.Stride); for (int x = 0; x < bd.Width; x++) { *(row + x) = (byte)r.Next(256); } } } bmp.UnlockBits(bd); bmp.Save(@"c:\test.jpg", ImageFormat.Jpeg); bmp.Dispose(); Program.VirtualFree(p, UIntPtr.Zero, 0x8000); I have also tried using a pinned GC memory region, but this is limited to < 2GB. Random r = new Random((int)DateTime.Now.Ticks); int bytesPerPixel = 4; int width = 4000; int height = 4000; int padding = 4 - ((width * bytesPerPixel) % 4); padding = (padding == 4 ? 0 : padding); int stride = (width * bytesPerPixel) + padding; UInt32[] pixels = new UInt32[width * height]; GCHandle gchPixels = GCHandle.Alloc(pixels, GCHandleType.Pinned); using (Bitmap bmp = new Bitmap(width, height, stride, PixelFormat.Format32bppPArgb, gchPixels.AddrOfPinnedObject())) { for (int y = 0; y < height; y++) { int row = (y * width); for (int x = 0; x < width; x++) { pixels[row + x] = (uint)r.Next(); } } bmp.Save(@"c:\test.jpg", ImageFormat.Jpeg); } gchPixels.Free();
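    The ceilings that are easy to check up front belong to the file formats themselves: JPEG stores image dimensions in 16-bit fields (65,535 pixels per side), and classic BMP and TIFF keep sizes and offsets in 32-bit fields, so neither can describe pixel data at or beyond 4 GiB. The 2 GB threshold observed above would also be consistent with signed 32-bit arithmetic inside the GDI+ encoders, though that part is an assumption. A small pre-flight check along those lines (not a fix for the encoder itself):

        using System;

        static class ImageSizeCheck
        {
            const long TwoGiB  = 2L * 1024 * 1024 * 1024;
            const long FourGiB = 4L * 1024 * 1024 * 1024;

            public static void Check(long width, long height, long bytesPerPixel, string ext)
            {
                long rawSize = width * height * bytesPerPixel;

                if ((ext == ".jpg" || ext == ".jpeg") && (width > 65535 || height > 65535))
                    Console.WriteLine("JPEG cannot describe a side longer than 65535 px.");

                if ((ext == ".bmp" || ext == ".tif" || ext == ".tiff") && rawSize >= FourGiB)
                    Console.WriteLine(ext + " uses 32-bit size fields; keep the data under 4 GiB.");

                if (rawSize >= TwoGiB)
                    Console.WriteLine("Over 2 GiB of pixel data; GDI+ encoders may give up here.");
            }
        }

        // Example: ImageSizeCheck.Check(64000, 64000, 1, ".bmp");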

    Read the article

  • can't find what's wrong with my code :(

    - by blood
    the point of my code is for me to press f1 and it will scan 500 pixels down and 500 pixels and put them in a array (it just takes a box that is 500 by 500 of the screen). then after that when i hit end it will click on only on the color black or... what i set it to. anyway it has been doing odd stuff and i can't find why: #include <iostream> #include <windows.h> using namespace std; COLORREF rgb[499][499]; HDC hDC = GetDC(HWND_DESKTOP); POINT main_coner; BYTE rVal; BYTE gVal; BYTE bVal; int red; int green; int blue; int ff = 0; int main() { for(;;) { if(GetAsyncKeyState(VK_F1)) { cout << "started"; int a1 = 0; int a2 = 0; GetCursorPos(&main_coner); int x = main_coner.x; int y = main_coner.y; for(;;) { //cout << a1 << "___" << a2 << "\n"; rgb[a1][a2] = GetPixel(hDC, x, y); a1++; x++; if(x > main_coner.x + 499) { y++; x = main_coner.x; a1 = 0; a2++; } if(y > main_coner.y + 499) { ff = 1; break; } } cout << "done"; break; } if(ff == 1) break; } for(;;) { if(GetAsyncKeyState(VK_END)) { GetCursorPos(&main_coner); int x = main_coner.x; int y = main_coner.y; int a1 = -1; int a2 = -1; for(;;) { x++; a1++; rVal = GetRValue(rgb[a1][a2]); gVal = GetGValue(rgb[a1][a2]); bVal = GetBValue(rgb[a1][a2]); red = (int)rVal; // get the colors into __int8 green = (int)gVal; // get the colors into __int8 blue = (int)bVal; // get the colors into __int8 if(red == 0 && green == 0 && blue == 0) { SetCursorPos(main_coner.x + x, main_coner.y + y); mouse_event(MOUSEEVENTF_LEFTDOWN, 0, 0, 0, 0); Sleep(10); mouse_event(MOUSEEVENTF_LEFTUP, 0, 0, 0, 0); Sleep(100); } if(x > main_coner.x + 499) { a1 = 0; a2++; } if(y > main_coner.y + 499) { Sleep(100000000000); break; } if(GetAsyncKeyState(VK_CONTROL)) { Sleep(100000); break; } } } } for(;;) { if(GetAsyncKeyState(VK_END)) { break; } } return 0; } anyone see what's wrong with my code :( (feel free to add tags)

    Read the article

  • C++ Unions bit fields task

    - by learningtolive
    Can somebody clear me out why would I use union and with what purpose the same address for the cin'ed variable and bit field (task from the Schildts C++ book)? In other words why would I use union for : char ch; struct byte bit; // Display the ASCII code in binary for characters. #include <iostream> #include <conio.h> using namespace std; // a bit field that will be decoded struct byte { unsigned a : 1; unsigned b : 1; unsigned c : 1; unsigned d : 1; unsigned e : 1; unsigned f : 1; unsigned g : 1; unsigned h : 1; }; union bits { char ch; struct byte bit; } ascii ; void disp_bits(bits b); int main() { do { cin >> ascii.ch; cout << ": "; disp_bits(ascii); } while(ascii.ch!='q'); // quit if q typed return 0; } // Display the bit pattern for each character. void disp_bits(bits b) { if(b.bit.h) cout << "1 "; else cout << "0 "; if(b.bit.g) cout << "1 "; else cout << "0 "; if(b.bit.f) cout << "1 "; else cout << "0 "; if(b.bit.e) cout << "1 "; else cout << "0 "; if(b.bit.d) cout << "1 "; else cout << "0 "; if(b.bit.c) cout << "1 "; else cout << "0 "; if(b.bit.b) cout << "1 "; else cout << "0 "; if(b.bit.a) cout << "1 "; else cout << "0 "; cout << "\n"; }

    Read the article

  • Incorrect data when passing a pointer to a list of pointers to a function (C++)

    - by Phil Elm
    I'm writing code for combining data received over multiple sources. When the objects received (I'll call them MyPacket for now), they are stored in a standard list. However, whenever I reference the payload size of a partial MyPacket, the value shows up as 1 instead of the intended size. Here's the function code: MyPacket* CombinePackets(std::list<MyPacket*>* packets, uint8* current_packet){ uint32 total_payload_size = 0; if(packets->size() <= 0) return NULL; //For now. std::list<MyPacket*>::iterator it = packets->begin(); //Some minor code here, not relevant to the problem. for(uint8 index = 0; index < packets->size(); index++){ //(*it)->GetPayloadSize() returns 1 when it should show 1024. I've tried directly accessing the variable and more, but I just can't get it to work. total_payload_size += (*it)->GetPayloadSize(); cout << "Adding to total payload size value: " << (*it)->GetPayloadSize() << endl; std::advance(it,1); } MyPacket* packet = new MyPacket(); //Byte is just a typedef'd unsigned char. packet->payload = (byte) calloc(total_payload_size, sizeof(byte)); packet->payload_size = total_payload_size; it = packets->begin(); //Go back to the beginning again. uint32 big_payload_index = 0; for(uint8 index = 0; index < packets->size(); index++){ if(current_packet != NULL) *current_packet = index; for(uint32 payload_index = 0; payload_index < (*it)->GetPayloadSize(); payload_index++){ packet->payload[big_payload_index] = (*it)->payload[payload_index]; big_payload_index++; } std::advance(it,1); } return packet; } //Calling code std::list<MyPacket*> received = std::list<MyPacket*>(); //The code that fills it is here. std::list<MyPacket*>::iterator it = received.begin(); cout << (*it)->GetPayloadSize() << endl; // Outputs 1024 correctly! MyPacket* final = CombinePackets(&received,NULL); cout << final->GetPayloadSize() << endl; //Outputs 181, which happens to be the number of elements in the received list. So, as you can see above, when I reference (*it)-GetPayloadSize(), it returns 1 instead of the intended 1024. Can anyone see the problem and if so, do you have an idea on how to fix this? I've spent 4 hours searching and trying new solutions, but they all keep returning 1... EDIT:

    Read the article

  • External HDD USB 3.0 failure

    - by Philip
    [ 2560.376113] usb 9-1: new high-speed USB device number 2 using xhci_hcd [ 2560.376186] usb 9-1: Device not responding to set address. [ 2560.580136] usb 9-1: Device not responding to set address. [ 2560.784104] usb 9-1: device not accepting address 2, error -71 [ 2560.840127] hub 9-0:1.0: unable to enumerate USB device on port 1 [ 2561.080182] usb 10-1: new SuperSpeed USB device number 5 using xhci_hcd [ 2566.096163] usb 10-1: device descriptor read/8, error -110 [ 2566.200096] usb 10-1: new SuperSpeed USB device number 5 using xhci_hcd [ 2571.216175] usb 10-1: device descriptor read/8, error -110 [ 2571.376138] hub 10-0:1.0: unable to enumerate USB device on port 1 [ 2571.744174] usb 10-1: new SuperSpeed USB device number 7 using xhci_hcd [ 2576.760116] usb 10-1: device descriptor read/8, error -110 [ 2576.864074] usb 10-1: new SuperSpeed USB device number 7 using xhci_hcd [ 2581.880153] usb 10-1: device descriptor read/8, error -110 [ 2582.040123] hub 10-0:1.0: unable to enumerate USB device on port 1 [ 2582.224139] hub 9-0:1.0: unable to enumerate USB device on port 1 [ 2582.464177] usb 10-1: new SuperSpeed USB device number 9 using xhci_hcd [ 2587.480122] usb 10-1: device descriptor read/8, error -110 [ 2587.584079] usb 10-1: new SuperSpeed USB device number 9 using xhci_hcd [ 2592.600150] usb 10-1: device descriptor read/8, error -110 [ 2592.760134] hub 10-0:1.0: unable to enumerate USB device on port 1 [ 2593.128175] usb 10-1: new SuperSpeed USB device number 11 using xhci_hcd [ 2598.144183] usb 10-1: device descriptor read/8, error -110 [ 2598.248109] usb 10-1: new SuperSpeed USB device number 11 using xhci_hcd [ 2603.264171] usb 10-1: device descriptor read/8, error -110 [ 2603.480157] usb 10-1: new SuperSpeed USB device number 12 using xhci_hcd [ 2608.496162] usb 10-1: device descriptor read/8, error -110 [ 2608.600091] usb 10-1: new SuperSpeed USB device number 12 using xhci_hcd [ 2613.616166] usb 10-1: device descriptor read/8, error -110 [ 2613.832170] usb 10-1: new SuperSpeed USB device number 13 using xhci_hcd [ 2618.848135] usb 10-1: device descriptor read/8, error -110 [ 2618.952079] usb 10-1: new SuperSpeed USB device number 13 using xhci_hcd [ 2623.968155] usb 10-1: device descriptor read/8, error -110 [ 2624.184176] usb 10-1: new SuperSpeed USB device number 14 using xhci_hcd [ 2629.200124] usb 10-1: device descriptor read/8, error -110 [ 2629.304075] usb 10-1: new SuperSpeed USB device number 14 using xhci_hcd [ 2634.320172] usb 10-1: device descriptor read/8, error -110 [ 2634.424135] hub 10-0:1.0: unable to enumerate USB device on port 1 [ 2634.776186] usb 10-1: new SuperSpeed USB device number 15 using xhci_hcd [ 2639.792105] usb 10-1: device descriptor read/8, error -110 [ 2639.896090] usb 10-1: new SuperSpeed USB device number 15 using xhci_hcd [ 2644.912172] usb 10-1: device descriptor read/8, error -110 [ 2645.128174] usb 10-1: new SuperSpeed USB device number 16 using xhci_hcd [ 2650.144160] usb 10-1: device descriptor read/8, error -110 [ 2650.248062] usb 10-1: new SuperSpeed USB device number 16 using xhci_hcd [ 2655.264120] usb 10-1: device descriptor read/8, error -110 [ 2655.480182] usb 10-1: new SuperSpeed USB device number 17 using xhci_hcd [ 2660.496121] usb 10-1: device descriptor read/8, error -110 [ 2660.600086] usb 10-1: new SuperSpeed USB device number 17 using xhci_hcd [ 2665.616167] usb 10-1: device descriptor read/8, error -110 [ 2665.832177] usb 10-1: new SuperSpeed USB device number 18 using xhci_hcd [ 2670.848110] usb 10-1: device 
descriptor read/8, error -110 [ 2670.952066] usb 10-1: new SuperSpeed USB device number 18 using xhci_hcd [ 2675.968081] usb 10-1: device descriptor read/8, error -110 [ 2676.072124] hub 10-0:1.0: unable to enumerate USB device on port 1 [ 2786.104531] xhci_hcd 0000:02:00.0: remove, state 4 [ 2786.104546] usb usb10: USB disconnect, device number 1 [ 2786.104686] xHCI xhci_drop_endpoint called for root hub [ 2786.104692] xHCI xhci_check_bandwidth called for root hub [ 2786.104942] xhci_hcd 0000:02:00.0: USB bus 10 deregistered [ 2786.105054] xhci_hcd 0000:02:00.0: remove, state 4 [ 2786.105065] usb usb9: USB disconnect, device number 1 [ 2786.105176] xHCI xhci_drop_endpoint called for root hub [ 2786.105181] xHCI xhci_check_bandwidth called for root hub [ 2786.109787] xhci_hcd 0000:02:00.0: USB bus 9 deregistered [ 2786.110134] xhci_hcd 0000:02:00.0: PCI INT A disabled [ 2794.268445] pci 0000:02:00.0: [1b73:1000] type 0 class 0x000c03 [ 2794.268483] pci 0000:02:00.0: reg 10: [mem 0x00000000-0x0000ffff] [ 2794.268689] pci 0000:02:00.0: PME# supported from D0 D3hot [ 2794.268700] pci 0000:02:00.0: PME# disabled [ 2794.276383] pci 0000:02:00.0: BAR 0: assigned [mem 0xd7800000-0xd780ffff] [ 2794.276398] pci 0000:02:00.0: BAR 0: set to [mem 0xd7800000-0xd780ffff] (PCI address [0xd7800000-0xd780ffff]) [ 2794.276419] pci 0000:02:00.0: no hotplug settings from platform [ 2794.276658] xhci_hcd 0000:02:00.0: enabling device (0000 -> 0002) [ 2794.276675] xhci_hcd 0000:02:00.0: PCI INT A -> GSI 16 (level, low) -> IRQ 16 [ 2794.276762] xhci_hcd 0000:02:00.0: setting latency timer to 64 [ 2794.276771] xhci_hcd 0000:02:00.0: xHCI Host Controller [ 2794.276913] xhci_hcd 0000:02:00.0: new USB bus registered, assigned bus number 9 [ 2794.395760] xhci_hcd 0000:02:00.0: irq 16, io mem 0xd7800000 [ 2794.396141] xHCI xhci_add_endpoint called for root hub [ 2794.396144] xHCI xhci_check_bandwidth called for root hub [ 2794.396195] hub 9-0:1.0: USB hub found [ 2794.396203] hub 9-0:1.0: 1 port detected [ 2794.396305] xhci_hcd 0000:02:00.0: xHCI Host Controller [ 2794.396371] xhci_hcd 0000:02:00.0: new USB bus registered, assigned bus number 10 [ 2794.396496] xHCI xhci_add_endpoint called for root hub [ 2794.396499] xHCI xhci_check_bandwidth called for root hub [ 2794.396547] hub 10-0:1.0: USB hub found [ 2794.396553] hub 10-0:1.0: 1 port detected [ 2798.004084] usb 1-3: new high-speed USB device number 8 using ehci_hcd [ 2798.140824] scsi21 : usb-storage 1-3:1.0 [ 2820.176116] usb 1-3: reset high-speed USB device number 8 using ehci_hcd [ 2824.000526] scsi 21:0:0:0: Direct-Access BUFFALO HD-PZU3 0001 PQ: 0 ANSI: 6 [ 2824.002263] sd 21:0:0:0: Attached scsi generic sg2 type 0 [ 2824.003617] sd 21:0:0:0: [sdb] 1953463728 512-byte logical blocks: (1.00 TB/931 GiB) [ 2824.005139] sd 21:0:0:0: [sdb] Write Protect is off [ 2824.005149] sd 21:0:0:0: [sdb] Mode Sense: 1f 00 00 08 [ 2824.009084] sd 21:0:0:0: [sdb] No Caching mode page present [ 2824.009094] sd 21:0:0:0: [sdb] Assuming drive cache: write through [ 2824.011944] sd 21:0:0:0: [sdb] No Caching mode page present [ 2824.011952] sd 21:0:0:0: [sdb] Assuming drive cache: write through [ 2824.049153] sdb: sdb1 [ 2824.051814] sd 21:0:0:0: [sdb] No Caching mode page present [ 2824.051821] sd 21:0:0:0: [sdb] Assuming drive cache: write through [ 2824.051825] sd 21:0:0:0: [sdb] Attached SCSI disk [ 2839.536624] usb 1-3: USB disconnect, device number 8 [ 2844.620178] usb 10-1: new SuperSpeed USB device number 2 using xhci_hcd [ 2844.640281] scsi22 : usb-storage 10-1:1.0 [ 
2850.326545] scsi 22:0:0:0: Direct-Access BUFFALO HD-PZU3 0001 PQ: 0 ANSI: 6 [ 2850.327560] sd 22:0:0:0: Attached scsi generic sg2 type 0 [ 2850.329561] sd 22:0:0:0: [sdb] 1953463728 512-byte logical blocks: (1.00 TB/931 GiB) [ 2850.329889] sd 22:0:0:0: [sdb] Write Protect is off [ 2850.329897] sd 22:0:0:0: [sdb] Mode Sense: 1f 00 00 08 [ 2850.330223] sd 22:0:0:0: [sdb] No Caching mode page present [ 2850.330231] sd 22:0:0:0: [sdb] Assuming drive cache: write through [ 2850.331414] sd 22:0:0:0: [sdb] No Caching mode page present [ 2850.331423] sd 22:0:0:0: [sdb] Assuming drive cache: write through [ 2850.384116] usb 10-1: USB disconnect, device number 2 [ 2850.392050] sd 22:0:0:0: [sdb] Unhandled error code [ 2850.392056] sd 22:0:0:0: [sdb] Result: hostbyte=DID_NO_CONNECT driverbyte=DRIVER_OK [ 2850.392061] sd 22:0:0:0: [sdb] CDB: Read(10): 28 00 00 00 00 00 00 00 08 00 [ 2850.392074] end_request: I/O error, dev sdb, sector 0 [ 2850.392079] quiet_error: 70 callbacks suppressed [ 2850.392082] Buffer I/O error on device sdb, logical block 0 [ 2850.392194] ldm_validate_partition_table(): Disk read failed. [ 2850.392271] Dev sdb: unable to read RDB block 0 [ 2850.392377] sdb: unable to read partition table [ 2850.392581] sd 22:0:0:0: [sdb] READ CAPACITY failed [ 2850.392584] sd 22:0:0:0: [sdb] Result: hostbyte=DID_NO_CONNECT driverbyte=DRIVER_OK [ 2850.392588] sd 22:0:0:0: [sdb] Sense not available. [ 2850.392613] sd 22:0:0:0: [sdb] Asking for cache data failed [ 2850.392617] sd 22:0:0:0: [sdb] Assuming drive cache: write through [ 2850.392621] sd 22:0:0:0: [sdb] Attached SCSI disk [ 2850.732182] usb 10-1: new SuperSpeed USB device number 3 using xhci_hcd [ 2850.752228] scsi23 : usb-storage 10-1:1.0 [ 2851.752709] scsi 23:0:0:0: Direct-Access BUFFALO HD-PZU3 0001 PQ: 0 ANSI: 6 [ 2851.754481] sd 23:0:0:0: Attached scsi generic sg2 type 0 [ 2851.756576] sd 23:0:0:0: [sdb] 1953463728 512-byte logical blocks: (1.00 TB/931 GiB) [ 2851.758426] sd 23:0:0:0: [sdb] Write Protect is off [ 2851.758436] sd 23:0:0:0: [sdb] Mode Sense: 1f 00 00 08 [ 2851.758779] sd 23:0:0:0: [sdb] No Caching mode page present [ 2851.758787] sd 23:0:0:0: [sdb] Assuming drive cache: write through [ 2851.759968] sd 23:0:0:0: [sdb] No Caching mode page present [ 2851.759977] sd 23:0:0:0: [sdb] Assuming drive cache: write through [ 2851.817710] sdb: sdb1 [ 2851.820562] sd 23:0:0:0: [sdb] No Caching mode page present [ 2851.820568] sd 23:0:0:0: [sdb] Assuming drive cache: write through [ 2851.820572] sd 23:0:0:0: [sdb] Attached SCSI disk [ 2852.060352] usb 10-1: reset SuperSpeed USB device number 3 using xhci_hcd [ 2852.076533] xhci_hcd 0000:02:00.0: xHCI xhci_drop_endpoint called with disabled ep f6b19060 [ 2852.076538] xhci_hcd 0000:02:00.0: xHCI xhci_drop_endpoint called with disabled ep f6b1908c [ 2852.196329] usb 10-1: reset SuperSpeed USB device number 3 using xhci_hcd [ 2852.212593] xhci_hcd 0000:02:00.0: xHCI xhci_drop_endpoint called with disabled ep f6b19060 [ 2852.212599] xhci_hcd 0000:02:00.0: xHCI xhci_drop_endpoint called with disabled ep f6b1908c [ 2852.456290] usb 10-1: reset SuperSpeed USB device number 3 using xhci_hcd [ 2852.472402] xhci_hcd 0000:02:00.0: xHCI xhci_drop_endpoint called with disabled ep f6b19060 [ 2852.472408] xhci_hcd 0000:02:00.0: xHCI xhci_drop_endpoint called with disabled ep f6b1908c [ 2852.624304] usb 10-1: reset SuperSpeed USB device number 3 using xhci_hcd [ 2852.640531] xhci_hcd 0000:02:00.0: xHCI xhci_drop_endpoint called with disabled ep f6b19060 [ 2852.640536] xhci_hcd 0000:02:00.0: 
xHCI xhci_drop_endpoint called with disabled ep f6b1908c [ 2852.772296] usb 10-1: reset SuperSpeed USB device number 3 using xhci_hcd [ 2852.788536] xhci_hcd 0000:02:00.0: xHCI xhci_drop_endpoint called with disabled ep f6b19060 [ 2852.788541] xhci_hcd 0000:02:00.0: xHCI xhci_drop_endpoint called with disabled ep f6b1908c [ 2852.920349] usb 10-1: reset SuperSpeed USB device number 3 using xhci_hcd [ 2852.936536] xhci_hcd 0000:02:00.0: xHCI xhci_drop_endpoint called with disabled ep f6b19060 [ 2852.936540] xhci_hcd 0000:02:00.0: xHCI xhci_drop_endpoint called with disabled ep f6b1908c [ 2853.072287] usb 10-1: reset SuperSpeed USB device number 3 using xhci_hcd [ 2853.088565] xhci_hcd 0000:02:00.0: xHCI xhci_drop_endpoint called with disabled ep f6b19060 [ 2853.088570] xhci_hcd 0000:02:00.0: xHCI xhci_drop_endpoint called with disabled ep f6b1908c [ 2884.176339] usb 10-1: reset SuperSpeed USB device number 3 using xhci_hcd [ 2884.192561] xhci_hcd 0000:02:00.0: xHCI xhci_drop_endpoint called with disabled ep f6b19060 [ 2884.192567] xhci_hcd 0000:02:00.0: xHCI xhci_drop_endpoint called with disabled ep f6b1908c [ 2884.320349] usb 10-1: reset SuperSpeed USB device number 3 using xhci_hcd [ 2884.336526] xhci_hcd 0000:02:00.0: xHCI xhci_drop_endpoint called with disabled ep f6b19060 [ 2884.336531] xhci_hcd 0000:02:00.0: xHCI xhci_drop_endpoint called with disabled ep f6b1908c [ 2884.468344] usb 10-1: reset SuperSpeed USB device number 3 using xhci_hcd [ 2884.484551] xhci_hcd 0000:02:00.0: xHCI xhci_drop_endpoint called with disabled ep f6b19060 [ 2884.484556] xhci_hcd 0000:02:00.0: xHCI xhci_drop_endpoint called with disabled ep f6b1908c [ 2884.612349] usb 10-1: reset SuperSpeed USB device number 3 using xhci_hcd [ 2884.628540] xhci_hcd 0000:02:00.0: xHCI xhci_drop_endpoint called with disabled ep f6b19060 [ 2884.628545] xhci_hcd 0000:02:00.0: xHCI xhci_drop_endpoint called with disabled ep f6b1908c [ 2884.756350] usb 10-1: reset SuperSpeed USB device number 3 using xhci_hcd [ 2884.772528] xhci_hcd 0000:02:00.0: xHCI xhci_drop_endpoint called with disabled ep f6b19060 [ 2884.772533] xhci_hcd 0000:02:00.0: xHCI xhci_drop_endpoint called with disabled ep f6b1908c [ 2884.848116] usb 10-1: USB disconnect, device number 3 [ 2884.851493] scsi 23:0:0:0: [sdb] killing request [ 2884.851501] scsi 23:0:0:0: [sdb] killing request [ 2884.851699] scsi 23:0:0:0: [sdb] Unhandled error code [ 2884.851702] scsi 23:0:0:0: [sdb] Result: hostbyte=DID_NO_CONNECT driverbyte=DRIVER_OK [ 2884.851708] scsi 23:0:0:0: [sdb] CDB: Read(10): 28 00 00 5f 2b ee 00 00 3e 00 [ 2884.851721] end_request: I/O error, dev sdb, sector 6237166 [ 2884.851726] Buffer I/O error on device sdb1, logical block 6237102 [ 2884.851730] Buffer I/O error on device sdb1, logical block 6237103 [ 2884.851738] Buffer I/O error on device sdb1, logical block 6237104 [ 2884.851741] Buffer I/O error on device sdb1, logical block 6237105 [ 2884.851744] Buffer I/O error on device sdb1, logical block 6237106 [ 2884.851747] Buffer I/O error on device sdb1, logical block 6237107 [ 2884.851750] Buffer I/O error on device sdb1, logical block 6237108 [ 2884.851753] Buffer I/O error on device sdb1, logical block 6237109 [ 2884.851757] Buffer I/O error on device sdb1, logical block 6237110 [ 2884.851807] scsi 23:0:0:0: [sdb] Unhandled error code [ 2884.851810] scsi 23:0:0:0: [sdb] Result: hostbyte=DID_NO_CONNECT driverbyte=DRIVER_OK [ 2884.851813] scsi 23:0:0:0: [sdb] CDB: Read(10): 28 00 00 5f 2c 2c 00 00 3e 00 [ 2884.851824] end_request: I/O error, dev sdb, 
sector 6237228 [ 2885.168190] usb 10-1: new SuperSpeed USB device number 4 using xhci_hcd [ 2885.188268] scsi24 : usb-storage 10-1:1.0 Please help me with my problem. I got this after running dmesg.

    Read the article

  • Unity – Part 5: Injecting Values

    - by Ricardo Peres
    Introduction This is the fifth post on Unity. You can find the introductory post here, the second post, on dependency injection here, a third one on Aspect Oriented Programming (AOP) here and the latest so far, on writing custom extensions, here. This time we will talk about injecting simple values. An Inversion of Control (IoC) / Dependency Injector (DI) container like Unity can be used for things other than injecting complex class dependencies. It can also be used for setting property values or method/constructor parameters whenever a class is built. The main difference is that these values do not have a lifetime manager associated with them and do not come from the regular IoC registration store. Unlike, for instance, MEF, Unity won’t let you register as a dependency a string or an integer, so you have to take a different approach, which I will describe in this post. Scenario Let’s imagine we have a base interface that describes a logger – the same as in previous examples: 1: public interface ILogger 2: { 3: void Log(String message); 4: } And a concrete implementation that writes to a file: 1: public class FileLogger : ILogger 2: { 3: public String Filename 4: { 5: get; 6: set; 7: } 8:  9: #region ILogger Members 10:  11: public void Log(String message) 12: { 13: using (Stream file = File.OpenWrite(this.Filename)) 14: { 15: Byte[] data = Encoding.Default.GetBytes(message); 16: 17: file.Write(data, 0, data.Length); 18: } 19: } 20:  21: #endregion 22: } And let’s say we want the Filename property to come from the application settings (appSettings) section on the Web/App.config file. As usual with Unity, there is an extensibility point that allows us to automatically do this, both with code configuration or statically on the configuration file. 
Extending Injection We start by implementing a class that will retrieve a value from the appSettings by inheriting from ValueElement: 1: sealed class AppSettingsParameterValueElement : ValueElement, IDependencyResolverPolicy 2: { 3: #region Private methods 4: private Object CreateInstance(Type parameterType) 5: { 6: Object configurationValue = ConfigurationManager.AppSettings[this.AppSettingsKey]; 7:  8: if (parameterType != typeof(String)) 9: { 10: TypeConverter typeConverter = this.GetTypeConverter(parameterType); 11:  12: configurationValue = typeConverter.ConvertFromInvariantString(configurationValue as String); 13: } 14:  15: return (configurationValue); 16: } 17: #endregion 18:  19: #region Private methods 20: private TypeConverter GetTypeConverter(Type parameterType) 21: { 22: if (String.IsNullOrEmpty(this.TypeConverterTypeName) == false) 23: { 24: return (Activator.CreateInstance(TypeResolver.ResolveType(this.TypeConverterTypeName)) as TypeConverter); 25: } 26: else 27: { 28: return (TypeDescriptor.GetConverter(parameterType)); 29: } 30: } 31: #endregion 32:  33: #region Public override methods 34: public override InjectionParameterValue GetInjectionParameterValue(IUnityContainer container, Type parameterType) 35: { 36: Object value = this.CreateInstance(parameterType); 37: return (new InjectionParameter(parameterType, value)); 38: } 39: #endregion 40:  41: #region IDependencyResolverPolicy Members 42:  43: public Object Resolve(IBuilderContext context) 44: { 45: Type parameterType = null; 46:  47: if (context.CurrentOperation is ResolvingPropertyValueOperation) 48: { 49: ResolvingPropertyValueOperation op = (context.CurrentOperation as ResolvingPropertyValueOperation); 50: PropertyInfo prop = op.TypeBeingConstructed.GetProperty(op.PropertyName); 51: parameterType = prop.PropertyType; 52: } 53: else if (context.CurrentOperation is ConstructorArgumentResolveOperation) 54: { 55: ConstructorArgumentResolveOperation op = (context.CurrentOperation as ConstructorArgumentResolveOperation); 56: String args = op.ConstructorSignature.Split('(')[1].Split(')')[0]; 57: Type[] types = args.Split(',').Select(a => Type.GetType(a.Split(' ')[0])).ToArray(); 58: ConstructorInfo ctor = op.TypeBeingConstructed.GetConstructor(types); 59: parameterType = ctor.GetParameters().Where(p => p.Name == op.ParameterName).Single().ParameterType; 60: } 61: else if (context.CurrentOperation is MethodArgumentResolveOperation) 62: { 63: MethodArgumentResolveOperation op = (context.CurrentOperation as MethodArgumentResolveOperation); 64: String methodName = op.MethodSignature.Split('(')[0].Split(' ')[1]; 65: String args = op.MethodSignature.Split('(')[1].Split(')')[0]; 66: Type[] types = args.Split(',').Select(a => Type.GetType(a.Split(' ')[0])).ToArray(); 67: MethodInfo method = op.TypeBeingConstructed.GetMethod(methodName, types); 68: parameterType = method.GetParameters().Where(p => p.Name == op.ParameterName).Single().ParameterType; 69: } 70:  71: return (this.CreateInstance(parameterType)); 72: } 73:  74: #endregion 75:  76: #region Public properties 77: [ConfigurationProperty("appSettingsKey", IsRequired = true)] 78: public String AppSettingsKey 79: { 80: get 81: { 82: return ((String)base["appSettingsKey"]); 83: } 84:  85: set 86: { 87: base["appSettingsKey"] = value; 88: } 89: } 90: #endregion 91: } As you can see from the implementation of the IDependencyResolverPolicy.Resolve method, this will work in three different scenarios: When it is applied to a property; When it is applied to a constructor parameter; 
When it is applied to an initialization method. The implementation will even try to convert the value to its declared destination, for example, if the destination property is an Int32, it will try to convert the appSettings stored string to an Int32. Injection By Configuration If we want to configure injection by configuration, we need to implement a custom section extension by inheriting from SectionExtension, and registering our custom element with the name “appSettings”: 1: sealed class AppSettingsParameterInjectionElementExtension : SectionExtension 2: { 3: public override void AddExtensions(SectionExtensionContext context) 4: { 5: context.AddElement<AppSettingsParameterValueElement>("appSettings"); 6: } 7: } And on the configuration file, for setting a property, we use it like this: 1: <appSettings> 2: <add key="LoggerFilename" value="Log.txt"/> 3: </appSettings> 4: <unity xmlns="http://schemas.microsoft.com/practices/2010/unity"> 5: <container> 6: <register type="MyNamespace.ILogger, MyAssembly" mapTo="MyNamespace.ConsoleLogger, MyAssembly"/> 7: <register type="MyNamespace.ILogger, MyAssembly" mapTo="MyNamespace.FileLogger, MyAssembly" name="File"> 8: <lifetime type="singleton"/> 9: <property name="Filename"> 10: <appSettings appSettingsKey="LoggerFilename"/> 11: </property> 12: </register> 13: </container> 14: </unity> If we would like to inject the value as a constructor parameter, it would be instead: 1: <unity xmlns="http://schemas.microsoft.com/practices/2010/unity"> 2: <sectionExtension type="MyNamespace.AppSettingsParameterInjectionElementExtension, MyAssembly" /> 3: <container> 4: <register type="MyNamespace.ILogger, MyAssembly" mapTo="MyNamespace.ConsoleLogger, MyAssembly"/> 5: <register type="MyNamespace.ILogger, MyAssembly" mapTo="MyNamespace.FileLogger, MyAssembly" name="File"> 6: <lifetime type="singleton"/> 7: <constructor> 8: <param name="filename" type="System.String"> 9: <appSettings appSettingsKey="LoggerFilename"/> 10: </param> 11: </constructor> 12: </register> 13: </container> 14: </unity> Notice the appSettings section, where we add a LoggerFilename entry, which is the same as the one referred by our AppSettingsParameterInjectionElementExtension extension. For more advanced behavior, you can add a TypeConverterName attribute to the appSettings declaration, where you can pass an assembly qualified name of a class that inherits from TypeConverter. This class will be responsible for converting the appSettings value to a destination type. 
Injection By Attribute If we would like to use attributes instead, we need to create a custom attribute by inheriting from DependencyResolutionAttribute: 1: [Serializable] 2: [AttributeUsage(AttributeTargets.Parameter | AttributeTargets.Property, AllowMultiple = false, Inherited = true)] 3: public sealed class AppSettingsDependencyResolutionAttribute : DependencyResolutionAttribute 4: { 5: public AppSettingsDependencyResolutionAttribute(String appSettingsKey) 6: { 7: this.AppSettingsKey = appSettingsKey; 8: } 9:  10: public String TypeConverterTypeName 11: { 12: get; 13: set; 14: } 15:  16: public String AppSettingsKey 17: { 18: get; 19: private set; 20: } 21:  22: public override IDependencyResolverPolicy CreateResolver(Type typeToResolve) 23: { 24: return (new AppSettingsParameterValueElement() { AppSettingsKey = this.AppSettingsKey, TypeConverterTypeName = this.TypeConverterTypeName }); 25: } 26: } As for file configuration, there is a mandatory property for setting the appSettings key and an optional TypeConverterName  for setting the name of a TypeConverter. Both the custom attribute and the custom section return an instance of the injector AppSettingsParameterValueElement that we implemented in the first place. Now, the attribute needs to be placed before the injected class’ Filename property: 1: public class FileLogger : ILogger 2: { 3: [AppSettingsDependencyResolution("LoggerFilename")] 4: public String Filename 5: { 6: get; 7: set; 8: } 9:  10: #region ILogger Members 11:  12: public void Log(String message) 13: { 14: using (Stream file = File.OpenWrite(this.Filename)) 15: { 16: Byte[] data = Encoding.Default.GetBytes(message); 17: 18: file.Write(data, 0, data.Length); 19: } 20: } 21:  22: #endregion 23: } Or, if we wanted to use constructor injection: 1: public class FileLogger : ILogger 2: { 3: public String Filename 4: { 5: get; 6: set; 7: } 8:  9: public FileLogger([AppSettingsDependencyResolution("LoggerFilename")] String filename) 10: { 11: this.Filename = filename; 12: } 13:  14: #region ILogger Members 15:  16: public void Log(String message) 17: { 18: using (Stream file = File.OpenWrite(this.Filename)) 19: { 20: Byte[] data = Encoding.Default.GetBytes(message); 21: 22: file.Write(data, 0, data.Length); 23: } 24: } 25:  26: #endregion 27: } Usage Just do: 1: ILogger logger = ServiceLocator.Current.GetInstance<ILogger>("File"); And off you go! A simple way do avoid hardcoded values in component registrations. Of course, this same concept can be applied to registry keys, environment values, XML attributes, etc, etc, just change the implementation of the AppSettingsParameterValueElement class. Next stop: custom lifetime managers.
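    One thing the post mentions but does not show is doing the same thing purely in code. As a rough sketch of my own (reusing the article's ILogger/FileLogger types and the "File"/"LoggerFilename" names), the appSettings value can simply be read at registration time and handed to Unity as an injection property:

    using System.Configuration;
    using Microsoft.Practices.Unity;

    // Code-based registration: read the appSettings value once and pass it as an
    // InjectionProperty, so no custom section extension or attribute is needed.
    // The trade-off is that the value is captured here, at registration time,
    // rather than resolved on every build-up as with the custom resolver policy.
    var container = new UnityContainer();
    container.RegisterType<ILogger, FileLogger>(
        "File",
        new ContainerControlledLifetimeManager(),   // the <lifetime type="singleton"/> equivalent
        new InjectionProperty("Filename",
            ConfigurationManager.AppSettings["LoggerFilename"]));

    ILogger logger = container.Resolve<ILogger>("File");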

    Read the article

  • SQL SERVER – Weekly Series – Memory Lane – #038

    - by Pinal Dave
    Here is the list of selected articles of SQLAuthority.com across all these years. Instead of just listing all the articles I have selected a few of my most favorite articles and have listed them here with additional notes below it. Let me know which one of the following is your favorite article from memory lane. 2007 CASE Statement in ORDER BY Clause – ORDER BY using Variable This article is as per request from the Application Development Team Leader of my company. His team encountered code where the application was preparing string for ORDER BY clause of the SELECT statement. Application was passing this string as variable to Stored Procedure (SP) and SP was using EXEC to execute the SQL string. This is not good for performance as Stored Procedure has to recompile every time due to EXEC. sp_executesql can do the same task but still not the best performance. SSMS – View/Send Query Results to Text/Grid/Files Results to Text – CTRL + T Results to Grid – CTRL + D Results to File – CTRL + SHIFT + F 2008 Introduction to SPARSE Columns Part 2 I wrote about Introduction to SPARSE Columns Part 1. Let us understand the concept of the SPARSE column in more detail. I suggest you read the first part before continuing reading this article. All SPARSE columns are stored as one XML column in the database. Let us see some of the advantage and disadvantage of SPARSE column. Deferred Name Resolution How come when table name is incorrect SP can be created successfully but when an incorrect column is used SP cannot be created? 2009 Backup Timeline and Understanding of Database Restore Process in Full Recovery Model In general, databases backup in full recovery mode is taken in three different kinds of database files. Full Database Backup Differential Database Backup Log Backup Restore Sequence and Understanding NORECOVERY and RECOVERY While doing RESTORE Operation if you restoring database files, always use NORECOVER option as that will keep the database in a state where more backup file are restored. This will also keep database offline also to prevent any changes, which can create itegrity issues. Once all backup file is restored run RESTORE command with a RECOVERY option to get database online and operational. Four Different Ways to Find Recovery Model for Database Perhaps, the best thing about technical domain is that most of the things can be executed in more than one ways. It is always useful to know about the various methods of performing a single task. Two Methods to Retrieve List of Primary Keys and Foreign Keys of Database When Information Schema is used, we will not be able to discern between primary key and foreign key; we will have both the keys together. In the case of sys schema, we can query the data in our preferred way and can join this table to another table, which can retrieve additional data from the same. Get Last Running Query Based on SPID PID is returns sessions ID of the current user process. The acronym SPID comes from the name of its earlier version, Server Process ID. 2010 SELECT * FROM dual – Dual Equivalent Dual is a table that is created by Oracle together with data dictionary. It consists of exactly one column named “dummy”, and one record. The value of that record is X. You can check the content of the DUAL table using the following syntax. SELECT * FROM dual Identifying Statistics Used by Query Someone asked this question in my training class of query optimization and performance tuning.  
“Can I know which statistics were used by my query?” 2011 SQL SERVER – Interview Questions and Answers – Frequently Asked Questions – Day 14 of 31 What are the basic functions for master, msdb, model, tempdb and resource databases? What is the Maximum Number of Index per Table? Explain Few of the New Features of SQL Server 2008 Management Studio Explain IntelliSense for Query Editing Explain MultiServer Query Explain Query Editor Regions Explain Object Explorer Enhancements Explain Activity Monitors SQL SERVER – Interview Questions and Answers – Frequently Asked Questions – Day 15 of 31 What is Service Broker? Where are SQL server Usernames and Passwords Stored in the SQL server? What is Policy Management? What is Database Mirroring? What are Sparse Columns? What does TOP Operator Do? What is CTE? What is MERGE Statement? What is Filtered Index? Which are the New Data Types Introduced in SQL SERVER 2008? SQL SERVER – Interview Questions and Answers – Frequently Asked Questions – Day 16 of 31 What are the Advantages of Using CTE? How can we Rewrite Sub-Queries into Simple Select Statements or with Joins? What is CLR? What are Synonyms? What is LINQ? What are Isolation Levels? What is Use of EXCEPT Clause? What is XPath? What is NOLOCK? What is the Difference between Update Lock and Exclusive Lock? SQL SERVER – Interview Questions and Answers – Frequently Asked Questions – Day 17 of 31 How will you Handle Error in SQL SERVER 2008? What is RAISEERROR? What is RAISEERROR? How to Rebuild the Master Database? What is the XML Datatype? What is Data Compression? What is Use of DBCC Commands? How to Copy the Tables, Schema and Views from one SQL Server to Another? How to Find Tables without Indexes? SQL SERVER – Interview Questions and Answers – Frequently Asked Questions – Day 18 of 31 How to Copy Data from One Table to Another Table? What is Catalog Views? What is PIVOT and UNPIVOT? What is a Filestream? What is SQLCMD? What do you mean by TABLESAMPLE? What is ROW_NUMBER()? What are Ranking Functions? What is Change Data Capture (CDC) in SQL Server 2008? SQL SERVER – Interview Questions and Answers – Frequently Asked Questions – Day 19 of 31 How can I Track the Changes or Identify the Latest Insert-Update-Delete from a Table? What is the CPU Pressure? How can I Get Data from a Database on Another Server? What is the Bookmark Lookup and RID Lookup? What is Difference between ROLLBACK IMMEDIATE and WITH NO_WAIT during ALTER DATABASE? What is Difference between GETDATE and SYSDATETIME in SQL Server 2008? How can I Check that whether Automatic Statistic Update is Enabled or not? How to Find Index Size for Each Index on Table? What is the Difference between Seek Predicate and Predicate? What are Basics of Policy Management? What are the Advantages of Policy Management? SQL SERVER – Interview Questions and Answers – Frequently Asked Questions – Day 20 of 31 What are Policy Management Terms? What is the ‘FILLFACTOR’? Where in MS SQL Server is ’100’ equal to ‘0’? What are Points to Remember while Using the FILLFACTOR Argument? What is a ROLLUP Clause? What are Various Limitations of the Views? What is a Covered index? When I Delete any Data from a Table, does the SQL Server reduce the size of that table? What are Wait Types? How to Stop Log File Growing too Big? If any Stored Procedure is Encrypted, then can we see its definition in Activity Monitor? 
2012 Example of Width Sensitive and Width Insensitive Collation Width Sensitive Collation: A single-byte character (half-width) represented as single-byte and the same character represented as a double-byte character (full-width) are when compared are not equal the collation is width sensitive. In this example we have one table with two columns. One column has a collation of width sensitive and the second column has a collation of width insensitive. Find Column Used in Stored Procedure – Search Stored Procedure for Column Name Very interesting conversation about how to find column used in a stored procedure. There are two different characters in the story and both are having a conversation about how to find column in the stored procedure. Here are two part story Part 1 | Part 2 SQL SERVER – 2012 Functions – FORMAT() and CONCAT() – An Interesting Usage Generate Script for Schema and Data – SQL in Sixty Seconds #021 – Video In simple words, in many cases the database move from one place to another place. It is not always possible to back up and restore databases. There are possibilities when only part of the database (with schema and data) has to be moved. In this video we learn that we can easily generate script for schema for data and move from one server to another one. INFORMATION_SCHEMA.COLUMNS and Value Character Maximum Length -1 I often see the value -1 in the CHARACTER_MAXIMUM_LENGTH column of INFORMATION_SCHEMA.COLUMNS table. I understand that the length of any column can be between 0 to large number but I do not get it when I see value in negative (i.e. -1). Any insight on this subject? Reference: Pinal Dave (http://blog.sqlauthority.com) Filed under: Memory Lane, PostADay, SQL, SQL Authority, SQL Query, SQL Server, SQL Tips and Tricks, T SQL, Technology

    Read the article

  • Reading OpenDocument spreadsheets using C#

    - by DigiMortal
    Excel with its file formats is not the only spreadsheet application that is widely used. There are also users on Linux and Macs and often they are using OpenOffice and other open-source office packages that use ODF instead of OpenXML. In this post I will show you how to read Open Document spreadsheet in C#. Importer as example My previous post about importers showed you how to build flexible importers support to your web application. This post introduces you practical example of one of my importers. Of course, sensitive code is omitted. We start with ODS importer class and we add new methods as we go. public class OdsImporter : ImporterBase {     public OdsImporter()     {     }       public override string[] SupportedFileExtensions     {         get { return new[] { "ods" }; }     }       public override ImportResult Import(Stream fileStream, long companyId, short year)     {         string contentXml = GetContentXml(fileStream);           var result = new ImportResult();         var doc = XDocument.Parse(contentXml);           var rows = doc.Descendants("{urn:oasis:names:tc:opendocument:xmlns:table:1.0}table-row").Skip(1);           foreach (var row in rows)         {             ImportRow(row, companyId, year, result);         }           return result;     } } The class given here just extends base class for importers (previous post uses interface but as I already told there you move to abstract base class when writing code for real projects). Import method reads data from *.ods file, parses it (it is XML), finds all data rows and imports data. As you may see then first row is skipped. This is because the first row on my sheet is always headers row. Reading ODS file Our import method starts with getting XML from *.ods file. ODS files like OpenXml files are zipped containers that contain different files. We need content.xml as all data is kept there. To get the contents of file we use SharpZipLib library to read uploaded file as *.zip file. private static string GetContentXml(Stream fileStream) {     var contentXml = "";       using (var zipInputStream = new ZipInputStream(fileStream))     {         ZipEntry contentEntry = null;         while ((contentEntry = zipInputStream.GetNextEntry()) != null)         {             if (!contentEntry.IsFile)                 continue;             if (contentEntry.Name.ToLower() == "content.xml")                 break;         }           if (contentEntry.Name.ToLower() != "content.xml")         {             throw new Exception("Cannot find content.xml");         }           var bytesResult = new byte[] { };         var bytes = new byte[2000];         var i = 0;           while ((i = zipInputStream.Read(bytes, 0, bytes.Length)) != 0)         {             var arrayLength = bytesResult.Length;             Array.Resize<byte>(ref bytesResult, arrayLength + i);             Array.Copy(bytes, 0, bytesResult, arrayLength, i);         }         contentXml = Encoding.UTF8.GetString(bytesResult);     }     return contentXml; } If here is content.xml file then we stop browsing the file. We read this file to memory and return it as UTF-8 format string. Importing rows Our last task is to import rows. We use special method for this as we have to handle some tricks here. To keep files smaller the cell count on row is not always the same. If we have more than one empty cell one after another then ODS keeps only one cell for sequential empty cells. 
This cell has attribute called number-columns-repeated and it’s value is set to the number of sequential empty cells. This is why we use two indexers for cells collection. private void ImportRow(XElement row, ImportResult result) {     var cells = (from c in row.Descendants()                 where c.Name == "{urn:oasis:names:tc:opendocument:xmlns:table:1.0}table-cell"                 select c).ToList();       var dto = new DataDto();       var count = cells.Count;     var j = -1;       for (var i = 0; i < count; i++)     {         j++;         var cell = cells[i];         var attr = cell.Attribute("{urn:oasis:names:tc:opendocument:xmlns:table:1.0}number-columns-repeated");         if (attr != null)         {             var numToSkip = 0;             if (int.TryParse(attr.Value, out numToSkip))             {                 j += numToSkip - 1;             }         }           if (i > 30) break;         if (j == 0)         {             dto.SomeProperty = cells[i].Value;         }         if (j == 1)         {             dto.SomeOtherProperty = cells[i].Value;         }         // some more data reading     }       // save data } You can define your own class for import results and add there all problems found during data import. Your application gets the results and shows them to user. Conclusion Reading ODS files may seem to complex task but actually it is very easy if we need only data from those documents. We can use some zip-library to get the content file and then parse it to XML. It is not hard to go through the XML but there are some optimization tricks we have to know. The code here is safe to use in web applications as it is not using any API-s that may have special needs to server and infrastructure.
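    As an aside — my own sketch, not part of the original importer — if you are on .NET 4.5 or newer you can skip SharpZipLib and the manual byte-array loop and pull content.xml out with the built-in ZipArchive:

    using System;
    using System.IO;
    using System.IO.Compression;
    using System.Xml.Linq;

    static XDocument LoadOdsContent(Stream fileStream)
    {
        // Open the *.ods container read-only; content.xml holds all sheet data.
        using (var zip = new ZipArchive(fileStream, ZipArchiveMode.Read, leaveOpen: true))
        {
            ZipArchiveEntry entry = zip.GetEntry("content.xml");
            if (entry == null)
                throw new Exception("Cannot find content.xml");

            using (var reader = new StreamReader(entry.Open()))   // content.xml is UTF-8
                return XDocument.Parse(reader.ReadToEnd());
        }
    }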

    Read the article

  • Windows Phone 7 Prototype 001: Speech Recognition on WP7

    At some point in the future it will be awesome when you can just tell your computer what to do and it does it - without typing to help those of us with a blistering 11 WPM hunk and peck technique. Siri, a mobile digital assistant using speech recognition was voted best tech at SXSW. I dont know about that one. Although, I'm sure it will get better when Apple rebuilds it and  bundles on iPhone 5. So how would you do that on WP7? There have been some videos floating around showing Bing with some voice control so obviously the phone has speech recognition. So what options are there: System.Speech? Not included in WP7/SL Nuance software like Siri? No WP7/SL version yet. Invoking the SAPI dlls on the phone? No automation factory in WP7 SL. Web services using System.Speech and mic on the phone? YES! The last one was my least favorite but that works for now. I built a quick sample app to show how to do text-to-speech and speech recognition on WP7.   @eklimczak will not be happy with the developer designed UI. In this sample there is web service with provides access to the system.speech APIs in .NET. Basically its just passing around byte arrays. On the phone its using the XNA audio frameworks to play the text-to-speech stream and to record using the microphone. The code is pretty simple and you can download from the link at the end of this post. The only things to note are adjusting the WCF config to handle larger byte uploads and the Microphone API is a little weird with that 1 second buffer. It would be nice if you could just to mic.start and mic.end which would return an array of bytes instead of managing your own stream inside the buffer ready callback. Couple of downsides to this approach: Recoding from the phone has some static. Could be my code or the my mic is bad / not calibrated right. Having to make web service calls instead of local access is not ideal (Microsoft, please add an API for the SAPI dlls) Although in the context of an app like Siri its not so bad since you need to do web service lookups to get data back Speech recognition quality really depends on either a) a limited grammar set like that pizza grammar in the sample or b) training the recognizer. For the latter it would be annoying to have users train the system. Using the System.Speech stuff youd have to have a profile for each user. So until Microsoft adds some speech client APIs on the phone or Nuance releases a wp7 product, this is a decent workaround. In the future Id like to build something similar to Siri. I shall call it Iris in homage. Im a big fan of mobile speech apps because frankly its just not safe to Google while driving. Since some of my designer co-workers have been posting UI sketches for WP7, Id like to start posting some code prototypes for things I try out on the phone. That will probably last 2 weeks, but for the moment I have like 10 posts in the queue. Sample Code 100% guaranteed to work on my emulatorDid you know that DotNetSlackers also publishes .net articles written by top known .net Authors? We already have over 80 articles in several categories including Silverlight. Take a look: here.
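    The server half of that workaround is small. Here is a hedged sketch (the method name is mine) of the text-to-speech end on the web-service side: System.Speech renders the text to a WAV byte array, which the phone can then fetch over WCF and play back:

    using System.IO;
    using System.Speech.Synthesis;   // full .NET on the server; not available on the phone itself

    public static byte[] TextToWav(string text)
    {
        using (var synth = new SpeechSynthesizer())
        using (var stream = new MemoryStream())
        {
            synth.SetOutputToWaveStream(stream);   // render the speech into memory
            synth.Speak(text);
            return stream.ToArray();               // ship the bytes back over the web service
        }
    }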

    Read the article

  • Text Expansion Awareness for UX Designers: Points to Consider

    - by ultan o'broin
    Awareness of translated text expansion dynamics is important for enterprise applications UX designers (I am assuming all source text for translation is in English, though apps development can takes place in other natural languages too). This consideration goes beyond the standard 'character multiplication' rule and must take into account the avoidance of other layout tricks that a designer might be tempted to try. Follow these guidelines. For general text expansion, remember the simple rule that the shorter the word is in the English, the longer it will need to be in English. See the examples provided by Richard Ishida of the W3C and you'll get the idea. So, forget the 30 percent or one inch minimum expansion rule of the old Forms days. Unfortunately remembering convoluted text expansion rules, based as a percentage of the US English character count can be tough going. Try these: Up to 10 characters: 100 to 200% 11 to 20 characters: 80 to 100% 21 to 30 characters: 60 to 80% 31 to 50 characters: 40 to 60% 51 to 70 characters: 31 to 40% Over 70 characters: 30% (Source: IBM) So it might be easier to remember a rule that if your English text is less than 20 characters then allow it to double in length (200 percent), and then after that assume an increase by half the length of the text (50%). (Bear in mind that ADF can apply truncation rules on some components in English too). (If your text is stored in a database, developers must make sure the table column widths can accommodate the expansion of your text when translated based on byte size for the translated character and not numbers of characters. Use Unicode. One character does not equal one byte in the multilingual enterprise apps world.) Rely on a graceful transformation of translated text. Let all pages to resize dynamically so the text wraps and flow naturally. ADF pages supports this already. Think websites. Don't hard-code alignments. Use Start and End properties on components and not Left or Right. Don't force alignments of components on the page by using texts of a certain length as spacers. Use proper label positioning and anchoring in ADF components or other technologies. Remember that an increase in text length means an increase in vertical space too when pages are resized. So don't hard-code vertical heights for any text areas. Don't be tempted to manually create text or printed reports this way either. They cannot be translated successfully, and are very difficult to maintain in English. Use XML, HTML, RTF and so on. Check out what Oracle BI Publisher offers. Don't force wrapping by using tricks such as /n or /t characters or HTML BR tags or forced page breaks. Once the text is translated the alignment will be destroyed. The position of the breaking character or tag would need to be moved anyway, or even removed. When creating tables, then use table components. Don't use manually created tables that reply on word length to maintain column and row alignment. For example, don't use codeblock elements in HTML; use the proper table elements instead. Once translated, the alignment of manually formatted tabular data is destroyed. Finally, if there is a space restriction, then don't use made-up acronyms, abbreviations or some form of daft text speak to save space. Besides being incomprehensible in English, they may need full translations of the shortened words, even if they can be figured out. Use approved or industry standard acronyms according to the UX style rules, not as a space-saving device. 
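    If you want to sanity-check a label against that simplified rule, it reduces to a couple of lines. A rough sketch of my own, applying the "under 20 characters may double, otherwise add half again" rule quoted above:

    // Estimate how long an English string may become once translated,
    // using the simplified rule above: under 20 characters can double,
    // longer strings grow by roughly half again.
    static int EstimateTranslatedLength(string english)
    {
        int n = english.Length;
        return n < 20 ? n * 2 : n + (n + 1) / 2;
    }

    // EstimateTranslatedLength("Save") == 8; a 40-character label comes back as 60.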
    Restricted Real Estate on Mobile Devices On mobile devices real estate is limited. Using shortened text is fine as long as it is comprehensible. Users in the mobile space prefer brevity too, as they are on the go, performing three-minute tasks, with no time to read lengthy texts. Using fragments, lightening up on unnecessary articles, and getting straight to the point with imperative forms of verbs makes sense on both real-estate and user-experience grounds.

    Read the article

  • Error while installing emacs23 from Software Center

    - by vrcmr
    Trying to install emacs in Software Center Ubuntu 12.04 got this error. installArchives() failed: Selecting previously unselected package emacs23. (Reading database ... (Reading database ... 5% (Reading database ... 10% (Reading database ... 15% (Reading database ... 20% (Reading database ... 25% (Reading database ... 30% (Reading database ... 35% (Reading database ... 40% (Reading database ... 45% (Reading database ... 50% (Reading database ... 55% (Reading database ... 60% (Reading database ... 65% (Reading database ... 70% (Reading database ... 75% (Reading database ... 80% (Reading database ... 85% (Reading database ... 90% (Reading database ... 95% (Reading database ... 100% (Reading database ... 182385 files and directories currently installed.) Unpacking emacs23 (from .../emacs23_23.3+1-1ubuntu9_i386.deb) ... Processing triggers for desktop-file-utils ... Processing triggers for bamfdaemon ... Rebuilding /usr/share/applications/bamf.index... Processing triggers for gnome-menus ... Processing triggers for man-db ... Setting up emacs23 (23.3+1-1ubuntu9) ... update-alternatives: using /usr/bin/emacs23-x to provide /usr/bin/emacs (emacs) in auto mode. emacs-install emacs23 install/dictionaries-common: Byte-compiling for emacsen flavour emacs23 Warning: Lisp directory `/usr/share/emacs/23.3/site-lisp' does not exist. Warning: Lisp directory `/usr/share/emacs/site-lisp' does not exist. Warning: Lisp directory `/usr/share/emacs/23.3/leim' does not exist. Warning: Lisp directory `/usr/share/emacs/23.3/lisp' does not exist. Warning: Lisp directory `/usr/share/emacs/23.3/leim' does not exist. Error: charsets directory (/usr/share/emacs/23.3/etc/charsets) does not exist. Emacs will not function correctly without the character map files. Please check your installation! Warning: Could not find simple.el nor simple.elc Cannot open load file: bytecomp emacs-install: /usr/lib/emacsen-common/packages/install/dictionaries-common emacs23 failed at /usr/lib/emacsen-common/emacs-install line 28, <TSORT> line 3. dpkg: error processing emacs23 (--configure): subprocess installed post-installation script returned error exit status 255 No apport report written because MaxReports is reached already Errors were encountered while processing: emacs23 Error in function: Setting up emacs23 (23.3+1-1ubuntu9) ... emacs-install emacs23 install/dictionaries-common: Byte-compiling for emacsen flavour emacs23 Warning: Lisp directory `/usr/share/emacs/23.3/site-lisp' does not exist. Warning: Lisp directory `/usr/share/emacs/site-lisp' does not exist. Warning: Lisp directory `/usr/share/emacs/23.3/leim' does not exist. Warning: Lisp directory `/usr/share/emacs/23.3/lisp' does not exist. Warning: Lisp directory `/usr/share/emacs/23.3/leim' does not exist. Error: charsets directory (/usr/share/emacs/23.3/etc/charsets) does not exist. Emacs will not function correctly without the character map files. Please check your installation! Warning: Could not find simple.el nor simple.elc Cannot open load file: bytecomp emacs-install: /usr/lib/emacsen-common/packages/install/dictionaries-common emacs23 failed at /usr/lib/emacsen-common/emacs-install line 28, <TSORT> line 3. dpkg: error processing emacs23 (--configure): subprocess installed post-installation script returned error exit status 255

    Read the article

  • MP3 player mpman TS300 not recognized

    - by Nick Lemaire
    I just received a mpman ts300 mp3 player for Christmas. But when I try to connect it to ubuntu (10.10) through usb it doesn't seem to be recognized. I searched for a linuxdriver but came up with nothing. I even tried installing mpman manager from the software center, but still the same problem... Has anybody got any ideas about how to fix this? Thank you edit: some additional information When I plug it in, it doesn't show up under /dev. And it is not listed in the result of 'lsusb' This is the output of 'dmesg' [19571.732541] usb 1-4: new high speed USB device using ehci_hcd and address 2 [19571.889154] scsi7 : usb-storage 1-4:1.0 [19572.883155] scsi 7:0:0:0: Direct-Access TS300 USB DISK 1.00 PQ: 0 ANSI: 0 [19572.885856] sd 7:0:0:0: Attached scsi generic sg2 type 0 [19572.887266] sd 7:0:0:0: [sdb] 7868416 512-byte logical blocks: (4.02 GB/3.75 GiB) [19573.012547] usb 1-4: reset high speed USB device using ehci_hcd and address 2 [19573.292550] usb 1-4: reset high speed USB device using ehci_hcd and address 2 [19573.572565] usb 1-4: reset high speed USB device using ehci_hcd and address 2 [19573.725019] sd 7:0:0:0: [sdb] Test WP failed, assume Write Enabled [19573.725024] sd 7:0:0:0: [sdb] Assuming drive cache: write through [19573.728521] sd 7:0:0:0: [sdb] Attached SCSI removable disk [19575.333015] sd 7:0:0:0: [sdb] 7868416 512-byte logical blocks: (4.02 GB/3.75 GiB) [19575.452547] usb 1-4: reset high speed USB device using ehci_hcd and address 2 [19580.603253] usb 1-4: device descriptor read/all, error -110 [19580.722560] usb 1-4: reset high speed USB device using ehci_hcd and address 2 [19595.842579] usb 1-4: device descriptor read/64, error -110 [19611.072656] usb 1-4: device descriptor read/64, error -110 [19611.302540] usb 1-4: reset high speed USB device using ehci_hcd and address 2 [19616.332568] usb 1-4: device descriptor read/8, error -110 [19621.462692] usb 1-4: device descriptor read/8, error -110 [19621.692551] usb 1-4: reset high speed USB device using ehci_hcd and address 2 [19626.722567] usb 1-4: device descriptor read/8, error -110 [19631.852802] usb 1-4: device descriptor read/8, error -110 [19631.962587] usb 1-4: USB disconnect, address 2 [19631.962587] sd 7:0:0:0: Device offlined - not ready after error recovery [19631.962840] sd 7:0:0:0: [sdb] Assuming drive cache: write through [19631.965104] sd 7:0:0:0: [sdb] READ CAPACITY failed [19631.965109] sd 7:0:0:0: [sdb] Result: hostbyte=DID_NO_CONNECT driverbyte=DRIVER_OK [19631.965112] sd 7:0:0:0: [sdb] Sense not available. 
[19631.965125] sd 7:0:0:0: [sdb] Assuming drive cache: write through [19631.965130] sdb: detected capacity change from 4028628992 to 0 [19632.130042] usb 1-4: new high speed USB device using ehci_hcd and address 3 [19647.242550] usb 1-4: device descriptor read/64, error -110 [19662.472560] usb 1-4: device descriptor read/64, error -110 [19662.702566] usb 1-4: new high speed USB device using ehci_hcd and address 4 [19677.822587] usb 1-4: device descriptor read/64, error -110 [19693.052575] usb 1-4: device descriptor read/64, error -110 [19693.282582] usb 1-4: new high speed USB device using ehci_hcd and address 5 [19698.312600] usb 1-4: device descriptor read/8, error -110 [19703.442594] usb 1-4: device descriptor read/8, error -110 [19703.672548] usb 1-4: new high speed USB device using ehci_hcd and address 6 [19708.702581] usb 1-4: device descriptor read/8, error -110 [19713.840077] usb 1-4: device descriptor read/8, error -110 [19713.942555] hub 1-0:1.0: unable to enumerate USB device on port 4 [19714.262545] usb 4-2: new full speed USB device using uhci_hcd and address 2 [19729.382549] usb 4-2: device descriptor read/64, error -110 [19744.612534] usb 4-2: device descriptor read/64, error -110 [19744.842543] usb 4-2: new full speed USB device using uhci_hcd and address 3 [19759.962714] usb 4-2: device descriptor read/64, error -110 [19775.200047] usb 4-2: device descriptor read/64, error -110 [19775.422630] usb 4-2: new full speed USB device using uhci_hcd and address 4 [19780.444498] usb 4-2: device descriptor read/8, error -110 [19785.574491] usb 4-2: device descriptor read/8, error -110 [19785.802547] usb 4-2: new full speed USB device using uhci_hcd and address 5 [19790.824490] usb 4-2: device descriptor read/8, error -110 [19795.961473] usb 4-2: device descriptor read/8, error -110 [19796.062556] hub 4-0:1.0: unable to enumerate USB device on port 2

    Read the article

  • FTP Upload ftpWebRequest Proxy

    - by Rodney Vinyard
    Searchable:   FTP Upload ftpWebRequest Proxy FTP command is not supported when using HTTP proxy     In the article below I will cover 2 topics   1.       C# & Windows Command-Line FTP Upload with No Proxy Server   2.       C# & Windows Command-Line FTP Upload with Proxy Server   Not covered here: Secure FTP / SFTP   Sample Attributes: ·         UploadFilePath = “\\servername\folder\file.name” ·         Proxy Server = “ftp://proxy.server/” ·         FTP Target Server = ftp.target.com ·         FTP User = “User” ·         FTP Password = “Password” with No Proxy Server ·         Windows Command-Line > ftp ftp.target.com > ftp User: User > ftp Password: Password > ftp put \\servername\folder\file.name > ftp dir           (result: file.name listed) > ftp del file.name > ftp dir           (result: file.name deleted) > ftp quit   ·         C#   //----------------- //Start FTP via _TargetFtpProxy //----------------- string relPath = Path.GetFileName(\\servername\folder\file.name);   //result: relPath = “file.name”   FtpWebRequest ftpWebRequest = (FtpWebRequest)WebRequest.Create("ftp.target.com/file.name); ftpWebRequest.Method = WebRequestMethods.Ftp.UploadFile;   //----------------- //user - password //----------------- ftpWebRequest.Credentials = new NetworkCredential("user, "password");   //----------------- // set proxy = null! //----------------- ftpWebRequest.Proxy = null;   //----------------- // Copy the contents of the file to the request stream. //----------------- StreamReader sourceStream = new StreamReader(“\\servername\folder\file.name”);   byte[] fileContents = Encoding.UTF8.GetBytes(sourceStream.ReadToEnd()); sourceStream.Close(); ftpWebRequest.ContentLength = fileContents.Length;     //----------------- // transer the stream stream. //----------------- Stream requestStream = ftpWebRequest.GetRequestStream(); requestStream.Write(fileContents, 0, fileContents.Length); requestStream.Close();   //----------------- // Look at the response results //----------------- FtpWebResponse response = (FtpWebResponse)ftpWebRequest.GetResponse();   Console.WriteLine("Upload File Complete, status {0}", response.StatusDescription);   with Proxy Server ·         Windows Command-Line > ftp proxy.server > ftp User: [email protected] > ftp Password: Password > ftp put \\servername\folder\file.name > ftp dir           (result: file.name listed) > ftp del file.name > ftp dir           (result: file.name deleted) > ftp quit   ·         C#   //----------------- //Start FTP via _TargetFtpProxy //----------------- string relPath = Path.GetFileName(\\servername\folder\file.name);   //result: relPath = “file.name”   FtpWebRequest ftpWebRequest = (FtpWebRequest)WebRequest.Create("ftp://proxy.server/" + relPath); ftpWebRequest.Method = WebRequestMethods.Ftp.UploadFile;   //----------------- //user - password //----------------- ftpWebRequest.Credentials = new NetworkCredential("[email protected], "password");   //----------------- // set proxy = null! //----------------- ftpWebRequest.Proxy = null;   //----------------- // Copy the contents of the file to the request stream. //----------------- StreamReader sourceStream = new StreamReader(“\\servername\folder\file.name”);   byte[] fileContents = Encoding.UTF8.GetBytes(sourceStream.ReadToEnd()); sourceStream.Close(); ftpWebRequest.ContentLength = fileContents.Length;     //----------------- // transer the stream stream. 
//----------------- Stream requestStream = ftpWebRequest.GetRequestStream(); requestStream.Write(fileContents, 0, fileContents.Length); requestStream.Close();   //----------------- // Look at the response results //----------------- FtpWebResponse response = (FtpWebResponse)ftpWebRequest.GetResponse();   Console.WriteLine("Upload File Complete, status {0}", response.StatusDescription);
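    For the no-proxy case there is also a shorter route. A sketch of my own (same sample attributes as above) using WebClient, which speaks FTP when handed an ftp:// URI:

    using System.Net;

    // Minimal FTP upload: WebClient handles the request stream and response for us.
    using (var client = new WebClient())
    {
        client.Credentials = new NetworkCredential("User", "Password");
        client.Proxy = null;   // same point as above: keep an HTTP proxy out of the FTP request
        client.UploadFile("ftp://ftp.target.com/file.name", @"\\servername\folder\file.name");
    }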

    Read the article

< Previous Page | 57 58 59 60 61 62 63 64 65 66 67 68  | Next Page >