Search Results

Search found 3136 results on 126 pages for 'buffer overrun'.

Page 117 of 126

  • C# MP3 Player using winmm.dll

    - by JoeBeez
    I'm trying to bash together a (very) rough MP3 player during my lunch hour, and so far I've got it to play the files, and I'm working on a way of building a list of filenames to enable random songs, but I think I've just hit a snag. Is there a way of knowing when the currently playing MP3 has finished? An event or some such? As it stands I don't think I'd be able to have playlists etc unless this was possible due to it stopping after every playback. I've attached the whole source below, feel free to pick it apart and give me any feedback you may have, cheers. using System; using System.IO; using System.Collections.Generic; using System.ComponentModel; using System.Data; using System.Drawing; using System.Linq; using System.Text; using System.Windows.Forms; using System.Runtime.InteropServices; namespace X { public partial class Form1 : Form { List<string> Names = new List<string>(); StreamReader reader = File.OpenText(@"C:\X.txt"); string line; OpenFileDialog ofd = new OpenFileDialog(); StringBuilder buffer = new StringBuilder(128); string CommandString; [DllImport("winmm.dll")] private static extern long mciSendString(string lpstrCommand, StringBuilder lpstrReturnString, int uReturnLength, int hwndCallback); public Form1() { InitializeComponent(); while ((line = reader.ReadLine()) != null) { if (line.Trim() != "") { Names.Add(line.Trim()); } } } private void btnplay_Click(object sender, EventArgs e) { if (ofd.FileName == "") { if (ofd.ShowDialog() == DialogResult.OK) { ofd.Filter = "MP3 Files|*.mp3"; CommandString = "open " + "\"" + ofd.FileName + "\"" + " type MPEGVideo alias Mp3File"; mciSendString(CommandString, null, 0, 0); CommandString = "play Mp3File"; mciSendString(CommandString, null, 0, 0); } } else { CommandString = "play Mp3File"; mciSendString(CommandString, null, 0, 0); } } private void btnpause_Click(object sender, EventArgs e) { CommandString = "pause mp3file"; mciSendString(CommandString, null, 0, 0); } private void btnbrowse_Click(object sender, EventArgs e) { ofd.Filter = "Mp3 files |*.mp3"; if (ofd.ShowDialog() == DialogResult.OK) { txtpath.Text = ofd.FileName; CommandString = "close Mp3File"; mciSendString(CommandString, null, 0, 0); CommandString = "open " + "\"" + ofd.FileName + "\"" + " type MPEGVideo alias Mp3File"; mciSendString(CommandString, null, 0, 0); } } } }
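
    One way to get a track-finished signal, sketched below: MCI can report the current playback mode ("playing", "stopped", "paused") through its status command, so a Windows Forms Timer that polls "status Mp3File mode" can kick off the next song when the mode flips to stopped. This is only a rough sketch built on the mciSendString import already shown above; the timer interval, the method names, and the choice of polling (rather than the MCI notify callback) are assumptions, not part of the original code.

      // Minimal polling sketch (assumes the existing mciSendString P/Invoke).
      private Timer pollTimer;   // System.Windows.Forms.Timer

      private void StartPolling()
      {
          pollTimer = new Timer();
          pollTimer.Interval = 500;            // arbitrary poll rate
          pollTimer.Tick += PollTimer_Tick;
          pollTimer.Start();
      }

      private void PollTimer_Tick(object sender, EventArgs e)
      {
          StringBuilder status = new StringBuilder(128);
          mciSendString("status Mp3File mode", status, status.Capacity, 0);
          if (status.ToString().StartsWith("stopped"))
          {
              // Playback finished - pick the next random file from Names and play it here.
          }
      }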

    Read the article

  • ensime scala errors (class scala.Array not found, object scala not found)

    - by Jeff Bowman
    I've installed ensime according to the README.md file, however, I get errors in the inferior-ensime-server buffer with the following: INFO: Fatal Error: scala.tools.nsc.MissingRequirementError: object scala not found. scala.tools.nsc.MissingRequirementError: object scala not found. at scala.tools.nsc.symtab.Definitions$definitions$.getModuleOrClass(Definitions.scala:516) at scala.tools.nsc.symtab.Definitions$definitions$.ScalaPackage(Definitions.scala:43) at scala.tools.nsc.symtab.Definitions$definitions$.ScalaPackageClass(Definitions.scala:44) at scala.tools.nsc.symtab.Definitions$definitions$.UnitClass(Definitions.scala:89) at scala.tools.nsc.symtab.Definitions$definitions$.init(Definitions.scala:786) at scala.tools.nsc.Global$Run.(Global.scala:593) at scala.tools.nsc.interactive.Global$TyperRun.(Global.scala:473) at scala.tools.nsc.interactive.Global.newTyperRun(Global.scala:535) at scala.tools.nsc.interactive.Global.reloadSources(Global.scala:289) at scala.tools.nsc.interactive.Global$$anonfun$reload$1.apply(Global.scala:300) at scala.tools.nsc.interactive.Global$$anonfun$reload$1.apply(Global.scala:300) at scala.tools.nsc.interactive.Global.respond(Global.scala:276) at scala.tools.nsc.interactive.Global.reload(Global.scala:300) at scala.tools.nsc.interactive.CompilerControl$$anon$1.apply$mcV$sp(CompilerControl.scala:81) at scala.tools.nsc.interactive.Global.pollForWork(Global.scala:132) at scala.tools.nsc.interactive.Global$$anon$2.run(Global.scala:192) also: INFO: Fatal Error: scala.tools.nsc.MissingRequirementError: class scala.Array not found. scala.tools.nsc.MissingRequirementError: class scala.Array not found. at scala.tools.nsc.symtab.Definitions$definitions$.getModuleOrClass(Definitions.scala:516) at scala.tools.nsc.symtab.Definitions$definitions$.getClass(Definitions.scala:474) at scala.tools.nsc.symtab.Definitions$definitions$.ArrayClass(Definitions.scala:217) at scala.tools.nsc.backend.icode.TypeKinds$REFERENCE.(TypeKinds.scala:258) at scala.tools.nsc.backend.icode.GenICode$ICodePhase.(GenICode.scala:55) at scala.tools.nsc.backend.icode.GenICode.newPhase(GenICode.scala:43) at scala.tools.nsc.backend.icode.GenICode.newPhase(GenICode.scala:25) at scala.tools.nsc.Global$Run$$anonfun$4.apply(Global.scala:606) at scala.tools.nsc.Global$Run$$anonfun$4.apply(Global.scala:605) at scala.collection.LinearSeqOptimized$class.foreach(LinearSeqOptimized.scala:62) at scala.collection.immutable.List.foreach(List.scala:46) at scala.tools.nsc.Global$Run.(Global.scala:605) at scala.tools.nsc.interactive.Global$TyperRun.(Global.scala:473) at scala.tools.nsc.interactive.Global.newTyperRun(Global.scala:535) at scala.tools.nsc.interactive.Global.reloadSources(Global.scala:289) at scala.tools.nsc.interactive.Global.typedTreeAt(Global.scala:309) at scala.tools.nsc.interactive.Global$$anonfun$getTypedTreeAt$1.apply(Global.scala:326) at scala.tools.nsc.interactive.Global$$anonfun$getTypedTreeAt$1.apply(Global.scala:326) at scala.tools.nsc.interactive.Global.respond(Global.scala:276) at scala.tools.nsc.interactive.Global.getTypedTreeAt(Global.scala:326) at scala.tools.nsc.interactive.CompilerControl$$anon$2.apply$mcV$sp(CompilerControl.scala:89) at scala.tools.nsc.interactive.Global.pollForWork(Global.scala:132) at scala.tools.nsc.interactive.Global$$anon$2.run(Global.scala:192) Also none of the type identification works for me, I get 'NA' if I get anything at all. C-c t causes emacs to lock up. 
    I'm running: Ubuntu 10.04 (64-bit), emacs 23.1.50.1, ensime from git (as of 3 May 2010), Scala 2.8.0.RC1, Java 1.6.0_20 (from Sun). Here is a copy of the log: http://dl.dropbox.com/u/5309017/ensime.log Thanks! Jeff

    Read the article

  • CGContextDrawPDFPage doesn't seem to persist in CGContext

    - by erichf
    I am trying to access the pixels of a CGContext written to with a PDF, but the bitmap buffer doesn't seem to update. Any help would be appreciated: //Get the reference to our current page pageRef = CGPDFDocumentGetPage(docRef, iCurrentPage); //Start with a media crop, but see if we can shrink to smaller crop CGRect pdfRect1 = CGRectIntegral(CGPDFPageGetBoxRect(pageRef, kCGPDFMediaBox)); CGRect r1 = CGRectIntegral(CGPDFPageGetBoxRect(pageRef, kCGPDFCropBox)); if (!CGRectIsEmpty(r1)) pdfRect1 = r1; int wide = pdfRect1.size.width + pdfRect1.origin.x; int high = pdfRect1.size.height + pdfRect1.origin.y; CGContextRef ctxBuffer = NULL; CGColorSpaceRef colorSpace; UInt8* bitmapData; int bitmapByteCount; int bitmapBytesPerRow; bitmapBytesPerRow = (wide * 4); bitmapByteCount = (bitmapBytesPerRow * high); colorSpace = CGColorSpaceCreateDeviceRGB(); bitmapData = malloc( bitmapByteCount ); if (bitmapData == NULL) { DebugLog (@"Memory not allocated!"); return; } ctxBuffer = CGBitmapContextCreate (bitmapData, wide, high, 8, // bits per component bitmapBytesPerRow, colorSpace, kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big); // if (ctxBuffer== NULL) { free (bitmapData); DebugLog (@"Context not created!"); return; } CGColorSpaceRelease( colorSpace ); //White out the current context CGContextSetRGBFillColor(ctxBuffer, 1.0, 1.0, 1.0, 1.0); CGContextFillRect(ctxBuffer, CGContextGetClipBoundingBox(ctxBuffer)); CGContextDrawPDFPage(ctxBuffer, pageRef); //!!!This displays just fine to the context passed in from - (void)drawLayer:(CALayer *)layer inContext:(CGContextRef)ctx. That is, I can see the PDf page rendered, so we know ctxBuffer was created correctly //However, if I view bitmapData in memory, it only shows as 0xFF (or whatever fill color I use) CGImageRef img = CGBitmapContextCreateImage(ctxBuffer); CGContextDrawImage(ctx, tiledLayer.frame, img); void *data = CGBitmapContextGetData (ctx); for (int i = 0; i < wide; i++) { for (int j = 0; j < high; j++) { //All of the bytes show as 0xFF (or whatever fill color I test with)?! int byteIndex = (j * 4) + i * 4; UInt8 red = bitmapData[byteIndex]; UInt8 green = bitmapData[byteIndex + 1]; UInt8 blue = bitmapData[byteIndex + 2]; UInt8 alpha = m_PixelBuf[byteIndex + 3]; } } I have also tried using CGDataProviderCopyData(CGImageGetDataProvider(img)) & CFDataGetBytePtr, but the results are the same?

    Read the article

  • SQL Server 2008: FileStream Insertion Failure w/ .NET 3.5SP1

    - by James Alexander
    I've configured a db w/ a FileStream group and have a table w/ File type on it. When attempting to insert a streamed file, after I create the table row, my query to read the file path and the transaction context returns a null file path. I can't seem to figure out why though. Here is the table creation script: /****** Object: Table [dbo].[JobInstanceFile] Script Date: 03/22/2010 18:05:36 ******/ SET ANSI_NULLS ON GO SET QUOTED_IDENTIFIER ON GO SET ANSI_PADDING ON GO CREATE TABLE [dbo].[JobInstanceFile]( [JobInstanceFileId] [int] IDENTITY(1,1) NOT NULL, [JobInstanceId] [int] NOT NULL, [File] [varbinary](max) FILESTREAM NULL, [FileId] [uniqueidentifier] ROWGUIDCOL NOT NULL, [Created] [datetime] NOT NULL, CONSTRAINT [PK_JobInstanceFile] PRIMARY KEY CLUSTERED ( [JobInstanceFileId] ASC )WITH (PAD_INDEX = OFF, STATISTICS_NORECOMPUTE = OFF, IGNORE_DUP_KEY = OFF, ALLOW_ROW_LOCKS = ON, ALLOW_PAGE_LOCKS = ON) ON [PRIMARY] FILESTREAM_ON [JobInstanceFilesGroup], UNIQUE NONCLUSTERED ( [FileId] ASC )WITH (PAD_INDEX = OFF, STATISTICS_NORECOMPUTE = OFF, IGNORE_DUP_KEY = OFF, ALLOW_ROW_LOCKS = ON, ALLOW_PAGE_LOCKS = ON) ON [PRIMARY] ) ON [PRIMARY] FILESTREAM_ON [JobInstanceFilesGroup] GO SET ANSI_PADDING OFF GO ALTER TABLE [dbo].[JobInstanceFile] ADD DEFAULT (newid()) FOR [FileId] GO Here's my proc I call to create the row before streaming the file: /****** Object: StoredProcedure [dbo].[JobInstanceFileCreate] Script Date: 03/22/2010 18:06:23 ******/ SET ANSI_NULLS ON GO SET QUOTED_IDENTIFIER ON GO create proc [dbo].[JobInstanceFileCreate] @JobInstanceId int, @Created datetime as insert into JobInstanceFile (JobInstanceId, FileId, Created) values (@JobInstanceId, newid(), @Created) select scope_identity() GO And lastly, here's the code I'm using: public int CreateJobInstanceFile(int jobInstanceId, string filePath) { using (var connection = new SqlConnection(ConfigurationManager.ConnectionStrings["ConsumerMarketingStoreFiles"].ConnectionString)) using (var fileStream = new FileStream(filePath, FileMode.Open)) { connection.Open(); var tran = connection.BeginTransaction(IsolationLevel.ReadCommitted); try { //create the JobInstanceFile instance var command = new SqlCommand("JobInstanceFileCreate", connection) { Transaction = tran }; command.CommandType = CommandType.StoredProcedure; command.Parameters.AddWithValue("@JobInstanceId", jobInstanceId); command.Parameters.AddWithValue("@Created", DateTime.Now); int jobInstanceFileId = Convert.ToInt32(command.ExecuteScalar()); //read out the filestream transaction context to stream the file for storage command.CommandText = "select [File].PathName(), GET_FILESTREAM_TRANSACTION_CONTEXT() from JobInstanceFile where JobInstanceFileId = @JobInstanceFileId"; command.CommandType = CommandType.Text; command.Parameters.AddWithValue("@JobInstanceFileId", jobInstanceFileId); using (SqlDataReader dr = command.ExecuteReader()) { dr.Read(); //get the file path we're writing out to string writePath = dr.GetString(0); using (var writeStream = new SqlFileStream(writePath, (byte[])dr.GetValue(1), FileAccess.ReadWrite)) { //copy from one stream to another byte[] bytes = new byte[65536]; int numBytes; while ((numBytes = fileStream.Read(bytes, 0, 65536)) > 0) writeStream.Write(bytes, 0, numBytes); } } tran.Commit(); return jobInstanceFileId; } catch (Exception e) { tran.Rollback(); throw e; } } } Can someone please let me know what I'm doing wrong?
    In the code, the following expression is returning null for the file path and shouldn't be: //get the file path we're writing out to string writePath = dr.GetString(0); The server is different from the computer the code is running on, but the necessary shares appear to be in order and I have also run the following: EXEC sp_configure filestream_access_level, 2 Any help would be greatly appreciated. Thanks!
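
    A hedged guess at the cause, for comparison: [File].PathName() returns NULL whenever the FILESTREAM column itself is NULL, and the JobInstanceFileCreate proc above never touches [File], so no file exists on disk yet when the select runs. One common workaround is to give the column a zero-length value (0x) inside the same transaction before asking for its path; the extra UPDATE below is an addition for illustration, not code from the question.

      // Sketch: seed the FILESTREAM column with an empty value so PathName() has a file to point at.
      command.CommandText =
          "update JobInstanceFile set [File] = 0x where JobInstanceFileId = @JobInstanceFileId; " +
          "select [File].PathName(), GET_FILESTREAM_TRANSACTION_CONTEXT() " +
          "from JobInstanceFile where JobInstanceFileId = @JobInstanceFileId";
      command.CommandType = CommandType.Text;
      command.Parameters.AddWithValue("@JobInstanceFileId", jobInstanceFileId);

    Equivalently, the stored procedure could insert 0x into [File] when it creates the row.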

    Read the article

  • OpenGL FrameBuffer Objects weird behavior

    - by Ben Jones
    My algorithm is this: Render the scene to an FBO with shadow mapping from multiple locations Render the scene to the screen with shadow mapping ...black magic that I still have to implement... Combine the samples from step 1 with the image from step 2 I'm trying to debug steps 1 and 2 and am coming across STRANGE behavior. My algorithm for each shadow mapped pass is: render the scene to an FBO connected to a depth array texture from the POV of each light render the scene from the viewpoint and use vertex/frag shaders to compare the depths When I run my algorithm this way: render from point to FBO render from point to screen glutSwapBuffers() The normal vectors in the screen pass appear to be incorrect (inverted possibly). I'm pretty sure that's the issue because my diffuse lighting calculation is incorrect, but the material colors are correct, and the shadows appear in the correct places. So, it seems like the only thing that could be the culprit is the normals. However if I do render from point to FBO render from point to Screen glutSwapBuffers() //wrong here render from point to Screen glutSwapBuffers() the second pass is correct. I assume there's a problem with my framebuffer calls. Can anyone see what the problem is from the log below? It's from a bugle trace grepped for 'buffer' with a few edits to make it a little more clear. Thanks! [INFO] trace.call: glGenFramebuffersEXT(1, 0xdfeb90 - { 1 }) [INFO] trace.call: glGenFramebuffersEXT(1, 0xdfebac - { 2 }) [INFO] trace.call: glBindFramebufferEXT(GL_FRAMEBUFFER, 1) [INFO] trace.call: glDrawBuffer(GL_NONE) [INFO] trace.call: glReadBuffer(GL_NONE) [INFO] trace.call: glBindFramebufferEXT(GL_FRAMEBUFFER, 0) //start render to FBO [INFO] trace.call: glBindFramebufferEXT(GL_FRAMEBUFFER, 2) [INFO] trace.call: glReadBuffer(GL_NONE) [INFO] trace.call: glFramebufferTexture2DEXT(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 2, 0) [INFO] trace.call: glFramebufferTexture2DEXT(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, 3, 0) [INFO] trace.call: glDrawBuffer(GL_COLOR_ATTACHMENT0) //bind to the FBO attached to a depth tex array for shadows [INFO] trace.call: glBindFramebufferEXT(GL_FRAMEBUFFER, 1) [INFO] trace.call: glFramebufferTextureLayerARB(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, 1, 0, 0) [INFO] trace.call: glClear(GL_DEPTH_BUFFER_BIT) //draw geometry //bind to the FBO I want the shadow mapped image rendered to [INFO] trace.call: glBindFramebufferEXT(GL_FRAMEBUFFER, 2) [INFO] trace.call: glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) //draw geometry //draw to screen pass //again shadow mapping FBO [INFO] trace.call: glBindFramebufferEXT(GL_FRAMEBUFFER, 1) [INFO] trace.call: glFramebufferTextureLayerARB(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, 1, 0, 0) [INFO] trace.call: glClear(GL_DEPTH_BUFFER_BIT) //draw geometry //bind to the screen [INFO] trace.call: glBindFramebufferEXT(GL_FRAMEBUFFER, 0) [INFO] trace.call: glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) //finished, swap buffers [INFO] trace.call: glXSwapBuffers(0xd5fc10, 0x05800002) //INCORRECT OUTPUT //second try at render to screen: [INFO] trace.call: glBindFramebufferEXT(GL_FRAMEBUFFER, 1) [INFO] trace.call: glFramebufferTextureLayerARB(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, 1, 0, 0) [INFO] trace.call: glClear(GL_DEPTH_BUFFER_BIT) //draw geometry [INFO] trace.call: glBindFramebufferEXT(GL_FRAMEBUFFER, 0) [INFO] trace.call: glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) //draw geometry [INFO] trace.call: glXSwapBuffers(0xd5fc10, 0x05800002) //correct output

    Read the article

  • Need help implementing simple socket server using GIOService (GLib, Glib-GIO)

    - by Mark Renouf
    I'm learning the basics of writing a simple, efficient socket server using GLib. I'm experimenting with GSocketService. So far I can only seem to accept connections but then they are immediately closed. From the docs I can't figure out what step I am missing. I'm hoping someone can shed some light on this for me. When running the following: # telnet localhost 4000 Trying 127.0.0.1... Connected to localhost. Escape character is '^]'. Connection closed by foreign host. # telnet localhost 4000 Trying 127.0.0.1... Connected to localhost. Escape character is '^]'. Connection closed by foreign host. # telnet localhost 4000 Trying 127.0.0.1... Connected to localhost. Escape character is '^]'. Connection closed by foreign host. Output from the server: # ./server New Connection from 127.0.0.1:36962 New Connection from 127.0.0.1:36963 New Connection from 127.0.0.1:36965 Current code: /* * server.c * * Created on: Mar 10, 2010 * Author: mark */ #include <glib.h> #include <gio/gio.h> gchar *buffer; gboolean network_read(GIOChannel *source, GIOCondition cond, gpointer data) { GString *s = g_string_new(NULL); GError *error; GIOStatus ret = g_io_channel_read_line_string(source, s, NULL, &error); if (ret == G_IO_STATUS_ERROR) g_error ("Error reading: %s\n", error->message); else g_print("Got: %s\n", s->str); } gboolean new_connection(GSocketService *service, GSocketConnection *connection, GObject *source_object, gpointer user_data) { GSocketAddress *sockaddr = g_socket_connection_get_remote_address(connection, NULL); GInetAddress *addr = g_inet_socket_address_get_address(G_INET_SOCKET_ADDRESS(sockaddr)); guint16 port = g_inet_socket_address_get_port(G_INET_SOCKET_ADDRESS(sockaddr)); g_print("New Connection from %s:%d\n", g_inet_address_to_string(addr), port); GSocket *socket = g_socket_connection_get_socket(connection); gint fd = g_socket_get_fd(socket); GIOChannel *channel = g_io_channel_unix_new(fd); g_io_add_watch(channel, G_IO_IN, (GIOFunc) network_read, NULL); return TRUE; } int main(int argc, char **argv) { g_type_init(); GSocketService *service = g_socket_service_new(); GInetAddress *address = g_inet_address_new_from_string("127.0.0.1"); GSocketAddress *socket_address = g_inet_socket_address_new(address, 4000); g_socket_listener_add_address(G_SOCKET_LISTENER(service), socket_address, G_SOCKET_TYPE_STREAM, G_SOCKET_PROTOCOL_TCP, NULL, NULL, NULL); g_object_unref(socket_address); g_object_unref(address); g_socket_service_start(service); g_signal_connect(service, "incoming", G_CALLBACK(new_connection), NULL); GMainLoop *loop = g_main_loop_new(NULL, FALSE); g_main_loop_run(loop); }

    Read the article

  • TVirtualStringTree - resetting non-visual nodes and memory consumption

    - by Remy Lebeau - TeamB
    I have an app that loads records from a binary log file and displays them in a virtual TListView. There are potentially millions of records in a file, and the display can be filtered by the user, so I do not load all of the records in memory at one time, and the ListView item indexes are not a 1-to-1 relation with the file record offsets (List item 1 may be file record 100, for instance). I use the ListView's OnDataHint event to load records for just the items the ListView is actually interested in. As the user scrolls around, the range specified by OnDataHint changes, allowing me to free records that are not in the new range, and allocate new records as needed. This works fine, speed is tolerable, and the memory footprint is very low. I am currently evaluating TVirtualStringTree as a replacement for the TListView, mainly because I want to add the ability to expand/collapse records that span multiple lines (I can fudge it with the TListView by incrementing/decrementing the item count dynamically, but this is not as straight forward as using a real tree). For the most part, I have been able to port the TListView logic and have everything work as I need. I notice that TVirtualStringTree's virtual paradigm is vastly different, though. It does not have the same kind of OnDataHint functionality that TListView does (I can use the OnScroll event to fake it, which allows my memory buffer logic to continue working), and I can use the OnInitializeNode event to associate nodes with records that are allocated. However, once a tree node is initialized, it sees that it remains initialized for the lifetime of the tree. That is not good for me. As the user scrolls around and I remove records from memory, I need to reset those non-visual nodes without removing them from the tree completely, or losing their expand/collapse states. When the user scrolls them back into view, I can re-allocate the records and re-initialize the nodes. Basically, I want to make TVirtualStringTree act as much like TListView as possible, as far as its virtualization is concerned. I have seen that TVirtualStringTree has a ResetNode() method, but I encounter various errors whenever I try to use it. I must be using it wrong. I also thought of just storing a data pointer inside each node to my record buffers, and I allocate and free memory, update those pointers accordingly. The end effect does not work so well, either. Worse, my largest test log file has ~5 million records in it. If I initialize the TVirtualStringTree with that many nodes at one time (when the log display is unfiltered), the tree's internal overhead for its nodes takes up a whopping 260MB of memory (without any records being allocated yet). Whereas with the TListView, loading the same log file and all the memory logic behind it, I can get away with using just a few MBs. Any ideas?

    Read the article

  • Modeling distribution of performance measurements

    - by peterchen
    How would you mathematically model the distribution of repeated real life performance measurements - "Real life" meaning you are not just looping over the code in question, but it is just a short snippet within a large application running in a typical user scenario? My experience shows that you usually have a peak around the average execution time that can be modeled adequately with a Gaussian distribution. In addition, there's a "long tail" containing outliers - often with a multiple of the average time. (The behavior is understandable considering the factors contributing to first execution penalty). My goal is to model aggregate values that reasonably reflect this, and can be calculated from aggregate values (like for the Gaussian, calculate mu and sigma from N, sum of values and sum of squares). In other terms, number of repetitions is unlimited, but memory and calculation requirements should be minimized. A normal Gaussian distribution can't model the long tail appropriately and will have the average biased strongly even by a very small percentage of outliers. I am looking for ideas, especially if this has been attempted/analysed before. I've checked various distributions models, and I think I could work out something, but my statistics is rusty and I might end up with an overblown solution. Oh, a complete shrink-wrapped solution would be fine, too ;) Other aspects / ideas: Sometimes you get "two humps" distributions, which would be acceptable in my scenario with a single mu/sigma covering both, but ideally would be identified separately. Extrapolating this, another approach would be a "floating probability density calculation" that uses only a limited buffer and adjusts automatically to the range (due to the long tail, bins may not be spaced evenly) - haven't found anything, but with some assumptions about the distribution it should be possible in principle. Why (since it was asked) - For a complex process we need to make guarantees such as "only 0.1% of runs exceed a limit of 3 seconds, and the average processing time is 2.8 seconds". The performance of an isolated piece of code can be very different from a normal run-time environment involving varying levels of disk and network access, background services, scheduled events that occur within a day, etc. This can be solved trivially by accumulating all data. However, to accumulate this data in production, the data produced needs to be limited. For analysis of isolated pieces of code, a gaussian deviation plus first run penalty is ok. That doesn't work anymore for the distributions found above. [edit] I've already got very good answers (and finally - maybe - some time to work on this). I'm starting a bounty to look for more input / ideas.
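
    One hedged direction, since the data is positive and right-skewed: a log-normal often captures this kind of long tail better than a plain Gaussian, and its parameters can be estimated from exactly the sort of running aggregates mentioned above (count, sum of ln x, sum of ln x squared), so memory stays constant no matter how many samples arrive. The little accumulator below is only an illustration of that idea; the class and property names are invented.

      // Sketch: online log-normal fit from three running aggregates.
      public class LogNormalAccumulator
      {
          private long n;
          private double sumLog;     // sum of ln(x)
          private double sumLogSq;   // sum of ln(x)^2

          public void Add(double x)  // x = one measured duration, must be > 0
          {
              double l = Math.Log(x);
              n++;
              sumLog += l;
              sumLogSq += l * l;
          }

          public double Mu    { get { return sumLog / n; } }
          public double Sigma { get { return Math.Sqrt(sumLogSq / n - (Mu * Mu)); } }
      }

    A second accumulator restricted to values above some threshold could track the tail (or the "two humps" case) separately.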

    Read the article

  • Piping input to a Java app with Perl

    - by user319479
    I need to write a Perl script that pipes input into a Java program. This is related to this, but that didn't help me. My issue is that the Java app doesn't get the print statements until I close the handle. What I found online was that $| needs to be set to something greater than 0, in which case newline characters will flush the buffer. This still doesn't work. This is the script: #! /usr/bin/perl -w use strict; use File::Basename; $|=1; open(TP, "| java -jar test.jar") or die "fail"; sleep(2); print TP "this is test 1\n"; print TP "this is test 2\n"; print "tests printed, waiting 5s\n"; sleep(5); print "wait over. closing handle...\n"; close TP; print "closed.\n"; print "sleeping for 5s...\n"; sleep(5); print "script finished!\n"; exit And here is a sample Java app: import java.util.Scanner; public class test{ public static void main( String[] args ){ Scanner sc = new Scanner( System.in ); int crashcount = 0; while( true ){ try{ String input = sc.nextLine(); System.out.println( ":: INPUT: " + input ); if( "bananas".equals(input) ){ break; } } catch( Exception e ){ System.out.println( ":: EXCEPTION: " + e.toString() ); crashcount++; if( crashcount == 5 ){ System.out.println( ":: Looks like stdin is broke" ); break; } } } System.out.println( ":: IT'S OVER!" ); return; } } The Java app should respond to receiving the test prints immediately, but it doesn't until the close statement in the Perl script. What am I doing wrong? Note: the fix can only be in the Perl script. The Java app can't be changed. Also, File::Basename is there because I'm using it in the real script.

    Read the article

  • How to read the 3D chart data with DirectX?

    - by MemoryLeak
    I am reading an open source project, and I found there is a function which reads 3D data (let's say a character) from an obj file and draws it. The source code: List<Vertex3f> verts=new List<Vertex3f>(); List<Vertex3f> norms=new List<Vertex3f>(); Groups=new List<ToothGroup>(); //ArrayList ALf=new ArrayList();//faces always part of a group List<Face> faces=new List<Face>(); MemoryStream stream=new MemoryStream(buffer); using(StreamReader sr = new StreamReader(stream)){ String line; Vertex3f vertex; string[] items; string[] subitems; Face face; ToothGroup group=null; while((line = sr.ReadLine()) != null) { if(line.StartsWith("#")//comment || line.StartsWith("mtllib")//material library. We build our own. || line.StartsWith("usemtl")//use material || line.StartsWith("o")) {//object. There's only one object continue; } if(line.StartsWith("v ")) {//vertex items=line.Split(new char[] { ' ' }); vertex=new Vertex3f();//float[3]; if(flipHorizontally) { vertex.X=-Convert.ToSingle(items[1],CultureInfo.InvariantCulture); } else { vertex.X=Convert.ToSingle(items[1],CultureInfo.InvariantCulture); } vertex.Y=Convert.ToSingle(items[2],CultureInfo.InvariantCulture); vertex.Z=Convert.ToSingle(items[3],CultureInfo.InvariantCulture); verts.Add(vertex); continue; } And why does it need to read the data manually in DirectX? As far as I know, in XNA programming, we just need to call a function to load the resource. Is this because in DirectX there is no function to read the resource? If yes, then how do I prepare the 3D resource? In XNA we just need to use other software to draw the 3D model and then export it, but what should I do in DirectX?
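
    For what it's worth, the manual parsing is there because Direct3D itself ships no .obj loader (the D3DX helpers only cover .x meshes), so projects either write a reader like the one above or convert the model offline into a format they can load directly. XNA hides the same work in its content pipeline, which compiles the asset at build time; the one-liner below is only a comparison sketch and the asset name "character" is made up.

      // XNA side, for comparison: the content pipeline has already parsed/compiled the model at build time.
      Model character = Content.Load<Model>("character");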

    Read the article

  • Error: java.security.AccessControlException: Access denied

    - by RMD
    Hi, I have to connect to an https url with username and password to read a file. I am not able to connect to the server (see the error log below). I do not have much java experience so I need help with this code. I would really appreciate some help to solve this! Thank you. Raquel CODE: import lotus.domino.*; import java.net.*; import java.io.*; import javax.net.ssl.HttpsURLConnection; public class JavaAgent extends AgentBase { public void NotesMain() { try { String username = "123"; String password = "456"; String input = username + ":" + password; String encoding = new sun.misc.BASE64Encoder().encode (input.getBytes()); //Open the URL and read the text into a Buffer String urlName = "https://server.org/Export.mvc/GetMeetings?modifiedSince=4/9/2010"; URL url = new URL(urlName); HttpsURLConnection connection = (HttpsURLConnection)url.openConnection(); connection.setRequestMethod("POST"); connection.setRequestProperty("Content-Type", "application/x-www-form-urlencoded"); connection.setRequestProperty("Content-Length", String.valueOf (encoding.length())); connection.setUseCaches(false); connection.setDoInput(true); connection.setDoOutput(true); connection.setAllowUserInteraction(true); connection.setRequestProperty("Authorization", "Basic " + encoding); connection.setRequestProperty("Cookie", "LocationCode=Geneva"); connection.connect(); BufferedReader rd = null; try{ rd = new BufferedReader(new InputStreamReader(connection.getInputStream())); } catch (IOException e) { System.out.println("Read failed"); System.exit(-1); } String line; while((line = rd.readLine()) != null) { System.out.println(line.toString()); } rd.close(); connection.disconnect(); } catch(Exception e) { e.printStackTrace(); } } } LOG: java.security.AccessControlException: Access denied (java.lang.RuntimePermission exitVM.-1) at java.security.AccessController.checkPermission(AccessController.java:108) at java.lang.SecurityManager.checkPermission(SecurityManager.java:532) at COM.ibm.JEmpower.applet.AppletSecurity.superDotCheckPermission(AppletSecurity.java:1449) at COM.ibm.JEmpower.applet.AppletSecurity.checkRuntimePermission(AppletSecurity.java:1311) at COM.ibm.JEmpower.applet.AppletSecurity.checkPermission(AppletSecurity.java:1611) at COM.ibm.JEmpower.applet.AppletSecurity.checkPermission(AppletSecurity.java:1464) at java.lang.SecurityManager.checkExit(SecurityManager.java:744) at java.lang.Runtime.exit(Runtime.java:99) at java.lang.System.exit(System.java:275) at JavaAgent.NotesMain(Unknown Source) at lotus.domino.AgentBase.runNotes(Unknown Source) at lotus.domino.NotesThread.run(Unknown Source)

    Read the article

  • LSP packet modify

    - by kellogs
    Hello, anybody care to share some insights on how to use LSP for packet modifying? I am using the non IFS subtype and I can see how (pseudo?) packets first enter WSPRecv. But how do I modify them? My inquiry is about one single HTTP response that causes WSPRecv to be called 3 times :((. I need to modify several parts of this response, but since it comes in 3 slices, it is pretty hard to modify it accordingly. And, maybe on other machines or under different conditions (such as high traffic) there would only be one sole WSPRecv call, or maybe 10 calls. What is the best way to work around this (please no NDIS :D), and how do I properly change the buffer (lpBuffers->buf) by increasing it? int WSPAPI WSPRecv( SOCKET s, LPWSABUF lpBuffers, DWORD dwBufferCount, LPDWORD lpNumberOfBytesRecvd, LPDWORD lpFlags, LPWSAOVERLAPPED lpOverlapped, LPWSAOVERLAPPED_COMPLETION_ROUTINE lpCompletionRoutine, LPWSATHREADID lpThreadId, LPINT lpErrno ) { LPWSAOVERLAPPEDPLUS ProviderOverlapped = NULL; SOCK_INFO *SocketContext = NULL; int ret = SOCKET_ERROR; *lpErrno = NO_ERROR; // // Find our provider socket corresponding to this one // SocketContext = FindAndRefSocketContext(s, lpErrno); if ( NULL == SocketContext ) { dbgprint( "WSPRecv: FindAndRefSocketContext failed!" ); goto cleanup; } // // Check for overlapped I/O // if ( NULL != lpOverlapped ) { /*bla bla .. not interesting in my case*/ } else { ASSERT( SocketContext->Provider->NextProcTable.lpWSPRecv ); SetBlockingProvider(SocketContext->Provider); ret = SocketContext->Provider->NextProcTable.lpWSPRecv( SocketContext->ProviderSocket, lpBuffers, dwBufferCount, lpNumberOfBytesRecvd, lpFlags, lpOverlapped, lpCompletionRoutine, lpThreadId, lpErrno); SetBlockingProvider(NULL); //is this the place to modify packet length and contents ? if (strstr(lpBuffers->buf, "var mapObj = null;")) { int nLen = strlen(lpBuffers->buf) + 200; /*CHAR *szNewBuf = new CHAR[]; CHAR *pIndex; pIndex = strstr(lpBuffers->buf, "var mapObj = null;"); nLen = strlen(strncpy(szNewBuf, lpBuffers->buf, (pIndex - lpBuffers->buf) * sizeof (CHAR))); nLen = strlen(strncpy(szNewBuf + nLen * sizeof(CHAR), "var com = null;\r\n", 17 * sizeof(CHAR))); pIndex += 18 * sizeof(CHAR); nLen = strlen(strncpy(szNewBuf + nLen * sizeof(CHAR), pIndex, 1330 * sizeof (CHAR))); nLen = strlen(strncpy(szNewBuf + nLen * sizeof(CHAR), "if (com == null)\r\n" \ "com = new ActiveXObject(\"InterCommJS.Gateway\");\r\n" \ "com.lat = latitude;\r\n" \ "com.lon = longitude;\r\n}", 111 * sizeof (CHAR))); pIndex = strstr(szNewBuf, "Content-Length:"); pIndex += 16 * sizeof(CHAR); strncpy(pIndex, "1465", 4 * sizeof(CHAR)); lpBuffers->buf = szNewBuf; lpBuffers->len += 128;*/ } if ( SOCKET_ERROR != ret ) { SocketContext->BytesRecv += *lpNumberOfBytesRecvd; } } cleanup: if ( NULL != SocketContext ) DerefSocketContext( SocketContext, lpErrno ); return ret; } Thank you

    Read the article

  • Do you still limit line length in code?

    - by Noldorin
    This is a matter on which I would like to gauge the opinion of the community: Do you still limit the length of lines of code to a fixed maximum? This was certainly a convention of the past for many languages; one would typically cap the number of characters per line to a value such as 80 (and more recently 100 or 120 I believe). As far as I understand, the primary reasons for limiting line length are: Readability - You don't have to scroll over horizontally when you want to see the end of some lines. Printing - Admittedly (at least in my experience), most code that you are working on does not get printed out on paper, but by limiting the number of characters you can ensure that formatting doesn't get messed up when printed. Past editors (?) - Not sure about this one, but I suspect that at some point in the distant past of programming, (at least some) text editors may have been based on a fixed-width buffer. I'm sure there are points that I am still missing, so feel free to add to these... Now, when I observe C or C# code nowadays, I often see a number of different styles, the main ones being: Line length capped to 80, 100, or even 120 characters. As far as I understand, 80 is the traditional length, but the longer ones of 100 and 120 have appeared because of the widespread use of high resolutions and widescreen monitors nowadays. No line length capping at all. This tends to be pretty horrible to read, and I don't see it too often, though it's certainly not too rare either. Inconsistent capping of line length. The length of some lines is limited to a fixed maximum (or even a maximum that changes depending on the file/location in code), while others (possibly comments) are not at all. My personal preference here (at least recently) has been to cap the line length to 100 in the Visual Studio editor. This means that in a decently sized window (on a non-widescreen monitor), the ends of lines are still fully visible. I can however see a few disadvantages in this, especially when you end up writing code that's indented 3 or 4 levels and then having to include a long string literal - though I often take this as a sign to refactor my code! In particular, I am curious what the C and C# coders (or anyone who uses Visual Studio for that matter) think about this point, though I would be interested in hearing anyone's thoughts on the subject. Edit Thanks for all the answers - I appreciate the variety of opinions here, all presenting sound reasons. Consensus does seem to be tipping in the direction of always (or almost always) limiting the line length. Interestingly, limiting line length seems to appear in various coding standards. Judging by some of the answers, both the Python and Google CPP guidelines set the limit at 80 chars. I haven't seen anything similar regarding C# or VB.NET, but I would be curious to see if there are ones anywhere.

    Read the article

  • Facing Memory Leaks in AES Encryption Method.

    - by Mubashar Ahmad
    Can anyone please identify whether there are any possible memory leaks in the following code? I have tried .NET Memory Profiler and it says "CreateEncryptor" and some other functions are leaving unmanaged memory leaks, which I have confirmed using Performance Monitor, but Dispose, Clear, and Close calls are already placed wherever possible. Please advise me accordingly; it's a bit urgent. public static string Encrypt(string plainText, string key) { //Set up the encryption objects byte[] encryptedBytes = null; using (AesCryptoServiceProvider acsp = GetProvider(Encoding.UTF8.GetBytes(key))) { byte[] sourceBytes = Encoding.UTF8.GetBytes(plainText); using (ICryptoTransform ictE = acsp.CreateEncryptor()) { //Set up stream to contain the encryption using (MemoryStream msS = new MemoryStream()) { //Perform the encryption, storing output into the stream using (CryptoStream csS = new CryptoStream(msS, ictE, CryptoStreamMode.Write)) { csS.Write(sourceBytes, 0, sourceBytes.Length); csS.FlushFinalBlock(); //sourceBytes are now encrypted as an array of secure bytes encryptedBytes = msS.ToArray(); //.ToArray() is important, don't mess with the buffer csS.Close(); } msS.Close(); } } acsp.Clear(); } //return the encrypted bytes as a BASE64 encoded string return Convert.ToBase64String(encryptedBytes); } private static AesCryptoServiceProvider GetProvider(byte[] key) { AesCryptoServiceProvider result = new AesCryptoServiceProvider(); result.BlockSize = 128; result.KeySize = 256; result.Mode = CipherMode.CBC; result.Padding = PaddingMode.PKCS7; result.GenerateIV(); result.IV = new byte[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; byte[] RealKey = GetKey(key, result); result.Key = RealKey; // result.IV = RealKey; return result; } private static byte[] GetKey(byte[] suggestedKey, SymmetricAlgorithm p) { byte[] kRaw = suggestedKey; List<byte> kList = new List<byte>(); for (int i = 0; i < p.LegalKeySizes[0].MaxSize; i += 8) { kList.Add(kRaw[(i / 8) % kRaw.Length]); } byte[] k = kList.ToArray(); return k; }

    Read the article

  • wrapping boost::ublas with swig

    - by leon
    I am trying to pass data between the numpy and boost::ublas layers. I have written an ultra thin wrapper because swig cannot parse ublas' header correctly. The code is shown below #include <boost/numeric/ublas/vector.hpp> #include <boost/numeric/ublas/matrix.hpp> #include <boost/lexical_cast.hpp> #include <algorithm> #include <sstream> #include <string> using std::copy; using namespace boost; typedef boost::numeric::ublas::matrix<double> dm; typedef boost::numeric::ublas::vector<double> dv; class dvector : public dv{ public: dvector(const int rhs):dv(rhs){;}; dvector(); dvector(const int size, double* ptr):dv(size){ copy(ptr, ptr+sizeof(double)*size, &(dv::data()[0])); } ~dvector(){} }; with the SWIG interface that looks something like %apply(int DIM1, double* INPLACE_ARRAY1) {(const int size, double* ptr)} class dvector{ public: dvector(const int rhs); dvector(); dvector(const int size, double* ptr); %newobject toString; char* toString(); ~dvector(); }; I have compiled them successfully via gcc 4.3 and vc++9.0. However when I simply run a = dvector(array([1.,2.,3.])) it gives me a segfault. This is the first time I have used swig with numpy and I do not fully understand the data conversion and memory buffer passing. Does anyone see something obvious I have missed? I have tried to trace through with a debugger but it crashed within the assembly code of python.exe. I have no clue if this is a swig problem or a problem with my simple wrapper. Anything is appreciated.

    Read the article

  • Creating a System::String object from a BSTR in Managed C++ - is this way a good idea???

    - by Eli
    My co-worker is filling a System::String object with double-byte characters from an unmanaged library by the following method: RFC_PARAMETER aux; Object* target; RFC_UNICODE_TYPE_ELEMENT* elm; elm = &(m_coreObject->m_pStructMeta->m_typeElements[index]); aux.name = NULL; aux.nlen = 0; aux.type = elm->type; aux.leng = elm->c2_length; aux.addr = m_coreObject->m_rfcWa + elm->c2_offset; GlobalFunctions::CreateObjectForRFCField(target,aux,elm->decimals); GlobalFunctions::ReadRFCField(target,aux,elm->decimals); Where GlobalFunctions::CreateObjectForRFCField creates a System::String object filled with spaces (for padding) to what the unmanaged library states the max length should be: static void CreateObjectForRFCField(Object*& object, RFC_PARAMETER& par, unsigned dec) { switch (par.type) { case TYPC: object = new String(' ',par.leng / sizeof(_TCHAR)); break; // unimportant afterwards. } } And GlobalFunctions::ReadRFCField() copies the data from the library into the created String object and preserves the space padding: static void ReadRFCField(String* target, RFC_PARAMETER& par) { int lngt; _TCHAR* srce; switch (par.type) { case TYPC: case TYPDATE: case TYPTIME: case TYPNUM: lngt = par.leng / sizeof(_TCHAR); srce = (_TCHAR*)par.addr; break; case RFCTYPE_STRING: lngt = (*(_TCHAR**)par.addr != NULL) ? (int)_tcslen(*(_TCHAR**)par.addr) : 0; srce = *(_TCHAR**)par.addr; break; default: throw new DotNet_Incomp_RFCType2; } if (lngt > target->Length) lngt = target->Length; GCHandle gh = GCHandle::Alloc(target,GCHandleType::Pinned); wchar_t* buff = reinterpret_cast<wchar_t*>(gh.AddrOfPinnedObject().ToPointer()); _wcsnset(buff,' ',target->Length); _snwprintf(buff,lngt,_T2WFSP,srce); gh.Free(); } Now, on occasion, we see access violations getting thrown in the _snwprintf call. My question really is: Is it appropriate to create a string padded to a length (ideally to pre-allocate the internal buffer), and then to modify the String using GCHandle::Alloc and the mess above. And yes, I know that System::String objects are supposed to be immutable - I'm looking for a definitive "This is WRONG and here is why". Thanks, Eli.

    Read the article

  • help with making a password checker in java

    - by Cheesegraterr
    Hello, I am trying to make a program in Java that checks for three specific inputs. It has to be 1. At least 7 characters. 2. Contain both upper and lower case alphabetic characters. 3. Contain at least 1 digit. So far I have been able to make it check if there is 7 characters, but I am having trouble with the last two. What should I put in my loop as an if statement to check for digits and make it upper and lower case. Any help would be greatly appreciated. Here is what I have so far. import java.awt.*; import java.io.*; import java.util.StringTokenizer; public class passCheck { private static String getStrSys () { String myInput = null; //Store the String that is read in from the command line BufferedReader mySystem; //Buffer to store the input mySystem = new BufferedReader (new InputStreamReader (System.in)); //creates a connection to system input try { myInput = mySystem.readLine (); //reads in data from the console myInput = myInput.trim (); } catch (IOException e) //check { System.out.println ("IOException: " + e); return ""; } return myInput; //return the integer to the main program } //**************************************** //main instructions go here //**************************************** static public void main (String[] args) { String pass; //the words the user inputs String temp = ""; //holds temp info int stringLength; //length of string boolean goodPass = false; System.out.print ("Please enter a password: "); //ask for words pass = getStrSys (); //get words from system temp = pass.toLowerCase (); stringLength = pass.length (); //find length of eveyrthing while (goodPass == false) { if (stringLength < 7) { System.out.println ("Your password must consist of at least 7 characters"); System.out.print ("Please enter a password: "); //ask for words pass = getStrSys (); stringLength = pass.length (); goodPass = false; } else if (something to check for digits) { } }

    Read the article

  • Delay keyboard input help

    - by Stradigos
    I'm so close! I'm using the XNA Game State Management example found here and trying to modify how it handles input so I can delay the key/create an input buffer. In GameplayScreen.cs I've declared a double called elapsedTime and set it equal to 0. In the HandleInput method I've changed the Key.Right button press to: if (keyboardState.IsKeyDown(Keys.Left)) movement.X -= 50; if (keyboardState.IsKeyDown(Keys.Right)) { elapsedTime -= gameTime.ElapsedGameTime.TotalMilliseconds; if (elapsedTime <= 0) { movement.X += 50; elapsedTime = 10; } } else { elapsedTime = 0; } The pseudo code: If the right arrow key is not pressed set elapsedTime to 0. If it is pressed, the elapsedTime equals itself minus the milliseconds since the last frame. If the difference then equals 0 or less, move the object 50, and then set the elapsedTime to 10 (the delay). If the key is being held down elapsedTime should never be set to 0 via the else. Instead, after elapsedTime is set to 10 after a successful check, the elapsedTime should get lower and lower because it's being subtracted by the TotalMilliseconds. When that reaches 0, it successfully passes the check again and moves the object once more. The problem is, it moves the object once per press but doesn't work if you hold it down. Can anyone offer any sort of tip/example/bit of knowledge towards this? Thanks in advance, it's been driving me nuts. In theory I thought this would for sure work. CLARIFICATION Think of a grid when your thinking about how I want the block to move. Instead of just fluidly moving across the screen, it's moving by it's width (sorta jumping) to the next position. If I hold down the key, it races across the screen. I want to slow this whole process down so that holding the key creates an X millisecond delay between it 'jumping'/moving by it's width. EDIT: Turns out gameTime.ElapsedGameTime.TotalMilliseconds is returning 0... all of the time. I have no idea why.
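
    A hedged sketch of the usual fix: keep the cooldown in seconds and subtract the frame time where a real GameTime is available (Update/HandleInput in the GameStateManagement sample is handed one; a GameTime constructed elsewhere will happily report zero elapsed time, which matches the edit above). Note also that the 10 in the original is ten milliseconds, shorter than a single frame, so even when the subtraction works the delay would be invisible. Field and constant names below are illustrative only.

      // Key-repeat cooldown sketch (assumes 'movement' and the keyboard state from the sample screen).
      const float MoveDelay = 0.15f;       // seconds between grid "jumps" while the key is held
      float moveCooldown = 0f;

      void HandleMovement(GameTime gameTime, KeyboardState keyboardState)
      {
          moveCooldown -= (float)gameTime.ElapsedGameTime.TotalSeconds;

          if (keyboardState.IsKeyDown(Keys.Right))
          {
              if (moveCooldown <= 0f)
              {
                  movement.X += 50;        // one jump by the block's width
                  moveCooldown = MoveDelay;
              }
          }
          else
          {
              moveCooldown = 0f;           // releasing the key allows an immediate move on the next press
          }
      }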

    Read the article

  • handling NSStream events when using EASession in MonoTouch

    - by scotru
    Does anyone have an example of how to handle read and write NSStream events in Monotouch when working with accessories via EASession? It looks like there isn't a strongly typed delegate for this and I'm having trouble figuring out what selectors I need to handle on the delegates of my InputStream and OutputStream and what I actually need to do with each selector in order to properly fill and empty the buffers belonging to the EASession object. Basically, I'm trying to port Apple's EADemo app to Monotouch right now. Here's the Objective-C source that I think is relevant to this problem: / / asynchronous NSStream handleEvent method - (void)stream:(NSStream *)aStream handleEvent:(NSStreamEvent)eventCode { switch (eventCode) { case NSStreamEventNone: break; case NSStreamEventOpenCompleted: break; case NSStreamEventHasBytesAvailable: [self _readData]; break; case NSStreamEventHasSpaceAvailable: [self _writeData]; break; case NSStreamEventErrorOccurred: break; case NSStreamEventEndEncountered: break; default: break; } } / low level write method - write data to the accessory while there is space available and data to write - (void)_writeData { while (([[_session outputStream] hasSpaceAvailable]) && ([_writeData length] > 0)) { NSInteger bytesWritten = [[_session outputStream] write:[_writeData bytes] maxLength:[_writeData length]]; if (bytesWritten == -1) { NSLog(@"write error"); break; } else if (bytesWritten > 0) { [_writeData replaceBytesInRange:NSMakeRange(0, bytesWritten) withBytes:NULL length:0]; } } } // low level read method - read data while there is data and space available in the input buffer - (void)_readData { #define EAD_INPUT_BUFFER_SIZE 128 uint8_t buf[EAD_INPUT_BUFFER_SIZE]; while ([[_session inputStream] hasBytesAvailable]) { NSInteger bytesRead = [[_session inputStream] read:buf maxLength:EAD_INPUT_BUFFER_SIZE]; if (_readData == nil) { _readData = [[NSMutableData alloc] init]; } [_readData appendBytes:(void *)buf length:bytesRead]; //NSLog(@"read %d bytes from input stream", bytesRead); } [[NSNotificationCenter defaultCenter] postNotificationName:EADSessionDataReceivedNotification object:self userInfo:nil]; } I'd also appreciate any architectural recommendations on how to best implement this in monotouch. For example, in the Objective C implementation these functions are not contained in any class--but in Monotouch would it make sense to make them members of my

    Read the article

  • Executing Stored Procedure for each InputRow + SSIS Script Component.

    - by Nev_Rahd
    Hello, in my Script Component I am trying to execute a stored procedure which returns multiple rows, from which I need to generate output rows. Code as below: /* Microsoft SQL Server Integration Services Script Component * Write scripts using Microsoft Visual C# 2008. * ScriptMain is the entry point class of the script.*/ using System; using System.Data; using System.Data.SqlClient; using Microsoft.SqlServer.Dts.Pipeline.Wrapper; using Microsoft.SqlServer.Dts.Runtime.Wrapper; [Microsoft.SqlServer.Dts.Pipeline.SSISScriptComponentEntryPointAttribute] public class ScriptMain : UserComponent { SqlConnection cnn = new SqlConnection(); IDTSConnectionManager100 cnManager; //string cmd; SqlCommand cmd = new SqlCommand(); public override void AcquireConnections(object Transaction) { cnManager = base.Connections.myConnection; cnn = (SqlConnection)cnManager.AcquireConnection(null); } public override void PreExecute() { base.PreExecute(); } public override void PostExecute() { base.PostExecute(); } public override void InputRows_ProcessInputRow(InputRowsBuffer Row) { while(Row.NextRow()) { DataTable dt = new DataTable(); cmd.Connection = cnn; cmd.CommandText = "OSPATTRIBUTE_GetOPNforOP"; cmd.CommandType = CommandType.StoredProcedure; cmd.Parameters.Add("@NK", SqlDbType.VarChar).Value = Row.OPNK.ToString(); cmd.Parameters.Add("@EDWSTARTDATE", SqlDbType.DateTime).Value = Row.EDWEFFECTIVESTARTDATETIME; SqlDataAdapter adapter = new SqlDataAdapter(cmd); adapter.Fill(dt); foreach (DataRow dtrow in dt.Rows) { OutputValidBuffer.AddRow(); OutputValidBuffer.OPNK = Row.OPNK; OutputValidBuffer.OSPTYPECODE = Row.OSPTYPECODE; OutputValidBuffer.ORGPROVTYPEDESC = Row.ORGPROVTYPEDESC; OutputValidBuffer.HEALTHSECTORCODE = Row.HEALTHSECTORCODE; OutputValidBuffer.HEALTHSECTORDESCRIPTION = Row.HEALTHSECTORDESCRIPTION; OutputValidBuffer.EDWEFFECTIVESTARTDATETIME = Row.EDWEFFECTIVESTARTDATETIME; OutputValidBuffer.EDWEFFECTIVEENDDATETIME = Row.EDWEFFECTIVEENDDATETIME; OutputValidBuffer.OPQI = Row.OPQI; OutputValidBuffer.OPNNK = dtrow[0].ToString(); OutputValidBuffer.OSPNAMETYPECODE = dtrow[1].ToString(); OutputValidBuffer.NAMETYPEDESC = dtrow[2].ToString(); OutputValidBuffer.OSPNAME = dtrow[3].ToString(); OutputValidBuffer.EDWEFFECTIVESTARTDATETIME1 = Row.EDWEFFECTIVESTARTDATETIME; OutputValidBuffer.EDWEFFECTIVEENDDATETIME1 = Row.EDWEFFECTIVEENDDATETIME; OutputValidBuffer.OPNQI = dtrow[6].ToString(); } } } public override void ReleaseConnections() { cnManager.ReleaseConnection(cnn); } } This is always skipping the first row. while(Row.NextRow()) is always bringing the second row of the input buffer. What am I doing wrong? Thanks
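
    A hedged observation that may explain the skipped row: the script component's base class already walks the buffer and calls ProcessInputRow once per row, so the extra while (Row.NextRow()) advances the buffer a second time and rows get lost. A sketch of the method without the loop (and with the reused command's parameters cleared on each call) is below; it keeps the original column assignments, abbreviated here.

      public override void InputRows_ProcessInputRow(InputRowsBuffer Row)
      {
          // No NextRow() loop - this method already runs once for every input row.
          DataTable dt = new DataTable();
          cmd.Connection = cnn;
          cmd.CommandText = "OSPATTRIBUTE_GetOPNforOP";
          cmd.CommandType = CommandType.StoredProcedure;
          cmd.Parameters.Clear();   // cmd is a reused field, so drop the previous row's parameters
          cmd.Parameters.Add("@NK", SqlDbType.VarChar).Value = Row.OPNK.ToString();
          cmd.Parameters.Add("@EDWSTARTDATE", SqlDbType.DateTime).Value = Row.EDWEFFECTIVESTARTDATETIME;

          new SqlDataAdapter(cmd).Fill(dt);

          foreach (DataRow dtrow in dt.Rows)
          {
              OutputValidBuffer.AddRow();
              OutputValidBuffer.OPNK = Row.OPNK;
              // ... copy the remaining columns exactly as in the original foreach ...
              OutputValidBuffer.OPNNK = dtrow[0].ToString();
          }
      }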

    Read the article

  • Problem with return 2 libc method

    - by jth
    Hi, I'm trying to understand the return2libc method. I'm using Ubuntu Linux 9.10, 32 bit with ASLR disabled. In theory, it sounds quite easy, overwrite the saved eip with the address of system() (or whatever function you want), then put the address to which system() should return and after that, the parameter for system, the "/bin/bash"-string. But what happens is that my exploit keeps segfaulting the vulnerable program. I assume something with the system()-address went wrong. This is what I did so far: Determined the address of system(): (gdb) print system $1 = {<text variable, no debug info>} 0x167020 <system> (gdb) x/x system 0x167020 <system>: 0x890cec83 I used the subsequent x/x system because those 3 bytes returned by print system look like an index in some sort of jumptable (PLT?), so I assume 0x890cec83 is the right address which is used to overwrite the saved eip. After that I determined the address of the /bin/bash string in memory, using a small C program which basically consists of this line: printf("Address of string /bin/bash: %p\n", getenv("SHELL")); Then I looked a little bit around in the memory and found /bin/bash: (gdb) x/s 0xbffff6ca 0xbffff6ca: "/bin/bash" After I gathered this information, I filled the buffer: (gdb) b 9 Breakpoint 1 at 0x8048407: file victim.c, line 9. (gdb) r `perl -e 'print "A"x9 . "\x83\xec\x0c\x89FAKE\xca\xf6\xff\xbf";'` Breakpoint 1, main (argc=1111638594, argv=0xc360cca) at victim.c:10 10 return 0; (gdb) x/s 0xbffff6ca 0xbffff6ca: "/bin/bash" Stack frame looks like this: (gdb) i f Stack level 0, frame at 0xbffff440: eip = 0x8048407 in main (victim.c:10); saved eip 0x890cec83 source language c. Arglist at 0xbffff438, args: argc=1111638594, argv=0xc360cca Locals at 0xbffff438, Previous frame's sp is 0xbffff440 Saved registers: ebp at 0xbffff438, eip at 0xbffff43c This seems all right to me, saved eip was overwritten with the (hopefully) correct system()-address, return address for system was set to "FAKE" (shouldn't matter) and the address of /bin/bash also seems to be correct. When I'm continuing the execution, victim segfaults on some strange address and certainly not in 0x890cec83: (gdb) cont Continuing. Program received signal SIGSEGV, Segmentation fault. 0x0804840d in main (argc=Cannot access memory at address 0x41414149 ) at victim.c:11 11 } Does anyone have an explanation or a hint what happens here and why the execution isn't redirected to 0x890cec83? Thanks in advance, any hint, however vague, would be appreciated. I have no idea why this doesn't work.

    Read the article

  • How do I serialize a large graph of .NET objects into a SQL Server BLOB without creating a large buffer?

    - by Ian Ringrose
    We have code like:

    ms = New IO.MemoryStream
    bin = New System.Runtime.Serialization.Formatters.Binary.BinaryFormatter
    bin.Serialize(ms, largeGraphOfObjects)
    dataToSaveToDatabase = ms.ToArray()
    // put dataToSaveToDatabase in a Sql server BLOB

    But the memory stream allocates a large buffer from the large object heap, which is giving us problems. So how can we stream the data without needing enough free memory to hold the serialized objects? I am looking for a way to get a Stream from SQL Server that can then be passed to bin.Serialize(), thus avoiding keeping all the data in my process's memory. Likewise for reading the data back...

    Some more background. This is part of a complex numerical processing system that processes data in near real time looking for equipment problems etc.; the serialization is done to allow a restart when there is a problem with data quality from a data feed etc. (We store the data feeds and can rerun them after the operator has edited out bad values.) Therefore we serialize the objects a lot more often than we de-serialize them. The objects we are serializing include very large arrays, mostly of doubles, as well as a lot of small "more normal" objects. We are pushing the memory limit on a 32-bit system and making the garbage collector work very hard. (Efforts are being made elsewhere in the system to improve this, e.g. reusing large arrays rather than creating new arrays.) Often the serialization of the state is the last straw that causes an out-of-memory exception; our peak memory usage is while this serialization is being done. I think we get large-object-heap fragmentation when we de-serialize the object; I expect there are also other problems with large-object-heap fragmentation given the size of the arrays. (This has not yet been investigated, as the person that first looked at this is a numerical processing expert, not a memory management expert.) Our customers use a mix of SQL Server 2000, 2005 and 2008, and we would rather not have different code paths for each version of SQL Server if possible. We can have many active models at a time (in different processes, across many machines), and each model can have many saved states. Hence the saved state is stored in a database BLOB rather than a file. As the spread of saving the state is important, I would rather not serialize the object to a file and then put the file in a BLOB one block at a time. Other related questions I have asked:
    How to Stream data from/to SQL Server BLOB fields?
    Is there a SqlFileStream like class that works with Sql Server 2005?
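    One direction, sketched under assumptions rather than as a drop-in answer: on SQL Server 2005 and later a varbinary(max) column can be written piecewise with UPDATE ... SET col.WRITE(@chunk, @offset, @len), so a small Stream wrapper lets BinaryFormatter.Serialize write straight into the BLOB in chunks instead of building one big array. The table and column names below (dbo.ModelState, State, Id) are hypothetical, and the .WRITE clause does not exist on SQL Server 2000, so that version would still need a separate path.

    using System;
    using System.Data;
    using System.Data.SqlClient;
    using System.IO;

    // Minimal write-only Stream that appends each chunk to a varbinary(max) column.
    // Wrap it in a BufferedStream so BinaryFormatter's many small writes become fewer round trips.
    class SqlBlobWriteStream : Stream
    {
        private readonly SqlCommand cmd;
        private long offset;

        public SqlBlobWriteStream(SqlConnection cnn, int id)
        {
            cmd = new SqlCommand(
                "UPDATE dbo.ModelState SET State.WRITE(@chunk, @offset, @len) WHERE Id = @id", cnn);
            cmd.Parameters.Add("@chunk", SqlDbType.VarBinary, -1);
            cmd.Parameters.Add("@offset", SqlDbType.BigInt);
            cmd.Parameters.Add("@len", SqlDbType.BigInt);
            cmd.Parameters.Add("@id", SqlDbType.Int).Value = id;
        }

        public override void Write(byte[] buffer, int index, int count)
        {
            byte[] chunk = new byte[count];
            Array.Copy(buffer, index, chunk, 0, count);
            cmd.Parameters["@chunk"].Value = chunk;
            cmd.Parameters["@offset"].Value = offset;
            cmd.Parameters["@len"].Value = (long)count;
            cmd.ExecuteNonQuery();   // appends this chunk at the current end of the column
            offset += count;
        }

        public override void Flush() { }
        public override bool CanRead { get { return false; } }
        public override bool CanSeek { get { return false; } }
        public override bool CanWrite { get { return true; } }
        public override long Length { get { return offset; } }
        public override long Position { get { return offset; } set { throw new NotSupportedException(); } }
        public override int Read(byte[] buffer, int index, int count) { throw new NotSupportedException(); }
        public override long Seek(long pos, SeekOrigin origin) { throw new NotSupportedException(); }
        public override void SetLength(long value) { throw new NotSupportedException(); }
    }

    Usage would be roughly: initialise the row's State column to 0x (an empty varbinary, since .WRITE cannot target NULL), then new BinaryFormatter().Serialize(new BufferedStream(new SqlBlobWriteStream(cnn, id), 64 * 1024), largeGraphOfObjects). Reading back could be done the same way in reverse with SUBSTRING(State, @offset, @len) chunks, and on SQL Server 2008 FILESTREAM with SqlFileStream is another option.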

    Read the article

  • Java map / nio / NFS issue causing a VM fault: "a fault occurred in a recent unsafe memory access operation in compiled Java code"

    - by Matthew Bloch
    I have written a parser class for a particular binary format (nfdump, if anyone is interested) which uses java.nio's MappedByteBuffer to read through files of a few GB each. The binary format is just a series of headers and mostly fixed-size binary records, which are fed out to the caller by calling nextRecord(), which pushes on the state machine, returning null when it's done. It performs well. It works on a development machine. On my production host it can run for a few minutes or hours, but always seems to throw "java.lang.InternalError: a fault occurred in a recent unsafe memory access operation in compiled Java code", fingering one of the Map.getInt, getShort methods, i.e. a read operation on the map. The uncontroversial (?) code that sets up the map is this:

    /** Set up the map from the given filename and position */
    protected void open() throws IOException {
        // Set up buffer, is this all the flexibility we'll need?
        channel = new FileInputStream(file).getChannel();
        MappedByteBuffer map1 = channel.map(FileChannel.MapMode.READ_ONLY, 0, channel.size());
        map1.load(); // we want the whole thing, plus seems to reduce frequency of crashes?
        map = map1;
        // assumes the host writing the files is little-endian (x86), ought to be configurable
        map.order(java.nio.ByteOrder.LITTLE_ENDIAN);
        map.position(position);
    }

    and then I use the various map.get* methods to read shorts, ints, longs and other sequences of bytes, before hitting the end of the file and closing the map. I've never seen the exception thrown on my development host. But the significant point of difference between my production host and development is that on the former I am reading sequences of these files over NFS (probably 6-8 TB eventually, still growing). On my dev machine I have a smaller selection of these files locally (60 GB), but when it blows up on the production host it's usually well before it gets to 60 GB of data. Both machines are running Java 1.6.0_20-b02, though the production host is running Debian/lenny and the dev host Ubuntu/karmic. I'm not convinced that will make any difference. Both machines have 16 GB RAM and are running with the same Java heap settings. I take the view that if there is a bug in my code, there is enough of a bug in the JVM not to throw me a proper exception! But I think it is just a particular JVM implementation bug due to interactions between NFS and mmap, possibly a recurrence of 6244515, which is officially fixed. I already tried adding in a "load" call to force the MappedByteBuffer to load its contents into RAM - this seemed to delay the error in the one test run I've done, but not prevent it. Or it could be coincidence that that was the longest it had gone before crashing! If you've read this far and have done this kind of thing with java.nio before, what would your instinct be? Right now mine is to rewrite it without nio :)
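    One workaround, sketched under the assumption that the map field can be declared as a plain ByteBuffer (which exposes the same get*/order/position API as MappedByteBuffer): avoid mmap over NFS entirely and read the file, or a window of it, into a heap buffer with FileChannel.read(). An NFS hiccup then surfaces as an IOException instead of a SIGBUS-style fault inside compiled code. The field and variable names follow the snippet above; paging through files larger than 2 GB is left out of the sketch.

    /** Alternative set-up that avoids MappedByteBuffer: read into a heap buffer instead. */
    protected void open() throws IOException {
        channel = new FileInputStream(file).getChannel();
        // A single window; a real version would page through files larger than Integer.MAX_VALUE bytes.
        ByteBuffer buf = ByteBuffer.allocate((int) Math.min(channel.size(), Integer.MAX_VALUE));
        while (buf.hasRemaining() && channel.read(buf) >= 0) {
            // keep filling until the window is full or EOF; NFS errors become IOExceptions here
        }
        buf.flip();
        buf.order(java.nio.ByteOrder.LITTLE_ENDIAN); // same little-endian assumption as before
        buf.position(position);
        map = buf; // assumes the field is typed as ByteBuffer rather than MappedByteBuffer
    }

    The trade-off is memory: the window lives on the Java heap instead of being paged in and out by the OS, which may or may not be acceptable at a few GB per file.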

    Read the article

  • cannot retrieve effect.fx file

    - by numerical25
    I am having issues loading my effect.fx file in DirectX. When I step into my application, my ID3D10Effect *m_pDefaultEffect; pointer remains empty; the address stays at 0x000000. Below is my code:

    #pragma once
    #include "stdafx.h"
    #include "resource.h"
    #include "d3d10.h"
    #include "d3dx10.h"
    #include "dinput.h"

    #define MAX_LOADSTRING 100

    class RenderEngine
    {
    protected:
        RECT m_screenRect;

        //direct3d Members
        ID3D10Device *m_pDevice;                      // The IDirect3DDevice10 interface
        ID3D10Texture2D *m_pBackBuffer;               // Pointer to the back buffer
        ID3D10RenderTargetView *m_pRenderTargetView;  // Pointer to render target view
        IDXGISwapChain *m_pSwapChain;                 // Pointer to the swap chain
        RECT m_rcScreenRect;                          // The dimensions of the screen
        ID3D10Texture2D *m_pDepthStencilBuffer;
        ID3D10DepthStencilState *m_pDepthStencilState;
        ID3D10DepthStencilView *m_pDepthStencilView;

        //transformation matrixs
        D3DXMATRIX g_mtxWorld;
        D3DXMATRIX g_mtxView;
        D3DXMATRIX g_mtxProj;

        //Effect members
        ID3D10Effect *m_pDefaultEffect;
        ID3D10EffectTechnique *m_pDefaultTechnique;

        ID3DX10Font *m_pFont;         // The font used for rendering text
        // Sprites used to hold font characters
        ID3DX10Sprite *m_pFontSprite;

        ATOM RegisterEngineClass();
        void DoFrame(float);
        bool LoadEffects();

    public:
        static HINSTANCE m_hInst;
        HWND m_hWnd;
        int m_nCmdShow;
        TCHAR m_szTitle[MAX_LOADSTRING];        // The title bar text
        TCHAR m_szWindowClass[MAX_LOADSTRING];  // the main window class name

        void DrawTextString(int x, int y, D3DXCOLOR color, const TCHAR *strOutput);

        //static functions
        static LRESULT CALLBACK WndProc(HWND hWnd, UINT message, WPARAM wParam, LPARAM lParam);
        static INT_PTR CALLBACK About(HWND hDlg, UINT message, WPARAM wParam, LPARAM lParam);

        bool InitWindow();
        bool InitDirectX();
        bool InitInstance();
        int Run();
        void ShutDown();

        RenderEngine()
        {
            m_screenRect.right = 800;
            m_screenRect.bottom = 600;
        }
    };

    Below is the implementation:

    bool RenderEngine::LoadEffects()
    {
        HRESULT hr;
        ID3D10Blob *pErrors = 0;

        // Create the default rendering effect
        hr = D3DX10CreateEffectFromFile(L"effect.fx", NULL, NULL, "fx_4_0",
                                        D3D10_SHADER_DEBUG, 0, m_pDevice, NULL, NULL,
                                        &m_pDefaultEffect, &pErrors, NULL);
        if (pErrors) // at this point m_pDefaultEffect is still empty, but pErrors returns data, which means there are
        {            // errors
            return false; // ends here
        }
        //m_pDefaultTechnique = m_pDefaultEffect->GetTechniqueByName("DefaultTechnique");
        return true;
    }

    My DirectX device does work. My effect.fx file is in the same folder as my solution files (.cpp and header files).
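    Two things worth checking, sketched as a guess rather than a confirmed fix: the relative path L"effect.fx" is resolved against the process's working directory, which when launched from Visual Studio is typically the project directory but when run directly is wherever the built .exe lives, so the file may simply not be found; and when pErrors is filled in, it already holds the compiler's or loader's message, so printing it says exactly what went wrong. A minimal variant of the error handling, keyed off the HRESULT instead of only pErrors:

    // Sketch: check the HRESULT and dump whatever D3DX10CreateEffectFromFile reported.
    if (FAILED(hr))
    {
        if (pErrors) // compile/load messages, e.g. HLSL errors or a missing-file complaint
        {
            OutputDebugStringA((const char*)pErrors->GetBufferPointer());
            pErrors->Release();
        }
        return false;
    }
    if (pErrors) { pErrors->Release(); } // warnings only; the effect itself was created
    m_pDefaultTechnique = m_pDefaultEffect->GetTechniqueByName("DefaultTechnique");
    return true;

    Copying effect.fx next to the built .exe (or setting the project's debugging working directory) is usually enough to rule the path issue in or out.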

    Read the article
