Search Results

Search found 6492 results on 260 pages for 'trigger io'.


  • Flash IO error while uploading photos over a slow upload connection

    - by Beck
    Actionscript:

        System.security.allowDomain("http://" + _root.tdomain + "/");
        import flash.net.FileReferenceList;
        import flash.net.FileReference;
        import flash.external.ExternalInterface;
        import flash.external.*;

        /* Main variables */
        var session_photos = _root.ph;
        var how_much_you_can_upload = 0;
        var selected_photos;       // container for selected photos
        var inside_photo_num = 0;  // for photo in_array selection
        var created_elements = _root.ph;
        var for_js_num = _root.ph;

        /* Functions & settings for javascript<->flash conversation */
        var methodName:String = "addtoflash";
        var instance:Object = null;
        var method:Function = addnewphotonumber;
        var wasSuccessful:Boolean = ExternalInterface.addCallback(methodName, instance, method);
        function addnewphotonumber() {
            session_photos--;
            created_elements--;
            for_js_num--;
        }

        /* Javascript hide and show flash button functions */
        function block()   { getURL("Javascript: blocking();"); }
        function unblock() { getURL("Javascript:unblocking();"); }

        /* Creating HTML platform function */
        var result = false;

        /* Uploading */
        function uploadthis(photos:Array) {
            if(!photos[inside_photo_num].upload("http://" + _root.tdomain + "/upload.php?PHPSESSID=" + _root.phpsessionid)) {
                getURL("Javascript:error_uploading();");
            }
        }

        /* Flash button (applet) options and bindings */
        var fileTypes:Array = new Array();
        var imageTypes:Object = new Object();
        imageTypes.description = "Images (*.jpg)";
        imageTypes.extension = "*.jpg;";
        fileTypes.push(imageTypes);
        var fileListener:Object = new Object();
        var btnListener:Object = new Object();
        btnListener.click = function(eventObj:Object) {
            var fileRef:FileReferenceList = new FileReferenceList();
            fileRef.addListener(fileListener);
            fileRef.browse(fileTypes);
        }
        uploadButton.addEventListener("click", btnListener);

        /* Listeners */
        fileListener.onSelect = function(fileRefList:FileReferenceList):Void {
            // resetting values
            inside_photo_num = 0;
            var list:Array = fileRefList.fileList;
            var item:FileReference;
            // PHP photo counter
            how_much_you_can_upload = 3 - session_photos;
            if(list.length > how_much_you_can_upload) {
                getURL("Javascript:howmuch=" + how_much_you_can_upload + ";list_length=" + list.length + ";limit_reached();");
                return;
            }
            // if session variable isn't yet refreshed, we check inner counter
            if(created_elements >= 3) {
                getURL("Javascript:limit_reached();");
                return;
            }
            selected_photos = list;
            for(var i:Number = 0; i < list.length; i++) {
                how_much_you_can_upload--;
                item = list[i];
                trace("name: " + item.name);
                trace(item.addListener(this));
                if((item.size / 1024) > 5000) { getURL("Javascript:size_limit_reached();"); return; }
            }
            result = false;
            setTimeout(block, 500);
            /* Increment number for new HTML container and pass it to javascript;
               after javascript returns true we start uploading */
            for_js_num++;
            if(ExternalInterface.call("create_platform", for_js_num)) {
                uploadthis(selected_photos);
            }
        }
        fileListener.onProgress = function(file:FileReference, bytesLoaded:Number, bytesTotal:Number):Void {
            getURL("Javascript:files_process(" + bytesLoaded + "," + bytesTotal + "," + for_js_num + ");");
        }
        fileListener.onComplete = function(file:FileReference, bytesLoaded:Number, bytesTotal:Number):Void {
            inside_photo_num++;
            var sendvar_lv:LoadVars = new LoadVars();
            var loadvar_lv:LoadVars = new LoadVars();
            loadvar_lv.onLoad = function(success:Boolean) {
                if(loadvar_lv.failed == 1) {
                    getURL("Javascript:type_failed();");
                    return;
                }
                getURL("Javascript:filelinks='" + loadvar_lv.json + "';fullname='" + loadvar_lv.fullname + "';completed(" + for_js_num + ");");
                created_elements++;
                if((inside_photo_num + 1) > selected_photos.length) { setTimeout(unblock, 1000); return; }
                // don't create empty containers anymore
                if(created_elements >= 3) { return; }
                result = false;
                /* Increment number for new HTML container and pass it to javascript;
                   after javascript returns true we start uploading */
                for_js_num++;
                if(ExternalInterface.call("create_platform", for_js_num)) {
                    uploadthis(selected_photos);
                }
            }
            sendvar_lv.getnum = true;
            sendvar_lv.PHPSESSID = _root.phpsessionid;
            sendvar_lv.sendAndLoad("http://" + _root.tdomain + "/upload.php", loadvar_lv, "POST");
        }
        fileListener.onCancel = function(file:FileReference):Void { }
        fileListener.onOpen = function(file:FileReference):Void { }
        fileListener.onHTTPError = function(file:FileReference, httpError:Number):Void {
            getURL("Javascript:http_error(" + httpError + ");");
        }
        fileListener.onSecurityError = function(file:FileReference, errorString:String):Void {
            getURL("Javascript:security_error(" + errorString + ");");
        }
        fileListener.onIOError = function(file:FileReference):Void {
            getURL("Javascript:io_error();");
            selected_photos[inside_photo_num].cancel();
            uploadthis(selected_photos);
        }

    The embedding HTML:

        <PARAM name="allowScriptAccess" value="always">
        <PARAM name="swliveconnect" value="true">
        <PARAM name="movie" value="http://www.localh.com/fileref.swf?ph=0&phpsessionid=8mirsjsd75v6vk583vkus50qbb2djsp6&tdomain=www.localh.com">
        <PARAM name="wmode" value="opaque">
        <PARAM name="quality" value="high">
        <PARAM name="bgcolor" value="#ffffff">
        <EMBED swliveconnect="true" wmode="opaque" src="http://www.localh.com/fileref.swf?ph=0&phpsessionid=8mirsjsd75v6vk583vkus50qbb2djsp6&tdomain=www.localh.com" quality="high" bgcolor="#ffffff" width="100" height="22" name="fileref" align="middle" allowScriptAccess="always" type="application/x-shockwave-flash" pluginspage="http://www.macromedia.com/go/getflashplayer"></EMBED>

    My upload speed is 40 kB/s. I get a Flash IO error while uploading photos bigger than 500 kB, and no error while uploading photos of roughly 100-500 kB or less. My friend has an 8 Mbit upload speed and gets no errors even while uploading photos of 3.2 MB and more. How can I fix this problem? I have tried to re-upload on the IO error trigger, but it stops at the same place. Any solution for this error? By the way, I was watching the process via a debugging proxy and figured out that the response headers don't come at all on this IO error, and sometimes it shows a socket error. If needed, I will post the server-side PHP script as well, but it stops at if(isset($_FILES['Filedata'])) { so it won't help :) as all processing comes after this check.


  • Problem with File IO and splitting strings with Environment.NewLine in VB.Net

    - by Senthil
    Hi, I was experimenting with basic VB.Net file read/write and encountered this problem. I don't know whether it has something to do with the file IO or with the string splitting. I am writing text to a file like so:

        Dim sWriter As New StreamWriter("Data.txt")
        sWriter.WriteLine("FirstItem")
        sWriter.WriteLine("SecondItem")
        sWriter.WriteLine("ThirdItem")
        sWriter.Close()

    Then I am reading the text back from the file:

        Dim sReader As New StreamReader("Data.txt")
        Dim fileContents As String = sReader.ReadToEnd()
        sReader.Close()

    Now I am splitting the fileContents variable using Environment.NewLine and saving the returned String array:

        Dim tempStr() As String = fileContents.Split(Environment.NewLine)

    When I print the array, I get some weird results:

        For Each str As String In tempStr
            Console.WriteLine("*" + str + "*")
        Next

    I added the *'s to the beginning and end to find out what is going on. Since NewLine is used as the delimiter, I expect the strings in the array NOT to contain any newlines. But the output was this:

        *FirstItem*
        *
        SecondItem*
        *
        ThirdItem*
        *
        *

    Shouldn't it be this?

        *FirstItem*
        *SecondItem*
        *ThirdItem*

    Since I am using WriteLine, my guess is that a newline is added after the last string, hence the last empty item in the array after splitting. But why is there a newline at the beginning of the second and third strings?
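    The likely mechanism, assuming Option Strict is off: this overload of String.Split expects a Char, so Environment.NewLine ("\r\n") is silently narrowed to its first character, and only the CR acts as the delimiter while the LF stays glued to the front of the next element. The effect can be reproduced in Java (a minimal sketch, not the poster's code):

        public class SplitDemo {
            public static void main(String[] args) {
                String fileContents = "FirstItem\r\nSecondItem\r\nThirdItem\r\n";

                // Splitting on only the first half of the separator ("\r"), which is
                // what a String-to-Char narrowing does to "\r\n", leaves the "\n"
                // stuck to the front of every following element:
                for (String s : fileContents.split("\r"))
                    System.out.println("*" + s + "*");

                // Splitting on the complete separator yields clean items:
                for (String s : fileContents.split("\r\n"))
                    System.out.println("*" + s + "*");
            }
        }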


  • Mixing NIO with IO

    - by Steffen Heil
    Hi. Usually you have a single bound TCP port and several connections on it; at least there are usually more connections than bound ports. My case is different: I want to bind a lot of ports and usually have no (or at least very few) connections. So I want to use NIO to accept the incoming connections. However, I need to pass the accepted connections to the existing jsch ssh library. That requires IO sockets instead of NIO sockets; it spawns one (or two) thread(s) per connection, but that's fine for me. Now, I thought that the following lines would deliver the very same result:

        Socket a = serverSocketChannel.accept().socket();

        Socket b = serverSocketChannel.socket().accept();

        SocketChannel channel = serverSocketChannel.accept();
        channel.configureBlocking( true );
        Socket c = channel.socket();

        Socket d = serverSocket.accept();

    However, the getInputStream() and getOutputStream() functions of the returned sockets seem to work differently. Only if the socket was accepted using the last call can jsch work with it. In the first three cases, it fails (and I am sorry: I don't know why). So is there a way to convert such a socket? Regards, Steffen
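    A hedged observation: the Socket returned by SocketChannel.socket() is an adaptor backed by the channel, not a plain java.net.Socket, and its streams behave differently (they throw IllegalBlockingModeException while the channel is non-blocking, and timeout support on them has historically been limited), which is the kind of difference a stream-oriented library like jsch can stumble over. A minimal sketch of the NIO accept path that hands over a blocking, channel-backed Socket:

        import java.net.InetSocketAddress;
        import java.net.Socket;
        import java.nio.channels.ServerSocketChannel;
        import java.nio.channels.SocketChannel;

        public class AcceptSketch {
            public static void main(String[] args) throws Exception {
                ServerSocketChannel ssc = ServerSocketChannel.open();
                ssc.bind(new InetSocketAddress(2222)); // port is a placeholder

                SocketChannel ch = ssc.accept();  // channel is in blocking mode by default
                ch.configureBlocking(true);       // make sure it stays blocking before
                                                  // handing it to stream-based code
                Socket s = ch.socket();           // adaptor Socket, backed by the channel

                // Caveat (assumption based on the adaptor's documented limits):
                // these streams are not identical to a plain Socket's, e.g.
                // setSoTimeout()/timed reads may not be honoured.
                System.out.println(s.getInputStream());
            }
        }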


  • Traditional IO vs memory-mapped

    - by Senne
    I'm trying to illustrate to students the difference in performance between traditional IO and memory-mapped files in Java. I found an example somewhere on the internet, but not everything is clear to me; I don't even think all the steps are necessary. I read a lot about it here and there, but I'm not convinced that either of them is implemented correctly. The code I am trying to understand is:

        public class FileCopy {
            public static void main(String args[]) {
                if (args.length < 1) {
                    System.out.println(" Wrong usage!");
                    System.out.println(" Correct usage is : java FileCopy <large file with full path>");
                    System.exit(0);
                }
                String inFileName = args[0];
                File inFile = new File(inFileName);
                if (inFile.exists() != true) {
                    System.out.println(inFileName + " does not exist!");
                    System.exit(0);
                }
                try {
                    new FileCopy().memoryMappedCopy(inFileName, inFileName + ".new");
                    new FileCopy().customBufferedCopy(inFileName, inFileName + ".new1");
                } catch (FileNotFoundException fne) {
                    fne.printStackTrace();
                } catch (IOException ioe) {
                    ioe.printStackTrace();
                } catch (Exception e) {
                    e.printStackTrace();
                }
            }

            public void memoryMappedCopy(String fromFile, String toFile) throws Exception {
                long timeIn = new Date().getTime();
                // read input file
                RandomAccessFile rafIn = new RandomAccessFile(fromFile, "rw");
                FileChannel fcIn = rafIn.getChannel();
                ByteBuffer byteBuffIn = fcIn.map(FileChannel.MapMode.READ_WRITE, 0, (int) fcIn.size());
                fcIn.read(byteBuffIn);
                byteBuffIn.flip();
                RandomAccessFile rafOut = new RandomAccessFile(toFile, "rw");
                FileChannel fcOut = rafOut.getChannel();
                ByteBuffer writeMap = fcOut.map(FileChannel.MapMode.READ_WRITE, 0, (int) fcIn.size());
                writeMap.put(byteBuffIn);
                long timeOut = new Date().getTime();
                System.out.println("Memory mapped copy Time for a file of size :" + (int) fcIn.size() + " is " + (timeOut - timeIn));
                fcOut.close();
                fcIn.close();
            }

            static final int CHUNK_SIZE = 100000;
            static final char[] inChars = new char[CHUNK_SIZE];

            public static void customBufferedCopy(String fromFile, String toFile) throws IOException {
                long timeIn = new Date().getTime();
                Reader in = new FileReader(fromFile);
                Writer out = new FileWriter(toFile);
                while (true) {
                    synchronized (inChars) {
                        int amountRead = in.read(inChars);
                        if (amountRead == -1) {
                            break;
                        }
                        out.write(inChars, 0, amountRead);
                    }
                }
                long timeOut = new Date().getTime();
                System.out.println("Custom buffered copy Time for a file of size :" + (int) new File(fromFile).length() + " is " + (timeOut - timeIn));
                in.close();
                out.close();
            }
        }

    When exactly is it necessary to use RandomAccessFile? Here it is used to read and write in memoryMappedCopy; is it actually necessary just to copy a file? Or is it part of the memory mapping? In customBufferedCopy, why is synchronized used here?
    I also found a different example that should test the performance between the two:

        public class MappedIO {
            private static int numOfInts = 4000000;
            private static int numOfUbuffInts = 200000;

            private abstract static class Tester {
                private String name;
                public Tester(String name) { this.name = name; }
                public long runTest() {
                    System.out.print(name + ": ");
                    try {
                        long startTime = System.currentTimeMillis();
                        test();
                        long endTime = System.currentTimeMillis();
                        return (endTime - startTime);
                    } catch (IOException e) {
                        throw new RuntimeException(e);
                    }
                }
                public abstract void test() throws IOException;
            }

            private static Tester[] tests = {
                new Tester("Stream Write") {
                    public void test() throws IOException {
                        DataOutputStream dos = new DataOutputStream(
                            new BufferedOutputStream(
                                new FileOutputStream(new File("temp.tmp"))));
                        for (int i = 0; i < numOfInts; i++)
                            dos.writeInt(i);
                        dos.close();
                    }
                },
                new Tester("Mapped Write") {
                    public void test() throws IOException {
                        FileChannel fc = new RandomAccessFile("temp.tmp", "rw").getChannel();
                        IntBuffer ib = fc.map(FileChannel.MapMode.READ_WRITE, 0, fc.size()).asIntBuffer();
                        for (int i = 0; i < numOfInts; i++)
                            ib.put(i);
                        fc.close();
                    }
                },
                new Tester("Stream Read") {
                    public void test() throws IOException {
                        DataInputStream dis = new DataInputStream(
                            new BufferedInputStream(
                                new FileInputStream("temp.tmp")));
                        for (int i = 0; i < numOfInts; i++)
                            dis.readInt();
                        dis.close();
                    }
                },
                new Tester("Mapped Read") {
                    public void test() throws IOException {
                        FileChannel fc = new FileInputStream(new File("temp.tmp")).getChannel();
                        IntBuffer ib = fc.map(FileChannel.MapMode.READ_ONLY, 0, fc.size()).asIntBuffer();
                        while (ib.hasRemaining())
                            ib.get();
                        fc.close();
                    }
                },
                new Tester("Stream Read/Write") {
                    public void test() throws IOException {
                        RandomAccessFile raf = new RandomAccessFile(new File("temp.tmp"), "rw");
                        raf.writeInt(1);
                        for (int i = 0; i < numOfUbuffInts; i++) {
                            raf.seek(raf.length() - 4);
                            raf.writeInt(raf.readInt());
                        }
                        raf.close();
                    }
                },
                new Tester("Mapped Read/Write") {
                    public void test() throws IOException {
                        FileChannel fc = new RandomAccessFile(new File("temp.tmp"), "rw").getChannel();
                        IntBuffer ib = fc.map(FileChannel.MapMode.READ_WRITE, 0, fc.size()).asIntBuffer();
                        ib.put(0);
                        for (int i = 1; i < numOfUbuffInts; i++)
                            ib.put(ib.get(i - 1));
                        fc.close();
                    }
                }
            };

            public static void main(String[] args) {
                for (int i = 0; i < tests.length; i++)
                    System.out.println(tests[i].runTest());
            }
        }

    I more or less see what's going on, and my output looks like this:

        Stream Write: 653
        Mapped Write: 51
        Stream Read: 651
        Mapped Read: 40
        Stream Read/Write: 14481
        Mapped Read/Write: 6

    What is making the Stream Read/Write take so unbelievably long? And as a read/write test, it looks a bit pointless to me to read the same integer over and over (if I understand correctly what's going on in Stream Read/Write). Wouldn't it be better to read ints from the previously written file and just read and write ints in the same place? Is there a better way to illustrate it? I've been breaking my head about a lot of these things for a while and I just can't get the whole picture.
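    On the first two questions: MapMode.READ_WRITE requires a channel that is open for both reading and writing, which is the only reason the example goes through RandomAccessFile; a read-only mapping works from any readable channel. The explicit fcIn.read() into a buffer that already maps the file is redundant, and the synchronized block guards inChars even though only one thread ever touches it, so it appears to buy nothing here. A minimal mapped copy without those extras (a sketch, not a benchmark):

        import java.io.IOException;
        import java.nio.MappedByteBuffer;
        import java.nio.channels.FileChannel;
        import java.nio.file.Path;
        import java.nio.file.Paths;
        import static java.nio.file.StandardOpenOption.*;

        public class MappedCopy {
            // Copy a file through a read-only mapping of the source.
            public static void copy(Path from, Path to) throws IOException {
                try (FileChannel in = FileChannel.open(from, READ);
                     FileChannel out = FileChannel.open(to, CREATE, WRITE, TRUNCATE_EXISTING)) {
                    MappedByteBuffer map = in.map(FileChannel.MapMode.READ_ONLY, 0, in.size());
                    out.write(map); // the mapping already reflects the file; no read() needed
                }
            }

            public static void main(String[] args) throws IOException {
                copy(Paths.get(args[0]), Paths.get(args[0] + ".new"));
            }
        }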


  • MySQL trigger works after console insert, but not after script insert

    - by Marcos
    Hello, I have a strange, weird problem with a trigger: I set up a trigger to update other tables after an insert into a table. If I do the insert from the MySQL console, everything works fine, but if I do inserts from an external Python script, the trigger does nothing, as you can see below. Inserting from the console works fine, but inserting THE SAME DATA from the Python script doesn't work. I tried changing the definer to 'user'@'%' and 'root'@'%' but it still does nothing. Any idea what can be wrong? Regards

        mysql> select vid_visit,vid_money from videos where video_id=487;
        +-----------+-----------+
        | vid_visit | vid_money |
        +-----------+-----------+
        |        21 |     0.297 |
        +-----------+-----------+
        1 row in set (0,01 sec)

        mysql> INSERT INTO `prusland`.`validEvents` (
            `id` , `campaigns_id` , `video_id` , `date` , `producer_id` ,
            `distributor_id` , `money_producer` , `money_distributor` , `type`
        ) VALUES (
            NULL , '30', '487', '2010-05-20 01:20:00', '1', '0', '0.009', '0.000', 'PRE'
        );
        Query OK, 1 row affected (0,00 sec)

        mysql> select vid_visit,vid_money from videos where video_id=487;
        +-----------+-----------+
        | vid_visit | vid_money |
        +-----------+-----------+
        |        22 |     0.306 |
        +-----------+-----------+

    The trigger:

        DROP TRIGGER IF EXISTS `updateVisitAndMoney`//
        CREATE TRIGGER `updateVisitAndMoney` BEFORE INSERT ON `validEvents`
        FOR EACH ROW BEGIN
          IF (NEW.type = 'PRE') THEN
            SET @eventcash = NEW.money_producer + NEW.money_distributor;
            UPDATE campaigns
               SET cmp_visit_distributed = cmp_visit_distributed + 1,
                   cmp_money_distributed = cmp_money_distributed + NEW.money_producer + NEW.money_distributor
             WHERE idcampaigns = NEW.campaigns_id;
            UPDATE offer_producer
               SET ofp_visit_procesed = ofp_visit_procesed + 1,
                   ofp_money_procesed = ofp_money_procesed + NEW.money_producer
             WHERE ofp_video_id = NEW.video_id
               AND ofp_money_procesed = NEW.campaigns_id;
            UPDATE videos
               SET vid_visit = vid_visit + 1,
                   vid_money = vid_money + @eventcash
             WHERE video_id = NEW.video_id;
            IF (NEW.distributor_id != '') THEN
              UPDATE agreements
                 SET visit_procesed = visit_procesed + 1,
                     money_producer = money_producer + NEW.money_producer,
                     money_distributor = money_distributor + NEW.money_distributor
               WHERE id_campaigns = NEW.campaigns_id
                 AND id_video = NEW.video_id
                 AND ag_distributor_id = NEW.distributor_id;
              UPDATE eventForDay
                 SET visit = visit + 1,
                     money = money + NEW.money_distributor
               WHERE date = SYSDATE()
                 AND campaign_id = NEW.campaigns_id
                 AND user_id = NEW.distributor_id;
              UPDATE eventForDay
                 SET visit = visit + 1,
                     money = money + NEW.money_producer
               WHERE date = SYSDATE()
                 AND campaign_id = NEW.campaigns_id
                 AND user_id = NEW.producer_id;
            ELSE
              UPDATE eventForDay
                 SET visit = visit + 1,
                     money = money + NEW.money_producer
               WHERE date = SYSDATE()
                 AND campaign_id = NEW.campaigns_id
                 AND user_id = NEW.producer_id;
            END IF;
          END IF;
        END //
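    One frequent culprit worth ruling out, offered as an assumption since the Python code is not shown: Python's MySQLdb disables autocommit on new connections, so the INSERT and everything the trigger wrote stay inside an open transaction and are rolled back unless the script calls commit(); the console, by contrast, runs with autocommit on. A sketch of the same insert from JDBC with the commit made explicit (connection details are placeholders):

        import java.sql.Connection;
        import java.sql.DriverManager;
        import java.sql.PreparedStatement;

        public class InsertEvent {
            public static void main(String[] args) throws Exception {
                try (Connection c = DriverManager.getConnection(
                        "jdbc:mysql://localhost/prusland", "user", "password")) {
                    c.setAutoCommit(false); // what MySQLdb does implicitly
                    try (PreparedStatement ps = c.prepareStatement(
                            "INSERT INTO validEvents (campaigns_id, video_id, `date`, producer_id, "
                          + "distributor_id, money_producer, money_distributor, type) "
                          + "VALUES (?, ?, ?, ?, ?, ?, ?, ?)")) {
                        ps.setInt(1, 30);
                        ps.setInt(2, 487);
                        ps.setString(3, "2010-05-20 01:20:00");
                        ps.setInt(4, 1);
                        ps.setInt(5, 0);
                        ps.setBigDecimal(6, new java.math.BigDecimal("0.009"));
                        ps.setBigDecimal(7, new java.math.BigDecimal("0.000"));
                        ps.setString(8, "PRE");
                        ps.executeUpdate();
                    }
                    c.commit(); // without this, the insert AND the trigger's updates vanish
                }
            }
        }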


  • What's up with LDoms: Part 9 - Direct IO

    - by Stefan Hinker
    In the last article of this series, we discussed the most general of all physical IO options available for LDoms: root domains. Now let's have a short look at the next level of granularity: virtualizing individual PCIe slots. In LDoms terminology, this feature is called "Direct IO" or DIO. It is very similar to root domains, but instead of reassigning ownership of a complete root complex, it only moves a single PCIe slot or endpoint device to a different domain. Let's look again at the hardware available to mars in the original configuration:

        root@sun:~# ldm ls-io
        NAME                         TYPE   BUS    DOMAIN    STATUS
        ----                         ----   ---    ------    ------
        pci_0                        BUS    pci_0  primary
        pci_1                        BUS    pci_1  primary
        pci_2                        BUS    pci_2  primary
        pci_3                        BUS    pci_3  primary
        /SYS/MB/PCIE1                PCIE   pci_0  primary   EMP
        /SYS/MB/SASHBA0              PCIE   pci_0  primary   OCC
        /SYS/MB/NET0                 PCIE   pci_0  primary   OCC
        /SYS/MB/PCIE5                PCIE   pci_1  primary   EMP
        /SYS/MB/PCIE6                PCIE   pci_1  primary   EMP
        /SYS/MB/PCIE7                PCIE   pci_1  primary   EMP
        /SYS/MB/PCIE2                PCIE   pci_2  primary   EMP
        /SYS/MB/PCIE3                PCIE   pci_2  primary   OCC
        /SYS/MB/PCIE4                PCIE   pci_2  primary   EMP
        /SYS/MB/PCIE8                PCIE   pci_3  primary   EMP
        /SYS/MB/SASHBA1              PCIE   pci_3  primary   OCC
        /SYS/MB/NET2                 PCIE   pci_3  primary   OCC
        /SYS/MB/NET0/IOVNET.PF0      PF     pci_0  primary
        /SYS/MB/NET0/IOVNET.PF1      PF     pci_0  primary
        /SYS/MB/NET2/IOVNET.PF0      PF     pci_3  primary
        /SYS/MB/NET2/IOVNET.PF1      PF     pci_3  primary

    All of the "PCIE" type devices are available for DIO, with a few limitations. If the device is a slot, the card in that slot must support the DIO feature; the documentation lists all such cards. Moving a slot to a different domain works just like moving a PCI root complex. Again, this is not a dynamic process and includes reboots of the affected domains. The resulting configuration is nicely shown in a diagram in the Admin Guide. There are several important things to note and consider here:

      • The domain receiving the slot/endpoint device turns into an IO domain in LDoms terminology, because it now owns some physical IO hardware.
      • Solaris will create nodes for this hardware under /devices. This includes entries for the virtual PCI root complex (pci_0 in the diagram) and anything between it and the actual endpoint device. It is very important to understand that all of this PCIe infrastructure is virtual only! Only the actual endpoint devices are true physical hardware.
      • There is an implicit dependency between the guest owning the endpoint device and the root domain owning the real PCIe infrastructure: only if the root domain is up and running will the guest domain have access to the endpoint device.
      • The root domain is still responsible for resetting and configuring the PCIe infrastructure (root complex, PCIe-level configuration, error handling etc.) because it owns this part of the physical infrastructure.
      • This also means that if the root domain needs to reset the PCIe root complex for any reason (typically a reboot of the root domain), it will reset and thus disrupt the operation of the endpoint device owned by the guest domain. The result in the guest is not predictable. I recommend configuring the resulting behaviour of the guest using domain dependencies, as described in the Admin Guide chapter "Configuring Domain Dependencies".

    Please consult the Admin Guide, section "Creating an I/O Domain by Assigning PCIe Endpoint Devices", for all the details! As you can see, there are several restrictions on this feature. It was introduced in LDoms 2.0, mainly to allow the configuration of guest domains that need access to tape devices.
    Today, with the higher number of PCIe root complexes and the availability of SR-IOV, the need to use this feature is declining. I personally do not recommend using it, mainly because of the drawbacks of the dependencies on the root domain and because it can be replaced with SR-IOV (although then with similar limitations). This was a rather short entry, more for completeness. I believe that DIO can usually be replaced by SR-IOV, which is much more flexible. I will cover SR-IOV in the next part of this blog series.


  • File IO error in Python

    - by serpiente
    I have a program that monitors a folder of Word documents for any modifications made to the files. The error (WindowsError[2]: "The system cannot find the file specified") comes when I run the program, open a .doc within the folder, make some changes and save it. Any suggestions on how to fix this? Here's the code:

        def archivar():
            txt = open('archivo.txt', 'r+')
            for rootdir, dirs, files in os.walk(r"C:\Users\keinsfield\Desktop\colegio"):
                for file in files:
                    time = os.stat(os.path.join(rootdir, file)).st_ctime
                    txt.write(file + ',' + str(time) + '\n')

        def check():
            txt = [col.split(',') for col in (open('archivo.txt', 'r+').read().split('\n'))]
            files = os.listdir(r"C:\Users\keinsfield\Desktop\colegio")
            for file in files:
                for info in txt:
                    if info[0] == os.stat(os.path.join(r"C:\Users\keinsfield\Desktop\colegio", file)).st_ctime:
                        print "modified"
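    A plausible mechanism, offered as an assumption since the traceback is not shown: while a document is open, Word keeps transient lock files (names starting with ~$) in the same folder, and one can vanish between the os.listdir() call and the os.stat() call, which raises exactly this error. An event-driven watcher avoids racing a directory listing against deletions; a minimal sketch using Java's WatchService:

        import java.nio.file.*;

        public class FolderWatch {
            public static void main(String[] args) throws Exception {
                // The watched path mirrors the one in the question.
                Path dir = Paths.get("C:/Users/keinsfield/Desktop/colegio");
                WatchService ws = FileSystems.getDefault().newWatchService();
                dir.register(ws, StandardWatchEventKinds.ENTRY_MODIFY);
                while (true) {
                    WatchKey key = ws.take(); // blocks until something changes
                    for (WatchEvent<?> ev : key.pollEvents())
                        System.out.println("modified: " + ev.context());
                    key.reset();              // re-arm the key
                }
            }
        }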


  • C# Multithreading File IO (Reading)

    - by washtik
    We have a situation where our application needs to process a series of files, and rather than perform this work synchronously, we would like to employ multithreading to split the workload among different threads. Each item of work is:

        1. Open a file for read only
        2. Process the data in the file
        3. Write the processed data to a Dictionary

    We would like to perform each file's work on a new thread. Is this possible, and would we be better off using the ThreadPool or spawning new threads, keeping in mind that each item of work only takes 30ms, but it's possible that hundreds of files will need to be processed? Any ideas to make this more efficient are appreciated.
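    For work items this short (~30 ms), a pool is generally the better fit, since creating and tearing down hundreds of short-lived threads would dominate the actual work. The question is about .NET's ThreadPool, but the tradeoff is the same everywhere; a sketch in Java terms, with process() standing in for the real per-file work:

        import java.nio.file.*;
        import java.util.List;
        import java.util.concurrent.*;

        public class FileWorkPool {
            // A fixed pool amortizes thread-creation cost over many short items.
            public static ConcurrentMap<Path, String> processAll(List<Path> files)
                    throws InterruptedException {
                ExecutorService pool =
                        Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors());
                ConcurrentMap<Path, String> results = new ConcurrentHashMap<>();
                for (Path p : files)
                    pool.submit(() -> {
                        try {
                            results.put(p, process(Files.readAllBytes(p)));
                        } catch (Exception e) {
                            e.printStackTrace();
                        }
                    });
                pool.shutdown();
                pool.awaitTermination(1, TimeUnit.HOURS);
                return results;
            }

            // Placeholder for the real per-file processing.
            private static String process(byte[] data) { return Integer.toString(data.length); }
        }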


  • GDI+ IO safety problem with removable devices

    - by sxingfeng
    I am using GDI+ for image format checking. It is really surprising that GDI+'s Image img(path); does not throw an exception when the device is removed. For example, while I am checking a list of image files on a removable device, if I unplug the disk, my application crashes. How can I avoid this problem? Many thanks! I am using C++ and GDI+ on Windows.


  • Wrong IO actions order using putStr and getLine

    - by QWRp
    I have this code:

        main = do
          putStr "Test input : "
          content <- getLine
          putStrLn content

    And when I run it (with runhaskell) or compile it (GHC 6.10.4), the result is like this:

        asd
        Test input : asd

    I'm new to Haskell, and in my opinion the prompt should be printed first. Am I right? In the code sample on http://learnyouahaskell.com/ which uses putStr and then getLine, the output presented is different from mine (IMHO correct). When I use putStrLn the program works as expected (prompt, then read, then print). Is it a bug in GHC, or is it the way it should work?
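    This is almost certainly stdout buffering rather than reordered IO actions: without a trailing newline the prompt sits in the output buffer until something flushes it, which is why putStrLn behaves differently. The usual Haskell remedy is hFlush stdout (from System.IO) after the putStr. The same behaviour and fix, sketched in Java:

        import java.util.Scanner;

        public class Prompt {
            public static void main(String[] args) {
                System.out.print("Test input : ");
                System.out.flush(); // without a newline, the prompt can sit in the buffer
                String content = new Scanner(System.in).nextLine();
                System.out.println(content);
            }
        }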


  • Python file-io code listing the current folder path instead of the specified one

    - by Tom Brito
    I have the code:

        import os
        import sys

        fileList = os.listdir(sys.argv[1])
        for file in fileList:
            if os.path.isfile(file):
                print "File >> " + os.path.abspath(file)
            else:
                print "Dir >> " + os.path.abspath(file)

    located in my music folder ("/home/tom/Music"). When I call it with:

        python test.py "/tmp"

    I expected it to list my "/tmp" files and folders with the full path. But it printed lines like:

        Dir >> /home/tom/Music/seahorse-gw2jNn
        Dir >> /home/tom/Music/FlashXX9kV847
        Dir >> /home/tom/Music/pulse-DcIEoxW5h2gz

    That is, the correct file names, but the wrong path (and these files are not even in my Music folder). What's wrong with this code?
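    The names are right and the paths are wrong because os.listdir() returns bare names, and os.path.abspath() resolves a bare name against the current working directory (the Music folder) rather than against the directory that was listed; joining each name with sys.argv[1] first is the usual fix. The same listing sketched in Java, where each entry already carries its directory:

        import java.nio.file.*;

        public class ListDir {
            public static void main(String[] args) throws Exception {
                Path dir = Paths.get(args[0]);
                // Each entry is returned relative to the listed directory,
                // so toAbsolutePath() cannot fall back to the process CWD.
                try (DirectoryStream<Path> entries = Files.newDirectoryStream(dir)) {
                    for (Path p : entries) {
                        String tag = Files.isDirectory(p) ? "Dir >> " : "File >> ";
                        System.out.println(tag + p.toAbsolutePath());
                    }
                }
            }
        }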


  • IO operations taking a long time for files on a remote server

    - by user841311
    I have files of 150 MB each on a remote server in a different domain on the network; I am accessing them through a UNC path. I want to read the file content and perform a basic string search. When I try reading the files line by line, the operation just doesn't finish and takes a long time, more than 30 minutes. However, when I copy those files to my local machine, the same code reads and performs the string search in less than 5 seconds. I don't have the .NET framework installed on the server, so I have to do this from my machine. I want to perform all of this in C# code on .NET framework 3.5, and I don't want to explicitly FTP all the files to my machine before performing the operation. Sample code:

        DirectoryInfo dir = new DirectoryInfo(@strFilePath);
        FileInfo[] fiArray = dir.GetFiles("*.txt");
        foreach (FileInfo fi in fiArray)
        {
            // reading file content from the server takes a long time, but is fast on the local machine
            // perform string search
        }

    Let me know if my requirement is not clear. Thanks in advance!
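    Given the measurement above (seconds locally versus half an hour over the share), one pragmatic pattern is a single bulk copy across the wire into a temporary file, then scanning locally, instead of paying a network round trip for every small read. A hedged sketch of that shape in Java (the UNC path and search string are placeholders):

        import java.io.BufferedReader;
        import java.nio.charset.StandardCharsets;
        import java.nio.file.*;

        public class RemoteScan {
            public static void main(String[] args) throws Exception {
                Path remote = Paths.get("\\\\server\\share\\big.txt"); // placeholder UNC path
                Path local = Files.createTempFile("scan", ".txt");
                try {
                    // One sequential bulk transfer instead of many small remote reads.
                    Files.copy(remote, local, StandardCopyOption.REPLACE_EXISTING);
                    try (BufferedReader r = Files.newBufferedReader(local, StandardCharsets.UTF_8)) {
                        String line;
                        while ((line = r.readLine()) != null)
                            if (line.contains("needle")) // placeholder search string
                                System.out.println(line);
                    }
                } finally {
                    Files.deleteIfExists(local);
                }
            }
        }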


  • Ruby : UTF-8 IO

    - by subtenante
    I use Ruby 1.8.7. I am trying to parse some text files containing Greek sentences, encoded in UTF-8. (I can't paste sample files here, because they are subject to copyright; really just some Greek text encoded in UTF-8.) I want, for each file, to parse the file, extract all the words, and make a list of each new word found in that file, all saved to one big index file. Here is my code:

        #!/usr/bin/ruby -KU

        def prepare_line(l)
          l.gsub(/^\s*[ST]\d+\s*:\s*|\s+$|\(\d+\)\s*/u, "")
        end

        def tokenize(l)
          l.split /['·.;!:\s]+/u
        end

        $dict = {}
        $cpt = 0
        $out = File.new 'out.txt', 'w'

        def lesson(file)
          $cpt = $cpt + 1
          file.readlines.each do |l|
            $out.puts l
            l = prepare_line l
            tokenize(l).each do |t|
              unless $dict[t]
                $dict[t] = $cpt
                $out.puts " #{t}\n"
              end
            end
          end
        end

        Dir.new('etc/').each do |filename|
          f = File.new("etc/#{filename}")
          unless File.directory? f
            lesson f
          end
        end

    Here is part of my output:

        ?@???†?†?????????? ?...[snip very long hangul/hanzi mishmash]... ????????†? ???N2 : ?e?te?? (2) µ???µa

    (Note that the puts l part seems to work fine, at the end of the given output line.) Any idea what is wrong with my code? (General comments about Ruby idioms I could use are very welcome; I'm really a beginner.)


  • Java io ugly try-finally block

    - by Tom Brito
    Is there a not-so-ugly way to handle the close() exceptions so that both streams get closed?

        InputStream in = new FileInputStream(inputFileName);
        OutputStream out = new FileOutputStream(outputFileName);
        try {
            copy(in, out);
        } finally {
            try {
                in.close();
            } catch (Exception e) {
                try {
                    // even if in.close() fails, we still need to close out
                    out.close();
                } catch (Exception e2) {}
                throw e; // and rethrow the 'in' exception
            }
            out.close();
        }
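    If Java 7 or later is an option, try-with-resources does exactly this: both streams are closed in reverse order even when copy() throws, and an exception from close() is propagated or attached to the primary exception as a suppressed one. A sketch:

        import java.io.*;

        public class CopyFile {
            static void copy(InputStream in, OutputStream out) throws IOException {
                byte[] buf = new byte[8192];
                int n;
                while ((n = in.read(buf)) != -1)
                    out.write(buf, 0, n);
            }

            public static void main(String[] args) throws IOException {
                // try-with-resources closes out, then in, even if copy() throws;
                // close() failures are added as suppressed exceptions.
                try (InputStream in = new FileInputStream(args[0]);
                     OutputStream out = new FileOutputStream(args[1])) {
                    copy(in, out);
                }
            }
        }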


  • Disk IO performance limitations based on the number of folders/files

    - by Josh
    I have an application where users are allowed to upload images to the server. Our web server is a Windows 2008 server, and we have a site (images.mysite.com) that points to a shared drive on a Unix box. The code used to do the uploading is C# 3.5. The system currently supports a workflow where, after a threshold is met, a new subfolder can be generated. The question we have is: how many files and/or subfolders can you have in a single folder before there is a degradation in performance, both in serving the images up through IIS 7 and in reading/writing through code?


  • Network IO using Credentials

    - by John
    Is it possible to move files from a network location that requires credentials to another network location that also requires credentials, without mapping any drive (i.e. without any use of P/Invoke)? Example:

        FileInfo fi = new FileInfo(@"\\SomeComputer\SomeDrive\SomeFolder\someFile.txt");
        fi.MoveTo(@"\\AnotherComputer\AnotherDrive\AnotherFolder\AnotherFile.txt");

    This works fine if the source and destination network drives are already mapped, but if they are not, it doesn't.


  • SQL Server – SafePeak “Logon Trigger” Feature for Managing Data Access

    - by pinaldave
    Lately I received an interesting question about the abilities of SafePeak, a SQL Server acceleration product: Q: “I would like to use SafePeak to make my CRM application faster. It is an application we bought from some vendor; after a while it became slow and we can’t reprogram it. SafePeak's automated caching sounds like an easy and good solution for us. But in my application there are many servers and different other application services that address its main database, and some even change data, and I feel there is a chance that we may miss some servers during the connection process. Is there a way to ensure that SafePeak will be aware of all connections to the SQL Server, so its cache will remain intact?” Interesting question, as I remember that SafePeak (http://www.safepeak.com/Product/SafePeak-Overview) likes all traffic to the database to go through it. I decided to check out the features of SafePeak's latest version (2.1) and seek an answer there. A: Indeed I found that SafePeak has a feature they call “Logon Trigger”, designed for exactly that purpose. It is located in the user interface under: Settings -> SQL instances management -> [your instance] -> [Logon Trigger] tab. From here you activate/deactivate it and control a white-list of enabled server IPs and login names that SafePeak will ignore. After activation of the “logon trigger”, the SafePeak server is notified by the SQL Server itself of each newly opened connection. SafePeak monitors those connections and decides whether there is something to do with them or not. On a typical installation SafePeak likes all application and user connections to go via SafePeak; this way it knows about data and schema updates immediately (in real time). With activation of the SafePeak “logon trigger”, a special CLR trigger is deployed on the SQL Server and notifies SafePeak of any connection that has not arrived via SafePeak. In such cases SafePeak can act to clear and lock the cache, or to ignore the connection. This feature makes sure SafePeak is aware of all connections, so the SafePeak cache stays exactly correct at all times. So even if a user, such as a DBA, connects to the SQL Server not via SafePeak, SafePeak will know about it and take action. The notification does not impact the work of that connection; the user or application can still continue to do whatever they planned to do. Note: I found that activation of the logon trigger in SafePeak requires that the SafePeak SQL login have the following permissions: 1) CONTROL SERVER; 2) VIEW SERVER STATE; 3) the SQL Server instance must be CLR enabled. Seeing SafePeak in action, I can say it is a fantastic resource for those who seek performance for SQL Server critical apps. SafePeak promises to accelerate SQL Server applications in just several hours of installation, automatic learning and some optimization configuration (no code changes!!!). If better application and database performance means better business to you, I suggest you download and try SafePeak. The solution SafePeak offers is indeed unique, and the questions I receive are very interesting. Have any more questions on SafePeak? Please leave your question as a comment and I will try to get an answer for you. Reference: Pinal Dave (http://blog.sqlauthority.com)


  • CMT Blog: Virtual IO gets better for LDoms

    - by uwes
    As we all know, virtual IO is of great use in today's IT environments, but when it comes to performance we often have to pay a price. In his blog entry Improved vDisk Performance for LDoms, Stefan Hinker explains how the new implementation of the vdisk/vds software stack in Solaris 11.1 SRU 19 (and a Solaris 10 patch shortly afterwards) significantly improves the latency and throughput of virtual disk IO.


  • How to unit test with lots of IO

    - by Eric
    I write Linux embedded software which closely integrates with hardware. My modules include:

        - CMOS video input with a kernel driver (v4l2)
        - Hardware h264/mpeg4 encoders (Texas Instruments)
        - Audio capture/playback (alsa)
        - Network IO

    I'd like to have automated testing for these functionalities, such as integration testing. I am not sure how I can automate this process, since most of the top-level functionality I face is IO bound. Sure, it is easy to test functions individually, but whole-process checking means depending on tons of external dependencies that are only available at runtime.
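    One widely used approach for exactly this situation is to hide each hardware dependency behind a narrow interface, so the processing logic is unit tested against a canned implementation while the real driver-backed implementation only runs in on-target integration tests. A minimal sketch in Java (all names hypothetical; the same shape works in C):

        import java.io.IOException;
        import java.util.Iterator;
        import java.util.List;

        // Narrow interface over the hardware-facing IO.
        interface FrameSource {
            byte[] nextFrame() throws IOException;
        }

        // Production implementation, backed by the real v4l2 device;
        // body omitted because it only makes sense on the target board.
        final class V4l2FrameSource implements FrameSource {
            public byte[] nextFrame() throws IOException {
                throw new UnsupportedOperationException("runs only on the target board");
            }
        }

        // Test implementation: replays canned frames deterministically,
        // so encoder/processing logic can be unit tested on any machine.
        final class ReplayFrameSource implements FrameSource {
            private final Iterator<byte[]> frames;
            ReplayFrameSource(List<byte[]> canned) { this.frames = canned.iterator(); }
            public byte[] nextFrame() { return frames.next(); }
        }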


  • Returning Identity Value in SQL Server: @@IDENTITY Vs SCOPE_IDENTITY Vs IDENT_CURRENT

    - by Arefin Ali
    We have some common misconceptions about returning the last inserted identity value from tables. To return the last inserted identity value we can use @@IDENTITY, SCOPE_IDENTITY or IDENT_CURRENT, depending on the requirement, but it will be a real mess if anybody uses any one of these functions without knowing its exact purpose. So here I want to share my thoughts on this. @@IDENTITY, SCOPE_IDENTITY and IDENT_CURRENT are almost similar functions in terms of returning an identity value: they all return values that were inserted into an identity column. Back in SQL Server 7 we used @@IDENTITY to return the last inserted identity value, because in those days we didn't have functions like SCOPE_IDENTITY or IDENT_CURRENT; now we have all three. So let's check out which one is responsible for what. IDENT_CURRENT returns the last inserted identity value in a particular table. It never depends on a connection or on the scope of the insert statement. IDENT_CURRENT takes a table name as its parameter. Here is the syntax to get the last inserted identity value in a particular table using IDENT_CURRENT:

        SELECT IDENT_CURRENT('Employee')

    Both @@IDENTITY and SCOPE_IDENTITY return the last identity value created in any table in the current session. But there is a small difference between the two: SCOPE_IDENTITY returns the value inserted only within the current scope, whereas @@IDENTITY is not limited to any particular scope. Here are the syntaxes to get the last inserted identity value using these functions:

        SELECT @@IDENTITY
        SELECT SCOPE_IDENTITY()

    Now let's have a look at the following example. Suppose I have two tables called Employee and EmployeeLog:

        CREATE TABLE Employee (
            EmpId NUMERIC(18, 0) IDENTITY(1,1) NOT NULL,
            EmpName VARCHAR(100) NOT NULL,
            EmpSal FLOAT NOT NULL,
            DateOfJoining DATETIME NOT NULL DEFAULT(GETDATE())
        )

        CREATE TABLE EmployeeLog (
            EmpId NUMERIC(18, 0) IDENTITY(1,1) NOT NULL,
            EmpName VARCHAR(100) NOT NULL,
            EmpSal FLOAT NOT NULL,
            DateOfJoining DATETIME NOT NULL DEFAULT(GETDATE())
        )

    I have an insert trigger defined on the Employee table which inserts a new record into EmployeeLog whenever a record is inserted into Employee. So suppose I insert a new record into the Employee table using the following statement:

        INSERT INTO Employee (EmpName, EmpSal) VALUES ('Arefin', '1')

    The trigger will be fired automatically and insert a record into EmployeeLog. Here the scope of the insert statement and the trigger are different. In this situation, if I retrieve the last inserted identity value using @@IDENTITY, it will simply return the identity value from EmployeeLog, because it is not limited to a particular scope. If I want the Employee table's identity value in this scenario, I need to use SCOPE_IDENTITY. So the moral is: always use SCOPE_IDENTITY to return the identity value of a recently created record in a SQL statement or stored procedure. It's safe and helps ensure bug-free code.
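    As a client-side aside, not from the original post: when the insert is issued from application code, JDBC can sidestep the scope question by returning the key generated by that exact statement; SQL Server's driver typically implements this with a scope-safe query under the hood. A hedged sketch against the Employee table above:

        import java.sql.*;

        public class InsertEmployee {
            // Returns the EmpId generated for the new row. Connection,
            // table and columns follow the example above.
            static long insert(Connection conn, String name, double sal) throws SQLException {
                try (PreparedStatement ps = conn.prepareStatement(
                        "INSERT INTO Employee (EmpName, EmpSal) VALUES (?, ?)",
                        Statement.RETURN_GENERATED_KEYS)) {
                    ps.setString(1, name);
                    ps.setDouble(2, sal);
                    ps.executeUpdate();
                    try (ResultSet keys = ps.getGeneratedKeys()) {
                        keys.next();
                        return keys.getLong(1); // the identity of this statement's row
                    }
                }
            }
        }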

