Search Results

Search found 5919 results on 237 pages for 'io priority'.


  • Reuse a Client Java Socket in a Java Server

    - by user1394983
    I'm developing a Java server to control an Android online game. Is it possible to save the client socket returned by myServerSocket.accept() in a variable in a Client class? This would be very useful because that way the server can communicate with a client whenever the server wants, and not only when the client contacts the server. My current code is:

        import java.io.IOException;
        import java.io.ObjectInputStream;
        import java.io.ObjectOutputStream;
        import java.net.ServerSocket;
        import java.net.Socket;
        import java.util.ArrayList;
        import java.util.UUID;

        import sal.app.shared.Packet;

        public class Server {

            private ArrayList<GameSession> games = new ArrayList<GameSession>();
            private ArrayList<Client> pendent_clients = new ArrayList<Client>();
            private Packet read_packet = new Packet();
            private Packet sent_packet = new Packet();
            private Socket clientSocket = null;

            public static void main(String[] args) throws ClassNotFoundException {
                ServerSocket serverSocket = null;
                ObjectOutputStream oos = null;
                ObjectInputStream ois = null;
                Server myServer = new Server();

                try {
                    serverSocket = new ServerSocket(7777);
                    System.out.println("Listening :7777");
                } catch (IOException e) {
                    e.printStackTrace();
                }

                while (true) {
                    try {
                        myServer.clientSocket = serverSocket.accept();
                        myServer.read_packet = new Packet();
                        myServer.sent_packet = new Packet();
                        oos = new ObjectOutputStream(myServer.clientSocket.getOutputStream());
                        ois = new ObjectInputStream(myServer.clientSocket.getInputStream());
                        myServer.read_packet = (Packet) ois.readObject();
                        myServer.handlePacket(myServer.read_packet);
                    } catch (IOException e) {
                        e.printStackTrace();
                    }
                }
            }

            public void handlePacket(Packet hp) throws IOException {
                if (hp.getOpCode() == 1) {
                    registPlayer(hp);
                }
            }

            public void registPlayer(Packet p) throws IOException {
                Client registClient = new Client(this.clientSocket);
                this.pendent_clients.add(registClient);
                if (pendent_clients.size() == 2) {
                    initAGame();
                } else {
                    Packet to_send = new Packet();
                    to_send.setOpCode(4);
                    ObjectOutputStream out =
                        new ObjectOutputStream(registClient.getClientSocket().getOutputStream());
                    out.writeObject(to_send);
                }
            }

            public void initAGame() throws IOException {
                Client c1 = pendent_clients.get(0);
                Client c2 = pendent_clients.get(1);
                Packet to_send = new Packet();
                GameSession incomingGame = new GameSession(c1, c2);
                games.add(incomingGame);
                to_send.setGameId(incomingGame.getGameId());
                to_send.setOpCode(5);
                ObjectOutputStream out =
                    new ObjectOutputStream(c1.getClientSocket().getOutputStream());
                out.writeObject(to_send);
                out = new ObjectOutputStream(c2.getClientSocket().getOutputStream());
                out.writeObject(to_send);
                pendent_clients.clear();
            }

            public Client getClientById(UUID given_id) {
                for (GameSession gs : games) {
                    if (gs.getClient1().getClientId().equals(given_id)) {
                        return gs.getClient1();
                    } else if (gs.getClient2().getClientId().equals(given_id)) {
                        return gs.getClient2();
                    }
                }
                return null;
            }
        }

    With this code I get this error:

        java.net.SocketException: Broken pipe
            at java.net.SocketOutputStream.socketWrite0(Native Method)
            at java.net.SocketOutputStream.socketWrite(SocketOutputStream.java:92)
            at java.net.SocketOutputStream.write(SocketOutputStream.java:136)
            at java.io.ObjectOutputStream$BlockDataOutputStream.drain(ObjectOutputStream.java:1847)
            at java.io.ObjectOutputStream$BlockDataOutputStream.setBlockDataMode(ObjectOutputStream.java:1756)
            at java.io.ObjectOutputStream.writeNonProxyDesc(ObjectOutputStream.java:1257)
            at java.io.ObjectOutputStream.writeClassDesc(ObjectOutputStream.java:1211)
            at java.io.ObjectOutputStream.writeOrdinaryObject(ObjectOutputStream.java:1395)
            at java.io.ObjectOutputStream.writeObject0(ObjectOutputStream.java:1158)
            at java.io.ObjectOutputStream.writeFatalException(ObjectOutputStream.java:1547)
            at java.io.ObjectOutputStream.writeObject(ObjectOutputStream.java:333)
            at Server.initAGame(Server.java:146)
            at Server.registPlayer(Server.java:120)
            at Server.handlePacket(Server.java:106)
            at Server.main(Server.java:63)

    This error occurs when the second client connects and the server tries to send a Packet to the previous client 1 in initAGame(), in this code:

        out = new ObjectOutputStream(c1.getClientSocket().getOutputStream());
        out.writeObject(to_send);

    My Android code is this:

        package sal.app;

        import java.io.IOException;
        import java.io.ObjectInputStream;
        import java.io.ObjectOutputStream;
        import java.net.Socket;
        import java.net.UnknownHostException;

        import sal.app.logic.DataBaseManager;
        import sal.app.shared.Packet;
        import android.app.Activity;
        import android.os.Bundle;
        import android.view.Window;
        import android.view.WindowManager;

        public class MultiPlayerWaitActivity extends Activity {

            private DataBaseManager db;

            public void onCreate(Bundle savedInstanceState) {
                super.requestWindowFeature(Window.FEATURE_NO_TITLE);
                super.getWindow().setFlags(WindowManager.LayoutParams.FLAG_FULLSCREEN,
                        WindowManager.LayoutParams.FLAG_FULLSCREEN);
                super.onCreate(savedInstanceState);
                setContentView(R.layout.multiwaitlayout);

                db = DataBaseManager.getSalDatabase(this);
                db.teste();
                try {
                    db.createDataBase();
                } catch (IOException e) {
                    e.printStackTrace();
                }

                Socket socket = null;
                ObjectOutputStream outputStream = null;
                ObjectInputStream inputStream = null;
                try {
                    socket = new Socket("192.168.1.4", 7777);
                    outputStream = new ObjectOutputStream(socket.getOutputStream());
                    inputStream = new ObjectInputStream(socket.getInputStream());

                    Packet p = new Packet();
                    Packet r = new Packet();
                    p.setOpCode(1);
                    outputStream.writeObject(p);

                    r = (Packet) inputStream.readObject();
                    handPacket(r);
                } catch (UnknownHostException e) {
                    e.printStackTrace();
                } catch (IOException e) {
                    e.printStackTrace();
                } catch (ClassNotFoundException e) {
                    e.printStackTrace();
                }
            }

            public void handPacket(Packet hp) {
                if (hp.getOpCode() == 5) {
                    this.finish();
                }
                this.finish();
            }
        }

    Regards
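
    For what it's worth, one pattern that tends to avoid the "Broken pipe" in code shaped like this is to create exactly one ObjectOutputStream/ObjectInputStream pair per accepted socket and cache the pair inside the Client object, reusing it for every later write, instead of wrapping the same socket in a new ObjectOutputStream each time. The real Client, GameSession and Packet classes aren't shown, so this sketch guesses at their shape:

        // Minimal sketch: cache the accepted socket plus a single stream pair per client.
        // Class and method names mirror the ones referenced in the question; they are
        // assumptions, not the original code.
        import java.io.IOException;
        import java.io.ObjectInputStream;
        import java.io.ObjectOutputStream;
        import java.net.Socket;

        public class Client {
            private final Socket socket;
            private final ObjectOutputStream out;  // created once, reused for every write
            private final ObjectInputStream in;

            public Client(Socket socket) throws IOException {
                this.socket = socket;
                // Order matters: construct the output stream first, or both ends can
                // deadlock waiting for the ObjectInputStream's stream header.
                this.out = new ObjectOutputStream(socket.getOutputStream());
                this.in = new ObjectInputStream(socket.getInputStream());
            }

            public Socket getClientSocket() { return socket; }

            public void send(Object packet) throws IOException {
                out.writeObject(packet);  // reuse the cached stream; never wrap the socket again
                out.flush();
            }

            public Object receive() throws IOException, ClassNotFoundException {
                return in.readObject();
            }
        }

    initAGame() would then call c1.send(to_send) instead of opening another ObjectOutputStream, and the Android side would need to keep its socket open (rather than finishing the activity straight after the first read) for the server to be able to push packets later.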


  • Cost Comparison Hard Disk Drive to Solid State Drive on Price per Gigabyte - dispelling a myth!

    - by tonyrogerson
    It is often said that hard disk drive storage is significantly cheaper per GiByte than solid state devices - this is wholly inaccurate within the database space. People need to look at the cost of the complete solution, and not just a single component part in isolation, to see what is really required to meet the business requirement.

    Buying a single Hitachi Ultrastar 600GB 3.5" SAS 15Krpm hard disk drive will cost approximately £239.60 (http://scan.co.uk, 22nd March 2012) compared to an OCZ 600GB Z-Drive R4 CM84 PCIe costing £2,316.54 (http://scan.co.uk, 22nd March 2012). I've not included the FusionIO ioDrive because there is no public pricing available for it - something I never understand; personally, when companies do this I immediately think "what are they hiding?" Luckily in FusionIO's case the product is proven, though it is expensive compared to OCZ's enterprise offerings.

    On the face of it the single 15Krpm hard disk has a price per GB of £0.39, the SSD £3.86; this is what you will see in the press and this is what sales people will use in comparing the two technologies - do not be fooled by this bullshit people!

    What is the requirement?

    The requirement is that the database will have a static size of 400GB, kept static through archiving so that growth and trim balance the database size. The client requires resilience. There will be several hundred call centre staff querying the database; queries will read a small amount of data, but there will be no hot spot in the data, so the randomness will come across the entire 400GB of the database. Estimates predict that approximately 4,000 IOps will be required at peak times, and because it's a call centre system the IO latency is important and must remain below 5ms per IO. The balance between read and write is 70% read, 30% write.

    The requirement is now defined and we have three of the most important pieces of the puzzle - space required, estimated IOps and maximum latency per IO.

    Something to consider with regard to SQL Server: write activity requires synchronous IO to the storage media, specifically for the transaction log; that means the write thread will wait until the IO is completed and hardened off before the thread can continue execution. The requirement has stated that 30% of the system activity will be write, so we can expect a high amount of synchronous activity.

    The hardware solution needs to be defined. Two possible solutions: hard disk or solid state based. The real question now is how many hard disks are required to achieve the IO throughput, the latency and the resilience - ditto for the solid state.

    Hard drive solution

    In a test on an HP DL380 (P410i controller) using IOMeter against a single 15Krpm 146GB SAS drive, with a transfer size of 8KiB against a 40GiB file on a freshly formatted disk, where the partition is the only partition on the disk (so the 40GiB file is on the outer edge of the drive and more sectors can be read before head movement is required), the throughput was:

    - 100% sequential IO at a queue depth of 16 with 8 worker threads: 43,537 IOps at an average latency of 2.93ms (340 MiB/s).
    - 100% random IO at the same queue depth and worker threads: 3,733 IOps at an average latency of 34.06ms (34 MiB/s).

    The same test was done on the same disk, but with a 130GiB test file:

    - 100% sequential IO at a queue depth of 16 with 8 worker threads: 43,537 IOps at an average latency of 2.93ms (340 MiB/s).
    - 100% random IO at the same queue depth and worker threads: 528 IOps at an average latency of 217.49ms (4 MiB/s).

    From the results it is clear that random performance gets worse as the disk fills up - I'm currently writing an article on short stroking which will cover this in detail. Given that the workload is random in nature, looking at the random performance of the single drive when only 40GiB of the 146GB is used gives near the IOps required, but the latency is way out.

    Luckily I have tested 6 x 146GB SAS 15Krpm drives in a RAID 0 using the same test methodology. For the same test above on a 130GiB file, the performance boost for each drive added is near linear: for each drive added, throughput goes up by 5 MiB/sec, IOps by 700, and latency drops by nearly 50% per drive (172ms, 94ms, 65ms, 47ms, 37ms, 30ms). This is because the same 130GiB is spread out more as you add drives (130/1, 130/2, 130/3, etc.), so implicit short stroking is occurring: there is less file on each drive, so less head movement is required. The best latency is still 30ms, but we now have the IOps required - though that's on a 130GiB file and not the 400GB we need.

    Some reality check here: the drive randomness is more likely to be 50/50 and not a full 100%, but the above has highlighted the effect randomness has on the drive, and the more a drive fills with data, the worse the effect.

    For argument's sake let us assume that for the given workload we need 8 disks to do the job; for resilience reasons we will need 16, because we need to RAID 1+0 them in order to get both the throughput and the resilience - RAID 5 would degrade performance.

    Cost for hard drives: 16 x £239.60 = £3,833.60

    For the hard drives we will also need disk controllers and a separate external disk array, because the likelihood is that the server itself won't take the drives. A quick spec off DELL for a PowerVault MD1220, which gives the dual pathing, with 16 x 146GB 15Krpm 2.5" disks, is priced at £7,438.00 - and note it's probably more once we add the two controller cards to sit in the server, racking, etc.

    The minimum cost taking the DELL quote as an example is therefore:

        {Cost of Hardware} / {Storage Required}
        £7,438.00 / 400 = £18.59 per GB

    £18.59 per GiB is a far cry from the £0.39 we had been told by the salesman and the myth. Yes, the storage array is composed of 16 x 146GB disks in RAID 10 (therefore 8 usable), giving an effective usable storage availability of 1,168GB, but the actual storage requirement is only 400GB, and the extra disks have had to be purchased to get the IOps up.

    Solid state drive solution

    A single card significantly exceeds the IOps and latency required; for resilience, two will be required.

        (£2,316.54 * 2) / 400 = £11.58 per GB

    With the SSD solution only two PCIe sockets are required: no external disk units, no additional controllers, no redundant controllers, etc.

    Conclusion

    I hope that by showing you an example, the myth that hard disk drives are cheaper per GiB than solid state has now been dispelled - £11.58 per GB for SSD compared to £18.59 for hard disk. I've not even touched on the running costs: compare the cost of running 16 hard disks - that's a lot of heat and power compared to two PCIe cards!

    Just a quick note: I've left a fair amount of information out due to this being a blog! If in doubt, email me :) I'll also deal with the myth that SSDs wear out at a later date as well - that's just way overdone still; yes, 5 years ago, but now - no.
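
    A throwaway sketch to sanity-check the post's arithmetic - the prices are simply copied from the quotes above, and the class name is ours, not the author's:

        // Cost per usable GB for each solution, using the March 2012 figures quoted above.
        public class CostPerGb {
            public static void main(String[] args) {
                double storageRequiredGb = 400.0;

                double hddArrayCost = 7438.00;             // DELL PowerVault MD1220 quote, 16 x 146GB 15Krpm
                double ssdCardCost  = 2316.54;             // OCZ 600GB Z-Drive R4 CM84 PCIe
                double ssdSolutionCost = ssdCardCost * 2;  // two cards for resilience

                System.out.printf("HDD: £%.3f per GB%n", hddArrayCost / storageRequiredGb);     // 18.595, the post's £18.59
                System.out.printf("SSD: £%.3f per GB%n", ssdSolutionCost / storageRequiredGb);  // 11.583, the post's £11.58
            }
        }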


  • OMAP 3530: How fast can I toggle an IO?

    - by raj.tiwari
    I am putting together an application for the OMAP 3530 SoC. This application will run some user interface code on embedded Linux and invoke waveform generation code on the DSP. The DSP and Linux sides will interact over DSP/BIOS Link. My questions are: What is the highest frequency at which my DSP-side code can toggle a GPIO line? If I want to toggle multiple GPIO lines at this high rate, how fast can I go? Thanks for any insights. -Raj


  • Writing Device Drivers for Microcontrollers, where to define IO Port pins?

    - by volting
    I always seem to encounter this dilemma when writing low-level code for MCUs. I never know where to declare pin definitions so as to make the code as reusable as possible. In this case I'm writing a driver to interface an 8051 to an MCP4922 12-bit serial DAC. I'm unsure how/where I should declare the pin definitions for the CS (chip select) and LDAC (data latch) pins for the DAC. At the moment they're declared in the header file for the driver. I've done a lot of research trying to figure out the best approach but haven't really found anything. I basically want to know what the best practices are... if there are some books worth reading, or online information, examples, etc., any recommendations would be welcome. Just a snippet of the driver so you get the idea:

        /**
        @brief This function is used to write a 16bit data word to DAC B - 12 data bits plus 4 configuration bits
        @param dac_data A 12bit word
        @param ip_buf_unbuf_select Input Buffered/unbuffered select bit. Buffered = 1; Unbuffered = 0
        @param gain_select Output Gain Selection bit. 1 = 1x (VOUT = VREF * D/4096). 0 = 2x (VOUT = 2 * VREF * D/4096)
        */
        void MCP4922_DAC_B_TX_word(unsigned short int dac_data, bit ip_buf_unbuf_select, bit gain_select)
        {
            unsigned char low_byte = 0, high_byte = 0;

            CS = 0;                                    /**< Select the chip */

            high_byte |= ((0x01 << 7) | (0x01 << 4));  /**< Select DAC A and set SHDN bit high for DAC A active operation */
            if (ip_buf_unbuf_select)
                high_byte |= (0x01 << 6);
            if (gain_select)
                high_byte |= (0x01 << 5);
            high_byte |= ((dac_data >> 8) & 0x0F);
            low_byte |= dac_data;

            SPI_master_byte(high_byte);
            SPI_master_byte(low_byte);

            CS = 1;

            LDAC = 0;                                  /**< Latch the data */
            LDAC = 1;
        }


  • SCJP Book, IO section: Is this a typo or is there a reason it would look like this?

    - by iamchuckb
    My question is about the fourth line of the snippet (line 19), where the new PrintWriter is created with the constructor taking the FileWriter fw as a parameter. I don't understand the use of chaining the BufferedWriter bw to the FileWriter if it isn't used later on in the actual writing. Can Java apply chaining in a way that bw still somehow affects the rest of the program?

        16. try {
        17.     FileWriter fw = new FileWriter(test);
        18.     BufferedWriter bw = new BufferedWriter(fw, 1024);
        19.     PrintWriter out = new PrintWriter(fw);
        20.     out.println("<html><body><h1>");
        21.     out.println(args[0]);
        22.     out.println("</h1></body></html>");
        23.     out.close();
        24.     bw.close();
        25.     fw.close();
        26. } catch(IOException e) {
        27.     e.printStackTrace();
        28. }

    I think it is probably a typo and they meant to use bw as the parameter for PrintWriter out, but like the title says, I'm new to this. Thanks to all in advance.
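
    For reference: as printed, bw never affects the output at all - the PrintWriter writes straight through fw, and bw just sits unused around the same FileWriter (its bw.close() flushes an empty buffer). If it is the typo the poster suspects, the intended chain would presumably look like this, with only line 19 changed:

        FileWriter fw = new FileWriter(test);
        BufferedWriter bw = new BufferedWriter(fw, 1024);
        PrintWriter out = new PrintWriter(bw);  // wrap bw, not fw, so the buffer is actually used
        out.println("<html><body><h1>");
        out.println(args[0]);
        out.println("</h1></body></html>");
        out.close();  // closing the outermost writer flushes and closes bw and fw as well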


  • Java MapReduce read data

    - by Tatiana
    Hi, I have the following map-reduce code, with which I am trying to read records from my database. Here's the code:

        import java.io.*;
        import java.util.ArrayList;
        import java.util.List;

        import org.apache.hadoop.fs.*;
        import org.apache.hadoop.io.*;
        import org.apache.hadoop.mapred.*;
        import org.apache.hadoop.mapred.lib.db.DBConfiguration;
        import org.apache.hadoop.mapred.lib.db.DBInputFormat;
        import org.apache.hadoop.mapred.lib.db.DBWritable;
        import org.apache.hadoop.util.*;
        import org.apache.hadoop.conf.*;

        public class Connection extends Configured implements Tool {

            public int run(String[] args) throws IOException {
                JobConf conf = new JobConf(getConf(), Connection.class);
                conf.setInputFormat(DBInputFormat.class);

                DBConfiguration.configureDB(conf, "com.sun.java.util.jar.pack.Driver",
                        "jdbc:postgresql://localhost:5432/polyclinic", "postgres", "12345");

                String[] fields = { "name" };
                DBInputFormat.setInput(conf, MyRecord.class, "doctors", null, null, fields);

                conf.setMapOutputKeyClass(LongWritable.class);
                conf.setMapOutputValueClass(MyRecord.class);
                conf.setOutputKeyClass(LongWritable.class);
                conf.setOutputValueClass(TextOutputFormat.class);

                TextOutputFormat.setOutputPath(conf, new Path(args[0]));
                JobClient.runJob(conf);
                return 0;
            }

            public static void main(String[] args) throws Exception {
                int exitCode = ToolRunner.run(new Connection(), args);
                System.exit(exitCode);
            }
        }

    Mapper class:

        import java.io.IOException;

        import org.apache.hadoop.io.IntWritable;
        import org.apache.hadoop.io.LongWritable;
        import org.apache.hadoop.io.Text;
        import org.apache.hadoop.mapred.MapReduceBase;
        import org.apache.hadoop.mapred.Mapper;
        import org.apache.hadoop.mapred.OutputCollector;
        import org.apache.hadoop.mapred.Reporter;

        public class MyMapper extends MapReduceBase
                implements Mapper<LongWritable, MyRecord, Text, IntWritable> {

            public void map(LongWritable key, MyRecord val,
                    OutputCollector<Text, IntWritable> output, Reporter reporter)
                    throws IOException {
                output.collect(new Text(val.name), new IntWritable(1));
            }
        }

    Record class:

        import java.io.DataInput;
        import java.io.DataOutput;
        import java.io.IOException;
        import java.sql.PreparedStatement;
        import java.sql.ResultSet;
        import java.sql.SQLException;

        import org.apache.hadoop.io.Text;
        import org.apache.hadoop.io.Writable;
        import org.apache.hadoop.mapred.lib.db.DBWritable;

        class MyRecord implements Writable, DBWritable {

            String name;

            public void readFields(DataInput in) throws IOException {
                this.name = Text.readString(in);
            }

            public void readFields(ResultSet resultSet) throws SQLException {
                this.name = resultSet.getString(1);
            }

            public void write(DataOutput out) throws IOException { }

            public void write(PreparedStatement stmt) throws SQLException { }
        }

    After this I got the error:

        WARN mapred.JobClient: No job jar file set. User classes may not be found.
        See JobConf(Class) or JobConf#setJar(String).

    Can you give me any suggestion how to solve this problem?
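
    A hedged suggestion rather than a confirmed fix: the warning means Hadoop doesn't know which jar holds the job classes, so remote tasks may fail to load them. The two remedies the warning itself names are packaging the classes into a jar and launching the job from it, or pointing the JobConf at the jar explicitly; the jar name below is illustrative:

        // Either run the job from a jar, e.g.
        //   $ jar cf connection.jar Connection.class MyMapper.class MyRecord.class
        //   $ hadoop jar connection.jar Connection /output/path
        // or set the jar on the JobConf (JobConf#setJar, named in the warning):
        JobConf conf = new JobConf(getConf(), Connection.class);
        conf.setJar("connection.jar");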


  • Java/Groovy File IO Replacing an Image File with its own Contents - Why Does This Work?

    - by jboyd
    I have some JPG files that need to be replaced at runtime with a JFIF-standardized version of themselves (we are using a vendor that gives us JPGs without proper headers, so they don't work in certain applications)... I am able to take a File object for the existing image, get a BufferedImage from that file, and write the contents right back into the same file without having to delete it, and it works:

        imageSrcFolder.eachFileMatch(~/.*\.jpg/, {
            BufferedImage bi = ImageIO.read(it)
            ImageIO.write(bi, "jpg", it)
        })

    The question I have is why? Why doesn't the file end up doubled in size? Why don't I have to delete it first? Why am I able to take a File object for an existing file and then treat it as if it were a brand new one? It seems that what I consider to be a "file" is not what the File object in Java actually is, or else this wouldn't work at all. My code does exactly what I want it to do, but I'm not convinced it always will... it just seems way too easy.
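
    For what it's worth, the behaviour is explainable: ImageIO.read(it) decodes the whole image into memory before anything is written, and ImageIO.write(..., File) discards the file's previous contents when it opens the file, so the re-encoded bytes simply replace the old ones rather than appending. A java.io.File is only a path name, not a handle to the old data. If the "too easy" feeling persists, a slightly more defensive variant (a sketch in plain Java, with hypothetical names) goes through a temp file so a failed write can't leave a half-written original behind:

        import java.awt.image.BufferedImage;
        import java.io.File;
        import javax.imageio.ImageIO;

        // Sketch: re-encode a JPEG "in place" by writing to a temp file first,
        // then swapping it over the original.
        public class ReencodeJpeg {
            public static void reencode(File jpeg) throws Exception {
                BufferedImage image = ImageIO.read(jpeg);  // whole image is in memory now
                if (image == null) {
                    throw new IllegalStateException("not a readable image: " + jpeg);
                }
                File tmp = new File(jpeg.getParentFile(), jpeg.getName() + ".tmp");
                if (!ImageIO.write(image, "jpg", tmp)) {
                    throw new IllegalStateException("no JPEG writer available");
                }
                jpeg.delete();                             // renameTo won't replace on all platforms
                if (!tmp.renameTo(jpeg)) {
                    throw new IllegalStateException("could not replace " + jpeg);
                }
            }
        }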


  • C++ to python communication. Multiple io streams?

    - by Dennis Kempin
    A Python program opens a new process running the C++ program and reads the process's stdout. No problem so far. But is it possible to have multiple streams like this for communication? I can get two if I misuse stderr too, but not more. An easy way to hack this would be using temporary files. Is there something more elegant that does not need a detour to the filesystem? PS: *nix specific solutions are welcome too.


  • Io exception: There is no process to read data written to a pipe.

    - by Srikanth
    I'm using Hibernate 3.2 + WebSphere 6.0 + Struts 1.3. After deploying, the application works fine. After some idle time I get this type of error repeatedly, and I am not able to log in at all. I'm not using any connection pooling. I feel that after the idle time it's not able to connect to the database again. If I restart the server, everything works fine for some time... after that, the same story. Please help me out.
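
    The symptom (works, then fails after idle time) is consistent with the database or a firewall silently dropping idle connections; without a pool there is nothing to test a connection before it is reused. One hedged option is to enable Hibernate's bundled c3p0 pool, which can discard and validate idle connections. A sketch using property names from the Hibernate 3.x documentation (values are illustrative, and the c3p0 jars must be on the classpath):

        import org.hibernate.SessionFactory;
        import org.hibernate.cfg.Configuration;

        public class PooledSessionFactory {
            public static SessionFactory build() {
                Configuration cfg = new Configuration().configure();  // loads hibernate.cfg.xml
                cfg.setProperty("hibernate.connection.provider_class",
                                "org.hibernate.connection.C3P0ConnectionProvider");
                cfg.setProperty("hibernate.c3p0.min_size", "5");
                cfg.setProperty("hibernate.c3p0.max_size", "20");
                cfg.setProperty("hibernate.c3p0.timeout", "300");          // seconds before an idle connection is discarded
                cfg.setProperty("hibernate.c3p0.idle_test_period", "60");  // periodically validate idle connections
                cfg.setProperty("hibernate.c3p0.max_statements", "50");
                return cfg.buildSessionFactory();
            }
        }

    The same properties can equally go straight into hibernate.cfg.xml; setting them in code is shown here only to keep the example self-contained.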


  • Trying to packetize TCP with non-blocking IO is hard! Am I doing something wrong?

    - by Ricket
    Oh how I wish TCP was packet-based like UDP is! But alas, that's not the case, so I'm trying to implement my own packet layer. Here's the chain of events so far (ignoring writing packets). Oh, and my Packets are very simply structured: two unsigned bytes for length, and then byte[length] data. (I can't imagine if they were any more complex; I'd be up to my ears in if statements!)

    1. The Server is in an infinite loop, accepting connections and adding them to a list of Connections.
    2. PacketGatherer (another thread) uses a Selector to figure out which Connection.SocketChannels are ready for reading. It loops over the results and tells each Connection to read().
    3. Each Connection has a partial IncomingPacket and a list of Packets which have been fully read and are waiting to be processed.

    On read(): tell the partial IncomingPacket to read more data (IncomingPacket.readData below). If it's done reading (IncomingPacket.complete()), make a Packet from it, stick the Packet into the list waiting to be processed, and then replace it with a new IncomingPacket.

    There are a couple of problems with this. First, only one packet is being read at a time. If the IncomingPacket needs only one more byte, then only one byte is read this pass. This can of course be fixed with a loop, but it starts to get sort of complicated and I wonder if there is a better overall way. Second, the logic in IncomingPacket is a little bit crazy, to be able to read the two bytes for the length and then read the actual data. Here is the code, boiled down for quick & easy reading:

        int readBytes;          // number of total bytes read so far
        byte length1, length2;  // each byte in an unsigned short int (see getLength())

        public int getLength() {
            // will be inaccurate if readBytes < 2
            return (int) (length1 << 8 | length2);
        }

        public void readData(SocketChannel c) {
            if (readBytes < 2) {
                // we don't yet know the length of the actual data
                ByteBuffer lengthBuffer = ByteBuffer.allocate(2 - readBytes);
                numBytesRead = c.read(lengthBuffer);
                if (readBytes == 0) {
                    if (numBytesRead >= 1)
                        length1 = lengthBuffer.get();
                    if (numBytesRead == 2)
                        length2 = lengthBuffer.get();
                } else if (readBytes == 1) {
                    if (numBytesRead == 1)
                        length2 = lengthBuffer.get();
                }
                readBytes += numBytesRead;
            }
            if (readBytes >= 2) {
                // then we know we have the entire length variable
                // lazily-instantiate data buffers based on getLength()
                // read into data buffers, increment readBytes
                // (does not read more than the amount of this packet, so it does not
                // need to handle overflow into the next packet's data)
            }
        }

        public boolean complete() {
            return (readBytes > 2 && readBytes == getLength() + 2);
        }

    Basically I need feedback on my code. Please suggest any improvements. Even overhauling my entire system would be okay, if you have suggestions for how better to implement the whole thing. Book recommendations are welcome too; I love books. I just get the feeling that something isn't quite right.
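
    For the "loop" concern, one common restructuring keeps a tiny per-connection state machine (reading length vs. reading body) and loops until the channel has nothing buffered, so every complete packet available is gathered in a single readable event. A sketch under the same two-byte length framing - the names are illustrative, not from the original code, and masking with 0xFF also avoids the byte sign-extension lurking in getLength():

        import java.io.IOException;
        import java.nio.ByteBuffer;
        import java.nio.channels.SocketChannel;
        import java.util.ArrayDeque;
        import java.util.Queue;

        // One instance per connection: a reusable 2-byte header buffer, a body
        // buffer allocated once the length is known, and a queue of full packets.
        public class PacketReader {
            private final ByteBuffer header = ByteBuffer.allocate(2);
            private ByteBuffer body;  // null while the length is still being read
            private final Queue<byte[]> completed = new ArrayDeque<byte[]>();

            /** Call whenever the selector reports the channel readable. */
            public void read(SocketChannel channel) throws IOException {
                while (true) {
                    if (body == null) {                     // still reading the 2-byte length
                        if (channel.read(header) < 0) throw new IOException("peer closed");
                        if (header.hasRemaining()) return;  // length not complete yet
                        header.flip();
                        int length = (header.get() & 0xFF) << 8 | (header.get() & 0xFF);
                        body = ByteBuffer.allocate(length);
                        header.clear();
                    }
                    if (channel.read(body) < 0) throw new IOException("peer closed");
                    if (body.hasRemaining()) return;        // body not complete yet
                    completed.add(body.array());            // one full packet gathered
                    body = null;                            // loop: maybe more packets are buffered
                }
            }

            public byte[] poll() { return completed.poll(); }
        }

    The ByteBuffer itself tracks how many bytes have arrived, which removes the readBytes/length1/length2 bookkeeping entirely.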


  • Extract Links from a sitemap(xml)

    - by Akshat Mittal
    Let's say I have a sitemap.xml file with this data:

        <url>
          <loc>http://domain.com/pag1</loc>
          <lastmod>2012-08-25</lastmod>
          <changefreq>weekly</changefreq>
          <priority>0.9</priority>
        </url>
        <url>
          <loc>http://domain.com/pag2</loc>
          <lastmod>2012-08-25</lastmod>
          <changefreq>weekly</changefreq>
          <priority>0.9</priority>
        </url>
        <url>
          <loc>http://domain.com/pag3</loc>
          <lastmod>2012-08-25</lastmod>
          <changefreq>weekly</changefreq>
          <priority>0.9</priority>
        </url>

    I want to extract all the locations from it (the data between <loc> and </loc>). Sample output would look like:

        http://domain.com/pag1
        http://domain.com/pag2
        http://domain.com/pag3

    How to do this?
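
    The question doesn't name a language, so as one possibility, here is a sketch in Java using XPath; local-name() sidesteps the sitemap namespace declaration a real sitemap.xml carries, and the file name is an assumption:

        import java.io.File;
        import javax.xml.parsers.DocumentBuilderFactory;
        import javax.xml.xpath.XPath;
        import javax.xml.xpath.XPathConstants;
        import javax.xml.xpath.XPathFactory;
        import org.w3c.dom.Document;
        import org.w3c.dom.NodeList;

        // Print every <loc> value in a sitemap, ignoring namespaces.
        public class ExtractLocs {
            public static void main(String[] args) throws Exception {
                Document doc = DocumentBuilderFactory.newInstance()
                        .newDocumentBuilder().parse(new File("sitemap.xml"));
                XPath xpath = XPathFactory.newInstance().newXPath();
                NodeList locs = (NodeList) xpath.evaluate("//*[local-name()='loc']",
                        doc, XPathConstants.NODESET);
                for (int i = 0; i < locs.getLength(); i++) {
                    System.out.println(locs.item(i).getTextContent());
                }
            }
        }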


  • Developing custom MBeans to manage J2EE Applications (Part III)

    - by philippe Le Mouel
    This is the third and final part in a series of blogs that demonstrate how to add management capability to your own application using JMX MBeans.

    In Part I we saw:
    - How to implement a custom MBean to manage configuration associated with an application.
    - How to package the resulting code and configuration as part of the application's ear file.
    - How to register MBeans upon application startup, and unregister them upon application stop (or undeployment).
    - How to use generic JMX clients such as JConsole to browse and edit our application's MBean.

    In Part II we saw:
    - How to add localized descriptions to our MBean, MBean attributes, MBean operations and MBean operation parameters.
    - How to specify meaningful names for our MBean operation parameters.

    We also touched on future enhancements that will simplify how we can implement localized MBeans.

    In this third and last part, we will rewrite our MBean to simplify how we added localized descriptions. To do so we will take advantage of the functionality we already described in Part II, which is now part of WebLogic 10.3.3.0. We will show how to take advantage of WebLogic's localization support to localize our MBeans based on the client's Locale, independently of the server's Locale; each client will see MBean descriptions localized based on his/her own Locale. We will show how to achieve this using JConsole, and also using a sample programmatic JMX Java client.

    The complete code sample and associated build files for Part III are available as a zip file. The code has been tested against WebLogic Server 10.3.3.0 and JDK6. To build and deploy our sample application, please follow the instructions provided in Part I, as they also apply to Part III's code and associated zip file.

    Providing custom descriptions, take II

    In Part II we localized our MBean descriptions by extending the StandardMBean class and overriding its many getDescription methods. WebLogic 10.3.3.0, similarly to JDK 7, can automatically localize MBean descriptions as long as those are specified according to the following conventions. Description resource bundle keys are named according to:

    - MBean description: <MBeanInterfaceClass>.mbean
    - MBean attribute description: <MBeanInterfaceClass>.attribute.<AttributeName>
    - MBean operation description: <MBeanInterfaceClass>.operation.<OperationName>
    - MBean operation parameter description: <MBeanInterfaceClass>.operation.<OperationName>.<ParameterName>
    - MBean constructor description: <MBeanInterfaceClass>.constructor.<ConstructorName>
    - MBean constructor parameter description: <MBeanInterfaceClass>.constructor.<ConstructorName>.<ParameterName>

    We also purposely named our resource bundle class MBeanDescriptions and included it as part of the same package as our MBean.
    We already followed the above conventions when creating our resource bundle in Part II, and our default resource bundle class with English descriptions looks like:

        package blog.wls.jmx.appmbean;

        import java.util.ListResourceBundle;

        public class MBeanDescriptions extends ListResourceBundle {
            protected Object[][] getContents() {
                return new Object[][] {
                    {"PropertyConfigMXBean.mbean",
                     "MBean used to manage persistent application properties"},
                    {"PropertyConfigMXBean.attribute.Properties",
                     "Properties associated with the running application"},
                    {"PropertyConfigMXBean.operation.setProperty",
                     "Create a new property, or change the value of an existing property"},
                    {"PropertyConfigMXBean.operation.setProperty.key",
                     "Name that identify the property to set."},
                    {"PropertyConfigMXBean.operation.setProperty.value",
                     "Value for the property being set"},
                    {"PropertyConfigMXBean.operation.getProperty",
                     "Get the value for an existing property"},
                    {"PropertyConfigMXBean.operation.getProperty.key",
                     "Name that identify the property to be retrieved"}
                };
            }
        }

    We have now also added a resource bundle with French localized descriptions:

        package blog.wls.jmx.appmbean;

        import java.util.ListResourceBundle;

        public class MBeanDescriptions_fr extends ListResourceBundle {
            protected Object[][] getContents() {
                return new Object[][] {
                    {"PropertyConfigMXBean.mbean",
                     "Manage proprietes sauvegarde dans un fichier disque."},
                    {"PropertyConfigMXBean.attribute.Properties",
                     "Proprietes associee avec l'application en cour d'execution"},
                    {"PropertyConfigMXBean.operation.setProperty",
                     "Construit une nouvelle proprietee, ou change la valeur d'une proprietee existante."},
                    {"PropertyConfigMXBean.operation.setProperty.key",
                     "Nom de la propriete dont la valeur est change."},
                    {"PropertyConfigMXBean.operation.setProperty.value",
                     "Nouvelle valeur"},
                    {"PropertyConfigMXBean.operation.getProperty",
                     "Retourne la valeur d'une propriete existante."},
                    {"PropertyConfigMXBean.operation.getProperty.key",
                     "Nom de la propriete a retrouver."}
                };
            }
        }

    So now we can just remove the many getDescription methods from our MBean code and end up with something much cleaner:

        package blog.wls.jmx.appmbean;

        import java.io.IOException;
        import java.io.InputStream;
        import java.io.OutputStream;
        import java.io.FileInputStream;
        import java.io.FileOutputStream;
        import java.io.File;
        import java.net.URL;
        import java.util.Map;
        import java.util.HashMap;
        import java.util.Properties;

        import javax.management.MBeanServer;
        import javax.management.ObjectName;
        import javax.management.MBeanRegistration;
        import javax.management.StandardMBean;
        import javax.management.MBeanOperationInfo;
        import javax.management.MBeanParameterInfo;

        public class PropertyConfig extends StandardMBean
                implements PropertyConfigMXBean, MBeanRegistration {

            private String relativePath_ = null;
            private Properties props_ = null;
            private File resource_ = null;
            private static Map<String, String[]> operationsParamNames_ = null;

            static {
                operationsParamNames_ = new HashMap<String, String[]>();
                operationsParamNames_.put("setProperty", new String[] {"key", "value"});
                operationsParamNames_.put("getProperty", new String[] {"key"});
            }

            public PropertyConfig(String relativePath) throws Exception {
                super(PropertyConfigMXBean.class, true);
                props_ = new Properties();
                relativePath_ = relativePath;
            }

            public String setProperty(String key, String value) throws IOException {
                String oldValue = null;
                if (value == null) {
                    oldValue = String.class.cast(props_.remove(key));
                } else {
                    oldValue = String.class.cast(props_.setProperty(key, value));
                }
                save();
                return oldValue;
            }

            public String getProperty(String key) {
                return props_.getProperty(key);
            }

            public Map getProperties() {
                return (Map) props_;
            }

            private void load() throws IOException {
                InputStream is = new FileInputStream(resource_);
                try {
                    props_.load(is);
                } finally {
                    is.close();
                }
            }

            private void save() throws IOException {
                OutputStream os = new FileOutputStream(resource_);
                try {
                    props_.store(os, null);
                } finally {
                    os.close();
                }
            }

            public ObjectName preRegister(MBeanServer server, ObjectName name)
                    throws Exception {
                // MBean must be registered from an application thread
                // to have access to the application ClassLoader
                ClassLoader cl = Thread.currentThread().getContextClassLoader();
                URL resourceUrl = cl.getResource(relativePath_);
                resource_ = new File(resourceUrl.toURI());
                load();
                return name;
            }

            public void postRegister(Boolean registrationDone) { }
            public void preDeregister() throws Exception { }
            public void postDeregister() { }

            protected String getParameterName(MBeanOperationInfo op,
                                              MBeanParameterInfo param, int sequence) {
                return operationsParamNames_.get(op.getName())[sequence];
            }
        }

    The only reason we are still extending the StandardMBean class is to override the default values for our operation parameter names. If this isn't a concern, then one could just write the following code:

        package blog.wls.jmx.appmbean;

        import java.io.IOException;
        import java.io.InputStream;
        import java.io.OutputStream;
        import java.io.FileInputStream;
        import java.io.FileOutputStream;
        import java.io.File;
        import java.net.URL;
        import java.util.Map;
        import java.util.Properties;

        import javax.management.MBeanServer;
        import javax.management.ObjectName;
        import javax.management.MBeanRegistration;

        public class PropertyConfig implements PropertyConfigMXBean, MBeanRegistration {

            private String relativePath_ = null;
            private Properties props_ = null;
            private File resource_ = null;

            public PropertyConfig(String relativePath) throws Exception {
                props_ = new Properties();
                relativePath_ = relativePath;
            }

            public String setProperty(String key, String value) throws IOException {
                String oldValue = null;
                if (value == null) {
                    oldValue = String.class.cast(props_.remove(key));
                } else {
                    oldValue = String.class.cast(props_.setProperty(key, value));
                }
                save();
                return oldValue;
            }

            public String getProperty(String key) {
                return props_.getProperty(key);
            }

            public Map getProperties() {
                return (Map) props_;
            }

            private void load() throws IOException {
                InputStream is = new FileInputStream(resource_);
                try {
                    props_.load(is);
                } finally {
                    is.close();
                }
            }

            private void save() throws IOException {
                OutputStream os = new FileOutputStream(resource_);
                try {
                    props_.store(os, null);
                } finally {
                    os.close();
                }
            }

            public ObjectName preRegister(MBeanServer server, ObjectName name)
                    throws Exception {
                // MBean must be registered from an application thread
                // to have access to the application ClassLoader
                ClassLoader cl = Thread.currentThread().getContextClassLoader();
                URL resourceUrl = cl.getResource(relativePath_);
                resource_ = new File(resourceUrl.toURI());
                load();
                return name;
            }

            public void postRegister(Boolean registrationDone) { }
            public void preDeregister() throws Exception { }
            public void postDeregister() { }
        }

    Note: the above would also require changing the operation parameter names in the resource bundle classes.
    For instance, PropertyConfigMXBean.operation.setProperty.key would become PropertyConfigMXBean.operation.setProperty.p0.

    Client based localization

    When accessing our MBean using JConsole started with the following command line:

        jconsole -J-Djava.class.path=$JAVA_HOME/lib/jconsole.jar:$JAVA_HOME/lib/tools.jar:$WL_HOME/server/lib/wljmxclient.jar \
                 -J-Djmx.remote.protocol.provider.pkgs=weblogic.management.remote -debug

    we see that our MBean descriptions are localized according to the WebLogic server's Locale - English in this case.

    Note: consult Part I for information on how to use JConsole to browse/edit our MBean.

    Now if we specify the client's Locale as part of the JConsole command line, as follows:

        jconsole -J-Djava.class.path=$JAVA_HOME/lib/jconsole.jar:$JAVA_HOME/lib/tools.jar:$WL_HOME/server/lib/wljmxclient.jar \
                 -J-Djmx.remote.protocol.provider.pkgs=weblogic.management.remote \
                 -J-Dweblogic.management.remote.locale=fr-FR -debug

    we see that our MBean descriptions are now localized according to the specified client's Locale - French in this case.

    We use the weblogic.management.remote.locale system property to specify the Locale that should be associated with the client's JMX connections. The value is composed of the client's language code and its country code separated by the - character. The country code is not required and can be omitted, for instance: -Dweblogic.management.remote.locale=fr

    We can also specify the client's Locale using a programmatic client, as demonstrated below:

        package blog.wls.jmx.appmbean.client;

        import javax.management.MBeanServerConnection;
        import javax.management.ObjectName;
        import javax.management.MBeanInfo;
        import javax.management.remote.JMXConnector;
        import javax.management.remote.JMXServiceURL;
        import javax.management.remote.JMXConnectorFactory;

        import java.util.Hashtable;
        import java.util.Set;
        import java.util.Locale;

        public class JMXClient {

            public static void main(String[] args) throws Exception {
                JMXConnector jmxCon = null;
                try {
                    JMXServiceURL serviceUrl = new JMXServiceURL(
                        "service:jmx:iiop://127.0.0.1:7001/jndi/weblogic.management.mbeanservers.runtime");
                    System.out.println("Connecting to: " + serviceUrl);

                    // properties associated with the connection
                    Hashtable env = new Hashtable();
                    env.put(JMXConnectorFactory.PROTOCOL_PROVIDER_PACKAGES,
                            "weblogic.management.remote");
                    String[] credentials = new String[2];
                    credentials[0] = "weblogic";
                    credentials[1] = "weblogic";
                    env.put(JMXConnector.CREDENTIALS, credentials);

                    // specifies the client's Locale
                    env.put("weblogic.management.remote.locale", Locale.FRENCH);

                    jmxCon = JMXConnectorFactory.newJMXConnector(serviceUrl, env);
                    jmxCon.connect();
                    MBeanServerConnection con = jmxCon.getMBeanServerConnection();

                    Set<ObjectName> mbeans = con.queryNames(
                        new ObjectName(
                            "blog.wls.jmx.appmbean:name=myAppProperties,type=PropertyConfig,*"),
                        null);

                    for (ObjectName mbeanName : mbeans) {
                        System.out.println("\n\nMBEAN: " + mbeanName);
                        MBeanInfo minfo = con.getMBeanInfo(mbeanName);
                        System.out.println("MBean Description: " + minfo.getDescription());
                        System.out.println("\n");
                    }
                } finally {
                    // release the connection
                    if (jmxCon != null)
                        jmxCon.close();
                }
            }
        }

    The above client code is part of the zip file associated with this blog, and can be run using the provided client.sh script.
    The resulting output is shown below:

        $ ./client.sh
        Connecting to: service:jmx:iiop://127.0.0.1:7001/jndi/weblogic.management.mbeanservers.runtime

        MBEAN: blog.wls.jmx.appmbean:type=PropertyConfig,name=myAppProperties
        MBean Description: Manage proprietes sauvegarde dans un fichier disque.
        $

    Miscellaneous

    Using the Description annotation to specify MBean descriptions

    Earlier we have seen how to name our MBean description resource keys so that WebLogic 10.3.3.0 automatically uses them to localize our MBean. In some cases we might want to explicitly specify the resource key and resource bundle - for instance when operations are overloaded and the operation name is no longer sufficient to uniquely identify a single operation. In this case we can use the Description annotation provided by WebLogic, as follows:

        import weblogic.management.utils.Description;

        @Description(resourceKey="myapp.resources.TestMXBean.description",
                     resourceBundleBaseName="myapp.resources.MBeanResources")
        public interface TestMXBean {

            @Description(resourceKey="myapp.resources.TestMXBean.threshold.description",
                         resourceBundleBaseName="myapp.resources.MBeanResources")
            public int getthreshold();

            @Description(resourceKey="myapp.resources.TestMXBean.reset.description",
                         resourceBundleBaseName="myapp.resources.MBeanResources")
            public int reset(
                @Description(resourceKey="myapp.resources.TestMXBean.reset.id.description",
                             resourceBundleBaseName="myapp.resources.MBeanResources",
                             displayNameKey="myapp.resources.TestMXBean.reset.id.displayName.description")
                int id);
        }

    The Description annotation should be applied to the MBean interface. It can be used to specify MBean, MBean attribute, MBean operation, and MBean operation parameter descriptions, as demonstrated above.

    Retrieving the Locale associated with a JMX operation from the MBean code

    There are several cases where it is necessary to retrieve the Locale associated with a JMX call from the MBean implementation - for instance, when localizing exception messages. This can be done as follows:

        import weblogic.management.mbeanservers.JMXContextUtil;
        ......

        // some MBean method implementation
        public String setProperty(String key, String value) throws IOException {
            Locale callersLocale = JMXContextUtil.getLocale();
            // use callersLocale to localize Exception messages or
            // potentially some return values such as a Date
            ....
        }

    Conclusion

    With this last part we conclude our three-part series on how to write MBeans to manage J2EE applications. We are far from having exhausted this particular topic, but we have come a long way and are now able to take advantage of the latest functionality provided by WebLogic's application server to write user-friendly MBeans.


  • More interrupts than CPU context switches

    - by Christopher Valles
    I have a machine running Debian GNU/Linux 5.0.8 (lenny), 8 cores and 12GB of RAM. We have one core permanently at around 40% ~ 60% wait time, and trying to spot what is happening I realized that we have more interrupts than CPU context switches. I found that the normal ratio is around 10x more context switches than interrupts, but on my server the values are completely different.

        backend1:~# vmstat -s
            12330788 K total memory
            12221676 K used memory
             3668624 K active memory
             6121724 K inactive memory
              109112 K free memory
             3929400 K buffer memory
             4095536 K swap cache
             4194296 K total swap
                7988 K used swap
             4186308 K free swap
            44547459 non-nice user cpu ticks
              702408 nice user cpu ticks
            13346333 system cpu ticks
          1607583668 idle cpu ticks
           374043393 IO-wait cpu ticks
             4144149 IRQ cpu ticks
             3994255 softirq cpu ticks
                   0 stolen cpu ticks
          4445557114 pages paged in
          2910596714 pages paged out
              128642 pages swapped in
              267400 pages swapped out
          3519307319 interrupts
          2464686911 CPU context switches
          1306744317 boot time
            11555115 forks

    Any ideas if that is an issue? And in that case, how can I spot the cause and fix it?

    Update

    Following the instructions in the comments and focusing on the core stuck in wait, I checked the processes attached to that core, and below you can find the list:

        PID   USER   PR  NI VIRT  RES  SHR S %CPU %MEM TIME+   P COMMAND
        24    root   RT  -5     0    0    0 S 0    0.0 0:03.42 7 migration/7
        25    root   15  -5     0    0    0 S 0    0.0 0:04.78 7 ksoftirqd/7
        26    root   RT  -5     0    0    0 S 0    0.0 0:00.00 7 watchdog/7
        34    root   15  -5     0    0    0 S 0    0.0 1:18.90 7 events/7
        83    root   15  -5     0    0    0 S 0    0.0 1:10.68 7 kblockd/7
        291   root   15  -5     0    0    0 S 0    0.0 0:00.00 7 aio/7
        569   root   15  -5     0    0    0 S 0    0.0 0:00.00 7 ata/7
        1545  root   15  -5     0    0    0 S 0    0.0 0:00.00 7 ksnapd
        1644  root   15  -5     0    0    0 S 0    0.0 0:36.73 7 kjournald
        1725  root   16  -4 16940 1152  488 S 0    0.0 0:00.00 7 udevd
        2342  root   20   0  8828 1140  956 S 0    0.0 0:00.00 7 sh
        2375  root   20   0  8848 1220 1016 S 0    0.0 0:00.00 7 locate
        2421  root   30  10  8896 1268 1016 S 0    0.0 0:00.00 7 updatedb.findut
        2430  root   30  10 58272  49m  616 S 0    0.4 0:17.44 7 sort
        2431  root   30  10  3792  448  360 S 0    0.0 0:00.00 7 frcode
        2682  root   15  -5     0    0    0 S 0    0.0 3:25.98 7 kjournald
        2683  root   15  -5     0    0    0 S 0    0.0 0:00.64 7 kjournald
        2687  root   15  -5     0    0    0 S 0    0.0 1:31.30 7 kjournald
        3261  root   15  -5     0    0    0 S 0    0.0 2:30.56 7 kondemand/7
        3364  root   20   0  3796  596  476 S 0    0.0 0:00.00 7 acpid
        3575  root   20   0  8828 1140  956 S 0    0.0 0:00.00 7 sh
        3597  root   20   0  8848 1216 1016 S 0    0.0 0:00.00 7 locate
        3603  root   30  10  8896 1268 1016 S 0    0.0 0:00.00 7 updatedb.findut
        3612  root   30  10 58272  49m  616 S 0    0.4 0:27.04 7 sort
        3655  root   20   0 11056 2852  516 S 0    0.0 5:36.46 7 redis-server
        3706  root   20   0 19832 1056  816 S 0    0.0 0:01.64 7 cron
        3746  root   20   0  3796  580  484 S 0    0.0 0:00.00 7 getty
        3748  root   20   0  3796  580  484 S 0    0.0 0:00.00 7 getty
        7674  root   20   0 28376 1000  736 S 0    0.0 0:00.00 7 cron
        7675  root   20   0  8828 1140  956 S 0    0.0 0:00.00 7 sh
        7708  root   30  10 58272  49m  616 S 0    0.4 0:03.36 7 sort
        22049 root   20   0  8828 1136  956 S 0    0.0 0:00.00 7 sh
        22095 root   20   0  8848 1220 1016 S 0    0.0 0:00.00 7 locate
        22099 root   30  10  8896 1264 1016 S 0    0.0 0:00.00 7 updatedb.findut
        22108 root   30  10 58272  49m  616 S 0    0.4 0:44.55 7 sort
        22109 root   30  10  3792  452  360 S 0    0.0 0:00.00 7 frcode
        26927 root   20   0  8828 1140  956 S 0    0.0 0:00.00 7 sh
        26947 root   20   0  8848 1216 1016 S 0    0.0 0:00.00 7 locate
        26951 root   30  10  8896 1268 1016 S 0    0.0 0:00.00 7 updatedb.findut
        26960 root   30  10 58272  49m  616 S 0    0.4 0:10.24 7 sort
        26961 root   30  10  3792  452  360 S 0    0.0 0:00.00 7 frcode
        27952 root   20   0 65948 3028 2400 S 0    0.0 0:00.00 7 sshd
        30731 root   20   0     0    0    0 S 0    0.0 0:01.34 7 pdflush
        31204 root   20   0     0    0    0 S 0    0.0 0:00.24 7 pdflush
        21857 deploy 20   0 1227m 2240  868 S 0    0.0 2:44.22 7 nginx
        21858 deploy 20   0 1228m 2784  868 S 0    0.0 2:42.45 7 nginx
        21862 deploy 20   0 1228m 2732  868 S 0    0.0 2:43.90 7 nginx
        21869 deploy 20   0 1228m 2840  868 S 0    0.0 2:44.14 7 nginx
        27994 deploy 20   0 19372 2216 1380 S 0    0.0 0:00.00 7 bash
        28493 deploy 20   0  331m  32m  16m S 4    0.3 0:00.40 7 apache2
        21856 deploy 20   0 1228m 2844  868 S 0    0.0 2:43.64 7 nginx
        3622  nobody 30  10 21156  10m  916 D 0    0.1 4:42.31 7 find
        7716  nobody 30  10 12268 1280  888 D 0    0.0 0:43.50 7 find
        22116 nobody 30  10 12612 1696  916 D 0    0.0 6:32.26 7 find
        26968 nobody 30  10 12268 1284  888 D 0    0.0 1:56.92 7 find

    Update

    As suggested, I took a look at /proc/interrupts, and below is the info:

                  CPU0       CPU1       CPU2       CPU3       CPU4       CPU5       CPU6       CPU7
          0:        35          0          0 1469085485         0          0          0          0  IO-APIC-edge     timer
          1:         0          0          0          8          0          0          0          0  IO-APIC-edge     i8042
          8:         0          0          0          1          0          0          0          0  IO-APIC-edge     rtc0
          9:         0          0          0          0          0          0          0          0  IO-APIC-fasteoi  acpi
         12:         0          0          0        105          0          0          0          0  IO-APIC-edge     i8042
         16:         0          0          0          0          0          0          0  580212114  IO-APIC-fasteoi  3w-9xxx, uhci_hcd:usb1
         18:         0          0        142          0          0          0          0          0  IO-APIC-fasteoi  uhci_hcd:usb6, ehci_hcd:usb7
         19:         9          0          0          0          0          0          0          0  IO-APIC-fasteoi  uhci_hcd:usb3, uhci_hcd:usb5
         21:         0          0          0          0          0          0          0          0  IO-APIC-fasteoi  uhci_hcd:usb2
         23:         0          0          0          0          0          0          0          0  IO-APIC-fasteoi  uhci_hcd:usb4, ehci_hcd:usb8
        1273:        0          0 1600400502          0          0          0          0          0  PCI-MSI-edge     eth0
        1274:        0          0          0          0          0          0          0          0  PCI-MSI-edge     ahci
        NMI:         0          0          0          0          0          0          0          0  Non-maskable interrupts
        LOC: 214252181   69439018  317298553   21943690   72562482   56448835  137923978  407514738  Local timer interrupts
        RES:  27516446   16935944   26430972   44957009   24935543   19881887   57746906   24298747  Rescheduling interrupts
        CAL:     10655      10705      10685      10567      10689      10669      10667        396  function call interrupts
        TLB:    529548     462587     801138     596193     922202     747313    2027966     946594  TLB shootdowns
        TRM:         0          0          0          0          0          0          0          0  Thermal event interrupts
        THR:         0          0          0          0          0          0          0          0  Threshold APIC interrupts
        SPU:         0          0          0          0          0          0          0          0  Spurious interrupts
        ERR:         0

    All the values seem more or less the same for all the cores, but this one - IO-APIC-fasteoi, 3w-9xxx, uhci_hcd:usb1 - only affects core 7 (the same one with the wait time of 40% ~ 60%). Could something attached to the USB port be causing the issue? Thanks in advance.


  • Creating an XML sitemap with PHP

    - by iMaster
    I'm trying to create a sitemap that will automatically update. I've done something similar with my RSS feed, but this sitemap refuses to work. You can view it live at http://designdeluge.com/sitemap.xml I think the main problem is that it's not recognizing the PHP code. Here's the full source:

        <?php header('Content-type: text/xml'); ?>
        <?xml version="1.0" encoding="UTF-8" ?>
        <urlset xmlns="http://www.google.com/schemas/sitemap/0.84"
                xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
                xsi:schemaLocation="http://www.google.com/schemas/sitemap/0.84
                                    http://www.google.com/schemas/sitemap/0.84/sitemap.xsd">
          <url>
            <loc>http://www.designdeluge.com/</loc>
            <lastmod>2010-04-29</lastmod>
            <changefreq>daily</changefreq>
            <priority>1.00</priority>
          </url>
          <url>
            <loc>http://www.designdeluge.com/archives</loc>
            <lastmod>2010-04-29</lastmod>
            <changefreq>daily</changefreq>
            <priority>0.5</priority>
          </url>
          <url>
            <loc>http://www.designdeluge.com/about.php</loc>
            <lastmod>2010-04-29</lastmod>
            <changefreq>daily</changefreq>
            <priority>0.5</priority>
          </url>
          <?php
          include 'connection.php';
          $entries = mysql_query("SELECT * FROM Entries ORDER BY timestamp DESC");
          while ($row = mysql_fetch_array($entries)) {
              $title = stripslashes($row['title']);
              $date = date("Y-m-d", strtotime($row['timestamp']));
          ?>
          <url>
            <loc>http://www.designdeluge.com/<?php echo $row['title']; ?></loc>
            <lastmod><?php echo $date; ?></lastmod>
            <changefreq>none</changefreq>
            <priority>0.5</priority>
          </url>
          <?php } ?>
        </urlset>

    The problem is that the dynamic URLs (e.g. the ones pulled from the DB) aren't being generated and the sitemap won't validate. Thanks!


  • Need to sort using Obout Grid?

    - by Anand
    By default, clicking each column header sorts that column automatically. But I have placed an image in a header column, and by clicking that image the column has to be sorted by priority level, such as 0, 1, 2, ... The problem is that if I set the datafield to priority, the image disappears. I want to use the priority datafield and sort by priority through the image. If anyone has a suggestion, please reply to me ASAP. Thank you


  • how to display fixed dropdown in django admin?

    - by FurtiveFelon
    Hi all, I would like to display priority information in a dropdown. Currently I am using an integer field to store the priority, but I would like to display high/medium/low instead of letting the user type in a priority. A way to approximate this is to use a Priority table which stores 3 elements - 1: high, 2: medium, 3: low - but that seems like overkill. Any easier way would be much appreciated! Jason


  • First Come, First Served process scheduling

    - by user253530
    I have 4 processes:

    - p1: burst 5, priority 3
    - p2: burst 8, priority 2
    - p3: burst 12, priority 2
    - p4: burst 6, priority 1

    Assuming that all processes arrive at the scheduler at the same time, what is the average response time and average turnaround time? For FCFS, is it OK to have them in the order p1, p2, p3, p4 in the execution queue?
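
    For what it's worth, here is the arithmetic under the usual textbook assumptions - all arrivals at t = 0, pure FCFS in queue order p1-p4 (priorities are ignored under pure FCFS), and response time taken as the time until a process first runs. A throwaway sketch rather than an authoritative answer:

        public class Fcfs {
            public static void main(String[] args) {
                int[] bursts = {5, 8, 12, 6};  // p1..p4, all arriving at t = 0
                int clock = 0, totalResponse = 0, totalTurnaround = 0;
                for (int burst : bursts) {
                    totalResponse += clock;    // time waited before first run
                    clock += burst;
                    totalTurnaround += clock;  // completion time, since arrival is 0
                }
                System.out.println("avg response   = " + totalResponse / 4.0);    // 10.75
                System.out.println("avg turnaround = " + totalTurnaround / 4.0);  // 18.5
            }
        }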


  • How to define variable for Trac TicketQuery?

    - by JOM
    I'm using the Trac TicketQuery template for a sprint to show what's going on. How would I type the name of the current sprint only ONCE, when the template needs it in multiple locations? For example, "Sprint1" is needed in 6 places:

        = New items =
        [[TicketQuery(milestone=Sprint1,status=new,format=table,order=priority,col=id|summary|priority|component|owner|type)]]

        = Items in progress =
        [[TicketQuery(milestone=Sprint1,status=in_progress,format=table,order=priority,col=id|summary|priority|component|owner|type)]]


  • Class Not found exception in JApplet.

    - by Nitesh Panchal
    Hello, I created a simple applet using JApplet and everything seems to work fine, but as soon as I create an object of my user-defined class named ChatUser in my applet, I get this error:

        SEVERE: java.lang.ClassNotFoundException: applet.ChatUser
            at com.sun.enterprise.loader.ASURLClassLoader.findClassData(ASURLClassLoader.java:713)
            at com.sun.enterprise.loader.ASURLClassLoader.findClass(ASURLClassLoader.java:626)
            at java.lang.ClassLoader.loadClass(ClassLoader.java:307)
            at java.lang.ClassLoader.loadClass(ClassLoader.java:252)
            at java.lang.ClassLoader.loadClassInternal(ClassLoader.java:320)
            at java.lang.Class.forName0(Native Method)
            at java.lang.Class.forName(Class.java:247)
            at java.io.ObjectInputStream.resolveClass(ObjectInputStream.java:604)
            at java.io.ObjectInputStream.readNonProxyDesc(ObjectInputStream.java:1575)
            at java.io.ObjectInputStream.readClassDesc(ObjectInputStream.java:1496)
            at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:1732)
            at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1329)
            at java.io.ObjectInputStream.readObject(ObjectInputStream.java:351)
            at misc.ChatClient.run(ChatClient.java:43)

    Any idea what can be wrong? It only happens when I create an object of a user-defined class. Do I need to set some security settings or something? Please help :(


  • Node.js Adventure - Node.js on Windows

    - by Shaun
    Two weeks ago I had had a talk with Wang Tao, a C# MVP in China who is currently running his startup company and product named worktile. He asked me to figure out a synchronization solution which helps his product in the future. And he preferred me implementing the service in Node.js, since his worktile is written in Node.js. Even though I have some experience in ASP.NET MVC, HTML, CSS and JavaScript, I don’t think I’m an expert of JavaScript. In fact I’m very new to it. So it scared me a bit when he asked me to use Node.js. But after about one week investigate I have to say Node.js is very easy to learn, use and deploy, even if you have very limited JavaScript skill. And I think I became love Node.js. Hence I decided to have a series named “Node.js Adventure”, where I will demonstrate my story of learning and using Node.js in Windows and Windows Azure. And this is the first one.   (Brief) Introduction of Node.js I don’t want to have a fully detailed introduction of Node.js. There are many resource on the internet we can find. But the best one is its homepage. Node.js was created by Ryan Dahl, sponsored by Joyent. It’s consist of about 80% C/C++ for core and 20% JavaScript for API. It utilizes CommonJS as the module system which we will explain later. The official definition of Node.js is Node.js is a platform built on Chrome's JavaScript runtime for easily building fast, scalable network applications. Node.js uses an event-driven, non-blocking I/O model that makes it lightweight and efficient, perfect for data-intensive real-time applications that run across distributed devices. First of all, Node.js utilizes JavaScript as its development language and runs on top of V8 engine, which is being used by Chrome. It brings JavaScript, a client-side language into the backend service world. So many people said, even though not that actually, “Node.js is a server side JavaScript”. Additionally, Node.js uses an event-driven, non-blocking IO model. This means in Node.js there’s no way to block currently working thread. Every operation in Node.js executed asynchronously. This is a huge benefit especially if our code needs IO operations such as reading disks, connect to database, consuming web service, etc.. Unlike IIS or Apache, Node.js doesn’t utilize the multi-thread model. In Node.js there’s only one working thread serves all users requests and resources response, as the ST star in the figure below. And there is a POSIX async threads pool in Node.js which contains many async threads (AT stars) for IO operations. When a user have an IO request, the ST serves it but it will not do the IO operation. Instead the ST will go to the POSIX async threads pool to pick up an AT, pass this operation to it, and then back to serve any other requests. The AT will actually do the IO operation asynchronously. Assuming before the AT complete the IO operation there is another user comes. The ST will serve this new user request, pick up another AT from the POSIX and then back. If the previous AT finished the IO operation it will take the result back and wait for the ST to serve. ST will take the response and return the AT to POSIX, and then response to the user. And if the second AT finished its job, the ST will response back to the second user in the same way. As you can see, in Node.js there’s only one thread serve clients’ requests and POSIX results. This thread looping between the users and POSIX and pass the data back and forth. The async jobs will be handled by POSIX. 
    This is the event-driven, non-blocking IO model, and its performance is much better than that of the multi-threaded blocking model. For example, Apache is built on the multi-threaded blocking model while Nginx uses the event-driven non-blocking model. The performance and memory usage comparison charts between them (captured from the video NodeJS Basics: An Introductory Training, presented by the Cloud Foundry Developer Advocates) make the difference clear.

    Node.js on Windows

    Running a Node.js application on Windows is very simple. First we need to download the latest Node.js platform from its website. After installation it registers its folder in the system PATH variable, so we can run Node.js from anywhere. To confirm the installation, just open a command window and type "node": it will show the Node.js console. As you can see, this is a JavaScript interactive console, where we can type simple JavaScript code and commands.

    To run a Node.js JavaScript application, just specify the source code file name as the argument of the "node" command. For example, let's create a Node.js source code file named "helloworld.js" and copy a sample code from the Node.js website.

        var http = require("http");

        http.createServer(function (req, res) {
            res.writeHead(200, {"Content-Type": "text/plain"});
            res.end("Hello World\n");
        }).listen(1337, "127.0.0.1");

        console.log("Server running at http://127.0.0.1:1337/");

    This code creates a web server listening on port 1337 that returns "Hello World" for every request. Run it in the command window, then open a browser and navigate to http://localhost:1337/.

    As you can see, when using Node.js we are not so much creating a web application as creating a web server: we have to deal with the request, the response and the related headers, status codes, etc. This is one of the benefits of using Node.js: it is lightweight and straightforward. But creating a website from scratch again and again is not acceptable. The good news is that Node.js uses CommonJS as its module system, so we can leverage existing modules to simplify the job. Furthermore, there are about ten thousand modules available on the internet, covering almost all areas of server-side application development.

    NPM and Node.js Modules

    Node.js uses CommonJS as its module system. A module is a set of JavaScript files. In Node.js, if we have an entry file named "index.js", all the modules it needs are located in the "node_modules" folder, and in "index.js" we can import a module by specifying its name. For example, in the code we've just created we imported a module named "http", a built-in module installed along with Node.js, so that we can use the code in that "http" module.

    Besides the built-in modules there are many modules available on the NPM website, where thousands of developers contribute and download them. This is another benefit of using Node.js: there are many modules we can use, the number of modules is growing very fast, and we can also publish our own modules to the community. When I wrote this post there were 14,608 modules in total on NPM, with about ten thousand downloads per day.

    Installing a module is very simple. Let's go back to our command window and type the command "npm install express". This installs a module named "express", an MVC framework on top of Node.js.
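    Before we use it, it is worth seeing how small a CommonJS module really is. As a minimal sketch (the file names "greeter.js" and "index.js" are illustrative only), a module exposes functions through the "exports" object and is imported with "require":

        // greeter.js - a minimal CommonJS module
        exports.hello = function (name) {
            return "Hello, " + name + "!";
        };

        // index.js - a relative require loads our own file directly
        var greeter = require("./greeter");
        console.log(greeter.hello("Node.js"));

    A bare name such as require("express"), by contrast, is resolved from the "node_modules" folder rather than from a relative path.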
    Now let's create another JavaScript file named "helloweb.js" and copy the code below into it. It imports the "express" module; when the user browses the home page it responds with a text message, and if the incoming URL matches "/Echo/:value", where "value" is whatever the user specified, it echoes the value back along with the current date and time in JSON format. Finally, the website listens on port 12345.

        var express = require("express");
        var app = express();

        app.get("/", function(req, res) {
            res.send("Hello Node.js and Express.");
        });

        app.get("/Echo/:value", function(req, res) {
            var value = req.params.value;
            res.json({
                "Value" : value,
                "Time" : new Date()
            });
        });

        console.log("Web application opened.");
        app.listen(12345);

    For more information and the API of "express", please have a look here. Start the application from the command window with the command "node helloweb.js", then navigate to the home page to see the response in the browser. And if we go to, for example, http://localhost:12345/Echo/Hello Shaun, we can see the JSON result.

    The "express" module is very popular on NPM; it makes the job simple when we need to build an MVC website. Many other modules on NPM are very useful as well:

    - underscore: a utility module covering many common functionalities such as each, map, reduce, select, etc.
    - request: a very simple HTTP request client.
    - async: a library for coordinating async operations.
    - wind: a library that lets us control flow with plain JavaScript for asynchronous programming (and more) without additional pre-compiling steps.

    Node.js and IIS

    I demonstrated how to run a Node.js application from the console. Since we are on Windows, another common requirement is: "Can I host Node.js in IIS?" The answer is "Yes". Tomasz Janczuk created a project called IISNode, which we can find on his GitHub space here, and Scott Hanselman has published a blog post introducing it.

    Summary

    In this post I gave a very brief introduction to Node.js, including its official definition, its architecture and how it implements the event-driven non-blocking model. I then described how to install and run a Node.js application from the Windows console, covered the Node.js module system and the NPM command, and finally pointed to some links about IISNode, an IIS extension that allows Node.js applications to run on IIS.

    Node.js has become a very popular server-side application platform, especially this year. Its non-blocking IO model and async features make it very useful for building highly scalable, asynchronous services, and I think it will be widely used in cloud application development in the near future.

    In the next post I will explain how to use SQL Server from Node.js.

    Hope this helps,
    Shaun

    All documents and related graphics, codes are provided "AS IS" without warranty of any kind. Copyright © Shaun Ziyan Xu. This work is licensed under the Creative Commons License.

    Read the article

  • Advanced TSQL Tuning: Why Internals Knowledge Matters

    - by Paul White
    There is much more to query tuning than reducing logical reads and adding covering nonclustered indexes.  Query tuning is not complete as soon as the query returns results quickly in the development or test environments.  In production, your query will compete for memory, CPU, locks, I/O and other resources on the server.  Today's entry looks at some tuning considerations that are often overlooked, and shows how deep internals knowledge can help you write better TSQL.

    As always, we'll need some example data.  In fact, we are going to use three tables today, each with the same structure: 50,000 rows made up of an INTEGER id column and a padding column containing 3,999 characters in every row.  The only difference between the three tables is in the type of the padding column: the first table uses CHAR(3999), the second uses VARCHAR(MAX), and the third uses the deprecated TEXT type.  A script to create a database with the three tables and load the sample data follows:

        USE master;
        GO
        IF DB_ID('SortTest') IS NOT NULL
            DROP DATABASE SortTest;
        GO
        CREATE DATABASE SortTest COLLATE LATIN1_GENERAL_BIN;
        GO
        ALTER DATABASE SortTest
        MODIFY FILE (NAME = 'SortTest', SIZE = 3GB, MAXSIZE = 3GB);
        GO
        ALTER DATABASE SortTest
        MODIFY FILE (NAME = 'SortTest_log', SIZE = 256MB, MAXSIZE = 1GB, FILEGROWTH = 128MB);
        GO
        ALTER DATABASE SortTest SET ALLOW_SNAPSHOT_ISOLATION OFF;
        ALTER DATABASE SortTest SET AUTO_CLOSE OFF;
        ALTER DATABASE SortTest SET AUTO_CREATE_STATISTICS ON;
        ALTER DATABASE SortTest SET AUTO_SHRINK OFF;
        ALTER DATABASE SortTest SET AUTO_UPDATE_STATISTICS ON;
        ALTER DATABASE SortTest SET AUTO_UPDATE_STATISTICS_ASYNC ON;
        ALTER DATABASE SortTest SET PARAMETERIZATION SIMPLE;
        ALTER DATABASE SortTest SET READ_COMMITTED_SNAPSHOT OFF;
        ALTER DATABASE SortTest SET MULTI_USER;
        ALTER DATABASE SortTest SET RECOVERY SIMPLE;

        USE SortTest;
        GO
        CREATE TABLE dbo.TestCHAR
        (
            id INTEGER IDENTITY (1,1) NOT NULL,
            padding CHAR(3999) NOT NULL,
            CONSTRAINT [PK dbo.TestCHAR (id)] PRIMARY KEY CLUSTERED (id)
        );
        CREATE TABLE dbo.TestMAX
        (
            id INTEGER IDENTITY (1,1) NOT NULL,
            padding VARCHAR(MAX) NOT NULL,
            CONSTRAINT [PK dbo.TestMAX (id)] PRIMARY KEY CLUSTERED (id)
        );
        CREATE TABLE dbo.TestTEXT
        (
            id INTEGER IDENTITY (1,1) NOT NULL,
            padding TEXT NOT NULL,
            CONSTRAINT [PK dbo.TestTEXT (id)] PRIMARY KEY CLUSTERED (id)
        );

        -- =============
        -- Load TestCHAR (about 3s)
        -- =============
        INSERT INTO dbo.TestCHAR WITH (TABLOCKX) (padding)
        SELECT padding = REPLICATE(CHAR(65 + (Data.n % 26)), 3999)
        FROM
        (
            SELECT TOP (50000)
                n = ROW_NUMBER() OVER (ORDER BY (SELECT 0)) - 1
            FROM master.sys.columns C1, master.sys.columns C2, master.sys.columns C3
            ORDER BY n ASC
        ) AS Data
        ORDER BY Data.n ASC;

        -- ============
        -- Load TestMAX (about 3s)
        -- ============
        INSERT INTO dbo.TestMAX WITH (TABLOCKX) (padding)
        SELECT CONVERT(VARCHAR(MAX), padding)
        FROM dbo.TestCHAR
        ORDER BY id;

        -- =============
        -- Load TestTEXT (about 5s)
        -- =============
        INSERT INTO dbo.TestTEXT WITH (TABLOCKX) (padding)
        SELECT CONVERT(TEXT, padding)
        FROM dbo.TestCHAR
        ORDER BY id;

        -- ==========
        -- Space used
        -- ==========
        EXECUTE sys.sp_spaceused @objname = 'dbo.TestCHAR';
        EXECUTE sys.sp_spaceused @objname = 'dbo.TestMAX';
        EXECUTE sys.sp_spaceused @objname = 'dbo.TestTEXT';

        CHECKPOINT;

    That takes around 15 seconds to run, and shows the space allocated to each table in its output.  To illustrate the points I want to make today, the example task we are going to set ourselves is to return a random set of 150 rows from each table.
    The basic shape of the test query is the same for each of the three test tables:

        SELECT TOP (150)
            T.id, T.padding
        FROM dbo.Test AS T
        ORDER BY NEWID()
        OPTION (MAXDOP 1);

    Test 1 – CHAR(3999)

    Running the template query shown above using the TestCHAR table as the target, we find that the query takes around 5 seconds to return its results.  This seems slow, considering that the table only has 50,000 rows.  Working on the assumption that generating a GUID for each row is a CPU-intensive operation, we might try enabling parallelism to see if that speeds up the response time.  Running the query again (but without the MAXDOP 1 hint) on a machine with eight logical processors, the query now takes 10 seconds to execute – twice as long as when run serially.

    Rather than attempting further guesses at the cause of the slowness, let's go back to serial execution and add some monitoring.  The script below monitors STATISTICS IO output and the amount of tempdb used by the test query.  We will also run a Profiler trace to capture any warnings generated during query execution.

        DECLARE @read BIGINT, @write BIGINT;

        SELECT  @read = SUM(num_of_bytes_read),
                @write = SUM(num_of_bytes_written)
        FROM tempdb.sys.database_files AS DBF
        JOIN sys.dm_io_virtual_file_stats(2, NULL) AS FS
            ON FS.file_id = DBF.file_id
        WHERE DBF.type_desc = 'ROWS';

        SET STATISTICS IO ON;

        SELECT TOP (150)
            TC.id, TC.padding
        FROM dbo.TestCHAR AS TC
        ORDER BY NEWID()
        OPTION (MAXDOP 1);

        SET STATISTICS IO OFF;

        SELECT  tempdb_read_MB = (SUM(num_of_bytes_read) - @read) / 1024. / 1024.,
                tempdb_write_MB = (SUM(num_of_bytes_written) - @write) / 1024. / 1024.,
                internal_use_MB =
                (
                    SELECT internal_objects_alloc_page_count / 128.0
                    FROM sys.dm_db_task_space_usage
                    WHERE session_id = @@SPID
                )
        FROM tempdb.sys.database_files AS DBF
        JOIN sys.dm_io_virtual_file_stats(2, NULL) AS FS
            ON FS.file_id = DBF.file_id
        WHERE DBF.type_desc = 'ROWS';

    Let's take a closer look at the statistics and query plan generated from this.  Following the flow of the data from right to left, we see the expected 50,000 rows emerging from the Clustered Index Scan, with a total estimated size of around 191MB.  The Compute Scalar adds a column containing a random GUID (generated from the NEWID() function call) for each row.  With this extra column in place, the size of the data arriving at the Sort operator is estimated to be 192MB.

    Sort is a blocking operator – it has to examine all of the rows on its input before it can produce its first row of output (the last row received might sort first).  This characteristic means that Sort requires a memory grant – memory allocated for the query's use by SQL Server just before execution starts.  In this case, the Sort is the only memory-consuming operator in the plan, so it has access to the full 243MB (248,696KB) of memory reserved by SQL Server for this query execution.

    Notice that the memory grant is significantly larger than the expected size of the data to be sorted.  SQL Server uses a number of techniques to speed up sorting, some of which sacrifice size for comparison speed.  Sorts typically require a very large number of comparisons, so this is usually a very effective optimization.  One of the drawbacks is that it is not possible to exactly predict the sort space needed, as it depends on the data itself.  SQL Server takes an educated guess based on data types, sizes, and the number of rows expected, but the algorithm is not perfect.
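    As an aside (this monitoring query is my addition, not part of the original test scripts), one way to watch a grant like this while the query runs is to query sys.dm_exec_query_memory_grants from a second session.  A minimal sketch:

        -- Run from a separate session while the test query executes:
        -- shows how much workspace memory each active query asked for,
        -- was granted, and has actually used so far.
        SELECT  MG.session_id,
                MG.requested_memory_kb,
                MG.granted_memory_kb,
                MG.used_memory_kb,
                MG.max_used_memory_kb
        FROM sys.dm_exec_query_memory_grants AS MG
        WHERE MG.session_id <> @@SPID;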
    In spite of the large memory grant, the Profiler trace shows a Sort Warning event (indicating that the sort ran out of memory), and the tempdb usage monitor shows that 195MB of tempdb space was used – all of that for system use.  The 195MB represents physical write activity on tempdb, because SQL Server strictly enforces memory grants – a query cannot 'cheat' and effectively gain extra memory by spilling to tempdb pages that reside in memory.  Anyway, the key point here is that it takes a while to write 195MB to disk, and this is the main reason that the query takes 5 seconds overall.

    If you are wondering why using parallelism made the problem worse, consider that eight threads of execution result in eight concurrent partial sorts, each receiving one eighth of the memory grant.  The eight sorts all spilled to tempdb, resulting in inefficiencies as the spilled sorts competed for disk resources.  More importantly, there are specific problems at the point where the eight partial results are combined, but I'll cover that in a future post.

    CHAR(3999) Performance Summary:
    - 5 seconds elapsed time
    - 243MB memory grant
    - 195MB tempdb usage
    - 192MB estimated sort set
    - 25,043 logical reads
    - Sort Warning

    Test 2 – VARCHAR(MAX)

    We'll now run exactly the same test (with the additional monitoring) on the table using a VARCHAR(MAX) padding column:

        DECLARE @read BIGINT, @write BIGINT;

        SELECT  @read = SUM(num_of_bytes_read),
                @write = SUM(num_of_bytes_written)
        FROM tempdb.sys.database_files AS DBF
        JOIN sys.dm_io_virtual_file_stats(2, NULL) AS FS
            ON FS.file_id = DBF.file_id
        WHERE DBF.type_desc = 'ROWS';

        SET STATISTICS IO ON;

        SELECT TOP (150)
            TM.id, TM.padding
        FROM dbo.TestMAX AS TM
        ORDER BY NEWID()
        OPTION (MAXDOP 1);

        SET STATISTICS IO OFF;

        SELECT  tempdb_read_MB = (SUM(num_of_bytes_read) - @read) / 1024. / 1024.,
                tempdb_write_MB = (SUM(num_of_bytes_written) - @write) / 1024. / 1024.,
                internal_use_MB =
                (
                    SELECT internal_objects_alloc_page_count / 128.0
                    FROM sys.dm_db_task_space_usage
                    WHERE session_id = @@SPID
                )
        FROM tempdb.sys.database_files AS DBF
        JOIN sys.dm_io_virtual_file_stats(2, NULL) AS FS
            ON FS.file_id = DBF.file_id
        WHERE DBF.type_desc = 'ROWS';

    This time the query takes around 8 seconds to complete (3 seconds longer than Test 1).  Notice that the estimated row and data sizes are very slightly larger, and the overall memory grant has also increased very slightly to 245MB.  The most marked difference is in the amount of tempdb space used – this query wrote almost 391MB of sort run data to the physical tempdb file.  Don't draw any general conclusions about VARCHAR(MAX) versus CHAR from this – I chose the length of the data specifically to expose this edge case.  In most cases, VARCHAR(MAX) performs very similarly to CHAR – I just wanted to make test 2 a bit more exciting.

    MAX Performance Summary:
    - 8 seconds elapsed time
    - 245MB memory grant
    - 391MB tempdb usage
    - 193MB estimated sort set
    - 25,043 logical reads
    - Sort Warning

    Test 3 – TEXT

    The same test again, but using the deprecated TEXT data type for the padding column:

        DECLARE @read BIGINT, @write BIGINT;

        SELECT  @read = SUM(num_of_bytes_read),
                @write = SUM(num_of_bytes_written)
        FROM tempdb.sys.database_files AS DBF
        JOIN sys.dm_io_virtual_file_stats(2, NULL) AS FS
            ON FS.file_id = DBF.file_id
        WHERE DBF.type_desc = 'ROWS';

        SET STATISTICS IO ON;

        SELECT TOP (150)
            TT.id, TT.padding
        FROM dbo.TestTEXT AS TT
        ORDER BY NEWID()
        OPTION (MAXDOP 1, RECOMPILE);

        SET STATISTICS IO OFF;

        SELECT  tempdb_read_MB = (SUM(num_of_bytes_read) - @read) / 1024. / 1024.,
                tempdb_write_MB = (SUM(num_of_bytes_written) - @write) / 1024. / 1024.,
                internal_use_MB =
                (
                    SELECT internal_objects_alloc_page_count / 128.0
                    FROM sys.dm_db_task_space_usage
                    WHERE session_id = @@SPID
                )
        FROM tempdb.sys.database_files AS DBF
        JOIN sys.dm_io_virtual_file_stats(2, NULL) AS FS
            ON FS.file_id = DBF.file_id
        WHERE DBF.type_desc = 'ROWS';
    This time the query runs in 500ms.  If you look at the metrics we have been checking so far, it's not hard to understand why:

    TEXT Performance Summary:
    - 0.5 seconds elapsed time
    - 9MB memory grant
    - 5MB tempdb usage
    - 5MB estimated sort set
    - 207 logical reads
    - 596 LOB logical reads
    - Sort Warning

    SQL Server's memory grant algorithm still underestimates the memory needed to perform the sorting operation, but the size of the data to sort is so much smaller (5MB versus 193MB previously) that the spilled sort doesn't matter very much.  Why is the data size so much smaller?  The query still produces the correct results – including the large amount of data held in the padding column – so what magic is being performed here?

    TEXT versus MAX Storage

    The answer lies in how columns of the TEXT data type are stored.  By default, TEXT data is stored off-row in separate LOB pages – which explains why this is the first query we have seen that records LOB logical reads in its STATISTICS IO output.  You may recall from my last post that LOB data leaves an in-row pointer to the separate storage structure holding the LOB data.

    SQL Server can see that the full LOB value is not required by the query plan until results are returned, so instead of passing the full LOB value down the plan from the Clustered Index Scan, it passes the small in-row structure instead.  SQL Server estimates that each row coming from the scan will be 79 bytes long – 11 bytes for row overhead, 4 bytes for the integer id column, and 64 bytes for the LOB pointer (in fact the pointer is rather smaller – usually 16 bytes – but the details of that don't really matter right now).

    OK, so this query is much more efficient because it is sorting a very much smaller data set – SQL Server delays retrieving the LOB data itself until after the Sort starts producing its 150 rows.  The question that normally arises at this point is: why doesn't SQL Server use the same trick when the padding column is defined as VARCHAR(MAX)?

    The answer is connected with the fact that if the actual size of the VARCHAR(MAX) data is 8000 bytes or less, it is usually stored in-row in exactly the same way as for a VARCHAR(8000) column – MAX data only moves off-row into LOB storage when it exceeds 8000 bytes.  The default behaviour of the TEXT type is to be stored off-row, unless the 'text in row' table option is set suitably and there is room on the page.  There is an analogous (but opposite) setting to control the storage of MAX data – the 'large value types out of row' table option.  By enabling this option for a table, MAX data will be stored off-row (in a LOB structure) instead of in-row.  SQL Server Books Online has good coverage of both options in the topic In Row Data.

    The MAXOOR Table

    The essential difference, then, is that MAX defaults to in-row storage, and TEXT defaults to off-row (LOB) storage.  You might be thinking that we could get the same benefits seen for the TEXT data type by storing the VARCHAR(MAX) values off row – so let's look at that option now.
    This script creates a fourth table, with the VARCHAR(MAX) data stored off-row in LOB pages:

        CREATE TABLE dbo.TestMAXOOR
        (
            id INTEGER IDENTITY (1,1) NOT NULL,
            padding VARCHAR(MAX) NOT NULL,
            CONSTRAINT [PK dbo.TestMAXOOR (id)] PRIMARY KEY CLUSTERED (id)
        );

        EXECUTE sys.sp_tableoption
            @TableNamePattern = N'dbo.TestMAXOOR',
            @OptionName = 'large value types out of row',
            @OptionValue = 'true';

        SELECT large_value_types_out_of_row
        FROM sys.tables
        WHERE [schema_id] = SCHEMA_ID(N'dbo')
        AND name = N'TestMAXOOR';

        INSERT INTO dbo.TestMAXOOR WITH (TABLOCKX) (padding)
        SELECT SPACE(0)
        FROM dbo.TestCHAR
        ORDER BY id;

        UPDATE TM WITH (TABLOCK)
        SET padding.WRITE (TC.padding, NULL, NULL)
        FROM dbo.TestMAXOOR AS TM
        JOIN dbo.TestCHAR AS TC
            ON TC.id = TM.id;

        EXECUTE sys.sp_spaceused @objname = 'dbo.TestMAXOOR';
        CHECKPOINT;

    Test 4 – MAXOOR

    We can now re-run our test on the MAXOOR (MAX out of row) table:

        DECLARE @read BIGINT, @write BIGINT;

        SELECT  @read = SUM(num_of_bytes_read),
                @write = SUM(num_of_bytes_written)
        FROM tempdb.sys.database_files AS DBF
        JOIN sys.dm_io_virtual_file_stats(2, NULL) AS FS
            ON FS.file_id = DBF.file_id
        WHERE DBF.type_desc = 'ROWS';

        SET STATISTICS IO ON;

        SELECT TOP (150)
            MO.id, MO.padding
        FROM dbo.TestMAXOOR AS MO
        ORDER BY NEWID()
        OPTION (MAXDOP 1, RECOMPILE);

        SET STATISTICS IO OFF;

        SELECT  tempdb_read_MB = (SUM(num_of_bytes_read) - @read) / 1024. / 1024.,
                tempdb_write_MB = (SUM(num_of_bytes_written) - @write) / 1024. / 1024.,
                internal_use_MB =
                (
                    SELECT internal_objects_alloc_page_count / 128.0
                    FROM sys.dm_db_task_space_usage
                    WHERE session_id = @@SPID
                )
        FROM tempdb.sys.database_files AS DBF
        JOIN sys.dm_io_virtual_file_stats(2, NULL) AS FS
            ON FS.file_id = DBF.file_id
        WHERE DBF.type_desc = 'ROWS';

    MAXOOR Performance Summary:
    - 0.3 seconds elapsed time
    - 245MB memory grant
    - 0MB tempdb usage
    - 193MB estimated sort set
    - 207 logical reads
    - 446 LOB logical reads
    - No sort warning

    The query runs very quickly – slightly faster than Test 3, and without spilling the sort to tempdb (there is no sort warning in the trace, and the monitoring query shows zero tempdb usage by this query).  SQL Server is passing the in-row pointer structure down the plan and only looking up the LOB value on the output side of the sort.

    The Hidden Problem

    There is still a huge problem with this query though – it requires a 245MB memory grant.  No wonder the sort doesn't spill to tempdb now – 245MB is about 20 times more memory than this query actually requires to sort 50,000 records containing LOB data pointers.  Notice that the estimated row and data sizes in the plan are the same as in test 2 (where the MAX data was stored in-row).

    The optimizer assumes that MAX data is stored in-row, regardless of the sp_tableoption setting 'large value types out of row'.  Why?  Because this option is dynamic – changing it does not immediately force all MAX data in the table in-row or off-row; that only happens when data is added or actually changed.  SQL Server does not keep statistics to show how much MAX or TEXT data is currently in-row, and how much is stored in LOB pages.  This is an annoying limitation, and one which I hope will be addressed in a future version of the product.

    So why should we worry about this?  Excessive memory grants reduce concurrency and may result in queries waiting on the RESOURCE_SEMAPHORE wait type while they wait for memory they do not need (a quick way to check for such waits appears below).  245MB is an awful lot of memory, especially on 32-bit versions where memory grants cannot use AWE-mapped memory.
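    As a quick aside (this check is my addition, not part of the original tests), the cumulative impact of queries queuing for memory grants on an instance can be seen in sys.dm_os_wait_stats.  A minimal sketch:

        -- Cumulative waits since the last restart (or stats clear):
        -- significant RESOURCE_SEMAPHORE waits suggest memory grant pressure.
        SELECT  wait_type,
                waiting_tasks_count,
                wait_time_ms
        FROM sys.dm_os_wait_stats
        WHERE wait_type = N'RESOURCE_SEMAPHORE';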
    Even on a 64-bit server with plenty of memory, do you really want a single query to consume 0.25GB of memory unnecessarily?  That's 32,000 8KB pages that might be put to much better use.

    The Solution

    The answer is not to use the TEXT data type for the padding column.  That solution happens to have better performance characteristics for this specific query, but it still results in a spilled sort, and it is hard to recommend the use of a data type which is scheduled for removal.  I hope it is clear to you that the fundamental problem here is that SQL Server sorts the whole set arriving at a Sort operator.  Clearly, it is not efficient to sort the whole table in memory just to return 150 rows in a random order.

    The TEXT example was more efficient because it dramatically reduced the size of the set that needed to be sorted.  We can do the same thing by selecting 150 unique keys from the table at random (sorting by NEWID() for example) and only then retrieving the large padding column values for just the 150 rows we need.  The following script implements that idea for all four tables:

        SET STATISTICS IO ON;

        WITH TestTable AS
        (
            SELECT * FROM dbo.TestCHAR
        ),
        TopKeys AS
        (
            SELECT TOP (150) id
            FROM TestTable
            ORDER BY NEWID()
        )
        SELECT T1.id, T1.padding
        FROM TestTable AS T1
        WHERE T1.id = ANY (SELECT id FROM TopKeys)
        OPTION (MAXDOP 1);

        WITH TestTable AS
        (
            SELECT * FROM dbo.TestMAX
        ),
        TopKeys AS
        (
            SELECT TOP (150) id
            FROM TestTable
            ORDER BY NEWID()
        )
        SELECT T1.id, T1.padding
        FROM TestTable AS T1
        WHERE T1.id IN (SELECT id FROM TopKeys)
        OPTION (MAXDOP 1);

        WITH TestTable AS
        (
            SELECT * FROM dbo.TestTEXT
        ),
        TopKeys AS
        (
            SELECT TOP (150) id
            FROM TestTable
            ORDER BY NEWID()
        )
        SELECT T1.id, T1.padding
        FROM TestTable AS T1
        WHERE T1.id IN (SELECT id FROM TopKeys)
        OPTION (MAXDOP 1);

        WITH TestTable AS
        (
            SELECT * FROM dbo.TestMAXOOR
        ),
        TopKeys AS
        (
            SELECT TOP (150) id
            FROM TestTable
            ORDER BY NEWID()
        )
        SELECT T1.id, T1.padding
        FROM TestTable AS T1
        WHERE T1.id IN (SELECT id FROM TopKeys)
        OPTION (MAXDOP 1);

        SET STATISTICS IO OFF;

    All four queries now return results in much less than a second, with memory grants between 6 and 12MB, and without spilling to tempdb.  The small remaining inefficiency is in reading the id column values from the clustered primary key index.  As a clustered index, it contains all the in-row data at its leaf.  The CHAR and VARCHAR(MAX) tables store the padding column in-row, so id values are separated by a 3999-character column, plus row overhead.  The TEXT and MAXOOR tables store the padding values off-row, so id values in the clustered index leaf are separated by the much smaller off-row pointer structure.  This difference is reflected in the number of logical page reads performed by the four queries:

        Table 'TestCHAR'   logical reads 25511  lob logical reads   0
        Table 'TestMAX'    logical reads 25511  lob logical reads   0
        Table 'TestTEXT'   logical reads   412  lob logical reads 597
        Table 'TestMAXOOR' logical reads   413  lob logical reads 446

    We can increase the density of the id values by creating a separate nonclustered index on the id column only.  This is the same key as the clustered index, of course, but the nonclustered index will not include the rest of the in-row column data.
        CREATE UNIQUE NONCLUSTERED INDEX uq1 ON dbo.TestCHAR (id);
        CREATE UNIQUE NONCLUSTERED INDEX uq1 ON dbo.TestMAX (id);
        CREATE UNIQUE NONCLUSTERED INDEX uq1 ON dbo.TestTEXT (id);
        CREATE UNIQUE NONCLUSTERED INDEX uq1 ON dbo.TestMAXOOR (id);

    The four queries can now use the very dense nonclustered index to quickly scan the id values, sort them by NEWID(), select the 150 ids we want, and then look up the padding data.  The logical reads with the new indexes in place are:

        Table 'TestCHAR'   logical reads 835  lob logical reads   0
        Table 'TestMAX'    logical reads 835  lob logical reads   0
        Table 'TestTEXT'   logical reads 686  lob logical reads 597
        Table 'TestMAXOOR' logical reads 686  lob logical reads 448

    With the new index, all four queries use the same query plan.

    Performance Summary:
    - 0.3 seconds elapsed time
    - 6MB memory grant
    - 0MB tempdb usage
    - 1MB sort set
    - 835 logical reads (CHAR, MAX)
    - 686 logical reads (TEXT, MAXOOR)
    - 597 LOB logical reads (TEXT)
    - 448 LOB logical reads (MAXOOR)
    - No sort warning

    I'll leave it as an exercise for the reader to work out why trying to eliminate the Key Lookup by adding the padding column to the new nonclustered indexes would be a daft idea.

    Conclusion

    This post is not about tuning queries that access columns containing big strings.  It isn't about the internal differences between TEXT and MAX data types either.  It isn't even about the cool use of UPDATE .WRITE used in the MAXOOR table load.  No, this post is about something else: many developers might not have tuned our starting example query at all – 5 seconds isn't that bad, and the original query plan looks reasonable at first glance.  Perhaps the NEWID() function would have been blamed for 'just being slow' – who knows.  5 seconds isn't awful – unless your users expect sub-second responses – but using 250MB of memory and writing 200MB to tempdb certainly is!  If ten sessions ran that query at the same time in production, that's 2.5GB of memory usage and 2GB hitting tempdb.  Of course, not all queries can be rewritten to avoid large memory grants and sort spills using the key-lookup technique in this post, but that's not the point either.

    The point of this post is that a basic understanding of execution plans is not enough.  Tuning for logical reads and adding covering indexes is not enough.  If you want to produce high-quality, scalable TSQL that won't get you paged as soon as it hits production, you need a deep understanding of execution plans, and as much accurate, deep knowledge about SQL Server as you can lay your hands on.  The advanced database developer has a wide range of tools to use in writing queries that perform well in a range of circumstances.

    By the way, the examples in this post were written for SQL Server 2008.  They will run on 2005 and demonstrate the same principles, but you won't get the same figures I did because 2005 had a rather nasty bug in the Top N Sort operator.  Fair warning: if you do decide to run the scripts on a 2005 instance (particularly the parallel query), do it before you head out for lunch…

    This post is dedicated to the people of Christchurch, New Zealand.

    © 2011 Paul White
    email: SQLkiwi@gmail.com
    twitter: @SQL_Kiwi

    Read the article

  • USB mouse does not work on boot

    - by Uku Loskit
    My problem is pretty much a duplicate of the one described in "USB mouse late to load", but the solution there has not worked for me. I'm running the same OS and experiencing the exact same issue; the problem disappears after 10 seconds or so. Booting with the options specified in the other question did not fix it :/ Thanks in advance.

        sheepz@sheepz-desktop:~$ dmesg | egrep "hci|usb"
        [    0.188000] usbcore: registered new interface driver usbfs
        [    0.188000] usbcore: registered new interface driver hub
        [    0.188000] usbcore: registered new device driver usb
        [    0.358613] ehci_hcd: USB 2.0 'Enhanced' Host Controller (EHCI) Driver
        [    0.358627] ohci_hcd: USB 1.1 'Open' Host Controller (OHCI) Driver
        [    0.358637] uhci_hcd: USB Universal Host Controller Interface driver
        [    0.358683] uhci_hcd 0000:00:1d.0: PCI INT A -> GSI 23 (level, low) -> IRQ 23
        [    0.358691] uhci_hcd 0000:00:1d.0: setting latency timer to 64
        [    0.358695] uhci_hcd 0000:00:1d.0: UHCI Host Controller
        [    0.358726] uhci_hcd 0000:00:1d.0: new USB bus registered, assigned bus number 1
        [    0.358758] uhci_hcd 0000:00:1d.0: irq 23, io base 0x0000e100
        [    0.358927] uhci_hcd 0000:00:1d.1: PCI INT B -> GSI 19 (level, low) -> IRQ 19
        [    0.358932] uhci_hcd 0000:00:1d.1: setting latency timer to 64
        [    0.358935] uhci_hcd 0000:00:1d.1: UHCI Host Controller
        [    0.358964] uhci_hcd 0000:00:1d.1: new USB bus registered, assigned bus number 2
        [    0.358991] uhci_hcd 0000:00:1d.1: irq 19, io base 0x0000e200
        [    0.359132] uhci_hcd 0000:00:1d.2: PCI INT C -> GSI 18 (level, low) -> IRQ 18
        [    0.359137] uhci_hcd 0000:00:1d.2: setting latency timer to 64
        [    0.359139] uhci_hcd 0000:00:1d.2: UHCI Host Controller
        [    0.359165] uhci_hcd 0000:00:1d.2: new USB bus registered, assigned bus number 3
        [    0.359193] uhci_hcd 0000:00:1d.2: irq 18, io base 0x0000e300
        [    0.359327] uhci_hcd 0000:00:1d.3: PCI INT D -> GSI 16 (level, low) -> IRQ 16
        [    0.359332] uhci_hcd 0000:00:1d.3: setting latency timer to 64
        [    0.359334] uhci_hcd 0000:00:1d.3: UHCI Host Controller
        [    0.359360] uhci_hcd 0000:00:1d.3: new USB bus registered, assigned bus number 4
        [    0.359387] uhci_hcd 0000:00:1d.3: irq 16, io base 0x0000e400
        [    0.731933] usb 1-1: new full speed USB device using uhci_hcd and address 2
        [    1.023859] usb 1-2: new full speed USB device using uhci_hcd and address 3
        [   16.136175] usb 1-2: device descriptor read/64, error -110
        [   31.352481] usb 1-2: device descriptor read/64, error -110
        [   31.568485] usb 1-2: new full speed USB device using uhci_hcd and address 4
        [   46.680794] usb 1-2: device descriptor read/64, error -110
        [   61.903555] usb 1-2: device descriptor read/64, error -110
        [   62.119671] usb 1-2: new full speed USB device using uhci_hcd and address 5
        [   72.541078] usb 1-2: device not accepting address 5, error -110
        [   72.653194] usb 1-2: new full speed USB device using uhci_hcd and address 6
        [   83.066637] usb 1-2: device not accepting address 6, error -110
        [   83.178615] usb 3-1: new low speed USB device using uhci_hcd and address 2
        [   83.562546] usbcore: registered new interface driver hiddev
        [   83.578827] input: Logitech USB-PS/2 Optical Mouse as /devices/pci0000:00/0000:00:1d.2/usb3/3-1/3-1:1.0/input/input3
        [   83.579016] generic-usb 0003:046D:C01D.0001: input,hidraw0: USB HID v1.10 Mouse [Logitech USB-PS/2 Optical Mouse] on usb-0000:00:1d.2-1/input0
        [   83.579244] usbcore: registered new interface driver usbhid
        [   83.579246] usbhid: USB HID core driver
        [114025.224407] usb 3-1: USB disconnect, address 2

    Read the article

  • Storage Configuration

    - by jchang
    Storage performance is not an inherently complicated subject. The concepts are relatively simple. In fact, scaling storage performance is far easier than overcoming the difficulties encountered in scaling processor performance on NUMA systems. Storage performance is achieved by properly distributing IO over: 1) multiple independent PCI-E ports (system memory and IO bandwidth are key) 2) multiple RAID controllers or host bus adapters (HBAs) 3) multiple storage IO channels (SAS or FC, complete path) most importantly,...(read more)

    Read the article

