Search Results

Search found 751 results on 31 pages for 'quad'.


  • Problem with JOGL and Framebuffer Render-to-texture: Invalid Framebuffer Operation Error

    - by quadelirus
    Okay, so I am trying to render a scene to a small 32x32 texture and ran into problems. I get an "invalid framebuffer operation" error when I try to actually draw anything to the texture. I have simplified the code below so that it simply tries to render a quad to a texture and then bind that quad as a texture for another quad that is rendered to the screen. So my question is this... where is the error? This is using JOGL 1.1.1. The error occurs at Checkpoint2 in the code. import java.awt.event.*; import javax.media.opengl.*; import javax.media.opengl.glu.*; import javax.swing.JFrame; import java.nio.*; public class Main extends JFrame implements GLEventListener, KeyListener, MouseListener, MouseMotionListener, ActionListener{ /* GL related variables */ private final GLCanvas canvas; private GL gl; private GLU glu; private int winW = 600, winH = 600; private int texRender_FBO; private int texRender_RB; private int texRender_32x32; public static void main(String args[]) { new Main(); } /* creates OpenGL window */ public Main() { super("Problem Child"); canvas = new GLCanvas(); canvas.addGLEventListener(this); canvas.addKeyListener(this); canvas.addMouseListener(this); canvas.addMouseMotionListener(this); getContentPane().add(canvas); setSize(winW, winH); setLocationRelativeTo(null); setDefaultCloseOperation(EXIT_ON_CLOSE); setVisible(true); canvas.requestFocus(); } /* gl display function */ public void display(GLAutoDrawable drawable) { gl.glBindFramebufferEXT(GL.GL_FRAMEBUFFER_EXT, this.texRender_FBO); gl.glPushAttrib(GL.GL_VIEWPORT_BIT); gl.glViewport(0, 0, 32, 32); gl.glClearColor(1.f, 0.f, 0.f, 1.f); System.out.print("Checkpoint1: "); outputError(); gl.glBegin(GL.GL_QUADS); { //gl.glTexCoord2f(0.0f, 0.0f); gl.glColor3f(1.f, 0.f, 0.f); gl.glVertex3f(0.0f, 1.0f, 1.0f); //gl.glTexCoord2f(1.0f, 0.0f); gl.glColor3f(1.f, 1.f, 0.f); gl.glVertex3f(1.0f, 1.0f, 1.0f); //gl.glTexCoord2f(1.0f, 1.0f); gl.glColor3f(1.f, 1.f, 1.f); gl.glVertex3f(1.0f, 0.0f, 1.0f); //gl.glTexCoord2f(0.0f, 1.0f); gl.glColor3f(1.f, 0.f, 1.f); gl.glVertex3f(0.0f, 0.0f, 1.0f); } gl.glEnd(); System.out.print("Checkpoint2: "); outputError(); //Here I get an invalid framebuffer operation gl.glPopAttrib(); gl.glBindFramebufferEXT(GL.GL_FRAMEBUFFER_EXT, 0); gl.glClearColor(0.f, 0.f, 0.f, 1.f); gl.glClear(GL.GL_COLOR_BUFFER_BIT); gl.glColor3f(1.f, 1.f, 1.f); gl.glBindTexture(GL.GL_TEXTURE_1D, this.texRender_32x32); gl.glBegin(GL.GL_QUADS); { gl.glTexCoord2f(0.0f, 0.0f); //gl.glColor3f(1.f, 0.f, 0.f); gl.glVertex3f(0.0f, 1.0f, 1.0f); gl.glTexCoord2f(1.0f, 0.0f); //gl.glColor3f(1.f, 1.f, 0.f); gl.glVertex3f(1.0f, 1.0f, 1.0f); gl.glTexCoord2f(1.0f, 1.0f); //gl.glColor3f(1.f, 1.f, 1.f); gl.glVertex3f(1.0f, 0.0f, 1.0f); gl.glTexCoord2f(0.0f, 1.0f); //gl.glColor3f(1.f, 0.f, 1.f); gl.glVertex3f(0.0f, 0.0f, 1.0f); } gl.glEnd(); } /* initialize GL */ public void init(GLAutoDrawable drawable) { gl = drawable.getGL(); glu = new GLU(); gl.glClearColor(.3f, .3f, .3f, 1f); gl.glClearDepth(1.0f); gl.glMatrixMode(GL.GL_PROJECTION); gl.glLoadIdentity(); gl.glOrtho(0, 1, 0, 1, -10, 10); gl.glMatrixMode(GL.GL_MODELVIEW); //Set up the 32x32 texture this.texRender_FBO = genFBO(gl); gl.glBindFramebufferEXT(GL.GL_FRAMEBUFFER_EXT, this.texRender_FBO); this.texRender_32x32 = genTexture(gl); gl.glBindTexture(GL.GL_TEXTURE_2D, this.texRender_32x32); gl.glTexImage2D(GL.GL_TEXTURE_2D, 0, GL.GL_RGB_FLOAT32_ATI, 32, 32, 0, GL.GL_RGB, GL.GL_FLOAT, null); gl.glFramebufferTexture2DEXT(GL.GL_FRAMEBUFFER_EXT, GL.GL_COLOR_ATTACHMENT0_EXT, GL.GL_TEXTURE_2D, 
this.texRender_32x32, 0); //gl.glDrawBuffer(GL.GL_COLOR_ATTACHMENT0_EXT); this.texRender_RB = genRB(gl); gl.glBindRenderbufferEXT(GL.GL_RENDERBUFFER_EXT, this.texRender_RB); gl.glRenderbufferStorageEXT(GL.GL_RENDERBUFFER_EXT, GL.GL_DEPTH_COMPONENT24, 32, 32); gl.glFramebufferRenderbufferEXT(GL.GL_FRAMEBUFFER_EXT, GL.GL_DEPTH_ATTACHMENT_EXT, GL.GL_RENDERBUFFER_EXT, this.texRender_RB); gl.glBindFramebufferEXT(GL.GL_FRAMEBUFFER_EXT, 0); gl.glBindRenderbufferEXT(GL.GL_RENDERBUFFER_EXT, 0); outputError(); } private void outputError() { int c; if ((c = gl.glGetError()) != GL.GL_NO_ERROR) System.out.println(glu.gluErrorString(c)); } private int genRB(GL gl) { int[] array = new int[1]; IntBuffer ib = IntBuffer.wrap(array); gl.glGenRenderbuffersEXT(1, ib); return ib.get(0); } private int genFBO(GL gl) { int[] array = new int[1]; IntBuffer ib = IntBuffer.wrap(array); gl.glGenFramebuffersEXT(1, ib); return ib.get(0); } private int genTexture(GL gl) { final int[] tmp = new int[1]; gl.glGenTextures(1, tmp, 0); return tmp[0]; } /* mouse and keyboard callback functions */ public void reshape(GLAutoDrawable drawable, int x, int y, int width, int height) { winW = width; winH = height; gl.glViewport(0, 0, width, height); } //Sorry about these, I just had to delete massive amounts of code to boil this thing down and these are hangers-on public void mousePressed(MouseEvent e) {} public void mouseDragged(MouseEvent e) {} public void mouseReleased(MouseEvent e) {} public void keyPressed(KeyEvent e) {} public void displayChanged(GLAutoDrawable drawable, boolean modeChanged, boolean deviceChanged) { } public void keyTyped(KeyEvent e) { } public void keyReleased(KeyEvent e) { } public void mouseMoved(MouseEvent e) { } public void actionPerformed(ActionEvent e) { } public void mouseClicked(MouseEvent e) { } public void mouseEntered(MouseEvent e) { } public void mouseExited(MouseEvent e) { } }
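    GL_INVALID_FRAMEBUFFER_OPERATION almost always means the FBO bound at draw time is incomplete, and the code above never checks completeness after making its attachments. A diagnostic fragment to drop into init() right before unbinding the FBO (JOGL 1.1.1-era API; the hex print is just for looking codes up):

        int status = gl.glCheckFramebufferStatusEXT(GL.GL_FRAMEBUFFER_EXT);
        if (status != GL.GL_FRAMEBUFFER_COMPLETE_EXT) {
            System.out.println("FBO incomplete: 0x" + Integer.toHexString(status));
            // GL_FRAMEBUFFER_UNSUPPORTED_EXT here would point at the color
            // attachment's internal format (GL_RGB_FLOAT32_ATI) not being
            // renderable on this driver -- worth retrying with GL_RGBA8.
        }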

    Read the article

  • Large data processing in x86 C# gives System.OutOfMemory exception

    - by Cool
    I am processing XML coming from a server which contains both images and data in one C# function (compiled as 32-bit). When I try to parse this XML in memory it gives me a System.OutOfMemoryException. Is there any way to avoid this error? My guess is that the system cannot find a contiguous block of 50-100 MB of memory. (My PC has 8 GB of RAM and a quad-core CPU.)
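    For what it's worth, installed RAM doesn't help here: a 32-bit process has roughly 2 GB of user address space, and a fragmented heap can fail a 50-100 MB contiguous request well before that. Streaming the parse avoids the large allocation entirely. A minimal sketch with XmlReader, assuming (hypothetically) the images arrive base64-encoded in <image> elements:

        using System;
        using System.Xml;

        class StreamingParse
        {
            static void Main()
            {
                // Stream the document instead of materializing a DOM.
                using (XmlReader reader = XmlReader.Create("payload.xml"))
                {
                    byte[] buffer = new byte[8192];
                    while (reader.Read())
                    {
                        // "image" is an assumed element name; adjust to the real schema.
                        if (reader.NodeType == XmlNodeType.Element && reader.Name == "image")
                        {
                            int n;
                            // Decode the base64 payload in fixed-size chunks.
                            while ((n = reader.ReadElementContentAsBase64(buffer, 0, buffer.Length)) > 0)
                            {
                                // Write the chunk to a file or process it incrementally.
                            }
                        }
                    }
                }
            }
        }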

    Read the article

  • Numerical Integration of a vector in matlab?

    - by clowny
    Hello all! Well, I have an issue. I have a vector of 358 numbers. I'd like to numerically integrate this vector, but I don't know the function it came from. I found that we can use trapz or quad, but I don't really understand how to integrate without the function. Thanks for any help!
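    quad needs a function handle it can evaluate anywhere, so it doesn't apply to sampled data; trapz integrates the samples directly. A minimal sketch (y stands in for the 358 values, and the sample grid is an assumption you must replace with your own):

        y = rand(1, 358);          % stand-in for your 358 samples
        x = linspace(0, 1, 358);   % the points where y was sampled -- use your real grid
        I = trapz(x, y)            % trapezoidal estimate of the integral
        % with uniform spacing h, trapz(y) * h is equivalent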

    Read the article

  • Why can't I render objects to texture properly using FBO?

    - by Brett
    Hello, I'm trying to implement a simple program using 3 FBO's to render a scene to a texture and display a textured quad. I've successfully done this previously using fragment shaders before projecting to the textured quad but can't get it to work without a shader. What could be wrong? First I set up my textures glGenTextures(3, renderTextureID); for (GLint i = 0; i < 3; i++) { glBindTexture(GL_TEXTURE_2D, renderTextureID[i]); glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); // this may change with window size changes glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, fboWidth, fboHeight, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0); } Next some framebuffer state glGenFramebuffersEXT(3, framebufferID); glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, framebufferID[0]); glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT0_EXT, GL_TEXTURE_2D, renderTextureID[0], 0); GLenum fboStatus = glCheckFramebufferStatusEXT(GL_FRAMEBUFFER_EXT); if(fboStatus != GL_FRAMEBUFFER_COMPLETE_EXT) { fprintf(stderr, "FBO Error!"); } glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, framebufferID[1]); glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT0_EXT, GL_TEXTURE_2D, renderTextureID[1], 0); glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, framebufferID[2]); glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT0_EXT, GL_TEXTURE_2D, renderTextureID[2], 0); glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, 0); Then I render the scene glViewport(0, 0, fboWidth, fboHeight); glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, framebufferID[0]); glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); drawModels(); glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, 0); glBindTexture(GL_TEXTURE_2D, renderTextureID[0]); glEnable(GL_SCISSOR_TEST); glViewport(windowWidth/2, 0, fboWidth, fboHeight); glScissor(windowWidth/2, 0, fboWidth, fboHeight); glMatrixMode(GL_PROJECTION); glLoadIdentity(); glMatrixMode(GL_MODELVIEW); glLoadIdentity(); glBegin(GL_QUADS); glTexCoord2i(0, 0); glVertex2f(-1.0f, -1.0f); glTexCoord2i(1, 0); glVertex2f(1.0f, -1.0f); glTexCoord2i(1, 1); glVertex2f(1.0f, 1.0f); glTexCoord2i(0, 1); glVertex2f(-1.0f, 1.0f); glEnd(); glDisable(GL_SCISSOR_TEST); glutSwapBuffers(); The result is a yellow square. Draw models draws yellow wireframe cubes if not using the FBO so the problem is not that. I've read a previous similar post that talks about using glTexParameterf after generating and binding textures but i've done this. Thanks...
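    One state worth double-checking in the excerpt above: the texture is bound for the final pass, but GL_TEXTURE_2D never appears to be enabled. With a fragment shader that doesn't matter, but the fixed-function pipeline only samples textures while texturing is enabled, which would leave the quad filled with the current color. A sketch of the fixed-function draw (a fragment, reusing the post's names):

        /* Enable fixed-function texturing before drawing the textured quad. */
        glEnable(GL_TEXTURE_2D);
        glBindTexture(GL_TEXTURE_2D, renderTextureID[0]);
        /* GL_REPLACE shows the texels as-is instead of modulating by the current color. */
        glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE);
        /* ... glBegin(GL_QUADS) ... glEnd() ... */
        glDisable(GL_TEXTURE_2D);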

    Read the article

  • Parallel computing for integrals

    - by Iman
    I want to reduce the calculation time for a time-consuming integral by splitting the integration range. I'm using C++, Windows, and a quad-core Intel i7 CPU. How can I split it into 4 parallel computations?
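    One straightforward route on a quad core is OpenMP: split the range into per-thread chunks and combine partial sums with a reduction. A minimal sketch using a composite trapezoidal rule (the integrand f is a placeholder for the real, thread-safe one); compile with /openmp (MSVC) or -fopenmp (GCC/Clang):

        #include <cmath>
        #include <cstdio>

        // Placeholder integrand; substitute the real function.
        static double f(double x) { return std::exp(-x * x); }

        int main() {
            const double a = 0.0, b = 4.0;   // integration range (illustrative)
            const long   n = 40000000;       // subintervals
            const double h = (b - a) / n;
            double sum = 0.0;

            // Each of the 4 threads sums a disjoint chunk; OpenMP combines them.
            #pragma omp parallel for reduction(+:sum) num_threads(4)
            for (long i = 1; i < n; ++i)
                sum += f(a + i * h);

            const double result = h * (sum + 0.5 * (f(a) + f(b)));
            std::printf("integral ~= %.10f\n", result);
            return 0;
        }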

    Read the article

  • What kind of data processing problems would CUDA help with?

    - by Chris McCauley
    Hi, I've worked on many data matching problems, and very often they boil down to running many instances of CPU-intensive algorithms, such as Hamming/edit distance, quickly and in parallel. Is this the kind of thing that CUDA would be useful for? What kinds of data processing problems have you solved with it? Is there really an uplift over a standard quad-core Intel desktop? Chris
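    Hamming distance in particular maps cleanly onto the GPU: one independent popcount per pair, which is exactly the shape CUDA likes. A minimal sketch (names and sizes are illustrative), comparing 64-bit packed bit vectors against one query:

        #include <cstdint>

        // One thread per record: XOR exposes differing bits, __popcll counts them.
        __global__ void hammingKernel(const uint64_t *records, uint64_t query,
                                      int *dist, int n)
        {
            int i = blockIdx.x * blockDim.x + threadIdx.x;
            if (i < n)
                dist[i] = __popcll(records[i] ^ query);
        }

        // Launch: hammingKernel<<<(n + 255) / 256, 256>>>(d_records, q, d_dist, n);

    Whether it beats a quad core depends on keeping the data resident on the GPU; if every batch has to cross the PCIe bus, the transfer can eat the gain.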

    Read the article

  • What should be the ideal number of parallel java threads for copying a large set of files from a qua

    - by ukgenie
    What should be the ideal number of parallel Java threads for copying a large set of files from a quad-core Linux box to an external shared folder? I can see that with a single thread it takes a hell of a lot of time to move the files one by one. Multiple threads improve the copy performance, but I don't know what the exact number of threads should be. I am using the Java ExecutorService to create the thread pool.
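    Since file copying is I/O-bound, the core count matters less than how many concurrent streams the disk and the share can sustain; the usual approach is to make the pool size a parameter and measure throughput. A minimal sketch (assuming Java 8 and java.nio.file for brevity):

        import java.io.IOException;
        import java.nio.file.*;
        import java.util.List;
        import java.util.concurrent.*;

        public class ParallelCopy {
            // Copies every file into destDir using a fixed pool; tune 'threads'
            // empirically (start near 4 on a quad core and adjust by throughput).
            public static void copyAll(List<Path> files, Path destDir, int threads)
                    throws InterruptedException {
                ExecutorService pool = Executors.newFixedThreadPool(threads);
                for (final Path src : files) {
                    pool.submit(() -> {
                        try {
                            Files.copy(src, destDir.resolve(src.getFileName()),
                                       StandardCopyOption.REPLACE_EXISTING);
                        } catch (IOException e) {
                            e.printStackTrace(); // collect/report as appropriate
                        }
                    });
                }
                pool.shutdown();
                pool.awaitTermination(1, TimeUnit.HOURS);
            }
        }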

    Read the article

  • What is `objc_msgSend_fixup`, exactly?

    - by Luis Antonio Botelho O. Leite
    I'm messing around with the Objective-C runtime, trying to compile Objective-C code without linking it against libobjc, and I'm having some segmentation fault problems with a program, so I generated an assembly file from it. I think it's not necessary to show the whole assembly file. At some point in my main function, I've got the following line (which, by the way, is the line after which I get the seg fault): callq *l_objc_msgSend_fixup_alloc and here is the definition for l_objc_msgSend_fixup_alloc: .hidden l_objc_msgSend_fixup_alloc # @"\01l_objc_msgSend_fixup_alloc" .type l_objc_msgSend_fixup_alloc,@object .section "__DATA, __objc_msgrefs, coalesced","aw",@progbits .weak l_objc_msgSend_fixup_alloc .align 16 l_objc_msgSend_fixup_alloc: .quad objc_msgSend_fixup .quad L_OBJC_METH_VAR_NAME_ .size l_objc_msgSend_fixup_alloc, 16 I've reimplemented objc_msgSend_fixup as a function (id objc_msgSend_fixup(id self, SEL op, ...)) which returns nil (just to see what happens), but this function isn't even being called (the program crashes before calling it). So, my question is, what is callq *l_objc_msgSend_fixup_alloc supposed to do, and what is objc_msgSend_fixup (after l_objc_msgSend_fixup_alloc:) supposed to be (a function or an object)? Edit: to better explain, I'm not linking my source file against the objc library. What I'm trying to do is implement some parts of the library, just to see how it works. Here is an outline of what I've done: #include <stdio.h> #include <objc/runtime.h> @interface MyClass { } +(id) alloc; @end @implementation MyClass +(id) alloc { // alloc the object return nil; } @end id objc_msgSend_fixup(id self, SEL op, ...) { printf("Calling objc_msgSend_fixup()...\n"); // looks for the method implementation for SEL in self's vtable return nil; // Since this is just a test, this function doesn't need to do that } int main(int argc, char *argv[]) { MyClass *m; m = [MyClass alloc]; // At this point, according to the assembly code generated, // objc_msgSend_fixup should be called. So the program should, at least, print // "Calling objc_msgSend_fixup()..." on the screen, but it crashes before // objc_msgSend_fixup() is called... return 0; } If the runtime needs to access the object's vtable to find the correct method to call, what is the function which actually does this? I think it is objc_msgSend_fixup, in this case. So, when objc_msgSend_fixup is called, it receives an object as one of its parameters, and, if this object hasn't been initialized, the function fails. So, I've implemented my own version of objc_msgSend_fixup. According to the assembly source above, it should be called. It doesn't matter whether the function actually looks for the implementation of the selector passed as a parameter; I just want objc_msgSend_fixup to be called. But it's not being called; that is, the function that looks for the object's data is not even being called, instead of being called and causing a fault (it returns nil, which, by the way, doesn't matter). The program seg faults before objc_msgSend_fixup is called...

    Read the article

  • Fastest way to compute a "Visual" checksum of an image

    - by ensnare
    I'm looking to create an ID system for cataloging images. I can't use md5() since that will change if I alter the EXIF tags of the image. I am currently using the SHA1 checksum computed by ImageMagick. It works perfectly, but it's really, really slow on larger images (~15 seconds on a quad-core Xeon for a 21-megapixel JPEG). Are there any other "visual" methods of uniquely identifying an image that would be faster? Thank you.
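    Most of that time goes into decoding the 21-megapixel image, not the hash itself, so a faster hash won't move the needle much; hashing fewer pixels will. A sketch of the same idea in Python with Pillow (the library choice is an assumption; any decoder that exposes raw pixels works), including an optional downscale trade-off:

        import hashlib
        from PIL import Image

        def visual_id(path, small=None):
            """SHA-1 over decoded pixel data, so EXIF edits don't change the ID.

            If small=(w, h) is given, hash a downscaled copy: much faster, still
            EXIF-proof, but no longer a hash of the exact pixels (and resampling
            must stay identical across library versions for IDs to be stable).
            """
            with Image.open(path) as im:
                im = im.convert("RGB")      # normalize pixel layout
                if small:
                    im = im.resize(small)   # trade exactness for speed
                return hashlib.sha1(im.tobytes()).hexdigest()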

    Read the article

  • is there anyway to know if your supposedly fully dedicated server is really a virtually resource-sha

    - by siran
    Hi, sometimes I feel my server isn't responding as smoothly as I would expect (I have an Intel(R) Xeon(TM) CPU 2.80GHz Quad Core), given that, for example, the 'top' command reports a low load (< 0.5) and the CPUs are almost completely idle... I may also have internet connectivity issues, so I don't really know if it's me or the server itself. Is there any kind of benchmarking script (or something analogous) I could run to see the actual performance of the server?
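    A rough sketch of the usual approach: time a fixed CPU-bound workload and a fixed disk workload, then run the same commands on hardware you trust and compare. The numbers are machine-dependent; the point is the ratio, not the absolute value.

        # CPU: a fixed busy loop on one core (run 4 in parallel to test all cores)
        time sh -c 'i=0; while [ $i -lt 1000000 ]; do i=$((i+1)); done'

        # Disk: sequential write throughput (fdatasync keeps the cache honest)
        dd if=/dev/zero of=/tmp/ddtest bs=1M count=512 conv=fdatasync; rm /tmp/ddtest

        # If sysbench is installed (classic 0.4.x syntax):
        sysbench --test=cpu --cpu-max-prime=20000 run

    On an oversold VM you'd also expect 'top' to show nonzero steal time (the "st" column) while the benchmark runs.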

    Read the article

  • Debugging a basic OpenGL texture fail? (iphone)

    - by Ben
    Hey all, I have a very basic texture map problem in GL on iPhone, and I'm wondering what strategies there are for debugging this kind of thing. (Frankly, just staring at state machine calls and wondering if any of them is wrong or misordered is no way to live-- are there tools for this?) I have a 512x512 PNG file that I'm loading up from disk (not specially packed), creating a CGBitmapContext, then calling CGContextDrawImage to get bytes out of it. (This code is essentially stolen from an Apple sample.) I'm trying to map the texture to a "quad", with code that looks essentially like this-- all flat 2D stuff, nothing fancy: glEnable(GL_TEXTURE_2D); glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); glEnableClientState(GL_TEXTURE_COORD_ARRAY); GLfloat vertices[8] = { viewRect.origin.x, viewRect.size.height, viewRect.origin.x, viewRect.origin.y, viewRect.size.width, viewRect.origin.y, viewRect.size.width, viewRect.size.height }; GLfloat texCoords[8] = { 0, 1.0, 0, 0, 1.0, 0, 1.0, 1.0 }; glBindTexture(GL_TEXTURE_2D, myTextureRef); // This was previously bound to glVertexPointer(2, GL_FLOAT , 0, vertices); glTexCoordPointer(2, GL_FLOAT, 0, texCoords); glDrawArrays(GL_TRIANGLE_FAN, 0, 4); glDisableClientState(GL_TEXTURE_COORD_ARRAY); glDisable(GL_TEXTURE_2D); My supposedly textured area comes out just black. I see no debug output from the CG calls to set up the texture. glGetError reports nothing. If I simplify this code block to just draw the verts, but set up a pure color, the quad area lights up exactly as expected. If I clear the whole context immediately beforehand to red, I don't see the red-- which means something is being rendered there, but not the contents of my PNG. What could I be doing wrong? And more importantly, what are the right tools and techniques for debugging this sort of thing, because running into this kind of problem and not being able to "step through it" in a debugger in any meaningful way is a bummer. Thanks!
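    On the "how do I debug this" question, a few direct state checks narrow things down faster than re-reading call sequences. A fragment-style sketch (myTextureRef and the loader come from the code described above):

        /* 1. Is the texture you think is bound actually bound? */
        GLint bound = 0;
        glGetIntegerv(GL_TEXTURE_BINDING_2D, &bound);
        printf("bound: %d, expected: %d\n", bound, (int)myTextureRef);

        /* 2. Check errors immediately after glTexImage2D, not just at frame end;
           a late glGetError() catches the error but can't localize it. */
        GLenum err = glGetError();
        if (err != GL_NO_ERROR) printf("GL error: 0x%04x\n", err);

        /* 3. Bisect: upload a solid-color malloc'd buffer instead of the PNG
           bytes. If the quad turns that color, the GL side is fine and the
           suspect is the CGBitmapContext copy (NULL data, wrong byte order). */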

    Read the article

  • Is it reasonable to use OpenGL for desktop applications?

    - by JamesK89
    I've been writing a small desktop gadget-type application that displays scrolling text along the bottom of the screen (similar to the old CNN news ticker), but the performance of GDI is just unsatisfactory (as high as 8-12% CPU on a quad core and 20% on a single core), even after I've attempted to clean out bottlenecks. I was considering using OpenGL instead to render everything, but I don't know whether it's reasonable to require users to have hardware acceleration for a tiny app like this. Does anybody have any input on this?

    Read the article

  • How to create a CPU spike with a bash command

    - by User1
    I want to create a near-100% load on a Linux machine. It's a quad-core system and I want all cores going full speed. Ideally, the CPU load would last a designated amount of time and then stop. I'm hoping there's some trick in bash. I'm thinking of some sort of infinite loop.
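    A minimal sketch along exactly those lines, assuming GNU coreutils' timeout is available (otherwise, note the PIDs and kill them):

        # One busy loop per core, stopped after 60 seconds.
        for i in 1 2 3 4; do
            timeout 60 sh -c 'while :; do :; done' &
        done
        wait    # returns once all four loops have been killed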

    Read the article

  • Is a display list best for this? (OpenGL)

    - by user146780
    I'm rendering 2D polygons with the GLUTesselator the first time; then they are stored in a display list for subsequent use. I think VBOs might be faster, but since I can't access the stuff that the tesselator outputs, and since it uses mixes of GL_TRIANGLES, quads, strips, etc., I'm not sure how I could do this, even though I would like to use VBOs once the GLUTesselator is done with them for optimal performance. Thanks
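    The tessellator's output is reachable: its callbacks are where the primitives come out, and registering a GLU_TESS_EDGE_FLAG callback (even a no-op) obliges GLU to emit independent triangles only, with no fans or strips, so the vertex callback can append straight into a flat array for a VBO. A C sketch (fixed-size buffer and no error handling, for brevity):

        #include <GL/glu.h>

        #define MAX_VERTS 65536
        static GLfloat capture[MAX_VERTS * 3];
        static int captured = 0;

        /* Called once per triangle vertex; data is the GLdouble[3] we fed in. */
        static void tessVertex(void *data)
        {
            const GLdouble *v = (const GLdouble *)data;
            capture[captured * 3 + 0] = (GLfloat)v[0];
            capture[captured * 3 + 1] = (GLfloat)v[1];
            capture[captured * 3 + 2] = (GLfloat)v[2];
            captured++;
        }

        /* Its mere presence forces GL_TRIANGLES-only output. */
        static void tessEdgeFlag(GLboolean flag) { (void)flag; }

        GLUtesselator *makeCaptureTess(void)
        {
            GLUtesselator *t = gluNewTess();
            /* On Windows these function pointers need the CALLBACK convention. */
            gluTessCallback(t, GLU_TESS_VERTEX,    (void (*)())tessVertex);
            gluTessCallback(t, GLU_TESS_EDGE_FLAG, (void (*)())tessEdgeFlag);
            return t;
        }
        /* After gluTessEndPolygon, 'capture' holds captured*3 floats of
           GL_TRIANGLES, ready for glBufferData(GL_ARRAY_BUFFER, ...). */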

    Read the article

  • Manually Increasing the Amount of CPU a Java Application Uses

    - by SkylineAddict
    I've just made a program with Eclipse that takes a really long time to execute. It's taking even longer because it only loads my CPU to 25% (I'm assuming that's because I'm using a quad core and the program is only using one core). Is there any way to make the program use all 4 cores to max it out? Java is supposed to be natively multi-threaded, so I don't understand why it would only use 25%.
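    Java gives you threads, but it won't parallelize a single-threaded program for you: one runnable thread occupies one core, hence the 25% ceiling on a quad core. The work has to be split explicitly. A minimal sketch (crunch is a placeholder for your actual computation, which must be divisible into independent chunks):

        import java.util.concurrent.*;

        public class UseAllCores {
            public static void main(String[] args) throws Exception {
                int cores = Runtime.getRuntime().availableProcessors(); // 4 here
                ExecutorService pool = Executors.newFixedThreadPool(cores);
                Future<?>[] parts = new Future<?>[cores];
                for (int p = 0; p < cores; p++) {
                    final int part = p;
                    // Each task handles a disjoint slice of the work.
                    parts[p] = pool.submit(() -> crunch(part, cores));
                }
                for (Future<?> f : parts) f.get();  // wait for every slice
                pool.shutdown();
            }

            // Placeholder: e.g. process elements i where i % of == part.
            static void crunch(int part, int of) { }
        }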

    Read the article

  • Exchange Server 2007 Setup

    - by AlamedaDad
    Hi, I'm working on an upgrade to Exchange 2007 and I wanted to get some advice on hardware choices. We currently have an Exchange 2003 STD server with 400 users split between 6 AD Sites, housed on a single server. We need to move to a redundant, fault-tolerant system to support our users. I'm planning on installing 2 Dell 1950 servers with W2k8-std to act as CAS and Hub servers, with NLB to allow abstraction of the actual server name to the users. There won't be an edge system since we already have a Barracuda box that will handle in/out spam/virus filtering. On the back end I'm planning on 2 mailbox servers, which will be Dell 2950s with 16GB RAM, 2 dual-core or quad-core CPUs, and 6 300GB SAS drives in some RAID config. These systems will be clustered using W2k8 Ent clustering and running CCR in Exchange. My questions are as follows:

    Is 16GB enough RAM for serving that many mailboxes along with the Windows clustering and CCR?

    I'm trying to figure out disk layouts and I'm unsure whether to use all local disk, or some local and some SAN via an OpenFiler iSCSI server. The SAN would be a Dell 2850 with 6 300GB SCSI drives and a PERC controller to slice as I want, with 8GB RAM.
      Option 1: 2 drives, RAID 1 - OS; 2 drives, RAID 1 - Logs; 2 drives, RAID 1 - Mail stores.
      Option 2: 2 drives, RAID 1 - OS and logs; 4 drives, RAID 5 - Mail stores and scratch space for eseutil.
      Option 3: 2 drives, RAID 1 - OS; 2 drives, RAID 1 - Logs; 2 drives, RAID 0 - scratch space; ~300GB iSCSI volume for mail stores.
      Option 4: 2 drives, RAID 1 - OS; 4 drives, RAID 5 - scratch space; ~300GB iSCSI volume for mail stores; ~300GB iSCSI volume for logs.

    I have 2 sockets for CPUs and need to choose between dual and quad cores. The dual cores have faster clocks but less cache, and I'm thinking an older architecture. Am I better off with more cores and cache while sacrificing clock speed?

    I am planning on adding the new E2K7 cluster to the E2K3 server and then moving each mailbox over, all at once, then removing the old server. This seems more complicated than simply getting rid of the 2003 server, then adding the 2007 cluster and restoring the mailboxes using PowerControls or exmerge. The migration option lets me do this on my own time, whereas a cutover means it all needs to work at once. If I go with the cutover method, how can I prebuild the servers and add them to the domain right after removing the 2003 server, or can't I? I think the answer is no, and the migration is my only real option if I want to prebuild.

    I also need to migrate about 30GB of Public Folders. Is there anything special about this, other than specifying in the E2K7 install that I want older Outlook clients and PFs set up? I guess I could even keep the E2K3 server to host just the PFs?

    Lastly, if I have a mix of Outlook 2000, 2003, and 2007, what do I need to do to make sure they all have access to the GAL and OAB? At time of cutover, we'll be at about 90% 2007, but we will have some older stuff around. My plan is to use Outlook Anywhere on laptops that are used outside the physical network. Are there any gotchas involved in that? I'm even thinking about using it for all Outlook clients; does anyone do that? The reason I'm considering it is that our WAN is really VPN tunnels over internet connections, so not a fully meshed, stable WAN.

    Thank you all very much for the assistance in advance and I look forward to discussion of these points! Regards...Michael

    Read the article

  • How to diagnose computer lockups and freezes?

    - by Scott Mitchell
    I built a desktop computer a couple years back with the following specs:
      CPU: Intel Core 2 Quad Q9300 Yorkfield 2.5GHz 6 MB L2 Cache LGA 775 95W Quad-Core Processor BX80580Q9300
      Motherboard: EVGA 122-CK-NF68-T1 LGA 775 NVIDIA nForce 680i SLI ATX Intel Motherboard
      Video Card: Two EVGA 256-P2-N758-TR GeForce 8600GT SCC 256 MB 128-bit GDDR3 PCI Express x16 SLI Supported Video Card
      PSU: SeaSonic S12 Energy Plus SS-550HT 550W ATX12V V2.3 / EPS12V V2.91 SLI Certified CrossFire Ready 80 PLUS Certified Active PFC Power Supply
      Memory: Two G.SKILL 4 GB (2 x 2 GB) 240-Pin DDR2 SDRAM DDR2 800 (PC2 6400) Dual Channel Kit Desktop Memory Model F2-6400CL5D-4GBPQ

    Since its inception, the machine has periodically locked up, the regularity having varied over the years from once a day to once a month. Typically, lockups happen once every few days. By "lockup" I mean my computer just freezes. The screen locks up, I can't move the mouse. Hitting keys on my keyboard that normally turn LEDs on or off (such as Caps Lock) no longer toggles the LEDs. If there was music playing at the time of the lockup, noise keeps coming out of the speakers, but it's just the current frequency/note playing indefinitely. There is no BSOD. When such a lockup occurs I have to do a hard reboot by either turning off the computer or hitting the reset button. I have the most recent version of the NVIDIA hardware drivers, and update them semi-regularly, but that hasn't seemed to help. I am currently using Windows 7 x64, but was previously using Windows Server 2003 x64 and having the same lockup issues. My guess is that it's somehow video driver or motherboard related, but I don't know how to go about diagnosing this problem to narrow down which of the two is the culprit.

    Additional information re: cooling. I've not installed any after-market cooling systems aside from two regular fans I scavenged from an older computer. The fan atop the CPU is the one that shipped with it. One of the two scavenged fans is located at the bottom corner of the tower, in an attempt to create some airflow from front to back. The second fan is pointed directly at the two video cards.

    SpeedFan installation and readings: per studiohack's suggestion, I installed SpeedFan, which provided the following temperature readings: GPU: 63C; GPU: 65C; System: 76C; CPU: 64C; AUX: 36C; Core 0: 78C; Core 1: 76C; Core 2: 79C; Core 3: 79C.

    Update #3: Another Lockup :-( Well, I had another lockup last night. SpeedFan reported the CPU temp at 38 C when it happened, and there was no spike in temperature leading up to the freeze. One thing I notice is that the freeze seems more likely to happen if I am watching a video. In fact, of the last 5 freezes over the past month, 4 of them have been while watching a video on Flickr. Not necessarily the same video, but a video nevertheless. I don't know if this is just coincidence or if it means anything. (As an aside, each night before bedtime my 2-year-old daughter sits on my lap and watches some home videos on Flickr and, in the last month, has learned the phrase, "Uh oh, computer broke.")

    Update #4: MemTest86 and 3DMark06 test results: per suggestions in the comments, I ran MemTest86 overnight and it cycled through the 8 GB of memory 5 times without error. I also ran the 3DMark06 test without a problem (see my scores at http://3dmark.com/3dm06/15163549). So... what now? :-) Any further suggestions on what to check? Is there some way to get a stack trace or something when the computer locks up like that?

    Resolution: I never did figure out the particular problem, but based on the suggestions here and elsewhere, I'm presuming it was a motherboard issue. In any event, I recently upgraded my system, buying a new motherboard, PSU, CPU, and RAM, and that new rig has been working splendidly the past several weeks. I am using the same graphics cards as in the old setup, so I think it's safe to reason that they weren't the cause of the problem.

    Read the article

  • Pixel Shader Giving Black output

    - by Yashwinder
    I am coding in C# using Windows Forms and the SlimDX API to show the effect of a pixel shader. When I am setting the pixel shader, I am getting a black output screen but if I am not using the pixel shader then I am getting my image rendered on the screen. I have the following C# code using System; using System.Collections.Generic; using System.Linq; using System.Windows.Forms; using System.Runtime.InteropServices; using SlimDX.Direct3D9; using SlimDX; using SlimDX.Windows; using System.Drawing; using System.Threading; namespace WindowsFormsApplication1 { // Vertex structure. [StructLayout(LayoutKind.Sequential)] struct Vertex { public Vector3 Position; public float Tu; public float Tv; public static int SizeBytes { get { return Marshal.SizeOf(typeof(Vertex)); } } public static VertexFormat Format { get { return VertexFormat.Position | VertexFormat.Texture1; } } } static class Program { public static Device D3DDevice; // Direct3D device. public static VertexBuffer Vertices; // Vertex buffer object used to hold vertices. public static Texture Image; // Texture object to hold the image loaded from a file. public static int time; // Used for rotation caculations. public static float angle; // Angle of rottaion. public static Form1 Window =new Form1(); public static string filepath; static VertexShader vertexShader = null; static ConstantTable constantTable = null; static ImageInformation info; [STAThread] static void Main() { filepath = "C:\\Users\\Public\\Pictures\\Sample Pictures\\Garden.jpg"; info = new ImageInformation(); info = ImageInformation.FromFile(filepath); PresentParameters presentParams = new PresentParameters(); // Below are the required bare mininum, needed to initialize the D3D device. presentParams.BackBufferHeight = info.Height; // BackBufferHeight, set to the Window's height. presentParams.BackBufferWidth = info.Width+200; // BackBufferWidth, set to the Window's width. presentParams.Windowed =true; presentParams.DeviceWindowHandle = Window.panel2 .Handle; // DeviceWindowHandle, set to the Window's handle. // Create the device. D3DDevice = new Device(new Direct3D (), 0, DeviceType.Hardware, Window.Handle, CreateFlags.HardwareVertexProcessing, presentParams); // Create the vertex buffer and fill with the triangle vertices. (Non-indexed) // Remember 3 vetices for a triangle, 2 tris per quad = 6. Vertices = new VertexBuffer(D3DDevice, 6 * Vertex.SizeBytes, Usage.WriteOnly, VertexFormat.None, Pool.Managed); DataStream stream = Vertices.Lock(0, 0, LockFlags.None); stream.WriteRange(BuildVertexData()); Vertices.Unlock(); // Create the texture. 
Image = Texture.FromFile(D3DDevice,filepath ); // Turn off culling, so we see the front and back of the triangle D3DDevice.SetRenderState(RenderState.CullMode, Cull.None); // Turn off lighting D3DDevice.SetRenderState(RenderState.Lighting, false); ShaderBytecode sbcv = ShaderBytecode.CompileFromFile("C:\\Users\\yashwinder singh\\Desktop\\vertexShader.vs", "vs_main", "vs_1_1", ShaderFlags.None); constantTable = sbcv.ConstantTable; vertexShader = new VertexShader(D3DDevice, sbcv); ShaderBytecode sbc = ShaderBytecode.CompileFromFile("C:\\Users\\yashwinder singh\\Desktop\\pixelShader.txt", "ps_main", "ps_3_0", ShaderFlags.None); PixelShader ps = new PixelShader(D3DDevice, sbc); VertexDeclaration vertexDecl = new VertexDeclaration(D3DDevice, new[] { new VertexElement(0, 0, DeclarationType.Float3, DeclarationMethod.Default, DeclarationUsage.PositionTransformed, 0), new VertexElement(0, 12, DeclarationType.Float2 , DeclarationMethod.Default, DeclarationUsage.TextureCoordinate , 0), VertexElement.VertexDeclarationEnd }); Application.EnableVisualStyles(); MessagePump.Run(Window, () => { // Clear the backbuffer to a black color. D3DDevice.Clear(ClearFlags.Target | ClearFlags.ZBuffer, Color.Black, 1.0f, 0); // Begin the scene. D3DDevice.BeginScene(); // Setup the world, view and projection matrices. //D3DDevice.VertexShader = vertexShader; //D3DDevice.PixelShader = ps; // Render the vertex buffer. D3DDevice.SetStreamSource(0, Vertices, 0, Vertex.SizeBytes); D3DDevice.VertexFormat = Vertex.Format; // Setup our texture. Using Textures introduces the texture stage states, // which govern how Textures get blended together (in the case of multiple // Textures) and lighting information. D3DDevice.SetTexture(0, Image); // Now drawing 2 triangles, for a quad. D3DDevice.DrawPrimitives(PrimitiveType.TriangleList , 0, 2); // End the scene. D3DDevice.EndScene(); // Present the backbuffer contents to the screen. 
D3DDevice.Present(); }); if (Image != null) Image.Dispose(); if (Vertices != null) Vertices.Dispose(); if (D3DDevice != null) D3DDevice.Dispose(); } private static Vertex[] BuildVertexData() { Vertex[] vertexData = new Vertex[6]; vertexData[0].Position = new Vector3(-1.0f, 1.0f, 0.0f); vertexData[0].Tu = 0.0f; vertexData[0].Tv = 0.0f; vertexData[1].Position = new Vector3(-1.0f, -1.0f, 0.0f); vertexData[1].Tu = 0.0f; vertexData[1].Tv = 1.0f; vertexData[2].Position = new Vector3(1.0f, 1.0f, 0.0f); vertexData[2].Tu = 1.0f; vertexData[2].Tv = 0.0f; vertexData[3].Position = new Vector3(-1.0f, -1.0f, 0.0f); vertexData[3].Tu = 0.0f; vertexData[3].Tv = 1.0f; vertexData[4].Position = new Vector3(1.0f, -1.0f, 0.0f); vertexData[4].Tu = 1.0f; vertexData[4].Tv = 1.0f; vertexData[5].Position = new Vector3(1.0f, 1.0f, 0.0f); vertexData[5].Tu = 1.0f; vertexData[5].Tv = 0.0f; return vertexData; } } } And my pixel shader and vertex shader code are as following // Pixel shader input structure struct PS_INPUT { float4 Position : POSITION; float2 Texture : TEXCOORD0; }; // Pixel shader output structure struct PS_OUTPUT { float4 Color : COLOR0; }; // Global variables sampler2D Tex0; // Name: Simple Pixel Shader // Type: Pixel shader // Desc: Fetch texture and blend with constant color // PS_OUTPUT ps_main( in PS_INPUT In ) { PS_OUTPUT Out; //create an output pixel Out.Color = tex2D(Tex0, In.Texture); //do a texture lookup Out.Color *= float4(0.9f, 0.8f, 0.0f, 1); //do a simple effect return Out; //return output pixel } // Vertex shader input structure struct VS_INPUT { float4 Position : POSITION; float2 Texture : TEXCOORD0; }; // Vertex shader output structure struct VS_OUTPUT { float4 Position : POSITION; float2 Texture : TEXCOORD0; }; // Global variables float4x4 WorldViewProj; // Name: Simple Vertex Shader // Type: Vertex shader // Desc: Vertex transformation and texture coord pass-through // VS_OUTPUT vs_main( in VS_INPUT In ) { VS_OUTPUT Out; //create an output vertex Out.Position = mul(In.Position, WorldViewProj); //apply vertex transformation Out.Texture = In.Texture; //copy original texcoords return Out; //return output vertex }
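    A note on the commented-out assignments above: when they're enabled, the vertex shader multiplies every position by WorldViewProj, and that constant is never set, so the transformed positions are undefined (typically clipped away, leaving black). The vertex declaration also says PositionTransformed while the shader declares POSITION, and the declaration is never assigned to the device. A sketch of the missing pieces, assuming SlimDX's ConstantTable API (verify the exact overloads against your SlimDX version):

        // Set the shader constant before drawing; Matrix.Identity is a stand-in
        // for the real world * view * projection product.
        EffectHandle h = constantTable.GetConstant(null, "WorldViewProj");
        constantTable.SetValue(D3DDevice, h, Matrix.Identity);

        D3DDevice.VertexDeclaration = vertexDecl;  // actually use the declaration
        D3DDevice.VertexShader = vertexShader;
        D3DDevice.PixelShader = ps;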

    Read the article
