Search Results

Search found 44501 results on 1781 pages for 'software development life'.


  • 2D OBB collision detection, resolving collisions?

    - by Milo
    I currently use OBBs and I have a vehicle that is a rigid body and some buildings. Here is my update() private void update() { camera.setPosition((vehicle.getPosition().x * camera.getScale()) - ((getWidth() ) / 2.0f), (vehicle.getPosition().y * camera.getScale()) - ((getHeight() ) / 2.0f)); //camera.move(input.getAnalogStick().getStickValueX() * 15.0f, input.getAnalogStick().getStickValueY() * 15.0f); if(input.isPressed(ControlButton.BUTTON_GAS)) { vehicle.setThrottle(1.0f, false); } if(input.isPressed(ControlButton.BUTTON_BRAKE)) { vehicle.setBrakes(1.0f); } vehicle.setSteering(input.getAnalogStick().getStickValueX()); vehicle.update(16.6666f / 1000.0f); ArrayList<Building> buildings = city.getBuildings(); for(Building b : buildings) { if(vehicle.getRect().overlaps(b.getRect())) { vehicle.update(-17.0f / 1000.0f); break; } } } The collision detection works well. What doesn't is how they are dealt with. My goal is simple. If the vehicle hits a building, it should stop, and never go into the building. When I apply negative torque to reverse the car should not feel buggy and move away from the building. I don't want this to look buggy. This is my rigid body class: class RigidBody extends Entity { //linear private Vector2D velocity = new Vector2D(); private Vector2D forces = new Vector2D(); private float mass; //angular private float angularVelocity; private float torque; private float inertia; //graphical private Vector2D halfSize = new Vector2D(); private Bitmap image; public RigidBody() { //set these defaults so we don't get divide by zeros mass = 1.0f; inertia = 1.0f; } //intialize out parameters public void initialize(Vector2D halfSize, float mass, Bitmap bitmap) { //store physical parameters this.halfSize = halfSize; this.mass = mass; image = bitmap; inertia = (1.0f / 20.0f) * (halfSize.x * halfSize.x) * (halfSize.y * halfSize.y) * mass; RectF rect = new RectF(); float scalar = 10.0f; rect.left = (int)-halfSize.x * scalar; rect.top = (int)-halfSize.y * scalar; rect.right = rect.left + (int)(halfSize.x * 2.0f * scalar); rect.bottom = rect.top + (int)(halfSize.y * 2.0f * scalar); setRect(rect); } public void setLocation(Vector2D position, float angle) { getRect().set(position, getWidth(), getHeight(), angle); } public Vector2D getPosition() { return getRect().getCenter(); } @Override public void update(float timeStep) { //integrate physics //linear Vector2D acceleration = Vector2D.scalarDivide(forces, mass); velocity = Vector2D.add(velocity, Vector2D.scalarMultiply(acceleration, timeStep)); Vector2D c = getRect().getCenter(); c = Vector2D.add(getRect().getCenter(), Vector2D.scalarMultiply(velocity , timeStep)); setCenter(c.x, c.y); forces = new Vector2D(0,0); //clear forces //angular float angAcc = torque / inertia; angularVelocity += angAcc * timeStep; setAngle(getAngle() + angularVelocity * timeStep); torque = 0; //clear torque } //take a relative Vector2D and make it a world Vector2D public Vector2D relativeToWorld(Vector2D relative) { Matrix mat = new Matrix(); float[] Vector2Ds = new float[2]; Vector2Ds[0] = relative.x; Vector2Ds[1] = relative.y; mat.postRotate(JMath.radToDeg(getAngle())); mat.mapVectors(Vector2Ds); return new Vector2D(Vector2Ds[0], Vector2Ds[1]); } //take a world Vector2D and make it a relative Vector2D public Vector2D worldToRelative(Vector2D world) { Matrix mat = new Matrix(); float[] Vectors = new float[2]; Vectors[0] = world.x; Vectors[1] = world.y; mat.postRotate(JMath.radToDeg(-getAngle())); mat.mapVectors(Vectors); return new Vector2D(Vectors[0], 
Vectors[1]); } //velocity of a point on body public Vector2D pointVelocity(Vector2D worldOffset) { Vector2D tangent = new Vector2D(-worldOffset.y, worldOffset.x); return Vector2D.add( Vector2D.scalarMultiply(tangent, angularVelocity) , velocity); } public void applyForce(Vector2D worldForce, Vector2D worldOffset) { //add linear force forces = Vector2D.add(forces ,worldForce); //add associated torque torque += Vector2D.cross(worldOffset, worldForce); } @Override public void draw( GraphicsContext c) { c.drawRotatedScaledBitmap(image, getPosition().x, getPosition().y, getWidth(), getHeight(), getAngle()); } } Essentially, when any rigid body hits a building it should exhibit the same behavior. How is collision solving usually done? Thanks
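
    One common approach: instead of rewinding the simulation, separate the bodies along the minimum translation vector (MTV) that an SAT test between the two OBBs can return, then remove the velocity component pointing into the contact. A minimal sketch (Sat.TryGetMtv and the vehicle properties are hypothetical stand-ins for the poster's API):

    ```csharp
    Vector2 mtv; // contact normal * penetration depth, pointing away from the building
    if (Sat.TryGetMtv(vehicle.GetRect(), building.GetRect(), out mtv))
    {
        // 1) Positional correction: push the vehicle out of the building.
        vehicle.Position += mtv;

        // 2) Velocity correction: cancel only the component into the wall,
        //    so the vehicle stops cleanly but can still slide and reverse.
        Vector2 normal = Vector2.Normalize(mtv);
        float vn = Vector2.Dot(vehicle.Velocity, normal);
        if (vn < 0)
            vehicle.Velocity -= vn * normal;
    }
    ```

    For a full rigid-body response with spin, the same idea is applied as an impulse along the contact normal at the contact point, using the body's inverse mass and inertia; that is how most 2D engines resolve contacts.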

  • 2D Selective Gaussian Blur

    - by Joshua Thomas
    I am attempting to use Gaussian blur on a 2D platform game, selectively blurring specific types of platforms with different amounts. I am currently just messing around with simple test code, trying to get it to work correctly. What I need to eventually do is create three separate render targets, leave one normal, blur one slightly, and blur the last heavily, then recombine on the screen. Where I am now is I have successfully drawn into a new render target and performed the gaussian blur on it, but when I draw it back to the screen everything is purple aside from the platforms I drew to the target. This is my .fx file: #define RADIUS 7 #define KERNEL_SIZE (RADIUS * 2 + 1) //----------------------------------------------------------------------------- // Globals. //----------------------------------------------------------------------------- float weights[KERNEL_SIZE]; float2 offsets[KERNEL_SIZE]; //----------------------------------------------------------------------------- // Textures. //----------------------------------------------------------------------------- texture colorMapTexture; sampler2D colorMap = sampler_state { Texture = <colorMapTexture>; MipFilter = Linear; MinFilter = Linear; MagFilter = Linear; }; //----------------------------------------------------------------------------- // Pixel Shaders. //----------------------------------------------------------------------------- float4 PS_GaussianBlur(float2 texCoord : TEXCOORD) : COLOR0 { float4 color = float4(0.0f, 0.0f, 0.0f, 0.0f); for (int i = 0; i < KERNEL_SIZE; ++i) color += tex2D(colorMap, texCoord + offsets[i]) * weights[i]; return color; } //----------------------------------------------------------------------------- // Techniques. //----------------------------------------------------------------------------- technique GaussianBlur { pass { PixelShader = compile ps_2_0 PS_GaussianBlur(); } } This is the code I'm using for the gaussian blur: public Texture2D PerformGaussianBlur(Texture2D srcTexture, RenderTarget2D renderTarget1, RenderTarget2D renderTarget2, SpriteBatch spriteBatch) { if (effect == null) throw new InvalidOperationException("GaussianBlur.fx effect not loaded."); Texture2D outputTexture = null; Rectangle srcRect = new Rectangle(0, 0, srcTexture.Width, srcTexture.Height); Rectangle destRect1 = new Rectangle(0, 0, renderTarget1.Width, renderTarget1.Height); Rectangle destRect2 = new Rectangle(0, 0, renderTarget2.Width, renderTarget2.Height); // Perform horizontal Gaussian blur. game.GraphicsDevice.SetRenderTarget(renderTarget1); effect.CurrentTechnique = effect.Techniques["GaussianBlur"]; effect.Parameters["weights"].SetValue(kernel); effect.Parameters["colorMapTexture"].SetValue(srcTexture); effect.Parameters["offsets"].SetValue(offsetsHoriz); spriteBatch.Begin(0, BlendState.Opaque, null, null, null, effect); spriteBatch.Draw(srcTexture, destRect1, Color.White); spriteBatch.End(); // Perform vertical Gaussian blur. game.GraphicsDevice.SetRenderTarget(renderTarget2); outputTexture = (Texture2D)renderTarget1; effect.Parameters["colorMapTexture"].SetValue(outputTexture); effect.Parameters["offsets"].SetValue(offsetsVert); spriteBatch.Begin(0, BlendState.Opaque, null, null, null, effect); spriteBatch.Draw(outputTexture, destRect2, Color.White); spriteBatch.End(); // Return the Gaussian blurred texture. 
game.GraphicsDevice.SetRenderTarget(null); outputTexture = (Texture2D)renderTarget2; return outputTexture; } And this is the draw method affected: public void Draw(SpriteBatch spriteBatch) { device.SetRenderTarget(maxBlur); spriteBatch.Begin(); foreach (Brick brick in blueBricks) brick.Draw(spriteBatch); spriteBatch.End(); blue = gBlur.PerformGaussianBlur((Texture2D) maxBlur, helperTarget, maxBlur, spriteBatch); spriteBatch.Begin(); device.SetRenderTarget(null); foreach (Brick brick in redBricks) brick.Draw(spriteBatch); foreach (Brick brick in greenBricks) brick.Draw(spriteBatch); spriteBatch.Draw(blue, new Rectangle(0, 0, blue.Width, blue.Height), Color.White); foreach (Brick brick in purpleBricks) brick.Draw(spriteBatch); spriteBatch.End(); } I'm sorry about the massive brick of text and images(or not....new user, I tried, it said no), but I wanted to get my problem across clearly as I have been searching for an answer to this for quite a while now. As a side note, I have seen the bloom sample. Very well commented, but overly complicated since it deals in 3D; I was unable to take what I needed to learn form it. Thanks for any and all help.
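
    The purple is a strong hint: in XNA 4, a render target bound with the default RenderTargetUsage.DiscardContents loses its previous contents and shows up as the purple debug fill, so everything not covered by the bricks stays purple through both blur passes. Clearing each target right after binding it, and recombining with alpha blending instead of BlendState.Opaque, usually fixes this. A sketch of the ordering (assuming the fields above):

    ```csharp
    device.SetRenderTarget(maxBlur);
    device.Clear(Color.Transparent);   // otherwise the target starts out purple
    spriteBatch.Begin();
    foreach (Brick brick in blueBricks)
        brick.Draw(spriteBatch);
    spriteBatch.End();

    blue = gBlur.PerformGaussianBlur((Texture2D)maxBlur, helperTarget, maxBlur, spriteBatch);

    device.SetRenderTarget(null);      // switch targets before Begin, not inside a batch
    spriteBatch.Begin();               // default AlphaBlend lets the scene show through
    // ... draw the bricks and the blurred texture as before ...
    spriteBatch.End();
    ```

    The same Clear applies inside PerformGaussianBlur after each SetRenderTarget call.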

  • Render 2 images that uses different shaders

    - by Code Vader
    Based on the giawa/nehe tutorials, how can I render 2 images with different shaders. I'm pretty new to OpenGl and shaders so I'm not completely sure whats happening in my code, but I think the shaders that is called last overwrites the first one. private static void OnRenderFrame() { // calculate how much time has elapsed since the last frame watch.Stop(); float deltaTime = (float)watch.ElapsedTicks / System.Diagnostics.Stopwatch.Frequency; watch.Restart(); // use the deltaTime to adjust the angle of the cube angle += deltaTime; // set up the OpenGL viewport and clear both the color and depth bits Gl.Viewport(0, 0, width, height); Gl.Clear(ClearBufferMask.ColorBufferBit | ClearBufferMask.DepthBufferBit); // use our shader program and bind the crate texture Gl.UseProgram(program); //<<<<<<<<<<<< TOP PYRAMID // set the transformation of the top_pyramid program["model_matrix"].SetValue(Matrix4.CreateRotationY(angle * rotate_cube)); program["enable_lighting"].SetValue(lighting); // bind the vertex positions, UV coordinates and element array Gl.BindBufferToShaderAttribute(top_pyramid, program, "vertexPosition"); Gl.BindBufferToShaderAttribute(top_pyramidNormals, program, "vertexNormal"); Gl.BindBufferToShaderAttribute(top_pyramidUV, program, "vertexUV"); Gl.BindBuffer(top_pyramidTrianlges); // draw the textured top_pyramid Gl.DrawElements(BeginMode.Triangles, top_pyramidTrianlges.Count, DrawElementsType.UnsignedInt, IntPtr.Zero); //<<<<<<<<<< CUBE // set the transformation of the cube program["model_matrix"].SetValue(Matrix4.CreateRotationY(angle * rotate_cube)); program["enable_lighting"].SetValue(lighting); // bind the vertex positions, UV coordinates and element array Gl.BindBufferToShaderAttribute(cube, program, "vertexPosition"); Gl.BindBufferToShaderAttribute(cubeNormals, program, "vertexNormal"); Gl.BindBufferToShaderAttribute(cubeUV, program, "vertexUV"); Gl.BindBuffer(cubeQuads); // draw the textured cube Gl.DrawElements(BeginMode.Quads, cubeQuads.Count, DrawElementsType.UnsignedInt, IntPtr.Zero); //<<<<<<<<<<<< BOTTOM PYRAMID // set the transformation of the bottom_pyramid program["model_matrix"].SetValue(Matrix4.CreateRotationY(angle * rotate_cube)); program["enable_lighting"].SetValue(lighting); // bind the vertex positions, UV coordinates and element array Gl.BindBufferToShaderAttribute(bottom_pyramid, program, "vertexPosition"); Gl.BindBufferToShaderAttribute(bottom_pyramidNormals, program, "vertexNormal"); Gl.BindBufferToShaderAttribute(bottom_pyramidUV, program, "vertexUV"); Gl.BindBuffer(bottom_pyramidTrianlges); // draw the textured bottom_pyramid Gl.DrawElements(BeginMode.Triangles, bottom_pyramidTrianlges.Count, DrawElementsType.UnsignedInt, IntPtr.Zero); //<<<<<<<<<<<<< STAR Gl.Disable(EnableCap.DepthTest); Gl.Enable(EnableCap.Blend); Gl.BlendFunc(BlendingFactorSrc.SrcAlpha, BlendingFactorDest.One); Gl.BindTexture(starTexture); //calculate the camera position using some fancy polar co-ordinates Vector3 position = 20 * new Vector3(Math.Cos(phi) * Math.Sin(theta), Math.Cos(theta), Math.Sin(phi) * Math.Sin(theta)); Vector3 upVector = ((theta % (Math.PI * 2)) > Math.PI) ? 
Vector3.Up : Vector3.Down; program_2["view_matrix"].SetValue(Matrix4.LookAt(position, Vector3.Zero, upVector)); // make sure the shader program and texture are being used Gl.UseProgram(program_2); // loop through the stars, drawing each one for (int i = 0; i < stars.Count; i++) { // set the position and color of this star program_2["model_matrix"].SetValue(Matrix4.CreateTranslation(new Vector3(stars[i].dist, 0, 0)) * Matrix4.CreateRotationZ(stars[i].angle)); program_2["color"].SetValue(stars[i].color); Gl.BindBufferToShaderAttribute(star, program_2, "vertexPosition"); Gl.BindBufferToShaderAttribute(starUV, program_2, "vertexUV"); Gl.BindBuffer(starQuads); Gl.DrawElements(BeginMode.Quads, starQuads.Count, DrawElementsType.UnsignedInt, IntPtr.Zero); // update the position of the star stars[i].angle += (float)i / stars.Count * deltaTime * 2 * rotate_stars; stars[i].dist -= 0.2f * deltaTime * rotate_stars; // if we've reached the center then move this star outwards and give it a new color if (stars[i].dist < 0f) { stars[i].dist += 5f; stars[i].color = new Vector3(generator.NextDouble(), generator.NextDouble(), generator.NextDouble()); } } Glut.glutSwapBuffers(); } The same goes for the textures, whichever one I mention last gets applied to both object?
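
    A likely cause: glUniform-style calls (which wrappers like program_2["view_matrix"].SetValue(...) typically map to) write to the currently bound program, so setting program_2's view matrix before Gl.UseProgram(program_2) actually modifies the first program. The usual pattern is to bind each program before touching its uniforms; sketched against the code above:

    ```csharp
    // First batch of geometry with the first program:
    Gl.UseProgram(program);
    program["model_matrix"].SetValue(modelMatrix);
    // ... bind buffers and draw the pyramids and cube ...

    // Stars with the second program: bind it first, then set its uniforms.
    Gl.UseProgram(program_2);
    program_2["view_matrix"].SetValue(Matrix4.LookAt(position, Vector3.Zero, upVector));
    // ... per-star uniforms, buffer bindings, DrawElements ...
    ```

    The same rule explains the textures: glBindTexture changes state globally, so each texture must be re-bound before the draw call that needs it, not once for both objects.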

  • BlitzMax - generating 2D neon glowing line effect to png file

    - by zanlok
    Originally asked on Stack Overflow, but it became tumbleweed. I'm looking to create a glowing line effect in BlitzMax, something like a Star Wars lightsaber or laser beam. It doesn't have to be realtime; rendering just to TImage objects, and then maybe saving to PNG for later use in animation, is fine. I'm happy to use 3D features, but it will be for use in a 2D game. Since it will be on a black/space background, my strategy is to draw a series of white blurred lines with color and high transparency, then eventually central lines that are less blurred and more white. What I want to draw is actually bezier curved lines. Drawing curved lines is easy enough, but I can't use the technique above to create a good laser/neon effect because it comes out looking very segmented. So I think it may be better to use a blur effect/shader on what does render well, which is a 1-pixel bezier curve. The problem I've been having is applying a shader to just the area of the screen where the lines are drawn. If there's a way to draw lines to a texture, then blur that texture and save the PNG, that would be great to hear about. There's got to be a way to do this, but I just haven't gotten the right elements working together yet. Any help from someone familiar with this stuff would be greatly appreciated. Using just 2D calls could be advantageous: simpler to understand and re-use. It would be very nice to know how to save a PNG that preserves the transparency/alpha. P.S. I've reviewed this post (and many, many others on the Blitz site), have samples working, and have even developed my own 5x5 frag shaders. But it's 3D and a scene-wide thing that doesn't seem to convert to 2D, or to just a certain area, very well. I'd rather understand how to apply shading to a 2D scene, especially using the specifics of BlitzMax.
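
    Whatever the API ends up being, the blur itself is normally a separable Gaussian: blur the rendered curve horizontally into a texture, blur that vertically, then additively draw the result under the 1-pixel core line. A sketch of building the 1D kernel weights (generic math, shown in C# for concreteness; radius and sigma are tuning knobs):

    ```csharp
    static float[] GaussianKernel(int radius, float sigma)
    {
        var weights = new float[radius * 2 + 1];
        float sum = 0f;
        for (int i = -radius; i <= radius; i++)
        {
            weights[i + radius] = (float)Math.Exp(-(i * i) / (2f * sigma * sigma));
            sum += weights[i + radius];
        }
        for (int i = 0; i < weights.Length; i++)
            weights[i] /= sum;   // normalize so each pass preserves brightness
        return weights;
    }
    ```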

  • How to fix OpenGL Co-ordinate System in SFML?

    - by Marc Alexander Reed
    My OpenGL setup is somehow configured to work like so: (-1, 1) (0, 1) (1, 1) (-1, 0) (0, 0) (1, 0) (-1, -1) (0, -1) (1, -1) How do I configure it so that it works like so: (0, 0) (SW/2, 0) (SW, 0) (0, SH/2) (SW/2, SH/2) (SW, SH/2) (0, SH) (SW/2, SH) (SW/2, SH) SW as Screen Width. SH as Screen Height. This solution would have to fix the problem of I can't translate significantly(1) on the Z axis. Depth doesn't seem to be working either. The Perspective code I'm using is that of my WORKING GLUT OpenGL code which has a cool 3d grid and camera system etc. But my OpenGL setup doesn't seem to work with SFML. Help me guys. :( Thanks in advance. :) #include <SFML/Window.hpp> #include <SFML/Graphics.hpp> #include <SFML/Audio.hpp> #include <SFML/Network.hpp> #include <SFML/OpenGL.hpp> #include "ResourcePath.hpp" //Mac-only #define _USE_MATH_DEFINES #include <cmath> double screen_width = 640.f; double screen_height = 480.f; int main (int argc, const char **argv) { sf::ContextSettings settings; settings.depthBits = 24; settings.stencilBits = 8; settings.antialiasingLevel = 2; sf::Window window(sf::VideoMode(screen_width, screen_height, 32), "SFML OpenGL", sf::Style::Close, settings); window.setActive(); glEnable(GL_DEPTH_TEST); glEnable(GL_LIGHTING); glEnable(GL_LIGHT0); glEnable(GL_NORMALIZE); glEnable(GL_COLOR_MATERIAL); glShadeModel(GL_SMOOTH); glViewport(0, 0, screen_width, screen_height); glMatrixMode(GL_PROJECTION); glLoadIdentity(); //glOrtho(0.0f, screen_width, screen_height, 0.0f, -100.0f, 100.0f); gluPerspective(45.0f, (double) screen_width / (double) screen_height , 0.f, 100.f); glClearColor(0.f, 0.f, 1.f, 0.f); //blue while (window.isOpen()) { sf::Event event; while (window.pollEvent(event)) { switch (event.type) { case sf::Event::Closed: window.close(); break; } switch (event.key.code) { case sf::Keyboard::Escape: window.close(); break; case 'W': break; case 'S': break; case 'A': break; case 'D': break; } } glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); glMatrixMode(GL_MODELVIEW); glLoadIdentity(); glTranslatef(0.f, 0.f, 0.f); glPushMatrix(); glBegin(GL_QUADS); glColor3f(1.f, 0.f, 0.f); glVertex3f(-1.f, 1.f, 0.f); glColor3f(0.f, 1.f, 0.f); glVertex3f(1.f, 1.f, 0.f); glColor3f(1.f, 0.f, 1.f); glVertex3f(1.f, -1.f, 0.f); glColor3f(0.f, 0.f, 1.f); glVertex3f(-1.f, -1.f, 0.f); glEnd(); glPopMatrix(); window.display(); } return EXIT_SUCCESS; }
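
    Two things stand out in the code above. First, the top-left pixel coordinate system being asked for is exactly what the commented-out glOrtho line provides; gluPerspective is the wrong tool if screen-space coordinates are the goal. Second, gluPerspective requires a strictly positive near plane, and passing 0.f produces a degenerate projection matrix, which would explain the broken depth and Z translation. A sketch of both fixes:

    ```cpp
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    // Top-left origin in pixels: (0,0) upper-left, (SW,SH) lower-right.
    glOrtho(0.0, screen_width, screen_height, 0.0, -100.0, 100.0);
    // Or, if perspective is wanted, the near plane must be > 0:
    // gluPerspective(45.0, screen_width / screen_height, 0.1, 100.0);
    ```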

  • What is wrong with my Dot Product? [JavaScript]

    - by Clay Ellis Murray
    I am trying to make a Pong game and wanted to use dot products for the paddle collisions, but whenever I compute the dot product it never changes much from 0.9. This is my code for making vectors: vector = { make:function(object){ return [object.x + object.width/2,object.y + object.height/2] }, normalize:function(v){ var length = Math.sqrt(v[0] * v[0] + v[1] * v[1]) v[0] = v[0]/length v[1] = v[1]/length return v }, dot:function(v1,v2){ return v1[0] * v2[0] + v1[1] * v2[1] } } and this is where I calculate the dot product in my code: vector1 = vector.normalize(vector.make(ball)) vector2 = vector.normalize(vector.make(object)) dot = vector.dot(vector1,vector2) Here is a JsFiddle of my code; currently the paddles don't move. Any help would be greatly appreciated
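
    The underlying issue is what is being dotted: both vectors point from the top-left origin to the two centers, so their normalized dot product is just the cosine of the angle between those directions, which stays near 1 whenever the ball and paddle sit in the same general part of the screen. It says nothing about contact. For a pong paddle, the useful dot product is the ball's velocity against the paddle's surface normal, e.g.:

    ```javascript
    // Sketch: bounce the ball off a paddle face with the given surface normal,
    // e.g. normal = [1, 0] for the left paddle. Hypothetical helper.
    function reflect(velocity, normal) {
        var d = vector.dot(velocity, normal);
        if (d < 0) { // only reflect when moving into the paddle
            velocity[0] -= 2 * d * normal[0];
            velocity[1] -= 2 * d * normal[1];
        }
        return velocity;
    }
    ```

    Whether a collision happened at all is a separate rectangle-overlap test; the dot product only decides the bounce.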

  • Encode two integers into colour values and compare them in a HLSL shader

    - by Ben Slinger
    I am writing a 2D point and click adventure game in Monogame, and I'd like to be able to create an image mask for every room which defines which parts of the background a character can walk behind, and at which Y value a character needs to be at for the background to be drawn above the character. I haven't done any shader work before but after doing some reading I thought the following solution should work: Create a mask for the room with different walk behind areas painted in a colour that defines the baseline Y value (Walk Behind Mask) Render all objects to a RenderTarget2D (Base Texture) Render all objects to a different RenderTarget2D, but changing every pixel of each object to a colour that defines its Y value (Position Mask) Pass these two textures plus the image mask into the shader, and for each pixel compare the colour of the image mask to the colour of the Position Mask to the Walk Behind Mask - if the Position Mask pixel is larger (thus lower on the screen and closer to the camera) than the Walk Behind Mask, draw the pixel from the Base Texture, otherwise draw a transparent pixel (allowing the background to show through). I've got it mostly working, but I'm having trouble packing and unpacking the Y values into colours and retrieving them correctly in the shader. Here are some code examples of how I'm doing it so far: (When drawing to the Position Mask RenderTarget2D) Color posColor = new Color(((int)Position.Y >> 16) & 255, ((int)Position.Y >> 8) & 255, (int)Position.Y & 255); So as far as I can tell, this should be taking the first 3 bytes of the position integer and encoding them into a 4 byte colour (ignoring the alpha as the 4th byte). This seems to work fine, as when my character is at Y = 600, the resulting Color from this is: {[Color: R=0, G=2, B=88, A=255, PackedValue=4283957760]}. I then have an area in my Walk Behind Mask that I only want the character to be displayed behind if his Y value is lower than 655, so I've painted it with R=0, G=2, B=143, A=255. Now, I think I have the shader OK as well, here's what I have: sampler BaseTexture : register(s0); sampler MaskTexture : register(s1); sampler PositionTexture : register(s2); float4 mask( float2 coords : TEXCOORD0 ) : COLOR0 { float4 color = tex2D(BaseTexture, coords); float4 maskColor = tex2D(MaskTexture, coords); float4 positionColor = tex2D(PositionTexture, coords); float maskCompare = (maskColor.r * pow(2,24)) + (maskColor.g * pow(2,16)) + (maskColor.b * pow(2,8)); float positionCompare = (positionColor.r * pow(2,24)) + (positionColor.g * pow(2,16)) + (positionColor.b * pow(2,8)); return positionCompare < maskCompare ? float4(0,0,0,0) : color; } technique Technique1 { pass NoEffect { PixelShader = compile ps_3_0 mask(); } } This isn't working, however - currently all characters are displayed behind the walk behind area, regardless of their Y value. I tried printing out some debug info by grabbing the pixel from both the Position Mask and the Walk Under Mask under the current mouse position, and it seems like maybe the colours aren't being rendered to the Position Mask correctly? When calculating the colour in that code above I'm getting R=0, G=2, B=88, A=255, but when I mouseover my character I get R=0, G=0, B=30, A=255. Any ideas what I'm doing wrong? It seems like maybe I'm losing some information when rendering to the RenderTarget2D, but I'm now knowledgeable enough to figure out what's happening. Also, I should probably ask, is this an efficient way to do this? Will there be a performance impact? 
    Edit: Whoops, it turns out there was a bug I'd introduced myself: I was drawing the Position Mask with the position color, left over from some early testing I was doing. So this solution is working perfectly, though I'm still interested in whether it is an efficient solution performance-wise.

  • How to restrict paddle movement using Farseer Physics engine 3.2

    - by brainydexter
    I am new to using Farseer Physics Engine 3.2 (FPE), so please bear with my questions. Also, since FPE 3.2 is based on Box2D, I have been reading the Box2D manual and pieces of code scattered in the samples to better understand terminology and usage. Pong is usually my testbed whenever I try something new. Here is one of the issues I am running into: how can I restrict the paddles to move only along the Y axis? The ball comes in and knocks the paddles away, and everything floats in space afterwards (box = rectangle and ball = circle). I know MKS is the unit system, but is there a recommendation for the sizes/positions to use? I know this is a very generic question, but it would be good to know a simple set of values one could use for making a game as simple as Pong. Between Box2D and FPE, I have some doubts: what is the recommended way of making a body in FPE? world.CreateBody() does not exist in FPE. The Box2D manual recommends never to "new" a body (since Box2D uses small-object allocators), so is there a recommended way in Farseer to create a body (apart from factories)? In Box2D it is recommended to keep track of the body object, since it is also the parent of its fixture(s). Why is it that in most of the examples the fixture object is tracked instead? Is there a reason why the body is not tracked? Thanks
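
    On the first question: the two usual options in a Box2D-style engine are a prismatic joint, which constrains a body to slide along a single axis, or simply overwriting the unwanted components every update. A sketch of the second option (property names as in Farseer 3.x; verify against your version):

    ```csharp
    // At setup: dynamic body that never rotates from impacts.
    paddle.BodyType = BodyType.Dynamic;
    paddle.FixedRotation = true;

    // Every Update(): pin the paddle to its vertical track.
    paddle.Position = new Vector2(paddleX, paddle.Position.Y);
    paddle.LinearVelocity = new Vector2(0f, paddle.LinearVelocity.Y);
    ```

    On units: Box2D's guidance is to keep shapes roughly in the 0.1 to 10 meter range, so a paddle around 0.3 x 2 and a ball of radius 0.1 behave far better than pixel-sized values, with a camera transform doing the meters-to-pixels scaling.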

  • RenderTarget2D behavior in XNA

    - by Utkarsh Sinha
    I've been dabbling with XNA for a couple of days now. This chunk of code doesn't work as I expect. The goal is to render sprites individually and composite them on another rendertarget. P = RenderTarget2D(with RenderTargetUsage.PreserveContents) D = RenderTarget2D(with RenderTargetUsage.DiscardContents) for all sprites: graphicsDevice.SetRenderTarget(D); <draw sprite i> graphicsDevice.SetRenderTarget(P); <Draw D> graphicsDevice.SetRenderTarget(null); <Draw P> The result I get is - only the last sprite is visible. I'm sure I'm missing some piece of information about RenderTarget2D. Any hints on what that might be? Cross posted from - http://stackoverflow.com/questions/9970349/weird-rendertarget2d-behaviour
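
    A plausible explanation, given XNA 4's render-target semantics: binding D (DiscardContents) replaces its previous contents with the opaque purple debug fill, and drawing that full-screen opaque texture into P then covers every sprite composited earlier, so only the last one survives. Clearing D to transparent on every bind, and drawing it into P with alpha blending, preserves the earlier sprites:

    ```csharp
    foreach (var sprite in sprites)
    {
        graphicsDevice.SetRenderTarget(D);
        graphicsDevice.Clear(Color.Transparent); // D is undefined/purple after binding
        // <draw sprite>

        graphicsDevice.SetRenderTarget(P);       // PreserveContents keeps prior sprites
        // <draw D with BlendState.AlphaBlend so empty pixels stay see-through>
    }
    graphicsDevice.SetRenderTarget(null);
    // <draw P>
    ```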

  • Extract derived 3D scaling from a 3D Sprite to set to a 2D billboard

    - by Bill Kotsias
    I am trying to get the derived position and scaling of a 3D Sprite and set them to a 2D Sprite. I have managed to do the first part like this: var p:Point = sprite3d.local3DToGlobal(new Vector3D(0,0,0)); billboard.x = p.x; billboard.y = p.y; But I can't get the scaling part correctly. I am trying this: var mat:Matrix3D = sprite3d.transform.getRelativeMatrix3D(stage); // get derived matrix(?) var scaleV:Vector3D = mat.decompose()[2]; // get scaling vector from derived matrix var scale:Number = scaleV.length; billboard.scaleX = scale; billboard.scaleY = scale; ...but the result is apparently wrong. PS. One might ask what I am trying to achieve. I am trying to create "billboard" 3D sprites, i.e. sprites which are affected by all 3D transformations except rotations, thus they always face the "camera".
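
    One likely culprit is the last step: decompose() returns the per-axis scale as a Vector3D, so taking the vector's length over-counts a uniform scale by a factor of sqrt(3); a (1,1,1) scale already has a length of about 1.73. Reading the components directly should behave better:

    ```actionscript
    var components:Vector.<Vector3D> = mat.decompose(); // [translation, rotation, scale]
    var scaleV:Vector3D = components[2];
    billboard.scaleX = scaleV.x; // per-axis scale, not scaleV.length
    billboard.scaleY = scaleV.y;
    ```

    Note that the matrix scale does not include perspective shrinking with distance; in Flash that comes from the projection, roughly focalLength / (focalLength + z) for the default PerspectiveProjection, so it has to be applied separately if wanted.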

  • How to create a fountain in UDK

    - by user36425
    I'm trying to make a fountain in my level in UDK. I made the base of the fountain using a Cylinder builder brush, and now I'm trying to put water in it. I went to use the FluidSurfaceActor, but I noticed that it is square while my fountain is a cylinder. Is there a way to change the shape of the FluidSurfaceActor to fit the builder brush shape, or is there another way to do this? Or is it hopeless, and do I have to make my fountain into a cube? Here is a link/picture to the screenshot of what I'm talking about:

  • Best way to Draw a cube for 3D Picking on a specific face

    - by Kenneth Bray
    Currently I am drawing a cube for a game that I am making and the cube draw method is below. My question is, what is the best way to draw a cube and to be able to easily find the face that the cursor is over? My draw method works just fine, but I am getting ready to start to add picking (this will be used to mold the cubes into other shaps), and would like to know the best way to find a face of the cube. public void Draw() { // center point posX, posY, posZ float radius = size / 2; //top glPushMatrix(); glBegin(GL_QUADS); { glColor3f(1.0f,0.0f,0.0f); // red glVertex3f(posX + radius, posY + radius, posZ - radius); glVertex3f(posX - radius, posY + radius, posZ - radius); glVertex3f(posX - radius, posY + radius, posZ + radius); glVertex3f(posX + radius, posY + radius, posZ + radius); } glEnd(); glPopMatrix(); //bottom glPushMatrix(); glBegin(GL_QUADS); { glColor3f(1.0f,1.0f,0.0f); // ?? color glVertex3f(posX + radius, posY - radius, posZ + radius); glVertex3f(posX - radius, posY - radius, posZ + radius); glVertex3f(posX - radius, posY - radius, posZ - radius); glVertex3f(posX + radius, posY - radius, posZ - radius); } glEnd(); glPopMatrix(); //right side glPushMatrix(); glBegin(GL_QUADS); { glColor3f(1.0f,0.0f,1.0f); // ?? color glVertex3f(posX + radius, posY + radius, posZ + radius); glVertex3f(posX + radius, posY - radius, posZ + radius); glVertex3f(posX + radius, posY - radius, posZ - radius); glVertex3f(posX + radius, posY + radius, posZ - radius); } glEnd(); glPopMatrix(); //left side glPushMatrix(); glBegin(GL_QUADS); { glColor3f(0.0f,1.0f,1.0f); // ?? color glVertex3f(posX - radius, posY + radius, posZ - radius); glVertex3f(posX - radius, posY - radius, posZ - radius); glVertex3f(posX - radius, posY - radius, posZ + radius); glVertex3f(posX - radius, posY + radius, posZ + radius); } glEnd(); glPopMatrix(); //front side glPushMatrix(); glBegin(GL_QUADS); { glColor3f(0.0f,0.0f,1.0f); // blue glVertex3f(posX + radius, posY + radius, posZ + radius); glVertex3f(posX - radius, posY + radius, posZ + radius); glVertex3f(posX - radius, posY - radius, posZ + radius); glVertex3f(posX + radius, posY - radius, posZ + radius); } glEnd(); glPopMatrix(); //back side glPushMatrix(); glBegin(GL_QUADS); { glColor3f(0.0f,1.0f,0.0f); // green glVertex3f(posX + radius, posY - radius, posZ - radius); glVertex3f(posX - radius, posY - radius, posZ - radius); glVertex3f(posX - radius, posY + radius, posZ - radius); glVertex3f(posX + radius, posY + radius, posZ - radius); } glEnd(); glPopMatrix(); }
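
    For an axis-aligned cube the standard answer is a ray/slab intersection that remembers which slab produced the entry point: that axis, combined with the sign of the ray direction on it, identifies the face. A compact sketch (plain arrays instead of a vector type; returns -1 on a miss):

    ```csharp
    // Faces: 0=+X, 1=-X, 2=+Y, 3=-Y, 4=+Z, 5=-Z.
    static int PickFace(float[] o, float[] d, float[] min, float[] max)
    {
        float tMin = float.MinValue, tMax = float.MaxValue;
        int hitAxis = -1;
        for (int i = 0; i < 3; i++)
        {
            if (Math.Abs(d[i]) < 1e-6f)
            {
                if (o[i] < min[i] || o[i] > max[i]) return -1; // parallel, outside slab
                continue;
            }
            float t1 = (min[i] - o[i]) / d[i];
            float t2 = (max[i] - o[i]) / d[i];
            if (t1 > t2) { float tmp = t1; t1 = t2; t2 = tmp; }
            if (t1 > tMin) { tMin = t1; hitAxis = i; } // latest entry slab = hit face
            if (t2 < tMax) tMax = t2;
            if (tMin > tMax) return -1;                // slabs do not overlap: miss
        }
        if (hitAxis < 0) return -1;                    // ray started inside the box
        return hitAxis * 2 + (d[hitAxis] > 0 ? 1 : 0); // d > 0 enters the negative face
    }
    ```

    The ray itself comes from unprojecting the cursor with the current view and projection matrices; with many cubes, keep the hit with the smallest tMin. Color picking (render each face in a unique flat color offscreen and read the pixel under the cursor) is a popular alternative that avoids the math entirely.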

  • State of the art Culling and Batching techniques in rendering

    - by Kristian Skarseth
    I'm currently upgrading and restructuring an OpenGL render engine. The engine is used for visualising large scenes of architectural data (buildings with interiors), and the number of objects can become rather large. As is the case with any building, there are a lot of occluded objects within walls, and you naturally only see the objects that are in the same room as you, or the exterior if you are outside. This leaves a large number of objects that should be removed through occlusion culling and frustum culling. At the same time there is a lot of repetitive geometry that can be batched into render batches, and also a lot of objects that could use instanced rendering. The way I see it, it can be difficult to combine render batching and culling in an optimal fashion. If you batch too many objects into the same VBO, it's difficult to cull the objects on the CPU in order to skip rendering that batch. At the same time, if you skip the culling on the CPU, a lot of objects will be processed by the GPU while they are not visible. If you skip batching completely in order to cull more easily on the CPU, there will be an unwantedly high number of render calls. I have done some research into existing techniques and theories about how these problems are solved in modern graphics, but I have not been able to find any concrete solution. An idea a colleague and I came up with was restricting batches to objects relatively close to each other, e.g. all chairs in a room, or everything within a radius of n meters; this could be simplified and optimized through the use of octrees. Does anyone have any pointers to techniques used for scene management, culling, batching, etc. in state-of-the-art modern graphics engines?
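
    A common compromise is to make the spatial structure and the batching structure the same thing: batch per (material, cell) rather than per material across the whole scene, so each batch can be frustum- or occlusion-culled with a single bounding-volume test and drawn with a single call. For architectural interiors this pairs naturally with cells-and-portals, one cell per room. A sketch of the bucketing step (types are hypothetical):

    ```csharp
    // Group instances by material and spatial cell so a whole bucket can be
    // culled with one AABB test and drawn with one (instanced) draw call.
    var buckets = new Dictionary<(int material, int x, int y, int z), List<Instance>>();
    foreach (var obj in sceneObjects)
    {
        var key = (obj.MaterialId,
                   (int)Math.Floor(obj.Center.X / cellSize),
                   (int)Math.Floor(obj.Center.Y / cellSize),
                   (int)Math.Floor(obj.Center.Z / cellSize));
        if (!buckets.TryGetValue(key, out var list))
            buckets[key] = list = new List<Instance>();
        list.Add(obj);
    }
    // Render: test each bucket's AABB against the frustum (or an occlusion
    // query), then draw the survivors; culling granularity == batch granularity.
    ```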

  • How do I consistently re-size my game window and elements?

    - by Milo
    In my 2D game, I have a flow layout. Inside the flow layout are tables. I have a slider that lets the user make the tables larger or smaller. This makes the background larger or smaller too. Everything should scale proportionally which means the background should stay at the same position when I make things larger, and it almost does. When the scrollbar is at 0, it does exactly this. As the scrollbar gets further down problems arise. I'll toggle the slider maybe 3 times and on the fourth time, the background jumps a little lower on the Y axis. In order to be efficient, I only start rendering the background near the parent of the flow layout. Here it is: void LobbyTableManager::renderBG( GraphicsContext* g, agui::Rectangle& absRect, agui::Rectangle& childRect ) { int cx, cy, cw, ch; g->getClippingRect(cx,cy,cw,ch); g->setClippingRect(absRect.getX(),absRect.getY(),absRect.getWidth(),absRect.getHeight()); float scale = 0.35f; int w = m_bgSprite->getWidth() * getTableScale() * scale; int h = m_bgSprite->getHeight() * getTableScale() * scale; int numX = ceil(absRect.getWidth() / (float)w) + 2; int numY = ceil(absRect.getHeight() / (float)h) + 2; float offsetX = m_activeTables[0]->getLocation().getX() - w; float offsetY = m_activeTables[0]->getLocation().getY() - h; int startY = childRect.getY(); if(moo) { std::cout << "S=" << startY << ","; } int numAttempts = 0; while(startY + h < absRect.getY() && numAttempts < 1000) { startY += h; if(moo) { std::cout << startY << ","; } numAttempts++; } if(moo) { std::cout << "\n"; moo = false; } g->holdDrawing(); for(int i = 0; i < numX; ++i) { for(int j = 0; j < numY; ++j) { g->drawScaledSprite(m_bgSprite,0,0,m_bgSprite->getWidth(),m_bgSprite->getHeight(), absRect.getX() + (i * w) + (offsetX),absRect.getY() + (j * h) + startY,w,h,0); } } g->unholdDrawing(); g->setClippingRect(cx,cy,cw,ch); } The numeric problem seems to be in the value of startY. I outputted startY figuring out its value: As you can see here, this is me only zooming in, pay attention to the final number before the next s=. You'll notice that, what should happen is, the numbers should be linear, ex: -40, -38, -36, -34, -32, -30, etc. As you'll notice, the start numbers linearly correlate ex: 62k, 64k, 66k, 68k, 70k etc.. but the end result is wrong every third or 4th time. Here is most of the resize code: void LobbyTableManager::setTableScale( float scale ) { scale += 0.3f; scale *= 2.0f; agui::Gui* gotGui = getGui(); float scrollRel = m_vScroll->getRelativeValue(); setScale(scale); rescaleTables(); resizeFlow(); if(gotGui) { gotGui->toggleWidgetLocationChanged(false); } updateScrollBars(); float newVal = scrollRel * m_vScroll->getMaxValue(); if((int)(newVal + 0.5f) > (int)newVal) { newVal++; } m_vScroll->setValue(newVal); static int x = 0; x++; moo = true; //std::cout << m_vScroll->getValue() << std::endl; if(gotGui) { gotGui->toggleWidgetLocationChanged(true); } if(gotGui) { gotGui->_widgetLocationChanged(); } } void LobbyTableManager::valueChanged( agui::VScrollBar* source,int val ) { if(getGui()) { getGui()->toggleWidgetLocationChanged(false); } m_flow->setLocation(0,-val); if(getGui()) { getGui()->toggleWidgetLocationChanged(true); getGui()->_widgetLocationChanged(); } }
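
    The stepping loop is the likely source of the jump: startY advances in multiples of h, and both the float accumulation and the truncation of w and h to int mean the loop can land on a different multiple after a few zoom steps. Two changes usually cure this kind of drift: keep the tile period as a float, and compute the wrap in closed form instead of looping. A sketch of the closed form (same semantics as the while loop above):

    ```cpp
    // Jump straight to the first grid row at or above absRect's top edge,
    // with the grid anchored at childRect.getY(); no iterative accumulation.
    float gridOrigin = (float)childRect.getY();
    float period = (float)h;                       // better: keep h itself a float
    float k = std::ceil((absRect.getY() - period - gridOrigin) / period);
    float startY = gridOrigin + std::max(0.0f, k) * period;
    ```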

  • Best memory allocation strategy for iOS ?

    - by Mr.Gando
    Hey guys, I'm debating with myself about memory allocation on iOS. I write most of my code in C++ and I really like using object pools, free lists, etc. to pre-allocate a lot of the stuff that I'll be constantly allocating and deallocating during the course of my game (like particles, game entities, etc.). Still, on iOS it's not like we are developing for a console like the PSP, where I know for a fact that I'll get a fixed amount of memory; iOS will issue "memory warnings" when the system needs memory. Does anyone have suggestions about this? Is it less serious now that the new iPod touch/iPhone 4 carry more RAM, or is it still a big concern? Thanks!
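
    Pools still pay off on iOS; the usual adaptation is to let them grow on demand and give each pool a trim hook that the memory-warning path (didReceiveMemoryWarning, or the equivalent notification) calls to drop cached free objects. A sketch of the shape of such a pool in C++ (simplified; no thread safety):

    ```cpp
    #include <vector>

    template <typename T>
    class TrimmablePool {
    public:
        T* Acquire() {
            if (free_.empty()) return new T();     // grow lazily instead of fixed-size
            T* obj = free_.back();
            free_.pop_back();
            return obj;
        }
        void Release(T* obj) { free_.push_back(obj); }
        void Trim() {                              // call from the memory-warning handler
            for (size_t i = 0; i < free_.size(); ++i) delete free_[i];
            free_.clear();
        }
    private:
        std::vector<T*> free_;                     // objects waiting for reuse
    };
    ```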

  • Create edges in Blender

    - by Mikey
    I've worked with 3DS Max at uni and am trying to learn Blender. My problem is that I know a lot of simple techniques from 3DS Max that I'm having trouble translating into Blender. So my question is: say I have a poly in the middle of a mesh and I want to split it in two by simply adding an edge between two of its edges. This would leave a 5-gon on either side. It's a simple technique I use every now and then when I want to modify geometry; it's called "Edge Connect" in 3DS Max. In Blender the only edge-connect method I can find creates full edge loops, which is not helpful when aiming at low-poly iPhone games. Is there an equivalent in Blender?

  • Accept keyboard input when game is not in focus?

    - by Corey Ogburn
    I want to be able to control the game via keyboard while the game does not have focus... How can I do this in XNA? EDIT: I bought a tablet. I want to write a separate app to overlay the screen with controls that send keyboard input to the game. It's not sending the input directly to the game; it's using the method discussed in this SO question: http://stackoverflow.com/questions/6446085/emulate-held-down-key-on-keyboard To my understanding, my test app is working the way it should, but the game is not responding to this input. I originally thought that Keyboard.GetState() would get the state regardless of whether the game is in focus, but that doesn't appear to be the case.
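
    Keyboard.GetState() reads XNA's input state, which only tracks the window while it is active, so no amount of synthesized key events will reach an unfocused game through it. A low-level keyboard hook does see every keystroke system-wide; a minimal P/Invoke sketch (standard user32 signatures, error handling omitted):

    ```csharp
    using System;
    using System.Diagnostics;
    using System.Runtime.InteropServices;

    static class GlobalKeyboard
    {
        const int WH_KEYBOARD_LL = 13;
        delegate IntPtr HookProc(int nCode, IntPtr wParam, IntPtr lParam);

        [DllImport("user32.dll")]
        static extern IntPtr SetWindowsHookEx(int idHook, HookProc lpfn, IntPtr hMod, uint dwThreadId);
        [DllImport("user32.dll")]
        static extern IntPtr CallNextHookEx(IntPtr hhk, int nCode, IntPtr wParam, IntPtr lParam);
        [DllImport("kernel32.dll")]
        static extern IntPtr GetModuleHandle(string lpModuleName);

        static readonly HookProc Proc = Callback;  // keep a reference so the GC spares it
        static IntPtr hook;

        public static void Install()
        {
            string module = Process.GetCurrentProcess().MainModule.ModuleName;
            hook = SetWindowsHookEx(WH_KEYBOARD_LL, Proc, GetModuleHandle(module), 0);
        }

        static IntPtr Callback(int nCode, IntPtr wParam, IntPtr lParam)
        {
            if (nCode >= 0) { /* read the KBDLLHOOKSTRUCT at lParam, queue it for the game */ }
            return CallNextHookEx(hook, nCode, wParam, lParam);
        }
    }
    ```

    Low-level hooks require a message loop on the installing thread; installing from the game's main window thread satisfies that.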

  • What is a better abstraction layer for D3D9 and OpenGL vertex data management?

    - by Sam Hocevar
    My rendering code has always been OpenGL. I now need to support a platform that does not have OpenGL, so I have to add an abstraction layer that wraps OpenGL and Direct3D 9. I will support Direct3D 11 later. TL;DR: the differences between OpenGL and Direct3D cause redundancy for the programmer, and the data layout feels flaky. For now, my API works a bit like this. This is how a shader is created: Shader *shader = Shader::Create( " ... GLSL vertex shader ... ", " ... GLSL pixel shader ... ", " ... HLSL vertex shader ... ", " ... HLSL pixel shader ... "); ShaderAttrib a1 = shader->GetAttribLocation("Point", VertexUsage::Position, 0); ShaderAttrib a2 = shader->GetAttribLocation("TexCoord", VertexUsage::TexCoord, 0); ShaderAttrib a3 = shader->GetAttribLocation("Data", VertexUsage::TexCoord, 1); ShaderUniform u1 = shader->GetUniformLocation("WorldMatrix"); ShaderUniform u2 = shader->GetUniformLocation("Zoom"); There is already a problem here: once a Direct3D shader is compiled, there is no way to query an input attribute by its name; apparently only the semantics stay meaningful. This is why GetAttribLocation has these extra arguments, which get hidden in ShaderAttrib. Now this is how I create a vertex declaration and two vertex buffers: VertexDeclaration *decl = VertexDeclaration::Create( VertexStream<vec3,vec2>(VertexUsage::Position, 0, VertexUsage::TexCoord, 0), VertexStream<vec4>(VertexUsage::TexCoord, 1)); VertexBuffer *vb1 = new VertexBuffer(NUM * (sizeof(vec3) + sizeof(vec2)); VertexBuffer *vb2 = new VertexBuffer(NUM * sizeof(vec4)); Another problem: the information VertexUsage::Position, 0 is totally useless to the OpenGL/GLSL backend because it does not care about semantics. Once the vertex buffers have been filled with or pointed at data, this is the rendering code: shader->Bind(); shader->SetUniform(u1, GetWorldMatrix()); shader->SetUniform(u2, blah); decl->Bind(); decl->SetStream(vb1, a1, a2); decl->SetStream(vb2, a3); decl->DrawPrimitives(VertexPrimitive::Triangle, NUM / 3); decl->Unbind(); shader->Unbind(); You see that decl is a bit more than just a D3D-like vertex declaration, it kinda takes care of rendering as well. Does this make sense at all? What would be a cleaner design? Or a good source of inspiration?
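
    One way to cut the redundancy is to make the name/semantic pair a single declaration owned by the shader, so call sites stop repeating information one backend will ignore. Each backend then consumes only its half: GL resolves by name, D3D9 by usage and index. A sketch of the idea (types hypothetical):

    ```cpp
    struct AttribDecl {
        VertexUsage usage;   // consumed by the D3D9 backend (semantic)
        int index;           // semantic index
        char const *name;    // consumed by the GL backend (GLSL attribute name)
    };

    // Declared once, next to the shader sources, not at every call site:
    static AttribDecl const QuadAttribs[] = {
        { VertexUsage::Position, 0, "Point"    },
        { VertexUsage::TexCoord, 0, "TexCoord" },
        { VertexUsage::TexCoord, 1, "Data"     },
    };
    // Shader::Create(..., QuadAttribs) resolves ShaderAttrib handles for the
    // active backend, and the VertexDeclaration is built from the same table,
    // so each semantic exists in exactly one place.
    ```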

  • Axis-Aligned Bounding Boxes vs Bounding Ellipse

    - by Griffin
    Why is it that most, if not all, collision detection algorithms today require each body to have an AABB for use in the broad phase? It seems to me that simply placing a circle at the body's centroid and extending the radius until the circle encompasses the entire body would be optimal: it would not need to be updated after the body rotates, and the broad overlap calculation would be faster too. Correct? Bonus: would a bounding ellipse also be practical for broad-phase calculations, since it would better represent long, skinny shapes? Or would it require extensive calculations, defeating the purpose of the broad phase?
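
    Part of the answer is that raw test cost is not the deciding factor; both tests are trivially cheap, as the sketch below shows. AABBs win because broad phases like sweep-and-prune sort interval endpoints along each axis, which AABBs provide directly, and because a circle around a long, thin body encloses far more empty space than its AABB, producing many false positives. Updating an AABB after rotation is also just a cheap recompute.

    ```csharp
    // AABB vs AABB: four comparisons, no multiplications.
    static bool AabbOverlap(float aMinX, float aMinY, float aMaxX, float aMaxY,
                            float bMinX, float bMinY, float bMaxX, float bMaxY)
        => aMinX <= bMaxX && bMinX <= aMaxX && aMinY <= bMaxY && bMinY <= aMaxY;

    // Circle vs circle: three multiplications; also cheap, so cost is a wash.
    static bool CircleOverlap(float ax, float ay, float ar,
                              float bx, float by, float br)
    {
        float dx = ax - bx, dy = ay - by, r = ar + br;
        return dx * dx + dy * dy <= r * r;
    }
    ```

    An ellipse would bound skinny shapes tightly, but a rotated-ellipse overlap test is far more expensive than either test above, which is exactly what a broad phase tries to avoid.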

  • How to get tilemap transparency color working with TiledLib's Demo implementation?

    - by Adam LaBranche
    So the problem I'm having is that when using Nick Gravelyn's tiledlib pipeline for reading and drawing tmx maps in XNA, the transparency color I set in Tiled's editor will work in the editor, but when I draw it the color that's supposed to become transparent still draws. The closest things to a solution that I've found are - 1) Change my sprite batch's BlendState to NonPremultiplied (found this in a buried Tweet). 2) Get the pixels that are supposed to be transparent at some point then Set them all to transparent. Solution 1 didn't work for me, and solution 2 seems hacky and not a very good way to approach this particular problem, especially since it looks like the custom pipeline processor reads in the transparent color and sets it to the color key for transparency according to the code, just something is going wrong somewhere. At least that's what it looks like the code is doing. TileSetContent.cs if (imageNode.Attributes["trans"] != null) { string color = imageNode.Attributes["trans"].Value; string r = color.Substring(0, 2); string g = color.Substring(2, 2); string b = color.Substring(4, 2); this.ColorKey = new Color((byte)Convert.ToInt32(r, 16), (byte)Convert.ToInt32(g, 16), (byte)Convert.ToInt32(b, 16)); } ... TiledHelpers.cs // build the asset as an external reference OpaqueDataDictionary data = new OpaqueDataDictionary(); data.Add("GenerateMipMaps", false); data.Add("ResizetoPowerOfTwo", false); data.Add("TextureFormat", TextureProcessorOutputFormat.Color); data.Add("ColorKeyEnabled", tileSet.ColorKey.HasValue); data.Add("ColorKeyColor", tileSet.ColorKey.HasValue ? tileSet.ColorKey.Value : Microsoft.Xna.Framework.Color.Magenta); tileSet.Texture = context.BuildAsset<Texture2DContent, Texture2DContent>( new ExternalReference<Texture2DContent>(path), null, data, null, asset); ... I can share more code as well if it helps to understand my problem. Thank you.
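
    If the pipeline route stays stubborn, option 2 above does not have to be hacky: applying the color key once at load time costs one GetData/SetData pair per texture and uses only stock XNA calls. A sketch:

    ```csharp
    // One-time fixup after loading the tileset texture.
    static void ApplyColorKey(Texture2D texture, Color key)
    {
        var pixels = new Color[texture.Width * texture.Height];
        texture.GetData(pixels);
        for (int i = 0; i < pixels.Length; i++)
            if (pixels[i] == key)
                pixels[i] = Color.Transparent;
        texture.SetData(pixels);
    }
    ```

    Since Color.Transparent is all zeros, the result draws correctly under both AlphaBlend (premultiplied) and NonPremultiplied blend states.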

  • How can I support objects larger than a single tile in a 2D tile engine?

    - by Yheeky
    I'm currently working on a 2D engine containing an isometric tile map. It's running quite well, but I'm not sure if I've chosen the best approach for that kind of engine. To give you an idea of what I'm thinking about right now, let's have a look at a basic object for a tile map and its objects: public class TileMap { public List<MapRow> Rows = new List<MapRow>(); public int MapWidth = 50; public int MapHeight = 50; } public class MapRow { public List<MapCell> Columns = new List<MapCell>(); } public class MapCell { public int TileID { get; set; } } With these objects it is only possible to assign a tile to a single MapCell. What I want my engine to support is groups of MapCells, since I would like to add objects to my tile map (e.g. a house with a size of 2x2 tiles). How should I do that? Should I edit my MapCell object so that it may have a reference to other related tiles, and how can I find an object when clicking on a single MapCell? Or should I take another approach, using a global container with all objects in it?
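
    A common pattern keeps the grid as pure references: a multi-tile object lives once (in a global list or on its anchor cell) and every cell of its footprint points back at it, so a click on any covered cell resolves to the object in O(1) while the object's data is never duplicated. Sketched as an extension of the classes above:

    ```csharp
    public class MapObject
    {
        public int AnchorX, AnchorY;       // top-left cell of the footprint
        public int CellsWide, CellsHigh;   // e.g. 2x2 for the house
    }

    public class MapCell
    {
        public int TileID { get; set; }
        public MapObject Occupant;         // null, or the object covering this cell
    }

    // Placing an object stamps its reference into every covered cell:
    public void Place(MapObject obj, TileMap map)
    {
        for (int y = obj.AnchorY; y < obj.AnchorY + obj.CellsHigh; y++)
            for (int x = obj.AnchorX; x < obj.AnchorX + obj.CellsWide; x++)
                map.Rows[y].Columns[x].Occupant = obj;
    }
    // Click handling then just reads clickedCell.Occupant.
    ```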

  • Blending textures together, texture fade over / fade in

    - by Deukalion
    What is the best way to render a texture-overlap effect, like in this example? I want either the grass to fade into the snow texture, or the other way around. No rough edges; somehow make them blend over, so the grass has a bit of snow or the snow has a bit of grass. How is this possible at runtime, if it is possible at all? I don't render this using SpriteBatch, since the ground isn't made of rectangles (they can be moved). This is the way I render each shape (each one of those squares): // LoadTexture // Apply EffectPass device.DrawUserIndexedPrimitives<VertexPositionNormalTexture> ( PrimitiveType.TriangleList, render.Item.Points, // Array of VertexPositionNormalTexture 0, render.Item.Points.Length, render.Item.Indexes, // Array of int indexes (triangulation) 0, render.Item.Indexes.Length / 3, VertexPositionNormalTexture.VertexDeclaration );
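
    The standard technique for this is texture splatting: sample both ground textures and interpolate by a per-pixel weight from a blend map (or a per-vertex weight baked into the mesh), so the transition band genuinely mixes grass and snow. A minimal pixel-shader sketch of the idea (HLSL; sampler and map names are hypothetical):

    ```hlsl
    sampler2D GrassMap : register(s0);
    sampler2D SnowMap  : register(s1);
    sampler2D BlendMap : register(s2);      // r = 0 -> grass, r = 1 -> snow

    float4 PS_Splat(float2 uv : TEXCOORD0) : COLOR0
    {
        float4 grass = tex2D(GrassMap, uv);
        float4 snow  = tex2D(SnowMap, uv);
        float  w     = tex2D(BlendMap, uv).r;  // soft gradient = soft edge
        return lerp(grass, snow, w);
    }
    ```

    Since the ground pieces can move, the blend map can be generated at runtime (e.g. rendered or painted into a small texture per shape) rather than authored offline.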

  • How to raycast select a scaled OBB?

    - by user3254944
    I have the OBB picking code to select an OBB with code inspired from Real time Rendering 3 and opengl-tutorial.org. I can successfully select objects that have been moved or rotated. However, I cant correctly select an object that has been scaled. The bounding box scales right, but the I can only select the object in a thin strip on its center. How do I fix the checkForHits() function to allow it to read the scaling that I passed to it in the raycast matrix? void GLWidget::selectObjRaycast() { glm::vec2 mouse = (glm::vec2(mousePos.x(), mousePos.y()) / glm::vec2(this->width(), this->height())) * 2.0f - 1.0f; mouse.y *= -1; glm::mat4 toWorld = glm::inverse(ProjectionM * ViewM); glm::vec4 from = toWorld * glm::vec4(mouse, -1.0f, 1.0f); glm::vec4 to = toWorld * glm::vec4(mouse, 1.0f, 1.0f); from /= from.w; to /= to.w; fromAABB = glm::vec3(from); toAABB = glm::normalize(glm::vec3(to - from)); checkForHits(); } void GLWidget::checkForHits() { for (int i = 0; i < myWin.myEtc->allObj.size(); ++i) //check for hits on each obj's bb { bool miss = 0; float tMin = 0.0f; float tMax = 100000.0f; glm::vec3 bbPos(myWin.myEtc->allObj[i]->raycastM[3].x, myWin.myEtc->allObj[i]->raycastM[3].y, myWin.myEtc->allObj[i]->raycastM[3].z); glm::vec3 delta = bbPos - fromAABB; for (int j = 0; j < 3; ++j) { glm::vec3 axis(myWin.myEtc->allObj[i]->raycastM[j].x, myWin.myEtc->allObj[i]->raycastM[j].y, myWin.myEtc->allObj[i]->raycastM[j].z); float e = glm::dot(axis, delta); float f = glm::dot(toAABB, axis); if (fabs(f) > 0.001f) { float t1 = (e + myWin.myEtc->allObj[i]->bbMin[j]) / f; float t2 = (e + myWin.myEtc->allObj[i]->bbMax[j]) / f; if (t1 > t2) { float w = t1; t1 = t2; t2 = w; } if (t2 < tMax) tMax = t2; if (t1 > tMin) tMin = t1; if (tMax < tMin) miss = 1; } else { if (-e + myWin.myEtc->allObj[i]->bbMin[j] > 0.0f || -e + myWin.myEtc->allObj[i]->bbMax[j] < 0.0f) miss = 1; } } if (miss == 0) { intersection_distance = tMin; myWin.myEtc->sel.push_back(myWin.myEtc->allObj[i]); myWin.myEtc->allObj[i]->highlight = myWin.myGLHelp->highlight; break; } } } void Object::render(glm::mat4 PV) { scaleM = glm::scale(glm::mat4(), s->val_3); r_quat = glm::quat(glm::radians(r->val_3)); rotationM = glm::toMat4(r_quat); translationM = glm::translate(glm::mat4(), t->val_3); transLocal1M = glm::translate(glm::mat4(), -rsPivot->val_3); transLocal2M = glm::translate(glm::mat4(), rsPivot->val_3); raycastM = translationM * transLocal2M * rotationM * scaleM * transLocal1M; // MVP = PV * translationM * transLocal2M * rotationM * scaleM * transLocal1M; }
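
    The slab test above assumes unit-length axes: e = dot(axis, delta) and f = dot(toAABB, axis) are only meaningful distances when axis is normalized. Scaling bakes the scale factors into raycastM's basis vectors, so every comparison against bbMin/bbMax is off by that factor, which matches the thin-strip symptom. Normalizing each axis and moving its length onto the extents is the usual fix; sketched against the inner loop above:

    ```cpp
    glm::vec3 axis(myWin.myEtc->allObj[i]->raycastM[j].x,
                   myWin.myEtc->allObj[i]->raycastM[j].y,
                   myWin.myEtc->allObj[i]->raycastM[j].z);
    float axisLen = glm::length(axis);  // the scale factor baked into this axis
    axis /= axisLen;                    // slab math requires a unit axis

    float bbMinScaled = myWin.myEtc->allObj[i]->bbMin[j] * axisLen;
    float bbMaxScaled = myWin.myEtc->allObj[i]->bbMax[j] * axisLen;
    // ... then use axis, bbMinScaled and bbMaxScaled in the existing e/f code.
    ```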
