Search Results

Search found 38203 results on 1529 pages for 'library development'.

Page 490/1529 | < Previous Page | 486 487 488 489 490 491 492 493 494 495 496 497  | Next Page >

  • UDK game Prisoners/Guards

    - by RR_1990
    For school I need to make a little game with UDK. The concept of the game is: the player is the head guard, and he has some other guards (bots) who follow him. Between the other guards and the player are some prisoners who need to evade the other guards. It needs to look like this. My idea was to let the guard bots follow the player at a certain distance and let the prisoner bots in the middle try to evade the guard bots. The problem is that I'm new to UnrealScript and the school doesn't support me that well. Until now I have only been able to make the guard bots follow me. I hope you can help me or point me towards something that will make this game work. Here is the class I'm using to let the bots follow me: class ChaseControllerAI extends AIController; var Pawn player; var float minimalDistance; var float speed; var float distanceToPlayer; var vector selfToPlayer; auto state Idle { function BeginState(Name PreviousStateName) { Super.BeginState(PreviousStateName); } event SeePlayer(Pawn p) { player = p; GotoState('Chase'); } Begin: player = none; self.Pawn.Velocity.x = 0.0; self.Pawn.Velocity.Y = 0.0; self.Pawn.Velocity.Z = 0.0; } state Chase { function BeginState(Name PreviousStateName) { Super.BeginState(PreviousStateName); } event PlayerOutOfReach() { `Log("ChaseControllerAI CHASE Player out of reach."); GotoState('Idle'); } // class ChaseController extends AIController; CONTINUED // State Chase (continued) event Tick(float deltaTime) { `Log("ChaseControllerAI in Event Tick."); selfToPlayer = self.player.Location - self.Pawn.Location; distanceToPlayer = Abs(VSize(selfToPlayer)); if (distanceToPlayer > minimalDistance) { PlayerOutOfReach(); } else { self.Pawn.Velocity = Normal(selfToPlayer) * speed; //self.Pawn.Acceleration = Normal(selfToPlayer) * speed; self.Pawn.SetRotation(rotator(selfToPlayer)); self.Pawn.Move(self.Pawn.Velocity*0.001); // or *deltaTime } } Begin: `Log("Current state Chase:Begin: " @GetStateName()@""); } defaultproperties { bAdjustFromWalls=true; bIsPlayer= true; minimalDistance = 1024; //org 1024 speed = 500; }
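
    A rough starting point for the prisoner side is a simple flee rule: each tick, steer away from any guard inside a threat radius. The question is UnrealScript, so the sketch below is only framework-neutral C# pseudologic (the Vec2 and PrisonerBrain names are made up for illustration); the same math ports to a Tick event in a PrisonerController class.

      using System;
      using System.Collections.Generic;

      // Minimal stand-in for an engine vector type; in UDK this would be vector.
      public struct Vec2
      {
          public float X, Y;
          public Vec2(float x, float y) { X = x; Y = y; }
          public static Vec2 operator -(Vec2 a, Vec2 b) { return new Vec2(a.X - b.X, a.Y - b.Y); }
          public static Vec2 operator +(Vec2 a, Vec2 b) { return new Vec2(a.X + b.X, a.Y + b.Y); }
          public static Vec2 operator *(Vec2 a, float s) { return new Vec2(a.X * s, a.Y * s); }
          public float Length() { return (float)Math.Sqrt(X * X + Y * Y); }
          public Vec2 Normalized() { float l = Length(); return l > 0 ? new Vec2(X / l, Y / l) : this; }
      }

      public class PrisonerBrain
      {
          public float ThreatRadius = 512f;  // start fleeing when a guard is this close
          public float FleeSpeed = 400f;

          // Returns the velocity the prisoner should use this tick.
          public Vec2 Flee(Vec2 myPos, IEnumerable<Vec2> guardPositions)
          {
              Vec2 away = new Vec2(0, 0);
              foreach (Vec2 g in guardPositions)
              {
                  Vec2 toMe = myPos - g;
                  float dist = toMe.Length();
                  if (dist < ThreatRadius && dist > 0.001f)
                  {
                      // Closer guards push harder (weight falls off with distance).
                      away = away + toMe.Normalized() * (1f - dist / ThreatRadius);
                  }
              }
              return away.Length() > 0.001f ? away.Normalized() * FleeSpeed : new Vec2(0, 0);
          }
      }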

    Read the article

  • When "W" is held, the character moves forward, but when "W" and "A" is held, movement completely stops

    - by Vlad1k
    I am making a 2D game, and when I hold the key "w", the player goes forward, but when I hold both "w" and "a", the movement stops completely, when I want it to go forward, while shifting to the left. Here is my script: var speed = 4; function Update() { // Make the character walk forward if "w" is being held if(Input.GetKey("w")) { rigidbody.velocity = transform.forward * speed; } // Stop the movement if "w" is not being held if(Input.GetKeyUp("w")) { rigidbody.velocity = transform.forward * 0; } // Make the character walk forward if "s" is being held if(Input.GetKey("s")) { rigidbody.velocity = transform.forward * -speed; } // Stop the movement if "s" is not being held if(Input.GetKeyUp("s")) { rigidbody.velocity = transform.forward * 0; } // Make the character walk left if "a" is being held if(Input.GetKey("a")) { rigidbody.velocity = transform.right * -speed; } // Stop the movement if "a" is not being held if(Input.GetKeyUp("a")) { rigidbody.velocity = transform.right * 0; } //Make the character walk right if "d" is being held if(Input.GetKey("d")) { rigidbody.velocity = transform.right * speed; } // Stop the movement if "d" is not being held if(Input.GetKeyUp("d")) { rigidbody.velocity = transform.right * 0; } } PLEASE MAKE THE CODE BETTER! I AM NEW! EDIT: Here is a video to show my problem. http://www.screenr.com/3oxH Here is the newest code: var speed = 4f; function Update() { if(Input.GetKey("w")) { rigidbody.velocity = transform.forward * speed; } else if(Input.GetKey("s")) { rigidbody.velocity = transform.forward * -speed; } else if(Input.GetKey("a")) { rigidbody.velocity = transform.right * -speed; } else if(Input.GetKey("d")) { rigidbody.velocity = transform.right * speed; } if(Input.GetKeyUp("w")) { rigidbody.velocity = transform.forward * 0; } if(Input.GetKeyUp("s")) { rigidbody.velocity = transform.forward * 0; } if(Input.GetKeyUp("a")) { rigidbody.velocity = transform.right * 0; } if(Input.GetKey("d")) { rigidbody.velocity = transform.right * speed; } if(Input.GetKeyUp("d")) { rigidbody.velocity = transform.right * 0; } }
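
    The stop happens because every key branch overwrites rigidbody.velocity with a vector along a single axis, so whichever branch runs last wins, and the GetKeyUp branches zero the velocity out entirely. Building one combined direction from all keys held this frame and assigning the velocity once avoids the conflict. A minimal C# sketch of that idea (the original is UnityScript; the class and field names here are illustrative):

      using UnityEngine;

      // Attach to the player object; requires a Rigidbody component.
      public class SimpleMovement : MonoBehaviour
      {
          public float moveSpeed = 4f;
          private Rigidbody rb;

          void Start()
          {
              rb = GetComponent<Rigidbody>();
          }

          void FixedUpdate()
          {
              // Accumulate a single direction from all keys held this frame.
              Vector3 dir = Vector3.zero;
              if (Input.GetKey(KeyCode.W)) dir += transform.forward;
              if (Input.GetKey(KeyCode.S)) dir -= transform.forward;
              if (Input.GetKey(KeyCode.D)) dir += transform.right;
              if (Input.GetKey(KeyCode.A)) dir -= transform.right;

              // Normalize so diagonals aren't faster, then assign the velocity once,
              // keeping the existing vertical velocity so gravity still applies.
              Vector3 move = dir.sqrMagnitude > 0f ? dir.normalized * moveSpeed : Vector3.zero;
              rb.velocity = new Vector3(move.x, rb.velocity.y, move.z);
          }
      }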

    Read the article

  • XNA 2D line-of-sight check

    - by bionicOnion
    I'm working on a top-down shooter in XNA, and I need to implement line-of-sight checking. I've come up with a solution that seems to work, but I get the nagging feeling that it won't be efficient enough to do every frame for multiple calls (the game already hiccups slightly at about 10 calls per frame). The code is below, but my general plan was to create a series of rectangles with a width and height of zero to act as points along the sight line, and then check to see if any of these rectangles intersects a ClutterObject (an interface I defined for things like walls or other obstacles) after first screening for any that can't possibly be in the line of sight (i.e. behind the viewer) or are too far away (a concession I made for efficiency). public static bool LOSCheck(Vector2 pos1, Vector2 pos2) { Vector2 currentPos = pos1; Vector2 perMove = (pos2 - pos1); perMove.Normalize(); HashSet<ClutterObject> clutter = new HashSet<ClutterObject>(); foreach (Room r in map.GetRooms()) { if (r != null) { foreach (ClutterObject c in r.GetClutter()) { if (c != null &&!(c.GetRectangle().X * perMove.X < 0) && !(c.GetRectangle().Y * perMove.Y < 0)) { Vector2 cVector = new Vector2(c.GetRectangle().X, c.GetRectangle().Y); if ((cVector - pos1).Length() < 1500) clutter.Add(c); } } } } while (currentPos != pos2 && ((currentPos - pos1).Length() < 1500)) { Rectangle position = new Rectangle((int)currentPos.X, (int)currentPos.Y, 0, 0); foreach (ClutterObject c in clutter) { if (position.Intersects(c.GetRectangle())) return false; } currentPos += perMove; } return true; } I'm sure that there's a better way to do this (or at least a way to make this method more efficient), but I'm not too used to XNA yet, so I figured it couldn't hurt to bring it here. At the very least, is there an efficient to determine which objects may be in front of the viewer with greater precision than the rather broad 90 degree window I've given myself?
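
    A cheaper and more precise alternative to stepping a zero-size rectangle pixel by pixel is a segment-versus-AABB test (the "slab" method): clip the segment against each clutter rectangle and bail out on the first hit. A hedged sketch using XNA types; the collection of rectangles is assumed to come from the same clutter-gathering pass as in the question.

      using System;
      using System.Collections.Generic;
      using Microsoft.Xna.Framework;

      public static class LineOfSight
      {
          // True if the segment from a to b crosses the rectangle (slab method).
          public static bool SegmentIntersectsRect(Vector2 a, Vector2 b, Rectangle r)
          {
              Vector2 d = b - a;
              float tMin = 0f, tMax = 1f;
              if (!ClipAxis(a.X, d.X, r.Left, r.Right, ref tMin, ref tMax)) return false;
              if (!ClipAxis(a.Y, d.Y, r.Top, r.Bottom, ref tMin, ref tMax)) return false;
              return true;
          }

          private static bool ClipAxis(float start, float dir, float min, float max,
                                       ref float tMin, ref float tMax)
          {
              if (Math.Abs(dir) < 1e-6f)
                  return start >= min && start <= max;   // parallel: inside or outside the slab

              float t1 = (min - start) / dir;
              float t2 = (max - start) / dir;
              if (t1 > t2) { float tmp = t1; t1 = t2; t2 = tmp; }
              tMin = Math.Max(tMin, t1);
              tMax = Math.Min(tMax, t2);
              return tMin <= tMax;
          }

          // Line of sight is clear when no clutter rectangle blocks the segment.
          public static bool LOSCheck(Vector2 pos1, Vector2 pos2, IEnumerable<Rectangle> clutterRects)
          {
              foreach (Rectangle rect in clutterRects)
                  if (SegmentIntersectsRect(pos1, pos2, rect))
                      return false;
              return true;
          }
      }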

    Read the article

  • OpenGL 2D Depth Perception

    - by Stephen James
    I have a 2D RPG game written in Java using LWJGL. All works fine, but at the moment I'm having trouble deciding on the best way to do depth perception. So, for example, if the player goes in front of the tree/enemy (lower than the object's y-coordinate), show the player in front; if the player goes behind the tree/enemy (higher than the object's y-coordinate), show the player behind the object. I have tried writing a block of code to deal with this, and it works quite well for the trees, but not for the enemies yet. Is there a simpler way of doing this in LWJGL that I'm missing?
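
    The usual approach for top-down 2D is painter's-style Y-sorting: every frame, sort all drawables (player, enemies, trees) by the Y coordinate of their "feet" and render in that order, so whatever is lower on the screen is drawn later and appears in front. The project in the question is Java/LWJGL; the sketch below just shows the idea in C# with made-up interface and method names.

      using System.Collections.Generic;

      public interface IDepthSorted
      {
          float FootY { get; }   // baseline used for depth, e.g. the bottom edge of the sprite
          void Render();
      }

      public static class DepthRenderer
      {
          // Draw back-to-front: smaller FootY (further "up" the screen) is drawn first.
          public static void RenderSorted(List<IDepthSorted> drawables)
          {
              drawables.Sort((a, b) => a.FootY.CompareTo(b.FootY));
              foreach (IDepthSorted d in drawables)
                  d.Render();
          }
      }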

    Read the article

  • RenderTarget2D behavior in XNA

    - by Utkarsh Sinha
    I've been dabbling with XNA for a couple of days now. This chunk of code doesn't work as I expect. The goal is to render sprites individually and composite them on another rendertarget. P = RenderTarget2D(with RenderTargetUsage.PreserveContents) D = RenderTarget2D(with RenderTargetUsage.DiscardContents) for all sprites: graphicsDevice.SetRenderTarget(D); <draw sprite i> graphicsDevice.SetRenderTarget(P); <Draw D> graphicsDevice.SetRenderTarget(null); <Draw P> The result I get is - only the last sprite is visible. I'm sure I'm missing some piece of information about RenderTarget2D. Any hints on what that might be? Cross posted from - http://stackoverflow.com/questions/9970349/weird-rendertarget2d-behaviour
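
    A hedged guess based on how XNA 4 render targets behave: whenever a render target is set, its previous contents are discarded (you get the purple debug color) unless it was created with RenderTargetUsage.PreserveContents, and D is never cleared between sprites, so what accumulates in P may not be what you expect. A sketch of the loop with explicit clears; the names P, D and sprites mirror the pseudocode above.

      using Microsoft.Xna.Framework;
      using Microsoft.Xna.Framework.Graphics;

      public static class Compositor
      {
          // P is assumed to be created with RenderTargetUsage.PreserveContents,
          // D with RenderTargetUsage.DiscardContents, as in the question.
          public static void Compose(GraphicsDevice device, SpriteBatch spriteBatch,
                                     RenderTarget2D P, RenderTarget2D D, Texture2D[] sprites)
          {
              // Clear the accumulation target once.
              device.SetRenderTarget(P);
              device.Clear(Color.Transparent);

              foreach (Texture2D sprite in sprites)
              {
                  // Draw the single sprite into D on a clean background.
                  device.SetRenderTarget(D);
                  device.Clear(Color.Transparent);
                  spriteBatch.Begin();
                  spriteBatch.Draw(sprite, Vector2.Zero, Color.White);
                  spriteBatch.End();

                  // Composite D onto P; PreserveContents keeps the earlier sprites around.
                  device.SetRenderTarget(P);
                  spriteBatch.Begin();
                  spriteBatch.Draw(D, Vector2.Zero, Color.White);
                  spriteBatch.End();
              }

              // Finally draw the composite to the back buffer.
              device.SetRenderTarget(null);
              device.Clear(Color.CornflowerBlue);
              spriteBatch.Begin();
              spriteBatch.Draw(P, Vector2.Zero, Color.White);
              spriteBatch.End();
          }
      }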

    Read the article

  • ContentManager in XNA can't find any XML

    - by user36385
    I'm making a game in XNA 4 and this is the first time I'm using the content loader to initialize a simple class from an XML file, but no matter how many guides I follow, or how simple or complicated my XML file is, the ContentManager can't find the file; the debugger keeps telling me: "A first chance exception of type 'Microsoft.Xna.Framework.Content.ContentLoadException' occurred in Microsoft.Xna.Framework.dll". I'm really confused because I can load SpriteFonts and Texture2Ds without a problem ... I created the following XML (the most basic XNA XML): <?xml version="1.0" encoding="utf-8" ?> <XnaContent> <Asset Type="System.String">Hello</Asset> </XnaContent> and I try to load it in the LoadContent method in my main class like this: System.String hello = Content.Load<System.String>("NewXmlFile"); Is there something I'm doing wrong? I really appreciate your help.
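
    That exception usually means the asset was never built by the content pipeline: the XML has to be added to the Content project (so it gets compiled into NewXmlFile.xnb), and Content.Load takes the asset name without the .xml extension. If the file merely sits in the output folder, the ContentManager will not find it. A minimal load call, assuming the file was added at the root of the Content project and ContentManager.RootDirectory is "Content":

      // In Game1.LoadContent(), after adding the XML to the *Content project*
      // so the XNA content pipeline builds it into an .xnb asset.
      string hello = Content.Load<string>("NewXmlFile");    // asset name, no ".xml" extension
      System.Diagnostics.Debug.WriteLine(hello);            // should print "Hello"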

    Read the article

  • What is wrong with my Dot Product? [Javascript]

    - by Clay Ellis Murray
    I am trying to make a pong game and I wanted to use dot products to do the collisions with the paddles; however, whenever I take the dot product of two objects it never changes much from 0.9. This is my code to make vectors: vector = { make:function(object){ return [object.x + object.width/2,object.y + object.height/2] }, normalize:function(v){ var length = Math.sqrt(v[0] * v[0] + v[1] * v[1]) v[0] = v[0]/length v[1] = v[1]/length return v }, dot:function(v1,v2){ return v1[0] * v2[0] + v1[1] * v2[1] } } and this is where I am calculating the dot in my code: vector1 = vector.normalize(vector.make(ball)) vector2 = vector.normalize(vector.make(object)) dot = vector.dot(vector1,vector2) Here is a JsFiddle of my code; currently the paddles don't move. Any help would be greatly appreciated.
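
    The dot product stays near 0.9 because both inputs are normalized position vectors measured from the screen origin, so the result is just the cosine of the small angle between two far-away points; it says nothing about the collision. For paddle collisions the useful quantities are relative vectors, for example the ball's velocity dotted with the paddle's outward surface normal (negative means the ball is moving into the paddle). The question is JavaScript; a C# sketch of that idea with illustrative names:

      using System;

      public struct V2
      {
          public double X, Y;
          public V2(double x, double y) { X = x; Y = y; }
          public double Dot(V2 o) { return X * o.X + Y * o.Y; }
          public V2 Normalized()
          {
              double len = Math.Sqrt(X * X + Y * Y);
              return len > 0 ? new V2(X / len, Y / len) : this;
          }
      }

      public static class PaddleCollision
      {
          // True when the ball overlaps the paddle *and* is heading into its face.
          public static bool ShouldBounce(V2 ballVelocity, V2 paddleNormal, bool overlapping)
          {
              if (!overlapping) return false;
              // Negative dot with the outward normal => moving toward the paddle.
              return ballVelocity.Normalized().Dot(paddleNormal.Normalized()) < 0;
          }

          // Reflect the velocity about the paddle normal: v' = v - 2 (v . n) n
          public static V2 Reflect(V2 v, V2 n)
          {
              n = n.Normalized();
              double d = v.Dot(n);
              return new V2(v.X - 2 * d * n.X, v.Y - 2 * d * n.Y);
          }
      }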

    Read the article

  • Create edges in Blender

    - by Mikey
    I've worked with 3DS Max at uni and am trying to learn Blender. My problem is that I know a lot of simple techniques from 3DS Max that I'm having trouble translating into Blender. So my question is: say I have a poly in the middle of a mesh and I want to split it in two, simply adding an edge between two edges. This would create two 5-gons, one on either side. It's a simple technique I use every now and then when I want to modify geometry. It's called "Edge Connect" in 3DS Max. In Blender the only edge-connect method I can find is to create edge loops, which is not helpful when aiming at low-poly iPhone games. Is there an equivalent in Blender?

    Read the article

  • Oscillating Sprite Movement in XNA

    - by Nick Van Hoogenstyn
    I'm working on a 2D game and am looking to make a sprite move horizontally across the screen in XNA while oscillating vertically (basically I want the movement to look like a sine wave). Currently for movement I'm using two vectors, one for speed and one for direction. My update function for sprites just contains this: Position += direction * speed * (float)t.ElapsedGameTime.TotalSeconds; How could I utilize this setup to create the desired movement? I'm assuming I'd call Math.Sin or Math.Cos, but I'm unsure of where to start to make this sort of thing happen. My attempt looked like this: public override void Update(GameTime t) { double msElapsed = t.TotalGameTime.Milliseconds; mDirection.Y = (float)Math.Sin(msElapsed); if (mDirection.Y >= 0) mSpeed.Y = moveSpeed; else mSpeed.Y = -moveSpeed; base.Update(t, mSpeed, mDirection); } moveSpeed is just some constant positive integer. With this, the sprite simply moves continuously downward until it's off screen. Can anyone give me some info on what I'm doing wrong here? I've never tried something like this, so if I'm doing things completely wrong, let me know!
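
    Two things stand out: TotalGameTime.Milliseconds wraps back to 0 every second (TotalSeconds is the continuous value), and flipping the sign of a speed based on the sine makes the sprite drift instead of oscillate. It is usually simpler to keep the horizontal motion as-is and set the vertical offset directly from the sine of the accumulated time. A hedged XNA C# sketch; the amplitude/frequency fields are illustrative:

      using System;
      using Microsoft.Xna.Framework;

      public class OscillatingSprite
      {
          public Vector2 Position;
          private readonly float startY;
          private float elapsed;                 // seconds since the sprite was created

          public float HorizontalSpeed = 100f;   // pixels per second
          public float Amplitude = 40f;          // peak vertical offset in pixels
          public float Frequency = 2f;           // oscillations per second

          public OscillatingSprite(Vector2 start)
          {
              Position = start;
              startY = start.Y;
          }

          public void Update(GameTime t)
          {
              float dt = (float)t.ElapsedGameTime.TotalSeconds;
              elapsed += dt;

              // March steadily to the right and place Y on a sine wave around startY.
              Position.X += HorizontalSpeed * dt;
              Position.Y = startY + Amplitude * (float)Math.Sin(MathHelper.TwoPi * Frequency * elapsed);
          }
      }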

    Read the article

  • 2D Selective Gaussian Blur

    - by Joshua Thomas
    I am attempting to use Gaussian blur on a 2D platform game, selectively blurring specific types of platforms with different amounts. I am currently just messing around with simple test code, trying to get it to work correctly. What I need to eventually do is create three separate render targets, leave one normal, blur one slightly, and blur the last heavily, then recombine on the screen. Where I am now is I have successfully drawn into a new render target and performed the gaussian blur on it, but when I draw it back to the screen everything is purple aside from the platforms I drew to the target. This is my .fx file: #define RADIUS 7 #define KERNEL_SIZE (RADIUS * 2 + 1) //----------------------------------------------------------------------------- // Globals. //----------------------------------------------------------------------------- float weights[KERNEL_SIZE]; float2 offsets[KERNEL_SIZE]; //----------------------------------------------------------------------------- // Textures. //----------------------------------------------------------------------------- texture colorMapTexture; sampler2D colorMap = sampler_state { Texture = <colorMapTexture>; MipFilter = Linear; MinFilter = Linear; MagFilter = Linear; }; //----------------------------------------------------------------------------- // Pixel Shaders. //----------------------------------------------------------------------------- float4 PS_GaussianBlur(float2 texCoord : TEXCOORD) : COLOR0 { float4 color = float4(0.0f, 0.0f, 0.0f, 0.0f); for (int i = 0; i < KERNEL_SIZE; ++i) color += tex2D(colorMap, texCoord + offsets[i]) * weights[i]; return color; } //----------------------------------------------------------------------------- // Techniques. //----------------------------------------------------------------------------- technique GaussianBlur { pass { PixelShader = compile ps_2_0 PS_GaussianBlur(); } } This is the code I'm using for the gaussian blur: public Texture2D PerformGaussianBlur(Texture2D srcTexture, RenderTarget2D renderTarget1, RenderTarget2D renderTarget2, SpriteBatch spriteBatch) { if (effect == null) throw new InvalidOperationException("GaussianBlur.fx effect not loaded."); Texture2D outputTexture = null; Rectangle srcRect = new Rectangle(0, 0, srcTexture.Width, srcTexture.Height); Rectangle destRect1 = new Rectangle(0, 0, renderTarget1.Width, renderTarget1.Height); Rectangle destRect2 = new Rectangle(0, 0, renderTarget2.Width, renderTarget2.Height); // Perform horizontal Gaussian blur. game.GraphicsDevice.SetRenderTarget(renderTarget1); effect.CurrentTechnique = effect.Techniques["GaussianBlur"]; effect.Parameters["weights"].SetValue(kernel); effect.Parameters["colorMapTexture"].SetValue(srcTexture); effect.Parameters["offsets"].SetValue(offsetsHoriz); spriteBatch.Begin(0, BlendState.Opaque, null, null, null, effect); spriteBatch.Draw(srcTexture, destRect1, Color.White); spriteBatch.End(); // Perform vertical Gaussian blur. game.GraphicsDevice.SetRenderTarget(renderTarget2); outputTexture = (Texture2D)renderTarget1; effect.Parameters["colorMapTexture"].SetValue(outputTexture); effect.Parameters["offsets"].SetValue(offsetsVert); spriteBatch.Begin(0, BlendState.Opaque, null, null, null, effect); spriteBatch.Draw(outputTexture, destRect2, Color.White); spriteBatch.End(); // Return the Gaussian blurred texture. 
game.GraphicsDevice.SetRenderTarget(null); outputTexture = (Texture2D)renderTarget2; return outputTexture; } And this is the draw method affected: public void Draw(SpriteBatch spriteBatch) { device.SetRenderTarget(maxBlur); spriteBatch.Begin(); foreach (Brick brick in blueBricks) brick.Draw(spriteBatch); spriteBatch.End(); blue = gBlur.PerformGaussianBlur((Texture2D) maxBlur, helperTarget, maxBlur, spriteBatch); spriteBatch.Begin(); device.SetRenderTarget(null); foreach (Brick brick in redBricks) brick.Draw(spriteBatch); foreach (Brick brick in greenBricks) brick.Draw(spriteBatch); spriteBatch.Draw(blue, new Rectangle(0, 0, blue.Width, blue.Height), Color.White); foreach (Brick brick in purpleBricks) brick.Draw(spriteBatch); spriteBatch.End(); } I'm sorry about the massive brick of text and images(or not....new user, I tried, it said no), but I wanted to get my problem across clearly as I have been searching for an answer to this for quite a while now. As a side note, I have seen the bloom sample. Very well commented, but overly complicated since it deals in 3D; I was unable to take what I needed to learn form it. Thanks for any and all help.
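
    The purple is most likely the uncleared content of the render target: in XNA 4 a freshly set render target holds the purple debug color until it is cleared, so that purple background gets blurred along with the bricks and then painted over the whole screen. Clearing maxBlur to Color.Transparent before drawing the blue bricks, and switching back to the back buffer before spriteBatch.Begin(), should help. A hedged rewrite of the Draw method using the same fields as above:

      public void Draw(SpriteBatch spriteBatch)
      {
          // Render the bricks to be blurred onto their own target, starting from a
          // fully transparent background instead of XNA's purple debug color.
          device.SetRenderTarget(maxBlur);
          device.Clear(Color.Transparent);
          spriteBatch.Begin();
          foreach (Brick brick in blueBricks)
              brick.Draw(spriteBatch);
          spriteBatch.End();

          blue = gBlur.PerformGaussianBlur((Texture2D)maxBlur, helperTarget, maxBlur, spriteBatch);

          // Switch back to the back buffer *before* starting the final batch.
          device.SetRenderTarget(null);
          spriteBatch.Begin();
          foreach (Brick brick in redBricks) brick.Draw(spriteBatch);
          foreach (Brick brick in greenBricks) brick.Draw(spriteBatch);
          // Default alpha blending lets the transparent area show the scene behind it.
          spriteBatch.Draw(blue, new Rectangle(0, 0, blue.Width, blue.Height), Color.White);
          foreach (Brick brick in purpleBricks) brick.Draw(spriteBatch);
          spriteBatch.End();
      }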

    Read the article

  • Making an AI walk on a NavigationMesh (2D/Top-Down game)

    - by Lennard Fonteijn
    For some time I have been working on a framework which should make it possible to generate 2D levels based on a set of rules specified by level designers. You can read more about it here, as I won't go into details: http://www.jorisdormans.nl/article.php?ref=engineering_emergence Anyway, I'm now at the point of putting the framework to use and am having trouble coming up with a solution for AI. I decided to implement a NavigationMesh in the generated levels, as I already have that information to start with. Consider the following image (borrowed from http://www.david-gouveia.com/pathfinding-on-a-2d-polygonal-map/): When I run A* on the NavigationMesh, the red path would be suggested when I want to go from point A to B (either direction). However, I don't want my AI to walk that path directly, clipping corners; I'd rather have them follow the more logical black path. How would I go about going from the red path to the black path? Are there any algorithms for this? Which steps do I take? Is A* the proper solution for this at all? For some additional information: the proof-of-concept game is a 2D top-down game written in C#, but examples/references in any language are welcome!
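
    The standard fix is a corridor-smoothing pass over the A* output: either the funnel ("string pulling") algorithm run over the sequence of navmesh edges the path crosses, or a simpler greedy pass that drops every waypoint which can be skipped without losing line of sight. A C# sketch of the greedy version; hasLineOfSight is a placeholder for whatever walkability test the framework already provides.

      using System;
      using System.Collections.Generic;
      using System.Numerics;   // Vector2 used only as a plain 2D point type here

      public static class PathSmoothing
      {
          // Greedy smoothing: from each point, jump to the furthest later waypoint
          // that is still directly reachable and drop everything in between.
          public static List<Vector2> Smooth(IList<Vector2> path,
                                             Func<Vector2, Vector2, bool> hasLineOfSight)
          {
              var result = new List<Vector2>();
              if (path.Count == 0) return result;

              int current = 0;
              result.Add(path[0]);

              while (current < path.Count - 1)
              {
                  int next = current + 1;
                  // Look ahead as far as visibility allows.
                  for (int i = path.Count - 1; i > current + 1; i--)
                  {
                      if (hasLineOfSight(path[current], path[i]))
                      {
                          next = i;
                          break;
                      }
                  }
                  result.Add(path[next]);
                  current = next;
              }
              return result;
          }
      }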

    Read the article

  • How do I consistently re-size my game window and elements?

    - by Milo
    In my 2D game, I have a flow layout. Inside the flow layout are tables. I have a slider that lets the user make the tables larger or smaller. This makes the background larger or smaller too. Everything should scale proportionally which means the background should stay at the same position when I make things larger, and it almost does. When the scrollbar is at 0, it does exactly this. As the scrollbar gets further down problems arise. I'll toggle the slider maybe 3 times and on the fourth time, the background jumps a little lower on the Y axis. In order to be efficient, I only start rendering the background near the parent of the flow layout. Here it is: void LobbyTableManager::renderBG( GraphicsContext* g, agui::Rectangle& absRect, agui::Rectangle& childRect ) { int cx, cy, cw, ch; g->getClippingRect(cx,cy,cw,ch); g->setClippingRect(absRect.getX(),absRect.getY(),absRect.getWidth(),absRect.getHeight()); float scale = 0.35f; int w = m_bgSprite->getWidth() * getTableScale() * scale; int h = m_bgSprite->getHeight() * getTableScale() * scale; int numX = ceil(absRect.getWidth() / (float)w) + 2; int numY = ceil(absRect.getHeight() / (float)h) + 2; float offsetX = m_activeTables[0]->getLocation().getX() - w; float offsetY = m_activeTables[0]->getLocation().getY() - h; int startY = childRect.getY(); if(moo) { std::cout << "S=" << startY << ","; } int numAttempts = 0; while(startY + h < absRect.getY() && numAttempts < 1000) { startY += h; if(moo) { std::cout << startY << ","; } numAttempts++; } if(moo) { std::cout << "\n"; moo = false; } g->holdDrawing(); for(int i = 0; i < numX; ++i) { for(int j = 0; j < numY; ++j) { g->drawScaledSprite(m_bgSprite,0,0,m_bgSprite->getWidth(),m_bgSprite->getHeight(), absRect.getX() + (i * w) + (offsetX),absRect.getY() + (j * h) + startY,w,h,0); } } g->unholdDrawing(); g->setClippingRect(cx,cy,cw,ch); } The numeric problem seems to be in the value of startY. I outputted startY figuring out its value: As you can see here, this is me only zooming in, pay attention to the final number before the next s=. You'll notice that, what should happen is, the numbers should be linear, ex: -40, -38, -36, -34, -32, -30, etc. As you'll notice, the start numbers linearly correlate ex: 62k, 64k, 66k, 68k, 70k etc.. but the end result is wrong every third or 4th time. Here is most of the resize code: void LobbyTableManager::setTableScale( float scale ) { scale += 0.3f; scale *= 2.0f; agui::Gui* gotGui = getGui(); float scrollRel = m_vScroll->getRelativeValue(); setScale(scale); rescaleTables(); resizeFlow(); if(gotGui) { gotGui->toggleWidgetLocationChanged(false); } updateScrollBars(); float newVal = scrollRel * m_vScroll->getMaxValue(); if((int)(newVal + 0.5f) > (int)newVal) { newVal++; } m_vScroll->setValue(newVal); static int x = 0; x++; moo = true; //std::cout << m_vScroll->getValue() << std::endl; if(gotGui) { gotGui->toggleWidgetLocationChanged(true); } if(gotGui) { gotGui->_widgetLocationChanged(); } } void LobbyTableManager::valueChanged( agui::VScrollBar* source,int val ) { if(getGui()) { getGui()->toggleWidgetLocationChanged(false); } m_flow->setLocation(0,-val); if(getGui()) { getGui()->toggleWidgetLocationChanged(true); getGui()->_widgetLocationChanged(); } }
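
    A hedged guess at the jump: w and h are truncated to int after the scale multiplication, and startY is then built by repeatedly adding that truncated h in a loop, so the accumulated rounding changes by a whole tile whenever the truncation flips. Keeping the tile size in floats and deriving the wrap offset with a single modulo, instead of a loop of integer additions, keeps the background consistent across zoom levels. A small C# sketch of the idea (not a drop-in replacement for the Java code above):

      public static class BackgroundWrap
      {
          // Wrap a scroll offset into [0, tileSize) without a loop of truncated adds.
          public static float WrapOffset(float scrollOffset, float tileSize)
          {
              float r = scrollOffset % tileSize;
              return (r < 0f) ? r + tileSize : r;
          }

          // The first visible tile row then starts at clipTop minus the wrapped offset,
          // all computed in floats and only rounded once when drawing.
          public static float FirstTileY(float clipTop, float contentTop, float tileH)
          {
              return clipTop - WrapOffset(clipTop - contentTop, tileH);
          }
      }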

    Read the article

  • BlitzMax - generating 2D neon glowing line effect to png file

    - by zanlok
    Originally asked on StackOverflow, but it became tumbleweed. I'm looking to create a glowing line effect in BlitzMax, something like a Star Wars lightsaber or laserbeam. Doesn't have to be realtime, but just to TImage objects and then maybe saved to PNG for later use in animation. I'm happy to use 3D features, but it will be for use in a 2D game. Since it will be on black/space background, my strategy is to draw a series of white blurred lines with color and high transparency, then eventually central lines less blurred and more white. What I want to draw is actually bezier curved lines. Drawing curved lines is easy enough, but I can't use the technique above to create a good laser/neon effect because it comes out looking very segmented. So, I think it may be better to use a blur effect/shader on what does render well, which is a 1-pixel bezier curve. The problems I've been having are: Applying a shader to just a certain area of the screen where lines are drawn. If there's a way to do draw lines to a texture and then blur that texture and save the png, that would be great to hear about. There's got to be a way to do this, but I just haven't gotten the right elements working together yet. Any help from someone familiar with this stuff would be greatly appreciated. Using just 2D calls could be advantageous, simpler to understand and re-use. It would be very nice to know how to save a PNG that preserves the transparency/alpha stuff. p.s. I've reviewed this post (and many many others on the Blitz site), have samples working, and even developed my own 5x5 frag shaders. But, it's 3D and a scene-wide thing that doesn't seem to convert to 2D or just a certain area very well. I'd rather understand how to apply shading to a 2D scene, especially using the specifics of BlitzMax.

    Read the article

  • ParticleSystem in Slick2d (with MarteEngine)

    - by Bro Kevin D.
    First of all, sorry if this sounds very newbie-ish. I'm stuck on getting a ParticleSystem I made using Pedigree to work in my game. It's basically an explosion that I want to display whenever an enemy dies. The ParticleSystem has two emitters, smoke and explosion. I tried putting it in my Enemy class (which extends Entity): @Override public void update(GameContainer gc, int delta) throws SlickException { super.update(gc, delta); /** bunch of codes */ explosionSystem.update(delta); } @Override public void render(GameContainer gc, Graphics gfx) throws SlickException { super.render(gc, gfx); if(isDestroyed) { explosionSystem.render(x,y); if(explosionSystem.getEmitter(1).completed()) { this.destroy(); } } } And it does not render. I'm not sure if this is the proper way of implementing it, as I've considered creating an Entity to serve as a controller for all the Enemies. Right now, I'm just adding enemies every second. So how do I render the ParticleSystem when the enemy dies? If anyone can point me in the right direction. Thank you for your time.
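
    A common gotcha here: the explosion lives on the Enemy, so the moment destroy() runs (or the enemy stops being updated), nothing drives the ParticleSystem any more and it never gets a chance to play out. The usual pattern is to hand the effect off to a world-level effects list when the enemy dies, so it keeps updating and rendering on its own until its emitters complete. The question is Java/Slick2D; below is only a framework-neutral C# sketch of that hand-off, with illustrative names.

      using System.Collections.Generic;

      public interface IEffect
      {
          void Update(float deltaSeconds);
          void Render();
          bool Finished { get; }
      }

      public class EffectManager
      {
          private readonly List<IEffect> effects = new List<IEffect>();

          // Called by an enemy when it dies: the effect now lives independently of it.
          public void Spawn(IEffect effect) { effects.Add(effect); }

          public void Update(float dt)
          {
              for (int i = effects.Count - 1; i >= 0; i--)
              {
                  effects[i].Update(dt);
                  if (effects[i].Finished)
                      effects.RemoveAt(i);   // drop effects whose emitters have completed
              }
          }

          public void Render()
          {
              foreach (IEffect e in effects)
                  e.Render();
          }
      }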

    Read the article

  • Problems running Java SWT application. Is this something to do with 64 vs 32-bit libraries?

    - by ?????
    I'm getting started with SWT Exception in thread "main" java.lang.UnsatisfiedLinkError: no swt-cocoa-3557 or swt-cocoa in swt.library.path, java.library.path or the jar file at org.eclipse.swt.internal.Library.loadLibrary(Unknown Source) at org.eclipse.swt.internal.Library.loadLibrary(Unknown Source) at org.eclipse.swt.internal.C.<clinit>(Unknown Source) at org.eclipse.swt.internal.cocoa.NSThread.isMainThread(Unknown Source) at org.eclipse.swt.graphics.Device.<init>(Unknown Source) at org.eclipse.swt.widgets.Display.<init>(Unknown Source) at org.eclipse.swt.widgets.Display.<init>(Unknown Source) at org.eclipse.swt.widgets.Display.getDefault(Unknown Source) at com.astrobetty.getotagdesktop.MainUIWindow.main(MainUIWindow.java:110) I've carefully followed the directions here http://www.eclipse.org/swt/eclipse.php I can see libswt-awt-cocoa-3557.jnilib, libswt-cocoa-3557.jnilib, libswt-pi-cocoa-3557.jnilib clearly listed under "Referenced Libraries" in my Eclipse package explorer. Presumably, that should make them available on the classpath when I select the java module that contains "main" and do a "run as" application. What's the problem? I suspect it may be related to 64-bit vs 32-bit VMs....

    Read the article

  • Blending textures together, texture fade over / fade in

    - by Deukalion
    What is the best way to render a texture overlapping effect, like in this example? I want either the grass to fade into the snow texture, or the other way around. No rough edges; somehow make them blend over, so the grass has a bit of snow or the snow has a bit of grass. How is this possible at runtime, if it's possible at all? I don't render this using the SpriteBatch, since the ground isn't made of rectangles (they can be moved). This is the way I render each shape (each one of those squares): // LoadTexture // Apply EffectPass device.DrawUserIndexedPrimitives<VertexPositionNormalTexture> ( PrimitiveType.TriangleList, render.Item.Points, // Array of VertexPositionNormalTexture 0, render.Item.Points.Length, render.Item.Indexes, // Array of int indexes (triangulation) 0, render.Item.Indexes.Length / 3, VertexPositionNormalTexture.VertexDeclaration );
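
    If the blend can be precomputed, one simple route is to build the transition texture on the CPU: lerp the two source textures with a per-pixel blend factor (a gradient or a hand-painted mask) and use the result on the border tiles. At runtime the same idea is usually done in a pixel shader as lerp(grass, snow, mask). A hedged XNA C# sketch of the precomputed version:

      using Microsoft.Xna.Framework;
      using Microsoft.Xna.Framework.Graphics;

      public static class TextureBlender
      {
          // Builds a new texture that fades from texA (left edge) to texB (right edge).
          // Both inputs are assumed to have the same dimensions.
          public static Texture2D BlendHorizontal(GraphicsDevice device, Texture2D texA, Texture2D texB)
          {
              int w = texA.Width, h = texA.Height;
              Color[] a = new Color[w * h];
              Color[] b = new Color[w * h];
              Color[] output = new Color[w * h];

              texA.GetData(a);
              texB.GetData(b);

              for (int y = 0; y < h; y++)
              {
                  for (int x = 0; x < w; x++)
                  {
                      float t = (w > 1) ? x / (float)(w - 1) : 0f;   // 0 at left edge, 1 at right
                      int i = y * w + x;
                      output[i] = Color.Lerp(a[i], b[i], t);         // per-pixel crossfade
                  }
              }

              Texture2D result = new Texture2D(device, w, h);
              result.SetData(output);
              return result;
          }
      }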

    Read the article

  • Setting effects variables in XNA

    - by Badescu Alexandru
    Hello! I am currently reading a book named "3D Graphics with XNA Game Studio 4.0" by Sean James and have some questions to ask: if I create an effect parameter named, let's say, SpecularPower and have in my effect a variable named SpecularPower, and I do something like effect.Parameters["SpecularPower"].SetValue(3), will that change the SpecularPower variable in my effect? And a second question, not regarding the book: if I have a spaceship and I've created a "boost" functionality that speeds it up, what effects should I implement to create the impression of high speed? I was thinking of making everything except my spaceship blurry, but I think there would be something missing. Any ideas? Regards, Alex Badescu
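
    On the first question: yes. effect.Parameters["SpecularPower"] looks the variable up by name in the compiled effect, and SetValue writes the value that will be used on the next draw. The one caveat is that the SetValue overload has to match the HLSL type, so a float SpecularPower should be set with a float literal rather than an int. A short illustration:

      // HLSL side (in the .fx file):  float SpecularPower;
      effect.Parameters["SpecularPower"].SetValue(3.0f);   // float literal to match the float parameter

      // Apply the technique before drawing so the new value is actually used.
      foreach (EffectPass pass in effect.CurrentTechnique.Passes)
      {
          pass.Apply();
          // ... issue draw calls here ...
      }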

    Read the article

  • Single and Double Jump with single button.

    - by Asad
    I want to make a single jump on a single tap and a double jump on a double tap. My problem is that if I double tap on the ground then it's fine, but if I make the first tap on the ground and the second tap in the air, the player gains more height than usual, as in image 1. I want to make my jump like in image 2: no matter at which point the user gives the second tap, the player always reaches a specific height. I used both impulse and linear velocity to make the jump, but my problem is still not solved.
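
    The extra height comes from stacking the second impulse on top of whatever upward velocity is left over from the first jump. A common fix is to track a jump counter and zero the vertical velocity right before applying the second jump's impulse, so the second arc always starts from the same state regardless of when the tap arrives. A hedged Unity-style C# sketch (the question does not name the engine; all names are illustrative):

      using UnityEngine;

      public class DoubleJump : MonoBehaviour
      {
          public float jumpImpulse = 8f;
          public int maxJumps = 2;

          private Rigidbody2D rb;
          private int jumpsUsed;

          void Start()
          {
              rb = GetComponent<Rigidbody2D>();
          }

          // Call this from the tap handler.
          public void OnTap()
          {
              if (jumpsUsed >= maxJumps) return;

              // Cancel any remaining vertical velocity so the second jump
              // always reaches the same height, no matter when it is triggered.
              rb.velocity = new Vector2(rb.velocity.x, 0f);
              rb.AddForce(Vector2.up * jumpImpulse, ForceMode2D.Impulse);
              jumpsUsed++;
          }

          // Call this when the player touches the ground again.
          public void OnLanded()
          {
              jumpsUsed = 0;
          }
      }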

    Read the article

  • Testing on Device Other Than the Known Brand Question (Local and Imported Phone Question)

    - by David Dimalanta
    I have a question. When testing on a device from Eclipse, it's easy to install and add the device software for the specific brands commonly used in game testing, like Samsung, Google, T-Mobile, and HTC, according to the Android Developers website. What if I'm using other Android brands to test the program via Eclipse (e.g. MyPhone, Starmobile)? What should I look for to download in order to enable testing on phones from brands other than the known and commonly used ones: the model number or simply the brand? Here are some examples of these brands other than the well-known ones that run Android: Starmobile Engage 7 (http://www.lazada.com.ph/Starmobile-Engage-7-Android-40-4GB-with-Wi-Fi-Black-Starmobile-Mercury-B201-COMBO-39833.html/) My|Phone A898 Duo (http://www.myphone.com.ph/#!a898-duo/c1yt) Also, take note that I'm a Filipino programmer working in the Philippines, testing our local smartphones with the Android game or app we created. I hope you can understand me; thanks for your help.

    Read the article

  • Why is permadeath essential to a roguelike design?

    - by Gregory Weir
    Roguelikes and roguelike-likes (Spelunky, The Binding of Isaac) tend to share a number of game design elements: Procedurally generated worlds Character growth by way of new abilities and powers Permanent death I can understand why starting with permadeath as a premise would lead you to the other ideas: if you're going to be starting over a lot, you'll want variety in your experiences. But why do the first two elements imply a permadeath approach?

    Read the article

  • What is a better abstraction layer for D3D9 and OpenGL vertex data management?

    - by Sam Hocevar
    My rendering code has always been OpenGL. I now need to support a platform that does not have OpenGL, so I have to add an abstraction layer that wraps OpenGL and Direct3D 9. I will support Direct3D 11 later. TL;DR: the differences between OpenGL and Direct3D cause redundancy for the programmer, and the data layout feels flaky. For now, my API works a bit like this. This is how a shader is created: Shader *shader = Shader::Create( " ... GLSL vertex shader ... ", " ... GLSL pixel shader ... ", " ... HLSL vertex shader ... ", " ... HLSL pixel shader ... "); ShaderAttrib a1 = shader->GetAttribLocation("Point", VertexUsage::Position, 0); ShaderAttrib a2 = shader->GetAttribLocation("TexCoord", VertexUsage::TexCoord, 0); ShaderAttrib a3 = shader->GetAttribLocation("Data", VertexUsage::TexCoord, 1); ShaderUniform u1 = shader->GetUniformLocation("WorldMatrix"); ShaderUniform u2 = shader->GetUniformLocation("Zoom"); There is already a problem here: once a Direct3D shader is compiled, there is no way to query an input attribute by its name; apparently only the semantics stay meaningful. This is why GetAttribLocation has these extra arguments, which get hidden in ShaderAttrib. Now this is how I create a vertex declaration and two vertex buffers: VertexDeclaration *decl = VertexDeclaration::Create( VertexStream<vec3,vec2>(VertexUsage::Position, 0, VertexUsage::TexCoord, 0), VertexStream<vec4>(VertexUsage::TexCoord, 1)); VertexBuffer *vb1 = new VertexBuffer(NUM * (sizeof(vec3) + sizeof(vec2)); VertexBuffer *vb2 = new VertexBuffer(NUM * sizeof(vec4)); Another problem: the information VertexUsage::Position, 0 is totally useless to the OpenGL/GLSL backend because it does not care about semantics. Once the vertex buffers have been filled with or pointed at data, this is the rendering code: shader->Bind(); shader->SetUniform(u1, GetWorldMatrix()); shader->SetUniform(u2, blah); decl->Bind(); decl->SetStream(vb1, a1, a2); decl->SetStream(vb2, a3); decl->DrawPrimitives(VertexPrimitive::Triangle, NUM / 3); decl->Unbind(); shader->Unbind(); You see that decl is a bit more than just a D3D-like vertex declaration, it kinda takes care of rendering as well. Does this make sense at all? What would be a cleaner design? Or a good source of inspiration?

    Read the article

  • XNA Easy Storage XBOX 360 High Scores

    - by user1003211
    To follow up from a previous query: I need some help with the implementation of EasyStorage high scores, which is bringing up some errors on the Xbox. I get the prompt screen, a SaveDevice is selected and a file is created! However, the file remains empty (I've tried prepopulating it but still get errors). The full portions of the scoring code can be found here: http://pastebin.com/74v897Yt The current issue in particular is in LoadHighScores() - "There is an error in XML document (0, 0)." on the line data = (HighScoreData)serializer.Deserialize(stream); I'm not sure whether this line is correct either: HighScoreData data = new HighScoreData(); public static HighScoreData LoadHighScores(string container, string filename) { HighScoreData data = new HighScoreData(); if (Global.SaveDevice.FileExists(container, filename)) { Global.SaveDevice.Load(container, filename, stream => { File.Open(Global.fileName_options, FileMode.OpenOrCreate, FileAccess.Read); try { // Read the data from the file XmlSerializer serializer = new XmlSerializer(typeof(HighScoreData)); data = (HighScoreData)serializer.Deserialize(stream); } finally { // Close the file stream.Close(); // stream.Dispose(); } }); } return (data); } I call PromptMe(); when the Start button is pressed at the beginning. I call if (Global.SaveDevice.IsReady){entries = LoadHighScores(HighScoresContainer, HighScoresFilename);} during the menu screen to try and display the high-score screen. I call SaveHighScore(); when the game ends. I've tried altering the struct to a class but still no luck. Any help greatly appreciated.
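
    "There is an error in XML document (0, 0)" usually means the stream being deserialized is empty: either nothing was ever serialized into the high-score file, or the stray File.Open(Global.fileName_options, ...) call is getting in the way (it opens an unrelated file and is never closed). A hedged version of the loader that drops that call and guards against an empty file, keeping the EasyStorage Load signature from the question:

      public static HighScoreData LoadHighScores(string container, string filename)
      {
          // Start from defaults; only overwrite if a non-empty save exists.
          HighScoreData data = new HighScoreData();

          if (Global.SaveDevice.FileExists(container, filename))
          {
              Global.SaveDevice.Load(container, filename, stream =>
              {
                  // An empty file is what makes XmlSerializer report "(0, 0)".
                  if (stream == null || stream.Length == 0)
                      return;

                  XmlSerializer serializer = new XmlSerializer(typeof(HighScoreData));
                  data = (HighScoreData)serializer.Deserialize(stream);
              });
          }
          return data;
      }

    The matching SaveHighScore then needs to serialize the same HighScoreData type into the same container and filename, otherwise the file will keep coming back empty.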

    Read the article

  • Axis-Aligned Bounding Boxes vs Bounding Ellipse

    - by Griffin
    Why is it that most, if not all, collision detection algorithms today require each body to have an AABB for use in the broad phase only? It seems to me like simply placing a circle at the body's centroid and extending the radius until the circle encompasses the entire body would be optimal. This would not need to be updated after the body rotates, and the broad-phase overlap calculation would be faster too. Correct? Bonus: would a bounding ellipse be practical for broad-phase calculations as well, since it would better represent long, skinny shapes? Or would it require extensive calculations, defeating the purpose of the broad phase?
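
    Both volumes are in common use; AABBs pair naturally with sweep-and-prune and spatial hashing because their extents project directly onto the axes, but a bounding circle is a perfectly valid broad-phase volume and, as noted, never needs refitting when the body rotates (at the cost of a looser fit around long, thin bodies). The overlap tests are comparably cheap, as the sketch below shows. A bounding ellipse is rarely used because an ellipse-ellipse overlap test has no such trivial form, which works against the point of a broad phase.

      public static class BroadPhaseTests
      {
          // Circle vs circle: one squared-distance comparison, rotation-invariant.
          public static bool CirclesOverlap(float ax, float ay, float ar,
                                            float bx, float by, float br)
          {
              float dx = bx - ax, dy = by - ay;
              float r = ar + br;
              return dx * dx + dy * dy <= r * r;
          }

          // AABB vs AABB: four comparisons, but the box must be refit when the body rotates.
          public static bool AabbsOverlap(float aMinX, float aMinY, float aMaxX, float aMaxY,
                                          float bMinX, float bMinY, float bMaxX, float bMaxY)
          {
              return aMinX <= bMaxX && aMaxX >= bMinX &&
                     aMinY <= bMaxY && aMaxY >= bMinY;
          }
      }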

    Read the article

  • State of the art Culling and Batching techniques in rendering

    - by Kristian Skarseth
    I'm currently working on upgrading and restructuring an OpenGL render engine. The engine is used for visualising large scenes of architectural data (buildings with interiors), and the number of objects can become rather large. As is the case with any building, there are a lot of occluded objects behind walls, and you naturally only see the objects that are in the same room as you, or the exterior if you are outside. This leaves a large number of objects that should be removed through occlusion culling and frustum culling. At the same time there is a lot of repetitive geometry that can be batched into render batches, and also a lot of objects that can be rendered with instanced rendering. The way I see it, it can be difficult to combine render batching and culling in an optimal fashion. If you batch too many objects into the same VBO, it's difficult to cull the objects on the CPU in order to skip rendering that batch. At the same time, if you skip the culling on the CPU, a lot of objects will be processed by the GPU while they are not visible. If you skip batching completely in order to more easily cull on the CPU, there will be an unwanted high number of render calls. I have done some research into existing techniques and theories as to how these problems are solved in modern graphics, but I have not been able to find any concrete solution. An idea a colleague and I came up with was restricting batches to objects relatively close to each other, e.g. all chairs in a room or objects within a radius of n meters. This could be simplified and optimized through the use of octrees. Does anyone have any pointers to techniques used for scene management, culling, batching etc. in state-of-the-art modern graphics engines?

    Read the article

  • Render 2 images that use different shaders

    - by Code Vader
    Based on the giawa/nehe tutorials, how can I render 2 images with different shaders. I'm pretty new to OpenGl and shaders so I'm not completely sure whats happening in my code, but I think the shaders that is called last overwrites the first one. private static void OnRenderFrame() { // calculate how much time has elapsed since the last frame watch.Stop(); float deltaTime = (float)watch.ElapsedTicks / System.Diagnostics.Stopwatch.Frequency; watch.Restart(); // use the deltaTime to adjust the angle of the cube angle += deltaTime; // set up the OpenGL viewport and clear both the color and depth bits Gl.Viewport(0, 0, width, height); Gl.Clear(ClearBufferMask.ColorBufferBit | ClearBufferMask.DepthBufferBit); // use our shader program and bind the crate texture Gl.UseProgram(program); //<<<<<<<<<<<< TOP PYRAMID // set the transformation of the top_pyramid program["model_matrix"].SetValue(Matrix4.CreateRotationY(angle * rotate_cube)); program["enable_lighting"].SetValue(lighting); // bind the vertex positions, UV coordinates and element array Gl.BindBufferToShaderAttribute(top_pyramid, program, "vertexPosition"); Gl.BindBufferToShaderAttribute(top_pyramidNormals, program, "vertexNormal"); Gl.BindBufferToShaderAttribute(top_pyramidUV, program, "vertexUV"); Gl.BindBuffer(top_pyramidTrianlges); // draw the textured top_pyramid Gl.DrawElements(BeginMode.Triangles, top_pyramidTrianlges.Count, DrawElementsType.UnsignedInt, IntPtr.Zero); //<<<<<<<<<< CUBE // set the transformation of the cube program["model_matrix"].SetValue(Matrix4.CreateRotationY(angle * rotate_cube)); program["enable_lighting"].SetValue(lighting); // bind the vertex positions, UV coordinates and element array Gl.BindBufferToShaderAttribute(cube, program, "vertexPosition"); Gl.BindBufferToShaderAttribute(cubeNormals, program, "vertexNormal"); Gl.BindBufferToShaderAttribute(cubeUV, program, "vertexUV"); Gl.BindBuffer(cubeQuads); // draw the textured cube Gl.DrawElements(BeginMode.Quads, cubeQuads.Count, DrawElementsType.UnsignedInt, IntPtr.Zero); //<<<<<<<<<<<< BOTTOM PYRAMID // set the transformation of the bottom_pyramid program["model_matrix"].SetValue(Matrix4.CreateRotationY(angle * rotate_cube)); program["enable_lighting"].SetValue(lighting); // bind the vertex positions, UV coordinates and element array Gl.BindBufferToShaderAttribute(bottom_pyramid, program, "vertexPosition"); Gl.BindBufferToShaderAttribute(bottom_pyramidNormals, program, "vertexNormal"); Gl.BindBufferToShaderAttribute(bottom_pyramidUV, program, "vertexUV"); Gl.BindBuffer(bottom_pyramidTrianlges); // draw the textured bottom_pyramid Gl.DrawElements(BeginMode.Triangles, bottom_pyramidTrianlges.Count, DrawElementsType.UnsignedInt, IntPtr.Zero); //<<<<<<<<<<<<< STAR Gl.Disable(EnableCap.DepthTest); Gl.Enable(EnableCap.Blend); Gl.BlendFunc(BlendingFactorSrc.SrcAlpha, BlendingFactorDest.One); Gl.BindTexture(starTexture); //calculate the camera position using some fancy polar co-ordinates Vector3 position = 20 * new Vector3(Math.Cos(phi) * Math.Sin(theta), Math.Cos(theta), Math.Sin(phi) * Math.Sin(theta)); Vector3 upVector = ((theta % (Math.PI * 2)) > Math.PI) ? 
Vector3.Up : Vector3.Down; program_2["view_matrix"].SetValue(Matrix4.LookAt(position, Vector3.Zero, upVector)); // make sure the shader program and texture are being used Gl.UseProgram(program_2); // loop through the stars, drawing each one for (int i = 0; i < stars.Count; i++) { // set the position and color of this star program_2["model_matrix"].SetValue(Matrix4.CreateTranslation(new Vector3(stars[i].dist, 0, 0)) * Matrix4.CreateRotationZ(stars[i].angle)); program_2["color"].SetValue(stars[i].color); Gl.BindBufferToShaderAttribute(star, program_2, "vertexPosition"); Gl.BindBufferToShaderAttribute(starUV, program_2, "vertexUV"); Gl.BindBuffer(starQuads); Gl.DrawElements(BeginMode.Quads, starQuads.Count, DrawElementsType.UnsignedInt, IntPtr.Zero); // update the position of the star stars[i].angle += (float)i / stars.Count * deltaTime * 2 * rotate_stars; stars[i].dist -= 0.2f * deltaTime * rotate_stars; // if we've reached the center then move this star outwards and give it a new color if (stars[i].dist < 0f) { stars[i].dist += 5f; stars[i].color = new Vector3(generator.NextDouble(), generator.NextDouble(), generator.NextDouble()); } } Glut.glutSwapBuffers(); } The same goes for the textures, whichever one I mention last gets applied to both object?
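
    A hedged guess at the cause: program_2["view_matrix"].SetValue(...) runs before Gl.UseProgram(program_2), so depending on how the wrapper applies uniforms that value can land on the wrong program or be lost, and the same last-one-wins effect applies to the bound texture. Setting each program's uniforms and textures only while that program is active usually sorts this out. A sketch of the ordering, reusing the names from the question:

      // Pyramids / cube: make the first program current, then set its uniforms, then draw.
      Gl.UseProgram(program);
      program["model_matrix"].SetValue(Matrix4.CreateRotationY(angle * rotate_cube));
      program["enable_lighting"].SetValue(lighting);
      // ... Gl.BindBufferToShaderAttribute / Gl.BindBuffer / Gl.DrawElements as before ...

      // Stars: switch programs *before* touching program_2's uniforms or its texture.
      Gl.UseProgram(program_2);
      Gl.BindTexture(starTexture);
      program_2["view_matrix"].SetValue(Matrix4.LookAt(position, Vector3.Zero, upVector));
      for (int i = 0; i < stars.Count; i++)
      {
          program_2["model_matrix"].SetValue(
              Matrix4.CreateTranslation(new Vector3(stars[i].dist, 0, 0)) *
              Matrix4.CreateRotationZ(stars[i].angle));
          program_2["color"].SetValue(stars[i].color);
          // ... bind star buffers and Gl.DrawElements as before ...
      }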

    Read the article

< Previous Page | 486 487 488 489 490 491 492 493 494 495 496 497  | Next Page >