Search Results

Search found 37616 results on 1505 pages for 'model driven development'.


  • DirectX 11 Constant Buffers vs Effect Framework

    - by Alex
    I'm having some trouble understanding the differences between using constant buffers and using the effect framework of DirectX 11 for updating shader constants. From what I understand, they both do exactly the same thing, although from reading the documentation it appears as if using effects is meant to be 'easier'. However, they seem the same to me: one uses VSSetConstantBuffers and the other GetConstantBufferByName. Is there something I'm missing here?
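
    For reference, here is a minimal sketch of the raw constant-buffer path (the struct layout and the device/context/cbuffer variables are illustrative, not from the question); the effect framework essentially wraps these same calls behind GetConstantBufferByName:

        // Illustrative C++: a hand-managed per-object constant buffer.
        struct PerObject { DirectX::XMFLOAT4X4 worldViewProj; }; // must match the HLSL cbuffer layout

        D3D11_BUFFER_DESC desc = {};
        desc.ByteWidth = sizeof(PerObject);           // must be a multiple of 16 bytes
        desc.Usage = D3D11_USAGE_DEFAULT;
        desc.BindFlags = D3D11_BIND_CONSTANT_BUFFER;
        device->CreateBuffer(&desc, nullptr, &cbuffer);

        // Per frame: push new values, then bind to slot b0 of the vertex stage.
        context->UpdateSubresource(cbuffer, 0, nullptr, &perObjectData, 0, 0);
        context->VSSetConstantBuffers(0, 1, &cbuffer);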

    Read the article

  • OpenGL directional light creating black spots

    - by AnonymousDeveloper
    I probably ought to start by saying that I suspect the problem is that one of my vectors is not in the correct "space", but I don't know for sure. I am having a strange problem with a directional light. When I move the camera away from (0.0, 0.0, 0.0) it creates tiny black spots that grow larger as the distance increases. I apologize ahead of time for the length of the code. Vertex shader: #version 410 core in vec3 vf_normal; in vec3 vf_bitangent; in vec3 vf_tangent; in vec2 vf_textureCoordinates; in vec3 vf_vertex; out vec3 tc_normal; out vec3 tc_bitangent; out vec3 tc_tangent; out vec2 tc_textureCoordinates; out vec3 tc_vertex; uniform mat3 vf_m_normal; uniform mat4 vf_m_model; uniform mat4 vf_m_mvp; uniform mat4 vf_m_projection; uniform mat4 vf_m_view; uniform float vf_te_inner; uniform float vf_te_outer; void main() { tc_normal = vf_normal; tc_bitangent = vf_bitangent; tc_tangent = vf_tangent; tc_textureCoordinates = vf_textureCoordinates; tc_vertex = vf_vertex; gl_Position = vf_m_mvp * vec4(vf_vertex, 1.0); } Tessellation Control shader: #version 410 core layout (vertices = 3) out; in vec3 tc_normal[]; in vec3 tc_bitangent[]; in vec3 tc_tangent[]; in vec2 tc_textureCoordinates[]; in vec3 tc_vertex[]; out vec3 te_normal[]; out vec3 te_bitangent[]; out vec3 te_tangent[]; out vec2 te_textureCoordinates[]; out vec3 te_vertex[]; uniform float vf_te_inner; uniform float vf_te_outer; uniform vec4 vf_l_color; uniform vec3 vf_l_position; uniform mat4 vf_m_depthBias; uniform mat4 vf_m_model; uniform mat4 vf_m_mvp; uniform mat4 vf_m_projection; uniform mat4 vf_m_view; uniform sampler2D vf_t_diffuse; uniform sampler2D vf_t_normal; uniform sampler2DShadow vf_t_shadow; uniform sampler2D vf_t_specular; #define ID gl_InvocationID float getTessLevelInner(float distance0, float distance1) { float avgDistance = (distance0 + distance1) / 2.0; return clamp((vf_te_inner - avgDistance), 1.0, vf_te_inner); } float getTessLevelOuter(float distance0, float distance1) { float avgDistance = (distance0 + distance1) / 2.0; return clamp((vf_te_outer - avgDistance), 1.0, vf_te_outer); } void main() { te_normal[gl_InvocationID] = tc_normal[gl_InvocationID]; te_bitangent[gl_InvocationID] = tc_bitangent[gl_InvocationID]; te_tangent[gl_InvocationID] = tc_tangent[gl_InvocationID]; te_textureCoordinates[gl_InvocationID] = tc_textureCoordinates[gl_InvocationID]; te_vertex[gl_InvocationID] = tc_vertex[gl_InvocationID]; float eyeToVertexDistance0 = distance(vec3(0.0), vec4(vf_m_view * vec4(tc_vertex[0], 1.0)).xyz); float eyeToVertexDistance1 = distance(vec3(0.0), vec4(vf_m_view * vec4(tc_vertex[1], 1.0)).xyz); float eyeToVertexDistance2 = distance(vec3(0.0), vec4(vf_m_view * vec4(tc_vertex[2], 1.0)).xyz); gl_TessLevelOuter[0] = getTessLevelOuter(eyeToVertexDistance1, eyeToVertexDistance2); gl_TessLevelOuter[1] = getTessLevelOuter(eyeToVertexDistance2, eyeToVertexDistance0); gl_TessLevelOuter[2] = getTessLevelOuter(eyeToVertexDistance0, eyeToVertexDistance1); gl_TessLevelInner[0] = getTessLevelInner(eyeToVertexDistance2, eyeToVertexDistance0); } Tessellation Evaluation shader: #version 410 core layout (triangles, equal_spacing, cw) in; in vec3 te_normal[]; in vec3 te_bitangent[]; in vec3 te_tangent[]; in vec2 te_textureCoordinates[]; in vec3 te_vertex[]; out vec3 g_normal; out vec3 g_bitangent; out vec4 g_patchDistance; out vec3 g_tangent; out vec2 g_textureCoordinates; out vec3 g_vertex; uniform float vf_te_inner; uniform float vf_te_outer; uniform vec4 vf_l_color; uniform vec3 vf_l_position; uniform mat4 vf_m_depthBias; 
uniform mat4 vf_m_model; uniform mat4 vf_m_mvp; uniform mat3 vf_m_normal; uniform mat4 vf_m_projection; uniform mat4 vf_m_view; uniform sampler2D vf_t_diffuse; uniform sampler2D vf_t_displace; uniform sampler2D vf_t_normal; uniform sampler2DShadow vf_t_shadow; uniform sampler2D vf_t_specular; vec2 interpolate2D(vec2 v0, vec2 v1, vec2 v2) { return vec2(gl_TessCoord.x) * v0 + vec2(gl_TessCoord.y) * v1 + vec2(gl_TessCoord.z) * v2; } vec3 interpolate3D(vec3 v0, vec3 v1, vec3 v2) { return vec3(gl_TessCoord.x) * v0 + vec3(gl_TessCoord.y) * v1 + vec3(gl_TessCoord.z) * v2; } float amplify(float d, float scale, float offset) { d = scale * d + offset; d = clamp(d, 0, 1); d = 1 - exp2(-2*d*d); return d; } float getDisplacement(vec2 t0, vec2 t1, vec2 t2) { float displacement = 0.0; vec2 textureCoordinates = interpolate2D(t0, t1, t2); vec2 vector = ((t0 + t1 + t2) / 3.0); float sampleDistance = sqrt((vector.x * vector.x) + (vector.y * vector.y)); sampleDistance /= ((vf_te_inner + vf_te_outer) / 2.0); displacement += texture(vf_t_displace, textureCoordinates).x; displacement += texture(vf_t_displace, textureCoordinates + vec2(-sampleDistance, -sampleDistance)).x; displacement += texture(vf_t_displace, textureCoordinates + vec2(-sampleDistance, sampleDistance)).x; displacement += texture(vf_t_displace, textureCoordinates + vec2( sampleDistance, sampleDistance)).x; displacement += texture(vf_t_displace, textureCoordinates + vec2( sampleDistance, -sampleDistance)).x; return (displacement / 5.0); } void main() { g_normal = normalize(interpolate3D(te_normal[0], te_normal[1], te_normal[2])); g_bitangent = normalize(interpolate3D(te_bitangent[0], te_bitangent[1], te_bitangent[2])); g_patchDistance = vec4(gl_TessCoord, (1.0 - gl_TessCoord.y)); g_tangent = normalize(interpolate3D(te_tangent[0], te_tangent[1], te_tangent[2])); g_textureCoordinates = interpolate2D(te_textureCoordinates[0], te_textureCoordinates[1], te_textureCoordinates[2]); g_vertex = interpolate3D(te_vertex[0], te_vertex[1], te_vertex[2]); float displacement = getDisplacement(te_textureCoordinates[0], te_textureCoordinates[1], te_textureCoordinates[2]); float d2 = min(min(min(g_patchDistance.x, g_patchDistance.y), g_patchDistance.z), g_patchDistance.w); d2 = amplify(d2, 50, -0.5); g_vertex += g_normal * displacement * 0.1 * d2; gl_Position = vf_m_mvp * vec4(g_vertex, 1.0); } Geometry shader: #version 410 core layout (triangles) in; layout (triangle_strip, max_vertices = 3) out; in vec3 g_normal[3]; in vec3 g_bitangent[3]; in vec4 g_patchDistance[3]; in vec3 g_tangent[3]; in vec2 g_textureCoordinates[3]; in vec3 g_vertex[3]; out vec3 f_tangent; out vec3 f_bitangent; out vec3 f_eyeDirection; out vec3 f_lightDirection; out vec3 f_normal; out vec4 f_patchDistance; out vec4 f_shadowCoordinates; out vec2 f_textureCoordinates; out vec3 f_vertex; uniform vec4 vf_l_color; uniform vec3 vf_l_position; uniform mat4 vf_m_depthBias; uniform mat4 vf_m_model; uniform mat4 vf_m_mvp; uniform mat3 vf_m_normal; uniform mat4 vf_m_projection; uniform mat4 vf_m_view; uniform sampler2D vf_t_diffuse; uniform sampler2D vf_t_normal; uniform sampler2DShadow vf_t_shadow; uniform sampler2D vf_t_specular; void main() { int index = 0; while (index < 3) { vec3 vertexNormal_cameraspace = vf_m_normal * normalize(g_normal[index]); vec3 vertexTangent_cameraspace = vf_m_normal * normalize(f_tangent); vec3 vertexBitangent_cameraspace = vf_m_normal * normalize(f_bitangent); mat3 TBN = transpose(mat3( vertexTangent_cameraspace, vertexBitangent_cameraspace, vertexNormal_cameraspace )); 
vec3 eyeDirection = -(vf_m_view * vf_m_model * vec4(g_vertex[index], 1.0)).xyz; vec3 lightDirection = normalize(-(vf_m_view * vec4(vf_l_position, 1.0)).xyz); f_eyeDirection = TBN * eyeDirection; f_lightDirection = TBN * lightDirection; f_normal = normalize(g_normal[index]); f_patchDistance = g_patchDistance[index]; f_shadowCoordinates = vf_m_depthBias * vec4(g_vertex[index], 1.0); f_textureCoordinates = g_textureCoordinates[index]; f_vertex = (vf_m_model * vec4(g_vertex[index], 1.0)).xyz; gl_Position = gl_in[index].gl_Position; EmitVertex(); index ++; } EndPrimitive(); } Fragment shader: #version 410 core in vec3 f_bitangent; in vec3 f_eyeDirection; in vec3 f_lightDirection; in vec3 f_normal; in vec4 f_patchDistance; in vec4 f_shadowCoordinates; in vec3 f_tangent; in vec2 f_textureCoordinates; in vec3 f_vertex; out vec4 fragColor; uniform vec4 vf_l_color; uniform vec3 vf_l_position; uniform mat4 vf_m_depthBias; uniform mat4 vf_m_model; uniform mat4 vf_m_mvp; uniform mat4 vf_m_projection; uniform mat4 vf_m_view; uniform sampler2D vf_t_diffuse; uniform sampler2D vf_t_normal; uniform sampler2DShadow vf_t_shadow; uniform sampler2D vf_t_specular; vec2 poissonDisk[16] = vec2[]( vec2(-0.94201624, -0.39906216), vec2( 0.94558609, -0.76890725), vec2(-0.09418410, -0.92938870), vec2( 0.34495938, 0.29387760), vec2(-0.91588581, 0.45771432), vec2(-0.81544232, -0.87912464), vec2(-0.38277543, 0.27676845), vec2( 0.97484398, 0.75648379), vec2( 0.44323325, -0.97511554), vec2( 0.53742981, -0.47373420), vec2(-0.26496911, -0.41893023), vec2( 0.79197514, 0.19090188), vec2(-0.24188840, 0.99706507), vec2(-0.81409955, 0.91437590), vec2( 0.19984126, 0.78641367), vec2( 0.14383161, -0.14100790) ); float random(vec3 seed, int i) { vec4 seed4 = vec4(seed,i); float dot_product = dot(seed4, vec4(12.9898, 78.233, 45.164, 94.673)); return fract(sin(dot_product) * 43758.5453); } float amplify(float d, float scale, float offset) { d = scale * d + offset; d = clamp(d, 0, 1); d = 1 - exp2(-2.0 * d * d); return d; } void main() { vec3 lightColor = vf_l_color.xyz; float lightPower = vf_l_color.w; vec3 materialDiffuseColor = texture(vf_t_diffuse, f_textureCoordinates).xyz; vec3 materialAmbientColor = vec3(0.1, 0.1, 0.1) * materialDiffuseColor; vec3 materialSpecularColor = texture(vf_t_specular, f_textureCoordinates).xyz; vec3 n = normalize(texture(vf_t_normal, f_textureCoordinates).rgb * 2.0 - 1.0); vec3 l = normalize(f_lightDirection); float cosTheta = clamp(dot(n, l), 0.0, 1.0); vec3 E = normalize(f_eyeDirection); vec3 R = reflect(-l, n); float cosAlpha = clamp(dot(E, R), 0.0, 1.0); float visibility = 1.0; float bias = 0.005 * tan(acos(cosTheta)); bias = clamp(bias, 0.0, 0.01); for (int i = 0; i < 4; i ++) { float shading = (0.5 / 4.0); int index = i; visibility -= shading * (1.0 - texture(vf_t_shadow, vec3(f_shadowCoordinates.xy + poissonDisk[index] / 3000.0, (f_shadowCoordinates.z - bias) / f_shadowCoordinates.w))); } fragColor.xyz = materialAmbientColor + visibility * materialDiffuseColor * lightColor * lightPower * cosTheta + visibility * materialSpecularColor * lightColor * lightPower * pow(cosAlpha, 5); fragColor.w = texture(vf_t_diffuse, f_textureCoordinates).w; } The following images should be enough to give you an idea of the problem. Before moving the camera: Moving the camera just a little. Moving it to the center of the scene.

    Read the article

  • OpenGL Get Rotated X and Y of quad

    - by matejkramny
    I am developing a 2D game using the LWJGL library. So far I have a rotating box. I have done basic rectangle collision, but it doesn't work for rotated rectangles. Does OpenGL have a function that returns the vertices of a rotated rectangle? Or is there another way of doing this using trigonometry? Everything I found while researching this used matrices that I don't understand, so I am asking if there is another way. For clarification, I am trying to find out the true (rotated) X,Y of each point of the rectangle. Let's say the first point of a rectangle (top, left) has x=10, y=10, and the width and height are 100 pixels. When I rotate the rectangle using glRotatef(), the x and y stay the same; the rotation happens inside OpenGL. I need to extract the x,y of each corner so I can detect collisions properly.
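
    glRotatef() only changes how the quad is drawn; OpenGL never hands the transformed vertices back. A minimal sketch of applying the same rotation yourself with trigonometry (names are illustrative):

        #include <cmath>

        // Rotate one corner (x, y) about the rectangle's centre (cx, cy)
        // by 'angle' radians, giving the true on-screen position.
        void rotatePoint(float cx, float cy, float angle, float& x, float& y) {
            float s = std::sin(angle), c = std::cos(angle);
            float dx = x - cx, dy = y - cy;   // step into the centre's frame
            x = cx + dx * c - dy * s;         // the standard 2D rotation,
            y = cy + dx * s + dy * c;         // written out without matrices
        }

    Applying this to all four corners gives the rotated points to feed into a point-in-polygon or separating-axis collision test.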

    Read the article

  • Working out of a vertex array for destructible objects

    - by bobobobo
    I have diamond-shaped polygonal bullets. There are lots of them on the screen. I did not want to create a vertex array for each, so I packed them into a single vertex array and they're all drawn at once: | bullet1.xyz | bullet1.rgb | bullet2.xyz | bullet2.rgb This is great for performance. Each bullet then keeps pointers into the shared buffer: struct Bullet { vector<Vector3f*> verts ; // pointers into the vertex buffer } ; This works fine; the bullets can move and do collision detection, all while having their data in one place. Except when a bullet "dies": then you have to clear a slot and pack all the bullets towards the beginning of the array. Is this a good approach to handling lots of low-poly objects? How else would you do it?
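
    One common alternative to repacking the whole array, sketched here with assumed names rather than taken from the question, is the swap-with-last trick, which frees a slot in O(1):

        #include <algorithm>
        #include <cstddef>
        #include <vector>

        // Kill bullet i by overwriting its slot with the last live bullet's
        // data and shrinking the live count; only liveCount * floatsPerBullet
        // elements are drawn, so the stale tail never renders.
        void killBullet(std::vector<float>& vbo, std::size_t& liveCount,
                        std::size_t i, std::size_t floatsPerBullet) {
            std::size_t last = liveCount - 1;
            if (i != last)
                std::copy_n(vbo.begin() + last * floatsPerBullet, floatsPerBullet,
                            vbo.begin() + i * floatsPerBullet);
            --liveCount;
        }

    The catch is that the bullet whose data was moved must be told about its new slot, which is simpler if Bullet stores an index into the array instead of raw pointers.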

    Read the article

  • How exactly to implement multiple threads in a game

    - by xerwin
    So I recently started learning Java, and having an interest in playing games as well as developing them, naturally I want to create a game in Java. I have experience with games in C# and C++, but all of them were single-threaded, simple games. Now that I've learned how easy it is to create threads in Java, I want to take things to the next level. I started thinking about how I would actually implement threading in a game. I read a couple of articles that say the same thing ("Usually you have a thread for rendering, one for updating game logic, one for AI, ...") but I haven't found (or didn't look hard enough for) an example implementation. My idea for an implementation is something like this (example for the AI): public class AIThread implements Runnable { private List<AI> ai; private Player player; /*...*/ public void run() { while (true) { for (int i = 0; i < ai.size(); i++) { ai.get(i).update(player); } try { Thread.sleep(/* sleep until the next game "tick" */); } catch (InterruptedException e) { return; } } } } I think this could work. I would also need the list of AI in both the rendering and updating threads, since I need to draw the AI and calculate the logic between the player and the AI (though that could be moved to AIThread; this is just an example). Coming from C++ I'm used to doing things elegantly and efficiently, and this seems like neither. So what would be the correct way to handle this? Should I keep multiple copies of resources in each thread, or should I have the resources in one spot, declared with the synchronized keyword? I'm afraid that could cause deadlocks, but I'm not yet qualified enough to know when code will produce a deadlock.

    Read the article

  • Find Nearest Object

    - by ultifinitus
    I have a fairly sizable game engine created, and I'm adding some needed features, such as this one: how do I find the nearest object from a list of points? In this case I could simply use the Pythagorean theorem to find the distance and check the results. I know I can't simply add x and y, because that's the distance to the object only if you took right-angle turns. But I'm wondering if there's something else I could do? I also have a collision system, where essentially I turn objects into smaller objects on a smaller grid, kind of like a minimap, and only if objects exist in the same gridspace do I check for collisions. I could do the same thing, only make the gridspace larger, to check for closeness (rather than checking every. single. object), but that would take additional setup in my base class and clutter up the already cluttered object. TL;DR Question: Is there something efficient and accurate that I can use to detect which object is closest, based on a list of points and sizes?
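
    One standard trick, sketched below: compare squared distances, which picks the same winner as the true Pythagorean distance while skipping every square root; the coarse grid then only limits which objects enter the loop.

        #include <limits>

        struct Obj { float x, y; };   // illustrative: anything with x/y works

        int nearestIndex(const Obj* objs, int count, float px, float py) {
            int best = -1;
            float bestSq = std::numeric_limits<float>::max();
            for (int i = 0; i < count; ++i) {
                float dx = objs[i].x - px, dy = objs[i].y - py;
                float dSq = dx * dx + dy * dy;   // squared distance, no sqrt
                if (dSq < bestSq) { bestSq = dSq; best = i; }
            }
            return best;
        }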

    Read the article

  • How was 20Q made?

    - by Dan the Man
    Ever since I was a kid, I've wondered how they made the 20Q electronic game. In this game, which is its own device, you think of an object, thing, or animal (e.g. a potato or a donkey). Once you have mentally chosen your thing, the device goes through a series of questions such as: Is it larger than a loaf of bread? Is it found outdoors? Is it used for recreation? For each of the questions you can answer yes, no, maybe, or unknown. The way I always thought it worked was with immense nested conditionals (if statements), but I don't think that is very likely, as it would be terribly difficult to understand while coding it. I'm not looking for a discussion, as SE doesn't allow it; I'm looking for concrete knowledge or solutions.
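
    One plausible mechanism (a sketch with made-up names, not the device's actual code) is not a tree of ifs but an object-by-question weight matrix, tuned from past games: every answer nudges the score of every candidate object, and the device guesses the highest-scoring one.

        #include <algorithm>
        #include <cstddef>
        #include <vector>

        struct Guesser {
            // weight[o][q]: how strongly a "yes" to question q suggests object o.
            std::vector<std::vector<float>> weight;
            std::vector<float> score;              // one running score per object

            void answer(int q, float a) {          // a: +1 yes, -1 no, 0 unknown
                for (std::size_t o = 0; o < score.size(); ++o)
                    score[o] += a * weight[o][q];
            }
            int bestGuess() const {
                return int(std::max_element(score.begin(), score.end())
                           - score.begin());
            }
        };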

    Read the article

  • How many achievements should I include, and of what challenge?

    - by stephelton
    I know this question is fairly broad and subjective, but I'm wondering if there's been any published research into what an optimal number of achievements is and what kind of challenge they should present. The game this question directly relates to is a shoot-em-up, but an ideal answer is fairly theoretical. If there are too few achievements, or they are not challenging, I would expect them to fail in their goal of keeping people playing. If there are too many, or they are unreasonably difficult, I would expect people to quickly give up. I personally witnessed the latter happening in Starcraft 2; a section of the achievements would have you win hundreds of games against their AI opponents (boring!).

    Read the article

  • How do you prevent inflation in a virtual economy?

    - by Tetrad
    With your typical MMORPG, players can usually farm the world for raw materials essentially forever. Monsters/mineral veins/etc are usually on some respawn timer so, other than time, there really isn't a good way to limit the amount of new currency entering the system. So that really only leaves money sinks to try to take money out of the system. What are some strategies to prevent inflation of the in-game currency?

    Read the article

  • I want to learn to program in SDL and C++; where do I start? I want to learn only what I need to start making 2D games [on hold]

    - by user2644399
    Lazyfoo of Lazyfoo.net, in his SDL 2D tutorial, wrote that in order for me to start game programming in SDL, I need to know these concepts well: operators, control structures, loops, functions, structures, arrays, references, pointers, classes, objects, how to use templates, and bitwise AND/OR. I want to know the fastest way to learn as much basic C++ as I need to start making 2D games. Thanks in advance.

    Read the article

  • Draw Cards and Eliminate Cards Problem

    - by Jen
    I am having a problem with the following. I want a system inside a game wherein the player draws 2 cards randomly and the enemy draws 2 cards randomly. The program then prints to the console the cards the player drew and the enemy's. The draws must not conflict: no card may be drawn twice. Lastly, the program prints out the card that was not drawn by either the player or the enemy. Here's how I did it, but it is lengthy and full of errors: import java.util.Random; public class Draw { public static Random random = new Random(); public static String cards[] = {"Hall", "Kitchen", "Billiard", "Study", "Pool"}; public static int playercounter; public static int enemycounter; public static String playercardA = null; public static String playercardB = null; public static String enemycardA = null; public static String enemycardB = null; public String lastcard = null; public static void playercardAdraw() { playercounter = random.nextInt(5); playercardA = cards[playercounter]; } public static void playercardBdraw() { playercounter=random.nextInt(5); playercardB= cards[playercounter]; if (playercardB==playercardA || playercardB == enemycardA || playercardB == enemycardB) { return; } } public static void enemycardAdraw () { enemycounter = random.nextInt(5); enemycardA=cards[enemycounter]; if (enemycardA == playercardA || enemycardA == playercardB) { return; } } public static void enemycardBdraw () { enemycounter = random.nextInt(5); enemycardB=cards[enemycounter]; if (enemycardB == playercardA || enemycardB == playercardB || enemycardB == enemycardA) { return; } } public static void main (String args []) { System.out.println("Starting to draw..."); System.out.println("Player's Turn: "); playercardAdraw(); System.out.println("Player's first card: " + playercardA); playercardBdraw(); System.out.println("Player's second card: " + playercardB); System.out.println("Enemy's Turn: "); enemycardAdraw(); System.out.println("Enemy's first card: " + enemycardA); enemycardBdraw(); System.out.println("Enemy's Second card: " + enemycardB); } }
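
    A simpler scheme than retrying random draws is to shuffle the whole deck once and deal from the top: duplicates become impossible and the undrawn card is whatever remains. Sketched in C++ below (Java's Collections.shuffle gives the same one-liner):

        #include <algorithm>
        #include <iostream>
        #include <random>
        #include <string>
        #include <vector>

        int main() {
            std::vector<std::string> cards =
                {"Hall", "Kitchen", "Billiard", "Study", "Pool"};
            std::mt19937 rng(std::random_device{}());
            std::shuffle(cards.begin(), cards.end(), rng); // one shuffle, no retries

            std::cout << "Player: " << cards[0] << ", " << cards[1] << '\n';
            std::cout << "Enemy:  " << cards[2] << ", " << cards[3] << '\n';
            std::cout << "Last card: " << cards[4] << '\n'; // the card nobody drew
        }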

    Read the article

  • Choosing a mobile advertising mediator over going it alone

    - by Notbad
    We have finished our first game for iOS/Android. We would like to give it away for free, adding ads to it. I have been reading a lot about the subject, but it is a bit overwhelming for starters. From what I read, there are some important points to take into consideration: 1) Do as much localization as you can (target your audience with ads relevant to the region they live in). 2) Do not over-advertise in your application. At this moment we have decided to go with AdMob. It seems an easy option for beginners to set up, and it has a good set of ad networks. My question is: will we earn less, for example for iAds, served through AdMob than by implementing iAds without a mediator? Does AdMob pay less than others (this is what I remember from some articles I read)? It would be nice to hear from people with experience on this to light our way a bit.

    Read the article

  • How to perform game object smoothing in multiplayer games

    - by spaceOwl
    We're developing an infrastructure to support multiplayer games for our game engine. In simple terms, each client (player) engine sends some pieces of data regarding the relevant game objects at a given time interval. On the receiving end, we step the incoming data to the current time (to compensate for latency), followed by a smoothing step (which is the subject of this question). I was wondering how smoothing should be performed. Currently the algorithm is similar to this: Receive incoming state for an object (position, velocity, acceleration, rotation, custom data like visual properties, etc.). Calculate a diff between the local object position and the position we have after previous prediction steps. If the diff doesn't exceed some threshold value, start a smoothing step: mark the object's CURRENT POSITION and the TARGET POSITION, then linearly interpolate between these values for 0.3 seconds. I wonder if this scheme is any good, or if there is any other common implementation or algorithm that should be used? (For example, should I only smooth the position, or also other values, such as velocity?) Any help will be appreciated.
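
    The core of the smoothing step described is just a linear interpolation across the window; a sketch with assumed types:

        struct Vec2 { float x, y; };

        Vec2 lerp(Vec2 a, Vec2 b, float t) {
            return { a.x + (b.x - a.x) * t, a.y + (b.y - a.y) * t };
        }

        // Called every frame while a smoothing window is active.
        // elapsed: seconds since the window began; 0.3f is the window length.
        Vec2 smoothedPosition(Vec2 start, Vec2 target, float elapsed) {
            float t = elapsed / 0.3f;
            if (t > 1.0f) t = 1.0f;       // window over: rest on the target
            return lerp(start, target, t);
        }

    A common rule of thumb is to smooth only what the eye sees (position, rotation); velocity and acceleration can usually be taken straight from the snapshot, since they feed prediction rather than rendering.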

    Read the article

  • Strategy to prevent players from seeing through walls in an online FPS?

    - by geneotech
    Why do we still moan about wallhackers in multiplayer first-person shooters? Isn't it possible to perform occlusion culling for all players server-side? For example, send a player's xyz information to a client only when that player is visible in the client's frustum and not occluded by any object? Even if the collision geometry is very simplified, most of the time a cheater won't receive the tactical information. Why not do this?
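
    This is done in practice under the name interest management; a toy sketch of the gating loop, with hypothetical types and helpers, looks like this. The cost is one visibility query per player pair per tick, which is why the occluder geometry has to stay simple:

        // Hypothetical helpers: World::lineBlocked raycasts against the
        // simplified occluder mesh; send() writes to one client's stream.
        void replicateVisiblePlayers(World& world, std::vector<Player>& players) {
            for (Player& viewer : players) {
                for (const Player& other : players) {
                    if (&other == &viewer) continue;
                    if (world.lineBlocked(viewer.eyePosition(), other.position()))
                        continue;                   // occluded: send nothing
                    send(viewer, other.snapshot()); // visible: replicate as usual
                }
            }
        }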

    Read the article

  • C++: Checking if an object faces a point (within a certain range)

    - by bojoradarial
    I have been working on a shooter game in C++, and am trying to add a feature whereby missiles shot must be within 90 degrees (PI/2 radians) of the direction the ship is facing. The missiles will be shot towards the mouse. My idea is that the ship's angle of rotation is compared with the angle between the ship and the mouse (std::atan2(mouseY - shipY, mouseX - shipX)), and if the difference is less than PI/4 (45 degrees) then the missile can be fired. However, I can't seem to get this to work. The ship's angle of rotation is increased and decreased with the A and D keys, so it is possible that it isn't between 0 and 2*PI, hence the use of fmod() below. Code: float userRotation = std::fmod(user->Angle(), 6.28318f); if (std::abs(userRotation - missileAngle) > 0.78f) return; Any help would be appreciated. Thanks!
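
    The likely culprit is that the difference is never wrapped: std::fmod keeps the sign of its first argument, so angles such as 0.1 and 6.2 radians, which are only about 0.18 radians apart on the circle, compare as roughly 6.1 apart. A sketch of the usual fix:

        #include <cmath>

        const float TWO_PI = 6.28318f;
        const float PI = TWO_PI / 2.0f;

        // Signed shortest difference between two angles, wrapped to [-pi, pi].
        float angleDifference(float a, float b) {
            float diff = std::fmod(a - b, TWO_PI);  // now in (-2*pi, 2*pi)
            if (diff >  PI) diff -= TWO_PI;
            if (diff < -PI) diff += TWO_PI;
            return diff;
        }

        // Fire only inside a 90-degree cone (45 degrees to either side):
        // if (std::abs(angleDifference(user->Angle(), missileAngle)) > 0.785398f) return;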

    Read the article

  • Getting a texture from a renderbuffer in OpenGL?

    - by Rushyo
    I've got a renderbuffer (DepthStencil) in an FBO and I need to get a texture from it. I can't have both a DepthComponent texture and a DepthStencil renderbuffer in the FBO, it seems, so I need some way to convert the renderbuffer to a DepthComponent texture after I'm done with it for use later down the pipeline. I've tried plenty of techniques to grab the depth component from the renderbuffer for weeks but I always come out with junk. All I want at the end is the same texture I'd get from an FBO if I wasn't using a renderbuffer. Can anyone post some comprehensive instructions or code that covers this seemingly simple operation? EDIT: Linky to an extract version of the code http://dl.dropbox.com/u/9279501/fbo.cs Screeny of the Depth of Field effect + FBO - without depth(!) http://i.stack.imgur.com/Hj9Oe.jpg Screeny without Depth of Field effect + FBO - depth working fine http://i.stack.imgur.com/boOm1.jpg
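
    One route that avoids the copy entirely, if it fits the rest of the pipeline: attach a packed depth-stencil texture in place of the renderbuffer, so the depth is directly sampleable afterwards. A GL-flavored sketch (assumes GL 3.0+ or EXT_packed_depth_stencil; width and height are your FBO's size):

        GLuint depthTex;
        glGenTextures(1, &depthTex);
        glBindTexture(GL_TEXTURE_2D, depthTex);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
        // One texture serves the FBO as both depth and stencil:
        glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH24_STENCIL8, width, height, 0,
                     GL_DEPTH_STENCIL, GL_UNSIGNED_INT_24_8, NULL);
        glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT,
                               GL_TEXTURE_2D, depthTex, 0);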

    Read the article

  • Better solution for boolean mixing?

    - by Ruben Nunez
    Sorry if this question has been asked in the past, but searching Google and here didn't yield relevant results, so here goes. I'm working on a fragment shader that implements both conditional/boolean diffuse and bump mapping (that is to say, you don't need a diffuse texture or a normals texture, and if they're not present, they're simply changed to default values). My current solution is to use a uniform float to say "mix amount". For example, computing the diffuse texel works as: // Compute diffuse amount scaled by vCol // If no texture is present (mDif = 0.0), then DiffuseTexel = vCol // kT[0] is the diffuse texture // vTex is the texture co-ordinates // mDif is the uniform float containing the mix amount (either 0.0 or 1.0) vec4 DiffuseTexel = vCol*mix(vec4(1.0), texture2D(kT[0], vTex), mDif); While that works great and all, I was wondering if there's a better way of doing this, as I will never have any use for in-between values for funky effects. I know that perhaps the best solution is to simply write separate shaders for mDif=0.0 and mDif=1.0, but I'd like a more elegant solution than splicing shaders before compiling or writing multiple shader files and keeping each one updated. Any ideas are greatly appreciated. =)

    Read the article

  • Geometry Shader: Points + Triangles

    - by CmasterG
    I have different shaders, and for each shader an instance of the ShaderClass class, which initializes it, renders it, etc. Most of my ShaderClass instances don't use a geometry shader, but one of them does. The problem is that when I render one object with the ShaderClass that uses the geometry shader, all other objects are rendered with the same geometry that I create in the geometry shader. Can you help me? Do I have to use a geometry shader for every object once I use one for a single object? I am using DirectX 11 with C++.
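
    In D3D11, pipeline state is sticky: a bound geometry shader stays bound for every subsequent draw until it is replaced. If the other ShaderClass instances never touch that stage, clearing it before their draws should stop the geometry leaking across objects; you do not need a geometry shader per object. A sketch (variable names assumed):

        // When rendering with a shader class that has no geometry stage:
        deviceContext->GSSetShader(nullptr, nullptr, 0); // unbind the GS
        deviceContext->VSSetShader(vertexShader, nullptr, 0);
        deviceContext->PSSetShader(pixelShader, nullptr, 0);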

    Read the article

  • Knowing state of game in real time

    - by evthim
    I'm trying to code a tic-tac-toe game in Java, and I need help figuring out how to check efficiently, and without freezing the program, whether someone has won the game. I'm only in the design stages now; I haven't started programming anything, but I'm wondering how I would know the state of the game at all times and exactly when someone wins. Response to MarkR (note: had to place comment here, it was too long for the comment section): It's not a homework problem; I'm trying to get more practice programming GUIs, which I've only done once as a freshman in my second introductory programming course. I understand I'll have a 2D array. I plan to have a 2D integer array where x would equal 1 and o would equal 0. However, won't it take too much time if I check after every move whether someone won the game? Is there a way, or a data structure or algorithm I can use, so that the program will know the state of the game at all times and thus know automatically when someone won? (When I say state, I mean not just knowing every position on the board, which the int array takes care of, but knowing that user 1 will win if he places an x on this block.)
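
    Checking after every move won't freeze anything: only the lines through the last move can complete, so the test is a handful of comparisons. A sketch (in C++ for brevity; it translates directly to Java):

        // b holds 1 for x, 0 for o, and some other value (say -1) for empty.
        // r, c: the square just played; p: the player who played it.
        bool wins(const int b[3][3], int r, int c, int p) {
            bool row  = b[r][0] == p && b[r][1] == p && b[r][2] == p;
            bool col  = b[0][c] == p && b[1][c] == p && b[2][c] == p;
            bool diag = r == c     && b[0][0] == p && b[1][1] == p && b[2][2] == p;
            bool anti = r + c == 2 && b[0][2] == p && b[1][1] == p && b[2][0] == p;
            return row || col || diag || anti;
        }

    The same idea answers the "about to win" question: a player threatens a line when it holds two of its cells and the third is empty.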

    Read the article

  • Combining pathfinding with global AI objectives

    - by V_Programmer
    I'm making a turn-based strategy game using Java and LibGDX. Now I want to code the AI. I haven't written the AI code yet; I've simply designed it. The AI will have two components: one focused on tactics and resource management (creating troops, determining who has the strategic advantage, detecting important objectives, etc.) and an individual component focused on assigning work to each unit, examining its possibilities, and moving the unit. Now I'm facing an important problem. The map where the action takes place is a grid-based map. Each terrain has a different movement cost. I read about pathfinding, and I think A* is a very good option to determine a good route between two points. However, imagine I have a unit with movement = 5 (i.e., it can move 5 tiles of movement cost = 1). My tactical AI has found an objective at a distance d = 20 tiles (Manhattan distance) from my unit. My problem is the following: the unit won't be able to reach the objective in one turn, so the AI will have to store a list of positions and execute them over various turns. I don't know how to solve this. PS. In my unit code, I have a list called "selectionMarks" which stores all the possible places where the unit can go this turn. These places are calculated recursively using a "getSelectionMarks" function. Any help is appreciated :D
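
    A common pattern, sketched below with assumed types, is to run A* once, store the whole path on the unit, and let the individual component consume as much of it as the movement budget allows each turn, re-planning only when the map changes:

        #include <deque>

        struct Step { int cost; int x, y; };  // one tile of an A* result

        struct Unit {
            int x, y;
            int movement;                     // budget per turn
            std::deque<Step> path;            // filled once by A*

            void takeTurn() {
                int budget = movement;
                while (!path.empty() && path.front().cost <= budget) {
                    budget -= path.front().cost;
                    x = path.front().x;       // advance along the stored route
                    y = path.front().y;
                    path.pop_front();
                }                             // objective reached when path empties
            }
        };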

    Read the article

  • CCSpriteHole in cocos2d 2.0?

    - by rakkarage
    i was using this cocos2d class CCSpriteHole in cocos2d 1.0 fine... http://jpsarda.tumblr.com/post/15779708304/new-cocos2d-iphone-extensions-a-progress-bar-and-a i am trying to convert it to cocos2d 2.0... i got it to compile by changing glVertexPointer to glVertexAttribPointer like in the 2.0 version of CCSpriteScale9 here http://jpsarda.tumblr.com/post/9162433577/scale9grid-for-cocos2d and changing contentSizeInPixels_ to contentSize_... -(id) init { if( (self=[super init]) ) { opacityModifyRGB_ = YES; opacity_ = 255; color_ = colorUnmodified_ = ccWHITE; capSize=capSizeInPixels=CGSizeZero; //Not used blendFunc_.src = CC_BLEND_SRC; blendFunc_.dst = CC_BLEND_DST; // update texture (calls updateBlendFunc) [self setTexture:nil]; // default transform anchor anchorPoint_ = ccp(0.5f, 0.5f); vertexDataCount=24; vertexData = (ccV2F_C4F_T2F*) malloc(vertexDataCount * sizeof(ccV2F_C4F_T2F)); [self setTextureRectInPixels:CGRectZero untrimmedSize:CGSizeZero]; } return self; } -(id) initWithTexture:(CCTexture2D*)texture rect:(CGRect)rect { NSAssert(texture!=nil, @"Invalid texture for sprite"); // IMPORTANT: [self init] and not [super init]; if( (self = [self init]) ) { [self setTexture:texture]; [self setTextureRect:rect]; } return self; } -(id) initWithTexture:(CCTexture2D*)texture { NSAssert(texture!=nil, @"Invalid texture for sprite"); CGRect rect = CGRectZero; rect.size = texture.contentSize; return [self initWithTexture:texture rect:rect]; } -(id) initWithFile:(NSString*)filename { NSAssert(filename!=nil, @"Invalid filename for sprite"); CCTexture2D *texture = [[CCTextureCache sharedTextureCache] addImage: filename]; if( texture ) return [self initWithTexture:texture]; return nil; } +(id)spriteWithFile:(NSString*)f { return [[self alloc] initWithFile:f]; } - (void) dealloc { if (vertexData) free(vertexData); } -(void) updateColor { ccColor4F color4; color4.r=(float)color_.r/255.0f; color4.g=(float)color_.g/255.0f; color4.b=(float)color_.b/255.0f; color4.a=(float)opacity_/255.0f; for (int i=0; i<vertexDataCount; i++) { vertexData[i].colors=color4; } } -(void)updateTextureCoords:(CGRect)rect { CCTexture2D *tex = texture_; if(!tex) return; float atlasWidth = (float)tex.pixelsWide; float atlasHeight = (float)tex.pixelsHigh; float left,right,top,bottom; left = rect.origin.x/atlasWidth; right = left + rect.size.width/atlasWidth; top = rect.origin.y/atlasHeight; bottom = top + rect.size.height/atlasHeight; // // |/|/|/| // CGSize capTexCoordsSize=CGSizeMake(capSizeInPixels.width/atlasWidth, capSizeInPixels.height/atlasHeight); // From left to right //Top band // Left vertexData[0].texCoords=(ccTex2F){left,top}; vertexData[1].texCoords=(ccTex2F){left,top+capTexCoordsSize.height}; vertexData[2].texCoords=(ccTex2F){left+capTexCoordsSize.width,top}; vertexData[3].texCoords=(ccTex2F){left+capTexCoordsSize.width,top+capTexCoordsSize.height}; // Center vertexData[4].texCoords=(ccTex2F){right-capTexCoordsSize.width,top}; vertexData[5].texCoords=(ccTex2F){right-capTexCoordsSize.width,top+capTexCoordsSize.height}; // Right vertexData[6].texCoords=(ccTex2F){right,top}; vertexData[7].texCoords=(ccTex2F){right,top+capTexCoordsSize.height}; //Center band // Left vertexData[8].texCoords=(ccTex2F){left,bottom-capTexCoordsSize.height}; vertexData[9].texCoords=(ccTex2F){left,top+capTexCoordsSize.height}; vertexData[10].texCoords=(ccTex2F){left+capTexCoordsSize.width,bottom-capTexCoordsSize.height}; vertexData[11].texCoords=(ccTex2F){left+capTexCoordsSize.width,top+capTexCoordsSize.height}; // Center 
vertexData[12].texCoords=(ccTex2F){right-capTexCoordsSize.width,bottom-capTexCoordsSize.height}; vertexData[13].texCoords=(ccTex2F){right-capTexCoordsSize.width,top+capTexCoordsSize.height}; // Right vertexData[14].texCoords=(ccTex2F){right,bottom-capTexCoordsSize.height}; vertexData[15].texCoords=(ccTex2F){right,top+capTexCoordsSize.height}; //Bottom band //Left vertexData[16].texCoords=(ccTex2F){left,bottom}; vertexData[17].texCoords=(ccTex2F){left,bottom-capTexCoordsSize.height}; vertexData[18].texCoords=(ccTex2F){left+capTexCoordsSize.width,bottom}; vertexData[19].texCoords=(ccTex2F){left+capTexCoordsSize.width,bottom-capTexCoordsSize.height}; // Center vertexData[20].texCoords=(ccTex2F){right-capTexCoordsSize.width,bottom}; vertexData[21].texCoords=(ccTex2F){right-capTexCoordsSize.width,bottom-capTexCoordsSize.height}; // Right vertexData[22].texCoords=(ccTex2F){right,bottom}; vertexData[23].texCoords=(ccTex2F){right,bottom-capTexCoordsSize.height}; } -(void) updateVertices { float left=0; //-spriteSizeInPixels.width*0.5f; float right=left+contentSize_.width; float bottom=0; //-spriteSizeInPixels.height*0.5f; float top=bottom+contentSize_.height; float holeLeft=holeRect.origin.x*CC_CONTENT_SCALE_FACTOR(); float holeRight=holeLeft+holeRect.size.width*CC_CONTENT_SCALE_FACTOR(); float holeBottom=holeRect.origin.y*CC_CONTENT_SCALE_FACTOR(); float holeTop=holeBottom+holeRect.size.height*CC_CONTENT_SCALE_FACTOR(); // // |/|/|/| // // From left to right //Top band // Left vertexData[0].vertices=(ccVertex2F){left,top}; vertexData[1].vertices=(ccVertex2F){left,holeTop}; vertexData[2].vertices=(ccVertex2F){holeLeft,top}; vertexData[3].vertices=(ccVertex2F){holeLeft,holeTop}; // Center vertexData[4].vertices=(ccVertex2F){holeRight,top}; vertexData[5].vertices=(ccVertex2F){holeRight,holeTop}; // Right vertexData[6].vertices=(ccVertex2F){right,top}; vertexData[7].vertices=(ccVertex2F){right,holeTop}; //Center band // Left vertexData[8].vertices=(ccVertex2F){left,holeBottom}; vertexData[9].vertices=(ccVertex2F){left,holeTop}; vertexData[10].vertices=(ccVertex2F){holeLeft,holeBottom}; vertexData[11].vertices=(ccVertex2F){holeLeft,holeTop}; // Center vertexData[12].vertices=(ccVertex2F){holeRight,holeBottom}; vertexData[13].vertices=(ccVertex2F){holeRight,holeTop}; // Right vertexData[14].vertices=(ccVertex2F){right,holeBottom}; vertexData[15].vertices=(ccVertex2F){right,holeTop}; //Bottom band //Left vertexData[16].vertices=(ccVertex2F){left,bottom}; vertexData[17].vertices=(ccVertex2F){left,holeBottom}; vertexData[18].vertices=(ccVertex2F){holeLeft,bottom}; vertexData[19].vertices=(ccVertex2F){holeLeft,holeBottom}; // Center vertexData[20].vertices=(ccVertex2F){holeRight,bottom}; vertexData[21].vertices=(ccVertex2F){holeRight,holeBottom}; // Right vertexData[22].vertices=(ccVertex2F){right,bottom}; vertexData[23].vertices=(ccVertex2F){right,holeBottom}; } -(void) setHole:(CGRect)r inRect:(CGRect)totalSurface { holeRect=r; self.contentSize=totalSurface.size; holeRect.origin=ccpSub(holeRect.origin,totalSurface.origin); CGPoint holeCenter=ccp(holeRect.origin.x+holeRect.size.width*0.5f,holeRect.origin.y+holeRect.size.height*0.5f); self.anchorPoint=ccp(holeCenter.x/contentSize_.width,holeCenter.y/contentSize_.height); //[self updateTextureCoords:rectInPixels_]; [self updateVertices]; [self updateColor]; } -(void) draw { BOOL newBlend = NO; if( blendFunc_.src != CC_BLEND_SRC || blendFunc_.dst != CC_BLEND_DST ) { newBlend = YES; glBlendFunc( blendFunc_.src, blendFunc_.dst ); } glBindTexture(GL_TEXTURE_2D, 
[texture_ name]); glVertexAttribPointer(kCCVertexAttrib_Position, 2, GL_FLOAT, GL_FALSE, sizeof(ccV2F_C4F_T2F), &vertexData[0].vertices); glVertexAttribPointer(kCCVertexAttrib_TexCoords, 2, GL_FLOAT, GL_FALSE, sizeof(ccV2F_C4F_T2F), &vertexData[0].texCoords); glVertexAttribPointer(kCCVertexAttrib_Color, 4, GL_FLOAT, GL_FALSE, sizeof(ccV2F_C4F_T2F), &vertexData[0].colors); glDrawArrays(GL_TRIANGLE_STRIP, 0, 8); glVertexAttribPointer(kCCVertexAttrib_Position, 2, GL_FLOAT, GL_FALSE, sizeof(ccV2F_C4F_T2F), &vertexData[8].vertices); glVertexAttribPointer(kCCVertexAttrib_TexCoords, 2, GL_FLOAT, GL_FALSE, sizeof(ccV2F_C4F_T2F), &vertexData[8].texCoords); glVertexAttribPointer(kCCVertexAttrib_Color, 4, GL_FLOAT, GL_FALSE, sizeof(ccV2F_C4F_T2F), &vertexData[8].colors); glDrawArrays(GL_TRIANGLE_STRIP, 0, 8); glVertexAttribPointer(kCCVertexAttrib_Position, 2, GL_FLOAT, GL_FALSE, sizeof(ccV2F_C4F_T2F), &vertexData[16].vertices); glVertexAttribPointer(kCCVertexAttrib_TexCoords, 2, GL_FLOAT, GL_FALSE, sizeof(ccV2F_C4F_T2F), &vertexData[16].texCoords); glVertexAttribPointer(kCCVertexAttrib_Color, 4, GL_FLOAT, GL_FALSE, sizeof(ccV2F_C4F_T2F), &vertexData[16].colors); glDrawArrays(GL_TRIANGLE_STRIP, 0, 8); if( newBlend ) glBlendFunc(CC_BLEND_SRC, CC_BLEND_DST); } -(void)setTextureRectInPixels:(CGRect)rect untrimmedSize:(CGSize)untrimmedSize { rectInPixels_ = rect; rect_ = CC_RECT_PIXELS_TO_POINTS( rect ); //[self setContentSizeInPixels:untrimmedSize]; [self updateTextureCoords:rectInPixels_]; } -(void)setTextureRect:(CGRect)rect { CGRect rectInPixels = CC_RECT_POINTS_TO_PIXELS( rect ); [self setTextureRectInPixels:rectInPixels untrimmedSize:rectInPixels.size]; } // // RGBA protocol // #pragma mark CCSpriteHole - RGBA protocol -(GLubyte) opacity { return opacity_; } -(void) setOpacity:(GLubyte) anOpacity { opacity_ = anOpacity; // special opacity for premultiplied textures if( opacityModifyRGB_ ) [self setColor: (opacityModifyRGB_ ? colorUnmodified_ : color_ )]; [self updateColor]; } - (ccColor3B) color { if(opacityModifyRGB_){ return colorUnmodified_; } return color_; } -(void) setColor:(ccColor3B)color3 { color_ = colorUnmodified_ = color3; if( opacityModifyRGB_ ){ color_.r = color3.r * opacity_/255; color_.g = color3.g * opacity_/255; color_.b = color3.b * opacity_/255; } [self updateColor]; } -(void) setOpacityModifyRGB:(BOOL)modify { ccColor3B oldColor = self.color; opacityModifyRGB_ = modify; self.color = oldColor; } -(BOOL) doesOpacityModifyRGB { return opacityModifyRGB_; } #pragma mark CCSpriteHole - CocosNodeTexture protocol -(void) updateBlendFunc { if( !texture_ || ! [texture_ hasPremultipliedAlpha] ) { blendFunc_.src = GL_SRC_ALPHA; blendFunc_.dst = GL_ONE_MINUS_SRC_ALPHA; [self setOpacityModifyRGB:NO]; } else { blendFunc_.src = CC_BLEND_SRC; blendFunc_.dst = CC_BLEND_DST; [self setOpacityModifyRGB:YES]; } } -(void) setTexture:(CCTexture2D*)texture { // accept texture==nil as argument NSAssert( !texture || [texture isKindOfClass:[CCTexture2D class]], @"setTexture expects a CCTexture2D. Invalid argument"); texture_ = texture; [self updateBlendFunc]; } -(CCTexture2D*) texture { return texture_; } @end but now positioning and scaling seem to not work? and it starts in the wrong position... but changing the opacity still works. so i was wondering if anyone can see why my 2.0 version is not working? or if maybe there is a better way to do a sprite hole with cocos2d/opengl 2.0? shaders? thanks
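
    One thing worth checking, from memory of the cocos2d-iphone 2.0 API (treat the exact names as assumptions to verify against the 2.0 sources): 2.0 renders through GLES2 shader programs, so a custom draw method must bind a program and upload the node's transform, otherwise position and scale never reach the GPU, which matches the symptoms described. Roughly:

        // In init, give the node the stock textured shader:
        self.shaderProgram = [[CCShaderCache sharedShaderCache]
                                 programForKey:kCCShader_PositionTextureColor];
        // At the top of -draw:
        CC_NODE_DRAW_SETUP();   // binds the program and sets the MVP uniform
        ccGLEnableVertexAttribs(kCCVertexAttribFlag_PosColorTex);
        ccGLBindTexture2D([texture_ name]);   // instead of raw glBindTexture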

    Read the article

  • Very slow Direct3D texture sampling

    - by __dominic
    Hi, so I'm writing a small game using Direct3D 9, and I'm using multitexturing for the terrain. All I'm doing is sampling 3 textures and a blend map, and getting the overall color from the three textures based on the color channels of the blend map. Anyway, I am getting a massive frame rate drop when I sample more than one texture: I'm going from 120+ fps to just under 50. This is the HLSL code responsible for the slowdown: float3 ground = tex2D(GroundTex, multiTex).rgb; float3 stone = tex2D(StoneTex, multiTex).rgb; float3 grass = tex2D(GrassTex, multiTex).rgb; float3 blend = tex2D(BlendMapTex, blendMap).rgb; Am I doing it wrong? If anyone has any info or tips about texture sampling or anything, that would be nice. Thanks.

    Read the article

  • A simple example of movement prediction

    - by Daniel
    I've seen lots of examples of the theory behind client-side prediction, but I'm having a hard time converting it into code. I was wondering if someone knows of specific examples that share some of the code, or can share their knowledge to shed some light on my situation. I'm trying to run some tests to get movement going (smoothly) between multiple clients. I'm using mouse input to initiate movement, and AS3 and C# on a local Player.IO server. Right now I'm trying to get the client side working, as I'm only forwarding position info with the client. I have two timers (an onEnterFrame handler and a 100 ms Timer) and a mouse-click listener. When I click anywhere with the mouse, I update my player class to give it a destination point. On every enterFrame event for the player, it moves towards the destination point. Every 100 ms, a message is sent to the server with the position the player should be at in another 100 ms. The distance traveled is calculated by taking the distance (in pixels) that the player can travel in one second and dividing it by the frame rate for the onEnterFrame handler, and by the update frequency (1/0.100 s) for the server update. For the other players, the location is interpolated and animated on every frame based on the new location. Is this the right way of doing it?
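
    For the receiving side, the usual minimal recipe is dead reckoning plus easing: step the last snapshot forward by its age, then pull the rendered position toward that prediction a little each frame. A sketch with assumed types (it presumes velocity is sent along with position, which is worth adding to the packet):

        struct Vec2 { float x, y; };
        Vec2 add(Vec2 a, Vec2 b)    { return { a.x + b.x, a.y + b.y }; }
        Vec2 scale(Vec2 v, float s) { return { v.x * s, v.y * s }; }

        struct Snapshot { Vec2 pos, vel; float sentAt; }; // last state received

        // Called once per rendered frame for each remote player.
        Vec2 displayPosition(const Snapshot& s, Vec2 shown, float now) {
            Vec2 predicted = add(s.pos, scale(s.vel, now - s.sentAt)); // dead reckoning
            const float easing = 0.2f;     // fraction of the gap closed per frame
            Vec2 gap = add(predicted, scale(shown, -1.0f));
            return add(shown, scale(gap, easing));
        }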

    Read the article

  • Wheel Joint Implementation in AndEngine

    - by Siddharth
    I am currently developing a car game in AndEngine, in which I was using a revolute joint to attach the car's wheels to the chassis. But my friend suggested that I use a wheel joint instead, for better car behavior. I didn't find a wheel joint implementation in AndEngine, so what do I have to do to implement one? I think I have to manually update the Box2D library for this purpose, but I don't know how much would need updating. Please give me some guidance on achieving better car behavior in AndEngine.
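
    For reference, the native Box2D (C++) setup looks roughly like this; an AndEngine port would need to wrap b2WheelJointDef and expose the same parameters through its Box2D extension, which is the part you would have to add (the bodies and world here are assumed to exist):

        // Suspension axis points up; anchor at the wheel hub.
        b2WheelJointDef jd;
        jd.Initialize(chassisBody, wheelBody,
                      wheelBody->GetPosition(), b2Vec2(0.0f, 1.0f));
        jd.frequencyHz    = 4.0f;   // suspension spring stiffness
        jd.dampingRatio   = 0.7f;   // suspension damping
        jd.enableMotor    = true;   // drive the wheel through the joint motor
        jd.maxMotorTorque = 20.0f;
        b2WheelJoint* joint = (b2WheelJoint*)world->CreateJoint(&jd);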

    Read the article

  • Previewing a Demo Level in Mobile for UDK?

    - by Reno Yeo
    I've already clicked on "Emulate Mobile Features" and everything has been compiled. I've also set the mobile previewer settings to the iPhone 4's dimensions and features. However, when I click on the mobile previewer, a new window pops up but goes into a "Not Responding" state after a while. Is there anything I'm doing wrong? To be honest, I'm afraid of the learning curve of UDK, but I am interested in developing a game with it.

    Read the article
