Search Results

Search found 4870 results on 195 pages for '3d engines'.

Page 11 of 195

  • Render 2D textures on a 3D object's face

    - by www.Sillitoy.com
    I am not familiar with 3D graphics, and I'd like to know the right way to render some 2D figures at different points on a wide face of a 3D object. My 3D object is just a cube representing a poker table. I have a 2D PNG for the players' placeholders, and I'd like to render these figures on the 3D object where needed. An alternative solution would be to render the whole face with one big picture containing all the placeholder figures; however, that would waste memory and thus be less efficient. What do you suggest?

    Read the article
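
    The question above doesn't name an engine, so purely as an illustration of the usual alternative to baking one big texture: each placeholder can be drawn as its own small textured quad (a decal) positioned on the table face. A minimal XNA-style sketch, assuming a BasicEffect whose World/View/Projection matrices are already set; the class and parameter names are ours:

        // Illustration only: draws one placeholder as a small textured quad lying on the
        // table's top face (assumed to face +Y). Set effect.World/View/Projection first.
        using Microsoft.Xna.Framework;
        using Microsoft.Xna.Framework.Graphics;

        public static class PlaceholderDecal
        {
            // center = point on the table surface, size = edge length of the quad in world units
            public static void Draw(GraphicsDevice device, BasicEffect effect,
                                    Texture2D placeholderTexture, Vector3 center, float size)
            {
                float h = size / 2f;
                Vector3 c = center + new Vector3(0, 0.01f, 0);   // lift slightly to avoid z-fighting

                VertexPositionTexture[] quad =
                {
                    new VertexPositionTexture(c + new Vector3(-h, 0, -h), new Vector2(0, 0)),
                    new VertexPositionTexture(c + new Vector3( h, 0, -h), new Vector2(1, 0)),
                    new VertexPositionTexture(c + new Vector3(-h, 0,  h), new Vector2(0, 1)),
                    new VertexPositionTexture(c + new Vector3( h, 0,  h), new Vector2(1, 1)),
                };

                device.BlendState = BlendState.AlphaBlend;       // respect the PNG's transparency
                effect.TextureEnabled = true;
                effect.Texture = placeholderTexture;

                foreach (EffectPass pass in effect.CurrentTechnique.Passes)
                {
                    pass.Apply();
                    device.DrawUserPrimitives(PrimitiveType.TriangleStrip, quad, 0, 2);
                }
            }
        }

    Memory-wise this keeps a single small PNG texture and reuses it for every seat, which is what the question is after.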

  • 3D Graphics with XNA Game Studio 4.0: bug in light map?

    - by Eibis
    I'm following the tutorials in 3D Graphics with XNA Game Studio 4.0 and I got a horrible effect when I tried to implement the light map: http://i.stack.imgur.com/BUWvU.jpg The effect shows up when I look towards the center of the house (and it moves with me). It has this shape because I'm using a sphere to represent the light; using other light shapes gives different results. I'm using a class PrelightingRenderer:

    using System; using System.Collections.Generic; using System.Linq; using System.Text; using Microsoft.Xna.Framework; using Microsoft.Xna.Framework.Graphics; using Dhpoware; using Microsoft.Xna.Framework.Content; namespace XNAFirstPersonCamera { public class PrelightingRenderer { // Normal, depth, and light map render targets RenderTarget2D depthTarg; RenderTarget2D normalTarg; RenderTarget2D lightTarg; // Depth/normal effect and light mapping effect Effect depthNormalEffect; Effect lightingEffect; // Point light (sphere) mesh Model lightMesh; // List of models, lights, and the camera public List<CModel> Models { get; set; } public List<PPPointLight> Lights { get; set; } public FirstPersonCamera Camera { get; set; } GraphicsDevice graphicsDevice; int viewWidth = 0, viewHeight = 0; public PrelightingRenderer(GraphicsDevice GraphicsDevice, ContentManager Content) { viewWidth = GraphicsDevice.Viewport.Width; viewHeight = GraphicsDevice.Viewport.Height; // Create the three render targets depthTarg = new RenderTarget2D(GraphicsDevice, viewWidth, viewHeight, false, SurfaceFormat.Single, DepthFormat.Depth24); normalTarg = new RenderTarget2D(GraphicsDevice, viewWidth, viewHeight, false, SurfaceFormat.Color, DepthFormat.Depth24); lightTarg = new RenderTarget2D(GraphicsDevice, viewWidth, viewHeight, false, SurfaceFormat.Color, DepthFormat.Depth24); // Load effects depthNormalEffect = Content.Load<Effect>(@"Effects\PPDepthNormal"); lightingEffect = Content.Load<Effect>(@"Effects\PPLight"); // Set effect parameters to light mapping effect lightingEffect.Parameters["viewportWidth"].SetValue(viewWidth); lightingEffect.Parameters["viewportHeight"].SetValue(viewHeight); // Load point light mesh and set light mapping effect to it lightMesh = Content.Load<Model>(@"Models\PPLightMesh"); lightMesh.Meshes[0].MeshParts[0].Effect = lightingEffect; this.graphicsDevice = GraphicsDevice; } public void Draw() { drawDepthNormalMap(); drawLightMap(); prepareMainPass(); } void drawDepthNormalMap() { // Set the render targets to 'slots' 1 and 2 graphicsDevice.SetRenderTargets(normalTarg, depthTarg); // Clear the render target to 1 (infinite depth) graphicsDevice.Clear(Color.White); // Draw each model with the PPDepthNormal effect foreach (CModel model in Models) { model.CacheEffects(); model.SetModelEffect(depthNormalEffect, false); model.Draw(Camera.ViewMatrix, Camera.ProjectionMatrix, Camera.Position); model.RestoreEffects(); } // Un-set the render targets graphicsDevice.SetRenderTargets(null); } void drawLightMap() { // Set the depth and normal map info to the effect lightingEffect.Parameters["DepthTexture"].SetValue(depthTarg); lightingEffect.Parameters["NormalTexture"].SetValue(normalTarg); // Calculate the view * projection matrix Matrix viewProjection = Camera.ViewMatrix * Camera.ProjectionMatrix; // Set the inverse of the view * projection matrix to the effect Matrix invViewProjection = Matrix.Invert(viewProjection); lightingEffect.Parameters["InvViewProjection"].SetValue(invViewProjection); // Set the render target to the graphics device graphicsDevice.SetRenderTarget(lightTarg); // Clear the render target to black (no light) graphicsDevice.Clear(Color.Black); // Set render states to additive (lights will add their influences) graphicsDevice.BlendState = BlendState.Additive; graphicsDevice.DepthStencilState = DepthStencilState.None; foreach (PPPointLight light in Lights) { // Set the light's parameters to the effect light.SetEffectParameters(lightingEffect); // Calculate the world * view * projection matrix and set it to // the effect Matrix wvp = (Matrix.CreateScale(light.Attenuation) * Matrix.CreateTranslation(light.Position)) * viewProjection; lightingEffect.Parameters["WorldViewProjection"].SetValue(wvp); // Determine the distance between the light and camera float dist = Vector3.Distance(Camera.Position, light.Position); // If the camera is inside the light-sphere, invert the cull mode // to draw the inside of the sphere instead of the outside if (dist < light.Attenuation) graphicsDevice.RasterizerState = RasterizerState.CullClockwise; // Draw the point-light-sphere lightMesh.Meshes[0].Draw(); // Revert the cull mode graphicsDevice.RasterizerState = RasterizerState.CullCounterClockwise; } // Revert the blending and depth render states graphicsDevice.BlendState = BlendState.Opaque; graphicsDevice.DepthStencilState = DepthStencilState.Default; // Un-set the render target graphicsDevice.SetRenderTarget(null); } void prepareMainPass() { foreach (CModel model in Models) foreach (ModelMesh mesh in model.Model.Meshes) foreach (ModelMeshPart part in mesh.MeshParts) { // Set the light map and viewport parameters to each model's effect if (part.Effect.Parameters["LightTexture"] != null) part.Effect.Parameters["LightTexture"].SetValue(lightTarg); if (part.Effect.Parameters["viewportWidth"] != null) part.Effect.Parameters["viewportWidth"].SetValue(viewWidth); if (part.Effect.Parameters["viewportHeight"] != null) part.Effect.Parameters["viewportHeight"].SetValue(viewHeight); } } } }

    It uses three effects. PPDepthNormal.fx:

    float4x4 World; float4x4 View; float4x4 Projection; struct VertexShaderInput { float4 Position : POSITION0; float3 Normal : NORMAL0; }; struct VertexShaderOutput { float4 Position : POSITION0; float2 Depth : TEXCOORD0; float3 Normal : TEXCOORD1; }; VertexShaderOutput VertexShaderFunction(VertexShaderInput input) { VertexShaderOutput output; float4x4 viewProjection = mul(View, Projection); float4x4 worldViewProjection = mul(World, viewProjection); output.Position = mul(input.Position, worldViewProjection); output.Normal = mul(input.Normal, World); // Position's z and w components correspond to the distance // from camera and distance of the far plane respectively output.Depth.xy = output.Position.zw; return output; } // We render to two targets simultaneously, so we can't // simply return a float4 from the pixel shader struct PixelShaderOutput { float4 Normal : COLOR0; float4 Depth : COLOR1; }; PixelShaderOutput PixelShaderFunction(VertexShaderOutput input) { PixelShaderOutput output; // Depth is stored as distance from camera / far plane distance // to get value between 0 and 1 output.Depth = input.Depth.x / input.Depth.y; // Normal map simply stores X, Y and Z components of normal // shifted from (-1 to 1) range to (0 to 1) range output.Normal.xyz = (normalize(input.Normal).xyz / 2) + .5; // Other components must be initialized to compile output.Depth.a = 1; output.Normal.a = 1; return output; } technique Technique1 { pass Pass1 { VertexShader = compile vs_1_1 VertexShaderFunction(); PixelShader = compile ps_2_0 PixelShaderFunction(); } }

    PPLight.fx:

    float4x4 WorldViewProjection; float4x4 InvViewProjection; texture2D DepthTexture; texture2D NormalTexture; sampler2D depthSampler = sampler_state { texture = <DepthTexture>; minfilter = point; magfilter = point; mipfilter = point; }; sampler2D normalSampler = sampler_state { texture = <NormalTexture>; minfilter = point; magfilter = point; mipfilter = point; }; float3 LightColor; float3 LightPosition; float LightAttenuation; // Include shared functions #include "PPShared.vsi" struct VertexShaderInput { float4 Position : POSITION0; }; struct VertexShaderOutput { float4 Position : POSITION0; float4 LightPosition : TEXCOORD0; }; VertexShaderOutput VertexShaderFunction(VertexShaderInput input) { VertexShaderOutput output; output.Position = mul(input.Position, WorldViewProjection); output.LightPosition = output.Position; return output; } float4 PixelShaderFunction(VertexShaderOutput input) : COLOR0 { // Find the pixel coordinates of the input position in the depth // and normal textures float2 texCoord = postProjToScreen(input.LightPosition) + halfPixel(); // Extract the depth for this pixel from the depth map float4 depth = tex2D(depthSampler, texCoord); // Recreate the position with the UV coordinates and depth value float4 position; position.x = texCoord.x * 2 - 1; position.y = (1 - texCoord.y) * 2 - 1; position.z = depth.r; position.w = 1.0f; // Transform position from screen space to world space position = mul(position, InvViewProjection); position.xyz /= position.w; // Extract the normal from the normal map and move from // 0 to 1 range to -1 to 1 range float4 normal = (tex2D(normalSampler, texCoord) - .5) * 2; // Perform the lighting calculations for a point light float3 lightDirection = normalize(LightPosition - position); float lighting = clamp(dot(normal, lightDirection), 0, 1); // Attenuate the light to simulate a point light float d = distance(LightPosition, position); float att = 1 - pow(d / LightAttenuation, 6); return float4(LightColor * lighting * att, 1); } technique Technique1 { pass Pass1 { VertexShader = compile vs_1_1 VertexShaderFunction(); PixelShader = compile ps_2_0 PixelShaderFunction(); } }

    PPShared.vsi has some common functions:

    float viewportWidth; float viewportHeight; // Calculate the 2D screen position of a 3D position float2 postProjToScreen(float4 position) { float2 screenPos = position.xy / position.w; return 0.5f * (float2(screenPos.x, -screenPos.y) + 1); } // Calculate the size of one half of a pixel, to convert // between texels and pixels float2 halfPixel() { return 0.5f / float2(viewportWidth, viewportHeight); }

    and finally, from the Game class, I set it up in LoadContent with:

    effect = Content.Load<Effect>(@"Effects\PPModel"); models[0] = new CModel(Content.Load<Model>(@"Models\teapot"), new Vector3(-50, 80, 0), new Vector3(0, 0, 0), 1f, Content.Load<Texture2D>(@"Textures\prova_texture_autocad"), GraphicsDevice); house = new CModel(Content.Load<Model>(@"Models\house"), new Vector3(0, 0, 0), new Vector3((float)-Math.PI / 2, 0, 0), 35.0f, Content.Load<Texture2D>(@"Textures\prova_texture_autocad"), GraphicsDevice); models[0].SetModelEffect(effect, true); house.SetModelEffect(effect, true); renderer = new PrelightingRenderer(GraphicsDevice, Content); renderer.Models = new List<CModel>(); renderer.Models.Add(house); renderer.Models.Add(models[0]); renderer.Lights = new List<PPPointLight>() { new PPPointLight(new Vector3(0, 120, 0), Color.White * .85f, 2000) };

    where PPModel.fx is:

    float4x4 World; float4x4 View; float4x4 Projection; texture2D BasicTexture; sampler2D basicTextureSampler = sampler_state { texture = <BasicTexture>; addressU = wrap; addressV = wrap; minfilter = anisotropic; magfilter = anisotropic; mipfilter = linear; }; bool TextureEnabled = true; texture2D LightTexture; sampler2D lightSampler = sampler_state { texture = <LightTexture>; minfilter = point; magfilter = point; mipfilter = point; }; float3 AmbientColor = float3(0.15, 0.15, 0.15); float3 DiffuseColor; #include "PPShared.vsi" struct VertexShaderInput { float4 Position : POSITION0; float2 UV : TEXCOORD0; }; struct VertexShaderOutput { float4 Position : POSITION0; float2 UV : TEXCOORD0; float4 PositionCopy : TEXCOORD1; }; VertexShaderOutput VertexShaderFunction(VertexShaderInput input) { VertexShaderOutput output; float4x4 worldViewProjection = mul(World, mul(View, Projection)); output.Position = mul(input.Position, worldViewProjection); output.PositionCopy = output.Position; output.UV = input.UV; return output; } float4 PixelShaderFunction(VertexShaderOutput input) : COLOR0 { // Sample model's texture float3 basicTexture = tex2D(basicTextureSampler, input.UV); if (!TextureEnabled) basicTexture = float4(1, 1, 1, 1); // Extract lighting value from light map float2 texCoord = postProjToScreen(input.PositionCopy) + halfPixel(); float3 light = tex2D(lightSampler, texCoord); light += AmbientColor; return float4(basicTexture * DiffuseColor * light, 1); } technique Technique1 { pass Pass1 { VertexShader = compile vs_1_1 VertexShaderFunction(); PixelShader = compile ps_2_0 PixelShaderFunction(); } }

    I have no idea what's wrong... Googling around, I found that this tutorial may have a bug, but I don't know whether the fault is in the light mesh (the sphere), in one of the shaders, or in the PrelightingRenderer class. Any help is very much appreciated, thank you for reading!

    Read the article
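
    Not an answer to the artifact itself, but one way to narrow it down is to display the renderer's intermediate buffers every frame and see which pass first shows the sphere shape. A debugging sketch, assuming you add read-only properties exposing depthTarg, normalTarg and lightTarg from PrelightingRenderer (they are private in the code above):

        // In the Game class, after renderer.Draw() and the main model pass.
        // DepthMap / NormalMap / LightMap are hypothetical properties returning the render targets.
        void DrawDebugMaps(SpriteBatch spriteBatch, PrelightingRenderer renderer)
        {
            spriteBatch.Begin(SpriteSortMode.Immediate, BlendState.Opaque,
                              SamplerState.PointClamp, null, null);
            spriteBatch.Draw(renderer.DepthMap,  new Rectangle(0,   0, 256, 144), Color.White);
            spriteBatch.Draw(renderer.NormalMap, new Rectangle(256, 0, 256, 144), Color.White);
            spriteBatch.Draw(renderer.LightMap,  new Rectangle(512, 0, 256, 144), Color.White);
            spriteBatch.End();
        }

    If the depth and normal maps look right but the light map already contains the sphere, the problem is in the light-mesh pass (PPLight.fx, the unclamped attenuation term, or the cull-mode flip when the camera sits inside the light sphere); if the light map is fine, look at PPModel.fx and the texCoord/half-pixel math instead.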

  • How Do I Import a 3D Object into Adobe Flex?

    - by Kim
    Hi everyone, I wanted to know if anyone here knows how to import a 3D object (e.g. a Maya 3D model) into an Adobe Flex application. I need to create a simple Flex application which will allow me to rotate the 3D object by dragging, but I cannot seem to get started because I'm having a hard time figuring out how to import my 3D model into Flex. This is exactly what I want to do: 3D Object in Flex. I hope someone can help me. Thanks a lot :)

    Read the article

  • How to export 3D models that consist of several parts (e.g. turret on a tank)?

    - by Will
    What are the standard alternatives for the mechanics of attaching turrets and such to 3D models for use in-game? I don't mean the logic, but rather the graphics aspects. My naive approach is to extend the MD2-like format that I'm using (blender-exported using a script) to include a new set of properties for a mesh that:
    - is anchored in another 'parent' mesh. The anchor is a point and normal in the parent mesh and a point and normal in the child mesh; these will always be collinear, giving the child rotation but not translation relative to the parent point.
    - has a normal that is aligned with a 'target'. Classically this target is the enemy that is being engaged, but it might be some other vector, e.g. 'the wind' (for sails and flags (and smoke, which is a particle system but the same principle applies)) or 'upwards' (e.g. so the bodies of riders bend properly when riding a horse up an incline, etc.).
    - has maximum and minimum limits on the anchor and target alignments, and a speed coefficient.
    There is game logic for multiple turrets on a model and for deciding which engages which enemy; 'primary' and 'secondary' or 'target0' ... 'targetN' or some such annotation will be there. So to illustrate, a classic tank would be made from three meshes: a main body mesh, a turret mesh that is anchored to the top of the main body so it can spin only horizontally, and a barrel mesh that is anchored to the front of the turret and can only move vertically within some bounds. And there might be a fourth flag mesh on top of the turret that is aligned with 'wind', where wind is a function the engine computes by merging the environment's wind direction with the vehicle's heading and velocity, or something fancy. This gives each mesh one degree of freedom relative to its parent. Things with multiple degrees of freedom can perhaps be modelled by zero-vertex connecting meshes? This is where I think the approach I outlined begins to feel inelegant, yet perhaps it's still a workable system? This is why I want to know how it is done in professional games ;) Are there better approaches? Are there formats that already include this information? Is this routine?

    Read the article
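
    For what it's worth, the scheme described above is essentially what engines do: each part stores an attachment (a local transform relative to an anchor on its parent) plus its own constrained articulation, and world transforms are composed child-to-root at draw time; skeletal formats expose this as named joints or tags (MD3's "tags" are a classic example) rather than a bespoke per-mesh annotation. A minimal sketch of the composition and of a yaw-only turret aiming at a target, using XNA math types; the class and its fields are illustrative, not any particular format:

        using Microsoft.Xna.Framework;

        // One node in a simple attachment hierarchy: a part anchored to a parent part.
        class AttachedPart
        {
            public AttachedPart Parent;        // null for the hull / root mesh
            public Matrix AnchorOffset;        // anchor point and orientation in the parent's space
            public float Yaw, Pitch;           // this part's own articulation
            public float MinPitch = -0.1f, MaxPitch = 0.6f;   // example limits, in radians

            public Matrix WorldTransform(Matrix rootWorld)
            {
                Matrix local = Matrix.CreateRotationX(MathHelper.Clamp(Pitch, MinPitch, MaxPitch))
                             * Matrix.CreateRotationY(Yaw)
                             * AnchorOffset;
                Matrix parentWorld = (Parent != null) ? Parent.WorldTransform(rootWorld) : rootWorld;
                return local * parentWorld;
            }

            // Turn toward a target given in the parent's space (yaw only, e.g. a tank turret),
            // limited by a per-frame turn rate.
            public void AimAt(Vector3 targetInParentSpace, float maxTurnThisFrame)
            {
                Vector3 toTarget = targetInParentSpace - AnchorOffset.Translation;
                float desiredYaw = (float)System.Math.Atan2(-toTarget.X, -toTarget.Z);
                float delta = MathHelper.WrapAngle(desiredYaw - Yaw);
                Yaw += MathHelper.Clamp(delta, -maxTurnThisFrame, maxTurnThisFrame);
            }
        }

    A tank is then a root hull, a turret attached to the hull with yaw articulation, and a barrel attached to the turret with pitch articulation; each mesh is drawn with its WorldTransform, which is exactly the "one degree of freedom per mesh" idea from the question.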

  • CAD-like 3D geometry .NET library

    - by Naszta
    I am looking for a good 3D CAD-like library. I need basic geometric shapes (cube, sphere, torus, etc.) and the library should build the surface mesh based on these shapes and some boolean operations. I have found many libraries on Google (written in C++), but most of them are not really convenient and/or do not support union/intersection:
    http://www.geometros.com/sgcore/index.htm - it has a wrapped interface,
    http://www.opencsg.org/ - I haven't found a wrapped interface,
    http://carve-csg.com/ - I haven't found a wrapped interface,
    http://gts.sourceforge.net/ - I haven't found a wrapped interface,
    http://www.ogre3d.org/ - I haven't found basic geometric shapes and boolean operators,
    http://brlcad.org/ - its interface is not clear to me, and I haven't found a wrapped interface,
    http://www.cgal.org/ - I am currently trying to make it work; I haven't found a wrapped interface,
    http://www.k-3d.org/ - I haven't found a wrapped interface,
    http://www.opencascade.org/ - I haven't found a wrapped interface,
    http://ilnumerics.net/ - it does not support solid boolean operations,
    http://www.techsoft3d.com/ - seems to be a really good one, supporting both C++ and C#,
    http://www.devdept.com/products/eyeshot/ - one more C# library; not yet tested.
    Open source would be nice, but not necessary. Many thanks for any help. P.S.: the previous topic. Update: in C# we will use the Eyeshot project.

    Read the article

  • Can Linux play HDMI 1.4a 3D stereoscopic content?

    - by SofaKng
    I'm aware that there are no Blu-ray players for Linux, but I'm wondering if it's possible to play Full 3D HD (1080p, side-by-side) MKV files (or Blu-ray BDMV folders, etc.). Full 3D HD files are actually two 1080p frames side-by-side, so the effective resolution is 3840x1080. In order to play these properly, the software needs to switch the TV into 3D mode (or however HDMI 1.4a works). I don't think simply playing the 3840x1080 file will work, so are there any options out there?

    Read the article

  • Java 3D-API that uses JOGL

    - by yx
    Can someone recommend a package similar to JCollada for 3D rendering that is based on JOGL, or point me to a place where I can obtain JCollada? The original site for JCollada has an SVN link that is no longer working (empty SVN repository).

    Read the article

  • jQuery 3D carousel?

    - by Juan
    Has anyone seen a tutorial for a jQuery 3D carousel like this one? http://web.enavu.com/demos/3dcarouselwip/ No source is given, but I was wondering if anyone had tips on how to continuously circle the DIVs and resize them. Thanks, Juan

    Read the article

  • Saving new indices, triangles and normals after a WPF 3D transform

    - by sklitzz
    Hi, I have a 3D model which is currently lying flat, and I wish to rotate it 90 degrees around the X axis. I have no problem doing this with the transforms, but to my knowledge all the transforms are just a set of multiplied matrices. I would like the transform to actually alter the coordinates of the model's vertices. Is there a way to "save changes" after I apply a transform?

    Read the article
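
    As far as I know WPF has no built-in "bake transform into mesh" call, but Transform3D has Transform(Point3D) and Transform(Vector3D) overloads, so you can run the mesh data through the transform once and then remove the Transform3D from the model. A minimal sketch; the helper name is ours:

        using System.Windows.Media.Media3D;

        static class MeshBaker
        {
            // Writes the transformed coordinates back into the mesh so the rotation becomes
            // permanent; afterwards clear the model's Transform property.
            public static void BakeTransform(MeshGeometry3D mesh, Transform3D transform)
            {
                var newPositions = new Point3DCollection(mesh.Positions.Count);
                foreach (Point3D p in mesh.Positions)
                    newPositions.Add(transform.Transform(p));
                mesh.Positions = newPositions;

                if (mesh.Normals != null && mesh.Normals.Count > 0)
                {
                    var newNormals = new Vector3DCollection(mesh.Normals.Count);
                    foreach (Vector3D n in mesh.Normals)
                    {
                        Vector3D t = transform.Transform(n);   // fine for a pure rotation;
                        t.Normalize();                         // non-uniform scale would need
                        newNormals.Add(t);                     // the inverse-transpose instead
                    }
                    mesh.Normals = newNormals;
                }
                // TriangleIndices stay as they are: only the coordinates change.
            }
        }

    For the 90-degree case in the question, something like BakeTransform(mesh, new RotateTransform3D(new AxisAngleRotation3D(new Vector3D(1, 0, 0), 90))) would rewrite the positions and normals in place.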

  • 3D convolution in C++

    - by alboot
    Hello, I'm looking for some source code implementing 3D convolution. Ideally, I need C++ code or CUDA code. I'd appreciate it if anybody could point me to a nice and fast implementation :-) Cheers

    Read the article
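
    Not a library pointer, but for reference this is all a direct 3D convolution does; the sketch below is in C# only to match the other snippets on this page, and the loop structure ports one-to-one to C++ or to a CUDA kernel (one thread per output voxel). For kernels beyond a few voxels per side, an FFT-based approach is usually much faster.

        // Naive direct 3D convolution, "same"-size output with zero padding at the borders.
        static float[,,] Convolve3D(float[,,] src, float[,,] kernel)
        {
            int nx = src.GetLength(0), ny = src.GetLength(1), nz = src.GetLength(2);
            int kx = kernel.GetLength(0), ky = kernel.GetLength(1), kz = kernel.GetLength(2);
            int rx = kx / 2, ry = ky / 2, rz = kz / 2;          // kernel radii
            var dst = new float[nx, ny, nz];

            for (int x = 0; x < nx; x++)
            for (int y = 0; y < ny; y++)
            for (int z = 0; z < nz; z++)
            {
                float sum = 0f;
                for (int i = 0; i < kx; i++)
                for (int j = 0; j < ky; j++)
                for (int k = 0; k < kz; k++)
                {
                    int sx = x + i - rx, sy = y + j - ry, sz = z + k - rz;
                    if (sx < 0 || sx >= nx || sy < 0 || sy >= ny || sz < 0 || sz >= nz)
                        continue;                               // zero padding outside the volume
                    // flip the kernel (convolution rather than correlation)
                    sum += src[sx, sy, sz] * kernel[kx - 1 - i, ky - 1 - j, kz - 1 - k];
                }
                dst[x, y, z] = sum;
            }
            return dst;
        }

    The cost is O(N^3 * K^3), which is exactly why people reach for CUDA or FFTs once either size grows.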

  • 3D visualization in browser

    - by Milos
    Hello, I am looking for solutions for 3D visualization in web browsers. For now I just need to do research on this topic, i.e. I need to know how many solutions exist, which of them are good, and why. Thanks

    Read the article

  • Software to draw 3D geometry

    - by ilovekonoka
    Does anyone know of any software that can draw 3D geometry like the figures usually seen in computer graphics papers and articles (example: http://www.iquilezles.org/www/articles/sphereao/sphereao.htm)? I know it can be done in Inkscape or Illustrator, but I'm not sure they're the only choices.

    Read the article

  • 3D symmetry search algorithm

    - by aaa
    This may be more appropriate for MathOverflow, but nevertheless: given a 3D structure (for example, a molecule), what is a good approach/algorithm to find its symmetry (rotational/reflection/inversion/etc.)? I came up with a naive brute-force algorithm, but it seems there should be a better approach. I am not so much interested in genetic algorithms, as I would like the best symmetry rather than almost-the-best symmetry. A link to a website/paper would be great. Thanks.

    Read the article
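
    One common refinement over blind brute force: centre the structure on its centroid, generate a small set of candidate operations (for molecules, typically rotations about the principal axes of the inertia tensor plus reflections and inversion), and keep every candidate that maps the point set onto itself within a tolerance. A sketch of just the "maps onto itself" test, using XNA math types purely for convenience; for a molecule you would additionally require matching atom types, and the greedy matching is only a sketch:

        using System;
        using System.Collections.Generic;
        using Microsoft.Xna.Framework;

        static class SymmetryTest
        {
            // True if applying 'op' to every (centroid-centred) point yields a set that
            // matches the original set within 'tol'.
            public static bool IsSymmetryOp(IList<Vector3> points, Matrix op, float tol)
            {
                var unmatched = new List<Vector3>(points);
                foreach (Vector3 p in points)
                {
                    Vector3 q = Vector3.Transform(p, op);
                    int best = -1;
                    float bestDist = tol;
                    for (int i = 0; i < unmatched.Count; i++)
                    {
                        float d = Vector3.Distance(q, unmatched[i]);
                        if (d <= bestDist) { bestDist = d; best = i; }
                    }
                    if (best < 0) return false;     // some transformed point has no partner
                    unmatched.RemoveAt(best);       // each original point may be used once
                }
                return true;
            }
        }

        // Example candidate: a two-fold rotation (180 degrees) about the z axis.
        //   bool c2z = SymmetryTest.IsSymmetryOp(points, Matrix.CreateRotationZ(MathHelper.Pi), 0.05f);

    For the "best rather than almost best" requirement, the tolerance is the only knob: every accepted operation is exact up to it, so there is no fitness landscape to get stuck in.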

  • ActionScript Measuring 3D Depth

    - by TheDarkIn1978
    I'm having a difficult time understanding how to control the z property of display objects in a 3D space. I know how depth works, but what I don't understand is how I can get the maximum depth, or the number at which the display object just disappears into the background. I assume depth is based on the stage's width and height, and that is why assigning the same depth to the same display object appears mismatched at different stage sizes. So how can I appropriately measure depth?

    Read the article
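
    As far as I understand Flash Player 10's 3D, there is no hard cut-off depth: with a PerspectiveProjection a display object at depth z is drawn at scale focalLength / (focalLength + z), so it only shrinks asymptotically toward zero, and the default focalLength is derived from the stage size and fieldOfView, which would explain why the same z looks different at different stage sizes. A tiny sketch of that arithmetic (C# only for consistency with the other snippets on this page; the focal length value is an example, read the real one from root.transform.perspectiveProjection.focalLength):

        using System;

        static class PerspectiveDepth
        {
            // Apparent scale of an object at depth z for a given focal length.
            static double ScaleAt(double z, double focalLength)
            {
                return focalLength / (focalLength + z);
            }

            // Depth at which an object that is 'sizePx' pixels tall at z = 0 shrinks below
            // one pixel, i.e. a practical "vanishing" depth for that object.
            static double DepthWhereSmallerThanOnePixel(double sizePx, double focalLength)
            {
                return focalLength * (sizePx - 1.0);
            }

            static void Main()
            {
                double focalLength = 480.0;                                          // example value
                Console.WriteLine(ScaleAt(1000, focalLength));                       // ~0.32
                Console.WriteLine(DepthWhereSmallerThanOnePixel(100, focalLength));  // 47520
            }
        }

    So rather than one global "maximum depth", the practical vanishing point depends on the object's on-screen size and on the projection's focal length.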

  • 3D engine with telnet access

    - by zaf
    Does anyone know of an open source 3D engine which can be operated via telnet? What I'm looking for is scripting via a socket connection, to allow for world creation and/or camera movement. Does anybody know of any that has this built in, or where it is very, very easy to add as a plugin or script? The platform is not crucial.

    Read the article
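
    No pointer to a specific engine, but the "operated via telnet" part is usually trivial to bolt onto whatever engine you pick: a line-oriented TCP listener that parses text commands and hands them to the engine's update loop. A minimal C# sketch of that pattern; the command vocabulary is made up, and in a real engine you would queue commands for the main thread instead of touching the scene from the socket thread:

        using System;
        using System.IO;
        using System.Net;
        using System.Net.Sockets;
        using System.Threading;

        static class TelnetScripting
        {
            // telnet to the port and type lines such as:  camera 0 5 -20
            public static void Start(int port, Action<string[]> handleCommand)
            {
                var listener = new TcpListener(IPAddress.Any, port);
                listener.Start();
                var thread = new Thread(() =>
                {
                    while (true)
                    {
                        using (TcpClient client = listener.AcceptTcpClient())
                        using (var reader = new StreamReader(client.GetStream()))
                        {
                            string line;
                            while ((line = reader.ReadLine()) != null)
                            {
                                string[] tokens = line.Split(new[] { ' ' },
                                    StringSplitOptions.RemoveEmptyEntries);
                                if (tokens.Length > 0)
                                    handleCommand(tokens);   // e.g. enqueue for the render thread
                            }
                        }
                    }
                });
                thread.IsBackground = true;
                thread.Start();
            }
        }

    Because that side is so small, the choice of engine matters mainly for rendering and scene-graph features rather than for the telnet part.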

  • Java quaternion 3D rotation implementation

    - by MRM
    I made a method to rotate a list of points using quaternions, but all I get back as output is the same list I gave it to rotate. Maybe I did not understand the math for 3D rotations correctly, or my code is not implemented the right way. Could you give me a hand? This is the method I use:

    public static ArrayList<Float> rotation3D(ArrayList<Float> points, double angle, int x, int y, int z) { ArrayList<Float> newpoints = points; for (int i=0;i<points.size();i+=3) { float x_old = points.get(i).floatValue(); float y_old = points.get(i+1).floatValue(); float z_old = points.get(i+2).floatValue(); double[] initial = {1,0,0,0}; double[] total = new double[4]; double[] local = new double[4]; //components for local quaternion //w local[0] = Math.cos(0.5 * angle); //x local[1] = x * Math.sin(0.5 * angle); //y local[2] = y * Math.sin(0.5 * angle); //z local[3] = z * Math.sin(0.5 * angle); //components for final quaternion Q1*Q2 //w = w1w2 - x1x2 - y1y2 - z1z2 total[0] = local[0] * initial[0] - local[1] * initial[1] - local[2] * initial[2] - local[3] * initial[3]; //x = w1x2 + x1w2 + y1z2 - z1y2 total[1] = local[0] * initial[1] + local[1] * initial[0] + local[2] * initial[3] - local[3] * initial[2]; //y = w1y2 - x1z2 + y1w2 + z1x2 total[2] = local[0] * initial[2] - local[1] * initial[3] + local[2] * initial[0] + local[3] * initial[1]; //z = w1z2 + x1y2 - y1x2 + z1w2 total[3] = local[0] * initial[3] + local[1] * initial[2] - local[2] * initial[1] + local[3] * initial[0]; //new x,y,z of the 3d point using rotation matrix made from the final quaternion float x_new = (float)((1 - 2 * total[2] * total[2] - 2 * total[3] * total[3]) * x_old + (2 * total[1] * total[2] - 2 * total[0] * total[3]) * y_old + (2 * total[1] * total[3] + 2 * total[0] * total[2]) * z_old); float y_new = (float) ((2 * total[1] * total[2] + 2 * total[0] * total[3]) * x_old + (1 - 2 * total[1] * total[1] - 2 * total[3] * total[3]) * y_old + (2 * total[2] * total[3] + 2 * total[0] * total[1]) * z_old); float z_new = (float) ((2 * total[1] * total[3] - 2 * total[0] * total[2]) * x_old + (2 * total[2] * total[3] - 2 * total[0] * total[1]) * y_old + (1 - 2 * total[1] * total[1] - 2 * total[2] * total[2]) * z_old); newpoints.set(i, x_new); newpoints.set(i+1, y_new); newpoints.set(i+2, z_new); } return newpoints; }

    For rotation3D(points, 50, 0, 1, 0), where points is: 0.0, 0.0, -9.0; 0.0, 0.0, -11.0; 20.0, 0.0, -11.0; 20.0, 0.0, -9.0; I get back the same list.

    Read the article
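
    A few things worth checking in the code above, offered as suggestions rather than a diagnosis: Math.sin and Math.cos expect radians, so an angle of 50 means 50 radians rather than 50 degrees; the (x, y, z) axis has to be a unit vector for the quaternion to be a pure rotation; newpoints = points aliases the input list rather than copying it, so the caller's "original" list is overwritten too; and the 2yz plus/minus 2wx terms in y_new and z_new look swapped relative to the standard quaternion-to-matrix formula. As a reference to compare outputs against, here is the same rotation done with XNA's built-in quaternion type (used only because the other snippets on this page are C#; any quaternion library gives the same numbers):

        using System.Collections.Generic;
        using Microsoft.Xna.Framework;

        static class QuaternionReference
        {
            // Rotates a flat list of xyz triples about 'axis' by 'angleRadians'.
            public static List<float> Rotate(List<float> points, float angleRadians, Vector3 axis)
            {
                Quaternion q = Quaternion.CreateFromAxisAngle(Vector3.Normalize(axis), angleRadians);
                var result = new List<float>(points.Count);
                for (int i = 0; i < points.Count; i += 3)
                {
                    Vector3 p = new Vector3(points[i], points[i + 1], points[i + 2]);
                    Vector3 r = Vector3.Transform(p, q);
                    result.Add(r.X); result.Add(r.Y); result.Add(r.Z);
                }
                return result;
            }
        }

        // Example: rotating (20, 0, -11) by 90 degrees (MathHelper.PiOver2) about the y axis
        // should give roughly (-11, 0, -20); the Java method can be checked against that.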

  • JavaScript library to display / animate 3D objects?

    - by saturation
    Hi, some time ago I saw a library where you can import your 3D objects and it will draw them. You could also animate the objects. The webpage itself was black and there was a rotating gear in the corner... Can anyone recall the name of the library? Also feel free to mention other neat JS libraries you know of. Thanks!

    Read the article
