Search Results

Search found 749 results on 30 pages for 'occam pi'.

Page 12/30 | < Previous Page | 8 9 10 11 12 13 14 15 16 17 18 19  | Next Page >

  • Normal map lighting bug in bottom right quadrant

    - by Ryan Capote
    I am currently working on getting normal maps working in my project, and have run into a problem with lighting. As you can see, the normals in the bottom right quadrant of the lighting isn't calculating the correct direction to the light or something. Best seen by the red light If I use flat normals (z normal = 1.0), it seems to be working fine: normals for the tile sheet: Shader: #version 330 uniform sampler2D uDiffuseTexture; uniform sampler2D uNormalsTexture; uniform sampler2D uSpecularTexture; uniform sampler2D uEmissiveTexture; uniform sampler2D uWorldNormals; uniform sampler2D uShadowMap; uniform vec4 uLightColor; uniform float uConstAtten; uniform float uLinearAtten; uniform float uQuadradicAtten; uniform float uColorIntensity; in vec2 TexCoords; in vec2 GeomSize; out vec4 FragColor; float sample(vec2 coord, float r) { return step(r, texture2D(uShadowMap, coord).r); } float occluded() { float PI = 3.14; vec2 normalized = TexCoords.st * 2.0 - 1.0; float theta = atan(normalized.y, normalized.x); float r = length(normalized); float coord = (theta + PI) / (2.0 * PI); vec2 tc = vec2(coord, 0.0); float center = sample(tc, r); float sum = 0.0; float blur = (1.0 / GeomSize.x) * smoothstep(0.0, 1.0, r); sum += sample(vec2(tc.x - 4.0*blur, tc.y), r) * 0.05; sum += sample(vec2(tc.x - 3.0*blur, tc.y), r) * 0.09; sum += sample(vec2(tc.x - 2.0*blur, tc.y), r) * 0.12; sum += sample(vec2(tc.x - 1.0*blur, tc.y), r) * 0.15; sum += center * 0.16; sum += sample(vec2(tc.x + 1.0*blur, tc.y), r) * 0.15; sum += sample(vec2(tc.x + 2.0*blur, tc.y), r) * 0.12; sum += sample(vec2(tc.x + 3.0*blur, tc.y), r) * 0.09; sum += sample(vec2(tc.x + 4.0*blur, tc.y), r) * 0.05; return sum * smoothstep(1.0, 0.0, r); } float calcAttenuation(float distance) { float linearAtten = uLinearAtten * distance; float quadAtten = uQuadradicAtten * distance * distance; float attenuation = 1.0 / (uConstAtten + linearAtten + quadAtten); return attenuation; } vec3 calcFragPosition(void) { return vec3(TexCoords*GeomSize, 0.0); } vec3 calcLightPosition(void) { return vec3(GeomSize/2.0, 0.0); } float calcDistance(vec3 fragPos, vec3 lightPos) { return length(fragPos - lightPos); } vec3 calcLightDirection(vec3 fragPos, vec3 lightPos) { return normalize(lightPos - fragPos); } vec4 calcFinalLight(vec2 worldUV, vec3 lightDir, float attenuation) { float diffuseFactor = dot(normalize(texture2D(uNormalsTexture, worldUV).rgb), lightDir); vec4 diffuse = vec4(0.0); vec4 lightColor = uLightColor * uColorIntensity; if(diffuseFactor > 0.0) { diffuse = vec4(texture2D(uDiffuseTexture, worldUV.xy).rgb, 1.0); diffuse *= diffuseFactor; lightColor *= diffuseFactor; } else { discard; } vec4 final = (diffuse + lightColor); if(texture2D(uWorldNormals, worldUV).g > 0.0) { return final * attenuation; } else { return final * occluded(); } } void main(void) { vec3 fragPosition = calcFragPosition(); vec3 lightPosition = calcLightPosition(); float distance = calcDistance(fragPosition, lightPosition); float attenuation = calcAttenuation(distance); vec2 worldPos = gl_FragCoord.xy / vec2(1024, 768); vec3 lightDir = calcLightDirection(fragPosition, lightPosition); lightDir = (lightDir*0.5)+0.5; float atten = calcAttenuation(distance); vec4 emissive = texture2D(uEmissiveTexture, worldPos); FragColor = calcFinalLight(worldPos, lightDir, atten) + emissive; }
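
    One common cause of exactly this symptom (lighting that goes wrong wherever the light direction has a negative component, i.e. one quadrant) is a range mismatch: the shader above remaps lightDir into [0, 1] with (lightDir*0.5)+0.5 but never unpacks the sampled normal from [0, 1] back to [-1, 1] before the dot product. A minimal sketch of the usual math, written here in plain Java rather than GLSL and assuming the normal map stores each component in [0, 1]:

        // Hedged sketch: unpack the sampled normal, keep the light direction signed,
        // and only then take the dot product that feeds the diffuse term.
        static float diffuseFactor(float[] sampledNormal, float[] lightDir) {
            float nx = sampledNormal[0] * 2f - 1f;   // [0,1] -> [-1,1]
            float ny = sampledNormal[1] * 2f - 1f;
            float nz = sampledNormal[2] * 2f - 1f;
            float len = (float) Math.sqrt(nx * nx + ny * ny + nz * nz);
            // lightDir is used as-is; no (d * 0.5) + 0.5 remapping before the dot product
            float dot = nx * lightDir[0] + ny * lightDir[1] + nz * lightDir[2];
            return Math.max(0f, dot / len);
        }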

    Read the article

  • Rotate player based on joystick

    - by pengume
    Hey everyone I have this game that i am making in android and I have a touch screen joystick that moves the player around based on the joysticks position. I cant figure out how to also get the player to rotate at the same angle of the joystick. so when the joystick is to the left the players bitmap is rotated to the left as well. Maybe someone here has some sample code I could look at here is the joysticks class that I am using. `public class GameControls implements OnTouchListener { public float initx = DroidzActivity.screenWidth - 45; //255; // 320 og 425 public float inity = DroidzActivity.screenHeight - 45;//425; // 480 og 267 public Point _touchingPoint = new Point( DroidzActivity.screenWidth - 45, DroidzActivity.screenHeight - 45); public Point _pointerPosition = new Point(DroidzActivity.screenWidth - 100, DroidzActivity.screenHeight - 100); // ogx 220 ogy 150 private Boolean _dragging = false; private boolean attackMode = false; @Override public boolean onTouch(View v, MotionEvent event) { update(event); return true; } private MotionEvent lastEvent; public boolean ControlDragged; private static double angle; public void update(MotionEvent event) { if (event == null && lastEvent == null) { return; } else if (event == null && lastEvent != null) { event = lastEvent; } else { lastEvent = event; } // drag drop if (event.getAction() == MotionEvent.ACTION_DOWN) { if ((int) event.getX() > 0 && (int) event.getX() < 50 && (int) event.getY() > DroidzActivity.screenHeight - 160 && (int) event.getY() < DroidzActivity.screenHeight - 0) { setAttackMode(true); } else { _dragging = true; } } else if (event.getAction() == MotionEvent.ACTION_UP) { if(isAttackMode()){ setAttackMode(false); } _dragging = false; } if (_dragging) { ControlDragged = true; // get the pos _touchingPoint.x = (int) event.getX(); _touchingPoint.y = (int) event.getY(); // Log.d("GameControls", "x = " + _touchingPoint.x + " y = " //+ _touchingPoint.y); // bound to a box if (_touchingPoint.x < DroidzActivity.screenWidth - 75) { // og 400 _touchingPoint.x = DroidzActivity.screenWidth - 75; } if (_touchingPoint.x > DroidzActivity.screenWidth - 15) {// og 450 _touchingPoint.x = DroidzActivity.screenWidth - 15; } if (_touchingPoint.y < DroidzActivity.screenHeight - 75) {// og 240 _touchingPoint.y = DroidzActivity.screenHeight - 75; } if (_touchingPoint.y > DroidzActivity.screenHeight - 15) {// og 290 _touchingPoint.y = DroidzActivity.screenHeight - 15; } // get the angle setAngle(Math.atan2(_touchingPoint.y - inity, _touchingPoint.x - initx) / (Math.PI / 180)); // Move the ninja in proportion to how far // the joystick is dragged from its center _pointerPosition.y += Math.sin(getAngle() * (Math.PI / 180)) * (_touchingPoint.x / 70); // og 180 70 _pointerPosition.x += Math.cos(getAngle() * (Math.PI / 180)) * (_touchingPoint.x / 70); // make the pointer go thru if (_pointerPosition.x > DroidzActivity.screenWidth) { _pointerPosition.x = 0; } if (_pointerPosition.x < 0) { _pointerPosition.x = DroidzActivity.screenWidth; } if (_pointerPosition.y > DroidzActivity.screenHeight) { _pointerPosition.y = 0; } if (_pointerPosition.y < 0) { _pointerPosition.y = DroidzActivity.screenHeight; } } else if (!_dragging) { ControlDragged = false; // Snap back to center when the joystick is released _touchingPoint.x = (int) initx; _touchingPoint.y = (int) inity; // shaft.alpha = 0; } } public void setAttackMode(boolean attackMode) { this.attackMode = attackMode; } public boolean isAttackMode() { return attackMode; } public void setAngle(double angle) { 
this.angle = angle; } public static double getAngle() { return angle; } }` I should also note that the player has animations based on when he is moving or attacking.
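
    A sketch of one way to do the rotation part in the player's draw code, assuming the player is drawn from a Bitmap (playerBitmap, playerX and playerY below are hypothetical names for whatever the player class already holds). getAngle() above already returns degrees, so it can be fed straight into android.graphics.Matrix:

        // Hypothetical draw helper: rotates the sprite to the current joystick angle
        // around its own centre, then places it at the player's position.
        void drawPlayer(Canvas canvas, Bitmap playerBitmap, float playerX, float playerY) {
            Matrix m = new Matrix();
            m.postRotate((float) GameControls.getAngle(),
                    playerBitmap.getWidth() / 2f,
                    playerBitmap.getHeight() / 2f);
            m.postTranslate(playerX, playerY);
            canvas.drawBitmap(playerBitmap, m, null);
        }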

    Read the article

  • The Rise of Project Intelligence and Why It Matters

    - by Melissa Centurio Lopes
    By Amy DeWolf. Are you doing any of these in your organization? How are you leveraging historical data to forecast projects? There’s a lot going on in government today. The economic pressures agencies feel from the uncertainty of budget cuts and sequestration affect every part of an organization, including the Project Management Office (PMO). The PMO is responsible for monitoring and administering government IT projects. As time goes on, priorities shift, technology advances, and new regulations are imposed, all of which make planning and executing projects more difficult. For example, think about your own projects. How many boxes do you need to check and hoops do you need to jump through to ensure you comply with new regulations? While new regulations and technology advancements can be a good thing, they add an additional layer of complexity to already complex projects. To overcome some of these pressures, particularly new regulations, many in the PMO world are adopting a new approach: Project Intelligence (PI). According to a new Oracle Primavera white paper, The Rise of Project Intelligence: When Project Management is Just Not Enough, “PI uses Business Intelligence methods to leverage historical project data to make more informed decisions and greatly enhance project execution.” Currently, project managers plan and forecast the possible phases in an execution cycle. However, most project managers don’t have the proper tools to do this as effectively as they would like. As the white paper noted, “The underlying deficiencies in most forecasting approaches are that 1) the PM fails in most instances to leverage historical data and 2) the PM doesn’t employ current Business Intelligence tools.” PI seeks to overturn this by combining the modeling tools used in Business Intelligence for projects with the understanding of Emotional Intelligence for managing people. Simply put, Project Intelligence is built on four main pillars: (1) actively use historical data to forecast project cycles; (2) understand the intricacies of complex projects; (3) enhance social and emotional intelligence in projects; (4) actively use Business Intelligence tools. Read our complimentary white paper and discover the importance of emotional intelligence and best practices for improving projects, specifically in terms of communication.

    Read the article

  • Rotation of viewplatform in Java3D

    - by user29163
    I have just started with Java3D programming. I thought I had built up some basic intuition about how the scene graph works, but something that should work, does not work. I made a simple program for rotating a pyramid around the y-axis. This was done just by adding a RotationInterpolator R to the TransformGroup above the pyramid. Then I thought hey, can I now remove the RotationInterpolator from this TransformGroup, then add it to the TransformGroup above my ViewPlatform leaf. This should work if I have understood how things work. Adding the RotationInterpolator to this TransformGroup, should make the children of this TransformGroup rotate, and the ViewingPlatform is a child of the TransformGroup. Any ideas on where my reasoning is flawed? Here is the code for setting up the universe, and the view branchgroup. import java.awt.*; import java.awt.event.*; import javax.media.j3d.*; import javax.vecmath.*; public class UniverseBuilder { // User-specified canvas Canvas3D canvas; // Scene graph elements to which the user may want access VirtualUniverse universe; Locale locale; TransformGroup vpTrans; View view; public UniverseBuilder(Canvas3D c) { this.canvas = c; // Establish a virtual universe that has a single // hi-res Locale universe = new VirtualUniverse(); locale = new Locale(universe); // Create a PhysicalBody and PhysicalEnvironment object PhysicalBody body = new PhysicalBody(); PhysicalEnvironment environment = new PhysicalEnvironment(); // Create a View and attach the Canvas3D and the physical // body and environment to the view. view = new View(); view.addCanvas3D(c); view.setPhysicalBody(body); view.setPhysicalEnvironment(environment); // Create a BranchGroup node for the view platform BranchGroup vpRoot = new BranchGroup(); // Create a ViewPlatform object, and its associated // TransformGroup object, and attach it to the root of the // subgraph. Attach the view to the view platform. Transform3D t = new Transform3D(); Transform3D s = new Transform3D(); t.set(new Vector3f(0.0f, 0.0f, 10.0f)); t.rotX(-Math.PI/4); s.set(new Vector3f(0.0f, 0.0f, 10.0f)); //forandre verdier her for å endre viewing position t.mul(s); ViewPlatform vp = new ViewPlatform(); vpTrans = new TransformGroup(t); vpTrans.setCapability(TransformGroup.ALLOW_TRANSFORM_WRITE); // Rotator stuff Transform3D yAxis = new Transform3D(); //yAxis.rotY(Math.PI/2); Alpha rotationAlpha = new Alpha( -1, Alpha.INCREASING_ENABLE, 0, 0,4000, 0, 0, 0, 0, 0); RotationInterpolator rotator = new RotationInterpolator( rotationAlpha, vpTrans, yAxis, 0.0f, (float) Math.PI*2.0f); RotationInterpolator rotator2 = new RotationInterpolator( rotationAlpha, vpTrans); BoundingSphere bounds = new BoundingSphere(new Point3d(0.0,0.0,0.0), 1000.0); rotator.setSchedulingBounds(bounds); vpTrans.addChild(rotator); vpTrans.addChild(vp); vpRoot.addChild(vpTrans); view.attachViewPlatform(vp); // Attach the branch graph to the universe, via the // Locale. The scene graph is now live! locale.addBranchGraph(vpRoot); } public void addBranchGraph(BranchGroup bg) { locale.addBranchGraph(bg); } }
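
    One likely flaw in the reasoning, for what it's worth: RotationInterpolator overwrites the entire transform of its target TransformGroup on every frame, so the translation to (0, 0, 10) stored in vpTrans is discarded and the view ends up spinning at the origin. A common fix is to split the view branch into two groups, one that the interpolator owns and a child that keeps the fixed eye offset. A rough sketch, reusing the names from the code above:

        TransformGroup rotGroup = new TransformGroup();              // the interpolator writes here
        rotGroup.setCapability(TransformGroup.ALLOW_TRANSFORM_WRITE);

        Transform3D offset = new Transform3D();
        offset.set(new Vector3f(0.0f, 0.0f, 10.0f));                 // fixed eye offset, never overwritten
        TransformGroup offsetGroup = new TransformGroup(offset);

        RotationInterpolator rotator = new RotationInterpolator(
                rotationAlpha, rotGroup, new Transform3D(), 0.0f, (float) (Math.PI * 2.0));
        rotator.setSchedulingBounds(bounds);

        rotGroup.addChild(rotator);
        rotGroup.addChild(offsetGroup);
        offsetGroup.addChild(vp);                                    // the ViewPlatform leaf
        vpRoot.addChild(rotGroup);
        view.attachViewPlatform(vp);
        locale.addBranchGraph(vpRoot);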

    Read the article

  • Drawing Color Spectrum with Waveform

    - by TheDarkIn1978
    I've come across this ActionScript sample, which demonstrates drawing of the color spectrum, one line at a time via a loop, using waveforms. However, the waveform placement of each RGB channel creates a color spectrum that is missing colors (pure yellow, cyan and magenta) and therefore the spectrum is incomplete. How can I remedy this problem so that the drawn color spectrum will exhibit all colors? // Loop through all of the pixels from '0' to the specified width. for(var i:int = 0; i < nWidth; i++) { // Calculate the color percentage based on the current pixel. nColorPercent = i / nWidth; // Calculate the radians of the angle to use for rotating color values. nRadians = (-360 * nColorPercent) * (Math.PI / 180); // Calculate the RGB channels based on the angle. nR = Math.cos(nRadians) * 127 + 128 << 16; nG = Math.cos(nRadians + 2 * Math.PI / 3) * 127 + 128 << 8; nB = Math.cos(nRadians + 4 * Math.PI / 3) * 127 + 128; // OR the individual color channels together. nColor = nR | nG | nB; }
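
    One way around it, sketched here in Java rather than ActionScript: raised cosines spaced 120 degrees apart never reach the pure secondaries (halfway between the red and green peaks both channels sit near 191 rather than 255, so the "yellow" is muddy). Sweeping the hue wheel through an HSB-to-RGB conversion gives a complete, fully saturated spectrum for the same loop index:

        import java.awt.Color;

        // Sketch: one packed RGB value per horizontal pixel, covering the full hue wheel.
        public class SpectrumSketch {
            static int[] spectrum(int nWidth) {
                int[] colors = new int[nWidth];
                for (int i = 0; i < nWidth; i++) {
                    float hue = (float) i / nWidth;           // 0..1 sweeps red -> yellow -> ... -> red
                    colors[i] = Color.HSBtoRGB(hue, 1f, 1f);  // full saturation and brightness
                }
                return colors;
            }
        }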

    Read the article

  • Python: combining two scripts into one

    - by Alex
    I have two separately made python scripts one that makes a sine wave sound based off time, and another that produces a sine wave graph that is based off the same time factors. I need help combining them into one running file. Here's the first: from struct import pack from math import sin, pi import time def au_file(name, freq, freq1, dur, vol): fout = open(name, 'wb') # header needs size, encoding=2, sampling_rate=8000, channel=1 fout.write('.snd' + pack('>5L', 24, 8*dur, 2, 8000, 1)) factor = 2 * pi * freq/8000 factor1 = 2 * pi * freq1/8000 # write data for seg in range(8 * dur): # sine wave calculations sin_seg = sin(seg * factor) + sin(seg * factor1) fout.write(pack('b', vol * 64 * sin_seg)) fout.close() t = time.strftime("%S", time.localtime()) ti = time.strftime("%M", time.localtime()) tis = float(t) tis = tis * 100 tim = float(ti) tim = tim * 100 if __name__ == '__main__': au_file(name='timeSound.au', freq=tim, freq1=tis, dur=1000, vol=1.0) import os os.startfile('timeSound.au') and the second is this: from Tkinter import * import math import time t = time.strftime("%S", time.localtime()) ti = time.strftime("%M", time.localtime()) tis = float(t) tis = tis / 100 tim = float(ti) tim = tim / 100 root = Tk() root.title("This very moment") width = 400 height = 300 center = height//2 x_increment = 1 # width stretch x_factor1 = tis x_factor2 = tim # height stretch y_amplitude = 50 c = Canvas(width=width, height=height, bg='black') c.pack() str1 = "sin(x)=white" c.create_text(10, 20, anchor=SW, text=str1) center_line = c.create_line(0, center, width, center, fill='red') # create the coordinate list for the sin() curve, have to be integers xy1 = [] xy2 = [] for x in range(400): # x coordinates xy1.append(x * x_increment) xy2.append(x * x_increment) # y coordinates xy1.append(int(math.sin(x * x_factor1) * y_amplitude) + center) xy2.append(int(math.sin(x * x_factor2) * y_amplitude) + center) sinS_line = c.create_line(xy1, fill='white') sinM_line = c.create_line(xy2, fill='yellow') root.mainloop()

    Read the article

  • Draw multiple circles in Google Maps

    - by snorpey
    Hi. I want to draw multiple circles on a map, using the Google Maps API and jQuery. The following code works as long as the line with drawMapCircle() is commented out (the markers are positioned correctly). What's wrong with my code? $.getJSON( "ajax/show.php", function(data) { $.each(data.points, function(i, point) { map.addOverlay(new GMarker(new GLatLng(point.lat, point.lng))); drawMapCircle(point.lat, point.lng, 0.01, '#0066ff', 2, 0.8, '#0cf', 0.1); }); } ); function drawMapCircle(lat, lng, radius, strokeColor, strokeWidth, strokeOpacity, fillColor, fillOpacity) { var d2r = Math.PI / 180; var r2d = 180 / Math.PI; var Clat = radius * 0.014483; // statute miles into degrees latitude conversion var Clng = Clat/Math.cos(lat * d2r); var Cpoints = []; for (var i = 0; i < 33; i++) { var theta = Math.PI * (i / 16); Cy = lat + (Clat * Math.sin(theta)); Cx = lng + (Clng * Math.cos(theta)); var P = new GLatLng(Cy, Cx); Cpoints.push(P); } var polygon = new GPolygon(Cpoints, strokeColor, strokeWidth, strokeOpacity, fillColor, fillOpacity); map.addOverlay(polygon); }

    Read the article

  • AddThis floating vertical bar showing horizontally in WordPress

    - by Lakshmanan PHP
    Addthis floating vertical bar not working properly in a wordpress content-single page! Here is my code <div class="addthis_bar addthis_bar_vertical addthis_bar_large" style="top:50px;left:50px;"> <div class="addthis_toolbox addthis_default_style"> <span><a class="addthis_button_facebook_like" fb:like:layout="box_count"></a></span> <span><a class="addthis_button_tweet" tw:count="vertical"></a></span> <span><a class="addthis_button_google_plusone" g:plusone:size="tall"></a></span> <span><a class="addthis_button_stumbleupon_badge" su:badge:style="5" ></a></span> <span><a class="addthis_button_pinterest_pinit" pi:pinit:url="<?php echo get_permalink(get_the_ID()); ?>" pi:pinit:media="<?php echo $thumb_image[0];?>" pi:pinit:layout="vertical"></a></span> <span><a class="addthis_counter"></a></span> </div> I have tried with other addthis tool box sizes but end up with error. Thanks in advance

    Read the article

  • [C#] How can we differentiate between SDK class objects and custom class objects?

    - by Nayan
    To give an idea of my requirement, consider these classes - class A { } class B { string m_sName; public string Name { get { return m_sName; } set { m_sName = value; } } int m_iVal; public int Val { get { return m_iVal; } set { m_iVal = value; } } A m_objA; public A AObject { get { return m_objA; } set { m_objA = value; } } } Now, I need to identify the classes of the objects passed to a function void MyFunc(object obj) { Type type = obj.GetType(); foreach (PropertyInfo pi in type.GetProperties()) { if (pi.PropertyType.IsClass) { //I need objects only if (!type.IsGenericType && type.FullName.ToLower() == "system.string") { object _obj = pi.GetValue(obj, null); //do something } } } } I don't like this piece of code - if (!type.IsGenericType && type.FullName.ToLower() == "system.string") { because then i have to filter out classes like, System.Int16, System.Int32, System.Boolean and so on. Is there an elegant way through which I can find out if the object is of a class defined by me and not of system provided basic classes?

    Read the article

  • How to test if raising an event results in a method being called conditional on value of parameters

    - by MattC
    I'm trying to write a unit test that will raise an event on a mock object which my test class is bound to. What I'm keen to test though is that when my test class gets it's eventhandler called it should only call a method on certain values of the eventhandlers parameters. My test seems to pass even if I comment the code that calls ProcessPriceUpdate(price); I'm in VS2005 so no lambdas please :( So... public delegate void PriceUpdateEventHandler(decimal price); public interface IPriceInterface{ event PriceUpdateEventHandler PriceUpdate; } public class TestClass { IPriceInterface priceInterface = null; TestClass(IPriceInterface priceInterface) { this.priceInterface = priceInterface; } public void Init() { priceInterface.PriceUpdate += OnPriceUpdate; } public void OnPriceUpdate(decimal price) { if(price > 0) ProcessPriceUpdate(price); } public void ProcessPriceUpdate(decimal price) { //do something with price } } And my test so far :s public void PriceUpdateEvent() { MockRepository mock = new MockRepository(); IPriceInterface pi = mock.DynamicMock<IPriceInterface>(); TestClass test = new TestClass(pi); decimal prc = 1M; IEventRaiser raiser; using (mock.Record()) { pi.PriceUpdate += null; raiser = LastCall.IgnoreArguments().GetEventRaiser(); Expect.Call(delegate { test.ProcessPriceUpdate(prc); }).Repeat.Once(); } using (mock.Playback()) { test.Init(); raiser.Raise(prc); } }

    Read the article

  • LaTeX: how does the include command work?

    - by HH
    I assumed the include command copy-pastes code at compilation time, but that must be wrong because the code stopped working. Please see the middle part of the code. I only copy-pasted the code to the file and added the include command. $ cat results/frames.tex 10.31 & 8.50 & 7.40 \\ 10.34 & 8.53 & 7.81 \\ 8.22 & 8.62 & 7.78 \\ 10.16 & 8.53 & 7.44 \\ 10.41 & 8.38 & 7.63 \\ 10.38 & 8.57 & 8.03 \\ 10.13 & 8.66 & 7.41 \\ 8.50 & 8.60 & 7.15 \\ 10.41 & 8.63 & 7.21 \\ 8.53 & 8.53 & 7.12 \\ LaTeX code \begin{table} \begin{tabular}{ | l | m | r |} \hline $t$ / s & $d_{1}$ / s & $d_{2}$ / s \\ $\Delta h = 0,01 s$ & $\Delta d = 0,01 s$ & $\Delta d = 0,01 s$ \\ \hline % I JUST COPIED THE CODE from here to the file, included. % It stopped working, why? \include{results/frames.tex} \hline $\pi (\frac{d_{1}}{2} - \frac{d_{2}}{2})$ & $2 \pi R h$ & $2 \pi r h$ \\ \hline \end{tabular} \end{table}
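
    One likely culprit: \include is meant for whole chapters, so it issues a \clearpage and cannot be used inside an environment such as tabular, while \input really does paste the file's contents in at that point. A minimal sketch of the substitution, assuming the relative path is right:

        % \input splices results/frames.tex inline; \include would force a page break
        % and will not work inside the tabular environment
        \input{results/frames}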

    Read the article

  • Can this extension method be improved?

    - by Newbie
    I have the following extension method: public static class ListExtensions { public static IEnumerable<T> Search<T>(this ICollection<T> collection, string stringToSearch) { foreach (T t in collection) { Type k = t.GetType(); PropertyInfo pi = k.GetProperty("Name"); if (pi.GetValue(t, null).Equals(stringToSearch)) { yield return t; } } } } Using reflection, it finds the Name property and then filters the records from the collection based on the matching string. This method is being called as List<FactorClass> listFC = new List<FactorClass>(); listFC.Add(new FactorClass { Name = "BKP", FactorValue="Book to price",IsGlobal =false }); listFC.Add(new FactorClass { Name = "YLD", FactorValue = "Dividend yield", IsGlobal = false }); listFC.Add(new FactorClass { Name = "EPM", FactorValue = "emp", IsGlobal = false }); listFC.Add(new FactorClass { Name = "SE", FactorValue = "something else", IsGlobal = false }); List<FactorClass> listFC1 = listFC.Search("BKP").ToList(); It is working fine. But a closer look into the extension method will reveal that Type k = t.GetType(); PropertyInfo pi = k.GetProperty("Name"); is inside a foreach loop, which is not needed. I think we can take it outside the loop. But how? Please help. (C#3.0)

    Read the article

  • Sort command not working as expected

    - by user964689
    If anybody can help me to write a loop to iterate over files in a folder it would save me a huge amount of time. I think it must be quite a simple solution ,but I currently don't know how to nest a loop within a loop. So far I have this script: cd /folderlocation/ for i in `</textfile_containing_lines_to_iterate_through` do #size=`echo $i | perl -nE '/:([\d-]+)/ && say abs(eval $1)'` #echo "$size" zcat dataset | head -n 18 > temp"$i".vcf tabix dataset $i >> temp"$i".vcf vcftools --window-pi 1000000 --vcf temp10individuals"$i".vcf >> run_summary.txt cat out.windowed.pi >> outputfile_2 #rm temp* done grep -v "PI" outputfile_2 > outputfile rm outputfile_2 I need to expand this so that the script will run multiple times, through all of the 'textfiles_containing_lines_to_iterate_through'. Currently I change the name of the textfile manually each time and re-run the script. So I'd need a loop that does this for file in folder, and also that uses the name of the file as part of the outputfile name so that I can match an output file to an inputfile. Any help would be really useful and greatly appreciated! Many thanks in advance.

    Read the article

  • Developing a daily alarm in Android

    - by zoza
    I have this piece of code that fires the alarm once by setting a time and date using the TimePicker and the DatePicker in another activity. I want to modify it in a way that whenever I set a time and a date it will fire the alarm every day at the same time. In other words, I want the alarm to be fired daily. public class M_ReminderManager { private Context mContext; private AlarmManager mAlarmManager; public M_ReminderManager(Context context) { mContext = context; mAlarmManager = (AlarmManager)context.getSystemService(Context.ALARM_SERVICE); } public void setReminder(Long reminderId, Calendar when) { Intent i = new Intent(mContext, Medicines_OnAlarmReceiver.class); i.putExtra(RemindersDbAdapter.KEY_ROWID_MEDS, (long)reminderId); PendingIntent pi = PendingIntent.getBroadcast(mContext, 0, i, PendingIntent.FLAG_ONE_SHOT); mAlarmManager.set(AlarmManager.RTC_WAKEUP, when.getTimeInMillis(), pi); } } I have tried using the setRepeating function but I don't know exactly how I should set the arguments. I used this line instead of the set function in the code, but it didn't work: mAlarmManager.setRepeating(AlarmManager.RTC_WAKEUP, when.getTimeInMillis() ,AlarmManager.INTERVAL_DAY , pi); Can someone help me with it? Thanks in advance.
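
    For what it's worth, setRepeating() with AlarmManager.INTERVAL_DAY and the argument order shown above looks right; one common catch in code like this is the PendingIntent flag. FLAG_ONE_SHOT only allows the intent to be used once, so the daily repeats get dropped. A sketch of the modified method, assuming the first trigger time is in the future:

        public void setDailyReminder(Long reminderId, Calendar when) {
            Intent i = new Intent(mContext, Medicines_OnAlarmReceiver.class);
            i.putExtra(RemindersDbAdapter.KEY_ROWID_MEDS, (long) reminderId);
            // FLAG_UPDATE_CURRENT keeps the PendingIntent reusable for every daily firing
            PendingIntent pi = PendingIntent.getBroadcast(
                    mContext, 0, i, PendingIntent.FLAG_UPDATE_CURRENT);
            mAlarmManager.setRepeating(AlarmManager.RTC_WAKEUP,
                    when.getTimeInMillis(),          // first firing
                    AlarmManager.INTERVAL_DAY,       // then every 24 hours
                    pi);
        }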

    Read the article

  • Matlab fft function

    - by CTZStef
    The code below is from the Matlab 2011a help about fft function. I think there is a problem here : why do they multiply t(1:50) by Fs, and then say it's time in millisecond ? Certainly, it happens to be true in this very particular case, but change the value of Fs to, say, 2000, and it won't work anymore, obviously because of this factor of 2. Right ? Quite misleading, isn't it ? What do I miss ? Fs = 1000; % Sampling frequency T = 1/Fs; % Sample time L = 1000; % Length of signal t = (0:L-1)*T; % Time vector % Sum of a 50 Hz sinusoid and a 120 Hz sinusoid x = 0.7*sin(2*pi*50*t) + sin(2*pi*120*t); y = x + 2*randn(size(t)); % Sinusoids plus noise plot(Fs*t(1:50),y(1:50)) title('Signal Corrupted with Zero-Mean Random Noise') xlabel('time (milliseconds)') Clearer with this : fs = 2000; % Sampling frequency T = 1 / fs; % Sample time L = 1000; % Length of signal t2 = (0:L-1)*T; % Time vector f = 50; % signal frequency s2 = sin(2*pi*f*t2); figure, plot(fs*t2(1:50),s2(1:50)); % NOT good figure, plot(t2(1:50),s2(1:50)); % good
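
    A quick standalone check of that point, written here in Java rather than MATLAB: since t = (0:L-1)/Fs, the product Fs*t(1:50) is just the sample index 0, 1, 2, ..., which coincides with time in milliseconds only when Fs happens to be 1000.

        // Tiny sanity check: with Fs = 2000 the two columns no longer agree.
        public class TimeAxisCheck {
            public static void main(String[] args) {
                double fs = 2000.0;              // try 1000.0 and Fs*t matches milliseconds again
                double T = 1.0 / fs;
                for (int n = 0; n < 3; n++) {
                    double t = n * T;
                    // Fs*t is the sample index n; actual milliseconds are t * 1000
                    System.out.printf("n=%d  Fs*t=%.1f  t in ms=%.2f%n", n, fs * t, t * 1000.0);
                }
            }
        }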

    Read the article

  • ReplaceAll not working as expected

    - by Tim Kemp
    Still early days with Mathematica so please forgive what is probably a very obvious question. I am trying to generate some parametric plots. I have: ParametricPlot[{ (a + b) Cos[t] - h Cos[(a + b)/b t], (a + b) Sin[t] - h Sin[(a + b)/b t]}, {t, 0, 2 \[Pi]}, PlotRange -> All] /. {a -> 2, b -> 1, h -> 1} No joy: the replacement rules are not applied and a, b and h remain undefined. If I instead do: Hold@ParametricPlot[{ (a + b) Cos[t] - h Cos[(a + b)/b t], (a + b) Sin[t] - h Sin[(a + b)/b t]}, {t, 0, 2 \[Pi]}, PlotRange -> All] /. {a -> 2, b -> 1, h -> 1} it looks like the rules ARE working, as confirmed by the output: Hold[ParametricPlot[{(2 + 1) Cos[t] - 1 Cos[(2 + 1) t], (2 + 1) Sin[t] - 1 Sin[(2 + 1) t]}, {t, 0, 2 \[Pi]}, PlotRange -> All]] Which is what I'd expect. Take the Hold off, though, and the ParametricPlot doesn't work. There's nothing wrong with the equations or the ParametricPlot itself, though, because I tried setting values for a, b and h in a separate expression (a=2; b=1; h=1) and I get my pretty double cardoid out as expected. So, what am I doing wrong with ReplaceAll and why are the transformation rules not working? This is another fundamentally important aspect of MMA that my OOP-ruined brain isn't understanding. I tried reading up on ReplaceAll and ParametricPlot and the closest clue I found was that "ParametricPlot has attribute HoldAll and evaluates f only after assigning specific numerical values to variables" which didn't help much or I wouldn't be here. Thanks.

    Read the article

  • Getting duplicated values from group_concat

    - by Sackling
    I am having a problem where I am getting duplicated values from what I think I should be getting. here is my sql: SELECT DISTINCT p.products_image, pd.products_name, p.products_id, p.products_model, p.manufacturers_id, m.manufacturers_name, p.products_price, p.products_sort_order, p.products_tax_class_id, pd.products_viewed, group_concat(p2i.icons_id separator ",") AS icon_ids, group_concat(pi.icon_class separator ",") AS icon_class, IF(s.status, s.specials_new_products_price, NULL) AS specials_new_products_price, IF(s.status, s.specials_new_products_price, p.products_price) AS final_price FROM products p LEFT JOIN specials s ON p.products_id = s.products_id LEFT JOIN manufacturers m ON p.manufacturers_id = m.manufacturers_id JOIN products_description pd ON p.products_id = pd.products_id JOIN products_to_categories p2c ON p.products_id = p2c.products_id INNER JOIN products_specifications ps7 ON p.products_id = ps7.products_id LEFT JOIN products_to_icon p2i ON p.products_id = p2i.products_id LEFT JOIN products_icons pi ON p2i.icons_id = pi.icons_id WHERE p.products_status = '1' AND pd.language_id = '1' AND ps7.specification IN ('Polycotton' , 'Reflective') AND ps7.specifications_id = '7' AND ps7.language_id = '1' AND p2c.categories_id = '21' GROUP BY p.products_id ORDER BY p.products_sort_order The column that is getting double values is icon_ids from the group concact. This seams to happen only if ploycotton, and reflective are both IN ps7.specification. If it is only one or the other then it works fine. The products_to_icon table contains 2 columns, products_id and icons_id. If a product has 2 icons, there are 2 rows so I'm pretty sure it is this fact that is causing the duplicate icons ids. When I run this, the icon_ids column for a product with 2 icons is "4,4,6,6" for example, when what I need is "4,6"

    Read the article

  • DB2 UDF Permissions

    - by WernerCD
    I have a custom function that I'm working on... the problem I'm having is simple: Permissions. Example function: drop function circle_area go CREATE FUNCTION circle_area (radius FLOAT) RETURNS FLOAT LANGUAGE SQL BEGIN DECLARE pi FLOAT DEFAULT 3.14; DECLARE area FLOAT; SET area = pi * radius * radius; RETURN area; END GO If I then log out of my "admin" account... and log into a test account, I get a "Not authorized" error when I try to run something like "Select circle_area(foo) from library.bar". I can log into iSeries Navigator, navigate to the schema's functions permissions and change the permission for public from Exclude to All. Bam, it works. How do I grant permission to all, either in the CREATE FUNCTION or after?

    Read the article

  • Best of OTN - Week of August 17th

    - by CassandraClark-OTN
    Architect CommunityThe Top 3 most popular OTN ArchBeat video interviews of all time: Oracle Coherence Community on Java.net | Brian Oliver and Randy Stafford [October 24, 2013] Brian Oliver (Senior Principal Solutions Architect, Oracle Coherence) and Randy Stafford (Architect At-Large, Oracle Coherence Product Development) discuss the evolution of the Oracle Coherence Community on Java.net and how developers can actively participate in product development through Coherence Community open projects. Visit the Coherence Community at: https://java.net/projects/coherence. The Raspberry Pi Java Carputer and Other Wonders | Simon Ritter [February 13, 2014] Oracle lead Java evangelist Simon Ritter talks about his Raspberry Pi-based Java Carputer IoT project and other topics he presented at QCon London 2014. Hot Features in Oracle APEX 5.0 | Joel Kallman [May 14, 2014] Joel Kallman (Director, Software Development, Oracle) shares key points from his Great Lakes Oracle Conference 2014 session on new features in Oracle APEX 5.0. Friday Funny from OTN Architect Community Manager Bob Rhubart: Comedy legend Steve Martin entertains dogs in this 1976 clip from the Carol Burnette show. Database Community OTN Database Community Home Page - See all tech articles, downloads etc. related to Oracle Database for DBA's and Developers. Java Community JavaOne Blog - JRuby and JVM Languages at JavaOne!  In this video interview, Charles shared the JRuby features he presented at the JVM Language Summit. He'll be at JavaOne read the blog to see all the sessions. Java Source Blog - IoT: Wearables! Wearables are a subset of the Internet of Things that has gained a lot of attention. Learn More. I love Java FaceBook - Java Advanced Management Console demo - Watch as Jim Weaver, Java Technology Ambassador at Oracle, walks through a demonstration of the new Java Advanced Management Console (AMC) tool. Systems Community OTN Garage Blog - Why Wouldn't Root Be Able to Change a Zone's IP Address in Oracle Solaris 11? - Read and learn the answer. OTN Garage FaceBook - Securing Your Cloud-Based Data Center with Oracle Solaris 11 - Overview of the security precautions a sysadmin needs to take to secure data in a cloud infrastructure, and how to implement them with the security features in Oracle Solaris 11.

    Read the article

  • Any way to know if two IP addresses point to the same machine?

    - by Vivek V K
    Is there any way to find out if two different IP addresses on two different networks actually point to the same physical device? I need it on Linux. Edit - I have the same server (a Raspberry Pi) connected via two intranets to my client. I don't know the IP address of the server, as it is assigned by DHCP. The crude way to do it is to reach the Raspberry Pi from one intranet and check with ifconfig to find the IP address of the machine on the other intranet. I want to know if there is any other way I can do it. I know the MAC address of the machine, but I don't know how to find the IP address based on the MAC address.

    Read the article

  • 3D Vector "End Point" Calculation for procedural Vector Graphics

    - by FrostFlame64
    Alright, so I need some help with some vector math. I'm developing some game engines that use procedural fractal generation for some graphics, such as Lindenmayer systems for generating trees and plants. L-systems are drawn using turtle graphics, which is a form of vector graphics. I first created a system to draw in 2D, which works perfectly fine. But now I want to make a 3D equivalent, and I’ve run into an issue. For my 2D version, I created a method for quickly determining the “end point” of a vector-like movement. Given a starting point (X, Y), a direction (between 0 and 360 degrees), and a distance, the end point is calculated by these formulas: newX = startX + distance * Sin((PI * direction) / 180) newY = startY + distance * Cos((PI * direction) / 180) Now I need an equivalent for performing this calculation in 3D, but I haven’t been able to Google anything that could show me how to do this. I'm flexible enough to get whatever information is needed for this calculation, in any reasonable form (Vector3, Quaternion, etc.). To summarize: given a starting point/vector position in 3D space (X, Y, Z), a direction in 3D space (Vector3, Quaternion, etc.), and a distance, I need to find the “end point” in 3D space. Thank you for your time and help.
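
    A sketch of the 3D analogue in Java, using two angles (yaw around the vertical axis and pitch above the horizontal plane, both in radians here) so that pitch = 0 collapses back to the 2D formulas above; with a ready-made unit direction vector the same thing is simply start + direction * distance:

        // Hedged sketch: end point of a move of `distance` from (startX, startY, startZ)
        // in the direction given by yaw/pitch. Matches the 2D convention above
        // (x uses sin of the heading, y uses cos) when pitch is zero.
        static double[] endPoint(double startX, double startY, double startZ,
                                 double yaw, double pitch, double distance) {
            double newX = startX + distance * Math.cos(pitch) * Math.sin(yaw);
            double newY = startY + distance * Math.cos(pitch) * Math.cos(yaw);
            double newZ = startZ + distance * Math.sin(pitch);
            return new double[] { newX, newY, newZ };
        }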

    Read the article

  • Internet of Things (IoT) Thanksgiving Special: Turkey Tweeter (Part 1)

    - by hinkmond
    It's time for the Internet of Things (ioT) Thanksgiving Special. This time we are going to work on a special Do-It-Yourself project to create an Internet of Things temperature probe to connect your Turkey Day turkey to the Internet by writing a Thanksgiving Day Java Embedded app for your Raspberry Pi which will send out tweets as it cooks in your oven. If you're vegetarian, don't worry, you can follow along and just run the simulation of the Turkey Tweeter, or better yet, try a tofu version of the Turkey Tweeter. Here is the parts list: 1 Vernier Go!Temp USB Temperature Probe 1 Uncooked Turkey 1 Raspberry Pi (not Pumpkin Pie) 1 Roll thermal reflective tape You can buy the Vernier Go!Temp USB Temperature Probe for $39 from here: http://www.vernier.com/products/sensors/temperature-sensors/go-temp/. And, you can get the thermal reflective tape from any auto parts store. (Don't tell them what you need it for. Say it's for rebuilding your V-8 engine in your Dodge Hemi. Avoids the need for a long explanation and sounds cooler...) The uncooked turkey can be found in your neighborhood grocery store. But, if you're making a vegetarian Tofurkey, you're on your own... The Java Embedded app will be the same, though (Java is vegan). So, grab all your parts and come back here for the next part of this project... Hinkmond
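
    A hedged guess at the shape of the Java Embedded app's main loop; readCelsius() and tweet() are hypothetical stand-ins for the Go!Temp probe read and a Twitter client, since the real plumbing is left for the next part of the project:

        // Hypothetical sketch of the tweeting loop; the probe and Twitter plumbing are stubs.
        public class TurkeyTweeter {
            static final double DONE_CELSIUS = 74.0;   // assumption: roughly 165 F internal temperature

            public static void main(String[] args) throws InterruptedException {
                while (true) {
                    double c = readCelsius();                          // placeholder for the USB probe read
                    tweet(String.format("Turkey is at %.1f C #IoT", c));
                    if (c >= DONE_CELSIUS) {
                        tweet("Turkey is done! Happy Thanksgiving!");
                        break;
                    }
                    Thread.sleep(15 * 60 * 1000);                      // check every 15 minutes
                }
            }

            static double readCelsius() { return 0.0; }                // stub
            static void tweet(String message) { }                      // stub
        }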

    Read the article

  • How to do a multishot in XNA?

    - by DeVonte
    I am trying to simulate a gun in which shoots multiple bullets at the same time(similar to a spread out shot). I am thinking I have to create another bullet array then do the same as I have below but in a different direction. Here is what I have so far: foreach (GameObject bullet in bullets) { // Find a bullet that isn't alive if (!bullet.alive) { //And set it to alive bullet.alive = true; if (flip == SpriteEffects.FlipHorizontally) //Facing right { float armCos = (float)Math.Cos(arm.rotation - MathHelper.PiOver2); float armSin = (float)Math.Sin(arm.rotation - MathHelper.PiOver2); // Set the initial position of our bullets at the end of our gun arm // 42 is obtained by taking the width of the Arm_Gun texture / 2 // and subtracting the width of the Bullet texture / 2. ((96/2)=(12/2)) bullet.position = new Vector2( arm.position.X + 42 * armCos, arm.position.Y + 42 * armSin); // And give it a velocity of the direction we're aiming. // Increae/decrease speed by changeing 15.0f bullet.Velocity = new Vector2( (float)Math.Cos(arm.rotation - MathHelper.PiOver4 + MathHelper.Pi + MathHelper.PiOver2), (float)Math.Sin(arm.rotation - MathHelper.PiOver4 + MathHelper.Pi + MathHelper.PiOver2)) * 15.0f; } else //Facing left { float armCos = (float)Math.Cos(arm.rotation + MathHelper.PiOver2); float armSin = (float)Math.Sin(arm.rotation + MathHelper.PiOver2); //Set the initial position of our bullet at the end of our gun arm //42 is obtained be taking the width of the Arm_Gun texture / 2 //and subtracting the width of the Bullet texture / 2. ((96/2)-(12/2)) bullet.position = new Vector2( arm.position.X - 42 * armCos, arm.position.Y - 42 * armSin); //And give it a velocity of the direction we're aiming. //Increase/decrease speed by changing 15.0f bullet.Velocity = new Vector2( -armCos, -armSin) * 15.0f; } return; }// End if }// End foreach
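
    A sketch of the spread math in plain Java (the XNA version is the same idea with Vector2): compute one aim angle for the arm, then give each bullet fired in the volley a small angular offset around that angle instead of duplicating the whole facing-left/facing-right block per bullet. speed plays the role of the 15.0f above.

        // Hedged sketch: velocities for `count` bullets fanned across `spreadRadians`
        // around `aimAngle`, all at the same speed.
        static float[][] spreadVelocities(float aimAngle, int count, float spreadRadians, float speed) {
            float[][] velocities = new float[count][2];
            for (int i = 0; i < count; i++) {
                float offset = (count == 1)
                        ? 0f
                        : spreadRadians * ((float) i / (count - 1) - 0.5f);  // -spread/2 .. +spread/2
                float a = aimAngle + offset;
                velocities[i][0] = (float) Math.cos(a) * speed;   // x component
                velocities[i][1] = (float) Math.sin(a) * speed;   // y component
            }
            return velocities;
        }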

    Read the article

< Previous Page | 8 9 10 11 12 13 14 15 16 17 18 19  | Next Page >