Search Results

Search found 1659 results on 67 pages for 'bandwidth throttling'.


  • Help with Collision Resolution?

    - by Milo
    I'm trying to learn about physics by trying to make a simplified GTA 2 clone. My only problem is collision resolution. Everything else works great. I have a rigid body class and from there cars and a wheel class: class RigidBody extends Entity { //linear private Vector2D velocity = new Vector2D(); private Vector2D forces = new Vector2D(); private OBB2D predictionRect = new OBB2D(new Vector2D(), 1.0f, 1.0f, 0.0f); private float mass; private Vector2D deltaVec = new Vector2D(); private Vector2D v = new Vector2D(); //angular private float angularVelocity; private float torque; private float inertia; //graphical private Vector2D halfSize = new Vector2D(); private Bitmap image; private Matrix mat = new Matrix(); private float[] Vector2Ds = new float[2]; private Vector2D tangent = new Vector2D(); private static Vector2D worldRelVec = new Vector2D(); private static Vector2D relWorldVec = new Vector2D(); private static Vector2D pointVelVec = new Vector2D(); public RigidBody() { //set these defaults so we don't get divide by zeros mass = 1.0f; inertia = 1.0f; setLayer(LAYER_OBJECTS); } protected void rectChanged() { if(getWorld() != null) { getWorld().updateDynamic(this); } } //intialize out parameters public void initialize(Vector2D halfSize, float mass, Bitmap bitmap) { //store physical parameters this.halfSize = halfSize; this.mass = mass; image = bitmap; inertia = (1.0f / 20.0f) * (halfSize.x * halfSize.x) * (halfSize.y * halfSize.y) * mass; RectF rect = new RectF(); float scalar = 10.0f; rect.left = (int)-halfSize.x * scalar; rect.top = (int)-halfSize.y * scalar; rect.right = rect.left + (int)(halfSize.x * 2.0f * scalar); rect.bottom = rect.top + (int)(halfSize.y * 2.0f * scalar); setRect(rect); predictionRect.set(rect); } public void setLocation(Vector2D position, float angle) { getRect().set(position, getWidth(), getHeight(), angle); rectChanged(); } public void setPredictionLocation(Vector2D position, float angle) { getPredictionRect().set(position, getWidth(), getHeight(), angle); } public void setPredictionCenter(Vector2D center) { getPredictionRect().moveTo(center); } public void setPredictionAngle(float angle) { predictionRect.setAngle(angle); } public Vector2D getPosition() { return getRect().getCenter(); } public OBB2D getPredictionRect() { return predictionRect; } @Override public void update(float timeStep) { doUpdate(false,timeStep); } public void doUpdate(boolean prediction, float timeStep) { //integrate physics //linear Vector2D acceleration = Vector2D.scalarDivide(forces, mass); if(prediction) { Vector2D velocity = Vector2D.add(this.velocity, Vector2D.scalarMultiply(acceleration, timeStep)); Vector2D c = getRect().getCenter(); c = Vector2D.add(getRect().getCenter(), Vector2D.scalarMultiply(velocity , timeStep)); setPredictionCenter(c); //forces = new Vector2D(0,0); //clear forces } else { velocity.x += (acceleration.x * timeStep); velocity.y += (acceleration.y * timeStep); //velocity = Vector2D.add(velocity, Vector2D.scalarMultiply(acceleration, timeStep)); Vector2D c = getRect().getCenter(); v.x = getRect().getCenter().getX() + (velocity.x * timeStep); v.y = getRect().getCenter().getY() + (velocity.y * timeStep); deltaVec.x = v.x - c.x; deltaVec.y = v.y - c.y; deltaVec.normalize(); setCenter(v.x, v.y); forces.x = 0; //clear forces forces.y = 0; } //angular float angAcc = torque / inertia; if(prediction) { float angularVelocity = this.angularVelocity + angAcc * timeStep; setPredictionAngle(getAngle() + angularVelocity * timeStep); //torque = 0; //clear torque } else { 
angularVelocity += angAcc * timeStep; setAngle(getAngle() + angularVelocity * timeStep); torque = 0; //clear torque } } public void updatePrediction(float timeStep) { doUpdate(true, timeStep); } //take a relative Vector2D and make it a world Vector2D public Vector2D relativeToWorld(Vector2D relative) { mat.reset(); Vector2Ds[0] = relative.x; Vector2Ds[1] = relative.y; mat.postRotate(JMath.radToDeg(getAngle())); mat.mapVectors(Vector2Ds); relWorldVec.x = Vector2Ds[0]; relWorldVec.y = Vector2Ds[1]; return new Vector2D(Vector2Ds[0], Vector2Ds[1]); } //take a world Vector2D and make it a relative Vector2D public Vector2D worldToRelative(Vector2D world) { mat.reset(); Vector2Ds[0] = world.x; Vector2Ds[1] = world.y; mat.postRotate(JMath.radToDeg(-getAngle())); mat.mapVectors(Vector2Ds); return new Vector2D(Vector2Ds[0], Vector2Ds[1]); } //velocity of a point on body public Vector2D pointVelocity(Vector2D worldOffset) { tangent.x = -worldOffset.y; tangent.y = worldOffset.x; return Vector2D.add( Vector2D.scalarMultiply(tangent, angularVelocity) , velocity); } public void applyForce(Vector2D worldForce, Vector2D worldOffset) { //add linear force forces.x += worldForce.x; forces.y += worldForce.y; //add associated torque torque += Vector2D.cross(worldOffset, worldForce); } @Override public void draw( GraphicsContext c) { c.drawRotatedScaledBitmap(image, getPosition().x, getPosition().y, getWidth(), getHeight(), getAngle()); } public Vector2D getVelocity() { return velocity; } public void setVelocity(Vector2D velocity) { this.velocity = velocity; } public Vector2D getDeltaVec() { return deltaVec; } } Vehicle public class Wheel { private Vector2D forwardVec; private Vector2D sideVec; private float wheelTorque; private float wheelSpeed; private float wheelInertia; private float wheelRadius; private Vector2D position = new Vector2D(); public Wheel(Vector2D position, float radius) { this.position = position; setSteeringAngle(0); wheelSpeed = 0; wheelRadius = radius; wheelInertia = (radius * radius) * 1.1f; } public void setSteeringAngle(float newAngle) { Matrix mat = new Matrix(); float []vecArray = new float[4]; //forward Vector vecArray[0] = 0; vecArray[1] = 1; //side Vector vecArray[2] = -1; vecArray[3] = 0; mat.postRotate(newAngle / (float)Math.PI * 180.0f); mat.mapVectors(vecArray); forwardVec = new Vector2D(vecArray[0], vecArray[1]); sideVec = new Vector2D(vecArray[2], vecArray[3]); } public void addTransmissionTorque(float newValue) { wheelTorque += newValue; } public float getWheelSpeed() { return wheelSpeed; } public Vector2D getAnchorPoint() { return position; } public Vector2D calculateForce(Vector2D relativeGroundSpeed, float timeStep, boolean prediction) { //calculate speed of tire patch at ground Vector2D patchSpeed = Vector2D.scalarMultiply(Vector2D.scalarMultiply( Vector2D.negative(forwardVec), wheelSpeed), wheelRadius); //get velocity difference between ground and patch Vector2D velDifference = Vector2D.add(relativeGroundSpeed , patchSpeed); //project ground speed onto side axis Float forwardMag = new Float(0.0f); Vector2D sideVel = velDifference.project(sideVec); Vector2D forwardVel = velDifference.project(forwardVec, forwardMag); //calculate super fake friction forces //calculate response force Vector2D responseForce = Vector2D.scalarMultiply(Vector2D.negative(sideVel), 2.0f); responseForce = Vector2D.subtract(responseForce, forwardVel); float topSpeed = 500.0f; //calculate torque on wheel wheelTorque += forwardMag * wheelRadius; //integrate total torque into wheel wheelSpeed += 
wheelTorque / wheelInertia * timeStep; //top speed limit (kind of a hack) if(wheelSpeed > topSpeed) { wheelSpeed = topSpeed; } //clear our transmission torque accumulator wheelTorque = 0; //return force acting on body return responseForce; } public void setTransmissionTorque(float newValue) { wheelTorque = newValue; } public float getTransmissionTourque() { return wheelTorque; } public void setWheelSpeed(float speed) { wheelSpeed = speed; } } //our vehicle object public class Vehicle extends RigidBody { private Wheel [] wheels = new Wheel[4]; private boolean throttled = false; public void initialize(Vector2D halfSize, float mass, Bitmap bitmap) { //front wheels wheels[0] = new Wheel(new Vector2D(halfSize.x, halfSize.y), 0.45f); wheels[1] = new Wheel(new Vector2D(-halfSize.x, halfSize.y), 0.45f); //rear wheels wheels[2] = new Wheel(new Vector2D(halfSize.x, -halfSize.y), 0.75f); wheels[3] = new Wheel(new Vector2D(-halfSize.x, -halfSize.y), 0.75f); super.initialize(halfSize, mass, bitmap); } public void setSteering(float steering) { float steeringLock = 0.13f; //apply steering angle to front wheels wheels[0].setSteeringAngle(steering * steeringLock); wheels[1].setSteeringAngle(steering * steeringLock); } public void setThrottle(float throttle, boolean allWheel) { float torque = 85.0f; throttled = true; //apply transmission torque to back wheels if (allWheel) { wheels[0].addTransmissionTorque(throttle * torque); wheels[1].addTransmissionTorque(throttle * torque); } wheels[2].addTransmissionTorque(throttle * torque); wheels[3].addTransmissionTorque(throttle * torque); } public void setBrakes(float brakes) { float brakeTorque = 15.0f; //apply brake torque opposing wheel vel for (Wheel wheel : wheels) { float wheelVel = wheel.getWheelSpeed(); wheel.addTransmissionTorque(-wheelVel * brakeTorque * brakes); } } public void doUpdate(float timeStep, boolean prediction) { for (Wheel wheel : wheels) { float wheelVel = wheel.getWheelSpeed(); //apply negative force to naturally slow down car if(!throttled && !prediction) wheel.addTransmissionTorque(-wheelVel * 0.11f); Vector2D worldWheelOffset = relativeToWorld(wheel.getAnchorPoint()); Vector2D worldGroundVel = pointVelocity(worldWheelOffset); Vector2D relativeGroundSpeed = worldToRelative(worldGroundVel); Vector2D relativeResponseForce = wheel.calculateForce(relativeGroundSpeed, timeStep,prediction); Vector2D worldResponseForce = relativeToWorld(relativeResponseForce); applyForce(worldResponseForce, worldWheelOffset); } //no throttling yet this frame throttled = false; if(prediction) { super.updatePrediction(timeStep); } else { super.update(timeStep); } } @Override public void update(float timeStep) { doUpdate(timeStep,false); } public void updatePrediction(float timeStep) { doUpdate(timeStep,true); } public void inverseThrottle() { float scalar = 0.2f; for(Wheel wheel : wheels) { wheel.setTransmissionTorque(-wheel.getTransmissionTourque() * scalar); wheel.setWheelSpeed(-wheel.getWheelSpeed() * 0.1f); } } } And my big hack collision resolution: private void update() { camera.setPosition((vehicle.getPosition().x * camera.getScale()) - ((getWidth() ) / 2.0f), (vehicle.getPosition().y * camera.getScale()) - ((getHeight() ) / 2.0f)); //camera.move(input.getAnalogStick().getStickValueX() * 15.0f, input.getAnalogStick().getStickValueY() * 15.0f); if(input.isPressed(ControlButton.BUTTON_GAS)) { vehicle.setThrottle(1.0f, false); } if(input.isPressed(ControlButton.BUTTON_STEAL_CAR)) { vehicle.setThrottle(-1.0f, false); } 
if(input.isPressed(ControlButton.BUTTON_BRAKE)) { vehicle.setBrakes(1.0f); } vehicle.setSteering(input.getAnalogStick().getStickValueX()); //vehicle.update(16.6666666f / 1000.0f); boolean colided = false; vehicle.updatePrediction(16.66666f / 1000.0f); List<Entity> buildings = world.queryStaticSolid(vehicle,vehicle.getPredictionRect()); if(buildings.size() > 0) { colided = true; } if(!colided) { vehicle.update(16.66f / 1000.0f); } else { Vector2D delta = vehicle.getDeltaVec(); vehicle.setVelocity(Vector2D.negative(vehicle.getVelocity().multiply(0.2f)). add(delta.multiply(-1.0f))); vehicle.inverseThrottle(); } } Here is OBB public class OBB2D { // Corners of the box, where 0 is the lower left. private Vector2D corner[] = new Vector2D[4]; private Vector2D center = new Vector2D(); private Vector2D extents = new Vector2D(); private RectF boundingRect = new RectF(); private float angle; //Two edges of the box extended away from corner[0]. private Vector2D axis[] = new Vector2D[2]; private double origin[] = new double[2]; public OBB2D(Vector2D center, float w, float h, float angle) { set(center,w,h,angle); } public OBB2D(float left, float top, float width, float height) { set(new Vector2D(left + (width / 2), top + (height / 2)),width,height,0.0f); } public void set(Vector2D center,float w, float h,float angle) { Vector2D X = new Vector2D( (float)Math.cos(angle), (float)Math.sin(angle)); Vector2D Y = new Vector2D((float)-Math.sin(angle), (float)Math.cos(angle)); X = X.multiply( w / 2); Y = Y.multiply( h / 2); corner[0] = center.subtract(X).subtract(Y); corner[1] = center.add(X).subtract(Y); corner[2] = center.add(X).add(Y); corner[3] = center.subtract(X).add(Y); computeAxes(); extents.x = w / 2; extents.y = h / 2; computeDimensions(center,angle); } private void computeDimensions(Vector2D center,float angle) { this.center.x = center.x; this.center.y = center.y; this.angle = angle; boundingRect.left = Math.min(Math.min(corner[0].x, corner[3].x), Math.min(corner[1].x, corner[2].x)); boundingRect.top = Math.min(Math.min(corner[0].y, corner[1].y),Math.min(corner[2].y, corner[3].y)); boundingRect.right = Math.max(Math.max(corner[1].x, corner[2].x), Math.max(corner[0].x, corner[3].x)); boundingRect.bottom = Math.max(Math.max(corner[2].y, corner[3].y),Math.max(corner[0].y, corner[1].y)); } public void set(RectF rect) { set(new Vector2D(rect.centerX(),rect.centerY()),rect.width(),rect.height(),0.0f); } // Returns true if other overlaps one dimension of this. private boolean overlaps1Way(OBB2D other) { for (int a = 0; a < axis.length; ++a) { double t = other.corner[0].dot(axis[a]); // Find the extent of box 2 on axis a double tMin = t; double tMax = t; for (int c = 1; c < corner.length; ++c) { t = other.corner[c].dot(axis[a]); if (t < tMin) { tMin = t; } else if (t > tMax) { tMax = t; } } // We have to subtract off the origin // See if [tMin, tMax] intersects [0, 1] if ((tMin > 1 + origin[a]) || (tMax < origin[a])) { // There was no intersection along this dimension; // the boxes cannot possibly overlap. return false; } } // There was no dimension along which there is no intersection. // Therefore the boxes overlap. return true; } //Updates the axes after the corners move. Assumes the //corners actually form a rectangle. private void computeAxes() { axis[0] = corner[1].subtract(corner[0]); axis[1] = corner[3].subtract(corner[0]); // Make the length of each axis 1/edge length so we know any // dot product must be less than 1 to fall within the edge. 
for (int a = 0; a < axis.length; ++a) { axis[a] = axis[a].divide((axis[a].length() * axis[a].length())); origin[a] = corner[0].dot(axis[a]); } } public void moveTo(Vector2D center) { Vector2D centroid = (corner[0].add(corner[1]).add(corner[2]).add(corner[3])).divide(4.0f); Vector2D translation = center.subtract(centroid); for (int c = 0; c < 4; ++c) { corner[c] = corner[c].add(translation); } computeAxes(); computeDimensions(center,angle); } // Returns true if the intersection of the boxes is non-empty. public boolean overlaps(OBB2D other) { if(right() < other.left()) { return false; } if(bottom() < other.top()) { return false; } if(left() > other.right()) { return false; } if(top() > other.bottom()) { return false; } if(other.getAngle() == 0.0f && getAngle() == 0.0f) { return true; } return overlaps1Way(other) && other.overlaps1Way(this); } public Vector2D getCenter() { return center; } public float getWidth() { return extents.x * 2; } public float getHeight() { return extents.y * 2; } public void setAngle(float angle) { set(center,getWidth(),getHeight(),angle); } public float getAngle() { return angle; } public void setSize(float w,float h) { set(center,w,h,angle); } public float left() { return boundingRect.left; } public float right() { return boundingRect.right; } public float bottom() { return boundingRect.bottom; } public float top() { return boundingRect.top; } public RectF getBoundingRect() { return boundingRect; } public boolean overlaps(float left, float top, float right, float bottom) { if(right() < left) { return false; } if(bottom() < top) { return false; } if(left() > right) { return false; } if(top() > bottom) { return false; } return true; } }; What I do is when I predict a hit on the car, I force it back. It does not work that well and seems like a bad idea. What could I do to have more proper collision resolution. Such that if I hit a wall I will never get stuck in it and if I hit the side of a wall I can steer my way out of it. Thanks I found this nice ppt. It talks about pulling objects apart and calculating new velocities. How could I calc new velocities in my case? http://www.google.ca/url?sa=t&rct=j&q=&esrc=s&source=web&cd=2&ved=0CC8QFjAB&url=http%3A%2F%2Fcoitweb.uncc.edu%2F~tbarnes2%2FGameDesignFall05%2FSlides%2FCh4.2-CollDet.ppt&ei=x4ucULy5M6-N0QGRy4D4Cg&usg=AFQjCNG7FVDXWRdLv8_-T5qnFyYld53cTQ&cad=rja
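    A minimal sketch of the "pull the objects apart, then correct the velocities" response those slides describe, written as a method that could be added to the RigidBody class above (so it can reuse setCenter, getVelocity and setVelocity directly). The contact normal and penetration depth are assumed inputs here (the OBB2D test above only answers yes/no, so it would need to be extended to report them), and the restitution factor is just a tunable placeholder:

        // Sketch: add to RigidBody. Resolves one contact against a static wall.
        // `normal` is the unit collision normal pointing from the wall toward the car,
        // `penetration` is the overlap depth along it -- both are assumed inputs that
        // the OBB2D overlap test would need to be extended to report.
        public void resolveWallContact(Vector2D normal, float penetration) {
            // 1) Positional correction: push the body back out along the normal so it
            //    can never sink into, or get stuck inside, the wall.
            Vector2D c = getPosition();
            setCenter(c.x + normal.x * penetration,   // same call the integrator in
                      c.y + normal.y * penetration);  // doUpdate() already uses

            // 2) Velocity correction: remove only the velocity component that points
            //    into the wall; the tangential part is kept, so the car can still
            //    slide along the wall and be steered away from it.
            float restitution = 0.2f;                    // 0 = no bounce, 1 = full bounce (tunable)
            Vector2D v = getVelocity();
            float vn = v.x * normal.x + v.y * normal.y;  // negative while moving into the wall
            if (vn < 0.0f) {
                float j = -(1.0f + restitution) * vn;    // impulse per unit mass along the normal
                setVelocity(new Vector2D(v.x + j * normal.x, v.y + j * normal.y));
            }
        }

    Compared with negating the whole velocity, this keeps the car's motion parallel to the wall, so steering out of a glancing hit works naturally; the restitution value and the amount of positional correction would still need tuning against the existing wheel forces.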

    Read the article

  • disk-to-disk backup without costly backup redundancy?

    - by AaronLS
    A good backup strategy involves a combination of 1) disconnected backups/snapshots that will not be affected by bugs, viruses, and/or security breaches, 2) geographically distributed backups to protect against local disasters, and 3) testing backups to ensure that they can be restored as needed. Generally I take an onsite backup daily and an offsite backup weekly, and do test restores periodically. In the rare circumstance that I need to restore files, I do so from the local backup. Should a catastrophic event destroy the servers and local backups, then the offsite weekly tape backup would be used to restore the files. I don't need multiple offsite backups with redundancy. I ALREADY HAVE REDUNDANCY THROUGH THE USE OF BOTH LOCAL AND REMOTE BACKUPS. I have recovery blocks and par files with the backups, so I already have protection against a small percentage of corrupt bits. I perform test restores to ensure the backups function properly. Should the remote backups experience a data loss, I can replace them with one of the local backups. There are historical offsite backups as well, so if a data loss was not noticed for a few weeks (such as a bug/security breach/virus), the data could be restored from an older backup. By doing this, the only scenario that poses a risk of complete data loss would be one where the local backups, the remote backups, and the servers all experienced a data loss in the same time period. I'm willing to risk that happening since the odds of that trifecta are negligibly small, and the data isn't THAT valuable to me. So I hope I have emphasized that I don't need redundancy in my offsite backups because I have covered all the bases. I know this exact technique is employed by numerous businesses. Of course there are some that take multiple offsite backups, because the data is so incredibly valuable that they don't even want to risk that trifecta disaster, but in the majority of cases the trifecta disaster is an accepted risk. I HAD TO COVER ALL THIS BECAUSE SOME PEOPLE DON'T READ!!! I think I have justified my backup strategy, and the majority of businesses who use offsite tape backups do not have any additional redundancy beyond what is mentioned above (recovery blocks, par files, historical snapshots).
Now I would like to eliminate the use of tapes for offsite backups and instead use a backup service. Most, however, are extremely costly in $/GB/month for storage. I don't mind paying for transfer bandwidth, but the cost of storage is way too high. All of them advertise that they maintain backups of the data, and I imagine they use RAID as well. Obviously if you were using them to host servers this would all be necessary, but for my scenario I am simply replacing my offsite backups with such a service. So there is no need for RAID, and absolutely no value in another layer of backups of backups.
My one and only question: "Are there online data-storage/backup services that do not use redundancy or offer backups (backups of my backups) as part of their packages, and thus are more reasonably priced?" NOT my question: "Is this a flawed strategy?" I don't care if you think this is a good strategy or not. I know it's pretty standard. Very few people make an extra copy of their offsite backups. They already have local backups that they can use to replace the remote backups if something catastrophic happens at the remote site. Please limit your responses to the question posed.
Sorry if I seem a little abrasive, but I had some trolls in my last post who didn't read my requirements or my question, and were trying to go off answering a totally different question. I made it pretty clear, but didn't try to justify my strategy, because I didn't ask whether my strategy was justifiable. So I apologize if this was lengthy, as it really didn't need to be, but there are so many trolls here who try to sidetrack questions by responding without addressing the question at hand.

    Read the article

  • Announcing Windows Azure Mobile Services

    - by ScottGu
    I’m excited to announce a new capability we are adding to Windows Azure today: Windows Azure Mobile Services Windows Azure Mobile Services makes it incredibly easy to connect a scalable cloud backend to your client and mobile applications.  It allows you to easily store structured data in the cloud that can span both devices and users, integrate it with user authentication, as well as send out updates to clients via push notifications. Today’s release enables you to add these capabilities to any Windows 8 app in literally minutes, and provides a super productive way for you to quickly build out your app ideas.  We’ll also be adding support to enable these same scenarios for Windows Phone, iOS, and Android devices soon. Read this getting started tutorial to walkthrough how you can build (in less than 5 minutes) a simple Windows 8 “Todo List” app that is cloud enabled using Windows Azure Mobile Services.  Or watch this video of me showing how to do it step by step. Getting Started If you don’t already have a Windows Azure account, you can sign up for a no-obligation Free Trial.  Once you are signed-up, click the “preview features” section under the “account” tab of the www.windowsazure.com website and enable your account to support the “Mobile Services” preview.   Instructions on how to enable this can be found here. Once you have the mobile services preview enabled, log into the Windows Azure Portal, click the “New” button and choose the new “Mobile Services” icon to create your first mobile backend.  Once created, you’ll see a quick-start page like below with instructions on how to connect your mobile service to an existing Windows 8 client app you have already started working on, or how to create and connect a brand-new Windows 8 client app with it: Read this getting started tutorial to walkthrough how you can build (in less than 5 minutes) a simple Windows 8 “Todo List” app  that stores data in Windows Azure. Storing Data in the Cloud Storing data in the cloud with Windows Azure Mobile Services is incredibly easy.  When you create a Windows Azure Mobile Service, we automatically associate it with a SQL Database inside Windows Azure.  The Windows Azure Mobile Service backend then provides built-in support for enabling remote apps to securely store and retrieve data from it (using secure REST end-points utilizing a JSON-based ODATA format) – without you having to write or deploy any custom server code.  Built-in management support is provided within the Windows Azure portal for creating new tables, browsing data, setting indexes, and controlling access permissions. This makes it incredibly easy to connect client applications to the cloud, and enables client developers who don’t have a server-code background to be productive from the very beginning.  They can instead focus on building the client app experience, and leverage Windows Azure Mobile Services to provide the cloud backend services they require.  Below is an example of client-side Windows 8 C#/XAML code that could be used to query data from a Windows Azure Mobile Service.  Client-side C# developers can write queries like this using LINQ and strongly typed POCO objects, which are then translated into HTTP REST queries that run against a Windows Azure Mobile Service.   
Developers don’t have to write or deploy any custom server-side code in order to enable client-side code below to execute and asynchronously populate their client UI: Because Mobile Services is part of Windows Azure, developers can later choose to augment or extend their initial solution and add custom server functionality and more advanced logic if they want.  This provides maximum flexibility, and enables developers to grow and extend their solutions to meet any needs. User Authentication and Push Notifications Windows Azure Mobile Services also make it incredibly easy to integrate user authentication/authorization and push notifications within your applications.  You can use these capabilities to enable authentication and fine grain access control permissions to the data you store in the cloud, as well as to trigger push notifications to users/devices when the data changes.  Windows Azure Mobile Services supports the concept of “server scripts” (small chunks of server-side script that executes in response to actions) that make it really easy to enable these scenarios. Below are some tutorials that walkthrough common authentication/authorization/push scenarios you can do with Windows Azure Mobile Services and Windows 8 apps: Enabling User Authentication Authorizing Users  Get Started with Push Notifications Push Notifications to multiple Users Manage and Monitor your Mobile Service Just like with every other service in Windows Azure, you can monitor usage and metrics of your mobile service backend using the “Dashboard” tab within the Windows Azure Portal. The dashboard tab provides a built-in monitoring view of the API calls, Bandwidth, and server CPU cycles of your Windows Azure Mobile Service.   You can also use the “Logs” tab within the portal to review error messages.  This makes it easy to monitor and track how your application is doing. Scale Up as Your Business Grows Windows Azure Mobile Services now allows every Windows Azure customer to create and run up to 10 Mobile Services in a free, shared/multi-tenant hosting environment (where your mobile backend will be one of multiple apps running on a shared set of server resources).  This provides an easy way to get started on projects at no cost beyond the database you connect your Windows Azure Mobile Service to (note: each Windows Azure free trial account also includes a 1GB SQL Database that you can use with any number of apps or Windows Azure Mobile Services). If your client application becomes popular, you can click the “Scale” tab of your Mobile Service and switch from “Shared” to “Reserved” mode.  Doing so allows you to isolate your apps so that you are the only customer within a virtual machine.  This allows you to elastically scale the amount of resources your apps use – allowing you to scale-up (or scale-down) your capacity as your traffic grows: With Windows Azure you pay for compute capacity on a per-hour basis – which allows you to scale up and down your resources to match only what you need.  This enables a super flexible model that is ideal for new mobile app scenarios, as well as startups who are just getting going.  Summary I’ve only scratched the surface of what you can do with Windows Azure Mobile Services – there are a lot more features to explore.  With Windows Azure Mobile Services you’ll be able to build mobile app experiences faster than ever, and enable even better user experiences – by connecting your client apps to the cloud. 
Visit the Windows Azure Mobile Services development center to learn more, and build your first Windows 8 app connected with Windows Azure today.  And read this getting started tutorial to walkthrough how you can build (in less than 5 minutes) a simple Windows 8 “Todo List” app that is cloud enabled using Windows Azure Mobile Services. Hope this helps, Scott P.S. In addition to blogging, I am also now using Twitter for quick updates and to share links. Follow me at: twitter.com/scottgu

    Read the article

  • How To Remove People and Objects From Photographs In Photoshop

    - by Eric Z Goodnight
    You might think that it’s a complicated process to remove objects from photographs. But really Photoshop makes it quite simple, even when removing all traces of a person from digital photographs. Read on to see just how easy it is. Photoshop was originally created to be an image editing program, and it excels at it. With hardly any Photoshop experience, any beginner can begin removing objects or people from their photos. Have some friends that photobombed an otherwise great pic? Tell them to say their farewells, because here’s how to get rid of them with Photoshop! Tools for Removing Objects Removing an object is not really “magical” work. Your goal is basically to cover up the information you don’t want in an image with information you do want. In this sample image, we want to remove the cigar smoking man, and leave the geisha. Here’s a couple of the tools that can be useful to work with when attempting this kind of task. Clone Stamp and Pattern Stamp Tool: Samples parts of your image from your background, and allows you to paint into your image with your mouse or stylus. Eraser and Brush Tools: Paint flat colors and shapes, and erase cloned layers of image information. Basic, down and dirty photo editing tools. Pen, Quick Selection, Lasso, and Crop tools: Select, isolate, and remove parts of your image with these selection tools. All useful in their own way. Some, like the pen tool, are nightmarishly tough on beginners. Remove a Person with the Clone Stamp Tool (Video) The video above uses the Clone Stamp tool to sample and paint with the background texture. It’s a simple tool to use, although it can be confusing, possibly counter-intuitive. Here’s some pointers, in addition to the video above. Select shortcut key to choose the Clone tool stamp from the Tools Panel. Always create a copy of your background layer before doing heavy edits by right clicking on the background in your Layers Panel and selecting “Duplicate.” Hold with the Clone Tool selected, and click anywhere in your image to sample that area. When you’re sampling an area, your cursor is “Aligned” with your sample area. When you paint, your sample area moves. You can turn the “Aligned” setting off by clicking the in the Options Panel at the top of your screen if you want. Change your brush size and hardness as shown in the video by right-clicking in your image. Use your lasso to copy and paste pieces of your image in order to cover up any parts that seem appropriate. Photoshop Magic with the “Content-Aware Fill” One of the hallmark features of CS5 is the “Content-Aware Fill.” Content aware fill can be an excellent shortcut to removing objects and even people in Photoshop, but it is somewhat limited, and can get confused. Here’s a basic rundown on how it works. Select an object using your Lasso tool, shortcut key . The Lasso works fine as this selection can be rough. Navigate to Edit > Fill, and select “Content-Aware,” as illustrated above, from the pull-down menu. It’s surprisingly simple. After some processing, Photoshop has done the work of removing the object for you. It takes a few moments, and it is not perfect, so be prepared to touch it up with some Copy-Paste, or some Clone stamp action. Content Aware Fill Has Its Limits Keep in mind that the Content Aware Fill is meant to be used with other techniques in mind. It doesn’t always perform perfectly, but can give you a great starting point. Take this image for instance. 
It is actually plausible to hide this figure and make this image look like he was never there at all. With a selection made with the Lasso tool, navigate to Edit > Fill and select “Content Aware” again. The result is surprisingly good, but as you can see, worthy of some touch-up. With a result like this one, you’ll have to get your hands dirty with copy-paste to create believable lines in the background. With many photographs, Content Aware Fill will simply get confused and give you results you won’t be happy with. Additional Touch Up for Bad Background Textures with the Pattern Stamp Tool For the perfectionist, cleaning up the lumpy-looking textures that the Clone Stamp can leave is fairly simple using the Pattern Stamp Tool. Sample a piece of your image with your Marquee Tool, shortcut key . Navigate to Edit > Define Pattern to create a new Pattern from your selection. Click OK to continue. Click and hold down on the Clone Stamp tool in your Tools Panel until you can select the Pattern Stamp Tool. Pick your new pattern from the Options at the top of your screen, in the Options Panel. Then simply right-click in your image in order to pick as soft a brush as possible to paint with. Paint into your image until your background is as smooth as you want it to be, making your painted-out object more and more invisible. If you get lines from your repeated texture, experiment with turning the setting on and off and paint over them. In addition to this, simple use of the Crop Tool, shortcut , can recompose an image, making it look as if it never had another object in it at all. Combine these techniques to find a method that works best for your images. Have questions or comments concerning Graphics, Photos, Filetypes, or Photoshop? Send your questions to [email protected], and they may be featured in a future How-To Geek Graphics article. Image Credits: Geisha Kyoto Gion by Todd Laracuenta via Wikipedia, used under Creative Commons. Moai Rano raraku by Aurbina, in Public Domain. Chris Young visits Wrigley by TonyTheTiger, via Wikipedia, used under Creative Commons.

    Read the article

  • OS Analytics - Deep Dive Into Your OS

    - by Eran_Steiner
    Enterprise Manager Ops Center provides a feature called "OS Analytics". This feature allows you to get a better understanding of how the Operating System is being utilized. You can research the historical usage as well as real time data. This post will show how you can benefit from OS Analytics and how it works behind the scenes. We will have a call to discuss this blog - please join us!Date: Thursday, November 1, 2012Time: 11:00 am, Eastern Daylight Time (New York, GMT-04:00)1. Go to https://oracleconferencing.webex.com/oracleconferencing/j.php?ED=209833067&UID=1512092402&PW=NY2JhMmFjMmFh&RT=MiMxMQ%3D%3D2. If requested, enter your name and email address.3. If a password is required, enter the meeting password: oracle1234. Click "Join". To join the teleconference:Call-in toll-free number:       1-866-682-4770  (US/Canada)      Other countries:                https://oracle.intercallonline.com/portlets/scheduling/viewNumbers/viewNumber.do?ownerNumber=5931260&audioType=RP&viewGa=true&ga=ONConference Code:       7629343#Security code:            7777# Here is quick summary of what you can do with OS Analytics in Ops Center: View historical charts and real time value of CPU, memory, network and disk utilization Find the top CPU and Memory processes in real time or at a certain historical day Determine proper monitoring thresholds based on historical data View Solaris services status details Drill down into a process details View the busiest zones if applicable Where to start To start with OS Analytics, choose the OS asset in the tree and click the Analytics tab. You can see the CPU utilization, Memory utilization and Network utilization, along with the current real time top 5 processes in each category (click the image to see a larger version):  In the above screen, you can click each of the top 5 processes to see a more detailed view of that process. Here is an example of one of the processes: One of the cool things is that you can see the process tree for this process along with some port binding and open file descriptors. On Solaris machines with zones, you get an extra level of tabs, allowing you to get more information on the different zones: This is a good way to see the busiest zones. For example, one zone may not take a lot of CPU but it can consume a lot of memory, or perhaps network bandwidth. To see the detailed Analytics for each of the zones, simply click each of the zones in the tree and go to its Analytics tab. Next, click the "Processes" tab to see real time information of all the processes on the machine: An interesting column is the "Target" column. If you configured Ops Center to work with Enterprise Manager Cloud Control, then the two products will talk to each other and Ops Center will display the correlated target from Cloud Control in this table. If you are only using Ops Center - this column will remain empty. Next, if you view a Solaris machine, you will have a "Services" tab: By default, all services will be displayed, but you can choose to display only certain states, for example, those in maintenance or the degraded ones. You can highlight a service and choose to view the details, where you can see the Dependencies, Dependents and also the location of the service log file (not shown in the picture as you need to scroll down to see the log file). 
The "Threshold" tab is particularly helpful - you can view historical trends of different monitored values and based on the graph - determine what the monitoring values should be: You can ask Ops Center to suggest monitoring levels based on the historical values or you can set your own. The different colors in the graph represent the current set levels: Red for critical, Yellow for warning and Blue for Information, allowing you to quickly see how they're positioned against real data. It's important to note that when looking at longer periods, Ops Center smooths out the data and uses averages. So when looking at values such as CPU Usage, try shorter time frames which are more detailed, such as one hour or one day. Applying new monitoring values When first applying new values to monitored attributes - a popup will come up asking if it's OK to get you out of the current Monitoring Policy. This is OK if you want to either have custom monitoring for a specific machine, or if you want to use this current machine as a "Gold image" and extract a Monitoring Policy from it. You can later apply the new Monitoring Policy to other machines and also set it as a default Monitoring Profile. Once you're done with applying the different monitoring values, you can review and change them in the "Monitoring" tab. You can also click the "Extract a Monitoring Policy" in the actions pane on the right to save all the new values to a new Monitoring Policy, which can then be found under "Plan Management" -> "Monitoring Policies". Visiting the past Under the "History" tab you can "go back in time". This is very helpful when you know that a machine was busy a few hours ago (perhaps in the middle of the night?), but you were not around to take a look at it in real time. Here's a view into yesterday's data on one of the machines: You can see an interesting CPU spike happening at around 3:30 am along with some memory use. In the bottom table you can see the top 5 CPU and Memory consumers at the requested time. Very quickly you can see that this spike is related to the Solaris 11 IPS repository synchronization process using the "pkgrecv" command. The "time machine" doesn't stop here - you can also view historical data to determine which of the zones was the busiest at a given time: Under the hood The data collected is stored on each of the agents under /var/opt/sun/xvm/analytics/historical/ An "os.zip" file exists for the main OS. Inside you will find many small text files, named after the Epoch time stamp in which they were taken If you have any zones, there will be a file called "guests.zip" containing the same small files for all the zones, as well as a folder with the name of the zone along with "os.zip" in it If this is the Enterprise Controller or the Proxy Controller, you will have folders called "proxy" and "sat" in which you will find the "os.zip" for that controller The actual script collecting the data can be viewed for debugging purposes as well: On Linux, the location is: /opt/sun/xvmoc/private/os_analytics/collect On Solaris, the location is /opt/SUNWxvmoc/private/os_analytics/collect If you would like to redirect all the standard error into a file for debugging, touch the following file and the output will go into it: # touch /tmp/.collect.stderr   The temporary data is collected under /var/opt/sun/xvm/analytics/.collectdb until it is zipped. If you would like to review the properties for the Analytics, you can view those per each agent in /opt/sun/n1gc/lib/XVM.properties. 
Find the section "Analytics configurable properties for OS and VSC" to view the Analytics specific values. I hope you find this helpful! Please post questions in the comments below. Eran Steiner
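    Given the layout just described, a small sketch for poking at one of those archives offline might look like the following. It relies only on what is stated above (the path, and the convention that each sample is a small text file named after the Epoch time stamp at which it was taken); whether those stamps are in seconds is an assumption:

        import java.io.IOException;
        import java.util.Date;
        import java.util.Enumeration;
        import java.util.zip.ZipEntry;
        import java.util.zip.ZipFile;

        // Sketch: list the per-sample files inside an agent's os.zip and translate
        // their Epoch-stamp names into readable dates.
        public class ListAnalyticsSamples {
            public static void main(String[] args) throws IOException {
                String path = args.length > 0 ? args[0]
                        : "/var/opt/sun/xvm/analytics/historical/os.zip";
                try (ZipFile zip = new ZipFile(path)) {
                    Enumeration<? extends ZipEntry> entries = zip.entries();
                    while (entries.hasMoreElements()) {
                        ZipEntry entry = entries.nextElement();
                        String name = entry.getName();
                        try {
                            long epochSeconds = Long.parseLong(name);  // assumes stamps are in seconds
                            System.out.println(name + " -> " + new Date(epochSeconds * 1000L));
                        } catch (NumberFormatException notAStamp) {
                            System.out.println(name);  // e.g. the per-zone folders inside guests.zip
                        }
                    }
                }
            }
        }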

    Read the article

  • NIC Bonding/balance-rr with Dell PowerConnect 5324

    - by Branden Martin
    I'm trying to get NIC bonding to work with balance-rr so that three NIC ports are combined, so that instead of getting 1 Gbps we get 3 Gbps. We are doing this on two servers connected to the same switch. However, we're only getting the speed of one physical link. We are using 1 Dell PowerConnect 5324, SW version 2.0.1.3, Boot version 1.0.2.02, HW version 00.00.02. Both servers are CentOS 5.9 (Final) running OnApp Hypervisor (CloudBoot) Server 1 is using ports g5-g7 in port-channel 1. Server 2 is using ports g9-g11 in port-channel 2. Switch show interface status Port Type Duplex Speed Neg ctrl State Pressure Mode -------- ------------ ------ ----- -------- ---- ----------- -------- ------- g1 1G-Copper -- -- -- -- Down -- -- g2 1G-Copper Full 1000 Enabled Off Up Disabled Off g3 1G-Copper -- -- -- -- Down -- -- g4 1G-Copper -- -- -- -- Down -- -- g5 1G-Copper Full 1000 Enabled Off Up Disabled Off g6 1G-Copper Full 1000 Enabled Off Up Disabled Off g7 1G-Copper Full 1000 Enabled Off Up Disabled On g8 1G-Copper Full 1000 Enabled Off Up Disabled Off g9 1G-Copper Full 1000 Enabled Off Up Disabled On g10 1G-Copper Full 1000 Enabled Off Up Disabled On g11 1G-Copper Full 1000 Enabled Off Up Disabled Off g12 1G-Copper Full 1000 Enabled Off Up Disabled On g13 1G-Copper -- -- -- -- Down -- -- g14 1G-Copper -- -- -- -- Down -- -- g15 1G-Copper -- -- -- -- Down -- -- g16 1G-Copper -- -- -- -- Down -- -- g17 1G-Copper -- -- -- -- Down -- -- g18 1G-Copper -- -- -- -- Down -- -- g19 1G-Copper -- -- -- -- Down -- -- g20 1G-Copper -- -- -- -- Down -- -- g21 1G-Combo-C -- -- -- -- Down -- -- g22 1G-Combo-C -- -- -- -- Down -- -- g23 1G-Combo-C -- -- -- -- Down -- -- g24 1G-Combo-C Full 100 Enabled Off Up Disabled On Flow Link Ch Type Duplex Speed Neg control State -------- ------- ------ ----- -------- ------- ----------- ch1 1G Full 1000 Enabled Off Up ch2 1G Full 1000 Enabled Off Up ch3 -- -- -- -- -- Not Present ch4 -- -- -- -- -- Not Present ch5 -- -- -- -- -- Not Present ch6 -- -- -- -- -- Not Present ch7 -- -- -- -- -- Not Present ch8 -- -- -- -- -- Not Present Server 1: cat /etc/sysconfig/network-scripts/ifcfg-eth3 DEVICE=eth3 HWADDR=00:1b:21:ac:d5:55 USERCTL=no BOOTPROTO=none ONBOOT=yes MASTER=onappstorebond SLAVE=yes cat /etc/sysconfig/network-scripts/ifcfg-eth4 DEVICE=eth4 HWADDR=68:05:ca:18:28:ae USERCTL=no BOOTPROTO=none ONBOOT=yes MASTER=onappstorebond SLAVE=yes cat /etc/sysconfig/network-scripts/ifcfg-eth5 DEVICE=eth5 HWADDR=68:05:ca:18:28:af USERCTL=no BOOTPROTO=none ONBOOT=yes MASTER=onappstorebond SLAVE=yes cat /etc/sysconfig/network-scripts/ifcfg-onappstorebond DEVICE=onappstorebond IPADDR=10.200.52.1 NETMASK=255.255.0.0 GATEWAY=10.200.2.254 NETWORK=10.200.0.0 USERCTL=no BOOTPROTO=none ONBOOT=yes cat /proc/net/bonding/onappstorebond Ethernet Channel Bonding Driver: v3.4.0-1 (October 7, 2008) Bonding Mode: load balancing (round-robin) MII Status: up MII Polling Interval (ms): 100 Up Delay (ms): 0 Down Delay (ms): 0 Slave Interface: eth3 MII Status: up Speed: 1000 Mbps Duplex: full Link Failure Count: 0 Permanent HW addr: 00:1b:21:ac:d5:55 Slave Interface: eth4 MII Status: up Speed: 1000 Mbps Duplex: full Link Failure Count: 0 Permanent HW addr: 68:05:ca:18:28:ae Slave Interface: eth5 MII Status: up Speed: 1000 Mbps Duplex: full Link Failure Count: 0 Permanent HW addr: 68:05:ca:18:28:af Server 2: cat /etc/sysconfig/network-scripts/ifcfg-eth3 DEVICE=eth3 HWADDR=00:1b:21:ac:d5:a7 USERCTL=no BOOTPROTO=none ONBOOT=yes MASTER=onappstorebond SLAVE=yes cat /etc/sysconfig/network-scripts/ifcfg-eth4 
DEVICE=eth4 HWADDR=68:05:ca:18:30:30 USERCTL=no BOOTPROTO=none ONBOOT=yes MASTER=onappstorebond SLAVE=yes cat /etc/sysconfig/network-scripts/ifcfg-eth5 DEVICE=eth5 HWADDR=68:05:ca:18:30:31 USERCTL=no BOOTPROTO=none ONBOOT=yes MASTER=onappstorebond SLAVE=yes cat /etc/sysconfig/network-scripts/ifcfg-onappstorebond DEVICE=onappstorebond IPADDR=10.200.53.1 NETMASK=255.255.0.0 GATEWAY=10.200.3.254 NETWORK=10.200.0.0 USERCTL=no BOOTPROTO=none ONBOOT=yes cat /proc/net/bonding/onappstorebond Ethernet Channel Bonding Driver: v3.4.0-1 (October 7, 2008) Bonding Mode: load balancing (round-robin) MII Status: up MII Polling Interval (ms): 100 Up Delay (ms): 0 Down Delay (ms): 0 Slave Interface: eth3 MII Status: up Speed: 1000 Mbps Duplex: full Link Failure Count: 0 Permanent HW addr: 00:1b:21:ac:d5:a7 Slave Interface: eth4 MII Status: up Speed: 1000 Mbps Duplex: full Link Failure Count: 0 Permanent HW addr: 68:05:ca:18:30:30 Slave Interface: eth5 MII Status: up Speed: 1000 Mbps Duplex: full Link Failure Count: 0 Permanent HW addr: 68:05:ca:18:30:31 Here are the results of iperf. ------------------------------------------------------------ Client connecting to 10.200.52.1, TCP port 5001 TCP window size: 27.7 KByte (default) ------------------------------------------------------------ [ 3] local 10.200.3.254 port 53766 connected with 10.200.52.1 port 5001 [ ID] Interval Transfer Bandwidth [ 3] 0.0-10.0 sec 950 MBytes 794 Mbits/sec
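    For quick reference when weighing the iperf result against the bond's nominal capacity, here is a small sketch (not a fix) that parses the /proc/net/bonding output shown above and sums the per-slave link speeds; the path and the line format are taken directly from the output in this question:

        import java.io.IOException;
        import java.nio.charset.StandardCharsets;
        import java.nio.file.Files;
        import java.nio.file.Paths;
        import java.util.List;

        // Sketch: add up the per-slave "Speed:" lines reported in /proc/net/bonding/<bond>
        // (format exactly as shown above) to get the theoretical aggregate for comparison
        // with the single-stream iperf measurement.
        public class BondSpeedCheck {
            public static void main(String[] args) throws IOException {
                String bondFile = args.length > 0 ? args[0] : "/proc/net/bonding/onappstorebond";
                List<String> lines = Files.readAllLines(Paths.get(bondFile), StandardCharsets.UTF_8);
                int slaves = 0;
                long totalMbps = 0;
                for (String raw : lines) {
                    String line = raw.trim();
                    if (line.startsWith("Slave Interface:")) {
                        slaves++;
                    } else if (line.startsWith("Speed:")) {
                        totalMbps += Long.parseLong(line.split("\\s+")[1]); // "Speed: 1000 Mbps"
                    }
                }
                System.out.println(slaves + " slaves, theoretical aggregate: " + totalMbps
                        + " Mbps; compare with the ~794 Mbits/sec measured above.");
            }
        }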

    Read the article

  • Windows Azure Evolution &ndash; Deploy Web Sites (WAWS Part 3)

    - by Shaun
    This is the sixth post of my Windows Azure Evolution series. After talking a bit about the new caching preview feature in the previous one, let’s get back to Windows Azure Web Sites (WAWS).   Git and GitHub Integration In the third post I introduced the overall functionality of WAWS and demonstrated how to create a WordPress blog through the built-in application gallery. And in the fourth post I covered how to use the TFS service preview to deploy an ASP.NET MVC application to the web site through the TFS integration. WAWS also has Git integration. I’m not going to talk in much detail about the Git and GitHub integration since there is plenty of information on the internet you can refer to. To enable Git, just go to the web site item in the developer portal and click “Set up Git publishing”. After specifying the username and password, the Windows Azure platform will establish the Git integration and provide some basic guidance. As you can see, you can download the Git binaries, commit the files and then push to the remote repository. Regarding GitHub, since it’s built on top of Git it should work as well. Maarten Balliauw has a wonderful post about how to integrate GitHub with a Windows Azure Web Site, which you can find here.   WebMatrix 2 RC WebMatrix is a lightweight web application development tool provided by Microsoft. It uses WebDeploy or FTP to deploy the web application to the server. WebMatrix 2.0 RC adds the ability to work with Windows Azure. First of all we need to download the latest WebMatrix 2 through the Web Platform Installer 4.0. Just open the WebPI and search for “WebMatrix”, or go to its home page and download its web installer. Once we have WebMatrix 2, we need to download the publish file of our WAWS. Let’s go to the developer portal, open the web site we want to deploy, and download the publish file from the link on the right-hand side. This file contains the information needed to publish the web site through WebDeploy and FTP, and it can be used in WebMatrix, Visual Studio, etc. Once we have the publish file we can open WebMatrix and click Open Site, Remote Site. This brings up a dialog where we can input the information of the remote site. Since we have our publish file already, we can click “Import publish settings” and select the publish file; the site information will then be populated automatically. Click OK, and WebMatrix will connect to the remote site, which is the WAWS we had already deployed, and retrieve the folder and file information. We can open files in WebMatrix and modify them. But since WebMatrix is a lightweight web application tool, we cannot update the backend C# code, so in this case we will modify the frontend home page only. After saving our modification, WebMatrix will compare the local and remote files and upload only the modified files to Windows Azure through the connection information in the publish file. Since it only uploads the files that were changed, this minimizes the bandwidth used and the deployment duration. After a few seconds we can go back to the website and see that the modification has been applied.   Visual Studio and WebDeploy The publish file we downloaded can be used not only in WebMatrix but also in Visual Studio. As we know, in Visual Studio we can publish a web application by clicking the “Publish” item in the project context menu in the solution explorer, and we can specify WebDeploy, FTP or File System as the publish target.
Now we can use the WAWS publish file to let Visual Studio publish the web application to WAWS. Let’s create a new ASP.NET MVC Web Application in Visual Studio 2010 and then click “Publish” in the solution explorer. Once we have the Windows Azure SDK 1.7 installed, it will update the web application publish dialog, so now we can import the publish information from the publish file. Select WebDeploy as the publish method. We can select FTP as well, which is supported by Windows Azure; the FTP information is in the same publish file. In the last step the publish wizard can check the files which will be uploaded to the remote site before actually publishing. This gives us a chance to review and amend the files. As with WebMatrix, Visual Studio will compare the local files with those on WAWS and determine which have been changed and need to be published. Finally Visual Studio will publish the web application to Windows Azure through the WebDeploy protocol. Once it finishes we can browse our website.   FTP Deployment The publish file we downloaded contains the connection information for our web site via both WebDeploy and FTP. When using WebMatrix and Visual Studio we can select WebDeploy or FTP. The WebDeploy method can be used very easily from WebMatrix and Visual Studio, with the file compare feature, but FTP gives more flexibility: we can use any FTP client to upload files to Windows Azure, regardless of which client and OS we are using. If we open the publish file in any text editor, we can find the connection information very easily. As you can see, the publish file is actually an XML file with the WebDeploy and FTP information in plain-text attributes (a short sketch of reading them programmatically follows at the end of this post). And once we have the FTP URL, username and password, we can connect to the site and upload and download files. For example, I opened FileZilla and connected to my WAWS through FTP. I could then download the files I was interested in, modify them on my local disk, and upload them back to Windows Azure through FileZilla. Then I can see the new page.   Summary In this simple and quick post I introduced various approaches to deploying our web application to a Windows Azure Web Site. It supports the TFS integration which I mentioned previously, and it also supports Git and GitHub, WebDeploy and FTP.   Hope this helps, Shaun All documents and related graphics and code are provided "AS IS" without warranty of any kind. Copyright © Shaun Ziyan Xu. This work is licensed under the Creative Commons License.
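    To illustrate the "plain-text attributes" point, here is a rough sketch of pulling the FTP details out of a downloaded publish file. The element and attribute names used (publishProfile, publishMethod, publishUrl, userName, userPWD) are assumptions based on the description in this post, so open your own file in a text editor first and adjust them if they differ:

        import java.io.File;
        import javax.xml.parsers.DocumentBuilderFactory;
        import org.w3c.dom.Document;
        import org.w3c.dom.Element;
        import org.w3c.dom.NodeList;

        // Sketch: read the FTP connection details from a downloaded publish file.
        // Element/attribute names are assumptions -- verify against your own file.
        public class ReadPublishSettings {
            public static void main(String[] args) throws Exception {
                File publishFile = new File(args.length > 0 ? args[0] : "mysite.publishsettings");
                Document doc = DocumentBuilderFactory.newInstance()
                        .newDocumentBuilder().parse(publishFile);
                NodeList profiles = doc.getElementsByTagName("publishProfile");
                for (int i = 0; i < profiles.getLength(); i++) {
                    Element profile = (Element) profiles.item(i);
                    if ("FTP".equalsIgnoreCase(profile.getAttribute("publishMethod"))) {
                        System.out.println("FTP URL:  " + profile.getAttribute("publishUrl"));
                        System.out.println("Username: " + profile.getAttribute("userName"));
                        System.out.println("Password: " + profile.getAttribute("userPWD"));
                    }
                }
            }
        }

    Any standard FTP client (FileZilla in the walkthrough above) can then use those three values directly.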

    Read the article

  • J2EE Applications, SPARC T4, Solaris Containers, and Resource Pools

    - by user12620111
    I've obtained a substantial performance improvement on a SPARC T4-2 Server running a J2EE Application Server Cluster by deploying the cluster members into Oracle Solaris Containers and binding those containers to cores of the SPARC T4 Processor. This is not a surprising result, in fact, it is consistent with other results that are available on the Internet. See the "references", below, for some examples. Nonetheless, here is a summary of my configuration and results. (1.0) Before deploying a J2EE Application Server Cluster into a virtualized environment, many decisions need to be made. I'm not claiming that all of the decisions that I have a made will work well for every environment. In fact, I'm not even claiming that all of the decisions are the best possible for my environment. I'm only claiming that of the small sample of configurations that I've tested, this is the one that is working best for me. Here are some of the decisions that needed to be made: (1.1) Which virtualization option? There are several virtualization options and isolation levels that are available. Options include: Hard partitions:  Dynamic Domains on Sun SPARC Enterprise M-Series Servers Hypervisor based virtualization such as Oracle VM Server for SPARC (LDOMs) on SPARC T-Series Servers OS Virtualization using Oracle Solaris Containers Resource management tools in the Oracle Solaris OS to control the amount of resources an application receives, such as CPU cycles, physical memory, and network bandwidth. Oracle Solaris Containers provide the right level of isolation and flexibility for my environment. To borrow some words from my friends in marketing, "The SPARC T4 processor leverages the unique, no-cost virtualization capabilities of Oracle Solaris Zones"  (1.2) How to associate Oracle Solaris Containers with resources? There are several options available to associate containers with resources, including (a) resource pool association (b) dedicated-cpu resources and (c) capped-cpu resources. I chose to create resource pools and associate them with the containers because I wanted explicit control over the cores and virtual processors.  (1.3) Cluster Topology? Is it best to deploy (a) multiple application servers on one node, (b) one application server on multiple nodes, or (c) multiple application servers on multiple nodes? After a few quick tests, it appears that one application server per Oracle Solaris Container is a good solution. (1.4) Number of cluster members to deploy? I chose to deploy four big 64-bit application servers. I would like go back a test many 32-bit application servers, but that is left for another day. (2.0) Configuration tested. (2.1) I was using a SPARC T4-2 Server which has 2 CPU and 128 virtual processors. 
To understand the physical layout of the hardware on Solaris 10, I used the OpenSolaris psrinfo perl script available at http://hub.opensolaris.org/bin/download/Community+Group+performance/files/psrinfo.pl: test# ./psrinfo.pl -pv The physical processor has 8 cores and 64 virtual processors (0-63) The core has 8 virtual processors (0-7)   The core has 8 virtual processors (8-15)   The core has 8 virtual processors (16-23)   The core has 8 virtual processors (24-31)   The core has 8 virtual processors (32-39)   The core has 8 virtual processors (40-47)   The core has 8 virtual processors (48-55)   The core has 8 virtual processors (56-63)     SPARC-T4 (chipid 0, clock 2848 MHz) The physical processor has 8 cores and 64 virtual processors (64-127)   The core has 8 virtual processors (64-71)   The core has 8 virtual processors (72-79)   The core has 8 virtual processors (80-87)   The core has 8 virtual processors (88-95)   The core has 8 virtual processors (96-103)   The core has 8 virtual processors (104-111)   The core has 8 virtual processors (112-119)   The core has 8 virtual processors (120-127)     SPARC-T4 (chipid 1, clock 2848 MHz) (2.2) The "before" test: without processor binding. I started with a 4-member cluster deployed into 4 Oracle Solaris Containers. Each container used a unique gigabit Ethernet port for HTTP traffic. The containers shared a 10 gigabit Ethernet port for JDBC traffic. (2.3) The "after" test: with processor binding. I ran one application server in the Global Zone and another application server in each of the three non-global zones (NGZ):  (3.0) Configuration steps. The following steps need to be repeated for all three Oracle Solaris Containers. (3.1) Stop AppServers from the BUI. (3.2) Stop the NGZ. test# ssh test-z2 init 5 (3.3) Enable resource pools: test# svcadm enable pools (3.4) Create the resource pool: test# poolcfg -dc 'create pool pool-test-z2' (3.5) Create the processor set: test# poolcfg -dc 'create pset pset-test-z2' (3.6) Specify the maximum number of CPU's that may be addd to the processor set: test# poolcfg -dc 'modify pset pset-test-z2 (uint pset.max=32)' (3.7) bash syntax to add Virtual CPUs to the processor set: test# (( i = 64 )); while (( i < 96 )); do poolcfg -dc "transfer to pset pset-test-z2 (cpu $i)"; (( i = i + 1 )) ; done (3.8) Associate the resource pool with the processor set: test# poolcfg -dc 'associate pool pool-test-z2 (pset pset-test-z2)' (3.9) Tell the zone to use the resource pool that has been created: test# zonecfg -z test-z1 set pool=pool-test-z2 (3.10) Boot the Oracle Solaris Container test# zoneadm -z test-z2 boot (3.11) Save the configuration to /etc/pooladm.conf test# pooladm -s (4.0) Results. Using the resource pools improves both throughput and response time: (5.0) References: System Administration Guide: Oracle Solaris Containers-Resource Management and Oracle Solaris Zones Capitalizing on large numbers of processors with WebSphere Portal on Solaris WebSphere Application Server and T5440 (Dileep Kumar's Weblog)  http://www.brendangregg.com/zones.html Reuters Market Data System, RMDS 6 Multiple Instances (Consolidated), Performance Test Results in Solaris, Containers/Zones Environment on Sun Blade X6270 by Amjad Khan, 2009.

    Read the article

  • Bizarre and very specific Internet connection loss

    - by Synetech
    Yesterday (Friday, September 21, 2012), my Internet connection started acting up. After some testing, I confirmed a very specific and baffling set of symptoms: Internet connection goes away every 25-35 minutes (I did not confirm the exact interval, but it seems to be about 30 mins.) Only some protocols are affected; HTTP*, P2P, etc. stop working; FTP, etc. continue to work When it’s stopped, cannot even ping router or cable-modem IPs or view their firmware pages Domain-names and IPs are irrelevant (for protocols that stop working, neither work, for those that still work, both work) Resetting router fixes it for another 30 minutes Keeping the connection idle or active doesn’t seem to make a difference (nor the bandwidth usage in that period) Connecting directly to cable-modem allows it to work indefinitely Disconnecting the router from the cable-modem works indefinitely (no Internet connection obviously, but can still access router IP and firmware page) Connecting the router to the cable-modem, but putting the modem on standby also works indefinitely Same problem with both a wireless laptop and wired (on any port) desktop (both Windows 7; will try to test Windows XP when possible) Nothing had changed in the days leading up to the issue. No modifications to the networking configuration or the router; there were not even any Windows updates except for an MSSE definition update. Waiting does not fix it, nor does any amount of fiddling with anything; only resetting the router fixes it for 30 minutes (resetting the cable-modem doesn't work either) I tried cleaning the pins in the router’s plugs, but that didn’t help, which was not really a surprise since I was not getting a lost connection error. Obviously my first thought was that the router was having a problem, and this is borne out by some tests. The problem is that when it drops, it is not a full drop since I can still do things like ftp ftp.mcafee.com and such which means that the connection and DNS are still working. Moreover, if it were the router, then why does it stay alive indefinitely when not connected to the cable-modem (i.e., no outside influence)? The problem doesn't seem to be either the cable-modem nor the router, but rather an interaction between the two, like something from the outside (port scan? hacker? ISP?) that is triggering a problem in the router. I see that there have been a couple of vulnerabilities for the DI-524, but those were a while back and should be fixed since I have the last firmware for it. I don’t think it’s my ISP (Rogers) since I have been using the router for several years without problem and can connect indefinitely when bypassing it. But I can’t rule them out since that is one of the only possible things that could have suddenly changed. Does anybody have any ideas of explanations, fixed, or tests? (I note that when I opened the router, I heard a very high-pitched noise from somewhere near the capacitors/ferrite ring which I don’t think I heard the last time I opened it a few years ago, but then if it were that, then why would it affect only a very small, specific set of functions?)

    Read the article

  • Developing a Cost Model for Cloud Applications

    - by BuckWoody
    Note - please pay attention to the date of this post. As much as I attempt to make the information below accurate, the nature of distributed computing means that components, units and pricing will change over time. The definitive costs for Microsoft Windows Azure and SQL Azure are located here, and are more accurate than anything you will see in this post: http://www.microsoft.com/windowsazure/offers/  When writing software that is run on a Platform-as-a-Service (PaaS) offering like Windows Azure / SQL Azure, one of the questions you must answer is how much the system will cost. I will not discuss the comparisons between on-premise costs (which are nigh impossible to calculate accurately) versus cloud costs, but instead focus on creating a general model for estimating costs for a given application. You should be aware that there are (at this writing) two billing mechanisms for Windows and SQL Azure: “Pay-as-you-go” or consumption, and “Subscription” or commitment. Conceptually, you can consider the former a pay-as-you-go cell phone plan, where you pay by the unit used (at a slightly higher rate) and the latter as a standard cell phone plan where you commit to a contract and thus pay lower rates. In this post I’ll stick with the pay-as-you-go mechanism for simplicity, which should be the maximum cost you would pay. From there you may be able to get a lower cost if you use the other mechanism. In any case, the model you create should hold. Developing a good cost model is essential. As a developer or architect, you’ll most certainly be asked how much something will cost, and you need to have a reliable way to estimate that. Businesses and Organizations have been used to paying for servers, software licenses, and other infrastructure as an up-front cost, and power, people to the systems and so on as an ongoing (and sometimes not factored) cost. When presented with a new paradigm like distributed computing, they may not understand the true cost/value proposition, and that’s where the architect and developer can guide the conversation to make a choice based on features of the application versus the true costs. The two big buckets of use-types for these applications are customer-based and steady-state. In the customer-based use type, each successful use of the program results in a sale or income for your organization. Perhaps you’ve written an application that provides the spot-price of foo, and your customer pays for the use of that application. In that case, once you’ve estimated your cost for a successful traversal of the application, you can build that into the price you charge the user. It’s a standard restaurant model, where the price of the meal is determined by the cost of making it, plus any profit you can make. In the second use-type, the application will be used by a more-or-less constant number of processes or users and no direct revenue is attached to the system. A typical example is a customer-tracking system used by the employees within your company. In this case, the cost model is often created “in reverse” - meaning that you pilot the application, monitor the use (and costs) and that cost is held steady. This is where the comparison with an on-premise system becomes necessary, even though it is more difficult to estimate those on-premise true costs. For instance, do you know exactly how much cost the air conditioning is because you have a team of system administrators? 
This may sound trivial, but that, along with the insurance for the building, the wiring, and every other part of the system is in fact a cost to the business. There are three primary methods that I’ve been successful with in estimating the cost. None are perfect, all are demand-driven. The general process is to lay out a matrix of components, units, and cost per unit, and then multiply that by the usage of the system, based on which components you use in the program. That sounds a bit simplistic, but using those metrics in a calculation becomes more detailed. In all of the methods that follow, you need to know your application. The components for a PaaS include computing instances, storage, transactions, bandwidth and, in the case of SQL Azure, database size. In most cases, architects start with the first model and progress through the other methods to gain accuracy. Simple Estimation The simplest way to calculate costs is to architect the application (even UML or on-paper, no coding involved) and then estimate which of the components you’ll use, and how much of each will be used. Microsoft provides two tools to do this - one is a simple slider-application located here: http://www.microsoft.com/windowsazure/pricing-calculator/  The other is a tool you download to create a “Return on Investment” (ROI) spreadsheet, which has the advantage of leading you through various questions to estimate what you plan to use, located here: https://roianalyst.alinean.com/msft/AutoLogin.do?d=176318219048082115  You can also just create a spreadsheet yourself with a structure like this: Program Element / Azure Component / Unit of Measure / Cost Per Unit / Estimated Use of Component / Total Cost Per Component / Cumulative Cost. Of course, the consideration with this model is that it is difficult to predict a system that is not running or hasn’t even been developed. Which brings us to the next model type. Measure and Project A more accurate model is to actually write the code for the application, using the Software Development Kit (SDK) which can run entirely disconnected from Azure. The code should be instrumented to estimate the use of the application components, logging to a local file on the development system. A series of unit and integration tests should be run, which will create load on the test system. You can use standard development concepts to track this usage, and even use Windows Performance Monitor counters. The best place to start with this method is to use the Windows Azure Diagnostics subsystem in your code, which you can read more about here: http://blogs.msdn.com/b/sumitm/archive/2009/11/18/introducing-windows-azure-diagnostics.aspx This set of APIs greatly simplifies tracking the application, and in fact you can use this information for more than just a cost model. After you have the tracking logs, you can plug the numbers into any of the tools above, which should give a representative cost or in some cases a unit cost. The consideration with this model is that the SDK fabric is not a one-to-one comparison with performance on the actual Windows Azure fabric. Those differences are usually small, but they do need to be considered. Also, you may not be able to accurately predict the load on the system, which might lead to an architectural change, which changes the model. This leads us to the next, most accurate method for a cost model. 
Sample and Estimate Once the application is deployed, you will get a bill each month from Microsoft for your Azure usage. The bill is quite detailed, and you can export the data from it to do analysis and, using standard statistical and other predictive methods such as regression, project out into the future what the costs will be. I normally advise that the architect also extrapolate a unit cost from those metrics as well. This is the information that should be reported back to the executives that pay the bills: the past cost, future projected costs, and unit cost “per click” or “per transaction”, as your case warrants. The challenge here is in the model itself - statistical methods are not foolproof, and the size of the sample (in this case I recommend the entire population, not a smaller sample) is key. References and Tools Articles: http://blogs.msdn.com/b/patrick_butler_monterde/archive/2010/02/10/windows-azure-billing-overview.aspx http://technet.microsoft.com/en-us/magazine/gg213848.aspx http://blog.codingoutloud.com/2011/06/05/azure-faq-how-much-will-it-cost-me-to-run-my-application-on-windows-azure/ http://blogs.msdn.com/b/johnalioto/archive/2010/08/25/10054193.aspx http://geekswithblogs.net/iupdateable/archive/2010/02/08/qampa-how-can-i-calculate-the-tco-and-roi-when.aspx   Other Tools: http://cloud-assessment.com/ http://communities.quest.com/community/cloud_tools
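    To make the matrix idea concrete, here is a small sketch of the multiplication step. The components.csv contents shown in the comments are entirely made-up placeholders, so substitute the current units and prices from the official pricing page:

    ```bash
    #!/bin/bash
    # Sketch only: multiply estimated monthly usage by unit price per component.
    # Expected components.csv format (component,unit,price_per_unit,estimated_units),
    # with made-up numbers purely for illustration:
    #   compute,instance-hours,0.12,1440
    #   storage,GB-months,0.15,50
    #   transactions,per-10k,0.01,200
    #   bandwidth,GB-out,0.12,30
    awk -F, '
      { line_cost = $3 * $4
        printf "%-14s %10.2f\n", $1, line_cost
        total += line_cost }
      END { printf "%-14s %10.2f\n", "TOTAL", total }
    ' components.csv
    ```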

    Read the article

  • Agile Like Jazz

    - by Jeff Certain
    (I’ve been sitting on this for a week or so now, thinking that it needed to be tightened up a bit to make it less rambling. Since that’s clearly not going to happen, reader beware!) I had the privilege of spending around 90 minutes last night sitting and listening to Sonny Rollins play a concert at the Disney Center in LA. If you don’t know who Sonny Rollins is, I don’t know how to explain the experience; if you know who he is, I don’t need to. Suffice it to say that he has been recording professionally for over 50 years, and helped create an entire genre of music. A true master by any definition. One of the most intriguing aspects of a concert like this, however, is watching the master step aside and let the rest of the musicians play. Not just play their parts, but really play… letting them take over the spotlight, to strut their stuff, to soak up enthusiastic applause from the crowd. Maybe a lot of it has to do with the fact that Sonny Rollins has been doing this for more than a half-century. Maybe it has something to do with a kind of patience you learn when you’re on the far side of 80 – and the man can still blow a mean sax for 90 minutes without stopping! Maybe it has to do with the fact that he was out there for the love of the music and the love of the show, not because he had anything to prove to anyone and, I like to think, not for the money. Perhaps it had more to do with the fact that, when you’re at that level of mastery, the other musicians are going to be good. Really good. Whatever the reasons, there was a incredible freedom on that stage – the ability to improvise, for each musician to showcase their own specialization and skills, and them come back to the common theme, back to being on the same page, as it were. All this took place in the same venue that is home to the L.A. Phil. Somehow, I can’t ever see the same kind of free-wheeling improvisation happening in that context. And, since I’m a geek, I started thinking about agility. Rollins has put together a quintet that reflects his own particular style and past. No upright bass or piano for Rollins – drums, bongos, electric guitar and bass guitar along with his sax. It’s not about the mix of instruments. Other trios, quartets, and sextets use different mixes of instruments. New Orleans jazz tends towards trombones instead of sax; some prefer cornet or trumpet. But no matter what the choice of instruments, size matters. Team sizes are something I’ve been thinking about for a while. We’re on a quest to rethink how our teams are organized. They just feel too big, too unwieldy. In fact, they really don’t feel like teams at all. Most of the time, they feel more like collections or people who happen to report to the same manager. I attribute this to a couple factors. One is over-specialization; we have a tendency to have people work in silos. Although the teams are product-focused, within them our developers are both generalists and specialists. On the one hand, we expect them to be able to build an entire vertical slice of the application; on the other hand, each developer tends to be responsible for the vertical slice. As a result, developers often work on their own piece of the puzzle, in isolation. This sort of feels like working on a jigsaw in a group – each person taking a set of colors and piecing them together to reveal a portion of the overall picture. But what inevitably happens when you go to meld all those pieces together? Inevitably, you have some sections that are too big to move easily. 
These sections end up falling apart under their own weight as you try to move them. Not only that, but there are other challenges – figuring out where that section fits, and how to tie it into the rest of the puzzle. Often, this is when you find a few pieces need to be added – these pieces are “glue,” if you will. The other issue that arises is due to the overhead of maintaining communications in a team. My mother, who worked in IT for around 30 years, once told me that 20% overhead per team member is a good rule of thumb for maintaining communication. While this is a rule of thumb, it seems to imply that any team over about 6 people is going to become less agile simply because of the communications burden. Teams of ten or twelve seem like they fall into the philharmonic organizational model. Complicated pieces of music requiring dozens of players to all be on the same page require a much different model than the jazz quintet. There’s much less room for improvisation, originality or freedom. (There are probably orchestral musicians who will take exception to this characterization; I’m calling it like I see it from the cheap seats.) And, there’s one guy up front who is running the show, whose job is to keep all of those dozens of players on the same page, to facilitate communications. Somehow, the orchestral model doesn’t feel much like a self-organizing team, either. The first violin may be the best violinist in the orchestra, but they don’t get to perform free-wheeling solos. I’ve never heard of an orchestra getting together for a jam session. But I have heard of teams that organize their work based on the developers available, rather than organizing the developers based on the work required. I have heard of teams where desired functionality is deferred – or worse yet, schedules are missed – because one critical person doesn’t have any bandwidth available. I’ve heard of teams where people simply don’t have the big picture, because there is too much communication overhead for everyone to be aware of everything that is happening on a project. I once heard Paul Rayner say something to the effect of “you have a process that is perfectly designed to give you exactly the results you have.” Given a choice, I want a process that’s much more like jazz than orchestral music. I want a process that doesn’t burden me with lots of forms and checkboxes and stuff. Give me the simplest, most lightweight process that will work – and a smaller team of the best developers I can find. This seems like the kind of process that will get the kind of result I want to be part of.

    Read the article

  • What's new in Solaris 11.1?

    - by Karoly Vegh
    Solaris 11.1 is released. This is the first release update since Solaris 11 11/11, the versioning has been changed from MM/YY style to 11.1 highlighting that this is Solaris 11 Update 1.  Solaris 11 itself has been great. What's new in Solaris 11.1? Allow me to pick some new features from the What's New PDF that can be found in the official Oracle Solaris 11.1 Documentation. The updates are very numerous, I really can't include all.  I. New AI Automated Installer RBAC profiles have been introduced to enable delegation of installation tasks. II. The interactive installer now supports installing the OS to iSCSI targets. III. ASR (Auto Service Request) and OCM (Oracle Configuration Manager) have been enabled by default to proactively provide support information and create service requests to speed up support processes. This is optional and can be disabled but helps a lot in supportcases. For further information, see: http://oracle.com/goto/solarisautoreg IV. The new command svcbundle helps you to create SMF manifests without having to struggle with XML editing. (btw, do you know the interactive editprop subcommand in svccfg? The listprop/setprop subcommands are great for scripting and automating, but for an interactive property editing session try, for example, this: svccfg -s svc:/application/pkg/system-repository:default editprop )  V. pfedit: Ever wondered how to delegate editing permissions to certain files? It is well known "sudo /usr/bin/vi /etc/hosts" is not the right way, for sudo elevates the complete vi process to admin levels, and the user can "break" out of the session as root with simply starting a shell from that vi. Now, the new pfedit command provides a solution exactly to this challenge - an auditable, secure, per-user configurable editing possibility. See the pfedit man page for examples.   VI. rsyslog, the popular logging daemon (filters, SSL, formattable output, SQL collect...) has been included in Solaris 11.1 as an alternative to syslog.  VII: Zones: Solaris Zones - as a major Solaris differentiator - got lots of love in terms of new features: ZOSS - Zones on Shared Storage: Placing your zones to shared storage (FC, iSCSI) has never been this easy - via zonecfg.  parallell updates - with S11's bootenvironments updating zones was no problem and meant no downtime anyway, but still, now you can update them parallelly, a way faster update action if you are running a large number of zones. This is like parallell patching in Solaris 10, but with all the IPS/ZFS/S11 goodness.  per-zone fstype statistics: Running zones on a shared filesystems complicate the I/O debugging, since ZFS collects all the random writes and delivers them sequentially to boost performance. Now, over kstat you can find out which zone's I/O has an impact on the other ones, see the examples in the documentation: http://docs.oracle.com/cd/E26502_01/html/E29024/gmheh.html#scrolltoc Zones got RDSv3 protocol support for InfiniBand, and IPoIB support with Crossbow's anet (automatic vnic creation) feature.  NUMA I/O support for Zones: customers can now determine the NUMA I/O topology of the system from within zones.  VIII: Security got a lot of attention too:  Automated security/audit reporting, with builtin reporting templates e.g. for PCI (payment card industry) audits.  PAM is now configureable on a per-user basis instead of system wide, allowing different authentication requirements for different users  SSH in Solaris 11.1 now supports running in FIPS 140-2 mode, that is, in a U.S. 
government security accredited fashion.  SHA512/224 and SHA512/256 cryptographic hash functions are implemented in a FIPS-compliant way - and on a T4 implemented in silicon! That is, government-approved cryptography at HW-speed.  Generally, Solaris is currently under evaluation to be both FIPS and Common Criteria certified.  IX. Networking, as one of the core strengths of Solaris 11, has been extended with:  Data Center Bridging (DCB) - it is not only setups where network and storage share the same fabric (FCoE, anyone?) that can have Quality-of-Service requirements. DCB enables peers to distinguish traffic based on priorities. Your NICs have to support DCB; see the documentation, and additional information on Wikipedia. DataLink MultiPathing, DLMP, enables link aggregation to span across multiple switches, even between those of different vendors. But there are essential differences from the good old bandwidth-aggregating LACP; see the documentation: http://docs.oracle.com/cd/E26502_01/html/E28993/gmdlu.html#scrolltoc VNIC live migration is now supported from one physical NIC to another on the fly.  X. Data management:  FedFS (Federated FileSystem) is new; it relies on Solaris 11's NFS referring mechanism to join separate shares of different NFS servers into a single filesystem namespace. The referring system has been there since S11 11/11; in Solaris 11.1 FedFS uses LDAP as the one global nameservice to bind them all.  The iSCSI initiator now uses the T4 CPU's HW-implemented CRC32 algorithm - thus improving iSCSI throughput while reducing CPU utilization on a T4. Storage locking improvements are now RAC aware, speeding up throughput with better locking-communication between nodes by up to 20%!  XI: Kernel performance optimizations: The new Virtual Memory subsystem ("VM2") now scales to 100+ TB memory ranges.  The memory predictor monitors large memory page usage and adjusts memory page sizes to applications' needs. OSM, the Optimized Shared Memory, allows Oracle DBs' SGA to be resized online. XII: The Power Aware Dispatcher is now enabled by default, reducing power consumption of idle CPUs. Also, the LDoms' Power Management policies and the poweradm settings in Solaris 11 OS will cooperate. XIII: x86 boot: upgrade to GRUB2 (the Grand Unified Bootloader). Because GRUB2's configuration syntax differs from GRUB1's, you should not edit the new grub configuration (grub.cfg) directly but use the new bootadm features to update it. GRUB2 adds UEFI support and also support for disks over 2TB. XIV: Improved viewing of per-CPU statistics in mpstat. This one might seem of less importance at first, but nowadays having better sorting/filtering possibilities on a periodically updated mpstat output of 256+ vCPUs can be a blessing. XV: Support for Solaris Cluster 4.1: The What's New document doesn't actually mention this one, since OSC 4.1 had not been released at the time 11.1 was. But since then it is available, and it requires Solaris 11.1. And it's only a "pkg update" away. ...aand I seriously need to stop here. There's a lot I missed - Edge Virtual Bridging, lofi tuning, ZFS sharing and crypto enhancements, USB 3.0, pulseaudio, trusted extensions updates, etc. - but if I mentioned all those I would effectively copy the What's New document. Which I recommend reading now anyway; it is a great extract of the 300+ new projects and RFE-followups in S11.1. And this blogpost is a summary of that extract.  For closing words, allow me to come back to Request For Enhancements, RFEs. Any customer can request features. 
Open up a Support Request, explain that this is an RFE, and describe the feature you or your company would like to have implemented in S11. The more SRs are collected for an RFE, the better its chances of getting implemented. Feel free to provide feedback about the product, as well as about the Solaris 11.1 Documentation, using the "Feedback" button there. Both the Solaris engineers and the documentation writers are eager to hear your input. Feel free to comment about this post too. Except that it's too long ;)  wbr, charlie
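    As a quick, unverified sketch of what the pfedit delegation mentioned above looks like: the solaris.admin.edit/<path> authorization form and the usermod syntax below are my reading of the S11.1 man pages, and the user name is made up, so double-check pfedit(1M) and usermod(1M) before relying on it.

    ```bash
    # Sketch only: grant one user the right to edit one specific file via pfedit.
    # Authorization name format and "usermod -A +..." syntax are assumptions
    # based on the man pages -- verify on your own system first.
    usermod -A +solaris.admin.edit/etc/hosts alice
    # then, as user alice, the edit is allowed, audited, and limited to that file:
    pfedit /etc/hosts
    ```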

    Read the article

  • Emtel Knowledge Series - Q2/2014

    From Cyber Island to Smart Mauritius Cyber Island? Smart Mauritius? - What is Emtel talking about? "With the majority of the population living in urban environments today, the concept of "Smart Cities" has become an urgent necessity. "Smart Cities" refer to an urban transformation which, by using latest ICT technologies makes cities more efficient. Many Governments are setting out ambitious plans to build the cities of the future based on massive connectivity, high bandwidth communications, intelligent sensors and analysis of huge volumes of data. Various researches have shown four key enablers for smart city success - Government leadership, suitable technology infrastructure, solid public-private partnerships and engaged citizens. It is around these enabling factors that telecoms companies can play a vital role in assisting governments to deliver on the smart city vision." The Emtel Knowledge Series is part of Emtel's 25th anniversary celebrations throughout the year, and the master of ceremonies, Kim Andersen, mentioned that there will be more upcoming events on a quarterly basis. As a representative of the Mauritius Software Craftsmanship Community (MSCC), I had absolutely no hesitation about joining in again. Following my visit to the first Emtel Knowledge Series workshop back in February this year, it was great to have another opportunity to meet and exchange with technology experts. But quite frankly, what is it with those buzzwords... As far as I remember, and as it was mentioned, "Cyber Island" is an old initiative from around 2005/2006 which was refreshed in 2010. It implies the empowerment of Information & Communication Technologies (ICT) as an essential factor of growth by the government here in Mauritius. Actually, the first promotional period of Cyber Island brought me here, but that's another story. The venue and its own problems: Like last time, the event was organised and held at the Conference Hall at Cyber Tower I in Ebene. As I've been working there for some years, I know about the frustration of finding a proper parking space. So, does Smart Island include better solutions for finding parking spaces? Maybe; let's see whether I will be able to answer that question at the end of the article. Anyway, after circling the tower almost twice, I finally got a decent space to put the car without risking a ticket or damage. International speakers and their experience: Once again, Emtel did a great job of getting international expertise onto the stage to share their experience and vision on this kind of undertaking. Personally, I really appreciated the fact that they were speakers of global reach who could share first-hand experience. Johan Gott spoke about the fundamental change that the Swedish government ignited in order to move their society and workers' environment away from heavy industry towards a knowledge-based approach. Additionally, we spoke about the effort and transformation of New York City into a greener and more efficient Smart City. Given modern technology, he also advised that any kind of available Big Data should be opened to the general public - this openness would provide a playground for anyone to garner new ideas and most probably solid solutions that no one else had thought of before. 
Emtel Knowledge Series on moving from Cyber Island to Smart Mauritius Later during the afternoon, that exact statement regarding openness to and transparency of government-owned Big Data was emphasised again by the Danish speaker Kim Andersen and his former colleague Mika Jantunen from Finland. Mika continued to underline the important role of the government in providing a solid foundation for a knowledge-based society and mentioned that Finnish citizens have a constitutional right to broadband connectivity. Besides free higher (tertiary) education, Finland has already produced a good number of innovations, among them: First country to grant voting rights to women Free higher education Constitutional right to broadband connectivity Nokia Linux Angry Birds Sauna and others...  General access to the internet via broadband and/or mobile connectivity is surely a key factor towards Smart Cities, or better said Smart Mauritius given the area dimensions and size of population. CTO Paul Valette gave the audience a brief overview of the essential role that Emtel will have in moving Mauritius forward towards a knowledge-based and innovation-driven environment for its citizens. What I have seen looks really promising, and with recently published information that Mauritius has 127% mobile penetration - meaning more than one mobile phone, smartphone or tablet per person - it will be crucial to have the right infrastructure for these connected devices. How would it be possible to achieve a knowledge-based society? YouTube to the rescue! Seriously, gaining more knowledge will require fast access to educational course material, as explained by Dr Kaviraj Sukon, General Director of the Open University of Mauritius. According to him, a good number of high-profile universities in the world have opened their course libraries to the general public, among them edX, Coursera and the Open University. Nowadays, you're actually able and enabled to learn for and earn a BSc or even MSc certification at your own pace - no need to attend classes on campus. It was really impressive to see the number of available hours - more than enough for a life-long learning experience! Networking in the name of the MSCC: As briefly mentioned above, I wanted to combine two approaches for this workshop. Of course, getting the latest information and updates on available Emtel services, especially for my business here on the west coast of the island, but also meeting and greeting new people for the MSCC. And I think it was very positive on both sides. Let me quickly describe some of the key aspects that happened during the day: Met with Arnaud Meslier and Kellie, both from Microsoft, to swap the latest information on IT events. I got an invite to the Microsoft Windows Phone 8.1 Dev Camp. Got in touch with Arvin Lockee of Emtel to check our options for meeting with the data team, and to seize the opportunity to arrange a tour of the Emtel Data Centre. Had a great chat with Avinash Meetoo, Knowledge 7, Kim Andersen and Mika Jantunen about the situation of teaching and learning in general and specifically in the private sector here in Mauritius. Additionally, a number of other interesting chats... Once again, I'm following up on a couple of business cards in order to provide more background information about the MSCC and to create better awareness of the MSCC within the local IT businesses. There is more to come soon!  Resume of the day: The number of attendees at this event doubled or even tripled this time. 
The whole organisation was improved massively, and the combination of presentations and summarizing panel discussions was better than during the previous workshop back in February. Overall, once again a well-organised workshop, and I'm already looking forward to joining the next workshop in Q3. Update: At the end of July we finally managed to visit the Emtel Data Centre in Arsenal. It was an interesting opportunity for some of our MSCC members.

    Read the article

  • unattended-upgrades does not reboot

    - by Cheiron
    I am running Debian 7 stable with unattended-upgrades (every morning at 6 AM) to make sure I am always fully updated. I have the following config: $ cat /etc/apt/apt.conf.d/50unattended-upgrades // Automatically upgrade packages from these origin patterns Unattended-Upgrade::Origins-Pattern { // Archive or Suite based matching: // Note that this will silently match a different release after // migration to the specified archive (e.g. testing becomes the // new stable). "o=Debian,a=stable"; "o=Debian,a=stable-updates"; // "o=Debian,a=proposed-updates"; "origin=Debian,archive=stable,label=Debian-Security"; }; // List of packages to not update Unattended-Upgrade::Package-Blacklist { // "vim"; // "libc6"; // "libc6-dev"; // "libc6-i686"; }; // This option allows you to control if on a unclean dpkg exit // unattended-upgrades will automatically run // dpkg --force-confold --configure -a // The default is true, to ensure updates keep getting installed //Unattended-Upgrade::AutoFixInterruptedDpkg "false"; // Split the upgrade into the smallest possible chunks so that // they can be interrupted with SIGUSR1. This makes the upgrade // a bit slower but it has the benefit that shutdown while a upgrade // is running is possible (with a small delay) //Unattended-Upgrade::MinimalSteps "true"; // Install all unattended-upgrades when the machine is shuting down // instead of doing it in the background while the machine is running // This will (obviously) make shutdown slower //Unattended-Upgrade::InstallOnShutdown "true"; // Send email to this address for problems or packages upgrades // If empty or unset then no email is sent, make sure that you // have a working mail setup on your system. A package that provides // 'mailx' must be installed. E.g. "[email protected]" Unattended-Upgrade::Mail "root"; // Set this value to "true" to get emails only on errors. Default // is to always send a mail if Unattended-Upgrade::Mail is set Unattended-Upgrade::MailOnlyOnError "true"; // Do automatic removal of new unused dependencies after the upgrade // (equivalent to apt-get autoremove) //Unattended-Upgrade::Remove-Unused-Dependencies "false"; // Automatically reboot *WITHOUT CONFIRMATION* if a // the file /var/run/reboot-required is found after the upgrade Unattended-Upgrade::Automatic-Reboot "true"; // Use apt bandwidth limit feature, this example limits the download // speed to 70kb/sec //Acquire::http::Dl-Limit "70"; As you can see, Automatic-Reboot is true and thus the server should automatically reboot. Last time I checked, the server had been online for over 100 days, which means that the update from Debian 7.1 to Debian 7.2 happened while the server was up (and indeed, all updates were installed), but that involves kernel updates, which means that the server should have rebooted. It did not. The server was running very slowly, so I rebooted, which fixed that. I did some research and found out that unattended-upgrades responds to the reboot-required file in /var/run/. I touched this file and waited one week; the file still exists and the server did not reboot. So I think that unattended-upgrades ignores the auto-reboot part. So, am I doing something wrong here? Why did the server not restart? The upgrade part works perfectly, by the way; it's just the reboot part that does not seem to work as it should.
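    As a stopgap until I find out why unattended-upgrades itself won't do it, I'm considering a small cron job that performs the reboot whenever the flag file is present. This is only a sketch, and the 04:00 time and 5-minute warning are arbitrary choices:

    ```bash
    # /etc/cron.d/reboot-if-required   (sketch -- schedule and delay are arbitrary)
    # Reboot at 04:00 if an earlier upgrade left the reboot-required flag behind.
    0 4 * * * root [ -f /var/run/reboot-required ] && /sbin/shutdown -r +5 "Rebooting to finish unattended upgrades"
    ```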

    Read the article

  • What NAS setup for two-way syncing over the internet?

    - by Jamse
    I have family living a few hours away and have a lot of files that I would like to share - especially lots of folders of digital photos, but also documents etc. - partially so they can see them, partially so I can have access when I visit them and partially for backup / redundancy purposes. My current hard drives on my main machine are getting pretty full anyway, and I have a MythTV box where my music is currently stored, so I was thinking of getting a NAS anyway. And at the other end my family have a few computers, so they would probably benefit from a NAS too. My general idea (though I'm willing to shift on this if there are any bright ideas about other ways of achieving my objectives) is to get a matching pair of NASs and have them sync over the internet. (To cut down on bandwidth use I would get them in sync locally to start with.) Having read around as best I can, it seems that syncing over the internet is generally only a feature on quite high-end units. However, I have seen that QNAP seem to feature this on their TS-110 and TS-210 units, which might work (they call it "remote replication"). They seem pretty reasonably priced for what they are, but of course with buying 2 of them and then adding the drives (say 1TB or 2TB each) I'd be looking at about £400 total. So, I'm looking for recommendations really. I don't want to spend more than the QNAPs would cost me, but any other ideas would be most appreciated. I am comfortable with technology and tinkering around, but I don't have as much time for that as I would like, so I guess I would favour solutions that require less tinkering rather than more (even though that's less fun!). Any thoughts would be welcome, as would any comments from people who have used the QNAP boxes for this. Thanks in advance. Some specifications: Two-way syncing. Changes made at either end should be synced to the other. There shouldn't be one unit that is effectively a read-only mirror of the other. Not real time. The syncing doesn't need to be real time - if it synced, say, daily overnight, that would be fine. Set and forget. I would prefer minimal user interaction once set up - it would be great if syncs were scheduled and automatic. OS independence. I am running Windows XP plus an Ubuntu-based MythTV box. At the other end there are Windows 7 and Windows XP machines, plus a networked TV set top box which I think can play files off the network. Machine independence. I would favour a system that is self-contained, i.e. not reliant on any particular PC being switched on. If the system had enough else going for it I could perhaps work around it at this end, where I only have one PC that's used as such, but it would be harder at the other where there are at least two PCs that might be accessing the files. Notifications. I guess things like getting an email notification if the syncing fell over for any reason would be useful, though it's not a deal breaker. Update: I've been digging some more and it looks like QNAP's Remote Replication function is actually just Rsync, so only really suitable for one-way syncing. I've posted on their forum to double check, but I think that's the case. In which case, I think the focus of my question is now either: do any reasonably-priced NASs support bidirectional syncing over the internet? Or has anyone had any luck installing suitable sync software onto NASs for this purpose? (Also, updated question to clarify that I'm after two-way syncing.)
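    For what it's worth, the kind of two-way sync I have in mind would look roughly like this if both boxes could run Unison over SSH. This is only a sketch with a made-up host name and path, and I'd want to understand Unison's conflict handling before trusting it with real data:

    ```bash
    # Sketch: two-way sync of a photo share with the remote NAS over SSH.
    # Host name, path and options are assumptions; see unison(1), especially
    # its conflict-resolution behaviour, before relying on it.
    unison /share/photos ssh://family-nas//share/photos \
        -batch -auto -times -logfile /var/log/unison-photos.log
    ```

    Run from cron overnight, something like that would also cover the "set and forget" and "not real time" requirements.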

    Read the article

  • Ubuntu 12.04 wireless (wifi) not working, can not upgrade to 12.10, touchpad gestures not working. What to do?

    - by Ritwik
    I installed Ubuntu 12.04 LTS 3 days ago and since then the wireless feature and touchpad gestures have not been working. I have tried everything on the internet but am still unsuccessful. I can't upgrade to Ubuntu 12.10. These are the commands I tried. Please help me. EDIT: I just realized USB 3.0 is also not working. COMMAND lsb_release -r OUTPUT ----------------------------------------------------------------- Release: 12.04 ----------------------------------------------------------------- COMMAND lspci OUTPUT ------------------------------------------------------------------ 00:00.0 Host bridge: Intel Corporation Xeon E3-1200 v3/4th Gen Core Processor DRAM Controller (rev 06) 00:01.0 PCI bridge: Intel Corporation Xeon E3-1200 v3/4th Gen Core Processor PCI Express x16 Controller (rev 06) 00:01.1 PCI bridge: Intel Corporation Xeon E3-1200 v3/4th Gen Core Processor PCI Express x8 Controller (rev 06) 00:02.0 VGA compatible controller: Intel Corporation 4th Gen Core Processor Integrated Graphics Controller (rev 06) 00:03.0 Audio device: Intel Corporation Xeon E3-1200 v3/4th Gen Core Processor HD Audio Controller (rev 06) 00:14.0 USB controller: Intel Corporation 8 Series/C220 Series Chipset Family USB xHCI (rev 05) 00:16.0 Communication controller: Intel Corporation 8 Series/C220 Series Chipset Family MEI Controller #1 (rev 04) 00:1a.0 USB controller: Intel Corporation 8 Series/C220 Series Chipset Family USB EHCI #2 (rev 05) 00:1b.0 Audio device: Intel Corporation 8 Series/C220 Series Chipset High Definition Audio Controller (rev 05) 00:1c.0 PCI bridge: Intel Corporation 8 Series/C220 Series Chipset Family PCI Express Root Port #1 (rev d5) 00:1c.1 PCI bridge: Intel Corporation 8 Series/C220 Series Chipset Family PCI Express Root Port #2 (rev d5) 00:1c.2 PCI bridge: Intel Corporation 8 Series/C220 Series Chipset Family PCI Express Root Port #3 (rev d5) 00:1d.0 USB controller: Intel Corporation 8 Series/C220 Series Chipset Family USB EHCI #1 (rev 05) 00:1f.0 ISA bridge: Intel Corporation HM86 Express LPC Controller (rev 05) 00:1f.2 SATA controller: Intel Corporation 8 Series/C220 Series Chipset Family 6-port SATA Controller 1 [AHCI mode] (rev 05) 00:1f.3 SMBus: Intel Corporation 8 Series/C220 Series Chipset Family SMBus Controller (rev 05) 07:00.0 3D controller: NVIDIA Corporation GF117M [GeForce 610M/710M / GT 620M/625M/630M/720M] (rev a1) 08:00.0 Ethernet controller: Realtek Semiconductor Co., Ltd. RTL8101E/RTL8102E PCI Express Fast Ethernet controller (rev 07) 09:00.0 Unassigned class [ff00]: Realtek Semiconductor Co., Ltd. RTS5229 PCI Express Card Reader (rev 01) 0f:00.0 Network controller: Qualcomm Atheros QCA9565 / AR9565 Wireless Network Adapter (rev 01) ------------------------------------------------------------------ COMMAND sudo apt-get install linux-backports-modules-wireless-lucid-generic OUTPUT ------------------------------------------------------------------- Reading package lists... Done Building dependency tree Reading state information... 
Done E: Unable to locate package linux-backports-modules-wireless-lucid-generic ------------------------------------------------------------------- COMMAND cat /etc/lsb-release; uname -a OUTPUT ------------------------------------------------------------------- DISTRIB_ID=Ubuntu DISTRIB_RELEASE=12.04 DISTRIB_CODENAME=precise DISTRIB_DESCRIPTION="Ubuntu 12.04.5 LTS" Linux ritwik-PC 3.2.0-67-generic #101-Ubuntu SMP Tue Jul 15 17:46:11 UTC 2014 x86_64 x86_64 x86_64 GNU/Linux ------------------------------------------------------------------- COMMAND lspci -nnk | grep -iA2 net OUTPUT ------------------------------------------------------------------- 08:00.0 Ethernet controller [0200]: Realtek Semiconductor Co., Ltd. RTL8101E/RTL8102E PCI Express Fast Ethernet controller [10ec:8136] (rev 07) Subsystem: Hewlett-Packard Company Device [103c:225d] Kernel driver in use: r8169 -- 0f:00.0 Network controller [0280]: Qualcomm Atheros QCA9565 / AR9565 Wireless Network Adapter [168c:0036] (rev 01) Subsystem: Hewlett-Packard Company Device [103c:217f] ------------------------------------------------------------------- COMMAND lsusb OUTPUT ------------------------------------------------------------------- Bus 001 Device 001: ID 1d6b:0002 Linux Foundation 2.0 root hub Bus 002 Device 001: ID 1d6b:0002 Linux Foundation 2.0 root hub Bus 003 Device 001: ID 1d6b:0002 Linux Foundation 2.0 root hub Bus 004 Device 001: ID 1d6b:0003 Linux Foundation 3.0 root hub Bus 001 Device 002: ID 8087:8008 Intel Corp. Bus 002 Device 002: ID 8087:8000 Intel Corp. ------------------------------------------------------------------- COMMAND iwconfig OUTPUT ------------------------------------------------------------------- lo no wireless extensions. eth0 no wireless extensions. ------------------------------------------------------------------- COMMAND rfkill list all OUTPUT ------------------------------------------------------------------- 0: hp-wifi: Wireless LAN Soft blocked: no Hard blocked: no 1: hp-bluetooth: Bluetooth Soft blocked: no Hard blocked: no ------------------------------------------------------------------- COMMAND lsmod OUTPUT ------------------------------------------------------------------- Module Size Used by snd_hda_codec_realtek 224215 1 bnep 18281 2 rfcomm 47604 0 bluetooth 180113 10 bnep,rfcomm parport_pc 32866 0 ppdev 17113 0 nls_iso8859_1 12713 1 nls_cp437 16991 1 vfat 17585 1 fat 61512 1 vfat snd_hda_intel 33719 3 snd_hda_codec 127706 2 snd_hda_codec_realtek,snd_hda_intel snd_hwdep 17764 1 snd_hda_codec snd_pcm 97275 2 snd_hda_intel,snd_hda_codec snd_seq_midi 13324 0 snd_rawmidi 30748 1 snd_seq_midi snd_seq_midi_event 14899 1 snd_seq_midi snd_seq 61929 2 snd_seq_midi,snd_seq_midi_event nouveau 775039 0 joydev 17693 0 snd_timer 29990 2 snd_pcm,snd_seq snd_seq_device 14540 3 snd_seq_midi,snd_rawmidi,snd_seq ttm 76949 1 nouveau uvcvideo 72627 0 snd 79041 15 snd_hda_codec_realtek,snd_hda_intel,snd_hda_codec,snd_hwdep,snd_pcm,snd_rawmidi,snd_seq,snd_timer,snd_seq_device videodev 98259 1 uvcvideo drm_kms_helper 46978 1 nouveau psmouse 98051 0 drm 241971 3 nouveau,ttm,drm_kms_helper i2c_algo_bit 13423 1 nouveau soundcore 15091 1 snd snd_page_alloc 18529 2 snd_hda_intel,snd_pcm v4l2_compat_ioctl32 17128 1 videodev hp_wmi 18092 0 serio_raw 13211 0 sparse_keymap 13890 1 hp_wmi mxm_wmi 13021 1 nouveau video 19651 1 nouveau wmi 19256 2 hp_wmi,mxm_wmi mac_hid 13253 0 lp 17799 0 parport 46562 3 parport_pc,ppdev,lp r8169 62190 0 ------------------------------------------------------------------- COMMAND sudo su 
modprobe -v ath9k OUTPUT ------------------------------------------------------------------- insmod /lib/modules/3.2.0-67-generic/kernel/net/wireless/cfg80211.ko insmod /lib/modules/3.2.0-67-generic/kernel/drivers/net/wireless/ath/ath.ko insmod /lib/modules/3.2.0-67-generic/kernel/drivers/net/wireless/ath/ath9k/ath9k_hw.ko insmod /lib/modules/3.2.0-67-generic/kernel/drivers/net/wireless/ath/ath9k/ath9k_common.ko insmod /lib/modules/3.2.0-67-generic/kernel/net/mac80211/mac80211.ko insmod /lib/modules/3.2.0-67-generic/kernel/drivers/net/wireless/ath/ath9k/ath9k.ko ------------------------------------------------------------------- COMMAND do-release-upgrade OUTPUT ------------------------------------------------------------------- Err Upgrade tool signature 404 Not Found [IP: 91.189.88.149 80] Err Upgrade tool 404 Not Found [IP: 91.189.88.149 80] Fetched 0 B in 0s (0 B/s) WARNING:root:file 'quantal.tar.gz.gpg' missing Failed to fetch Fetching the upgrade failed. There may be a network problem. ------------------------------------------------------------------- COMMAND sudo modprobe ath9k dmesg | grep ath9k NO OUTPUT FOR THEM COMMAND dmesg | grep -e ath -e 80211 OUTPUT ------------------------------------------------------------------- [ 13.232372] type=1400 audit(1408867538.399:9): apparmor="STATUS" operation="profile_load" name="/usr/lib/telepathy/mission-control-5" pid=975 comm="apparmor_parser" [ 13.232615] type=1400 audit(1408867538.399:10): apparmor="STATUS" operation="profile_load" name="/usr/lib/telepathy/telepathy-*" pid=975 comm="apparmor_parser" [ 15.186599] ath3k: probe of 3-4:1.0 failed with error -110 [ 15.186635] usbcore: registered new interface driver ath3k [ 88.219329] cfg80211: Calling CRDA to update world regulatory domain [ 88.351665] cfg80211: World regulatory domain updated: [ 88.351667] cfg80211: (start_freq - end_freq @ bandwidth), (max_antenna_gain, max_eirp) [ 88.351670] cfg80211: (2402000 KHz - 2472000 KHz @ 40000 KHz), (300 mBi, 2000 mBm) [ 88.351671] cfg80211: (2457000 KHz - 2482000 KHz @ 20000 KHz), (300 mBi, 2000 mBm) [ 88.351673] cfg80211: (2474000 KHz - 2494000 KHz @ 20000 KHz), (300 mBi, 2000 mBm) [ 88.351674] cfg80211: (5170000 KHz - 5250000 KHz @ 40000 KHz), (300 mBi, 2000 mBm) [ 88.351675] cfg80211: (5735000 KHz - 5835000 KHz @ 40000 KHz), (300 mBi, 2000 mBm) ------------------------------------------------------------------- COMMAND sudo apt-get install touchpad-indicator OUTPUT ------------------------------------------------------------------- Reading package lists... Done Building dependency tree Reading state information... Done The following extra packages will be installed: gir1.2-gconf-2.0 python-pyudev Suggested packages: python-qt4 python-pyside.qtcore The following NEW packages will be installed: gir1.2-gconf-2.0 python-pyudev touchpad-indicator 0 upgraded, 3 newly installed, 0 to remove and 0 not upgraded. Need to get 84.1 kB of archives. After this operation, 1,136 kB of additional disk space will be used. Do you want to continue [Y/n]? Y Get:1 http://ppa.launchpad.net/atareao/atareao/ubuntu/ precise/main touchpad-indicator all 0.9.3.12-1ubuntu1 [46.5 kB] Get:2 http://archive.ubuntu.com/ubuntu/ precise/main gir1.2-gconf-2.0 amd64 3.2.5-0ubuntu2 [7,098 B] Get:3 http://archive.ubuntu.com/ubuntu/ precise/main python-pyudev all 0.13-1 [30.5 kB] Fetched 84.1 kB in 2s (31.6 kB/s) Selecting previously unselected package gir1.2-gconf-2.0. (Reading database ... 169322 files and directories currently installed.) 
Unpacking gir1.2-gconf-2.0 (from .../gir1.2-gconf-2.0_3.2.5-0ubuntu2_amd64.deb) ... Selecting previously unselected package python-pyudev. Unpacking python-pyudev (from .../python-pyudev_0.13-1_all.deb) ... Selecting previously unselected package touchpad-indicator. Unpacking touchpad-indicator (from .../touchpad-indicator_0.9.3.12-1ubuntu1_all.deb) ... Processing triggers for bamfdaemon ... Rebuilding /usr/share/applications/bamf.index... Processing triggers for desktop-file-utils ... Processing triggers for gnome-menus ... Processing triggers for hicolor-icon-theme ... Processing triggers for software-center ... INFO:softwarecenter.db.update:no translation information in database needed Setting up gir1.2-gconf-2.0 (3.2.5-0ubuntu2) ... Setting up python-pyudev (0.13-1) ... Setting up touchpad-indicator (0.9.3.12-1ubuntu1) ... ------------------------------------------------------------------- Not able to find ( drivers/net/wireless/ath/ath9k/hw.c ) or ( drivers/net/wireless/ath/ath9k/hw.h )
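    One route I am considering, though I am not sure it applies to this QCA9565/AR9565 card and so treat it as an untested idea, is the 12.04 hardware-enablement (HWE) stack, which brings a newer kernel and therefore a newer ath9k:

    ```bash
    # Sketch: install the 12.04 LTS hardware-enablement (HWE) stack for a newer
    # kernel whose ath9k may recognise the QCA9565/AR9565. Package names are the
    # standard LTS enablement ones; verify they are available in your mirrors.
    sudo apt-get update
    sudo apt-get install --install-recommends \
        linux-generic-lts-trusty xserver-xorg-lts-trusty
    sudo reboot
    # after rebooting into the new kernel, check:
    uname -r
    iwconfig
    ```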

    Read the article

  • How to diagnose and solve an erratic "HDCP Support Required"?

    - by Jom Orgstrom
    I am playing a digital TV broadcast on Windows Media Center for Windows 7. I built this system so it works with HDCP, and in fact I have been able to watch TV and Blu-ray before with this same computer. However, I suddenly started getting an "HDCP Support Required" error from WMC. The entire message is as follows: HDCP Support Required High-bandwidth Digital Content Protection (HDCP) may not be supported by the current video card. Use an HDCP-compliant display, video card, and video driver. Or, connect using an analog connection such as component or VGA. Relevant specs are: CPU: Ivy Bridge Core i7-3770 Motherboard: Asus P8H77-I Memory: 16GB DDR3-1600 Graphics: Radeon HD 7850 (Driver by AMD, version 8.982.0.0 built on 2012/07/27) Display: Acer P243w connected by HDMI Sound: Roland Quad-Capture (It complains even when I use the bundled VIA HD Audio) TV Tuner: I-O Data GV-MC7/HZ3 OS: Windows 7 Professional SP1, Windows Update enabled. All patched and up to date. As you can see, there is nothing weird or old about my setup. I am also not doing anything strange: no overclocking, no weird system changes and so on. One thing that does happen from time to time is that the display goes black for a few seconds (sometimes when watching media content, sometimes when just using Photoshop or Visual Studio). This happened with my previous setup as well, so I'd be inclined to think it is a display or cable issue (apart from the BD drive, these are the only things I kept from my previous setup to this one). But being a digital transfer, as far as I know, these things either work or they don't; never erratically or with decreased quality. The thing is that sometimes I can watch the TV, sometimes not. This happens with recorded programs as well, so it's not a per-program thing. Sometimes rebooting helps, sometimes it doesn't. Sometimes unplugging and plugging back the HDMI connector helps, sometimes it doesn't. Sometimes doing so doesn't even turn the screen back on, so I have to reboot. Unfortunately, WMC's error message is quite unhelpful. I'd like to know exactly where the problem is, so I can solve it. I don't want to buy a brand new display just to then find out it was a registry setting that was misconfigured. I've tried looking at the system event viewer, but these errors don't show up at all in there. Other people who have this problem seem to have a setup that is not HDCP compliant, so I turn to you guys here. Does anybody know how to diagnose this problem? Edit: So I got the Cyberlink Blu-ray disc advisor. I ran it and it told me everything was okay, except for the Video Connection Type, which showed as "Digital (without HDCP)". I then proceeded to unplug the power cable from the monitor, plugged it in again, ran the tool again, and now it's "Digital (with HDCP)". Needless to say, I can watch my TV and recorded programs on WMP again. I'm guessing that at some point, something may be slightly wrong with the HDCP setup, and Windows decides to reset the entire content protection path (which leads to the screen blanking out). Usually the reset succeeds, but sometimes it doesn't, so Windows defaults to turning HDCP off. There's no way to turn it back on, except by doing a hard reset of the display. I really want to know what the exact error was, so I can fix it. Is it the cable? Is it the display? Is it the video card? The driver? Also, is there any other way to try and turn HDCP on again without having to hard reset the display? Oh, questions, questions...

    Read the article

  • What would cause Memcached to Hang for 2+ seconds?

    - by Brad Dwyer
    I'm going nuts trying to scale memcached. From their site: Memcached operations are almost all O(1). Connecting to it and issuing a get or stat command should never lag. If connecting lags, you may be hitting the max connections limit. See ServerMaint for details on stats to monitor. If issuing commands lags, you can have a number of tuning problems. Most common are hardware problems, not enough RAM (swapping), network problems (bandwidth, dropped packets, half-duplex connections). On rare occasion OS bugs or memcached bugs can contribute. Well.. it is most certainly not performing like an O(1) operation for me. Under low to normal load on our site memcached response times for get and set ops are about 0.001 seconds. Not bad. But if we triple the load we get outliers that take 100x (or in rare cases 1000x!) that long. I even had one instance where it took 2.2442 seconds for memcached to store a value. Obviously this is killing our site. Here's the output of Memcached-getStats during one of the slow periods: [pid] => 18079 [uptime] => 8903 [threads] => 4 [time] => 1332795759 [pointer_size] => 32 [rusage_user_seconds] => 26 [rusage_user_microseconds] => 503872 [rusage_system_seconds] => 125 [rusage_system_microseconds] => 477008 [curr_items] => 42099 [total_items] => 422500 [limit_maxbytes] => 943718400 [curr_connections] => 84 [total_connections] => 4946 [connection_structures] => 178 [bytes] => 7259957 [cmd_get] => 1679091 [cmd_set] => 351809 [get_hits] => 1662048 [get_misses] => 17043 [evictions] => 0 [bytes_read] => 109388476 [bytes_written] => 3187646458 [version] => 1.4.13 So things that I have ruled out so far are: Hitting the max connections limit (curr_connections of 84 is well below the default of max of 1024) Swapping - the machine has 900M out of 1024M of memory dedicated to memcached on a dedicated machine. It only appears to be using about 7MB of data as per the bytes stat. How would I diagnose the other hardware problems? prstat doesn't really show a whole lot going on in terms of CPU or memory usage. Not sure how to figure out the network problems but as this is a dedicated server on the same private network as the web box I don't think it's a connectivity issue (ping is less than a millisecond between the boxes). Is there something else I'm missing here? It's driving me nuts. Edit: Also forgot to mention that I've tried both persistent and non-persistent connections with minimal-to-no impact.
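    One thing I plan to try is a crude probe that times a trivial round-trip once a second and logs the spikes, so I can correlate them with netstat/vmstat output captured at the same moment. This is only a rough sketch: it assumes GNU date (for %N) and a netcat that accepts -w, and the host address and 50 ms threshold are placeholders.

    ```bash
    #!/bin/bash
    # Sketch: once a second, time a "stats" round-trip to memcached and log
    # anything slower than ~50 ms with a timestamp. Host/port/threshold are
    # placeholders; requires GNU date (%N) and a netcat with -w.
    HOST=10.0.0.5
    PORT=11211
    while true; do
      start=$(date +%s%N)
      printf 'stats\r\nquit\r\n' | nc -w 2 "$HOST" "$PORT" >/dev/null
      elapsed_ms=$(( ( $(date +%s%N) - start ) / 1000000 ))
      if (( elapsed_ms > 50 )); then
        echo "$(date '+%F %T') slow stats round-trip: ${elapsed_ms}ms"
      fi
      sleep 1
    done >> memcached-latency.log
    ```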

    Read the article

  • Clouds Everywhere But not a Drop of Rain – Part 3

    - by sxkumar
    I was sharing with you how a broad-based transformation such as cloud will increase agility and efficiency of an organization if process re-engineering is part of the plan.  I have also stressed the key enterprise requirements such as “broad and deep solutions”, “running your mission critical applications” and “automated and integrated set of capabilities”. Let me walk you through some key cloud attributes such as “elasticity” and “self-service” and what they mean for an enterprise-class cloud. I will also talk about how we at Oracle have taken a very enterprise-centric view to developing cloud solutions and how our products have been specifically engineered to address enterprise cloud needs. Cloud Elasticity and Enterprise Applications Requirements Easy and quick scalability for a short period of time is the signature of cloud-based solutions. It is this elasticity that allows you to dynamically redistribute your resources according to business priorities, helps increase your overall resource utilization, and reduces operational costs by allowing you to get the most out of your existing investment. Most public clouds offer an instant provisioning mechanism for compute power (CPU, RAM, disk); customers pay for the instance-hours (and bandwidth) they use, adding computing resources at peak times and removing them when they are no longer needed. This type of “just-in-time” serving of compute resources works well for “stateless” mid-tier servers such as web application servers and web servers, which just need another machine to start and run on, but what does it really mean for an enterprise application and its underlying data? Most enterprise applications are not quite as “stateless”, and justifiably so. As such, how do you take advantage of cloud elasticity and make it relevant for your enterprise apps? This is where Cloud meets Grid Computing. At Oracle, we have invested an enormous amount of time, energy and resources in creating enterprise grid solutions. All our technology products offer built-in elasticity via clustering and dynamic scaling. With products like Real Application Clusters (RAC), Automatic Storage Management, WebLogic Clustering, and Coherence In-Memory Grid, we allow all your enterprise applications to benefit from cloud elasticity - both vertically and horizontally - without requiring any application changes. A number of technology vendors take a rather simplistic route of starting up additional VMs or removing unneeded ones as the "Cloud Scale-Out" solution. While this may work for stateless mid-tier servers, where load balancers can handle the addition and removal of instances transparently, following a similar approach for the database tier - often called "database sharding" - requires significant application modification and typically does not work with off-the-shelf packaged applications. Technologies like Oracle Database Real Application Clusters, Automatic Storage Management, etc., on the other hand, bring the benefits of incremental scalability and on-demand elasticity to ANY application by providing a simplified abstraction layer where the application does not need to deal with data spread over multiple database instances. Rather, they just talk to a single database and the database software takes care of aggregating resources across multiple hardware components. It is technologies like these that truly make a cloud solution relevant for enterprises.
For customers who are looking for a next-generation hardware consolidation platform, our engineered systems (e.g. Exadata, Exalogic) not only provide an incredible amount of performance and capacity, they also reduce data center complexity and simplify operations. Assemble, Deploy and Manage Enterprise Applications for Cloud Products like Oracle Virtual Assembly Builder (OVAB) resolve the complex problem of bringing cloud speed to complex multi-tier applications. With assemblies, you can not only provision all components of a multi-tier application and wire them together at the push of a button; other aspects of the application lifecycle, such as real-time application testing, scale-up/scale-down, and performance and availability monitoring, are also automated using Oracle Enterprise Manager.  An essential criterion for an enterprise cloud to succeed is the ability to ensure business service levels, especially when business users have full visibility into usage costs through a “showback” or a “chargeback”. With Oracle Enterprise Manager 12c, we have created the most comprehensive cloud management solution in the industry, capable of managing business service levels “applications-to-disk” in an enterprise private cloud - all from a single console. It is the only cloud management platform in the industry that allows you to deliver infrastructure, platform and application cloud services out of the box. Moreover, it offers integrated and complete lifecycle management of the cloud - including planning and setup, service delivery, operations management, metering and chargeback, etc.  Sounds unbelievable? Well, just watch this space for more details on how Oracle Enterprise Manager 12c is the nerve center of Oracle Cloud! Our cloud solution portfolio is also the broadest and deepest in the industry - covering public, private and hybrid deployments across infrastructure, platform and application clouds. It is no coincidence, therefore, that the Oracle Cloud today offers the most comprehensive set of public cloud services in the industry. And to a large part, this has been made possible thanks to our years of investment in creating cloud-enabling technologies.  Summary  But the intent of this blog post isn't to dwell on how great our solutions are (these are just some examples to illustrate how we at Oracle have approached this problem space). Rather, it is to help you ask the right questions before you embark on your cloud journey.  So to summarize, here are the key takeaways. It is critical that you are clear on why you are building the cloud. Successful organizations keep business benefits as the first and foremost cloud objective. On the other hand, those who approach this purely as a technology project are more likely to fail. Think about where you want to be in 3-5 years before you get started. Your long-term objectives should determine what your first step ought to be. As obvious as it may seem, more people than not make the first move without knowing where they are headed.  Don’t make the mistake of equating cloud to virtualization and Infrastructure-as-a-Service (IaaS). Spinning up a VM on demand will give some short-term relief to your IT staff but is unlikely to solve your larger business problems. As such, even if IaaS is your first step towards a more comprehensive cloud, plan the roadmap around those higher-level services before you begin. And ask your vendors how they are going to be your partners in this journey.
Capabilities like self-service access and chargeback/showback are absolutely critical if you really expect your cloud to be transformational. Your business won't see the full benefits of the cloud until it empowers them with the same kind of control and transparency that they are used to from public cloud services.  Evaluate the benefits of integration, as opposed to blindly following a best-of-breed strategy. Integration is a huge challenge, and even more so in a cloud environment. There are enormous costs associated with stitching a solution out of disparate components, and even more in maintaining it. Hope you found these ideas helpful. Looking forward to hearing your thoughts and experiences.

    Read the article

  • VMware vSphere cluster design for site redundancy

    - by Stefan Radovanovici
    I have a question about the best design for site redundancy when using vSphere clusters. A bit of background info about our situation first though. We are a medium-sized company with two main offices, located in different countries. Our networks are linked by a Layer 2 150Mbps leased line which is currently underused. We have a variety of services running for internal use within the company, some on physical servers and some on existing vSphere clusters. In our department we also run several services (almost all running under various forms of Linux) like NTP, Syslog, jump servers, monitoring servers and so on. We now have the requirement that those servers need to be redundant within each location (which they are not at the moment) and also site-redundant (which they are to some extent, the servers are duplicated in the 2nd location with configurations kept in sync via various methods at the application layer). There is no SAN available for us, at least not something that we can use at the moment. Cost is also an issue. While we do have some budget available for this, we can't afford to buy SANs for both locations for example. I looked at the VSA feature and it seems that this could be something for us but I am unsure how to solve the site-redundancy requirement. At the moment for testing purposes I am setting up in a lab a vSphere 5 with VSA on two ESXi hosts. I am currently using the Essentials Plus kit with VSA license, which allows me to build a VSA cluster on up to 3 hosts, together with a vCenter license to manage them. The hosts each have two dual-port network cards and two 600GB drives, running in RAID 1. Hardware-wise this will be enough for us to run all the services we need as VMs and will provide redundancy within the site. At the moment I see only two options for site redundancy: 1) build an identical VSA cluster in the second location and keep the various services sync'ed at the application layer (database sync, rsync and so on); 2) simply move one of the hosts from the existing cluster to the second location, basically having the VSA cluster span the 150Mbps link between the sites. I would very much prefer the second option but I am unsure how well it'll work, if it can work at all. Technically it should; we can span the needed VLANs across the leased line and have them available in the second location. The advantage would be that we don't need to worry at all about sync'ing databases and the like. But I have the feeling that the bandwidth will not be enough, and I have no way of knowing how much traffic the VSA cluster will generate between the hosts. I realize that this will most likely depend on the individual usage of the VMs but still, I have no idea how VSA replicates data between the ESXi hosts. Are these my only options or can my goals be achieved in some other way? Is there perhaps a way to have some sort of "cold standby" cluster in the second location where the VMs would be sync'ed once per night from the main location? The idea is that in case the first site becomes unavailable, we would be able to bring all those VMs online there. We would be ok with the data being 1 day old. Any answers are appreciated. Best regards, Stefan
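    For a rough sense of the constraint (back-of-the-envelope only; the real VSA replication overhead and protocol chatter are unknown here, so treat the figures as assumptions):

        150 Mbps / 8 bits per byte         ≈ 18.75 MB/s theoretical link maximum
        18.75 MB/s x 3600 s                ≈ 67 GB per hour
        67 GB/h x 8 h overnight window     ≈ 540 GB per night

    In other words, a VSA mirror stretched across the link would cap sustained write throughput at well under 19 MB/s (far below what the local RAID 1 disks can deliver, and shared with all other inter-site traffic), while a nightly "cold standby" sync could move at most roughly half a terabyte of changed data per night, assuming the link is otherwise idle.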

    Read the article

  • What NAS setup for syncing over the internet?

    - by Jamse
    I have family living a few hours away and have a lot of files that I would like to share - especially lots of folders of digital photos, but also documents etc. - partially so they can see them, partially so I can have access when I visit them and partially for backup / redundancy purposes. My current hard drives on my main machine are getting pretty full anyway, and I have a MythTV box where my music is currently stored, so I was thinking of getting a NAS anyway. And at the other end my family have a few computers, so they would probably benefit from a NAS too. My general idea (though I'm willing to shift on this if there are any bright ideas about other ways of achieving my objectives) is to get a matching pair of NASs and have them sync over the internet. (To cut down on bandwidth use I would get them in sync locally to start with.) Having read around as best I can it seems that syncing over the internet is generally only a feature on quite high end units. However, I have seen that QNAP seem to feature this on their TS-110 and TS-210 units, which might work (they call it "remote replication"). They seem pretty reasonably priced for what they are, but of course with buying 2 of them and then adding the drives (say 1TB or 2TB each) I'd be looking at about £400 total. So, I'm looking for recommendations really. I don't want to spend more than the QNAPs would cost me, but any other ideas would be most appreciated. I am comfortable with technology and tinkering around, but I don't have as much time for that as I would like, so I guess I would favour solutions that require less tinkering rather than more (even though that's less fun!). Any thoughts would be welcome, as would any comments from people who have used the QNAP boxes for this. Thanks in advance. Some specifications: Two-way syncing. Changes made at either end should be synced to the other. There shouldn't be one unit that is effectively a read-only mirror of the other. Not real time. The syncing doesn't need to be real time - if it updated, say, daily overnight that would be fine. Set and forget. I would prefer minimal user interaction once set up - it would be great if syncs were scheduled and automatic. OS independence. I am running Windows XP plus an Ubuntu-based MythTV box. At the other end there are Windows 7 and Windows XP machines, plus a networked TV set top box which I think can play files off the network. Machine independence. I would favour a system that is self-contained, i.e. not reliant on any particular PC being switched on. If the system had enough else going for it I could perhaps work around it at this end, where I only have one PC that's used as such, but it would be harder at the other where there are at least two PCs that might be accessing the files. Notifications. I guess things like getting an email notification if the syncing fell over for any reason would be useful, though it's not a deal breaker.
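    If the QNAP "remote replication" route turns out to be limiting, one DIY alternative is a nightly two-way sync driven from one side by cron. This is only a sketch - the hostnames, share paths and schedule are made up, and it assumes both ends can run Unison and reach each other over SSH:

        # /etc/crontab on the local NAS or an always-on Linux box - 02:30 every night
        30 2 * * * root unison /share/photos ssh://remote-nas//share/photos -batch -prefer newer >> /var/log/unison-photos.log 2>&1

    Unison propagates changes in both directions and, with -batch, only stops on conflicts it cannot resolve; -prefer newer settles those in favour of the most recently modified copy. It does mean having a small Linux-capable device at each end, so it trades some of the "set and forget" convenience for flexibility.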

    Read the article

  • WCF timeout exception detailed investigation

    - by Jason Kealey
    We have an application that has a WCF service (*.svc) running on IIS7 and various clients querying the service. The server is running Win 2008 Server. The clients are running either Windows 2008 Server or Windows 2003 server. I am getting the following exception, which I have seen can in fact be related to a large number of potential WCF issues. System.TimeoutException: The request channel timed out while waiting for a reply after 00:00:59.9320000. Increase the timeout value passed to the call to Request or increase the SendTimeout value on the Binding. The time allotted to this operation may have been a portion of a longer timeout. ---> System.TimeoutException: The HTTP request to 'http://www.domain.com/WebServices/myservice.svc/gzip' has exceeded the allotted timeout of 00:01:00. The time allotted to this operation may have been a portion of a longer timeout. I have increased the timeout to 30min and the error still occurred. This tells me that something else is at play, because the quantity of data could never take 30min to upload or download. The error comes and goes. At the moment, it is more frequent. It does not seem to matter if I have 3 clients running simultaneously or 100, it still occurs once in a while. Most of the time, there are no timeouts but I still get a few per hour. The error comes from any of the methods that are invoked. One of these methods does not have parameters and returns a bit of data. Another takes in lots of data as a parameter but executes asynchronously. The errors always originate from the client and never reference any code on the server in the stack trace. It always ends with: at System.Net.HttpWebRequest.GetResponse() at System.ServiceModel.Channels.HttpChannelFactory.HttpRequestChannel.HttpChannelRequest.WaitForReply(TimeSpan timeout) On the server: I've tried (and currently have) the following binding settings: maxBufferSize="2147483647" maxReceivedMessageSize="2147483647" maxBufferPoolSize="2147483647" It does not seem to have an impact. I've tried (and currently have) the following throttling settings: <serviceThrottling maxConcurrentCalls="1500" maxConcurrentInstances="1500" maxConcurrentSessions="1500"/> It does not seem to have an impact. I currently have the following settings for the WCF service. [ServiceBehavior(InstanceContextMode = InstanceContextMode.Single, ConcurrencyMode = ConcurrencyMode.Single)] I ran with ConcurrencyMode.Multiple for a while, and the error still occurred. I've tried restarting IIS, restarting my underlying SQL Server, restarting the machine. All of these don't seem to have an impact. I've tried disabling the Windows firewall. It does not seem to have an impact. On the client, I have these settings: maxReceivedMessageSize="2147483647" <system.net> <connectionManagement> <add address="*" maxconnection="16"/> </connectionManagement> </system.net> My client closes its connections: var client = new MyClient(); try { return client.GetConfigurationOptions(); } finally { client.Close(); } I have changed the registry settings to allow more outgoing connections: MaxConnectionsPerServer=24, MaxConnectionsPer1_0Server=32. I have now just recently tried SvcTraceViewer.exe. I managed to catch one exception on the client end. I see that its duration is 1 minute. Looking at the server side trace, I can see that the server is not aware of this exception. The maximum duration I can see is 10 seconds. I have looked at active database connections using exec sp_who on the server. I only have a few (2-3). 
I have looked at TCP connections from one client using TCPView. It usually is around 2-3 and I have seen up to 5 or 6. Simply put, I am stumped. I have tried everything I could find, and must be missing something very simple that a WCF expert would be able to see. It is my gut feeling that something is blocking my clients at a low level (TCP), before the server actually receives the message, and/or that something is queuing the messages at the server level and never letting them process. If you have any performance counters I should look at, please let me know. (Please indicate what values are bad, as some of these counters are hard to decipher.) Also, how could I log the WCF message size? Finally, are there any tools out there that would allow me to test how many connections I can establish between my client and server (independently from my application)? Thanks for your time! Extra information added June 20th: My WCF application does something similar to the following. while (true) { Step1GetConfigurationSettingsFromServerViaWCF(); // can change between calls Step2GetWorkUnitFromServerViaWCF(); DoWorkLocally(); // takes 5-15 minutes. Step3SendBackResultsToServerViaWCF(); } Using Wireshark, I did see that when the error occurs, I have five TCP retransmissions followed by a TCP reset later on. My guess is the RST is coming from WCF killing the connection. The exception report I get is from Step3 timing out. I discovered this by looking at the tcp stream "tcp.stream eq 192". I then expanded my filter to "tcp.stream eq 192 and http and http.request.method eq POST" and saw 6 POSTs during this stream. This seemed odd, so I checked with another stream such as tcp.stream eq 100. I had three POSTs, which seems a bit more normal because I am doing three calls. However, I do close my connection after every WCF call, so I would have expected one call per stream (but I don't know much about TCP). Investigating a bit more, I dumped the http packet load to disk to look at what these six calls were. 1) Step3 2) Step1 3) Step2 4) Step3 - corrupted 5) Step1 6) Step2 My guess is two concurrent clients are using the same connection, which is why I saw duplicates. However, I still have a few more issues that I can't comprehend: a) Why is the packet corrupted? Random network fluke - maybe? The load is gzipped using this sample code: http://msdn.microsoft.com/en-us/library/ms751458.aspx - Could the code be buggy once in a while when used concurrently? I should test without the gzip library. b) Why would I see step 1 & step 2 running AFTER the corrupted operation timed out? It seems to me as if these operations should not have occurred. Maybe I am not looking at the right stream because my understanding of TCP is flawed. I have other streams that occur at the same time. I should investigate other streams - a quick glance at streams 190-194 shows that the Step3 POSTs have proper payload data (not corrupted), pushing me to look at the gzip library again.
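Two client-side things worth double-checking while digging through the captures. Both are standard WCF/.NET practice rather than anything specific to this service, so treat the snippet below as a sketch: first, a try/finally that calls Close() will itself block for the full timeout, or throw, when the channel is already faulted, so the usual recommendation is to Close() on success and Abort() on failure; second, the outgoing connection limit can also be set in code at startup, which makes it easy to confirm the config value is actually being picked up.

    var client = new MyClient();
    try
    {
        var result = client.GetConfigurationOptions();
        client.Close();                    // graceful close only on the happy path
        return result;
    }
    catch (CommunicationException)         // System.ServiceModel
    {
        client.Abort();                    // faulted channel: Close() would block or throw
        throw;
    }
    catch (TimeoutException)
    {
        client.Abort();
        throw;
    }

    // Equivalent of <connectionManagement maxconnection="16">, applied once at startup:
    // System.Net.ServicePointManager.DefaultConnectionLimit = 16;

This won't by itself explain the one-minute stalls, but it removes one known way a client can sit blocked on a dead channel and makes the connection limit easy to verify from the code side.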

    Read the article

  • Xen DomU on DRBD device: barrier errors

    - by Halfgaar
    I'm testing setting up a Xen DomU with a DRBD storage for easy failover. Most of the time, immediatly after booting the DomU, I get an IO error: [ 3.153370] EXT3-fs (xvda2): using internal journal [ 3.277115] ip_tables: (C) 2000-2006 Netfilter Core Team [ 3.336014] nf_conntrack version 0.5.0 (3899 buckets, 15596 max) [ 3.515604] init: failsafe main process (397) killed by TERM signal [ 3.801589] blkfront: barrier: write xvda2 op failed [ 3.801597] blkfront: xvda2: barrier or flush: disabled [ 3.801611] end_request: I/O error, dev xvda2, sector 52171168 [ 3.801630] end_request: I/O error, dev xvda2, sector 52171168 [ 3.801642] Buffer I/O error on device xvda2, logical block 6521396 [ 3.801652] lost page write due to I/O error on xvda2 [ 3.801755] Aborting journal on device xvda2. [ 3.804415] EXT3-fs (xvda2): error: ext3_journal_start_sb: Detected aborted journal [ 3.804434] EXT3-fs (xvda2): error: remounting filesystem read-only [ 3.814754] journal commit I/O error [ 6.973831] init: udev-fallback-graphics main process (538) terminated with status 1 [ 6.992267] init: plymouth-splash main process (546) terminated with status 1 The manpage of drbdsetup says that LVM (which I use) doesn't support barriers (better known as tagged command queuing or native command queing), so I configured the drbd device not to use barriers. This can be seen in /proc/drbd (by "wo:f, meaning flush, the next method drbd chooses after barrier): 3: cs:Connected ro:Primary/Secondary ds:UpToDate/UpToDate C r---- ns:2160152 nr:520204 dw:2680344 dr:2678107 al:3549 bm:9183 lo:0 pe:0 ua:0 ap:0 ep:1 wo:f oos:0 And on the other host: 3: cs:Connected ro:Secondary/Primary ds:UpToDate/UpToDate C r---- ns:0 nr:2160152 dw:2160152 dr:0 al:0 bm:8052 lo:0 pe:0 ua:0 ap:0 ep:1 wo:f oos:0 I also enabled the option disable_sendpage, as per the drbd docs: cat /sys/module/drbd/parameters/disable_sendpage Y I also tried adding barriers=0 to fstab as mount option. Still it sometimes says: [ 58.603896] blkfront: barrier: write xvda2 op failed [ 58.603903] blkfront: xvda2: barrier or flush: disabled I don't even know if ext3 has a nobarrier option. And, because only one of my storage systems is battery backed, it would not be smart anyway. Why does it still compain about barriers when I disabled that? Both host are: Debian: 6.0.4 uname -a: Linux 2.6.32-5-xen-amd64 drbd: 8.3.7 Xen: 4.0.1 Guest: Ubuntu 12.04 LTS uname -a: Linux 3.2.0-24-generic pvops drbd resource: resource drbdvm { meta-disk internal; device /dev/drbd3; startup { # The timeout value when the last known state of the other side was available. 0 means infinite. wfc-timeout 0; # Timeout value when the last known state was disconnected. 0 means infinite. degr-wfc-timeout 180; } syncer { # This is recommended only for low-bandwidth lines, to only send those # blocks which really have changed. #csums-alg md5; # Set to about half your net speed rate 60M; # It seems that this option moved to the 'net' section in drbd 8.4. (later release than Debian has currently) verify-alg md5; } net { # The manpage says this is recommended only in pre-production (because of its performance), to determine # if your LAN card has a TCP checksum offloading bug. #data-integrity-alg md5; } disk { # Detach causes the device to work over-the-network-only after the # underlying disk fails. Detach is not default for historical reasons, but is # recommended by the docs. # However, the Debian defaults in drbd.conf suggest the machine will reboot in that event... 
on-io-error detach; # LVM doesn't support barriers, so disabling it. It will revert to flush. Check wo: in /proc/drbd. If you don't disable it, you get IO errors. no-disk-barrier; } on host1 { # universe is a VG disk /dev/universe/drbdvm-disk; address 10.0.0.1:7792; } on host2 { # universe is a VG disk /dev/universe/drbdvm-disk; address 10.0.0.2:7792; } } DomU cfg: bootloader = '/usr/lib/xen-default/bin/pygrub' vcpus = '2' memory = '512' # # Disk device(s). # root = '/dev/xvda2 ro' disk = [ 'phy:/dev/drbd3,xvda2,w', 'phy:/dev/universe/drbdvm-swap,xvda1,w', ] # # Hostname # name = 'drbdvm' # # Networking # # fake IP for posting vif = [ 'ip=1.2.3.4,mac=00:16:3E:22:A8:A7' ] # # Behaviour # on_poweroff = 'destroy' on_reboot = 'restart' on_crash = 'restart' In my test setup: the primary host's storage is 9650SE SATA-II RAID PCIe with battery. The secondary is software RAID1. Isn't DRBD+Xen widely used? With these problems, it's not going to work.
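A note on the guest-side mount option, since the question mentions trying "barriers=0": if I remember the ext3 mount options correctly, the switch is spelled barrier=0 / barrier=1 (singular), so "barriers=0" is not a recognised option. A sketch of the relevant guest /etc/fstab line (device taken from the DomU config above, remaining options assumed):

    /dev/xvda2   /    ext3   defaults,barrier=0   0   1

This only stops the guest filesystem from issuing the barrier/flush requests that blkfront then reports as failed; it says nothing about whether running without barriers is safe on the node whose storage is not battery-backed, so it is a workaround to test with rather than necessarily the fix. On the DRBD side, 8.3 also has no-disk-flushes and no-md-flushes alongside no-disk-barrier, which may be worth reviewing since the current config only disables barriers and therefore falls back to flushes (the "wo:f" shown in /proc/drbd).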

    Read the article

  • fd partitions gone from 2 discs, md happy with it and resyncs. How to recover ?

    - by d0nd
    Hey gurus, need some help badly with this one. I run a server with a 6Tb md raid5 volume built over 7*1Tb disks. I've had to shut down the server lately and when it went back up, 2 out of the 7 disks used for the raid volume had lost its conf : dmesg : [ 10.184167] sda: sda1 sda2 sda3 // System disk [ 10.202072] sdb: sdb1 [ 10.210073] sdc: sdc1 [ 10.222073] sdd: sdd1 [ 10.229330] sde: sde1 [ 10.239449] sdf: sdf1 [ 11.099896] sdg: unknown partition table [ 11.255641] sdh: unknown partition table All 7 disks have same geometry and were configured alike : dmesg : Disk /dev/sdb: 1000.2 GB, 1000204886016 bytes 255 heads, 63 sectors/track, 121601 cylinders Units = cylinders of 16065 * 512 = 8225280 bytes Disk identifier: 0x1e7481a5 Device Boot Start End Blocks Id System /dev/sdb1 1 121601 976760001 fd Linux raid autodetect All 7 disks (sdb1, sdc1, sdd1, sde1, sdf1, sdg1, sdh1) were used in a md raid5 xfs volume. When booting, md, which was (obviously) out of sync kicked in and automatically started rebuilding over the 7 disks, including the two "faulty" ones; xfs tried to do some shenanigans as well: dmesg : [ 19.566941] md: md0 stopped. [ 19.817038] md: bind<sdc1> [ 19.817339] md: bind<sdd1> [ 19.817465] md: bind<sde1> [ 19.817739] md: bind<sdf1> [ 19.817917] md: bind<sdh> [ 19.818079] md: bind<sdg> [ 19.818198] md: bind<sdb1> [ 19.818248] md: md0: raid array is not clean -- starting background reconstruction [ 19.825259] raid5: device sdb1 operational as raid disk 0 [ 19.825261] raid5: device sdg operational as raid disk 6 [ 19.825262] raid5: device sdh operational as raid disk 5 [ 19.825264] raid5: device sdf1 operational as raid disk 4 [ 19.825265] raid5: device sde1 operational as raid disk 3 [ 19.825267] raid5: device sdd1 operational as raid disk 2 [ 19.825268] raid5: device sdc1 operational as raid disk 1 [ 19.825665] raid5: allocated 7334kB for md0 [ 19.825667] raid5: raid level 5 set md0 active with 7 out of 7 devices, algorithm 2 [ 19.825669] RAID5 conf printout: [ 19.825670] --- rd:7 wd:7 [ 19.825671] disk 0, o:1, dev:sdb1 [ 19.825672] disk 1, o:1, dev:sdc1 [ 19.825673] disk 2, o:1, dev:sdd1 [ 19.825675] disk 3, o:1, dev:sde1 [ 19.825676] disk 4, o:1, dev:sdf1 [ 19.825677] disk 5, o:1, dev:sdh [ 19.825679] disk 6, o:1, dev:sdg [ 19.899787] PM: Starting manual resume from disk [ 28.663228] Filesystem "md0": Disabling barriers, not supported by the underlying device [ 28.663228] XFS mounting filesystem md0 [ 28.884433] md: resync of RAID array md0 [ 28.884433] md: minimum _guaranteed_ speed: 1000 KB/sec/disk. [ 28.884433] md: using maximum available idle IO bandwidth (but not more than 200000 KB/sec) for resync. [ 28.884433] md: using 128k window, over a total of 976759936 blocks. [ 29.025980] Starting XFS recovery on filesystem: md0 (logdev: internal) [ 32.680486] XFS: xlog_recover_process_data: bad clientid [ 32.680495] XFS: log mount/recovery failed: error 5 [ 32.682773] XFS: log mount failed I ran fdisk and flagged sdg1 and sdh1 as fd. I tried to reassemble the array but it didnt work: no matter what was in mdadm.conf, it still uses sdg and sdh instead of sdg1 and sdh1. I checked in /dev and I see no sdg1 and and sdh1, shich explains why it wont use it. I just don't know why those partitions are gone from /dev and how to readd those... 
blkid : /dev/sda1: LABEL="boot" UUID="519790ae-32fe-4c15-a7f6-f1bea8139409" TYPE="ext2" /dev/sda2: TYPE="swap" /dev/sda3: LABEL="root" UUID="91390d23-ed31-4af0-917e-e599457f6155" TYPE="ext3" /dev/sdb1: UUID="2802e68a-dd11-c519-e8af-0d8f4ed72889" TYPE="mdraid" /dev/sdc1: UUID="2802e68a-dd11-c519-e8af-0d8f4ed72889" TYPE="mdraid" /dev/sdd1: UUID="2802e68a-dd11-c519-e8af-0d8f4ed72889" TYPE="mdraid" /dev/sde1: UUID="2802e68a-dd11-c519-e8af-0d8f4ed72889" TYPE="mdraid" /dev/sdf1: UUID="2802e68a-dd11-c519-e8af-0d8f4ed72889" TYPE="mdraid" /dev/sdg: UUID="2802e68a-dd11-c519-e8af-0d8f4ed72889" TYPE="mdraid" /dev/sdh: UUID="2802e68a-dd11-c519-e8af-0d8f4ed72889" TYPE="mdraid" fdisk -l : Disk /dev/sda: 40.0 GB, 40020664320 bytes 255 heads, 63 sectors/track, 4865 cylinders Units = cylinders of 16065 * 512 = 8225280 bytes Disk identifier: 0x8c878c87 Device Boot Start End Blocks Id System /dev/sda1 * 1 12 96358+ 83 Linux /dev/sda2 13 134 979965 82 Linux swap / Solaris /dev/sda3 135 4865 38001757+ 83 Linux Disk /dev/sdb: 1000.2 GB, 1000204886016 bytes 255 heads, 63 sectors/track, 121601 cylinders Units = cylinders of 16065 * 512 = 8225280 bytes Disk identifier: 0x1e7481a5 Device Boot Start End Blocks Id System /dev/sdb1 1 121601 976760001 fd Linux raid autodetect Disk /dev/sdc: 1000.2 GB, 1000204886016 bytes 255 heads, 63 sectors/track, 121601 cylinders Units = cylinders of 16065 * 512 = 8225280 bytes Disk identifier: 0xc9bdc1e9 Device Boot Start End Blocks Id System /dev/sdc1 1 121601 976760001 fd Linux raid autodetect Disk /dev/sdd: 1000.2 GB, 1000204886016 bytes 255 heads, 63 sectors/track, 121601 cylinders Units = cylinders of 16065 * 512 = 8225280 bytes Disk identifier: 0xcc356c30 Device Boot Start End Blocks Id System /dev/sdd1 1 121601 976760001 fd Linux raid autodetect Disk /dev/sde: 1000.2 GB, 1000204886016 bytes 255 heads, 63 sectors/track, 121601 cylinders Units = cylinders of 16065 * 512 = 8225280 bytes Disk identifier: 0xe87f7a3d Device Boot Start End Blocks Id System /dev/sde1 1 121601 976760001 fd Linux raid autodetect Disk /dev/sdf: 1000.2 GB, 1000204886016 bytes 255 heads, 63 sectors/track, 121601 cylinders Units = cylinders of 16065 * 512 = 8225280 bytes Disk identifier: 0xb17a2d22 Device Boot Start End Blocks Id System /dev/sdf1 1 121601 976760001 fd Linux raid autodetect Disk /dev/sdg: 1000.2 GB, 1000204886016 bytes 255 heads, 63 sectors/track, 121601 cylinders Units = cylinders of 16065 * 512 = 8225280 bytes Disk identifier: 0x8f3bce61 Device Boot Start End Blocks Id System /dev/sdg1 1 121601 976760001 fd Linux raid autodetect Disk /dev/sdh: 1000.2 GB, 1000204886016 bytes 255 heads, 63 sectors/track, 121601 cylinders Units = cylinders of 16065 * 512 = 8225280 bytes Disk identifier: 0xa98062ce Device Boot Start End Blocks Id System /dev/sdh1 1 121601 976760001 fd Linux raid autodetect I really dont know what happened nor how to recover from this mess. Needless to say the 5TB or so worth of data sitting on those disks are very valuable to me... Any idea any one? Did anybody ever experienced a similar situation or know how to recover from it ? Can someone help me? I'm really desperate... :x
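A heavily hedged sketch of the read-only diagnostics I would run first (device names copied from the question; nothing here writes to the disks apart from stopping the array, and with data this valuable it is worth imaging the two affected drives before attempting any repair). The most likely reason /dev/sdg1 and /dev/sdh1 are missing is simply that md is holding the whole devices sdg/sdh open, so the kernel never re-read the partition tables recreated with fdisk:

    mdadm --stop /dev/md0                     # release sdg/sdh (array must not be mounted)
    partprobe /dev/sdg /dev/sdh               # or: blockdev --rereadpt /dev/sdg
    ls -l /dev/sdg1 /dev/sdh1                 # partition device nodes should now exist
    mdadm --examine /dev/sd[bcdef]1 /dev/sdg1 /dev/sdh1   # read-only: compare superblocks and event counts

The catch is that the resync already ran against the whole devices, so the md superblocks may now live at the end of sdg/sdh rather than at the end of sdg1/sdh1; if --examine shows no (or stale) metadata on the two partitions, stop there and get advice (e.g. on the linux-raid mailing list) rather than forcing an assemble, because a wrong --assemble --force or --create can destroy what is still recoverable.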

    Read the article

  • Help me upgrade my pf.conf for OpenBSD 4.7

    - by polemon
    I'm planning on upgrading my OpenBSD to 4.7 (from 4.6) and as you may or may not know, they changed the syntax for pf.conf. This is the relevant portion from the upgrade guide: pf(4) NAT syntax change As described in more detail in this mailing list post, PF's separate nat/rdr/binat (translation) rules have been replaced with actions on regular match/filter rules. Simple rulesets may be converted like this: nat on $ext_if from 10/8 -> ($ext_if) rdr on $ext_if to ($ext_if) -> 1.2.3.4 becomes match out on $ext_if from 10/8 nat-to ($ext_if) match in on $ext_if to ($ext_if) rdr-to 1.2.3.4 and... binat on $ext_if from $web_serv_int to any -> $web_serv_ext becomes match on $ext_if from $web_serv_int to any binat-to $web_serv_ext nat-anchor and/or rdr-anchor lines, e.g. for relayd(8), ftp-proxy(8) and tftp-proxy(8), are no longer used and should be removed from pf.conf(5), leaving only the anchor lines. Translation rules relating to these and spamd(8) will need to be adjusted as appropriate. N.B.: Previously, translation rules had "stop at first match" behaviour, with binat being evaluated first, followed by nat/rdr depending on direction of the packet. Now the filter rules are subject to the usual "last match" behaviour, so care must be taken with rule ordering when converting. pf(4) route-to/reply-to syntax change The route-to, reply-to, dup-to and fastroute options in pf.conf move to filteropts; pass in on $ext_if route-to (em1 192.168.1.1) from 10.1.1.1 pass in on $ext_if reply-to (em1 192.168.1.1) to 10.1.1.1 becomes pass in on $ext_if from 10.1.1.1 route-to (em1 192.168.1.1) pass in on $ext_if to 10.1.1.1 reply-to (em1 192.168.1.1) Now, this is my current pf.conf: # $OpenBSD: pf.conf,v 1.38 2009/02/23 01:18:36 deraadt Exp $ # # See pf.conf(5) for syntax and examples; this sample ruleset uses # require-order to permit mixing of NAT/RDR and filter rules. # Remember to set net.inet.ip.forwarding=1 and/or net.inet6.ip6.forwarding=1 # in /etc/sysctl.conf if packets are to be forwarded between interfaces. 
ext_if="pppoe0" int_if="nfe0" int_net="192.168.0.0/24" polemon="192.168.0.10" poletopw="192.168.0.12" segatop="192.168.0.20" table <leechers> persist set loginterface $ext_if set skip on lo match on $ext_if all scrub (no-df max-mss 1440) altq on $ext_if priq bandwidth 950Kb queue {q_pri, q_hi, q_std, q_low} queue q_pri priority 15 queue q_hi priority 10 queue q_std priority 7 priq(default) queue q_low priority 0 nat-anchor "ftp-proxy/*" rdr-anchor "ftp-proxy/*" nat on $ext_if from !($ext_if) -> ($ext_if) rdr pass on $int_if proto tcp to port ftp -> 127.0.0.1 port 8021 rdr pass on $ext_if proto tcp to port 2080 -> $segatop port 80 rdr pass on $ext_if proto tcp to port 2022 -> $segatop port 22 rdr pass on $ext_if proto tcp to port 4000 -> $polemon port 4000 rdr pass on $ext_if proto tcp to port 6600 -> $polemon port 6600 anchor "ftp-proxy/*" block pass on $int_if queue(q_hi, q_pri) pass out on $ext_if queue(q_std, q_pri) pass out on $ext_if proto icmp queue q_pri pass out on $ext_if proto {tcp, udp} to any port ssh queue(q_hi, q_pri) pass out on $ext_if proto {tcp, udp} to any port http queue(q_std, q_pri) #pass out on $ext_if proto {tcp, udp} all queue(q_low, q_hi) pass out on $ext_if proto {tcp, udp} from <leechers> queue(q_low, q_std) pass in on $ext_if proto tcp to ($ext_if) port ident queue(q_hi, q_pri) pass in on $ext_if proto tcp to ($ext_if) port ssh queue(q_hi, q_pri) pass in on $ext_if proto tcp to ($ext_if) port http queue(q_hi, q_pri) pass in on $ext_if inet proto icmp all icmp-type echoreq queue q_pri If someone has experience with porting the 4.6 pf.conf to 4.7, please help me do the correct changes. OK, this is how far I've got: I commented out nat-anchor and rdr-anchor, as describted in the guide: #nat-anchor "ftp-proxy/*" #rdr-anchor "ftp-proxy/*" And this is how I've "converted" the rdr rules: #nat on $ext_if from !($ext_if) -> ($ext_if) match out on $ext_if from !($ext_if) nat-to ($ext_if) #rdr pass on $int_if proto tcp to port ftp -> 127.0.0.1 port 8021 match in on $int_if proto tcp to port ftp rdr-to 127.0.0.1 port 8021 #rdr pass on $ext_if proto tcp to port 2080 -> $segatop port 80 match in on $ext_if proto tcp tp port 2080 rdr-to $segatop port 80 #rdr pass on $ext_if proto tcp to port 2022 -> $segatop port 22 match in on $ext_if proto tcp tp port 2022 rdr-to $segatop port 22 rdr pass on $ext_if proto tcp to port 4000 -> $polemon port 4000 match in on $ext_if proto tcp tp port 4000 rdr-to $polemon port 4000 rdr pass on $ext_if proto tcp to port 6600 -> $polemon port 6600 match in on $ext_if proto tcp tp port 6600 rdr-to $polemon port 6600 Did I miss anything? Is the anchor for ftp-proxy OK as it is now? Do I need to change something in the other pass in on... lines?

    Read the article

< Previous Page | 60 61 62 63 64 65 66 67  | Next Page >