Search Results

Search found 44309 results on 1773 pages for 'hp command view'.


  • How do I define a field of view for the entire map for shadows?

    - by Mehdi Bugnard
    I recently added "shadow mapping" to my XNA game, following the well-known Riemer's tutorial: http://www.riemers.net/eng/Tutorials/XNA/Csharp/Series3/Shadow_map.php. The code works nicely and I can see my light source and shadows. The problem is that my light source does not match the field of view that I created: I want the light to cover the entire map of my game, but for some reason it only affects 2-3 cubes of the map.

    Screenshot: (the emission of light illuminates only 2-3 blocks and not the full map.)

    Here is the code where I create the field of view for the LightViewProjection matrix:

        Vector3 lightDir = new Vector3(10, 52, 10);
        lightPos = new Vector3(10, 52, 10);
        Matrix lightsView = Matrix.CreateLookAt(lightPos, new Vector3(105, 50, 105), new Vector3(0, 1, 0));
        Matrix lightsProjection = Matrix.CreatePerspectiveFieldOfView(MathHelper.PiOver2, 1f, 20f, 1000f);
        lightsViewProjectionMatrix = lightsView * lightsProjection;

    As you can see, my near and far planes are set to 20f and 1000f, so I don't know why the light stops after 2 cubes; it should reach much farther.

    Here is where I set the values on my custom HLSL effect in the shader file:

        /* SHADOW VALUES */
        effectWorld.Parameters["LightDirection"].SetValue(lightDir);
        effectWorld.Parameters["xLightsWorldViewProjection"].SetValue(Matrix.Identity * lightsViewProjectionMatrix);
        effectWorld.Parameters["xWorldViewProjection"].SetValue(Matrix.Identity * arcadia.camera.View * arcadia.camera.Projection);
        effectWorld.Parameters["xLightPower"].SetValue(1f);
        effectWorld.Parameters["xAmbient"].SetValue(0.3f);

    And here is my custom HLSL effect file (*.fx); I edited it to show you only the declarations and functions related to the shadow ;-)

        // This sample uses a simple Lambert lighting model.
        float3 LightDirection = normalize(float3(-1, -1, -1));
        float3 DiffuseLight = 1.25;
        float3 AmbientLight = 0.25;
        uniform const float3 DiffuseColor = 1;
        uniform const float Alpha = 1;
        uniform const float3 EmissiveColor = 0;
        uniform const float3 SpecularColor = 1;
        uniform const float SpecularPower = 16;
        uniform const float3 EyePosition;

        // Fog attributes
        uniform const float FogEnabled;
        uniform const float FogStart;
        uniform const float FogEnd;
        uniform const float3 FogColor;
        float3 cameraPos : CAMERAPOS;

        texture Texture;
        sampler Sampler = sampler_state
        {
            Texture = (Texture);
            magfilter = LINEAR; minfilter = LINEAR; mipfilter = LINEAR;
            AddressU = mirror; AddressV = mirror;
        };

        texture xShadowMap;
        sampler ShadowMapSampler = sampler_state
        {
            Texture = <xShadowMap>;
            magfilter = LINEAR; minfilter = LINEAR; mipfilter = LINEAR;
            AddressU = clamp; AddressV = clamp;
        };

        /* *************** */
        /* SHADOW MAP CODE */
        /* *************** */
        struct SMapVertexToPixel
        {
            float4 Position : POSITION;
            float4 Position2D : TEXCOORD0;
        };

        struct SMapPixelToFrame
        {
            float4 Color : COLOR0;
        };

        struct SSceneVertexToPixel
        {
            float4 Position : POSITION;
            float4 Pos2DAsSeenByLight : TEXCOORD0;
            float2 TexCoords : TEXCOORD1;
            float3 Normal : TEXCOORD2;
            float4 Position3D : TEXCOORD3;
        };

        struct SScenePixelToFrame
        {
            float4 Color : COLOR0;
        };

        float DotProduct(float3 lightPos, float3 pos3D, float3 normal)
        {
            float3 lightDir = normalize(pos3D - lightPos);
            return dot(-lightDir, normal);
        }

        SSceneVertexToPixel ShadowedSceneVertexShader(float4 inPos : POSITION, float2 inTexCoords : TEXCOORD0, float3 inNormal : NORMAL)
        {
            SSceneVertexToPixel Output = (SSceneVertexToPixel)0;
            Output.Position = mul(inPos, xWorldViewProjection);
            Output.Pos2DAsSeenByLight = mul(inPos, xLightsWorldViewProjection);
            Output.Normal = normalize(mul(inNormal, (float3x3)World));
            Output.Position3D = mul(inPos, World);
            Output.TexCoords = inTexCoords;
            return Output;
        }

        SScenePixelToFrame ShadowedScenePixelShader(SSceneVertexToPixel PSIn)
        {
            SScenePixelToFrame Output = (SScenePixelToFrame)0;
            float2 ProjectedTexCoords;
            ProjectedTexCoords[0] = PSIn.Pos2DAsSeenByLight.x / PSIn.Pos2DAsSeenByLight.w / 2.0f + 0.5f;
            ProjectedTexCoords[1] = -PSIn.Pos2DAsSeenByLight.y / PSIn.Pos2DAsSeenByLight.w / 2.0f + 0.5f;
            float diffuseLightingFactor = 0;
            if ((saturate(ProjectedTexCoords).x == ProjectedTexCoords.x) && (saturate(ProjectedTexCoords).y == ProjectedTexCoords.y))
            {
                float depthStoredInShadowMap = tex2D(ShadowMapSampler, ProjectedTexCoords).r;
                float realDistance = PSIn.Pos2DAsSeenByLight.z / PSIn.Pos2DAsSeenByLight.w;
                if ((realDistance - 1.0f / 100.0f) <= depthStoredInShadowMap)
                {
                    diffuseLightingFactor = DotProduct(xLightPos, PSIn.Position3D, PSIn.Normal);
                    diffuseLightingFactor = saturate(diffuseLightingFactor);
                    diffuseLightingFactor *= xLightPower;
                }
            }
            float4 baseColor = tex2D(Sampler, PSIn.TexCoords);
            Output.Color = baseColor * (diffuseLightingFactor + xAmbient);
            return Output;
        }

        SMapVertexToPixel ShadowMapVertexShader(float4 inPos : POSITION)
        {
            SMapVertexToPixel Output = (SMapVertexToPixel)0;
            Output.Position = mul(inPos, xLightsWorldViewProjection);
            Output.Position2D = Output.Position;
            return Output;
        }

        SMapPixelToFrame ShadowMapPixelShader(SMapVertexToPixel PSIn)
        {
            SMapPixelToFrame Output = (SMapPixelToFrame)0;
            Output.Color = PSIn.Position2D.z / PSIn.Position2D.w;
            return Output;
        }

        /* ******************* */
        /* END SHADOW MAP CODE */
        /* ******************* */

        // For rendering without instancing.
        technique ShadowMap
        {
            pass Pass0
            {
                VertexShader = compile vs_2_0 ShadowMapVertexShader();
                PixelShader = compile ps_2_0 ShadowMapPixelShader();
            }
        }

        technique ShadowedScene
        {
            /*
            pass Pass0
            {
                VertexShader = compile vs_2_0 VSBasicTx();
                PixelShader = compile ps_2_0 PSBasicTx();
            }
            */
            pass Pass1
            {
                VertexShader = compile vs_2_0 ShadowedSceneVertexShader();
                PixelShader = compile ps_2_0 ShadowedScenePixelShader();
            }
        }

        technique SimpleFog
        {
            pass Pass0
            {
                VertexShader = compile vs_2_0 VSBasicTx();
                PixelShader = compile ps_2_0 PSBasicTx();
            }
        }
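
    A side note on what usually causes this symptom: any geometry outside the light's view/projection frustum falls outside the shadow map, so it never receives the diffuse term and stays at ambient. For a sun-style light that must cover the whole map, an orthographic projection sized to the map bounds is the usual fix. A minimal sketch, assuming a roughly 105x105-unit map as implied by the look-at target above (the position and bounds here are illustrative, not from the original post):

        // Hypothetical: directional-style light looking straight down at the map centre.
        // The up vector must not be parallel to the view direction, hence Vector3.Forward.
        Vector3 lightPos = new Vector3(52, 100, 52);
        Matrix lightsView = Matrix.CreateLookAt(lightPos, new Vector3(52, 0, 52), Vector3.Forward);
        // Orthographic volume wide and deep enough to enclose the entire map.
        Matrix lightsProjection = Matrix.CreateOrthographic(120f, 120f, 1f, 200f);
        lightsViewProjectionMatrix = lightsView * lightsProjection;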

    Read the article

  • Complete Guide to Symbolic Links (symlinks) on Windows or Linux

    - by Matthew Guay
    Want to easily access folders and files from different folders without maintaining duplicate copies? Here's how you can use symbolic links to link anything in Windows 7, Vista, XP, and Ubuntu.

    So What Are Symbolic Links Anyway?

    Symbolic links, otherwise known as symlinks, are basically advanced shortcuts. You can create symbolic links to individual files or folders, and they will appear as though they are stored in the folder containing the symbolic link, even though the link only points to their real location. There are two types of symbolic links: hard and soft. Soft symbolic links work essentially the same as a standard shortcut: when you open a soft link, you are redirected to the folder where the files are stored. A hard link, however, makes it appear as though the file or folder actually exists at the location of the symbolic link, and your applications won't know any different. Thus, hard links are of the most interest in this article.

    Why Should I Use Symbolic Links?

    There are many things we use symbolic links for, so here are some of the top uses we can think of:

    Sync any folder with Dropbox - say, sync your Pidgin profile across computers
    Move the settings folder for any program from its original location
    Store your Music/Pictures/Videos on a second hard drive, but make them show up in your standard Music/Pictures/Videos folders so they'll be detected by your media programs (Windows 7 Libraries can also be good for this)
    Keep important files accessible from multiple locations
    And more!

    If you want to move files to a different drive or folder and then symbolically link them, follow these steps:

    Close any programs that may be accessing that file or folder
    Move the file or folder to the new desired location
    Follow the correct instructions below for your operating system to create the symbolic link

    Caution: Make sure to never create a symbolic link inside of a symbolic link. For instance, don't create a symbolic link to a file that's contained in a symbolically linked folder. This can create a loop, which can cause millions of problems you don't want to deal with. Seriously.

    Create Symlinks in Any Edition of Windows in Explorer

    Creating symlinks is usually difficult, but thanks to the free Link Shell Extension, you can create symbolic links in all modern versions of Windows pain-free. You need to download both the Visual Studio 2005 redistributable, which contains the necessary prerequisites, and Link Shell Extension itself (links below). Download the correct version (32-bit or 64-bit) for your computer. Run and install the Visual Studio 2005 Redistributable installer first, then install the Link Shell Extension on your computer. Your taskbar will temporarily disappear during the install, but will quickly come back.

    Now you're ready to start creating symbolic links. Browse to the folder or file you want to create a symbolic link from. Right-click the folder or file and select Pick Link Source. To create your symlink, right-click in the folder where you wish to save the symbolic link, select "Drop as…", and then choose the type of link you want. You can choose from several different options here; we chose the Hardlink Clone, which creates a hard link to the file or folder we selected. The Symbolic link option creates a soft link, while the Smart Copy will fully copy a folder containing symbolic links without breaking them. These options can be useful as well.

    Here's our hard-linked folder on our desktop. Notice that the folder looks like its contents are stored in Desktop\Downloads, when they are actually stored in C:\Users\Matthew\Desktop\Downloads. Also, when links are created with the Link Shell Extension, they have a red arrow on them so you can still differentiate them. And this works the same way in XP as well.

    Symlinks via Command Prompt

    Or, for geeks who prefer working via the command line, here's how you can create symlinks in Command Prompt in Windows 7/Vista and XP.

    In Windows 7/Vista

    In Windows Vista and 7, we'll use the mklink command to create symbolic links. To use it, we have to open an administrator Command Prompt: enter "command" in your Start menu search, right-click on Command Prompt, and select "Run as administrator". To create a symbolic link, we need to enter the following in the command prompt:

        mklink /prefix link_path file/folder_path

    First, choose the correct prefix. Mklink can create several types of links, including the following:

    /D - creates a soft symbolic link, which is similar to a standard folder or file shortcut in Windows. This is the default option, and mklink will use it if you do not enter a prefix.
    /H - creates a hard link to a file
    /J - creates a hard link to a directory or folder

    So, once you've chosen the correct prefix, you need to enter the path you want for the symbolic link, and the path to the original file or folder. For example, if I wanted a folder in my Dropbox folder to appear as though it were also stored on my desktop, I would enter the following:

        mklink /J C:\Users\Matthew\Desktop\Dropbox C:\Users\Matthew\Documents\Dropbox

    Note that the first path is the symbolic folder I want to create, while the second path is the real folder. Here, in this command prompt screenshot, you can see that I created a symbolic link of my Music folder to my desktop. And here's how it looks in Explorer. Note that all of my music is "really" stored in C:\Users\Matthew\Music, but here it looks like it is stored in C:\Users\Matthew\Desktop\Music.

    If your path has any spaces in it, you need to place quotes around it. Note also that the link can have a different name than the file it links to. For example, here I'm going to create a symbolic link to a document on my desktop:

        mklink /H "C:\Users\Matthew\Desktop\ebook.pdf" "C:\Users\Matthew\Downloads\Before You Call Tech Support.pdf"

    Don't forget the syntax:

        mklink /prefix link_path target_file/folder_path

    In Windows XP

    Windows XP doesn't include built-in command prompt support for symbolic links, but we can use the free Junction tool instead. Download Junction (link below), and unzip the folder. Now open Command Prompt (click Start, select All Programs, then Accessories, and select Command Prompt), and enter cd followed by the path of the folder where you saved Junction. Junction only creates hard symbolic links, since you can use shortcuts for soft ones. To create a hard symlink, we need to enter the following in the command prompt:

        junction -s link_path file/folder_path

    As with mklink in Windows 7 or Vista, if your file/folder path has spaces in it, make sure to put quotes around your paths. Also, as usual, your symlink can have a different name than the file/folder it points to. Here, we're going to create a symbolic link to our My Music folder on the desktop. We entered:

        junction -s "C:\Documents and Settings\Administrator\Desktop\Music" "C:\Documents and Settings\Administrator\My Documents\My Music"

    And here are the contents of our symlink. Note that the path looks like these files are stored in a Music folder directly on the Desktop, when they are actually stored in My Documents\My Music. Once again, this works with both folders and individual files.

    Please note: Junction would work the same in Windows 7 or Vista, but since those versions include a built-in symbolic link tool, we found it better to use that tool there.

    Symlinks in Ubuntu

    Unix-based operating systems have supported symbolic links since their inception, so it is straightforward to create symbolic links in Linux distros such as Ubuntu. There's no graphical way to create them like the Link Shell Extension for Windows, so we'll just do it in the Terminal. Open a terminal (open the Applications menu, select Accessories, and then click Terminal), and enter the following:

        ln -s file/folder_path link_path

    Note that this is the opposite of the Windows commands: you put the source for the link first, and the path of the link second. For example, let's create a symbolic link to our Pictures folder on our Desktop. To do this, we entered:

        ln -s /home/maguay/Pictures /home/maguay/Desktop

    Once again, here are the contents of our symlink folder. The pictures look as if they're stored directly in a Pictures folder on the Desktop, but they are actually stored in /home/maguay/Pictures.

    Delete Symlinks

    Removing symbolic links is very simple - just delete the link! Most of the command line utilities offer a way to delete a symbolic link via the command prompt, but you don't need to go to the trouble.

    Conclusion

    Symbolic links can be very handy, and we use them constantly to help us stay organized and keep our hard drives from overflowing. Let us know how you use symbolic links on your computers!

    Download Link Shell Extension for Windows 7, Vista, and XP
    Download Junction for XP
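
    As a footnote to the Delete Symlinks section above, here are hedged examples of removing the links from the command line, using the link paths created earlier (deleting a link leaves the original data untouched):

        :: Windows: remove a directory junction or symlinked folder
        rmdir C:\Users\Matthew\Desktop\Dropbox

        :: Windows: remove a linked file
        del C:\Users\Matthew\Desktop\ebook.pdf

        # Ubuntu: remove a symbolic link
        rm /home/maguay/Desktop/Pictures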

    Read the article

  • SPARC T4-4 Beats 8-CPU IBM POWER7 on TPC-H @3000GB Benchmark

    - by Brian
    Oracle's SPARC T4-4 server delivered a world-record TPC-H @3000GB benchmark result for systems with four processors. This result beats eight-processor results from IBM (POWER7) and HP (x86). The SPARC T4-4 server also delivered better performance per core than these eight-processor systems from IBM and HP. Comparisons below are based upon system-to-system comparisons, highlighting Oracle's complete software and hardware solution. This database world-record result used Oracle's Sun Storage 2540-M2 arrays (rotating disk) connected to a SPARC T4-4 server running Oracle Solaris 11 and Oracle Database 11g Release 2, demonstrating the power of Oracle's integrated hardware and software solution.

    The SPARC T4-4 server based configuration achieved a TPC-H scale factor 3000 world record for four-processor systems of 205,792 QphH@3000GB with price/performance of $4.10/QphH@3000GB.
    The SPARC T4-4 server with four SPARC T4 processors (total of 32 cores) is 7% faster than the IBM Power 780 server with eight POWER7 processors (total of 32 cores) on the TPC-H @3000GB benchmark.
    The SPARC T4-4 server is 36% better in price/performance compared to the IBM Power 780 server on the TPC-H @3000GB benchmark.
    The SPARC T4-4 server is 29% faster than the IBM Power 780 for data loading.
    The SPARC T4-4 server is up to 3.4 times faster than the IBM Power 780 server for the Refresh Function.
    The SPARC T4-4 server with four SPARC T4 processors is 27% faster than the HP ProLiant DL980 G7 server with eight x86 processors on the TPC-H @3000GB benchmark.
    The SPARC T4-4 server is 52% faster than the HP ProLiant DL980 G7 server for data loading.
    The SPARC T4-4 server is up to 3.2 times faster than the HP ProLiant DL980 G7 for the Refresh Function.
    The SPARC T4-4 server achieved a peak IO rate from the Oracle database of 17 GB/sec. This rate was independent of the storage used, as demonstrated by the TPC-H @3000GB benchmark, which used twelve Sun Storage 2540-M2 arrays (rotating disk), and the TPC-H @1000GB benchmark, which used four Sun Storage F5100 Flash Array devices (flash storage). [*]
    The SPARC T4-4 server showed linear scaling from TPC-H @1000GB to TPC-H @3000GB. This demonstrates that the SPARC T4-4 server can handle the increasingly larger databases required of DSS systems. [*]
    The SPARC T4-4 server benchmark results demonstrate a complete solution of building Decision Support Systems including data loading, business questions, and refreshing data. Each phase usually has a time constraint, and the SPARC T4-4 server shows superior performance during each phase. [*]

    [*] The TPC believes that comparisons of results published with different scale factors are misleading and discourages such comparisons.

    Performance Landscape

    The table below lists the leading TPC-H @3000GB results for non-clustered systems.
    TPC-H @3000GB, Non-Clustered Systems

        System                   Processor                  P/C/T - Memory        Composite (QphH)  $/perf ($/QphH)  Power (QppH)  Throughput (QthH)  Database         Available
        SPARC Enterprise M9000   3.0 GHz SPARC64 VII+       64/256/256 - 1024 GB  386,478.3         $18.19           316,835.8     471,428.6          Oracle 11g R2    09/22/11
        SPARC T4-4               3.0 GHz SPARC T4           4/32/256 - 1024 GB    205,792.0         $4.10            190,325.1     222,515.9          Oracle 11g R2    05/31/12
        SPARC Enterprise M9000   2.88 GHz SPARC64 VII       32/128/256 - 512 GB   198,907.5         $15.27           182,350.7     216,967.7          Oracle 11g R2    12/09/10
        IBM Power 780            4.1 GHz POWER7             8/32/128 - 1024 GB    192,001.1         $6.37            210,368.4     175,237.4          Sybase 15.4      11/30/11
        HP ProLiant DL980 G7     2.27 GHz Intel Xeon X7560  8/64/128 - 512 GB     162,601.7         $2.68            185,297.7     142,685.6          SQL Server 2008  10/13/10

    P/C/T = Processors, Cores, Threads
    QphH = the Composite Metric (bigger is better)
    $/QphH = the Price/Performance metric in USD (smaller is better)
    QppH = the Power Numerical Quantity
    QthH = the Throughput Numerical Quantity

    The following table lists data load times and refresh function times during the power run.

    TPC-H @3000GB, Non-Clustered Systems - Database Load & Database Refresh

        System                   Processor                  Data Loading (h:m:s)  T4 Advan.  RF1 (sec)  T4 Advan.  RF2 (sec)  T4 Advan.
        SPARC T4-4               3.0 GHz SPARC T4           04:08:29              1.0x       67.1       1.0x       39.5       1.0x
        IBM Power 780            4.1 GHz POWER7             05:51:50              1.5x       147.3      2.2x       133.2      3.4x
        HP ProLiant DL980 G7     2.27 GHz Intel Xeon X7560  08:35:17              2.1x       173.0      2.6x       126.3      3.2x

    Data Loading = database load time
    RF1 = power test first refresh transaction
    RF2 = power test second refresh transaction
    T4 Advan. = the ratio of the system's time to the SPARC T4-4 time

    Complete benchmark results can be found at the TPC benchmark website: http://www.tpc.org.

    Configuration Summary and Results

    Hardware Configuration:
    SPARC T4-4 server
    4 x SPARC T4 3.0 GHz processors (total of 32 cores, 128 threads)
    1024 GB memory
    8 x internal SAS (8 x 300 GB) disk drives

    External Storage:
    12 x Sun Storage 2540-M2 array storage, each with 12 x 15K RPM 300 GB drives, 2 controllers, 2 GB cache

    Software Configuration:
    Oracle Solaris 11 11/11
    Oracle Database 11g Release 2 Enterprise Edition

    Audited Results:
    Database Size: 3000 GB (Scale Factor 3000)
    TPC-H Composite: 205,792.0 QphH@3000GB
    Price/performance: $4.10/QphH@3000GB
    Available: 05/31/2012
    Total 3 year Cost: $843,656
    TPC-H Power: 190,325.1
    TPC-H Throughput: 222,515.9
    Database Load Time: 4:08:29

    Benchmark Description

    The TPC-H benchmark is a performance benchmark established by the Transaction Processing Performance Council (TPC) to demonstrate Data Warehousing/Decision Support Systems (DSS). TPC-H measurements are produced for customers to evaluate the performance of various DSS systems. These queries and updates are executed against a standard database under controlled conditions. Performance projections and comparisons between different TPC-H database sizes (100GB, 300GB, 1000GB, 3000GB, 10000GB, 30000GB and 100000GB) are not allowed by the TPC.

    TPC-H is a data warehousing-oriented, non-industry-specific benchmark that consists of a large number of complex queries typical of decision support applications. It also includes some insert and delete activity that is intended to simulate loading and purging data from a warehouse. TPC-H measures the combined performance of a particular database manager on a specific computer system. The main performance metric reported by TPC-H is the TPC-H Composite Query-per-Hour Performance Metric (QphH@SF, where SF is the number of GB of raw data, referred to as the scale factor).
    QphH@SF is intended to summarize the ability of the system to process queries in both single and multiple user modes. The benchmark requires reporting of price/performance, which is the ratio of the total HW/SW cost plus 3 years of maintenance to the QphH. A secondary metric is storage efficiency, which is the ratio of total configured disk space in GB to the scale factor.

    Key Points and Best Practices

    Twelve Sun Storage 2540-M2 arrays were used for the benchmark. Each Sun Storage 2540-M2 array contains 12 15K RPM drives and is connected to a single dual-port 8Gb FC HBA using 2 ports. Each Sun Storage 2540-M2 array showed 1.5 GB/sec for sequential read operations and showed linear scaling, achieving 18 GB/sec with twelve arrays. These were standalone IO tests. The peak IO rate measured from the Oracle database was 17 GB/sec.

    Oracle Solaris 11 11/11 required very little system tuning.

    Some vendors try to make the point that storage ratios are of customer concern. However, storage ratio size has more to do with disk layout and the increasing capacities of disks, so this is not an important metric by which to compare systems.

    The SPARC T4-4 server and Oracle Solaris efficiently managed the system load of over one thousand Oracle Database parallel processes. Six Sun Storage 2540-M2 arrays were mirrored to another six Sun Storage 2540-M2 arrays, on which all of the Oracle database files were placed. IO performance was high and balanced across all the arrays.

    The TPC-H Refresh Function (RF) simulates the periodic refresh portion of a data warehouse by adding new sales data and deleting old sales data. Parallel DML (parallel insert and delete in this case) and database log performance are key for this function, and the SPARC T4-4 server outperformed both the IBM POWER7 server and the HP ProLiant DL980 G7 server. (See the RF columns above.)

    See Also

    Transaction Processing Performance Council (TPC) Home Page
    Ideas International Benchmark Page
    SPARC T4-4 Server oracle.com OTN
    Oracle Solaris oracle.com OTN
    Oracle Database 11g Release 2 Enterprise Edition oracle.com OTN
    Sun Storage 2540-M2 Array oracle.com OTN

    Disclosure Statement

    TPC-H, QphH, $/QphH are trademarks of the Transaction Processing Performance Council (TPC). For more information, see www.tpc.org. SPARC T4-4 205,792.0 QphH@3000GB, $4.10/QphH@3000GB, available 5/31/12, 4 processors, 32 cores, 256 threads; IBM Power 780 192,001.1 QphH@3000GB, $6.37/QphH@3000GB, available 11/30/11, 8 processors, 32 cores, 128 threads; HP ProLiant DL980 G7 162,601.7 QphH@3000GB, $2.68/QphH@3000GB, available 10/13/10, 8 processors, 64 cores, 128 threads.

    Read the article

  • Should MVVM ViewModel inject an HTML template for default view?

    - by explunit
    I'm working on a web application design that includes Knockout.js, and have an overall MVVM question: does it make sense for the ViewModel to automatically inject a default HTML template (pulled from a separate file)?

    More detail: suppose I have a site laid out like this...

        header
        widget 1
        widget 2
        widget 3
        footer

    ...and widgets 1/2/3 are Knockout.js ViewModels determined at runtime from an overall list of available widgets, each with an associated HTML template file. I understand that in MVVM you want the view (the HTML template in this case) to be separate from the ViewModel (the JavaScript file in this case) so that people can edit it separately and possibly provide multiple templates for different "skins". However, it seems like it would also make sense for the ViewModel to point to a default HTML template that gets used automatically unless the controlling code provides a different one. Am I looking at this correctly? As an example, see this answer on Stack Overflow where he recommends injecting the HTML and then the ViewModel. Seems like a one-liner would make more sense in that case, with the possibility of overriding a default template value.
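
    For what it's worth, a minimal sketch of the pattern being asked about, assuming Knockout plus jQuery for the template injection (all names here are illustrative, not from the original question):

        // Hypothetical widget ViewModel carrying a default template reference.
        function WidgetViewModel(options) {
            options = options || {};
            this.title = ko.observable(options.title || "Untitled widget");
            // Used unless the controlling code supplies an override.
            this.templateUrl = options.templateUrl || "/templates/widget-default.html";
        }

        // Controlling code: fetch the template, inject it, then bind the ViewModel.
        function renderWidget(vm, container) {
            $.get(vm.templateUrl, function (html) {
                $(container).html(html);
                ko.applyBindings(vm, container);
            });
        }

        // Uses the default template:
        renderWidget(new WidgetViewModel({ title: "Widget 1" }), document.getElementById("w1"));
        // Overrides it with a "skin":
        renderWidget(new WidgetViewModel({ title: "Widget 2", templateUrl: "/templates/widget-compact.html" }),
                     document.getElementById("w2"));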

    Read the article

  • How do I troubleshoot a segfault in Ubuntu that occurs when typing a bogus command?

    - by Alan
    We've got a production server running Ubuntu 11.10. We're encountering segfaults that appear under various conditions. The simplest reproducible case is when we log in to an SSH session as our administrative user and enter a bogus command. You'd expect the standard "command not found" error message. Instead, we get a segfault in Python. The user's default shell is /bin/bash. For example:

        $ asdf
        Segmentation fault

    Info from /var/log/syslog:

        Jul  6 15:39:20 PROD001 kernel: [2155960.605695] python[7873]: segfault at 0 ip (null) sp 00007fffd030b808 error 14 in python2.7[400000+233000]

    Some details about the server:

        $ uname -a
        Linux PROD001 3.0.0-16-server #29-Ubuntu SMP Tue Feb 14 13:08:12 UTC 2012 x86_64 x86_64 x86_64 GNU/Linux
        $ cat /etc/issue
        Ubuntu 11.10 \n \l

    Before we ask the IT department to reinstall the OS, I'd like to understand what got us here. The system and/or this particular user's environment is suspect. Many people have touched this server over the past year, so I'm wondering if it is missing libraries, has incorrectly installed packages, etc. I'm hoping that if we can understand what's going wrong in this case, it will help explain why we're getting segfaults in a couple of other scenarios. Any tips on troubleshooting this segfault will be appreciated!
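
    One hedged line of investigation, given that the crash lands in python2.7: on Ubuntu, an unknown command typed at an interactive bash prompt is handed to the command_not_found_handle shell function, which runs /usr/lib/command-not-found - a Python script. So the segfault may be in that layer rather than in bash itself. Something like the following (paths are the Ubuntu defaults; worth verifying locally) would isolate it:

        # Show the handler bash invokes for unknown commands
        declare -f command_not_found_handle

        # Run the underlying helper directly; if this segfaults,
        # the problem is in the command-not-found/Python stack
        /usr/lib/command-not-found asdf

        # Bypass the handler entirely; a plain "not found" here
        # confirms the shell itself is fine
        sh -c 'asdf'

        # Verify the helper's files against the package database
        # (debsums may need to be installed first)
        dpkg -S /usr/lib/command-not-found
        debsums command-not-found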

    Read the article

  • How do I give a user permission to view scheduled task history on Server 2008?

    - by pplrppl
    I've set up a scheduled task on Server 2008 and want to run it as a user other than the local administrator. So I chose a domain account created specifically for this task, and once I'd closed the scheduled task and entered a valid password, I wanted to run it and look at the History tab for this task. On the History tab I see: "The user account does not have permission to view task history on this computer." What permission must I grant to allow this user to view history, and/or how can I view the history as a local admin/domain admin instead of the user the job will run under?

    Steps to hopefully reproduce: I'm starting from the Server Manager - Configuration - Task Scheduler - Task Scheduler Library. In the top middle pane I have tasks that have been running for several months as the local administrator. In the process of troubleshooting another issue I changed the task to run as Domain\ABCuser. Later in the process of troubleshooting I tried unchecking "Run with highest privileges". I have since changed the job back to SERVERNAME\Administrator, but the History tab still showed the permissions message. I may have had multiple Server Manager windows open. After closing the Server Manager and making sure no other management consoles were open, I was able to reopen the Server Manager and see the History tab without error. At this point the task works properly, but should I ever need to run a task as a task-specific account, I'd like to know how to make the history viewable. It may be something as simple as closing all Server Manager windows to allow cached permissions to be refreshed the next time you open the Manager, but at this point I don't know exactly what the solution is.
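
    For reference, Task Scheduler's History tab reads from the Microsoft-Windows-TaskScheduler/Operational event log, so the permission in question is read access on that channel. A hedged way to inspect and adjust it with wevtutil from an elevated prompt (the SDDL shown is a placeholder; you would append a read ACE for your task account's SID to whatever channelAccess string the first command reports):

        :: Show the channel configuration, including its channelAccess SDDL
        wevtutil gl Microsoft-Windows-TaskScheduler/Operational

        :: Example only - reapply the reported SDDL with an extra
        :: (A;;0x1;;;<account SID>) read ACE appended to it
        wevtutil sl Microsoft-Windows-TaskScheduler/Operational /ca:<modified SDDL>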

    Read the article

  • Can't send commands via SSH to Juniper firewalls

    - by Massimo
    I have some Juniper SSG firewalls which I need to manage, and I'd like to be able to send commands to them from some monitoring scripts. I configured SSH access using public keys, and I'm able to log in to the firewalls automatically. When I run SSH interactively, everything works fine:

        $ ssh <firewall IP>
        FIREWALL-> <command>
        <command output>
        FIREWALL-> exit
        Connection to <firewall IP> closed.
        $

    But when I try to run the command from the command line, it doesn't work:

        $ ssh <firewall IP> <command>
        $

    This, of course, works fine when sending a command to a remote Linux box:

        $ ssh <linux box IP> <command>
        <command output>
        $

    Why is this happening? What is the difference between running SSH interactively and specifying the command to run on the SSH command line?

    Update: It also works fine with a Cisco router. Only these Juniper firewalls seem to behave this way. From the debug output from SSH, it looks like the connection gets established correctly, but the Juniper box replies with an EOF when sending the command, while the Linux box replies with the actual command output.

    Linux:

        debug1: Authentication succeeded (publickey).
        debug1: channel 0: new [client-session]
        debug2: channel 0: send open
        debug1: Entering interactive session.
        debug2: callback start
        debug2: client_session2_setup: id 0
        debug1: Sending command: uptime
        debug2: channel 0: request exec confirm 0
        debug2: callback done
        debug2: channel 0: open confirm rwindow 0 rmax 32768
        debug2: channel 0: rcvd adjust 131072
        debug1: client_input_channel_req: channel 0 rtype exit-status reply 0
        16:44:44 up 25 days, 1:06, 3 users, load average: 0.08, 0.02, 0.01
        debug2: channel 0: rcvd eof
        debug2: channel 0: output open -> drain
        debug2: channel 0: obuf empty
        debug2: channel 0: close_write
        debug2: channel 0: output drain -> closed
        debug2: channel 0: rcvd close
        debug2: channel 0: close_read
        debug2: channel 0: input open -> closed
        debug2: channel 0: almost dead
        debug2: channel 0: gc: notify user
        debug2: channel 0: gc: user detached
        debug2: channel 0: send close
        debug2: channel 0: is dead
        debug2: channel 0: garbage collecting
        debug1: channel 0: free: client-session, nchannels 1
        debug1: Transferred: stdin 0, stdout 0, stderr 0 bytes in 0.1 seconds
        debug1: Bytes per second: stdin 0.0, stdout 0.0, stderr 0.0
        debug1: Exit status 0

    Juniper:

        debug1: Authentication succeeded (publickey).
        debug1: channel 0: new [client-session]
        debug2: channel 0: send open
        debug1: Entering interactive session.
        debug2: callback start
        debug2: client_session2_setup: id 0
        debug1: Sending environment.
        debug1: Sending env LANG = en_US.UTF-8
        debug2: channel 0: request env confirm 0
        debug1: Sending command: get system
        debug2: channel 0: request exec confirm 0
        debug2: callback done
        debug2: channel 0: open confirm rwindow 2048 rmax 1024
        debug2: channel 0: rcvd eof
        debug2: channel 0: output open -> drain
        debug2: channel 0: obuf empty
        debug2: channel 0: close_write
        debug2: channel 0: output drain -> closed
        debug1: client_input_channel_req: channel 0 rtype exit-status reply 0
        debug2: channel 0: rcvd close
        debug2: channel 0: close_read
        debug2: channel 0: input open -> closed
        debug2: channel 0: almost dead
        debug2: channel 0: gc: notify user
        debug2: channel 0: gc: user detached
        debug2: channel 0: send close
        debug2: channel 0: is dead
        debug2: channel 0: garbage collecting
        debug1: channel 0: free: client-session, nchannels 1
        debug1: Transferred: stdin 0, stdout 0, stderr 0 bytes in 0.2 seconds
        debug1: Bytes per second: stdin 0.0, stdout 0.0, stderr 0.0
        debug1: Exit status 1
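
    A hedged workaround, given that the ScreenOS CLI appears to close the exec channel immediately (Exit status 1 with no output): make the session look interactive instead of using the exec channel. Both of the following use standard OpenSSH options, though the behaviour will vary by ScreenOS version and is worth testing:

        # Force a pseudo-terminal, as an interactive login would get
        ssh -t <firewall IP> "get system"

        # Or drive the interactive CLI over stdin
        echo "get system" | ssh <firewall IP>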

    Read the article

  • Running django custom management commands with supervisord

    - by mfsaint
    I'd like to use supervisord to run some commands for my Django project, but I keep getting the following error in supervisor.log:

        2012-05-18 17:52:15,784 INFO spawnerr: can't find command 'source'

    If I remove the "source" command, the log shows the same error for the next line: can't find command 'python'.

    supervisord.conf excerpt:

        [program:django]
        directory=/home/mf/projects/djangopj/
        command=beanstalkd -l 127.0.0.1 -p 11300
        command=source /home/mf/virtualenvs/env/bin/activate
        command=python manage.py command1
        command=python manage.py command2
        user=mf
        autostart=true
        autorestart=true

    I tried removing the directory and adding the absolute path to the commands, but I kept getting the same error. I run supervisord with the following command:

        supervisord -c supervisord.conf -l supervisor.log
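
    For context, supervisord accepts only one command per [program] section and does not run it through a shell, which is why a shell builtin like source can never be found. A hedged sketch of a working layout (paths from the question; section names are illustrative): invoke the virtualenv's python binary directly, which replaces the activate step, and give each long-running process its own section.

        [program:beanstalkd]
        command=beanstalkd -l 127.0.0.1 -p 11300
        user=mf
        autostart=true
        autorestart=true

        [program:django-command1]
        directory=/home/mf/projects/djangopj/
        ; The virtualenv's interpreter makes "source .../activate" unnecessary
        command=/home/mf/virtualenvs/env/bin/python manage.py command1
        user=mf
        autostart=true
        autorestart=true

        [program:django-command2]
        directory=/home/mf/projects/djangopj/
        command=/home/mf/virtualenvs/env/bin/python manage.py command2
        user=mf
        autostart=true
        autorestart=true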

    Read the article

  • How do I list all installed packages from the command line, showing package name, license, source URL, etc.?

    - by YumYumYum
    How do I get the list of all installed packages with their license and source URL? The following shows only the package names:

        $ dpkg --get-selections
        acpi-support                    install
        acpid                           install
        adduser                         install
        adium-theme-ubuntu              install
        aisleriot                       install
        alacarte                        install

    For example, in Fedora/CentOS (the Red Hat branch of Linux), you can see this:

        $ yum info busybox
        Loaded plugins: auto-update-debuginfo, langpacks, presto, refresh-packagekit
        Available Packages
        Name        : busybox
        Arch        : i686
        Epoch       : 1
        Version     : 1.18.2
        Release     : 5.fc15
        Size        : 615 k
        Repo        : updates
        Summary     : Statically linked binary providing simplified versions of system commands
        URL         : http://www.busybox.net
        License     : GPLv2
        Description : Busybox is a single binary which includes versions of a large number
                    : of system commands, including a shell. This package can be very
                    : useful for recovering from certain types of system failures,
                    : particularly those involving broken shared libraries.

    Follow-up:

        /var/lib/apt/lists$ ls
        extras.ubuntu.com_ubuntu_dists_natty_main_binary-amd64_Packages
        extras.ubuntu.com_ubuntu_dists_natty_main_source_Sources
        extras.ubuntu.com_ubuntu_dists_natty_Release
        extras.ubuntu.com_ubuntu_dists_natty_Release.gpg
        lock
        partial
        security.ubuntu.com_ubuntu_dists_natty-security_main_binary-amd64_Packages
        security.ubuntu.com_ubuntu_dists_natty-security_main_source_Sources
        security.ubuntu.com_ubuntu_dists_natty-security_multiverse_binary-amd64_Packages
        security.ubuntu.com_ubuntu_dists_natty-security_multiverse_source_Sources
        security.ubuntu.com_ubuntu_dists_natty-security_Release
        security.ubuntu.com_ubuntu_dists_natty-security_Release.gpg
        security.ubuntu.com_ubuntu_dists_natty-security_restricted_binary-amd64_Packages
        security.ubuntu.com_ubuntu_dists_natty-security_restricted_source_Sources
        security.ubuntu.com_ubuntu_dists_natty-security_universe_binary-amd64_Packages
        security.ubuntu.com_ubuntu_dists_natty-security_universe_source_Sources
        us.archive.ubuntu.com_ubuntu_dists_natty_main_binary-amd64_Packages
        us.archive.ubuntu.com_ubuntu_dists_natty_main_source_Sources
        us.archive.ubuntu.com_ubuntu_dists_natty_multiverse_binary-amd64_Packages
        us.archive.ubuntu.com_ubuntu_dists_natty_multiverse_source_Sources
        us.archive.ubuntu.com_ubuntu_dists_natty_Release
        us.archive.ubuntu.com_ubuntu_dists_natty_Release.gpg
        us.archive.ubuntu.com_ubuntu_dists_natty_restricted_binary-amd64_Packages
        us.archive.ubuntu.com_ubuntu_dists_natty_restricted_source_Sources
        us.archive.ubuntu.com_ubuntu_dists_natty_universe_binary-amd64_Packages
        us.archive.ubuntu.com_ubuntu_dists_natty_universe_source_Sources
        us.archive.ubuntu.com_ubuntu_dists_natty-updates_main_binary-amd64_Packages
        us.archive.ubuntu.com_ubuntu_dists_natty-updates_main_source_Sources
        us.archive.ubuntu.com_ubuntu_dists_natty-updates_multiverse_binary-amd64_Packages
        us.archive.ubuntu.com_ubuntu_dists_natty-updates_multiverse_source_Sources
        us.archive.ubuntu.com_ubuntu_dists_natty-updates_Release
        us.archive.ubuntu.com_ubuntu_dists_natty-updates_Release.gpg
        us.archive.ubuntu.com_ubuntu_dists_natty-updates_restricted_binary-amd64_Packages
        us.archive.ubuntu.com_ubuntu_dists_natty-updates_restricted_source_Sources
        us.archive.ubuntu.com_ubuntu_dists_natty-updates_universe_binary-amd64_Packages
        us.archive.ubuntu.com_ubuntu_dists_natty-updates_universe_source_Sources
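
    On Debian/Ubuntu there is no single field holding the license, but a hedged approximation combines dpkg-query (for name, version, and upstream homepage, where the packager provided one) with each package's copyright file:

        # Name, version and upstream homepage for every installed package
        dpkg-query -W -f='${Package}\t${Version}\t${Homepage}\n'

        # License details live in each package's copyright file
        cat /usr/share/doc/busybox/copyright    # substitute any package name

        # Full metadata for one package, roughly comparable to "yum info"
        apt-cache show busybox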

    Read the article

  • Help with complex MVVM (multiple views)

    - by jsjslim
    I need help creating view models for the following scenario:

    Deep, hierarchical data
    Multiple views for the same set of data
    Each view is a single, dynamically-changing view, based on the active selection
    Depending on the value of a property, display different types of tabs in a tab control

    My questions: should I create a view-model representation for each view (VM1, VM2, etc.)?

    1. Yes:
       a. Should I model the entire hierarchical relationship? (i.e., SubVM1, HouseVM1, RoomVM1)
       b. How do I keep all hierarchies in sync? (e.g., adding/removing nodes)
    2. No:
       a. Do I use a huge, single view model that caters to all views?

    Here's an example of a single view:

    Figure 1: Multiple views updated based on the active room. Notice the tab control.
    Figure 2: A different active room; multiple views updated; tab control items changed based on an object's property.
    Figure 3: A different selection type; the entire view changes.

    Read the article

  • The Oracle business intelligence suite runs on Windows Server 2008, and the client can run Vista

    - by Fekete Zoltán
    Yesterday at the Oracle BI Hands On event the question came up whether Oracle Business Intelligence Enterprise Edition runs on Windows Server 2008. The answer: YES, Oracle BI EE runs on Windows Server 2008. And the answer to the other question is also YES: the client can be Windows Vista. Since Oracle BI is server software, which users reach through a browser for analysis and query/report building, Oracle BI is certified only on server operating systems: Linux, Solaris, HP-UX, AIX and Windows platforms.

    The currently supported operating systems:

    Microsoft Windows 2000/2003 Server; Microsoft Windows Server 2008 Enterprise Edition x86 32 bit
    Red Hat Enterprise Linux AS 4.x; Red Hat Enterprise Linux Server/Advanced Platform 5
    Novell SUSE 9.x
    Oracle Enterprise Linux 4; Oracle Enterprise Linux 5
    Sun Solaris 9 SPARC 32 bit; Sun Solaris 9 SPARC 64 bit; Sun Solaris 10 SPARC 32 bit; Sun Solaris 10 SPARC 64 bit
    AIX 5.2 PowerPC 32 bit; AIX 5.2 PowerPC 64 bit; AIX 5.3 PowerPC 32 bit; AIX 5.3 PowerPC 64 bit; AIX 6.1 PowerPC 32 bit; AIX 6.1 PowerPC 64 bit
    HP-UX 11.11 PA-RISC 64 bit; HP-UX 11.23 PA-RISC 64 bit; HP-UX 11.23 Itanium 64 bit; HP-UX 11.31 Itanium 64 bit

    Operating systems supported for browser access to the dashboards and interactive analysis: Windows, Vista, Linux, Solaris, Apple Mac OS 10.x.

    Read the article

  • Where do I have to paste an "xinput" command so that it executes when GNOME is started?

    - by michuk
    On my ThinkPad I need to execute something like this in the terminal:

        xinput set-int-prop "TPPS/2 IBM TrackPoint" "Evdev Middle Button Emulation" 8 1

    so that the two buttons on my touchpad emulate the middle mouse click. Now I need this line to be executed each time I start GNOME (or X, or whatever), so that it "just works". I tried ~/.xsession and ~/.bashrc, but to no avail. Should I put it in the GNOME start scripts, or in /etc/X somewhere? I'm using Ubuntu 11.10.
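
    A hedged suggestion: on Ubuntu 11.10's GNOME, the supported per-user mechanism is an autostart entry. The file name below is arbitrary, and sh -c preserves the command's quoting:

        # Save as ~/.config/autostart/trackpoint-middle-button.desktop
        [Desktop Entry]
        Type=Application
        Name=TrackPoint middle-button emulation
        Exec=sh -c 'xinput set-int-prop "TPPS/2 IBM TrackPoint" "Evdev Middle Button Emulation" 8 1'
        X-GNOME-Autostart-enabled=true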

    Read the article

  • What command should be used to connect via SSH through a Squid proxy?

    - by Raul Cardoso
    I have set up a Squid HTTP proxy (on CentOS) and intend to use it for SSH connections as well. I managed to configure PuTTY (on a Windows client) to use this proxy when connecting over SSH, and confirmed on the target host that the SSH connection was coming from the proxy server's IP instead of the Windows client's IP. Used:

        targethost:22 for ssh
        proxyserv:3128 for the proxy (along with proxy credentials)

    I'm now having problems connecting to the target host from Ubuntu through the same proxy server. I have tried the following:

        me@mycomp:~$ connect-proxy -H test@proxyserv:3128 targethost 22
        Enter proxy authentication password for test@proxyserv:
        SSH-2.0-OpenSSH_6.2p2 Ubuntu-6

    It hangs on the last line, expecting some input. All attempts resulted in a "Protocol mismatch." error. PuTTY successfully connects to the HTTP proxy and sends credentials, showing me the SSH login right away.

    How do I do the same thing PuTTY does, with commands in Ubuntu? Is there any other way than the connect-proxy command to do this?

    Edit: also tried the following, with the same result ("Protocol mismatch"):

        me@mycomp:~$ connect-proxy -H test@proxyserv:3128 targethost 22 ssh -l myshel_login

    Thanks in advance.

    Edit: solution details (thanks to NickW for pointing the right way). I installed corkscrew and added the following to ssh_config:

        Host targethost
            ProxyCommand corkscrew proxyserv 3128 %h %p /etc/ssh/proxypass

    created the proxypass file:

        login:password

    restarted ssh, and used a simple ssh command:

        ssh mylogin@targethost

    (the ssh password was asked for as usual).
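
    For a one-off connection without touching ssh_config, the same corkscrew invocation can be passed inline (host, proxy, and credentials file as in the solution above):

        ssh -o ProxyCommand="corkscrew proxyserv 3128 %h %p /etc/ssh/proxypass" mylogin@targethost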

    Read the article

  • What could cause the file command in Linux to report a text file as data?

    - by Jonah Bishop
    I have a couple of C++ source files (one .cpp and one .h) that are being reported as type "data" by the file command in Linux. When I run the file -bi command against these files, I'm given this output (the same for each file):

        application/octet-stream; charset=binary

    Each file is clearly plain text (I can view them in vi). What's causing file to misreport the type of these files? Could it be some sort of Unicode thing? Both of these files were created in Windows-land (using Visual Studio 2005), but they're being compiled in Linux (it's a cross-platform application). Any ideas would be appreciated.

    Update: I don't see any null characters in either file. I found some extended characters in the .cpp file (in a comment block) and removed them, but file still reports the same encoding. I've tried forcing the encoding in SlickEdit, but that didn't seem to have an effect. When I open the file in vim, I see a [converted] line as soon as I open the file. Perhaps I can get vim to force the encoding?
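
    A few hedged checks for the stray bytes that push file into the "data" bucket (standard tools; the iconv line assumes the offending characters are Windows-1252, which is typical for Visual Studio sources):

        # Show lines containing non-ASCII or control bytes
        grep -nP '[^\x09\x0A\x0D\x20-\x7E]' myfile.cpp

        # Inspect the first bytes for a BOM (EF BB BF = UTF-8, FF FE = UTF-16)
        hexdump -C myfile.cpp | head -n 3

        # Re-encode to UTF-8, assuming Windows-1252 input
        iconv -f WINDOWS-1252 -t UTF-8 myfile.cpp > myfile.fixed.cpp

    In vim, ":set fileencoding=utf-8 nobomb" followed by ":w" rewrites the file in a plain encoding, which is one way to act on that [converted] notice.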

    Read the article

  • Filezilla client unable to get directory listing from Filezilla Server (Windows)

    - by sestocker
    I've set up a self-signed certificate in FileZilla Server and enabled FTP over SSL/TLS. When I connect from the FileZilla client, I am able to authenticate but cannot get a directory listing:

        Status:   Connecting to MY_SERVER_IP:21...
        Status:   Connection established, waiting for welcome message...
        Response: 220-FileZilla Server version 0.9.39 beta
        Response: 220-written by Tim Kosse ([email protected])
        Response: 220 Please visit http://sourceforge.net/projects/filezilla/
        Command:  AUTH TLS
        Response: 234 Using authentication type TLS
        Status:   Initializing TLS...
        Status:   Verifying certificate...
        Command:  USER MYUSER
        Status:   TLS/SSL connection established.
        Response: 331 Password required for MYUSER
        Command:  PASS ********
        Response: 230 Logged on
        Command:  PBSZ 0
        Response: 200 PBSZ=0
        Command:  PROT P
        Response: 200 Protection level set to P
        Status:   Connected
        Status:   Retrieving directory listing...
        Command:  PWD
        Response: 257 "/" is current directory.
        Command:  TYPE I
        Response: 200 Type set to I
        Command:  PORT 10,10,25,85,219,172
        Response: 200 Port command successful
        Command:  MLSD
        Response: 150 Opening data channel for directory list.
        Response: 425 Can't open data connection.
        Error:    Failed to retrieve directory listing

    I have ports 21 and 50001 through 50005 open on the firewall. We are migrating servers; the 50001-50005 range is one of the things that helped get FTPS working on the old server. I'm not sure this installation would use the same ports? What else could be the problem?
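
    A hedged reading of the log: the 425 follows a PORT command, i.e. an active-mode data connection that the server must open back to the client, which firewalls and NAT commonly block; and over TLS the control channel is encrypted, so NAT helpers cannot rewrite the addresses. Two things worth trying: switch the client to passive mode (FileZilla client: Edit > Settings > Connection > FTP > Passive), and make sure the server's passive range matches the open ports, e.g. with Windows Firewall:

        :: Allow the passive data ports from the question through Windows Firewall
        netsh advfirewall firewall add rule name="FTPS passive data" dir=in action=allow protocol=TCP localport=50001-50005

    The matching range also has to be set in FileZilla Server's passive mode settings, along with the external IP if the server is behind NAT.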

    Read the article

  • Is it possible to execute keyboard input programmatically in Linux?

    - by Taylor Hawkes
    For example, is there a Linux command or other way that I could, from a program (C++, Python, or other), enter a series of keyboard inputs that are interpreted as though they were typed at the keyboard? I have a bad case of Repetitive Stress Injury (RSI) from typing. To ease my pain I developed a voice-controlled interface using PocketSphinx and a custom grammar to run a number of very common commands, e.g. "open chrome", "open vim". Basically what is shown here, but with slightly different tools: http://bloc.eurion.net/archives/2008/writing-a-command-and-control-application-with-voice-recognition/

    I have run into a limitation: I can only execute a command-line command for a given voice command. Rather than having a "voice command" - "command line command" mapping, I would like to have a "voice command" - "keyboard input" mapping. So when my active window is a browser and I send the mapped keystroke, a new tab opens; if I'm in vim, a new vim tab opens. Any suggestions, ideas, tools or approaches to this problem would be much appreciated. I understand the answer may not be simple, but I would like to develop it nonetheless.
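
    One hedged pointer: xdotool (packaged in Ubuntu) synthesizes X11 keyboard and mouse events for whichever window has focus, so a recognized voice command can be mapped to keystrokes rather than to a shell command:

        # Install on Debian/Ubuntu
        sudo apt-get install xdotool

        # Type literal text into the focused window
        xdotool type "hello world"

        # Send a key chord - e.g. a new tab in the focused browser
        xdotool key ctrl+t

        # Or raise a window by name first, then send the keys
        xdotool search --name "Mozilla Firefox" windowactivate
        xdotool key ctrl+t

    python-xlib, or xte from the xautomation package, are alternatives if you want to drive this from inside Python or C++.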

    Read the article

  • Controlling what data populates STAR

    - by user10747017
    Beginning with the Primavera Reporting Database 2.2 / P6 Analytics 1.2 release, the first release that supported the P6 Extended Schema, a new ability was added to filter which projects can be included during an ETL run. In previous releases, all projects were included in an ETL run. Additionally, all projects with the option to enable publication are included in the ETL run by default.

    Because the reporting needs for the P6 Extended Schema are different from those of STAR, you can define a filter that will limit the data included in the STAR schema. For example, your STAR schema can be filtered to include only the projects in a specific portfolio, or all projects with a project code assignment of 'For Analytics'. Any criteria that can be defined in a WHERE clause and added to a view can be used to filter the projects included in the STAR schema. I highly suggest this approach when dealing with large databases: unnecessary projects can cause the Extract portion of the ETL process to take longer. A table in STAR called etl_projectlist is the key to which projects are targeted during the ETL process. To set up the filter, perform the following steps:

    1. Connect to your Primavera P6 Project Management database as Pxrptuser (the extended schema owner) and create a new view:

        create or replace view star_project_view
        as
        select PROJECTOBJECTID objectid
        from projectportfolio pp, projectprojectportfolio ppp
        where pp.objectid = ppp.PROJECTPORTFOLIOOBJECTID
        and pp.name = 'STAR Projects'

    The main field that MUST be selected in the view is the projectobjectid. Selecting any other field besides the projectobjectid will make the view invalid and it will not work. Any WHERE clause can be used, but projectobjectid is the key.

    2. In your STAR installation directory, go to the \res folder and edit the staretl.properties file. Here you will define the view to be used. Add the following line, or update it if it exists:

        star.project.filter.ds1=star_project_view

    3. When running the staretl.cmd or staretl.sh process, the database link to Pxrptuser will be accessed and this view will be used to populate the etl_projectlist table with the appropriate projectobjectids, as defined in the view created in step 1 above.

    Read the article

  • HP Probook 4530s great specs, but lagging. Hard Drive?

    - by Mark
    I have this laptop, which has an i3 processor, 4 GB of memory, and a 7200 RPM hard drive, so there is nothing wrong with the specs. Even with no applications open, simply closing and opening windows lags, as does opening the Start menu or dragging icons across the desktop; sometimes even the cursor lags. So I checked the Resource Monitor. The processes producing disk activity are:

        svchost
        Avast           - my antivirus, but not much activity
        System (PID 4)  - this is using a huge chunk

    The total disk activity fluctuates between 50% and 100%.

    Read the article

  • Using Groovy Aggregate Functions in ADF BC

    - by Sireesha Pinninti
    This article explains how Groovy aggregate functions (sum, count, min, max and avg) can be used in ADF Business Components, and demonstrates how they can be used at the entity and view levels. Let's consider EMP and DEPT tables and a use case of tracking the number of employees in each department.

    Entity-Level

    To use aggregate functions at the entity level, we need an association between the entities representing the master and child relationship; the destination accessor name is what we use in our Groovy expression.

    Syntax: <Accessor>.count(GroovyExpression)

    - Note down the destination accessor name (EMP) in the association, or the accessor attribute name in the source entity
    - Add a transient attribute in the source entity with the persistent property set to false, and provide the Groovy expression in the syntax above
    - Finally, add the newly added attribute to the view object

    View-Level

    To use aggregate functions at the view level, we need a view link between the view objects representing the master and child relationship; the destination accessor name is what we use in our Groovy expression.

    Syntax: <ViewLinkAccessor>.count(GroovyExpression)

    - Note down the destination accessor name (EmpView) in the view link, or the viewLinkAccessor name in the source view
    - Add a transient attribute in the view object and provide the Groovy aggregate function count as its value, in the syntax above

    Now, if you run the Application Module tester and execute DeptView via the view link, you should see the employee count in the EmpCount field. In a similar way, you can use the other Groovy aggregate functions: sum, avg, min and max.
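
    For illustration, hedged examples of the same accessor pattern with the other aggregates (attribute names assumed from a standard EMP table, not taken from the article):

        EmpView.count("EmployeeId")   // employees per department
        EmpView.sum("Sal")            // total salary of a department
        EmpView.avg("Sal")            // average salary
        EmpView.min("Sal")            // lowest salary
        EmpView.max("Sal")            // highest salary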

    Read the article

  • How can I use the `eject` command on a computer I have SSH'd into?

    - by will
    So if I run eject on my machine, it works exactly as expected. However, if I SSH into the machine next to me and do the same thing, it does not work.

    My computer:

        eject: using default device `cdrom'
        eject: device name is `cdrom'
        eject: expanded name is `/dev/cdrom'
        eject: `/dev/cdrom' is a link to `/dev/sr0'
        eject: `/dev/sr0' is not mounted
        eject: `/dev/sr0' is not a mount point
        eject: checking if device "/dev/sr0" has a removable or hotpluggable flag
        eject: `/dev/sr0' is not a multipartition device
        eject: trying to eject `/dev/sr0' using CD-ROM eject command
        eject: CD-ROM eject command succeeded

    Other computer:

        eject: using default device `cdrom'
        eject: device name is `cdrom'
        eject: expanded name is `/dev/cdrom'
        eject: `/dev/cdrom' is a link to `/dev/sr0'
        eject: `/dev/sr0' is not mounted
        eject: `/dev/sr0' is not a mount point
        eject: checking if device "/dev/sr0" has a removable or hotpluggable flag
        eject: `/dev/sr0' is not a multipartition device
        eject: unable to open `/dev/sr0'

    If I look in the /dev/ directory, I find cdrom, which is a symlink to sr0, as mentioned by the verbose output of eject -v. On my machine, if I try to look at it while the drive is open, it will close the drive and then give this:

        $ less sr0
        sr0 is not a regular file (use -f to see it)
        $ less -f sr0
        sr0: No medium found

    But if I do it on the other computer:

        $ less -f sr0
        sr0: Permission denied

    So I look at the files more, and get this on both machines:

        $ ls -la sr0
        brw-rw----+ 1 root cdrom 11, 0 Nov 12 10:13 sr0

    Does anyone know a way around this? I do not have root access.
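
    A hedged explanation of the difference: the trailing + on brw-rw----+ means the device carries an extra POSIX ACL, and on desktop Linux the locally logged-in console user is typically granted device access that way (by udev/ConsoleKit/logind), which an SSH session does not get. Commands to confirm, no root needed:

        # Look for a user:<name>:rw- entry naming the console user
        getfacl /dev/sr0

        # Check whether your account is in the cdrom group on that machine
        groups

    If you are in the cdrom group, the group rw bits should apply after a fresh login; if not, an administrator would have to add you (or grant an ACL) before eject can open /dev/sr0 remotely.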

    Read the article

  • MVC and individual elements of the model under a common base class

    - by Stewart
    Admittedly my experience of using the MVC pattern is limited. It might be argued that I don't really separate the V from the C, though I keep the M separate from the VC to the extent I can manage. I'm considering the scenario in which the application's model includes a number of elements that have a common base class; for example, enemy characters in a video game, or shape types in a vector graphics app. The view wants to render these elements, and of course the different subclasses call for different rendering. The problem is that the elements are part of the model, while rendering them is conceptually part of the view. But how they are to be rendered depends on parameters of both:

    Attributes and state of the element are parameters of the model
    User settings are parameters of the view - and to support multiple platforms and/or view modes, different views may be used

    What's your preferred way of dealing with this?

    Put the rendering code in the model classes, passing in any view parameters?
    Put the rendering code in the view, using a switch or similar to select the right rendering for the model element type?
    Have some intermediate classes as a model-view interface, of which the model creates objects on demand and the view then renders them?
    Something else?

    Read the article

  • HP Smart Array; Is it possible to convert Raid1 to Raid0 by dropping a failed drive?

    - by Erik Heppler
    I have a server that was running two 60GB drives as a logical RAID1. At some point the second drive was physically removed and the logical drive has been in "Interim Recovery Mode" for several months now. There's no need for the redundancy of RAID1 on this machine, and I have no intention of replacing the missing drive. If possible I would like to convert the current RAID1 to a single-drive RAID0 by simply dropping the failed drive from the current configuration. I'm only interested in doing this if it can be done in-place. Otherwise I'm perfectly content to leave it in "Interim Recovery Mode" indefinitely.

    Read the article

  • gitolite on Mac doesn't add new users to authorized_keys

    - by crashbus
    I installed gitolite, and everything works fine for me as admin. But when I add a new user, the new user can't connect to the server. After looking into the authorized_keys file, I saw that the new user wasn't added to it. During the commit of the new public key I get some warnings:

        WARNING: split conf not set, gl-conf present for 'gitolite-admin'
        Counting objects: 6, done.
        Delta compression using up to 8 threads.
        Compressing objects: 100% (4/4), done.
        Writing objects: 100% (4/4), 882 bytes, done.
        Total 4 (delta 1), reused 0 (delta 0)
        remote: WARNING: split conf not set, gl-conf present for 'gitolite-admin'
        remote: WARNING: ?? @staff christianwaldmann markwelch
        remote: sh: find: command not found
        remote: sh: find: command not found
        remote: sh: sort: command not found
        remote: sh: find: command not found
        remote: /usr/local/bin/triggers/post-compile/update-gitweb-access-list: line 26: cut: command not found
        remote: /usr/local/bin/triggers/post-compile/update-gitweb-access-list: line 23: grep: command not found
        remote: /usr/local/bin/triggers/post-compile/update-gitweb-access-list: line 26: sort: command not found
        remote: /usr/local/bin/triggers/post-compile/update-gitweb-access-list: line 26: sed: command not found
        remote: sh: find: command not found
        remote: sh: find: command not found

    How can I fix this so that gitolite automatically adds the new user to authorized_keys?
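
    A hedged diagnosis: every failing program (find, sort, grep, sed, cut) lives in /usr/bin or /bin, so the PATH seen by gitolite's post-compile triggers is broken, and the run dies before authorized_keys is rewritten. One way to repair it on macOS (details vary by install; treat this as a sketch):

        # 1. In /etc/sshd_config, allow per-user environments, then restart sshd:
        #      PermitUserEnvironment yes

        # 2. In the git user's ~/.ssh/environment, provide a sane PATH:
        PATH=/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/bin

        # 3. Push the gitolite-admin repo again so the triggers rerun
        #    and authorized_keys is rebuilt with the new key.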

    Read the article

  • What are the steps needed to set up and use security for AWS command line tools?

    - by chris
    I've been trying to set up the AWS command-line tools following Eric's most useful guide at http://alestic.com/2012/09/aws-command-line-tools. I can't seem to find a good how-to for generating the X.509 certificate and private key, and for how that relates to the various security files the guide creates.

    Update: I have found a couple of links that describe some of the steps. These steps seem to work, however I'm not sure if this is secure and the best way to do it:

    1) Create a private key:

        openssl genrsa -out my-private-key.pem 2048

    2) Create the X.509 cert:

        openssl req -new -x509 -key my-private-key.pem -out my-x509-cert.pem -days 365

    Hit Enter to accept all of the defaults. Then, from the IAM dashboard, select a user and click on the "Security Credentials" tab. Click on "Manage Signing Certificates", then "Upload Signing Certificate", paste in the contents of my-x509-cert.pem, click OK, and it should be accepted.

    One step that is discussed, but was not required for me, was the addition and subsequent removal of a pass phrase on the private key. Should I have been prompted for one, and is my cert potentially unsafe because of this?
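
    On the pass-phrase point, a hedged note: the genrsa line above writes an unencrypted key, so nothing prompts for a pass phrase. The certificate is no weaker for it; the only risk is that anyone who can read the key file can use it. Adding or stripping a pass phrase later is straightforward (file names from the steps above):

        # Add a pass phrase (writes an encrypted copy of the key)
        openssl rsa -in my-private-key.pem -aes128 -out my-private-key-enc.pem

        # Remove a pass phrase (prompts for it, then writes a plain copy)
        openssl rsa -in my-private-key-enc.pem -out my-private-key.pem

        # Either way, keep the file permissions tight
        chmod 600 my-private-key.pem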

    Read the article
