Search Results

Search found 6079 results on 244 pages for 'define'.


  • What is a Delphi version of the C++ header for the DVP7010B video card DLL?

    - by grzegorz1
    I need help converting a C++ header file to Delphi. I have spent several days on this problem without success. Below is the original header file and my Delphi translation.

    C++ header:

        #if _MSC_VER > 1000
        #pragma once
        #endif // _MSC_VER > 1000

        #ifdef DVP7010BDLL_EXPORTS
        #define DVP7010BDLL_API __declspec(dllexport)
        #else
        #define DVP7010BDLL_API __declspec(dllimport)
        #endif

        #define MAXBOARDS 4
        #define MAXDEVS 4

        #define ID_NEW_FRAME      37810
        #define ID_MUX0_NEW_FRAME 37800
        #define ID_MUX1_NEW_FRAME 37801
        #define ID_MUX2_NEW_FRAME 37802
        #define ID_MUX3_NEW_FRAME 37803

        typedef enum {
            SUCCEEDED      =  1,
            FAILED         =  0,
            SDKINITFAILED  = -1,
            PARAMERROR     = -2,
            NODEVICES      = -3,
            NOSAMPLE       = -4,
            DEVICENUMERROR = -5,
            INPUTERROR     = -6,
            // VERIFYHWERROR = -7
        } Res;

        typedef enum tagAnalogVideoFormat {
            Video_None     = 0x00000000,
            Video_NTSC_M   = 0x00000001,
            Video_NTSC_M_J = 0x00000002,
            Video_PAL_B    = 0x00000010,
            Video_PAL_M    = 0x00000200,
            Video_PAL_N    = 0x00000400,
            Video_SECAM_B  = 0x00001000
        } AnalogVideoFormat;

        typedef enum { SIZEFULLPAL = 0, SIZED1, SIZEVGA, SIZEQVGA, SIZESUBQVGA } VideoSize;

        typedef enum { STOPPED = 1, RUNNING = 2, UNINITIALIZED = -1, UNKNOWNSTATE = -2 } CapState;

        class IDVP7010BDLL
        {
        public:
            int AdvDVP_CreateSDKInstence(void **pp);
            virtual int AdvDVP_InitSDK() PURE;
            virtual int AdvDVP_CloseSDK() PURE;
            virtual int AdvDVP_GetNoOfDevices(int *pNoOfDevs) PURE;
            virtual int AdvDVP_Start(int nDevNum, int SwitchingChans, HWND Main, HWND hwndPreview) PURE;
            virtual int AdvDVP_Stop(int nDevNum) PURE;
            virtual int AdvDVP_GetCapState(int nDevNum) PURE;
            virtual int AdvDVP_IsVideoPresent(int nDevNum, BOOL* VPresent) PURE;
            virtual int AdvDVP_GetCurFrameBuffer(int nDevNum, int VMux, long* bufSize, BYTE* buf) PURE;
            virtual int AdvDVP_SetNewFrameCallback(int nDevNum, int callback) PURE;
            virtual int AdvDVP_GetVideoFormat(int nDevNum, AnalogVideoFormat* vFormat) PURE;
            virtual int AdvDVP_SetVideoFormat(int nDevNum, AnalogVideoFormat vFormat) PURE;
            virtual int AdvDVP_GetFrameRate(int nDevNum, int *nFrameRate) PURE;
            virtual int AdvDVP_SetFrameRate(int nDevNum, int SwitchingChans, int nFrameRate) PURE;
            virtual int AdvDVP_GetResolution(int nDevNum, VideoSize *Size) PURE;
            virtual int AdvDVP_SetResolution(int nDevNum, VideoSize Size) PURE;
            virtual int AdvDVP_GetVideoInput(int nDevNum, int* input) PURE;
            virtual int AdvDVP_SetVideoInput(int nDevNum, int input) PURE;
            virtual int AdvDVP_GetBrightness(int nDevNum, int input, long *pnValue) PURE;
            virtual int AdvDVP_SetBrightness(int nDevNum, int input, long nValue) PURE;
            virtual int AdvDVP_GetContrast(int nDevNum, int input, long *pnValue) PURE;
            virtual int AdvDVP_SetContrast(int nDevNum, int input, long nValue) PURE;
            virtual int AdvDVP_GetHue(int nDevNum, int input, long *pnValue) PURE;
            virtual int AdvDVP_SetHue(int nDevNum, int input, long nValue) PURE;
            virtual int AdvDVP_GetSaturation(int nDevNum, int input, long *pnValue) PURE;
            virtual int AdvDVP_SetSaturation(int nDevNum, int input, long nValue) PURE;
            virtual int AdvDVP_GPIOGetData(int nDevNum, int DINum, BOOL* value) PURE;
            virtual int AdvDVP_GPIOSetData(int nDevNum, int DONum, BOOL value) PURE;
        };

    Delphi:

        unit IDVP7010BDLL_h;

        interface

        uses
          Windows, Messages, SysUtils, Classes;

        //{$if _MSC_VER > 1000}
        //pragma once
        //{$endif} // _MSC_VER > 1000

        {$ifdef DVP7010BDLL_EXPORTS}
        //const DVP7010BDLL_API = __declspec(dllexport);
        {$else}
        //const DVP7010BDLL_API = __declspec(dllimport);
        {$endif}

        const
          MAXDEVS = 4;
          MAXMUXS = 4;

          ID_NEW_FRAME      = 37810;
          ID_MUX0_NEW_FRAME = 37800;
          ID_MUX1_NEW_FRAME = 37801;
          ID_MUX2_NEW_FRAME = 37802;
          ID_MUX3_NEW_FRAME = 37803;

          // TRes
          SUCCEEDED      =  1;
          FAILED         =  0;
          SDKINITFAILED  = -1;
          PARAMERROR     = -2;
          NODEVICES      = -3;
          NOSAMPLE       = -4;
          DEVICENUMERROR = -5;
          INPUTERROR     = -6;
          // TRes

          // TAnalogVideoFormat
          Video_None     = $00000000;
          Video_NTSC_M   = $00000001;
          Video_NTSC_M_J = $00000002;
          Video_PAL_B    = $00000010;
          Video_PAL_M    = $00000200;
          Video_PAL_N    = $00000400;
          Video_SECAM_B  = $00001000;
          // TAnalogVideoFormat

          // TCapState
          STOPPED       =  1;
          RUNNING       =  2;
          UNINITIALIZED = -1;
          UNKNOWNSTATE  = -2;
          // TCapState

        type
          TCapState = Longint;
          TRes = Longint;
          TtagAnalogVideoFormat = DWORD;
          TAnalogVideoFormat = TtagAnalogVideoFormat;
          PAnalogVideoFormat = ^TAnalogVideoFormat;
          TVideoSize = (SIZEFULLPAL, SIZED1, SIZEVGA, SIZEQVGA, SIZESUBQVGA);
          PVideoSize = ^TVideoSize;
          P_Pointer = ^Pointer;

          TIDVP7010BDLL = class
            function AdvDVP_CreateSDKInstence(pp: P_Pointer): Integer; virtual; stdcall; abstract;
            function AdvDVP_InitSDK: Integer; virtual; stdcall; abstract;
            function AdvDVP_CloseSDK: Integer; virtual; stdcall; abstract;
            function AdvDVP_GetNoOfDevices(pNoOfDevs: PInteger): Integer; virtual; stdcall; abstract;
            function AdvDVP_Start(nDevNum: Integer; SwitchingChans: Integer; Main: HWND; hwndPreview: HWND): Integer; virtual; stdcall; abstract;
            function AdvDVP_Stop(nDevNum: Integer): Integer; virtual; stdcall; abstract;
            function AdvDVP_GetCapState(nDevNum: Integer): Integer; virtual; stdcall; abstract;
            function AdvDVP_IsVideoPresent(nDevNum: Integer; VPresent: PBool): Integer; virtual; stdcall; abstract;
            function AdvDVP_GetCurFrameBuffer(nDevNum: Integer; VMux: Integer; bufSize: PLongInt; buf: PByte): Integer; virtual; stdcall; abstract;
            function AdvDVP_SetNewFrameCallback(nDevNum: Integer; callback: Integer): Integer; virtual; stdcall; abstract;
            function AdvDVP_GetVideoFormat(nDevNum: Integer; vFormat: PAnalogVideoFormat): Integer; virtual; stdcall; abstract;
            function AdvDVP_SetVideoFormat(nDevNum: Integer; vFormat: TAnalogVideoFormat): Integer; virtual; stdcall; abstract;
            function AdvDVP_GetFrameRate(nDevNum: Integer; nFrameRate: Integer): Integer; virtual; stdcall; abstract;
            function AdvDVP_SetFrameRate(nDevNum: Integer; SwitchingChans: Integer; nFrameRate: Integer): Integer; virtual; stdcall; abstract;
            function AdvDVP_GetResolution(nDevNum: Integer; Size: PVideoSize): Integer; virtual; stdcall; abstract;
            function AdvDVP_SetResolution(nDevNum: Integer; Size: TVideoSize): Integer; virtual; stdcall; abstract;
            function AdvDVP_GetVideoInput(nDevNum: Integer; input: PInteger): Integer; virtual; stdcall; abstract;
            function AdvDVP_SetVideoInput(nDevNum: Integer; input: Integer): Integer; virtual; stdcall; abstract;
            function AdvDVP_GetBrightness(nDevNum: Integer; input: Integer; pnValue: PLongInt): Integer; virtual; stdcall; abstract;
            function AdvDVP_SetBrightness(nDevNum: Integer; input: Integer; nValue: LongInt): Integer; virtual; stdcall; abstract;
            function AdvDVP_GetContrast(nDevNum: Integer; input: Integer; pnValue: PLongInt): Integer; virtual; stdcall; abstract;
            function AdvDVP_SetContrast(nDevNum: Integer; input: Integer; nValue: LongInt): Integer; virtual; stdcall; abstract;
            function AdvDVP_GetHue(nDevNum: Integer; input: Integer; pnValue: PLongInt): Integer; virtual; stdcall; abstract;
            function AdvDVP_SetHue(nDevNum: Integer; input: Integer; nValue: LongInt): Integer; virtual; stdcall; abstract;
            function AdvDVP_GetSaturation(nDevNum: Integer; input: Integer; pnValue: PLongInt): Integer; virtual; stdcall; abstract;
            function AdvDVP_SetSaturation(nDevNum: Integer; input: Integer; nValue: LongInt): Integer; virtual; stdcall; abstract;
            function AdvDVP_GPIOGetData(nDevNum: Integer; DINum: Integer; value: PBool): Integer; virtual; stdcall; abstract;
            function AdvDVP_GPIOSetData(nDevNum: Integer; DONum: Integer; value: Boolean): Integer; virtual; stdcall; abstract;
          end;

        function IDVP7010BDLL: TIDVP7010BDLL; stdcall;

        implementation

        function IDVP7010BDLL; external 'DVP7010B.dll';

        end.
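    For orientation, the all-virtual C++ class is essentially a COM-style vtable interface, and the Delphi class of abstract virtual methods above is the usual way to emulate it. Two details are worth double-checking, offered here only as assumptions. First, the calling convention: stdcall versus cdecl must match whatever the DLL actually uses. Second, the factory: AdvDVP_CreateSDKInstence is not virtual in the C++ header, which suggests it is a plain exported DLL function rather than the first vtable slot, so declaring it virtual inside the Delphi class would shift every vtable slot by one. A minimal consumption sketch under those assumptions (the export name and convention are hypothetical and should be verified, e.g. with dumpbin /exports):

        // Sketch: assumes the DLL exports the factory under this name and convention
        function AdvDVP_CreateSDKInstence(pp: P_Pointer): Integer; stdcall;
          external 'DVP7010B.dll';

        procedure UseSdk;
        var
          P: Pointer;
          Sdk: TIDVP7010BDLL;
        begin
          if AdvDVP_CreateSDKInstence(@P) = SUCCEEDED then
          begin
            // treat the returned vtable pointer as the abstract class
            Sdk := TIDVP7010BDLL(P);
            Sdk.AdvDVP_InitSDK;
            // ... use the device ...
            Sdk.AdvDVP_CloseSDK;
          end;
        end;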



  • Reading train stop display names from a resource bundle

    - by Frank Nimphius
    In Oracle JDeveloper 11g R1, you set the display name of a train stop of an ADF bounded task flow train model by using the Oracle JDeveloper Structure Window. To do so:

    1. Double-click the bounded task flow configuration file (XML) in the Application Navigator so the task flow diagram opens.
    2. In the task flow diagram, select the view activity node for which you want to define the display name.
    3. In the Structure Window, expand the view activity node and then the train-stop node within it.
    4. Add the display name element by right-clicking the train-stop node and selecting Insert inside train-stop > Display Name.
    5. Edit the Display Name value with the Property Inspector.

    Following the steps outlined above, you can define static display names for train stops to show at runtime, like "PF1" for page fragment 1 shown in the image below. In the following, I explain how to change the static display string to a dynamic string that reads the display label from a resource bundle, so train stop labels can be internationalized.

    There are different strategies available for managing message bundles within an Oracle JDeveloper project. In this blog entry, I decided to build and configure the default properties file indicated by the project's properties. To learn the suggested file name and location, open the JDeveloper project properties (right-click the project node in the Application Navigator and choose Project Properties) and select the Resource Bundle node to see the suggested name and location for the default message bundle. Note that this is the resource bundle that Oracle JDeveloper would create automatically when you assign a text resource to an ADF Faces component in a page.

    For the train stop display name, we need to create the message bundle manually, as there is no context menu help available in Oracle JDeveloper. For this, right-click the JDeveloper project and choose New | General | File from the menu. In the dialog that opens, specify the message bundle file name as the name looked up before in the project properties Resource Bundle option. Also, ensure that the file is saved in a directory structure that matches the package structure shown in the Resource Bundle dialog. For example, you would save the properties file in the view project's src > adf > sample directory if the package structure was "adf.sample" (adf.sample.ViewControllerBundle).

    Edit the properties file and define key-value pairs for the train stop component. In the sample, these key-value pairs are:

        TrainStop1=Train Stop 1
        TrainStop2=Train Stop 2
        TrainStop3=Train Stop 3

    Next, double-click the faces-config.xml file and switch the opened editor to the Overview tab. Select the Application category and press the green plus icon next to the Resource Bundle section. Define the resource bundle Base Name as the package and properties file name, for example adf.sample.ViewControllerBundle. Finally, define a variable name for the message bundle so the bundle can be accessed from Expression Language. For this blog example, the name is chosen as "messageBundle".

        <resource-bundle>
          <base-name>adf.sample.ViewControllerBundle</base-name>
          <var>messageBundle</var>
        </resource-bundle>

    Next, select the display-name element in the train stop node (similar to when creating the display name) and use the Property Inspector to change the static display string to an EL expression referencing the message bundle, for example: #{messageBundle.TrainStop1}

    At runtime, the train stops now show display names read from a message bundle (the properties file).
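    To make the end state concrete, here is a sketch of what the train stop metadata in the task flow definition XML looks like once the EL expression is in place. The view id and page path are placeholders, and the exact nesting is an assumption based on the steps above:

        <view id="PF1">
          <page>/PF1.jsff</page>
          <train-stop>
            <display-name>#{messageBundle.TrainStop1}</display-name>
          </train-stop>
        </view>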


  • Tomcat IIS 7 Integration gives 503 errors on all requests.

    - by Yvan JANSSENS
    Hi,

    After many attempts to install Tomcat on IIS 7, I finally managed to get it working; at least I think so :-S. I got rid of the 500 errors by setting the correct permissions. The only thing that doesn't work is serving content: neither regular content (like ASP, HTML files, or directory browsing) nor Tomcat applications are served. Here are my configs:

    Worker.properties:

        # The workers that your plugins should create and work with
        #
        worker.list=worker1

        #------ DEFAULT ajp13 WORKER DEFINITION ------------------------------
        #---------------------------------------------------------------------
        # Defining a worker named ajp13 and of type ajp13
        # Note that the name and the type do not have to match.
        #
        worker.worker1.port=8009
        worker.worker1.host=127.0.0.1
        worker.worker1.type=ajp13

    URIWorkerMap.properties:

        /|/*=worker1
        # Exclude the subdirectory static:
        !/static|/*=worker1
        # Exclude some suffixes:
        !*.html=worker1
        !*.asp=worker1

    Server.xml:

        <?xml version='1.0' encoding='utf-8'?>
        <!--
          Licensed to the Apache Software Foundation (ASF) under one or more
          contributor license agreements. See the NOTICE file distributed with
          this work for additional information regarding copyright ownership.
          The ASF licenses this file to You under the Apache License, Version 2.0
          (the "License"); you may not use this file except in compliance with
          the License. You may obtain a copy of the License at

              http://www.apache.org/licenses/LICENSE-2.0

          Unless required by applicable law or agreed to in writing, software
          distributed under the License is distributed on an "AS IS" BASIS,
          WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          See the License for the specific language governing permissions and
          limitations under the License.
        -->
        <!-- Note: A "Server" is not itself a "Container", so you may not define
             subcomponents such as "Valves" at this level.
             Documentation at /docs/config/server.html -->
        <Server port="8005" shutdown="SHUTDOWN">

          <!--APR library loader. Documentation at /docs/apr.html -->
          <Listener className="org.apache.catalina.core.AprLifecycleListener" SSLEngine="on" />
          <!--Initialize Jasper prior to webapps are loaded. Documentation at /docs/jasper-howto.html -->
          <Listener className="org.apache.catalina.core.JasperListener" />
          <!-- Prevent memory leaks due to use of particular java/javax APIs-->
          <Listener className="org.apache.catalina.core.JreMemoryLeakPreventionListener" />
          <!-- JMX Support for the Tomcat server. Documentation at /docs/non-existent.html -->
          <Listener className="org.apache.catalina.mbeans.ServerLifecycleListener" />
          <Listener className="org.apache.catalina.mbeans.GlobalResourcesLifecycleListener" />

          <!-- Global JNDI resources
               Documentation at /docs/jndi-resources-howto.html -->
          <GlobalNamingResources>
            <!-- Editable user database that can also be used by
                 UserDatabaseRealm to authenticate users -->
            <Resource name="UserDatabase" auth="Container"
                      type="org.apache.catalina.UserDatabase"
                      description="User database that can be updated and saved"
                      factory="org.apache.catalina.users.MemoryUserDatabaseFactory"
                      pathname="conf/tomcat-users.xml" />
          </GlobalNamingResources>

          <!-- A "Service" is a collection of one or more "Connectors" that share
               a single "Container" Note: A "Service" is not itself a "Container",
               so you may not define subcomponents such as "Valves" at this level.
               Documentation at /docs/config/service.html -->
          <Service name="Catalina">

            <!--The connectors can use a shared executor, you can define one or more named thread pools-->
            <!--
            <Executor name="tomcatThreadPool" namePrefix="catalina-exec-"
                maxThreads="150" minSpareThreads="4"/>
            -->

            <!-- A "Connector" represents an endpoint by which requests are received
                 and responses are returned. Documentation at :
                 Java HTTP Connector: /docs/config/http.html (blocking & non-blocking)
                 Java AJP Connector: /docs/config/ajp.html
                 APR (HTTP/AJP) Connector: /docs/apr.html
                 Define a non-SSL HTTP/1.1 Connector on port 8080 -->
            <Connector port="8080" protocol="HTTP/1.1"
                       connectionTimeout="20000"
                       redirectPort="8443" />
            <!-- A "Connector" using the shared thread pool-->
            <!--
            <Connector executor="tomcatThreadPool"
                       port="8080" protocol="HTTP/1.1"
                       connectionTimeout="20000"
                       redirectPort="8443" />
            -->
            <!-- Define a SSL HTTP/1.1 Connector on port 8443
                 This connector uses the JSSE configuration, when using APR, the
                 connector should be using the OpenSSL style configuration
                 described in the APR documentation -->
            <!--
            <Connector port="8443" protocol="HTTP/1.1" SSLEnabled="true"
                       maxThreads="150" scheme="https" secure="true"
                       clientAuth="false" sslProtocol="TLS" />
            -->

            <!-- Define an AJP 1.3 Connector on port 8009 -->
            <Connector port="8009" protocol="AJP/1.3" redirectPort="8443" />

            <!-- An Engine represents the entry point (within Catalina) that processes
                 every request. The Engine implementation for Tomcat stand alone
                 analyzes the HTTP headers included with the request, and passes them
                 on to the appropriate Host (virtual host).
                 Documentation at /docs/config/engine.html -->

            <!-- You should set jvmRoute to support load-balancing via AJP ie :
            <Engine name="Catalina" defaultHost="localhost" jvmRoute="jvm1">
            -->
            <Engine name="Catalina" defaultHost="localhost">

              <!--For clustering, please take a look at documentation at:
                  /docs/cluster-howto.html  (simple how to)
                  /docs/config/cluster.html (reference documentation) -->
              <!--
              <Cluster className="org.apache.catalina.ha.tcp.SimpleTcpCluster"/>
              -->

              <!-- The request dumper valve dumps useful debugging information about
                   the request and response data received and sent by Tomcat.
                   Documentation at: /docs/config/valve.html -->
              <!--
              <Valve className="org.apache.catalina.valves.RequestDumperValve"/>
              -->

              <!-- This Realm uses the UserDatabase configured in the global JNDI
                   resources under the key "UserDatabase". Any edits that are
                   performed against this UserDatabase are immediately available
                   for use by the Realm. -->
              <Realm className="org.apache.catalina.realm.UserDatabaseRealm"
                     resourceName="UserDatabase"/>

              <!-- Define the default virtual host
                   Note: XML Schema validation will not work with Xerces 2.2. -->
              <Host name="localhost" appBase="webapps"
                    unpackWARs="true" autoDeploy="true"
                    xmlValidation="false" xmlNamespaceAware="false">

                <!-- SingleSignOn valve, share authentication between web applications
                     Documentation at: /docs/config/valve.html -->
                <!--
                <Valve className="org.apache.catalina.authenticator.SingleSignOn" />
                -->

                <!-- Access log processes all example.
                     Documentation at: /docs/config/valve.html -->
                <!--
                <Valve className="org.apache.catalina.valves.AccessLogValve" directory="logs"
                       prefix="localhost_access_log." suffix=".txt" pattern="common" resolveHosts="false"/>
                -->

              </Host>
            </Engine>
          </Service>
        </Server>

    http://localhost:8080 is working; I can view the apps and configure them there. I'm quite new to IIS 7, I used to work with IIS 6.

    Thanks in advance,
    Yvan
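    One configuration file not shown above is the redirector's own isapi_redirect.properties. A 503 on every request usually means IIS loaded isapi_redirect.dll but the redirector cannot find its worker or mapping files, so it is worth verifying that file points at the two properties files shown above. A sketch, with paths that are placeholder assumptions for a typical install:

        # Hypothetical paths: adjust to the actual Tomcat/connector layout
        extension_uri=/jakarta/isapi_redirect.dll
        log_file=C:\tomcat\logs\isapi_redirect.log
        log_level=info
        worker_file=C:\tomcat\conf\workers.properties
        worker_mount_file=C:\tomcat\conf\uriworkermap.properties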


  • cannot access localhost using ip

    - by Robert
    I have done a small web development project using Eclipse. It runs well when I try it in a browser with the URL localhost:8080/myproject/home.html. But if I want to access it on another machine (laptop, mobile, etc. using the same wifi) it is not possible; it is not able to connect. After Googling for a while, I found out that I have to use the IP address instead of 'localhost'. So I tried 10.0.0.4:8080/myproject/home.html, but it still does not work. In fact, I am unable to open that URL on the same machine (where localhost:8080/myproject/home.html works fine). I also added a new Inbound rule in the Control Panel firewall settings, allowing access to all ports for protocol TCP. I still have the problem running the application with the URL 10.0.0.4:8080/myproject/home.html (on the same machine as well as on the laptop and mobile). FYI, I am using Eclipse Indigo and Apache Tomcat 6.0, and the server.xml file contents are as below:

        <?xml version="1.0" encoding="UTF-8"?>
        <!--
          Licensed to the Apache Software Foundation (ASF) under one or more
          contributor license agreements. See the NOTICE file distributed with
          this work for additional information regarding copyright ownership.
          The ASF licenses this file to You under the Apache License, Version 2.0
          (the "License"); you may not use this file except in compliance with
          the License. You may obtain a copy of the License at

              http://www.apache.org/licenses/LICENSE-2.0

          Unless required by applicable law or agreed to in writing, software
          distributed under the License is distributed on an "AS IS" BASIS,
          WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          See the License for the specific language governing permissions and
          limitations under the License.
        -->
        <!-- Note: A "Server" is not itself a "Container", so you may not define
             subcomponents such as "Valves" at this level.
             Documentation at /docs/config/server.html -->
        <Server port="8005" shutdown="SHUTDOWN">

          <!--APR library loader. Documentation at /docs/apr.html -->
          <Listener SSLEngine="on" className="org.apache.catalina.core.AprLifecycleListener"/>
          <!--Initialize Jasper prior to webapps are loaded. Documentation at /docs/jasper-howto.html -->
          <Listener className="org.apache.catalina.core.JasperListener"/>
          <!-- Prevent memory leaks due to use of particular java/javax APIs-->
          <Listener className="org.apache.catalina.core.JreMemoryLeakPreventionListener"/>
          <!-- JMX Support for the Tomcat server. Documentation at /docs/non-existent.html -->
          <Listener className="org.apache.catalina.mbeans.ServerLifecycleListener"/>
          <Listener className="org.apache.catalina.mbeans.GlobalResourcesLifecycleListener"/>

          <!-- Global JNDI resources
               Documentation at /docs/jndi-resources-howto.html -->
          <GlobalNamingResources>
            <!-- Editable user database that can also be used by
                 UserDatabaseRealm to authenticate users -->
            <Resource auth="Container"
                      description="User database that can be updated and saved"
                      factory="org.apache.catalina.users.MemoryUserDatabaseFactory"
                      name="UserDatabase" pathname="conf/tomcat-users.xml"
                      type="org.apache.catalina.UserDatabase"/>
          </GlobalNamingResources>

          <!-- A "Service" is a collection of one or more "Connectors" that share
               a single "Container" Note: A "Service" is not itself a "Container",
               so you may not define subcomponents such as "Valves" at this level.
               Documentation at /docs/config/service.html -->
          <Service name="Catalina">

            <!--The connectors can use a shared executor, you can define one or more named thread pools-->
            <!--
            <Executor name="tomcatThreadPool" namePrefix="catalina-exec-"
                maxThreads="150" minSpareThreads="4"/>
            -->

            <!-- A "Connector" represents an endpoint by which requests are received
                 and responses are returned. Documentation at :
                 Java HTTP Connector: /docs/config/http.html (blocking & non-blocking)
                 Java AJP Connector: /docs/config/ajp.html
                 APR (HTTP/AJP) Connector: /docs/apr.html
                 Define a non-SSL HTTP/1.1 Connector on port 8080 -->
            <Connector port="8080" protocol="HTTP/1.1"
                       address="10.0.0.4"
                       connectionTimeout="20000"
                       redirectPort="8443" />
            <!-- A "Connector" using the shared thread pool-->
            <!--
            <Connector executor="tomcatThreadPool"
                       port="8080" protocol="HTTP/1.1"
                       connectionTimeout="20000"
                       redirectPort="8443" />
            -->
            <!-- Define a SSL HTTP/1.1 Connector on port 8443
                 This connector uses the JSSE configuration, when using APR, the
                 connector should be using the OpenSSL style configuration
                 described in the APR documentation -->
            <!--
            <Connector port="8443" protocol="HTTP/1.1" SSLEnabled="true"
                       maxThreads="150" scheme="https" secure="true"
                       clientAuth="false" sslProtocol="TLS" />
            -->

            <!-- Define an AJP 1.3 Connector on port 8009 -->
            <Connector port="8009" protocol="AJP/1.3" redirectPort="8443"/>

            <!-- An Engine represents the entry point (within Catalina) that processes
                 every request. The Engine implementation for Tomcat stand alone
                 analyzes the HTTP headers included with the request, and passes them
                 on to the appropriate Host (virtual host).
                 Documentation at /docs/config/engine.html -->

            <!-- You should set jvmRoute to support load-balancing via AJP ie :
            <Engine name="Catalina" defaultHost="localhost" jvmRoute="jvm1">
            -->
            <Engine defaultHost="localhost" name="Catalina">

              <!--For clustering, please take a look at documentation at:
                  /docs/cluster-howto.html  (simple how to)
                  /docs/config/cluster.html (reference documentation) -->
              <!--
              <Cluster className="org.apache.catalina.ha.tcp.SimpleTcpCluster"/>
              -->

              <!-- The request dumper valve dumps useful debugging information about
                   the request and response data received and sent by Tomcat.
                   Documentation at: /docs/config/valve.html -->
              <!--
              <Valve className="org.apache.catalina.valves.RequestDumperValve"/>
              -->

              <!-- This Realm uses the UserDatabase configured in the global JNDI
                   resources under the key "UserDatabase". Any edits that are
                   performed against this UserDatabase are immediately available
                   for use by the Realm. -->
              <Realm className="org.apache.catalina.realm.UserDatabaseRealm"
                     resourceName="UserDatabase"/>

              <!-- Define the default virtual host
                   Note: XML Schema validation will not work with Xerces 2.2. -->
              <Host appBase="webapps" autoDeploy="true" name="localhost"
                    unpackWARs="true" xmlNamespaceAware="false" xmlValidation="false">

                <!-- SingleSignOn valve, share authentication between web applications
                     Documentation at: /docs/config/valve.html -->
                <!--
                <Valve className="org.apache.catalina.authenticator.SingleSignOn" />
                -->

                <!-- Access log processes all example.
                     Documentation at: /docs/config/valve.html -->
                <!--
                <Valve className="org.apache.catalina.valves.AccessLogValve" directory="logs"
                       prefix="localhost_access_log." suffix=".txt" pattern="common" resolveHosts="false"/>
                -->

                <Context docBase="myproject" path="/myproject" reloadable="true"
                         source="org.eclipse.jst.jee.server:myproject"/>
              </Host>
            </Engine>
          </Service>
        </Server>
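    One suspicion worth testing first, offered as an assumption from the config above rather than a confirmed diagnosis: with address="10.0.0.4" the HTTP connector binds to that single interface only. Verify with ipconfig that 10.0.0.4 really is the machine's current wifi address; the symptom that localhost:8080 works while 10.0.0.4:8080 fails on the same machine is consistent with that address not belonging to any local interface (or having changed under DHCP). Removing the address attribute makes Tomcat listen on all interfaces (restart Tomcat afterwards):

        <!-- Sketch: no address attribute, so Tomcat listens on all interfaces -->
        <Connector port="8080" protocol="HTTP/1.1"
                   connectionTimeout="20000"
                   redirectPort="8443" />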


  • Segmentation fault in my C program

    - by user233542
    I don't understand why this would give me a seg fault. Any ideas? This is the function that returns the signal to stop the program (plus the other function that is called within it):

        double bisect(double A0, double A1, double Sol[N], double tol, double c)
        {
            double Amid, shot;
            while (A1-A0 > tol) {
                Amid = 0.5*(A0+A1);
                shot = shoot(Sol, Amid, c);
                if (shot == 2.*Pi) {
                    return Amid;
                }
                if (shot > 2.*Pi) {
                    A1 = Amid;
                }
                else if (shot < 2.*Pi) {
                    A0 = Amid;
                }
            }
            return 0.5*(A1+A0);
        }

        double shoot(double Sol[N], double A, double c)
        {
            int i, j;
            /*Initial Conditions*/
            for (i = 0; i < buff; i++) {
                Sol[i] = 0.;
            }
            for (i = buff+l; i < N; i++) {
                Sol[i] = 2.*Pi;
            }
            Sol[buff] = 0;
            Sol[buff+1] = A*exp(sqrt(1+3*c)*dx);
            for (i = buff+2; i < buff+l; i++) {
                Sol[i] = (dx*dx)*( sin(Sol[i-1]) + c*sin(3.*(Sol[i-1])) ) - Sol[i-2] + 2.*Sol[i-1];
            }
            return Sol[i-1];
        }

    The values buff, l, and N are defined using #define statements: l = 401, buff = 50, N = 2000. Here is the full code:

        #include <stdio.h>
        #include <stdlib.h>
        #include <math.h>

        #define w 10          /*characteristic width of a soliton*/
        #define dx 0.05       /*distance between lattice sites*/
        #define s (2*w)/dx    /*size of soliton shape*/
        #define l (int)(s+1)  /*array length for soliton*/
        #define N (int)2000   /*length of field array--lattice sites*/
        #define Pi (double)4*atan(1)
        #define buff (int)50

        double shoot(double Sol[N], double A, double c);
        double bisect(double A0, double A1, double Sol[N], double tol, double c);
        void super_pos(double antiSol[N], double Sol[N], double phi[][N]);
        void vel_ver(double phi[][N], double v, double c, int tsteps, double dt);

        int main(int argc, char **argv)
        {
            double c, Sol[N], antiSol[N], A, A0, A1, tol, v, dt;
            int tsteps, i;
            FILE *fp1, *fp2, *fp3;
            fp1 = fopen("soliton.dat", "w");
            fp2 = fopen("final-phi.dat", "w");
            fp3 = fopen("energy.dat", "w");
            printf("Please input the number of time steps:");
            scanf("%d", &tsteps);
            printf("Also, enter the time step size:");
            scanf("%lf", &dt);
            do {
                printf("Please input the parameter c in the interval [-1/3,1]:");
                scanf("%lf", &c);
            } while (c < (-1./3.) || c > 1.);
            printf("Please input the inital speed of eiter soliton:");
            scanf("%lf", &v);
            double phi[tsteps+1][N];
            tol = 0.0000001;
            A0 = 0.;
            A1 = 2.*Pi;
            A = bisect(A0, A1, Sol, tol, c);
            shoot(Sol, A, c);
            for (i = 0; i < N; i++) {
                fprintf(fp1, "%d\t", i);
                fprintf(fp1, "%lf\n", Sol[i]);
            }
            fclose(fp1);
            super_pos(antiSol, Sol, phi);
            /*vel_ver(phi,v,c,tsteps,dt);
            for (i=0;i<N;i++){
                fprintf(fp2,"%d\t",i);
                fprintf(fp2,"%lf\n",phi[tsteps][i]);
            }*/
        }

        double shoot(double Sol[N], double A, double c)
        {
            int i, j;
            /*Initial Conditions*/
            for (i = 0; i < buff; i++) {
                Sol[i] = 0.;
            }
            for (i = buff+l; i < N; i++) {
                Sol[i] = 2.*Pi;
            }
            Sol[buff] = 0;
            Sol[buff+1] = A*exp(sqrt(1+3*c)*dx);
            for (i = buff+2; i < buff+l; i++) {
                Sol[i] = (dx*dx)*( sin(Sol[i-1]) + c*sin(3.*(Sol[i-1])) ) - Sol[i-2] + 2.*Sol[i-1];
            }
            return Sol[i-1];
        }

        double bisect(double A0, double A1, double Sol[N], double tol, double c)
        {
            double Amid, shot;
            while (A1-A0 > tol) {
                Amid = 0.5*(A0+A1);
                shot = shoot(Sol, Amid, c);
                if (shot == 2.*Pi) {
                    return Amid;
                }
                if (shot > 2.*Pi) {
                    A1 = Amid;
                }
                else if (shot < 2.*Pi) {
                    A0 = Amid;
                }
            }
            return 0.5*(A1+A0);
        }

        void super_pos(double antiSol[N], double Sol[N], double phi[][N])
        {
            int i;
            /*for (i=0;i<N;i++) {
                phi[i]=0;
            }
            for (i=buffer+s;i<1950-s;i++) {
                phi[i]=2*Pi;
            }*/
            for (i = 0; i < N; i++) {
                antiSol[i] = Sol[N-i];
            }
            /*for (i=0;i<s+1;i++) {
                phi[buffer+j] = Sol[j];
                phi[1549+j] = antiSol[j];
            }*/
            for (i = 0; i < N; i++) {
                phi[0][i] = antiSol[i] + Sol[i] - 2.*Pi;
            }
        }

        /* This function will set the 2nd input array to the derivative at the
           time t, for all points x in the lattice */
        void deriv2(double phi[][N], double DphiDx2[][N], int t)
        {
            //double SolDer2[s+1];
            int x;
            for (x = 0; x < N; x++) {
                DphiDx2[t][x] = (phi[buff+x+1][t] + phi[buff+x-1][t] - 2.*phi[x][t])/(dx*dx);
            }
            /*for (i=0;i<N;i++) {
                ptr[i] = &SolDer2[i];
            }*/
            //return DphiDx2[x];
        }

        void vel_ver(double phi[][N], double v, double c, int tsteps, double dt)
        {
            int t, x;
            double d1, d2, dp, DphiDx1[tsteps+1][N], DphiDx2[tsteps+1][N], dpdt[tsteps+1][N], p[tsteps+1][N];
            for (t = 0; t < tsteps; t++) {
                if (t == 0) {
                    for (x = 0; x < N; x++) { //inital conditions
                        deriv2(phi, DphiDx2, t);
                        dpdt[t][x] = DphiDx2[t][x] - sin(phi[t][x]) - sin(3.*phi[t][x]);
                        DphiDx1[t][x] = (phi[t][x+1] - phi[t][x])/dx;
                        p[t][x] = -v*DphiDx1[t][x];
                    }
                }
                for (x = 0; x < N; x++) { //velocity-verlet
                    phi[t+1][x] = phi[t][x] + dt*p[t][x] + (dt*dt/2)*dpdt[t][x];
                    p[t+1][x] = p[t][x] + (dt/2)*dpdt[t][x];
                    deriv2(phi, DphiDx2, t+1);
                    dpdt[t][x] = DphiDx2[t][x] - sin(phi[t+1][x]) - sin(3.*phi[t+1][x]);
                    p[t+1][x] += (dt/2)*dpdt[t+1][x];
                }
            }
        }

    So, this really isn't due to my overwriting the end of the Sol array. I've commented out both functions that I suspected of causing the problem (bisect or shoot) and inserted a print function. Two things happen. When I have code like below:

        double A, Pi, B, c;
        c = 0;
        Pi = 4.*atan(1.);
        A = Pi;
        B = 1./4.;
        printf("%lf", B);
        B = shoot(Sol, A, c);
        printf("%lf", B);

    I get a segfault from the function shoot. However, if I take away the shoot call so that I have:

        double A, Pi, B, c;
        c = 0;
        Pi = 4.*atan(1.);
        A = Pi;
        B = 1./4.;
        printf("%lf", B);

    it gives me a segfault at the printf... Why!?
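    Two likely culprits stand out from reading the listing; both are offered as a sketch, not a verified fix. First, super_pos reads one element past the end of Sol. Second, phi[tsteps+1][N] is a large variable-length array on the stack, and an exhausted stack would also explain a crash at a seemingly innocent printf:

        /* Fix 1: in super_pos, when i == 0 the original read Sol[N],
           one past the end of the array; the last valid index is N-1. */
        for (i = 0; i < N; i++) {
            antiSol[i] = Sol[N-1-i];
        }

        /* Fix 2 (a hypothetical rework): phi[tsteps+1][N] needs
           (tsteps+1)*2000*8 bytes of stack, so a few hundred time steps
           exhaust a typical 1-8 MB stack; vel_ver's four local
           [tsteps+1][N] arrays multiply the problem. Heap allocation
           avoids it: */
        double (*phi)[N] = malloc((size_t)(tsteps + 1) * sizeof *phi);
        if (phi == NULL) { perror("malloc"); exit(1); }
        /* ... use phi exactly as before, then free(phi); */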


  • ./a.out terminated. Garbage output due to stack smashing. How do I remove this error?

    - by mekasperasky
        #include <iostream>
        #include <fstream>
        #include <cstring>

        using namespace std;

        typedef unsigned long int WORD; /* Should be 32-bit = 4 bytes */

        #define w 32 /* word size in bits */
        #define r 12 /* number of rounds */
        #define b 16 /* number of bytes in key */
        #define c 4  /* number words in key */
                     /* c = max(1,ceil(8*b/w)) */
        #define t 26 /* size of table S = 2*(r+1) words */

        WORD S[t], L[c]; /* expanded key table */
        WORD P = 0xb7e15163, Q = 0x9e3779b9; /* magic constants */

        /* Rotation operators. x must be unsigned, to get logical right shift */
        #define ROTL(x,y) (((x)<<(y&(w-1))) | ((x)>>(w-(y&(w-1)))))
        #define ROTR(x,y) (((x)>>(y&(w-1))) | ((x)<<(w-(y&(w-1)))))

        void RC5_ENCRYPT(WORD *pt, WORD *ct) /* 2 WORD input pt/output ct */
        {
            WORD i, A = pt[0]+S[0], B = pt[1]+S[1];
            for (i = 1; i <= r; i++) {
                A = ROTL(A^B, B) + S[2*i];
                B = ROTL(B^A, A) + S[2*i+1];
            }
            ct[0] = A;
            ct[1] = B;
        }

        void RC5_DECRYPT(WORD *ct, WORD *pt) /* 2 WORD input ct/output pt */
        {
            WORD i, B = ct[1], A = ct[0];
            for (i = r; i > 0; i--) {
                B = ROTR(B-S[2*i+1], A) ^ A;
                A = ROTR(A-S[2*i], B) ^ B;
            }
            pt[1] = B - S[1];
            pt[0] = A - S[0];
        }

        void RC5_SETUP(unsigned char *K) /* secret input key K[0...b-1] */
        {
            WORD i, j, k, u = w/8, A, B, L[c];
            /* Initialize L, then S, then mix key into S */
            for (i = b-1, L[c-1] = 0; i != -1; i--)
                L[i/u] = (L[i/u] << 8) + K[i];
            for (S[0] = P, i = 1; i < t; i++)
                S[i] = S[i-1] + Q;
            for (A = B = i = j = k = 0; k < 3*t; k++, i = (i+1)%t, j = (j+1)%c) { /* 3*t > 3*c */
                A = S[i] = ROTL(S[i]+(A+B), 3);
                B = L[j] = ROTL(L[j]+(A+B), (A+B));
            }
        }

        void printword(WORD A)
        {
            WORD k;
            for (k = 0; k < w; k += 8)
                printf("%c");
        }

        int main()
        {
            WORD i, j, k, ptext, pt1[2], pt2[2], ct[2] = {0, 0};
            ifstream in("key1.txt");
            ifstream in1("plt.txt");
            ofstream out1("cpt.txt");
            if (!in) {
                cout << "Cannot open file.\n";
                return 1;
            }
            if (!in1) {
                cout << "Cannot open file.\n";
                return 1;
            }
            unsigned char key[b];
            in >> key;
            in1 >> pt1[0];
            in1 >> pt1[0];
            if (sizeof(WORD) != 4)
                printf("RC5 error: WORD has %d bytes.\n", sizeof(WORD));
            RC5_SETUP(key);
            RC5_ENCRYPT(pt1, ct);
            printf("\n plaintext ");
            printword(pt1[0]);
            printword(pt1[1]);
            printf(" ---> ciphertext ");
            printword(ct[0]);
            printword(ct[1]);
            printf("\n");
            RC5_SETUP(key);
            RC5_DECRYPT(ct, pt2);
            out1 << ct[0];
            out1 << ct[1];
            out1 << "\n";
            printf("\n plaintext ");
            printword(pt1[0]);
            printword(pt1[1]);
            return 0;
        }

    Let the plt.txt file contain 101 100, and let the key be 111.
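    Three defects in the listing would produce exactly this kind of garbage output and stack smashing; the rewritten lines below are a sketch of the presumed intent (the RC5 reference code prints one byte of the word per %c). First, printword passes no argument for its %c conversion, which is undefined behavior. Second, in >> key performs an unbounded read into key[16], so any token in the key file longer than 15 characters overwrites the stack. Third, main reads in1 >> pt1[0] twice, leaving pt1[1] uninitialized.

        // printword as the RC5 reference code has it: one byte of the word per %c.
        void printword(WORD A)
        {
            for (WORD k = 0; k < w; k += 8)
                printf("%c", (int)((A >> k) & 0xFF));
        }

        // Bounded key read: width() limits char-array extraction, so a long
        // token in key1.txt cannot overflow the buffer and smash the stack.
        char keybuf[b + 1] = {0};
        in.width(sizeof(keybuf));
        in >> keybuf;

        // The second plaintext word was presumably meant to be pt1[1]:
        in1 >> pt1[0];
        in1 >> pt1[1];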


  • C Language: Why can't I transfer a file from the server to the client?

    - by user275753
    I want to ask why I cannot transfer a file from the server to the client. When I start to send the file from the server, the client-side program has a problem. I spent some time checking the code, but I still cannot find the problem. Can anyone point it out for me? Thanks a lot!

    Client side code:

        #include <winsock2.h>
        #include <windows.h>
        #include <stdio.h>
        #include <stdlib.h>
        #include <string.h>
        #include <stdarg.h>

        #define SA struct sockaddr
        #define S_PORT 5678
        #define MAXLEN 1000
        #define true 1

        void errexit(const char *format, ...)
        {
            va_list args;
            va_start(args, format);
            vfprintf(stderr, format, args);
            va_end(args);
            WSACleanup();
            exit(1);
        }

        int main(int argc, char *argv[])
        {
            WSADATA wsadata;
            SOCKET sockfd;
            int number, message;
            char outbuff[MAXLEN], inbuff[MAXLEN];
            char PWD_buffer[_MAX_PATH];
            struct sockaddr_in servaddr;
            FILE *fp;
            int numbytes;
            char buf[2048];

            if (WSAStartup(MAKEWORD(2,2), &wsadata) != 0)
                errexit("WSAStartup failed\n");
            if (argc != 2)
                errexit("client IPaddress");
            if ((sockfd = socket(AF_INET, SOCK_STREAM, 0)) == INVALID_SOCKET)
                errexit("socket error: error number %d\n", WSAGetLastError());

            memset(&servaddr, 0, sizeof(servaddr));
            servaddr.sin_family = AF_INET;
            servaddr.sin_port = htons(S_PORT);
            if ((servaddr.sin_addr.s_addr = inet_addr(argv[1])) == INADDR_NONE)
                errexit("inet_addr error: error number %d\n", WSAGetLastError());

            if (connect(sockfd, (SA *) &servaddr, sizeof(servaddr)) == SOCKET_ERROR)
                errexit("connect error: error number %d\n", WSAGetLastError());

            if ((fp = fopen("C:\\users\\pc\\desktop\\COPY.c", "wb")) == NULL) {
                perror("fopen");
                exit(1);
            }
            printf("Still NO PROBLEM!\n");

            //Receive file from server
            while (1) {
                numbytes = read(sockfd, buf, sizeof(buf));
                printf("read %d bytes, ", numbytes);
                if (numbytes == 0) {
                    printf("\n");
                    break;
                }
                numbytes = fwrite(buf, sizeof(char), numbytes, fp);
                printf("fwrite %d bytes\n", numbytes);
            }
            fclose(fp);
            close(sockfd);
            return 0;
        }

    Server side code:

        #include <winsock2.h>
        #include <windows.h>
        #include <stdio.h>
        #include <stdlib.h>
        #include <string.h>
        #include <stdarg.h>

        #define SA struct sockaddr
        #define S_PORT 5678
        #define MAXLEN 1000

        void errexit(const char *format, ...)
        {
            va_list args;
            va_start(args, format);
            vfprintf(stderr, format, args);
            va_end(args);
            WSACleanup();
            exit(1);
        }

        int main(int argc, char *argv[])
        {
            WSADATA wsadata;
            SOCKET listenfd, connfd;
            int number, message, numbytes;
            int h, i, j, alen;
            int nread;
            struct sockaddr_in servaddr, cliaddr;
            FILE *in_file, *out_file, *fp;
            char buf[4096];

            if (WSAStartup(MAKEWORD(2,2), &wsadata) != 0)
                errexit("WSAStartup failed\n");
            listenfd = socket(AF_INET, SOCK_STREAM, 0);
            if (listenfd == INVALID_SOCKET)
                errexit("cannot create socket: error number %d\n", WSAGetLastError());

            memset(&servaddr, 0, sizeof(servaddr));
            servaddr.sin_family = AF_INET;
            servaddr.sin_addr.s_addr = htonl(INADDR_ANY);
            servaddr.sin_port = htons(S_PORT);
            if (bind(listenfd, (SA *) &servaddr, sizeof(servaddr)) == SOCKET_ERROR)
                errexit("can't bind to port %d: error number %d\n", S_PORT, WSAGetLastError());
            if (listen(listenfd, 5) == SOCKET_ERROR)
                errexit("can't listen on port %d: error number %d\n", S_PORT, WSAGetLastError());

            alen = sizeof(SA);
            connfd = accept(listenfd, (SA *) &cliaddr, &alen);
            if (connfd == INVALID_SOCKET)
                errexit("accept failed: error number %d\n", WSAGetLastError());
            printf("accept one client from %s!\n", inet_ntoa(cliaddr.sin_addr));

            fp = fopen("client.c", "rb"); // open file stored in server
            if (fp == NULL) {
                printf("\nfile NOT exist");
            }

            //Sending file
            while (!feof(fp)) {
                numbytes = fread(buf, sizeof(char), sizeof(buf), fp);
                printf("fread %d bytes, ", numbytes);
                numbytes = write(connfd, buf, numbytes);
                printf("Sending %d bytes\n", numbytes);
            }
            fclose(fp);
            closesocket(listenfd);
            closesocket(connfd);
            return 0;
        }
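    The likely culprit, stated as a strong suspicion rather than a tested fix: on Winsock, a SOCKET is not a C runtime file descriptor, so the POSIX-style read() in the client and write() in the server do not operate on the socket (they typically fail immediately, which matches the client breaking as soon as the transfer starts). The Winsock calls are recv() and send(), and close() should be closesocket(). A sketch of the client's receive loop with those changes:

        /* Client: receive with recv() instead of read() */
        while (1) {
            numbytes = recv(sockfd, buf, sizeof(buf), 0);
            if (numbytes == 0)              /* server closed the connection */
                break;
            if (numbytes == SOCKET_ERROR)   /* real failure */
                errexit("recv error: error number %d\n", WSAGetLastError());
            fwrite(buf, sizeof(char), numbytes, fp);
        }
        closesocket(sockfd);                /* not close() */

        /* Server: the matching change is send() instead of write():
           numbytes = send(connfd, buf, numbytes, 0); */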


  • Upload File to Windows Azure Blob in Chunks through ASP.NET MVC, JavaScript and HTML5

    - by Shaun
    Originally posted on: http://geekswithblogs.net/shaunxu/archive/2013/07/01/upload-file-to-windows-azure-blob-in-chunks-through-asp.net.aspx

    Many people use Windows Azure Blob Storage to store their data in the cloud. Blob storage provides 99.9% availability with an easy-to-use API through the .NET SDK and HTTP REST. For example, we can store JavaScript files, images, and documents in blob storage when we are building an ASP.NET web application on a Web Role in Windows Azure, or we can store our VHD files in blob storage and mount one as a hard drive in our cloud service. If you are familiar with Windows Azure, you know that there are two kinds of blob: page blobs and block blobs. The page blob is optimized for random read and write, which is very useful when you need to store VHD files. The block blob is optimized for sequential/chunk read and write, which has more common usage. Since we can upload a block blob in blocks through BlockBlob.PutBlock and then commit them as a whole blob by invoking BlockBlob.PutBlockList, it is very powerful for uploading large files: we can upload blocks in parallel and provide a pause-resume feature.

    There are many documents, articles, and blog posts that describe how to upload a block blob. Most of them focus on the server side: when you have received a big file, stream, or binaries, how to upload them into blob storage in blocks through the .NET SDK. But the problem is: how can we upload these large files from the client side, for example, a browser? This question came to me when I was working with a Chinese customer to help them build a network disk product on top of Azure. The end users upload their files from the web portal, and then the files are stored in blob storage from the Web Role. My goal was to find the best way to transfer the file from the client (the end user's machine) to the server (the Web Role) through the browser. In this post I will demonstrate and describe what I did to upload a large file in chunks with high speed and save the chunks as blocks into Windows Azure Blob Storage.

    Traditional Upload, Works with Limitation

    The simplest way to implement this requirement is to create a web page with a form that contains a file input element and a submit button.

        @using (Html.BeginForm("About", "Index", FormMethod.Post, new { enctype = "multipart/form-data" }))
        {
            <input type="file" name="file" />
            <input type="submit" value="upload" />
        }

    Then, in the backend controller, we retrieve the whole content of this file and upload it into blob storage through the .NET SDK. We can split the file into blocks, upload them in parallel, and commit. The code has been well blogged in the community.

        [HttpPost]
        public ActionResult About(HttpPostedFileBase file)
        {
            var container = _client.GetContainerReference("test");
            container.CreateIfNotExists();
            var blob = container.GetBlockBlobReference(file.FileName);
            var blockDataList = new Dictionary<string, byte[]>();
            using (var stream = file.InputStream)
            {
                var blockSizeInKB = 1024;
                var offset = 0;
                var index = 0;
                while (offset < stream.Length)
                {
                    var readLength = Math.Min(1024 * blockSizeInKB, (int)stream.Length - offset);
                    var blockData = new byte[readLength];
                    offset += stream.Read(blockData, 0, readLength);
                    blockDataList.Add(Convert.ToBase64String(BitConverter.GetBytes(index)), blockData);

                    index++;
                }
            }

            Parallel.ForEach(blockDataList, (bi) =>
            {
                blob.PutBlock(bi.Key, new MemoryStream(bi.Value), null);
            });
            blob.PutBlockList(blockDataList.Select(b => b.Key).ToArray());

            return RedirectToAction("About");
        }

    This works perfectly if we select an image, a piece of music, or a small video to upload. But if I select a large file, say a 6 GB HD movie, then after uploading for a few minutes the page will be shown as below and the upload will be terminated. In ASP.NET there is a limit on request length, and the maximum request length is defined in the web.config file; it must be a number less than about 4 GB. So if we want to upload a really big file, we cannot simply implement it this way. Also, in Windows Azure, the cloud service network load balancer will terminate the connection if it exceeds the timeout period; from my test the timeout looks like 2-3 minutes. Hence, when we need to upload a large file we cannot just use the basic HTML elements. Besides the limitations mentioned above, the simple HTML file upload cannot provide a rich upload experience such as chunked upload, pause, and pause-resume. So we need to find a better way to upload large files from the client to the server.

    Upload in Chunks through HTML5 and JavaScript

    In order to break the limitations mentioned above, we will upload the large file in chunks. This gives us some benefits:

    - No request size limitation: since we upload in chunks, we can define the request size for each chunk regardless of how big the entire file is.
    - No timeout problem: the size of the chunks is controlled by us, which means we can make sure the request for each chunk upload does not exceed the timeout period of either ASP.NET or the Windows Azure load balancer.

    It was a big challenge to upload a big file in chunks until we had HTML5. There are some new features and improvements introduced in HTML5, and we will use them to implement our solution.

    In HTML5, the File interface has been improved with a new method called "slice". It can be used to read part of a file by specifying the start byte index and the end byte index. For example, if the entire file is 1024 bytes, file.slice(512, 768) will read the part of the file from the 512th byte to the 768th byte and return a new object of an interface called "Blob", which you can treat as an array of bytes. In fact, a Blob object represents a file-like object of immutable, raw data. The File interface is based on Blob, inheriting blob functionality and extending it to support files on the user's system. For more information, please refer to the Blob documentation. File and Blob are very useful for implementing the chunked upload.
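    As a quick illustration of the slice semantics just described (a sketch; note that the end index is exclusive):

        // For a 1024-byte file: slice(start, end) returns the bytes [start, end).
        var firstHalf = file.slice(0, 512);     // bytes 0..511, a Blob of size 512
        var secondHalf = file.slice(512, 1024); // bytes 512..1023
        console.log(firstHalf.size + ", " + secondHalf.size); // "512, 512"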
    We will use the File interface to represent the file the user selected in the browser and then use File.slice to read the file in chunks of the size we want. For example, if we want to upload a 10 MB file in 512 KB chunks, we can read it in 512 KB blobs by using File.slice in a loop.

    Assume we have a web page as below: the user can select a file, an input box to specify the block size in KB, and a button to start the upload.

        <div>
            <input type="file" id="upload_files" name="files[]" /><br />
            Block Size: <input type="number" id="block_size" value="512" name="block_size" />KB<br />
            <input type="button" id="upload_button_blob" name="upload" value="upload (blob)" />
        </div>

    Then we can have the JavaScript function that uploads the file in chunks when the user clicks the button.

        <script type="text/javascript">
            $(function () {
                $("#upload_button_blob").click(function () {
                });
            });
        </script>

    First we need to ensure the client browser supports the interfaces we are going to use. Just try to invoke File, Blob, and FormData from the "window" object: if any of them is "undefined", the condition result will be "false", which means your browser doesn't support these premium features and it's time for you to get your browser updated. FormData is another new feature we are going to use; it can generate a temporary form for us, and we will use this interface to create a form with the chunk and its associated metadata when invoking the service through ajax.

        $("#upload_button_blob").click(function () {
            // assert the browser support html5
            if (window.File && window.Blob && window.FormData) {
                alert("Your brwoser is awesome, let's rock!");
            }
            else {
                alert("Oh man plz update to a modern browser before try is cool stuff out.");
                return;
            }
        });

    Each browser supports these interfaces in its own implementation; currently Blob, File, and File.slice are supported by Chrome 21, Firefox 13, IE 10, Opera 12, and Safari 5.1 or higher. After that, we work on the files the user selected one by one, since in HTML5 the user can select multiple files in one file input box.

        var files = $("#upload_files")[0].files;
        for (var i = 0; i < files.length; i++) {
            var file = files[i];
            var fileSize = file.size;
            var fileName = file.name;
        }

    Next, we calculate the start index and end index for each chunk based on the size the user specified in the browser. We put them into an array together with the file name and the index, which will be used when we upload the chunks into Windows Azure Blob Storage as blocks, since we need to specify the target blob name and the block index. At the same time we store the list of all indexes in another variable, which will be used to commit the blocks into a blob in Azure Storage once all chunks have been uploaded successfully.

        $("#upload_button_blob").click(function () {
            // assert the browser support html5
            ... ...

            // start to upload each files in chunks
            var files = $("#upload_files")[0].files;
            for (var i = 0; i < files.length; i++) {
                var file = files[i];
                var fileSize = file.size;
                var fileName = file.name;

                // calculate the start and end byte index for each blocks(chunks)
                // with the index, file name and index list for future using
                var blockSizeInKB = $("#block_size").val();
                var blockSize = blockSizeInKB * 1024;
                var blocks = [];
                var offset = 0;
                var index = 0;
                var list = "";
                while (offset < fileSize) {
                    var start = offset;
                    var end = Math.min(offset + blockSize, fileSize);

                    blocks.push({
                        name: fileName,
                        index: index,
                        start: start,
                        end: end
                    });
                    list += index + ",";

                    offset = end;
                    index++;
                }
            }
        });

    Now we have all the chunks' information ready. The next step is to upload them one by one to the server side; on the server side, when a chunk is received it will be uploaded as a block into Blob Storage and finally committed with the index list through BlockBlob.PutBlockList. But all these invocations are ajax calls, which are asynchronous, so we need to introduce a JavaScript library to help us coordinate the asynchronous operations, named "async.js". You can download this JavaScript library here, and you can find the documentation here. I will not explain this library too much in this post. We will put all the procedures we want to execute into a function array and pass it to the proper function defined in async.js to let it control the execution sequence, in series or in parallel. Hence we define an array and push the chunk upload functions into it.

        $("#upload_button_blob").click(function () {
            // assert the browser support html5
            ... ...

            // start to upload each files in chunks
            var files = $("#upload_files")[0].files;
            for (var i = 0; i < files.length; i++) {
                var file = files[i];
                var fileSize = file.size;
                var fileName = file.name;
                // calculate the start and end byte index for each blocks(chunks)
                // with the index, file name and index list for future using
                ... ...

                // define the function array and push all chunk upload operation into this array
                blocks.forEach(function (block) {
                    putBlocks.push(function (callback) {
                    });
                });
            }
        });

    As you can see, I used the File.slice method to read each chunk based on the start and end byte indexes we calculated previously, and constructed a temporary HTML form with the file name, chunk index, and chunk data through another new HTML5 feature named FormData. Then I posted this form to the backend server through jQuery.ajax. This is the key part of our solution.

        $("#upload_button_blob").click(function () {
            // assert the browser support html5
            ... ...
            // start to upload each files in chunks
            var files = $("#upload_files")[0].files;
            for (var i = 0; i < files.length; i++) {
                var file = files[i];
                var fileSize = file.size;
                var fileName = file.name;
                // calculate the start and end byte index for each blocks(chunks)
                // with the index, file name and index list for future using
                ... ...
                // define the function array and push all chunk upload operation into this array
                blocks.forEach(function (block) {
                    putBlocks.push(function (callback) {
                        // load blob based on the start and end index for each chunks
                        var blob = file.slice(block.start, block.end);
                        // put the file name, index and blob into a temporary from
                        var fd = new FormData();
                        fd.append("name", block.name);
                        fd.append("index", block.index);
                        fd.append("file", blob);
                        // post the form to backend service (asp.net mvc controller action)
                        $.ajax({
                            url: "/Home/UploadInFormData",
                            data: fd,
                            processData: false,
                            contentType: "multipart/form-data",
                            type: "POST",
                            success: function (result) {
                                if (!result.success) {
                                    alert(result.error);
                                }
                                callback(null, block.index);
                            }
                        });
                    });
                });
            }
        });

    Then we invoke these functions one by one by using async.js, and once all functions have executed successfully we invoke another ajax call to the backend service to commit all these chunks (blocks) as the blob in Windows Azure Storage.

        $("#upload_button_blob").click(function () {
            // assert the browser support html5
            ... ...
            // start to upload each files in chunks
            var files = $("#upload_files")[0].files;
            for (var i = 0; i < files.length; i++) {
                var file = files[i];
                var fileSize = file.size;
                var fileName = file.name;
                // calculate the start and end byte index for each blocks(chunks)
                // with the index, file name and index list for future using
                ... ...
                // define the function array and push all chunk upload operation into this array
                ... ...
                // invoke the functions one by one
                // then invoke the commit ajax call to put blocks into blob in azure storage
                async.series(putBlocks, function (error, result) {
                    var data = {
                        name: fileName,
                        list: list
                    };
                    $.post("/Home/Commit", data, function (result) {
                        if (!result.success) {
                            alert(result.error);
                        }
                        else {
                            alert("done!");
                        }
                    });
                });
            }
        });

    That's all on the client side. The outline of our logic is:

    - Calculate the start and end byte index for each chunk based on the block size.
    - Define the functions that read each chunk from the file and upload the content to the backend service through ajax.
    - Execute the functions defined in the previous step with "async.js".
    - Finally, commit the chunks by invoking the backend service, which puts them into Windows Azure Storage.

    Save Chunks as Blocks into Blob Storage

    Above we finished the client-side JavaScript code; it uploads the file in chunks to the backend service, which we are going to implement in this step. We will use ASP.NET MVC as our backend service. It will receive the chunks, upload them into Windows Azure Blob Storage as blocks, and finally commit them as one blob. Since on the client side we uploaded chunks by invoking an ajax call to the URL "/Home/UploadInFormData", I created a new action under the Index controller that only accepts HTTP POST requests.

        [HttpPost]
        public JsonResult UploadInFormData()
        {
            var error = string.Empty;
            try
            {
            }
            catch (Exception e)
            {
                error = e.ToString();
            }

            return new JsonResult()
            {
                Data = new
                {
                    success = string.IsNullOrWhiteSpace(error),
                    error = error
                }
            };
        }

    Then I retrieved the file name, index, and chunk content from the Request.Form object, which was passed from our client side. I used the Windows Azure SDK to create a blob container (in this case the container named "test") and create a blob reference with the blob name (the same as the file name). Then I uploaded the chunk as a block of this blob with the index, since in Blob Storage each block must have an index (ID) associated with it, so that finally we can put all blocks together as one blob by specifying their block ID list.

        [HttpPost]
        public JsonResult UploadInFormData()
        {
            var error = string.Empty;
            try
            {
                var name = Request.Form["name"];
                var index = int.Parse(Request.Form["index"]);
                var file = Request.Files[0];
                var id = Convert.ToBase64String(BitConverter.GetBytes(index));

                var container = _client.GetContainerReference("test");
                container.CreateIfNotExists();
                var blob = container.GetBlockBlobReference(name);
                blob.PutBlock(id, file.InputStream, null);
            }
            catch (Exception e)
            {
                error = e.ToString();
            }

            return new JsonResult()
            {
                Data = new
                {
                    success = string.IsNullOrWhiteSpace(error),
                    error = error
                }
            };
        }

    Next, I created another action to commit the blocks into the blob once all chunks have been uploaded. Similarly, I retrieved the blob name from the Request.Form. I also retrieved the chunk ID list, which is the block ID list, from the Request.Form as a string, split it into a list, and then invoked the BlockBlob.PutBlockList method. After that, our blob will be shown in the container, ready to be downloaded.

        [HttpPost]
        public JsonResult Commit()
        {
            var error = string.Empty;
            try
            {
                var name = Request.Form["name"];
                var list = Request.Form["list"];
                var ids = list
                    .Split(',')
                    .Where(id => !string.IsNullOrWhiteSpace(id))
                    .Select(id => Convert.ToBase64String(BitConverter.GetBytes(int.Parse(id))))
                    .ToArray();

                var container = _client.GetContainerReference("test");
                container.CreateIfNotExists();
                var blob = container.GetBlockBlobReference(name);
                blob.PutBlockList(ids);
            }
            catch (Exception e)
            {
                error = e.ToString();
            }

            return new JsonResult()
            {
                Data = new
                {
                    success = string.IsNullOrWhiteSpace(error),
                    error = error
                }
            };
        }

    Now we have finished all the code we need. The whole process of uploading would be like this below. Below is the full client-side JavaScript code.
1: <script type="text/javascript" src="~/Scripts/async.js"></script> 2: <script type="text/javascript"> 3: $(function () { 4: $("#upload_button_blob").click(function () { 5: // assert the browser support html5 6: if (window.File && window.Blob && window.FormData) { 7: alert("Your brwoser is awesome, let's rock!"); 8: } 9: else { 10: alert("Oh man plz update to a modern browser before try is cool stuff out."); 11: return; 12: } 13:  14: // start to upload each files in chunks 15: var files = $("#upload_files")[0].files; 16: for (var i = 0; i < files.length; i++) { 17: var file = files[i]; 18: var fileSize = file.size; 19: var fileName = file.name; 20:  21: // calculate the start and end byte index for each blocks(chunks) 22: // with the index, file name and index list for future using 23: var blockSizeInKB = $("#block_size").val(); 24: var blockSize = blockSizeInKB * 1024; 25: var blocks = []; 26: var offset = 0; 27: var index = 0; 28: var list = ""; 29: while (offset < fileSize) { 30: var start = offset; 31: var end = Math.min(offset + blockSize, fileSize); 32:  33: blocks.push({ 34: name: fileName, 35: index: index, 36: start: start, 37: end: end 38: }); 39: list += index + ","; 40:  41: offset = end; 42: index++; 43: } 44:  45: // define the function array and push all chunk upload operation into this array 46: var putBlocks = []; 47: blocks.forEach(function (block) { 48: putBlocks.push(function (callback) { 49: // load blob based on the start and end index for each chunks 50: var blob = file.slice(block.start, block.end); 51: // put the file name, index and blob into a temporary from 52: var fd = new FormData(); 53: fd.append("name", block.name); 54: fd.append("index", block.index); 55: fd.append("file", blob); 56: // post the form to backend service (asp.net mvc controller action) 57: $.ajax({ 58: url: "/Home/UploadInFormData", 59: data: fd, 60: processData: false, 61: contentType: "multipart/form-data", 62: type: "POST", 63: success: function (result) { 64: if (!result.success) { 65: alert(result.error); 66: } 67: callback(null, block.index); 68: } 69: }); 70: }); 71: }); 72:  73: // invoke the functions one by one 74: // then invoke the commit ajax call to put blocks into blob in azure storage 75: async.series(putBlocks, function (error, result) { 76: var data = { 77: name: fileName, 78: list: list 79: }; 80: $.post("/Home/Commit", data, function (result) { 81: if (!result.success) { 82: alert(result.error); 83: } 84: else { 85: alert("done!"); 86: } 87: }); 88: }); 89: } 90: }); 91: }); 92: </script> And below is the full ASP.NET MVC controller code. 
1: public class HomeController : Controller 2: { 3: private CloudStorageAccount _account; 4: private CloudBlobClient _client; 5:  6: public HomeController() 7: : base() 8: { 9: _account = CloudStorageAccount.Parse(CloudConfigurationManager.GetSetting("DataConnectionString")); 10: _client = _account.CreateCloudBlobClient(); 11: } 12:  13: public ActionResult Index() 14: { 15: ViewBag.Message = "Modify this template to jump-start your ASP.NET MVC application."; 16:  17: return View(); 18: } 19:  20: [HttpPost] 21: public JsonResult UploadInFormData() 22: { 23: var error = string.Empty; 24: try 25: { 26: var name = Request.Form["name"]; 27: var index = int.Parse(Request.Form["index"]); 28: var file = Request.Files[0]; 29: var id = Convert.ToBase64String(BitConverter.GetBytes(index)); 30:  31: var container = _client.GetContainerReference("test"); 32: container.CreateIfNotExists(); 33: var blob = container.GetBlockBlobReference(name); 34: blob.PutBlock(id, file.InputStream, null); 35: } 36: catch (Exception e) 37: { 38: error = e.ToString(); 39: } 40:  41: return new JsonResult() 42: { 43: Data = new 44: { 45: success = string.IsNullOrWhiteSpace(error), 46: error = error 47: } 48: }; 49: } 50:  51: [HttpPost] 52: public JsonResult Commit() 53: { 54: var error = string.Empty; 55: try 56: { 57: var name = Request.Form["name"]; 58: var list = Request.Form["list"]; 59: var ids = list 60: .Split(',') 61: .Where(id => !string.IsNullOrWhiteSpace(id)) 62: .Select(id => Convert.ToBase64String(BitConverter.GetBytes(int.Parse(id)))) 63: .ToArray(); 64:  65: var container = _client.GetContainerReference("test"); 66: container.CreateIfNotExists(); 67: var blob = container.GetBlockBlobReference(name); 68: blob.PutBlockList(ids); 69: } 70: catch (Exception e) 71: { 72: error = e.ToString(); 73: } 74:  75: return new JsonResult() 76: { 77: Data = new 78: { 79: success = string.IsNullOrWhiteSpace(error), 80: error = error 81: } 82: }; 83: } 84: } And if we selected a file from the browser we will see our application will upload chunks in the size we specified to the server through ajax call in background, and then commit all chunks in one blob. Then we can find the blob in our Windows Azure Blob Storage.   Optimized by Parallel Upload In previous example we just uploaded our file in chunks. This solved the problem that ASP.NET MVC request content size limitation as well as the Windows Azure load balancer timeout. But it might introduce the performance problem since we uploaded chunks in sequence. In order to improve the upload performance we could modify our client side code a bit to make the upload operation invoked in parallel. The good news is that, “async.js” library provides the parallel execution function. If you remembered the code we invoke the service to upload chunks, it utilized “async.series” which means all functions will be executed in sequence. Now we will change this code to “async.parallel”. This will invoke all functions in parallel. 1: $("#upload_button_blob").click(function () { 2: // assert the browser support html5 3: ... ... 4: // start to upload each files in chunks 5: var files = $("#upload_files")[0].files; 6: for (var i = 0; i < files.length; i++) { 7: var file = files[i]; 8: var fileSize = file.size; 9: var fileName = file.name; 10: // calculate the start and end byte index for each blocks(chunks) 11: // with the index, file name and index list for future using 12: ... ... 13: // define the function array and push all chunk upload operation into this array 14: ... ... 
15: // invoke the functions one by one 16: // then invoke the commit ajax call to put blocks into blob in azure storage 17: async.parallel(putBlocks, function (error, result) { 18: var data = { 19: name: fileName, 20: list: list 21: }; 22: $.post("/Home/Commit", data, function (result) { 23: if (!result.success) { 24: alert(result.error); 25: } 26: else { 27: alert("done!"); 28: } 29: }); 30: }); 31: } 32: }); In this way all chunks will be uploaded to the server side at the same time to maximize the bandwidth usage. This should work if the file was not very large and the chunk size was not very small. But for large file this might introduce another problem that too many ajax calls are sent to the server at the same time. So the best solution should be, upload the chunks in parallel with maximum concurrency limitation. The code below specified the concurrency limitation to 4, which means at the most only 4 ajax calls could be invoked at the same time. 1: $("#upload_button_blob").click(function () { 2: // assert the browser support html5 3: ... ... 4: // start to upload each files in chunks 5: var files = $("#upload_files")[0].files; 6: for (var i = 0; i < files.length; i++) { 7: var file = files[i]; 8: var fileSize = file.size; 9: var fileName = file.name; 10: // calculate the start and end byte index for each blocks(chunks) 11: // with the index, file name and index list for future using 12: ... ... 13: // define the function array and push all chunk upload operation into this array 14: ... ... 15: // invoke the functions one by one 16: // then invoke the commit ajax call to put blocks into blob in azure storage 17: async.parallelLimit(putBlocks, 4, function (error, result) { 18: var data = { 19: name: fileName, 20: list: list 21: }; 22: $.post("/Home/Commit", data, function (result) { 23: if (!result.success) { 24: alert(result.error); 25: } 26: else { 27: alert("done!"); 28: } 29: }); 30: }); 31: } 32: });   Summary In this post we discussed how to upload files in chunks to the backend service and then upload them into Windows Azure Blob Storage in blocks. We focused on the frontend side and leverage three new feature introduced in HTML 5 which are - File.slice: Read part of the file by specifying the start and end byte index. - Blob: File-like interface which contains the part of the file content. - FormData: Temporary form element that we can pass the chunk alone with some metadata to the backend service. Then we discussed the performance consideration of chunk uploading. Sequence upload cannot provide maximized upload speed, but the unlimited parallel upload might crash the browser and server if too many chunks. So we finally came up with the solution to upload chunks in parallel with the concurrency limitation. We also demonstrated how to utilize “async.js” JavaScript library to help us control the asynchronize call and the parallel limitation.   Regarding the chunk size and the parallel limitation value there is no “best” value. You need to test vary composition and find out the best one for your particular scenario. It depends on the local bandwidth, client machine cores and the server side (Windows Azure Cloud Service Virtual Machine) cores, memory and bandwidth. Below is one of my performance test result. The client machine was Windows 8 IE 10 with 4 cores. I was using Microsoft Cooperation Network. The web site was hosted on Windows Azure China North data center (in Beijing) with one small web role (1.7GB 1 core CPU, 1.75GB memory with 100Mbps bandwidth). 
The test cases were - Chunk size: 512KB, 1MB, 2MB, 4MB. - Upload Mode: Sequence, parallel (unlimited), parallel with limit (4 threads, 8 threads). - Chunk Format: base64 string, binaries. - Target file: 100MB. - Each case was tested 3 times. Below is the test result chart. Some thoughts, but not guidance or best practice: - Parallel gets better performance than series. - No significant performance improvement between parallel 4 threads and 8 threads. - Transform with binaries provides better performance than base64. - In all cases, chunk size in 1MB - 2MB gets better performance.   Hope this helps, Shaun All documents and related graphics, codes are provided "AS IS" without warranty of any kind. Copyright © Shaun Ziyan Xu. This work is licensed under the Creative Commons License.

    Read the article

  • AngularJS on top of ASP.NET: Moving the MVC framework out to the browser

    - by Varun Chatterji
    Heavily drawing inspiration from Ruby on Rails, MVC4’s convention over configuration model of development soon became the Holy Grail of .NET web development. The MVC model brought with it the goodness of proper separation of concerns between business logic, data, and the presentation logic. However, the MVC paradigm, was still one in which server side .NET code could be mixed with presentation code. The Razor templating engine, though cleaner than its predecessors, still encouraged and allowed you to mix .NET server side code with presentation logic. Thus, for example, if the developer required a certain <div> tag to be shown if a particular variable ShowDiv was true in the View’s model, the code could look like the following: Fig 1: To show a div or not. Server side .NET code is used in the View Mixing .NET code with HTML in views can soon get very messy. Wouldn’t it be nice if the presentation layer (HTML) could be pure HTML? Also, in the ASP.NET MVC model, some of the business logic invariably resides in the controller. It is tempting to use an anti­pattern like the one shown above to control whether a div should be shown or not. However, best practice would indicate that the Controller should not be aware of the div. The ShowDiv variable in the model should not exist. A controller should ideally, only be used to do the plumbing of getting the data populated in the model and nothing else. The view (ideally pure HTML) should render the presentation layer based on the model. In this article we will see how Angular JS, a new JavaScript framework by Google can be used effectively to build web applications where: 1. Views are pure HTML 2. Controllers (in the server sense) are pure REST based API calls 3. The presentation layer is loaded as needed from partial HTML only files. What is MVVM? MVVM short for Model View View Model is a new paradigm in web development. In this paradigm, the Model and View stuff exists on the client side through javascript instead of being processed on the server through postbacks. These frameworks are JavaScript frameworks that facilitate the clear separation of the “frontend” or the data rendering logic from the “backend” which is typically just a REST based API that loads and processes data through a resource model. The frameworks are called MVVM as a change to the Model (through javascript) gets reflected in the view immediately i.e. Model > View. Also, a change on the view (through manual input) gets reflected in the model immediately i.e. View > Model. The following figure shows this conceptually (comments are shown in red): Fig 2: Demonstration of MVVM in action In Fig 2, two text boxes are bound to the same variable model.myInt. Thus, changing the view manually (changing one text box through keyboard input) also changes the other textbox in real time demonstrating V > M property of a MVVM framework. Furthermore, clicking the button adds 1 to the value of model.myInt thus changing the model through JavaScript. This immediately updates the view (the value in the two textboxes) thus demonstrating the M > V property of a MVVM framework. Thus we see that the model in a MVVM JavaScript framework can be regarded as “the single source of truth“. This is an important concept. Angular is one such MVVM framework. We shall use it to build a simple app that sends SMS messages to a particular number. Application, Routes, Views, Controllers, Scope and Models Angular can be used in many ways to construct web applications. 
For this article, we shall only focus on building Single Page Applications (SPAs). Many of the approaches we will follow in this article have alternatives. It is beyond the scope of this article to explain every nuance in detail but we shall try to touch upon the basic concepts and end up with a working application that can be used to send SMS messages using Sent.ly Plus (a service that is itself built using Angular). Before you read on, we would like to urge you to forget what you know about Models, Views, Controllers and Routes in the ASP.NET MVC4 framework. All these words have different meanings in the Angular world. Whenever these words are used in this article, they will refer to Angular concepts and not ASP.NET MVC4 concepts. The following figure shows the skeleton of the root page of an SPA: Fig 3: The skeleton of a SPA The skeleton of the application is based on the Bootstrap starter template which can be found at: http://getbootstrap.com/examples/starter­template/ Apart from loading the Angular, jQuery and Bootstrap JavaScript libraries, it also loads our custom scripts /app/js/controllers.js /app/js/app.js These scripts define the routes, views and controllers which we shall come to in a moment. Application Notice that the body tag (Fig. 3) has an extra attribute: ng­app=”smsApp” Providing this tag “bootstraps” our single page application. It tells Angular to load a “module” called smsApp. This “module” is defined /app/js/app.js angular.module('smsApp', ['smsApp.controllers', function () {}]) Fig 4: The definition of our application module The line shows above, declares a module called smsApp. It also declares that this module “depends” on another module called “smsApp.controllers”. The smsApp.controllers module will contain all the controllers for our SPA. Routing and Views Notice that in the Navbar (in Fig 3) we have included two hyperlinks to: “#/app” “#/help” This is how Angular handles routing. Since the URLs start with “#”, they are actually just bookmarks (and not server side resources). However, our route definition (in /app/js/app.js) gives these URLs a special meaning within the Angular framework. angular.module('smsApp', ['smsApp.controllers', function () { }]) //Configure the routes .config(['$routeProvider', function ($routeProvider) { $routeProvider.when('/binding', { templateUrl: '/app/partials/bindingexample.html', controller: 'BindingController' }); }]); Fig 5: The definition of a route with an associated partial view and controller As we can see from the previous code sample, we are using the $routeProvider object in the configuration of our smsApp module. Notice how the code “asks for” the $routeProvider object by specifying it as a dependency in the [] braces and then defining a function that accepts it as a parameter. This is known as dependency injection. Please refer to the following link if you want to delve into this topic: http://docs.angularjs.org/guide/di What the above code snippet is doing is that it is telling Angular that when the URL is “#/binding”, then it should load the HTML snippet (“partial view”) found at /app/partials/bindingexample.html. Also, for this URL, Angular should load the controller called “BindingController”. We have also marked the div with the class “container” (in Fig 3) with the ng­view attribute. This attribute tells Angular that views (partial HTML pages) defined in the routes will be loaded within this div. 
You can see that the Angular JavaScript framework, unlike many other frameworks, works purely by extending HTML tags and attributes. It also allows you to extend HTML with your own tags and attributes (through directives) if you so desire, you can find out more about directives at the following URL: http://www.codeproject.com/Articles/607873/Extending­HTML­with­AngularJS­Directives Controllers and Models We have seen how we define what views and controllers should be loaded for a particular route. Let us now consider how controllers are defined. Our controllers are defined in the file /app/js/controllers.js. The following snippet shows the definition of the “BindingController” which is loaded when we hit the URL http://localhost:port/index.html#/binding (as we have defined in the route earlier as shown in Fig 5). Remember that we had defined that our application module “smsApp” depends on the “smsApp.controllers” module (see Fig 4). The code snippet below shows how the “BindingController” defined in the route shown in Fig 5 is defined in the module smsApp.controllers: angular.module('smsApp.controllers', [function () { }]) .controller('BindingController', ['$scope', function ($scope) { $scope.model = {}; $scope.model.myInt = 6; $scope.addOne = function () { $scope.model.myInt++; } }]); Fig 6: The definition of a controller in the “smsApp.controllers” module. The pieces are falling in place! Remember Fig.2? That was the code of a partial view that was loaded within the container div of the skeleton SPA shown in Fig 3. The route definition shown in Fig 5 also defined that the controller called “BindingController” (shown in Fig 6.) was loaded when we loaded the URL: http://localhost:22544/index.html#/binding The button in Fig 2 was marked with the attribute ng­click=”addOne()” which added 1 to the value of model.myInt. In Fig 6, we can see that this function is actually defined in the “BindingController”. Scope We can see from Fig 6, that in the definition of “BindingController”, we defined a dependency on $scope and then, as usual, defined a function which “asks for” $scope as per the dependency injection pattern. So what is $scope? Any guesses? As you might have guessed a scope is a particular “address space” where variables and functions may be defined. This has a similar meaning to scope in a programming language like C#. Model: The Scope is not the Model It is tempting to assign variables in the scope directly. For example, we could have defined myInt as $scope.myInt = 6 in Fig 6 instead of $scope.model.myInt = 6. The reason why this is a bad idea is that scope in hierarchical in Angular. Thus if we were to define a controller which was defined within the another controller (nested controllers), then the inner controller would inherit the scope of the parent controller. This inheritance would follow JavaScript prototypal inheritance. Let’s say the parent controller defined a variable through $scope.myInt = 6. The child controller would inherit the scope through java prototypical inheritance. This basically means that the child scope has a variable myInt that points to the parent scopes myInt variable. Now if we assigned the value of myInt in the parent, the child scope would be updated with the same value as the child scope’s myInt variable points to the parent scope’s myInt variable. 
However, if we were to assign the value of the myInt variable in the child scope, then the link of that variable to the parent scope would be broken as the variable myInt in the child scope now points to the value 6 and not to the parent scope’s myInt variable. But, if we defined a variable model in the parent scope, then the child scope will also have a variable model that points to the model variable in the parent scope. Updating the value of $scope.model.myInt in the parent scope would change the model variable in the child scope too as the variable is pointed to the model variable in the parent scope. Now changing the value of $scope.model.myInt in the child scope would ALSO change the value in the parent scope. This is because the model reference in the child scope is pointed to the scope variable in the parent. We did no new assignment to the model variable in the child scope. We only changed an attribute of the model variable. Since the model variable (in the child scope) points to the model variable in the parent scope, we have successfully changed the value of myInt in the parent scope. Thus the value of $scope.model.myInt in the parent scope becomes the “single source of truth“. This is a tricky concept, thus it is considered good practice to NOT use scope inheritance. More info on prototypal inheritance in Angular can be found in the “JavaScript Prototypal Inheritance” section at the following URL: https://github.com/angular/angular.js/wiki/Understanding­Scopes. Building It: An Angular JS application using a .NET Web API Backend Now that we have a perspective on the basic components of an MVVM application built using Angular, let’s build something useful. We will build an application that can be used to send out SMS messages to a given phone number. The following diagram describes the architecture of the application we are going to build: Fig 7: Broad application architecture We are going to add an HTML Partial to our project. This partial will contain the form fields that will accept the phone number and message that needs to be sent as an SMS. It will also display all the messages that have previously been sent. All the executable code that is run on the occurrence of events (button clicks etc.) in the view resides in the controller. The controller interacts with the ASP.NET WebAPI to get a history of SMS messages, add a message etc. through a REST based API. For the purposes of simplicity, we will use an in memory data structure for the purposes of creating this application. Thus, the tasks ahead of us are: Creating the REST WebApi with GET, PUT, POST, DELETE methods. Creating the SmsView.html partial Creating the SmsController controller with methods that are called from the SmsView.html partial Add a new route that loads the controller and the partial. 1. Creating the REST WebAPI This is a simple task that should be quite straightforward to any .NET developer. 
The following listing shows our ApiController: public class SmsMessage { public string to { get; set; } public string message { get; set; } } public class SmsResource : SmsMessage { public int smsId { get; set; } } public class SmsResourceController : ApiController { public static Dictionary<int, SmsResource> messages = new Dictionary<int, SmsResource>(); public static int currentId = 0; // GET api/<controller> public List<SmsResource> Get() { List<SmsResource> result = new List<SmsResource>(); foreach (int key in messages.Keys) { result.Add(messages[key]); } return result; } // GET api/<controller>/5 public SmsResource Get(int id) { if (messages.ContainsKey(id)) return messages[id]; return null; } // POST api/<controller> public List<SmsResource> Post([FromBody] SmsMessage value) { //Synchronize on messages so we don't have id collisions lock (messages) { SmsResource res = (SmsResource) value; res.smsId = currentId++; messages.Add(res.smsId, res); //SentlyPlusSmsSender.SendMessage(value.to, value.message); return Get(); } } // PUT api/<controller>/5 public List<SmsResource> Put(int id, [FromBody] SmsMessage value) { //Synchronize on messages so we don't have id collisions lock (messages) { if (messages.ContainsKey(id)) { //Update the message messages[id].message = value.message; messages[id].to = value.message; } return Get(); } } // DELETE api/<controller>/5 public List<SmsResource> Delete(int id) { if (messages.ContainsKey(id)) { messages.Remove(id); } return Get(); } } Once this class is defined, we should be able to access the WebAPI by a simple GET request using the browser: http://localhost:port/api/SmsResource Notice the commented line: //SentlyPlusSmsSender.SendMessage The SentlyPlusSmsSender class is defined in the attached solution. We have shown this line as commented as we want to explain the core Angular concepts. If you load the attached solution, this line is uncommented in the source and an actual SMS will be sent! By default, the API returns XML. For consumption of the API in Angular, we would like it to return JSON. To change the default to JSON, we make the following change to WebApiConfig.cs file located in the App_Start folder. public static class WebApiConfig { public static void Register(HttpConfiguration config) { config.Routes.MapHttpRoute( name: "DefaultApi", routeTemplate: "api/{controller}/{id}", defaults: new { id = RouteParameter.Optional } ); var appXmlType = config.Formatters.XmlFormatter. SupportedMediaTypes. FirstOrDefault( t => t.MediaType == "application/xml"); config.Formatters.XmlFormatter.SupportedMediaTypes.Remove(appXmlType); } } We now have our backend REST Api which we can consume from Angular! 2. Creating the SmsView.html partial This simple partial will define two fields: the destination phone number (international format starting with a +) and the message. These fields will be bound to model.phoneNumber and model.message. We will also add a button that we shall hook up to sendMessage() in the controller. A list of all previously sent messages (bound to model.allMessages) will also be displayed below the form input. 
The following code shows the code for the partial: <!--­­ If model.errorMessage is defined, then render the error div -­­> <div class="alert alert-­danger alert-­dismissable" style="margin­-top: 30px;" ng­-show="model.errorMessage != undefined"> <button type="button" class="close" data­dismiss="alert" aria­hidden="true">&times;</button> <strong>Error!</strong> <br /> {{ model.errorMessage }} </div> <!--­­ The input fields bound to the model --­­> <div class="well" style="margin-­top: 30px;"> <table style="width: 100%;"> <tr> <td style="width: 45%; text-­align: center;"> <input type="text" placeholder="Phone number (eg; +44 7778 609466)" ng­-model="model.phoneNumber" class="form-­control" style="width: 90%" onkeypress="return checkPhoneInput();" /> </td> <td style="width: 45%; text-­align: center;"> <input type="text" placeholder="Message" ng­-model="model.message" class="form-­control" style="width: 90%" /> </td> <td style="text-­align: center;"> <button class="btn btn-­danger" ng-­click="sendMessage();" ng-­disabled="model.isAjaxInProgress" style="margin­right: 5px;">Send</button> <img src="/Content/ajax-­loader.gif" ng­-show="model.isAjaxInProgress" /> </td> </tr> </table> </div> <!--­­ The past messages ­­--> <div style="margin-­top: 30px;"> <!­­-- The following div is shown if there are no past messages --­­> <div ng­-show="model.allMessages.length == 0"> No messages have been sent yet! </div> <!--­­ The following div is shown if there are some past messages --­­> <div ng-­show="model.allMessages.length == 0"> <table style="width: 100%;" class="table table-­striped"> <tr> <td>Phone Number</td> <td>Message</td> <td></td> </tr> <!--­­ The ng-­repeat directive is line the repeater control in .NET, but as you can see this partial is pure HTML which is much cleaner --> <tr ng-­repeat="message in model.allMessages"> <td>{{ message.to }}</td> <td>{{ message.message }}</td> <td> <button class="btn btn-­danger" ng-­click="delete(message.smsId);" ng­-disabled="model.isAjaxInProgress">Delete</button> </td> </tr> </table> </div> </div> The above code is commented and should be self explanatory. Conditional rendering is achieved through using the ng-­show=”condition” attribute on various div tags. Input fields are bound to the model and the send button is bound to the sendMessage() function in the controller as through the ng­click=”sendMessage()” attribute defined on the button tag. While AJAX calls are taking place, the controller sets model.isAjaxInProgress to true. Based on this variable, buttons are disabled through the ng-­disabled directive which is added as an attribute to the buttons. The ng-­repeat directive added as an attribute to the tr tag causes the table row to be rendered multiple times much like an ASP.NET repeater. 3. Creating the SmsController controller The penultimate piece of our application is the controller which responds to events from our view and interacts with our MVC4 REST WebAPI. The following listing shows the code we need to add to /app/js/controllers.js. Note that controller definitions can be chained. Also note that this controller “asks for” the $http service. The $http service is a simple way in Angular to do AJAX. So far we have only encountered modules, controllers, views and directives in Angular. The $http is new entity in Angular called a service. More information on Angular services can be found at the following URL: http://docs.angularjs.org/guide/dev_guide.services.understanding_services. 
.controller('SmsController', ['$scope', '$http', function ($scope, $http) { //We define the model $scope.model = {}; //We define the allMessages array in the model //that will contain all the messages sent so far $scope.model.allMessages = []; //The error if any $scope.model.errorMessage = undefined; //We initially load data so set the isAjaxInProgress = true; $scope.model.isAjaxInProgress = true; //Load all the messages $http({ url: '/api/smsresource', method: "GET" }). success(function (data, status, headers, config) { this callback will be called asynchronously //when the response is available $scope.model.allMessages = data; //We are done with AJAX loading $scope.model.isAjaxInProgress = false; }). error(function (data, status, headers, config) { //called asynchronously if an error occurs //or server returns response with an error status. $scope.model.errorMessage = "Error occurred status:" + status; //We are done with AJAX loading $scope.model.isAjaxInProgress = false; }); $scope.delete = function (id) { //We are making an ajax call so we set this to true $scope.model.isAjaxInProgress = true; $http({ url: '/api/smsresource/' + id, method: "DELETE" }). success(function (data, status, headers, config) { // this callback will be called asynchronously // when the response is available $scope.model.allMessages = data; //We are done with AJAX loading $scope.model.isAjaxInProgress = false; }); error(function (data, status, headers, config) { // called asynchronously if an error occurs // or server returns response with an error status. $scope.model.errorMessage = "Error occurred status:" + status; //We are done with AJAX loading $scope.model.isAjaxInProgress = false; }); } $scope.sendMessage = function () { $scope.model.errorMessage = undefined; var message = ''; if($scope.model.message != undefined) message = $scope.model.message.trim(); if ($scope.model.phoneNumber == undefined || $scope.model.phoneNumber == '' || $scope.model.phoneNumber.length < 10 || $scope.model.phoneNumber[0] != '+') { $scope.model.errorMessage = "You must enter a valid phone number in international format. Eg: +44 7778 609466"; return; } if (message.length == 0) { $scope.model.errorMessage = "You must specify a message!"; return; } //We are making an ajax call so we set this to true $scope.model.isAjaxInProgress = true; $http({ url: '/api/smsresource', method: "POST", data: { to: $scope.model.phoneNumber, message: $scope.model.message } }). success(function (data, status, headers, config) { // this callback will be called asynchronously // when the response is available $scope.model.allMessages = data; //We are done with AJAX loading $scope.model.isAjaxInProgress = false; }). error(function (data, status, headers, config) { // called asynchronously if an error occurs // or server returns response with an error status. $scope.model.errorMessage = "Error occurred status:" + status // We are done with AJAX loading $scope.model.isAjaxInProgress = false; }); } }]); We can see from the previous listing how the functions that are called from the view are defined in the controller. It should also be evident how easy it is to make AJAX calls to consume our MVC4 REST WebAPI. Now we are left with the final piece. We need to define a route that associates a particular path with the view we have defined and the controller we have defined. 4. Add a new route that loads the controller and the partial This is the easiest part of the puzzle. 
We simply define another route in the /app/js/app.js file: $routeProvider.when('/sms', { templateUrl: '/app/partials/smsview.html', controller: 'SmsController' }); Conclusion In this article we have seen how much of the server side functionality in the MVC4 framework can be moved to the browser thus delivering a snappy and fast user interface. We have seen how we can build client side HTML only views that avoid the messy syntax offered by server side Razor views. We have built a functioning app from the ground up. The significant advantage of this approach to building web apps is that the front end can be completely platform independent. Even though we used ASP.NET to create our REST API, we could just easily have used any other language such as Node.js, Ruby etc without changing a single line of our front end code. Angular is a rich framework and we have only touched on basic functionality required to create a SPA. For readers who wish to delve further into the Angular framework, we would recommend the following URL as a starting point: http://docs.angularjs.org/misc/started. To get started with the code for this project: Sign up for an account at http://plus.sent.ly (free) Add your phone number Go to the “My Identies Page” Note Down your Sender ID, Consumer Key and Consumer Secret Download the code for this article at: https://docs.google.com/file/d/0BzjEWqSE31yoZjZlV0d0R2Y3eW8/edit?usp=sharing Change the values of Sender Id, Consumer Key and Consumer Secret in the web.config file Run the project through Visual Studio!

    Read the article

  • Icinga error "Icinga Startup Delay does not exist" although it does

    - by aaron
    I just installed icinga to monitor my server following this guide: http://docs.icinga.org/0.8.1/en/wb_quickstart-idoutils.html Everything built and installed correctly, but icinga is reporting a critical error with the reason: "The command defined for service Icinga Startup Delay does not exist" However, I can see that ${ICINGA_BASE}/etc/objects/localhost.cfg contains: define service{ use local-service ; Name of service template to use host_name localhost service_description Icinga Startup Delay check_command check_icinga_startup_delay notifications_enabled 0 } and ${ICINGA_BASE}/etc/objects/commands.cfg contains: define command { command_name check_icinga_startup_delay command_line $USER1$/check_dummy 0 "Icinga started with $$(($EVENTSTARTTIME$-$PROCESSSTARTTIME$)) seconds delay | delay=$$(($EVENTSTARTTIME$-$PROCESSSTARTTIME$))" } both of these files had not been modified since the whole make/install process. I am running on Ubuntu 10.04, most recent build of icinga-core, and apache2 2.2.14 What must I do to tell Icinga that the command exists? Or is the problem that check_dummy does not exist? Where or how would I define that?

    Read the article

  • What is good practice in .NET system architecture design concerning multiple models and aggregates

    - by BuzzBubba
    I'm designing a larger enterprise architecture and I'm in a doubt about how to separate the models and design those. There are several points I'd like suggestions for: - models to define - way to define models Currently my idea is to define: Core (domain) model Repositories to get data to that domain model from a database or other store Business logic model that would contain business logic, validation logic and more specific versions of forms of data retrieval methods View models prepared for specifically formated data output that would be parsed by views of different kind (web, silverlight, etc). For the first model I'm puzzled at what to use and how to define the mode. Should this model entities contain collections and in what form? IList, IEnumerable or IQueryable collections? - I'm thinking of immutable collections which IEnumerable is, but I'd like to avoid huge data collections and to offer my Business logic layer access with LINQ expressions so that query trees get executed at Data level and retrieve only really required data for situations like the one when I'm retrieving a very specific subset of elements amongst thousands or hundreds of thousands. What if I have an item with several thousands of bids? I can't just make an IEnumerable collection of those on the model and then retrieve an item list in some Repository method or even Business model method. Should it be IQueryable so that I actually pass my queries to Repository all the way from the Business logic model layer? Should I just avoid collections in my domain model? Should I void only some collections? Should I separate Domain model and BusinessLogic model or integrate those? Data would be dealt trough repositories which would use Domain model classes. Should repositories be used directly using only classes from domain model like data containers? This is an example of what I had in mind: So, my Domain objects would look like (e.g.) public class Item { public string ItemName { get; set; } public int Price { get; set; } public bool Available { get; set; } private IList<Bid> _bids; public IQueryable<Bid> Bids { get { return _bids.AsQueryable(); } private set { _bids = value; } } public AddNewBid(Bid newBid) { _bids.Add(new Bid {.... } } Where Bid would be defined as a normal class. Repositories would be defined as data retrieval factories and used to get data into another (Business logic) model which would again be used to get data to ViewModels which would then be rendered by different consumers. I would define IQueryable interfaces for all aggregating collections to get flexibility and minimize data retrieved from real data store. Or should I make Domain Model "anemic" with pure data store entities and all collections define for business logic model? One of the most important questions is, where to have IQueryable typed collections? - All the way from Repositories to Business model or not at all and expose only solid IList and IEnumerable from Repositories and deal with more specific queries inside Business model, but have more finer grained methods for data retrieval within Repositories. So, what do you think? Have any suggestions?

    Read the article

  • Need help configurating my Tomcat server without any WAR files

    - by gablin
    I just reinstalled my entire server, and now I can't seem to get my JSP-based website to work on Tomcat anymore. I use the same server.xml file, which worked perfectly before the reinstallation, but no longer. Here's the content of the server.xml file which worked before: <!--APR library loader. Documentation at /docs/apr.html --> <Listener className="org.apache.catalina.core.AprLifecycleListener" SSLEngine="on" /> <!--Initialize Jasper prior to webapps are loaded. Documentation at /docs/jasper-howto.html --> <Listener className="org.apache.catalina.core.JasperListener" /> <!-- JMX Support for the Tomcat server. Documentation at /docs/non-existent.html --> <Listener className="org.apache.catalina.mbeans.ServerLifecycleListener" /> <Listener className="org.apache.catalina.mbeans.GlobalResourcesLifecycleListener" /> <!-- Global JNDI resources Documentation at /docs/jndi-resources-howto.html --> <GlobalNamingResources> <!-- Editable user database that can also be used by UserDatabaseRealm to authenticate users --> <Resource name="UserDatabase" auth="Container" type="org.apache.catalina.UserDatabase" description="User database that can be updated and saved" factory="org.apache.catalina.users.MemoryUserDatabaseFactory" pathname="conf/tomcat-users.xml" /> </GlobalNamingResources> <!-- A "Service" is a collection of one or more "Connectors" that share a single "Container" Note: A "Service" is not itself a "Container", so you may not define subcomponents such as "Valves" at this level. Documentation at /docs/config/service.html --> <Service name="Catalina"> <!--The connectors can use a shared executor, you can define one or more named thread pools--> <!-- <Executor name="tomcatThreadPool" namePrefix="catalina-exec-" maxThreads="150" minSpareThreads="4"/> --> <!-- A "Connector" represents an endpoint by which requests are received and responses are returned. Documentation at : Java HTTP Connector: /docs/config/http.html (blocking & non-blocking) Java AJP Connector: /docs/config/ajp.html APR (HTTP/AJP) Connector: /docs/apr.html Define a non-SSL HTTP/1.1 Connector on port 8080 --> <Connector port="8080" protocol="HTTP/1.1" connectionTimeout="20000" redirectPort="8443" /> <!-- A "Connector" using the shared thread pool--> <!-- <Connector executor="tomcatThreadPool" port="8080" protocol="HTTP/1.1" connectionTimeout="20000" redirectPort="8443" /> --> <!-- Define a SSL HTTP/1.1 Connector on port 8443 This connector uses the JSSE configuration, when using APR, the connector should be using the OpenSSL style configuration described in the APR documentation --> <!-- <Connector port="8443" protocol="HTTP/1.1" SSLEnabled="true" maxThreads="150" scheme="https" secure="true" clientAuth="false" sslProtocol="TLS" /> --> <!-- Define an AJP 1.3 Connector on port 8009 --> <Connector port="8009" protocol="AJP/1.3" redirectPort="8443" /> <!-- An Engine represents the entry point (within Catalina) that processes every request. The Engine implementation for Tomcat stand alone analyzes the HTTP headers included with the request, and passes them on to the appropriate Host (virtual host). 
Documentation at /docs/config/engine.html --> <!-- You should set jvmRoute to support load-balancing via AJP ie : <Engine name="Standalone" defaultHost="localhost" jvmRoute="jvm1"> --> <Engine name="Catalina" defaultHost="localhost"> <!--For clustering, please take a look at documentation at: /docs/cluster-howto.html (simple how to) /docs/config/cluster.html (reference documentation) --> <!-- <Cluster className="org.apache.catalina.ha.tcp.SimpleTcpCluster"/> --> <!-- The request dumper valve dumps useful debugging information about the request and response data received and sent by Tomcat. Documentation at: /docs/config/valve.html --> <!-- <Valve className="org.apache.catalina.valves.RequestDumperValve"/> --> <!-- This Realm uses the UserDatabase configured in the global JNDI resources under the key "UserDatabase". Any edits that are performed against this UserDatabase are immediately available for use by the Realm. --> <Realm className="org.apache.catalina.realm.UserDatabaseRealm" resourceName="UserDatabase"/> <!-- Define the default virtual host Note: XML Schema validation will not work with Xerces 2.2. --> <!-- <Host name="localhost" appBase="webapps" unpackWARs="true" autoDeploy="true" xmlValidation="false" xmlNamespaceAware="false"> --> <!-- SingleSignOn valve, share authentication between web applications Documentation at: /docs/config/valve.html --> <!-- <Valve className="org.apache.catalina.authenticator.SingleSignOn" /> --> <!-- Access log processes all example. Documentation at: /docs/config/valve.html --> <!-- <Valve className="org.apache.catalina.valves.AccessLogValve" directory="logs" prefix="localhost_access_log." suffix=".txt" pattern="common" resolveHosts="false"/> --> <!-- </Host> --> <Host name="www.rebootradio.nu"> <Alias>rebootradio.nu</Alias> <Context path="" docBase="D:/services/http/rebootradio.nu" debug="1" reloadable="true"/> </Host> </Engine> </Service> </Server> The JSP site doesn't use any WAR files or anything like that; there's just a default.jsp in the specified folder D:/services/http/rebootradio.nu which loads the site. As I said, this configuration worked before, but now with the latest verion of XAMPP and Tomcat it doesn't work anymore. All I get is a 404 message saying The requested resource () is not available.

    Read the article

  • I don't understand how work call_once

    - by SABROG
    Please help me understand how work call_once Here is thread-safe code. I don't understand why this need Thread Local Storage and global_epoch variables. Variable _fast_pthread_once_per_thread_epoch can be changed to constant/enum like {FAST_PTHREAD_ONCE_INIT, BEING_INITIALIZED, FINISH_INITIALIZED}. Why needed count calls in global_epoch? I think this code can be rewriting with logc: if flag FINISH_INITIALIZED do nothing, else go to block with mutexes and this all. #ifndef FAST_PTHREAD_ONCE_H #define FAST_PTHREAD_ONCE_H #include #include typedef sig_atomic_t fast_pthread_once_t; #define FAST_PTHREAD_ONCE_INIT SIG_ATOMIC_MAX extern __thread fast_pthread_once_t _fast_pthread_once_per_thread_epoch; #ifdef __cplusplus extern "C" { #endif extern void fast_pthread_once( pthread_once_t *once, void (*func)(void) ); inline static void fast_pthread_once_inline( fast_pthread_once_t *once, void (*func)(void) ) { fast_pthread_once_t x = *once; /* unprotected access */ if ( x _fast_pthread_once_per_thread_epoch ) { fast_pthread_once( once, func ); } } #ifdef __cplusplus } #endif #endif FAST_PTHREAD_ONCE_H Source fast_pthread_once.c The source is written in C. The lines of the primary function are numbered for reference in the subsequent correctness argument. #include "fast_pthread_once.h" #include static pthread_mutex_t mu = PTHREAD_MUTEX_INITIALIZER; /* protects global_epoch and all fast_pthread_once_t writes */ static pthread_cond_t cv = PTHREAD_COND_INITIALIZER; /* signalled whenever a fast_pthread_once_t is finalized */ #define BEING_INITIALIZED (FAST_PTHREAD_ONCE_INIT - 1) static fast_pthread_once_t global_epoch = 0; /* under mu */ __thread fast_pthread_once_t _fast_pthread_once_per_thread_epoch; static void check( int x ) { if ( x == 0 ) abort(); } void fast_pthread_once( fast_pthread_once_t *once, void (*func)(void) ) { /*01*/ fast_pthread_once_t x = *once; /* unprotected access */ /*02*/ if ( x _fast_pthread_once_per_thread_epoch ) { /*03*/ check( pthread_mutex_lock(µ) == 0 ); /*04*/ if ( *once == FAST_PTHREAD_ONCE_INIT ) { /*05*/ *once = BEING_INITIALIZED; /*06*/ check( pthread_mutex_unlock(µ) == 0 ); /*07*/ (*func)(); /*08*/ check( pthread_mutex_lock(µ) == 0 ); /*09*/ global_epoch++; /*10*/ *once = global_epoch; /*11*/ check( pthread_cond_broadcast(&cv;) == 0 ); /*12*/ } else { /*13*/ while ( *once == BEING_INITIALIZED ) { /*14*/ check( pthread_cond_wait(&cv;, µ) == 0 ); /*15*/ } /*16*/ } /*17*/ _fast_pthread_once_per_thread_epoch = global_epoch; /*18*/ check (pthread_mutex_unlock(µ) == 0); } } This code from BOOST: #ifndef BOOST_THREAD_PTHREAD_ONCE_HPP #define BOOST_THREAD_PTHREAD_ONCE_HPP // once.hpp // // (C) Copyright 2007-8 Anthony Williams // // Distributed under the Boost Software License, Version 1.0. 
(See // accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) #include #include #include #include "pthread_mutex_scoped_lock.hpp" #include #include #include namespace boost { struct once_flag { boost::uintmax_t epoch; }; namespace detail { BOOST_THREAD_DECL boost::uintmax_t& get_once_per_thread_epoch(); BOOST_THREAD_DECL extern boost::uintmax_t once_global_epoch; BOOST_THREAD_DECL extern pthread_mutex_t once_epoch_mutex; BOOST_THREAD_DECL extern pthread_cond_t once_epoch_cv; } #define BOOST_ONCE_INITIAL_FLAG_VALUE 0 #define BOOST_ONCE_INIT {BOOST_ONCE_INITIAL_FLAG_VALUE} // Based on Mike Burrows fast_pthread_once algorithm as described in // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2444.html template void call_once(once_flag& flag,Function f) { static boost::uintmax_t const uninitialized_flag=BOOST_ONCE_INITIAL_FLAG_VALUE; static boost::uintmax_t const being_initialized=uninitialized_flag+1; boost::uintmax_t const epoch=flag.epoch; boost::uintmax_t& this_thread_epoch=detail::get_once_per_thread_epoch(); if(epoch #endif I right understand, boost don't use atomic operation, so code from boost not thread-safe?

    Read the article

  • Need help configurating my Tomcat server

    - by gablin
    I just reinstalled my entire server, and now I can't seem to get my JSP-based website to work on Tomcat anymore. I use the same server.xml file, which worked perfectly before the reinstallation, but no longer. Here's the content of the server.xml file which worked before: <!--APR library loader. Documentation at /docs/apr.html --> <Listener className="org.apache.catalina.core.AprLifecycleListener" SSLEngine="on" /> <!--Initialize Jasper prior to webapps are loaded. Documentation at /docs/jasper-howto.html --> <Listener className="org.apache.catalina.core.JasperListener" /> <!-- JMX Support for the Tomcat server. Documentation at /docs/non-existent.html --> <Listener className="org.apache.catalina.mbeans.ServerLifecycleListener" /> <Listener className="org.apache.catalina.mbeans.GlobalResourcesLifecycleListener" /> <!-- Global JNDI resources Documentation at /docs/jndi-resources-howto.html --> <GlobalNamingResources> <!-- Editable user database that can also be used by UserDatabaseRealm to authenticate users --> <Resource name="UserDatabase" auth="Container" type="org.apache.catalina.UserDatabase" description="User database that can be updated and saved" factory="org.apache.catalina.users.MemoryUserDatabaseFactory" pathname="conf/tomcat-users.xml" /> </GlobalNamingResources> <!-- A "Service" is a collection of one or more "Connectors" that share a single "Container" Note: A "Service" is not itself a "Container", so you may not define subcomponents such as "Valves" at this level. Documentation at /docs/config/service.html --> <Service name="Catalina"> <!--The connectors can use a shared executor, you can define one or more named thread pools--> <!-- <Executor name="tomcatThreadPool" namePrefix="catalina-exec-" maxThreads="150" minSpareThreads="4"/> --> <!-- A "Connector" represents an endpoint by which requests are received and responses are returned. Documentation at : Java HTTP Connector: /docs/config/http.html (blocking & non-blocking) Java AJP Connector: /docs/config/ajp.html APR (HTTP/AJP) Connector: /docs/apr.html Define a non-SSL HTTP/1.1 Connector on port 8080 --> <Connector port="8080" protocol="HTTP/1.1" connectionTimeout="20000" redirectPort="8443" /> <!-- A "Connector" using the shared thread pool--> <!-- <Connector executor="tomcatThreadPool" port="8080" protocol="HTTP/1.1" connectionTimeout="20000" redirectPort="8443" /> --> <!-- Define a SSL HTTP/1.1 Connector on port 8443 This connector uses the JSSE configuration, when using APR, the connector should be using the OpenSSL style configuration described in the APR documentation --> <!-- <Connector port="8443" protocol="HTTP/1.1" SSLEnabled="true" maxThreads="150" scheme="https" secure="true" clientAuth="false" sslProtocol="TLS" /> --> <!-- Define an AJP 1.3 Connector on port 8009 --> <Connector port="8009" protocol="AJP/1.3" redirectPort="8443" /> <!-- An Engine represents the entry point (within Catalina) that processes every request. The Engine implementation for Tomcat stand alone analyzes the HTTP headers included with the request, and passes them on to the appropriate Host (virtual host). 
Documentation at /docs/config/engine.html --> <!-- You should set jvmRoute to support load-balancing via AJP ie : <Engine name="Standalone" defaultHost="localhost" jvmRoute="jvm1"> --> <Engine name="Catalina" defaultHost="localhost"> <!--For clustering, please take a look at documentation at: /docs/cluster-howto.html (simple how to) /docs/config/cluster.html (reference documentation) --> <!-- <Cluster className="org.apache.catalina.ha.tcp.SimpleTcpCluster"/> --> <!-- The request dumper valve dumps useful debugging information about the request and response data received and sent by Tomcat. Documentation at: /docs/config/valve.html --> <!-- <Valve className="org.apache.catalina.valves.RequestDumperValve"/> --> <!-- This Realm uses the UserDatabase configured in the global JNDI resources under the key "UserDatabase". Any edits that are performed against this UserDatabase are immediately available for use by the Realm. --> <Realm className="org.apache.catalina.realm.UserDatabaseRealm" resourceName="UserDatabase"/> <!-- Define the default virtual host Note: XML Schema validation will not work with Xerces 2.2. --> <!-- <Host name="localhost" appBase="webapps" unpackWARs="true" autoDeploy="true" xmlValidation="false" xmlNamespaceAware="false"> --> <!-- SingleSignOn valve, share authentication between web applications Documentation at: /docs/config/valve.html --> <!-- <Valve className="org.apache.catalina.authenticator.SingleSignOn" /> --> <!-- Access log processes all example. Documentation at: /docs/config/valve.html --> <!-- <Valve className="org.apache.catalina.valves.AccessLogValve" directory="logs" prefix="localhost_access_log." suffix=".txt" pattern="common" resolveHosts="false"/> --> <!-- </Host> --> <Host name="www.rebootradio.nu"> <Alias>rebootradio.nu</Alias> <Context path="" docBase="D:/services/http/rebootradio.nu" debug="1" reloadable="true"/> </Host> </Engine> </Service> </Server> The JSP site doesn't use any WAR files or anything like that; there's just a default.jsp in the specified folder D:/services/http/rebootradio.nu which loads the site. As I said, this configuration worked before, but now with the latest version of XAMPP and Tomcat it doesn't work anymore. All I get is a 404 message saying "The requested resource () is not available."

    Read the article

  • Facebook invites partially working

    - by dugi007
    Hello! I'm new to the Facebook API and I have a little problem. I created an iframe app and I'm using the following code to invite my friends. The invitation screen renders and I'm able to send invitations, but when someone accepts an invitation it redirects them to the following link: http://www.facebook.com/reqs.php#!//apps.facebook.com/erste_app/ and nothing happens (only the Facebook header appears, and that's all)... When they manually repost that link they are redirected to the app and everything works... <?php include 'facebook.php'; define( 'FB_API_KEY', 'e23463********9c7ebfd6d34' ); define( 'FB_SECRET', '5f6************7efff5c8cb8' ); define( 'FB_APPID', '312*********23' ); define( 'FB_CANVAS_URL', 'http://apps.facebook.com/erste_app/' ); define( 'FB_APP_HOME_URL', 'http://www.bijelarukavica.com/test/' ); define( 'FB_APP_NAME', 'Zaigraj s Rokom' ); $bOK=SendStandardInvitation("", false); function SendStandardInvitation($to, $bNewStyle = true) { $typeword = FB_APP_NAME; // Warning: double quotes in the content string will screw up the invite signature process $content = '<fb:req-choice url=\' ' . FB_CANVAS_URL . '\' label=\'Check out ' . FB_APP_NAME . ' />'; // if your have post add routines take them to that add app URL instead. $actionText = 'Probaj jesi bolji od mene uz "' . FB_APP_NAME . '".'; $bOK = SendNewRequest($to, $typeword, $content, $actionText); return $bOK; } function SendNewRequest($to, $typeword, $content, $actionText, $bInvitation = true) { $facebook = new Facebook(FB_API_KEY,FB_SECRET); $to = implode(",", $facebook->api_client->friends_get('','')); $bInviteAll = (!$to || $to == "" ? true : false); $excludeFriends = null; if (!$bInviteAll) $excludeFriends = $facebook->api_client->friends_get(); else // Get all friends with the app $excludeFriends = $facebook->api_client->friends_getAppUsers(); $excludeFriendsStr = null; foreach ($excludeFriends as $userid) { $pos = strpos($to, (string)$userid); if ($pos !== false) continue; if ($excludeFriendsStr) $excludeFriendsStr .= ','; $excludeFriendsStr .= $userid; } $params = array(); $params['api_key'] = FB_API_KEY; $params['content'] = $content; // Don't use htmlentities() or urlencode() here $params['type'] = $typeword; $params['action'] = FB_CANVAS_URL ; $params['actiontext'] = $actionText; $params['invite'] = ($bInvitation ? 'true' : 'false'); $params['rows'] = '5'; $params['max'] = '20'; $params['exclude_ids'] = $excludeFriendsStr; $params['sig'] = $facebook->generate_sig($params, FB_SECRET); $qstring = null; foreach ($params as $key => $value) { if ($qstring) $qstring .= '&'; $qstring .= "$key=".urlencode($value); } $inviteUrl = 'http://www.facebook.com/multi_friend_selector.php?'; $facebook->redirect($inviteUrl . $qstring); return true; } $facebook->api_client->notifications_sendRequest function SendRequest($to, $typeword, $content, $bInvitation = true) { $facebook = new Facebook(FB_API_KEY,FB_SECRET); $image = FB_APP_HOME_URL . 'logo.gif'; $result = $facebook->api_client->notifications_sendRequest($to, $typeword, $content, $image, $bInvitation); $url = $result; if (isset($url) && $url) { $facebook->redirect($url . '&canvas=1&next=index.php'); return true; } $bOK = ($result && $result != ""); return $bOK; } SendStandardInvitation($to, $bNewStyle = false) ?>

    Read the article

  • What is a good Java data structure for storing nested items (like cities in states)?

    - by anotherAlan
    I'm just getting started in Java and am looking for advice on a good way to store nested sets of data. For example, I'm interested in storing city population data that can be accessed by looking up the city in a given state. (Note: eventually, other data will be stored with each city as well; this is just the first attempt at getting started.) The current approach I'm using is to have a StateList Object which contains a HashMap that stores State Objects via a string key (i.e. HashMap<String, State>). Each State Object contains its own HashMap of City Objects keyed off the city name (i.e. HashMap<String, City>). A cut down version of what I've come up with looks like this: // TestPopulation.java public class TestPopulation { public static void main(String [] args) { // build the stateList Object StateList sl = new StateList(); // get a test state State stateAl = sl.getState("AL"); // make sure it's there. if(stateAl != null) { // add a city stateAl.addCity("Abbeville"); // now grab the city City cityAbbevilleAl = stateAl.getCity("Abbeville"); cityAbbevilleAl.setPopulation(2987); System.out.print("The city has a pop of: "); System.out.println(Integer.toString(cityAbbevilleAl.getPopulation())); } // otherwise, print an error else { System.out.println("That was an invalid state"); } } } // StateList.java import java.util.*; public class StateList { // define hash map to hold the states private HashMap<String, State> theStates = new HashMap<String, State>(); // setup constructor that loads the states public StateList() { String[] stateCodes = {"AL","AK","AZ","AR","CA","CO"}; // etc... for (String s : stateCodes) { State newState = new State(s); theStates.put(s, newState); } } // define method for getting a state public State getState(String stateCode) { if(theStates.containsKey(stateCode)) { return theStates.get(stateCode); } else { return null; } } } // State.java import java.util.*; public class State { // Setup the state code String stateCode; // HashMap for cities HashMap<String, City> cities = new HashMap<String, City>(); // define the constructor public State(String newStateCode) { System.out.println("Creating State: " + newStateCode); stateCode = newStateCode; } // define the method for adding a city public void addCity(String newCityName) { City newCityObj = new City(newCityName); cities.put(newCityName, newCityObj); } // define the method for getting a city public City getCity(String cityName) { if(cities.containsKey(cityName)) { return cities.get(cityName); } else { return null; } } } // City.java public class City { // Define the instance vars String cityName; int cityPop; // setup the constructor public City(String newCityName) { cityName = newCityName; System.out.println("Created City: " + newCityName); } public void setPopulation(int newPop) { cityPop = newPop; } public int getPopulation() { return cityPop; } } This is working for me, but I'm wondering if there are gotchas that I haven't run into, or if there are alternate/better ways to do the same thing. (P.S. I know that I need to add some more error checking in, but right now, I'm focused on trying to figure out a good data structure.) (NOTE: Edited to change setPop() and getPop() to setPopulation() and getPopulation() respectively to avoid confusion)
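    One way to sanity-check the design: the wrapper classes are fine, but the underlying shape is just a map of maps, keyed by state code and then by city name (in Java terms, HashMap<String, HashMap<String, City>>). A minimal sketch of that bare shape (shown in C++ purely to highlight the structure; all names are mine):

```cpp
#include <iostream>
#include <string>
#include <unordered_map>

// Bare nested-map shape: state code -> (city name -> population).
// Wrapper classes like StateList/State/City add type safety and room to
// grow around this, but each lookup reduces to these two hash probes.
int main() {
    std::unordered_map<std::string,
        std::unordered_map<std::string, int>> population;

    population["AL"]["Abbeville"] = 2987;    // operator[] creates both levels

    auto state = population.find("AL");      // guard against a bad state code
    if (state != population.end()) {
        auto city = state->second.find("Abbeville");
        if (city != state->second.end()) {
            std::cout << "The city has a pop of: " << city->second << "\n";
        }
    } else {
        std::cout << "That was an invalid state\n";
    }
    return 0;
}
```

    The usual trade-off to weigh: a single flat map with a compound key such as "AL/Abbeville" saves one level of nesting, but turns "all cities in a state" queries into a full scan, so the nested form the poster chose is the more common choice.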

    Read the article

  • Testing shared memory, strange things happen

    - by barfatchen
    I have two programs, compiled with GCC 4.1.2 and running on RedHat 5.5. It is a simple job to test shared memory; shmem1.c is like the following: #include <stdio.h> #include <stdlib.h> #include <string.h> #include <errno.h> #include <fcntl.h> #include <unistd.h> #include <sys/mman.h> #include <sys/stat.h> #define STATE_FILE "/program.shared" #define NAMESIZE 1024 #define MAXNAMES 100 typedef struct { char name[MAXNAMES][NAMESIZE]; int heartbeat ; int iFlag ; } SHARED_VAR; int main (void) { int first = 0; int shm_fd; int idx; static SHARED_VAR *conf; if((shm_fd = shm_open(STATE_FILE, (O_CREAT | O_EXCL | O_RDWR), (S_IREAD | S_IWRITE))) > 0 ) { first = 1; /* We are the first instance */ } else if((shm_fd = shm_open(STATE_FILE, (O_CREAT | O_RDWR), (S_IREAD | S_IWRITE))) < 0) { printf("Could not create shm object. %s\n", strerror(errno)); return errno; } if((conf = mmap(0, sizeof(SHARED_VAR), (PROT_READ | PROT_WRITE), MAP_SHARED, shm_fd, 0)) == MAP_FAILED) { return errno; } if(first) { for(idx=0;idx< 1000000000;idx++) { conf->heartbeat = conf->heartbeat + 1 ; } } printf("conf->heartbeat=(%d)\n",conf->heartbeat) ; close(shm_fd); shm_unlink(STATE_FILE); exit(0); }//main And shmem2.c is like the following (with the same #include lines): #define STATE_FILE "/program.shared" #define NAMESIZE 1024 #define MAXNAMES 100 typedef struct { char name[MAXNAMES][NAMESIZE]; int heartbeat ; int iFlag ; } SHARED_VAR; int main (void) { int first = 0; int shm_fd; static SHARED_VAR *conf; if((shm_fd = shm_open(STATE_FILE, (O_RDWR), (S_IREAD | S_IWRITE))) < 0) { printf("Could not create shm object. %s\n", strerror(errno)); return errno; } ftruncate(shm_fd, sizeof(SHARED_VAR)); if((conf = mmap(0, sizeof(SHARED_VAR), (PROT_READ | PROT_WRITE), MAP_SHARED, shm_fd, 0)) == MAP_FAILED) { return errno; } int idx ; for(idx=0;idx< 1000000000;idx++) { conf->heartbeat = conf->heartbeat + 1 ; } printf("conf->heartbeat=(%d)\n",conf->heartbeat) ; close(shm_fd); exit(0); } After compiling: gcc shmem1.c -lpthread -lrt -o shmem1.exe gcc shmem2.c -lpthread -lrt -o shmem2.exe I ran both programs almost at the same time in two terminals: [test]$ ./shmem1.exe First creation of the shm. Setting up default values conf->heartbeat=(840825951) [test]$ ./shmem2.exe conf->heartbeat=(1215083817) I am confused!! Since shmem1.c loops 1,000,000,000 times, how can the answer be 840,825,951? When I run shmem1.exe and shmem2.exe this way, most of the time conf->heartbeat will be larger than 1,000,000,000, but seldom and randomly I will see a result where conf->heartbeat is less than 1,000,000,000, in either shmem1.exe or shmem2.exe!! If I run shmem1.exe alone, it always prints 1,000,000,000. My question is: what could cause conf->heartbeat=(840825951) in shmem1.exe? Update: Although I am not sure, I think I have figured out what is going on. If shmem1.exe has, for example, incremented 10 times, then conf->heartbeat = 10; at that point shmem1.exe takes a rest, comes back, reads conf->heartbeat = 8 from shared memory, and continues from 8. Why is conf->heartbeat = 8? I think it is because shmem2.exe updated the shared memory data to 8, and shmem1.exe did not write 10 back to shared memory before it took its rest... That is just my theory; I don't know how to prove it!!
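    For readers hitting the same surprise: conf->heartbeat = conf->heartbeat + 1 is a separate load, add and store, so the two processes interleave and overwrite each other's increments - a process can even read back a value lower than one it wrote earlier, which matches the poster's theory. A minimal sketch of the atomic fix (my example, not the poster's code), using the __sync builtins GCC has provided since 4.1 and built the same way with -lrt:

```cpp
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

/* Sketch only: same shared counter, but incremented atomically.
 * __sync_fetch_and_add is a full-barrier atomic read-modify-write, so no
 * increment can be lost; two copies run to completion must end at exactly
 * 2,000,000,000. */
int main(void)
{
    int shm_fd = shm_open("/program.shared", O_CREAT | O_RDWR,
                          S_IREAD | S_IWRITE);
    if (shm_fd < 0) { perror("shm_open"); return 1; }
    if (ftruncate(shm_fd, sizeof(int)) < 0) { perror("ftruncate"); return 1; }

    int *heartbeat = (int *)mmap(0, sizeof(int), PROT_READ | PROT_WRITE,
                                 MAP_SHARED, shm_fd, 0);
    if (heartbeat == (int *)MAP_FAILED) { perror("mmap"); return 1; }

    for (long idx = 0; idx < 1000000000L; idx++)
        __sync_fetch_and_add(heartbeat, 1);   /* atomic: no lost updates */

    printf("heartbeat=(%d)\n", *heartbeat);
    return 0;
}
```

    On C11/C++11 toolchains the same effect comes from an _Atomic or std::atomic<int> placed in the mapping, but the builtin keeps the sketch close to the poster's GCC 4.1.2.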

    Read the article

  • Building services with .Net Part 1

    - by Allan Rwakatungu
    On the 26th of May 2010, I made a presentation to the .NET user group meeting (thanks to Malisa Ncube for organizing this event every month). If you missed my presentation, we talked about why we should all be building services … better still, using the .NET framework. This blog post is an introduction to services: why you would want to build services and how you can build services using the .NET framework. What is a service? OASIS defines a service as "a mechanism to enable access to one or more capabilities, where the access is provided using a prescribed interface and is exercised consistent with constraints and policies as specified by the service description." [1]. If the above definition sounds too academic, you can also define services as loosely coupled units of functionality that have no calls to each other embedded in them. Instead of embedding calls to each other in their service code, services use defined protocols that describe how they pass and parse messages. This is a good way to think about services if you're from an object-oriented background: while in object-oriented programming functions make calls to each other, in service-oriented programming functions pass messages between each other. Why would you want to use services? 1. If your enterprise architecture looks like this (diagram not shown). Services are the building blocks for SOA. With SOA you can move away from the spaghetti infrastructure that is common in most enterprises. The complexity, or lack of visibility, of the integration points in your enterprise makes it difficult and costly to implement new initiatives and changes into the business - and even impossible in some cases - as it is not possible to identify the impact a change in one system might have on other systems. With services you can move to an architecture like this (diagram not shown): from spaghetti infrastructure to building blocks that are well-defined and manageable, achieving cost efficiency and, not least, business agility - enabling you to react to changes in the market with speed and to achieve operational efficiency and control. 2. If you want to become the next Gates or Zuckerberg. Have you heard about Web 2.0? Mashups? Software as a service (SaaS)? Cloud computing? They all offer you the opportunity to have scalable but low-cost business models, and they are built using services. Some of my favorite companies that leverage services for their business models include https://www.salesforce.com/ (cloud CRM), http://www.twitter.com (more people use Twitter clients built by 3rd parties than their official clients) and http://www.kayak.com/ (compares data from other travel sites to give information to users in one location). Services with the .NET framework: If you are a .NET developer and you want to develop services, Windows Communication Foundation (WCF) is the tool for you. WCF is Microsoft's unified programming model (service model) for building service-oriented applications. (Before .NET 3.0 you had several models for programming services in .NET, including .NET Remoting, web services (ASMX), COM+, Microsoft Message Queuing (MSMQ), etc.; after .NET 3.0 the programming model was unified into one, i.e. WCF.) Windows Communication Foundation provides you with 1. a Software Development Kit (SDK) for creating SOA applications and 2. a runtime for running services on the Windows platform. Why should you use Windows Communication Foundation if you're programming services? 1. It supports interoperable and open standards, e.g.
WS-* protocols for programming SOAP services. 2. It has a unified programming model: whether you use TCP, HTTP, pipes or message queues for transport, programmers need to learn just one way to program (previously you had .NET Remoting, MSMQ, web services and COM+, and they were all done differently). 3. It has a productive programming model: you don't have to worry about all the plumbing involved in writing services, and you get a rich declarative programming model with stuff like logging, transactions and reliable messaging built into Windows Communication Foundation. Understanding services in WCF: The basic principles of WCF are as easy as ABC. A - Address: this is where the service is located. B - Binding: this describes how you communicate with the service, e.g. whether to use TCP, HTTP or both, how to exchange security information with the service, etc. C - Contract: this defines what the service can do, e.g. pay a water bill or make a phone call. A - Addresses. In WCF, an address is a combination of transport, server name, port and path. Example addresses include http://localhost:8001 net.tcp://localhost:8002/MyService net.pipe://localhost/MyPipe net.msmq://localhost/private/MyService net.msmq://localhost/MyService B - Binding. There are numerous ways to communicate with services - different ways that a message can be formatted/sent/secured - which allows you to tailor your service for the compatibility/performance you require for your solution. Transport: you can use HTTP, TCP, MSMQ, named pipes, your own custom transport, etc. Message: you can send plain text, binary, or Message Transmission Optimization Mechanism (MTOM) messages. Communication security: no security, transport security, message security, authenticating and authorizing callers, etc. Behaviour: your service can support transactions, be reliable, use queues, support AJAX, etc. C - Contract. You define what your service can do using Service contracts :- define the operations your service can perform, its communications and behaviours. Data contracts :- define the messages that are passed from and into your service and how they are formatted. Fault contracts :- define the error types in your service. As an example, suppose your service shows money. You define your service contract using an interface: [ServiceContract] public interface IShowMeTheMoney {   [OperationContract]    Money Show(); } You define the data contract by annotating a class with the DataContract attribute and the fields you want to pass in the message as DataMembers. (Note: in the latest versions of WCF you don't have to use attributes if you are passing all of the object's properties in the message.) [DataContract] public class Money {   [DataMember]   public string Currency { get; set; }   [DataMember]   public Decimal Amount { get; set; }   public string Comment { get; set; } } Features of Windows Communication Foundation: WCF is not only simple but feature-rich, offering you several options to tweak your service to fit your business requirements. Some of the features of WCF include: 1. Workflow services: you can combine WCF with Windows Workflow Foundation (WF) to write workflow-type services. 2. Control over how your data (messages) is transferred and serialized, e.g. you can serialize your business objects as XML or binary. 3. Control over session management, instance creation and concurrency management, without writing code if you like. 4. Queues and reliable sessions: you can store messages from the sending client and later forward them to the receiving application.
You can also guarantee that messages will arrive at their destination. 5. Transactions: you can have different services participate in transactional operations that can be rolled back if needed. 6. Security: WCF has rich features for authorization and authentication, as well as keeping audit trails. 7. Web programming model: WCF allows developers to expose services as non-SOAP endpoints. 8. Inbuilt features that you can use to write JSON services that support AJAX applications. And lots more. In my next blog I will show you how you can use WCF features to write a real-world business service.

    Read the article

  • Squeezing hardware

    - by [email protected]
    It's very common that high availability means duplicate hardware, so costs go up. Nowadays, CIOs and DBAs have the challenge of reducing the money spent while increasing performance and availability. Since Grid Infrastructure 11gR2, there is a new feature that helps them meet this challenge: server pools. In Grid Infrastructure 11gR2 you can define server pools across the cluster, setting up the minimum number of servers, the maximum, and how important the pool is. For example, consider that "Velasco, Boixeda & co" has 3 apps in a 6-server cluster. The first one is the main core-business app, the second one is mid-range, and the third is a database that is not very important. We define the following resource requirements for the expected workload: 1. The main app requires 2 servers. 2. The mid-range app requires 1 server. 3. The third is not a required app in case of disaster. Then we define 3 server pools across the cluster: 1. Main pool: min two servers, max three servers, importance four. 2. Mid pool: min one server, max two servers, importance two. 3. Test pool: min zero servers, max one server, importance one. So the initial configuration is: the main pool has three servers, the mid pool has two servers, and the test pool has one server. If any server fails, the following algorithm is applied to pick the pool that gives up a server: 1. The server pool of least importance is chosen. 2. If server pools are of the same importance, then the server pool that has more than its defined minimum of servers is chosen. Hope it helps
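    That failover rule is compact enough to express in code; the toy sketch below (my illustration, not Oracle's implementation) picks which pool must give up a server when one fails:

```cpp
#include <string>
#include <vector>

// Toy model of the documented donor-selection rule (illustration only, not
// Oracle code): when a server is lost, take a replacement from the pool of
// least importance; among pools of equal importance, prefer one still
// holding more than its configured minimum.
struct ServerPool {
    std::string name;
    int min_servers, max_servers, importance;
    int servers;  // servers currently assigned to the pool
};

ServerPool* choose_donor(std::vector<ServerPool>& pools) {
    ServerPool* donor = nullptr;
    for (ServerPool& p : pools) {
        if (p.servers == 0) continue;        // nothing left to give
        bool better = donor == nullptr
            || p.importance < donor->importance
            || (p.importance == donor->importance
                && p.servers > p.min_servers
                && donor->servers <= donor->min_servers);
        if (better) donor = &p;
    }
    return donor;
}
```

    Run against the example configuration, the test pool (importance one, minimum zero) donates first, then the mid pool, so the core-business pool keeps its two required servers for as long as any lower-importance server remains.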

    Read the article

  • Two things I learned this week...

    - by noreply(at)blogger.com (Thomas Kyte)
    I often say "I learn something new about Oracle every day".  It really is true - there is so much to know about it, it is hard to keep up sometimes.Here are the two new things I learned - the first is regarding temporary tablespaces.  In the past - when people have asked "how can I shrink my temporary tablespace" I've said "create a new one that is smaller, alter your database/users to use this new one by default, wait a bit, drop the old one".  Actually I usually said first - "don't, it'll just grow again" but some people really wanted to make it smaller.Now, there is an easier way:http://docs.oracle.com/cd/E11882_01/server.112/e26088/statements_3002.htm#SQLRF53578Using alter tablespace temp shrink space .The second thing is just a little sqlplus quirk that I probably knew at one point but totally forgot.  People run into problems with &'s in sqlplus all of the time as sqlplus tries to substitute in for an &variable.  So, if they try to select '&hello world' from dual - they'll get:ops$tkyte%ORA11GR2> select '&hello world' from dual;Enter value for hello: old   1: select '&hello world' from dualnew   1: select ' world' from dual'WORLD------ worldops$tkyte%ORA11GR2> One solution is to "set define off" to disable the substitution (or set define to some other character).  Another oft quoted solution is to use chr(38) - select chr(38)||'hello world' from dual.  I never liked that one personally.  Today - I was shown another wayhttps://asktom.oracle.com/pls/apex/f?p=100:11:0::::P11_QUESTION_ID:4549764300346084350#4573022300346189787 ops$tkyte%ORA11GR2> select '&' || 'hello world' from dual;'&'||'HELLOW------------&hello worldops$tkyte%ORA11GR2>just concatenate '&' to the string, sqlplus doesn't touch that one!  I like that better than chr(38) (but a little less than set define off....)

    Read the article

  • External table and preprocessor for loading LOBs

    - by David Allan
    I was using the COLUMN TRANSFORMS syntax to load LOBs into Oracle using the Oracle external table, which is a handy way of doing several things - from loading LOBs from the filesystem to having constants as fields. In OWB you can use unbound external tables to define an external table using your own arbitrary access parameters - I blogged a while back on this for doing preprocessing before it was added into OWB 11gR2. For loading LOBs using the COLUMN TRANSFORMS syntax, have a read through this post on loading CLOB, BLOB or any LOB; the files to load can be specified via a filename field, and the content of each file will be the LOB data. So, using the example from the linked post, you can define the columns, then define the access parameters - if you go the unbound external table route you can put whatever you want in here (your external table get-out-of-jail-free card); this will let you read the LOB files from the filesystem and use the external table in a mapping. Pushing the envelope a little further, I then thought about marrying together the preprocessor with the COLUMN TRANSFORMS; this would have let me have a shell script, for example, as the preprocessor which listed the contents of a directory and let me read the files as LOBs via an external table. Unfortunately that doesn't quite work - there is now a bug/enhancement logged, so one day maybe. So I'm afraid my blog title was a little bit of a teaser....

    Read the article

  • Formal definition for the term "pure OO language"?

    - by Yauhen Yakimovich
    I can't think of a better place among SO siblings to pose such a question. Originally I wanted to ask "Is Python a pure OO language?", but considering the trouble and discomfort people experience while trying to define the term, I decided to start by obtaining a clear definition for the term itself. It would be rather fair to start with the correspondence of Dr. Alan Kay, who coined the term (note the inspiration in the biological analogy to cells or other living objects). There are the following ways to approach the task: Give a comparative analysis by listing programming languages that exhibit certain properties unique and sufficient to define the term (although Smalltalk and Java are passing examples, IMO this way seems neither really complete nor fruitful). Give a formal definition (or something close to it, e.g. in a more academic or mathematical style). Give a philosophical definition that relies entirely on the semantic context of a concrete language or a priori programming experience (there must be some chance of successful explanation by the community). My current version: "If a certain programming (formal) language can (grammatically) differentiate between operations and operands, and can infer about the type of each operand whether that type is an object (in the sense of OOP) or not, then we call such a language an OO language as long as there is at least one type in the language which is an object. Finally, if all types of the language are also objects, we define such a language to be a pure OO language." I would appreciate any possible improvement of it. As you can see, I have just made the definition dependent on the term "object" (often fully referenced as a class of objects).

    Read the article

  • What is the precise definition of programming paradigm?

    - by Kazark
    Wikipedia defines programming paradigm thus: "a fundamental style of computer programming", which is echoed in the descriptive text of the paradigms tag on this site. I find this a disappointing definition. Anyone who knows the words programming and paradigm could do about that well without knowing anything else about it. There are many styles of computer programming at many levels of abstraction; within any given programming paradigm, multiple styles are possible. For example, Bob Martin says in Clean Code (13), Consider this book a description of the Object Mentor School of Clean Code. The techniques and teachings within are the way that we practice our art. We are willing to claim that if you follow these teachings, you will enjoy the benefits that we have enjoyed, and you will learn to write code that is clean and professional. But don't make the mistake of thinking that we are somehow "right" in any absolute sense. Thus Bob Martin is not claiming to have the correct style of Object-Oriented programming, even though he, if anyone, might have some claim to doing so. But even within his school of programming, we might have different styles of formatting the code (K&R, etc). There are many styles of programming at many levels. So how can we define programming paradigm rigorously, to distinguish it from other categories of programming styles? Fundamental is somewhat helpful, but not specific. How can we define the phrase in a way that will communicate more than the separate meanings of each of the two words—in other words, how can we define it in a way that will provide additional meaning for someone who speaks English but isn't familiar with a variety of paradigms?

    Read the article

< Previous Page | 26 27 28 29 30 31 32 33 34 35 36 37  | Next Page >