Search Results

Search found 17734 results on 710 pages for 'values'.


  • datagrid binding

    - by abcdd007
    using System;
    using System.Data;
    using System.Configuration;
    using System.Collections;
    using System.Web;
    using System.Web.Security;
    using System.Web.UI;
    using System.Web.UI.WebControls;
    using System.Web.UI.WebControls.WebParts;
    using System.Web.UI.HtmlControls;
    using System.Data.SqlClient;

    public partial class OrderMaster : System.Web.UI.Page
    {
        BLLOrderMaster objMaster = new BLLOrderMaster();

        protected void Page_Load(object sender, EventArgs e)
        {
            if (!Page.IsPostBack)
            {
                SetInitialRow();
                string OrderNumber = objMaster.SelectDetails().ToString();
                if (OrderNumber != "")
                {
                    txtOrderNo.Text = OrderNumber;
                    txtOrderDate.Focus();
                }
            }
        }

        private void InsertEmptyRow()
        {
            DataTable dt = new DataTable();
            DataRow dr = null;
            dt.Columns.Add(new DataColumn("ItemCode", typeof(string)));
            dt.Columns.Add(new DataColumn("Description", typeof(string)));
            dt.Columns.Add(new DataColumn("Unit", typeof(string)));
            dt.Columns.Add(new DataColumn("Qty", typeof(string)));
            dt.Columns.Add(new DataColumn("Rate", typeof(string)));
            dt.Columns.Add(new DataColumn("Disc", typeof(string)));
            dt.Columns.Add(new DataColumn("Amount", typeof(string)));
            for (int i = 0; i < 5; i++)
            {
                dr = dt.NewRow();
                dr["ItemCode"] = string.Empty;
                dr["Description"] = string.Empty;
                dr["Unit"] = string.Empty;
                dr["Qty"] = string.Empty;
                dr["Rate"] = string.Empty;
                dr["Disc"] = string.Empty;
                dr["Amount"] = string.Empty;
                dt.Rows.Add(dr);
            }
            //GridView1.DataSource = dt;
            //GridView1.DataBind();
        }

        // Builds the one-row starting table, stores it in ViewState and binds the grid.
        private void SetInitialRow()
        {
            DataTable dt = new DataTable();
            DataRow dr = null;
            dt.Columns.Add(new DataColumn("RowNumber", typeof(string)));
            dt.Columns.Add(new DataColumn("ItemCode", typeof(string)));
            dt.Columns.Add(new DataColumn("Description", typeof(string)));
            dt.Columns.Add(new DataColumn("Unit", typeof(string)));
            dt.Columns.Add(new DataColumn("Qty", typeof(string)));
            dt.Columns.Add(new DataColumn("Rate", typeof(string)));
            dt.Columns.Add(new DataColumn("Disc", typeof(string)));
            dt.Columns.Add(new DataColumn("Amount", typeof(string)));
            dr = dt.NewRow();
            dr["RowNumber"] = 1;
            dr["ItemCode"] = string.Empty;
            dr["Description"] = string.Empty;
            dr["Unit"] = string.Empty;
            dr["Qty"] = string.Empty;
            dr["Rate"] = string.Empty;
            dr["Disc"] = string.Empty;
            dr["Amount"] = string.Empty;
            dt.Rows.Add(dr);
            //Store DataTable
            ViewState["OrderDetails"] = dt;
            Gridview1.DataSource = dt;
            Gridview1.DataBind();
        }

        // Copies the current TextBox values into the ViewState table, appends a
        // new row and rebinds the grid.
        protected void AddNewRowToGrid()
        {
            int rowIndex = 0;
            if (ViewState["OrderDetails"] != null)
            {
                DataTable dtCurrentTable = (DataTable)ViewState["OrderDetails"];
                DataRow drCurrentRow = null;
                if (dtCurrentTable.Rows.Count > 0)
                {
                    for (int i = 1; i <= dtCurrentTable.Rows.Count; i++)
                    {
                        //extract the TextBox values
                        TextBox box1 = (TextBox)Gridview1.Rows[rowIndex].Cells[1].FindControl("txtItemCode");
                        TextBox box2 = (TextBox)Gridview1.Rows[rowIndex].Cells[2].FindControl("txtdescription");
                        TextBox box3 = (TextBox)Gridview1.Rows[rowIndex].Cells[3].FindControl("txtunit");
                        TextBox box4 = (TextBox)Gridview1.Rows[rowIndex].Cells[4].FindControl("txtqty");
                        TextBox box5 = (TextBox)Gridview1.Rows[rowIndex].Cells[5].FindControl("txtRate");
                        TextBox box6 = (TextBox)Gridview1.Rows[rowIndex].Cells[6].FindControl("txtdisc");
                        TextBox box7 = (TextBox)Gridview1.Rows[rowIndex].Cells[7].FindControl("txtamount");
                        drCurrentRow = dtCurrentTable.NewRow();
                        drCurrentRow["RowNumber"] = i + 1;
                        drCurrentRow["ItemCode"] = box1.Text;
                        drCurrentRow["Description"] = box2.Text;
                        drCurrentRow["Unit"] = box3.Text;
                        drCurrentRow["Qty"] = box4.Text;
                        drCurrentRow["Rate"] = box5.Text;
                        drCurrentRow["Disc"] = box6.Text;
                        drCurrentRow["Amount"] = box7.Text;
                        rowIndex++;
                    }
                    //add new row to DataTable
                    dtCurrentTable.Rows.Add(drCurrentRow);
                    //Store the current data to ViewState
                    ViewState["OrderDetails"] = dtCurrentTable;
                    //Rebind the Grid with the current data
                    Gridview1.DataSource = dtCurrentTable;
                    Gridview1.DataBind();
                }
            }
            else
            {
                //
            }
            //Set Previous Data on Postbacks
            SetPreviousData();
        }

        private void SetPreviousData()
        {
            int rowIndex = 0;
            if (ViewState["OrderDetails"] != null)
            {
                DataTable dt = (DataTable)ViewState["OrderDetails"];
                if (dt.Rows.Count > 0)
                {
                    for (int i = 1; i < dt.Rows.Count; i++)
                    {
                        TextBox box1 = (TextBox)Gridview1.Rows[rowIndex].Cells[1].FindControl("txtItemCode");
                        TextBox box2 = (TextBox)Gridview1.Rows[rowIndex].Cells[2].FindControl("txtdescription");
                        TextBox box3 = (TextBox)Gridview1.Rows[rowIndex].Cells[3].FindControl("txtunit");
                        TextBox box4 = (TextBox)Gridview1.Rows[rowIndex].Cells[4].FindControl("txtqty");
                        TextBox box5 = (TextBox)Gridview1.Rows[rowIndex].Cells[5].FindControl("txtRate");
                        TextBox box6 = (TextBox)Gridview1.Rows[rowIndex].Cells[6].FindControl("txtdisc");
                        TextBox box7 = (TextBox)Gridview1.Rows[rowIndex].Cells[7].FindControl("txtamount");
                        box1.Text = dt.Rows[i]["ItemCode"].ToString();
                        box2.Text = dt.Rows[i]["Description"].ToString();
                        box3.Text = dt.Rows[i]["Unit"].ToString();
                        box4.Text = dt.Rows[i]["Qty"].ToString();
                        box5.Text = dt.Rows[i]["Rate"].ToString();
                        box6.Text = dt.Rows[i]["Disc"].ToString();
                        box7.Text = dt.Rows[i]["Amount"].ToString();
                        rowIndex++;
                    }
                    dt.AcceptChanges();
                }
                ViewState["OrderDetails"] = dt;
            }
        }

        protected void BindOrderDetails()
        {
            DataTable dtOrderDetails = new DataTable();
            if (ViewState["OrderDetails"] != null)
            {
                dtOrderDetails = (DataTable)ViewState["OrderDetails"];
            }
            else
            {
                dtOrderDetails.Columns.Add("");
                dtOrderDetails.Columns.Add("");
                dtOrderDetails.Columns.Add("");
                dtOrderDetails.Columns.Add("");
                dtOrderDetails.Columns.Add("");
                dtOrderDetails.Columns.Add("");
                dtOrderDetails.AcceptChanges();
                DataRow dr = dtOrderDetails.NewRow();
                dtOrderDetails.Rows.Add(dr);
                ViewState["OrderDetails"] = dtOrderDetails;
            }
            if (dtOrderDetails != null)
            {
                Gridview1.DataSource = dtOrderDetails;
                Gridview1.DataBind();
                if (Gridview1.Rows.Count > 0)
                {
                    ((LinkButton)Gridview1.Rows[Gridview1.Rows.Count - 1].FindControl("btnDelete")).Visible = false;
                }
            }
        }

        protected void btnSave_Click(object sender, EventArgs e)
        {
            if (txtOrderDate.Text != "" && txtOrderNo.Text != "" && txtPartyName.Text != "" && txttotalAmount.Text != "")
            {
                BLLOrderMaster bllobj = new BLLOrderMaster();
                DataTable dtdetails = new DataTable();
                UpdateItemDetailRow();
                dtdetails = (DataTable)ViewState["OrderDetails"];
                SetValues(bllobj);
                int k = 0;
                k = bllobj.Insert_Update_Delete(1, bllobj, dtdetails);
                if (k > 0)
                {
                    ScriptManager.RegisterStartupScript(this, this.GetType(), "Login Denied", "<Script>alert('Order Code Already Exists');</Script>", false);
                }
                else
                {
                    ScriptManager.RegisterStartupScript(this, this.GetType(), "Login Denied", "<Script>alert('Record Saved Successfully');</Script>", false);
                }
                dtdetails.Clear();
                SetInitialRow();
                txttotalAmount.Text = "";
                txtOrderNo.Text = "";
                txtPartyName.Text = "";
                txtOrderDate.Text = "";
                txttotalQty.Text = "";
                string OrderNumber = objMaster.SelectDetails().ToString();
                if (OrderNumber != "")
                {
                    txtOrderNo.Text = OrderNumber;
                    txtOrderDate.Focus();
                }
            }
            else
            {
                txtOrderNo.Text = "";
            }
        }

        public void SetValues(BLLOrderMaster bllobj)
        {
            if (txtOrderNo.Text != null && txtOrderNo.Text != "")
            {
                bllobj.OrNumber = Convert.ToInt16(txtOrderNo.Text);
            }
            if (txtOrderDate.Text != null && txtOrderDate.Text != "")
            {
                bllobj.Date = DateTime.Parse(txtOrderDate.Text).ToString("dd/MM/yyyy");
            }
            if (txtPartyName.Text != null && txtPartyName.Text != "")
            {
                bllobj.PartyName = txtPartyName.Text;
            }
            bllobj.TotalBillAmount = txttotalAmount.Text == "" ? 0 : int.Parse(txttotalAmount.Text);
            bllobj.TotalQty = txttotalQty.Text == "" ? 0 : int.Parse(txttotalQty.Text);
        }

        // Recomputes the bill totals whenever a discount is edited, then grows the grid.
        protected void txtdisc_TextChanged(object sender, EventArgs e)
        {
            double total = 0;
            double totalqty = 0;
            foreach (GridViewRow dgvr in Gridview1.Rows)
            {
                TextBox tb = (TextBox)dgvr.Cells[7].FindControl("txtamount");
                double sum;
                if (double.TryParse(tb.Text.Trim(), out sum))
                {
                    total += sum;
                }
                TextBox tb1 = (TextBox)dgvr.Cells[4].FindControl("txtqty");
                double qtysum;
                if (double.TryParse(tb1.Text.Trim(), out qtysum))
                {
                    totalqty += qtysum;
                }
            }
            txttotalAmount.Text = total.ToString();
            txttotalQty.Text = totalqty.ToString();
            AddNewRowToGrid();
            Gridview1.TabIndex = 1;
        }

        public void UpdateItemDetailRow()
        {
            DataTable dt = new DataTable();
            if (ViewState["OrderDetails"] != null)
            {
                dt = (DataTable)ViewState["OrderDetails"];
            }
            if (dt.Rows.Count > 0)
            {
                for (int i = 0; i < Gridview1.Rows.Count; i++)
                {
                    dt.Rows[i]["ItemCode"] = (Gridview1.Rows[i].FindControl("txtItemCode") as TextBox).Text;
                    if (dt.Rows[i]["ItemCode"].ToString() == "")
                    {
                        dt.Rows[i].Delete();
                        break;
                    }
                    else
                    {
                        dt.Rows[i]["Description"] = (Gridview1.Rows[i].FindControl("txtdescription") as TextBox).Text;
                        dt.Rows[i]["Unit"] = (Gridview1.Rows[i].FindControl("txtunit") as TextBox).Text;
                        dt.Rows[i]["Qty"] = (Gridview1.Rows[i].FindControl("txtqty") as TextBox).Text;
                        dt.Rows[i]["Rate"] = (Gridview1.Rows[i].FindControl("txtRate") as TextBox).Text;
                        dt.Rows[i]["Disc"] = (Gridview1.Rows[i].FindControl("txtdisc") as TextBox).Text;
                        dt.Rows[i]["Amount"] = (Gridview1.Rows[i].FindControl("txtamount") as TextBox).Text;
                    }
                }
                dt.AcceptChanges();
            }
            ViewState["OrderDetails"] = dt;
        }
    }
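
    Code-behind like this only works if the grid's columns are TemplateFields whose TextBox IDs match the FindControl calls above. A minimal sketch of the markup this page appears to expect - the control IDs come from the code, every other attribute is an assumption:

    <asp:GridView ID="Gridview1" runat="server" AutoGenerateColumns="false">
        <Columns>
            <asp:BoundField DataField="RowNumber" HeaderText="No." />
            <asp:TemplateField HeaderText="Item Code">
                <ItemTemplate>
                    <%-- one TextBox per column; txtdescription, txtunit, txtqty,
                         txtRate and txtamount follow the same pattern --%>
                    <asp:TextBox ID="txtItemCode" runat="server" Text='<%# Bind("ItemCode") %>' />
                </ItemTemplate>
            </asp:TemplateField>
            <asp:TemplateField HeaderText="Disc">
                <ItemTemplate>
                    <%-- AutoPostBack so txtdisc_TextChanged fires and the totals refresh --%>
                    <asp:TextBox ID="txtdisc" runat="server" AutoPostBack="true"
                                 OnTextChanged="txtdisc_TextChanged" Text='<%# Bind("Disc") %>' />
                </ItemTemplate>
            </asp:TemplateField>
        </Columns>
    </asp:GridView>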


  • Error 2013: Lost connection to MySQL server during query when executing CHECK TABLE FOR UPGRADE

    - by Dean Richardson
    I just upgraded Ubuntu from 11.10 to 12.04. My Rails app now returns the (Passenger) error "Can't connect to local MySQL server through socket '/var/run/mysqld/mysqld.sock' (111) (Mysql2::Error)". I get a similar error when I try to access MySQL at the command line on my Ubuntu server using mysql -u root -p. I have mysql-server 5.5 installed. I've checked, and mysql is not running. When I try to restart it, it fails. Here are some key lines from the tail of /var/log/syslog after an attempted restart:

    dean@dgwjasonfried:/etc/mysql$ tail -f /var/log/syslog
    Mar 7 08:55:27 dgwjasonfried /etc/mysql/debian-start[5107]: Looking for 'mysqlcheck' as: /usr/bin/mysqlcheck
    Mar 7 08:55:27 dgwjasonfried /etc/mysql/debian-start[5107]: Running 'mysqlcheck' with connection arguments: '--port=3306' '--socket=/var/run/mysqld/mysqld.sock' '--host=localhost' '--socket=/var/run/mysqld/mysqld.sock' '--host=localhost' '--socket=/var/run/mysqld/mysqld.sock'
    Mar 7 08:55:27 dgwjasonfried /etc/mysql/debian-start[5107]: Running 'mysqlcheck' with connection arguments: '--port=3306' '--socket=/var/run/mysqld/mysqld.sock' '--host=localhost' '--socket=/var/run/mysqld/mysqld.sock' '--host=localhost' '--socket=/var/run/mysqld/mysqld.sock'
    Mar 7 08:55:27 dgwjasonfried /etc/mysql/debian-start[5107]: /usr/bin/mysqlcheck: Got error: 2013: Lost connection to MySQL server during query when executing 'CHECK TABLE ... FOR UPGRADE'
    Mar 7 08:55:27 dgwjasonfried /etc/mysql/debian-start[5107]: FATAL ERROR: Upgrade failed
    Mar 7 08:55:27 dgwjasonfried /etc/mysql/debian-start[5107]: molex_app_development.assets OK
    Mar 7 08:55:27 dgwjasonfried /etc/mysql/debian-start[5107]: molex_app_development.ecd_types OK
    Mar 7 08:55:27 dgwjasonfried /etc/mysql/debian-start[5124]: Checking for insecure root accounts.
    Mar 7 08:55:27 dgwjasonfried kernel: [ 7551.769657] init: mysql main process (5064) terminated with status 1
    Mar 7 08:55:27 dgwjasonfried kernel: [ 7551.769697] init: mysql respawning too fast, stopped

    Here is most of /etc/mysql/my.cnf:

    # Remember to edit /etc/mysql/debian.cnf when changing the socket location.
    [client]
    port = 3306
    socket = /var/run/mysqld/mysqld.sock

    # Here is entries for some specific programs
    # The following values assume you have at least 32M ram

    # This was formally known as [safe_mysqld]. Both versions are currently parsed.
    [mysqld_safe]
    socket = /var/run/mysqld/mysqld.sock
    nice = 0

    [mysqld]
    # Basic Settings
    user = mysql
    pid-file = /var/run/mysqld/mysqld.pid
    socket = /var/run/mysqld/mysqld.sock
    port = 3306
    basedir = /usr
    datadir = /var/lib/mysql
    tmpdir = /tmp
    lc-messages-dir = /usr/share/mysql
    skip-external-locking
    # Instead of skip-networking the default is now to listen only on
    # localhost which is more compatible and is not less secure.
    bind-address = 127.0.0.1

    And here are the permissions for /var/run/mysqld/mysqld.sock:

    srwxrwxrwx 1 mysql mysql 0 Mar 7 09:18 mysqld.sock

    I'd be grateful for any suggestions the community might have. I reviewed the related questions here and attempted some of the fixes offered, but to no avail. Thanks! Dean Richardson

    Update: Thanks to quanta's suggestion, I looked at the /var/log/mysql/error.log file. I found error messages relating to pointers, fatal signals, and more stuff that I really couldn't make much sense of. I also found mysql man page references, however. One suggested that I try starting mysqld with the --innodb_force_recovery=# option, then attempt to dump (or drop) the offending/corrupted database or table. I worked through the escalating option levels one by one (innodb_force_recovery=1, innodb_force_recovery=2, etc.). This allowed me to successfully run mysql -u root -p from the command line and execute several commands. I was able to run queries on my production database, but any attempt to query, dump, or even drop my development database raised an error and led to me losing the connection to MySQL. So I've made progress, but until I'm somehow able to drop or repair my development db I'm still unable to get my app to load. Any further advice or suggestions? Thanks! Dean

    Update: Right after retrying sudo mysqld --innodb_force_recovery=1, the error.log file shows this:

    130308 4:55:39 [Note] Plugin 'FEDERATED' is disabled.
    130308 4:55:39 InnoDB: The InnoDB memory heap is disabled
    130308 4:55:39 InnoDB: Mutexes and rw_locks use GCC atomic builtins
    130308 4:55:39 InnoDB: Compressed tables use zlib 1.2.3.4
    130308 4:55:39 InnoDB: Initializing buffer pool, size = 128.0M
    130308 4:55:39 InnoDB: Completed initialization of buffer pool
    130308 4:55:39 InnoDB: highest supported file format is Barracuda.
    InnoDB: The log sequence number in ibdata files does not match
    InnoDB: the log sequence number in the ib_logfiles!
    130308 4:55:39 InnoDB: Database was not shut down normally!
    InnoDB: Starting crash recovery.
    InnoDB: Reading tablespace information from the .ibd files...
    InnoDB: Restoring possible half-written data pages from the doublewrite
    InnoDB: buffer...
    130308 4:55:40 InnoDB: Waiting for the background threads to start
    130308 4:55:41 InnoDB: 1.1.8 started; log sequence number 10259220
    130308 4:55:41 InnoDB: !!! innodb_force_recovery is set to 1 !!!
    130308 4:55:41 [Note] Server hostname (bind-address): '127.0.0.1'; port: 3306
    130308 4:55:41 [Note] - '127.0.0.1' resolves to '127.0.0.1';
    130308 4:55:41 [Note] Server socket created on IP: '127.0.0.1'.
    130308 4:55:41 [Note] Event Scheduler: Loaded 0 events
    130308 4:55:41 [Note] mysqld: ready for connections.
    Version: '5.5.29-0ubuntu0.12.04.2' socket: '/var/run/mysqld/mysqld.sock' port: 3306 (Ubuntu)

    Then after mysql -u root -p and:

    mysql> drop database molex_app_development;
    ERROR 2013 (HY000): Lost connection to MySQL server during query

    the error.log contains:

    dean@dgwjasonfried:/var/log/mysql$ tail -f error.log
    /lib/x86_64-linux-gnu/libc.so.6(clone+0x6d)[0x7f6a3ff9ecbd]
    Trying to get some variables.
    Some pointers may be invalid and cause the dump to abort.
    Query (7f6a1c004bd8): is an invalid pointer
    Connection ID (thread ID): 1
    Status: NOT_KILLED
    The manual page at http://dev.mysql.com/doc/mysql/en/crashing.html contains
    information that should help you find out what is causing the crash.
    130308 4:55:39 [Note] Plugin 'FEDERATED' is disabled.
    130308 4:55:39 InnoDB: The InnoDB memory heap is disabled
    130308 4:55:39 InnoDB: Mutexes and rw_locks use GCC atomic builtins
    130308 4:55:39 InnoDB: Compressed tables use zlib 1.2.3.4
    130308 4:55:39 InnoDB: Initializing buffer pool, size = 128.0M
    130308 4:55:39 InnoDB: Completed initialization of buffer pool
    130308 4:55:39 InnoDB: highest supported file format is Barracuda.
    InnoDB: The log sequence number in ibdata files does not match
    InnoDB: the log sequence number in the ib_logfiles!
    130308 4:55:39 InnoDB: Database was not shut down normally!
    InnoDB: Starting crash recovery.
    InnoDB: Reading tablespace information from the .ibd files...
    InnoDB: Restoring possible half-written data pages from the doublewrite
    InnoDB: buffer...
    130308 4:55:40 InnoDB: Waiting for the background threads to start
    130308 4:55:41 InnoDB: 1.1.8 started; log sequence number 10259220
    130308 4:55:41 InnoDB: !!! innodb_force_recovery is set to 1 !!!
    130308 4:55:41 [Note] Server hostname (bind-address): '127.0.0.1'; port: 3306
    130308 4:55:41 [Note] - '127.0.0.1' resolves to '127.0.0.1';
    130308 4:55:41 [Note] Server socket created on IP: '127.0.0.1'.
    130308 4:55:41 [Note] Event Scheduler: Loaded 0 events
    130308 4:55:41 [Note] mysqld: ready for connections.
    Version: '5.5.29-0ubuntu0.12.04.2' socket: '/var/run/mysqld/mysqld.sock' port: 3306 (Ubuntu)
    130308 4:58:23 [ERROR] Incorrect definition of table mysql.proc: expected column 'comment' at position 15 to have type text, found type char(64).
    130308 4:58:23 InnoDB: Assertion failure in thread 140168992810752 in file fsp0fsp.c line 3639
    InnoDB: We intentionally generate a memory trap.
    InnoDB: Submit a detailed bug report to http://bugs.mysql.com.
    InnoDB: If you get repeated assertion failures or crashes, even
    InnoDB: immediately after the mysqld startup, there may be
    InnoDB: corruption in the InnoDB tablespace. Please refer to
    InnoDB: http://dev.mysql.com/doc/refman/5.5/en/forcing-innodb-recovery.html
    InnoDB: about forcing recovery.
    10:58:23 UTC - mysqld got signal 6 ;
    This could be because you hit a bug. It is also possible that this binary
    or one of the libraries it was linked against is corrupt, improperly built,
    or misconfigured. This error can also be caused by malfunctioning hardware.
    We will try our best to scrape up some info that will hopefully help
    diagnose the problem, but since we have already crashed, something is
    definitely wrong and this may fail.

    key_buffer_size=16777216
    read_buffer_size=131072
    max_used_connections=1
    max_threads=151
    thread_count=1
    connection_count=1
    It is possible that mysqld could use up to
    key_buffer_size + (read_buffer_size + sort_buffer_size)*max_threads = 346681 K bytes of memory
    Hope that's ok; if not, decrease some variables in the equation.

    Thread pointer: 0x7f7ba4f6c2f0
    Attempting backtrace. You can use the following information to find out
    where mysqld died. If you see no messages after this, something went
    terribly wrong...
    stack_bottom = 7f7ba3065e60 thread_stack 0x30000
    mysqld(my_print_stacktrace+0x29)[0x7f7ba3609039]
    mysqld(handle_fatal_signal+0x483)[0x7f7ba34cf9c3]
    /lib/x86_64-linux-gnu/libpthread.so.0(+0xfcb0)[0x7f7ba2220cb0]
    /lib/x86_64-linux-gnu/libc.so.6(gsignal+0x35)[0x7f7ba188c425]
    /lib/x86_64-linux-gnu/libc.so.6(abort+0x17b)[0x7f7ba188fb8b]
    mysqld(+0x65e0fc)[0x7f7ba37160fc]
    mysqld(+0x602be6)[0x7f7ba36babe6]
    mysqld(+0x635006)[0x7f7ba36ed006]
    mysqld(+0x5d7072)[0x7f7ba368f072]
    mysqld(+0x5d7b9c)[0x7f7ba368fb9c]
    mysqld(+0x6a3348)[0x7f7ba375b348]
    mysqld(+0x6a3887)[0x7f7ba375b887]
    mysqld(+0x5c6a86)[0x7f7ba367ea86]
    mysqld(+0x5ae3a7)[0x7f7ba36663a7]
    mysqld(_Z15ha_delete_tableP3THDP10handlertonPKcS4_S4_b+0x16d)[0x7f7ba34d3ffd]
    mysqld(_Z23mysql_rm_table_no_locksP3THDP10TABLE_LISTbbbb+0x568)[0x7f7ba3417f78]
    mysqld(_Z11mysql_rm_dbP3THDPcbb+0x8aa)[0x7f7ba339780a]
    mysqld(_Z21mysql_execute_commandP3THD+0x394c)[0x7f7ba33b886c]
    mysqld(_Z11mysql_parseP3THDPcjP12Parser_state+0x10f)[0x7f7ba33bb28f]
    mysqld(_Z16dispatch_command19enum_server_commandP3THDPcj+0x1380)[0x7f7ba33bc6e0]
    mysqld(_Z24do_handle_one_connectionP3THD+0x1bd)[0x7f7ba346119d]
    mysqld(handle_one_connection+0x50)[0x7f7ba3461200]
    /lib/x86_64-linux-gnu/libpthread.so.0(+0x7e9a)[0x7f7ba2218e9a]
    /lib/x86_64-linux-gnu/libc.so.6(clone+0x6d)[0x7f7ba1949cbd]
    Trying to get some variables.
    Some pointers may be invalid and cause the dump to abort.
    Query (7f7b7c004b60): is an invalid pointer
    Connection ID (thread ID): 1
    Status: NOT_KILLED
    The manual page at http://dev.mysql.com/doc/mysql/en/crashing.html contains
    information that should help you find out what is causing the crash.

    --Dean
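
    A sketch of the escalation described in the updates above, assuming the stock Ubuntu 12.04 layout (the dump path is a placeholder; raise the level one step at a time and treat higher levels as progressively more drastic):

    # /etc/mysql/my.cnf -- add under the existing [mysqld] section
    [mysqld]
    innodb_force_recovery = 1

    # then, from a shell: restart and salvage whatever is readable before
    # dropping or rebuilding the corrupted schema
    sudo service mysql restart
    mysqldump -u root -p molex_app_development > /root/molex_dev.sql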


  • SQL Server Express 2008 R2 Installation error at Windows 7

    - by Shai Sherman
    Hello, I created an install script that installs SQL Server 2008 R2 on Windows XP SP3, Windows Vista and Windows 7. One of the commands used in the installation performs a silent installation of SQL Server 2008 R2. When I install it on Windows XP everything works just fine, but when I try to install it on Windows 7 I get an error. What am I doing wrong? Here is the command line that I use:

    Setup.exe /ConfigurationFile=Mysetup.ini

    Mysetup.ini file:

    ------------------------------------- Start of ini file ---------------------------------
    ;SQL SERVER 2008 R2 Configuration File
    ;Version 1.0, 5 May 2010
    ;
    [SQLSERVER2008]
    ; Specify the Instance ID for the SQL Server features you have specified. SQL Server directory structure, registry structure, and service names will reflect the instance ID of the SQL Server instance.
    INSTANCEID="MSSQLSERVER"
    ; Specifies a Setup work flow, like INSTALL, UNINSTALL, or UPGRADE. This is a required parameter.
    ACTION="Install"
    ; Specifies features to install, uninstall, or upgrade. The list of top-level features include SQL, AS, RS, IS, and Tools. The SQL feature will install the database engine, replication, and full-text. The Tools feature will install Management Tools, Books online, Business Intelligence Development Studio, and other shared components.
    FEATURES=SQLENGINE
    ; Displays the command line parameters usage
    HELP="False"
    ; Specifies that the detailed Setup log should be piped to the console.
    INDICATEPROGRESS="False"
    ; Setup will not display any user interface.
    QUIET="False"
    ; Setup will display progress only without any user interaction.
    QUIETSIMPLE="True"
    ; Specifies that Setup should install into WOW64. This command line argument is not supported on an IA64 or a 32-bit system.
    ;X86="False"
    ; Specifies the path to the installation media folder where setup.exe is located.
    ;MEDIASOURCE="z:\"
    ; Detailed help for command line argument ENU has not been defined yet.
    ENU="True"
    ; Parameter that controls the user interface behavior. Valid values are Normal for the full UI, and AutoAdvance for a simplified UI.
    ;UIMODE="Normal"
    ; Specify if errors can be reported to Microsoft to improve future SQL Server releases. Specify 1 or True to enable and 0 or False to disable this feature.
    ERRORREPORTING="False"
    ; Specify the root installation directory for native shared components.
    ;INSTALLSHAREDDIR="D:\Program Files\Microsoft SQL Server"
    ; Specify the root installation directory for the WOW64 shared components.
    ;INSTALLSHAREDWOWDIR="D:\Program Files (x86)\Microsoft SQL Server"
    ; Specify the installation directory.
    ;INSTANCEDIR="D:\Program Files\Microsoft SQL Server"
    ; Specify that SQL Server feature usage data can be collected and sent to Microsoft. Specify 1 or True to enable and 0 or False to disable this feature.
    SQMREPORTING="False"
    ; Specify a default or named instance. MSSQLSERVER is the default instance for non-Express editions and SQLExpress for Express editions. This parameter is required when installing the SQL Server Database Engine (SQL), Analysis Services (AS), or Reporting Services (RS).
    INSTANCENAME="SQLEXPRESS"
    SECURITYMODE=SQL
    SAPWD=SystemAdmin
    ; Agent account name
    AGTSVCACCOUNT="NT AUTHORITY\NETWORK SERVICE"
    ; Auto-start service after installation.
    AGTSVCSTARTUPTYPE="Manual"
    ; Startup type for Integration Services.
    ;ISSVCSTARTUPTYPE="Automatic"
    ; Account for Integration Services: Domain\User or system account.
    ;ISSVCACCOUNT="NT AUTHORITY\NetworkService"
    ; Controls the service startup type setting after the service has been created.
    ;ASSVCSTARTUPTYPE="Automatic"
    ; The collation to be used by Analysis Services.
    ;ASCOLLATION="Latin1_General_CI_AS"
    ; The location for the Analysis Services data files.
    ;ASDATADIR="Data"
    ; The location for the Analysis Services log files.
    ;ASLOGDIR="Log"
    ; The location for the Analysis Services backup files.
    ;ASBACKUPDIR="Backup"
    ; The location for the Analysis Services temporary files.
    ;ASTEMPDIR="Temp"
    ; The location for the Analysis Services configuration files.
    ;ASCONFIGDIR="Config"
    ; Specifies whether or not the MSOLAP provider is allowed to run in process.
    ;ASPROVIDERMSOLAP="1"
    ; A port number used to connect to the SharePoint Central Administration web application.
    ;FARMADMINPORT="0"
    ; Startup type for the SQL Server service.
    SQLSVCSTARTUPTYPE="Automatic"
    ; Level to enable FILESTREAM feature at (0, 1, 2 or 3).
    FILESTREAMLEVEL="0"
    ; Set to "1" to enable RANU for SQL Server Express.
    ENABLERANU="1"
    ; Specifies a Windows collation or an SQL collation to use for the Database Engine.
    SQLCOLLATION="SQL_Latin1_General_CP1_CI_AS"
    ; Account for SQL Server service: Domain\User or system account.
    SQLSVCACCOUNT="NT Authority\System"
    ; Default directory for the Database Engine user databases.
    ;SQLUSERDBDIR="K:\Microsoft SQL Server\MSSQL\Data"
    ; Default directory for the Database Engine user database logs.
    ;SQLUSERDBLOGDIR="L:\Microsoft SQL Server\MSSQL\Data\Logs"
    ; Directory for Database Engine TempDB files.
    ;SQLTEMPDBDIR="T:\Microsoft SQL Server\MSSQL\Data"
    ; Directory for the Database Engine TempDB log files.
    ;SQLTEMPDBLOGDIR="T:\Microsoft SQL Server\MSSQL\Data\Logs"
    ; Provision current user as a Database Engine system administrator for SQL Server 2008 R2 Express.
    ADDCURRENTUSERASSQLADMIN="True"
    ; Specify 0 to disable or 1 to enable the TCP/IP protocol.
    TCPENABLED="1"
    ; Specify 0 to disable or 1 to enable the Named Pipes protocol.
    NPENABLED="0"
    ; Startup type for Browser Service.
    BROWSERSVCSTARTUPTYPE="Automatic"
    ; Specifies the startup mode of the report server NT service:
    ;   Manual - Service startup is manual mode (default).
    ;   Automatic - Service startup is automatic mode.
    ;   Disabled - Service is disabled
    ;RSSVCSTARTUPTYPE="Automatic"
    ; Specifies which mode report server is installed in. Default value: "FilesOnly"
    ;RSINSTALLMODE="FilesOnlyMode"
    ; Accept SQL Server 2008 R2 license terms
    IACCEPTSQLSERVERLICENSETERMS="TRUE"
    ;setup.exe /CONFIGURATIONFILE=Mysetup.ini /INDICATEPROGRESS
    --------------------------- End of ini file -------------------------------------

    And I get this error:

    2010-08-31 18:05:53 Slp: Error result: -2068119551
    2010-08-31 18:05:53 Slp: Result facility code: 1211
    2010-08-31 18:05:53 Slp: Result error code: 1
    2010-08-31 18:05:53 Slp: Sco: Attempting to create base registry key HKEY_LOCAL_MACHINE, machine
    2010-08-31 18:05:53 Slp: Sco: Attempting to open registry subkey
    2010-08-31 18:05:53 Slp: Sco: Attempting to open registry subkey Software\Microsoft\PCHealth\ErrorReporting\DW\Installed
    2010-08-31 18:05:53 Slp: Sco: Attempting to get registry value DW0200
    2010-08-31 18:05:53 Slp: Submitted 1 of 1 failures to the Watson data repository

    What is the meaning of this? What do I need to do to fix the problem? Here is the summary file:

    Overall summary:
      Final result: SQL Server installation failed. To continue, investigate the reason for the failure, correct the problem, uninstall SQL Server, and then rerun SQL Server Setup.
      Exit code (Decimal): -2068119551
      Exit facility code: 1211
      Exit error code: 1
      Exit message: SQL Server installation failed. To continue, investigate the reason for the failure, correct the problem, uninstall SQL Server, and then rerun SQL Server Setup.
      Start time: 2010-08-31 18:03:44
      End time: 2010-08-31 18:05:51
      Requested action: Install
      Log with failure: C:\Program Files\Microsoft SQL Server\100\Setup Bootstrap\Log\20100831_180236\Detail.txt
      Exception help link: http%3a%2f%2fgo.microsoft.com%2ffwlink%3fLinkId%3d20476%26ProdName%3dMicrosoft%2bSQL%2bServer%26EvtSrc%3dsetup.rll%26EvtID%3d50000%26ProdVer%3d10.50.1600.1%26EvtType%3d0x6121810A%400xC24842DB

    Machine Properties:
      Machine name: NVR
      Machine processor count: 2
      OS version: Windows 7
      OS service pack:
      OS region: United States
      OS language: English (United States)
      OS architecture: x86
      Process architecture: 32 Bit
      OS clustered: No

    Product features discovered:
      Product Instance Instance ID Feature Language Edition Version Clustered

    Package properties:
      Description: SQL Server Database Services 2008 R2
      ProductName: SQL Server 2008 R2
      Type: RTM
      Version: 10
      SPLevel: 0
      Installation location: C:\Disk1\setupsql\x86\setup\
      Installation edition: EXPRESS

    User Input Settings:
      ACTION: Install
      ADDCURRENTUSERASSQLADMIN: True
      AGTSVCACCOUNT: NT AUTHORITY\NETWORK SERVICE
      AGTSVCPASSWORD: *
      AGTSVCSTARTUPTYPE: Disabled
      ASBACKUPDIR: Backup
      ASCOLLATION: Latin1_General_CI_AS
      ASCONFIGDIR: Config
      ASDATADIR: Data
      ASDOMAINGROUP:
      ASLOGDIR: Log
      ASPROVIDERMSOLAP: 1
      ASSVCACCOUNT:
      ASSVCPASSWORD: *
      ASSVCSTARTUPTYPE: Automatic
      ASSYSADMINACCOUNTS:
      ASTEMPDIR: Temp
      BROWSERSVCSTARTUPTYPE: Automatic
      CONFIGURATIONFILE: C:\Program Files\Microsoft SQL Server\100\Setup Bootstrap\Log\20100831_180236\ConfigurationFile.ini
      CUSOURCE:
      ENABLERANU: True
      ENU: True
      ERRORREPORTING: False
      FARMACCOUNT:
      FARMADMINPORT: 0
      FARMPASSWORD: *
      FEATURES: SQLENGINE
      FILESTREAMLEVEL: 0
      FILESTREAMSHARENAME:
      FTSVCACCOUNT:
      FTSVCPASSWORD: *
      HELP: False
      IACCEPTSQLSERVERLICENSETERMS: True
      INDICATEPROGRESS: False
      INSTALLSHAREDDIR: C:\Program Files\Microsoft SQL Server\
      INSTALLSHAREDWOWDIR: C:\Program Files\Microsoft SQL Server\
      INSTALLSQLDATADIR:
      INSTANCEDIR: C:\Program Files\Microsoft SQL Server\
      INSTANCEID: MSSQLSERVER
      INSTANCENAME: SQLEXPRESS
      ISSVCACCOUNT: NT AUTHORITY\NetworkService
      ISSVCPASSWORD: *
      ISSVCSTARTUPTYPE: Automatic
      NPENABLED: 0
      PASSPHRASE: *
      PCUSOURCE:
      PID: *
      QUIET: False
      QUIETSIMPLE: True
      ROLE: AllFeatures_WithDefaults
      RSINSTALLMODE: FilesOnlyMode
      RSSVCACCOUNT: NT AUTHORITY\NETWORK SERVICE
      RSSVCPASSWORD: *
      RSSVCSTARTUPTYPE: Automatic
      SAPWD: *
      SECURITYMODE: SQL
      SQLBACKUPDIR:
      SQLCOLLATION: SQL_Latin1_General_CP1_CI_AS
      SQLSVCACCOUNT: NT Authority\System
      SQLSVCPASSWORD: *
      SQLSVCSTARTUPTYPE: Automatic
      SQLSYSADMINACCOUNTS:
      SQLTEMPDBDIR:
      SQLTEMPDBLOGDIR:
      SQLUSERDBDIR:
      SQLUSERDBLOGDIR:
      SQMREPORTING: False
      TCPENABLED: 1
      UIMODE: AutoAdvance
      X86: False
      Configuration file: C:\Program Files\Microsoft SQL Server\100\Setup Bootstrap\Log\20100831_180236\ConfigurationFile.ini

    Detailed results:
      Feature: Database Engine Services
      Status: Failed: see logs for details
      MSI status: Passed
      Configuration status: Failed: see details below
      Configuration error code: 0x0A2FBD17@1211@1
      Configuration error description: The process cannot access the file because it is being used by another process.
      Configuration log: C:\Program Files\Microsoft SQL Server\100\Setup Bootstrap\Log\20100831_180236\Detail.txt

    Rules with failures:
      Global rules:
      Scenario specific rules:
      Rules report file: C:\Program Files\Microsoft SQL Server\100\Setup Bootstrap\Log\20100831_180236\SystemConfigurationCheck_Report.htm

    What should I do and why does this problem occur?
    Thanks, Shai.
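
    For comparison, the same unattended choices can also be passed directly on the command line, which makes quick retries on the Windows 7 box easier; a sketch using the documented 2008 R2 setup switches (the password is a placeholder, and on Windows 7 the prompt should be elevated):

    REM run from an elevated command prompt; password below is a placeholder
    Setup.exe /Q /ACTION=Install /FEATURES=SQLENGINE /INSTANCENAME=SQLEXPRESS ^
      /SECURITYMODE=SQL /SAPWD="Pl@ceholder1" /SQLSVCACCOUNT="NT Authority\System" ^
      /TCPENABLED=1 /IACCEPTSQLSERVERLICENSETERMS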


  • socket connection failed, telnet OK

    - by cf16
    My problem is that I can't connect two computers through a socket (Windows XP and Windows 7), although the server created with socket() is listening and I can telnet to it. The server then receives the information and does what should be done, but if I run the corresponding socket client I get error 10061. Moreover, I am behind a firewall; these two computers are running within my LAN and the Windows firewalls are turned off:

    comp1:  192.168.1.2 port 12345
    comp2:  192.168.1.6 port 12345
    router: 192.168.1.1

    Maybe port forwarding could help? But most important for me is to answer why sockets fail if telnet works fine.

    client:

    // required headers/libs (not shown in the original post):
    #include <winsock2.h>
    #include <stdio.h>
    #include <string.h>
    #include <stdlib.h>
    #pragma comment(lib, "ws2_32.lib")

    int main()
    {
        // Initialize Winsock.
        WSADATA wsaData;
        int iResult = WSAStartup(MAKEWORD(2, 2), &wsaData);
        if (iResult != NO_ERROR)
            printf("Client: Error at WSAStartup().\n");
        else
            printf("Client: WSAStartup() is OK.\n");

        // Create a socket.
        SOCKET m_socket;
        m_socket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
        if (m_socket == INVALID_SOCKET){
            printf("Client: socket() - Error at socket(): %ld\n", WSAGetLastError());
            WSACleanup();
            return 7;
        }else
            printf("Client: socket() is OK.\n");

        // Connect to a server.
        sockaddr_in clientService;
        clientService.sin_family = AF_INET;
        //clientService.sin_addr.s_addr = inet_addr("77.64.240.156");
        clientService.sin_addr.s_addr = inet_addr("192.168.1.5");
        //clientService.sin_addr.s_addr = inet_addr("87.207.222.5");
        clientService.sin_port = htons(12345);
        if (connect(m_socket, (SOCKADDR*)&clientService, sizeof(clientService)) == SOCKET_ERROR){
            printf("Client: connect() - Failed to connect.\n");
            wprintf(L"connect function failed with error: %ld\n", WSAGetLastError());
            iResult = closesocket(m_socket);
            if (iResult == SOCKET_ERROR)
                wprintf(L"closesocket function failed with error: %ld\n", WSAGetLastError());
            WSACleanup();
            return 6;
        }

        // Send and receive data
        int bytesSent;
        int bytesRecv = SOCKET_ERROR;
        // Be careful with the array bound, provide some checking mechanism
        char sendbuf[200] = "Client: Sending some test string to server...";
        char recvbuf[200] = "";
        bytesSent = send(m_socket, sendbuf, strlen(sendbuf), 0);
        printf("Client: send() - Bytes Sent: %ld\n", bytesSent);
        while (bytesRecv == SOCKET_ERROR){
            bytesRecv = recv(m_socket, recvbuf, 32, 0);
            if (bytesRecv == 0 || bytesRecv == WSAECONNRESET){
                printf("Client: Connection Closed.\n");
                break;
            }else
                printf("Client: recv() is OK.\n");
            if (bytesRecv < 0)
                return 0;
            else
                printf("Client: Bytes received - %ld.\n", bytesRecv);
        }
        system("pause");
        return 0;
    }

    server:

    // required headers/libs (not shown in the original post):
    #include <winsock2.h>
    #include <stdio.h>
    #include <iostream>
    #pragma comment(lib, "ws2_32.lib")
    using namespace std;

    int main()
    {
        WORD wVersionRequested;
        WSADATA wsaData = {0};
        int wsaerr;

        // Using MAKEWORD macro, Winsock version request 2.2
        wVersionRequested = MAKEWORD(2, 2);
        wsaerr = WSAStartup(wVersionRequested, &wsaData);
        if (wsaerr != 0){
            /* Tell the user that we could not find a usable WinSock DLL. */
            printf("Server: The Winsock dll not found!\n");
            return 0;
        }else{
            printf("Server: The Winsock dll found!\n");
            printf("Server: The status: %s.\n", wsaData.szSystemStatus);
        }

        /* Confirm that the WinSock DLL supports 2.2. Note that if the DLL supports
           versions greater than 2.2 in addition to 2.2, it will still return 2.2 in
           wVersion since that is the version we requested. */
        if (LOBYTE(wsaData.wVersion) != 2 || HIBYTE(wsaData.wVersion) != 2){
            printf("Server: The dll does not support the Winsock version %u.%u!\n",
                   LOBYTE(wsaData.wVersion), HIBYTE(wsaData.wVersion));
            WSACleanup();
            return 0;
        }else{
            printf("Server: The dll supports the Winsock version %u.%u!\n",
                   LOBYTE(wsaData.wVersion), HIBYTE(wsaData.wVersion));
            printf("Server: The highest version this dll can support: %u.%u\n",
                   LOBYTE(wsaData.wHighVersion), HIBYTE(wsaData.wHighVersion));
        }

        ////////// Create a socket ////////////////////////
        // AF_INET family, streaming socket type, TCP/IP protocol (IPv4)
        SOCKET m_socket;
        m_socket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
        if (m_socket == INVALID_SOCKET){
            printf("Server: Error at socket(): %ld\n", WSAGetLastError());
            WSACleanup();
            //return 0;
        }else{
            printf("Server: socket() is OK!\n");
        }

        ////////////////bind//////////////////////////////
        // Create a sockaddr_in object and set its values.
        sockaddr_in service;
        service.sin_family = AF_INET;
        // Listen on all local addresses. (fixed: the original used htons() here;
        // htonl() is the right conversion for an address, and INADDR_ANY happens
        // to be 0 so both versions compile)
        service.sin_addr.s_addr = htonl(INADDR_ANY); //inet_addr("127.0.0.1"); //inet_addr("192.168.1.2");
        // 12345 is the port number to which the socket will be bound, big-endian via htons
        service.sin_port = htons(12345);

        // Bind the created socket; check for general errors.
        if (bind(m_socket, (SOCKADDR*)&service, sizeof(service)) == SOCKET_ERROR){
            printf("Server: bind() failed: %ld.\n", WSAGetLastError());
            closesocket(m_socket);
            //return 0;
        }else{
            printf("Server: bind() is OK!\n");
        }

        // Listen with a backlog of one pending connection; check for general errors.
        if (listen(m_socket, 1) == SOCKET_ERROR)
            printf("Server: listen(): Error listening on socket %ld.\n", WSAGetLastError());
        else{
            printf("Server: listen() is OK, I'm waiting for connections...\n");
        }

        // Temporary SOCKET object for accepting connections.
        SOCKET AcceptSocket;
        printf("Server: Waiting for a client to connect...\n");
        printf("***Hint: Server is ready...run your client program...***\n");

        // Continuous loop that checks for connection requests.
        while (1){
            AcceptSocket = SOCKET_ERROR;
            while (AcceptSocket == SOCKET_ERROR){
                AcceptSocket = accept(m_socket, NULL, NULL);
            }
            // When the client connection has been accepted, transfer control from the
            // temporary socket to the original socket and stop checking for new
            // connections. note: now it is wrong implementation for more than one client
            printf("Server: Client Connected! Mammamija. \n");
            m_socket = AcceptSocket;

            char recvBuf[200] = "";
            char * rc = recvBuf;
            int bytesRecv = recv(m_socket, recvBuf, 64, 0);
            if (bytesRecv == 0 || bytesRecv == WSAECONNRESET){
                cout << "server: connection closed.\n";
            }else{
                cout << "server: recv() is OK.\n";
                if (bytesRecv < 0){
                    return 0;
                }else{
                    // fixed: the original passed recvBuf (a pointer) to %ld here
                    printf("server: bytes received: %ld.\n", (long)bytesRecv);
                }
            }
        } // the original post breaks off at this point; braces and cleanup below were added to balance the excerpt
        closesocket(m_socket);
        WSACleanup();
        return 0;
    }
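
    A compact way to separate "nothing is listening there" from other failures is to print the specific WSA error after connect(); a minimal standalone sketch (Winsock 2 assumed; IP and port are placeholders for whatever the server actually binds):

    #include <winsock2.h>
    #include <cstdio>
    #pragma comment(lib, "ws2_32.lib")

    int main() {
        WSADATA wsa;
        if (WSAStartup(MAKEWORD(2, 2), &wsa) != 0) return 1;

        SOCKET s = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
        sockaddr_in sa = {};
        sa.sin_family = AF_INET;
        sa.sin_port = htons(12345);
        sa.sin_addr.s_addr = inet_addr("192.168.1.2");   // placeholder: the server's real IP

        if (connect(s, (SOCKADDR*)&sa, sizeof(sa)) == SOCKET_ERROR) {
            int err = WSAGetLastError();
            if (err == WSAECONNREFUSED)        // 10061: host reached, nothing listening on that IP:port
                printf("connection refused - check IP/port and that the server is listening there\n");
            else if (err == WSAETIMEDOUT)      // 10060: no response at all (routing/firewall)
                printf("connection timed out\n");
            else
                printf("connect failed: %d\n", err);
        } else {
            printf("connected\n");
        }
        closesocket(s);
        WSACleanup();
        return 0;
    }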


  • Failing Sata HDD

    - by DaveCol
    I think my HDD is fried... Could someone confirm or help me restore it? I was using a hardware RAID 1 configuration [2 x 160GB SATA HDD] on a CentOS 4 installation. All of a sudden I started seeing bad sectors on the second HDD, which stopped being mirrored. I have removed the RAID array and have tested with SMART, which showed the following error:

    187 Unknown_Attribute 0x003a 001 001 051 Old_age Always FAILING_NOW 4645

    I have no clue what this means, or if I can recover from it. Could someone give me some ideas on how to fix this, or what HDD to get to replace this? Complete SMART report:

    Smartctl version 5.33 [i686-redhat-linux-gnu] Copyright (C) 2002-4 Bruce Allen
    Home page is http://smartmontools.sourceforge.net/

    === START OF INFORMATION SECTION ===
    Device Model:     GB0160CAABV
    Serial Number:    6RX58NAA
    Firmware Version: HPG1
    User Capacity:    160,041,885,696 bytes
    Device is:        Not in smartctl database [for details use: -P showall]
    ATA Version is:   7
    ATA Standard is:  ATA/ATAPI-7 T13 1532D revision 4a
    Local Time is:    Tue Oct 19 13:42:42 2010 COT
    SMART support is: Available - device has SMART capability.
    SMART support is: Enabled

    === START OF READ SMART DATA SECTION ===
    SMART overall-health self-assessment test result: PASSED
    See vendor-specific Attribute list for marginal Attributes.

    General SMART Values:
    Offline data collection status: (0x82) Offline data collection activity was completed without error.
                                           Auto Offline Data Collection: Enabled.
    Self-test execution status:     (   0) The previous self-test routine completed without error
                                           or no self-test has ever been run.
    Total time to complete Offline data collection: ( 433) seconds.
    Offline data collection capabilities: (0x5b) SMART execute Offline immediate.
                                           Auto Offline data collection on/off support.
                                           Suspend Offline collection upon new command.
                                           Offline surface scan supported.
                                           Self-test supported.
                                           No Conveyance Self-test supported.
                                           Selective Self-test supported.
    SMART capabilities: (0x0003) Saves SMART data before entering power-saving mode.
                                 Supports SMART auto save timer.
    Error logging capability: (0x01) Error logging supported.
                                     General Purpose Logging supported.
    Short self-test routine recommended polling time:    (  2) minutes.
    Extended self-test routine recommended polling time: ( 54) minutes.

    SMART Attributes Data Structure revision number: 10
    Vendor Specific SMART Attributes with Thresholds:
    ID# ATTRIBUTE_NAME          FLAG   VALUE WORST THRESH TYPE     UPDATED WHEN_FAILED RAW_VALUE
      1 Raw_Read_Error_Rate     0x000f 100   253   006    Pre-fail Always  -           0
      3 Spin_Up_Time            0x0002 097   097   000    Old_age  Always  -           0
      4 Start_Stop_Count        0x0033 100   100   020    Pre-fail Always  -           152
      5 Reallocated_Sector_Ct   0x0033 095   095   036    Pre-fail Always  -           214
      7 Seek_Error_Rate         0x000f 078   060   030    Pre-fail Always  -           73109713
      9 Power_On_Hours          0x0032 083   083   000    Old_age  Always  -           15133
     10 Spin_Retry_Count        0x0013 100   100   097    Pre-fail Always  -           0
     12 Power_Cycle_Count       0x0033 100   100   020    Pre-fail Always  -           154
    184 Unknown_Attribute       0x0032 038   038   000    Old_age  Always  -           62
    187 Unknown_Attribute       0x003a 001   001   051    Old_age  Always  FAILING_NOW 4645
    189 Unknown_Attribute       0x0022 100   100   000    Old_age  Always  -           0
    190 Unknown_Attribute       0x001a 061   055   000    Old_age  Always  -           656408615
    194 Temperature_Celsius     0x0000 039   045   000    Old_age  Offline -           39 (Lifetime Min/Max 0/22)
    195 Hardware_ECC_Recovered  0x0032 070   059   000    Old_age  Always  -           12605265
    197 Current_Pending_Sector  0x0000 100   100   000    Old_age  Offline -           1
    198 Offline_Uncorrectable   0x0000 100   100   000    Old_age  Offline -           0
    199 UDMA_CRC_Error_Count    0x0000 200   200   000    Old_age  Offline -           62

    SMART Error Log Version: 1
    ATA Error Count: 4645 (device log contains only the most recent five errors)
      CR = Command Register [HEX]
      FR = Features Register [HEX]
      SC = Sector Count Register [HEX]
      SN = Sector Number Register [HEX]
      CL = Cylinder Low Register [HEX]
      CH = Cylinder High Register [HEX]
      DH = Device/Head Register [HEX]
      DC = Device Command Register [HEX]
      ER = Error register [HEX]
      ST = Status register [HEX]
    Powered_Up_Time is measured from power on, and printed as DDd+hh:mm:SS.sss where
    DD=days, hh=hours, mm=minutes, SS=sec, and sss=millisec. It "wraps" after 49.710 days.

    Error 4645 occurred at disk power-on lifetime: 15132 hours (630 days + 12 hours)
      When the command that caused the error occurred, the device was active or idle.
      After command completion occurred, registers were:
      ER ST SC SN CL CH DH
      -- -- -- -- -- -- --
      40 51 00 7b 86 b1 ea  Error: UNC at LBA = 0x0ab1867b = 179406459
      Commands leading to the command that caused the error were:
      CR FR SC SN CL CH DH DC   Powered_Up_Time  Command/Feature_Name
      -- -- -- -- -- -- -- --  ----------------  --------------------
      c8 00 02 7b 86 b1 ea 00      00:38:52.796  READ DMA
      ec 03 45 00 00 00 a0 00      00:38:52.796  IDENTIFY DEVICE
      ef 03 45 00 00 00 a0 00      00:38:52.794  SET FEATURES [Set transfer mode]
      ec 00 00 7b 86 b1 a0 00      00:38:49.991  IDENTIFY DEVICE
      c8 00 04 79 86 b1 ea 00      00:38:49.935  READ DMA

    Error 4644 occurred at disk power-on lifetime: 15132 hours (630 days + 12 hours)
      When the command that caused the error occurred, the device was active or idle.
      After command completion occurred, registers were:
      ER ST SC SN CL CH DH
      -- -- -- -- -- -- --
      40 51 00 7b 86 b1 ea  Error: UNC at LBA = 0x0ab1867b = 179406459
      Commands leading to the command that caused the error were:
      CR FR SC SN CL CH DH DC   Powered_Up_Time  Command/Feature_Name
      -- -- -- -- -- -- -- --  ----------------  --------------------
      c8 00 04 79 86 b1 ea 00      00:38:41.517  READ DMA
      ec 03 45 00 00 00 a0 00      00:38:41.515  IDENTIFY DEVICE
      ef 03 45 00 00 00 a0 00      00:38:41.515  SET FEATURES [Set transfer mode]
      ec 00 00 7b 86 b1 a0 00      00:38:49.991  IDENTIFY DEVICE
      c8 00 06 77 86 b1 ea 00      00:38:49.935  READ DMA

    Error 4643 occurred at disk power-on lifetime: 15132 hours (630 days + 12 hours)
      When the command that caused the error occurred, the device was active or idle.
      After command completion occurred, registers were:
      ER ST SC SN CL CH DH
      -- -- -- -- -- -- --
      40 51 00 7b 86 b1 ea  Error: UNC at LBA = 0x0ab1867b = 179406459
      Commands leading to the command that caused the error were:
      CR FR SC SN CL CH DH DC   Powered_Up_Time  Command/Feature_Name
      -- -- -- -- -- -- -- --  ----------------  --------------------
      c8 00 06 77 86 b1 ea 00      00:38:41.517  READ DMA
      ec 03 45 00 00 00 a0 00      00:38:41.515  IDENTIFY DEVICE
      ef 03 45 00 00 00 a0 00      00:38:41.515  SET FEATURES [Set transfer mode]
      ec 00 00 7b 86 b1 a0 00      00:38:41.513  IDENTIFY DEVICE
      c8 00 06 77 86 b1 ea 00      00:38:38.706  READ DMA

    Error 4642 occurred at disk power-on lifetime: 15132 hours (630 days + 12 hours)
      When the command that caused the error occurred, the device was active or idle.
      After command completion occurred, registers were:
      ER ST SC SN CL CH DH
      -- -- -- -- -- -- --
      40 51 00 7b 86 b1 ea  Error: UNC at LBA = 0x0ab1867b = 179406459
      Commands leading to the command that caused the error were:
      CR FR SC SN CL CH DH DC   Powered_Up_Time  Command/Feature_Name
      -- -- -- -- -- -- -- --  ----------------  --------------------
      c8 00 06 77 86 b1 ea 00      00:38:41.517  READ DMA
      ec 03 45 00 00 00 a0 00      00:38:41.515  IDENTIFY DEVICE
      ef 03 45 00 00 00 a0 00      00:38:41.515  SET FEATURES [Set transfer mode]
      ec 00 00 7b 86 b1 a0 00      00:38:41.513  IDENTIFY DEVICE
      c8 00 06 77 86 b1 ea 00      00:38:38.706  READ DMA

    Error 4641 occurred at disk power-on lifetime: 15132 hours (630 days + 12 hours)
      When the command that caused the error occurred, the device was active or idle.
      After command completion occurred, registers were:
      ER ST SC SN CL CH DH
      -- -- -- -- -- -- --
      40 51 00 7b 86 b1 ea  Error: UNC at LBA = 0x0ab1867b = 179406459
      Commands leading to the command that caused the error were:
      CR FR SC SN CL CH DH DC   Powered_Up_Time  Command/Feature_Name
      -- -- -- -- -- -- -- --  ----------------  --------------------
      c8 00 06 77 86 b1 ea 00      00:38:41.517  READ DMA
      ec 03 45 00 00 00 a0 00      00:38:41.515  IDENTIFY DEVICE
      ef 03 45 00 00 00 a0 00      00:38:41.515  SET FEATURES [Set transfer mode]
      ec 00 00 7b 86 b1 a0 00      00:38:41.513  IDENTIFY DEVICE
      c8 00 06 77 86 b1 ea 00      00:38:38.706  READ DMA

    SMART Self-test log structure revision number 1
    Num  Test_Description  Status                   Remaining  LifeTime(hours)  LBA_of_first_error
    # 1  Short offline     Completed without error  00%        15131            -
    # 2  Short offline     Completed without error  00%        15131            -

    SMART Selective self-test log data structure revision number 1
     SPAN  MIN_LBA  MAX_LBA  CURRENT_TEST_STATUS
        1        0        0  Not_testing
        2        0        0  Not_testing
        3        0        0  Not_testing
        4        0        0  Not_testing
        5        0        0  Not_testing
    Selective self-test flags (0x0):
      After scanning selected spans, do NOT read-scan remainder of disk.
    If Selective self-test is pending on power-up, resume after 0 minute delay.
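
    A quick way to double-check what the log above already suggests (attribute 187 failing, a pending sector, thousands of UNC read errors) is to run the drive's own self-tests from the same tool; a sketch, assuming the disk shows up as /dev/sda:

    smartctl -t long /dev/sda        # full surface self-test (~54 min per the report above)
    smartctl -l selftest /dev/sda    # read the result once it finishes
    smartctl -A /dev/sda | grep -E 'Reallocated|Pending|Uncorrectable'   # watch the remap counters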


  • Hard Disk DRDY error: is it a crash

    - by pranjal
    I am using an IBM Thinkpad, 1.7GHz, 512MB RAM, with Linux Mint 9 installed. I have two partitions in addition to root. One of the partitions became read-only yesterday, after which I rebooted my system. It is extremely slow, along with a DRDY error. Has my hard disk crashed?

    Error log while booting:

    Differences between boot sector and its backup.
    failed command: READ DMA
    BMDMA: stat 0x25
    ata 1.00: status: { DRDY ERR }
    ata 1.00: status: { UNC }
    Buffer I/O error on logical device, logical block 65467

    smartctl output for the partition:

    mint mint # smartctl -a /dev/sda1
    smartctl version 5.38 [i686-pc-linux-gnu] Copyright (C) 2002-8 Bruce Allen
    Home page is http://smartmontools.sourceforge.net/

    === START OF INFORMATION SECTION ===
    Device Model:     TOSHIBA MK4026GAX RoHS
    Serial Number:    X5LY1623T
    Firmware Version: PA107E
    User Capacity:    40,007,761,920 bytes
    Device is:        Not in smartctl database [for details use: -P showall]
    ATA Version is:   6
    ATA Standard is:  Exact ATA specification draft version not indicated
    Local Time is:    Thu Feb 17 06:48:25 2011 UTC
    SMART support is: Available - device has SMART capability.
    SMART support is: Enabled

    === START OF READ SMART DATA SECTION ===
    SMART overall-health self-assessment test result: PASSED

    General SMART Values:
    Offline data collection status: (0x84) Offline data collection activity was suspended
                                           by an interrupting command from host.
                                           Auto Offline Data Collection: Enabled.
    Self-test execution status:     (   0) The previous self-test routine completed without error
                                           or no self-test has ever been run.
    Total time to complete Offline data collection: ( 153) seconds.
    Offline data collection capabilities: (0x1b) SMART execute Offline immediate.
                                           Auto Offline data collection on/off support.
                                           Suspend Offline collection upon new command.
                                           Offline surface scan supported.
                                           Self-test supported.
                                           No Conveyance Self-test supported.
                                           No Selective Self-test supported.
    SMART capabilities: (0x0003) Saves SMART data before entering power-saving mode.
                                 Supports SMART auto save timer.
    Error logging capability: (0x01) Error logging supported.
                                     No General Purpose Logging support.
    Short self-test routine recommended polling time:    (  2) minutes.
    Extended self-test routine recommended polling time: ( 30) minutes.

    SMART Attributes Data Structure revision number: 16
    Vendor Specific SMART Attributes with Thresholds:
    ID# ATTRIBUTE_NAME          FLAG   VALUE WORST THRESH TYPE     UPDATED WHEN_FAILED RAW_VALUE
      1 Raw_Read_Error_Rate     0x000b 100   100   050    Pre-fail Always  -           0
      2 Throughput_Performance  0x0005 100   100   050    Pre-fail Offline -           0
      3 Spin_Up_Time            0x0027 100   100   001    Pre-fail Always  -           310
      4 Start_Stop_Count        0x0032 100   100   000    Old_age  Always  -           3968
      5 Reallocated_Sector_Ct   0x0033 100   100   050    Pre-fail Always  -           40
      7 Seek_Error_Rate         0x000b 100   100   050    Pre-fail Always  -           0
      8 Seek_Time_Performance   0x0005 100   100   050    Pre-fail Offline -           0
      9 Power_On_Hours          0x0032 082   082   000    Old_age  Always  -           7257
     10 Spin_Retry_Count        0x0033 179   100   030    Pre-fail Always  -           0
     12 Power_Cycle_Count       0x0032 100   100   000    Old_age  Always  -           3484
    192 Power-Off_Retract_Count 0x0032 100   100   000    Old_age  Always  -           489
    193 Load_Cycle_Count        0x0032 064   064   000    Old_age  Always  -           367150
    194 Temperature_Celsius     0x0022 100   100   000    Old_age  Always  -           36 (Lifetime Min/Max 14/57)
    196 Reallocated_Event_Count 0x0032 100   100   000    Old_age  Always  -           33
    197 Current_Pending_Sector  0x0032 100   100   000    Old_age  Always  -           82
    198 Offline_Uncorrectable   0x0030 100   100   000    Old_age  Offline -           1
    199 UDMA_CRC_Error_Count    0x0032 200   253   000    Old_age  Always  -           0
    220 Disk_Shift              0x0002 100   100   000    Old_age  Always  -           101
    222 Loaded_Hours            0x0032 085   085   000    Old_age  Always  -           6146
    223 Load_Retry_Count        0x0032 100   100   000    Old_age  Always  -           0
    224 Load_Friction           0x0022 100   100   000    Old_age  Always  -           0
    226 Load-in_Time            0x0026 100   100   000    Old_age  Always  -           227
    240 Head_Flying_Hours       0x0001 100   100   001    Pre-fail Offline -           0

    SMART Error Log Version: 1
    ATA Error Count: 2371 (device log contains only the most recent five errors)
      CR = Command Register [HEX]
      FR = Features Register [HEX]
      SC = Sector Count Register [HEX]
      SN = Sector Number Register [HEX]
      CL = Cylinder Low Register [HEX]
      CH = Cylinder High Register [HEX]
      DH = Device/Head Register [HEX]
      DC = Device Command Register [HEX]
      ER = Error register [HEX]
      ST = Status register [HEX]
    Powered_Up_Time is measured from power on, and printed as DDd+hh:mm:SS.sss where
    DD=days, hh=hours, mm=minutes, SS=sec, and sss=millisec. It "wraps" after 49.710 days.

    Error 2371 occurred at disk power-on lifetime: 7256 hours (302 days + 8 hours)
      When the command that caused the error occurred, the device was active or idle.
      After command completion occurred, registers were:
      ER ST SC SN CL CH DH
      -- -- -- -- -- -- --
      40 51 05 1a 1b 00 e0  Error: UNC 5 sectors at LBA = 0x00001b1a = 6938
      Commands leading to the command that caused the error were:
      CR FR SC SN CL CH DH DC   Powered_Up_Time  Command/Feature_Name
      -- -- -- -- -- -- -- --  ----------------  --------------------
      c8 00 05 1a 1b 00 e0 00      00:03:10.061  READ DMA
      f8 00 00 00 00 00 e0 00      00:03:10.061  READ NATIVE MAX ADDRESS
      ec 00 00 00 00 00 a0 02      00:03:10.053  IDENTIFY DEVICE
      ef 03 45 00 00 00 a0 02      00:03:10.053  SET FEATURES [Set transfer mode]
      f8 00 00 00 00 00 e0 00      00:03:10.053  READ NATIVE MAX ADDRESS

    Error 2370 occurred at disk power-on lifetime: 7256 hours (302 days + 8 hours)
      When the command that caused the error occurred, the device was active or idle.
      After command completion occurred, registers were:
      ER ST SC SN CL CH DH
      -- -- -- -- -- -- --
      40 51 05 1a 1b 00 e0  Error: UNC 5 sectors at LBA = 0x00001b1a = 6938
      Commands leading to the command that caused the error were:
      CR FR SC SN CL CH DH DC   Powered_Up_Time  Command/Feature_Name
      -- -- -- -- -- -- -- --  ----------------  --------------------
      c8 00 05 1a 1b 00 e0 00      00:03:03.328  READ DMA
      f8 00 00 00 00 00 e0 00      00:03:03.327  READ NATIVE MAX ADDRESS
      ec 00 00 00 00 00 a0 02      00:03:03.320  IDENTIFY DEVICE
      ef 03 45 00 00 00 a0 02      00:03:03.319  SET FEATURES [Set transfer mode]
      f8 00 00 00 00 00 e0 00      00:03:03.319  READ NATIVE MAX ADDRESS

    Error 2369 occurred at disk power-on lifetime: 7256 hours (302 days + 8 hours)
      When the command that caused the error occurred, the device was active or idle.
      After command completion occurred, registers were:
      ER ST SC SN CL CH DH
      -- -- -- -- -- -- --
      40 51 05 1a 1b 00 e0  Error: UNC 5 sectors at LBA = 0x00001b1a = 6938
      Commands leading to the command that caused the error were:
      CR FR SC SN CL CH DH DC   Powered_Up_Time  Command/Feature_Name
      -- -- -- -- -- -- -- --  ----------------  --------------------
      c8 00 05 1a 1b 00 e0 00      00:02:56.582  READ DMA
      f8 00 00 00 00 00 e0 00      00:02:56.582  READ NATIVE MAX ADDRESS
      ec 00 00 00 00 00 a0 02      00:02:56.574  IDENTIFY DEVICE
      ef 03 45 00 00 00 a0 02      00:02:56.574  SET FEATURES [Set transfer mode]
      f8 00 00 00 00 00 e0 00      00:02:56.574  READ NATIVE MAX ADDRESS

    Error 2368 occurred at disk power-on lifetime: 7256 hours (302 days + 8 hours)
      When the command that caused the error occurred, the device was active or idle.
      After command completion occurred, registers were:
      ER ST SC SN CL CH DH
      -- -- -- -- -- -- --
      40 51 05 1a 1b 00 e0  Error: UNC 5 sectors at LBA = 0x00001b1a = 6938
      Commands leading to the command that caused the error were:
      CR FR SC SN CL CH DH DC   Powered_Up_Time  Command/Feature_Name
      -- -- -- -- -- -- -- --  ----------------  --------------------
      c8 00 05 1a 1b 00 e0 00      00:02:49.809  READ DMA
      f8 00 00 00 00 00 e0 00      00:02:49.809  READ NATIVE MAX ADDRESS
      ec 00 00 00 00 00 a0 02      00:02:49.801  IDENTIFY DEVICE
      ef 03 45 00 00 00 a0 02      00:02:49.801  SET FEATURES [Set transfer mode]
      f8 00 00 00 00 00 e0 00      00:02:49.801  READ NATIVE MAX ADDRESS

    Error 2367 occurred at disk power-on lifetime: 7256 hours (302 days + 8 hours)
      When the command that caused the error occurred, the device was active or idle.
      After command completion occurred, registers were:
      ER ST SC SN CL CH DH
      -- -- -- -- -- -- --
      40 51 05 1a 1b 00 e0  Error: UNC 5 sectors at LBA = 0x00001b1a = 6938
      Commands leading to the command that caused the error were:
      CR FR SC SN CL CH DH DC   Powered_Up_Time  Command/Feature_Name
      -- -- -- -- -- -- -- --  ----------------  --------------------
      c8 00 05 1a 1b 00 e0 00      00:02:43.056  READ DMA
      f8 00 00 00 00 00 e0 00      00:02:43.056  READ NATIVE MAX ADDRESS
      ec 00 00 00 00 00 a0 02      00:02:43.048  IDENTIFY DEVICE
      ef 03 45 00 00 00 a0 02      00:02:43.048  SET FEATURES [Set transfer mode]
      f8 00 00 00 00 00 e0 00      00:02:43.047  READ NATIVE MAX ADDRESS

    SMART Self-test log structure revision number 1
    No self-tests have been logged. [To run self-tests, use: smartctl -t]
    Device does not support Selective Self Tests/Logging

    Do I need to get a new hard disk for my PC?
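
    If you want a second opinion beyond SMART (82 pending sectors and a growing reallocation count are already a bad sign), a read-only surface scan can be run from a live CD; a sketch, assuming the disk is /dev/sda, its partitions are unmounted, and a USB disk is mounted at /mnt/usb:

    badblocks -sv /dev/sda    # read-only scan (the default mode); -s progress, -v verbose
    # if the data matters, image the disk first with GNU ddrescue before anything else:
    ddrescue /dev/sda /mnt/usb/disk.img /mnt/usb/disk.log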


  • High Server Load cannot figure out why

    - by Tim Bolton
    My server is currently running CentOS 5.2, with WHM 11.34. Currently, we're at 6.43 to 12 for a load average. The sites that we're hosting are taking a long time to respond and resolve. top doesn't show anything out of the ordinary and iftop doesn't show a lot of traffic. We have many resellers, some of whom are not so good at writing code; how can we find the culprit?

vmstat output:

vmstat
procs -----------memory---------- ---swap-- -----io---- --system-- -----cpu------
 r  b   swpd   free   buff   cache    si   so    bi    bo   in   cs  us sy id wa st
 0  2     84  78684 154916 1021080     0    0    72   274    0   14   6  3 80 12  0

top output (ordered by %CPU):

top - 21:44:43 up 5 days, 10:39, 3 users, load average: 3.36, 4.18, 4.73
Tasks: 222 total, 3 running, 219 sleeping, 0 stopped, 0 zombie
Cpu(s): 5.8%us, 2.3%sy, 0.2%ni, 79.6%id, 11.8%wa, 0.0%hi, 0.2%si, 0.0%st
Mem: 2074580k total, 1863044k used, 211536k free, 174828k buffers
Swap: 2040212k total, 84k used, 2040128k free, 987604k cached

  PID USER     PR NI  VIRT  RES  SHR S %CPU %MEM    TIME+ COMMAND
15930 mysql    15  0  138m  46m 4380 S    4  2.3  1:45.87 mysqld
21772 igniteth 17  0 23200 7152 3932 R    4  0.3  0:00.02 php
 1586 root     10 -5     0    0    0 S    2  0.0 11:45.19 kjournald
21759 root     15  0  2416 1024  732 R    2  0.0  0:00.01 top
    1 root     15  0  2156  648  560 S    0  0.0  0:26.31 init
    2 root     RT  0     0    0    0 S    0  0.0  0:00.35 migration/0
    3 root     34 19     0    0    0 S    0  0.0  0:00.32 ksoftirqd/0
    4 root     RT  0     0    0    0 S    0  0.0  0:00.00 watchdog/0
    5 root     RT  0     0    0    0 S    0  0.0  0:02.00 migration/1
    6 root     34 19     0    0    0 S    0  0.0  0:00.11 ksoftirqd/1
    7 root     RT  0     0    0    0 S    0  0.0  0:00.00 watchdog/1
    8 root     RT  0     0    0    0 S    0  0.0  0:01.29 migration/2
    9 root     34 19     0    0    0 S    0  0.0  0:00.26 ksoftirqd/2
   10 root     RT  0     0    0    0 S    0  0.0  0:00.00 watchdog/2
   11 root     RT  0     0    0    0 S    0  0.0  0:00.90 migration/3
   12 root     34 19     0    0    0 R    0  0.0  0:00.20 ksoftirqd/3
   13 root     RT  0     0    0    0 S    0  0.0  0:00.00 watchdog/3

top output (ordered by CPU time):

top - 21:46:12 up 5 days, 10:41, 3 users, load average: 2.88, 3.82, 4.55
Tasks: 217 total, 1 running, 216 sleeping, 0 stopped, 0 zombie
Cpu(s): 3.7%us, 2.0%sy, 2.0%ni, 67.2%id, 25.0%wa, 0.0%hi, 0.1%si, 0.0%st
Mem: 2074580k total, 1959516k used, 115064k free, 183116k buffers
Swap: 2040212k total, 84k used, 2040128k free, 1090308k cached

  PID USER     PR NI  VIRT  RES  SHR S %CPU %MEM    TIME+  TIME COMMAND
32367 root     16  0  215m 212m 1548 S    0 10.5 62:03.63 62:03 tailwatchd
 1586 root     10 -5     0    0    0 S    0  0.0 11:45.27 11:45 kjournald
 1576 root     10 -5     0    0    0 S    0  0.0  2:37.86  2:37 kjournald
27722 root     16  0  2556 1184  800 S    0  0.1  1:48.94  1:48 top
15930 mysql    15  0  138m  46m 4380 S    4  2.3  1:48.63  1:48 mysqld
 2932 root     34 19     0    0    0 S    0  0.0  1:41.05  1:41 kipmi0
  226 root     10 -5     0    0    0 S    0  0.0  1:34.33  1:34 kswapd0
 2671 named    25  0 74688 7400 2116 S    0  0.4  1:23.58  1:23 named
 3229 root     15  0 10300 3348 2724 S    0  0.2  0:40.85  0:40 sshd
 1580 root     10 -5     0    0    0 S    0  0.0  0:30.62  0:30 kjournald
    1 root     17  0  2156  648  560 S    0  0.0  0:26.32  0:26 init
 2616 root     15  0  1816  576  480 S    0  0.0  0:23.50  0:23 syslogd
 1584 root     10 -5     0    0    0 S    0  0.0  0:18.67  0:18 kjournald
 4342 root     34 19 27692  11m 2116 S    0  0.5  0:18.23  0:18 yum-updatesd
 8044 bollingp 15  0  3456 2036  740 S    1  0.1  0:15.56  0:15 imapd
   26 root     10 -5     0    0    0 S    0  0.0  0:14.18  0:14 kblockd/1
 7989 gmailsit 16  0  3196 1748  736 S    0  0.1  0:10.43  0:10 imapd

iostat -xtk 1 10 output:

[root@server1 tmp]# iostat -xtk 1 10
Linux 2.6.18-53.el5 12/18/2012

Time: 09:51:06 PM
avg-cpu: %user %nice %system %iowait %steal %idle
          5.83  0.19    2.53   11.85   0.00 79.60
Device: rrqm/s wrqm/s   r/s    w/s   rkB/s  wkB/s avgrq-sz avgqu-sz  await svctm %util
sda       1.37 118.83 18.70  54.27  131.47 692.72    22.59     4.90  67.19  3.10 22.59
sdb       0.35  39.33 20.33  61.43  158.79 403.22    13.75     5.23  63.93  3.77 30.80

Time: 09:51:07 PM
avg-cpu: %user %nice %system %iowait %steal %idle
          1.50  0.00    0.50   24.00   0.00 74.00
Device: rrqm/s wrqm/s   r/s    w/s   rkB/s  wkB/s avgrq-sz avgqu-sz  await svctm %util
sda       0.00  25.00  2.00   2.00  128.00 108.00   118.00     0.03   7.25  4.00  1.60
sdb       0.00  16.00 41.00 145.00  200.00 668.00     9.33   107.92 272.72  5.38 100.10

Time: 09:51:08 PM
avg-cpu: %user %nice %system %iowait %steal %idle
          2.00  0.00    1.50   29.50   0.00 67.00
Device: rrqm/s wrqm/s   r/s    w/s   rkB/s  wkB/s avgrq-sz avgqu-sz  await svctm %util
sda       0.00  95.00  3.00  33.00   12.00 480.00    27.33     0.07   1.72  1.31  4.70
sdb       0.00  14.00  1.00 228.00    4.00 960.00     8.42   143.49 568.01  4.37 100.10

Time: 09:51:09 PM
avg-cpu: %user %nice %system %iowait %steal %idle
         13.28  0.00    2.76   21.30   0.00 62.66
Device: rrqm/s wrqm/s   r/s    w/s   rkB/s  wkB/s avgrq-sz avgqu-sz  await svctm %util
sda       0.00  21.00  1.00  19.00   16.00 192.00    20.80     0.06   3.55  1.30  2.60
sdb       0.00  36.00 28.00 181.00  124.00 884.00     9.65   121.16 617.31  4.79 100.10

Time: 09:51:10 PM
avg-cpu: %user %nice %system %iowait %steal %idle
          4.74  0.00    1.50   25.19   0.00 68.58
Device: rrqm/s wrqm/s    r/s   w/s   rkB/s  wkB/s avgrq-sz avgqu-sz  await svctm %util
sda       0.00  20.00   3.00 15.00   12.00 136.00    16.44     0.17   7.11  3.11  5.60
sdb       0.00   0.00 103.00 60.00  544.00 248.00     9.72    52.35 545.23  6.14 100.10

Time: 09:51:11 PM
avg-cpu: %user %nice %system %iowait %steal %idle
          1.24  0.00    1.24   25.31   0.00 72.21
Device: rrqm/s wrqm/s    r/s   w/s   rkB/s  wkB/s avgrq-sz avgqu-sz  await svctm %util
sda       0.00  75.00   4.00 28.00   16.00 416.00    27.00     0.08   3.72  2.03  6.50
sdb       2.00   9.00 124.00 17.00  616.00 104.00    10.21     3.73 213.73  7.10 100.10

Time: 09:51:12 PM
avg-cpu: %user %nice %system %iowait %steal %idle
          1.00  0.00    0.75   24.31   0.00 73.93
Device: rrqm/s wrqm/s    r/s   w/s   rkB/s  wkB/s avgrq-sz avgqu-sz  await svctm %util
sda       0.00  24.00   1.00  9.00    4.00 132.00    27.20     0.01   1.20  1.10  1.10
sdb       4.00  40.00 103.00 48.00  528.00 212.00     9.80   105.21 104.32  6.64 100.20

Time: 09:51:13 PM
avg-cpu: %user %nice %system %iowait %steal %idle
          2.50  0.00    1.75   23.25   0.00 72.50
Device: rrqm/s wrqm/s    r/s   w/s   rkB/s  wkB/s avgrq-sz avgqu-sz  await svctm %util
sda       0.00 125.74  3.96 46.53   15.84 689.11    27.92     0.20   4.06  2.41 12.18
sdb       2.97   0.00 91.09 84.16  419.80 471.29    10.17    85.85 590.78  5.66 99.11

Time: 09:51:14 PM
avg-cpu: %user %nice %system %iowait %steal %idle
          0.75  0.00    0.50   24.94   0.00 73.82
Device: rrqm/s wrqm/s    r/s   w/s   rkB/s  wkB/s avgrq-sz avgqu-sz  await svctm %util
sda       0.00  88.00   1.00  7.00    4.00 380.00    96.00     0.04   4.38  3.00  2.40
sdb       3.00   7.00 111.00 44.00  540.00 208.00     9.65    18.58 581.79  6.46 100.10

Time: 09:51:15 PM
avg-cpu: %user %nice %system %iowait %steal %idle
         11.03  0.00    3.26   26.57   0.00 59.15
Device: rrqm/s wrqm/s    r/s   w/s   rkB/s  wkB/s avgrq-sz avgqu-sz  await svctm %util
sda       0.00 145.00   7.00 53.00   28.00 792.00    27.33     0.15   2.50  1.55  9.30
sdb       1.00   0.00 155.00  0.00  800.00   0.00    10.32     2.85  18.63  6.46 100.10

[root@server1 tmp]#

MySQL show full processlist:

mysql> show full processlist;
Id   | User          | Host      | db       | Command        | Time | State                      | Info
1    | DB_USER_ONE   | localhost | DB_ONE   | Query          | 3    | waiting for handler insert | INSERT DELAYED INTO defers (mailtime,msgid,email,transport_method,message,host,ip,router,deliveryuser,deliverydomain) VALUES(FROM_UNIXTIME('1355879748'),'1TivwL-0003y8-8l','[email protected]','remote_smtp','SMTP error from remote mail server after initial connection: host mx1.mail.tw.yahoo.com [203.188.197.119]: 421 4.7.0 [TS01] Messages from 75.125.90.146 temporarily deferred due to user complaints - 4.16.55.1; see http://postmaster.yahoo.com/421-ts01.html','mx1.mail.tw.yahoo.com','203.188.197.119','lookuphost','','')
2    | DELAYED       | localhost | DB_ONE   | Delayed insert | 52   | insert                     |
3    | DELAYED       | localhost | DB_ONE   | Delayed insert | 68   | insert                     |
911  | DELAYED       | localhost | DB_ONE   | Delayed insert | 99   | Waiting for INSERT         |
993  | DB_USER_TWO   | localhost | DB_TWO   | Sleep          | 832  |                            | NULL
994  | DB_USER_ONE   | localhost | DB_ONE   | Query          | 185  | Locked                     | delete from failures where FROM_UNIXTIME(UNIX_TIMESTAMP(NOW())-1296000) > mailtime
1102 | DB_USER_THREE | localhost | DB_THREE | Query          | 29   | NULL                       | commit
1249 | DB_USER_FOUR  | localhost | DB_FOUR  | Query          | 13   | NULL                       | commit
1263 | root          | localhost | DB_FIVE  | Query          | 0    | NULL                       | show full processlist
1264 | DB_USER_SIX   | localhost | DB_SIX   | Query          | 3    | NULL                       | commit

10 rows in set (0.00 sec)
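The iostat numbers say this box is I/O-bound, not CPU-bound: sdb is pinned at ~100% util with await in the hundreds of milliseconds while the CPUs sit mostly idle in iowait, and the processlist already names one suspect (a delete from failures that has been in state Locked for 185 seconds while DELAYED inserts queue behind it). To attribute the disk traffic to a process, something like the sketch below can help. It assumes the kernel exposes /proc/<pid>/io (mainline 2.6.20+, so not guaranteed on a stock CentOS 5.2 kernel; if the file is missing, iotop or a newer kernel is needed), and it must run as root:

#!/usr/bin/python
# Hedged sketch: rank processes by bytes read+written over a 10 s window,
# sampled from /proc/<pid>/io. Run as root.
import os, time

def snapshot():
    snap = {}
    for pid in os.listdir('/proc'):
        if not pid.isdigit():
            continue
        try:
            io = dict(l.split(': ') for l in open('/proc/%s/io' % pid))
            comm = open('/proc/%s/stat' % pid).read().split()[1]
        except (IOError, ValueError):
            continue  # process exited, or /proc/<pid>/io not available
        snap[pid] = (comm, int(io['read_bytes']) + int(io['write_bytes']))
    return snap

before = snapshot()
time.sleep(10)
after = snapshot()

ranked = sorted((total - before[pid][1], pid, comm)
                for pid, (comm, total) in after.items() if pid in before)
for delta, pid, comm in ranked[-10:]:
    print '%10d bytes  pid %-6s %s' % (delta, pid, comm)

If the top entries are php or mysqld children, cPanel's per-domain logs and the slow query log will narrow it down to a reseller.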

    Read the article

  • I need advice: small memory footprint Linux mail server with spam filtering

    - by petermolnar
    I have a VPS that was originally intended to be a webserver, but some minimal mail capability needs to be deployed on it as well, including sending and receiving as a standalone server. The current setup is the following: Postfix receives the mail; the users are in virtual tables, stored in MySQL; on connection, all servers are tested with the policyd-weight service against some DNSBLs; all mail is run through SpamAssassin's spamd with the help of the spamc client; the mail is then delivered with Dovecot 2's LDA (local delivery agent), with virtual users as well.

As you saw, there's no virus scanner running, and that's for a reason: ClamAV eats all the memory possible, and besides, virus mails are all filtered out by this setup (I tested the same setup with ClamAV enabled for 1.5 years; no virus mail ever even reached ClamAV). I don't use amavisd and I really don't want to: you only need that monster if you have plenty of memory and lots of simultaneous scanners, and it's a nightmare to fine-tune by hand. I run policyd-weight instead of policyd and native DNSBLs in Postfix, because I don't like to send someone away just because a single service listed them.

Important statement: everything works fine. I receive a very small amount of spam, nearly never get a false positive, and most of the bad mail is stopped by policyd-weight. The only "problem" is that I feel the services all together use a bit too much memory. I've already cut the modules of SpamAssassin (see below), but I'd really like to hear some advice on how to cut the memory footprint as low as possible, mostly: which plugins does SpamAssassin really need, and which are more or less useless, given my current Postfix & policyd-weight setup? SpamAssassin rules are also compiled with sa-compile (sa-update runs once a week from cron; compile runs right after that).

These are some of the current configurations that may matter; please tell me if you need anything more.

postfix/master.cf (parts only)
dovecot unix - n n - - pipe flags=DRhu user=vmail:vmail argv=/usr/bin/spamc -e /usr/lib/dovecot/deliver -d ${recipient} -f {sender}

postfix/main.cf (parts only)
smtpd_helo_required = yes
smtpd_helo_restrictions = permit_mynetworks, reject_invalid_hostname, permit
smtpd_recipient_restrictions = permit_mynetworks, permit_sasl_authenticated, reject_invalid_hostname, reject_non_fqdn_hostname, reject_non_fqdn_recipient, reject_unknown_recipient_domain, reject_unauth_pipelining, reject_unauth_destination, check_policy_service inet:127.0.0.1:12525, permit

policyd-weight.conf (parts only)
$REJECTMSG = "550 Mail appeared to be SPAM or forged. Ask your Mail/DNS-Administrator to correct HELO and DNS MX settings or to get removed from DNSBLs";
$REJECTLEVEL = 4;
$DEFER_STRING = 'IN_SPAMCOP= BOGUS_MX=';
$DEFER_ACTION = '450';
$DEFER_LEVEL = 5;
$DNSERRMSG = '450 No DNS entries for your MTA, HELO and Domain. Contact YOUR administrator';

# 1: ON, 0: OFF (default)
# If ON request that ALL clients are only checked against RBLs
$dnsbl_checks_only = 0;
# 1: ON (default), 0: OFF
# When set to ON it logs only RBLs which affect scoring (positive or negative)
$LOG_BAD_RBL_ONLY = 1;

## DNSBL settings
@dnsbl_score = (
# host, hit, miss, log name
'dnsbl.ahbl.org', 3, -1, 'dnsbl.ahbl.org',
'dnsbl.njabl.org', 3, -1, 'dnsbl.njabl.org',
'dnsbl.sorbs.net', 3, -1, 'dnsbl.sorbs.net',
'bl.spamcop.net', 3, -1, 'bl.spamcop.net',
'zen.spamhaus.org', 3, -1, 'zen.spamhaus.org',
'pbl.spamhaus.org', 3, -1, 'pbl.spamhaus.org',
'cbl.abuseat.org', 3, -1, 'cbl.abuseat.org',
'list.dsbl.org', 3, -1, 'list.dsbl.org',
);
# If Client IP is listed in MORE DNSBLS than this var, it gets REJECTed immediately
$MAXDNSBLHITS = 3;
# alternatively, if the score of DNSBLs is ABOVE this level, reject immediately
$MAXDNSBLSCORE = 9;
$MAXDNSBLMSG = '550 Az levelezoszerveruk IP cime tul sok spamlistan talahato, kerjuk ellenorizze! / Your MTA is listed in too many DNSBLs; please check.';

## RHSBL settings
@rhsbl_score = (
'multi.surbl.org', 4, 0, 'multi.surbl.org',
'rhsbl.ahbl.org', 4, 0, 'rhsbl.ahbl.org',
'dsn.rfc-ignorant.org', 4, 0, 'dsn.rfc-ignorant.org',
# 'postmaster.rfc-ignorant.org', 0.1, 0, 'postmaster.rfc-ignorant.org',
# 'abuse.rfc-ignorant.org', 0.1, 0, 'abuse.rfc-ignorant.org'
);
# skip a RBL if this RBL had this many continuous errors
$BL_ERROR_SKIP = 2;
# skip a RBL for that many times
$BL_SKIP_RELEASE = 10;

## cache stuff
# must be a directory (add trailing slash)
$LOCKPATH = '/var/run/policyd-weight/';
# socket path for the cache daemon.
$SPATH = $LOCKPATH.'/polw.sock';
# how many seconds the cache may be idle before starting maintenance routines
# NOTE: standard maintenance jobs happen regardless of this setting.
$MAXIDLECACHE = 60;
# after this number of requests do following maintenance jobs: checking for config changes
$MAINTENANCE_LEVEL = 5;

# negative (i.e. SPAM) result cache settings ##################################
# set to 0 to disable caching for spam results. To this level the cache will be cleaned.
$CACHESIZE = 2000;
# at this number of entries cleanup takes place
$CACHEMAXSIZE = 4000;
$CACHEREJECTMSG = '550 temporarily blocked because of previous errors';
# after NTTL retries the cache entry is deleted
$NTTL = 1;
# client MUST NOT retry within this seconds in order to decrease TTL counter
$NTIME = 30;

# positive (i.e. HAM) result cache settings ###################################
# set to 0 to disable caching of HAM. To this number of entries the cache will be cleaned
$POSCACHESIZE = 1000;
# at this number of entries cleanup takes place
$POSCACHEMAXSIZE = 2000;
$POSCACHEMSG = 'using cached result';
# after PTTL requests the HAM entry must succeed one time the RBL checks again
$PTTL = 60;
# after $PTIME in HAM Cache the client must pass one time the RBL checks again.
# Values must be nonfractal. Accepted time-units: s, m, h, d
$PTIME = '3h';
# The client must pass this time the RBL checks in order to be listed as hard-HAM
# After this time the client will pass immediately for PTTL within PTIME
$TEMP_PTIME = '1d';

## DNS settings
# Retries for ONE DNS-Lookup
$DNS_RETRIES = 1;
# Retry-interval for ONE DNS-Lookup
$DNS_RETRY_IVAL = 5;
# max error count for unresponded queries in a complete policy query
$MAXDNSERR = 3;
$MAXDNSERRMSG = 'passed - too many local DNS-errors';
# persistent udp connection for DNS queries.
# broken in Net::DNS version 0.51. Works with Net::DNS 0.53; DEFAULT: off
$PUDP = 0;
# Force the usage of Net::DNS for RBL lookups.
# Normally policyd-weight tries to use a faster RBL lookup routine instead of Net::DNS
$USE_NET_DNS = 0;
# A list of space separated NS IPs
# This overrides resolv.conf settings
# Example: $NS = '1.2.3.4 1.2.3.5';
# DEFAULT: empty
$NS = '';
# timeout for receiving from cache instance
$IPC_TIMEOUT = 2;
# If set to 1 policyd-weight closes connections to smtpd clients in order to avoid too many
# established connections to one policyd-weight child
$TRY_BALANCE = 0;

# scores for checks, WARNING: they may manipulate eachother
# or be factors for other scores.
# HIT score, MISS Score
@client_ip_eq_helo_score = (1.5, -1.25 );
@helo_score = (1.5, -2 );
@helo_score = (0, -2 );
@helo_from_mx_eq_ip_score = (1.5, -3.1 );
@helo_numeric_score = (2.5, 0 );
@from_match_regex_verified_helo = (1, -2 );
@from_match_regex_unverified_helo = (1.6, -1.5 );
@from_match_regex_failed_helo = (2.5, 0 );
@helo_seems_dialup = (1.5, 0 );
@failed_helo_seems_dialup = (2, 0 );
@helo_ip_in_client_subnet = (0, -1.2 );
@helo_ip_in_cl16_subnet = (0, -0.41 );
# @client_seems_dialup_score = (3.75, 0 );
@client_seems_dialup_score = (0, 0 );
@from_multiparted = (1.09, 0 );
@from_anon = (1.17, 0 );
@bogus_mx_score = (2.1, 0 );
@random_sender_score = (0.25, 0 );
@rhsbl_penalty_score = (3.1, 0 );
@enforce_dyndns_score = (3, 0 );

spamassassin/init.pre (I've put the .pre files together)
loadplugin Mail::SpamAssassin::Plugin::Hashcash
loadplugin Mail::SpamAssassin::Plugin::SPF
loadplugin Mail::SpamAssassin::Plugin::Pyzor
loadplugin Mail::SpamAssassin::Plugin::Razor2
loadplugin Mail::SpamAssassin::Plugin::AutoLearnThreshold
loadplugin Mail::SpamAssassin::Plugin::MIMEHeader
loadplugin Mail::SpamAssassin::Plugin::ReplaceTags
loadplugin Mail::SpamAssassin::Plugin::Check
loadplugin Mail::SpamAssassin::Plugin::HTTPSMismatch
loadplugin Mail::SpamAssassin::Plugin::URIDetail
loadplugin Mail::SpamAssassin::Plugin::Bayes
loadplugin Mail::SpamAssassin::Plugin::BodyEval
loadplugin Mail::SpamAssassin::Plugin::DNSEval
loadplugin Mail::SpamAssassin::Plugin::HTMLEval
loadplugin Mail::SpamAssassin::Plugin::HeaderEval
loadplugin Mail::SpamAssassin::Plugin::MIMEEval
loadplugin Mail::SpamAssassin::Plugin::RelayEval
loadplugin Mail::SpamAssassin::Plugin::URIEval
loadplugin Mail::SpamAssassin::Plugin::WLBLEval
loadplugin Mail::SpamAssassin::Plugin::VBounce
loadplugin Mail::SpamAssassin::Plugin::Rule2XSBody

spamassassin/local.cf (parts)
use_bayes 1
bayes_auto_learn 1
bayes_store_module Mail::SpamAssassin::BayesStore::MySQL
bayes_sql_dsn DBI:mysql:db:127.0.0.1:3306
bayes_sql_username user
bayes_sql_password pass
bayes_ignore_header X-Bogosity
bayes_ignore_header X-Spam-Flag
bayes_ignore_header X-Spam-Status

### User settings
user_scores_dsn DBI:mysql:db:127.0.0.1:3306
user_scores_sql_password user
user_scores_sql_username pass
user_scores_sql_custom_query SELECT preference, value FROM _TABLE_ WHERE username = _USERNAME_ OR username = '$GLOBAL' OR username = CONCAT('%',_DOMAIN_) ORDER BY username ASC

# for better speed
score DNS_FROM_AHBL_RHSBL 0
score __RFC_IGNORANT_ENVFROM 0
score DNS_FROM_RFC_DSN 0
score DNS_FROM_RFC_BOGUSMX 0
score __DNS_FROM_RFC_POST 0
score __DNS_FROM_RFC_ABUSE 0
score __DNS_FROM_RFC_WHOIS 0

UPDATE 01: As adaptr advised, I removed policyd-weight and configured Postfix's postscreen instead. This shaved roughly 15-20 MB off RAM usage and made things noticeably faster. I'm not sure it's working at full capacity yet, but it seems promising.
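Since the question is ultimately "where do the megabytes go", it may be worth measuring before cutting more plugins: summing resident memory per daemon makes it obvious whether spamd, the Dovecot processes, or the policy daemon dominates. A hedged sketch follows; the process names in WATCH are assumptions, so adjust them to whatever ps shows on the VPS, and note that RSS double-counts pages shared between preforked children, so treat the totals as an upper bound:

#!/usr/bin/python
# Hedged sketch: sum VmRSS (in kB) per mail-stack daemon from /proc.
import os

WATCH = ('master', 'smtpd', 'cleanup', 'policyd-weight', 'spamd', 'dovecot', 'imap')
totals = {}

for pid in os.listdir('/proc'):
    if not pid.isdigit():
        continue
    try:
        comm = open('/proc/%s/stat' % pid).read().split()[1].strip('()')
        status = open('/proc/%s/status' % pid).read()
    except IOError:
        continue  # process exited between listdir() and open()
    if not comm.startswith(WATCH):
        continue
    for line in status.splitlines():
        if line.startswith('VmRSS:'):
            totals[comm] = totals.get(comm, 0) + int(line.split()[1])

for comm in sorted(totals, key=totals.get, reverse=True):
    print '%-16s %6d kB' % (comm, totals[comm])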

    Read the article

  • Does anyone really understand how HFSC scheduling in Linux/BSD works?

    - by Mecki
    I read the original SIGCOMM '97 PostScript paper about HFSC. It is very technical, but I understand the basic concept: instead of giving a linear service curve (as with pretty much every other scheduling algorithm), you can specify a convex or concave service curve, and thus it is possible to decouple bandwidth and delay. However, even though this paper mentions two kinds of scheduling algorithms being used (real-time and link-share), it always mentions only ONE curve per scheduling class (the decoupling is done by specifying this curve; only one curve is needed for that).

Now HFSC has been implemented for BSD (OpenBSD, FreeBSD, etc.) using the ALTQ scheduling framework, and it has been implemented for Linux using the TC scheduling framework (part of iproute2). Both implementations added two additional service curves that were NOT in the original paper: a real-time service curve and an upper-limit service curve. Again, please note that the original paper mentions two scheduling algorithms (real-time and link-share), but in that paper both work with one single service curve. There have never been two independent service curves for either one, as you currently find in BSD and Linux.

Even worse, some versions of ALTQ seem to add an additional queue priority to HFSC (there is no such thing as priority in the original paper either). I found several BSD HowTos mentioning this priority setting (even though the man page of the latest ALTQ release knows no such parameter for HFSC, so officially it does not even exist).

This all makes HFSC scheduling even more complex than the algorithm described in the original paper, and there are tons of tutorials on the Internet that often contradict each other, one claiming the opposite of the other. This is probably the main reason why nobody really seems to understand how HFSC scheduling really works. Before I can ask my questions, we need a sample setup of some kind. I'll use a very simple one, as seen in the image below. Here are the questions I cannot answer because the tutorials contradict each other:

1. What do I need a real-time curve for at all? Assuming A1, A2, B1, B2 are all 128 kbit/s link-share (no real-time curve for either one), then each of those will get 128 kbit/s if the root has 512 kbit/s to distribute (and A and B are both 256 kbit/s, of course), right? Why would I additionally give A1 and B1 a real-time curve with 128 kbit/s? What would this be good for? To give those two a higher priority? According to the original paper I can give them a higher priority by using a curve; that's what HFSC is all about, after all. By giving both classes a curve of [256kbit/s 20ms 128kbit/s], both automatically have twice the priority of A2 and B2 (while still only getting 128 kbit/s on average).

2. Does the real-time bandwidth count towards the link-share bandwidth? E.g. if A1 and B1 both only have 64 kbit/s real-time and 64 kbit/s link-share bandwidth, does that mean once they are served 64 kbit/s via real-time, their link-share requirement is satisfied as well (they might get excess bandwidth, but let's ignore that for a second), or does that mean they get another 64 kbit/s via link-share? So does each class have a bandwidth "requirement" of real-time plus link-share? Or does a class only have a higher requirement than the real-time curve if the link-share curve is higher than the real-time curve (the current link-share requirement equals the specified link-share requirement minus the real-time bandwidth already provided to this class)?

3. Is the upper-limit curve applied to real-time as well, only to link-share, or maybe to both? Some tutorials say one way, some say the other. Some even claim upper-limit is the maximum for real-time bandwidth + link-share bandwidth. What is the truth?

4. Assuming A2 and B2 are both 128 kbit/s, does it make any difference whether A1 and B1 are 128 kbit/s link-share only, or 64 kbit/s real-time and 128 kbit/s link-share, and if so, what difference?

5. If I use the separate real-time curve to increase the priorities of classes, why would I need "curves" at all? Why is real-time not a flat value, and link-share also a flat value? Why are both curves? The need for curves is clear in the original paper, because there is only one attribute of that kind per class. But now, having three attributes (real-time, link-share, and upper-limit), what do I still need curves on each one for? Why would I want the curves' shapes (not the average bandwidth, but their slopes) to be different for real-time and link-share traffic?

6. According to the little documentation available, real-time curve values are totally ignored for inner classes (class A and B); they are only applied to leaf classes (A1, A2, B1, B2). If that is true, why does the ALTQ HFSC sample configuration (search for 3.3 Sample configuration) set real-time curves on inner classes and claim that those set the guaranteed rate of those inner classes? Isn't that completely pointless? (Note: pshare sets the link-share curve in ALTQ and grate the real-time curve; you can see this in the paragraph above the sample configuration.)

7. Some tutorials say the sum of all real-time curves may not be higher than 80% of the line speed; others say it must not be higher than 70% of the line speed. Which one is right, or are they maybe both wrong?

8. One tutorial said you should forget all the theory. No matter how things really work (schedulers and bandwidth distribution), imagine the three curves according to the following "simplified mind model": real-time is the guaranteed bandwidth that this class will always get. Link-share is the bandwidth that this class wants to become fully satisfied, but satisfaction cannot be guaranteed. In case there is excess bandwidth, the class might even get offered more bandwidth than necessary to become satisfied, but it may never use more than upper-limit says. For all this to work, the sum of all real-time bandwidths may not be above xx% of the line speed (see the question above; the percentage varies). Question: is this more or less accurate, or a total misunderstanding of HFSC?

9. And if the assumption above is really accurate, where is prioritization in that model? E.g. every class might have a real-time bandwidth (guaranteed), a link-share bandwidth (not guaranteed) and maybe an upper-limit, but still some classes have higher priority needs than other classes. In that case I must still prioritize somehow, even among the real-time traffic of those classes. Would I prioritize by the slope of the curves? And if so, which curve? The real-time curve? The link-share curve? The upper-limit curve? All of them? Would I give all of them the same slope, or each a different one, and how do I find out the right slope?

I still haven't lost hope that there exists at least a handful of people in this world who really understand HFSC and are able to answer all these questions accurately. And doing so without contradicting each other in the answers would be really nice ;-)
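For what it's worth, one thing that is unambiguous across the paper, ALTQ, and tc is what a single service curve is, so it may help to pin that down before arguing about the three-curve extensions. The [m1 d m2] triple used above (e.g. [256kbit/s 20ms 128kbit/s]) describes a two-piece linear function of backlogged time t:

S(t) =
\begin{cases}
m_1 \, t & 0 \le t \le d \\
m_1 d + m_2 (t - d) & t > d
\end{cases}

With m1 > m2 the curve is concave: a freshly backlogged class is served at 256 kbit/s for its first 20 ms and at 128 kbit/s thereafter, which is exactly how HFSC buys a class lower delay without raising its long-term rate.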

    Read the article

  • Python CGI on Amazon AWS EC2 micro-instance -- a how-to!

    - by user595585
    How can you make an EC2 micro instance serve CGI scripts from lighttpd? For instance, Python CGI? Well, it took half a day, but I have gotten Python CGI running on a free Amazon AWS EC2 micro-instance, using the lighttpd server. I think it will help my fellow noobs to put all the steps in one place. Armed with the simple steps below, it will take you only 15 minutes to set things up! My question for the more experienced users reading this is: are there any security flaws in what I've done? (See file and directory permissions.)

Step 1: Start your EC2 instance and ssh into it.

[Obviously, you'll need to sign up for Amazon EC2 and save your key pairs to a *.pem file. I won't go over this, as Amazon tells you how to do it.] Sign into your AWS account and start your EC2 instance. The web has tutorials on doing this. Notice that the default instance size that Amazon presents to you is "small". This is not "micro" and so it will cost you money. Be sure to manually choose "micro". (Micro instances are free only for the first year...) Find the public DNS code for your running instance. To do this, click on the instance in the top pane of the dashboard and you'll eventually see the "Public DNS" field populated in the bottom pane. (You may need to fiddle a bit.) The Public DNS looks something like: ec2-174-129-110-23.compute-1.amazonaws.com

Start your Unix console program. (On Mac OS X, it's called Terminal, and lives in the Applications - Utilities folder.) cd to the directory on your desktop system that has your *.pem file containing your AWS keypairs. ssh to your EC2 instance using a command like:

ssh -i <<your *.pem filename>> ec2-user@<< Public DNS address >>

So, for me, this was:

ssh -i amzn_ec2_keypair.pem [email protected]

Your EC2 instance should let you in.

Step 2: Download lighttpd to your EC2 instance.

To install lighttpd, you will need root access on your EC2 instance. The problem is: Amazon will not let you sign in as root. (Not straightforwardly, at least.) But there is a workaround. Type this command:

sudo /bin/bash

The system prompt-character will change from $ to #. We won't exit from "sudo" until the very last step in this whole process. Install the lighttpd application (version 1.4.28-1.3.amzn1 for me):

yum install lighttpd

Install the FastCGI libraries for lighttpd (not needed, but why not?):

yum install lighttpd-fastcgi

Test that your server is working:

/etc/init.d/lighttpd start

Step 3: Let the outside world see your server.

If you now tried to hit your server from the browser on your desktop, it would fail. The reason: by default, Amazon AWS does not open any ports to your EC2 instance. So, you have to open the ports manually. Go to your EC2 dashboard in your desktop's browser. Click on "Security Groups" in the left pane. One or more security groups will appear in the upper right pane. Choose the one that was assigned to your EC2 instance when you launched your instance. A table called "Allowed Connections" will appear in the lower right pane. A pop-up menu will let you choose "HTTP" as the connection method. The other values in that line of the table should be: tcp, 80, 80, 0.0.0.0/0. Now hit your EC2 instance's server from the desktop in your browser, using the Public DNS address that you used earlier to SSH in. You should see the lighttpd generic web page. If you don't, I can't help you because I am such a noob. :-(

Step 4: Configure lighttpd to serve CGI.

Back in the console program, cd to the configuration directory for lighttpd:

cd /etc/lighttpd

To enable CGI, you want to uncomment one line in the modules.conf file. (I could have enabled FastCGI, but baby steps are best!) You can do this with the "ed" editor as follows:

ed modules.conf
/include "conf.d\/cgi.conf"/
s/#//
w
q

Create the directory where CGI programs will live. (The /etc/lighttpd/lighttpd.conf file determines where this will be.) We'll create our directory in the default location, so we don't have to do any editing of configuration files:

cd /var/www/lighttpd
mkdir cgi-bin
chmod 755 cgi-bin

Almost there! Of course you need to put a test CGI program into the cgi-bin directory. Here is one:

cd cgi-bin
ed
a
#!/usr/bin/python
print "Content-type: text/html\n\n"
print "<html><body>Hello, pyworld.</body></html>"
.
w hellopyworld.py
q
chmod 655 hellopyworld.py

Restart your lighttpd server:

/etc/init.d/lighttpd restart

Test your CGI program. In your desktop's browser, hit this URL, substituting your EC2 instance's public DNS address:

http://<<Public DNS>>/cgi-bin/hellopyworld.py

For me, this was:

http://ec2-174-129-110-23.compute-1.amazonaws.com/cgi-bin/hellopyworld.py

Step 5: That's it! Clean up, and give thanks!

To exit from the "sudo /bin/bash" command given earlier, type:

exit

Acknowledgements: heaps of thanks to:
wiki.vpslink.com/Install_and_Configure_lighttpd
www.cyberciti.biz/tips/lighttpd-howto-setup-cgi-bin-access-for-perl-programs.html
aws.typepad.com/aws/2010/06/building-three-tier-architectures-with-security-groups.html

Good luck, amigos! I apologize for the non-traditional nature of this "question", but I have gotten so much help from Stackoverflow that I was eager to give something back.
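One small addition that can save some head-scratching: with plain CGI, a syntax error in the script yields an opaque 500 error. The stdlib cgitb module (part of the Python 2 standard library) renders the traceback in the browser instead. A hedged variant of hellopyworld.py:

#!/usr/bin/python
# Hypothetical debugging variant of hellopyworld.py: cgitb.enable() makes
# any uncaught exception render as a readable HTML traceback in the browser.
import cgitb
cgitb.enable()

print "Content-type: text/html\n"
print "<html><body>Hello again, pyworld.</body></html>"

Install it the same way (drop it in /var/www/lighttpd/cgi-bin with the same chmod as above), and don't leave cgitb enabled on anything public-facing, since tracebacks leak paths and source.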

    Read the article

  • Configuring OpenLDAP and SSL

    - by Stormshadow
    I am having trouble trying to connect to a secure OpenLDAP server which I have set up. On running my LDAP client code java -Djavax.net.debug=ssl LDAPConnector I get the following exception trace (java version 1.6.0_17) trigger seeding of SecureRandom done seeding SecureRandom %% No cached client session *** ClientHello, TLSv1 RandomCookie: GMT: 1256110124 bytes = { 224, 19, 193, 148, 45, 205, 108, 37, 101, 247, 112, 24, 157, 39, 111, 177, 43, 53, 206, 224, 68, 165, 55, 185, 54, 203, 43, 91 } Session ID: {} Cipher Suites: [SSL_RSA_WITH_RC4_128_MD5, SSL_RSA_WITH_RC4_128_SHA, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_DHE_RSA_WITH_AES_128_CBC_SHA, TLS_DHE_DSS_WITH_AES_128_CBC_SHA, SSL_RSA_W ITH_3DES_EDE_CBC_SHA, SSL_DHE_RSA_WITH_3DES_EDE_CBC_SHA, SSL_DHE_DSS_WITH_3DES_EDE_CBC_SHA, SSL_RSA_WITH_DES_CBC_SHA, SSL_DHE_RSA_WITH_DES_CBC_SHA, SSL_DHE_DSS_WITH_DES_CBC_SH A, SSL_RSA_EXPORT_WITH_RC4_40_MD5, SSL_RSA_EXPORT_WITH_DES40_CBC_SHA, SSL_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA, SSL_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA] Compression Methods: { 0 } *** Thread-0, WRITE: TLSv1 Handshake, length = 73 Thread-0, WRITE: SSLv2 client hello message, length = 98 Thread-0, received EOFException: error Thread-0, handling exception: javax.net.ssl.SSLHandshakeException: Remote host closed connection during handshake Thread-0, SEND TLSv1 ALERT: fatal, description = handshake_failure Thread-0, WRITE: TLSv1 Alert, length = 2 Thread-0, called closeSocket() main, handling exception: javax.net.ssl.SSLHandshakeException: Remote host closed connection during handshake javax.naming.CommunicationException: simple bind failed: ldap.natraj.com:636 [Root exception is javax.net.ssl.SSLHandshakeException: Remote host closed connection during hands hake] at com.sun.jndi.ldap.LdapClient.authenticate(Unknown Source) at com.sun.jndi.ldap.LdapCtx.connect(Unknown Source) at com.sun.jndi.ldap.LdapCtx.<init>(Unknown Source) at com.sun.jndi.ldap.LdapCtxFactory.getUsingURL(Unknown Source) at com.sun.jndi.ldap.LdapCtxFactory.getUsingURLs(Unknown Source) at com.sun.jndi.ldap.LdapCtxFactory.getLdapCtxInstance(Unknown Source) at com.sun.jndi.ldap.LdapCtxFactory.getInitialContext(Unknown Source) at javax.naming.spi.NamingManager.getInitialContext(Unknown Source) at javax.naming.InitialContext.getDefaultInitCtx(Unknown Source) at javax.naming.InitialContext.init(Unknown Source) at javax.naming.InitialContext.<init>(Unknown Source) at javax.naming.directory.InitialDirContext.<init>(Unknown Source) at LDAPConnector.CallSecureLDAPServer(LDAPConnector.java:43) at LDAPConnector.main(LDAPConnector.java:237) Caused by: javax.net.ssl.SSLHandshakeException: Remote host closed connection during handshake at com.sun.net.ssl.internal.ssl.SSLSocketImpl.readRecord(Unknown Source) at com.sun.net.ssl.internal.ssl.SSLSocketImpl.performInitialHandshake(Unknown Source) at com.sun.net.ssl.internal.ssl.SSLSocketImpl.readDataRecord(Unknown Source) at com.sun.net.ssl.internal.ssl.AppInputStream.read(Unknown Source) at java.io.BufferedInputStream.fill(Unknown Source) at java.io.BufferedInputStream.read1(Unknown Source) at java.io.BufferedInputStream.read(Unknown Source) at com.sun.jndi.ldap.Connection.run(Unknown Source) at java.lang.Thread.run(Unknown Source) Caused by: java.io.EOFException: SSL peer shut down incorrectly at com.sun.net.ssl.internal.ssl.InputRecord.read(Unknown Source) ... 
9 more I am able to connect to the same secure LDAP server however if I use another version of java (1.6.0_14) I have created and installed the server certificates in the cacerts of both the JRE's as mentioned in this guide -- OpenLDAP with SSL When I run ldapsearch -x on the server I get # extended LDIF # # LDAPv3 # base <dc=localdomain> (default) with scope subtree # filter: (objectclass=*) # requesting: ALL # # localdomain dn: dc=localdomain objectClass: top objectClass: dcObject objectClass: organization o: localdomain dc: localdomain # admin, localdomain dn: cn=admin,dc=localdomain objectClass: simpleSecurityObject objectClass: organizationalRole cn: admin description: LDAP administrator # search result search: 2 result: 0 Success # numResponses: 3 # numEntries: 2 On running openssl s_client -connect ldap.natraj.com:636 -showcerts , I obtain the self signed certificate. My slapd.conf file is as follows ####################################################################### # Global Directives: # Features to permit #allow bind_v2 # Schema and objectClass definitions include /etc/ldap/schema/core.schema include /etc/ldap/schema/cosine.schema include /etc/ldap/schema/nis.schema include /etc/ldap/schema/inetorgperson.schema # Where the pid file is put. The init.d script # will not stop the server if you change this. pidfile /var/run/slapd/slapd.pid # List of arguments that were passed to the server argsfile /var/run/slapd/slapd.args # Read slapd.conf(5) for possible values loglevel none # Where the dynamically loaded modules are stored modulepath /usr/lib/ldap moduleload back_hdb # The maximum number of entries that is returned for a search operation sizelimit 500 # The tool-threads parameter sets the actual amount of cpu's that is used # for indexing. tool-threads 1 ####################################################################### # Specific Backend Directives for hdb: # Backend specific directives apply to this backend until another # 'backend' directive occurs backend hdb ####################################################################### # Specific Backend Directives for 'other': # Backend specific directives apply to this backend until another # 'backend' directive occurs #backend <other> ####################################################################### # Specific Directives for database #1, of type hdb: # Database specific directives apply to this databasse until another # 'database' directive occurs database hdb # The base of your directory in database #1 suffix "dc=localdomain" # rootdn directive for specifying a superuser on the database. This is needed # for syncrepl. rootdn "cn=admin,dc=localdomain" # Where the database file are physically stored for database #1 directory "/var/lib/ldap" # The dbconfig settings are used to generate a DB_CONFIG file the first # time slapd starts. They do NOT override existing an existing DB_CONFIG # file. You should therefore change these settings in DB_CONFIG directly # or remove DB_CONFIG and restart slapd for changes to take effect. # For the Debian package we use 2MB as default but be sure to update this # value if you have plenty of RAM dbconfig set_cachesize 0 2097152 0 # Sven Hartge reported that he had to set this value incredibly high # to get slapd running at all. See http://bugs.debian.org/303057 for more # information. # Number of objects that can be locked at the same time. 
dbconfig set_lk_max_objects 1500 # Number of locks (both requested and granted) dbconfig set_lk_max_locks 1500 # Number of lockers dbconfig set_lk_max_lockers 1500 # Indexing options for database #1 index objectClass eq # Save the time that the entry gets modified, for database #1 lastmod on # Checkpoint the BerkeleyDB database periodically in case of system # failure and to speed slapd shutdown. checkpoint 512 30 # Where to store the replica logs for database #1 # replogfile /var/lib/ldap/replog # The userPassword by default can be changed # by the entry owning it if they are authenticated. # Others should not be able to see it, except the # admin entry below # These access lines apply to database #1 only access to attrs=userPassword,shadowLastChange by dn="cn=admin,dc=localdomain" write by anonymous auth by self write by * none # Ensure read access to the base for things like # supportedSASLMechanisms. Without this you may # have problems with SASL not knowing what # mechanisms are available and the like. # Note that this is covered by the 'access to *' # ACL below too but if you change that as people # are wont to do you'll still need this if you # want SASL (and possible other things) to work # happily. access to dn.base="" by * read # The admin dn has full write access, everyone else # can read everything. access to * by dn="cn=admin,dc=localdomain" write by * read # For Netscape Roaming support, each user gets a roaming # profile for which they have write access to #access to dn=".*,ou=Roaming,o=morsnet" # by dn="cn=admin,dc=localdomain" write # by dnattr=owner write ####################################################################### # Specific Directives for database #2, of type 'other' (can be hdb too): # Database specific directives apply to this databasse until another # 'database' directive occurs #database <other> # The base of your directory for database #2 #suffix "dc=debian,dc=org" ####################################################################### # SSL: # Uncomment the following lines to enable SSL and use the default # snakeoil certificates. #TLSCertificateFile /etc/ssl/certs/ssl-cert-snakeoil.pem #TLSCertificateKeyFile /etc/ssl/private/ssl-cert-snakeoil.key TLSCipherSuite TLS_RSA_AES_256_CBC_SHA TLSCACertificateFile /etc/ldap/ssl/server.pem TLSCertificateFile /etc/ldap/ssl/server.pem TLSCertificateKeyFile /etc/ldap/ssl/server.pem My ldap.conf file is # # LDAP Defaults # # See ldap.conf(5) for details # This file should be world readable but not world writable. HOST ldap.natraj.com PORT 636 BASE dc=localdomain URI ldaps://ldap.natraj.com TLS_CACERT /etc/ldap/ssl/server.pem TLS_REQCERT allow #SIZELIMIT 12 #TIMELIMIT 15 #DEREF never Why is it that I can connect to the same server using one version of JRE while I cannot with another ?
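One detail that stands out when the trace and slapd.conf are read side by side: the ClientHello from 1.6.0_17 offers only 128-bit AES and weaker suites (no TLS_RSA_WITH_AES_256_CBC_SHA), while TLSCipherSuite pins slapd to TLS_RSA_AES_256_CBC_SHA. Sun JREs of that era ship with AES-256 disabled until the unlimited-strength JCE policy files are installed, so a cipher mismatch is one plausible explanation for the handshake dying; the two JRE builds may simply have different default suite lists. To take the JVM out of the picture entirely, a quick probe of the handshake helps. A hedged sketch, assuming Python 2.6+ for its ssl module; host and port are taken from ldap.conf above:

#!/usr/bin/python
# Hedged sketch: attempt the TLS handshake outside the JVM and report the
# cipher the server is willing to negotiate.
import socket, ssl

sock = socket.create_connection(('ldap.natraj.com', 636), timeout=10)
tls = ssl.wrap_socket(sock, cert_reqs=ssl.CERT_NONE)  # handshake only, skip cert checks
print 'negotiated cipher:', tls.cipher()
tls.close()

If this prints something like ('AES256-SHA', 'TLSv1', 256), the server really does insist on AES-256, and the fix is on the JRE side (policy files) or in loosening TLSCipherSuite.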

    Read the article

  • High Load mysql on Debian server stops every day. Why?

    - by Oleg Abrazhaev
    I have a Debian server with 32 GB of memory, running apache2, memcached and nginx. Memory usage is always at maximum, with only about 500 MB free; most of the memory is consumed by MySQL (Apache is configured for only 70 clients, and the other services use little memory). When MySQL uses up all the memory, it stops, and nothing works until MySQL is restarted. MySQL is configured to use at most 24 GB of memory. I have heavyweight InnoDB databases (400000 rows, 30 GB), and a multithreaded daemon on the server makes many inserts into these tables, which is why they are InnoDB. Here is my MySQL config:

[mysqld]
#
# * Basic Settings
#
default-time-zone = "+04:00"
user = mysql
pid-file = /var/run/mysqld/mysqld.pid
socket = /var/run/mysqld/mysqld.sock
port = 3306
basedir = /usr
datadir = /var/lib/mysql
tmpdir = /tmp
language = /usr/share/mysql/english
skip-external-locking
default-time-zone = 'Europe/Moscow'
#
# Instead of skip-networking the default is now to listen only on
# localhost which is more compatible and is not less secure.
#
# * Fine Tuning
#
#low_priority_updates = 1
concurrent_insert = ALWAYS
wait_timeout = 600
interactive_timeout = 600
#normal
key_buffer_size = 2024M
#key_buffer_size = 1512M
#70% hot cache
key_cache_division_limit = 70
#16-32
max_allowed_packet = 32M
#1-16M
thread_stack = 8M
#40-50
thread_cache_size = 50
#orderby groupby sort
sort_buffer_size = 64M
#same
myisam_sort_buffer_size = 400M
#temp table creates when group_by
tmp_table_size = 3000M
#tables in memory
max_heap_table_size = 3000M
#on disk
open_files_limit = 10000
table_cache = 10000
join_buffer_size = 5M
# This replaces the startup script and checks MyISAM tables if needed
# the first time they are touched
myisam-recover = BACKUP
#myisam_use_mmap = 1
max_connections = 200
thread_concurrency = 8
#
# * Query Cache Configuration
#
#more ignored
query_cache_limit = 50M
query_cache_size = 210M
#on query cache
query_cache_type = 1
#
# * Logging and Replication
#
# Both location gets rotated by the cronjob.
# Be aware that this log type is a performance killer.
#log = /var/log/mysql/mysql.log
#
# Error logging goes to syslog. This is a Debian improvement :)
#
# Here you can see queries with especially long duration
log_slow_queries = /var/log/mysql/mysql-slow.log
long_query_time = 1
log-queries-not-using-indexes
#
# The following can be used as easy to replay backup logs or for replication.
# note: if you are setting up a replication slave, see README.Debian about
# other settings you may need to change.
#server-id = 1
#log_bin = /var/log/mysql/mysql-bin.log
server-id = 1
log-bin = /var/lib/mysql/mysql-bin
#replicate-do-db = gate
log-bin-index = /var/lib/mysql/mysql-bin.index
log-error = /var/lib/mysql/mysql-bin.err
relay-log = /var/lib/mysql/relay-bin
relay-log-info-file = /var/lib/mysql/relay-bin.info
relay-log-index = /var/lib/mysql/relay-bin.index
binlog_do_db = 24avia
expire_logs_days = 10
max_binlog_size = 100M
read_buffer_size = 4024288
innodb_buffer_pool_size = 5000M
innodb_flush_log_at_trx_commit = 2
innodb_thread_concurrency = 8
table_definition_cache = 2000
group_concat_max_len = 16M
#binlog_do_db = gate
#binlog_ignore_db = include_database_name
#
# * BerkeleyDB
#
# Using BerkeleyDB is now discouraged as its support will cease in 5.1.12.
#skip-bdb
#
# * InnoDB
#
# InnoDB is enabled by default with a 10MB datafile in /var/lib/mysql/.
# Read the manual for more InnoDB related options. There are many!
# You might want to disable InnoDB to shrink the mysqld process by circa 100MB.
#skip-innodb
#
# * Security Features
#
# Read the manual, too, if you want chroot!
# chroot = /var/lib/mysql/
#
# For generating SSL certificates I recommend the OpenSSL GUI "tinyca".
#
# ssl-ca=/etc/mysql/cacert.pem
# ssl-cert=/etc/mysql/server-cert.pem
# ssl-key=/etc/mysql/server-key.pem

[mysqldump]
quick
quote-names
max_allowed_packet = 500M

[mysql]
#no-auto-rehash # faster start of mysql but no tab completition

[isamchk]
key_buffer = 32M
key_buffer_size = 512M
#
# * NDB Cluster
#
# See /usr/share/doc/mysql-server-*/README.Debian for more information.
#
# The following configuration is read by the NDB Data Nodes (ndbd processes)
# not from the NDB Management Nodes (ndb_mgmd processes).
#
# [MYSQL_CLUSTER]
# ndb-connectstring=127.0.0.1
#
# * IMPORTANT: Additional settings that can override those from this file!
# The files must end with '.cnf', otherwise they'll be ignored.
#
!includedir /etc/mysql/conf.d/

Please help me make it stable. Memory usage:

/etc/mysql # free
             total       used       free     shared    buffers     cached
Mem:      32930800   32766424     164376          0     139208   23829196
-/+ buffers/cache:    8798020   24132780
Swap:     33553328      44660   33508668

Maybe my problem is not memory after all, but MySQL still stops every day. As you can see, 24 GB sits in cache. Thanks to Michael Hampton for the correction. Load average on the server is 3.5. Maybe it's the HDD, or another problem? Maybe my config is not optimal for 30 GB of InnoDB? I already tried mysqltuner and tuning-primer.sh, but they marked everything green.

Mysqltuner output:

>> MySQLTuner 1.0.1 - Major Hayden <[email protected]>
>> Bug reports, feature requests, and downloads at http://mysqltuner.com/
>> Run with '--help' for additional options and output filtering

-------- General Statistics --------------------------------------------------
[--] Skipped version check for MySQLTuner script
[OK] Currently running supported MySQL version 5.5.24-9-log
[OK] Operating on 64-bit architecture

-------- Storage Engine Statistics -------------------------------------------
[--] Status: -Archive -BDB -Federated +InnoDB -ISAM -NDBCluster
[--] Data in MyISAM tables: 112G (Tables: 1528)
[--] Data in InnoDB tables: 39G (Tables: 340)
[--] Data in PERFORMANCE_SCHEMA tables: 0B (Tables: 17)
[!!] Total fragmented tables: 344

-------- Performance Metrics -------------------------------------------------
[--] Up for: 8h 18m 33s (14M q [478.333 qps], 259K conn, TX: 9B, RX: 5B)
[--] Reads / Writes: 84% / 16%
[--] Total buffers: 10.5G global + 81.1M per thread (200 max threads)
[OK] Maximum possible memory usage: 26.3G (83% of installed RAM)
[OK] Slow queries: 1% (259K/14M)
[!!] Highest connection usage: 100% (201/200)
[OK] Key buffer size / total MyISAM indexes: 1.5G/5.6G
[OK] Key buffer hit rate: 100.0% (6B cached / 1M reads)
[OK] Query cache efficiency: 74.3% (8M cached / 11M selects)
[OK] Query cache prunes per day: 0
[OK] Sorts requiring temporary tables: 0% (0 temp sorts / 247K sorts)
[!!] Joins performed without indexes: 106025
[!!] Temporary tables created on disk: 49% (351K on disk / 715K total)
[OK] Thread cache hit rate: 99% (249 created / 259K connections)
[!!] Table cache hit rate: 15% (2K open / 13K opened)
[OK] Open file limit used: 15% (3K/20K)
[OK] Table locks acquired immediately: 99% (4M immediate / 4M locks)
[!!] InnoDB data size / buffer pool: 39.4G/5.9G

-------- Recommendations -----------------------------------------------------
General recommendations:
    Run OPTIMIZE TABLE to defragment tables for better performance
    MySQL started within last 24 hours - recommendations may be inaccurate
    Reduce or eliminate persistent connections to reduce connection usage
    Adjust your join queries to always utilize indexes
    Temporary table size is already large - reduce result set size
    Reduce your SELECT DISTINCT queries without LIMIT clauses
    Increase table_cache gradually to avoid file descriptor limits
Variables to adjust:
    max_connections (> 200)
    wait_timeout (< 600)
    interactive_timeout (< 600)
    join_buffer_size (> 5.0M, or always use indexes with joins)
    table_cache (> 10000)
    innodb_buffer_pool_size (>= 39G)

Mysql primer output:

-- MYSQL PERFORMANCE TUNING PRIMER --
- By: Matthew Montgomery -

MySQL Version 5.5.24-9-log x86_64

Uptime = 0 days 8 hrs 20 min 50 sec
Avg. qps = 478
Total Questions = 14369568
Threads Connected = 16

Warning: Server has not been running for at least 48hrs.
It may not be safe to use these recommendations

To find out more information on how each of these
runtime variables effects performance visit:
http://dev.mysql.com/doc/refman/5.5/en/server-system-variables.html
Visit http://www.mysql.com/products/enterprise/advisors.html
for info about MySQL's Enterprise Monitoring and Advisory Service

SLOW QUERIES
The slow query log is enabled.
Current long_query_time = 1.000000 sec.
You have 260626 out of 14369701 that take longer than 1.000000 sec. to complete
Your long_query_time seems to be fine

BINARY UPDATE LOG
The binary update log is enabled
Binlog sync is not enabled, you could loose binlog records during a server crash

WORKER THREADS
Current thread_cache_size = 50
Current threads_cached = 45
Current threads_per_sec = 0
Historic threads_per_sec = 0
Your thread_cache_size is fine

MAX CONNECTIONS
Current max_connections = 200
Current threads_connected = 11
Historic max_used_connections = 201
The number of used connections is 100% of the configured maximum.
You should raise max_connections

INNODB STATUS
Current InnoDB index space = 214 M
Current InnoDB data space = 39.40 G
Current InnoDB buffer pool free = 0 %
Current innodb_buffer_pool_size = 5.85 G
Depending on how much space your innodb indexes take up it may be safe
to increase this value to up to 2 / 3 of total system memory

MEMORY USAGE
Max Memory Ever Allocated : 23.46 G
Configured Max Per-thread Buffers : 15.84 G
Configured Max Global Buffers : 7.54 G
Configured Max Memory Limit : 23.39 G
Physical Memory : 31.40 G
Max memory limit seem to be within acceptable norms

KEY BUFFER
Current MyISAM index space = 5.61 G
Current key_buffer_size = 1.47 G
Key cache miss rate is 1 : 5578
Key buffer free ratio = 77 %
Your key_buffer_size seems to be fine

QUERY CACHE
Query cache is enabled
Current query_cache_size = 200 M
Current query_cache_used = 101 M
Current query_cache_limit = 50 M
Current Query cache Memory fill ratio = 50.59 %
Current query_cache_min_res_unit = 4 K
MySQL won't cache query results that are larger than query_cache_limit in size

SORT OPERATIONS
Current sort_buffer_size = 64 M
Current read_rnd_buffer_size = 256 K
Sort buffer seems to be fine

JOINS
Current join_buffer_size = 5.00 M
You have had 106606 queries where a join could not use an index properly
You have had 8 joins without keys that check for key usage after each row
join_buffer_size >= 4 M
This is not advised
You should enable "log-queries-not-using-indexes"
Then look for non indexed joins in the slow query log.

OPEN FILES LIMIT
Current open_files_limit = 20210 files
The open_files_limit should typically be set to at least 2x-3x
that of table_cache if you have heavy MyISAM usage.
Your open_files_limit value seems to be fine

TABLE CACHE
Current table_open_cache = 10000 tables
Current table_definition_cache = 2000 tables
You have a total of 1910 tables
You have 2151 open tables.
The table_cache value seems to be fine

TEMP TABLES
Current max_heap_table_size = 2.92 G
Current tmp_table_size = 2.92 G
Of 366426 temp tables, 49% were created on disk
Perhaps you should increase your tmp_table_size and/or max_heap_table_size
to reduce the number of disk-based temporary tables
Note! BLOB and TEXT columns are not allow in memory tables.
If you are using these columns raising these values might not impact your
ratio of on disk temp tables.

TABLE SCANS
Current read_buffer_size = 3 M
Current table scan ratio = 2846 : 1
read_buffer_size seems to be fine

TABLE LOCKING
Current Lock Wait ratio = 1 : 185
You may benefit from selective use of InnoDB.
If you have long running SELECT's against MyISAM tables and perform
frequent updates consider setting 'low_priority_updates=1'
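A daily, clean-looking stop with memory pegged is the classic signature of the kernel OOM killer; grep for "Out of memory" in syslog or dmesg right after a stop to confirm, and mysqltuner's own numbers hint at why it could happen: 10.5 GB of global buffers plus 81.1 MB per thread times 200 threads on a box that also runs Apache, nginx and memcached. In parallel it can help to record mysqld's actual resident size in the minutes before the next stop. A hedged sketch follows; the log path and one-minute interval are arbitrary choices:

#!/usr/bin/python
# Hedged sketch: once a minute, append mysqld's resident size (RSS) and the
# system's free memory to a log so the run-up to the next crash is captured.
import os, time

PAGE_KB = os.sysconf('SC_PAGE_SIZE') / 1024

def mysqld_rss_kb():
    for pid in os.listdir('/proc'):
        if pid.isdigit():
            try:
                stat = open('/proc/%s/stat' % pid).read().split()
            except IOError:
                continue
            if stat[1] == '(mysqld)':
                return int(stat[23]) * PAGE_KB  # field 24 of stat is RSS in pages
    return 0  # mysqld not running -- exactly the moment we care about

def free_kb():
    for line in open('/proc/meminfo'):
        if line.startswith('MemFree:'):
            return int(line.split()[1])

log = open('/var/log/mysqld-rss.log', 'a')
while True:
    log.write('%s rss=%d kB free=%d kB\n'
              % (time.strftime('%Y-%m-%d %H:%M:%S'), mysqld_rss_kb(), free_kb()))
    log.flush()
    time.sleep(60)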

    Read the article

  • Installing Oracle 11gR2 on RHEL 6.2

    - by Chris
    Hello all. I'm having some difficulty installing Oracle 11gR2 on RHEL 6.2, so I have compiled a giant list of every single step I have taken so far. (Sorry for the bad styling; copy-paste isn't working correctly.)

I installed RHEL 6.2 on VMware; it did its easy install automatically. I selected 4 GB of memory, a max disk size of 80 GB, and 2 processors. The version of Oracle I downloaded is Linux x86-64 11.2.0.1, and I am installing this on a local machine, NOT a remote machine. I followed the following documentation: http://docs.oracle.com/cd/E11882_01/install.112/e24326/toc.htm. I bolded the steps which I was least sure about from my research.

Easy installed with RHEL 6.2 for VMware. Registered with Red Hat so I can get updates. Reinstalled vmware-tools by pressing enter at every choice. sudo yum update; at the end something about a GPG key came up, and I selected y, then y.

Checked memory requirements:

grep MemTotal /proc/meminfo
MemTotal: 3921368 kb
uname -m
x86_64
grep SwapTotal /proc/meminfo
SwapTotal: 6160376 kb
free
             total       used       free     shared    buffers     cached
Mem:       3921368    2032012    1889356          0      76216    1533268
-/+ buffers/cache:     422528    3498840
Swap:      6160376          0    6160376
df -h /dev/shm
Filesystem Size Used Avail Use% Mounted on
tmpfs 1.9G 276K 1.9G 1% /dev/shm
df -h /tmp
Filesystem Size Used Avail Use% Mounted on
/dev/sda2 73G 2.7G 67G 4% /
df -h
Filesystem Size Used Avail Use% Mounted on
/dev/sda2 73G 2.7G 67G 4% /
tmpfs 1.9G 276K 1.9G 1% /dev/shm
/dev/sda1 291M 58M 219M 21% /boot

All looked fine to me, except maybe for swap?

Checked software requirements:

cat /proc/version
Linux version 2.6.32-220.el6.x86_64 ([email protected]) (gcc version 4.4.5 20110214 (Red Hat 4.4.5-6) (GCC) ) #1 SMP Wed Nov 9 08:03:13 EST 2011
uname -r
2.6.32-220.el6.x86_64 (same as above, but whatever)

According to the tutorial it should be, on Red Hat Enterprise Linux 6, 2.6.32-71.el6.x86_64 or later. These are the versions of software I have installed:

binutils-2.20.51.0.2-5.28.el6.x86_64
compat-libcap1-1.10-1.x86_64
compat-libstdc++-33-3.2.3-69.el6.x86_64
compat-libstdc++-33.i686 0:3.2.3-69.el6
gcc-4.4.6-3.el6.x86_64
gcc-c++.x86_64 0:4.4.6-3.el6
glibc-2.12-1.47.el6_2.12.x86_64
glibc-2.12-1.47.el6_2.12.i686
glibc-devel-2.12-1.47.el6_2.12.x86_64
glibc-devel.i686 0:2.12-1.47.el6_2.12
ksh.x86_64 0:20100621-12.el6_2.1
libgcc-4.4.6-3.el6.x86_64
libgcc-4.4.6-3.el6.i686
libstdc++-4.4.6-3.el6.x86_64
libstdc++.i686 0:4.4.6-3.el6
libstdc++-devel.i686 0:4.4.6-3.el6
libstdc++-devel-4.4.6-3.el6.x86_64
libaio-0.3.107-10.el6.x86_64
libaio-0.3.107-10.el6.i686
libaio-devel-0.3.107-10.el6.x86_64
libaio-devel-0.3.107-10.el6.i686
make-3.81-19.el6.x86_64
sysstat-9.0.4-18.el6.x86_64
unixODBC-2.2.14-11.el6.x86_64
unixODBC-devel-2.2.14-11.el6.x86_64
unixODBC-devel-2.2.14-11.el6.i686
unixODBC-2.2.14-11.el6.i686

8.
Probably screwed up here or step 9 /usr/sbin/groupadd oinstall /usr/sbin/groupadd dba(not sure why this isn't in the tutorial) /usr/sbin/useradd -g oinstall -G dba oracle passwd oracle /sbin/sysctl -a | grep sem Xkernel.sem = 250 32000 32 128 /sbin/sysctl -a | grep shm kernel.shmmax = 68719476736 kernel.shmall = 4294967296 kernel.shmmni = 4096 vm.hugetlb_shm_group = 0 /sbin/sysctl -a | grep file-max Xfs.file-max = 384629 /sbin/sysctl -a | grep ip_local_port_range Xnet.ipv4.ip_local_port_range = 32768 61000 /sbin/sysctl -a | grep rmem_default Xnet.core.rmem_default = 124928 /sbin/sysctl -a | grep rmem_max Xnet.core.rmem_max = 131071 /sbin/sysctl -a | grep wmem_max Xnet.core.wmem_max = 131071 /sbin/sysctl -a | grep wmem_default Xnet.core.wmem_default = 124928 Here is my sysctl.conf file I only added the items that were bigger: Kernel sysctl configuration file for Red Hat Linux # For binary values, 0 is disabled, 1 is enabled. See sysctl(8) and sysctl.conf(5) for more details. Controls IP packet forwarding net.ipv4.ip_forward = 0 Controls source route verification net.ipv4.conf.default.rp_filter = 1 Do not accept source routing net.ipv4.conf.default.accept_source_route = 0 Controls the System Request debugging functionality of the kernel kernel.sysrq = 0 Controls whether core dumps will append the PID to the core filename. Useful for debugging multi-threaded applications. kernel.core_uses_pid = 1 Controls the use of TCP syncookies net.ipv4.tcp_syncookies = 1 Disable netfilter on bridges. net.bridge.bridge-nf-call-ip6tables = 0 net.bridge.bridge-nf-call-iptables = 0 net.bridge.bridge-nf-call-arptables = 0 Controls the maximum size of a message, in bytes kernel.msgmnb = 65536 Controls the default maxmimum size of a mesage queue kernel.msgmax = 65536 Controls the maximum shared segment size, in bytes kernel.shmmax = 68719476736 Controls the maximum number of shared memory segments, in pages kernel.shmall = 4294967296 fs.aio-max-nr = 1048576 fs.file-max = 6815744 kernel.sem = 250 32000 100 128 net.ipv4.ip_local_port_range = 9000 65500 net.core.rmem_default = 262144 net.core.rmem_max = 4194304 net.core.wmem_default = 262144 net.core.wmem_max = 1048576 /sbin/sysctl -p net.ipv4.ip_forward = 0 net.ipv4.conf.default.rp_filter = 1 net.ipv4.conf.default.accept_source_route = 0 kernel.sysrq = 0 kernel.core_uses_pid = 1 net.ipv4.tcp_syncookies = 1 error: "net.bridge.bridge-nf-call-ip6tables" is an unknown key error: "net.bridge.bridge-nf-call-iptables" is an unknown key error: "net.bridge.bridge-nf-call-arptables" is an unknown key kernel.msgmnb = 65536 kernel.msgmax = 65536 kernel.shmmax = 68719476736 kernel.shmall = 4294967296 fs.aio-max-nr = 1048576 fs.file-max = 6815744 kernel.sem = 250 32000 100 128 net.ipv4.ip_local_port_range = 9000 65500 net.core.rmem_default = 262144 net.core.rmem_max = 4194304 net.core.wmem_default = 262144 net.core.wmem_max = 1048576 su - oracle ulimit -Sn 1024 ulimit -Hn 1024 ulimit -Su 1024 ulimit -Hu 30482 ulimit -Su 1024 ulimit -Ss 10240 ulimit -Hs unlimited su - nano /etc/security/limits.conf *added to the end of the file * oracle soft nproc 2047 oracle hard nproc 16384 oracle soft nofile 1024 oracle hard nofile 65536 oracle soft stack 10240 exit exit su - mkdir -p /app/ chown -R oracle:oinstall /app/ chmod -R 775 /app/ 9. 
THIS IS PROBABLY WHERE I MESSED UP I then exited out of the root account so now I'm back in my account chris then I su - oracle echo $SHELL /bin/bash umask 0022 (so it should be set already to what is neccesary) Also from what I have read I do not need to set the DISPLAY variable because I'm installing this on the localhost I then opened the .bash_profile of the oracle and changed it to the following .bash_profile Get the aliases and functions if [ -f ~/.bashrc ]; then . ~/.bashrc fi User specific environment and startup programs PATH=$PATH:$HOME/bin; export PATH ORACLE_BASE=/app/oracle ORACLE_SID=orcl export ORACLE_BASE ORACLE_SID I then shutdown the virtual machine shared my desktop folder from my windows 7 then turned back on the virtual machine logged in as chris opened up a terminal then: su - for some reason the shared folder didn't appear so I reinstalled vmware tools again and restarted then same as before su - cp -R linux_oracle/database /db; chown -R oracle:oinstall /db; chmod -R 775 /db; ll /db drwxrwxr-x. 8 oracle oinstall 4096 Jun 5 06:20 database exit su - oracle cd /db/database ./runInstaller AND FINALLY THE INFAMOUS JAVA:132 ERROR MESSAGE Starting Oracle Universal Installer... Checking Temp space: must be greater than 80 MB. Actual 65646 MB Passed Checking swap space: must be greater than 150 MB. Actual 6015 MB Passed Checking monitor: must be configured to display at least 256 colors. Actual 16777216 Passed Preparing to launch Oracle Universal Installer from /tmp/OraInstall2012-06-05_06-47-12AM. Please wait ...[oracle@localhost database]$ Exception in thread "main" java.lang.UnsatisfiedLinkError: /tmp/OraInstall2012-06-05_06-47-12AM/jdk/jre/lib/i386/xawt/libmawt.so: libXext.so.6: cannot open shared object file: No such file or directory at java.lang.ClassLoader$NativeLibrary.load(Native Method) at java.lang.ClassLoader.loadLibrary0(ClassLoader.java:1751) at java.lang.ClassLoader.loadLibrary(ClassLoader.java:1647) at java.lang.Runtime.load0(Runtime.java:769) at java.lang.System.load(System.java:968) at java.lang.ClassLoader$NativeLibrary.load(Native Method) at java.lang.ClassLoader.loadLibrary0(ClassLoader.java:1751) at java.lang.ClassLoader.loadLibrary(ClassLoader.java:1668) at java.lang.Runtime.loadLibrary0(Runtime.java:822) at java.lang.System.loadLibrary(System.java:993) at sun.security.action.LoadLibraryAction.run(LoadLibraryAction.java:50) at java.security.AccessController.doPrivileged(Native Method) at java.awt.Toolkit.loadLibraries(Toolkit.java:1509) at java.awt.Toolkit.(Toolkit.java:1530) at com.jgoodies.looks.LookUtils.isLowResolution(Unknown Source) at com.jgoodies.looks.LookUtils.(Unknown Source) at com.jgoodies.looks.plastic.PlasticLookAndFeel.(PlasticLookAndFeel.java:122) at java.lang.Class.forName0(Native Method) at java.lang.Class.forName(Class.java:242) at javax.swing.SwingUtilities.loadSystemClass(SwingUtilities.java:1783) at javax.swing.UIManager.setLookAndFeel(UIManager.java:480) at oracle.install.commons.util.Application.startup(Application.java:758) at oracle.install.commons.flow.FlowApplication.startup(FlowApplication.java:164) at oracle.install.commons.flow.FlowApplication.startup(FlowApplication.java:181) at oracle.install.commons.base.driver.common.Installer.startup(Installer.java:265) at oracle.install.ivw.db.driver.DBInstaller.startup(DBInstaller.java:114) at oracle.install.ivw.db.driver.DBInstaller.main(DBInstaller.java:132)
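The UnsatisfiedLinkError at the bottom is more specific than it looks: the path jdk/jre/lib/i386/xawt/libmawt.so shows the installer's bundled JRE is 32-bit, and it cannot resolve libXext.so.6, so the missing piece is the 32-bit X11 client libraries, which a default RHEL 6 x86_64 install does not include. As far as I can tell the usual fix is yum install libXext.i686 libXrender.i686 libXtst.i686 (package names assumed from standard RHEL 6 multilib naming). A hedged sketch to check what is actually present:

#!/usr/bin/python
# Hedged sketch: list 32-bit (/usr/lib) and 64-bit (/usr/lib64) copies of the
# X libraries the installer's bundled 32-bit JDK tries to dlopen().
import glob

for name in ('libXext', 'libXtst', 'libXrender', 'libX11'):
    for libdir in ('/usr/lib', '/usr/lib64'):
        hits = glob.glob('%s/%s.so*' % (libdir, name))
        print '%-12s %-10s %s' % (name, libdir, hits and ', '.join(hits) or 'missing')

If /usr/lib64 has the libraries but /usr/lib does not, the 64-bit packages are installed without their i686 counterparts, which matches the error above.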

    Read the article

  • Apache restart does not load new php.ini

    - by Tiffany Walker
    Never had this problem till updated CPanel today? Maybe that is part the problem? I only have the one php.ini file # /usr/local/bin/php --info | grep php.ini Configure Command => './configure' '--disable-cgi' '--disable-fileinfo' '--enable-bcmath' '--enable-calendar' '--enable-exif' '--enable-ftp' '--enable-gd-native-ttf' '--enable-libxml' '--enable-magic-quotes' '--enable-mbstring' '--enable-pdo=shared' '--enable-soap' '--enable-sockets' '--enable-zip' '--prefix=/usr/local' '--with-bz2' '--with-config-file-path=/usr/local/lib' '--with-config-file-scan-dir=/usr/local/lib/php.ini.d' '--with-curl=/opt/curlssl/' '--with-curlwrappers' '--with-freetype-dir=/usr' '--with-gd' '--with-imap=/opt/php_with_imap_client/' '--with-imap-ssl=/usr' '--with-jpeg-dir=/usr' '--with-kerberos' '--with-libdir=lib64' '--with-libexpat-dir=/usr' '--with-libxml-dir=/opt/xml2' '--with-libxml-dir=/opt/xml2/' '--with-mcrypt=/opt/libmcrypt/' '--with-mysql=/usr' '--with-mysql-sock=/var/lib/mysql/mysql.sock' '--with-mysqli=/usr/bin/mysql_config' '--with-openssl=/usr' '--with-openssl-dir=/usr' '--with-pcre-regex=/opt/pcre' '--with-pdo-mysql=shared' '--with-pdo-sqlite=shared' '--with-pic' '--with-png-dir=/usr' '--with-pspell' '--with-sqlite=shared' '--with-tidy=/opt/tidy/' '--with-xmlrpc' '--with-xpm-dir=/usr' '--with-xsl=/opt/xslt/' '--with-zlib' '--with-zlib-dir=/usr' '--with-gettext' Configuration File (php.ini) Path => /usr/local/lib Loaded Configuration File => /usr/local/lib/php.ini Scan this dir for additional .ini files => /usr/local/lib/php.ini.d # /usr/bin/php --info | grep php.ini <tr><td class="e">Configure Command </td><td class="v"> &#039;./configure&#039; &#039;--disable-fileinfo&#039; &#039;--enable-bcmath&#039; &#039;--enable-calendar&#039; &#039;--enable-exif&#039; &#039;--enable-ftp&#039; &#039;--enable-gd-native-ttf&#039; &#039;--enable-libxml&#039; &#039;--enable-magic-quotes&#039; &#039;--enable-mbstring&#039; &#039;--enable-pdo=shared&#039; &#039;--enable-soap&#039; &#039;--enable-sockets&#039; &#039;--enable-zip&#039; &#039;--prefix=/usr&#039; &#039;--with-bz2&#039; &#039;--with-config-file-path=/usr/local/lib&#039; &#039;--with-config-file-scan-dir=/usr/local/lib/php.ini.d&#039; &#039;--with-curl=/opt/curlssl/&#039; &#039;--with-curlwrappers&#039; &#039;--with-freetype-dir=/usr&#039; &#039;--with-gd&#039; &#039;--with-imap=/opt/php_with_imap_client/&#039; &#039;--with-imap-ssl=/usr&#039; &#039;--with-jpeg-dir=/usr&#039; &#039;--with-kerberos&#039; &#039;--with-libdir=lib64&#039; &#039;--with-libexpat-dir=/usr&#039; &#039;--with-libxml-dir=/opt/xml2&#039; &#039;--with-libxml-dir=/opt/xml2/&#039; &#039;--with-mcrypt=/opt/libmcrypt/&#039; &#039;--with-mysql=/usr&#039; &#039;--with-mysql-sock=/var/lib/mysql/mysql.sock&#039; &#039;--with-mysqli=/usr/bin/mysql_config&#039; &#039;--with-openssl=/usr&#039; &#039;--with-openssl-dir=/usr&#039; &#039;--with-pcre-regex=/opt/pcre&#039; &#039;--with-pdo-mysql=shared&#039; &#039;--with-pdo-sqlite=shared&#039; &#039;--with-pic&#039; &#039;--with-png-dir=/usr&#039; &#039;--with-pspell&#039; &#039;--with-sqlite=shared&#039; &#039;--with-tidy=/opt/tidy/&#039; &#039;--with-xmlrpc&#039; &#039;--with-xpm-dir=/usr&#039; &#039;--with-xsl=/opt/xslt/&#039; &#039;--with-zlib&#039; &#039;--with-zlib-dir=/usr&#039; </td></tr> <tr><td class="e">Configuration File (php.ini) Path </td><td class="v">/usr/local/lib </td></tr> <tr><td class="e">Loaded Configuration File </td><td class="v">/usr/local/lib/php.ini </td></tr> <tr><td class="e">Scan this dir for additional .ini files 
</td><td class="v">/usr/local/lib/php.ini.d </td></tr> everytime I restart apache I still seem to be running the old one. Nothing changes. I removed phpinfo() and ini_set() from the php.ini but I still can't use them. # service httpd -k restart [Fri Oct 26 15:27:10 2012] [warn] module hostinglimits_module is already loaded, skipping [Fri Oct 26 15:27:10 2012] [warn] NameVirtualHost 127.0.0.1:8081 has no VirtualHosts There is also no php.ini files under the vhosts or .htaccess. # /usr/bin/php -v PHP 5.3.15 (cgi-fcgi) (built: Aug 4 2012 21:33:58) Copyright (c) 1997-2012 The PHP Group Zend Engine v2.3.0, Copyright (c) 1998-2012 Zend Technologies with eAccelerator v0.9.6.1, Copyright (c) 2004-2010 eAccelerator, by eAccelerator with the ionCube PHP Loader v4.2.2, Copyright (c) 2002-2012, by ionCube Ltd., and with Zend Guard Loader v3.3, Copyright (c) 1998-2010, by Zend Technologies with Suhosin v0.9.33, Copyright (c) 2007-2012, by SektionEins GmbH and # /usr/local/bin/php -v PHP 5.3.15 (cli) (built: Aug 4 2012 21:34:27) Copyright (c) 1997-2012 The PHP Group Zend Engine v2.3.0, Copyright (c) 1998-2012 Zend Technologies with eAccelerator v0.9.6.1, Copyright (c) 2004-2010 eAccelerator, by eAccelerator with the ionCube PHP Loader v4.2.2, Copyright (c) 2002-2012, by ionCube Ltd., and with Zend Guard Loader v3.3, Copyright (c) 1998-2010, by Zend Technologies with Suhosin v0.9.33, Copyright (c) 2007-2012, by SektionEins GmbH Nothing shows up in the error logs either. The only errors we get are under the vhost's with error_log saying phpinfo and ini_set are disabled. EDIT: Both php binaries use the same php.ini file EDIT: Running php as mod_fgcid.so with suexec EDIT: From SSH I see the correct values for PHP from the php.ini file being loaded from both binaries When using php from apache [26-Oct-2012 20:25:34 UTC] PHP Warning: phpinfo() has been disabled for security reasons in /home/jake/public_html/phpinfo.php on line 1 EDIT: /usr/bin/php is the correct PHP file. Forgot to mention. It is the one in the wrapper script.
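
    Since /usr/bin/php is the binary named in the mod_fcgid wrapper script, and both binaries already report the correct php.ini from the shell, one plausible culprit is stale FastCGI workers: mod_fcgid pools its PHP processes, and a restart that leaves old workers alive keeps serving the old configuration (including the old disable_functions list). A sketch for ruling that out, reusing the paths from the question (the probe filename is arbitrary; remove it afterwards):

      # a throwaway probe that does not depend on the disabled phpinfo()
      echo '<?php echo php_ini_loaded_file(); ?>' > /home/jake/public_html/ini.php
      # full stop, clear any leftover FastCGI workers, then start
      service httpd stop
      pkill -f /usr/bin/php 2>/dev/null
      service httpd start
      # now request ini.php through the web server and compare with the CLI output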

    Read the article

  • Does anyone really understand how HFSC scheduling in Linux/BSD works?

    - by Mecki
    I read the original SIGCOMM '97 PostScript paper about HFSC; it is very technical, but I understand the basic concept. Instead of giving a linear service curve (as with pretty much every other scheduling algorithm), you can specify a convex or concave service curve, and thus it is possible to decouple bandwidth and delay. However, even though this paper mentions the two kinds of scheduling algorithms being used (real-time and link-share), it always mentions only ONE curve per scheduling class (the decoupling is done by specifying this curve; only one curve is needed for that). Now HFSC has been implemented for BSD (OpenBSD, FreeBSD, etc.) using the ALTQ scheduling framework, and it has been implemented on Linux using the TC scheduling framework (part of iproute2). Both implementations added two additional service curves that were NOT in the original paper! A real-time service curve and an upper-limit service curve. Again, please note that the original paper mentions two scheduling algorithms (real-time and link-share), but in that paper both work with one single service curve. There never were two independent service curves for either one, as you currently find in BSD and Linux. Even worse, some versions of ALTQ seem to add an additional queue priority to HFSC (there is no such thing as priority in the original paper either). I found several BSD HowTos mentioning this priority setting (even though the man page of the latest ALTQ release knows no such parameter for HFSC, so officially it does not even exist). This all makes HFSC scheduling even more complex than the algorithm described in the original paper, and there are tons of tutorials on the Internet that often contradict each other, one claiming the opposite of the next. This is probably the main reason why nobody really seems to understand how HFSC scheduling really works. Before I can ask my questions, we need a sample setup of some kind. I'll use a very simple one, as seen in the image below: Here are some questions I cannot answer because the tutorials contradict each other: What do I need a real-time curve for at all? Assuming A1, A2, B1, B2 are all 128 kbit/s link-share (no real-time curve for either one), then each of those will get 128 kbit/s if the root has 512 kbit/s to distribute (and A and B are both 256 kbit/s, of course), right? Why would I additionally give A1 and B1 a real-time curve of 128 kbit/s? What would this be good for? To give those two a higher priority? According to the original paper I can give them a higher priority by using a curve; that's what HFSC is all about, after all. By giving both classes a curve of [256kbit/s 20ms 128kbit/s], both automatically have twice the priority of A2 and B2 (while still only getting 128 kbit/s on average). Does the real-time bandwidth count towards the link-share bandwidth? E.g. if A1 and B1 both only have 64kbit/s real-time and 64kbit/s link-share bandwidth, does that mean once they are served 64kbit/s via real-time, their link-share requirement is satisfied as well (they might get excess bandwidth, but let's ignore that for a second), or does that mean they get another 64 kbit/s via link-share? So does each class have a bandwidth "requirement" of real-time plus link-share? Or does a class only have a higher requirement than the real-time curve if the link-share curve is higher than the real-time curve (current link-share requirement equals specified link-share requirement minus real-time bandwidth already provided to this class)?
Is the upper-limit curve applied to real-time as well, only to link-share, or maybe to both? Some tutorials say one way, some say the other. Some even claim upper-limit is the maximum of real-time bandwidth + link-share bandwidth. What is the truth? Assuming A2 and B2 are both 128 kbit/s, does it make any difference if A1 and B1 are 128 kbit/s link-share only, or 64 kbit/s real-time and 128 kbit/s link-share, and if so, what difference? If I use the separate real-time curve to increase the priorities of classes, why would I need "curves" at all? Why is real-time not a flat value, and link-share also a flat value? Why are both curves? The need for curves is clear in the original paper, because there is only one attribute of that kind per class. But now, having three attributes (real-time, link-share, and upper-limit), why do I still need curves on each one? Why would I want the curves' shape (not their average bandwidth, but their slopes) to be different for real-time and link-share traffic? According to the little documentation available, real-time curve values are totally ignored for inner classes (classes A and B); they are only applied to leaf classes (A1, A2, B1, B2). If that is true, why does the ALTQ HFSC sample configuration (search for 3.3 Sample configuration) set real-time curves on inner classes and claim that those set the guaranteed rate of those inner classes? Isn't that completely pointless? (Note: pshare sets the link-share curve in ALTQ and grate the real-time curve; you can see this in the paragraph above the sample configuration.) Some tutorials say the sum of all real-time curves may not be higher than 80% of the line speed, others say it must not be higher than 70% of the line speed. Which one is right, or are they maybe both wrong? One tutorial said you should forget all the theory. No matter how things really work (schedulers and bandwidth distribution), imagine the three curves according to the following "simplified mind model": real-time is the guaranteed bandwidth that this class will always get. link-share is the bandwidth that this class wants to become fully satisfied, but satisfaction cannot be guaranteed. In case there is excess bandwidth, the class might even get offered more bandwidth than necessary to become satisfied, but it may never use more than upper-limit says. For all this to work, the sum of all real-time bandwidths may not be above xx% of the line speed (see the question above; the percentage varies). Question: Is this more or less accurate, or a total misunderstanding of HFSC? And if the assumption above is really accurate, where is prioritization in that model? E.g. every class might have a real-time bandwidth (guaranteed), a link-share bandwidth (not guaranteed) and maybe an upper-limit, but still some classes have higher priority needs than other classes. In that case I must still prioritize somehow, even among the real-time traffic of those classes. Would I prioritize by the slope of the curves? And if so, which curve? The real-time curve? The link-share curve? The upper-limit curve? All of them? Would I give all of them the same slope, or each a different one, and how would I find out the right slope? I still haven't lost hope that there exists at least a handful of people in this world who really understand HFSC and are able to answer all these questions accurately. And doing so without contradicting each other in the answers would be really nice ;-)
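
    To make the questions concrete, here is roughly how the pictured hierarchy would look in Linux tc syntax. This is a sketch only: eth0, the handle numbers, and the 20ms m1 burst are placeholder choices, not a recommendation for how the curves should be set.

      tc qdisc add dev eth0 root handle 1: hfsc default 12
      tc class add dev eth0 parent 1:   classid 1:1  hfsc ls m2 512kbit ul m2 512kbit   # root
      tc class add dev eth0 parent 1:1  classid 1:10 hfsc ls m2 256kbit                 # A
      tc class add dev eth0 parent 1:1  classid 1:20 hfsc ls m2 256kbit                 # B
      tc class add dev eth0 parent 1:10 classid 1:11 hfsc rt m1 256kbit d 20ms m2 128kbit ls m2 128kbit  # A1
      tc class add dev eth0 parent 1:10 classid 1:12 hfsc ls m2 128kbit                 # A2
      tc class add dev eth0 parent 1:20 classid 1:21 hfsc rt m1 256kbit d 20ms m2 128kbit ls m2 128kbit  # B1
      tc class add dev eth0 parent 1:20 classid 1:22 hfsc ls m2 128kbit                 # B2

    The rt/ls/ul keywords are exactly the three independent curves the question is about, and each [m1 ... d ... m2 ...] triple is a two-piece service curve in the paper's sense.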

    Read the article

  • Either nginx+php-fpm is misconfigured, or nginx+php-fpm cannot handle the load?

    - by The Wolf
    I have wordpress installed in my server configured(hopefully with nginx+php-fpm+mariaDB). I am trying to import using wordpress importer a 1.5MB xml file. Everytime I try to upload it using the importer, it got cut of... meaning just blank screen result.. Here is my error log: actually I just posted 2 of the errors [error] 858#0: *1 connect() failed (111: Connection refused) while connecting to upstream, client: xx.xxx.xx.xx, server: xxx.com, request: "GET xxxx.html HTTP/1.1", upstream: "fastcgi://127.0.0.1:9000", host: "xxx.com" [error] 858#0: *13 connect() failed (111: Connection refused) while connecting to upstream, client: xxx.x.xx.xx, server: xxx.com, request: "GET xxxx.php HTTP/1.1", upstream: "fastcgi://127.0.0.1:9000", host: "xxx.com" I don't know what is the reason why it can't process the wordpress export .xml. I already increased max_file_upload & etc., but nothing happens. Hope somebody can help me. Here are my conf: nginx.conf user nginx; worker_processes 8; error_log /var/log/nginx/error.log warn; pid /var/run/nginx.pid; events { worker_connections 1024; } http { include /etc/nginx/mime.types; default_type application/octet-stream; log_format main '$remote_addr - $remote_user [$time_local] "$request" ' '$status $body_bytes_sent "$http_referer" ' '"$http_user_agent" "$http_x_forwarded_for"'; access_log /var/log/nginx/access.log main; sendfile on; #tcp_nopush on; server_tokens off; keepalive_timeout 65; fastcgi_read_timeout 500; #gzip on; client_max_body_size 2M; php-fpm.conf ;;;;;;;;;;;;;;;;;;;;; ; FPM Configuration ; ;;;;;;;;;;;;;;;;;;;;; ; All relative paths in this configuration file are relative to PHP's install ; prefix. ; Include one or more files. If glob(3) exists, it is used to include a bunch of ; files from a glob(3) pattern. This directive can be used everywhere in the ; file. include=/etc/php-fpm.d/*.conf ;;;;;;;;;;;;;;;;;; ; Global Options ; ;;;;;;;;;;;;;;;;;; [global] ; Pid file ; Default Value: none pid = /var/run/php-fpm/php-fpm.pid ; Error log file ; Default Value: /var/log/php-fpm.log error_log = /var/log/php-fpm/error.log ; Log level ; Possible Values: alert, error, warning, notice, debug ; Default Value: notice ;log_level = notice ; If this number of child processes exit with SIGSEGV or SIGBUS within the time ; interval set by emergency_restart_interval then FPM will restart. A value ; of '0' means 'Off'. ; Default Value: 0 ;emergency_restart_threshold = 0 ; Interval of time used by emergency_restart_interval to determine when ; a graceful restart will be initiated. This can be useful to work around ; accidental corruptions in an accelerator's shared memory. ; Available Units: s(econds), m(inutes), h(ours), or d(ays) ; Default Unit: seconds ; Default Value: 0 ;emergency_restart_interval = 0 ; Time limit for child processes to wait for a reaction on signals from master. ; Available units: s(econds), m(inutes), h(ours), or d(ays) ; Default Unit: seconds ; Default Value: 0 ;process_control_timeout = 0 ; Send FPM to background. Set to 'no' to keep FPM in foreground for debugging. ; Default Value: yes daemonize = no ;;;;;;;;;;;;;;;;;;;; ; Pool Definitions ; ;;;;;;;;;;;;;;;;;;;; ; See /etc/php-fpm.d/*.conf [root@host etc]# vim php-fpm.conf [root@host etc]# vim php-fpm.conf ; Default Value: notice ;log_level = notice ; If this number of child processes exit with SIGSEGV or SIGBUS within the time ; interval set by emergency_restart_interval then FPM will restart. A value ; of '0' means 'Off'. 
; Default Value: 0 ;emergency_restart_threshold = 0 ; Interval of time used by emergency_restart_interval to determine when ; a graceful restart will be initiated. This can be useful to work around ; accidental corruptions in an accelerator's shared memory. ; Available Units: s(econds), m(inutes), h(ours), or d(ays) ; Default Unit: seconds ; Default Value: 0 ;emergency_restart_interval = 0 ; Time limit for child processes to wait for a reaction on signals from master. ; Available units: s(econds), m(inutes), h(ours), or d(ays) ; Default Unit: seconds ; Default Value: 0 ;process_control_timeout = 0 ; Send FPM to background. Set to 'no' to keep FPM in foreground for debugging. ; Default Value: yes daemonize = no ;;;;;;;;;;;;;;;;;;;; ; Pool Definitions ; ;;;;;;;;;;;;;;;;;;;; ; See /etc/php-fpm.d/*.conf ps aux [root@host etc]# ps aux USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND root 1 0.0 0.1 2900 1380 ? Ss Jun02 0:00 init root 2 0.0 0.0 0 0 ? S Jun02 0:00 [kthreadd/9308] root 3 0.0 0.0 0 0 ? S Jun02 0:00 [khelper/9308] root 124 0.0 0.0 2464 576 ? S<s Jun02 0:00 /sbin/udevd -d root 460 0.0 0.1 35976 1308 ? Sl Jun02 0:00 /sbin/rsyslogd -i /var/run/syslogd.pid -c 5 root 474 0.0 0.0 8940 1028 ? Ss Jun02 0:00 /usr/sbin/sshd root 481 0.0 0.0 3264 876 ? Ss Jun02 0:00 xinetd -stayalive -pidfile /var/run/xinetd.pid root 491 0.0 0.1 6268 1432 ? S Jun02 0:00 /bin/sh /usr/bin/mysqld_safe --datadir=/var/lib/mysql --pid-file=/var/lib/mysql/host.busilak.com. mysql 584 0.1 6.8 679072 71456 ? Sl Jun02 0:04 /usr/sbin/mysqld --basedir=/usr --datadir=/var/lib/mysql --plugin-dir=/usr/lib/mysql/plugin --use root 586 0.0 0.3 12008 3820 ? Ss Jun02 0:01 sshd: root@pts/0 root 629 0.0 0.0 9140 756 ? Ss Jun02 0:00 /usr/sbin/saslauthd -m /var/run/saslauthd -a pam -n 2 root 630 0.0 0.0 9140 520 ? S Jun02 0:00 /usr/sbin/saslauthd -m /var/run/saslauthd -a pam -n 2 root 645 0.0 0.1 12788 1928 ? Ss Jun02 0:01 sendmail: accepting connections smmsp 653 0.0 0.1 12576 1728 ? Ss Jun02 0:00 sendmail: Queue runner@01:00:00 for /var/spool/clientmqueue root 691 0.0 0.1 7148 1184 ? Ss Jun02 0:00 crond root 698 0.0 0.1 6272 1688 pts/0 Ss Jun02 0:00 -bash root 1006 0.0 0.0 7828 924 ? Ss 00:30 0:00 nginx: master process /usr/sbin/nginx -c /etc/nginx/nginx.conf nginx 1007 0.0 0.1 8156 1724 ? S 00:30 0:00 nginx: worker process nginx 1008 0.0 0.1 8024 1360 ? S 00:30 0:00 nginx: worker process nginx 1009 0.0 0.1 8020 1356 ? S 00:30 0:00 nginx: worker process nginx 1011 0.0 0.1 8024 1360 ? S 00:30 0:00 nginx: worker process nginx 1012 0.0 0.1 8024 1360 ? S 00:30 0:00 nginx: worker process nginx 1013 0.0 0.1 8024 1360 ? S 00:30 0:00 nginx: worker process nginx 1014 0.0 0.1 8024 1360 ? S 00:30 0:00 nginx: worker process nginx 1015 0.0 0.1 8024 1344 ? S 00:30 0:00 nginx: worker process root 1030 0.0 0.2 25396 2904 ? Ss 00:30 0:00 php-fpm: master process (/etc/php-fpm.conf) apache 1031 0.0 1.9 40700 20624 ? S 00:30 0:00 php-fpm: pool www apache 1032 0.0 2.0 41924 21888 ? S 00:30 0:01 php-fpm: pool www apache 1033 0.0 1.9 41212 20848 ? S 00:30 0:01 php-fpm: pool www apache 1034 0.0 1.9 40956 20792 ? S 00:30 0:01 php-fpm: pool www apache 1035 0.0 2.0 41560 21556 ? S 00:30 0:02 php-fpm: pool www apache 1040 0.0 1.8 39292 19120 ? 
S 00:30 0:00 php-fpm: pool www root 1125 0.0 0.0 6080 1040 pts/0 R+ 01:04 0:00 ps aux netstat -l [root@host etc]# netstat -l Active Internet connections (only servers) Proto Recv-Q Send-Q Local Address Foreign Address State tcp 0 0 *:ssh *:* LISTEN tcp 0 0 localhost.localdomain:smtp *:* LISTEN tcp 0 0 localhost.locald:cslistener *:* LISTEN tcp 0 0 *:mysql *:* LISTEN tcp 0 0 *:http *:* LISTEN tcp 0 0 *:ssh *:* LISTEN Active UNIX domain sockets (only servers) Proto RefCnt Flags Type State I-Node Path unix 2 [ ACC ] STREAM LISTENING 60575947 /var/run/saslauthd/mux unix 2 [ ACC ] STREAM LISTENING 60574168 @/com/ubuntu/upstart unix 2 [ ACC ] STREAM LISTENING 60575873 /var/lib/mysql/mysql.sock Hope somebody can help me to figure out what is the problem.
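
    A "connection refused" on 127.0.0.1:9000 means nothing accepted the connection at that instant, and since netstat shows php-fpm listening (port 9000 appears under its /etc/services name, cslistener), a plausible explanation is that the handful of pool workers were all busy with the long-running import and the listen backlog overflowed. A sketch of pool settings to experiment with, assuming the stock /etc/php-fpm.d/www.conf pool and enough RAM for the extra workers (the numbers are illustrative):

      ; /etc/php-fpm.d/www.conf (sketch)
      listen = 127.0.0.1:9000
      listen.backlog = 1024            ; the compiled-in default can be small
      pm = dynamic
      pm.max_children = 20             ; ps shows only 6 workers at the moment
      pm.start_servers = 5
      pm.min_spare_servers = 3
      pm.max_spare_servers = 8
      request_terminate_timeout = 500  ; match nginx's fastcgi_read_timeout 500

    Raising client_max_body_size (currently 2M) and PHP's own max_execution_time and upload limits may also matter for a 1.5 MB import that fans out into many database inserts.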

    Read the article

  • Connect a PS/2-to-USB keyboard to Linux?

    - by Daniel
    I have a lovely ancient ergonomic keyboard (no name SK - 6000) connected via a DIN-ps/2 adapter to a ps/2-usb adapter to my docking station. After Grub it stops working. It takes either suspending and waking up or replugging it while Linux is running to get it to work. No extra kernel modules get loaded for this. When it works and I restart without power off, it will work immediately. Even when it does not work, it is visible (lsusb device number varies but output is identical whether working or not): $ lsusb -v -s 001:006 Bus 001 Device 006: ID 0a81:0205 Chesen Electronics Corp. PS/2 Keyboard+Mouse Adapter Device Descriptor: bLength 18 bDescriptorType 1 bcdUSB 1.10 bDeviceClass 0 (Defined at Interface level) bDeviceSubClass 0 bDeviceProtocol 0 bMaxPacketSize0 8 idVendor 0x0a81 Chesen Electronics Corp. idProduct 0x0205 PS/2 Keyboard+Mouse Adapter bcdDevice 0.10 iManufacturer 1 CHESEN iProduct 2 PS2 to USB Converter iSerial 0 bNumConfigurations 1 Configuration Descriptor: bLength 9 bDescriptorType 2 wTotalLength 59 bNumInterfaces 2 bConfigurationValue 1 iConfiguration 2 PS2 to USB Converter bmAttributes 0xa0 (Bus Powered) Remote Wakeup MaxPower 100mA Interface Descriptor: bLength 9 bDescriptorType 4 bInterfaceNumber 0 bAlternateSetting 0 bNumEndpoints 1 bInterfaceClass 3 Human Interface Device bInterfaceSubClass 1 Boot Interface Subclass bInterfaceProtocol 1 Keyboard iInterface 0 HID Device Descriptor: bLength 9 bDescriptorType 33 bcdHID 1.10 bCountryCode 0 Not supported bNumDescriptors 1 bDescriptorType 34 Report wDescriptorLength 64 Report Descriptors: ** UNAVAILABLE ** Endpoint Descriptor: bLength 7 bDescriptorType 5 bEndpointAddress 0x81 EP 1 IN bmAttributes 3 Transfer Type Interrupt Synch Type None Usage Type Data wMaxPacketSize 0x0008 1x 8 bytes bInterval 10 Interface Descriptor: bLength 9 bDescriptorType 4 bInterfaceNumber 1 bAlternateSetting 0 bNumEndpoints 1 bInterfaceClass 3 Human Interface Device bInterfaceSubClass 1 Boot Interface Subclass bInterfaceProtocol 2 Mouse iInterface 0 HID Device Descriptor: bLength 9 bDescriptorType 33 bcdHID 1.10 bCountryCode 0 Not supported bNumDescriptors 1 bDescriptorType 34 Report wDescriptorLength 148 Report Descriptors: ** UNAVAILABLE ** Endpoint Descriptor: bLength 7 bDescriptorType 5 bEndpointAddress 0x82 EP 2 IN bmAttributes 3 Transfer Type Interrupt Synch Type None Usage Type Data wMaxPacketSize 0x0008 1x 8 bytes bInterval 10 Device Status: 0x0000 (Bus Powered) $ ll -R /sys/bus/hid/drivers/ /sys/bus/hid/drivers/: total 0 drwxr-xr-x 2 root root 0 Jul 8 2012 generic-usb/ /sys/bus/hid/drivers/generic-usb: total 0 lrwxrwxrwx 1 root root 0 Jul 7 23:33 0003:046D:C03D.0003 -> ../../../../devices/pci0000:00/0000:00:1a.0/usb1/1-1/1-1.2/1-1.2.2/1-1.2.2:1.0/0003:046D:C03D.0003/ lrwxrwxrwx 1 root root 0 Jul 7 23:33 0003:0A81:0205.0001 -> ../../../../devices/pci0000:00/0000:00:1a.0/usb1/1-1/1-1.2/1-1.2.1/1-1.2.1:1.0/0003:0A81:0205.0001/ lrwxrwxrwx 1 root root 0 Jul 7 23:33 0003:0A81:0205.0002 -> ../../../../devices/pci0000:00/0000:00:1a.0/usb1/1-1/1-1.2/1-1.2.1/1-1.2.1:1.1/0003:0A81:0205.0002/ --w------- 1 root root 4096 Jul 7 23:32 bind lrwxrwxrwx 1 root root 0 Jul 7 23:33 module -> ../../../../module/usbhid/ --w------- 1 root root 4096 Jul 7 23:32 new_id --w------- 1 root root 4096 Jul 8 2012 uevent --w------- 1 root root 4096 Jul 7 23:32 unbind When replugging, dmesg shows this (which except for the 1st line and different input numbers already came at boot time): [ 1583.295385] usb 1-1.2.1: new low-speed USB device number 6 using ehci_hcd [ 
1583.446514] input: CHESEN PS2 to USB Converter as /devices/pci0000:00/0000:00:1a.0/usb1/1-1/1-1.2/1-1.2.1/1-1.2.1:1.0/input/input17 [ 1583.446817] generic-usb 0003:0A81:0205.0001: input,hidraw0: USB HID v1.10 Keyboard [CHESEN PS2 to USB Converter] on usb-0000:00:1a.0-1.2.1/input0 [ 1583.454764] input: CHESEN PS2 to USB Converter as /devices/pci0000:00/0000:00:1a.0/usb1/1-1/1-1.2/1-1.2.1/1-1.2.1:1.1/input/input18 [ 1583.455534] generic-usb 0003:0A81:0205.0002: input,hidraw1: USB HID v1.10 Mouse [CHESEN PS2 to USB Converter] on usb-0000:00:1a.0-1.2.1/input1 [ 1583.455578] usbcore: registered new interface driver usbhid [ 1583.455584] usbhid: USB HID core driver So I tried $ sudo udevadm test /sys/devices/pci0000:00/0000:00:1a.0/usb1/1-1/1-1.2/1-1.2.1/1-1.2.1:1.0/0003:0A81:0205.0001/hidraw/hidraw0 run_command: calling: test adm_test: version 175 This program is for debugging only, it does not run any program, specified by a RUN key. It may show incorrect results, because some values may be different, or not available at a simulation run. parse_file: reading '/lib/udev/rules.d/40-crda.rules' as rules file parse_file: reading '/lib/udev/rules.d/40-fuse.rules' as rules file ... parse_file: reading '/lib/udev/rules.d/40-usb-media-players.rules' as rules file parse_file: reading '/lib/udev/rules.d/40-usb_modeswitch.rules' as rules file ... parse_file: reading '/lib/udev/rules.d/42-qemu-usb.rules' as rules file ... parse_file: reading '/lib/udev/rules.d/69-cd-sensors.rules' as rules file add_rule: IMPORT found builtin 'usb_id', replacing /lib/udev/rules.d/69-cd-sensors.rules:76 ... parse_file: reading '/lib/udev/rules.d/77-mm-usb-device-blacklist.rules' as rules file ... parse_file: reading '/lib/udev/rules.d/85-usbmuxd.rules' as rules file ... parse_file: reading '/lib/udev/rules.d/95-upower-hid.rules' as rules file parse_file: reading '/lib/udev/rules.d/95-upower-wup.rules' as rules file parse_file: reading '/lib/udev/rules.d/97-bluetooth-hid2hci.rules' as rules file udev_rules_new: rules use 271500 bytes tokens (22625 * 12 bytes), 44331 bytes buffer udev_rules_new: temporary index used 76320 bytes (3816 * 20 bytes) udev_device_new_from_syspath: device 0x7f78a5e4d2d0 has devpath '/devices/pci0000:00/0000:00:1a.0/usb1/1-1/1-1.2/1-1.2.1/1-1.2.1:1.0/0003:0A81:0205.0001/hidraw/hidraw0' udev_device_new_from_syspath: device 0x7f78a5e5f820 has devpath '/devices/pci0000:00/0000:00:1a.0/usb1/1-1/1-1.2/1-1.2.1/1-1.2.1:1.0/0003:0A81:0205.0001/hidraw/hidraw0' udev_device_read_db: device 0x7f78a5e5f820 filled with db file data udev_device_new_from_syspath: device 0x7f78a5e60270 has devpath '/devices/pci0000:00/0000:00:1a.0/usb1/1-1/1-1.2/1-1.2.1/1-1.2.1:1.0/0003:0A81:0205.0001' udev_device_new_from_syspath: device 0x7f78a5e609c0 has devpath '/devices/pci0000:00/0000:00:1a.0/usb1/1-1/1-1.2/1-1.2.1/1-1.2.1:1.0' udev_device_new_from_syspath: device 0x7f78a5e61160 has devpath '/devices/pci0000:00/0000:00:1a.0/usb1/1-1/1-1.2/1-1.2.1' udev_device_new_from_syspath: device 0x7f78a5e61960 has devpath '/devices/pci0000:00/0000:00:1a.0/usb1/1-1/1-1.2' udev_device_new_from_syspath: device 0x7f78a5e62150 has devpath '/devices/pci0000:00/0000:00:1a.0/usb1/1-1' udev_device_new_from_syspath: device 0x7f78a5e62940 has devpath '/devices/pci0000:00/0000:00:1a.0/usb1' udev_device_new_from_syspath: device 0x7f78a5e630f0 has devpath '/devices/pci0000:00/0000:00:1a.0' udev_device_new_from_syspath: device 0x7f78a5e638a0 has devpath '/devices/pci0000:00' udev_event_execute_rules: no node name set, will use kernel supplied name 
'hidraw0' udev_node_add: creating device node '/dev/hidraw0', devnum=251:0, mode=0600, uid=0, gid=0 udev_node_mknod: preserve file '/dev/hidraw0', because it has correct dev_t udev_node_mknod: preserve permissions /dev/hidraw0, 020600, uid=0, gid=0 node_symlink: preserve already existing symlink '/dev/char/251:0' to '../hidraw0' udev_device_update_db: created empty file '/run/udev/data/c251:0' for '/devices/pci0000:00/0000:00:1a.0/usb1/1-1/1-1.2/1-1.2.1/1-1.2.1:1.0/0003:0A81:0205.0001/hidraw/hidraw0' ACTION=add DEVNAME=/dev/hidraw0 DEVPATH=/devices/pci0000:00/0000:00:1a.0/usb1/1-1/1-1.2/1-1.2.1/1-1.2.1:1.0/0003:0A81:0205.0001/hidraw/hidraw0 MAJOR=251 MINOR=0 SUBSYSTEM=hidraw UDEV_LOG=6 USEC_INITIALIZED=969079051 The later lines sound like it's already there. And none of these awakes the keyboard: $ sudo udevadm trigger --verbose --sysname-match=usb* /sys/devices/pci0000:00/0000:00:1a.0/usb1 /sys/devices/pci0000:00/0000:00:1a.0/usbmon/usbmon1 /sys/devices/pci0000:00/0000:00:1d.0/usb2 /sys/devices/pci0000:00/0000:00:1d.0/usbmon/usbmon2 /sys/devices/virtual/usbmon/usbmon0 $ sudo udevadm trigger --verbose --sysname-match=hidraw0 /sys/devices/pci0000:00/0000:00:1a.0/usb1/1-1/1-1.2/1-1.2.1/1-1.2.1:1.0/0003:0A81:0205.0001/hidraw/hidraw0 $ sudo udevadm trigger I also tried this to no avail: # echo -n 0003:0A81:0205.0001 > /sys/bus/hid/drivers/generic-usb/bind ksh: echo: write to 1 failed [No such device] # echo -n 0003:0A81:0205.0001 > /sys/bus/hid/drivers/generic-usb/unbind # echo -n 0003:0A81:0205.0001 > /sys/bus/hid/drivers/generic-usb/bind # echo usb1 >/sys/bus/usb/drivers/usb/unbind # echo usb1 >/sys/bus/usb/drivers/usb/bind What else should I try to get the same result as replugging or suspending, by just issuing a command?
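
    One way to get the replug effect from a command is the kernel's USB "authorized" attribute, which detaches and re-enumerates a single device without touching the rest of the bus. A sketch, using the port path 1-1.2.1 that appears in the dmesg output above (adjust the path if the converter moves to another port):

      # as root: simulate unplug, wait a moment, then replug
      echo 0 > /sys/bus/usb/devices/1-1.2.1/authorized
      sleep 1
      echo 1 > /sys/bus/usb/devices/1-1.2.1/authorized

    If that wakes the keyboard, it can be wired into a boot-time script. Note that the earlier unbind/bind attempts targeted usb1, the whole root hub; unbinding just the device, echo 1-1.2.1 > /sys/bus/usb/drivers/usb/unbind, is another variant worth trying.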

    Read the article

  • Gmail rejects emails. Openspf.net fails the tests

    - by pablomedok
    I've got a problem with Gmail. It started after one of our trojan infected PCs sent spam for one day from our IP address. We've fixed the problem, but we got into 3 black lists. We've fixed that, too. But still every time we send an email to Gmail the message is rejected: So I've checked Google Bulk Sender's guide once again and found an error in our SPF record and fixed it. Google says everything should become fine after some time, but this doesn't happen. 3 weeks already passed but we still can't send emails to Gmail. Our MX setup is a bit complex, but not too much: We have a domain name delo-company.com, it has it's own mail @delo-company.com (this one is fine, but the problems are with sub-domain name corp.delo-company.com). Delo-company.com domain has several DNS records for the subdomain: corp A 82.209.198.147 corp MX 20 corp.delo-company.com corp.delo-company.com TXT "v=spf1 ip4:82.209.198.147 ~all" (I set ~all for testing purposes only, it was -all before that) These records are for our corporate Exchange 2003 server at 82.209.198.147. Its LAN name is s2.corp.delo-company.com so its HELO/EHLO greetings are also s2.corp.delo-company.com. To pass EHLO check we've also created some records in delo-company.com's DNS: s2.corp A 82.209.198.147 s2.corp.delo-company.com TXT "v=spf1 ip4:82.209.198.147 ~all" As I understand SPF verifications should be passed in this way: Out server s2 connects to MX of the recepient (Rcp.MX): EHLO s2.corp.delo-company.com Rcp.MX says Ok, and makes SPF check of HELO/EHLO. It does NSlookup for s2.corp.delo-company.com and gets the above DNS-records. TXT records says that s2.corp.delo-company.com should be only from IP 82.209.198.147. So it should be passed. Then our s2 server says RCPT FROM: Rcp.MX` server checks it, too. The values are the same so they should also be positive. Maybe there is also a rDNS check, but I'm not sure what is checked HELO or RCPT FROM. Our PTR record for 82.209.198.147 is: 147.198.209.82.in-addr.arpa. 86400 IN PTR s2.corp.delo-company.com. To me everything looks fine, but anyway all emails are rejected by Gmail. So, I've checked MXtoolbox.com - it says everything is fine, I passed http://www.kitterman.com/spf/validate.html Python check, I did 25port.com email test. 
It's fine, too: Return-Path: <[email protected]> Received: from s2.corp.delo-company.com (82.209.198.147) by verifier.port25.com id ha45na11u9cs for <[email protected]>; Fri, 2 Mar 2012 13:03:21 -0500 (envelope-from <[email protected]>) Authentication-Results: verifier.port25.com; spf=pass [email protected] Authentication-Results: verifier.port25.com; domainkeys=neutral (message not signed) [email protected] Authentication-Results: verifier.port25.com; dkim=neutral (message not signed) Authentication-Results: verifier.port25.com; sender-id=pass [email protected] Content-class: urn:content-classes:message MIME-Version: 1.0 Content-Type: multipart/alternative; boundary="----_=_NextPart_001_01CCF89E.BE02A069" Subject: test Date: Fri, 2 Mar 2012 21:03:15 +0300 X-MimeOLE: Produced By Microsoft Exchange V6.5 Message-ID: <[email protected]> X-MS-Has-Attach: X-MS-TNEF-Correlator: Thread-Topic: test Thread-Index: Acz4jS34oznvbyFQR4S5rXsNQFvTdg== From: =?koi8-r?B?89XQ0tXOwMsg8MHXxcw=?= <[email protected]> To: <[email protected]> I also checked with [email protected], but it FAILs all the time, no matter which SPF records I make: <s2.corp.delo-company.com #5.7.1 smtp;550 5.7.1 <[email protected]>: Recipient address rejected: SPF Tests: Mail-From Result="softfail": Mail From="[email protected]" HELO name="s2.corp.delo-company.com" HELO Result="softfail" Remote IP="82.209.198.147"> I've filled Gmail form twice, but nothing happens. We do not send spam, only emails for our clients. 2 or 3 times we did mass emails (like New Year Greetings and sales promos) from corp.delo-company.com addresses, but they where all complying to Gmail Bulk Sender's Guide (I mean SPF, Open Relays, Precedence: Bulk and Unsubscribe tags). So, this should be not a problem. Please, help me. What am I doing wrong? UPD: I also tried Unlocktheinbox.com test and the server also fails this test. Here is the result: http://bit.ly/wYr39h . Here is one more http://bit.ly/ypWLjr I also tried to send email from that server manually via telnet and everything is fine. Here is what I type: 220 mx.google.com ESMTP g15si4811326anb.170 HELO s2.corp.delo-company.com 250 mx.google.com at your service MAIL FROM: <[email protected]> 250 2.1.0 OK g15si4811326anb.170 RCPT TO: <[email protected]> 250 2.1.5 OK g15si4811326anb.170 DATA 354 Go ahead g15si4811326anb.170 From: [email protected] To: Pavel <[email protected]> Subject: Test 28 This is telnet test . 250 2.0.0 OK 1330795021 g15si4811326anb.170 QUIT 221 2.0.0 closing connection g15si4811326anb.170 And this is what I get: Delivered-To: [email protected] Received: by 10.227.132.73 with SMTP id a9csp96864wbt; Sat, 3 Mar 2012 09:17:02 -0800 (PST) Received: by 10.101.128.12 with SMTP id f12mr4837125ann.49.1330795021572; Sat, 03 Mar 2012 09:17:01 -0800 (PST) Return-Path: <[email protected]> Received: from s2.corp.delo-company.com (s2.corp.delo-company.com. [82.209.198.147]) by mx.google.com with SMTP id g15si4811326anb.170.2012.03.03.09.15.59; Sat, 03 Mar 2012 09:17:00 -0800 (PST) Received-SPF: pass (google.com: domain of [email protected] designates 82.209.198.147 as permitted sender) client-ip=82.209.198.147; Authentication-Results: mx.google.com; spf=pass (google.com: domain of [email protected] designates 82.209.198.147 as permitted sender) [email protected] Date: Sat, 03 Mar 2012 09:17:00 -0800 (PST) Message-Id: <[email protected]> From: [email protected] To: Pavel <[email protected]> Subject: Test 28 This is telnet test
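
    One detail stands out: the unlocktheinbox test reported softfail even though the connecting IP matched the record, which usually means the verifier saw a different TXT record than the one intended (stale cached DNS, a record published on the wrong label, or simply the ~all test policy). A short sketch for checking what outside resolvers actually see, assuming dig is available on some external host:

      dig +short TXT corp.delo-company.com
      dig +short TXT s2.corp.delo-company.com
      dig +short -x 82.209.198.147
      # once both TXT answers show the matching ip4 mechanism, tighten the
      # policy back from ~all (softfail) to -all (fail):
      #   "v=spf1 ip4:82.209.198.147 -all"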

    Read the article

  • puma init.d for centos 6 fails with runuser: user /var/log/puma.log does not exist

    - by Rubytastic
    Trying to get a init.d/puma to work on Centos 6. It throws error runuser: user /var/log/puma.log does not exist I run this from the /srv/books/current folder but it fails. I tried to debug the values but not quite get what is missing and why it throws this error. #! /bin/sh # puma - this script starts and stops the puma daemon # # chkconfig: - 85 15 # description: Puma # processname: puma # config: /etc/puma.conf # pidfile: /srv/books/current/tmp/pids/puma.pid # Author: Darío Javier Cravero &lt;[email protected]> # # Do NOT "set -e" # Original script https://github.com/puma/puma/blob/master/tools/jungle/puma # It was modified here by Stanislaw Pankevich <[email protected]> # to run on CentOS 5.5 boxes. # Script works perfectly on CentOS 5: script uses its native daemon(). # Puma is being stopped/restarted by sending signals, control app is not used. # Source function library. . /etc/rc.d/init.d/functions # Source networking configuration. . /etc/sysconfig/network # Check that networking is up. [ "$NETWORKING" = "no" ] && exit 0 # PATH should only include /usr/* if it runs after the mountnfs.sh script PATH=/usr/local/bin:/usr/local/sbin/:/sbin:/usr/sbin:/bin:/usr/bin DESC="Puma rack web server" NAME=puma DAEMON=$NAME SCRIPTNAME=/etc/init.d/$NAME CONFIG=/etc/puma.conf JUNGLE=`cat $CONFIG` RUNPUMA=/usr/local/bin/run-puma # Skipping the following non-CentOS string # Load the VERBOSE setting and other rcS variables # . /lib/init/vars.sh # CentOS does not have these functions natively log_daemon_msg() { echo "$@"; } log_end_msg() { [ $1 -eq 0 ] && RES=OK; logger ${RES:=FAIL}; } # Define LSB log_* functions. # Depend on lsb-base (>= 3.0-6) to ensure that this file is present. . /lib/lsb/init-functions # # Function that performs a clean up of puma.* files # cleanup() { echo "Cleaning up puma temporary files..." echo $1; PIDFILE=$1/tmp/puma/puma.pid STATEFILE=$1/tmp/puma/puma.state SOCKFILE=$1/tmp/puma/puma.sock rm -f $PIDFILE $STATEFILE $SOCKFILE } # # Function that starts the jungle # do_start() { log_daemon_msg "=> Running the jungle..." for i in $JUNGLE; do dir=`echo $i | cut -d , -f 1` user=`echo $i | cut -d , -f 2` config_file=`echo $i | cut -d , -f 3` if [ "$config_file" = "" ]; then config_file="$dir/puma/config.rb" fi log_file=`echo $i | cut -d , -f 4` if [ "$log_file" = "" ]; then log_file="$dir/puma/puma.log" fi do_start_one $dir $user $config_file $log_file done } do_start_one() { PIDFILE=$1/puma/puma.pid if [ -e $PIDFILE ]; then PID=`cat $PIDFILE` # If the puma isn't running, run it, otherwise restart it. if [ "`ps -A -o pid= | grep -c $PID`" -eq 0 ]; then do_start_one_do $1 $2 $3 $4 else do_restart_one $1 fi else do_start_one_do $1 $2 $3 $4 fi } do_start_one_do() { log_daemon_msg "--> Woke up puma $1" log_daemon_msg "user $2" log_daemon_msg "log to $4" cleanup $1; daemon --user $2 $RUNPUMA $1 $3 $4 } # # Function that stops the jungle # do_stop() { log_daemon_msg "=> Putting all the beasts to bed..." for i in $JUNGLE; do dir=`echo $i | cut -d , -f 1` do_stop_one $dir done } # # Function that stops the daemon/service # do_stop_one() { log_daemon_msg "--> Stopping $1" PIDFILE=$1/tmp/puma/puma.pid STATEFILE=$1/tmp/puma/puma.state echo $PIDFILE if [ -e $PIDFILE ]; then PID=`cat $PIDFILE` echo "Pid:" echo $PID if [ "`ps -A -o pid= | grep -c $PID`" -eq 0 ]; then log_daemon_msg "---> Puma $1 isn't running." else log_daemon_msg "---> About to kill PID `cat $PIDFILE`" # pumactl --state $STATEFILE stop # Many daemons don't delete their pidfiles when they exit. 
kill -9 $PID fi cleanup $1 else log_daemon_msg "---> No puma here..." fi return 0 } # # Function that restarts the jungle # do_restart() { for i in $JUNGLE; do dir=`echo $i | cut -d , -f 1` do_restart_one $dir done } # # Function that sends a SIGUSR2 to the daemon/service # do_restart_one() { PIDFILE=$1/tmp/puma/puma.pid i=`grep $1 $CONFIG` dir=`echo $i | cut -d , -f 1` if [ -e $PIDFILE ]; then log_daemon_msg "--> About to restart puma $1" # pumactl --state $dir/tmp/puma/state restart kill -s USR2 `cat $PIDFILE` # TODO Check if process exist else log_daemon_msg "--> Your puma was never playing... Let's get it out there first" user=`echo $i | cut -d , -f 2` config_file=`echo $i | cut -d , -f 3` if [ "$config_file" = "" ]; then config_file="$dir/config/puma.rb" fi log_file=`echo $i | cut -d , -f 4` if [ "$log_file" = "" ]; then log_file="$dir/log/puma.log" fi do_start_one $dir $user $config_file $log_file fi return 0 } # # Function that statuss then jungle # do_status() { for i in $JUNGLE; do dir=`echo $i | cut -d , -f 1` do_status_one $dir done } # # Function that sends a SIGUSR2 to the daemon/service # do_status_one() { PIDFILE=$1/tmp/puma/pid i=`grep $1 $CONFIG` dir=`echo $i | cut -d , -f 1` if [ -e $PIDFILE ]; then log_daemon_msg "--> About to status puma $1" pumactl --state $dir/tmp/puma/state stats # kill -s USR2 `cat $PIDFILE` # TODO Check if process exist else log_daemon_msg "--> $1 isn't there :(..." fi return 0 } do_add() { str="" # App's directory if [ -d "$1" ]; then if [ "`grep -c "^$1" $CONFIG`" -eq 0 ]; then str=$1 else echo "The app is already being managed. Remove it if you want to update its config." exit 1 fi else echo "The directory $1 doesn't exist." exit 1 fi # User to run it as if [ "`grep -c "^$2:" /etc/passwd`" -eq 0 ]; then echo "The user $2 doesn't exist." exit 1 else str="$str,$2" fi # Config file if [ "$3" != "" ]; then if [ -e $3 ]; then str="$str,$3" else echo "The config file $3 doesn't exist." exit 1 fi fi # Log file if [ "$4" != "" ]; then str="$str,$4" fi # Add it to the jungle echo $str >> $CONFIG log_daemon_msg "Added a Puma to the jungle: $str. You still have to start it though." } do_remove() { if [ "`grep -c "^$1" $CONFIG`" -eq 0 ]; then echo "There's no app $1 to remove." else # Stop it first. do_stop_one $1 # Remove it from the config. sed -i "\\:^$1:d" $CONFIG log_daemon_msg "Removed a Puma from the jungle: $1." fi } case "$1" in start) [ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC" "$NAME" if [ "$#" -eq 1 ]; then do_start else i=`grep $2 $CONFIG` dir=`echo $i | cut -d , -f 1` user=`echo $i | cut -d , -f 2` config_file=`echo $i | cut -d , -f 3` if [ "$config_file" = "" ]; then config_file="$dir/config/puma.rb" fi log_file=`echo $i | cut -d , -f 4` if [ "$log_file" = "" ]; then log_file="$dir/log/puma.log" fi do_start_one $dir $user $config_file $log_file fi case "$?" in 0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;; 2) [ "$VERBOSE" != no ] && log_end_msg 1 ;; esac ;; stop) [ "$VERBOSE" != no ] && log_daemon_msg "Stopping $DESC" "$NAME" if [ "$#" -eq 1 ]; then do_stop else i=`grep $2 $CONFIG` dir=`echo $i | cut -d , -f 1` do_stop_one $dir fi case "$?" in 0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;; 2) [ "$VERBOSE" != no ] && log_end_msg 1 ;; esac ;; status) # TODO Implement. log_daemon_msg "Status $DESC" "$NAME" if [ "$#" -eq 1 ]; then do_status else i=`grep $2 $CONFIG` dir=`echo $i | cut -d , -f 1` do_status_one $dir fi case "$?" 
in 0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;; 2) [ "$VERBOSE" != no ] && log_end_msg 1 ;; esac ;; restart) log_daemon_msg "Restarting $DESC" "$NAME" if [ "$#" -eq 1 ]; then do_restart else i=`grep $2 $CONFIG` dir=`echo $i | cut -d , -f 1` do_restart_one $dir fi case "$?" in 0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;; 2) [ "$VERBOSE" != no ] && log_end_msg 1 ;; esac ;; add) if [ "$#" -lt 3 ]; then echo "Please, specifiy the app's directory and the user that will run it at least." echo " Usage: $SCRIPTNAME add /path/to/app user /path/to/app/config/puma.rb /path/to/app/config/log/puma.log" echo " config and log are optionals." exit 1 else do_add $2 $3 $4 $5 fi case "$?" in 0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;; 2) [ "$VERBOSE" != no ] && log_end_msg 1 ;; esac ;; remove) if [ "$#" -lt 2 ]; then echo "Please, specifiy the app's directory to remove." exit 1 else do_remove $2 fi case "$?" in 0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;; 2) [ "$VERBOSE" != no ] && log_end_msg 1 ;; esac ;; *) echo "Usage:" >&2 echo " Run the jungle: $SCRIPTNAME {start|stop|status|restart}" >&2 echo " Add a Puma: $SCRIPTNAME add /path/to/app user /path/to/app/config/puma.rb /path/to/app/config/log/puma.log" echo " config and log are optionals." echo " Remove a Puma: $SCRIPTNAME remove /path/to/app" echo " On a Puma: $SCRIPTNAME {start|stop|status|restart} PUMA-NAME" >&2 exit 3 ;; esac :
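
    The error text points at the config file rather than the script: daemon --user is being handed /var/log/puma.log, so the second comma-separated field of the matching line in /etc/puma.conf is a log path, not a user. The script splits each line with cut -d , into directory,user,config,log, with no spaces around the commas. A sketch of a well-formed entry, where deploy is a placeholder for whichever account should own the process:

      # /etc/puma.conf -- one app per line: dir,user,config,log
      /srv/books/current,deploy,/srv/books/current/config/puma.rb,/var/log/puma.log

    Separately, note that the script reads the pidfile as $dir/puma/puma.pid in do_start_one but as $dir/tmp/puma/puma.pid in do_stop_one and do_restart_one, so the pidfile path configured in puma.rb has to satisfy whichever functions you rely on.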

    Read the article

  • PPTP: client-to-client ping fails

    - by Linux Intel
    I installed pptp server on a centos 6 64bit server PPTP Server ip : 55.66.77.10 PPTP Local ip : 10.0.0.1 Client1 IP : 10.0.0.60 centos 5 64bit Client2 IP : 10.0.0.61 centos5 64bit PPTP Server can ping Client1 And client 1 can ping PPTP Server PPTP Server can ping Client2 And client 2 can ping PPTP Server The problem is client 1 can not ping Client 2 route -n on PPTP Server Destination Gateway Genmask Flags Metric Ref Use Iface 10.0.0.60 0.0.0.0 255.255.255.255 UH 0 0 0 ppp0 10.0.0.61 0.0.0.0 255.255.255.255 UH 0 0 0 ppp1 55.66.77.10 0.0.0.0 255.255.255.248 U 0 0 0 eth0 10.0.0.0 0.0.0.0 255.0.0.0 U 0 0 0 eth0 0.0.0.0 55.66.77.19 0.0.0.0 UG 0 0 0 eth0 route -n On Client 1 Destination Gateway Genmask Flags Metric Ref Use Iface 10.0.0.1 0.0.0.0 255.255.255.255 UH 0 0 0 ppp0 55.66.77.10 70.14.13.19 255.255.255.255 UGH 0 0 0 eth0 10.0.0.0 0.0.0.0 255.0.0.0 U 0 0 0 eth1 0.0.0.0 70.14.13.19 0.0.0.0 UG 0 0 0 eth0 route -n On Client 2 Destination Gateway Genmask Flags Metric Ref Use Iface 10.0.0.1 0.0.0.0 255.255.255.255 UH 0 0 0 ppp0 55.66.77.10 84.56.120.60 255.255.255.255 UGH 0 0 0 eth1 10.0.0.0 0.0.0.0 255.0.0.0 U 0 0 0 eth0 0.0.0.0 84.56.120.60 0.0.0.0 UG 0 0 0 eth1 cat /etc/ppp/options.pptpd on PPTP server ############################################################################### # $Id: options.pptpd,v 1.11 2005/12/29 01:21:09 quozl Exp $ # # Sample Poptop PPP options file /etc/ppp/options.pptpd # Options used by PPP when a connection arrives from a client. # This file is pointed to by /etc/pptpd.conf option keyword. # Changes are effective on the next connection. See "man pppd". # # You are expected to change this file to suit your system. As # packaged, it requires PPP 2.4.2 and the kernel MPPE module. ############################################################################### # Authentication # Name of the local system for authentication purposes # (must match the second field in /etc/ppp/chap-secrets entries) name pptpd # Strip the domain prefix from the username before authentication. # (applies if you use pppd with chapms-strip-domain patch) #chapms-strip-domain # Encryption # (There have been multiple versions of PPP with encryption support, # choose with of the following sections you will use.) # BSD licensed ppp-2.4.2 upstream with MPPE only, kernel module ppp_mppe.o # {{{ refuse-pap refuse-chap refuse-mschap # Require the peer to authenticate itself using MS-CHAPv2 [Microsoft # Challenge Handshake Authentication Protocol, Version 2] authentication. require-mschap-v2 # Require MPPE 128-bit encryption # (note that MPPE requires the use of MSCHAP-V2 during authentication) require-mppe-128 # }}} # OpenSSL licensed ppp-2.4.1 fork with MPPE only, kernel module mppe.o # {{{ #-chap #-chapms # Require the peer to authenticate itself using MS-CHAPv2 [Microsoft # Challenge Handshake Authentication Protocol, Version 2] authentication. #+chapms-v2 # Require MPPE encryption # (note that MPPE requires the use of MSCHAP-V2 during authentication) #mppe-40 # enable either 40-bit or 128-bit, not both #mppe-128 #mppe-stateless # }}} # Network and Routing # If pppd is acting as a server for Microsoft Windows clients, this # option allows pppd to supply one or two DNS (Domain Name Server) # addresses to the clients. The first instance of this option # specifies the primary DNS address; the second instance (if given) # specifies the secondary DNS address. 
#ms-dns 10.0.0.1 #ms-dns 10.0.0.2 # If pppd is acting as a server for Microsoft Windows or "Samba" # clients, this option allows pppd to supply one or two WINS (Windows # Internet Name Services) server addresses to the clients. The first # instance of this option specifies the primary WINS address; the # second instance (if given) specifies the secondary WINS address. #ms-wins 10.0.0.3 #ms-wins 10.0.0.4 # Add an entry to this system's ARP [Address Resolution Protocol] # table with the IP address of the peer and the Ethernet address of this # system. This will have the effect of making the peer appear to other # systems to be on the local ethernet. # (you do not need this if your PPTP server is responsible for routing # packets to the clients -- James Cameron) proxyarp # Normally pptpd passes the IP address to pppd, but if pptpd has been # given the delegate option in pptpd.conf or the --delegate command line # option, then pppd will use chap-secrets or radius to allocate the # client IP address. The default local IP address used at the server # end is often the same as the address of the server. To override this, # specify the local IP address here. # (you must not use this unless you have used the delegate option) #10.8.0.100 # Logging # Enable connection debugging facilities. # (see your syslog configuration for where pppd sends to) debug # Print out all the option values which have been set. # (often requested by mailing list to verify options) #dump # Miscellaneous # Create a UUCP-style lock file for the pseudo-tty to ensure exclusive # access. lock # Disable BSD-Compress compression nobsdcomp # Disable Van Jacobson compression # (needed on some networks with Windows 9x/ME/XP clients, see posting to # poptop-server on 14th April 2005 by Pawel Pokrywka and followups, # http://marc.theaimsgroup.com/?t=111343175400006&r=1&w=2 ) novj novjccomp # turn off logging to stderr, since this may be redirected to pptpd, # which may trigger a loopback nologfd # put plugins here # (putting them higher up may cause them to sent messages to the pty) cat /etc/ppp/options.pptp on Client1 and Client2 ############################################################################### # $Id: options.pptp,v 1.3 2006/03/26 23:11:05 quozl Exp $ # # Sample PPTP PPP options file /etc/ppp/options.pptp # Options used by PPP when a connection is made by a PPTP client. # This file can be referred to by an /etc/ppp/peers file for the tunnel. # Changes are effective on the next connection. See "man pppd". # # You are expected to change this file to suit your system. As # packaged, it requires PPP 2.4.2 or later from http://ppp.samba.org/ # and the kernel MPPE module available from the CVS repository also on # http://ppp.samba.org/, which is packaged for DKMS as kernel_ppp_mppe. ############################################################################### # Lock the port lock # Authentication # We don't need the tunnel server to authenticate itself noauth # We won't do PAP, EAP, CHAP, or MSCHAP, but we will accept MSCHAP-V2 # (you may need to remove these refusals if the server is not using MPPE) refuse-pap refuse-eap refuse-chap refuse-mschap # Compression # Turn off compression protocols we know won't be used nobsdcomp nodeflate # Encryption # (There have been multiple versions of PPP with encryption support, # choose which of the following sections you will use. 
Note that MPPE # requires the use of MSCHAP-V2 during authentication) # # Note that using PPTP with MPPE and MSCHAP-V2 should be considered # insecure: # http://marc.info/?l=pptpclient-devel&m=134372640219039&w=2 # https://github.com/moxie0/chapcrack/blob/master/README.md # http://technet.microsoft.com/en-us/security/advisory/2743314 # http://ppp.samba.org/ the PPP project version of PPP by Paul Mackarras # ppp-2.4.2 or later with MPPE only, kernel module ppp_mppe.o # If the kernel is booted in FIPS mode (fips=1), the ppp_mppe.ko module # is not allowed and PPTP-MPPE is not available. # {{{ # Require MPPE 128-bit encryption #require-mppe-128 # }}} # http://mppe-mppc.alphacron.de/ fork from PPP project by Jan Dubiec # ppp-2.4.2 or later with MPPE and MPPC, kernel module ppp_mppe_mppc.o # {{{ # Require MPPE 128-bit encryption #mppe required,stateless # }}} IPtables are stopped on clients and server, Also net.ipv4.ip_forward = 1 is enabled on PPTP Server. How can i solve this problem .?
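
    The client routing tables explain the symptom: on both clients the broad 10.0.0.0/8 route points at the local eth interface, so a packet addressed to the other client's 10.0.0.x address never enters the tunnel; only the /32 to the server itself goes via ppp0. A sketch of the missing host routes, assuming the client addresses stay static:

      # on client 1 (10.0.0.60): reach client 2 through the tunnel
      ip route add 10.0.0.61/32 dev ppp0
      # on client 2 (10.0.0.61): the mirror route
      ip route add 10.0.0.60/32 dev ppp0
      # verify which interface the kernel now picks (run on client 1)
      ip route get 10.0.0.61        # should print "dev ppp0"

    With net.ipv4.ip_forward already enabled on the server and iptables stopped, the server can then forward between ppp0 and ppp1. Routes like these are typically made persistent in an /etc/ppp/ip-up script so they survive reconnects.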

    Read the article

  • How can I avoid Windows 8.1 resetting my font size?

    - by Michael Tsang
    I am using Windows 8.1 on my laptop, which has a 15.6" screen with resolution 1366x768. I measured the screen with a ruler and calculated its DPI, which is 101. Therefore, I have set the scaling to 105%. However, when I change to an external monitor, which is a huge one with resolution 1920x1080 and DPI 93, I need to change the scaling to 97% but when I change the DPI back and forth, my font sizes have get resetted. I prefer using font sizes 14 on my title bars, message boxes and icons and font sizes 13 on my palette titles, menus and tooltips. However, as my laptop screen is too small, in order to make my apps fit on screen, I use font sizes 12 on my title bars, message boxes and icons and font sizes 11 on my palette titles, menus and tooltips. I don't know why I can't resize the window to make it larger than my screen in Windows (but it is possible in Kubuntu), therefore, some parts of my apps cannot be shown with my preferred font size. I have tried changing both the DPI and the font size by using .reg files. Before switching to my laptop screen, I apply the following: Windows Registry Editor Version 5.00 [HKEY_CURRENT_USER\Control Panel\Desktop] "LogPixels"=dword:00000065 [HKEY_CURRENT_USER\Control Panel\Desktop\WindowMetrics] "CaptionFont"=hex:ef,ff,ff,ff,00,00,00,00,00,00,00,00,00,00,00,00,bc,02,00,00,\ 00,00,00,01,00,00,05,00,53,00,65,00,67,00,6f,00,65,00,20,00,55,00,49,00,00,\ 00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,\ 00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00 "SmCaptionFont"=hex:f0,ff,ff,ff,00,00,00,00,00,00,00,00,00,00,00,00,bc,02,00,\ 00,00,00,00,01,00,00,05,00,53,00,65,00,67,00,6f,00,65,00,20,00,55,00,49,00,\ 00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,\ 00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00 "MenuFont"=hex:f0,ff,ff,ff,00,00,00,00,00,00,00,00,00,00,00,00,90,01,00,00,00,\ 00,00,01,00,00,05,00,53,00,65,00,67,00,6f,00,65,00,20,00,55,00,49,00,00,00,\ 00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,\ 00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00 "StatusFont"=hex:f0,ff,ff,ff,00,00,00,00,00,00,00,00,00,00,00,00,90,01,00,00,\ 00,00,00,01,00,00,05,00,53,00,65,00,67,00,6f,00,65,00,20,00,55,00,49,00,00,\ 00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,\ 00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00 "MessageFont"=hex:ef,ff,ff,ff,00,00,00,00,00,00,00,00,00,00,00,00,90,01,00,00,\ 00,00,00,01,00,00,05,00,53,00,65,00,67,00,6f,00,65,00,20,00,55,00,49,00,00,\ 00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,\ 00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00 "IconFont"=hex:ef,ff,ff,ff,00,00,00,00,00,00,00,00,00,00,00,00,90,01,00,00,00,\ 00,00,01,00,00,05,00,53,00,65,00,67,00,6f,00,65,00,20,00,55,00,49,00,00,00,\ 00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,\ 00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00 "AppliedDPI"=dword:00000065 Before switching to my external display, I apply this: Windows Registry Editor Version 5.00 [HKEY_CURRENT_USER\Control Panel\Desktop] "LogPixels"=dword:0000005d [HKEY_CURRENT_USER\Control Panel\Desktop\WindowMetrics] "CaptionFont"=hex:ed,ff,ff,ff,00,00,00,00,00,00,00,00,00,00,00,00,bc,02,00,00,\ 00,00,00,01,00,00,05,00,53,00,65,00,67,00,6f,00,65,00,20,00,55,00,49,00,00,\ 00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,\ 
00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00 "SmCaptionFont"=hex:ee,ff,ff,ff,00,00,00,00,00,00,00,00,00,00,00,00,bc,02,00,\ 00,00,00,00,01,00,00,05,00,53,00,65,00,67,00,6f,00,65,00,20,00,55,00,49,00,\ 00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,\ 00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00 "MenuFont"=hex:ef,ff,ff,ff,00,00,00,00,00,00,00,00,00,00,00,00,90,01,00,00,00,\ 00,00,01,00,00,05,00,53,00,65,00,67,00,6f,00,65,00,20,00,55,00,49,00,00,00,\ 00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,\ 00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00 "StatusFont"=hex:ef,ff,ff,ff,00,00,00,00,00,00,00,00,00,00,00,00,90,01,00,00,\ 00,00,00,01,00,00,05,00,53,00,65,00,67,00,6f,00,65,00,20,00,55,00,49,00,00,\ 00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,\ 00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00 "MessageFont"=hex:ed,ff,ff,ff,00,00,00,00,00,00,00,00,00,00,00,00,90,01,00,00,\ 00,00,00,01,00,00,05,00,53,00,65,00,67,00,6f,00,65,00,20,00,55,00,49,00,00,\ 00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,\ 00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00 "IconFont"=hex:ed,ff,ff,ff,00,00,00,00,00,00,00,00,00,00,00,00,90,01,00,00,00,\ 00,00,01,00,00,05,00,53,00,65,00,67,00,6f,00,65,00,20,00,55,00,49,00,00,00,\ 00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,\ 00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00 "AppliedDPI"=dword:0000005d I expect after applying the file, the DPI settings and the font sizes take effect at the next sign in. However, on my laptop screen, after I applied the file, signed out and in, the DPI setting changed, but the font sizes were resetted to tiny, and I had to apply the same file, signed out and in again to get the correct font size. The situation is even worse on my external monitor. After I applied the file, signed out and in, both the DPI setting and the font sizes were resetted to their default values, which were 96 DPI (the physical DPI as measured by dividing the resolution by the physical size is 93) and font size 9, which is totally unacceptable. How can I write the .reg files such that the settings can be correctly applied with a single sign in?
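
    One avenue worth trying, sketched below: Windows 8.1 re-derives a per-monitor scale factor at each sign-in unless it is told to use a single fixed, XP-style DPI, and that re-derivation is a plausible trigger for the WindowMetrics reset. Adding Win8DpiScaling next to LogPixels pins the DPI (the 0x65 below matches the laptop file; the external-monitor file would use 0x5d):

      Windows Registry Editor Version 5.00

      ; sketch: pin one fixed DPI instead of per-monitor auto-scaling
      [HKEY_CURRENT_USER\Control Panel\Desktop]
      "Win8DpiScaling"=dword:00000001
      "LogPixels"=dword:00000065

    The files can be applied from an elevated prompt with reg import, followed by a single sign-out, leaving the existing WindowMetrics sections unchanged.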

    Read the article

  • A free standing ASP.NET Pager Web Control

    - by Rick Strahl
    Paging in ASP.NET has been relatively easy, with stock controls supporting basic paging functionality. However, I recently built an MVC application, and one of the things I ran into was that I HAD TO build manual paging support into a few of my pages. Dealing with list controls and rendering markup is easy enough, but doing paging is a little more involved. I ended up with a small but flexible component that can be dropped anywhere. As it turns out, the task of creating a semi-generic Pager control for MVC was fairly easy. Now I’m back to working in Web Forms and thought to myself that the way I created the pager in MVC would also work in ASP.NET – in fact quite a bit more easily, since the whole thing can be conveniently wrapped up into an easily reusable control. A standalone pager provides easier reuse in various pages and a more consistent pager display regardless of what kind of 'control’ the pager is associated with.

Why a Pager Control?

At first blush it might sound silly to create a new pager control – after all, Web Forms has pretty decent paging support, doesn’t it? Well, sort of. Yes, the GridView control has automatic paging built in and the ListView control has the related DataPager control. The built-in ASP.NET paging has several issues though:

Postback and JavaScript requirements: If you look at paging links in ASP.NET, they are always postback links with javascript:__doPostBack() calls that go back to the server. While that works fine and actually has some benefit, like the fact that paging preserves changes to the page and posts them back, it’s not very SEO friendly. Basically, if you use JavaScript based navigation, no search engine will follow the paging links, which effectively cuts off list content beyond the first page. The DataPager control does support GET based links via the QueryStringParameter property, but the control is effectively tied to the ListView control (which is the only control that implements IPageableItemContainer).

DataSource Controls required for Efficient Data Paging Retrieval: The only way to get paging to work efficiently, where only the few records you display on the page are queried for and retrieved from the database, is to use a DataSource control - only the Linq and Entity DataSource controls support this natively. While you can retrieve this data yourself manually, there’s no way to just assign the page number and render the pager based on this custom subset. Otherwise, default paging requires a full resultset for ASP.NET to filter the data and display only a subset, which can be very resource intensive and wasteful if you’re dealing with largish resultsets (although I’m a firm believer in returning actually usable sets :-}). If you use your own business layer that doesn’t fit an ObjectDataSource, you’re SOL. That’s a real shame too, because with LINQ based querying it’s really easy to retrieve a subset of data that is just the data you want to display, but the native pager functionality doesn’t support just setting properties to display that subset, AFAIK.

DataPager is not Free Standing: The DataPager control is the closest thing to a decent pager implementation that ASP.NET has, but alas it’s not a free standing component – it works off a related control, and the only one that it effectively supports from the stock ASP.NET controls is the ListView control. This means you can’t use the same data pager formatting for a grid and a list view or vice versa, and you’re always tied to the control. 
Paging Events: In order to handle paging you have to deal with paging events. The events fire at specific points in the page pipeline, and because of this you often have to handle data binding in a way that works around the paging events, or else you end up double-binding your data sources based on paging. Yuk.

Styling: The GridView pager is a royal pain to beat into submission for styled rendering. The DataPager control has many more options and template layout and it renders somewhat cleaner, but it too is not exactly easy to get a decent display out of.

Not a Generic Solution: The problem with the ASP.NET controls, too, is that they’re not generic. GridView and DataGrid use their own internal paging, ListView can use a DataPager, and if you want to manually create a data layout – well, you’re on your own. IOW, depending on what you use, you likely have very different looking paging experiences.

So, I figured I’ve struggled with this once too often and finally sat down and built a Pager control.

The Pager Control

My goal was to create a totally free standing control that has no dependencies on other controls and certainly no requirements for using DataSource controls. The idea is that you should be able to use this pager control without any sort of data requirements at all – you should just be able to set properties and be able to display a pager. The Pager control I ended up with has the following features:

Completely free standing Pager control – no control or data dependencies
Complete manual control – Pager can render without any data dependency
Easy to use: only need to set PageSize, ActivePage and TotalItems
Supports optional filtering of IQueryable for efficient queries and Pager rendering
Supports optional full set filtering of IEnumerable<T> and DataTable
Page links are plain HTTP GET href links
Control automatically picks up page links on the URL and assigns them (automatic page detection, no page index changing events to hook up)
Full CSS styling support

On the downside there’s no templating support for the control, so the layout of the pager is relatively fixed. All elements however are stylable, and there are options to control the text and layout, such as whether to display the first and last pages and the previous/next buttons and so on. To give you an idea what the pager looks like, here are two differently styled examples (all via CSS). The markup for these two pagers looks like this:

<ww:Pager runat="server" id="ItemPager"
    PageSize="5"
    PageLinkCssClass="gridpagerbutton"
    SelectedPageCssClass="gridpagerbutton-selected"
    PagesTextCssClass="gridpagertext"
    CssClass="gridpager"
    RenderContainerDiv="true"
    ContainerDivCssClass="gridpagercontainer"
    MaxPagesToDisplay="6"
    PagesText="Item Pages:"
    NextText="next"
    PreviousText="previous" />

<ww:Pager runat="server" id="ItemPager2"
    PageSize="5"
    RenderContainerDiv="true"
    MaxPagesToDisplay="6" />

The latter example uses default style settings, so there’s not much to set. The first example, on the other hand, explicitly assigns custom styles and overrides a few of the formatting options.

Styling

The styling is based on a number of CSS classes, of which the main pager, pagerbutton and pagerbutton-selected classes are the important ones. Other styles like pagerbutton-next/prev/first/last are based on the pagerbutton style. 
The default styling, shown for the red outlined pager, looks like this:

.pagercontainer {
    margin: 20px 0;
    background: whitesmoke;
    padding: 5px;
}
.pager {
    float: right;
    font-size: 10pt;
    text-align: left;
}
.pagerbutton,.pagerbutton-selected,.pagertext {
    display: block;
    float: left;
    text-align: center;
    border: solid 2px maroon;
    min-width: 18px;
    margin-left: 3px;
    text-decoration: none;
    padding: 4px;
}
.pagerbutton-selected {
    font-size: 130%;
    font-weight: bold;
    color: maroon;
    border-width: 0px;
    background: khaki;
}
.pagerbutton-first {
    margin-right: 12px;
}
.pagerbutton-last,.pagerbutton-prev {
    margin-left: 12px;
}
.pagertext {
    border: none;
    margin-left: 30px;
    font-weight: bold;
}
.pagerbutton a {
    text-decoration: none;
}
.pagerbutton:hover {
    background-color: maroon;
    color: cornsilk;
}
.pagerbutton-prev {
    background-image: url(images/prev.png);
    background-position: 2px center;
    background-repeat: no-repeat;
    width: 35px;
    padding-left: 20px;
}
.pagerbutton-next {
    background-image: url(images/next.png);
    background-position: 40px center;
    background-repeat: no-repeat;
    width: 35px;
    padding-right: 20px;
    margin-right: 0px;
}

Yup, that’s a lot of styling settings, although not all of them are required. The key ones are pagerbutton, pager and pagerbutton-selected. The others (which are implicitly created by the control based on the pagerbutton style) are for custom markup of the ‘special’ buttons. In my apps I tend to have two kinds of pages: those that are associated with typical ‘grid’ displays that show purely tabular data, and those that have a looser, list-like layout. The two pagers shown above represent these two views, and the pager and gridpager styles in my standard style sheet reflect these two styles.

Configuring the Pager with Code

Finally, let’s look at what it takes to hook up the pager. As mentioned in the highlights, the Pager control is completely independent of other controls, so if you just want to display a pager on its own, it’s as simple as dropping the control and assigning the PageSize, ActivePage and either TotalPages or TotalItems. So for this markup:

<ww:Pager runat="server" id="ItemPagerManual"
    PageSize="5"
    MaxPagesToDisplay="6" />

I can use code as simple as:

ItemPagerManual.PageSize = 3;
ItemPagerManual.ActivePage = 4;
ItemPagerManual.TotalItems = 20;

Note that ActivePage is not required - it will automatically use any Page=x query string value and assign it, although you can override it as I did above. TotalItems can be any value that you retrieve from a result set or manually assign as I did above.

A more realistic scenario based on a LINQ to SQL IQueryable result is even easier. In this example, I have a UserControl that contains a ListView control that renders IQueryable data. I use a UserControl here because there are different views the user can choose from, with each view being a different user control. This incidentally also highlights one of the nice features of the pager: because the pager is independent of the control, I can put the pager on the host page instead of into each of the user controls. IOW, there’s only one Pager control, but there are potentially many user controls/listviews that hold the actual display data. The following code demonstrates how to use the Pager with an IQueryable that loads only the records it displays:

protected void Page_Load(object sender, EventArgs e)
{
    Category = Request.Params["Category"] ?? string.Empty;

    IQueryable<wws_Item> ItemList = ItemRepository.GetItemsByCategory(Category);

    // Update the page and filter the list down
    ItemList = ItemPager.FilterIQueryable<wws_Item>(ItemList);

    // Render user control with a list view
    Control ulItemList = LoadControl("~/usercontrols/" + App.Configuration.ItemListType + ".ascx");
    ((IInventoryItemListControl)ulItemList).InventoryItemList = ItemList;
    phItemList.Controls.Add(ulItemList); // placeholder
}

The code uses a business object to retrieve items by category as an IQueryable, which means that the result is only an expression tree that hasn’t executed SQL yet and can be further filtered. I then pass this IQueryable to the FilterIQueryable() helper method of the control, which does two main things: it filters the IQueryable to retrieve only the data displayed on the active page, and it sets the TotalItems property and calculates TotalPages on the Pager. And that’s it! When the Pager renders, it uses those values, plus the PageSize and ActivePage properties, to render the Pager. In addition to IQueryable there are also filter methods for IEnumerable<T> and DataTable, but these versions just filter the data by removing rows/items from the entire, already retrieved data.

Output Generated and Paging Links

The output generated creates pager links as plain href links. Here’s what the output looks like:

<div id="ItemPager" class="pagercontainer">
    <div class="pager">
        <span class="pagertext">Pages: </span>
        <a href="http://localhost/WestWindWebStore/itemlist.aspx?Page=1" class="pagerbutton">1</a>
        <a href="http://localhost/WestWindWebStore/itemlist.aspx?Page=2" class="pagerbutton">2</a>
        <a href="http://localhost/WestWindWebStore/itemlist.aspx?Page=3" class="pagerbutton">3</a>
        <span class="pagerbutton-selected">4</span>
        <a href="http://localhost/WestWindWebStore/itemlist.aspx?Page=5" class="pagerbutton">5</a>
        <a href="http://localhost/WestWindWebStore/itemlist.aspx?Page=6" class="pagerbutton">6</a>
        <a href="http://localhost/WestWindWebStore/itemlist.aspx?Page=20" class="pagerbutton pagerbutton-last">20</a>&nbsp;
        <a href="http://localhost/WestWindWebStore/itemlist.aspx?Page=3" class="pagerbutton pagerbutton-prev">Prev</a>&nbsp;
        <a href="http://localhost/WestWindWebStore/itemlist.aspx?Page=5" class="pagerbutton pagerbutton-next">Next</a>
    </div>
    <br clear="all" />
</div>

The links point back to the current page and simply append a Page= query string value to the URL. When the page gets reloaded with the new page number, the pager automatically detects the page number and assigns the ActivePage property, which results in the appropriate page being displayed. The code shown in the previous section is all that’s needed to handle paging. Note that HTTP GET based paging is different from the postback paging ASP.NET uses by default. Postback paging preserves modified page content when clicking on pager buttons, but this control will simply load a new page – no page preservation at this time. The advantage of not using postback paging is that the URLs generated are plain HTML links that a search engine can follow, where __doPostBack() links are not.

Pager with a Grid

The pager also works in combination with grid controls, so it’s easy to bypass the grid control’s paging features if desired. In the following example I use a DataGrid control and bind it to a DataTable result, which is also filterable by the Pager control. 
The very basic plain vanilla ASP.NET grid markup looks like this:

<div style="width: 600px; margin: 0 auto; padding: 20px;">
    <asp:DataGrid runat="server" AutoGenerateColumns="True" ID="gdItems"
                  CssClass="blackborder" style="width: 600px;">
        <AlternatingItemStyle CssClass="gridalternate" />
        <HeaderStyle CssClass="gridheader" />
    </asp:DataGrid>

    <ww:Pager runat="server" ID="Pager"
        CssClass="gridpager"
        ContainerDivCssClass="gridpagercontainer"
        PageLinkCssClass="gridpagerbutton"
        SelectedPageCssClass="gridpagerbutton-selected"
        PageSize="8"
        RenderContainerDiv="true"
        MaxPagesToDisplay="6" />
</div>

and looks like this when rendered, using a custom set of CSS styles. The code behind for this is also very simple:

protected void Page_Load(object sender, EventArgs e)
{
    string category = Request.Params["category"] ?? "";

    busItem itemRep = WebStoreFactory.GetItem();
    var items = itemRep.GetItemsByCategory(category)
                       .Select(itm => new { Sku = itm.Sku, Description = itm.Description });

    // run query into a DataTable for demonstration
    DataTable dt = itemRep.Converter.ToDataTable(items, "TItems");

    // Remove all items not on the current page
    dt = Pager.FilterDataTable(dt, 0);

    // bind and display
    gdItems.DataSource = dt;
    gdItems.DataBind();
}

A little contrived, I suppose, since the list could already be bound from the list of elements, but this demonstrates that you can also bind against a DataTable if your business layer returns those. Unfortunately there’s no way to filter a DataReader, as it’s a one-way, forward-only reader and the reader is required by the DataSource to perform the bindings. However, you can still use a DataReader as long as your business logic filters the data prior to rendering and provides a total item count, most likely as a second query (see the sketch below).
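To illustrate that last point, here is a minimal sketch of paging with a DataReader, not part of the pager itself: the query filters the page server-side, a second COUNT query supplies TotalItems, and the Pager properties are assigned manually. The Items table, connectionString and the SQL Server 2012+ OFFSET/FETCH syntax are assumptions for illustration:

// requires: using System.Data.SqlClient;
protected void Page_Load(object sender, EventArgs e)
{
    int pageSize = 8;
    int activePage;
    if (!int.TryParse(Request.Params["Page"], out activePage) || activePage < 1)
        activePage = 1;

    using (var conn = new SqlConnection(connectionString))
    {
        conn.Open();

        // the second query supplies the total item count the pager needs
        using (var countCmd = new SqlCommand("select count(*) from Items", conn))
            Pager.TotalItems = (int)countCmd.ExecuteScalar();

        Pager.PageSize = pageSize;
        Pager.ActivePage = activePage;

        // the query itself returns only the active page
        var cmd = new SqlCommand(
            @"select Sku, Description from Items order by Sku
              offset @skip rows fetch next @take rows only", conn);
        cmd.Parameters.AddWithValue("@skip", (activePage - 1) * pageSize);
        cmd.Parameters.AddWithValue("@take", pageSize);

        using (var reader = cmd.ExecuteReader())
        {
            gdItems.DataSource = reader; // already filtered, no FilterDataTable() call needed
            gdItems.DataBind();
        }
    }
}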
Control Creation

The control itself is a pretty brute force ASP.NET control. Nothing clever about it other than some basic rendering logic and some simple calculations and update routines to determine which buttons need to be shown. You can take a look at the full code in the West Wind Web Toolkit’s Repository (note there are a few dependencies). To give you an idea how the control works, here is the Render() method:

/// <summary>
/// Overridden to handle custom pager rendering for runtime and design time
/// </summary>
/// <param name="writer"></param>
protected override void Render(HtmlTextWriter writer)
{
    base.Render(writer);

    if (TotalPages == 0 && TotalItems > 0)
        TotalPages = CalculateTotalPagesFromTotalItems();

    if (DesignMode)
        TotalPages = 10;

    // don't render pager if there's only one page
    if (TotalPages < 2)
        return;

    if (RenderContainerDiv)
    {
        if (!string.IsNullOrEmpty(ContainerDivCssClass))
            writer.AddAttribute("class", ContainerDivCssClass);
        writer.RenderBeginTag("div");
    }

    // main pager wrapper
    writer.WriteBeginTag("div");
    writer.AddAttribute("id", this.ClientID);
    if (!string.IsNullOrEmpty(CssClass))
        writer.WriteAttribute("class", this.CssClass);
    writer.Write(HtmlTextWriter.TagRightChar + "\r\n");

    // Pages Text
    writer.WriteBeginTag("span");
    if (!string.IsNullOrEmpty(PagesTextCssClass))
        writer.WriteAttribute("class", PagesTextCssClass);
    writer.Write(HtmlTextWriter.TagRightChar);
    writer.Write(this.PagesText);
    writer.WriteEndTag("span");

    // if the base url is empty use the current URL
    FixupBaseUrl();

    // set _startPage and _endPage
    ConfigurePagesToRender();

    // write out first page link
    if (ShowFirstAndLastPageLinks && _startPage != 1)
    {
        writer.WriteBeginTag("a");
        string pageUrl = StringUtils.SetUrlEncodedKey(BaseUrl, QueryStringPageField, (1).ToString());
        writer.WriteAttribute("href", pageUrl);
        if (!string.IsNullOrEmpty(PageLinkCssClass))
            writer.WriteAttribute("class", PageLinkCssClass + " " + PageLinkCssClass + "-first");
        writer.Write(HtmlTextWriter.SelfClosingTagEnd);
        writer.Write("1");
        writer.WriteEndTag("a");
        writer.Write("&nbsp;");
    }

    // write out all the page links
    for (int i = _startPage; i < _endPage + 1; i++)
    {
        if (i == ActivePage)
        {
            writer.WriteBeginTag("span");
            if (!string.IsNullOrEmpty(SelectedPageCssClass))
                writer.WriteAttribute("class", SelectedPageCssClass);
            writer.Write(HtmlTextWriter.TagRightChar);
            writer.Write(i.ToString());
            writer.WriteEndTag("span");
        }
        else
        {
            writer.WriteBeginTag("a");
            string pageUrl = StringUtils.SetUrlEncodedKey(BaseUrl, QueryStringPageField, i.ToString()).TrimEnd('&');
            writer.WriteAttribute("href", pageUrl);
            if (!string.IsNullOrEmpty(PageLinkCssClass))
                writer.WriteAttribute("class", PageLinkCssClass);
            writer.Write(HtmlTextWriter.SelfClosingTagEnd);
            writer.Write(i.ToString());
            writer.WriteEndTag("a");
        }
        writer.Write("\r\n");
    }

    // write out last page link
    if (ShowFirstAndLastPageLinks && _endPage < TotalPages)
    {
        writer.WriteBeginTag("a");
        string pageUrl = StringUtils.SetUrlEncodedKey(BaseUrl, QueryStringPageField, TotalPages.ToString());
        writer.WriteAttribute("href", pageUrl);
        if (!string.IsNullOrEmpty(PageLinkCssClass))
            writer.WriteAttribute("class", PageLinkCssClass + " " + PageLinkCssClass + "-last");
        writer.Write(HtmlTextWriter.SelfClosingTagEnd);
        writer.Write(TotalPages.ToString());
        writer.WriteEndTag("a");
    }

    // Previous link
    if (ShowPreviousNextLinks && !string.IsNullOrEmpty(PreviousText) && ActivePage > 1)
    {
        writer.Write("&nbsp;");
        writer.WriteBeginTag("a");
        string pageUrl = StringUtils.SetUrlEncodedKey(BaseUrl, QueryStringPageField, (ActivePage - 1).ToString());
        writer.WriteAttribute("href", pageUrl);
        if (!string.IsNullOrEmpty(PageLinkCssClass))
            writer.WriteAttribute("class", PageLinkCssClass + " " + PageLinkCssClass + "-prev");
        writer.Write(HtmlTextWriter.SelfClosingTagEnd);
        writer.Write(PreviousText);
        writer.WriteEndTag("a");
    }

    // Next link
    if (ShowPreviousNextLinks && !string.IsNullOrEmpty(NextText) && ActivePage < TotalPages)
    {
        writer.Write("&nbsp;");
        writer.WriteBeginTag("a");
        string pageUrl = StringUtils.SetUrlEncodedKey(BaseUrl, QueryStringPageField, (ActivePage + 1).ToString());
        writer.WriteAttribute("href", pageUrl);
        if (!string.IsNullOrEmpty(PageLinkCssClass))
            writer.WriteAttribute("class", PageLinkCssClass + " " + PageLinkCssClass + "-next");
        writer.Write(HtmlTextWriter.SelfClosingTagEnd);
        writer.Write(NextText);
        writer.WriteEndTag("a");
    }

    writer.WriteEndTag("div");

    if (RenderContainerDiv)
    {
        if (RenderContainerDivBreak)
            writer.Write("<br clear=\"all\" />\r\n");
        writer.WriteEndTag("div");
    }
}

As I said, pretty much brute force rendering based on the control’s property settings, of which there are quite a few. You can also see the pager in the designer. Unfortunately the VS designer (both 2010 and 2008) fails to render the float: left CSS styles properly and starts wrapping after margins are applied in the special buttons. Not a big deal since VS does at least respect the spacing (the floated elements overlay). Then again, I’m not using the designer anyway :-}.

Filtering Data

What makes the Pager easy to use are the filter methods built into the control. While this functionality is clearly not the most politically correct design choice, as it violates separation of concerns, it’s very useful for typical pager operation. While I actually have filter methods that do something similar in my business layer, having it exposed on the control makes the control a lot more useful for typical databinding scenarios. Of course these methods are optional – if you have a business layer that can provide filtered page queries for you, you can use that instead and assign the TotalItems property manually.

There are three filter method types available, for IQueryable, IEnumerable and DataTable, which tend to be the most common use cases in my apps, old and new. The IQueryable version is pretty simple, as it can simply rely on .Skip() and .Take() with LINQ:

/// <summary>
/// Queries the database for the ActivePage applied manually
/// or from the Request["page"] variable. This routine
/// figures out and sets TotalPages, ActivePage and
/// returns a filtered subset IQueryable that contains
/// only the items from the ActivePage.
/// </summary>
/// <param name="query"></param>
/// <param name="activePage">
/// The page you want to display. Sets the ActivePage property when passed.
/// Pass 0 or smaller to use ActivePage setting.
/// </param>
/// <returns></returns>
public IQueryable<T> FilterIQueryable<T>(IQueryable<T> query, int activePage)
    where T : class, new()
{
    ActivePage = activePage < 1 ? ActivePage : activePage;
    if (ActivePage < 1)
        ActivePage = 1;

    TotalItems = query.Count();

    if (TotalItems <= PageSize)
    {
        ActivePage = 1;
        TotalPages = 1;
        return query;
    }

    int skip = ActivePage - 1;
    if (skip > 0)
        query = query.Skip(skip * PageSize);

    _TotalPages = CalculateTotalPagesFromTotalItems();

    return query.Take(PageSize);
}

The IEnumerable<T> version simply converts the IEnumerable to an IQueryable and calls back into this method for filtering.
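That overload isn’t shown in the post, but given that description it is presumably little more than the following sketch (the method name and signature are assumptions, mirroring the pattern above):

/// <summary>
/// Filters an IEnumerable<T> by converting it to an IQueryable<T> and
/// delegating to FilterIQueryable(). Note the full set has already been
/// retrieved at this point; only the rendered subset is reduced.
/// </summary>
public IQueryable<T> FilterIEnumerable<T>(IEnumerable<T> items, int activePage)
    where T : class, new()
{
    return FilterIQueryable<T>(items.AsQueryable(), activePage);
}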
The DataTable version requires a little more work, to manually parse and filter records (I didn’t want to add the LINQ DataSetExtensions assembly just for this):

/// <summary>
/// Filters a data table for an ActivePage.
///
/// Note: Modifies the data set permanently by removing DataRows
/// </summary>
/// <param name="dt">Full result DataTable</param>
/// <param name="activePage">Page to display. 0 to use ActivePage property</param>
/// <returns></returns>
public DataTable FilterDataTable(DataTable dt, int activePage)
{
    ActivePage = activePage < 1 ? ActivePage : activePage;
    if (ActivePage < 1)
        ActivePage = 1;

    TotalItems = dt.Rows.Count;

    if (TotalItems <= PageSize)
    {
        ActivePage = 1;
        TotalPages = 1;
        return dt;
    }

    int skip = ActivePage - 1;
    if (skip > 0)
    {
        for (int i = 0; i < skip * PageSize; i++)
            dt.Rows.RemoveAt(0);
    }
    while (dt.Rows.Count > PageSize)
        dt.Rows.RemoveAt(PageSize);

    return dt;
}

Using the Pager Control

The pager as it stands is a first cut I built a couple of weeks ago and have been tweaking a little since then as part of an internal project I’m working on. I’ve replaced a bunch of pagers on various older pages with this pager without any issues, and now have what feels like a more consistent user interface, where paging looks and feels the same across different controls. As a bonus, I’m only loading the data from the database that I need to display a single page. With the preset class tags applied too, adding a pager is now as easy as dropping the control and adding the style sheet for styling to be consistent – no fuss, no muss. Schweet. Hopefully some of you may find this as useful as I have, or at least as a baseline to build on top of…

Resources

The Pager is part of the West Wind Web & Ajax Toolkit
Pager.cs Source Code (some toolkit dependencies)
Westwind.css base stylesheet with .pager and .gridpager styles
Pager Example Page

© Rick Strahl, West Wind Technologies, 2005-2010. Posted in ASP.NET  

    Read the article

  • NoSQL with RavenDB and ASP.NET MVC - Part 1

    - by shiju
     A while back, I blogged NoSQL with MongoDB, NoRM and ASP.NET MVC Part 1 and Part 2 on how to use MongoDB with an ASP.NET MVC application. The NoSQL movement is getting big attention, and RavenDB is the latest addition to the NoSQL and document database world. RavenDB is an Open Source (with a commercial option) document database for the .NET/Windows platform developed by Ayende Rahien. Raven stores schema-less JSON documents, lets you define indexes using LINQ queries, and focuses on low latency and high performance. RavenDB is a .NET-focused document database which comes with a fully functional .NET client API and supports LINQ. RavenDB comes with two components, a server and a client API. RavenDB is a REST based system, so you can write your own HTTP client API. As a .NET developer, I find RavenDB is becoming my favorite document database. Unlike some other document databases, RavenDB supports transactions using System.Transactions. It also supports both embedded and server modes of the database. You can access the RavenDB site at http://ravendb.net

A demo App with ASP.NET MVC

Let's create a simple demo app with RavenDB and ASP.NET MVC. To work with RavenDB, do the following steps:

Go to http://ravendb.net/download and download the latest build.
Unzip the downloaded file.
Go to the /Server directory and run RavenDB.exe. This will start the RavenDB server listening on localhost:8080. You can change the port of RavenDB by modifying the "Raven/Port" appSetting value in the RavenDB.exe.config file.
When running RavenDB, it will automatically create a database in the /Data directory. You can change the data directory name by modifying the "Raven/DataDir" appSetting value in the RavenDB.exe.config file.

RavenDB provides a browser based admin tool. When the Raven server is running, you can view and edit documents and indexes using the browser based admin tool, available at http://localhost:8080.

Working with ASP.NET MVC

To work with RavenDB in our demo ASP.NET MVC application, do the following steps.

Step 1 - Add reference to Raven Client API

In our ASP.NET MVC application, add a reference to Raven.Client.Lightweight.dll from the Client directory.

Step 2 - Create DocumentStore

The document store should be created once per application. Let's create a DocumentStore on application start-up in the Global.asax.cs:

documentStore = new DocumentStore { Url = "http://localhost:8080/" };
documentStore.Initialise();

The above code will create a RavenDB document store listening on the server at localhost, port 8080.

Step 3 - Create DocumentSession on BeginRequest

Let's create a DocumentSession on the BeginRequest event in the Global.asax.cs. We use a document session for every unit of work; in our demo app, every HTTP request will be a single Unit of Work (UoW). 
BeginRequest += (sender, args) =>
    HttpContext.Current.Items[RavenSessionKey] = documentStore.OpenSession();

Step 4 - Destroy the DocumentSession on EndRequest

EndRequest += (o, eventArgs) =>
{
    var disposable = HttpContext.Current.Items[RavenSessionKey] as IDisposable;
    if (disposable != null)
        disposable.Dispose();
};

At the end of the HTTP request, we destroy the DocumentSession object. The code block below shows all the code in the Global.asax.cs:

private const string RavenSessionKey = "RavenMVC.Session";
private static DocumentStore documentStore;

protected void Application_Start()
{
    // Create a DocumentStore in Application_Start.
    // The DocumentStore should be created once per application and stored as a singleton.
    documentStore = new DocumentStore { Url = "http://localhost:8080/" };
    documentStore.Initialise();
    AreaRegistration.RegisterAllAreas();
    RegisterRoutes(RouteTable.Routes);
    // DI using Unity 2.0
    ConfigureUnity();
}

public MvcApplication()
{
    // Create a DocumentSession on BeginRequest:
    // a document session for every unit of work
    BeginRequest += (sender, args) =>
        HttpContext.Current.Items[RavenSessionKey] = documentStore.OpenSession();

    // Destroy the DocumentSession on EndRequest
    EndRequest += (o, eventArgs) =>
    {
        var disposable = HttpContext.Current.Items[RavenSessionKey] as IDisposable;
        if (disposable != null)
            disposable.Dispose();
    };
}

// Getting the current DocumentSession
public static IDocumentSession CurrentSession
{
    get { return (IDocumentSession)HttpContext.Current.Items[RavenSessionKey]; }
}

We have set up all the necessary code in the Global.asax.cs for working with RavenDB. For our demo app, let's write a domain class:

public class Category
{
    public string Id { get; set; }

    [Required(ErrorMessage = "Name Required")]
    [StringLength(25, ErrorMessage = "Must be less than 25 characters")]
    public string Name { get; set; }

    public string Description { get; set; }
}

We have created a simple domain entity, Category. Let's create a repository class for performing CRUD operations against our domain entity Category. 
public interface ICategoryRepository
{
    Category Load(string id);
    IEnumerable<Category> GetCategories();
    void Save(Category category);
    void Delete(string id);
}

public class CategoryRepository : ICategoryRepository
{
    private IDocumentSession session;

    public CategoryRepository()
    {
        session = MvcApplication.CurrentSession;
    }

    // Load category based on Id
    public Category Load(string id)
    {
        return session.Load<Category>(id);
    }

    // Get all categories
    public IEnumerable<Category> GetCategories()
    {
        var categories = session.LuceneQuery<Category>()
            .WaitForNonStaleResults()
            .ToArray();
        return categories;
    }

    // Insert/Update category
    public void Save(Category category)
    {
        if (string.IsNullOrEmpty(category.Id))
        {
            // insert new record
            session.Store(category);
        }
        else
        {
            // edit record
            var categoryToEdit = Load(category.Id);
            categoryToEdit.Name = category.Name;
            categoryToEdit.Description = category.Description;
        }
        // save the document session
        session.SaveChanges();
    }

    // delete a category
    public void Delete(string id)
    {
        var category = Load(id);
        session.Delete<Category>(category);
        session.SaveChanges();
    }
}

For every CRUD operation, we take the current document session object from the HttpContext:

session = MvcApplication.CurrentSession;

We are calling the static method CurrentSession from the Global.asax.cs:

public static IDocumentSession CurrentSession
{
    get { return (IDocumentSession)HttpContext.Current.Items[RavenSessionKey]; }
}

Retrieve Entities

The Load method gets a single Category object based on the Id. RavenDB works on REST principles, and an Id looks like categories/1. The Id is created automatically when a new object is inserted into the document store. The REST URI categories/1 represents a single category object with an Id representation of 1.

public Category Load(string id)
{
    return session.Load<Category>(id);
}

The GetCategories method returns all the categories by calling the session.LuceneQuery method. RavenDB uses Lucene query syntax for querying. I will explain more details about querying and indexing in future posts.

public IEnumerable<Category> GetCategories()
{
    var categories = session.LuceneQuery<Category>()
        .WaitForNonStaleResults()
        .ToArray();
    return categories;
}

Insert/Update entity

For inserting/updating a Category entity, we have created the Save method in the repository class. If the Id property of the Category is null, we call the Store method of the DocumentSession to insert a new record. For editing an existing record, we load the Category object and assign the values to the loaded Category object. session.SaveChanges() will save the changes to the document store. 
// Insert/Update category
public void Save(Category category)
{
    if (string.IsNullOrEmpty(category.Id))
    {
        // insert new record
        session.Store(category);
    }
    else
    {
        // edit record
        var categoryToEdit = Load(category.Id);
        categoryToEdit.Name = category.Name;
        categoryToEdit.Description = category.Description;
    }
    // save the document session
    session.SaveChanges();
}

Delete Entity

In the Delete method, we call the document session's Delete method and then SaveChanges to reflect the change in the document store.

public void Delete(string id)
{
    var category = Load(id);
    session.Delete<Category>(category);
    session.SaveChanges();
}

Let's create an ASP.NET MVC controller and controller actions for handling CRUD operations for the domain class Category:

public class CategoryController : Controller
{
    private ICategoryRepository categoryRepository;

    // DI enabled constructor
    public CategoryController(ICategoryRepository categoryRepository)
    {
        this.categoryRepository = categoryRepository;
    }

    public ActionResult Index()
    {
        var categories = categoryRepository.GetCategories();
        if (categories == null)
            return RedirectToAction("Create");
        return View(categories);
    }

    [HttpGet]
    public ActionResult Edit(string id)
    {
        var category = categoryRepository.Load(id);
        return View("Save", category);
    }

    // GET: /Category/Create
    [HttpGet]
    public ActionResult Create()
    {
        var category = new Category();
        return View("Save", category);
    }

    [HttpPost]
    public ActionResult Save(Category category)
    {
        if (!ModelState.IsValid)
        {
            return View("Save", category);
        }
        categoryRepository.Save(category);
        return RedirectToAction("Index");
    }

    [HttpPost]
    public ActionResult Delete(string id)
    {
        categoryRepository.Delete(id);
        var categories = categoryRepository.GetCategories();
        return PartialView("CategoryList", categories);
    }
}

RavenDB is an awesome document database, and I hope that it will be the winner in the .NET document database space. The source code of the demo application is available at http://ravenmvc.codeplex.com/
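The ConfigureUnity() method called from Application_Start isn't shown in the post. A minimal sketch of what it could look like with Unity 2.0 and a custom controller factory follows; the class and method names here are assumptions for illustration, not taken from the post:

using System;
using System.Web.Mvc;
using System.Web.Routing;
using Microsoft.Practices.Unity;

public class UnityControllerFactory : DefaultControllerFactory
{
    private readonly IUnityContainer container;

    public UnityControllerFactory(IUnityContainer container)
    {
        this.container = container;
    }

    protected override IController GetControllerInstance(RequestContext requestContext, Type controllerType)
    {
        // fall back to the default behavior for unmatched routes (404s)
        if (controllerType == null)
            return base.GetControllerInstance(requestContext, controllerType);

        // resolve the controller so constructor dependencies
        // (e.g. ICategoryRepository) are injected
        return (IController)container.Resolve(controllerType);
    }
}

// In Global.asax.cs:
private void ConfigureUnity()
{
    var container = new UnityContainer();
    container.RegisterType<ICategoryRepository, CategoryRepository>();
    ControllerBuilder.Current.SetControllerFactory(new UnityControllerFactory(container));
}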

    Read the article

  • ASP.NET Web API Exception Handling

    - by Fredrik N
    When I talk about exceptions with my product team I often talk about two kinds of exceptions: business and critical exceptions. Business exceptions are exceptions thrown based on “business rules”, for example if you aren’t allowed to do a purchase. Business exceptions in most cases aren’t important to log to a log file; they can be shown directly to the user. Examples of business exceptions could be “DeniedToPurchaseException”, or validation exceptions such as “FirstNameIsMissingException”. Critical exceptions are all other kinds of exceptions, such as the SQL Server being down. Those kinds of exception messages need to be logged and should not reach the user, because they can contain information that can be harmful if it reaches the wrong kind of user. I often distinguish business exceptions from critical exceptions by creating a base class called BusinessException (see the sketch at the end of this section); in my error handling code I then catch on the type BusinessException, and all other exceptions are handled as critical exceptions. This blog post will be about different ways to handle exceptions and how business and critical exceptions could be handled.

Web API and Exceptions: the basics

When an exception is thrown in an ApiController, a response message will be returned with the status code set to 500 and a response formatted by the formatters based on the “Accept” or “Content-Type” HTTP header, for example JSON or XML. Here is an example:

public IEnumerable<string> Get()
{
    throw new ApplicationException("Error!!!!!");

    return new string[] { "value1", "value2" };
}

The response message will be:

HTTP/1.1 500 Internal Server Error
Content-Length: 860
Content-Type: application/json; charset=utf-8

{ "ExceptionType":"System.ApplicationException","Message":"Error!!!!!","StackTrace":" at ..."}

The stack trace is returned to the client to make it easier to debug. Be careful that you don’t leak sensitive information to the client. As long as you are developing your API this is not harmful, but in a production environment it can be better to log exceptions and return a user friendly exception instead of the original exception. 
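Here is a minimal sketch of the BusinessException pattern described above; the derived exception name and the business call are hypothetical:

using System;

public class BusinessException : Exception
{
    public BusinessException(string message) : base(message) { }
}

// Business exceptions derive from the base class...
public class DeniedToPurchaseException : BusinessException
{
    public DeniedToPurchaseException()
        : base("You are not allowed to make this purchase.") { }
}

public static class ErrorHandlingExample
{
    // ...so error handling code can catch on the base type and treat
    // everything else as a critical exception.
    public static void Run(Action businessCall)
    {
        try
        {
            businessCall();
        }
        catch (BusinessException e)
        {
            // business rule violation: safe to show e.Message to the user
            Console.WriteLine(e.Message);
        }
        catch (Exception e)
        {
            // critical: log the details, show a generic message instead
            Console.WriteLine("An error occurred, please contact the administrator.");
        }
    }
}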
There is a specific exception shipped with ASP.NET Web API that will not use the formatters based on the “Accept” or “Content-Type” HTTP header: the HttpResponseException class. Here is an example where the HttpResponseException is used:

// GET api/values
[ExceptionHandling]
public IEnumerable<string> Get()
{
    throw new HttpResponseException(new HttpResponseMessage(HttpStatusCode.InternalServerError));

    return new string[] { "value1", "value2" };
}

The response will not contain any content, only header information and the status code based on the HttpStatusCode passed as an argument to the HttpResponseMessage. Because the HttpResponseException takes an HttpResponseMessage as an argument, we can give the response content:

public IEnumerable<string> Get()
{
    throw new HttpResponseException(new HttpResponseMessage(HttpStatusCode.InternalServerError)
    {
        Content = new StringContent("My Error Message"),
        ReasonPhrase = "Critical Exception"
    });

    return new string[] { "value1", "value2" };
}

The code above will have the following response:

HTTP/1.1 500 Critical Exception
Content-Length: 5
Content-Type: text/plain; charset=utf-8

My Error Message

The Content property of the HttpResponseMessage doesn’t need to be just plain text; it can also be other formats, for example JSON or XML. 
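For example, here is a sketch of returning a structured JSON body with the exception, using ObjectContent with the default JSON formatter from System.Net.Http.Formatting (the ErrorInfo DTO is a hypothetical type for illustration):

public class ErrorInfo
{
    public string Message { get; set; }
    public int Code { get; set; }
}

// inside an action method:
throw new HttpResponseException(new HttpResponseMessage(HttpStatusCode.InternalServerError)
{
    // serialize a small DTO as JSON instead of plain text
    Content = new ObjectContent<ErrorInfo>(
        new ErrorInfo { Message = "My Error Message", Code = 42 },
        new JsonMediaTypeFormatter()),
    ReasonPhrase = "Critical Exception"
});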
By using the HttpResponseException we can, for example, catch an exception and throw a user friendly exception instead:

public IEnumerable<string> Get()
{
    try
    {
        DoSomething();
        return new string[] { "value1", "value2" };
    }
    catch (Exception e)
    {
        throw new HttpResponseException(new HttpResponseMessage(HttpStatusCode.InternalServerError)
        {
            Content = new StringContent("An error occurred, please try again or contact the administrator."),
            ReasonPhrase = "Critical Exception"
        });
    }
}

Adding a try/catch to every ApiController method will only end in duplicated code. By using a custom ExceptionFilterAttribute, or our own custom ApiController base class, we can reduce the duplication and also have a more general exception handler for our ApiControllers. By creating a custom ApiController and overriding the ExecuteAsync method, we can add a try/catch around the base.ExecuteAsync call, but I prefer to skip creating a custom ApiController; better to use a solution that requires few files to be modified. The ExceptionFilterAttribute has an OnException method that we can override and add our exception handling to. Here is an example:

using System;
using System.Diagnostics;
using System.Net;
using System.Net.Http;
using System.Web.Http;
using System.Web.Http.Filters;

public class ExceptionHandlingAttribute : ExceptionFilterAttribute
{
    public override void OnException(HttpActionExecutedContext context)
    {
        if (context.Exception is BusinessException)
        {
            throw new HttpResponseException(new HttpResponseMessage(HttpStatusCode.InternalServerError)
            {
                Content = new StringContent(context.Exception.Message),
                ReasonPhrase = "Exception"
            });
        }

        // Log critical errors
        Debug.WriteLine(context.Exception);

        throw new HttpResponseException(new HttpResponseMessage(HttpStatusCode.InternalServerError)
        {
            Content = new StringContent("An error occurred, please try again or contact the administrator."),
            ReasonPhrase = "Critical Exception"
        });
    }
}

Note: Something to keep in mind is that the ExceptionFilterAttribute will be ignored if the ApiController action method throws an HttpResponseException. The code above will always make sure an HttpResponseException is returned; it will also make sure critical exceptions show a more user friendly message. 
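One way to enable the filter above for every ApiController is to register it in the global filter collection at application start; a minimal sketch:

protected void Application_Start()
{
    // register the exception filter globally for all ApiControllers
    GlobalConfiguration.Configuration.Filters.Add(new ExceptionHandlingAttribute());

    // ... the rest of the usual startup code (routes etc.)
}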
The OnException method can also be used to log exceptions. By using an ExceptionFilterAttribute, the Get() method in the previous example can now look like this:

public IEnumerable<string> Get()
{
    DoSomething();
    return new string[] { "value1", "value2" };
}

To use an ExceptionFilterAttribute, we can for example add it to our ApiController methods or to the ApiController class definition, or register it globally for all ApiControllers (see the registration sketch above). You can read more about it here. Note: If something goes wrong in the ExceptionFilterAttribute and an exception is thrown that is not of type HttpResponseException, a formatted exception with stack trace etc. will be returned to the client.

How about using a custom IHttpActionInvoker?

We can create our own IHttpActionInvoker and add exception handling to the invoker. The IHttpActionInvoker is used to invoke the ApiController’s actions. Here is an example where the default IHttpActionInvoker, ApiControllerActionInvoker, is used to add exception handling:

public class MyApiControllerActionInvoker : ApiControllerActionInvoker
{
    public override Task<HttpResponseMessage> InvokeActionAsync(HttpActionContext actionContext, System.Threading.CancellationToken cancellationToken)
    {
        var result = base.InvokeActionAsync(actionContext, cancellationToken);

        if (result.Exception != null && result.Exception.GetBaseException() != null)
        {
            var baseException = result.Exception.GetBaseException();

            if (baseException is BusinessException)
            {
                return Task.Run<HttpResponseMessage>(() => new HttpResponseMessage(HttpStatusCode.InternalServerError)
                {
                    Content = new StringContent(baseException.Message),
                    ReasonPhrase = "Error"
                });
            }
            else
            {
                // Log critical error
                Debug.WriteLine(baseException);

                return Task.Run<HttpResponseMessage>(() => new HttpResponseMessage(HttpStatusCode.InternalServerError)
                {
                    Content = new StringContent(baseException.Message),
                    ReasonPhrase = "Critical Error"
                });
            }
        }

        return result;
    }
}

You can register the IHttpActionInvoker with your own IoC container to resolve MyApiControllerActionInvoker, or add it in the Global.asax:

GlobalConfiguration.Configuration.Services.Remove(typeof(IHttpActionInvoker), GlobalConfiguration.Configuration.Services.GetActionInvoker()); 
GlobalConfiguration.Configuration.Services.Add(typeof(IHttpActionInvoker), new MyApiControllerActionInvoker());

How about using a Message Handler for Exception Handling?

By creating a custom Message Handler, we can handle errors after the ApiController and the ExceptionFilterAttribute are invoked, and in that way create a global exception handler. BUT, the only thing we can look at is the HttpResponseMessage; we can’t add a try/catch around the Message Handler’s SendAsync method. The last Message Handler used in the Web API pipeline is the HttpControllerDispatcher, and this Message Handler is added to the HttpServer at an early stage. The HttpControllerDispatcher will use the IHttpActionInvoker to invoke the ApiController method. The HttpControllerDispatcher has a try/catch that will turn ALL exceptions into an HttpResponseMessage, and that is the reason why a try/catch around SendAsync in a custom Message Handler won’t help us. If we create our own host for the Web API, we could create our own custom HttpControllerDispatcher and add our exception handler to that class; that would be a little tricky, but it is possible.

We can, in a Message Handler, take a look at the HttpResponseMessage’s IsSuccessStatusCode property to see if the request has failed, and if we throw HttpResponseExceptions in our ApiControllers, we could give them a Reason Phrase and use that to identify business exceptions or critical exceptions. I wouldn’t add an exception handler into a Message Handler; instead I would use the ExceptionFilterAttribute and register it globally for all ApiControllers.

BUT, now to another interesting issue. What will happen if we have a Message Handler that throws an exception? Those exceptions will not be caught and handled by the ExceptionFilterAttribute. I found a bug in my previous blog post about “Log message Request and Response in ASP.NET WebAPI”, in the MessageHandler I use to log incoming and outgoing messages. 
Here is the code from my blog before I fixed the bug:

public abstract class MessageHandler : DelegatingHandler
{
    protected override async Task<HttpResponseMessage> SendAsync(HttpRequestMessage request, CancellationToken cancellationToken)
    {
        var corrId = string.Format("{0}{1}", DateTime.Now.Ticks, Thread.CurrentThread.ManagedThreadId);
        var requestInfo = string.Format("{0} {1}", request.Method, request.RequestUri);

        var requestMessage = await request.Content.ReadAsByteArrayAsync();

        await IncommingMessageAsync(corrId, requestInfo, requestMessage);

        var response = await base.SendAsync(request, cancellationToken);

        var responseMessage = await response.Content.ReadAsByteArrayAsync();

        await OutgoingMessageAsync(corrId, requestInfo, responseMessage);

        return response;
    }

    protected abstract Task IncommingMessageAsync(string correlationId, string requestInfo, byte[] message);
    protected abstract Task OutgoingMessageAsync(string correlationId, string requestInfo, byte[] message);
}

If an ApiController throws an HttpResponseException, the Content property of the HttpResponseMessage from SendAsync will be NULL, so a null reference exception is thrown within the MessageHandler. The yellow screen of death will be returned to the client; the content is HTML and the HTTP status code is 500. 
The bug in the MessageHandler was solved by adding a check against the HttpResponseMessage’s IsSuccessStatusCode property:

public abstract class MessageHandler : DelegatingHandler
{
    protected override async Task<HttpResponseMessage> SendAsync(HttpRequestMessage request, CancellationToken cancellationToken)
    {
        var corrId = string.Format("{0}{1}", DateTime.Now.Ticks, Thread.CurrentThread.ManagedThreadId);
        var requestInfo = string.Format("{0} {1}", request.Method, request.RequestUri);

        var requestMessage = await request.Content.ReadAsByteArrayAsync();

        await IncommingMessageAsync(corrId, requestInfo, requestMessage);

        var response = await base.SendAsync(request, cancellationToken);

        byte[] responseMessage;
        if (response.IsSuccessStatusCode)
            responseMessage = await response.Content.ReadAsByteArrayAsync();
        else
            responseMessage = Encoding.UTF8.GetBytes(response.ReasonPhrase);

        await OutgoingMessageAsync(corrId, requestInfo, responseMessage);

        return response;
    }

    protected abstract Task IncommingMessageAsync(string correlationId, string requestInfo, byte[] message);
    protected abstract Task OutgoingMessageAsync(string correlationId, string requestInfo, byte[] message);
}

If we don’t handle the exceptions that can occur in a custom Message Handler, we can have a hard time finding the problem causing the exception. The savior in this case is the Global.asax’s Application_Error:

protected void Application_Error()
{
    var exception = Server.GetLastError();
    Debug.WriteLine(exception);
}

I would recommend adding Application_Error to the Global.asax and logging all exceptions, to make sure every kind of exception is handled.

Summary

There are different ways we can add exception handling to the Web API: we can use a custom ApiController, an ExceptionFilterAttribute, an IHttpActionInvoker or a Message Handler. The ExceptionFilterAttribute is a good place to add global exception handling; it requires very few modifications, just register it globally for all ApiControllers. Even the IHttpActionInvoker can be used to minimize the number of files modified. Adding Application_Error to the Global.asax is a good way to catch all unhandled exceptions that can occur, for example exceptions thrown in a Message Handler.   
If you want to know when I have posted a new blog entry, you can follow me on Twitter: @fredrikn

    Read the article

  • E-Business Suite Technology Sessions at OpenWorld 2012

    - by Max Arderius
    Oracle OpenWorld 2012 is almost here! We're looking forward to updating you on our products, strategy, and roadmaps. This year, the E-Business Suite Applications Technology Group (ATG) will participate in 25 speaker sessions, two Meet the Experts round-table discussions, five demoground booths and seven Special Interest Group meetings as guest speakers. We hope to see you at our sessions.  Please join us to hear the latest news and connect with senior ATG development staff. Here's a downloadable listing of all Applications Technology Group-related sessions with times and locations: FOCUS ON Oracle E-Business Suite - Applications Tools and Technology (PDF) General Sessions GEN8474 - Oracle E-Business Suite - Strategy, Update, and RoadmapCliff Godwin, SVP, Oracle Monday, Oct 1, 12:15 PM - 1:15 PM - Moscone West 2002/2004 In this session, hear Oracle E-Business Suite General Manager Cliff Godwin deliver an update on the Oracle E-Business Suite product line. This session covers the value delivered by the current release of Oracle E-Business Suite, the momentum, and how Oracle E-Business Suite applications integrate into Oracle’s overall applications strategy. You’ll come away with an understanding of the value Oracle E-Business Suite applications deliver now and will deliver in the future. GEN9173 - Optimize and Extend Oracle Applications - The Path to Oracle Fusion ApplicationsNadia Bendjedou, Oracle; Corre Curtice, Bhavish Madurai (CSC) Tuesday, Oct 2, 10:15 AM - 11:15 AM - Moscone West 3002/3004 One of the main objectives of this session is to help organizations build their IT roadmap for the next five years and be aligned with the Oracle Applications strategy in general and the Oracle Fusion Applications strategy in particular. Come hear about some of the common sense, practical steps you can take to optimize the performance of your Oracle Applications today and prepare your path to Oracle Fusion Applications for when your organization is ready to embrace them. Each step you take in adopting Oracle Fusion technology gets you partway to Oracle Fusion Applications. Conference Sessions CON9024 - Oracle E-Business Suite Technology: Latest Features and Roadmap Lisa Parekh, Oracle Monday, Oct 1, 10:45 AM - 11:45 AM - Moscone West 2016 This Oracle development session provides a comprehensive overview of Oracle’s product strategy for Oracle E-Business Suite technology, the capabilities and associated business benefits of recent releases, and a review of capabilities on the product roadmap. This is the cornerstone session for the Oracle E-Business Suite technology stack. Come hear about the latest new usability enhancements of the user interface; systems administration and configuration management tools; security-related updates; and tools and options for extending, customizing, and integrating Oracle E-Business Suite with other applications. CON9021 - Oracle E-Business Suite Future Directions: Deployment and System AdministrationMax Arderius, Oracle Monday, Oct 1, 3:15 PM - 4:15 PM - Moscone West 2016  What’s coming in the next major version of Oracle E-Business Suite 12? This Oracle Development session covers the latest technology stack, including the use of Oracle WebLogic Server (Oracle Fusion Middleware 11g) and Oracle Database 11g Release 2 (11.2). Topics include an architectural overview of the latest updates, installation and upgrade options, new configuration options, and new tools for hot cloning and automated “lights-out” cloning. 
CON9017 - Desktop Integration in Oracle E-Business Suite 12.1
Padmaprabodh Ambale, Gustavo Jimenez, Oracle
Monday, Oct 1, 4:45 PM - 5:45 PM - Moscone West 2016
This presentation covers the latest functional enhancements in Oracle Web Applications Desktop Integrator and Oracle Report Manager, enhanced Microsoft Office support, and greater support for building custom desktop integration solutions. The session also presents tips and tricks for upgrading from Oracle Applications Desktop Integrator to Oracle Web Applications Desktop Integrator and Oracle Report Manager.

CON9023 - Oracle E-Business Suite Technology Certification Primer and Roadmap
Steven Chan, Oracle
Tuesday, Oct 2, 10:15 AM - 11:15 AM - Moscone West 2016
Is your Oracle E-Business Suite technology stack up to date? Are you taking advantage of all the latest options and capabilities? This Oracle development session summarizes the latest certifications and roadmap for the Oracle E-Business Suite technology stack, including elements such as database releases and options, Java, Oracle Forms, Oracle Containers for J2EE, desktop operating systems, browsers, JRE releases, development and Web authoring tools, user authentication and management, business intelligence, Oracle Application Management Packs, security options, clouds, Oracle VM, and virtualization. The session also covers the most commonly asked questions about tech stack component support dates and upgrade implications.

CON9028 - Minimizing Oracle E-Business Suite Maintenance Downtimes
Santiago Bastidas, Elke Phelps, Oracle
Tuesday, Oct 2, 11:45 AM - 12:45 PM - Moscone West 2016
This Oracle development session features a survey of the best techniques sysadmins can use to minimize patching downtimes. It starts with an architectural-level review of Oracle E-Business Suite fundamentals and then moves to a practical view of the various tools and approaches for minimizing downtimes. Topics include patching shortcuts, merging patches, distributing worker processes across multiple servers, running ADPatch in noninteractive mode, staged APPL_TOPs, shared file systems, deferring systemwide database tasks, avoiding resource bottlenecks, and more. An added bonus: hear about the upcoming Oracle E-Business Suite 12 online patching capabilities, based on the groundbreaking Oracle Database 11g Release 2 Edition-Based Redefinition feature.

CON9116 - Extending the Use of Oracle E-Business Suite with the Oracle Endeca Platform
Osama Elkady, Muhannad Obeidat, Oracle
Tuesday, Oct 2, 11:45 AM - 12:45 PM - Moscone West 2018
The Oracle Endeca platform includes a leading unstructured data correlation and analytics engine, together with a best-in-class catalog search and guided navigation solution, to improve the productivity of all types of users in your enterprise. This development session focuses on the details behind the Oracle Endeca platform's integration into Oracle E-Business Suite. It demonstrates how easily you can extend the use of the Oracle Endeca platform into other areas of Oracle E-Business Suite and how you can bring in your own data and build new Oracle Endeca applications for Oracle E-Business Suite.
CON9005 - Oracle E-Business Suite Integration Best Practices
Veshaal Singh, Oracle; Jeffrey Hand, Zebra Technologies
Tuesday, Oct 2, 1:15 PM - 2:15 PM - Moscone West 2018
Oracle is investing across applications and technologies to make the application integration experience easier for customers. Today Oracle has certified Oracle E-Business Suite on Oracle Fusion Middleware 11g and provides a comprehensive set of integration technologies. Learn about Oracle's integration offering across data- and process-centric integrations. These technologies can be used to address various application integration challenges and styles. In this session, you will get an understanding of how, when, and where you can leverage Oracle's integration technologies to connect end-to-end business processes across your enterprise, including your Oracle Applications portfolio.

CON9026 - Latest Oracle E-Business Suite 12.1 User Interface and Usability Enhancements
Padmaprabodh Ambale, Oracle
Tuesday, Oct 2, 1:15 PM - 2:15 PM - Moscone West 2016
This Oracle development session details the latest UI enhancements to Oracle Application Framework in Oracle E-Business Suite 12.1. Developers will get a detailed look at new features to enhance usability, offer more capabilities for personalization and extensions, and support the development and use of dashboards and Web services. Topics include new rich UI capabilities such as new home page features, Navigator and Favorites pull-down menus, REST interface, embedded widgets for analytics content, Oracle Application Development Framework (Oracle ADF) task flows, third-party widgets, a look-ahead list of values, inline attachments, pop-ups, personalization and extensibility enhancements, business layer extensions, Oracle ADF integration, and mobile devices.

CON8805 - Planning Your Oracle E-Business Suite Upgrade from 11i to Release 12.1 and Beyond
Anne Carlson, Oracle
Tuesday, Oct 2, 5:00 PM - 6:00 PM - Moscone West 3002/3004
Attend this session to hear the latest Oracle E-Business Suite 12.1 upgrade planning tips from Oracle's support, consulting, development, and IT organizations. You'll get specific cross-product advice on how to understand the factors that affect your project's duration, decide on your project's scope, develop a robust testing strategy, leverage Oracle Support resources, and more. In a nutshell, this session tells you the things you need to know before embarking upon your Release 12.1 upgrade project.

CON9053 - Advanced Management of Oracle E-Business Suite with Oracle Enterprise Manager
Angelo Rosado, Oracle
Tuesday, Oct 2, 5:00 PM - 6:00 PM - Moscone West 2016
The task of managing and monitoring Oracle E-Business Suite environments can be very challenging. Oracle Enterprise Manager is the only product on the market that is designed to monitor and manage all the different technologies that constitute Oracle E-Business Suite applications, including end user, midtier, configuration, host, and database management, to name just a few. Customers that have implemented Oracle Enterprise Manager have experienced dramatic improvements in system visibility and diagnostic capability as well as administrator productivity. This session highlights the key features and benefits of Oracle Enterprise Manager and Oracle Application Management Suite for Oracle E-Business Suite.
CON8809 - Oracle E-Business Suite 12.1 Upgrade Best Practices: Technical Insight
Isam Alyousfi, Udayan Parvate, Oracle
Wednesday, Oct 3, 10:15 AM - 11:15 AM - Moscone West 3011
This session is ideal for organizations thinking about upgrading to Oracle E-Business Suite 12.1. It covers the fundamentals of upgrading to Release 12.1, including the technology stack components and supported upgrade paths. Hear from Oracle development about best practices for patching in general and for executing the Release 12.1 technical upgrade, with special considerations for minimizing your downtime. Also get to know relatively recent upgrade resources.

CON9032 - Upgrading Your Customizations of Oracle E-Business Suite 12.1
Sara Woodhull, Oracle
Wednesday, Oct 3, 10:15 AM - 11:15 AM - Moscone West 2016
Have you personalized Oracle Forms or Oracle Application Framework screens in Oracle E-Business Suite? Have you used mod_plsql in Release 11i? Have you extended or customized your Release 11i environment with other tools? The technical options for upgrading these customizations as part of your Oracle E-Business Suite Release 12.1 upgrade can be bewildering. Come to this Oracle development session to learn about selecting the best upgrade approach for your existing customizations. The session will help you understand customization scenarios and use cases, tools, and technologies to ensure that your Oracle E-Business Suite Release 12.1 environment fits your users' needs closely and that any future customizations will be easy to upgrade.

CON9259 - Oracle E-Business Suite Internationalization and Multilingual Features
Maher Al-Nubani, Oracle
Wednesday, Oct 3, 10:15 AM - 11:15 AM - Moscone West 2018
Oracle E-Business Suite supports more countries, languages, and regions than ever. Come to this Oracle development session to get an overview of internationalization features and capabilities and see new Release 12 features such as calendar support for Hijrah and Thai, new group separators, lightweight multilingual support (MLS) setup, new character sets such as AL32UTF8, newly supported languages, Mac certifications, Oracle iSetup support for moving MLS setups, new file export options for Unicode, new MLS number spelling options, and more.

CON7188 - Mobile Apps for Oracle E-Business Suite with Oracle ADF Mobile and Oracle SOA Suite
Srikant Subramaniam, Joe Huang, Veshaal Singh, Oracle
Wednesday, Oct 3, 10:15 AM - 11:15 AM - Moscone West 3001
Follow your mobile customers, employees, and partners with Oracle Fusion Middleware. See how native iPhone and iPad applications can easily be built for Oracle E-Business Suite with the new Oracle ADF Mobile and Oracle SOA Suite. Using Oracle ADF Mobile, developers can quickly develop native applications for Apple iOS and other mobile platforms. The Oracle SOA Suite/Oracle ADF Mobile combination can execute business transactions on Oracle E-Business Suite. This session includes a demo in which a mobile user approves a business transaction in Oracle E-Business Suite and a demo of the tools used to build a native on-device solution. These concepts for mobile applications also apply to other Oracle applications.
CON9029 - Oracle E-Business Suite Directions: Slashing Downtimes with Online Patching
Kevin Hudson, Oracle
Wednesday, Oct 3, 11:45 AM - 12:45 PM - Moscone West 2016
Oracle E-Business Suite will soon include online patching (based on the Oracle Database 11g Release 2 Edition-Based Redefinition feature), which will reduce your database patching downtimes to however long it takes to bounce your database server. This Oracle development session details how online patching works, with special attention to what's happening at a database object level when database patches are applied to an Oracle E-Business Suite environment that's still running. Come learn about the operational and system management implications for minimizing maintenance downtimes when applying database patches with this new technology and the related impact on customizations you might have built on top of Oracle E-Business Suite.

CON8806 - Upgrading to Oracle E-Business Suite 12.1: Technical and Functional Panel
Andrew Katz, Komori America Corporation; Sandra Vucinic, VLAD Group, Inc.; Srini Chavali, Cummins Inc.; Amrita Mehrok, Nadia Bendjedou, Anne Carlson, Oracle
Wednesday, Oct 3, 1:15 PM - 2:15 PM - Moscone West 2018
In this panel discussion, Oracle experts, customers, and partners share their experiences in upgrading to the latest release of Oracle E-Business Suite, Release 12.1. The panelists cover aspects of a typical Release 12 upgrade, technical (upgrading the technical infrastructure) as well as functional (upgrading to the new financial infrastructure). Hear directly from the experts who either develop the product or support, implement, or upgrade it, and find out how to apply their lessons learned to your organization.

CON9027 - Personalize and Extend Oracle E-Business Suite Applications with Rich Mashups
Gustavo Jimenez, Padmaprabodh Ambale, Oracle
Wednesday, Oct 3, 1:15 PM - 2:15 PM - Moscone West 2016
This session covers the use of several Oracle Fusion Middleware technologies to personalize and extend your existing Oracle E-Business Suite applications. The Oracle Fusion Middleware technologies covered include Oracle Application Development Framework (Oracle ADF), Oracle WebCenter, Oracle Endeca applications, and Oracle Business Intelligence Enterprise Edition with Oracle E-Business Suite Oracle Application Framework applications.

CON9036 - Advanced Oracle E-Business Suite Architectures: Maximum Availability, Security, and More
Elke Phelps, Oracle
Wednesday, Oct 3, 3:30 PM - 4:30 PM - Moscone West 2016
This session includes architecture diagrams and configuration instructions for building a maximum availability architecture (MAA) that will help you design a disaster recovery solution that fits the needs of your business. Database and application high-availability features it describes include Oracle Data Guard, Oracle Real Application Clusters (Oracle RAC), Oracle Active Data Guard, load-balancing Web and forms services, parallel concurrent processing, and the use of Oracle Exalogic and Oracle Exadata to provide a highly available environment. The session also covers the latest updates to systems management tools, AutoConfig, cloud computing, virtualization, and Oracle WebLogic Server and provides sneak previews of upcoming functionality.
CON9047 - Efficiently Scaling Oracle E-Business Suite on Oracle Exadata and Oracle Exalogic
Isam Alyousfi, Nishit Rao, Oracle
Wednesday, Oct 3, 5:00 PM - 6:00 PM - Moscone West 2016
Oracle Exadata and Oracle Exalogic are designed from the ground up with optimizations in software and hardware to deliver superfast performance for mission-critical applications such as Oracle E-Business Suite. Oracle E-Business Suite applications run three to eight times as fast on the Oracle Exadata/Oracle Exalogic platform in standard benchmark tests. Besides performance, customers benefit from simplified support, enhanced manageability, and the ability to consolidate multiple Oracle E-Business Suite instances. Attend this session to understand best practices for Oracle E-Business Suite deployment on Oracle Exalogic and Oracle Exadata through customer case studies. Learn how adopting the Exa* platform increases efficiency, simplifies scaling, and boosts performance for peak loads.

CON8716 - Web Services and SOA Integration Options for Oracle E-Business Suite
Rekha Ayothi, Veshaal Singh, Oracle
Thursday, Oct 4, 11:15 AM - 12:15 PM - Moscone West 2016
This Oracle development session provides a deep dive into a subset of the Web services and SOA-related integration options available to Oracle E-Business Suite systems integrators. It offers a technical look at Oracle E-Business Suite Integrated SOA Gateway, Oracle SOA Suite, Oracle Application Adapters for Data Integration for Oracle E-Business Suite, and other Web services options for integrating Oracle E-Business Suite with other applications. Systems integrators and developers will get an overview of the latest integration capabilities and technologies available out of the box with Oracle E-Business Suite and possibly a sneak preview of upcoming functionality and features.
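To make the integration topic concrete, here is a hedged sketch of invoking a SOAP service deployed through Oracle E-Business Suite Integrated SOA Gateway from an external Python client. The host, service path, namespace, operation, parameter, and credentials are all hypothetical; real values come from the WSDL that Integrated SOA Gateway generates for the interfaces you deploy in your own instance:

import requests

# Hypothetical endpoint for a PL/SQL interface deployed via Integrated
# SOA Gateway; the URL pattern, namespace, and operation are assumptions.
ENDPOINT = ("https://ebs.example.com:4443"
            "/webservices/SOAProvider/plsql/hr_employee/")

# A bare-bones SOAP envelope; the payload shape is an assumption.
ENVELOPE = """<?xml version="1.0" encoding="UTF-8"?>
<soap:Envelope xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/"
    xmlns:hr="http://xmlns.oracle.com/apps/per/soaprovider/plsql/hr_employee/">
  <soap:Body>
    <hr:GET_EMPLOYEE>
      <hr:P_EMPLOYEE_ID>123</hr:P_EMPLOYEE_ID>
    </hr:GET_EMPLOYEE>
  </soap:Body>
</soap:Envelope>"""

# Credential handling is simplified to HTTP basic auth here; deployed ISG
# services typically expect a WS-Security UsernameToken in the SOAP header.
resp = requests.post(
    ENDPOINT,
    data=ENVELOPE.encode("utf-8"),
    headers={"Content-Type": "text/xml; charset=UTF-8",
             "SOAPAction": "GET_EMPLOYEE"},
    auth=("SYSADMIN", "sysadmin_pw"),
    timeout=30,
)
resp.raise_for_status()
print(resp.text)  # raw SOAP response; parse with xml.etree as needed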
CON9030 - Recommendations for Oracle E-Business Suite Performance Tuning
Isam Alyousfi, Samer Barakat, Oracle
Thursday, Oct 4, 11:15 AM - 12:15 PM - Moscone West 2018
Need to squeeze more performance out of your existing servers? This packed Oracle development session summarizes practical tips and lessons learned from performance-tuning and benchmarking the world's largest Oracle E-Business Suite environments. Apps sysadmins will learn concrete tips and techniques for identifying and resolving performance bottlenecks on all layers, with special attention to application- and database-tier servers. Learn about tuning Oracle Forms, Oracle Concurrent Manager, Apache, and Oracle Discoverer. Track down memory leaks and other issues at the Java and JVM layers. The session also covers Oracle E-Business Suite product-level tuning, including Oracle Workflow, Oracle Order Management, Oracle Payroll, and other modules.

CON3429 - Using Oracle ADF with Oracle E-Business Suite: The Full Integration View
Siva Puthurkattil, Lake County; Juan Camilo Ruiz, Sara Woodhull, Oracle
Thursday, Oct 4, 11:15 AM - 12:15 PM - Moscone West 3003
Oracle E-Business Suite delivers functionality for handling the core business of your organization. However, user requirements and new technologies are driving an emerging need to implement new types of user interfaces for these applications. This session provides an overview of how to use Oracle Application Development Framework (Oracle ADF) to deliver cutting-edge Web 2.0 and mobile rich user interfaces that front existing Oracle E-Business Suite processes, and it also explores all the existing types of integration between the two worlds.

CON9020 - Integrating Oracle E-Business Suite with Oracle Identity Management Solutions
Sunil Ghosh, Elke Phelps, Oracle
Thursday, Oct 4, 12:45 PM - 1:45 PM - Moscone West 2016
Need to integrate Oracle E-Business Suite with Microsoft Windows Kerberos, Active Directory, CA Netegrity SiteMinder, or other third-party authentication systems? Want to understand your options when Oracle Premier Support for Oracle Single Sign-On ends in December 2011? This Oracle development session covers the latest certified integrations with Oracle Access Manager 11g and Oracle Internet Directory 11g, which can be used individually or as bridges for integrating with third-party authentication solutions. The session presents an architectural overview of how Oracle Access Manager, its WebGate and AccessGate components, and Oracle Internet Directory work together, with implications for Oracle Discoverer, Oracle Portal, and other Oracle Fusion identity management products.

CON9019 - Troubleshooting, Diagnosing, and Optimizing Oracle E-Business Suite Technology
Gustavo Jimenez, Oracle
Thursday, Oct 4, 2:15 PM - 3:15 PM - Moscone West 2016
This session covers how you can proactively diagnose Oracle E-Business Suite applications, including extensions built with Oracle Fusion Middleware technologies such as Oracle Application Development Framework (Oracle ADF) and Oracle WebCenter, to catch potential issues in the middle tier before they become more serious. Topics include debugging, logging infrastructure, warning signs, performance tuning, information required when logging service requests, general JVM optimization, and an overall picture of all the moving parts that make it possible for Oracle E-Business Suite to isolate and fix problems. Also learn how Oracle Diagnostics Framework will help prevent downtime caused by failures.

CON9031 - The Top 10 Things You Can Do to Secure Your Oracle E-Business Suite Instance
Eric Bing, Erik Graversen, Oracle
Thursday, Oct 4, 2:15 PM - 3:15 PM - Moscone West 2018
Learn the top 10 things you can do to secure your applications and your sensitive data. This Oracle development session for system administrators and security professionals explores some of the most important and overlooked things you can do to secure your Oracle E-Business Suite instance. It also covers data masking and other mechanisms for protecting sensitive data.

Special Interest Groups (SIG)

Some of our most senior staff have been invited to participate in the following SIG meetings as guest speakers:

SIG10525 - OAUG - Archive & Purge SIG
Brian Bent - Pre-Sales Engineer, TierData, Inc.
Sunday, Sep 30, 10:30 AM - 12:00 PM - Moscone West 3011
The Archive and Purge SIG is an organization in which users can share their experiences and solicit functional and technical advice on archiving and purging data in Oracle E-Business Suite. This session provides an opportunity for users to network and share best practices, tips, and tricks.
Guest: Oracle E-Business Suite Database Performance, Archive & Purging - Q&A Session
Isam Alyousfi, Senior Director, Applications Performance, Oracle

SIG10547 - OAUG - Oracle E-Business (EBS) Applications Technology SIG
Srini Chavali - IT Director, Cummins Inc.
Sunday, Sep 30, 10:30 AM - 12:00 PM - Moscone West 3018
The general purpose of the EBS Applications Technology SIG is to inform and educate its members about current and future components of the tech stack as they relate to Oracle E-Business Suite. Attend this meeting for networking and education and to share best practices.
Guest: Oracle E-Business Suite Technology Certification Roadmap - Presentation and Q&A
Steven Chan, Sr. Director, Applications Technology Group, Oracle

SIG10559 - OAUG - User Management SIG
Susan Behn - VP of Oracle Delivery, Infosemantics, Inc.
Sunday, Sep 30, 10:30 AM - 12:00 PM - Moscone West 3024
The E-Business Suite User Management SIG focuses on the components of user management that enable Oracle E-Business Suite users to define administrative functions and manage users' access to functions and data based on roles within an organization, rather than on each user's individual identity; this approach is referred to as role-based access control (RBAC). This meeting includes an introduction to Oracle User Management that covers the Oracle User Management building blocks and presents an example of creating a security policy.
Guest: Security and User Management - Q&A Session
Eric Bing, Sr. Director, EBS Security, Oracle
Sara Woodhull, Principal Product Manager, Applications Technology Group, Oracle

SIG10515 - OAUG - Upgrade SIG
Barbara Matthews - Consultant, On Call DBA; Sandra Vucinic, VLAD Group, Inc.
Sunday, Sep 30, 12:00 PM - 2:00 PM - Moscone West 3009
This Upgrade SIG session starts with a business meeting and then features a Q&A panel discussion on Oracle E-Business Suite upgrade topics. The session:
• Reviews Upgrade SIG goals and objectives
• Provides answers, during the Q&A session, to questions related to Oracle E-Business Suite upgrades
• Shares "real world" experiences, tips, and techniques for Oracle E-Business Suite upgrades to Release 12.1
Guest: Oracle E-Business Suite Upgrade - Q&A Session
Anne Carlson - Sr. Director, Oracle E-Business Suite Product Strategy, Oracle
Udayan Parvate - Director, EBS Release Engineering, Oracle
Suzana Ferrari - Sr. Principal Consultant, Oracle
Isam Alyousfi - Sr. Director, Applications Performance, Oracle

SIG10552 - OAUG - Oracle E-Business Suite SIG
Donna Rosentrater - Manager, Global Sourcing & Procurement Systems, TJX
Sunday, Sep 30, 12:15 PM - 1:45 PM - Moscone West 3020
The E-Business Suite SIG, affiliated with OAUG, supports Oracle E-Business Suite users through networking, education, and sharing of best practices. This SIG meeting will feature a general discussion of Oracle E-Business Suite product strategies in Release 12 and migration to Oracle Fusion Applications.
Guest: Oracle E-Business Suite - Q&A Session
Jeanne Lowell, Vice President, EBS Product Strategy, Oracle
Nadia Bendjedou, Sr. Director, Product Strategy, Oracle

SIG10556 - OAUG - SysAdmin SIG
Randy Giefer - Sr. Systems and Security Architect, Solution Beacon, LLC
Sunday, Sep 30, 12:15 PM - 1:45 PM - Moscone West 3022
The SysAdmin SIG provides a forum in which OAUG members and participants can share updates, tips, and successful practices relating to system administration in an Oracle applications environment. The SysAdmin SIG strives to enable system administrators to become more effective and efficient in their jobs by providing them with access to people and information that can increase their system administration knowledge and experience. Attend this meeting to network, share best practices, and benefit from educational content.
Guest: Oracle E-Business Suite 12.2 Online Patching - Presentation and Q&A
Kevin Hudson, Sr. Director, Applications Technology Group, Oracle
SIG10553 - OAUG - Database SIG
Michael Brown - Senior DBA, COLIBRI LTD LC
Sunday, Sep 30, 2:00 PM - 3:15 PM - Moscone West 3020
The OAUG Database SIG provides an opportunity for applications database administrators to learn from and share their experiences with supporting the various Oracle applications environments. This session will include a brief business meeting followed by a short presentation. It will end with an open discussion among the attendees about items of interest to those present.
Guest: Oracle E-Business Suite Database Performance - Presentation and Q&A
Isam Alyousfi, Sr. Director, Applications Performance, Oracle

Meet the Experts

We're planning two round-table discussions where you can review your questions with senior E-Business Suite ATG staff:

MTE9648 - Meet the Experts for Oracle E-Business Suite: Planning Your Upgrade
Jeanne Lowell - VP, EBS Product Strategy, Oracle
John Abraham - Sr. Principal Product Manager, Oracle
Nadia Bendjedou - Sr. Director, Product Strategy, Oracle
Anne Carlson - Sr. Director, Applications Technology Group, Oracle
Udayan Parvate - Director, EBS Release Engineering, Oracle
Isam Alyousfi - Sr. Director, Applications Performance, Oracle
Monday, Oct 1, 3:15 PM - 4:15 PM - Moscone West 2001A
Don't miss this Oracle Applications Meet the Experts session with experts who specialize in Oracle E-Business Suite upgrade best practices. This is the place where attendees can have informal, semistructured, but open one-on-one discussions with Strategy and Development regarding Oracle Applications strategy and your specific business and IT strategy. The experts will be available to discuss the value of the latest releases and share insights into the best path for your enterprise, so come ready with your questions. Space is limited, so make sure you register.

MTE9649 - Meet the Oracle E-Business Suite Tools and Technology Experts
Lisa Parekh - Vice President, Technology Integration, Oracle
Steven Chan - Sr. Director, Oracle
Elke Phelps - Sr. Principal Product Manager, Applications Technology Group, Oracle
Max Arderius - Manager, Applications Technology Group, Oracle
Tuesday, Oct 2, 1:15 PM - 2:15 PM - Moscone West 2001A
Don't miss this Oracle Applications Meet the Experts session with experts who specialize in Oracle E-Business Suite technology. This is the place where attendees can have informal, semistructured, but open one-on-one discussions with Strategy and Development regarding Oracle Applications strategy and your specific business and IT strategy. The experts will be available to discuss the value of the latest releases and share insights into the best path for your enterprise, so come ready with your questions. Space is limited, so make sure you register.

Demos

We have five booths in the exhibition demogrounds this year, where you can try ATG technologies firsthand and get your questions answered. Please stop by and meet our staff at the following locations:
• Advanced Architecture and Technology Stack for Oracle E-Business Suite (W-067)
• New User Productivity Capabilities in Oracle E-Business Suite (W-065)
• End-to-End Management of Oracle E-Business Suite (W-063)
• Oracle E-Business Suite 12.1 Technical Upgrade Best Practices (W-066)
• SOA-Based Integration for Oracle E-Business Suite (W-064)
