Search Results


  • iPhone dev - activity indicator with NSThread not working on nav controller table view

    - by Frames84
    I really can't get this to work. Basically, when my JSON feed loads I want the indicator to show, then hide when it has finished. The table first loads the top-level menu items ("Publishing", "Broadcasting", "Marketing Services"); then, when "Broadcasting" is selected, it loads a feed using the JSON framework hosted on Google Code. Around this load I call startIndicator and stopIndicator on the main thread. Have I missed something?

        @implementation GeneralNewsTableViewController

        @synthesize dataList;
        @synthesize generalNewsDetailViewController;
        @synthesize atLevel;

        - (void)startIndicator {
            NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
            [(UIActivityIndicatorView *)[self navigationItem].rightBarButtonItem.customView startAnimating];
            [pool release];
        }

        - (void)stopIndicator {
            NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
            [(UIActivityIndicatorView *)[self navigationItem].rightBarButtonItem.customView stopAnimating];
            [pool release];
        }

        - (void)viewDidLoad {
            NSMutableArray *checker = self.dataList;
            if (checker == nil) {
                self.title = NSLocalizedString(@"General1", @"General News");
                NSMutableArray *array = [[NSArray alloc] initWithObjects:@"Publishing", @"Broadcasting", @"Marketing Services", nil];
                self.dataList = [array retain];
                self.atLevel = @"level1";
                [array release];
            }

            UIActivityIndicatorView *activityIndicator = [[UIActivityIndicatorView alloc] initWithFrame:CGRectMake(0, 0, 20, 20)];
            // set the initial property
            [activityIndicator stopAnimating];
            [activityIndicator hidesWhenStopped];

            // Create an instance of a bar button item with a custom view, which is the activity indicator
            UIBarButtonItem *barButton = [[UIBarButtonItem alloc] initWithCustomView:activityIndicator];

            // Set the bar button on the navigation bar
            [self navigationItem].rightBarButtonItem = barButton;

            // Memory clean up
            [activityIndicator release];
            [barButton release];

            [super viewDidLoad];
        }

        - (CGFloat)tableView:(UITableView *)tableView heightForRowAtIndexPath:(NSIndexPath *)indexPath {
            NSString *level = self.atLevel;
            if ([level isEqualToString:@"level2"]) {
                return 70.0f;
            } else {
                return 40.0f;
            }
        }

        - (UITableViewCell *)tableView:(UITableView *)tableView cellForRowAtIndexPath:(NSIndexPath *)indexPath {
            static NSString *FirstLevelCell = @"FirstLevelCell";
            UITableViewCell *cell = [tableView dequeueReusableCellWithIdentifier:FirstLevelCell];
            if (cell == nil) {
                cell = [[[UITableViewCell alloc] initWithStyle:UITableViewCellStyleSubtitle reuseIdentifier:FirstLevelCell] autorelease];
            }
            NSInteger row = [indexPath row];
            NSString *level = self.atLevel;
            if ([level isEqualToString:@"level2"]) {
                NSMutableArray *stream = [self.dataList objectAtIndex:row];
                NSString *newsTitle = [stream valueForKey:@"title"];
                if (![newsTitle isKindOfClass:[NSString class]]) {
                    cell.textLabel.text = @"";
                } else {
                    cell.textLabel.text = [stream valueForKey:@"title"];
                }
                cell.textLabel.numberOfLines = 2;
                cell.textLabel.font = [UIFont systemFontOfSize:10];
                cell.detailTextLabel.numberOfLines = 1;
                cell.detailTextLabel.font = [UIFont systemFontOfSize:8];
                cell.detailTextLabel.text = [stream valueForKey:@"created"];
                NSData *imageURL = [[NSData alloc] initWithContentsOfURL:[NSURL URLWithString:@"http://www.how-do.co.uk/images/stories/Cimex.jpg"]];
                UIImage *newsImage = [[UIImage alloc] initWithData:imageURL];
                cell.imageView.image = newsImage;
                [imageURL release];
                [newsImage release];
            } else {
                cell.textLabel.text = [dataList objectAtIndex:row];
            }
            return cell;
        }

        - (void)tableView:(UITableView *)tableView didSelectRowAtIndexPath:(NSIndexPath *)indexPath {
            NSUInteger row = [indexPath row];
            NSMutableString *levelType = (NSMutableString *)[dataList objectAtIndex:row];
            if (![levelType isKindOfClass:[NSString class]]) {
                if (self.generalNewsDetailViewController == nil) {
                    GeneralNewsDetailViewController *generalDetail = [[GeneralNewsDetailViewController alloc] initWithNibName:@"GeneralNewsDetailView" bundle:nil];
                    self.generalNewsDetailViewController = generalDetail;
                    [generalDetail release];
                }
                NSDictionary *stream = [self.dataList objectAtIndex:row];
                NSString *newsTitle = [stream valueForKey:@"title"];
                if (![newsTitle isKindOfClass:[NSString class]]) {
                    generalNewsDetailViewController.newsTitle = @"";
                } else {
                    generalNewsDetailViewController.newsTitle = [stream valueForKey:@"title"];
                }
                generalNewsDetailViewController.newsId = [stream valueForKey:@"id"];
                generalNewsDetailViewController.fullText = [stream valueForKey:@"fulltext"];
                generalNewsDetailViewController.newsImage = [stream valueForKey:@"images"];
                generalNewsDetailViewController.created = [stream valueForKey:@"created"];
                HowDo_v1AppDelegate *delegate = [[UIApplication sharedApplication] delegate];
                [delegate.generalNewsNavController pushViewController:self.generalNewsDetailViewController animated:YES];
            } else {
                GeneralNewsTableViewController *generalSubDetail = [[GeneralNewsTableViewController alloc] initWithNibName:@"GeneralNewsTableView" bundle:nil];
                NSMutableArray *array;
                NSString *titleSelected = (NSString *)[dataList objectAtIndex:row];
                if ([titleSelected isEqualToString:@"Publishing"]) {
                    generalSubDetail.title = @"Publishing news detail";
                    array = [[NSArray alloc] initWithObjects:@"pub News1", @"pub News2", @"pub News3", nil];
                    generalSubDetail.atLevel = @"level1";
                } else if ([titleSelected isEqualToString:@"Broadcasting"]) {
                    generalSubDetail.title = @"Broadcasting news detail";
                    /// START
                    [self performSelectorOnMainThread:@selector(startIndicator) withObject:nil waitUntilDone:YES];
                    if (jSONDataAccessWrapper == nil) {
                        jSONDataAccessWrapper = [JSON_DataAccess_Wrapper alloc];
                    }
                    array = [jSONDataAccessWrapper downloadJSONFeed];
                    [self performSelectorOnMainThread:@selector(stopIndicator) withObject:nil waitUntilDone:YES];
                    generalSubDetail.atLevel = @"level2";
                } else if ([titleSelected isEqualToString:@"Marketing Services"]) {
                    generalSubDetail.title = @"Marketing Services news detail";
                    array = [[NSArray alloc] initWithObjects:@"Marketing News1", @"Marketing News2", @"Marketing News3", nil];
                    generalSubDetail.atLevel = @"level1";
                }
                generalSubDetail.dataList = array;
                [self.navigationController pushViewController:generalSubDetail animated:YES];
                [titleSelected release];
            }
        }

        - (NSInteger)tableView:(UITableView *)table numberOfRowsInSection:(NSInteger)section
        //- (NSInteger)tableView:(UITableView *)tableView numberOfRowsInSection:(NSIndexPath *)section
        {
            return [self.dataList count];
        }

    Cheers for any feedback,
    Frames
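
    Editor's note on the pattern itself: the usual shape of "spinner around a slow load" is to start the indicator on the UI thread, run the download on a background thread, and stop the indicator back on the UI thread when the work completes; if the download runs synchronously on the UI thread, the indicator never gets a chance to redraw. The sketch below shows that shape in Java Swing terms purely as an illustration, not as the poster's code (BusyIndicatorPattern, loadFeed and slowDownload are invented names):

        import javax.swing.JFrame;
        import javax.swing.JLabel;
        import javax.swing.SwingUtilities;
        import javax.swing.SwingWorker;

        public class BusyIndicatorPattern {

            // Show the indicator, push the slow work OFF the UI thread,
            // then hide the indicator on the UI thread when the work is done.
            static void loadFeed(JLabel busyLabel) {
                busyLabel.setVisible(true);          // UI thread: show indicator
                new SwingWorker<String, Void>() {
                    @Override
                    protected String doInBackground() {
                        return slowDownload();       // worker thread: slow I/O
                    }
                    @Override
                    protected void done() {
                        busyLabel.setVisible(false); // UI thread again: hide it
                    }
                }.execute();
            }

            // Stand-in for a slow network fetch.
            static String slowDownload() {
                try { Thread.sleep(2000); } catch (InterruptedException e) { }
                return "feed";
            }

            public static void main(String[] args) {
                SwingUtilities.invokeLater(() -> {
                    JFrame f = new JFrame("demo");
                    JLabel busy = new JLabel("Loading...");
                    f.add(busy);
                    f.setSize(200, 100);
                    f.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
                    f.setVisible(true);
                    loadFeed(busy);
                });
            }
        }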


  • Using JDialog with Tabbed Pane to draw different pictures [migrated]

    - by Bryam Ulloa
    I am using NetBeans, and I have a class that extends JDialog; inside that dialog box I have created a tabbed pane. The tabbed pane contains 6 different tabs, with 6 different panels of course. What I want to do is: when I click on the different tabs, a diagram is supposed to be drawn with the paint method. My question is how I can draw on the different panels with just one paint method in another class, called from the dialog class (one way to wire this up is sketched after the code). Here is my code for the dialog class:

        package GUI;

        public class NewJDialog extends javax.swing.JDialog {

            /**
             * Creates new form NewJDialog
             */
            public NewJDialog(java.awt.Frame parent, boolean modal) {
                super(parent, modal);
                initComponents();
            }

            /**
             * This method is called from within the constructor to initialize the form.
             * WARNING: Do NOT modify this code. The content of this method is always
             * regenerated by the Form Editor.
             */
            @SuppressWarnings("unchecked")
            // <editor-fold defaultstate="collapsed" desc="Generated Code">
            private void initComponents() {

                jTabbedPane1 = new javax.swing.JTabbedPane();
                jPanel1 = new javax.swing.JPanel();
                jPanel2 = new javax.swing.JPanel();
                jPanel3 = new javax.swing.JPanel();
                jPanel4 = new javax.swing.JPanel();
                jPanel5 = new javax.swing.JPanel();
                jPanel6 = new javax.swing.JPanel();
                jPanel7 = new javax.swing.JPanel();
                jLabel1 = new javax.swing.JLabel();
                jLabel2 = new javax.swing.JLabel();

                setDefaultCloseOperation(javax.swing.WindowConstants.DISPOSE_ON_CLOSE);

                javax.swing.GroupLayout jPanel1Layout = new javax.swing.GroupLayout(jPanel1);
                jPanel1.setLayout(jPanel1Layout);
                jPanel1Layout.setHorizontalGroup(
                    jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
                    .addGap(0, 466, Short.MAX_VALUE)
                );
                jPanel1Layout.setVerticalGroup(
                    jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
                    .addGap(0, 242, Short.MAX_VALUE)
                );
                jTabbedPane1.addTab("FCFS", jPanel1);

                javax.swing.GroupLayout jPanel2Layout = new javax.swing.GroupLayout(jPanel2);
                jPanel2.setLayout(jPanel2Layout);
                jPanel2Layout.setHorizontalGroup(
                    jPanel2Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
                    .addGap(0, 466, Short.MAX_VALUE)
                );
                jPanel2Layout.setVerticalGroup(
                    jPanel2Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
                    .addGap(0, 242, Short.MAX_VALUE)
                );
                jTabbedPane1.addTab("SSTF", jPanel2);

                javax.swing.GroupLayout jPanel3Layout = new javax.swing.GroupLayout(jPanel3);
                jPanel3.setLayout(jPanel3Layout);
                jPanel3Layout.setHorizontalGroup(
                    jPanel3Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
                    .addGap(0, 466, Short.MAX_VALUE)
                );
                jPanel3Layout.setVerticalGroup(
                    jPanel3Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
                    .addGap(0, 242, Short.MAX_VALUE)
                );
                jTabbedPane1.addTab("LOOK", jPanel3);

                javax.swing.GroupLayout jPanel4Layout = new javax.swing.GroupLayout(jPanel4);
                jPanel4.setLayout(jPanel4Layout);
                jPanel4Layout.setHorizontalGroup(
                    jPanel4Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
                    .addGap(0, 466, Short.MAX_VALUE)
                );
                jPanel4Layout.setVerticalGroup(
                    jPanel4Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
                    .addGap(0, 242, Short.MAX_VALUE)
                );
                jTabbedPane1.addTab("LOOK C", jPanel4);

                javax.swing.GroupLayout jPanel5Layout = new javax.swing.GroupLayout(jPanel5);
                jPanel5.setLayout(jPanel5Layout);
                jPanel5Layout.setHorizontalGroup(
                    jPanel5Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
                    .addGap(0, 466, Short.MAX_VALUE)
                );
                jPanel5Layout.setVerticalGroup(
                    jPanel5Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
                    .addGap(0, 242, Short.MAX_VALUE)
                );
                jTabbedPane1.addTab("SCAN", jPanel5);

                javax.swing.GroupLayout jPanel6Layout = new javax.swing.GroupLayout(jPanel6);
                jPanel6.setLayout(jPanel6Layout);
                jPanel6Layout.setHorizontalGroup(
                    jPanel6Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
                    .addGap(0, 466, Short.MAX_VALUE)
                );
                jPanel6Layout.setVerticalGroup(
                    jPanel6Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
                    .addGap(0, 242, Short.MAX_VALUE)
                );
                jTabbedPane1.addTab("SCAN C", jPanel6);

                getContentPane().add(jTabbedPane1, java.awt.BorderLayout.CENTER);

                jLabel1.setText("Distancia:");
                jLabel2.setText("___________");

                javax.swing.GroupLayout jPanel7Layout = new javax.swing.GroupLayout(jPanel7);
                jPanel7.setLayout(jPanel7Layout);
                jPanel7Layout.setHorizontalGroup(
                    jPanel7Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
                    .addGroup(jPanel7Layout.createSequentialGroup()
                        .addGap(21, 21, 21)
                        .addComponent(jLabel1)
                        .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
                        .addComponent(jLabel2)
                        .addContainerGap(331, Short.MAX_VALUE))
                );
                jPanel7Layout.setVerticalGroup(
                    jPanel7Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
                    .addGroup(jPanel7Layout.createSequentialGroup()
                        .addContainerGap()
                        .addGroup(jPanel7Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
                            .addComponent(jLabel1)
                            .addComponent(jLabel2))
                        .addContainerGap(15, Short.MAX_VALUE))
                );
                getContentPane().add(jPanel7, java.awt.BorderLayout.PAGE_START);

                pack();
            }// </editor-fold>

            /**
             * @param args the command line arguments
             */
            public static void main(String args[]) {
                /* Set the Nimbus look and feel */
                //<editor-fold defaultstate="collapsed" desc=" Look and feel setting code (optional) ">
                /* If Nimbus (introduced in Java SE 6) is not available, stay with the default look and feel.
                 * For details see http://download.oracle.com/javase/tutorial/uiswing/lookandfeel/plaf.html
                 */
                try {
                    for (javax.swing.UIManager.LookAndFeelInfo info : javax.swing.UIManager.getInstalledLookAndFeels()) {
                        if ("Nimbus".equals(info.getName())) {
                            javax.swing.UIManager.setLookAndFeel(info.getClassName());
                            break;
                        }
                    }
                } catch (ClassNotFoundException ex) {
                    java.util.logging.Logger.getLogger(NewJDialog.class.getName()).log(java.util.logging.Level.SEVERE, null, ex);
                } catch (InstantiationException ex) {
                    java.util.logging.Logger.getLogger(NewJDialog.class.getName()).log(java.util.logging.Level.SEVERE, null, ex);
                } catch (IllegalAccessException ex) {
                    java.util.logging.Logger.getLogger(NewJDialog.class.getName()).log(java.util.logging.Level.SEVERE, null, ex);
                } catch (javax.swing.UnsupportedLookAndFeelException ex) {
                    java.util.logging.Logger.getLogger(NewJDialog.class.getName()).log(java.util.logging.Level.SEVERE, null, ex);
                }
                //</editor-fold>

                /* Create and display the dialog */
                java.awt.EventQueue.invokeLater(new Runnable() {
                    public void run() {
                        NewJDialog dialog = new NewJDialog(new javax.swing.JFrame(), true);
                        dialog.addWindowListener(new java.awt.event.WindowAdapter() {
                            @Override
                            public void windowClosing(java.awt.event.WindowEvent e) {
                                System.exit(0);
                            }
                        });
                        dialog.setVisible(true);
                    }
                });
            }

            // Variables declaration - do not modify
            private javax.swing.JLabel jLabel1;
            private javax.swing.JLabel jLabel2;
            private javax.swing.JPanel jPanel1;
            private javax.swing.JPanel jPanel2;
            private javax.swing.JPanel jPanel3;
            private javax.swing.JPanel jPanel4;
            private javax.swing.JPanel jPanel5;
            private javax.swing.JPanel jPanel6;
            private javax.swing.JPanel jPanel7;
            private javax.swing.JTabbedPane jTabbedPane1;
            // End of variables declaration
        }

    This is another class that I have created for the paint method:

        package GUI;

        import java.awt.Graphics;
        import javax.swing.JPanel;

        /**
         * @author TOSHIBA
         */
        public class Lienzo {

            private int width = 5;
            private int height = 5;
            private int y = 5;
            private int x = 0;
            private int x1 = 0;

            public Graphics Draw(Graphics g, int[] pistas) {
                // I'm not sure if this is the correct way to do it.
                // The diagram gets drawn according to values from an array.
                // The array is not always the same; that's why I used the different panels.
                for (int i = 0; i < pistas.length; i++) {
                    x = pistas[i];
                    x1 = pistas[i + 1];
                    g.drawOval(x, y, width, height);
                    g.drawString(Integer.toString(x), x, y);
                    g.drawLine(x, y, x1, y);
                }
                return g;
            }
        }

    I hope you guys understand what I am trying to do with my program.
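
    One way to get a single paint routine to serve all six tabs is to give each tab an instance of a small JPanel subclass that overrides paintComponent and delegates to Lienzo with that tab's own data. A minimal sketch, reusing the Lienzo class from the question; DiagramPanel is a hypothetical name made up for the example:

        package GUI;

        import java.awt.Graphics;
        import javax.swing.JPanel;

        // One panel class for all tabs: each instance carries its own data array,
        // but every instance shares the same drawing routine (Lienzo.Draw).
        public class DiagramPanel extends JPanel {

            private final Lienzo lienzo = new Lienzo();
            private final int[] pistas;

            public DiagramPanel(int[] pistas) {
                this.pistas = pistas;
            }

            @Override
            protected void paintComponent(Graphics g) {
                super.paintComponent(g); // clear the panel before drawing
                lienzo.Draw(g, pistas);  // same paint method, different data per tab
            }
        }

    Each tab then gets its own instance instead of a bare JPanel, e.g. jTabbedPane1.addTab("FCFS", new DiagramPanel(fcfsOrder)); where fcfsOrder is whatever array that algorithm produced (again a hypothetical name). Swing calls paintComponent automatically whenever a tab becomes visible, so nothing needs to invoke the paint method by hand; in the NetBeans form editor, one route is each panel's "Custom Creation Code" property. One caveat about Draw as posted: the loop reads pistas[i + 1], so its last iteration indexes past the end of the array; bounding the loop at pistas.length - 1 avoids an ArrayIndexOutOfBoundsException.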


  • Resulting .exe from PyInstaller with wxPython crashing

    - by Helgi Hrafn Gunnarsson
    I'm trying to compile a very simple wxPython script into an executable by using PyInstaller on Windows Vista. The Python script is nothing but a Hello World in wxPython. I'm trying to get that up and running as a Windows executable before I add any of the features the program needs to have, but I'm already stuck. I've jumped through some hoops in regards to MSVCR90.DLL, MSVCP90.DLL and MSVCM90.DLL, which I ended up copying from my Visual Studio installation (C:\Program Files\Microsoft Visual Studio 9.0\VC\redist\x86\Microsoft.VC90.CRT). Following the instructions for PyInstaller, I run:

    Command:

        Configure.py

    Output:

        I: computing EXE_dependencies
        I: Finding TCL/TK...
        I: could not find TCL/TK
        I: testing for Zlib...
        I: ... Zlib available
        I: Testing for ability to set icons, version resources...
        I: ... resource update available
        I: Testing for Unicode support...
        I: ... Unicode available
        I: testing for UPX...
        I: ...UPX available
        I: computing PYZ dependencies...

    So far, so good. I continue.

    Command:

        Makespec.py -F guitest.py

    Output:

        wrote C:\Code\PromoUSB\guitest.spec
        now run Build.py to build the executable

    Then there's the final command.

    Command:

        Build.py guitest.spec

    Output:

        checking Analysis
        building Analysis because out0.toc non existent
        running Analysis out0.toc
        Analyzing: C:\Python26\pyinstaller-1.3\support\_mountzlib.py
        Analyzing: C:\Python26\pyinstaller-1.3\support\useUnicode.py
        Analyzing: guitest.py
        Warnings written to C:\Code\PromoUSB\warnguitest.txt
        checking PYZ
        rebuilding out1.toc because out1.pyz is missing
        building PYZ out1.toc
        checking PKG
        rebuilding out3.toc because out3.pkg is missing
        building PKG out3.pkg
        checking ELFEXE
        rebuilding out2.toc because guitest.exe missing
        building ELFEXE out2.toc

    I get the resulting guitest.exe file, but upon execution it "simply crashes", with no debug info; it's just one of those standard Windows Vista crash dialogs. The script itself, guitest.py, runs just fine by itself. It only crashes as an executable, and I'm completely lost. I don't even know what to look for, since nothing I've tried has returned any relevant results. Another file is generated as a result of the compilation process, called warnguitest.txt. Here are its contents:

        W: no module named posix (conditional import by os)
        W: no module named optik.__all__ (top-level import by optparse)
        W: no module named readline (delayed, conditional import by cmd)
        W: no module named readline (delayed import by pdb)
        W: no module named pwd (delayed, conditional import by posixpath)
        W: no module named org (top-level import by pickle)
        W: no module named posix (delayed, conditional import by iu)
        W: no module named fcntl (conditional import by subprocess)
        W: no module named org (top-level import by copy)
        W: no module named _emx_link (conditional import by os)
        W: no module named optik.__version__ (top-level import by optparse)
        W: no module named fcntl (top-level import by tempfile)
        W: __all__ is built strangely at line 0 - collections (C:\Python26\lib\collections.pyc)
        W: delayed exec statement detected at line 0 - collections (C:\Python26\lib\collections.pyc)
        W: delayed conditional __import__ hack detected at line 0 - doctest (C:\Python26\lib\doctest.pyc)
        W: delayed exec statement detected at line 0 - doctest (C:\Python26\lib\doctest.pyc)
        W: delayed conditional __import__ hack detected at line 0 - doctest (C:\Python26\lib\doctest.pyc)
        W: delayed __import__ hack detected at line 0 - encodings (C:\Python26\lib\encodings\__init__.pyc)
        W: __all__ is built strangely at line 0 - optparse (C:\Python26\pyinstaller-1.3\optparse.pyc)
        W: __all__ is built strangely at line 0 - dis (C:\Python26\lib\dis.pyc)
        W: delayed eval hack detected at line 0 - os (C:\Python26\lib\os.pyc)
        W: __all__ is built strangely at line 0 - __future__ (C:\Python26\lib\__future__.pyc)
        W: delayed conditional __import__ hack detected at line 0 - unittest (C:\Python26\lib\unittest.pyc)
        W: delayed conditional __import__ hack detected at line 0 - unittest (C:\Python26\lib\unittest.pyc)
        W: __all__ is built strangely at line 0 - tokenize (C:\Python26\lib\tokenize.pyc)
        W: __all__ is built strangely at line 0 - wx (C:\Python26\lib\site-packages\wx-2.8-msw-unicode\wx\__init__.pyc)
        W: __all__ is built strangely at line 0 - wx (C:\Python26\lib\site-packages\wx-2.8-msw-unicode\wx\__init__.pyc)
        W: delayed exec statement detected at line 0 - bdb (C:\Python26\lib\bdb.pyc)
        W: delayed eval hack detected at line 0 - bdb (C:\Python26\lib\bdb.pyc)
        W: delayed eval hack detected at line 0 - bdb (C:\Python26\lib\bdb.pyc)
        W: delayed __import__ hack detected at line 0 - pickle (C:\Python26\lib\pickle.pyc)
        W: delayed __import__ hack detected at line 0 - pickle (C:\Python26\lib\pickle.pyc)
        W: delayed conditional exec statement detected at line 0 - iu (C:\Python26\pyinstaller-1.3\iu.pyc)
        W: delayed conditional exec statement detected at line 0 - iu (C:\Python26\pyinstaller-1.3\iu.pyc)
        W: delayed eval hack detected at line 0 - gettext (C:\Python26\lib\gettext.pyc)
        W: delayed __import__ hack detected at line 0 - optik.option_parser (C:\Python26\pyinstaller-1.3\optik\option_parser.pyc)
        W: delayed conditional eval hack detected at line 0 - warnings (C:\Python26\lib\warnings.pyc)
        W: delayed conditional __import__ hack detected at line 0 - warnings (C:\Python26\lib\warnings.pyc)
        W: __all__ is built strangely at line 0 - optik (C:\Python26\pyinstaller-1.3\optik\__init__.pyc)
        W: delayed exec statement detected at line 0 - pdb (C:\Python26\lib\pdb.pyc)
        W: delayed conditional eval hack detected at line 0 - pdb (C:\Python26\lib\pdb.pyc)
        W: delayed eval hack detected at line 0 - pdb (C:\Python26\lib\pdb.pyc)
        W: delayed conditional eval hack detected at line 0 - pdb (C:\Python26\lib\pdb.pyc)
        W: delayed eval hack detected at line 0 - pdb (C:\Python26\lib\pdb.pyc)

    I don't know what the heck to make of any of that. Again, my searches have been fruitless.


  • SQL SERVER – Weekly Series – Memory Lane – #052

    - by Pinal Dave
    Let us continue with the final episode of the Memory Lane series. Here is a list of selected articles from SQLAuthority.com across all these years. Instead of just listing all the articles, I have selected a few of my favorite articles and listed them here with additional notes. Let me know which one of the following is your favorite article from memory lane.

    2007

    Set Server Level FILLFACTOR Using T-SQL Script
    Specifies a percentage that indicates how full the Database Engine should make the leaf level of each index page during index creation or alteration. fillfactor must be an integer value from 1 to 100. The default is 0.

    Limitation of Online Index Rebuild Operation
    An online operation means that while it is running, the database remains in a normal operational condition; the processes participating in the online operation do not require exclusive access to the database.

    Get Permissions of My Username / Userlogin on Server / Database
    A few days ago, I was invited to one of the largest database companies. I was asked to review their database schema and propose changes to it. A special username / user login was created for me so I could review their database. I was very much interested to know what kind of permissions I had been assigned at the server level and the database level, and I did not feel like asking the Sr. DBA about permissions.

    Simple Example of WHILE Loop With CONTINUE and BREAK Keywords
    This question is one of those which is very simple, and most users get it correct; however, a few users find it confusing the first time. I have tried to explain the usage of a simple WHILE loop in the first example. The BREAK keyword exits the WHILE loop, and control moves to the next statement after the loop. The CONTINUE keyword skips all the statements after it, and control is sent back to the first statement of the WHILE loop.

    Forced Parameterization and Simple Parameterization – T-SQL and SSMS
    When the PARAMETERIZATION option is set to FORCED, any literal value that appears in a SELECT, INSERT, UPDATE or DELETE statement is converted to a parameter during query compilation. When the PARAMETERIZATION database option is set to SIMPLE, the SQL Server query optimizer may choose to parameterize the queries.

    2008

    Transaction and Local Variables – Swap Variables – Update All At Once Concept
    Summary: transactions have no effect on memory variables. When an UPDATE statement is applied to any table (physical or memory), all the updates are applied together when the statement is committed. First of all, I suggest that you read the article listed above about the effect of transactions on local variables. As seen there, local variables are independent of any transaction effect.

    Simulate INNER JOIN using LEFT JOIN statement – Performance Analysis
    Just a day ago, while I was working with JOINs, I found one interesting observation, which prompted me to create the following example. Before we continue further, let me make very clear that INNER JOIN should be used where it can be used, and simulating INNER JOIN using any other JOIN will degrade the performance. If there is scope to convert any OUTER JOIN to an INNER JOIN, it should be done with priority.

    2009

    Introduction to Business Intelligence – Important Terms & Definitions
    Business intelligence (BI) is a broad category of application programs and technologies for gathering, storing, analyzing, and providing access to data from various data sources, thus providing enterprise users with reliable and timely information and analysis for improved decision making.

    Difference Between Candidate Keys and Primary Key
    Candidate Key – a candidate key can be any column or combination of columns that can qualify as a unique key in the database. There can be multiple candidate keys in one table, and each candidate key can qualify as the primary key. Primary Key – a primary key is a column or combination of columns that uniquely identifies a record. Only one candidate key can be the primary key.

    2010

    Taking Multiple Backups of Database in Single Command – Mirrored Database Backup
    I recently had a very interesting experience. In one of my recent consultancy engagements, our client told me that they were going to take a backup of the database and also make a copy of it at the same time. I said that it was surely possible if they used a mirror command. In addition, they told me that whenever they take two copies of the database, the size of the backup is always reduced. This was not clear to me; I said it was not possible, and I asked them to show me the script.

    Corrupted Backup File and Unsuccessful Restore
    The CTO, who was also present at the location, got very upset with this situation. He then asked when the last successful restore test was done. As expected, the answer was NEVER. There had been no successful restore tests before. During that time I was present, and I could clearly see the stress, confusion, carelessness and anger around me. I did not appreciate the feeling, and I was pretty sure that no one in there wanted that atmosphere either.

    2011

    TRACEWRITE – Wait Type – Wait Related to Buffer and Resolution
    SQL Trace is a SQL Server database engine technology which monitors specific events generated when various actions occur in the database engine. When any event is fired, it goes through various stages as well as various routes. One of the routes is the Trace I/O Provider, which sends data to its final destination either as a file or as a rowset.

    DATEDIFF – Accuracy of Various Dateparts
    If you want accuracy in seconds, you need to use a different approach. In the first example, the accurate method is to find the number of seconds first and then divide it by 60 to convert it into minutes.

    Dedicated Access Control for SQL Server Express Edition
    http://www.youtube.com/watch?v=1k00z82u4OI

    Book Signing at SQLPASS

    2012

    Who I Am And How I Got Here – True Story as Blog Post
    If there is a shortcut to success, I want to know it. I learnt SQL Server the hard way and I am still learning. There are so many things I have to learn, and there is not enough time to learn everything we want to learn. I am constantly working on it every day. I welcome you to join my journey as well; please join me in my journey to learn SQL Server – the more the merrier.

    Vacation, Travel and Study – A New Concept
    Even those who have advanced degrees and went to college for years, or even decades, find studying hard. There is a difference between studying for a career and studying for a certification. At least to get a degree there is a variety of subjects, with labs, exams, and practice problems to make things more interesting.

    Order By Numeric Values Formatted as String
    We have a table which has a column containing alphanumeric data. The data always has an integer first and a string after it. The business need is to order the data based on the first part of the alphanumeric data, which is the integer. The problem is that no matter how we use ORDER BY, the result is not produced as expected. Let us understand this with an example.

    Resolving SQL Server Connection Errors – SQL in Sixty Seconds #030 – Video
    One of the most famous errors related to SQL Server is about connecting to SQL Server itself. Here is how it goes: most developers have worked with SQL Server and know pretty much every error they face during development. However, they hardly ever install a fresh SQL Server. As installation of SQL Server is a rare occasion, unless you are a DBA responsible for such an instance, the errors faced during installation are pretty rare as well.
    http://www.youtube.com/watch?v=1k00z82u4OI

    Reference: Pinal Dave (http://blog.sqlauthority.com)
    Filed under: Memory Lane, PostADay, SQL, SQL Authority, SQL Query, SQL Server, SQL Tips and Tricks, T SQL, Technology


  • Oracle's Global Single Schema

    - by david.butler(at)oracle.com
    Maximizing business process efficiencies in a heterogeneous environment is very difficult. The difficulty stems from the fact that the various applications across the Information Technology (IT) landscape employ different integration standards, different message passing strategies, and different workflow engines. Vendors such as Oracle and others are delivering tools to help IT organizations manage the complexities introduced by these differences. But the one remaining intractable problem impacting efficient operations is the fact that these applications have different definitions for the same business data.

    Business data is your business information codified for computer programs to use. A good data model will represent the way your organization does business. The computer applications your organization deploys to improve operational efficiency are built to operate on the business data organized into this schema. If the schema does not represent how you do business, the applications on that schema cannot provide the features you need to achieve the desired efficiencies. Business processes span these applications. Data problems break these processes, rendering them far less efficient than they need to be to achieve organizational goals. Thus, the expected return on the investment in these applications is never realized.

    The success of all business processes depends on the availability of accurate master data. Clearly, the solution to this problem is to consolidate all the master data an organization uses to run its business; then clean it up, augment it, govern it, and connect it back to the applications that need it. Until now, this obvious solution has been difficult to achieve because no one had defined a data model sufficiently broad, deep and flexible enough to support transaction processing on all key business entities and serve as a master superset to all other operational data models deployed in heterogeneous IT environments.

    Today, the situation has changed. Oracle has created an operational data model (aka schema) that can support accurate and consistent master data across heterogeneous IT systems. This is foundational for providing a way to consolidate and integrate master data without having to replace investments in existing applications. This Global Single Schema (GSS) represents a revolutionary breakthrough that allows for true master data consolidation.

    Oracle has deep knowledge of applications dating back to the early 1990s. It developed applications in the areas of Supply Chain Management (SCM), Product Lifecycle Management (PLM), Enterprise Resource Planning (ERP), Customer Relationship Management (CRM), Human Capital Management (HCM), Financials and Manufacturing. In addition, Oracle applications were delivered for key industries such as Communications, Financial Services, Retail, Public Sector, High Tech Manufacturing (HTM) and more. Expertise in all these areas drove the requirements for GSS. The following figure illustrates Oracle's unique position that enabled the creation of the Global Single Schema.

    [Figure: GSS Requirements Gathering]

    GSS defines all the key business entities and attributes, including Customers, Contacts, Suppliers, Accounts, Products, Services, Materials, Employees, Installed Base, Sites, Assets, and Inventory, to name just a few. In addition, Oracle delivers GSS pre-integrated with a wide variety of operational applications.

    Business Process Automation

    E-business is about maximizing operational efficiency. At the highest level, these 'operations' span all that you do as an organization. The following figure illustrates some of these high-level business processes.

    [Figure: Enterprise Business Processes]

    Supplies are procured. Assets are maintained. Materials are stored. Inventory is accumulated. Products and services are engineered, produced and sold. Customers are serviced. And across this entire spectrum, employees do the procuring, supporting, engineering, producing, selling and servicing. Not shown, but not to be overlooked, are the accounting and financial processes associated with all this procuring, manufacturing, and selling activity.

    Supporting all these applications is the master data. When this data is fragmented and inconsistent, the business processes fail and inefficiencies multiply. But imagine having all the data under these operational business processes in one place.

    - The same accurate and timely customer data will be provided to all your operational applications, from the call center to the point of sale.
    - The same accurate and timely supplier data will be provided to all your operational applications, from supply chain planning to procurement.
    - The same accurate and timely product information will be available to all your operational applications, from demand chain planning to marketing.

    You would have a single version of the truth about your assets, financial information, customers, suppliers, employees, products and services to support your business automation processes as they flow across your business applications. All company and partner personnel will access the same exact data entity across all your channels and across all your lines of business. Oracle's Global Single Schema enables this vision of a single version of the truth across the heterogeneous operational applications supporting the entire enterprise.

    Global Single Schema

    Oracle's Global Single Schema organizes hundreds of thousands of attributes into 165 major schema objects supporting over 180 business application modules. It is designed for international operations and extensibility. The schema is delivered with a full set of public Application Programming Interfaces (APIs) and an Integration Repository with modern Service Oriented Architecture interfaces to make data available as a service (DaaS) to business processes and enable operations in heterogeneous IT environments.

    - Key tables can be extended with unlimited numbers of additional attributes and attribute groups for maximum flexibility. This enables model extensions that reflect business entities unique to your organization's operations.
    - The schema is multi-organization enabled, so data manipulation can be controlled along organizational boundaries.
    - It uses variable-byte Unicode to support over 31 languages.
    - The schema encodes flexible date and flexible address formats for easy localization.

    No matter how complex your business is, Oracle's Global Single Schema can hold your business objects and support your global operations. Oracle's Global Single Schema identifies and defines the business objects an enterprise needs within the context of its business operations. The interrelationships between the business objects are also contained within the GSS data model. Their presence expresses fundamental business rules for the interaction between business entities. The following figure illustrates some of these connections.

    [Figure: Interconnected Business Entities]

    Interconnected business processes require interconnected business data. No other MDM vendor has this capability. Everyone else has either one entity they can master or separate, disconnected models for the various business entities. Higher-level integrations are made available, but that is a weak architectural alternative to data-level integration in this critically important aspect of Master Data Management.


  • Getting your bearings and defining the project objective

    - by johndoucette
    I wrote this two years ago and thought it was worth posting…

    Some may think this is a daunting task, and some may even say "what a waste of time" and want to open MS Project and start typing out tasks because someone asked for an estimate and a task list. Hell, maybe you even use Excel and pump out a spreadsheet with some real scientific formula for guessing how long it will take to code a bunch of classes. However, this short exercise will provide the basis for the entire project, whether small or large, and will be a great friend when communicating with anyone on your team or even your client. I call this the Project Brief. If you find yourself going beyond a single page, then you must decompose the sections and summarize your findings so there is a complete and clear picture of the project you are working on in a relatively short statement.

    Here is a great quote from the PMBOK (Project Management Body of Knowledge) about what a project is:

        A project is a temporary endeavor undertaken to create a unique product, service or result.

    With this in mind, the project brief should encompass the entirety (objective) of the endeavor in its explanation and what it will take (goals) to create the product, service or result (deliverables). Normally the process of identifying the project objective is done during the first stage of a project, called the Project Kickoff, but you can perform this very important step anytime to help you get a bearing. There are many more parts to helping a project stay on course, but this is usually the foundation it can be grounded on. Through a series of three exercises, you should be able to come up with the objective, goals and deliverables of your project. Follow these steps, and in no time (about half an hour), you will have the foundation of your project plan. (See the examples below.)

    Exercise 1 – Objectives

    Begin with the end in mind. Think about your project in business terms, with a couple of things to help you understand the objective:

    - Reference the business benefit in terms of cost, speed and/or quality
    - Provide a higher-level picture of what the outcome will look like (future sense)
    - It should be non-measurable; that's what the goals are all about

    The output should be a single paragraph with three sentences and take 10 minutes to write.

    *Typically, agreement must be reached on the objectives of the project before you proceed to the next steps of the project.

    Exercise 2 – Goals

    A project goal is a statement that answers questions about who, what, why, where and when. A good project goal statement:

    - Answers the five "W" questions for the project
    - Is measurable in each of its parts
    - Is published and agreed on by all the owners

    This helps the Project Manager receive confirmation on defining the project target. Using the project objective established in the first exercise, think about the things it will take to get the job done. Think about tangible activities, which are the top-level tasks in a typical Work Breakdown Structure (WBS). The overall goal statement plus all the deliverables (next exercise) can be seen as the project team's contract with the project owners. Write 3-5 goals in about 10 minutes. You should not write the words "who, what, why, where and when", but merely be able to answer those questions when you read a goal.

    Exercise 3 – Deliverables

    Every project creates some type of output, and these outputs are called deliverables. There are two classes of deliverables:

    - Internal – produced for project team members to meet their goals
    - External – produced for project owners to meet their expectations

    The list you enter here provides a checklist for the team's delivery and/or is a statement of all the expectations of the project owners. Here are some typical project deliverables:

    - Product and product documentation
      - End product/system
      - Requirements/feature documents
      - Installation guides
      - Demo/prototype
      - System design documents
      - User guides/help files
    - Plans
      - Project plan
      - Training plan
      - Conversion/installation/delivery plan
      - Test plans
      - Documentation plan
      - Communication plan
    - Reports and general documentation
      - Progress reports
      - System acceptance tests
      - Outstanding bug list
      - Procedures
      - Risk and issue logs
      - Project history

    Deliverables should go with each of the goals; have 3-5 deliverables for each goal. When you are done, you will have established a great foundation for the clarity of your project. This exercise can take some time, but with practice you should be able to whip this one out in 10 minutes as well, especially if you are intimate with an ongoing project.

    Samples

    Objective

    [Client] is implementing a series of MOSS sites to support external public (Internet), internal employee (Intranet) and external secure (password-protected Internet) applications. This project will focus on the public-facing web site and will provide [Client] with architectural recommendations based on the current design being done by their design partner [Partner] and the internal Content Team. In addition, it will provide [Client] with a development plan and the confidence they need to deploy a world-class public Internet website.

    Goals

    1. [Consultant] will provide technical guidance and set project team expectations for the implementation of the MOSS Internet site based on the provided features/functions within three weeks.
    2. [Consultant] will understand the phase 2 secure password-protected Internet site design and provide recommendations.

    Deliverables

    1.1 Public Internet (unsecure) Architectural Recommendation Plan
    1.2 Physical site construction Work Breakdown Structure and plan (time, cost and resources needed)
    2.1 Two-factor authentication recommendation document

    Objective

    [Client] is currently using an application developed by [Consultant] many years ago called "XXX". This application, although functional, does not meet their new updated business requirements, and it contains a few defects for which [Client] has developed work-around processes. [Client] would like to have a "new and improved" system to support their membership management needs by expanding membership and subscription capabilities, providing accounting integration with internal (GL) and external (VeriSign) systems, and implementing hooks to the current CRM solution. This effort will take place through a series of phases, beginning with envisioning.

    Goals

    1. Through discussions with users, [Consultant] will discover the current issues/bugs which need to be resolved while meeting the current functionality requirements, within three weeks.
    2. [Consultant] will gather requirements from the users about what is "needed" vs. "what they have" for enhancements and provide a high-level document supporting their needs.
    3. [Consultant] will meet with the team members through a series of meetings and help define the overall project plan to deliver a new and improved solution.

    Deliverables

    1.1 Prioritized list of current application issues/bugs that need to be resolved
    1.2 A resolution plan for the issues/bugs identified in the current application
    1.3 Risk Assessment Document
    2.1 A Requirements Document showing the high-level [Client] needs for the new XXX application:
        - New feature functionality not in the application today
        - Existing functionality that will remain in the new functionality
    2.2 Reporting Requirements Document
    3.1 A Project Plan showing the deliverables and cost for the next (second) phase of this project
    3.2 A Statement of Work for the next (second) phase of this project
    3.3 An Estimate of any work that would need to follow the second phase


  • iPack - The iOS Application Packager

    - by user13277780
    iOS applications are distributed in .ipa archive files. These files are regular zip files which contain application resources and executables. To protect them from unauthorized modifications and to provide identification of their sources, the content of the archives is signed. The signature is included in the application executable of an .ipa archive and protects the executable file itself and the associated resource files. Apple provides native Mac OS tools for signing iOS executables (which are actually generic Mach-O code signing tools), but these tools are not generally available on other platforms. To provide a multi-platform development environment for JavaFX based iOS applications, we ported iOS signing and packaging to Java and created a dedicated ipack tool for it. The iPack tool can be used as the last step of creating an .ipa package on various operating systems. The prototype has been tested by creating a final distributable for a JavaFX application that runs on iPad, all done on Windows 7.

    Source Code

    The source code of the iPack tool is in the OpenJFX project repository. You can find it in:

        <openjfx root>/rt/tools/ios/Maven/ipack

    To build the iPack tool use:

        rt/tools/ios/Maven/ipack$ mvn package

    After building, you can run the tool:

        java -jar <path to ipack.jar> <arguments>

    Signing keystore

    The tool uses a Java key store to read the signing certificate and the associated private key. To prepare such a keystore, users can use keytool from the JDK. One possible scenario is to import an existing private key and the certificate from a key store used on Mac OS.

    To list the content of an existing key store and identify the source alias:

        keytool -list -keystore <src keystore>.p12 -storetype pkcs12 -storepass <src keystore password>

    To create a Java key store and import the private key with its certificate into the key store:

        keytool -importkeystore \
                -destkeystore <dst keystore> -deststorepass <dst keystore password> \
                -srckeystore <src keystore>.p12 -srcstorepass <src keystore password> -srcstoretype pkcs12 \
                -srcalias <src alias> -destalias <dst alias> -destkeypass <dst key password>

    Another scenario would be to generate a private / public key pair directly in a Java key store and create a certificate request from it. After sending the request to Apple, one can then import the certificate response back into the Java key store and complete the signing certificate entry. In both scenarios the resulting alias in the Java key store will contain only a single (leaf) certificate. This can be verified with the following command:

        keytool -list -v -keystore <ipack keystore> -storepass <keystore password>

    When looking at the Certificate chain length entry, the number next to it is 1. When an executable file is signed on Mac OS, the resulting signature (in CMS format) includes the whole certificate chain up to the Apple Root CA. The ipack tool includes only the chain which is stored under the alias specified on the command line. So to have the whole chain in the signature, we need to replace the single certificate entry under the alias with the corresponding full certificate chain. To do that we first need to create the chain in a separate file. It is easy to create such a chain when working with certificates in Base-64 encoded PEM format: a certificate chain can be created by concatenating the PEM certificates which should form the chain into a single file. For iOS signing we need the following certificates in our chain:

    - Apple Root CA
    - Apple Worldwide Developer Relations CA
    - Our signing leaf certificate

    To convert a certificate from the binary DER format (.der, .cer) to PEM format:

        keytool -importcert -noprompt -keystore temp.ks -storepass temppwd -alias tempcert -file <certificate>.cer
        keytool -exportcert -keystore temp.ks -storepass temppwd -alias tempcert -rfc -file <certificate>.pem

    To export the signing certificate into PEM format:

        keytool -exportcert -keystore <ipack keystore> -storepass <keystore password> -alias <signing alias> -rfc -file SigningCert.pem

    After constructing a chain from SigningCert.pem, AppleWWDRCA.pem and AppleIncRootCertificate.pem, it can be imported back into the keystore with:

        keytool -importcert -noprompt -keystore <ipack keystore> -storepass <keystore password> -alias <signing alias> -keypass <key password> -file SigningCertChain.pem

    To summarize, the following example shows the full certificate chain replacement process:

        keytool -importcert -noprompt -keystore temp.ks -storepass temppwd -alias tempcert1 -file AppleIncRootCertificate.cer
        keytool -exportcert -keystore temp.ks -storepass temppwd -alias tempcert1 -rfc -file AppleIncRootCertificate.pem
        keytool -importcert -noprompt -keystore temp.ks -storepass temppwd -alias tempcert2 -file AppleWWDRCA.cer
        keytool -exportcert -keystore temp.ks -storepass temppwd -alias tempcert2 -rfc -file AppleWWDRCA.pem
        keytool -exportcert -keystore ipack.ks -storepass keystorepwd -alias mycert -rfc -file SigningCert.pem
        cat SigningCert.pem AppleWWDRCA.pem AppleIncRootCertificate.pem > SigningCertChain.pem
        keytool -importcert -noprompt -keystore ipack.ks -storepass keystorepwd -alias mycert -keypass keypwd -file SigningCertChain.pem
        keytool -list -v -keystore ipack.ks -storepass keystorepwd

    Usage

    When the ipack tool is started with no arguments, it prints the following usage information:

        Usage: ipack <archive> <signing opts> <application opts> [ <application opts> ... ]

        Signing options:
            -keystore <keystore>    keystore to use for signing
            -storepass <password>   keystore password
            -alias <alias>          alias for the signing certificate chain and the associated private key
            -keypass <password>     password for the private key

        Application options:
            -basedir <directory>    base directory from which to derive relative paths
            -appdir <directory>     directory with the application executable and resources
            -appname <file>         name of the application executable
            -appid <id>             application identifier

    Example:

        ipack MyApplication.ipa -keystore ipack.ks -storepass keystorepwd -alias mycert -keypass keypwd -basedir mysources/MyApplication/dist -appdir Payload/MyApplication.app -appname MyApplication -appid com.myorg.MyApplication
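
    If you'd rather check the result of the chain replacement programmatically than read keytool's output, the chain length can also be inspected with a few lines of Java against the same keystore. A minimal sketch, reusing the ipack.ks / keystorepwd / mycert placeholders from the example above (ChainCheck is just an invented name for the illustration):

        import java.io.FileInputStream;
        import java.security.KeyStore;
        import java.security.cert.Certificate;
        import java.security.cert.X509Certificate;

        public class ChainCheck {
            public static void main(String[] args) throws Exception {
                KeyStore ks = KeyStore.getInstance("JKS");
                try (FileInputStream in = new FileInputStream("ipack.ks")) {
                    ks.load(in, "keystorepwd".toCharArray());
                }
                // Before the replacement this prints 1 (leaf only); after it,
                // the full chain up to the Apple Root CA (3 in this setup).
                Certificate[] chain = ks.getCertificateChain("mycert");
                if (chain == null) {
                    System.out.println("No key entry under that alias");
                    return;
                }
                System.out.println("Certificate chain length: " + chain.length);
                for (Certificate c : chain) {
                    System.out.println("  " + ((X509Certificate) c).getSubjectX500Principal());
                }
            }
        }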


  • Design for complex ATG applications

    - by Glen Borkowski
    Overview Needless to say, some ATG applications are more complex than others.  Some ATG applications support a single site, single language, single catalog, single currency, have a single development staff, single business team, and a relatively simple business model.  The real complex applications have to support multiple sites, multiple languages, multiple catalogs, multiple currencies, a couple different development teams, multiple business teams, and a highly complex business model (and processes to go along with it).  While it's still important to implement a proper design for simple applications, it's absolutely critical to do this for the complex applications.  Why?  It's all about time and money.  If you are unable to manage your complex applications in an efficient manner, the cost of managing it will increase dramatically as will the time to get things done (time to market).  On the positive side, your competition is most likely in the same situation, so you just need to be more efficient than they are. This article is intended to discuss a number of key areas to think about when designing complex applications on ATG.  Some of this can get fairly technical, so it may help to get some background first.  You can get enough of the required background information from this post.  After reading that, come back here and follow along. Application Design Of all the various types of ATG applications out there, the most complex tend to be the ones in the telecommunications industry - especially the ones which operate in multiple countries.  To get started, let's assume that we are talking about an application like that.  One that has these properties: Operates in multiple countries - must support multiple sites, catalogs, languages, and currencies The organization is fairly loosely-coupled - single brand, but different businesses across different countries There is some common functionality across all sites in all countries There is some common functionality across different sites within the same country Sites within a single country may have some unique functionality - relative to other sites in the same country Complex product catalog (mostly in terms of bundles, eligibility, and compatibility) At this point, I'll assume you have read through the required reading and have a decent understanding of how ATG modules work... Code / configuration - assemble into modules When it comes to defining your modules for a complex application, there are a number of goals: Divide functionality between the modules in a way that maps to your business Group common functionality 'further down in the stack of modules' Provide a good balance between shared resources and autonomy for countries / sites Now I'll describe a high level approach to how you could accomplish those goals...  Let's start from the bottom and work our way up.  At the very bottom, you have the modules that ship with ATG - the 'out of the box' stuff.  You want to make sure that you are leveraging all the modules that make sense in order to get the most value from ATG as possible - and less stuff you'll have to write yourself.  
On top of the ATG modules, you should create what we'll refer to as the Corporate Foundation Module described as follows: Sits directly on top of ATG modules Used by all applications across all countries and sites - this is the foundation for everyone Contains everything that is common across all countries / all sites Once established and settled, will change less frequently than other 'higher' modules Encapsulates as many enterprise-wide integrations as possible Will provide means of code sharing therefore less development / testing - faster time to market Contains a 'reference' web application (described below) The next layer up could be multiple modules for each country (you could replace this with region if that makes more sense).  We'll define those modules as follows: Sits on top of the corporate foundation module Contains what is unique to all sites in a given country Responsible for managing any resource bundles for this country (to handle multiple languages) Overrides / replaces corporate integration points with any country-specific ones Finally, we will define what should be a fairly 'thin' (in terms of functionality) set of modules for each site as follows: Sits on top of the country it resides in module Contains what is unique for a given site within a given country Will mostly contain configuration, but could also define some unique functionality as well Contains one or more web applications The graphic below should help to indicate how these modules fit together: Web applications As described in the previous section, there are many opportunities for sharing (minimizing costs) as it relates to the code and configuration aspects of ATG modules.  Web applications are also contained within ATG modules, however, sharing web applications can be a bit more difficult because this is what the end customer actually sees, and since each site may have some degree of unique look & feel, sharing becomes more challenging.  One approach that can help is to define a 'reference' web application at the corporate foundation layer to act as a solid starting point for each site.  Here's a description of the 'reference' web application: Contains minimal / sample reference styling as this will mostly be addressed at the site level web app Focus on functionality - ensure that core functionality is revealed via this web application Each individual site can use this as a starting point There may be multiple types of web apps (i.e. B2C, B2B, etc) There are some techniques to share web application assets - i.e. multiple web applications, defined in the web.xml, and it's worth investigating, but is out of scope here. Reference infrastructure In this complex environment, it is assumed that there is not a single infrastructure for all countries and all sites.  It's more likely that different countries (or regions) could have their own solution for infrastructure.  In this case, it will be advantageous to define a reference infrastructure which contains all the hardware and software that make up the core environment.  Specifications and diagrams should be created to outline what this reference infrastructure looks like, as well as it's baseline cost and the incremental cost to scale up with volume.  Having some consistency in terms of infrastructure will save time and money as new countries / sites come online.  Here are some properties of the reference infrastructure: Standardized approach to setup of hardware Type and number of servers Defines application server, operating system, database, etc... 
    Web applications

    As described in the previous section, there are many opportunities for sharing (minimizing costs) in the code and configuration aspects of ATG modules. Web applications are also contained within ATG modules; however, sharing web applications can be a bit more difficult, because this is what the end customer actually sees, and since each site may have some degree of unique look and feel, sharing becomes more challenging. One approach that can help is to define a 'reference' web application at the corporate foundation layer to act as a solid starting point for each site. Here's a description of the 'reference' web application:

    - Contains minimal / sample reference styling, as styling will mostly be addressed in the site-level web app
    - Focuses on functionality - ensures that core functionality is revealed via this web application
    - Each individual site can use this as a starting point
    - There may be multiple types of web apps (i.e. B2C, B2B, etc.)

    There are some techniques to share web application assets - i.e. multiple web applications, defined in the web.xml - and they are worth investigating, but that is out of scope here.

    Reference infrastructure

    In this complex environment, it is assumed that there is not a single infrastructure for all countries and all sites. It's more likely that different countries (or regions) could have their own solution for infrastructure. In this case, it will be advantageous to define a reference infrastructure which contains all the hardware and software that make up the core environment. Specifications and diagrams should be created to outline what this reference infrastructure looks like, as well as its baseline cost and the incremental cost to scale up with volume. Having some consistency in terms of infrastructure will save time and money as new countries / sites come online. Here are some properties of the reference infrastructure:

    - Standardized approach to the setup of hardware: type and number of servers
    - Defines application server, operating system, database, etc. - including vendor and specific versions
    - Consistent naming conventions - provides a consistent base of terminology and understanding across environments
    - Defines which ATG services run on which servers (production, staging, BCC / preview)
    - Each site can change as required to meet scale requirements

    Governance / organization

    It should be no surprise that the complex application we're talking about is backed by an equally complex organization. One of the more challenging aspects of efficiently managing a series of complex applications is to ensure the proper level of governance and organization. Here are some ideas and goals to work towards:

    - Establish a committee to make enterprise-wide decisions that affect all sites, with evenly distributed representation, a clear communication procedure, and a focus on high-level business goals
    - Evaluate feature / function gaps and how they relate to the ATG release schedule / roadmap; determine when to upgrade and ensure the value will be realized
    - Determine how to manage the various levels of modules: who is responsible for maintaining the corporate / country / site layers, and what procedure controls what goes into the corporate foundation module
    - Standardize on source code control, database, hardware, OS versions, J2EE app servers, development procedures, etc.; only use tested / proven versions - this is something that should be centralized so that every country / site does not have to worry about compatibility between versions
    - Create an innovation team to quickly develop new features and perform proofs of concept; all teams can benefit from its findings

    Summary

    At this point, it should be clear why the topics above (design, governance, organization, etc.) are critical to being able to efficiently manage a complex application. To summarize, it's all about competitive advantage. You will need to reduce costs and improve time to market with the goal of providing a better experience for your end customers. You can reduce cost by reducing development time, reducing time allocated to testing (you don't have to test the corporate foundation module over and over again - do it once), and optimizing operations. With an efficient design, you can improve your time to market and your business will be more flexible and agile. Over time, you'll find that you're becoming more focused on offering functionality that is new to the market (creativity), and this will be rewarded - you're now a leader.

    In addition to the above, you'll realize soft benefits as well. Your staff will be operating in a culture based on sharing. You'll want to reward efforts to improve and enhance the foundation, as this will benefit everyone. This culture will inspire innovation, which can only lend itself to your competitive advantage.

    Read the article

  • Matlab image watermarking question, using both SVD and DWT

    - by Georgek
    Hello all. Here is code that I got off the net, and it is supposed to embed a watermark of size (50*20) called _copyright.bmp in the code below. The size of the cover object is (512*512); it is called _lena_std_bw.bmp. What the code does is apply dwt2 twice to the image; after the second dwt2, cA2 is 128*128. Note the variable blocksize, which equals 4; it is used to determine the maximum message size based on cA2, according to the following line: max_message=RcA2*CcA2/(blocksize^2). In the current case max_message equals 128*128/(4^2)=1024. I want to embed a bigger watermark in the second dwt2; let's say the size of that watermark is 400*10 (I can change the dimensions using MS Paint). What I have to do is change blocksize to 2, so max_message=4096. Matlab gives me 3 errors:

    ??? Error using ==> plus
    Matrix dimensions must agree.

    Error in ==> idwt2 at 93
    x = upsconv2(a,{Lo_R,Lo_R},sx,dwtEXTM,shift)+ ... % Approximation.

    Error in ==> two_dwt_svd_low_low at 88
    CAA1 = idwt2(cA22,cH2,cV2,cD2,'haar',[RcA1,CcA1]);

    The original code (where blocksize = 4) is:

    %This algorithm makes DWT for the whole image and after that makes DWT for
    %cH1 and makes SVD for cH2 and embeds the watermark in every level after SVD
    %(1) -------------- Embed Watermark ------------------------------------
    %Add the watermark W to original image I and give the watermarked image in J
    %--------------------------------------------------------------------------
    % set the gain factor for embedding and threshold for evaluation
    clc;
    clear all;
    close all;
    % save start time
    start_time=cputime;
    % set the value of threshold and alpha
    thresh=.5;
    alpha=0.01;
    % read in the cover object
    file_name='_lena_std_bw.bmp';
    cover_object=double(imread(file_name));
    % determine size of watermarked image
    Mc=size(cover_object,1); %Height
    Nc=size(cover_object,2); %Width
    % read in the message image and reshape it into a vector
    file_name='_copyright.bmp';
    message=double(imread(file_name));
    T=message;
    Mm=size(message,1); %Height
    Nm=size(message,2); %Width
    % perform 1-level DWT for the whole cover image
    [cA1,cH1,cV1,cD1] = dwt2(cover_object,'haar');
    % determine the size of cA1
    [RcA1 CcA1]=size(cA1)
    % perform 2-level DWT for cA1
    [cA2,cH2,cV2,cD2] = dwt2(cA1,'haar');
    % determine the size of cA2
    [RcA2 CcA2]=size(cA2)
    % set the value of blocksize
    blocksize=4
    % reshape the watermark to a vector
    message_vector=round(reshape(message,Mm*Nm,1)./256);
    W=message_vector;
    % determine maximum message size based on cA2, and blocksize
    max_message=RcA2*CcA2/(blocksize^2)
    % check that the message isn't too large for cover
    if (length(message) > max_message)
        error('Message too large to fit in Cover Object')
    end
    %----------------------- process the image in blocks ----------------------
    x=1;
    y=1;
    for (kk = 1:length(message_vector))
        [cA2u cA2s cA2v]=svd(cA2(y:y+blocksize-1,x:x+blocksize-1));
        % if message bit contains zero, modify S of the original image
        if (message_vector(kk) == 0)
            cA2s = cA2s*(1 + alpha);
        % otherwise mask is filled with zeros
        else
            cA2s=cA2s;
        end
        cA22(y:y+blocksize-1,x:x+blocksize-1)=cA2u*cA2s*cA2v;
        % move to next block of mask along x; if at end of row, move to next row
        if (x+blocksize) >= CcA2
            x=1;
            y=y+blocksize;
        else
            x=x+blocksize;
        end
    end
    % perform IDWT
    CAA1 = idwt2(cA22,cH2,cV2,cD2,'haar',[RcA1,CcA1]);
    watermarked_image= idwt2(CAA1,cH1,cV1,cD1,'haar',[Mc,Nc]);
    % convert back to uint8
    watermarked_image_uint8=uint8(watermarked_image);
    % write watermarked image to file
    imwrite(watermarked_image_uint8,'dwt_watermarked.bmp','bmp');
    % display watermarked image
    figure(1)
    imshow(watermarked_image_uint8,[])
    title('Watermarked Image')
    %(2) ----------------------------------------------------------------------
    %---------- Extract Watermark from attacked watermarked image -------------
    %---------------------------------------------------------------------------
    % read in the watermarked object
    file_name='dwt_watermarked.bmp';
    watermarked_image=double(imread(file_name));
    % determine size of watermarked image
    Mw=size(watermarked_image,1); %Height
    Nw=size(watermarked_image,2); %Width
    % perform 1-level DWT for the whole watermarked image
    [ca1,ch1,cv1,cd1] = dwt2(watermarked_image,'haar');
    % determine the size of ca1
    [Rca1 Cca1]=size(ca1);
    % perform 2-level DWT for ca1
    [ca2,ch2,cv2,cd2] = dwt2(ca1,'haar');
    % determine the size of ca2
    [Rca2 Cca2]=size(ca2);
    % process the image in blocks
    % for each block get a bit for message
    x=1;
    y=1;
    for (kk = 1:length(message_vector))
        % sets correlation to 1 when patterns are identical to avoid /0 errors
        % otherwise calculate difference between the cover image and the
        % watermarked image
        [cA2u cA2s cA2v]=svd(cA2(y:y+blocksize-1,x:x+blocksize-1));
        [ca2u1 ca2s1 ca2v1]=svd(ca2(y:y+blocksize-1,x:x+blocksize-1));
        correlation(kk)=diag(ca2s1-cA2s)'*diag(ca2s1-cA2s)/(alpha*alpha)/(diag(cA2s)*diag(cA2s));
        % move on to next block; at end of row move to next row
        if (x+blocksize) >= Cca2
            x=1;
            y=y+blocksize;
        else
            x=x+blocksize;
        end
    end
    % if correlation exceeds average correlation
    correlation(kk)=correlation(kk)+mean(correlation(1:Mm*Nm));
    for kk = 1:length(correlation)
        if (correlation(kk) > thresh*alpha) %thresh*mean(correlation(1:Mo*No))
            message_vector(kk)=0;
        end
    end
    % reshape the message vector and display recovered watermark
    figure(2)
    message=reshape(message_vector(1:Mm*Nm),Mm,Nm);
    imshow(message,[])
    title('Recovered Watermark')
    % display processing time
    elapsed_time=cputime-start_time,

    Please do help; it's my graduation project and I have been trying this code for a long time but failing miserably. Thanks in advance.
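    One more detail I noticed while experimenting: since the embedding loop walks cA2 in non-overlapping blocksize-by-blocksize blocks, the watermark only fits if Mm*Nm is at most the number of blocks, and cA2 must divide evenly into blocks. A hypothetical pre-flight check (untested, using the same variable names as above) would be:

        % hypothetical pre-flight check, not part of the original code
        capacity = floor(RcA2/blocksize) * floor(CcA2/blocksize);
        assert(Mm*Nm <= capacity, 'Message too large to fit in Cover Object');
        assert(mod(RcA2,blocksize)==0 && mod(CcA2,blocksize)==0, ...
               'cA2 is not evenly divisible into blocks');

    I also realize that length(message) returns max(size(message)) for a matrix, not the element count, so maybe the size check should use Mm*Nm instead.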

    Read the article

  • Simple Project Templates

    - by Geertjan
    The NetBeans sources include a module named "simple.project.templates". In the module sources, Tim Boudreau turns out to be the author of the code, so I asked him what it was all about, and if he could provide some usage code. His response, from approximately this time last year (it's been sitting in my inbox for a while), is below.

    Sure - though I think the javadoc in it is fairly complete. I wrote it because I needed to create a bunch of project templates for Javacard, and all of the ways that is usually done were grotesque and complicated. I figured we already have the ability to create files from templates, and we already have the ability to do substitutions in templates, so why not have a single file that defines the project as a list of file templates to create (with substitutions in the names) and some definitions of what should be in project properties. You can also add files to the project programmatically if you want.

    Basically, a template for an entire project is a .properties file. Any line which doesn't have the prefix 'pp.' or 'pvp.' is treated as the definition of one file which should be created in the new project. Any such line where the key ends in * means that file should be opened once the new project is created. So, for example, in the nodejs module, the definition looks like:

    {{projectName}}.js*=Templates/javascript/HelloWorld.js
    .npmignore=node_hidden_templates/npmignore

    So, the first line means:
    - Create a file with the same name as the project, using the HelloWorld template
    - I.e. the left side of the line is the relative path of the file to create, and the right side is the path in the system filesystem for the template to use
      - If the template is not one you normally want users to see, just register it in the system filesystem somewhere other than Templates/ (but remember to set the attribute that marks it as a template)
    - Include that file in the set of files which should be opened in the editor once the new project is created.

    To actually create a project, first you just create a new ProjectCreator:

    ProjectCreator gen = new ProjectCreator(parentFolderOfNewProject);

    Now, if you want to programmatically generate any files, in addition to those defined in the template, you can:

    gen.add(new FileCreator("nbproject", "project.xml", false) {
        public DataObject create(FileObject project, Map<String,String> substitutions) throws IOException {
            ...
        }
    });

    Then pass the FileObject for the project template (the properties file) to the ProjectCreator's createProject method (hmm, maybe it should be the string path to the project template instead, to save the caller trouble looking up the FileObject for the template). That method looks like this:

    public final GeneratedProject createProject(final ProgressHandle handle, final String name, final FileObject template, final Map<String, String> substitutions) throws IOException {

    The name parameter should be the directory name for the new project; the map is the strings you gathered in the wizard which should be used for substitutions. createProject should be called on a background thread (i.e. use a ProgressInstantiatingIterator for the wizard iterator and just pass in the ProgressHandle you are given). The return value is a GeneratedProject object, which is just a holder for the created project directory and the set of DataObjects which should be opened when the wizard finishes.

    I'd love to see simple.project.templates moved out of the javacard cluster, as it is really useful and much simpler than any of the stuff currently done for generating projects. It would also be possible to do much richer tools for creating projects in apisupport - i.e. choose (or create in the wizard) the templates you want to use, generate a skeleton wizard with a UI for all the properties you'd like to substitute, etc.

    Here is a partial project template from Javacard - for example usage, see org.netbeans.modules.javacard.wizard.ProjectWizardIterator in javacard.project (or the much simpler one in contrib/nodejs).

    #This properties file describes what to create when a project template is
    #instantiated. The keys are paths on disk relative to the project root.
    #The values are paths to the templates to use for those files in the system
    #filesystem. Any string inside {{ and }}'s will be substituted using properties
    #gathered in the template wizard.
    #Special key prefixes are
    #  pp. - indicates an entry for nbproject/project.properties
    #  pvp. - indicates an entry for nbproject/private/private.properties

    #File templates, in format [path-in-project=path-to-template]
    META-INF/javacard.xml=org-netbeans-modules-javacard/templates/javacard.xml
    META-INF/MANIFEST.MF=org-netbeans-modules-javacard/templates/EAP_MANIFEST.MF
    APPLET-INF/applet.xml=org-netbeans-modules-javacard/templates/applet.xml
    scripts/{{classnamelowercase}}.scr=org-netbeans-modules-javacard/templates/test.scr
    src/{{packagepath}}/{{classname}}.java*=Templates/javacard/ExtendedApplet.java
    nbproject/deployment.xml=org-netbeans-modules-javacard/templates/deployment.xml

    #project.properties contents
    pp.display.name={{projectname}}
    pp.platform.active={{activeplatform}}
    pp.active.device={{activedevice}}
    pp.includes=**
    pp.excludes=

    I will be using the above info in an upcoming blog entry and provide step-by-step instructions showing how to use them. However, anyone else out there should have enough info from the above to get started yourself!
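    To make the background-thread advice above concrete, here is a minimal, hypothetical skeleton of a ProgressInstantiatingIterator calling createProject - the template registration path, the wizard field names, and the filesToOpen accessor are invented for illustration, so check them against the module's actual javadoc:

        // Hypothetical sketch of the instantiate method of a ProgressInstantiatingIterator
        public Set instantiate(ProgressHandle handle) throws IOException {
            handle.start();
            try {
                ProjectCreator gen = new ProjectCreator(parentFolderOfNewProject);
                // assumed system filesystem registration path for the template
                FileObject template = FileUtil.getConfigFile("org-myorg-templates/MyProject.properties");
                Map<String, String> substitutions = new HashMap<String, String>();
                substitutions.put("projectName", projectName); // gathered in the wizard panels
                GeneratedProject result = gen.createProject(handle, projectName, template, substitutions);
                return result.filesToOpen; // assumed accessor on GeneratedProject
            } finally {
                handle.finish();
            }
        }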

    Read the article

  • Retrieving a list of eBay categories using the .NET SDK and GetCategoriesCall

    - by Bill Osuch
    eBay offers a .NET SDK for its Trading API - this post will show you the basics of making an API call and retrieving a list of current categories. You'll need the category ID(s) for any apps that post or search eBay. To start, download the latest SDK from https://www.x.com/developers/ebay/documentation-tools/sdks/dotnet and create a new console app project. Add a reference to the eBay.Service DLL, and a few using statements:

    using eBay.Service.Call;
    using eBay.Service.Core.Sdk;
    using eBay.Service.Core.Soap;

    I'm assuming at this point you've already joined the eBay Developer Network and gotten your app IDs and user tokens. If not: join the developer program and generate tokens. Next, add an app.config file that looks like this:

    <?xml version="1.0"?>
    <configuration>
      <appSettings>
        <add key="Environment.ApiServerUrl" value="https://api.ebay.com/wsapi"/>
        <add key="UserAccount.ApiToken" value="YourBigLongToken"/>
      </appSettings>
    </configuration>

    And then add the code to get the XML list of categories:

    ApiContext apiContext = GetApiContext();
    GetCategoriesCall apiCall = new GetCategoriesCall(apiContext);
    apiCall.CategorySiteID = "0";
    //Leave this commented out to retrieve all category levels (all the way down):
    //apiCall.LevelLimit = 4;
    //Uncomment this to begin at a specific parent category:
    //StringCollection parentCategories = new StringCollection();
    //parentCategories.Add("63");
    //apiCall.CategoryParent = parentCategories;
    apiCall.DetailLevelList.Add(DetailLevelCodeType.ReturnAll);
    CategoryTypeCollection cats = apiCall.GetCategories();
    using (StreamWriter outfile = new StreamWriter(@"C:\Temp\EbayCategories.xml"))
    {
        outfile.Write(apiCall.SoapResponse);
    }

    GetApiContext() (provided in the sample apps in the SDK) is required for any call:

    static ApiContext GetApiContext()
    {
        //apiContext is a singleton,
        //to avoid duplicate configuration reading
        if (apiContext != null)
        {
            return apiContext;
        }
        else
        {
            apiContext = new ApiContext();
            //set Api Server Url
            apiContext.SoapApiServerUrl = ConfigurationManager.AppSettings["Environment.ApiServerUrl"];
            //set Api Token to access eBay Api Server
            ApiCredential apiCredential = new ApiCredential();
            apiCredential.eBayToken = ConfigurationManager.AppSettings["UserAccount.ApiToken"];
            apiContext.ApiCredential = apiCredential;
            //set eBay Site target to US
            apiContext.Site = SiteCodeType.US;
            return apiContext;
        }
    }

    Running this will give you a large (4 or 5 MB) XML file that looks something like this:

    <soapenv:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/">
      <soapenv:Body>
        <GetCategoriesResponse>
          <Timestamp>2012-06-06T16:03:46.158Z</Timestamp>
          <Ack>Success</Ack>
          <CorrelationID>d02dd9e3-295a-4268-9ea5-554eeb2e0e18</CorrelationID>
          <Version>775</Version>
          <Build>E775_CORE_BUNDLED_14891042_R1</Build>
          <CategoryArray>
            <Category>
              <BestOfferEnabled>true</BestOfferEnabled>
              <AutoPayEnabled>true</AutoPayEnabled>
              <CategoryID>20081</CategoryID>
              <CategoryLevel>1</CategoryLevel>
              <CategoryName>Antiques</CategoryName>
              <CategoryParentID>20081</CategoryParentID>
            </Category>
            <Category>
              <BestOfferEnabled>true</BestOfferEnabled>
              <AutoPayEnabled>true</AutoPayEnabled>
              <CategoryID>37903</CategoryID>
              <CategoryLevel>2</CategoryLevel>
              <CategoryName>Antiquities</CategoryName>
              <CategoryParentID>20081</CategoryParentID>
            </Category>
            (etc.)

    You could work with this, but I wanted a nicely nested view, like this:

    <CategoryArray>
      <Category Name='Antiques' ID='20081' Level='1'>
        <Category Name='Antiquities' ID='37903' Level='2'/>
    </CategoryArray>

    ...so I transformed the XML:

    private void TransformXML(CategoryTypeCollection cats)
    {
        XmlElement topLevelElement = null;
        XmlElement childLevelElement = null;
        XmlNode parentNode = null;
        string categoryString = "";
        XmlDocument returnDoc = new XmlDocument();
        XmlElement root = returnDoc.CreateElement("CategoryArray");
        returnDoc.AppendChild(root);
        XmlNode rootNode = returnDoc.SelectSingleNode("/CategoryArray");
        //Loop through CategoryTypeCollection
        foreach (CategoryType category in cats)
        {
            if (category.CategoryLevel == 1)
            {
                //Top-level category, so we know we can just add it
                topLevelElement = returnDoc.CreateElement("Category");
                topLevelElement.SetAttribute("Name", category.CategoryName);
                topLevelElement.SetAttribute("ID", category.CategoryID);
                rootNode.AppendChild(topLevelElement);
            }
            else
            {
                // Level number will determine how many Category nodes we are deep
                categoryString = "";
                for (int x = 1; x < category.CategoryLevel; x++)
                {
                    categoryString += "/Category";
                }
                parentNode = returnDoc.SelectSingleNode("/CategoryArray" + categoryString + "[@ID='" + category.CategoryParentID[0] + "']");
                childLevelElement = returnDoc.CreateElement("Category");
                childLevelElement.SetAttribute("Name", category.CategoryName);
                childLevelElement.SetAttribute("ID", category.CategoryID);
                parentNode.AppendChild(childLevelElement);
            }
        }
        returnDoc.Save(@"C:\Temp\EbayCategories-Modified.xml");
    }

    Yes, there are probably much cleaner ways of dealing with it, but I'm not an XML expert… Keep in mind, eBay categories do not change on a regular basis, so you should be able to cache this data (either in a file or database) for some time. The XML returns a CategoryVersion node that you can use to determine if the category list has changed.

    Technorati Tags: Csharp, eBay
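    As a footnote on the caching point: one quick way to use that CategoryVersion node is to read it back out of the saved response with standard .NET XML APIs and compare it before re-downloading. A sketch (adjust the path to wherever you saved the response):

        // Hypothetical version check against the previously saved response
        XmlDocument doc = new XmlDocument();
        doc.Load(@"C:\Temp\EbayCategories.xml");
        XmlNode versionNode = doc.GetElementsByTagName("CategoryVersion")[0];
        string cachedVersion = (versionNode != null) ? versionNode.InnerText : null;
        // re-run GetCategoriesCall only if eBay reports a newer CategoryVersion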

    Read the article

  • No jQuery, only JavaScript: fadeIn function with ClearType for text

    - by Chetan
    Sorry guys... I am not familiar with JavaScript any more, but I need to add ClearType, or something that can make text anti-aliased, in the IE7 browser. My script is as follows:

    // JavaScript Document
    var CurrentDivIndex=0;
    var TimeOutValue;
    var btn;
    var TimeToFade = 1000.0;

    function ShowDivSlideShow() {
        try {
            if(CurrentDivIndex == 5)
                CurrentDivIndex=0;
            CurrentDivIndex++;
            //alert("Banner" + CurrentDivIndex);
            //alert(CurrentDivIndex);
            var Indexer=1;
            while(Indexer<6) {
                var DivToShow=document.getElementById("Banner" + Indexer);
                DivToShow.style.display = "none";
                btn=document.getElementById("btnb" + Indexer);
                btn.setAttribute("class","none");
                Indexer++;
            }
            var DivToShow=document.getElementById("Banner" + CurrentDivIndex);
            DivToShow.style.display = "block";
            btn=document.getElementById("btnb" + CurrentDivIndex);
            btn.setAttribute("class","activeSlide");
            // btn.className="activeSlide";
            fadeIn();
            TimeOutValue=setTimeout("ShowDivSlideShow()",6000);
        }
        catch(err) {
            alert(err)
        }
    }

    function ShowCustomDiv(CurrentDivIndexRec) {
        clearTimeout(TimeOutValue)
        CurrentDivIndex=CurrentDivIndexRec
        var Indexer=1;
        while(Indexer<6) {
            if(CurrentDivIndex==Indexer) {
                Indexer++;
                continue;
            }
            var DivToShow=document.getElementById("Banner" + Indexer);
            DivToShow.style.display = "none";
            btn=document.getElementById("btnb" + Indexer);
            btn.setAttribute("class","none");
            Indexer++;
        }
        var DivToShow=document.getElementById("Banner" + CurrentDivIndex);
        DivToShow.style.display = "block";
        btn=document.getElementById("btnb" + CurrentDivIndex);
        btn.setAttribute("class","activeSlide");
        btn.className="activeSlide"
        fadeIn();
    }

    function ShowDivSlideShowWithTimeOut(CurrentDivIndexRec) {
        clearTimeout(TimeOutValue)
        CurrentDivIndex=CurrentDivIndexRec;
        var Indexer=1;
        while(Indexer<6) {
            if(CurrentDivIndex==Indexer) {
                Indexer++;
                continue;
            }
            var DivToShow=document.getElementById("Banner" + Indexer);
            DivToShow.style.display = "none";
            btn=document.getElementById("btnb" + Indexer);
            btn.setAttribute("class","none");
            Indexer++;
        }
        var DivToShow=document.getElementById("Banner" + CurrentDivIndexRec);
        DivToShow.style.display = "block";
        btn=document.getElementById("btnb" + CurrentDivIndexRec);
        btn.setAttribute("class","activeSlide");
        TimeOutValue=setTimeout("ShowDivSlideShow()",6000);
    }

    function ShowCustomDivOnClick(CurrentDivIndexRec) {
        clearTimeout(TimeOutValue)
        CurrentDivIndex=CurrentDivIndexRec;
        var Indexer=1;
        while(Indexer<6) {
            if(CurrentDivIndex==Indexer) {
                Indexer++;
                continue;
            }
            var DivToShow=document.getElementById("Banner" + Indexer);
            DivToShow.style.display = "none";
            btn=document.getElementById("btnb" + Indexer);
            btn.setAttribute("class","none");
            Indexer++;
        }
        var DivToShow=document.getElementById("Banner" + CurrentDivIndexRec);
        DivToShow.style.display = "block";
        btn=document.getElementById("btnb" + CurrentDivIndexRec);
        btn.setAttribute("class","activeSlide");
        fadeIn();
        TimeOutValue=setTimeout("ShowDivSlideShow()",6000);
    }

    function setOpacity(level) {
        element=document.getElementById("Banner" + CurrentDivIndex);
        element.style.opacity = level;
        element.style.MozOpacity = level;
        element.style.KhtmlOpacity = level;
        element.style.filter = "alpha(opacity=" + (level * 100) + ");";
    }

    var duration = 300; /* 1000 millisecond fade = 1 sec */
    var steps = 10; /* number of opacity intervals */
    var delay = 6000; /* 5 sec delay before fading out */

    function fadeIn() {
        for (i = 0; i <= 1; i += (1 / steps)) {
            setTimeout("setOpacity(" + i + ")", i * duration);
        }
        // setTimeout("fadeOut()", delay);
    }

    function fadeOut() {
        for (i = 0; i <= 1; i += (1 / steps)) {
            setTimeout("setOpacity(" + (1 - i) + ")", i * duration);
        }
        setTimeout("fadeIn()", duration);
    }
    //end of script

    Now I am very confused where to add:

    $('#slideshow').cycle({
        cleartype: 1 // enable cleartype corrections
    });

    or

    $('#fadingElement').fadeIn(2000, function(){
        $(this).css('filter','');
    });

    so it will work... Please help me...

    Read the article

  • Add Widget via Action in Toolbar

    - by Geertjan
    The question of the day comes from Vadim, who asks on the NetBeans Platform mailing list: "Looking for example showing how to add Widget to Scene, e.g. by toolbar button click." Well, the solution is very similar to this blog entry, where you see a solution provided by Jesse Glick for VisiTrend in Boston: https://blogs.oracle.com/geertjan/entry/zoom_capability

    Other relevant articles to read are as follows:
    http://netbeans.dzone.com/news/which-netbeans-platform-action
    http://netbeans.dzone.com/how-to-make-context-sensitive-actions

    Let's go through it step by step, with this result in the end: a solution involving 4 classes split (optionally, since a central feature of the NetBeans Platform is modularity) across multiple modules.

    The Customer object has a "name" String, and the Droppable capability has a method "doDrop" which takes a Customer object:

    public interface Droppable {
        void doDrop(Customer c);
    }

    In the TopComponent, we use "TopComponent.associateLookup" to publish an instance of "Droppable", which creates a new LabelWidget and adds it to the Scene in the TopComponent. Here's the TopComponent constructor:

    public CustomerCanvasTopComponent() {
        initComponents();
        setName(Bundle.CTL_CustomerCanvasTopComponent());
        setToolTipText(Bundle.HINT_CustomerCanvasTopComponent());
        final Scene scene = new Scene();
        final LayerWidget layerWidget = new LayerWidget(scene);
        Droppable d = new Droppable() {
            @Override
            public void doDrop(Customer c) {
                LabelWidget customerWidget = new LabelWidget(scene, c.getTitle());
                customerWidget.getActions().addAction(ActionFactory.createMoveAction());
                layerWidget.addChild(customerWidget);
                scene.validate();
            }
        };
        scene.addChild(layerWidget);
        jScrollPane1.setViewportView(scene.createView());
        associateLookup(Lookups.singleton(d));
    }

    The Action is displayed in the toolbar and is enabled only if a Droppable is currently in the Lookup:

    @ActionID(
            category = "Tools",
            id = "org.customer.controler.AddCustomerAction")
    @ActionRegistration(
            iconBase = "org/customer/controler/icon.png",
            displayName = "#AddCustomerAction")
    @ActionReferences({
        @ActionReference(path = "Toolbars/File", position = 300)
    })
    @NbBundle.Messages("AddCustomerAction=Add Customer")
    public final class AddCustomerAction implements ActionListener {

        private final Droppable context;

        public AddCustomerAction(Droppable droppable) {
            this.context = droppable;
        }

        @Override
        public void actionPerformed(ActionEvent ev) {
            NotifyDescriptor.InputLine inputLine = new NotifyDescriptor.InputLine("Name:", "Data Entry");
            Object result = DialogDisplayer.getDefault().notify(inputLine);
            if (result == NotifyDescriptor.OK_OPTION) {
                Customer customer = new Customer(inputLine.getInputText());
                context.doDrop(customer);
            }
        }
    }

    Therefore, when the Properties window, for example, is selected, the Action will be disabled. (See the Zoomable example referred to in the link above for another example of this.) As you can see above, when the Action is invoked, a Droppable must be available (otherwise the Action would not have been enabled). The Droppable is obtained in the Action, and a new Customer object is passed to its "doDrop" method.

    The above in pictures - take note of the enablement of the toolbar button with the red dot, on the extreme left of the toolbar in the screenshots below. The screenshots show that the JButton is only enabled if the relevant TopComponent is active and that, when the Action is invoked, the user can enter a name, after which a new LabelWidget is created in the Scene.

    The source code of the above is here:
    http://java.net/projects/nb-api-samples/sources/api-samples/show/versions/7.3/misc/WidgetCreationFromAction

    Note: Showing this as an MVC example is slightly misleading because, depending on which model object ("Customer" or "Droppable") you're looking at, the V and the C are different. From the point of view of "Customer", the TopComponent is the View, while the Action is the Controller, since it determines when the M is displayed. However, from the point of view of "Droppable", the TopComponent is the Controller, since it determines when the Action, which is in this case the View, displays the presence of the M.

    Read the article

  • Breaking 1NF to model subset constraints. Does this sound sane?

    - by Chris Travers
    My first question here; apologies if it is in the wrong forum, but this seems pretty conceptual. I am looking at doing something that goes against conventional wisdom and want to get some feedback as to whether this is totally insane or will result in problems, so critique away! I am on PostgreSQL 9.1 but may be moving to 9.2 for this part of this project.

    To re-iterate: does it seem sane to break 1NF in this way? I am not looking for debugging code so much as for where people see problems that this might lead.

    The Problem

    In double-entry accounting, financial transactions are journal entries with an arbitrary number of lines. Each line has either a left value (debit) or a right value (credit), which can be modelled as a single value with negatives as debits and positives as credits, or vice versa. The sum of all debits and credits must equal zero (so if we go with a single amount field, sum(amount) must equal zero for each financial journal entry). SQL-based databases, pretty much required for this sort of work, have no way to express this sort of constraint natively, and so any approach to enforcing it in the database seems rather complex.

    The Write Model

    The journal entries are append-only. There is a possibility we will add a delete model, but it will be subject to a different set of restrictions and so is not applicable here. If and when we allow deletes, we will probably do them using a simple ON DELETE CASCADE designation on the foreign key, and require that deletes go through a dedicated stored procedure which can enforce the other constraints. So inserts and selects have to be accommodated, but updates and deletes do not, for this task.

    My Proposed Solution

    My proposed solution is to break first normal form and model constraints on arrays of tuples, with a trigger that breaks the rows out into another table.

    CREATE TABLE journal_line (
        entry_id bigserial primary key,
        account_id int not null references account(id),
        journal_entry_id bigint not null, -- adding references later
        amount numeric not null
    );

    I would then add "table methods" to extract debits and credits for reporting purposes:

    CREATE OR REPLACE FUNCTION debits(journal_line) RETURNS numeric
    LANGUAGE sql IMMUTABLE AS
    $$ SELECT CASE WHEN $1.amount < 0 THEN $1.amount * -1 ELSE NULL END; $$;

    CREATE OR REPLACE FUNCTION credits(journal_line) RETURNS numeric
    LANGUAGE sql IMMUTABLE AS
    $$ SELECT CASE WHEN $1.amount > 0 THEN $1.amount ELSE NULL END; $$;

    Then the journal entry table (simplified for this example):

    CREATE TABLE journal_entry (
        entry_id bigserial primary key, -- no natural keys :-(
        journal_id int not null references journal(id),
        date_posted date not null,
        reference text not null,
        description text not null,
        journal_lines journal_line[] not null
    );

    Then a table method and check constraints:

    CREATE OR REPLACE FUNCTION running_total(journal_entry) RETURNS numeric
    LANGUAGE sql IMMUTABLE AS
    $$ SELECT sum(amount) FROM unnest($1.journal_lines); $$;

    ALTER TABLE journal_entry ADD CHECK (((journal_entry.running_total) = 0));

    ALTER TABLE journal_line ADD FOREIGN KEY (journal_entry_id) REFERENCES journal_entry(entry_id);

    And finally we'd have a breakout trigger:

    CREATE OR REPLACE FUNCTION je_breakout() RETURNS TRIGGER
    LANGUAGE PLPGSQL AS
    $$
    BEGIN
        IF TG_OP = 'INSERT' THEN
            INSERT INTO journal_line (journal_entry_id, account_id, amount)
            SELECT NEW.entry_id, account_id, amount FROM unnest(NEW.journal_lines);
            RETURN NEW;
        ELSE
            RAISE EXCEPTION 'Operation Not Allowed';
        END IF;
    END;
    $$;

    And finally:

    CREATE TRIGGER je_breakout_trigger
    AFTER INSERT OR UPDATE OR DELETE ON journal_entry
    FOR EACH ROW EXECUTE PROCEDURE je_breakout();

    Of course the example above is simplified. There will be a status table that will track approval status, allowing for separation of duties, etc. However, the goal here is to prevent unbalanced transactions. Any feedback? Does this sound entirely insane?

    Standard Solutions?

    In getting to this point, I have to say I have looked at four different current ERP solutions to this problem:

    1. Represent every line item as a debit and a credit against different accounts.
    2. Use foreign keys against the line item table to enforce an eventual running total of 0.
    3. Use constraint triggers in PostgreSQL.
    4. Force all validation solely through the app logic.

    My concerns are that #1 is pretty limiting and very hard to audit internally. It's not programmer-transparent, and so it strikes me as being difficult to work with in the future. The second strikes me as very complex, requiring a series of constraints and foreign keys against self to make work; it is therefore complex, hard to sort out (at least in my mind), and thus hard to work with. The fourth could be done, as we force all access through stored procedures anyway, and this is the most common solution (have the app total things up and throw an error otherwise). However, I think proof that a constraint is followed is superior to test cases, and so the question becomes whether this in fact generates insert anomalies rather than solving them. If this is a solved problem, it isn't the case that everyone agrees on the solution...
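    To show the intent, here is a hypothetical round-trip against the simplified schema above (the account IDs and amounts are made up, and note that the check as written relies on the table-method syntax journal_entry.running_total, so treat this as a sketch of the idea rather than verified DDL):

    -- balanced: the line amounts sum to zero, so the running_total check passes
    -- and the trigger breaks the lines out into journal_line
    INSERT INTO journal_entry (journal_id, date_posted, reference, description, journal_lines)
    VALUES (1, current_date, 'INV-1001', 'example sale',
            ARRAY[ROW(NULL, 10, NULL, -100.00)::journal_line,
                  ROW(NULL, 20, NULL,  100.00)::journal_line]);

    -- unbalanced: the amounts sum to -50, so the check constraint should raise
    INSERT INTO journal_entry (journal_id, date_posted, reference, description, journal_lines)
    VALUES (1, current_date, 'INV-1002', 'unbalanced entry',
            ARRAY[ROW(NULL, 10, NULL, -100.00)::journal_line,
                  ROW(NULL, 20, NULL,   50.00)::journal_line]);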

    Read the article

  • Connecting to Oracle DB with JDBC from Nashorn, part 3

    - by Homma
    This post shows how to connect to an Oracle database and run a SQL query over JDBC from JavaScript executed on Nashorn. (The original Japanese entry is at https://blogs.oracle.com/nashorn_ja/entry/nashorn_jdbc_3; parts of its commentary were garbled in extraction, so only the recoverable content is reproduced here.)

    Running a JDBC sample program on Nashorn

    The sample below is the classic JDBC installation check, rewritten as Nashorn JavaScript; it uses the JDBC OCI driver. Run it and provide the connect information, and it will select 'Hello World' from the database.

    // Invoke jjs with -scripting option.
    /*
     * This sample can be used to check the JDBC installation.
     * Just run it and provide the connect information. It will select
     * "Hello World" from the database.
     */
    var OracleDataSource = Java.type("oracle.jdbc.pool.OracleDataSource");

    function main() {
        // Prompt the user for connect information
        print("Please enter information to test connection to the database");
        var user, password, database;
        user = readLine("user: ");
        slash_index = user.indexOf('/');
        if (slash_index != -1) {
            password = user.substring(slash_index + 1)
            user = user.substring(0, slash_index);
        } else
            password = readLine("password: ");
        database = readLine("database(a TNSNAME entry): ");
        java.lang.System.out.print("Connecting to the database...");
        java.lang.System.out.flush();
        print("Connecting...");
        // Open an OracleDataSource and get a connection
        var ods = new OracleDataSource();
        ods.setURL("jdbc:oracle:oci:@" + database);
        ods.setUser(user);
        ods.setPassword(password);
        var conn = ods.getConnection();
        print("connected.");
        // Create a statement
        var stmt = conn.createStatement();
        // Do the SQL "Hello World" thing
        var rset = stmt.executeQuery("select 'Hello World' from dual");
        while (rset.next())
            print(rset.getString(1));
        // close the result set, the statement and the connection
        rset.close();
        stmt.close();
        conn.close();
        print("Your JDBC installation is correct.");
    }

    main();

    A few points worth noting. The oracle.jdbc.pool.OracleDataSource class is obtained with Java.type(), the Nashorn idiom for importing a Java class. Java's System.out.print() and System.out.flush() are called through the java.lang. package prefix. The readEntry() helper used in the original Java sample is replaced with Nashorn's readLine(), and the Java boilerplate is dropped, which keeps the JavaScript version noticeably shorter than the Java original.

    Because this sample uses the JDBC OCI driver, the Oracle client libraries must be on LD_LIBRARY_PATH. Also, since readLine() is used, jjs must be started with the -scripting option:

    $ export LD_LIBRARY_PATH=${ORACLE_HOME}/lib
    $ jjs -scripting -cp ${ORACLE_HOME}/jdbc/lib/ojdbc6.jar JdbcCheckup.js
    Please enter information to test connection to the database
    user: test
    password: test
    database(a TNSNAME entry): orcl
    Connecting to the database...Connecting...
    connected.
    Hello World
    Your JDBC installation is correct.

    The "select 'Hello World' from dual" query runs over the JDBC OCI driver and prints its result. Querying Oracle DB with SQL from Nashorn turns out to be just as straightforward as doing the same from Java with JDBC.
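    As a side note (the host name, port, and service name below are assumptions - adjust them to your environment): if you want to avoid the OCI client libraries and the LD_LIBRARY_PATH setup entirely, the same script should also work with the pure-Java thin driver by changing only the URL:

        // assumes a service named 'orcl' on dbhost:1521
        ods.setURL("jdbc:oracle:thin:@//dbhost:1521/orcl");

    The thin driver needs no native libraries, so only the ojdbc jar on the classpath is required.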

    Read the article

  • LSI 9285-8e and Supermicro SC837E26-RJBOD1 duplicate enclosure ID and slot numbers

    - by Andy Shinn
    I am working with 2 x Supermicro SC837E26-RJBOD1 chassis connected to a single LSI 9285-8e card in a Supermicro 1U host. There are 28 drives in each chassis for a total of 56 drives in 28 RAID1 mirrors. The problem I am running in to is that there are duplicate slots for the 2 chassis (the slots list twice and only go from 0 to 27). All the drives also show the same enclosure ID (ID 36). However, MegaCLI -encinfo lists the 2 enclosures correctly (ID 36 and ID 65). My question is, why would this happen? Is there an option I am missing to use 2 enclosures effectively? This is blocking me rebuilding a drive that failed in slot 11 since I can only specify enclosure and slot as parameters to replace a drive. When I do this, it picks the wrong slot 11 (device ID 46 instead of device ID 19). Adapter #1 is the LSI 9285-8e, adapter #0 (which I removed due to space limitations) is the onboard LSI. Adapter information: Adapter #1 ============================================================================== Versions ================ Product Name : LSI MegaRAID SAS 9285-8e Serial No : SV12704804 FW Package Build: 23.1.1-0004 Mfg. Data ================ Mfg. Date : 06/30/11 Rework Date : 00/00/00 Revision No : 00A Battery FRU : N/A Image Versions in Flash: ================ BIOS Version : 5.25.00_4.11.05.00_0x05040000 WebBIOS Version : 6.1-20-e_20-Rel Preboot CLI Version: 05.01-04:#%00001 FW Version : 3.140.15-1320 NVDATA Version : 2.1106.03-0051 Boot Block Version : 2.04.00.00-0001 BOOT Version : 06.253.57.219 Pending Images in Flash ================ None PCI Info ================ Vendor Id : 1000 Device Id : 005b SubVendorId : 1000 SubDeviceId : 9285 Host Interface : PCIE ChipRevision : B0 Number of Frontend Port: 0 Device Interface : PCIE Number of Backend Port: 8 Port : Address 0 5003048000ee8e7f 1 5003048000ee8a7f 2 0000000000000000 3 0000000000000000 4 0000000000000000 5 0000000000000000 6 0000000000000000 7 0000000000000000 HW Configuration ================ SAS Address : 500605b0038f9210 BBU : Present Alarm : Present NVRAM : Present Serial Debugger : Present Memory : Present Flash : Present Memory Size : 1024MB TPM : Absent On board Expander: Absent Upgrade Key : Absent Temperature sensor for ROC : Present Temperature sensor for controller : Absent ROC temperature : 70 degree Celcius Settings ================ Current Time : 18:24:36 3/13, 2012 Predictive Fail Poll Interval : 300sec Interrupt Throttle Active Count : 16 Interrupt Throttle Completion : 50us Rebuild Rate : 30% PR Rate : 30% BGI Rate : 30% Check Consistency Rate : 30% Reconstruction Rate : 30% Cache Flush Interval : 4s Max Drives to Spinup at One Time : 2 Delay Among Spinup Groups : 12s Physical Drive Coercion Mode : Disabled Cluster Mode : Disabled Alarm : Enabled Auto Rebuild : Enabled Battery Warning : Enabled Ecc Bucket Size : 15 Ecc Bucket Leak Rate : 1440 Minutes Restore HotSpare on Insertion : Disabled Expose Enclosure Devices : Enabled Maintain PD Fail History : Enabled Host Request Reordering : Enabled Auto Detect BackPlane Enabled : SGPIO/i2c SEP Load Balance Mode : Auto Use FDE Only : No Security Key Assigned : No Security Key Failed : No Security Key Not Backedup : No Default LD PowerSave Policy : Controller Defined Maximum number of direct attached drives to spin up in 1 min : 10 Any Offline VD Cache Preserved : No Allow Boot with Preserved Cache : No Disable Online Controller Reset : No PFK in NVRAM : No Use disk activity for locate : No Capabilities ================ RAID Level Supported : RAID0, RAID1, RAID5, RAID6, 
RAID00, RAID10, RAID50, RAID60, PRL 11, PRL 11 with spanning, SRL 3 supported, PRL11-RLQ0 DDF layout with no span, PRL11-RLQ0 DDF layout with span Supported Drives : SAS, SATA Allowed Mixing: Mix in Enclosure Allowed Mix of SAS/SATA of HDD type in VD Allowed Status ================ ECC Bucket Count : 0 Limitations ================ Max Arms Per VD : 32 Max Spans Per VD : 8 Max Arrays : 128 Max Number of VDs : 64 Max Parallel Commands : 1008 Max SGE Count : 60 Max Data Transfer Size : 8192 sectors Max Strips PerIO : 42 Max LD per array : 16 Min Strip Size : 8 KB Max Strip Size : 1.0 MB Max Configurable CacheCade Size: 0 GB Current Size of CacheCade : 0 GB Current Size of FW Cache : 887 MB Device Present ================ Virtual Drives : 28 Degraded : 0 Offline : 0 Physical Devices : 59 Disks : 56 Critical Disks : 0 Failed Disks : 0 Supported Adapter Operations ================ Rebuild Rate : Yes CC Rate : Yes BGI Rate : Yes Reconstruct Rate : Yes Patrol Read Rate : Yes Alarm Control : Yes Cluster Support : No BBU : No Spanning : Yes Dedicated Hot Spare : Yes Revertible Hot Spares : Yes Foreign Config Import : Yes Self Diagnostic : Yes Allow Mixed Redundancy on Array : No Global Hot Spares : Yes Deny SCSI Passthrough : No Deny SMP Passthrough : No Deny STP Passthrough : No Support Security : No Snapshot Enabled : No Support the OCE without adding drives : Yes Support PFK : Yes Support PI : No Support Boot Time PFK Change : Yes Disable Online PFK Change : No PFK TrailTime Remaining : 0 days 0 hours Support Shield State : Yes Block SSD Write Disk Cache Change: Yes Supported VD Operations ================ Read Policy : Yes Write Policy : Yes IO Policy : Yes Access Policy : Yes Disk Cache Policy : Yes Reconstruction : Yes Deny Locate : No Deny CC : No Allow Ctrl Encryption: No Enable LDBBM : No Support Breakmirror : No Power Savings : Yes Supported PD Operations ================ Force Online : Yes Force Offline : Yes Force Rebuild : Yes Deny Force Failed : No Deny Force Good/Bad : No Deny Missing Replace : No Deny Clear : No Deny Locate : No Support Temperature : Yes Disable Copyback : No Enable JBOD : No Enable Copyback on SMART : No Enable Copyback to SSD on SMART Error : Yes Enable SSD Patrol Read : No PR Correct Unconfigured Areas : Yes Enable Spin Down of UnConfigured Drives : Yes Disable Spin Down of hot spares : No Spin Down time : 30 T10 Power State : Yes Error Counters ================ Memory Correctable Errors : 0 Memory Uncorrectable Errors : 0 Cluster Information ================ Cluster Permitted : No Cluster Active : No Default Settings ================ Phy Polarity : 0 Phy PolaritySplit : 0 Background Rate : 30 Strip Size : 64kB Flush Time : 4 seconds Write Policy : WB Read Policy : Adaptive Cache When BBU Bad : Disabled Cached IO : No SMART Mode : Mode 6 Alarm Disable : Yes Coercion Mode : None ZCR Config : Unknown Dirty LED Shows Drive Activity : No BIOS Continue on Error : No Spin Down Mode : None Allowed Device Type : SAS/SATA Mix Allow Mix in Enclosure : Yes Allow HDD SAS/SATA Mix in VD : Yes Allow SSD SAS/SATA Mix in VD : No Allow HDD/SSD Mix in VD : No Allow SATA in Cluster : No Max Chained Enclosures : 16 Disable Ctrl-R : Yes Enable Web BIOS : Yes Direct PD Mapping : No BIOS Enumerate VDs : Yes Restore Hot Spare on Insertion : No Expose Enclosure Devices : Yes Maintain PD Fail History : Yes Disable Puncturing : No Zero Based Enclosure Enumeration : No PreBoot CLI Enabled : Yes LED Show Drive Activity : Yes Cluster Disable : Yes SAS Disable : No Auto Detect BackPlane Enable 
: SGPIO/i2c SEP Use FDE Only : No Enable Led Header : No Delay during POST : 0 EnableCrashDump : No Disable Online Controller Reset : No EnableLDBBM : No Un-Certified Hard Disk Drives : Allow Treat Single span R1E as R10 : No Max LD per array : 16 Power Saving option : Don't Auto spin down Configured Drives Max power savings option is not allowed for LDs. Only T10 power conditions are to be used. Default spin down time in minutes: 30 Enable JBOD : No TTY Log In Flash : No Auto Enhanced Import : No BreakMirror RAID Support : No Disable Join Mirror : No Enable Shield State : Yes Time taken to detect CME : 60s Exit Code: 0x00 Enclosure information: # /opt/MegaRAID/MegaCli/MegaCli64 -encinfo -a1 Number of enclosures on adapter 1 -- 3 Enclosure 0: Device ID : 36 Number of Slots : 28 Number of Power Supplies : 2 Number of Fans : 3 Number of Temperature Sensors : 1 Number of Alarms : 1 Number of SIM Modules : 0 Number of Physical Drives : 28 Status : Normal Position : 1 Connector Name : Port B Enclosure type : SES VendorId is LSI CORP and Product Id is SAS2X36 VendorID and Product ID didnt match FRU Part Number : N/A Enclosure Serial Number : N/A ESM Serial Number : N/A Enclosure Zoning Mode : N/A Partner Device Id : 65 Inquiry data : Vendor Identification : LSI CORP Product Identification : SAS2X36 Product Revision Level : 0718 Vendor Specific : x36-55.7.24.1 Number of Voltage Sensors :2 Voltage Sensor :0 Voltage Sensor Status :OK Voltage Value :5020 milli volts Voltage Sensor :1 Voltage Sensor Status :OK Voltage Value :11820 milli volts Number of Power Supplies : 2 Power Supply : 0 Power Supply Status : OK Power Supply : 1 Power Supply Status : OK Number of Fans : 3 Fan : 0 Fan Speed :Low Speed Fan Status : OK Fan : 1 Fan Speed :Low Speed Fan Status : OK Fan : 2 Fan Speed :Low Speed Fan Status : OK Number of Temperature Sensors : 1 Temp Sensor : 0 Temperature : 48 Temperature Sensor Status : OK Number of Chassis : 1 Chassis : 0 Chassis Status : OK Enclosure 1: Device ID : 65 Number of Slots : 28 Number of Power Supplies : 2 Number of Fans : 3 Number of Temperature Sensors : 1 Number of Alarms : 1 Number of SIM Modules : 0 Number of Physical Drives : 28 Status : Normal Position : 1 Connector Name : Port A Enclosure type : SES VendorId is LSI CORP and Product Id is SAS2X36 VendorID and Product ID didnt match FRU Part Number : N/A Enclosure Serial Number : N/A ESM Serial Number : N/A Enclosure Zoning Mode : N/A Partner Device Id : 36 Inquiry data : Vendor Identification : LSI CORP Product Identification : SAS2X36 Product Revision Level : 0718 Vendor Specific : x36-55.7.24.1 Number of Voltage Sensors :2 Voltage Sensor :0 Voltage Sensor Status :OK Voltage Value :5020 milli volts Voltage Sensor :1 Voltage Sensor Status :OK Voltage Value :11760 milli volts Number of Power Supplies : 2 Power Supply : 0 Power Supply Status : OK Power Supply : 1 Power Supply Status : OK Number of Fans : 3 Fan : 0 Fan Speed :Low Speed Fan Status : OK Fan : 1 Fan Speed :Low Speed Fan Status : OK Fan : 2 Fan Speed :Low Speed Fan Status : OK Number of Temperature Sensors : 1 Temp Sensor : 0 Temperature : 47 Temperature Sensor Status : OK Number of Chassis : 1 Chassis : 0 Chassis Status : OK Enclosure 2: Device ID : 252 Number of Slots : 8 Number of Power Supplies : 0 Number of Fans : 0 Number of Temperature Sensors : 0 Number of Alarms : 0 Number of SIM Modules : 1 Number of Physical Drives : 0 Status : Normal Position : 1 Connector Name : Unavailable Enclosure type : SGPIO Failed in first Inquiry commnad FRU Part Number : 
    N/A
    Enclosure Serial Number : N/A
    ESM Serial Number : N/A
    Enclosure Zoning Mode : N/A
    Partner Device Id : Unavailable
    Inquiry data :
    Vendor Identification : LSI
    Product Identification : SGPIO
    Product Revision Level : N/A
    Vendor Specific :

    Exit Code: 0x00

    Now, notice that each slot 11 device shows an enclosure ID of 36; I think this is where the discrepancy happens. One should be 36, but the other should be on enclosure 65. Drives in slot 11:

    Enclosure Device ID: 36
    Slot Number: 11
    Drive's postion: DiskGroup: 5, Span: 0, Arm: 1
    Enclosure position: 0
    Device Id: 48
    WWN:
    Sequence Number: 11
    Media Error Count: 0
    Other Error Count: 0
    Predictive Failure Count: 0
    Last Predictive Failure Event Seq Number: 0
    PD Type: SATA
    Raw Size: 2.728 TB [0x15d50a3b0 Sectors]
    Non Coerced Size: 2.728 TB [0x15d40a3b0 Sectors]
    Coerced Size: 2.728 TB [0x15d400000 Sectors]
    Firmware state: Online, Spun Up
    Is Commissioned Spare : YES
    Device Firmware Level: A5C0
    Shield Counter: 0
    Successful diagnostics completion on : N/A
    SAS Address(0): 0x5003048000ee8a53
    Connected Port Number: 1(path0)
    Inquiry Data: MJ1311YNG6YYXAHitachi HDS5C3030ALA630 MEAOA5C0
    FDE Enable: Disable
    Secured: Unsecured
    Locked: Unlocked
    Needs EKM Attention: No
    Foreign State: None
    Device Speed: 6.0Gb/s
    Link Speed: 6.0Gb/s
    Media Type: Hard Disk Device
    Drive Temperature :30C (86.00 F)
    PI Eligibility: No
    Drive is formatted for PI information: No
    PI: No PI
    Drive's write cache : Disabled
    Drive's NCQ setting : Enabled
    Port-0 :
    Port status: Active
    Port's Linkspeed: 6.0Gb/s
    Drive has flagged a S.M.A.R.T alert : No

    Enclosure Device ID: 36
    Slot Number: 11
    Drive's postion: DiskGroup: 19, Span: 0, Arm: 1
    Enclosure position: 0
    Device Id: 19
    WWN:
    Sequence Number: 4
    Media Error Count: 0
    Other Error Count: 0
    Predictive Failure Count: 0
    Last Predictive Failure Event Seq Number: 0
    PD Type: SATA
    Raw Size: 2.728 TB [0x15d50a3b0 Sectors]
    Non Coerced Size: 2.728 TB [0x15d40a3b0 Sectors]
    Coerced Size: 2.728 TB [0x15d400000 Sectors]
    Firmware state: Online, Spun Up
    Is Commissioned Spare : NO
    Device Firmware Level: A580
    Shield Counter: 0
    Successful diagnostics completion on : N/A
    SAS Address(0): 0x5003048000ee8e53
    Connected Port Number: 0(path0)
    Inquiry Data: MJ1313YNG1VA5CHitachi HDS5C3030ALA630 MEAOA580
    FDE Enable: Disable
    Secured: Unsecured
    Locked: Unlocked
    Needs EKM Attention: No
    Foreign State: None
    Device Speed: 6.0Gb/s
    Link Speed: 6.0Gb/s
    Media Type: Hard Disk Device
    Drive Temperature :30C (86.00 F)
    PI Eligibility: No
    Drive is formatted for PI information: No
    PI: No PI
    Drive's write cache : Disabled
    Drive's NCQ setting : Enabled
    Port-0 :
    Port status: Active
    Port's Linkspeed: 6.0Gb/s
    Drive has flagged a S.M.A.R.T alert : No

    Update 06/28/12: I finally have some new information about (what we think is) the root cause of this problem, so I thought I would share. After getting in contact with a very knowledgeable Supermicro tech, they provided us with a tool called Xflash (it doesn't appear to be readily available on their FTP). When we gathered some information using this utility, my colleague found something very strange:

    [root@mogile2 test]# ./xflash.dat -i get avail
    Initializing Interface.
    Expander: SAS2X36 (SAS2x36)
    1) SAS2X36 (SAS2x36) (50030480:00EE917F) (0.0.0.0)
    2) SAS2X36 (SAS2x36) (50030480:00E9D67F) (0.0.0.0)
    3) SAS2X36 (SAS2x36) (50030480:0112D97F) (0.0.0.0)

    This lists the connected enclosures. You see the 3 connected (we have since added a 3rd and a 4th which is not yet showing up) with their respective SAS address / WWN (50030480:00EE917F).
    Now we can use this address to get information on the individual enclosures:

    [root@mogile2 test]# ./xflash.dat -i 5003048000EE917F get exp
    Initializing Interface.
    Expander: SAS2X36 (SAS2x36)
    Reading the expander information..........
    Expander: SAS2X36 (SAS2x36) B3
    SAS Address: 50030480:00EE917F
    Enclosure Logical Id: 50030480:0000007F
    IP Address: 0.0.0.0
    Component Identifier: 0x0223
    Component Revision: 0x05

    [root@mogile2 test]# ./xflash.dat -i 5003048000E9D67F get exp
    Initializing Interface.
    Expander: SAS2X36 (SAS2x36)
    Reading the expander information..........
    Expander: SAS2X36 (SAS2x36) B3
    SAS Address: 50030480:00E9D67F
    Enclosure Logical Id: 50030480:0000007F
    IP Address: 0.0.0.0
    Component Identifier: 0x0223
    Component Revision: 0x05

    [root@mogile2 test]# ./xflash.dat -i 500304800112D97F get exp
    Initializing Interface.
    Expander: SAS2X36 (SAS2x36)
    Reading the expander information..........
    Expander: SAS2X36 (SAS2x36) B3
    SAS Address: 50030480:0112D97F
    Enclosure Logical Id: 50030480:0112D97F
    IP Address: 0.0.0.0
    Component Identifier: 0x0223
    Component Revision: 0x05

    Did you catch it? The first 2 enclosures' logical ID is partially masked out, where the 3rd one (which has a correct unique enclosure ID) is not. We pointed this out to Supermicro and were able to confirm that this address is supposed to be set during manufacturing, and that there was a problem with a certain batch of these enclosures where the logical ID was not set. We believe that the RAID controller is determining the ID based on the logical ID, and since our first 2 enclosures have the same logical ID, they get the same enclosure ID. We also confirmed that 0000007F is the default ID that comes from LSI.

    The next pointer that helps confirm this could be a manufacturing problem with a run of JBODs is the fact that all 6 of the enclosures that have this problem begin with 00E. I believe that between 00E8 and 00EE Supermicro forgot to program the logical IDs correctly and neglected to recall or fix the problem post-production. Fortunately for us, there is a tool to manage the WWN and logical ID of the devices from Supermicro: ftp://ftp.supermicro.com/utility/ExpanderXtools_Lite/. Our next step is to schedule a shutdown of these JBODs (after data migration), reprogram the logical ID, and see if it solves the problem.

    Update 06/28/12 #2: I just discovered this FAQ at Supermicro while Google searching for "lsi 0000007f": http://www.supermicro.com/support/faqs/faq.cfm?faq=11805. I still don't understand why, in the last several times we contacted Supermicro, they never directed us to this article :\
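    For reference, once the logical IDs are reprogrammed and the second chassis enumerates as enclosure 65, I am hoping the failed drive can finally be addressed with the normal enclosure:slot syntax, something like the following (syntax from memory, so verify the enclosure ID with -encinfo first):

        # hypothetical invocation - confirm enclosure/slot before running
        /opt/MegaRAID/MegaCli/MegaCli64 -PDRbld -Start -PhysDrv [65:11] -a1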

    Read the article

  • SQL SERVER – Fix : Error : 3117 : The log or differential backup cannot be restored because no files

    - by pinaldave
    I received the following email from one of my readers.

    Dear Pinal,

    I am new to SQL Server and our regular DBA is on vacation. Our production database had a problem and I have just restored the full database backup to the production server. When I try to apply the log backup, I get the following error. I am sure this is a valid log backup file. Screenshot is attached.

    [A few other details regarding server/IP address removed]

    Msg 3117, Level 16, State 1, Line 1
    The log or differential backup cannot be restored because no files are ready to roll forward.
    Msg 3013, Level 16, State 1, Line 1
    RESTORE LOG is terminating abnormally.

    Screenshot attached. [Removed as it contained a live IP address]

    Please help immediately.

    Well, I answered this question in an earlier post, 2 years ago, over here: SQL SERVER – Fix : Error : Msg 3117, Level 16, State 4 The log or differential backup cannot be restored because no files are ready to rollforward. However, I will try to explain it a little more this time.

    For a SQL Server database to be used, it should be in the online state. There are multiple states of a SQL Server database:

    - ONLINE (available - online for data)
    - OFFLINE
    - RESTORING
    - RECOVERING
    - RECOVERY PENDING
    - SUSPECT
    - EMERGENCY (limited availability)

    If the database is online, it means it is active and in operational mode. It does not make sense to apply further logs from backup if operations have continued on this database. The common practice during the backup restore process is to specify the keyword RECOVERY when the database is restored. When the RECOVERY keyword is specified, SQL Server brings the database back online and will not accept any further log backups.

    However, if you want to restore more than one backup file - i.e., after restoring the full backup you want to apply a differential or log backup - you cannot do that while the database is online and active. The database needs to be in a state where it can accept further backup data, not online data requests. If SQL Server were online and also accepting backup files, there could be data inconsistency. This is the reason that, when there is more than one database backup file to be restored, one has to restore the database with the NORECOVERY keyword in the RESTORE operation.

    I suggest you all read one more post written by me earlier. In that post, I explained the timeline with images and graphics: SQL SERVER – Backup Timeline and Understanding of Database Restore Process in Full Recovery Model.

    Sample code for reference:

    RESTORE DATABASE AdventureWorks
    FROM DISK = 'C:\AdventureWorksFull.bak'
    WITH NORECOVERY;

    RESTORE DATABASE AdventureWorks
    FROM DISK = 'C:\AdventureWorksDiff.bak'
    WITH RECOVERY;

    In this post, I am not trying to cover complete backup and recovery. I am just attempting to address one type of error and its resolution. Please test these scenarios on a development server. Playing with live database backup and recovery is always very crucial and needs to be properly planned. Leave a comment here if you need help with this subject.

    Similar post: SQL SERVER – Restore Sequence and Understanding NORECOVERY and RECOVERY

    Note: We will cover standby server maintenance and recovery in another blog post; it is intentionally not covered in this post.

    Reference: Pinal Dave (http://blog.SQLAuthority.com)

    Filed under: Pinal Dave, Readers Question, SQL, SQL Authority, SQL Backup and Restore, SQL Error Messages, SQL Query, SQL Scripts, SQL Server, SQL Tips and Tricks, T SQL, Technology
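    By the way, you can watch the database move through the states listed above by querying the sys.databases catalog view between restore steps; a database restored WITH NORECOVERY will report RESTORING until a restore WITH RECOVERY completes:

    -- check the current state of the database between restore steps
    SELECT name, state_desc
    FROM sys.databases
    WHERE name = 'AdventureWorks';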

    Read the article

  • GPGPU

    What: GPU obviously stands for Graphics Processing Unit (the silicon powering the display you are using to read this blog post). The extra GP in front of that stands for General Purpose computing. So, altogether GPGPU refers to computing we can perform on a GPU for purposes beyond just drawing on the screen. In effect, we can use a GPGPU a bit like we already use a CPU: to perform some calculation (that doesn't have to have any visual element to it). The attraction is that a GPGPU can be orders of magnitude faster than a CPU. Why: When I was at the SuperComputing conference in Portland last November, GPGPUs were all the rage. A quick online search reveals many articles introducing the GPGPU topic. I'll just share three here: pcper (ignoring all pages except the first, it is a good consumer perspective), gizmodo (a nice take using mostly layman terms) and vizworld (answering the question on "what's the big deal"). The GPGPU programming paradigm (from a high level) is simple: in your CPU program you define functions (aka kernels) that take some input, perform the costly operation and return the output. The kernels are the things that execute on the GPGPU, leveraging its power (and hence executing faster than they could on the CPU), while the host CPU program waits for the results or asynchronously performs other tasks. However, GPGPUs have different characteristics to CPUs, which means they are suitable only for certain classes of problem (i.e. data parallel algorithms) and not for others (e.g. algorithms with branching or recursion or other complex flow control). You also pay a high cost for transferring the input data from the CPU to the GPU (and, vice versa, the results back to the CPU), so the computation itself has to be long enough to justify the transfer overhead. If your problem space fits the criteria then you probably want to check out this technology. How: So where can you get a graphics card to start playing with all this? At the time of writing, the two main vendors, ATI (owned by AMD) and NVIDIA, are the obvious players in this industry. You can read about GPGPU on this AMD page and also on this NVIDIA page. NVIDIA's website also has a free chapter on the topic from the "GPU Gems" book: A Toolkit for Computation on GPUs. If you followed the links above, then you've already come across some of the choices of programming models that are available today. Essentially, AMD is offering their ATI Stream technology accessible via a language they call Brook+; NVIDIA offers their CUDA platform which is accessible from CUDA C. Choosing either of those locks you into the GPU vendor, and hence your code cannot run on systems with cards from the other vendor (e.g. imagine if your CPU code would run on Intel chips but not AMD chips). Having said that, both vendors plan to support a new emerging standard called OpenCL, which theoretically means your kernels can execute on any GPU that supports it. To learn more about all of these there is a website: gpgpu.org. The caveat about that site is that (currently) it completely ignores the Microsoft approach, which I touch on next. On Windows, there is already a cross-GPU-vendor way of programming GPUs and that is the DirectX API. Specifically, on Windows Vista and Windows 7, the DirectX 11 API offers a dedicated subset of the API for GPGPU programming: DirectCompute. You use this API on the CPU side to set up and execute the kernels that run on the GPU. The kernels are written in a language called HLSL (High Level Shader Language). You can use DirectCompute with HLSL to write a "compute shader", which is the term DirectX uses for what I've been referring to in this post as a "kernel". For a comprehensive collection of links about this (including tutorials, videos and samples) please see my blog post: DirectCompute. Note that there are many efforts to build even higher level languages on top of DirectX that aim to expose GPGPU programming to a wider audience by making it as easy as today's mainstream programming models. I'll mention here just two of those efforts: Accelerator from MSR and Brahma by Ananth. Comments about this post welcome at the original blog.

    Read the article

  • MySQL Syslog Audit Plugin

    - by jonathonc
    This post shows the construction process of the Syslog Audit plugin that was presented at MySQL Connect 2012. It is based on an environment that has the appropriate development tools enabled, including gcc, g++ and cmake. It also assumes you have downloaded the MySQL source code (5.5.16 or higher) and have compiled and installed the system into the /usr/local/mysql directory ready for use. The information provided below is designed to show the different components that make up a plugin, and specifically an audit type plugin, and how it comes together to be used within the MySQL service. The MySQL Reference Manual contains information regarding the plugin API and how it can be used, so please refer there for more detailed information. The code in this post is designed to give the simplest information necessary, so handling every return code, managing race conditions etc. is not part of this example code. Let's start by looking at the most basic implementation of our plugin code as seen below:

      /*
         Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
         Author:  Jonathon Coombes
         Licence: GPL
         Description: An auditing plugin that logs to syslog and
                      can adjust the loglevel via the system variables.
      */
      #include <stdio.h>
      #include <string.h>
      #include <mysql/plugin_audit.h>
      #include <syslog.h>

    There is a commented header detailing copyright/licencing and meta-data information and then the include headers. The two important include statements for our plugin are the syslog.h header, which gives us the structures for syslog, and the plugin_audit.h include which has details regarding the audit-specific plugin API. Note that we do not need to include the general plugin header plugin.h, as this is done within the plugin_audit.h file already. To implement our plugin within the current source tree we need to add it into the source code and compile:

      > cd /usr/local/src/mysql-5.5.28/plugin
      > mkdir audit_syslog
      > cd audit_syslog

    A simple CMakeLists.txt file is created to manage the plugin compilation:

      MYSQL_ADD_PLUGIN(audit_syslog audit_syslog.cc MODULE_ONLY)

    Run the cmake command at the top level of the source and then you can compile the plugin using the 'make' command. This results in a compiled audit_syslog.so library, but currently it is not much use to MySQL as there is no API level defined to communicate with the MySQL service. Now we need to define the general plugin structure that enables MySQL to recognise the library as a plugin, be able to install/uninstall it, and have it show up in the system. The structure is defined in the plugin.h file in the MySQL source code.
      /*
        Plugin library descriptor
      */
      mysql_declare_plugin(audit_syslog)
      {
        MYSQL_AUDIT_PLUGIN,           /* plugin type          */
        &audit_syslog_descriptor,     /* descriptor handle    */
        "audit_syslog",               /* plugin name          */
        "Author Name",                /* author               */
        "Simple Syslog Audit",        /* description          */
        PLUGIN_LICENSE_GPL,           /* licence              */
        audit_syslog_init,            /* init function        */
        audit_syslog_deinit,          /* deinit function      */
        0x0001,                       /* plugin version       */
        NULL,                         /* status variables     */
        NULL,                         /* system variables     */
        NULL,                         /* no reserves          */
        0,                            /* no flags             */
      }
      mysql_declare_plugin_end;

    The general plugin descriptor above is standard for all plugin types in MySQL. The plugin type is defined along with the init/deinit functions and interface methods into the system for sharing information, and various other metadata. The descriptors have an internally recognised version number so that plugins can be matched against the API on the running server. The other details are usually related to the type-specific methods and structures to implement the plugin. Each plugin also has a type-specific descriptor, which details how the plugin is implemented for the specific purpose of that plugin type.

      /*
        Plugin type-specific descriptor
      */
      static struct st_mysql_audit audit_syslog_descriptor=
      {
        MYSQL_AUDIT_INTERFACE_VERSION,                        /* interface version    */
        NULL,                                                 /* release_thd function */
        audit_syslog_notify,                                  /* notify function      */
        { (unsigned long) MYSQL_AUDIT_GENERAL_CLASSMASK |
                          MYSQL_AUDIT_CONNECTION_CLASSMASK }  /* class mask           */
      };

    In this particular case, the release_thd function has not been defined as it is not required. The important method for auditing is the notify function, which is activated when an event occurs on the system. The notify function is designed to activate on an event and the implementation will determine how it is handled. For the audit_syslog plugin, the use of the syslog feature sends all events to the syslog for recording. The class mask allows us to determine what type of events are being seen by the notify function. There are currently two major types of event: 1. General Events: This includes general logging, errors, status and result type events. This is the main one for tracking the queries and operations on the database. 2. Connection Events: This group is based around user logins. It monitors connections and disconnections, but also if somebody changes user while connected. With most audit plugins, the principle behind the plugin is to track changes to the system over time, and counters can be an important part of this process. The next step is to define and initialise the counters that are used to track the events in the service. There are 3 counters defined in total for our plugin - the # of general events, the # of connection events and the total number of events.
      static volatile int total_number_of_calls;
      /* Count MYSQL_AUDIT_GENERAL_CLASS event instances */
      static volatile int number_of_calls_general;
      /* Count MYSQL_AUDIT_CONNECTION_CLASS event instances */
      static volatile int number_of_calls_connection;

    The init and deinit functions for the plugin are there to be called when the plugin is activated and when it is terminated. These offer the best option to initialise the counters for our plugin:

      /*
        Initialize the plugin at server start or plugin installation.
      */
      static int audit_syslog_init(void *arg __attribute__((unused)))
      {
          openlog("mysql_audit:",LOG_PID|LOG_PERROR|LOG_CONS,LOG_USER);
          total_number_of_calls= 0;
          number_of_calls_general= 0;
          number_of_calls_connection= 0;
          return(0);
      }

    The init function does a call to openlog to initialise the syslog functionality. The parameters are the service to log under ("mysql_audit" in this case), the syslog flags and the facility for the logging. Then each of the counters is initialised to zero and success is returned. If the init function is not defined, it will return success by default.

      /*
        Terminate the plugin at server shutdown or plugin deinstallation.
      */
      static int audit_syslog_deinit(void *arg __attribute__((unused)))
      {
          closelog();
          return(0);
      }

    The deinit function will simply close our syslog connection and return success. Note that the syslog functionality is part of the glibc libraries and does not require any external dependencies. The function names are what we define in the general plugin structure, so these have to match, otherwise there will be errors. The next step is to implement the event notifier function that was defined in the type-specific descriptor (audit_syslog_descriptor), which is audit_syslog_notify.

      /* Event notifier function */
      static void audit_syslog_notify(MYSQL_THD thd __attribute__((unused)), unsigned int event_class, const void *event)
      {
        total_number_of_calls++;
        if (event_class == MYSQL_AUDIT_GENERAL_CLASS)
        {
          const struct mysql_event_general *event_general= (const struct mysql_event_general *) event;
          number_of_calls_general++;
          syslog(audit_loglevel,"%lu: User: %s Command: %s Query: %s\n",
                 event_general->general_thread_id,
                 event_general->general_user,
                 event_general->general_command,
                 event_general->general_query );
        }
        else if (event_class == MYSQL_AUDIT_CONNECTION_CLASS)
        {
          const struct mysql_event_connection *event_connection= (const struct mysql_event_connection *) event;
          number_of_calls_connection++;
          syslog(audit_loglevel,"%lu: User: %s@%s[%s] Event: %d Status: %d\n",
                 event_connection->thread_id,
                 event_connection->user,
                 event_connection->host,
                 event_connection->ip,
                 event_connection->event_subclass,
                 event_connection->status );
        }
      }

    In the case of an event, the notifier function is called. The first step is to increment the total number of events that have occurred in our database. The event argument is then cast into the appropriate event structure depending on the class type, either a general event or a connection event. The event type counters are incremented and details are sent via the syslog() function out to the system log. There are going to be different line formats and information returned, since the general events have different data compared to the connection events, even though some of the details overlap, for example, user, thread id, host etc. On compiling the code now, there should be no errors and the resulting audit_syslog.so can be loaded into the server and ready to use.
    Log into the server and type:

      mysql> INSTALL PLUGIN audit_syslog SONAME 'audit_syslog.so';

    This will install the plugin and will start updating the syslog immediately. Note that the audit plugin attaches to the immediate thread and cannot be uninstalled while that thread is active. This means that you cannot run the UNINSTALL command until you log into a different connection (thread) on the server. Once the plugin is loaded, the system log will show output such as the following:

      Oct  8 15:33:21 machine mysql_audit:[8337]: 87: User: root[root] @ localhost []  Command: (null)  Query: INSTALL PLUGIN audit_syslog SONAME 'audit_syslog.so'
      Oct  8 15:33:21 machine mysql_audit:[8337]: 87: User: root[root] @ localhost []  Command: Query  Query: INSTALL PLUGIN audit_syslog SONAME 'audit_syslog.so'
      Oct  8 15:33:40 machine mysql_audit:[8337]: 87: User: root[root] @ localhost []  Command: (null)  Query: show tables
      Oct  8 15:33:40 machine mysql_audit:[8337]: 87: User: root[root] @ localhost []  Command: Query  Query: show tables
      Oct  8 15:33:43 machine mysql_audit:[8337]: 87: User: root[root] @ localhost []  Command: (null)  Query: select * from t1
      Oct  8 15:33:43 machine mysql_audit:[8337]: 87: User: root[root] @ localhost []  Command: Query  Query: select * from t1

    It appears that two of each event is being shown, but in actuality these are two separate event types - the result event and the status event. This could be refined further by changing the audit_syslog_notify function to handle the different event sub-types in a different manner. So far, it seems that the logging is working, with events showing up in the syslog output. The issue now is that the counters created earlier to track the number of events by type are not accessible when the plugin is being run. Instead there needs to be a way to expose the plugin-specific information to the service and vice versa. This could be done via the information_schema plugin API, but for something as simple as counters, the obvious choice is the system status variables. This is done using the standard structure and the declaration:

      /*
        Plugin status variables for SHOW STATUS
      */
      static struct st_mysql_show_var audit_syslog_status[]=
      {
        { "Audit_syslog_total_calls",
          (char *) &total_number_of_calls,
          SHOW_INT },
        { "Audit_syslog_general_events",
          (char *) &number_of_calls_general,
          SHOW_INT },
        { "Audit_syslog_connection_events",
          (char *) &number_of_calls_connection,
          SHOW_INT },
        { 0, 0, SHOW_INT }
      };

    The structure is simply the name that will be displayed in the mysql service, the address of the associated variable, and the data type being used for the counter. It is finished with a blank structure to show that there are no more variables. Remember that status variables may have the same name as variables from other plugins, so it is considered appropriate to add the plugin name at the start of the status variable name to avoid confusion. Looking at the status variables in the mysql client shows something like the following:

      mysql> show global status like "audit%";
      +--------------------------------+-------+
      | Variable_name                  | Value |
      +--------------------------------+-------+
      | Audit_syslog_connection_events | 1     |
      | Audit_syslog_general_events    | 2     |
      | Audit_syslog_total_calls       | 3     |
      +--------------------------------+-------+
      3 rows in set (0.00 sec)

    The final connectivity piece for the plugin is to allow the interactive change of the logging level between the plugin and the system.
    This requires the ability to send changes via the mysql service through to the plugin. This is done using the system variables interface and defining a single variable to keep track of the active logging level for the facility.

      /* Plugin system variables for SHOW VARIABLES */
      static MYSQL_SYSVAR_STR(loglevel, audit_loglevel,
                              PLUGIN_VAR_RQCMDARG,
                              "User can specify the log level for auditing",
                              audit_loglevel_check, audit_loglevel_update, "LOG_NOTICE");

      static struct st_mysql_sys_var* audit_syslog_sysvars[] = {
          MYSQL_SYSVAR(loglevel),
          NULL
      };

    So now the system variable 'loglevel' is defined for the plugin and associated to the global variable 'audit_loglevel'. The check or validation function is defined to make sure that no garbage values are attempted in the update of the variable. The update function is used to save the new value to the variable. Note that the audit_syslog_sysvars structure is wired into the general plugin descriptor (the system variables slot, which was NULL in the basic version earlier) to associate the link between the plugin and the system and how much they interact. Next comes the implementation of the validation function and the update function for the system variable. It is worth noting that if you have a simple numeric type such as an integer for the variable, the validate function is often not required, as MySQL will handle the automatic check and validation of simple types.

      /* longest valid value */
      #define MAX_LOGLEVEL_SIZE 100
      /* hold the valid values */
      static const char *possible_modes[]= {
        "LOG_ERROR",
        "LOG_WARNING",
        "LOG_NOTICE",
        NULL
      };

      static int audit_loglevel_check(
          THD*                        thd,    /*!< in: thread handle */
          struct st_mysql_sys_var*    var,    /*!< in: pointer to system variable */
          void*                       save,   /*!< out: immediate result for update function */
          struct st_mysql_value*      value)  /*!< in: incoming string */
      {
          char buff[MAX_LOGLEVEL_SIZE];
          const char *str;
          const char **found;
          int length;

          length= sizeof(buff);
          if (!(str= value->val_str(value, buff, &length)))
              return 1;
          /*
              We need to return a pointer to a locally allocated value in "save".
              Here we pick to search for the supplied value in a global array of
              constant strings and return a pointer to one of them.
              The other possibility is to use the thd_alloc() function to allocate
              a thread local buffer instead of the global constants.
          */
          for (found= possible_modes; *found; found++)
          {
              if (!strcmp(*found, str))
              {
                  *(const char**)save= *found;
                  return 0;
              }
          }
          return 1;
      }

    The validation function simply takes the value being passed in via the SET GLOBAL VARIABLE command and checks if it is one of the pre-defined values allowed in our possible_modes array. If it is found to be valid, then the value is assigned to the save variable ready for passing through to the update function.
      static void audit_loglevel_update(
          THD*                        thd,        /*!< in: thread handle */
          struct st_mysql_sys_var*    var,        /*!< in: system variable being altered */
          void*                       var_ptr,    /*!< out: pointer to dynamic variable */
          const void*                 save)       /*!< in: pointer to temporary storage */
      {
          /* assign the new value so that the server can read it */
          *(char **) var_ptr= *(char **) save;
          /* assign the new value to the internal variable */
          audit_loglevel= *(char **) save;
      }

    Since all the validation has been done already, the update function is quite simple for this plugin. The first part is to update the system variable pointer so that the server can read the value. The second part is to update our own global plugin variable for tracking the value. Notice that the save variable is passed in as a void type to allow handling of various data types, so it must be cast to the appropriate data type when assigning it to the variables. Looking at how the latest changes affect the usage of the plugin and the interaction within the server shows:

      mysql> show global variables like "audit%";
      +-----------------------+------------+
      | Variable_name         | Value      |
      +-----------------------+------------+
      | audit_syslog_loglevel | LOG_NOTICE |
      +-----------------------+------------+
      1 row in set (0.00 sec)

      mysql> set global audit_syslog_loglevel="LOG_ERROR";
      Query OK, 0 rows affected (0.00 sec)

      mysql> show global status like "audit%";
      +--------------------------------+-------+
      | Variable_name                  | Value |
      +--------------------------------+-------+
      | Audit_syslog_connection_events | 1     |
      | Audit_syslog_general_events    | 11    |
      | Audit_syslog_total_calls       | 12    |
      +--------------------------------+-------+
      3 rows in set (0.00 sec)

      mysql> show global variables like "audit%";
      +-----------------------+-----------+
      | Variable_name         | Value     |
      +-----------------------+-----------+
      | audit_syslog_loglevel | LOG_ERROR |
      +-----------------------+-----------+
      1 row in set (0.00 sec)

    So now we have a plugin that will audit the events on the system and log the details to the system log. It allows for interaction to see the number of different events within the server details, and provides a mechanism to change the logging level interactively via the standard system method of the SET command. A more complex auditing plugin may have more detailed code, but each of the areas above is what will be involved, simply expanded on to add more functionality. With the above skeleton code, it is now possible to create your own audit plugins to implement your own auditing requirements. If, however, you are not of the coding persuasion, then you could always consider the option of the MySQL Enterprise Audit plugin that is available to purchase.
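    Rounding out the lifecycle, and following the earlier note that the plugin cannot be uninstalled from the thread it attached to, a minimal removal session might look like this (a sketch, not from the original walkthrough):

      mysql> -- run from a NEW connection, not the one that issued INSTALL PLUGIN
      mysql> UNINSTALL PLUGIN audit_syslog;
      Query OK, 0 rows affected (0.00 sec)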

    Read the article

  • New security options in UCM Patch Set 3

    - by kyle.hatlestad
    While the Patch Set 3 (PS3) release was mostly focused on bug fixes and such, some new features sneaked in there. One of those new features is to the security options. In 10gR3 and prior versions, UCM had a component called Collaboration Manager which allowed project folders to be created and groups of users assigned as members to collaborate on documents. With this component came access control lists (ACLs) for content and folders. Users could assign specific security rights on each and every document and folder within a project. And it was even possible to enable these ACLs without having the Collaboration Manager component enabled (see technote# 603148.1). When 11g came out, Collaboration Manager was no longer available, but the configuration settings to turn on ACLs were still there. Well, in PS3 they're implemented slightly differently. And there is a new component available which adds an additional dimension for defining security on the object: Roles. So now instead of selecting individual users or groups of users (defined as an Alias in User Admin), you can select a particular role. And if a user has that role, they are granted that level of access. This can allow for a much more flexible and manageable security model instead of trying to manage with just user and group access as people come and go in the organization. The way that it is enabled is still through configuration entries. First log in as an administrator and go to Administration -> Admin Server. On the Component Manager page, click the 'advanced component manager' link in the description paragraph at the top. In the list of Disabled Components, enable the RoleEntityACL component. Then click the General Configuration link on the left. In the Additional Configuration Variables text area, enter the new configuration values:

      UseEntitySecurity=true
      SpecialAuthGroups=<comma separated list of Security Groups to honor ACLs>

    The SpecialAuthGroups should be a list of Security Groups that honor the ACL fields. If an ACL is applied to a content item with a Security Group outside this list, it will be ignored. Save the settings and restart the instance. Upon restart, three new metadata fields will be created: xClbraUserList, xClbraAliasList, xClbraRoleList. If you are using OracleTextSearch as the search indexer, be sure to run a Fast Rebuild on the collection. On the Check In, Search, and Update pages, values are added by simply typing in the value and getting a type-ahead list of possible values. Select the value, click Add and then set the level of access (Read, Write, Delete, or Admin). If all of the fields are blank, then it simply falls back to just Security Group and Account access. For Users and Groups, these values are automatically picked up from the corresponding database tables. In the case of Roles, this is an explicitly defined list of choices that are made available. These values must match the role that is being defined from WebLogic Server or your LDAP/AD repository. To add these values, go to Administration -> Admin Applets -> Configuration Manager. On the Views tab, edit the values for the ExternalRolesView. By default, 'guest' and 'authenticated' are added. Once added through the view, they will be available to select from for the Roles Access List. As for how they are stored in the metadata fields, each entry starts with its identifier: ampersand (&) symbol for users, "at" (@) symbol for groups, and colon (:) for roles. Following that is the entity name, and at the end is the level of access in parentheses, e.g. (RWDA). Each entry is separated by a comma. So if you were populating values through Batch Loader or an external source, the values would be defined this way. Detailed information on Access Control Lists can be found in the Oracle Fusion Middleware System Administrator's Guide for Oracle Content Server.
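    As an illustration of that storage format, values in the three fields might look like the following; the user, group, and role names here are hypothetical:

      xClbraUserList:  &jdoe(RWDA),&asmith(R)
      xClbraAliasList: @Marketing(RW)
      xClbraRoleList:  :authenticated(R),:contributor(RW)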

    Read the article

  • Time to stop using “Execute Package Task” – a way to execute packages in the SSIS catalog, taking advantage of the new project deployment model and the logging and reporting features

    - by Kevin Shyr
    I set out to find a way to dynamically call packages in SSIS 2012. The following are two excellent blogs I found and used heavily; the code below adds some parameter types and message types, but is essentially derived from the blogs. http://sqlblog.com/blogs/jamie_thomson/archive/2011/07/16/ssis-logging-in-denali.aspx http://www.ssistalk.com/2012/07/24/quick-tip-run-ssis-2012-packages-synchronously-and-other-execution-options/ The code: Every package will be called by a PackageController package. The PackageController is initialized with some information on which package to run and what information to pass in. The following is the stored procedure called from the "Execute SQL Task". The highlights of the stored procedure: it takes in a package name, project name, and folder name (the folder in the SSIS project deployment to the SSIS catalog); it sets the package variables of the upcoming package execution; it executes the package in the SSIS catalog; it gets the status of the execution and, if any exist, stores the error messages' message_id values in the management database; and it returns a value to the "Execute SQL Task" to manage failure properly.

      CREATE PROCEDURE [AUDIT].[LaunchPackageExecutionInSSISCatalog]
             @PackageName NVARCHAR(255)
             , @ProjectFolder NVARCHAR(255)
             , @ProjectName NVARCHAR(255)
             , @AuditKey INT
             , @DisableNotification BIT
             , @PackageExecutionLogID INT
      AS
      BEGIN TRY
          DECLARE @execution_id BIGINT = 0;

          -- Create a package execution
          EXEC [SSISDB].[catalog].[create_execution]
              @package_name=@PackageName,
              @execution_id=@execution_id OUTPUT,
              @folder_name=@ProjectFolder,
              @project_name=@ProjectName,
              @use32bitruntime=False;

          UPDATE [AUDIT].[PackageInstanceExecutionLog] WITH(ROWLOCK)
          SET [SSISCatalogExecutionID] = @execution_id
          WHERE [PackageInstanceExecutionLogID] = @PackageExecutionLogID

          -- this is to set the execution synchronized so that I can check the result in the end
          EXEC [SSISDB].[catalog].[set_execution_parameter_value]
              @execution_id,
              @object_type=50,
              @parameter_name=N'SYNCHRONIZED',
              @parameter_value=1; -- true

          /********************************************************
              Section: setting parameters
              Source table: SSISDB.internal.object_parameters
              object_type list:
                  20: project level variables
                  30: package level variables
                  50: execution parameter
          ********************************************************/
          EXEC [SSISDB].[catalog].[set_execution_parameter_value]
              @execution_id,
              @object_type=30,
              @parameter_name=N'FromParent_AuditKey',
              @parameter_value=@AuditKey;

          EXEC [SSISDB].[catalog].[set_execution_parameter_value]
              @execution_id,
              @object_type=30,
              @parameter_name=N'FromParent_DisableNotification',
              @parameter_value=@DisableNotification;

          EXEC [SSISDB].[catalog].[set_execution_parameter_value]
              @execution_id,
              @object_type=30,
              @parameter_name=N'FromParent_PackageInstanceExecutionID',
              @parameter_value=@PackageExecutionLogID;
          /********************************************************
              Section: setting variables END
          ********************************************************/

          /* This section is carried over from example code;
             I don't see a reason to change them yet */
          -- Set our package parameters
          EXEC [SSISDB].[catalog].[set_execution_parameter_value]
              @execution_id,
              @object_type=50,
              @parameter_name=N'DUMP_ON_EVENT',
              @parameter_value=1; -- true

          EXEC [SSISDB].[catalog].[set_execution_parameter_value]
              @execution_id,
              @object_type=50,
              @parameter_name=N'DUMP_EVENT_CODE',
              @parameter_value=N'0x80040E4D;0x80004005';

          EXEC [SSISDB].[catalog].[set_execution_parameter_value]
              @execution_id,
              @object_type=50,
              @parameter_name=N'LOGGING_LEVEL',
              @parameter_value= 1; -- Basic

          EXEC [SSISDB].[catalog].[set_execution_parameter_value]
              @execution_id,
              @object_type=50,
              @parameter_name=N'DUMP_ON_ERROR',
              @parameter_value=1; -- true

          /********************************************************
              Section: EXECUTING
          ********************************************************/
          EXEC [SSISDB].[catalog].[start_execution]
              @execution_id;
          /********************************************************
              Section: EXECUTING END
          ********************************************************/

          /********************************************************
              Section: checking execution result
              Source table: [SSISDB].[catalog].[executions]
              status:
                  1: created
                  2: running
                  3: cancelled
                  4: failed
                  5: pending
                  6: ended unexpectedly
                  7: succeeded
                  8: stopping
                  9: completed
          ********************************************************/
          IF EXISTS(SELECT TOP 1 1
                    FROM [SSISDB].[catalog].[executions] WITH(NOLOCK)
                    WHERE [execution_id] = @execution_id
                          AND [status] NOT IN (2, 7, 9)) BEGIN
              /********************************************************
                  Section: logging error messages
                  Source table: [SSISDB].[internal].[operation_messages]
                  message type:
                      10:  OnPreValidate
                      20:  OnPostValidate
                      30:  OnPreExecute
                      40:  OnPostExecute
                      60:  OnProgress
                      70:  OnInformation
                      90:  Diagnostic
                      110: OnWarning
                      120: OnError
                      130: Failure
                      140: DiagnosticEx
                      200: Custom events
                      400: OnPipeline
                  message source type:
                      10: Messages logged by the entry APIs (e.g. T-SQL, CLR Stored procedures)
                      20: Messages logged by the external process used to run package (ISServerExec)
                      30: Messages logged by the package-level objects
                      40: Messages logged by tasks in the control flow
                      50: Messages logged by containers (For, ForEach, Sequence) in the control flow
                      60: Messages logged by the Data Flow Task
              ********************************************************/
              INSERT INTO AUDIT.PackageInstanceExecutionOperationErrorLink
                  SELECT @PackageExecutionLogID
                       , [operation_message_id]
                  FROM [SSISDB].[internal].[operation_messages] WITH(NOLOCK)
                  WHERE operation_id = @execution_id
                        AND message_type IN (120, 130)

              EXEC [AUDIT].[FailPackageInstanceExecution] @PackageExecutionLogID, 'SSISDB Internal operation_messages found'
              GOTO ReturnTrueAsErrorFlag
              /********************************************************
                  Section: checking messages END
              ********************************************************/

              /* This part is not really working, so now using rowcount to pass status
              --DECLARE @PackageErrorMessage NVARCHAR(4000)
              --SET @PackageErrorMessage = @PackageName + 'failed with executionID: ' + CONVERT(VARCHAR(20), @execution_id)
              --RAISERROR (@PackageErrorMessage -- Message text.
              --     , 18 -- Severity,
              --     , 1 -- State,
              --     , N'check table AUDIT.PackageInstanceExecutionErrorMessages' -- First argument.
              --     );
              */
          END
          ELSE BEGIN
              GOTO ReturnFalseAsErrorFlagToSignalSuccess
          END
          /********************************************************
              Section: checking execution result END
          ********************************************************/
      END TRY
      BEGIN CATCH
          DECLARE @SSISCatalogCallError NVARCHAR(MAX)
          SELECT @SSISCatalogCallError = ERROR_MESSAGE()

          EXEC [AUDIT].[FailPackageInstanceExecution] @PackageExecutionLogID, @SSISCatalogCallError

          GOTO ReturnTrueAsErrorFlag
      END CATCH;

      /********************************************************
          Section: end result
      ********************************************************/
      ReturnTrueAsErrorFlag:
          SELECT CONVERT(BIT, 1) AS PackageExecutionErrorExists
          RETURN; -- prevent falling through to the success result set below
      ReturnFalseAsErrorFlagToSignalSuccess:
          SELECT CONVERT(BIT, 0) AS PackageExecutionErrorExists
      GO
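    As a usage sketch, the PackageController's "Execute SQL Task" would call the procedure with something like the following; the folder, project, package, and key values here are hypothetical, and the single-row result set (PackageExecutionErrorExists) is what the task reads to decide success or failure:

      EXEC [AUDIT].[LaunchPackageExecutionInSSISCatalog]
             @PackageName = N'LoadCustomerDim.dtsx',
             @ProjectFolder = N'ETL',
             @ProjectName = N'DataWarehouse',
             @AuditKey = 42,
             @DisableNotification = 0,
             @PackageExecutionLogID = 1001;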

    Read the article

  • BIP BIServer Query Debug

    - by Tim Dexter
    With some help from Bryan, I have uncovered a way of being able to debug, or at least log, what BIServer is doing when BIP sends it a query request. This is not for those of you querying the database directly, but for those using the BIServer and its data model to fetch data for a BIP report. If you have written or used the query builder against BIServer, and when you run the report it chokes with a cryptic message that you have no clue about, read on. When BIP runs a piece of BIServer logical SQL to fetch data, it does not appear to validate it; it just passes it through. So what is BIServer doing on its end? As you may know, you are not writing regular physical SQL, it is actually logical SQL, e.g.

      select Jobs."Job Title" as "Job Title",
          Employees."Last Name" as "Last Name",
          Employees.Salary as Salary,
          Locations."Department Name" as "Department Name",
          Locations."Country Name" as "Country Name",
          Locations."Region Name" as "Region Name"
      from HR.Locations Locations, HR.Employees Employees, HR.Jobs Jobs

    The tables might not even be physical tables; we don't care, that's what the BIServer and its model are for. You have put all the effort into building the model, just go get me the data from wherever it might be. The BIServer takes the logical SQL and uses its vast brain to work out what the physical SQL is, executes it and passes the result back to BIP.

      select distinct T32556.JOB_TITLE as c1,
          T32543.LAST_NAME as c2,
          T32543.SALARY as c3,
          T32537.DEPARTMENT_NAME as c4,
          T32532.COUNTRY_NAME as c5,
          T32577.REGION_NAME as c6
      from JOBS T32556, REGIONS T32577, COUNTRIES T32532, LOCATIONS T32569, DEPARTMENTS T32537, EMPLOYEES T32543
      where ( T32532.COUNTRY_ID = T32569.COUNTRY_ID
          and T32532.REGION_ID = T32577.REGION_ID
          and T32537.DEPARTMENT_ID = T32543.DEPARTMENT_ID
          and T32537.LOCATION_ID = T32569.LOCATION_ID
          and T32543.JOB_ID = T32556.JOB_ID )

    Not a very tough example, I know, but you get the idea. How do I know what the BIServer is up to? How can I find out what the issue might be if BIServer chokes on my query? There are a couple of steps: In the Administrator tool you need to set the logging level for the Administrator user to something greater than the default '0'; '7' is going to give you the max. Just remember to take it back down after you have finished the debug. I needed to bounce my BIServer service. Now here's the secret sauce. Prefix the following to your BIP query: set variable LOGLEVEL = 7; This sets the log level to the one you have in the admin tool. Now run your BIP report. With the prefix in place, BIServer will write to the NQQuery.log file. This is located in the ./OracleBI/server/Log directory. In there you are going to find the complete process the BIServer has gone through to try and get the data back for you. A quick note: if the BIServer can, it's going to hit that great BIEE cache to get your data and you may not see the full log. If this is the case, get into the Administration page (via the browser login), clear out your BIP report cursor, and then re-run. This will hopefully help out if you are trying to debug that annoying BIP report that will not run or is getting some strange data. Don't forget to turn that logging level back down once you are done. This will avoid the DBA screaming at you for sucking up all the disk space on the system.
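    Putting the pieces together, the prefixed version of the logical SQL would be submitted as a single block; here is an abbreviated form of the query from earlier, just to show where the prefix goes:

      set variable LOGLEVEL = 7;
      select Jobs."Job Title" as "Job Title",
          Employees."Last Name" as "Last Name"
      from HR.Employees Employees, HR.Jobs Jobs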

    Read the article

  • Unable to either locate any wireless networks nor even connect to wifi

    - by Leo Chan
    I'm new to Linux. I currently have Ubuntu 12.10 installed. I had a previous problem with my wireless card (see this URL for the previous problem: How to enable wireless in a Fujitsu LH532?). It now shows "Connect to hidden network" and "Create new wireless network", but unfortunately it simply cannot find any wireless networks. I have had a very thorough look around for this problem, for example waiting a little longer, since sometimes it cannot load all the available wireless connections that quickly. My wifi is a hidden network and I have used the connect-to-hidden-network feature, but it keeps asking for my WEP key, which I have checked 4 times (I counted), and it still does not work; it keeps asking for the WEP key. I did try both WEP 40/128-bit key and WPA & WPA2, since it previously worked on my Windows install; my family later decided to use WEP. I only have a quick fix using a USB wireless stick and I wish to have a more solid fix. Thanks. Results from sudo iwlist wlan0 scan:

      wlan0 Scan completed :
      Cell 01 - Address: 00:1E:73:C8:62:BD Channel:6 Frequency:2.437 GHz (Channel 6) Quality=25/70 Signal level=-85 dBm Encryption key:on ESSID:"EnigmaHome" Bit Rates:1 Mb/s; 2 Mb/s; 5.5 Mb/s; 11 Mb/s Bit Rates:6 Mb/s; 9 Mb/s; 12 Mb/s; 18 Mb/s; 24 Mb/s 36 Mb/s; 48 Mb/s; 54 Mb/s Mode:Master Extra:tsf=000000cb3bb10a5c Extra: Last beacon: 696ms ago IE: Unknown: 000A456E69676D61486F6D65 IE: Unknown: 010482848B96 IE: Unknown: 030106 IE: Unknown: 0706484B20010B1E IE: Unknown: 2A0107 IE: Unknown: 32080C1218243048606C IE: Unknown: DD180050F2020101000003A4000027A4000042435E0062322F00
      Cell 02 - Address: C8:3A:35:34:C1:60 Channel:6 Frequency:2.437 GHz (Channel 6) Quality=22/70 Signal level=-88 dBm Encryption key:on ESSID:"Tenda" Bit Rates:1 Mb/s; 2 Mb/s; 5.5 Mb/s; 11 Mb/s; 9 Mb/s 18 Mb/s; 36 Mb/s; 54 Mb/s Bit Rates:6 Mb/s; 12 Mb/s; 24 Mb/s; 48 Mb/s Mode:Master Extra:tsf=000001336e70ffdd Extra: Last beacon: 716ms ago IE: Unknown: 000554656E6461 IE: Unknown: 010882848B961224486C IE: Unknown: 030106 IE: Unknown: 32040C183060 IE: Unknown: 0706434E20010D10 IE: Unknown: 33082001020304050607 IE: Unknown: 33082105060708090A0B IE: Unknown: DD270050F204104A0001101044000101104700102880288028801880A880C83A3534C160103C000101 IE: Unknown: 050400010000 IE: Unknown: 2A0106 IE: Unknown: 2D1AEC0117FFFF0000000000000000000000000000000C0000000000 IE: Unknown: 3D1606000500000000000000000000000000000000000000 IE: Unknown: 7F0101 IE: IEEE 802.11i/WPA2 Version 1 Group Cipher : CCMP Pairwise Ciphers (1) : CCMP Authentication Suites (1) : PSK Preauthentication Supported IE: Unknown: DD180050F2020101000003A4000027A4000042435E0062322F00 IE: Unknown: 0B05010089127A IE: Unknown: DD1E00904C33EC0117FFFF0000000000000000000000000000000C0000000000 IE: Unknown: DD1A00904C3406000500000000000000000000000000000000000000 IE: Unknown: DD07000C4304000000
      Cell 03 - Address: 00:1E:73:C8:62:BF Channel:6 Frequency:2.437 GHz (Channel 6) Quality=47/70 Signal level=-63 dBm Encryption key:on ESSID:"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" Bit Rates:1 Mb/s; 2 Mb/s; 5.5 Mb/s; 11 Mb/s Bit Rates:6 Mb/s; 9 Mb/s; 12 Mb/s; 18 Mb/s; 24 Mb/s 36 Mb/s; 48 Mb/s; 54 Mb/s Mode:Master Extra:tsf=000000cb3bac614e Extra: Last beacon: 1064ms ago IE: Unknown: 00110000000000000000000000000000000000 IE: Unknown: 010482848B96 IE: Unknown: 030106 IE: Unknown: 050C010200000000000000000000 IE: Unknown: 0706484B20010B1E IE: Unknown: 2A0107 IE: Unknown: 32080C1218243048606C IE: Unknown: DD070050F202000100

    Read the article

  • Developing an Implementation Plan with Iterations by Russ Pitts

    - by user535886
    Ok, so you have come to grips with understanding that applying the iterative concept, as defined by OUM, is simply breaking up the project effort you have estimated for each phase into one or more six-week calendar duration blocks of work. The idea being that the business user(s) or key recipient(s) of the work product(s) being developed never go longer than six weeks without having some sort of review or prototyping of the work results for an iteration: "think-a-little", "do-a-little", and "show-a-little" in a six-week or less timeframe, with the business user(s) or key recipient(s) ideally involved throughout. You also understand the OUM concept that you only plan for that which you have knowledge of. The concept further defined: a project plan is initially developed at a high level, and becomes more detailed as project knowledge grows. Agreeing to this concept means you also have to admit that planning with precision beyond six weeks into a project is a fallacy; anything beyond six weeks is a best guess in most cases when dealing with software implementation projects. Project planning, as defined by OUM, begins with the Implementation Plan view, which is a very high-level perspective of the effort estimated for each of the five OUM phases, as well as the number of iterations within each phase. You might wonder how you can predict the number of iterations for each phase at this early point in the project. Remember, project planning is not an exact science; it is initially high-level and abstract in nature, and then becomes more detailed and precise as the project proceeds. So where do you start in defining iterations for each phase of a project? The following are three easy steps to initially define the number of iterations for each phase: Step 1 => Start with identifying the known factors. Prior to starting a project you should know: the agreed-upon time period for an iteration (e.g. 6 weeks, or 4 weeks, or ...) within a phase (recommend keeping the iteration time period consistent within a phase, if not for the entire project); the number of resources available for the project; the total number of man-days (effort) you have estimated for each of the five OUM phases of the project; and the number of work days in a week. Step 2 => Calculate the man-days of effort required for an iteration within a phase. Let's assume for the sake of this example there are 10 project resources, and you have estimated 2,536 man-days of work effort which will need to occur for the elaboration phase of the project. Let's also assume a week for this project is defined as 5 business days, and that each iteration in the elaboration phase will last a calendar duration of 6 weeks. A simple calculation gives the man-days of effort available in a single iteration: ((number of resources * days per week) * duration of iteration) = number of man-days per iteration; ((10 resources * 5 days/week) * 6 weeks) = 300 man-days of effort per iteration. Step 3 => Calculate the number of iterations that can occur within a phase. Next, calculate the number of iterations that can occur for the amount of man-days of effort estimated for the phase being considered: (number of man-days of effort estimated / number of man-days required per iteration) = number of iterations for the phase; (2,536 man-days of estimated effort for the phase / 300 man-days of effort required per iteration) = 8.45 iterations, which should be rounded to a whole number such as 9 iterations*. *Note - It is important to note this is an approximate calculation, not an exact science. This particular example is a simple one, which assumes all resources are utilized throughout the phase, including tech resources, etc. (rounding down or up to a whole number based on project factor considerations). It is also best in many cases to round up to the higher number, as this provides some calendar scheduling contingency.

    Read the article

  • Windows Azure Use Case: New Development

    - by BuckWoody
    This is one in a series of posts on when and where to use a distributed architecture design in your organization's computing needs. You can find the main post here: http://blogs.msdn.com/b/buckwoody/archive/2011/01/18/windows-azure-and-sql-azure-use-cases.aspx Description: Computing platforms evolve over time. Originally computers were directed by hardware wiring - that is, the “code” was the path of the wiring that directed an electrical signal from one component to another, or in some cases a physical switch controlled the path. From there software was developed, first in a very low-level machine language; then, when compilers were created, computer languages could more closely mimic written statements. These language statements can be compiled into the lower-level machine language still used by computers today. Microprocessors replaced logic circuits, sometimes with fewer instructions (Reduced Instruction Set Computing, RISC) and sometimes with more instructions (Complex Instruction Set Computing, CISC). The reason this history is important is that along each technology advancement, computer code has adapted. Writing software for a RISC architecture is significantly different from developing for a CISC architecture. And moving to a distributed architecture like Windows Azure also has specific implementation details that our code must follow. But why make a change? As I’ve described, we need to change our code to follow advances in technology. There’s no point in change for its own sake, but as a new paradigm offers benefits to our users, it’s important for us to leverage those benefits where it makes sense. That’s most often done in new development projects. It’s a far simpler task to take a new project and adapt it to Windows Azure than to try and retrofit older code designed in a previous computing environment. We can still use the same coding languages (.NET, Java, C++) to write code for Windows Azure, but we need to think about the architecture of that code on a new project so that it runs in the most efficient, cost-effective way in a distributed architecture. As we receive new requests from the organization for new projects, a distributed architecture paradigm belongs in the decision matrix for the platform target. Implementation: When you are designing new applications for Windows Azure (or any distributed architecture) there are many important details to consider. But at the risk of over-simplification, there are three main concepts to learn and architect within the new code: Stateless Programming - Stateless programming is a prime concept within distributed architectures. Rather than each server owning the complete processing cycle, the information from an operation that needs to be retained (the “state”) should be persisted to another location (like storage) common to all machines involved in the process. An interesting learning process for stateless programming (although not unique to this language type) is to learn functional programming. Server-Side Processing - Along with developing using a stateless design, the closer you can locate the code processing to the data, the less expensive and faster the code will run. When you control the network layer, this is less important, since you can send vast amounts of data between the server and client, allowing the client to perform processing. In a distributed architecture, you don’t always own the network, so its performance is unpredictable. Also, you may not be able to control the platform the user is on (such as a smartphone, PC or tablet), so it’s imperative to deliver only results and graphical elements where possible. Token-Based Authentication - Also called “Claims-Based Authorization”, this code practice means that instead of allowing a user to log on once and then running code in that context, a more granular level of security is used. A “token” or “claim”, often represented as a certificate, is sent along with a series of requests, or even a single request. In other words, every call to the code is authenticated against the token, rather than allowing a user free rein within the code call. While this is more work initially, it can bring a greater level of security, and it is far more resilient to disconnections. Resources: See the references of “Nondistributed Deployment” and “Distributed Deployment” at the top of this article for more information with graphics: http://msdn.microsoft.com/en-us/library/ee658120.aspx Stack Overflow has a good thread on functional programming: http://stackoverflow.com/questions/844536/advantages-of-stateless-programming Another good discussion on Stack Overflow on server-side processing is here: http://stackoverflow.com/questions/3064018/client-side-or-server-side-processing Claims Based Authorization is described here: http://msdn.microsoft.com/en-us/magazine/ee335707.aspx

    Read the article

< Previous Page | 125 126 127 128 129 130 131 132 133 134 135 136  | Next Page >