Search Results



  • JGoodies HashMap

    - by JohnMcClane
Hi, I'm trying to build a chart program using the presentation model pattern. Using JGoodies for data binding was relatively easy for simple types like strings or numbers, but I can't figure out how to use it with a HashMap. I'll try to explain how the chart works and what my problem is: a chart consists of DataSeries, and a DataSeries consists of DataPoints. I want to have a data model and be able to use different views on the same model (e.g. bar chart, pie chart, ...). Each of them consists of three classes. For example:

- DataPointModel: holds the data model (value, label, category)
- DataPointViewModel: extends the JGoodies PresentationModel; wraps around DataPointModel and holds view properties like font and color
- DataPoint: abstract class extending JComponent; different views must subclass it and implement their own UI

Binding and creating the data model was easy, but I don't know how to bind my data series model.

```java
package at.onscreen.chart;

import java.beans.PropertyChangeListener;
import java.beans.PropertyChangeSupport;
import java.util.Collection;
import java.util.HashMap;
import java.util.Iterator;

public class DataSeriesModel {

    public static final String PROPERTY_DATAPOINT = "dataPoint";
    public static final String PROPERTY_DATAPOINTS = "dataPoints";
    public static final String PROPERTY_LABEL = "label";
    public static final String PROPERTY_MAXVALUE = "maxValue";

    /** holds the data points, keyed by category */
    private HashMap<String, DataPoint> dataPoints;

    /** the label for the data series */
    private String label;

    /** the maximum data point value */
    private Double maxValue;

    /** the model supports property change notification */
    private PropertyChangeSupport propertyChangeSupport;

    public DataSeriesModel() {
        this.maxValue = Double.valueOf(0);
        this.dataPoints = new HashMap<String, DataPoint>();
        this.propertyChangeSupport = new PropertyChangeSupport(this);
    }

    public DataSeriesModel(String label) {
        this();
        this.label = label;
    }

    public DataSeriesModel(String label, DataPoint[] dataPoints) {
        this(label);
        for (int i = 0; i < dataPoints.length; i++) {
            this.addDataPoint(dataPoints[i]);
        }
    }

    public DataSeriesModel(String label, Collection<DataPoint> dataPoints) {
        this(label);
        for (Iterator<DataPoint> it = dataPoints.iterator(); it.hasNext();) {
            this.addDataPoint(it.next());
        }
    }

    /**
     * Adds a new data point to the series. If the series contains a data point
     * with the same category, it will be replaced by the new one.
     */
    public void addDataPoint(DataPoint dataPoint) {
        String category = dataPoint.getCategory();
        DataPoint oldDataPoint = this.getDataPoint(category);
        this.dataPoints.put(category, dataPoint);
        this.setMaxValue(Math.max(this.maxValue, dataPoint.getValue()));
        this.propertyChangeSupport.firePropertyChange(PROPERTY_DATAPOINT, oldDataPoint, dataPoint);
    }

    /** returns the data point with the given category, or null if not found */
    public DataPoint getDataPoint(String category) {
        return this.dataPoints.get(category);
    }

    /** removes the data point with the given category from the series, if present */
    public void removeDataPoint(String category) {
        DataPoint dataPoint = this.getDataPoint(category);
        this.dataPoints.remove(category);
        if (dataPoint != null) {
            // recompute the maximum if the removed point held it
            if (dataPoint.getValue().equals(this.getMaxValue())) {
                Double maxValue = Double.valueOf(0);
                for (Iterator<DataPoint> it = this.iterator(); it.hasNext();) {
                    maxValue = Math.max(it.next().getValue(), maxValue);
                }
                this.setMaxValue(maxValue);
            }
        }
        this.propertyChangeSupport.firePropertyChange(PROPERTY_DATAPOINT, dataPoint, null);
    }

    /** removes all data points from the series */
    public void removeAll() {
        this.setMaxValue(Double.valueOf(0));
        this.dataPoints.clear();
        this.propertyChangeSupport.firePropertyChange(PROPERTY_DATAPOINTS, this.getDataPoints(), null);
    }

    /** returns the maximum of all data point values */
    public Double getMaxValue() {
        return this.maxValue;
    }

    /** sets the max value */
    protected void setMaxValue(Double maxValue) {
        Double oldMaxValue = this.getMaxValue();
        this.maxValue = maxValue;
        this.propertyChangeSupport.firePropertyChange(PROPERTY_MAXVALUE, oldMaxValue, maxValue);
    }

    /** returns true if there is a data point with the given category */
    public boolean contains(String category) {
        return this.dataPoints.containsKey(category);
    }

    /** returns the label for the series */
    public String getLabel() {
        return this.label;
    }

    /** returns an iterator over the data points */
    public Iterator<DataPoint> iterator() {
        return this.dataPoints.values().iterator();
    }

    /**
     * Returns a collection of the data points. The collection supports removal,
     * but does not support adding of data points.
     */
    public Collection<DataPoint> getDataPoints() {
        return this.dataPoints.values();
    }

    /** returns the number of data points in the series */
    public int getSize() {
        return this.dataPoints.size();
    }

    public void addPropertyChangeListener(PropertyChangeListener listener) {
        this.propertyChangeSupport.addPropertyChangeListener(listener);
    }

    public void removePropertyChangeListener(PropertyChangeListener listener) {
        this.propertyChangeSupport.removePropertyChangeListener(listener);
    }
}
```

```java
package at.onscreen.chart;

import java.util.Collection;
import java.util.Iterator;

import com.jgoodies.binding.PresentationModel;

public class DataSeriesViewModel extends PresentationModel<DataSeriesModel> {

    public DataSeriesViewModel() {
        super(new DataSeriesModel());
    }

    public DataSeriesViewModel(String label) {
        super(new DataSeriesModel(label));
    }

    public DataSeriesViewModel(String label, DataPoint[] dataPoints) {
        super(new DataSeriesModel(label, dataPoints));
    }

    public DataSeriesViewModel(String label, Collection<DataPoint> dataPoints) {
        super(new DataSeriesModel(label, dataPoints));
    }

    public DataSeriesViewModel(DataSeriesModel model) {
        super(model);
    }

    // the methods below delegate to the wrapped DataSeriesModel bean

    public void addDataPoint(DataPoint dataPoint) { this.getBean().addDataPoint(dataPoint); }

    public boolean contains(String category) { return this.getBean().contains(category); }

    public DataPoint getDataPoint(String category) { return this.getBean().getDataPoint(category); }

    public Collection<DataPoint> getDataPoints() { return this.getBean().getDataPoints(); }

    public String getLabel() { return this.getBean().getLabel(); }

    public Double getMaxValue() { return this.getBean().getMaxValue(); }

    public int getSize() { return this.getBean().getSize(); }

    public Iterator<DataPoint> iterator() { return this.getBean().iterator(); }

    public void removeAll() { this.getBean().removeAll(); }

    public void removeDataPoint(String category) { this.getBean().removeDataPoint(category); }
}
```

```java
package at.onscreen.chart;

import java.beans.PropertyChangeEvent;
import java.beans.PropertyChangeListener;
import java.util.Collection;
import java.util.Iterator;

import javax.swing.JComponent;

public abstract class DataSeries extends JComponent implements PropertyChangeListener {

    /** the view model */
    private DataSeriesViewModel model;

    public DataSeries() {
        this(new DataSeriesViewModel());
    }

    public DataSeries(String label) {
        this(new DataSeriesViewModel(label));
    }

    public DataSeries(String label, DataPoint[] dataPoints) {
        this(new DataSeriesViewModel(label, dataPoints));
    }

    public DataSeries(String label, Collection<DataPoint> dataPoints) {
        this(new DataSeriesViewModel(label, dataPoints));
    }

    public DataSeries(DataSeriesViewModel model) {
        this.model = model;
        this.model.addPropertyChangeListener(this);
        this.createComponents();
    }

    /**
     * Creates, binds and configures UI components. Data point properties can be
     * created here as components or be painted in paintComponent.
     */
    protected abstract void createComponents();

    @Override
    public void propertyChange(PropertyChangeEvent evt) {
        this.repaint();
    }

    // delegation to the view model

    public void addDataPoint(DataPoint dataPoint) { this.model.addDataPoint(dataPoint); }

    public boolean contains(String category) { return this.model.contains(category); }

    public DataPoint getDataPoint(String category) { return this.model.getDataPoint(category); }

    public Collection<DataPoint> getDataPoints() { return this.model.getDataPoints(); }

    public String getLabel() { return this.model.getLabel(); }

    public Double getMaxValue() { return this.model.getMaxValue(); }

    public int getDataPointCount() { return this.model.getSize(); }

    public Iterator<DataPoint> iterator() { return this.model.iterator(); }

    public void removeAll() { this.model.removeAll(); }

    public void removeDataPoint(String category) { this.model.removeDataPoint(category); }

    /** returns the data series view model */
    public DataSeriesViewModel getViewModel() { return this.model; }

    /** returns the underlying data series model */
    public DataSeriesModel getModel() { return this.model.getBean(); }
}
```

```java
package at.onscreen.chart.builder;

import java.util.Collection;

import net.miginfocom.swing.MigLayout;

import at.onscreen.chart.DataPoint;
import at.onscreen.chart.DataSeries;
import at.onscreen.chart.DataSeriesViewModel;

public class BuilderDataSeries extends DataSeries {

    public BuilderDataSeries() { super(); }

    public BuilderDataSeries(String label) { super(label); }

    public BuilderDataSeries(String label, DataPoint[] dataPoints) { super(label, dataPoints); }

    public BuilderDataSeries(String label, Collection<DataPoint> dataPoints) { super(label, dataPoints); }

    public BuilderDataSeries(DataSeriesViewModel model) { super(model); }

    @Override
    protected void createComponents() {
        this.setLayout(new MigLayout());
        /*
         * I want to add a new BuilderDataPoint for each data point in the model,
         * and I want the BuilderDataPoints to be synchronized with the model:
         * e.g. when a data point is removed from the model, the BuilderDataPoint
         * shall be removed from the BuilderDataSeries.
         */
    }
}
```

```java
package at.onscreen.chart.builder;

import javax.swing.JFormattedTextField;
import javax.swing.JTextField;

import at.onscreen.chart.DataPoint;
import at.onscreen.chart.DataPointModel;
import at.onscreen.chart.DataPointViewModel;
import at.onscreen.chart.ValueFormat;

import com.jgoodies.binding.adapter.BasicComponentFactory;
import com.jgoodies.binding.beans.BeanAdapter;

public class BuilderDataPoint extends DataPoint {

    public BuilderDataPoint() { super(); }

    public BuilderDataPoint(String category) { super(category); }

    public BuilderDataPoint(Double value, String label, String category) { super(value, label, category); }

    public BuilderDataPoint(DataPointViewModel model) { super(model); }

    @Override
    protected void createComponents() {
        // bind each text field to a property of the DataPointModel
        BeanAdapter<DataPointModel> beanAdapter = new BeanAdapter<DataPointModel>(this.getModel(), true);
        ValueFormat format = new ValueFormat();
        JFormattedTextField value = BasicComponentFactory.createFormattedTextField(
                beanAdapter.getValueModel(DataPointModel.PROPERTY_VALUE), format);
        this.add(value, "w 80, growx, wrap");
        JTextField label = BasicComponentFactory.createTextField(
                beanAdapter.getValueModel(DataPointModel.PROPERTY_LABEL));
        this.add(label, "growx, wrap");
        JTextField category = BasicComponentFactory.createTextField(
                beanAdapter.getValueModel(DataPointModel.PROPERTY_CATEGORY));
        this.add(category, "growx, wrap");
    }
}
```

To sum it up: I need to know how to bind a HashMap property to the JComponent.components property. JGoodies is, in my opinion, not very well documented; I spent a long time searching the internet, but I did not find any solution to my problem. Hope you can help me.
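One direction that might work with the classes above (a rough, untested sketch, not an authoritative answer): since DataSeriesModel already fires PROPERTY_DATAPOINT events with the added point as newValue and the replaced or removed point as oldValue, and since DataPoint is itself a JComponent, BuilderDataSeries could react to those events and add or remove child components directly, instead of binding the HashMap itself. Note that, depending on how JGoodies forwards bean events, the listener may need to be registered via PresentationModel's addBeanPropertyChangeListener rather than addPropertyChangeListener; that would need verifying.

```java
package at.onscreen.chart.builder;

import java.beans.PropertyChangeEvent;
import java.util.Iterator;

import net.miginfocom.swing.MigLayout;

import at.onscreen.chart.DataPoint;
import at.onscreen.chart.DataSeries;
import at.onscreen.chart.DataSeriesModel;

// Sketch only: keeps child components in sync with the series model by reacting
// to the PROPERTY_DATAPOINT events the model already fires. Constructors are
// omitted; they would simply delegate to super(...) as in the original class.
public class BuilderDataSeries extends DataSeries {

    @Override
    protected void createComponents() {
        this.setLayout(new MigLayout());
        // add a component for every data point already present in the model
        for (Iterator<DataPoint> it = this.iterator(); it.hasNext();) {
            this.add(it.next(), "growx, wrap");
        }
    }

    @Override
    public void propertyChange(PropertyChangeEvent evt) {
        // the model fires PROPERTY_DATAPOINT with the added point as newValue
        // and the replaced/removed point as oldValue (newValue == null on removal)
        if (DataSeriesModel.PROPERTY_DATAPOINT.equals(evt.getPropertyName())) {
            if (evt.getOldValue() instanceof DataPoint) {
                this.remove((DataPoint) evt.getOldValue());
            }
            if (evt.getNewValue() instanceof DataPoint) {
                this.add((DataPoint) evt.getNewValue(), "growx, wrap");
            }
            this.revalidate();
        }
        this.repaint();
    }
}
```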


  • Top 10 Tips & Tricks for Oracle SQL Developer

    - by thatjeffsmith
Being a short week due to the holiday, and with everyone enjoying their Summer vacations (apologies, Southern Hemispherians), I reckoned it was a great time to do one of those lazy recap-Top 10-Reader's Digest type posts. I've been sharing 1-3 tips or 'tricks' a week since I started blogging about SQL Developer, and I have more than enough content to write a book. But since I'm lazy, I'm just going to compile a list of my favorite 'must know' tips instead. I always have to leave out a few tips when I do my presentations, so now I can refer back to this list to make sure I'm not forgetting anything. So without further ado…

1. Configure Your Preferences. Yes, there are a LOT of options. But you don't need to worry about all of them just yet. I do recommend you take a quick look at these ones in particular. Whether you're new to the tool or have been using it for 5 years, don't overlook these settings!

2. Disable Extensions You Aren't Using. If you're not using Data Miner, or if you're not working on a migration, disable those extensions! SQL Developer will run leaner and meaner, plus the user interface will be a bit more simplified, making the tool easier to navigate as well.

3. SQL Recall via Keyboard. Access your history via the keyboard! Cycle through your recent SQL statements using just these magic keystrokes: Ctrl+Up or Ctrl+Down.

4. Format Your Query Output Directly to CSV, XML, HTML, etc. Have the query results pre-formatted in the format of your choice! Too lazy to run the Export wizard for your query result sets? Just add the SQL Developer output hints to your statement (for example, select /*csv*/ * from employees) and have the output auto-magically formatted in the style of your choice!

5. Drag & Drop Multiple Tables to the Worksheet. SQL Developer will auto-join the related objects. You can then toggle over to the Query Builder to toggle off the columns you don't want to query. I guarantee this tip will save you time if you're joining 3 or more tables!

6. Drag & Drop Multiple Tables to a Relational Model. A pretty picture is worth a few dozen DDL scripts? SQL Developer does data modeling! If you Ctrl-drag a table to a model, it will take that table and any related tables and reverse engineer them to a relational model! You can then print it out or export it to HTML, PDF, etc.

7. View Your PL/SQL Execution Output Automatically. Function returns a refcursor? Procedure has 3 OUT parameters? When you run these programs via the Procedure Editor, we automatically capture the output and place it into one or more data grids for you to browse.

8. Disable Automatic Code Insight and Use It On-Demand. Code Editor – Completion Insight – Enable Completion Auto-Popup (keyword being "Auto"). Some folks really don't like it when their IDEs or word processors try to do 'too much' for them. Thankfully SQL Developer allows you either to increase the delay before it attempts to auto-complete your text OR to disable the automatic bit. Instead, you can invoke it on demand.

9. Interactive Debugging: Change Your Variable Values as You Step Through Your PL/SQL. Watches aren't just for watching. You can actually interact with your programs and 'see what happens' when X = 256 instead of 1.

10. Ditch the Tree View for the Schema Browser. There's nothing wrong with the Connection tree for browsing your database objects. But some folks just can't seem to get comfortable with it. So we built them a Schema Browser that uses a drop-down control instead for changing up your schema and object types.

Already Know This Stuff, Want More? Just check out my SQL Developer resource page; it's one of the main links at the top of this page. Or if you can't find something, just drop me a note in the form of a comment on this page and I'll do my best to find it or write it for you.


  • Seven Worlds will collide…. High Availability BI is not such a Distant Sun.

    - by Testas
Over the last 5 years I have observed Microsoft persevere with the notion of self-service BI over a series of conferences, as far back as SQLBits V in Newport. The release of SQL Server 2012, improvements in Excel and the integration with SharePoint 2010 are making this a reality. Business users are now empowered to create their own BI reports through a number of different technologies such as PowerPivot, PowerView and Report Builder. This opens up a whole new way of working: improving staff productivity, promoting efficient decision making and delivering timely business reports.

There is, however, a serious question to answer. What happens should any of these applications become unavailable? More to the point, how would the business react should key business users be unable to fulfil reporting requests for key management meetings when they require it? While the introduction of self-service BI will provide instant access to the creation of management information reports, it will also cause instant support calls should access to the data become unavailable. These are questions that are often overlooked when a business evaluates the need for self-service BI. But as I have written in other blog posts, the thirst for information is unquenchable once the business users have access to the data. When they are unable to access the information, you will be the first to know about it and will be expected to have a resolution to the downtime as soon as possible.

The world of self-service BI is pushing reporting and analytical databases to the tier 1 application level for some of Coeo's customers: a level that is traditionally associated with mission-critical OLTP environments. There is recognition that by making BI readily available to the business user, provisions also need to be made to ensure that the solution is highly available, so that there is minimal disruption to the business. This is where High Availability BI infrastructures provide a solution.

As there is a convergence of technologies to support a self-service BI culture, there is also a convergence of technologies that need to be understood in order to provide the high availability architecture required to support the self-service BI infrastructure. While you may not be the individual that implements these components, understanding the concepts behind them will empower you to have meaningful discussions with the right people should you put this infrastructure in place. There are 7 worlds that you will have to understand to successfully implement a highly available BI infrastructure:

1. Server/Virtualised server hardware/software
2. DNS
3. Network Load Balancing
4. Active Directory
5. Kerberos
6. SharePoint
7. SQL Server

I have found myself over the last 6 months reaching out to knowledge that I learnt years ago when I studied for the Windows 2000 and 2003 Microsoft Certified Systems Engineer (MCSE) certifications. (To the point that I am resuming my studies for the Windows Server 2008 equivalent, to be up to date with newer technologies.) This knowledge has proved very useful in the numerous engagements I have undertaken since being at Coeo, particularly when dealing with High Availability Infrastructures. As a result of running my session at SQLBits X and SQL Saturday in Dublin, the feedback I have received has been that many individuals desire to understand more of the concepts behind the first 6 "worlds" in the list above.

Over the coming weeks, a series of blog posts will be put on this site to help you understand the key concepts of each area as it pertains to a High Availability BI Infrastructure. Each post will not provide exhaustive coverage of the topic. For example, DNS could be a book in its own right when you consider that there are so many different configuration options, with forward lookups, reverse lookups, AD-integrated zones and DNS forwarders to name some examples. What I want to do is share the pertinent points as they pertain to the BI infrastructure that you build, so that you are equipped with the knowledge to have the right discussion when planning this infrastructure.

Next, we will focus on the server infrastructure that will be required to support the High Availability BI Infrastructure, from both a physical box and virtualised perspective.

Thanks

Chris


  • ODI 12c's Mapping Designer - Combining Flow Based and Expression Based Mapping

    - by Madhu Nair
post by David Allan

ODI is renowned for its declarative designer and minimal expression-based paradigm. The new ODI 12c release has extended this even further to provide an extended declarative mapping designer. The ODI 12c mapper is a fusion of ODI's new declarative designer with the familiar flow-based designer, while retaining ODI's key differentiators of:

- minimal expression-based definition,
- the ability to incrementally design an interface and to extract/load data from any combination of sources, and, most importantly,
- backing by ODI's extensible knowledge module framework.

The declarative nature of the product has been extended to include an extensible library of common components that can be used to easily build simple to complex data integration solutions. Big usability improvements, through consistent interactions of components and concepts all constructed around the familiar knowledge module framework, provide the utmost flexibility. Here is a little taster:

So what is a mapping? A mapping comprises a logical design and at least one physical design; it may have many. A mapping can have many targets, of any technology, and can be arbitrarily complex. You can build reusable mappings and use them in other mappings or other reusable mappings. In the example below, all of the information from an Oracle bonus table and a bonus file are joined with an Oracle employees table before being written to a target. Some things that are cool include the one-click expression cross-referencing, so you can easily see what's used where within the design. The logical design in a mapping describes what you want to accomplish (see the animated GIF here illustrating how the above mapping was designed). The physical design lets you configure how it is to be accomplished. So you could have one logical design that is realized as an initial load in one physical design and as an incremental load in another.

In the physical design below, we can customize how the mapping is accomplished by picking Knowledge Modules; in ODI 12c you can pick multiple nodes (logical or physical) and see common properties. This is useful as we can quickly compare property values across objects. Below, we can see knowledge module settings on the access points between execution units side by side; in the example, one table is retrieved via database links and the other is an external table.

In the logical design I had selected an append mode for the integration type, so by default the IKM on the target will choose the most suitable/default IKM, which in this case is an in-built Oracle Insert IKM (see image below). This supports insert and select hints for the Oracle database (the ANSI SQL Insert IKM does not support these), so by default you will get direct path inserts with Oracle on this statement.

In ODI 12c, the mapper is just that: a mapper. Design your mapping and write to multiple targets; the targets can be in the same data server, in different data servers or in totally different technologies. It does not matter. ODI 12c will derive and generate a plan that you can use or customize with knowledge modules. Some of the use cases which are greatly simplified include multiple heterogeneous targets, multi-target inserts for Oracle, and writing of XML.

Let's switch it up now and look at a slightly different example to illustrate expression reuse. In ODI you can define reusable expressions using user functions. These can be reused across mappings and the implementations specialized per technology, so you can have common expressions across Oracle, SQL Server, Hive etc., shielding the design from the physical aspects of the generated language. Another way to reuse is within a mapping itself. In ODI 12c, expressions can be defined and reused within a mapping. Rather than replicating the expression text in larger expressions, you can decompose it into smaller snippets. Below, you can see UNIT_TAX_AMOUNT has been defined and is used in two downstream target columns: it's used in TOTAL_TAX_AMOUNT, plus it's used in UNIT_TAX_AMOUNT (a recording of the calculation). You can see the columns that the expression depends on (upstream) and the columns the expression is used in (downstream) highlighted within the mapper. Also, multi-selecting attributes is a convenient way to see what's being used where; below, I have selected TOTAL_TAX_AMOUNT in the target datastore and UNIT_TAX_AMOUNT in UNIT_CALC. You can now see many expressions at once and understand much more at one time without needlessly clicking around and memorizing information.

Our mantra during development was to keep it simple, make the tool more powerful, and do even more for the user. The development team was a fusion of many teams from Oracle Warehouse Builder, Sunopsis and BEA AquaLogic, debating and perfecting the mapper in ODI 12c. This was quite a project, from supporting the capabilities of ODI in 11g to building the flow-based mapping tool to support the future. I hope this was a useful insight. There is so much more to come on this topic; this is just a preview of much more that you will see of the mapper in ODI 12c.


  • The ugly evolution of running a background operation in the context of an ASP.NET app

    - by Jeff
If you're one of the two people who has followed my blog for many years, you know that I've been going at POP Forums now for almost 15 years. Publishing it as an open source app has been a big help, because it helps me understand how people want to use it, and having it translated into six languages is pretty sweet. Despite this warm and fuzzy group hug, there has been an ugly hack hiding in there for years.

One of the things we find ourselves wanting to do is hide some kind of regular process inside of an ASP.NET application that runs periodically. The motivation for this has always been that a lot of people simply don't have a choice, because they're running the app on shared hosting, or don't otherwise have access to a box that can run some kind of regular background service. In POP Forums, I "solved" this problem years ago by hiding some static timers in an HttpModule. Truthfully, this works well as long as you don't run multiple instances of the app, which in the cloud world is always a possibility. With the arrival of WebJobs in Azure, I'm going to solve this problem. This post isn't about that.

The other little hacky problem that I "solved" was spawning a background thread to queue emails to subscribed users of the forum. This evolved quite a bit over the years, starting with a long-running page to mail users in real time, when I had only a few hundred. By the time it got into the thousands, or tens of thousands, I needed a better way. What I did was launch a new thread that read all of the user data in, then wrote a queued email to the database (as in, the entire body of the email, every time), with the properly formatted opt-out link. It was super inefficient, but it worked. Then I moved my biggest site using it, CoasterBuzz, to an Azure Website, and it stopped working.

So let's start with the first stupid thing I was doing. The new thread was simply created with delegate code inline. As best I can tell, Azure Websites are more aggressive about garbage collection, because that thread didn't queue even one message. When the calling server response went out of scope, so went the magic background thread. Duh, all I had to do was move the thread to a private static variable in the class. That's the way I was able to keep stuff running from the HttpModule. (And yes, I know this is still prone to failure, particularly if the app recycles. For as infrequently as it's used, I have not, however, experienced this.)

It was still failing, but this time I wasn't sure why. It would queue a few dozen messages, then die. Running in Azure, I had to turn on the application logging and FTP in to see what was going on. That led me to a helper method I was using as a delegate to build the unsubscribe links. The idea here is that I didn't want yet another config entry to describe the base URL, appended with the right path that would match the routing table. No, I wanted the app to figure it out for you, so I came up with this little thing:

```csharp
public static string FullUrlHelper(this Controller controller, string actionName, string controllerName, object routeValues = null)
{
    var helper = new UrlHelper(controller.Request.RequestContext);
    var requestUrl = controller.Request.Url;
    if (requestUrl == null)
        return String.Empty;
    var url = requestUrl.Scheme + "://";
    url += requestUrl.Host;
    url += (requestUrl.Port != 80 ? ":" + requestUrl.Port : "");
    url += helper.Action(actionName, controllerName, routeValues);
    return url;
}
```

And yes, that should have been done with a StringBuilder. This is useful for sending out the email verification messages, too. As clever as I thought I was with this, I was using a delegate in the admin controller to format these unsubscribe links for tens of thousands of users. I passed that delegate into a service class that did the email work:

```csharp
Func<User, string> unsubscribeLinkGenerator =
    user => this.FullUrlHelper("Unsubscribe", AccountController.Name,
        new { id = user.UserID, key = _profileService.GetUnsubscribeHash(user) });
_mailingListService.MailUsers(subject, body, htmlBody, unsubscribeLinkGenerator);
```

Cool, right? Actually, not so much. If you look back at the helper, this delegate will then depend on the controller context to learn the routing and format for the URL. As you might have guessed, those things were turning null after a few dozen formatted links, when the original request to the admin controller went away. That this wasn't already happening on my dedicated server is surprising, but again, I understand why the Azure environment might be eager to reclaim a thread after servicing the request.

It's already inefficient that I'm building the entire email for every user, but going back to check the routing table for the right link every time isn't a win either. I put together a little hack to look up one generic URL, and use that as the basis for a string format. If you're wondering why I didn't just use the curly braces up front, it's because they get URL encoded:

```csharp
var baseString = this.FullUrlHelper("Unsubscribe", AccountController.Name,
    new { id = "--id--", key = "--key--" });
baseString = baseString.Replace("--id--", "{0}").Replace("--key--", "{1}");
Func<User, string> unsubscribeLinkGenerator =
    user => String.Format(baseString, user.UserID, _profileService.GetUnsubscribeHash(user));
_mailingListService.MailUsers(subject, body, htmlBody, unsubscribeLinkGenerator);
```

And wouldn't you know it, the new solution works just fine. It's still kind of hacky and inefficient, but it will work until this somehow breaks too.


  • jQuery Selector Tester and Cheat Sheet

    - by SGWellens
I've always appreciated these tools: Expresso and XPath Builder. They make designing regular expressions and XPath selectors almost fun! Did I say fun? I meant less painful. Being able to paste/load text and then interactively play with the search criteria is infinitely better than the code/compile/run/test cycle. It's faster, and you get a much better feel for how the expressions work. So I decided to make my own interactive tool to test jQuery selectors: jQuery Selector Tester.

Here's a sneak peek:

Note: there are some existing tools you may like better:

- http://www.woods.iki.fi/interactive-jquery-tester.html
- http://www.w3schools.com/jquery/trysel.asp?filename=trysel_basic&jqsel=p.intro,%23choose

My tool is different:

- It is one page. You can save it and run it locally without a Web Server.
- It shows the results as a list of iterated objects instead of highlighted html.
- A cheat sheet is on the same page as the tester, which is handy.

I couldn't upload an .htm or .html file to this site, so I hosted it on my personal site here: jQuery Selector Tester.

Design Highlights:

To make the interactive search work, I added a hidden div to the page:

```html
<!--Hidden div holds DOM elements for jQuery to search-->
<div id="HiddenDiv" style="display: none"></div>
```

When ready to search, the searchable html text is copied into the hidden div… this renders the DOM tree in the hidden div:

```javascript
// get the html to search, insert it into the hidden div
var Html = $("#TextAreaHTML").val();
$("#HiddenDiv").html(Html);
```

When doing a search, I modify the search pattern to look only in the HiddenDiv. To do that, I put a space between the patterns. The space is the Ancestor operator (see the Cheat Sheet):

```javascript
// modify search string to only search in our
// hidden div and do the search
var SearchString = "#HiddenDiv " + SearchPattern;
try
{
    var $FoundItems = $(SearchString);
}
```

Big Fat Stinking Faux Pas: I was about to publish this article when I made a big mistake: I tested the tool with Mozilla FireFox. It blowed up… it blowed up real good. In the past I've only had to target IE, so this was quite a revelation. When I started to learn JavaScript, I was disgusted to see all the browser-dependent code. Who wants to spend their time testing against different browsers and versions of browsers? Adding a bunch of 'if-else' code is a tedious and thankless task. I avoided client code as much as I could. Then jQuery came along and all was good. It was browser independent and freed us from the tedium of worrying about version N of the Acme browser. Right? Wrong!

I had used outerHTML to display the selected elements. The problem is Mozilla FireFox doesn't implement outerHTML. I replaced this:

```javascript
// encode the html markup
var OuterHtml = $('<div/>').text(this.outerHTML).html();
```

With this:

```javascript
// encode the html markup
var Html = $('<div>').append(this).html();
var OuterHtml = $('<div/>').text(Html).html();
```

Another problem was that Mozilla FireFox doesn't implement srcElement. I replaced this:

```javascript
var Row = e.srcElement.parentNode;
```

With this:

```javascript
var Row = e.target.parentNode;
```

Another problem was the indexing. The browsers have different ways of indexing. I replaced this:

```javascript
// this cell has the search pattern
var Cell = Row.childNodes[1];

// put the pattern in the search box and search
$("#TextSearchPattern").val(Cell.innerText);
```

With this:

```javascript
// get the correct cell and the text in the cell,
// place the text in the search box and search
var Cell = $(Row).find("TD:nth-child(2)");
var CellText = Cell.text();
$("#TextSearchPattern").val(CellText);
```

So much for the myth of browser independence. Was I overly optimistic and gullible? I don't think so. And when I get my millions from the deposed Nigerian prince I sent money to, you'll see that having faith is not futile.

Notes:

- My goal was to have a single standalone file. I tried to keep the features and CSS to a minimum, adding only enough to make it useful and visually pleasing.
- When testing, I often thought there was a problem with the jQuery selector. Invariably, it was invalid html code. If your results aren't what you expect, don't assume it's the jQuery selector pattern: the html may be invalid.
- To help in development and testing, I added a double-click handler to the rows in the Cheat Sheet table. If you double-click a row, the search pattern is put in the search box, a search is performed and the page is scrolled so you can see the results.
- I left the test html and code in the page.
- If you are using a CDN (non-local) version of the jQuery library, the designer in Visual Studio becomes extremely slow. That's why there are two versions of the library in the header and one is commented out.

For reference, here is the jQuery documentation on selectors: http://api.jquery.com/category/selectors/

Here is a much more comprehensive list of CSS selectors (which jQuery uses): http://www.w3.org/TR/CSS2/selector.html

I hope someone finds this useful.

Steve Wellens
CodeProject


  • top tweets WebLogic Partner Community – March 2012

    - by JuergenKress
Send us your tweets @wlscommunity #WebLogicCommunity and follow us on twitter http://twitter.com/wlscommunity

- PeterPaul: RT @JDeveloper: EJB 3 Deployment guide for WebLogic Server Version: 10.3.4.0 dlvr.it/1J5VcV
- Andrejus Baranovskis: Open ADF PopUp on Page Load fb.me/1Rx9LP3oW
- Sten Vesterli: RT @OracleBlogs: Using the Oracle E-Business Suite SDK for Java on ADF Applications ow.ly/1hVKbB <- Neat! No more WS calls
- Java Buddy: JavaFX 2.0: Example of MediaPlay java-buddy.blogspot.com/2012/03/javafx…
- Georges Saab: Build improvements coming to #openJDK for #jdk8 mail.openjdk.java.net/pipermail/buil…
- NetBeans Team: Share your #Java experience! JavaOne 2012 India call for papers: ow.ly/9xYg0
- GlassFish: GlassFish 3.1.2 Screencasts & Videos – bit.ly/zmQjn2
- chriscmuir: G+: New blog post: ADF Runtimes vs WLS versions as of JDeveloper 11.1.1.6.0 – bit.ly/y8tkgJ
- Michael Heinrichs: New article: Creating a Sprite Animation with JavaFX blog.netopyr.com/2012/03/09/cre…
- Oracle WebLogic: #WebLogic Devcast Webinar Series for March: Enterprise Java Scale Out, JPA, Distributed Grid Data Cache bit.ly/zeUXEV #Coherence
- Andrejus Baranovskis: Extending Application Module for ADF BC Proxy User DB Connection fb.me/Bj1hLUqm
- OTNArchBeat: Oracle Fusion Middleware on JDK 7 | Mark Nelson bit.ly/w7IroZ
- OTNArchBeat: Java Champion Jonas Bonér Explains the Akka Platform bit.ly/x2GbXm
- Adam Bien: (Java) FX Experience Tools–Feels Like Native Mac App: FX Experience Tools application comes with a native Mac O… bit.ly/waHF3H
- GlassFish: GlassFish new recruit and Eclipse integration progress – bit.ly/y5eEkk
- JDeveloper & ADF: Prototyping ADF Libraries dlvr.it/1Hhnw0
- Eric Elzinga: Oracle Fusion Middleware on JDK 7, bit.ly/xkphFQ
- ADF EMG: Working with ADF in Arabic, Hebrew or other right-to-left-written language? Oracle UX asks for your help. groups.google.com/forum/?fromgro…
- Java: A simple #JavaFX Login Form with a TRON like effect ow.ly/9n9AG
- JDeveloper & ADF: Logging in Oracle ADF Applications dlvr.it/1HZhcX
- OTNArchBeat: Oracle Cloud Conference: dates and locations worldwide bit.ly/ywXydR
- UK Oracle User Group: Simon Haslam, ACE Director, presents on #WebLogic for DBAs at #oug_ire2012 j.mp/zG6vz3 @oraclewebcenter @oracleace #dublin
- Steven Davelaar: Working with ADF and not a member of ADF EMG? You miss lots of valuable info, join now! sites.google.com/site/oracleemg…
- Simon Haslam: @MaciejGruszka: Oracle plans to provide Forms & Reports plug-in for OVAB next year to help deployment. #ukoug MW SIG
- GlassFish: Introducing JSR 357: Social Media API – bit.ly/yC8vez
- JAX London: Are you coming to Java EE workshops by @AdamBien at JAX Days? Save £100 by registering today. #jaxdays #javaee jaxdays.com
- WebLogic Community: Welcome to our WebLogic 12c Bootcamp in Munich! If you also want to attend a training, register for the Community oracle.com/partners/goto/…
- chriscmuir: My first webcast for Oracle! (be kind) Basing ADF Business Component View Objects on More that one Entity Object bit.ly/ArKija
- OTNArchBeat: Oracle Weblogic Server 12c is available on Oracle Solaris 11 (SPARC and x86) bit.ly/xE3TLg
- JDeveloper & ADF: Basing ADF Business Component View Objects on More that one Entity Object – YouTube dlvr.it/1H93Qr
- OTNArchBeat: Application-Driven Virtualization with Oracle Virtual Assembly Builder | Ronen Kofman bit.ly/wF1C1N
- Oracle WebLogic: Steve Button's blog: WebLogic Server Singleton Services ow.ly/1hOu4U
- Barbara Ann May: @oracledevtools: New update: #NetBeans IDE 7.1.1, with support for #GlassFish 3.1.2 bit.ly/mOLcQd #java #developer
- OTNArchBeat: Using Coherence with JDeveloper: bit.ly/AkoEQb
- WebLogic Community: WebLogic Partner Community Newsletter February 2012 wp.me/p1LMIb-f3
- GlassFish: GlassFish 3.1.2 – new Podcast episode: bit.ly/wc6oBE
- Frank Nimphius: Cool! Open JDeveloper 11.1.1.5, go help –> check for updates. First thing shown is that 11.1.1.6 is available. Never miss a new release
- Adam Bien: 5 Minutes (Video) With Java EE …Or With NetBeans + GlassFish: This screencast covers a 5-minute development of a… bit.ly/xkOJMf
- WebLogic Community: Free Oracle WebLogic Certification Application Grid Implementation Specialist wp.me/p1LMIb-eT
- OTNArchBeat: Oracle Coherence: First Steps Using Clusters and Basic API Usage | Ricardo Ferreira bit.ly/yYQ3Wz
- GlassFish: JMS 2.0 Early Draft is here – bit.ly/ygT1VN
- OTNArchBeat: Exalogic Networking Part 2 | The Old Toxophilist bit.ly/xuYMIi
- OTNArchBeat: New Release: GlassFish Server 3.1.2. Read All About It! | Paul Davies bit.ly/AtlGxo
- Oracle WebLogic: OTN Virtual Developer Day: #WebLogic 12c & #Coherence post-conference on-demand page live with bonus #Virtualbox lab – bit.ly/xUy6BJ
- Oracle WebLogic: Steve Button's blog: WebLogic Server 11g (10.3.6) Documentation ow.ly/1hJgUB
- Lucas Jellema: Just published an article on the AMIS blog: technology.amis.nl/2012/03/adf-11… ADF 11g – programmatically sorting rich table columns.
- Java Certification: New Course! Learn how to create mobile applications using Java ME: bit.ly/xZj1Jh
- Simon Haslam: @MaciejGruszka WebLogic 12c can run against 11g domain config without changes …and can rollback to 11. #ukoug MW SIG
- Justin Kestelyn: Learn Advanced ADF, free and online bit.ly/wEKSRc

WebLogic Partner Community: for regular information, become a member of the WebLogic Partner Community. Please visit http://www.oracle.com/partners/goto/wls-emea (OPN account required). If you need support with your account, please contact the Oracle Partner Business Center.


  • Installing Catalyst 11.6 for an ATI HD 6970

    - by David Oliver
Ubuntu Maverick 10.10 is displaying the desktop okay (though limited to 1600x1200) after my having installed my new HD 6970 card, so I'm now trying to install the proprietary driver (I understand the open source one requires a more recent kernel than the one in Maverick). The proprietary driver under 'Additional Drivers' resulted in a black screen on boot, so I deactivated it and am trying to follow the manual install instructions in the cchtml Ubuntu Maverick Installation Guide. When I try to create the .deb packages with:

```
sh ati-driver-installer-11-6-x86.x86_64.run --buildpkg Ubuntu/maverick
```

I get:

```
david@skipper:~/catalyst11.6$ sh ati-driver-installer-11-6-x86.x86_64.run --buildpkg Ubuntu/maverick
Created directory fglrx-install.oLN3ux
Verifying archive integrity... All good.
Uncompressing ATI Catalyst(TM) Proprietary Driver-8.861.........................
=====================================================================
 ATI Technologies Catalyst(TM) Proprietary Driver Installer/Packager
=====================================================================
Generating package: Ubuntu/maverick
Package build failed!
Package build utility output:
./packages/Ubuntu/ati-packager.sh: 396: debclean: not found
dpkg-buildpackage: export CFLAGS from dpkg-buildflags (origin: vendor): -g -O2
dpkg-buildpackage: export CPPFLAGS from dpkg-buildflags (origin: vendor):
dpkg-buildpackage: export CXXFLAGS from dpkg-buildflags (origin: vendor): -g -O2
dpkg-buildpackage: export FFLAGS from dpkg-buildflags (origin: vendor): -g -O2
dpkg-buildpackage: export LDFLAGS from dpkg-buildflags (origin: vendor): -Wl,-Bsymbolic-functions
dpkg-buildpackage: source package fglrx-installer
dpkg-buildpackage: source version 2:8.861-0ubuntu1
dpkg-buildpackage: source changed by ATI Technologies Inc. <http://ati.amd.com/support/driver.html>
 dpkg-source --before-build fglrx.64Vzxk
dpkg-buildpackage: host architecture amd64
 debian/rules build
Can't exec "debian/rules": Permission denied at /usr/bin/dpkg-buildpackage line 507.
dpkg-buildpackage: error: debian/rules build failed with unknown exit code -1
Cleaning in directory .
/usr/bin/fakeroot: line 176: debian/rules: Permission denied
debuild: fatal error at line 1319:
couldn't exec fakeroot debian/rules:
dpkg-buildpackage: export CFLAGS from dpkg-buildflags (origin: vendor): -g -O2
dpkg-buildpackage: export CPPFLAGS from dpkg-buildflags (origin: vendor):
dpkg-buildpackage: export CXXFLAGS from dpkg-buildflags (origin: vendor): -g -O2
dpkg-buildpackage: export FFLAGS from dpkg-buildflags (origin: vendor): -g -O2
dpkg-buildpackage: export LDFLAGS from dpkg-buildflags (origin: vendor): -Wl,-Bsymbolic-functions
dpkg-buildpackage: source package fglrx-installer
dpkg-buildpackage: source version 2:8.861-0ubuntu1
dpkg-buildpackage: source changed by ATI Technologies Inc. <http://ati.amd.com/support/driver.html>
 dpkg-source --before-build fglrx.QEmIld
dpkg-buildpackage: host architecture amd64
 debian/rules build
Can't exec "debian/rules": Permission denied at /usr/bin/dpkg-buildpackage line 507.
dpkg-buildpackage: error: debian/rules build failed with unknown exit code -1
Cleaning in directory .
Can't exec "debian/rules": Permission denied at /usr/bin/debuild line 1314.
debuild: fatal error at line 1313:
couldn't exec debian/rules: Permission denied
dpkg-buildpackage: export CFLAGS from dpkg-buildflags (origin: vendor): -g -O2
dpkg-buildpackage: export CPPFLAGS from dpkg-buildflags (origin: vendor):
dpkg-buildpackage: export CXXFLAGS from dpkg-buildflags (origin: vendor): -g -O2
dpkg-buildpackage: export FFLAGS from dpkg-buildflags (origin: vendor): -g -O2
dpkg-buildpackage: export LDFLAGS from dpkg-buildflags (origin: vendor): -Wl,-Bsymbolic-functions
dpkg-buildpackage: source package fglrx-installer
dpkg-buildpackage: source version 2:8.861-0ubuntu1
dpkg-buildpackage: source changed by ATI Technologies Inc. <http://ati.amd.com/support/driver.html>
 dpkg-source --before-build fglrx.xtY6vC
dpkg-buildpackage: host architecture amd64
 debian/rules build
Can't exec "debian/rules": Permission denied at /usr/bin/dpkg-buildpackage line 507.
dpkg-buildpackage: error: debian/rules build failed with unknown exit code -1
Cleaning in directory .
/usr/bin/fakeroot: line 176: debian/rules: Permission denied
debuild: fatal error at line 1319:
couldn't exec fakeroot debian/rules:
dpkg-buildpackage: export CFLAGS from dpkg-buildflags (origin: vendor): -g -O2
dpkg-buildpackage: export CPPFLAGS from dpkg-buildflags (origin: vendor):
dpkg-buildpackage: export CXXFLAGS from dpkg-buildflags (origin: vendor): -g -O2
dpkg-buildpackage: export FFLAGS from dpkg-buildflags (origin: vendor): -g -O2
dpkg-buildpackage: export LDFLAGS from dpkg-buildflags (origin: vendor): -Wl,-Bsymbolic-functions
dpkg-buildpackage: source package fglrx-installer
dpkg-buildpackage: source version 2:8.861-0ubuntu1
dpkg-buildpackage: source changed by ATI Technologies Inc. <http://ati.amd.com/support/driver.html>
 dpkg-source --before-build fglrx.oYWICI
dpkg-buildpackage: host architecture amd64
 debian/rules build
Can't exec "debian/rules": Permission denied at /usr/bin/dpkg-buildpackage line 507.
dpkg-buildpackage: error: debian/rules build failed with unknown exit code -1
Removing temporary directory: fglrx-install.oLN3ux
```

I've installed devscripts, which has debclean in it. I've tried running the command with and without sudo. I'm not experienced with installing from downloads/source, but it seems like the file debian/rules isn't being set to be executable when it needs to be. If I extract only, without using the package builder command, debian/rules is 744. As to what to do next, I'm stumped. Many thanks.


  • 7 reasons you had to be at JavaOne Latin America 2012

    - by Bruno.Borges
Yesterday was 12/12/12, and everybody went crazy on Twitter with cool memes like this one. And maybe you are now wondering why I mentioned 7 (seven) in the blog title. Because I want to play with numbers? Yes! Today is 7 days after JavaOne Latin America 2012 ended (... and I had to figure out an excuse for taking so long to blog about it...). So unless you were at JavaOne Latin America this year, here are 7 things you missed:

1. OTN Lounge mini-theatre. There was a mini-theatre holding several lightning talks. We had people from the SouJava JUG, GoJava JUG, Globalcode, and several other Java gurus and companies running demos, talks, and even more. For example, @drspockbr talked about the ScrumToys project, which demonstrates the power of JSF.

2. Hands On Lab for JAX-RS and WebSockets. One of the cool things to do during JavaOne is to come to these Hands On Labs and really do something using new technologies with the help of experts. This one in particular was covered by me, Arun Gupta, and Reza Rahman. The HOL had more people than laptops (and we had 48 laptops!) interested in understanding and learning about the new stuff that is coming with Java EE 7: things like JAX-RS, Server-Sent Events, and WebSockets. Hey, if you want to try this HOL by yourself, it is available on GitHub, so go for it! If you have questions, just let me know! (A small flavor of the WebSocket piece is sketched at the end of this post.)

3. Java Community Keynote. This keynote presented a lot of cool things, like startups using Java in their projects, the Duke Awards, SouJava winning the JCP Outstanding Award, the Java Band, and even more! It was really a space where the Java community could present what they are doing and what they want to do. There's a lot of interest in the Adopt-a-JSR program and Adopt-OpenJDK. There's also an Adopt-a-JavaEE-JSR program! Take a look if you want to participate and Make the Future Java.

4. Java EE (JMS, JAX-RS) sessions from Reza Rahman, the heavy metal guy. Reza is a well-known professional and Java EE enthusiast from the community who just joined Oracle this year. His sessions were very well attended, perhaps because of the high interest in the new things coming to Java EE 7, like JMS 2.0 and JAX-RS 2.0. If you want to look at what he did at this JavaOne edition, read his blog post. By the way, if you like Java and heavy metal, you should follow him on Twitter as well! :-)

5. Java EE (WebSockets, HTML5) sessions from Arun Gupta, the GlassFish guy. If you don't know Arun Gupta, no worries. You will have time to get to know him while you read his Java EE 6 Pocket Guide. Arun has been evangelizing Java EE for a long time, and is now spreading the word about the upcoming version, Java EE 7. He gave one talk about HTML5 productivity on the Java EE 7 platform, and another one on building web apps with WebSockets. Pretty neat! Arun blogged about JavaOne Latin America as well. Read it here.

6. Java Embedded and JavaFX. If there are two things that are really trending in the Java world right now besides Java EE 7, they are certainly JavaFX and Java Embedded. There were 14 talks covering Java Embedded, from Java Cards to the Raspberry Pi, from Java ME to Java on your TV with Ginga-J. The Internet of Things is becoming real, and Java is the only platform today that can connect it all in a standardized and concise way. JavaFX gained a lot of attention too. There were 8 sessions covering what the platform has to offer in terms of rich user experience. The JavaFX Scene Builder is an awesome tool to start playing with UI design, and coding for JavaFX is like coding Swing with 8 hands, one holding your coffee cup. You can achieve a lot with your two hands (unless you really have 8 hands, then you can achieve 4 times more :-). If you want to read more about JavaFX, go to Stephen Chin's blog post.

7. GlassFish and Friends Party, 1st edition at JavaOne Latin America. This is probably the thing that I'm most proud of. We brought to Brazil the tradition of holding a happy hour for all GlassFish and Java EE friends. This party started almost 7 years ago in San Francisco, and it was about time to bring it to Brazil! The party happened on Tuesday night, right after the JavaOne General Keynote, at the Tribeca Pub. We had about 80 attendees and met a lot of Java EE developers there! People from JUGs, Oracle, Locaweb, and Red Hat showed up too, including some execs from Oracle who couldn't resist and would not miss a party like this one. Lots of caipirinhas, beer, and food for everyone, some cool music... even The Fish walking around the party with Juggy! You can see more photos from the party in an album I shared with the recently created GlassFish Brasil community on Google+ here (but you may be more interested in joining the GlassFish English community). There are also more pictures that Arun took and shared on this link.

So now you may want to consider coming to Brazil next year! Java EE 7 is on its way, and Brazil is happily and patiently waiting for it, with a lot of enthusiasm. By the way, GlassFish and Java EE 6 just celebrated a happy birthday!
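For a taste of the WebSocket material mentioned in item 2, here is a minimal sketch of a Java EE 7 server endpoint using the JSR 356 API. The "/echo" path, the class name, and the echo behavior are illustrative assumptions, not taken from the actual lab:

```java
import javax.websocket.OnMessage;
import javax.websocket.OnOpen;
import javax.websocket.Session;
import javax.websocket.server.ServerEndpoint;

// Minimal JSR 356 WebSocket endpoint sketch. Packaged in a web app, the
// container discovers the annotated class and exposes it at the given path.
@ServerEndpoint("/echo")
public class EchoEndpoint {

    @OnOpen
    public void onOpen(Session session) {
        // called once per client connection
        System.out.println("Client connected: " + session.getId());
    }

    // called for each text message received; the return value is sent back
    @OnMessage
    public String onMessage(String message) {
        return "echo: " + message;
    }
}
```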

    Read the article

  • MySQL Connect Only 10 Days Away - Focus on InnoDB Sessions

    - by Bertrand Matthelié
    Time flies and MySQL Connect is only 10 days away! You can check out the full program here as well as in the September edition of the MySQL newsletter. Mat recently blogged about the MySQL Cluster sessions you’ll have the opportunity to attend, and below are those focused on InnoDB. Remember you can plan your schedule with Schedule Builder.

    Saturday, 1.00 pm, Room Golden Gate 3: 10 Things You Should Know About InnoDB—Calvin Sun, Oracle
    InnoDB is the default storage engine for Oracle’s MySQL as of MySQL Release 5.5. It provides the standard ACID-compliant transactions, row-level locking, multiversion concurrency control, and referential integrity. InnoDB also implements several innovative technologies to improve its performance and reliability. This presentation gives a brief history of InnoDB; its main features; and some recent enhancements for better performance, scalability, and availability.

    Saturday, 5.30 pm, Room Golden Gate 4: Demystified MySQL/InnoDB Performance Tuning—Dimitri Kravtchuk, Oracle
    This session covers performance tuning with MySQL and the InnoDB storage engine for MySQL and explains the main improvements made in MySQL Release 5.5 and Release 5.6. Which setting for which workload? Which value will be better for my system? How can I avoid potential bottlenecks from the beginning? Do I need a purge thread? Is it true that InnoDB doesn't need thread concurrency anymore? These and many other questions are asked by DBAs and developers. Things are changing quickly and constantly, and there is no “silver bullet.” But understanding the configuration settings’ impact is already a huge step in performance improvement. Bring your ideas and problems to share them with others—the discussion is open, just moderated by a speaker.

    Sunday, 10.15 am, Room Golden Gate 4: Better Availability with InnoDB Online Operations—Calvin Sun, Oracle
    Many top Web properties rely on Oracle’s MySQL as a critical piece of infrastructure for serving millions of users. Database availability has become increasingly important. One way to enhance availability is to give users full access to the database during data definition language (DDL) operations. The online DDL operations in recent MySQL releases offer users the flexibility to perform schema changes while having full access to the database—that is, with minimal delay of operations on a table and without rebuilding the entire table. These enhancements provide better responsiveness and availability in busy production environments. This session covers these improvements in the InnoDB storage engine for MySQL for online DDL operations such as add index, drop foreign key, and rename column.

    Sunday, 11.45 am, Room Golden Gate 7: Developing High-Throughput Services with NoSQL APIs to InnoDB and MySQL Cluster—Andrew Morgan and John Duncan, Oracle
    Ever-increasing performance demands of Web-based services have generated significant interest in providing NoSQL access methods to MySQL (MySQL Cluster and the InnoDB storage engine of MySQL), enabling users to maintain all the advantages of their existing relational databases while providing blazing-fast performance for simple queries. Get the best of both worlds: persistence; consistency; rich SQL queries; high availability; scalability; and simple, flexible APIs and schemas for agile development. This session describes the memcached connectors and examines some use cases for how MySQL and memcached fit together in application architectures. It does the same for the newest MySQL Cluster native connector, an easy-to-use, fully asynchronous connector for Node.js.

    Sunday, 1.15 pm, Room Golden Gate 4: InnoDB Performance Tuning—Inaam Rana, Oracle
    The InnoDB storage engine has always been highly efficient and includes many unique architectural elements to ensure high performance and scalability. In MySQL 5.5 and MySQL 5.6, InnoDB includes many new features that take better advantage of recent advances in operating systems and hardware platforms than previous releases did. This session describes unique InnoDB architectural elements for performance, new features, and how to tune InnoDB to achieve better performance.

    Sunday, 4.15 pm, Room Golden Gate 3: InnoDB Compression for OLTP—Nizameddin Ordulu, Facebook and Inaam Rana, Oracle
    Data compression is an important capability of the InnoDB storage engine for Oracle’s MySQL. Compressed tables reduce the size of the database on disk, resulting in fewer reads and writes and better throughput by reducing the I/O workload. Facebook pushes the limit of InnoDB compression and has made several enhancements to InnoDB, making this technology ready for online transaction processing (OLTP). In this session, you will learn the fundamentals of InnoDB compression. You will also learn the enhancements the Facebook team has made to improve InnoDB compression, such as reducing compression failures, not logging compressed page images, and allowing changes of compression level.

    Not registered yet? You can still save US$300 over the on-site fee – Register Now!

    Read the article

  • A Semantic Model For Html: TagBuilder and HtmlTags

    - by Ryan Ohs
    In this post I look into the code smell that is HTML literals and show how we can refactor these pesky strings into a friendlier and more maintainable model.

    The Problem
    When I started writing MVC applications, I quickly realized that I built a lot of my HTML inside HtmlHelpers. As I did this, I ended up moving quite a bit of HTML into string literals inside my helper classes. As I wanted to add more attributes (such as classes) to my tags, I needed to keep adding overloads to my helpers. A good example of this end result is the default HTML helpers that come with the MVC framework. Too many overloads make me crazy! The problem with all these overloads is that they quickly muck up the API and nobody can remember exactly what order the parameters go in. I've seen many presenters (including members of the ASP.NET MVC team!) stumble before realizing that their view wasn't compiling because they needed one more null parameter in the call to Html.ActionLink(). What if instead of writing Html.ActionLink("Edit", "Edit", null, new { @class = "navigation" }) we could do Html.LinkToAction("Edit").Text("Edit").AddClass("navigation")? Wouldn't that be much easier to remember and understand? We can do this if we introduce a semantic model for building our HTML.

    What is a Semantic Model?
    According to Martin Fowler, "a semantic model is an in-memory representation, usually an object model, of the same subject that the domain specific language describes." In our case, the model would be a set of classes that know how to render HTML. By using a semantic model we can free ourselves from dealing with strings and instead output the HTML (typically via ToString()) once we've added all the elements and attributes we desire to the model. There are two primary semantic models available in ASP.NET MVC: MVC 2.0's TagBuilder and FubuMVC's HtmlTags.

    TagBuilder
    TagBuilder is the HTML builder that is available in ASP.NET MVC 2.0. I'm not a huge fan but it gets the job done -- for simple jobs. Here's an overview of how to use TagBuilder. See my Tips section below for a few comments on that example. The disadvantage of TagBuilder is that unless you wrap it up with your own classes, you still have to write the actual tag name over and over in your code, e.g. new TagBuilder("div") instead of new DivTag(). I also think its method names are a little too long. Why not have AddClass() instead of AddCssClass(), or Text() instead of SetInnerText()? What those methods are doing should be pretty obvious even in the short form. I also don't like that it wants to generate an id attribute from your input instead of letting you set it yourself using external conventions (see GenerateId() and IdAttributeDotReplacement). Obviously these come from Microsoft's default approach to MVC but may not be optimal for all programmers.

    HtmlTags
    HtmlTags is in my opinion the much better option for generating HTML in ASP.NET MVC. It was actually written as a part of FubuMVC but is available as a standalone library. HtmlTags provides a much cleaner syntax for writing HTML. There are classes for most of the major tags and it's trivial to create additional ones by inheriting from HtmlTag. There are also methods on each tag for the common attributes. For instance, FormTag has an Action() method. The SelectTag class allows you to set the default option or first option independently from adding other options. With TagBuilder there isn't even an abstraction for building selects! The project is open source and always improving.
    I'll hopefully find time to submit some of my own enhancements soon.

    Tips
    1) It's best not to have insanely overloaded HTML helpers. Use fluent builders.
    2) In HTML helpers, return the TagBuilder/tag itself (not a string!) so that you can continue to add attributes outside the helper; see my first sample above.
    3) Create a static entry point into your builders. I created a static Tags class that gives me access to all the HtmlTag classes I need. This way I don't clutter my code with "new" keywords. E.g. Tags.Div returns a new DivTag instance.
    4) If you find yourself doing something a lot, create an extension method for it. I created a Nest() extension method that reads much more fluently than the AddChildren() method. It also accepts a params array of tags so I can very easily nest many children.

    I hope you have found this post helpful. Join me in my war against HTML literals! I’ll have some more samples of how I use HtmlTags in future posts.
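    To make the fluent-builder tips concrete, here is a minimal, self-contained C# sketch of the pattern. This is not the real HtmlTags API - the names FluentTag, Attr(), and the tiny Tags class are illustrative assumptions - but it shows the essence: every method mutates the tag and returns it so calls chain, and ToString() renders the markup at the end.

        using System;
        using System.Collections.Generic;
        using System.Linq;
        using System.Text;

        // Minimal fluent HTML tag builder (illustrative; not the actual HtmlTags library).
        public class FluentTag
        {
            private readonly string _name;
            private readonly Dictionary<string, string> _attributes = new Dictionary<string, string>();
            private readonly List<FluentTag> _children = new List<FluentTag>();
            private string _text = string.Empty;

            public FluentTag(string name) { _name = name; }

            // Each method returns 'this' so calls can be chained fluently.
            public FluentTag Text(string text) { _text = text; return this; }
            public FluentTag Attr(string key, string value) { _attributes[key] = value; return this; }
            public FluentTag AddClass(string cssClass)
            {
                _attributes["class"] = _attributes.ContainsKey("class")
                    ? _attributes["class"] + " " + cssClass
                    : cssClass;
                return this;
            }
            public FluentTag Nest(params FluentTag[] children) { _children.AddRange(children); return this; }

            public override string ToString()
            {
                var attrs = string.Concat(_attributes.Select(a => " " + a.Key + "=\"" + a.Value + "\""));
                var sb = new StringBuilder();
                sb.Append("<").Append(_name).Append(attrs).Append(">").Append(_text);
                foreach (var child in _children) sb.Append(child);
                sb.Append("</").Append(_name).Append(">");
                return sb.ToString();
            }
        }

        // Tip #3: a static entry point keeps call sites free of 'new'.
        public static class Tags
        {
            public static FluentTag Div { get { return new FluentTag("div"); } }
            public static FluentTag A { get { return new FluentTag("a"); } }
        }

        class Demo
        {
            static void Main()
            {
                // Renders: <div class="navigation"><a href="/Home/Edit">Edit</a></div>
                var html = Tags.Div.AddClass("navigation")
                    .Nest(Tags.A.Attr("href", "/Home/Edit").Text("Edit"));
                Console.WriteLine(html);
            }
        }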

    Read the article

  • Blog Buzz - Devoxx 2011

    - by Janice J. Heiss
    Some day I will make it to Devoxx – for now, I’m content to vicariously follow the blogs of attendees and pick up on what’s happening. I’ve been doing more blog "fishing," looking for the best commentary on Devoxx 2011. There’s plenty of food for thought – and the ideas are not half-baked. The bloggers are out in force, offering useful summaries and commentary on Devoxx goings-on.

    Constantin Partac, a Java developer and a member of Transylvania JUG, a community from Cluj-Napoca/Romania, offers an excellent summary of the Devoxx keynotes. Here’s a sample:

    “Oracle Opening Keynote and JDK 7, 8, and 9 Presentation
    • Oracle is committed to Java and wants to provide support for it on any device.
    • JSE 7 for Mac will be released next week.
    • Oracle would like Java developers to be involved in the JCP, to adopt a JSR, and to attend local JUG meetings.
    • JEE 7 will be released next year.
    • JEE 7 is focused on cloud integration; some of the features are already implemented in the GlassFish 4 development branch.
    • JSE 8 will be released in the summer of 2013 due to “enterprise community request,” as they cannot keep pace with an 18-month release cycle.
    • The main features included in JSE 8 are lambda support, project Jigsaw, the new Date/Time API, project Coin++, and added support for sensors.

    JSE 9 will probably focus on some of these features:
    1. self-tuning JVM
    2. improved native language integration
    3. processing enhancements for big data
    4. reification (adding runtime class type info for generic types)
    5. unification of primitives and the corresponding object classes
    6. a meta-object protocol in order to use types and methods defined in other JVM languages
    7. multi-tenancy
    8. JVM resource management”

    Thanks Constantin!

    Ivan St. Ivanov, of SAP Labs Bulgaria, also commented on the keynotes with a different focus. He summarizes Henrik Stahl’s look ahead to Java SE 8 and JavaFX 3.0; Cameron Purdy on Java EE and the cloud; celebrated Java Champion Josh Bloch on what’s good and bad about Java; Mark Reinhold’s quick look ahead to Java SE 9; and Brian Goetz on lambdas and default methods in Java SE 8. Here’s St. Ivanov’s account of Josh Bloch’s comments on the pluses of Java:

    “He started with the virtues of the platform. To name a few:
    • Tightly specified language primitives and evaluation order – int is always 32 bits and operations are executed always from left to right, without compilers messing around.
    • Dynamic linking – when you change a class, you need to recompile and rebuild just the jar that has it and not the whole application.
    • Syntax similarity with C/C++ – most existing developers at that time felt at home.
    • Object orientation – it was cool at that time, just as functional programming is today.
    • It was a statically typed language – helps with faster runtime, better IDE support, etc.
    • No operator overloading – well, I’m not sure why it is good. Scala has it, for example, and that’s why it is far better for defining DSLs. But I will not argue with Josh.”

    It’s worth checking out St. Ivanov’s summary of Bloch’s views on what’s not so great about Java as well.

    What's Coming in JAX-RS 2.0
    Marek Potociar, Principal Software Engineer at Oracle and currently specification lead of the Java EE RESTful web services API (JAX-RS), blogged on his talk about what's coming in JAX-RS 2.0, scheduled for final release in mid-2012. Here’s a taste:

    “Perhaps the most wanted addition to JAX-RS is the Client API, which would complete the JAX-RS story that is currently server-side only.
    In JAX-RS 2.0 we are adding a completely interface-based and fluent client API that blends in nicely with the existing fluent response builder pattern on the server side. When we started with the client API, the first proposal contained around 30 classes. Thanks to the feedback from our Expert Group we managed to reduce the number of API classes to 14 (2 of them being exceptions)! The resulting API is compact while at the same time we still managed to create an API that reflects the method invocation context flow (e.g. once you decide on the target URI and start setting headers on the request, your IDE will not try to offer you a URI setter in the code completion). This is a subtle but very important usability aspect of an API…”

    Obviously, Devoxx is a great Java conference, one that arrived this year at a time when much is brewing in the platform and much is anticipated.
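    For reference, the fluent client API described above did ship in JAX-RS 2.0, and a minimal GET call looks roughly like the sketch below (the example.com URI, path, and header values are placeholders). Note how the chain mirrors the invocation context flow Potociar mentions: target URI first, then request headers, then the HTTP method.

        import javax.ws.rs.client.Client;
        import javax.ws.rs.client.ClientBuilder;
        import javax.ws.rs.core.MediaType;
        import javax.ws.rs.core.Response;

        public class JaxRsClientDemo {
            public static void main(String[] args) {
                Client client = ClientBuilder.newClient();
                try {
                    Response response = client
                            .target("http://example.com/api/books") // 1. choose the target URI
                            .path("42")
                            .request(MediaType.APPLICATION_JSON)    // 2. shape the request
                            .header("X-Client", "demo")
                            .get();                                 // 3. invoke
                    System.out.println(response.getStatus());
                    System.out.println(response.readEntity(String.class));
                } finally {
                    client.close();
                }
            }
        }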

    Read the article

  • Problems with opening CHM Help files from Network or Internet

    - by Rick Strahl
    As the publisher of a help creation tool called Html Help Builder, I’ve seen a lot of problems with help files that won't properly display actual topic content and display an error message for topics instead. Here’s the scenario: you go ahead and happily build your fancy, schmanzy help file for your application and deploy it to your customer. Or alternately you've created a help file and you let your customers download it off the Internet directly or in a zip file. The customer downloads the file, opens the zip file, and copies the help file contained in the zip file to disk. She then opens the help file and finds the following unfortunate result: the help file comes up with all topics in the tree on the left, but a "Navigation to the webpage was cancelled" or "Operation Aborted" error in the Help Viewer's content window whenever you try to open a topic.

    The CHM file obviously opened since the topic list is there, but the Help Viewer refuses to display the content. Looks like a broken help file, right? But it's not - it's merely a Windows security 'feature' that tries to be overly helpful in protecting you. The reason this happens is that files downloaded off the Internet - including ZIP files and CHM files contained in those zip files - are marked as coming from the Internet and so can potentially be malicious, so they do not get browsing rights on the local machine – they can’t access local Web content, which is exactly what help topics are. If you look at the URL of a help topic you see something like this:

    mk:@MSITStore:C:\wwapps\wwIPStuff\wwipstuff.chm::/indexpage.htm

    which points at a special Microsoft URL moniker that in turn points at the CHM file and a relative path within that HTML Help file. Try pasting a URL like this into Internet Explorer and you'll see the help topic pop up in your browser (along with a warning, most likely). Although the URL looks weird, this still equates to a call to the local computer zone, the same as if you had navigated to a local file in IE, which by default is not allowed. Unfortunately, unlike Internet Explorer where you have the option of clicking a security toolbar, the CHM viewer simply refuses to load the page and you get an error page as shown above.

    How to Fix This - Unblock the Help File
    There's a workaround that lets you explicitly 'unblock' a CHM help file. To do this:
    • Open Windows Explorer
    • Find your CHM file
    • Right click and select Properties
    • Click the Unblock button on the General tab
    Clicking the Unblock button basically tells Windows that you approve this help file and allows topics to be viewed.

    Is this insecure? Not unless you're running a really old version of Windows (XP pre-SP1). In recent versions of Windows, Internet Explorer pops up various security dialogs or fires script errors when potentially malicious operations are accessed (like loading ActiveX controls), so it's relatively safe to run local content in the CHM viewer. Since most help files don't contain script, or only load script that runs pure JavaScript to access web resources, this works fine without issues.

    How to Avoid This Problem
    As an application developer there's a simple solution around this problem: always install your help files with an installer. The above security warnings pop up because Windows can't validate the source of the CHM file. However, if the help file is installed as part of an installation, then the installation and all files associated with it - including the help file - are trusted.
    A fully installed help file of an application works just fine because it is trusted by Windows.

    Summary
    It's annoying as all hell that this sort of obtrusive marking is necessary, but it's admittedly a necessary evil because of Microsoft's use of the insecure Internet Explorer engine that drives the CHM HTML engine's topic viewer. Because help files view local content and script is allowed to execute in CHM files, there's potential for malicious code hiding in CHM files, and the above precautions are supposed to avoid any issues.

    © Rick Strahl, West Wind Technologies, 2005-2012
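    As a follow-up note: if you need to unblock files from a script or in bulk rather than through Explorer, newer Windows versions (with PowerShell 3.0 or later) make the same operation scriptable; the "downloaded" mark itself is just an NTFS alternate data stream named Zone.Identifier. A hedged sketch, where the path is a placeholder:

        # Clear the "downloaded from the Internet" mark on a CHM file.
        Unblock-File -Path 'C:\Help\MyApp.chm'

        # Equivalent: delete the Zone.Identifier alternate data stream directly.
        Remove-Item -Path 'C:\Help\MyApp.chm' -Stream Zone.Identifier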

    Read the article

  • How to build a Singleton-like dependency injector replacement (Php)

    - by Erparom
    I know there are a lot of excellent containers out there, even frameworks almost entirely DI-based with good, strong IoC classes. However, this doesn't help me to "define" a new pattern. (This is PHP code but understandable to anyone.) Suppose we have:

    // Declares the singleton
    class BookSingleton {
        private $author;
        private static $bookInstance;
        private static $isLoaned = FALSE;

        // The private constructor
        private function __construct() {
            $this->author = "Onecrappy Writer Ofcheap Novels";
        }

        // Sets the global isLoaned state and also returns the self instance
        public static function loanBook() {
            if (self::$isLoaned) {
                // Book already taken, so return false
                return FALSE;
            }
            // Ok, not loaned, so instantiate (if needed) and loan
            if (!isset(self::$bookInstance)) {
                self::$bookInstance = new BookSingleton();
            }
            self::$isLoaned = TRUE;
            return self::$bookInstance;
        }

        // Return loaned state to false, so another reader can take the book
        public function returnBook() {
            self::$isLoaned = FALSE;
        }

        public function getAuthor() {
            return $this->author;
        }
    }

    Then we get the singleton consumption class:

    // Consumes the singleton
    class BookBorrower {
        private $borrowedBook;
        private $haveBookState;

        public function __construct() {
            $this->haveBookState = FALSE;
        }

        // Use the singleton-pattern behavior
        public function borrowBook() {
            $this->borrowedBook = BookSingleton::loanBook();
            // Check if it was successfully borrowed
            if (!$this->borrowedBook) {
                $this->haveBookState = FALSE;
            } else {
                $this->haveBookState = TRUE;
            }
        }

        public function returnBook() {
            $this->borrowedBook->returnBook();
            $this->haveBookState = FALSE;
        }

        public function getAuthorAndTitle() {
            if ($this->haveBookState) {
                return "The book is loaned, the author is " . $this->borrowedBook->getAuthor();
            } else {
                return "I don't have the book, perhaps someone else took it";
            }
        }
    }

    At last, we get a client to test the behavior:

    function __autoload($class) {
        require_once $class . '.php';
    }

    function write($whatever, $breaks) {
        for ($break = 0; $break < $breaks; $break++) {
            $whatever .= "\n";
        }
        echo nl2br($whatever);
    }

    write("Begin Singleton test", 2);
    $borrowerJuan = new BookBorrower();
    $borrowerPedro = new BookBorrower();

    write("Juan asks for the book", 1);
    $borrowerJuan->borrowBook();
    write("Book Borrowed? ", 1);
    write($borrowerJuan->getAuthorAndTitle(), 2);

    write("Pedro asks for the book", 1);
    $borrowerPedro->borrowBook();
    write("Book Borrowed? ", 1);
    write($borrowerPedro->getAuthorAndTitle(), 2);

    write("Juan returns the book", 1);
    $borrowerJuan->returnBook();
    write("Returned Book Juan? ", 1);
    write($borrowerJuan->getAuthorAndTitle(), 2);

    write("Pedro asks again for the book", 1);
    $borrowerPedro->borrowBook();
    write("Book Borrowed? ", 1);
    write($borrowerPedro->getAuthorAndTitle(), 2);

    This will end up in the expected behavior:

    Begin Singleton test
    Juan asks for the book
    Book Borrowed?
    The book is loaned, the author is Onecrappy Writer Ofcheap Novels
    Pedro asks for the book
    Book Borrowed?
    I don't have the book, perhaps someone else took it
    Juan returns the book
    Returned Book Juan?
    I don't have the book, perhaps someone else took it
    Pedro asks again for the book
    Book Borrowed?
    The book is loaned, the author is Onecrappy Writer Ofcheap Novels

    So I want to make a pattern based on the DI technique able to do exactly the same, but without the singleton pattern.
    As far as I'm aware, I KNOW I must inject the book into the borrowBook function instead of grabbing a static instance:

    public function borrowBook(NonSingletonBook $book) {
        if (isset($this->borrowedBook) || $book->isLoaned()) {
            $this->haveBook = FALSE;
            return FALSE;
        } else {
            $this->borrowedBook = $book;
            $this->haveBook = TRUE;
            return TRUE;
        }
    }

    And at the client, just hand over the book:

    $borrowerJuan = new BookBorrower();
    $borrowerJuan->borrowBook(new NonSingletonBook());

    Etc... and so far so good, BUT... I'm moving the responsibility for the "single instance" to the borrower, instead of keeping that responsibility inside NonSingletonBook, which, since it no longer has a private constructor, can be instantiated as many times as you like, creating a new instance on each call. So, what must my NonSingletonBook class be in order to never allow borrowers to hold this same book twice - i.e., to keep the single instance? Because the dependency-injecting part of the code (the borrower) does not solve this for me AT ALL. Do I need a container with an "asShared" builder method with static behavior? Is there no way to encapsulate this functionality in the Book itself? "Hey, I'm a book and I shouldn't be instantiated more than once, I'm unique."
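    A minimal sketch of the "asShared" container idea the question alludes to (illustrative assumptions only: the Container class and its method names are made up, and NonSingletonBook stays a plain class while the container owns the single-instance policy):

        class Container {
            private $definitions = array();
            private $shared = array();

            // Register a factory; $asShared = true means "one instance per container".
            public function register($name, $factory, $asShared = false) {
                $this->definitions[$name] = array($factory, $asShared);
            }

            public function resolve($name) {
                list($factory, $asShared) = $this->definitions[$name];
                if (!$asShared) {
                    return $factory();
                }
                if (!isset($this->shared[$name])) {
                    $this->shared[$name] = $factory();
                }
                return $this->shared[$name];
            }
        }

        $container = new Container();
        $container->register('book', function () { return new NonSingletonBook(); }, true);

        $borrowerJuan = new BookBorrower();
        $borrowerPedro = new BookBorrower();
        $borrowerJuan->borrowBook($container->resolve('book'));
        $borrowerPedro->borrowBook($container->resolve('book')); // same instance, so already loaned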

    Read the article

  • Azure Mobile Services: available modules

    - by svdoever
    Azure Mobile Services has documented the set of objects available in your server-side scripts on its documentation page, Mobile Services server script reference. Although the documented list covers the common things you want to do, sooner rather than later you will look for more functionality in your scripts, especially with the newly provided feature that lets you create your own custom APIs. If you use Git it is now possible to add any NPM module (node package manager module - say, the NuGet of the node world), but why include a module if it is already available out of the box? And you can only use Git with Azure Mobile Services if you are an administrator on your Azure Mobile Service, not if you are a co-administrator (this will be solved in the future).

    Until now I did some trial-and-error experimentation to test whether a certain module was available. This is easiest to do as follows. Create a custom API, for example named experiment, and in this API use the following code:

    exports.get = function (request, response) {
        var module = "nonexistingmodule";
        var m = require(module);
        response.send(200, "Module '" + module + "' found.");
    };

    You can now test your service with the following request in your browser:

    https://yourservice.azure-mobile.net/api/experiment

    If you get the result {"code":500,"error":"Error: Internal Server Error"}, you know that the module does not exist. In your logs you will find the following error:

    Error in script '/api/experiment.json'. Error: Cannot find module 'nonexistingmodule' [external code] at C:\DWASFiles\Sites\yourservice\VirtualDirectory0\site\wwwroot\App_Data\config\scripts\api\experiment.js:3:13 [external code]

    If you require an existing (undocumented) module like the OAuth module in the following code, you will get success as a result:

    exports.get = function (request, response) {
        var module = "oauth";
        var m = require(module);
        response.send(200, "Module '" + module + "' found.");
    };

    If we look at the standard node.js documentation we see an extensive list of modules that can be used from your code. If we look at the list of files available in the Azure Mobile Services platform, as documented in the blog post "Azure Mobile Services: what files does it consist of?", we see a node_modules folder with many more modules that the Azure Mobile Services functionality is built on, and that can also be utilized from your server-side node script code:

    apn - An interface to the Apple Push Notification service for Node.js.
    dpush - Send push notifications to Android devices using GCM.
    mpns - A Node.js interface to the Microsoft Push Notification Service (MPNS) for Windows Phone.
    wns - Send push notifications to Windows 8 devices using WNS.
    pusher - Node library for the Pusher server API (see also: http://pusher.com/)
    azure - Windows Azure Client Library for node.
    express - Sinatra inspired web development framework.
    oauth - Library for interacting with OAuth 1.0, 1.0A, 2 and Echo. Provides simplified client access and allows for construction of more complex APIs and OAuth providers.
    request - Simplified HTTP request client.
    sax - An evented streaming XML parser in JavaScript.
    sendgrid - A NodeJS implementation of the SendGrid API.
    sqlserver - In the node repository known as msnodesql - Microsoft Driver for Node.js for SQL Server.
    tripwire - Break out from scripts blocking the node.js event loop.
    underscore - JavaScript's functional programming helper library.
    underscore.string - String manipulation extensions for the Underscore.js javascript library.
    xml2js - Simple XML to JavaScript object converter.
    xmlbuilder - An XML builder for node.js.

    As stated before, many of these modules are used to provide the functionality of the Azure Mobile Services platform, and in general should not be used directly. On the other hand, I needed OAuth badly to authenticate against the new v1.1 services of Twitter, and was very happy that a require('oauth') and a few lines of code did the job. Based on the above modules, and a lot of code in the other JavaScript files in the Azure Mobile Services platform, a set of global objects is provided that can be used from your server-side node.js script code. In future blog posts I will go into more detail on how this code is built up, all starting at the node.js express entry point app.js.
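    As an illustration of the kind of "few lines of code" the oauth module enables, here is a hedged sketch of a custom API that calls Twitter's v1.1 API. All keys, tokens, and the screen_name value are placeholder assumptions you would replace with your own:

        exports.get = function (request, response) {
            var OAuth = require('oauth').OAuth;
            var oauth = new OAuth(
                'https://api.twitter.com/oauth/request_token',
                'https://api.twitter.com/oauth/access_token',
                'YOUR_CONSUMER_KEY', 'YOUR_CONSUMER_SECRET',
                '1.0A', null, 'HMAC-SHA1');

            // Signed GET against the Twitter v1.1 REST API.
            oauth.get(
                'https://api.twitter.com/1.1/statuses/user_timeline.json?screen_name=glassfish',
                'YOUR_ACCESS_TOKEN', 'YOUR_ACCESS_TOKEN_SECRET',
                function (error, data) {
                    if (error) {
                        console.error('Twitter call failed: ' + JSON.stringify(error));
                        response.send(500, { error: 'Twitter call failed' });
                    } else {
                        response.send(200, JSON.parse(data));
                    }
                });
        };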

    Read the article

  • Access Services in SharePoint Server 2010

    - by Wayne
    Another SharePoint Server 2010 feature which cannot go unnoticed is Access Services. Access Services is a service in SharePoint Server 2010 that allows administrators to view, edit, and configure a Microsoft Access application within a web browser. Access Services settings support backup and recovery, regardless of whether there is a UI setting in Central Administration. However, backup and recovery only apply to service-level and administrative-level settings; end-user content from the Access application is not backed up as part of this process. Access Services has Windows PowerShell functionality that can be used to provision the service using settings from a previous backup; configure and manage macro and query settings; manage and configure session management; and configure all the global settings of the service.

    Key Benefits of SharePoint Server Access Services

    Easier access to the right tools: The enhanced, customizable Ribbon in Access 2010 makes it easy to uncover more commands so you can focus on the end product. The new Microsoft Office Backstage view is yet another feature that can help you easily analyze and document your database, share, publish, and customize your Access 2010 experience, all from one convenient location.

    Helps build databases effortlessly and quickly: Out-of-the-box templates and reusable components make Access Services the fastest, simplest database solution available. It helps you find new pre-built templates which you can start using without customization, or select templates created by your peers in the Access online community and customize them to meet your needs. It builds your databases with new modular components: new Application Parts enable you to add a set of common Access components, such as a table and form for task management, to your database in a few simple clicks. Database navigation is now simplified: create Navigation Forms to make your frequently used forms and reports more accessible without writing any code or logic.

    Create impactful forms and reports: Whether it's an inventory of your assets or a customer sales database, Access 2010 brings the innovative tools you'd expect from Microsoft Office. Access Services helps you easily spot trends and add emphasis to your data, quickly create coordinating database forms and reports, and bring the Web into your database.

    Obtain a centralized landing pad for your data: Access 2010 offers easy ways to bring your data together and help increase work quality. New technologies help break down barriers so you can share and work together on your databases, making you or your team more efficient and productive.

    Add automation and complex expressions: If you need a more robust database design, such as preventing record deletion if a specific condition is met, or if you need to create calculations to forecast your budget, Access 2010 empowers you to be your own developer. The enhanced Expression Builder greatly simplifies your expression-building experience with IntelliSense. With the revamped Macro Designer, it's now even easier for you to add basic logic to your database. New Data Macros allow you to attach logic to your data, centralizing the logic on the table, not on the objects that update your data.

    Key Features of Access Services 2010

    - Access database content through a web browser: The newly added Access Services on Microsoft SharePoint Server 2010 enables you to make your databases available on the Web with new web databases. Users without an Access client can open web forms and reports via a browser, and changes are automatically synchronized.
    - Simplify how you access the features you need: The Ribbon, improved in Access 2010, helps you access commands even more quickly by enabling you to customize or create your own tabs. The new Microsoft Office Backstage view replaces the traditional File menu to provide one central, organized location for all of your document management tasks.
    - Codeless navigation: Use professional-looking, web-like navigation forms to make frequently used forms and reports more accessible without writing any code or logic.
    - Easily reuse Access items in other databases: Use Application Parts to add pre-built Access components for common tasks to your database in a few simple clicks. You can also package common database components, such as data entry forms and reports for task management, and reuse them across your organization or other databases.
    - Simplified formatting: By using Office themes you can create coordinating, professional forms and reports across your database. Simply select a familiar and great-looking Office theme, or design your own, and apply it to your database. Newly created Access objects will automatically match your chosen theme.

    Read the article

  • Xobni Plus for Outlook [Review]

    - by The Geek
    Overview
    Xobni Plus is an add-in that brings a sidebar to Outlook which allows you to search through your inbox and contacts a lot more easily. It provides the ability to search and keep track of your favorite social networks. Searching with Xobni is a lot more powerful than the default search feature in Outlook. It lets you drill your searches down to conversations, email, links, and attachments. It now supports both the 32- and 64-bit versions of Outlook 2010.

    Installation & Setup
    Installation is easy following the wizard. After completing the wizard you can tell your friends on Facebook and Twitter that you are now using it. You can also decide to join their Product Improvement Program if you want. After installation, when you open Outlook, Xobni appears as a sidebar on the right side. Don’t worry about it always being in the way, as you can hide it if you need more room for other Outlook functions. After Xobni Free is installed, you can upgrade to the Plus version at any time. A new window will open up and you can use your credit card, PayPal, or redeem a code if you have one.

    Features & Use
    Where to begin with the number of features available in Xobni Plus? It really has an amazing amount of cool features. Of course you’ll have all of the features of the free version which we previously covered… and a lot more. After Xobni is installed you’ll notice a section for it on the Ribbon. From here you can search Xobni, show or hide the sidebar, and change other options. It allows you to easily keep up with various social networks like Facebook, Twitter, and LinkedIn… check out email analytics and contact ranks. Click on the Files Exchanged tab to search for specific attachments. Quickly search links exchanged with your contacts; hover over a link to get a preview of what it entails. It gives you the ability to index all of your Yahoo mail as well, without the need to purchase Yahoo Plus! Then your Yahoo messages appear in the Xobni sidebar, and when you select a contact you can see related messages from your Yahoo account. Easily index all of your mail… including Yahoo mail, for better organization and faster search results. There are several options you can select to change the way Xobni works, from setting up your Yahoo email to indexing options and much more.

    Additional Features of Xobni Plus:
    Advanced search capabilities – filter results, Boolean & phrase search, ability to search appointments & tasks, advanced search builder
    Search unlimited PST data files
    Xobni contacts in the compose screen
    Find links exchanged with your contacts
    View calendar appointments
    One year premium tech support
    No ads!

    Performance
    We ran Xobni Plus on Outlook 2010 32-bit on a dual-core AMD Athlon system with 4GB of RAM and found it to run quite smoothly. However, we did notice it would sometimes slow down launching Outlook, especially if other apps were running at the same time.

    Product Support
    When you buy a license for Xobni Plus you get a full year of premium tech support. They provide a Questions and Answers page on their site where you can run a search query and answers appear instantly. You can contact support directly as a Plus member through their web form, and they advise the turnaround time is 2 business days. However, when we tested it, we received a response within 24 hours. They also provide an FAQ and a community forum, and you can download the owner's manual in PDF format from the support page.

    Conclusion
    Xobni Plus is a very powerful add-in for Outlook and includes a lot more features than we covered in this review. You can download the Xobni free edition, which includes an 8-day free trial of the Plus version. This provides a good way to start getting familiar with it. Then upgrade to Xobni Plus at any time for $29.95. Once you get started, you’ll find the sidebar is nicely laid out and intuitive to use. If you live out of Outlook during the day, Xobni Plus is a great addition for fast and powerful searches. It provides an easy way to keep all of your contacts and messages well organized and easy to find. Xobni Plus works with XP, Vista, and Windows 7 (32 & 64-bit editions) and Outlook 2003, 2007, and both the 32 & 64-bit editions of Outlook 2010.

    Download Xobni Plus
    Download Xobni Free Edition

    Rating
    Installation: 8
    Ease of Use: 8
    Features: 9
    Performance: 8
    Product Support: 8

    Read the article

  • Webcor Builders Coordinates Construction Schedules and Mitigates Potential Delays More Efficiently with Integrated Project Management

    - by Sylvie MacKenzie, PMP
    With more than 40 years of commercial construction experience, Webcor Builders is a leading builder of distinguished, high-profile projects, including high-rise condominiums and hotels, laboratories, healthcare centers, and public works projects. Webcor is also known for its award-winning concrete, interior construction, historic restoration, and seismic renovation work. The company has completed more than 50 million square feet of projects to date.

    Considering the variety and complexity of the construction projects Webcor undertakes, an integrated project management solution is critical to ensuring optimal efficiency and completing client projects on time and on budget. The company previously used a number of scheduling systems for its various building projects. These packages provided different levels of schedule detail and required schedulers, engineers, and other employees to learn multiple systems. From an IT cost and complexity perspective, the company had to manage multiple scheduling systems and pay for multiple sets of licenses. The company looked to standardize on an enterprise project management system, and selected Oracle’s Primavera P6 Enterprise Project Portfolio Management.

    Webcor uses the solution’s advanced capabilities to schedule complex projects, analyze delays, model and propose multiple scenarios to demonstrate and mitigate delays and cost overruns, and process that information efficiently to deliver the scheduling precision that public and private projects require. In fact, the solution was instrumental in helping the company’s expansion into public sector projects during the recent economic downturn; with Primavera P6 in place, the company could deliver the precise scheduling and milestone reporting required for large public projects.

    The solution is instrumental in managing the high-profile University of California, Berkeley Memorial Stadium project. Webcor was hired as construction manager and general contractor for the stadium renovation, a fast-paced project located near the seismically active Hayward Fault Zone. Because of the University of California’s football schedule, meeting the university’s deadline for the coming season placed Webcor in a situation where risk awareness and early warnings of issues would be paramount. Webcor and the extended project team needed a solution that could instantly analyze alternate scenarios to mitigate potential delays; Primavera would deliver those answers. The team also needed to enable multiple stakeholders to use an internet-based platform to access the schedule from various locations, and to model complicated sequencing requirements where swift decisions would be made to keep the project on track.

    The schedule is an integral part of Webcor’s construction management process for the stadium project. Rather than providing the client with the industry-standard monthly update, Webcor updates the critical path method (CPM) schedule on a weekly basis. The project team also reviews and updates the schedule weekly to confirm that progress and forecasted performance are accurate.

    Hired by the university for its ability to deliver in high-risk environments, the Webcor team was recently hit with a design supplement that could have added up to 70 days to the project. Using Oracle Primavera P6, the team sprang into action, analyzing multiple “what if” scenarios to review mitigation means and methods. Determined to make sure the Bears could take the field in the coming season, the project team nearly eliminated the impact with their creative analysis in working the schedule. The total time from the issuance of the final design supplement to an agreed mitigation response was less than one week; leveraging the Oracle Primavera solution, Webcor was able to deliver superior customer value.

    With the ability to efficiently manage projects and schedules, Webcor can ensure it completes its projects on time and on budget, as well as inform clients about what changes to plans will mean in terms of delays and additional costs.

    Read the complete customer case study at: http://www.oracle.com/us/corporate/customers/customersearch/webcor-builders-1-primavera-ss-1639886.html

    Read the article

  • BIP 11g Dynamic SQL

    - by Tim Dexter
    Back in the 10g release, if you wanted something beyond the standard query for your report extract, you needed to break out your favorite text editor. You gotta love 'vi' and hate emacs, am I right? And get to building a data template - they were/are lovely to write, such fun ... not! It's not fun writing them by hand, but you do get to do some cool stuff around the data extract, including dynamic SQL. By that I mean the ability to add content dynamically to your query at runtime. With 11g, we spoiled you with a visual builder: no more vi or notepad sessions, a friendly drag-and-drop interface allowing you to build hierarchical data sets, calculated columns, summary columns, etc. You can still create the dynamic SQL statements; it's just not so well documented right now. In lieu of doc updates, here's the skinny.

    If you check out the 10g process for creating dynamic SQL in the docs, you need to create a data trigger function where you assign the dynamic SQL to a global variable that's matched in your report SQL. In 11g, the process is really the same; BI Publisher just provides a bit more help to define what trigger code needs to be called. You still need to create the function and place it inside a package in the db. Here's a simple PL/SQL package with the 'before data' function trigger.

    Spec:

    create or replace PACKAGE BIREPORTS AS
      whereCols varchar2(2000);
      FUNCTION beforeReportTrig return boolean;
    end BIREPORTS;

    Body:

    create or replace PACKAGE BODY BIREPORTS AS
      FUNCTION beforeReportTrig return boolean AS
      BEGIN
        whereCols := ' and d.department_id = 100';
        RETURN true;
      END beforeReportTrig;
    END BIREPORTS;

    You'll notice the additional where clause (whereCols - declared as a public variable) is hard coded. I'll cover parameterizing that in my next post. If you cannot wait, check the 10g docs for an example. I have my package compiling successfully in the db. Now, on to the BIP data model definition.

    1. Create a new data model and go ahead and create your query(s) as you would normally.
    2. In the query dialog box, add in the variables you want replaced at runtime using an ampersand rather than a colon, e.g. &whereCols:

    select d.DEPARTMENT_NAME, ...
    from "OE"."EMPLOYEES" e, "OE"."DEPARTMENTS" d
    where d."DEPARTMENT_ID" = e."DEPARTMENT_ID" &whereCols

    Note that 'whereCols' matches the global variable name in our package. When you click OK to clear the dialog, you'll be asked for a default value for the variable; just use ' and 1=1'. That leading space is important to keep the SQL valid, i.e. required whitespace. This value will be used for the where clause in case it is not set by the function code.
    3. Now click on the Event Triggers tree node and create a new trigger of the type Before Data. Type in the default package name - in my example, 'BIREPORTS' - then hit the update button to get BIP to fetch the valid functions. In my case I get to see the BEFOREREPORTTRIG function (or your name); select it and shuttle it across.
    4. Save your data model and now test it. For now, you can update the where clause via the PL/SQL package. Next time ... parameterizing the dynamic clause.
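    While the author saves parameterization for the follow-up post, the 10g docs he points to suggest the general shape: declare a public package variable whose name matches a report parameter, and build the clause from it inside the trigger. A hedged sketch (p_dept_id is an assumed parameter name, and in real use you would validate the value rather than concatenate it blindly):

        create or replace PACKAGE BIREPORTS AS
          p_dept_id varchar2(100);   -- assumed to match a report parameter name
          whereCols varchar2(2000);
          FUNCTION beforeReportTrig return boolean;
        end BIREPORTS;

        create or replace PACKAGE BODY BIREPORTS AS
          FUNCTION beforeReportTrig return boolean AS
          BEGIN
            IF p_dept_id IS NOT NULL THEN
              whereCols := ' and d.department_id = ' || p_dept_id;
            ELSE
              whereCols := ' and 1=1';  -- fall back to the harmless default
            END IF;
            RETURN true;
          END beforeReportTrig;
        END BIREPORTS;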

    Read the article

  • Unity.ResolutionFailedException - Resolution of the dependency failed

    - by Anibas
    I have the following code:

    public static IEngine CreateEngine() {
        UnityContainer container = Unity.LoadUnityContainer(DefaultStrategiesContainerName);
        IEnumerable<IStrategy> strategies = container.ResolveAll<IStrategy>();
        ITraderProvider provider = container.Resolve<ITraderProvider>();
        return new Engine(provider, new List<IStrategy>(strategies));
    }

    and the config:

    <unity>
      <typeAliases>
        <typeAlias alias="singleton" type="Microsoft.Practices.Unity.ContainerControlledLifetimeManager, Microsoft.Practices.Unity" />
        <typeAlias alias="weakRef" type="Microsoft.Practices.Unity.ExternallyControlledLifetimeManager, Microsoft.Practices.Unity" />
        <typeAlias alias="Strategy" type="ADTrader.Core.Contracts.IStrategy, ADTrader.Core" />
        <typeAlias alias="Trader" type="ADTrader.Core.Contracts.ITraderProvider, ADTrader.Core" />
      </typeAliases>
      <containers>
        <container name="strategies">
          <types>
            <type type="Strategy" mapTo="ADTrader.Strategies.ThreeTurningStrategy, ADTrader.Strategies" name="1" />
            <type type="Trader" mapTo="ADTrader.MbTradingProvider.MBTradingProvider, ADTrader.MbTradingProvider" />
          </types>
        </container>
      </containers>
    </unity>

    I am getting the following exception:

    Microsoft.Practices.Unity.ResolutionFailedException: Resolution of the dependency failed, type = "ADTrader.Core.Contracts.ITraderProvider", name = "". Exception message is: The current build operation (build key Build Key[ADTrader.MbTradingProvider.MBTradingProvider, null]) failed: Attempted to read or write protected memory. This is often an indication that other memory is corrupt. (Strategy type BuildPlanStrategy, index 3)
    --- Microsoft.Practices.ObjectBuilder2.BuildFailedException: The current build operation (build key Build Key[ADTrader.MbTradingProvider.MBTradingProvider, null]) failed: Attempted to read or write protected memory. This is often an indication that other memory is corrupt. (Strategy type BuildPlanStrategy, index 3)
    --- System.AccessViolationException: Attempted to read or write protected memory. This is often an indication that other memory is corrupt.
    at MBTCOMLib.MbtComMgrClass.EnableSplash(Boolean bEnable)
    at ADTrader.MbTradingProvider.MBTradingProvider..ctor()
    at BuildUp_ADTrader.MbTradingProvider.MBTradingProvider(IBuilderContext )
    at Microsoft.Practices.ObjectBuilder2.DynamicMethodBuildPlan.BuildUp(IBuilderContext context)
    at Microsoft.Practices.ObjectBuilder2.BuildPlanStrategy.PreBuildUp(IBuilderContext context)
    at Microsoft.Practices.ObjectBuilder2.StrategyChain.ExecuteBuildUp(IBuilderContext context)
    --- End of inner exception stack trace ---
    at Microsoft.Practices.ObjectBuilder2.StrategyChain.ExecuteBuildUp(IBuilderContext context)
    at Microsoft.Practices.ObjectBuilder2.Builder.BuildUp(IReadWriteLocator locator, ILifetimeContainer lifetime, IPolicyList policies, IStrategyChain strategies, Object buildKey, Object existing)
    at Microsoft.Practices.Unity.UnityContainer.DoBuildUp(Type t, Object existing, String name)
    --- End of inner exception stack trace ---
    at Microsoft.Practices.Unity.UnityContainer.DoBuildUp(Type t, Object existing, String name)
    at Microsoft.Practices.Unity.UnityContainer.Resolve(Type t, String name)
    at Microsoft.Practices.Unity.UnityContainerBase.ResolveT
    at ADTrader.Engine.EngineFactory.CreateEngine()

    Any idea?

    Read the article

  • Unable to set maxReceivedMessageSize through web.config

    - by Michael Mortensen
    Hello there, I have now investigated the 400 - Bad Request code for the last two hours. A lot of suggestions point towards ensuring the bindingConfiguration attribute is set correctly, and in my case, it is. Now, I need YOUR help before I destroy the building I am in :-)

    I run a WCF RESTful service (very lightweight, using this resource for inspiration: http://msdn.microsoft.com/en-us/magazine/dd315413.aspx) which (for now) accepts an XmlElement (POX) provided through the POST verb. I am currently ONLY using Fiddler's request builder before implementing a true client (as this is a mixed environment). When I do this with XML smaller than 65K, it works fine - larger, and it throws this exception:

    The maximum message size quota for incoming messages (65536) has been exceeded. To increase the quota, use the MaxReceivedMessageSize property on the appropriate binding element.

    Here is my web.config file (in which I even included the client tag - desperate times!):

    <system.web>
      <httpRuntime maxRequestLength="1500000" executionTimeout="180"/>
    </system.web>
    <system.serviceModel>
      <diagnostics>
        <messageLogging logEntireMessage="true" logMalformedMessages="true" logMessagesAtServiceLevel="true" logMessagesAtTransportLevel="true" />
      </diagnostics>
      <bindings>
        <webHttpBinding>
          <binding name="WebHttpBinding" maxReceivedMessageSize="1500000" maxBufferPoolSize="1500000" maxBufferSize="1500000" closeTimeout="00:03:00" openTimeout="00:03:00" receiveTimeout="00:10:00" sendTimeout="00:03:00">
            <readerQuotas maxStringContentLength="1500000" maxArrayLength="1500000" maxBytesPerRead="1500000" />
            <security mode="None"/>
          </binding>
        </webHttpBinding>
      </bindings>
      <client>
        <endpoint address="" binding="webHttpBinding" bindingConfiguration="WebHttpBinding" contract="Commerce.ICatalogue"/>
      </client>
      <services>
        <service behaviorConfiguration="ServiceBehavior" name="Catalogue">
          <endpoint address="" behaviorConfiguration="RestFull" binding="webHttpBinding" bindingConfiguration="WebHttpBinding" contract="Commerce.ICatalogue" />
          <!-- <endpoint address="mex" binding="mexHttpBinding" contract="IMetadataExchange" /> -->
        </service>
      </services>
      <behaviors>
        <endpointBehaviors>
          <behavior name="RestFull">
            <webHttp/>
          </behavior>
        </endpointBehaviors>
        <serviceBehaviors>
          <behavior name="ServiceBehavior">
            <serviceDebug httpHelpPageEnabled="true" includeExceptionDetailInFaults="true"/>
            <serviceMetadata httpGetEnabled="true"/>
          </behavior>
        </serviceBehaviors>
      </behaviors>
    </system.serviceModel>

    Thanks in advance for any help leading to a successful call with >65K XML ;-)

    Read the article

  • how do I register a custom conversion Service in spring 3 / webflow 2?

    - by nont
    I've been trying to follow this example and use the reference to guide me, but I'm having no luck. I've defined a converter:

    import org.springframework.binding.convert.converters.StringToObject;
    import java.text.DateFormat;
    import java.text.SimpleDateFormat;
    import java.text.ParseException;
    import java.util.Date;

    public class StringToDateTwoWayConverter extends StringToObject {
        private DateFormat format = null;

        public StringToDateTwoWayConverter() {
            super(StringToDateTwoWayConverter.class);
            format = new SimpleDateFormat("MM/dd/yyyy");
        }

        @Override
        protected Object toObject(String string, Class targetClass) throws Exception {
            Date date = null;
            try {
                date = format.parse(string);
            } catch (ParseException e) {
                e.printStackTrace();
                return null;
            }
            return date;
        }

        @Override
        protected String toString(Object object) throws Exception {
            Date date = (Date) object;
            return format.format(date);
        }
    }

    and a conversionService:

    import org.springframework.binding.convert.service.DefaultConversionService;
    import org.springframework.stereotype.Component;

    @Component("conversionService")
    public class ApplicationConversionService extends DefaultConversionService {
        @Override
        protected void addDefaultConverters() {
            super.addDefaultConverters();
            this.addConverter(new StringToDateTwoWayConverter());
            this.addConverter("shortDate", new StringToDateTwoWayConverter());
        }
    }

    and configured it:

    <webflow:flow-builder-services id="flowBuilderServices" conversion-service="conversionService" .../>

    However, upon startup, I'm greeted with this exception:

    Caused by: org.springframework.beans.factory.UnsatisfiedDependencyException: Error creating bean with name '(inner bean)': Unsatisfied dependency expressed through constructor argument with index 0 of type [org.springframework.core.convert.ConversionService]: Could not convert constructor argument value of type [com.yadayada.converter.ApplicationConversionService] to required type [org.springframework.core.convert.ConversionService]: Failed to convert value of type 'com.yadayada.converter.ApplicationConversionService' to required type 'org.springframework.core.convert.ConversionService'; nested exception is java.lang.IllegalStateException: Cannot convert value of type [com.yadayada.converter.ApplicationConversionService] to required type [org.springframework.core.convert.ConversionService]: no matching editors or conversion strategy found
    at org.springframework.beans.factory.support.ConstructorResolver.createArgumentArray(ConstructorResolver.java:687)
    at org.springframework.beans.factory.support.ConstructorResolver.autowireConstructor(ConstructorResolver.java:195)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.autowireConstructor(AbstractAutowireCapableBeanFactory.java:993)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBeanInstance(AbstractAutowireCapableBeanFactory.java:897)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.doCreateBean(AbstractAutowireCapableBeanFactory.java:485)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBean(AbstractAutowireCapableBeanFactory.java:456)
    at org.springframework.beans.factory.support.BeanDefinitionValueResolver.resolveInnerBean(BeanDefinitionValueResolver.java:270)
    ... 60 more

    I'm thoroughly puzzled why it's not working. The conversion service implements ConversionService through its base class, so I don't see the problem. Any insight much appreciated!

    Read the article

  • Snow Leopard sqlite3-ruby install problem

    - by JZ
    UPDATE 3/20/10: I'm running Mac OS X Snow Leopard. This problem was caused by a recent train wreck in which I updated Ruby without RVM. I've attempted to properly install/run RVM, but I can't get it to work.

    I am unable to install the sqlite3-ruby gem. I get the following error:

    ERROR: Error installing sqlite3-ruby: ERROR: Failed to build gem native extension.

    How do I fix this?

    justin-zollarss-mac-pro:~ justinz$ rails -v
    Rails 2.3.5
    justin-zollarss-mac-pro:~ justinz$ ruby -v
    ruby 1.8.7 (2008-08-11 patchlevel 72) [i686-darwin10.2.0]
    justin-zollarss-mac-pro:~ justinz$ gem -v
    1.3.5
    justin-zollarss-mac-pro:~ justinz$ which gem
    /usr/local/bin/gem
    justin-zollarss-mac-pro:~ justinz$ whereis gem
    /usr/bin/gem
    justin-zollarss-mac-pro:~ justinz$ which ruby
    /usr/local/bin/ruby
    justin-zollarss-mac-pro:~ justinz$ whereis ruby
    /usr/bin/ruby
    justin-zollarss-mac-pro:~ justinz$ which rails
    /usr/local/bin/rails
    justin-zollarss-mac-pro:~ justinz$ whereis rails
    /usr/bin/rails
    justin-zollarss-mac-pro:~ justinz$ gem list
    *** LOCAL GEMS ***
    actionmailer (2.3.5) actionpack (2.3.5) activerecord (2.3.5) activeresource (2.3.5) activesupport (2.3.5) builder (2.1.2) bundler (0.9.11) columnize (0.3.1) erubis (2.6.5) fastercsv (1.5.1) ffi (0.6.3) gbarcode (0.98.16) i18n (0.3.5) linecache (0.43) mail (2.1.3) memcache-client (1.8.0) prawn (0.8.4) prawn-core (0.8.4) prawn-layout (0.8.4) prawn-security (0.8.4) rack (1.1.0, 1.0.1) rack-mount (0.6.1) rack-test (0.5.3) rails (2.3.5) rake (0.8.7) ruby-debug (0.10.3) ruby-debug-base (0.10.3) rubygems-update (1.3.6) sqlite3 (0.0.8) text-format (1.0.0) thor (0.13.4) tzinfo (0.3.17)
    justin-zollarss-mac-pro:~ justinz$ sudo gem install sqlite3-ruby
    Password:
    Building native extensions. This could take a while...
    ERROR: Error installing sqlite3-ruby:
    ERROR: Failed to build gem native extension.
    /usr/local/bin/ruby extconf.rb
    checking for fdatasync() in -lrt... no
    checking for sqlite3.h... yes
    checking for sqlite3_open() in -lsqlite3... no
    *** extconf.rb failed ***
    Could not create Makefile due to some reason, probably lack of necessary libraries and/or headers. Check the mkmf.log file for more details. You may need configuration options.
    Provided configuration options: --with-opt-dir --without-opt-dir --with-opt-include --without-opt-include=${opt-dir}/include --with-opt-lib --without-opt-lib=${opt-dir}/lib --with-make-prog --without-make-prog --srcdir=. --curdir --ruby=/usr/local/bin/ruby --with-sqlite3-dir --without-sqlite3-dir --with-sqlite3-include --without-sqlite3-include=${sqlite3-dir}/include --with-sqlite3-lib --without-sqlite3-lib=${sqlite3-dir}/lib --with-rtlib --without-rtlib --with-sqlite3lib --without-sqlite3lib
    Gem files will remain installed in /usr/local/lib/ruby/gems/1.8/gems/sqlite3-ruby-1.2.5 for inspection.
    Results logged to /usr/local/lib/ruby/gems/1.8/gems/sqlite3-ruby-1.2.5/ext/sqlite3_api/gem_make.out


  • sqlite3-ruby can't make on rvm 1.8.7

    - by Josh Crews
I'm upgrading to Rails 3, starting with Ruby 1.8.7 under RVM, on OS X 10.5.8. Output:

josh-crewss-macbook:~ joshcrews$ gem install sqlite3-ruby
Building native extensions. This could take a while...
ERROR: Error installing sqlite3-ruby:
ERROR: Failed to build gem native extension.

/Users/joshcrews/.rvm/rubies/ruby-1.8.7-p174/bin/ruby extconf.rb
checking for sqlite3.h... yes
checking for sqlite3_libversion_number() in -lsqlite3... yes
checking for rb_proc_arity()... no
checking for sqlite3_column_database_name()... no
checking for sqlite3_enable_load_extension()... no
checking for sqlite3_load_extension()... no
creating Makefile

make
gcc -I. -I. -I/Users/joshcrews/.rvm/rubies/ruby-1.8.7-p174/lib/ruby/1.8/i686-darwin9.8.0 -I. -I/usr/local/include -I/opt/local/include -I/usr/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -fno-common -g -O2 -fno-common -pipe -fno-common -O3 -Wall -Wcast-qual -Wwrite-strings -Wconversion -Wmissing-noreturn -Winline -c database.c
database.c: In function ‘deallocate’:
database.c:17: warning: implicit declaration of function ‘sqlite3_next_stmt’
database.c:17: warning: assignment makes pointer from integer without a cast
database.c: In function ‘initialize’:
database.c:76: warning: implicit declaration of function ‘sqlite3_open_v2’
database.c:79: error: ‘SQLITE_OPEN_READWRITE’ undeclared (first use in this function)
database.c:79: error: (Each undeclared identifier is reported only once
database.c:79: error: for each function it appears in.)
database.c:79: error: ‘SQLITE_OPEN_CREATE’ undeclared (first use in this function)
database.c: In function ‘set_sqlite3_func_result’:
database.c:277: error: ‘sqlite3_int64’ undeclared (first use in this function)
database.c: In function ‘rb_sqlite3_func’:
database.c:311: warning: passing argument 1 of ‘ruby_xcalloc’ as signed due to prototype
database.c: In function ‘rb_sqlite3_step’:
database.c:378: warning: passing argument 1 of ‘ruby_xcalloc’ as signed due to prototype
make: *** [database.o] Error 1

Gem list (these are under RVM; under system Ruby I've got a lot more gems, including the sqlite3-ruby that has worked for 1.5 years):

josh-crewss-macbook:~ joshcrews$ gem list

*** LOCAL GEMS ***

abstract (1.0.0) actionmailer (3.0.0.beta3) actionpack (3.0.0.beta3) activemodel (3.0.0.beta3) activerecord (3.0.0.beta3) activeresource (3.0.0.beta3) activesupport (3.0.0.beta3, 2.3.8) arel (0.3.3) builder (2.1.2) bundler (0.9.25) capybara (0.3.8) configuration (1.1.0) cucumber (0.7.2) cucumber-rails (0.3.1) culerity (0.2.10) database_cleaner (0.5.2) diff-lcs (1.1.2) erubis (2.6.5) ffi (0.6.3) gherkin (1.0.30) i18n (0.4.0, 0.3.7) json_pure (1.4.3) launchy (0.3.5) mail (2.2.1) memcache-client (1.8.3) mime-types (1.16) nokogiri (1.4.2) polyglot (0.3.1) rack (1.1.0) rack-mount (0.6.3) rack-test (0.5.4) rails (3.0.0.beta3) railties (3.0.0.beta3) rake (0.8.7) rdoc (2.5.8) rspec (2.0.0.beta.10, 2.0.0.beta.8) rspec-core (2.0.0.beta.10, 2.0.0.beta.8) rspec-expectations (2.0.0.beta.10, 2.0.0.beta.8) rspec-mocks (2.0.0.beta.10, 2.0.0.beta.8) rspec-rails (2.0.0.beta.10, 2.0.0.beta.8) rubygems-update (1.3.7) selenium-webdriver (0.0.20) spork (0.8.3) term-ansicolor (1.0.5) text-format (1.0.0) text-hyphen (1.0.0) thor (0.13.6) treetop (1.4.8) trollop (1.16.2) tzinfo (0.3.22) webrat (0.7.1)

Version of Xcode: 3.1.1. My suspicion is that it has to do with "-I/Users/joshcrews/.rvm/rubies/ruby-1.8.7-p174/lib/ruby/1.8/i686-darwin9.8.0", because i686-darwin9.8.0 doesn't exist at that path.
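Those specific failures are telling: sqlite3_open_v2 being implicitly declared and SQLITE_OPEN_READWRITE / SQLITE_OPEN_CREATE / sqlite3_int64 being undeclared are what you get when the sqlite3.h that gcc picks up comes from an old SQLite 3 release predating those APIs, and the -I/usr/local/include, -I/opt/local/include, and -I/usr/include entries on that gcc line are the likely places where a stale header is shadowing a newer one. A hedged workaround, borrowing the extconf flags shown in the previous question (the /usr/local path is an assumption; point it at whichever SQLite install is actually current):

gem install sqlite3-ruby -- --with-sqlite3-dir=/usr/local

If that still fails, the mkmf.log written next to the gem's ext directory records exactly which header and library each check used.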


  • Java: Interface for this code

    - by ibrahim
Please, I need help making an interface for this code: package com.ejada.alinma.edh.xsdtransform; import java.io.File; import java.io.FileReader; import java.io.FileWriter; import java.io.StringWriter; import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.Date; import java.util.HashMap; import java.util.Iterator; import java.util.Properties; import java.util.StringTokenizer; import javax.xml.parsers.DocumentBuilder; import javax.xml.parsers.DocumentBuilderFactory; import javax.xml.transform.Result; import javax.xml.transform.Source; import javax.xml.transform.Transformer; import javax.xml.transform.TransformerFactory; import javax.xml.transform.dom.DOMSource; import javax.xml.transform.stream.StreamResult; import org.apache.log4j.Logger; import org.apache.log4j.PropertyConfigurator; import org.w3c.dom.Document; import org.w3c.dom.DocumentFragment; import org.w3c.dom.Element; import org.w3c.dom.Node; import org.w3c.dom.NodeList; import com.sun.org.apache.xml.internal.serialize.OutputFormat; import com.sun.org.apache.xml.internal.serialize.XMLSerializer; /** * An XSD Transformer that replaces the "name" attribute's value in T24 XSDs * with the "shortname" attribute's value * * @author ahusseiny * */ public class XSDTransformer { /** * constants representing the XSD tags and attributes' names used in the parse process */ public static final String TAG_SCHEMA = "xsd:schema"; public static final String TAG_TEXT = "#text"; public static final String TAG_COMPLEX_TYPE = "xsd:complexType"; public static final String TAG_SIMPLE_TYPE = "xsd:simpleType"; public static final String TAG_SEQUENCE = "xsd:sequence"; public static final String TAG_ATTRIBUTE = "xsd:attribute"; public static final String TAG_ELEMENT = "xsd:element"; public static final String TAG_ANNOTATION = "xsd:annotation"; public static final String TAG_APP_INFO = "xsd:appinfo"; public static final String TAG_HAS_PROPERTY = "xsd:hasProperty"; public static final String TAG_RESTRICTION = "xsd:restriction"; public static final String TAG_MAX_LENGTH = "xsd:maxLength"; public static final String ATTR_NAME = "name"; public static final String ATTR_VALUE = "value"; public static final String ATTR_TYPE = "type"; public static final String ATTR_MIXED = "mixed"; public static final String ATTR_USE = "use"; public static final String ATTR_REF = "ref"; public static final String ATTR_MAX_OCCURS = "maxOccurs"; /** * constants representing specific XSD attributes' values used in the parse process */ public static final String FIELD_TAG = "fieldtag"; public static final String FIELD_NUMBER = "fieldnumber"; public static final String FIELD_DATA_TYPE = "fielddatatype"; public static final String FIELD_FMT = "fieldfmt"; public static final String FIELD_LEN = "fieldlen"; public static final String FIELD_INPUT_LEN = "fieldinputlen"; public static final String FIELD_GROUP_NUMBER = "fieldgroupnumber"; public static final String FIELD_MV_GROUP_NUMBER = "fieldmvgroupnumber"; public static final String FIELD_SHORT_NAME = "fieldshortname"; public static final String FIELD_NAME = "fieldname"; public static final String FIELD_COLUMN_NAME = "fieldcolumnname"; public static final String FIELD_GROUP_NAME = "fieldgroupname"; public static final String FIELD_MV_GROUP_NAME = "fieldmvgroupname"; public static final String FIELD_JUSTIFICATION = "fieldjustification"; public static final String FIELD_TYPE = "fieldtype"; public static final String FIELD_SINGLE_OR_MULTI = "singleormulti"; public static final String DELIMITER_COLUMN_TYPE = "#"; 
public static final String COLUMN_FK_ROW = "FK_ROW"; public static final String COLUMN_XPK_ROW = "XPK_ROW"; public static final int SQL_VIEW_MULTI = 1; public static final int SQL_VIEW_SINGLE = 2; public static final String DATA_TYPE_XSD_NUMERIC = "numeric"; public static final String DATA_TYPE_XSD_DECIMAL = "decimal"; public static final String DATA_TYPE_XSD_STRING = "string"; public static final String DATA_TYPE_XSD_DATE = "date"; /** * application configuration properties */ public static final String PROP_LOG4J_CONFIG_FILE = "log4j_config"; public static final String PROP_MAIN_VIEW_NAME_SINGLE = "view_name_single"; public static final String PROP_MAIN_VIEW_NAME_MULTI = "view_name_multi"; public static final String PROP_MAIN_TABLE_NAME = "main_edh_table_name"; public static final String PROP_SUB_TABLE_PREFIX = "sub_table_prefix"; public static final String PROP_SOURCE_XSD_FULLNAME = "source_xsd_fullname"; public static final String PROP_RESULTS_PATH = "results_path"; public static final String PROP_NEW_XSD_FILENAME = "new_xsd_filename"; public static final String PROP_CSV_FILENAME = "csv_filename"; /** * static holders for application-level utilities */ private static Properties appProps; private static Logger appLogger; /** * */ private StringBuffer sqlViewColumnsSingle = null; private StringBuffer sqlViewSelectSingle = null; private StringBuffer columnsCSV = null; private ArrayList<String> singleValueTableColumns = null; private HashMap<String, String> multiValueTablesSQL = null; private HashMap<Object, HashMap<String, Object>> groupAttrs = null; public XSDTransformer(String appConfigPropsPath) { if (appProps == null) { appProps = new Properties(); } try { init(appConfigPropsPath); } catch (Exception e) { appLogger.error(e.getMessage()); } } /** * initialization */ private void init(String appConfigPropsPath) throws Exception { // init the properties object FileReader in = new FileReader(appConfigPropsPath); appProps.load(in); // init the logger if ((appProps.getProperty(XSDTransformer.PROP_LOG4J_CONFIG_FILE) != null) && (!appProps.getProperty(XSDTransformer.PROP_LOG4J_CONFIG_FILE).equals(""))) { PropertyConfigurator.configure(appProps.getProperty(XSDTransformer.PROP_LOG4J_CONFIG_FILE)); if (appLogger == null) { appLogger = Logger.getLogger(XSDTransformer.class.getName()); } appLogger.info("Application initialization successful."); } sqlViewColumnsSingle = new StringBuffer(); sqlViewSelectSingle = new StringBuffer(); columnsCSV = new StringBuffer(XSDTransformer.FIELD_TAG + "," + XSDTransformer.FIELD_NUMBER + "," + XSDTransformer.FIELD_DATA_TYPE + "," + XSDTransformer.FIELD_FMT + "," + XSDTransformer.FIELD_LEN + "," + XSDTransformer.FIELD_INPUT_LEN + "," + XSDTransformer.FIELD_GROUP_NUMBER + "," + XSDTransformer.FIELD_MV_GROUP_NUMBER + "," + XSDTransformer.FIELD_SHORT_NAME + "," + XSDTransformer.FIELD_NAME + "," + XSDTransformer.FIELD_COLUMN_NAME + "," + XSDTransformer.FIELD_GROUP_NAME + "," + XSDTransformer.FIELD_MV_GROUP_NAME + "," + XSDTransformer.FIELD_JUSTIFICATION + "," + XSDTransformer.FIELD_TYPE + "," + XSDTransformer.FIELD_SINGLE_OR_MULTI + System.getProperty("line.separator")); singleValueTableColumns = new ArrayList<String>(); singleValueTableColumns.add(XSDTransformer.COLUMN_XPK_ROW + XSDTransformer.DELIMITER_COLUMN_TYPE + XSDTransformer.DATA_TYPE_XSD_NUMERIC); multiValueTablesSQL = new HashMap<String, String>(); groupAttrs = new HashMap<Object, HashMap<String, Object>>(); } /** * initialize the <code>DocumentBuilder</code> and read the XSD file * * @param docPath * @return 
the <code>Document</code> object representing the read XSD file */ private Document retrieveDoc(String docPath) { Document xsdDoc = null; File file = new File(docPath); try { DocumentBuilder builder = DocumentBuilderFactory.newInstance().newDocumentBuilder(); xsdDoc = builder.parse(file); } catch (Exception e) { appLogger.error(e.getMessage()); } return xsdDoc; } /** * perform the iteration/modification on the document * iterate to the level which contains all the elements (Single-Value, and Groups) and start processing each * * @param xsdDoc * @return */ private Document transformDoc(Document xsdDoc) { ArrayList<Object> newElementsList = new ArrayList<Object>(); HashMap<String, Object> docAttrMap = new HashMap<String, Object>(); Element sequenceElement = null; Element schemaElement = null; // get document's root element NodeList nodes = xsdDoc.getChildNodes(); for (int i = 0; i < nodes.getLength(); i++) { if (XSDTransformer.TAG_SCHEMA.equals(nodes.item(i).getNodeName())) { schemaElement = (Element) nodes.item(i); break; } } // process the document (change single-value elements, collect list of new elements to be added) for (int i1 = 0; i1 < schemaElement.getChildNodes().getLength(); i1++) { Node childLevel1 = (Node) schemaElement.getChildNodes().item(i1); // <ComplexType> element if (childLevel1.getNodeName().equals(XSDTransformer.TAG_COMPLEX_TYPE)) { // first, get the main attributes and put it in the csv file for (int i6 = 0; i6 < childLevel1.getChildNodes().getLength(); i6++) { Node child6 = childLevel1.getChildNodes().item(i6); if (XSDTransformer.TAG_ATTRIBUTE.equals(child6.getNodeName())) { if (child6.getAttributes().getNamedItem(XSDTransformer.ATTR_NAME) != null) { String attrName = child6.getAttributes().getNamedItem(XSDTransformer.ATTR_NAME).getNodeValue(); if (((Element) child6).getElementsByTagName(XSDTransformer.TAG_SIMPLE_TYPE).getLength() != 0) { Node simpleTypeElement = ((Element) child6).getElementsByTagName(XSDTransformer.TAG_SIMPLE_TYPE) .item(0); if (((Element) simpleTypeElement).getElementsByTagName(XSDTransformer.TAG_RESTRICTION).getLength() != 0) { Node restrictionElement = ((Element) simpleTypeElement).getElementsByTagName( XSDTransformer.TAG_RESTRICTION).item(0); if (((Element) restrictionElement).getElementsByTagName(XSDTransformer.TAG_MAX_LENGTH).getLength() != 0) { Node maxLengthElement = ((Element) restrictionElement).getElementsByTagName( XSDTransformer.TAG_MAX_LENGTH).item(0); HashMap<String, String> elementProperties = new HashMap<String, String>(); elementProperties.put(XSDTransformer.FIELD_TAG, attrName); elementProperties.put(XSDTransformer.FIELD_NUMBER, "0"); elementProperties.put(XSDTransformer.FIELD_DATA_TYPE, XSDTransformer.DATA_TYPE_XSD_STRING); elementProperties.put(XSDTransformer.FIELD_FMT, ""); elementProperties.put(XSDTransformer.FIELD_NAME, attrName); elementProperties.put(XSDTransformer.FIELD_SHORT_NAME, attrName); elementProperties.put(XSDTransformer.FIELD_COLUMN_NAME, attrName); elementProperties.put(XSDTransformer.FIELD_SINGLE_OR_MULTI, "S"); elementProperties.put(XSDTransformer.FIELD_LEN, maxLengthElement.getAttributes().getNamedItem( XSDTransformer.ATTR_VALUE).getNodeValue()); elementProperties.put(XSDTransformer.FIELD_INPUT_LEN, maxLengthElement.getAttributes() .getNamedItem(XSDTransformer.ATTR_VALUE).getNodeValue()); constructElementRow(elementProperties); // add the attribute as a column in the single-value table singleValueTableColumns.add(attrName + XSDTransformer.DELIMITER_COLUMN_TYPE + XSDTransformer.DATA_TYPE_XSD_STRING + 
XSDTransformer.DELIMITER_COLUMN_TYPE + maxLengthElement.getAttributes().getNamedItem(XSDTransformer.ATTR_VALUE).getNodeValue()); // add the attribute as a column in the single-values view sqlViewColumnsSingle.append(System.getProperty("line.separator") + attrName + ", "); sqlViewSelectSingle.append(System.getProperty("line.separator") + attrName + ", "); appLogger.debug("added attribute: " + attrName); } } } } } } // now, loop on the elements and process them for (int i2 = 0; i2 < childLevel1.getChildNodes().getLength(); i2++) { Node childLevel2 = (Node) childLevel1.getChildNodes().item(i2); // <Sequence> element if (childLevel2.getNodeName().equals(XSDTransformer.TAG_SEQUENCE)) { sequenceElement = (Element) childLevel2; for (int i3 = 0; i3 < childLevel2.getChildNodes().getLength(); i3++) { Node childLevel3 = (Node) childLevel2.getChildNodes().item(i3); // <Element> element if (childLevel3.getNodeName().equals(XSDTransformer.TAG_ELEMENT)) { // check if single element or group if (isGroup(childLevel3)) { processGroup(childLevel3, true, null, docAttrMap, xsdDoc, newElementsList); // insert a new comment node with the contents of the group tag sequenceElement.insertBefore(xsdDoc.createComment(serialize(childLevel3)), childLevel3); // remove the group tag sequenceElement.removeChild(childLevel3); } else { processElement(childLevel3); } } } } } } } // add new elements // this step should be after finishing processing the whole document. when you add new elements to the document // while you are working on it, those new elements will be included in the processing. We don't need that! for (int i = 0; i < newElementsList.size(); i++) { sequenceElement.appendChild((Element) newElementsList.get(i)); } // write the new required attributes to the schema element Iterator<String> attrIter = docAttrMap.keySet().iterator(); while(attrIter.hasNext()) { Element attr = (Element) docAttrMap.get(attrIter.next()); Element newAttrElement = xsdDoc.createElement(XSDTransformer.TAG_ATTRIBUTE); appLogger.debug("appending attr. [" + attr.getAttribute(XSDTransformer.ATTR_NAME) + "]..."); newAttrElement.setAttribute(XSDTransformer.ATTR_NAME, attr.getAttribute(XSDTransformer.ATTR_NAME)); newAttrElement.setAttribute(XSDTransformer.ATTR_TYPE, attr.getAttribute(XSDTransformer.ATTR_TYPE)); schemaElement.appendChild(newAttrElement); } return xsdDoc; } /** * check if the <code>element</code> sent is single-value element or group * element. the comparison depends on the children of the element. 
if found one of type * <code>ComplexType</code> then it's a group element, and if of type * <code>SimpleType</code> then it's a single-value element * * @param element * @return <code>true</code> if the element is a group element, * <code>false</code> otherwise */ private boolean isGroup(Node element) { for (int i = 0; i < element.getChildNodes().getLength(); i++) { Node child = (Node) element.getChildNodes().item(i); if (child.getNodeName().equals(XSDTransformer.TAG_COMPLEX_TYPE)) { // found a ComplexType child (Group element) return true; } else if (child.getNodeName().equals(XSDTransformer.TAG_SIMPLE_TYPE)) { // found a SimpleType child (Single-Value element) return false; } } return false; /* String attrName = null; if (element.getAttributes() != null) { Node attribute = element.getAttributes().getNamedItem(XSDTransformer.ATTR_NAME); if (attribute != null) { attrName = attribute.getNodeValue(); } } if (attrName.startsWith("g")) { // group element return true; } else { // single element return false; } */ } /** * process a group element. recursively, process groups till no more group elements are found * * @param element * @param isFirstLevelGroup * @param attrMap * @param docAttrMap * @param xsdDoc * @param newElementsList */ private void processGroup(Node element, boolean isFirstLevelGroup, Node parentGroup, HashMap<String, Object> docAttrMap, Document xsdDoc, ArrayList<Object> newElementsList) { String elementName = null; HashMap<String, Object> groupAttrMap = new HashMap<String, Object>(); HashMap<String, Object> parentGroupAttrMap = new HashMap<String, Object>(); if (element.getAttributes().getNamedItem(XSDTransformer.ATTR_NAME) != null) { elementName = element.getAttributes().getNamedItem(XSDTransformer.ATTR_NAME).getNodeValue(); } appLogger.debug("processing group [" + elementName + "]..."); // get the attributes if a non-first-level-group // attributes are: groups's own attributes + parent group's attributes if (!isFirstLevelGroup) { // get the current element (group) attributes for (int i1 = 0; i1 < element.getChildNodes().getLength(); i1++) { if (XSDTransformer.TAG_COMPLEX_TYPE.equals(element.getChildNodes().item(i1).getNodeName())) { Node complexTypeNode = element.getChildNodes().item(i1); for (int i2 = 0; i2 < complexTypeNode.getChildNodes().getLength(); i2++) { if (XSDTransformer.TAG_ATTRIBUTE.equals(complexTypeNode.getChildNodes().item(i2).getNodeName())) { appLogger.debug("add group attr: " + ((Element) complexTypeNode.getChildNodes().item(i2)).getAttribute(XSDTransformer.ATTR_NAME)); groupAttrMap.put(((Element) complexTypeNode.getChildNodes().item(i2)).getAttribute(XSDTransformer.ATTR_NAME), complexTypeNode.getChildNodes().item(i2)); docAttrMap.put(((Element) complexTypeNode.getChildNodes().item(i2)).getAttribute(XSDTransformer.ATTR_NAME), complexTypeNode.getChildNodes().item(i2)); } } } } // now, get the parent's attributes parentGroupAttrMap = groupAttrs.get(parentGroup); if (parentGroupAttrMap != null) { Iterator<String> iter = parentGroupAttrMap.keySet().iterator(); while (iter.hasNext()) { String attrName = iter.next(); groupAttrMap.put(attrName, parentGroupAttrMap.get(attrName)); } } // put the attributes in the attributes map groupAttrs.put(element, groupAttrMap); } for (int i = 0; i < element.getChildNodes().getLength(); i++) { Node childLevel1 = (Node) element.getChildNodes().item(i); if (childLevel1.getNodeName().equals(XSDTransformer.TAG_COMPLEX_TYPE)) { for (int j = 0; j < childLevel1.getChildNodes().getLength(); j++) { Node childLevel2 = (Node) 
childLevel1.getChildNodes().item(j); if (childLevel2.getNodeName().equals(XSDTransformer.TAG_SEQUENCE)) { for (int k = 0; k < childLevel2.getChildNodes().getLength(); k++) { Node childLevel3 = (Node) childLevel2.getChildNodes().item(k); if (childLevel3.getNodeName().equals(XSDTransformer.TAG_ELEMENT)) { // check if single element or group if (isGroup(childLevel3)) { // another group element.. // unfortunately, a recursion is // needed here!!! :-( processGroup(childLevel3, false, element, docAttrMap, xsdDoc, newElementsList); } else { // reached a single-value element.. copy it under the // main sequence and apply the name-shorname // replacement processGroupElement(childLevel3, element, isFirstLevelGroup, xsdDoc, newElementsList); } } } } } } } appLogger.debug("finished processing group [" + elementName + "]."); } /** * process the sent <code>element</code> to extract/modify required * information: * 1. replace the <code>name</code> attribute with the <code>shortname</code>. * * @param element */ private void processElement(Node element) { String fieldShortName = null; String fieldColumnName = null; String fieldDataType = null; String fieldFormat = null; String fieldInputLength = null; String elementName = null; HashMap<String, String> elementProperties = new HashMap<String, String>(); if (element.getAttributes().getNamedItem(XSDTransformer.ATTR_NAME) != null) { elementName = element.getAttributes().getNamedItem(XSDTransformer.ATTR_NAME).getNodeValue(); } appLogger.debug("processing element [" + elementName + "]..."); for (int i = 0; i < element.getChildNodes().getLength(); i++) { Node childLevel1 = (Node) element.getChildNodes().item(i); if (childLevel1.getNodeName().equals(XSDTransformer.TAG_ANNOTATION)) { for (int j = 0; j < childLevel1.getChildNodes().getLength(); j++) { Node childLevel2 = (Node) childLevel1.getChildNodes().item(j); if (childLevel2.getNodeName().equals(XSDTransformer.TAG_APP_INFO)) { for (int k = 0; k < childLevel2.getChildNodes().getLength(); k++) { Node childLevel3 = (Node) childLevel2.getChildNodes().item(k); if (childLevel3.getNodeName().equals(XSDTransformer.TAG_HAS_PROPERTY)) { if (childLevel3.getAttributes() != null) { String attrName = null; Node attribute = childLevel3.getAttributes().getNamedItem(XSDTransformer.ATTR_NAME); if (attribute != null) { attrName = attribute.getNodeValue(); elementProperties.put(attrName, childLevel3.getAttributes().getNamedItem(XSDTransformer.ATTR_VALUE) .getNodeValue()); if (attrName.equals(XSDTransformer.FIELD_SHORT_NAME)) { fieldShortName = childLevel3.getAttributes().getNamedItem(XSDTransformer.ATTR_VALUE) .getNodeValue(); } else if (attrName.equals(XSDTransformer.FIELD_COLUMN_NAME)) { fieldColumnName = childLevel3.getAttributes().getNamedItem(XSDTransformer.ATTR_VALUE) .getNodeValue(); } else if (attrName.equals(XSDTransformer.FIELD_DATA_TYPE)) { fieldDataType = childLevel3.getAttributes().getNamedItem(XSDTransformer.ATTR_VALUE) .getNodeValue(); } else if (attrName.equals(XSDTransformer.FIELD_FMT)) { fieldFormat = childLevel3.getAttributes().getNamedItem(XSDTransformer.ATTR_VALUE) .getNodeValue(); } else if (attrName.equals(XSDTransformer.FIELD_INPUT_LEN)) { fieldInputLength = childLevel3.getAttributes().getNamedItem(XSDTransformer.ATTR_VALUE) .getNodeValue(); } } } } } } } } } if (element.getAttributes().getNamedItem(XSDTransformer.ATTR_NAME) != null) { element.getAttributes().getNamedItem(XSDTransformer.ATTR_NAME).setNodeValue(fieldShortName); } sqlViewColumnsSingle.append(System.getProperty("line.separator") + 
fieldColumnName + ", "); sqlViewSelectSingle.append(System.getProperty("line.separator") + fieldShortName + ", "); elementProperties.put(XSDTransformer.FIELD_SINGLE_OR_MULTI, "S"); constructElementRow(elementProperties); singleValueTableColumns.add(fieldShortName + XSDTransformer.DELIMITER_COLUMN_TYPE + fieldDataType + fieldFormat + XSDTransformer.DELIMITER_COLUMN_TYPE + fieldInputLength); appLogger.debug("finished processing element [" + elementName + "]."); } /** * process the sent <code>element</code> to extract/modify required * information: * 1. copy the element under the main sequence * 2. replace the <code>name</code> attribute with the <code>shortname</code>. * 3. add the attributes of the parent groups (if non-first-level-group) * * @param element */ private void processGroupElement(Node element, Node parentGroup, boolean isFirstLevelGroup, Document xsdDoc, ArrayList<Object> newElementsList) { String fieldShortName = null; String fieldColumnName = null; String fieldDataType = null; String fieldFormat = null; String fieldInputLength = null; String elementName = null; Element newElement = null; HashMap<String, String> elementProperties = new HashMap<String, String>(); ArrayList<String> tableColumns = new ArrayList<String>(); HashMap<String, Object> groupAttrMap = null; if (element.getAttributes().getNamedItem(XSDTransformer.ATTR_NAME) != null) { elementName = element.getAttributes().getNamedItem(XSDTransformer.ATTR_NAME).getNodeValue(); } appLogger.debug("processing element [" + elementName + "]..."); // 1. copy the element newElement = (Element) element.cloneNode(true); newElement.setAttribute(XSDTransformer.ATTR_MAX_OCCURS, "unbounded"); // 2. if non-first-level-group, replace the element's SimpleType tag with a ComplexType tag if (!isFirstLevelGroup) { if (((Element) newElement).getElementsByTagName(XSDTransformer.TAG_SIMPLE_TYPE).getLength() != 0) { // there should be only one tag of SimpleType Node simpleTypeNode = ((Element) newElement).getElementsByTagName(XSDTransformer.TAG_SIMPLE_TYPE).item(0); // create the new ComplexType element Element complexTypeNode = xsdDoc.createElement(XSDTransformer.TAG_COMPLEX_TYPE); complexTypeNode.setAttribute(XSDTransformer.ATTR_MIXED, "true"); // get the list of attributes for the parent group groupAttrMap = groupAttrs.get(parentGroup); Iterator<String> attrIter = groupAttrMap.keySet().iterator(); while(attrIter.hasNext()) { Element attr = (Element) groupAttrMap.get(attrIter.next()); Element newAttrElement = xsdDoc.createElement(XSDTransformer.TAG_ATTRIBUTE); appLogger.debug("adding attr. [" + attr.getAttribute(XSDTransformer.ATTR_NAME) + "]..."); newAttrElement.setAttribute(XSDTransformer.ATTR_REF, attr.getAttribute(XSDTransformer.ATTR_NAME)); newAttrElement.setAttribute(XSDTransformer.ATTR_USE, "optional"); complexTypeNode.appendChild(newAttrElement); } // replace the old SimpleType node with the new ComplexType node newElement.replaceChild(complexTypeNode, simpleTypeNode); } } // 3. 
replace the name with the shortname in the new element for (int i = 0; i < newElement.getChildNodes().getLength(); i++) { Node childLevel1 = (Node) newElement.getChildNodes().item(i); if (childLevel1.getNodeName().equals(XSDTransformer.TAG_ANNOTATION)) { for (int j = 0; j < childLevel1.getChildNodes().getLength(); j++) { Node childLevel2 = (Node) childLevel1.getChildNodes().item(j); if (childLevel2.getNodeName().equals(XSDTransformer.TAG_APP_INFO)) { for (int k = 0; k < childLevel2.getChildNodes().getLength(); k++) { Node childLevel3 = (Node) childLevel2.getChildNodes().item(k); if (childLevel3.getNodeName().equals(XSDTransformer.TAG_HAS_PROPERTY)) { if (childLevel3.getAttributes() != null) { String attrName = null; Node attribute = childLevel3.getAttributes().getNamedItem(XSDTransformer.ATTR_NAME); if (attribute != null) { attrName = attribute.getNodeValue(); elementProperties.put(attrName, childLevel3.getAttributes().getNamedItem(XSDTransformer.ATTR_VALUE) .getNodeValue()); if (attrName.equals(XSDTransformer.FIELD_SHORT_NAME)) { fieldShortName = childLevel3.getAttributes().getNamedItem(XSDTransformer.ATTR_VALUE) .getNodeValue(); } else if (attrName.equals(XSDTransformer.FIELD_COLUMN_NAME)) { fieldColumnName = childLevel3.getAttributes().getNamedItem(XSDTransformer.ATTR_VALUE) .getNodeValue(); } else if (attrName.equals(XSDTransformer.FIELD_DATA_TYPE)) { fieldDataType = childLevel3.getAttributes().getNamedItem(XSDTransformer.ATTR_VALUE) .getNodeValue(); } else if (attrName.equals(XSDTransformer.FIELD_FMT)) { fieldFormat = childLevel3.getAttributes().getNamedItem(XSDTransformer.ATTR_VALUE)
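The paste above is cut off mid-method, but the shape of the class is clear enough to sketch what was asked for. A hedged attempt at an interface: the name XSDDocumentTransformer and the single transform entry point are my assumptions (the only public members visible above are the constructor and the constants), chosen to mirror the private retrieveDoc()/transformDoc() pair:

import org.w3c.dom.Document;

/**
 * Hypothetical public contract for XSDTransformer: read the XSD at a
 * given path, apply the name -> shortname rewrite, and return the DOM.
 */
public interface XSDDocumentTransformer {

    /** parse the XSD file at docPath and return the transformed document */
    Document transform(String docPath);
}

XSDTransformer would then declare "implements XSDDocumentTransformer" and expose a public transform(String) that chains its private retrieveDoc() and transformDoc(). Note that an interface can only carry the public surface; helpers like processGroup() and processElement() stay private in the implementing class.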

