Search Results

Search found 67075 results on 2683 pages for 'data model'.


  • What's the best way to expose a Model object in a ViewModel?

    - by Angel
    In a WPF MVVM application, I exposed my model object into my viewModel by creating an instance of Model class (which cause dependency) into ViewModel. Instead of creating separate VM properties, I wrap the Model properties inside my ViewModel Property. My model is just an entity framework generated proxy class: public partial class TblProduct { public TblProduct() { this.TblPurchaseDetails = new HashSet<TblPurchaseDetail>(); this.TblPurchaseOrderDetails = new HashSet<TblPurchaseOrderDetail>(); this.TblSalesInvoiceDetails = new HashSet<TblSalesInvoiceDetail>(); this.TblSalesOrderDetails = new HashSet<TblSalesOrderDetail>(); } public int ProductId { get; set; } public string ProductCode { get; set; } public string ProductName { get; set; } public int CategoryId { get; set; } public string Color { get; set; } public Nullable<decimal> PurchaseRate { get; set; } public Nullable<decimal> SalesRate { get; set; } public string ImagePath { get; set; } public bool IsActive { get; set; } public virtual TblCompany TblCompany { get; set; } public virtual TblProductCategory TblProductCategory { get; set; } public virtual TblUser TblUser { get; set; } public virtual ICollection<TblPurchaseDetail> TblPurchaseDetails { get; set; } public virtual ICollection<TblPurchaseOrderDetail> TblPurchaseOrderDetails { get; set; } public virtual ICollection<TblSalesInvoiceDetail> TblSalesInvoiceDetails { get; set; } public virtual ICollection<TblSalesOrderDetail> TblSalesOrderDetails { get; set; } } Here is my ViewModel: public class ProductViewModel : WorkspaceViewModel { #region Constructor public ProductViewModel() { StartApp(); } #endregion //Constructor #region Properties private IProductDataService _dataService; public IProductDataService DataService { get { if (_dataService == null) { if (IsInDesignMode) { _dataService = new ProductDataServiceMock(); } else { _dataService = new ProductDataService(); } } return _dataService; } } //Get and set Model object private TblProduct _product; public TblProduct Product { get { return _product ?? 
(_product = new TblProduct()); } set { _product = value; } } #region Public Properties public int ProductId { get { return Product.ProductId; } set { if (Product.ProductId == value) { return; } Product.ProductId = value; RaisePropertyChanged("ProductId"); } } public string ProductName { get { return Product.ProductName; } set { if (Product.ProductName == value) { return; } Product.ProductName = value; RaisePropertyChanged(() => ProductName); } } private ObservableCollection<TblProduct> _productRecords; public ObservableCollection<TblProduct> ProductRecords { get { return _productRecords; } set { _productRecords = value; RaisePropertyChanged("ProductRecords"); } } //Selected Product private TblProduct _selectedProduct; public TblProduct SelectedProduct { get { return _selectedProduct; } set { _selectedProduct = value; if (_selectedProduct != null) { this.ProductId = _selectedProduct.ProductId; this.ProductCode = _selectedProduct.ProductCode; } RaisePropertyChanged("SelectedProduct"); } } #endregion //Public Properties #endregion // Properties #region Commands private ICommand _newCommand; public ICommand NewCommand { get { if (_newCommand == null) { _newCommand = new RelayCommand(() => ResetAll()); } return _newCommand; } } private ICommand _saveCommand; public ICommand SaveCommand { get { if (_saveCommand == null) { _saveCommand = new RelayCommand(() => Save()); } return _saveCommand; } } private ICommand _deleteCommand; public ICommand DeleteCommand { get { if (_deleteCommand == null) { _deleteCommand = new RelayCommand(() => Delete()); } return _deleteCommand; } } #endregion //Commands #region Methods private void StartApp() { LoadProductCollection(); } private void LoadProductCollection() { var q = DataService.GetAllProducts(); this.ProductRecords = new ObservableCollection<TblProduct>(q); } private void Save() { if (SelectedOperateMode == OperateModeEnum.OperateMode.New) { //Pass the Model object into Dataservice for save DataService.SaveProduct(this.Product); } else if (SelectedOperateMode == OperateModeEnum.OperateMode.Edit) { //Pass the Model object into Dataservice for Update DataService.UpdateProduct(this.Product); } ResetAll(); LoadProductCollection(); } #endregion //Methods } Here is my Service class: class ProductDataService:IProductDataService { /// <summary> /// Context object of Entity Framework model /// </summary> private MaizeEntities Context { get; set; } public ProductDataService() { Context = new MaizeEntities(); } public IEnumerable<TblProduct> GetAllProducts() { using(var context=new R_MaizeEntities()) { var q = from p in context.TblProducts where p.IsDel == false select p; return new ObservableCollection<TblProduct>(q); } } public void SaveProduct(TblProduct _product) { using(var context=new R_MaizeEntities()) { _product.LastModUserId = GlobalObjects.LoggedUserID; _product.LastModDttm = DateTime.Now; _product.CompanyId = GlobalObjects.CompanyID; context.TblProducts.Add(_product); context.SaveChanges(); } } public void UpdateProduct(TblProduct _product) { using (var context = new R_MaizeEntities()) { context.TblProducts.Attach(_product); context.Entry(_product).State = EntityState.Modified; _product.LastModUserId = GlobalObjects.LoggedUserID; _product.LastModDttm = DateTime.Now; _product.CompanyId = GlobalObjects.CompanyID; context.SaveChanges(); } } public void DeleteProduct(int _productId) { using (var context = new R_MaizeEntities()) { var product = (from c in context.TblProducts where c.ProductId == _productId select c).First(); product.LastModUserId = 
GlobalObjects.LoggedUserID; product.LastModDttm = DateTime.Now; product.IsDel = true; context.SaveChanges(); } } } I exposed my model object in my viewModel by creating an instance of it using new keyword, also I instantiated my DataService class in VM. I know this will cause a strong dependency. So: What's the best way to expose a Model object in a ViewModel? What's the best way to use DataService in VM?
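
    A commonly suggested direction for both questions, shown below only as a minimal, untested sketch (it assumes something outside the ViewModel, such as an IoC container or a ViewModelLocator, supplies the service), is constructor injection: the ViewModel asks for IProductDataService instead of newing up ProductDataService, and the model instance stays wrapped behind ViewModel properties exactly as in the code above:

    public class ProductViewModel : WorkspaceViewModel
    {
        private readonly IProductDataService _dataService;
        private TblProduct _product = new TblProduct();

        // The service is handed in from outside, so a mock implementation can
        // be supplied at design time or in unit tests without touching the VM.
        public ProductViewModel(IProductDataService dataService)
        {
            _dataService = dataService;
        }

        // The model stays wrapped: bindings still target ViewModel properties,
        // while the values are read from and written to the TblProduct instance.
        public string ProductName
        {
            get { return _product.ProductName; }
            set
            {
                if (_product.ProductName == value) return;
                _product.ProductName = value;
                RaisePropertyChanged(() => ProductName);
            }
        }
    }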

    Read the article

  • Data Source Connection Pool Sizing

    - by Steve Felts
    One of the most time-consuming procedures of a database application is establishing a connection. The connection pooling of the data source can be used to minimize this overhead. That argues for using the data source instead of accessing the database driver directly. Configuring the size of the pool in the data source is somewhere between an art and a science; this article will try to move it closer to science. From the beginning, the WLS data source has had initial capacity and maximum capacity configuration values. When the system starts up and when it shrinks, initial capacity is used. The pool can grow to maximum capacity. Customers found that they might want to set the initial capacity to 0 (more on that later) but didn't want the pool to shrink to 0. In WLS 10.3.6, we added minimum capacity to specify the lower limit to which a pool will shrink. If minimum capacity is not set, it defaults to the initial capacity for upward compatibility. We also did some work on the shrinking in release 10.3.4 to reduce thrashing; the algorithm that used to shrink to the maximum of the currently used connections or the initial capacity (basically the unused connections were all released) was changed to shrink by half of the unused connections.
    The simple approach to sizing the pool is to set the initial/minimum capacity to the maximum capacity. Doing this creates all connections at startup, avoids creating connections on demand, and keeps the pool stable. However, there are a number of reasons not to take this simple approach. When WLS is booted, the deployment of the data source includes synchronously creating the connections. The more connections that are configured in initial capacity, the longer the boot time for WLS (there have been several projects for parallel boot in WLS but none that are available). Related to creating a lot of connections at boot time is the problem of logon storms (the database gets too much work at one time). WLS has a solution for that by setting the login delay seconds on the pool but that also increases the boot time.
    There are a number of cases where it is desirable to set the initial capacity to 0. By doing that, the overhead of creating connections is deferred out of the boot and the database doesn't need to be available. An application may not want WLS to automatically connect to the database until it is actually needed, such as for some cold/warm failover configurations.
    There are a number of cases where minimum capacity should be less than maximum capacity. Connections are generally expensive to keep around. They cause state to be kept on both the client and the server, and the state on the backend may be heavy (for example, a process). Depending on the vendor, connection usage may cost money. If the workload is not constant, then database connections can be freed up by shrinking the pool when connections are not in use.
    When using Active GridLink, connections can be created as needed according to runtime load balancing (RLB) percentages instead of by connection load balancing (CLB) during data source deployment.
    Shrinking is an effective technique for clearing the pool when connections are not in use. In addition to the obvious reason that there are times when the workload is lighter, there are some configurations where the database and/or firewall conspire to make long-unused or too-old connections no longer viable. There are also some data source features where the connection has state and cannot be used again unless the state matches the request. Examples of this are identity based pooling where the connection has a particular owner and XA affinity where the connection is associated with a particular RAC node. At this point, WLS does not re-purpose (discard/replace) connections and shrinking is a way to get rid of the unused existing connection and get a new one with the correct state when needed.
    So far, the discussion has focused on the relationship of initial, minimum, and maximum capacity. Computing the maximum size requires some knowledge about the application and the current number of simultaneously active users, web sessions, batch programs, or whatever access patterns are common. The applications should be written to only reserve and close connections as needed but multiple statements, if needed, should be done in one reservation (don't get/close more often than necessary). This means that the size of the pool is likely to be significantly smaller than the number of users. If possible, you can pick a size and see how it performs under simulated or real load. There is a high-water mark statistic (ActiveConnectionsHighCount) that tracks the maximum connections concurrently used. In general, you want the size to be big enough so that you never run out of connections but no bigger. It will need to deal with spikes in usage, which is where shrinking after the spike is important. Of course, the database capacity also has a big influence on the decision since it's important not to overload the database machine. Planning also needs to happen if you are running in a Multi-Data Source or Active GridLink configuration and expect that the remaining nodes will take over the connections when one of the nodes in the cluster goes down. For XA affinity, additional headroom is also recommended.
    In summary, setting initial and maximum capacity to be the same may be simple but there are many other factors that may be important in making the decision about sizing.

    Read the article

  • Data Source Security Part 4

    - by Steve Felts
    So far, I have covered Client Identity and Oracle Proxy Session features, with WLS or database credentials. This article will cover one more feature, Identity-based pooling. Then, there is one more topic to cover - how these options play with transactions.
    Identity-based Connection Pooling
    An identity based pool creates a heterogeneous pool of connections. This allows applications to use a JDBC connection with a specific DBMS credential by pooling physical connections with different DBMS credentials. The DBMS credential is based on either the WebLogic user mapped to a database user or the database user directly, based on the "use database credentials" setting as described earlier. Using this feature with "use database credentials" enabled seems to be what is proposed in the JDBC standard, basically a heterogeneous pool with users specified by getConnection(user, password). The allocation of connections is more complex if the Enable Identity Based Connection Pooling attribute is enabled on the data source. When an application requests a database connection, the WebLogic Server instance selects an existing physical connection or creates a new physical connection with the requested DBMS identity. The following steps describe how heterogeneous connections are created:
    1. At connection pool initialization, the physical JDBC connections based on the configured or default "initial capacity" are created with the configured default DBMS credential of the data source.
    2. An application tries to get a connection from a data source.
    3a. If "use database credentials" is not enabled, the user specified in getConnection is mapped to a DBMS credential, as described earlier. If the credential map doesn't have a matching user, the default DBMS credential is used from the datasource descriptor.
    3b. If "use database credentials" is enabled, the user and password specified in getConnection are used directly.
    4. The connection pool is searched for a connection with a matching DBMS credential.
    5. If a match is found, the connection is reserved and returned to the application.
    6. If no match is found, a connection is created or reused based on the maximum capacity of the pool:
    - If the maximum capacity has not been reached, a new connection is created with the DBMS credential, reserved, and returned to the application.
    - If the pool has reached maximum capacity, based on the least recently used (LRU) algorithm, a physical connection is selected from the pool and destroyed. A new connection is created with the DBMS credential, reserved, and returned to the application.
    It should be clear that finding a matching connection is more expensive than in a homogeneous pool. Destroying a connection and getting a new one is very expensive. If you can use a normal homogeneous pool or one of the light-weight options (client identity or an Oracle proxy connection), those should be used instead of identity based pooling. Regardless of how physical connections are created, each physical connection in the pool has its own DBMS credential information maintained by the pool. Once a physical connection is reserved by the pool, it does not change its DBMS credential even if the current thread changes its WebLogic user credential and continues to use the same connection. To configure this feature, select Enable Identity Based Connection Pooling. See http://docs.oracle.com/cd/E24329_01/apirefs.1211/e24401/taskhelp/jdbc/jdbc_datasources/EnableIdentityBasedConnectionPooling.html "Enable identity-based connection pooling for a JDBC data source" in Oracle WebLogic Server Administration Console Help.
    You must make the following changes to use Logging Last Resource (LLR) transaction optimization with Identity-based Pooling to get around the problem that multiple users will be accessing the associated transaction table.
    - You must configure a custom schema for LLR using a fully qualified LLR table name. All LLR connections will then use the named schema rather than the default schema when accessing the LLR transaction table.
    - Use database specific administration tools to grant permission to access the named LLR table to all users that could access this table via a global transaction. By default, the LLR table is created during boot by the user configured for the connection in the data source. In most cases, the database will only allow access to this user and not allow access to mapped users.
    Connections within Transactions
    Now that we have covered the behavior of all of these various options, it's time to discuss the exception to all of the rules. When you get a connection within a transaction, it is associated with the transaction context on a particular WLS instance. When getting a connection with a data source configured with non-XA LLR or 1PC (using the JTS driver) with global transactions, the first connection obtained within the transaction is returned on subsequent connection requests regardless of the values of username/password specified and independent of the associated proxy user session, if any. The connection must be shared among all users of the connection when using LLR or 1PC. For XA data sources, the first connection obtained within the global transaction is returned on subsequent connection requests within the application server, regardless of the values of username/password specified and independent of the associated proxy user session, if any. The connection must be shared among all users of the connection within a global transaction within the application server/JVM.

    Read the article

  • SQL SERVER – Disable Clustered Index and Data Insert

    - by pinaldave
    Earlier today I received following email. “Dear Pinal, [Removed unrelated content] We looked at your script and found out that in your script of disabling indexes, you have only included non-clustered index during the bulk insert and missed to disabled all the clustered index. Our DBA[name removed] has changed your script a bit and included all the clustered indexes. Since our application is not working. When DBA [name removed] tried to enable clustered indexes again he is facing error incorrect syntax error. We are in deep problem [word replaced] [Removed Identity of organization and few unrelated stuff ]“ I have replied to my client and helped them fixed the problem. What really came to my attention is the concept of disabling clustered index. Let us try to learn a lesson from this experience. In this case, there was no need to disable clustered index at all. I had done necessary work when I was called in to work on tuning project. I had removed unused indexes, created few optimal indexes and wrote a script to disable few selected high cost indexes when bulk insert (and similar) operations are performed. There was another script which rebuild all the indexes as well. The solution worked till they included clustered index in disabling the script. Clustered indexes are in fact original table (or heap) physically ordered (any more things – not scope of this article) according to one or more keys(columns). When clustered index is disabled data rows of the disabled clustered index cannot be accessed. This means there will be no insert possible. When non clustered indexes are disabled all the data related to physically deleted but the definition of the index is kept in the system. Due to the same reason even reorganization of the index is not possible till the clustered index (which was disabled) is rebuild. Now let us come to the second part of the question, regarding receiving the error when clustered index is ‘enabled’. This is very common question I receive on the blog. (The following statement is written keeping the syntax of T-SQL in mind) Clustered indexes can be disabled but can not be enabled, they have to rebuild. It is intuitive to think that something which we have ‘disabled’ can be ‘enabled’ but the syntax for the same is ‘rebuild’. This issue has been explained here: SQL SERVER – How to Enable Index – How to Disable Index – Incorrect syntax near ‘ENABLE’. Let us go over this example where inserting the data is not possible when clustered index is disabled. USE AdventureWorks GO -- Create Table CREATE TABLE [dbo].[TableName]( [ID] [int] NOT NULL, [FirstCol] [varchar](50) NULL, CONSTRAINT [PK_TableName] PRIMARY KEY CLUSTERED ([ID] ASC) ) GO -- Create Nonclustered Index CREATE UNIQUE NONCLUSTERED INDEX [IX_NonClustered_TableName] ON [dbo].[TableName] ([FirstCol] ASC) GO -- Populate Table INSERT INTO [dbo].[TableName] SELECT 1, 'First' UNION ALL SELECT 2, 'Second' UNION ALL SELECT 3, 'Third' GO -- Disable Nonclustered Index ALTER INDEX [IX_NonClustered_TableName] ON [dbo].[TableName] DISABLE GO -- Insert Data should work fine INSERT INTO [dbo].[TableName] SELECT 4, 'Fourth' UNION ALL SELECT 5, 'Fifth' GO -- Disable Clustered Index ALTER INDEX [PK_TableName] ON [dbo].[TableName] DISABLE GO -- Insert Data will fail INSERT INTO [dbo].[TableName] SELECT 6, 'Sixth' UNION ALL SELECT 7, 'Seventh' GO /* Error: Msg 8655, Level 16, State 1, Line 1 The query processor is unable to produce a plan because the index 'PK_TableName' on table or view 'TableName' is disabled. 
    */ -- Reorganizing Index will also throw an error ALTER INDEX [PK_TableName] ON [dbo].[TableName] REORGANIZE GO /* Error: Msg 1973, Level 16, State 1, Line 1 Cannot perform the specified operation on disabled index 'PK_TableName' on table 'dbo.TableName'. */ -- Rebuilding should work fine ALTER INDEX [PK_TableName] ON [dbo].[TableName] REBUILD GO -- Insert Data should work fine INSERT INTO [dbo].[TableName] SELECT 6, 'Sixth' UNION ALL SELECT 7, 'Seventh' GO -- Clean Up DROP TABLE [dbo].[TableName] GO I hope this example is clear enough. There are a few additional posts I had written years ago; I am listing them here. SQL SERVER – Enable and Disable Index Non Clustered Indexes Using T-SQL SQL SERVER – Enabling Clustered and Non-Clustered Indexes – Interesting Fact Reference : Pinal Dave (http://blog.SQLAuthority.com) Filed under: Pinal Dave, SQL, SQL Authority, SQL Constraint and Keys, SQL Query, SQL Server, SQL Tips and Tricks, T SQL, Technology

    Read the article

  • SQL – Step by Step Guide to Download and Install NuoDB – Getting Started with NuoDB

    - by Pinal Dave
    Let us take a look at the application you own at your business. If you pay attention to the underlying database for that application you will be amazed. Every successful business these days processes way more data than it used to process before. The number of transactions and the amount of data are growing at an exponential rate. Every single day there is way more data to process than before. Big data is no longer a concept; it is now turning into reality. If you look around there are so many different big data solutions and it can be quite a difficult task to figure out where to begin. Personally, I have been experimenting with a lot of different solutions which allow my database to scale immediately without much hassle while maintaining optimal database performance. There are for sure some solutions out there, but for many I even have to learn their specific language and there is a lot of new exploration to do. Honestly, what I prefer is a product which works with the language I know (SQL) and follows all the RDBMS concepts which I am familiar with (ACID etc.). NuoDB is one such solution. It is an operational NewSQL database built on a patented emergent architecture with full support for SQL and ACID guarantees. In this blog post, I will explore how one can download and install the NuoDB database.
    Step 1: Follow me and go to the NuoDB download page. Simply fill out the form, accept the online license agreement, and you will be taken directly to a page where you can select any platform you prefer to install NuoDB. In my example below, I select the Windows 64-bit platform as it is one of the most popular NuoDB platforms. (You can also run NuoDB on Amazon Web Services but I prefer to install it on my local machine for the purposes of this blog).
    Step 2: Once you have downloaded the NuoDB installer, double click on it to install it on the Windows platform. Here is the enlarged icon of the installer.
    Step 3: Follow the installation wizard, as it is pretty straightforward and easy to do. I have selected all the options to install as the overall installation is very simple and it does not take up much space. I have installed it on my C drive but you can select your preferred drive. It is quite possible that if you do not have 64-bit Java, it will throw the following error. If you face the following error, I suggest you download 64-bit Java from here. Make sure that you download 64-bit Java from the following link: http://java.com/en/download/manual.jsp If you already have 64-bit Java installed, you can continue with the installation as described in the following image. Otherwise, install Java and start again from Step 1. As in my case, I already have 64-bit Java installed – and you won't believe me when I say that the entire installation of NuoDB only took me around 90 seconds. Click on Finish to exit the installation.
    Step 4: Once the installation is successful, NuoDB will automatically open the following two tabs – Console and DevCenter – in your preferred browser. On the Console tab you can explore various components of the NuoDB solution, e.g. QuickStart, Admin, Explorer, Storefront and Samples. We will see various components and their usage in future blog posts. If you follow these steps in this post, which I have followed to install NuoDB, you will agree that the installation of NuoDB is extremely smooth and it was indeed a pleasure to install a database product with such ease. If you have installed other database products in the past, you will absolutely agree with me.
So download NuoDB and install it today, and in tomorrow’s blog post I will take the installation to the next level. Reference: Pinal Dave (http://blog.sqlauthority.com) Filed under: Big Data, PostADay, SQL, SQL Authority, SQL Query, SQL Server, SQL Tips and Tricks, T SQL, Technology Tagged: NuoDB

    Read the article

  • Recover Lost Form Data in Firefox

    - by Asian Angel
    Have you ever filled in a text area or form in a webpage and something happens before you can finish it? If you like the idea of recovering that lost data then you will want to have a look at the Lazarus: Form Recovery extension for Firefox. Lazarus: Form Recovery in Action For our first example we chose the comment text box area for one of the articles here at the website. As you can see we were not finished typing in the whole comment yet… Notice the “Lazarus Icon” in the lower right corner. Note: We simulated accidental tab closures for our two examples. After getting our webpage opened up again all of our text was gone. Right clicking within the text area showed two options available…”Recover Text & Recover Form”. Notice that our lost text was listed as a “sub menu”…this could be extremely useful in matching up the appropriate text to the correct webpage if you had multiple tabs open before something happened. Click on the correct text listing to insert it. So easy to finish writing our comment without having to start from zero again. In our second example we chose the sign-up form page for the website. As before we were not finished filling in the form… Getting the webpage opened back up showed the same problem as before…all the entered text was lost. This time we right clicked in the browser window area and there was that wonderful “Recover Form Command” waiting to be used. One click and… All of our lost form data was back and we were able to finish filling in the form. For those who may be interested you can disable Lazarus: Form Recovery on individual websites using the “Context Menu” for the “Status Bar Icon” Options There are three sections in the options and you should take a quick look through them to make any desired modifications in how Lazarus: Form Recovery functions. The first “Options Area” focuses on display/access for the extension. The second “Options Area” allows you to expand the type of data retained, enable removal of data within a given time frame, set up a password, disable search indexing, and enable form data retention while in “Private Browsing Mode”. The third “Options Area” focuses on the Lazarus database itself. Conclusion If you have ever lost text area or form data before then you know how much time could be lost in starting over. Lazarus: Form Recovery helps provide a nice backup solution to get you up and running once again with a minimum of effort. Links Download the Lazarus: Form Recovery extension (Mozilla Add-ons) Download the Lazarus: Form Recovery extension (Extension Homepage)

    Read the article

  • Using the Script Component as a Conditional Split

    This is a quick walk through on how you can use the Script Component to perform Conditional Split like behaviour, splitting your data across multiple outputs. We will use C# code to decide what flows to which output, rather than the expression syntax of the Conditional Split transformation.
    Start by setting up the source. For my example the source is a list of SQL objects from sys.objects, just a quick way to get some data:
    SELECT type, name FROM sys.objects
    type  name
    S     syssoftobjrefs
    F     FK_Message_Page
    U     Conference
    IT    queue_messages_23007163
    Shown above is a small sample of the data you could expect to see. Once you have setup your source, add the Script Component, selecting Transformation when prompted for the type, and connect it up to the source. Now open the component, but don't dive into the script just yet. First we need to select some columns. Select the Input Columns page and then select the columns we want to use as part of our filter logic. You don't need to choose columns that you may want later; this is just the columns used in the script itself.
    Next we need to add our outputs. Select the Inputs and Outputs page. You get one by default, but we need to add some more, it wouldn't be much of a split otherwise. For this example we'll add just one more. Click the Add Output button, and you'll see a new output is added. Now we need to set some properties, so make sure our new Output 1 is selected. In the properties grid change the SynchronousInputID property to be our input Input 0, and change the ExclusionGroup property to 1. Now select Output 0 and change the ExclusionGroup property to 2. This value itself isn't important, provided each output has a different value other than zero. By setting this property on both outputs it allows us to split the data down one or the other, making each exclusive. If we left it to 0, that output would get all the rows. It can be a useful feature allowing you to copy selected rows to one output whilst retaining the full set of data in the other.
    Now we can go back to the Script page and start writing some code. For the example we will do a very simple test: if the value of the type column is U, for user table, then it goes down the first output, otherwise it ends up in the other. This mimics the exclusive behaviour of the conditional split transformation.
    public override void Input0_ProcessInputRow(Input0Buffer Row)
    {
        // Filter all user tables to the first output,
        // the remaining objects down the other
        if (Row.type.Trim() == "U")
        {
            Row.DirectRowToOutput0();
        }
        else
        {
            Row.DirectRowToOutput1();
        }
    }
    The code itself is very simple, a basic if clause that determines which of the DirectRowToOutput methods we call, there is one for each output. Of course you could write a lot more code to implement some very complex logic, but the final direction is still just a method call. If we now close the script component, we can hook up the outputs and test the package. Your numbers will vary depending on the sample database but as you can see we have clearly split our input data into two outputs. As a final tip, when adding the outputs I would normally rename them, changing the Name in the Properties grid. This means the generated methods follow the pattern, as does the path label shown on the design surface, making everything that much easier to recognise; a short sketch of that is shown below.
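
    As a rough illustration of that final tip (the output names "UserTables" and "OtherObjects" below are invented for this sketch, not taken from the original package), renaming the two outputs changes the generated direct-row methods to match:

    public override void Input0_ProcessInputRow(Input0Buffer Row)
    {
        // Hypothetical output names; after renaming the outputs on the
        // Inputs and Outputs page, the buffer class exposes a
        // DirectRowTo<OutputName> method for each output.
        if (Row.type.Trim() == "U")
        {
            Row.DirectRowToUserTables();
        }
        else
        {
            Row.DirectRowToOtherObjects();
        }
    }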

    Read the article

  • Problems with 3D Array for Voxel Data

    - by Sean M.
    I'm trying to implement a voxel engine in C++ using OpenGL, and I've been working on the rendering of the world. In order to render, I have a 3D array of uint16's that hold the id of the block at the point. I also have a 3D array of uint8's that I am using to store the visibility data for that point, where each bit represents if a face is visible. I have it so the blocks render and all of the proper faces are hidden if needed, but all of the blocks are offset by a power of 2 from where they are stored in the array. So the block at [0][0][0] is rendered at (0, 0, 0), and the block at [1][1][1] is rendered at (1, 1, 1), but the block at [2][2][2] is rendered at (4, 4, 4) and the block at [3][3][3] is rendered at (8, 8, 8), and so on and so forth. This is the result of drawing the above situation: I'm still a little new to the more advanced concepts of C++, like triple pointers, which I'm using for the 3D array, so I think the error is somewhere in there. This is the code for creating the arrays:
    uint16*** _blockData; //Contains a 3D array of uint16s that are the ids of the blocks in the region
    uint8*** _visibilityData; //Contains a 3D array of bytes that hold the visibility data for the faces
    //Allocate memory for the world data
    _blockData = new uint16**[REGION_DIM];
    for (int i = 0; i < REGION_DIM; i++) {
        _blockData[i] = new uint16*[REGION_DIM];
        for (int j = 0; j < REGION_DIM; j++)
            _blockData[i][j] = new uint16[REGION_DIM];
    }
    //Allocate memory for the visibility
    _visibilityData = new uint8**[REGION_DIM];
    for (int i = 0; i < REGION_DIM; i++) {
        _visibilityData[i] = new uint8*[REGION_DIM];
        for (int j = 0; j < REGION_DIM; j++)
            _visibilityData[i][j] = new uint8[REGION_DIM];
    }
    Here is the code used to create the block mesh for the region:
    //Check if the positive x face is visible, this happens for every face
    //Block::VERT_X_POS is just an array of non-transformed cube verts for one face
    //These checks are in a triple loop, which goes over every place in the array
    if (_visibilityData[x][y][z] & 0x01 > 0) {
        _vertexData->AddData(&(translateVertices(Block::VERT_X_POS, x, y, z)[0]), sizeof(Block::VERT_X_POS));
    }
    //This is a seperate method, not in the loop
    glm::vec3* translateVertices(const glm::vec3 data[], uint16 x, uint16 y, uint16 z) {
        glm::vec3* copy = new glm::vec3[6];
        memcpy(&copy, &data, sizeof(data));
        for(int i = 0; i < 6; i++)
            copy[i] += glm::vec3(x, -y, z); //Make +y go down instead
        return copy;
    }
    I cannot see where the blocks may be getting offset by more than they should be, and certainly not why the offsets are a power of 2. Any help is greatly appreciated. Thanks.

    Read the article

  • Combobox with collection view itemssource does not update selection box item on changes to the Model

    - by Vinit Sankhe
    Hello, sorry for the earlier lengthy post. Here is my concise (!) description. I bind a collection view to a combobox as an itemsSource and also bind its selectedvalue to a property from my view model. I must keep IsSynchronizedWithCurrentItem="False". I change the source list of the view and then refresh the view. The changed (added, removed, edited) items appear correctly in the item list of the combo. But the problem is with the selected item. When I change its property which is also the displaymember path of the combo, the changed property value does not reflect back on the selection box of the combo. If you open the combo dropdown it appears correctly in the item list but not in the selection box. Now if I change the combobox tag to Listbox in my XAML (keeping all attributes as they are) then when the selected item's displaymember property value is updated, the changes reflect back on the selected item of the list box. Why this issue? Just FYI: My View Model has properties EmployeeCollectionView and SelectedEmployeeId which are bound to the combo as ItemsSource and SelectedValue respectively. This VM implements the INotifyPropertyChanged interface. My core employee class (a list of which is the source for the EmployeeCollectionView) is simply a Model class without INotifyPropertyChanged. DisplayMemberPath is the "Name" property of the employee Model class. I change this by some means and expect the combo selection box to update the value. I tried refreshing the SelectedEmployeeId by setting it to 0 (where it correctly selects the dummy "-- Select All --" employee entry from the itemsSource) and then back to the old selected value. But no use. The old value takes me back to the old label. The Items collection has the latest entry though. When I make the combobox's IsEditable=True before the view's refresh and set IsEditable=False after the refresh, then things work out correctly! But this is a patch and is unnecessary. Thx Vinit Sankhe
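
    One thing worth trying, sketched below as an untested guess rather than a confirmed fix (the Employee class name and Name property are stand-ins for the real model), is raising INotifyPropertyChanged from the model class itself; the ComboBox selection box binds to the Name used as DisplayMemberPath and otherwise never hears about the change:

    using System.ComponentModel;

    public class Employee : INotifyPropertyChanged
    {
        private string _name;

        public string Name
        {
            get { return _name; }
            set
            {
                if (_name == value) return;
                _name = value;
                // Raising the change notification lets any binding that uses
                // Name as DisplayMemberPath refresh, including the text shown
                // in the ComboBox selection box.
                OnPropertyChanged("Name");
            }
        }

        public event PropertyChangedEventHandler PropertyChanged;

        protected void OnPropertyChanged(string propertyName)
        {
            var handler = PropertyChanged;
            if (handler != null)
            {
                handler(this, new PropertyChangedEventArgs(propertyName));
            }
        }
    }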

    Read the article

  • Support for nested model and class validation with ASP.NET MVC 2.0

    - by Diep-Vriezer
    I'm trying to validate a model containing other objects with validation rules, using the System.ComponentModel.DataAnnotations attributes, and was hoping the default MVC implementation would suffice:
    var obj = js.Deserialize(json, objectInfo.ObjectType);
    if(!TryValidateModel(obj)) { // Handle failed model validation. }
    The object is composed of primitive types but also contains other classes which also use DataAnnotations. Like so:
    public class Entry
    {
        [Required] public Person Subscriber { get; set; }
        [Required] public String Company { get; set; }
    }
    public class Person
    {
        public String FirstName { get; set; }
        [Required] public String Surname { get; set; }
    }
    The problem is that the ASP.NET MVC validation only goes down 1 level and only evaluates the properties of the top level class, as can be read on digitallycreated.net/Blog/54/deep-inside-asp.net-mvc-2-model-metadata-and-validation. Does anyone know an elegant solution to this? I've tried xVal, but they seem to use a non-recursive pattern (http://blog.stevensanderson.com/2009/01/10/xval-a-validation-framework-for-aspnet-mvc/). Someone must have run into this problem before, right? Nesting objects in your model doesn't seem so weird if you're designing a web service.
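
    One possible workaround, offered here only as a rough, untested sketch (the DeepValidator name is invented), is to walk the object graph by hand and evaluate the DataAnnotations attributes recursively; the caller can then copy any returned messages into ModelState. The sketch skips indexed properties, does not descend into collection elements, and does not guard against reference cycles:

    using System.Collections.Generic;
    using System.ComponentModel.DataAnnotations;
    using System.Reflection;

    public static class DeepValidator
    {
        // Recursively evaluates DataAnnotations attributes over an object graph.
        public static IEnumerable<string> Validate(object instance)
        {
            if (instance == null) yield break;

            foreach (PropertyInfo property in instance.GetType().GetProperties())
            {
                if (property.GetIndexParameters().Length > 0) continue;

                object value = property.GetValue(instance, null);

                foreach (ValidationAttribute attribute in
                         property.GetCustomAttributes(typeof(ValidationAttribute), true))
                {
                    if (!attribute.IsValid(value))
                        yield return attribute.FormatErrorMessage(property.Name);
                }

                // Recurse into nested reference types such as Person.
                if (value != null && property.PropertyType.IsClass &&
                    property.PropertyType != typeof(string))
                {
                    foreach (string error in Validate(value))
                        yield return error;
                }
            }
        }
    }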

    Read the article

  • ASP MVC2 model binding issue on POST

    - by Brandon Linton
    So I'm looking at moving from MVC 1.0 to MVC 2.0 RTM. One of the conventions I'd like to start following is using the strongly-typed HTML helpers for generating controls like text boxes. However, it looks like it won't be an easy jump. I tried migrating my first form, replacing lines like this: <%= Html.TextBox("FirstName", Model.Data.FirstName, new {maxlength = 30}) %> ...for lines like this: <%= Html.TextBoxFor(x => x.Data.FirstName, new {maxlength = 30}) %> Previously, this would map into its appropriate view model on a POST, using the following method signature: [AcceptVerbs(HttpVerbs.Post)] public ActionResult Registration(AccountViewInfo viewInfo) Instead, it currently gets an empty object back. I believe the disconnect is in the fact that we pass the view model into a larger aggregate object that has some page metadata and other fun stuff along with it (hence x.Data.FirstName instead of x.FirstName). So my question is: what is the best way to use the strongly-typed helpers while still allowing the MVC framework to appropriately cast the form collection to my view-model as it does in the original line? Is there any way to do it without changing the aggregate type we pass to the view? Thanks!
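
    For what it's worth, one possible direction (an untested sketch, not necessarily the best answer) is to keep the strongly-typed helpers and tell the binder about the prefix they generate, since Html.TextBoxFor(x => x.Data.FirstName, ...) renders an input named Data.FirstName:

    [AcceptVerbs(HttpVerbs.Post)]
    public ActionResult Registration([Bind(Prefix = "Data")] AccountViewInfo viewInfo)
    {
        // With the prefix declared, the DefaultModelBinder maps the posted
        // "Data.FirstName" field onto viewInfo.FirstName as before.
        if (!ModelState.IsValid)
        {
            return View();   // placeholder: rebuild the aggregate page model here
        }

        return RedirectToAction("Index");
    }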

    Read the article

  • Complex ModelBinders and being in charge of creating part of the model

    - by Kieron
    Hi, I've a scenario where I need to bind to an interface - in order to create the correct type, I've got a custom model binder that knows how to create the correct concrete type (which can differ). However, the type created never has the fields correctly filled in. I know I'm missing something blindingly simple here, but can anyone tell me why or at least what I need to do for the model binder to carry on it's work and bind the properties? public class ProductModelBinder : DefaultModelBinder { override public object BindModel (ControllerContext controllerContext, ModelBindingContext bindingContext) { if (bindingContext.ModelType == typeof (IProduct)) { var content = GetProduct (bindingContext); return content; } var result = base.BindModel (controllerContext, bindingContext); return result; } IProduct GetProduct (ModelBindingContext bindingContext) { var idProvider = bindingContext.ValueProvider.GetValue ("Id"); var id = (Guid)idProvider.ConvertTo (typeof (Guid)); var repository = RepositoryFactory.GetRepository<IProductRepository> (); var product = repository.Get (id); return product; } } The Model in my case is a complex type that has an IProduct property, and it's those values I need filled in. Model: [ProductBinder] public class Edit : IProductModel { public Guid Id { get; set; } public byte[] Version { get; set; } public IProduct Product { get; set; } }
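
    In case it helps, the pattern often suggested for this situation (shown here as an unverified sketch against the MVC 2 API) is to point the binding context's ModelMetadata at the concrete instance and then let base.BindModel perform the normal property binding, instead of returning the object yourself:

    public class ProductModelBinder : DefaultModelBinder
    {
        public override object BindModel(ControllerContext controllerContext,
                                         ModelBindingContext bindingContext)
        {
            if (bindingContext.ModelType == typeof(IProduct))
            {
                IProduct product = GetProduct(bindingContext);

                // Point the binding context at the concrete instance we created,
                // then let DefaultModelBinder fill in its properties as usual.
                bindingContext.ModelMetadata = ModelMetadataProviders.Current
                    .GetMetadataForType(() => product, product.GetType());
            }

            return base.BindModel(controllerContext, bindingContext);
        }

        private IProduct GetProduct(ModelBindingContext bindingContext)
        {
            var idProvider = bindingContext.ValueProvider.GetValue("Id");
            var id = (Guid)idProvider.ConvertTo(typeof(Guid));
            var repository = RepositoryFactory.GetRepository<IProductRepository>();
            return repository.Get(id);
        }
    }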

    Read the article

  • Putting a MovieMaterial behind a DAE model in Papervision3D

    - by didibus
    Hi, I'm doing a project using FLARManager augmented reality and the Papervision3D library. Unfortunately, Papervision is giving me a lot of problems. My scene3D contains a DAE model and a plane. The plane has a MovieMaterial and is playing a video through FLVPlayback. The DAE and the plane are both inside the same DisplayObject3D container. FLARManager transforms the container so that everything appears through the angle of the marker. My DAE model is a TV; the screen of the TV is transparent. I want to have my Plane inside of my DAE model, so that the Movie playing on the plane material appears to be what is playing on the TV. The problem is that, even if the plane has a lower Z index than the TV, it always appears in front of the TV. How do I have my plane and its MovieMaterial appear behind the TV, so that some of its corners are cut out by the TV and the part of the TV that's transparent lets me see the Movie? If it's impossible, does anyone have an idea of how I could get the desired effect of having a movie play on the screen of my DAE TV model? Thank You.

    Read the article

  • Iterating through MVC model lists using JavaScript

    - by kapil
    I want to iterate through my model values. Following is what I did to achieve this. But the variable count never increments. How can I increment it to iterate through my model values?
    <script language="javascript" type="text/javascript">
    function AddStudentName() {
        var StudentName = document.getElementById('txtStudentName').value;
        StudentName = StudentName.replace(/^\s+|\s+$/g, '');
        if (StudentName != null) {
            <%int count = 0; %>
            for (var counter = 0; parseInt(counter) < parseInt('<%=Model.StudentInfo.Count%>', 10); counter++) {
                alert('<%=count%>');
                if ('<%=Model.StudentInfo[count].StudentName%>' == StudentName) {
                    alert("A student with new student name already exists.");
                    return false;
                }
                <%count = count + 1;%>
            }
        }
    }
    </script>
    thanks, Kapil

    Read the article

  • Custom model validation of dependent properties using Data Annotations

    - by Darin Dimitrov
    Since now I've used the excellent FluentValidation library to validate my model classes. In web applications I use it in conjunction with the jquery.validate plugin to perform client side validation as well. One drawback is that much of the validation logic is repeated on the client side and is no longer centralized at a single place. For this reason I'm looking for an alternative. There are many examples out there showing the usage of data annotations to perform model validation. It looks very promising. One thing I couldn't find out is how to validate a property that depends on another property value. Let's take for example the following model: public class Event { [Required] public DateTime? StartDate { get; set; } [Required] public DateTime? EndDate { get; set; } } I would like to ensure that EndDate is greater than StartDate. I could write a custom validation attribute extending ValidationAttribute in order to perform custom validation logic. Unfortunately I couldn't find a way to obtain the model instance: public class CustomValidationAttribute : ValidationAttribute { public override bool IsValid(object value) { // value represents the property value on which this attribute is applied // but how to obtain the object instance to which this property belongs? return true; } } I found that the CustomValidationAttribute seems to do the job because it has this ValidationContext property that contains the object instance being validated. Unfortunately this attribute has been added only in .NET 4.0. So my question is: can I achieve the same functionality in .NET 3.5 SP1? UPDATE: It seems that FluentValidation already supports clientside validation and metadata in ASP.NET MVC 2. Still it would be good to know though if data annotations could be used to validate dependent properties.
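
    One route that does work on .NET 3.5 SP1, sketched below and untested (the DateRangeAttribute name is made up, and the rule is only checked server side), is a class-level ValidationAttribute in the spirit of the PropertiesMustMatchAttribute from the MVC 2 project template: because it is applied to the class, IsValid receives the whole Event instance and can compare both dates.

    using System;
    using System.ComponentModel.DataAnnotations;

    [AttributeUsage(AttributeTargets.Class)]
    public sealed class DateRangeAttribute : ValidationAttribute
    {
        // Applied at class level, so 'value' is the whole model instance
        // rather than a single property value.
        public override bool IsValid(object value)
        {
            var model = value as Event;
            if (model == null || !model.StartDate.HasValue || !model.EndDate.HasValue)
            {
                return true;   // let [Required] report missing values
            }

            return model.EndDate.Value > model.StartDate.Value;
        }
    }

    [DateRange(ErrorMessage = "EndDate must be later than StartDate")]
    public class Event
    {
        [Required]
        public DateTime? StartDate { get; set; }

        [Required]
        public DateTime? EndDate { get; set; }
    }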

    Read the article

  • Calling Model Functions from a Library

    - by Abs
    Hello all, I have turned a normal PHP class into a library so I can use it in Codeigniter as a library. I can load it and call the functions I need in that class. Here is that class to help with the question. However, there are quite a few points where I have to call functions in my class. These functions reside in the model that instantiated my class. How can I do this as currently normal calls don't work. Here is my code: class Controlpanel_model extends Model { var $category = ''; var $dataa = 'a'; function Controlpanel_model(){ parent::Model(); } function import_browser_bookmarks(){ $this->load->library('BookmarkParser'); /* *In this function call to the class I pass * what model functions exist that it should call * You can view how this done by clicking the link above and looking at line 383 */ $this->bookmarkparser->parseNetscape("./bookmarks.html", 0, 'myURL', 'myFolder'); return $this->dataa; } function myURL($data, $depth, $no) { $category = $this->category; $this->dataa .= 'Tag = '.$category.'<br />'.'URL = '.$data["url"].'<br />'.'Title = '.$data["descr"].'<br />'.'<br /><br />'; } function myFolder($data, $depth, $no) { $this->category = $data["name"]; } } Thanks all for any help.

    Read the article

  • MVC View Model Intellisense / Compile error

    - by Marty Trenouth
    I have one Library with my ORM and am working with an MVC Application. I have a problem where the pages won't compile because the Views can't see the Model's properties (which are inherited from lower level base classes). The system throws a compile error saying that 'object' does not contain a definition for 'ID' and no extension method 'ID' accepting a first argument of type 'object' could be found (are you missing a using directive or an assembly reference?) implying that the View is not seeing the model. In the Controller I have full access to the Model and have checked the Inherits portion of the view to validate that the correct type is being passed.
    Controller: return View(new TeraViral_Blog());
    View:
    <%@ Page Title="" Language="C#" MasterPageFile="~/Views/Shared/Site.Master" Inherits="System.Web.Mvc.ViewPage<com.models.TeraViral_Blog>" %>
    <asp:Content ID="Content1" ContentPlaceHolderID="TitleContent" runat="server">
        Index2
    </asp:Content>
    <asp:Content ID="Content2" ContentPlaceHolderID="MainContent" runat="server">
        <h2>Index2</h2>
        <fieldset>
            <legend>Fields</legend>
            <p>
                ID: <%= Html.Encode(Model.ID) %>
            </p>
        </fieldset>
    </asp:Content>

    Read the article

  • Strange ng-model behavior inside ng-repeat

    - by Mike Fisher
    I'm trying to build up a complex post request to run a report in my Angular app. I have a list of inputs all dynamically generated via an ng-repeat a simple version of my html looks like this. <div ng-repeat="filter in lists.filters"> <input type="checkbox" ng-model="report.options.filters[filter.value]['type']/> <input type="text" ng-model="report.options.filters[filter.value]['values']/> </div> ng-repeat is looping over this array [ {name: 'Advertisers', value: 'advertisers'}, {name: 'Sizes', value: 'sizes'}, {name: 'Campaign IDs', value: 'campaigns'}, {name: 'Creative IDs', value: 'creatives'}, {name: 'Publishers', value: 'publishers'}, {name: 'Placement IDs', value: 'placements'}, {name: 'Seller Types', value: 'seller_types'}, {name: 'Impression Types', value: 'impression_types'}, {name: 'Bid Types', value: 'bid_types'}, {name: 'Seller Members', value: 'seller_members'}, {name: 'Buyer Members', value: 'buyer_members'}, {name: 'Insertion Order Ids', value: 'insertion_orders'}, {name: 'Countries', value: 'countries'}, {name: 'Site Ids', value: 'sites'}, {name: 'Sources', value: 'sources'} ]; The JSON I'm sending back needs to be structured like this: "filters": { "state": "all", "campaigns": {type:"include", values":[1,2]}, "creatives": {type:"exclude","values":[1,2]}, "publishers": {"values":[1,2]}, "placements": {type:"exclude",values":[1,2]}, "advertisers": {"values":[1,2]}, "sizes": {"values":[1,2]}, "countries": {"values":[1,2]}, "insertion_orders": {"values":[1,2]}, "sites": {"values":[1,2]}, "bid_types": {"values":[1,2]}, "seller_types": {"values":[1,2]}, "impression_types": {"values":[1,2]}, "seller_members": {"values":[1,2]}, "buyer_members": {"values":[1,2]}, "sources": {"values":[1,2]} } When I do this Angular throws an error: 'Cannot set property 'values' of undefined' and 'Cannot set property 'type' of undefined' Yet if I do this (inside ng-repeat) <input type="text" ng-model="report.options.filters[filter.value]/> Or this outside of ng-repeat <input type="text" ng-model="report.options.filters[filter.value]['values']/> No errors are thrown and everything works fine. I'm positive that filter.value is defined and available on the scope even though Angular thinks it's not for some reason. I'm not quite sure what I'm doing wrong. Any help is much appreciated.

    Read the article

  • Flexible design - customizable entity model, UI and workflow

    - by Ngm
    Hi All, I want to achieve the following aspects in the software I am building: 1. Customizable entity model 2. Customizable UI 3. Customizable workflow I have thought about an approach to achieve this, I want you to review this and make suggestions: Entity objects should be plain objects and will hold just data Separate Entity model and DB Schema by using an framework (like NHibernate?). This will allow easy modification of entity objects. Business logic to fetch/modify entities has to be granular enough so that they can be invoked as part of the workflow. Business objects should not hold any state, and hence will contain only static methods The workflow will decide depending upon the "state" of an entity/entities which methods on business object/objects to invoke. The workflow should obtain the results of the processing and then pass on the business objects to the appropriate UI screen. The UI screen has to contain instructions about how to display a given entity/entites. Possibly the UI has to be generated dynamically based on a set of UI instructions. (like XUL) What do you think about this approach? Suggest which existing frameworks (like NHiberante, Window Workflow) fit into this model, so that I will not spend time on coding these frameworks Also suggest is there any asp.net framework that can generate dynamic asp.net ajax pages based on a set of UI instructions (like Mozilla XUL)? I have recently been exploring Apache Ofbiz and was impressed by its ability to customize most areas of the application: UI, workflow, entities. Is there any similar (not necessarily an ERP system) application developed in C#/.Net which offers a similar level of customization? I am looking for examples of applications developed in C# that are highly customizable in terms of UI, Workflow and Entity Model

    Read the article

  • Django internationalization for admin pages - translate model name and attributes

    - by geekQ
    Django's internationalization is very nice (gettext based, LocaleMiddleware), but what is the proper way to translate the model name and the attributes for admin pages? I did not find anything about this in the documentation: http://docs.djangoproject.com/en/dev/topics/i18n/internationalization/ http://www.djangobook.com/en/2.0/chapter19/ I would like to have "Выберите заказ для изменения" instead of "Выберите order для изменения". Note, the 'order' is not translated. First, I defined a model, activated USE_I18N = True in settings.py, and ran django-admin makemessages -l ru. No entries are created by default for model names and attributes. Grepping in the Django source code I found: $ ack "Select %s to change" contrib/admin/views/main.py 70: self.title = (self.is_popup and ugettext('Select %s') % force_unicode(self.opts.verbose_name) or ugettext('Select %s to change') % force_unicode(self.opts.verbose_name)) So the verbose_name meta property seems to play some role here. Tried to use it: class Order(models.Model): subject = models.CharField(max_length=150) description = models.TextField() class Meta: verbose_name = _('order') Now the updated po file contains msgid 'order' that can be translated. So I put the translation in. Unfortunately running the admin pages shows the same mix of "Выберите order для изменения". I'm currently using Django 1.1.1. Could somebody point me to the relevant documentation? Because google can not. ;-) In the mean time I'll dig deeper into the django source code...

    Read the article

  • Domain model for an optional many-many relationship

    - by Greg
    Let's say I'm modeling phone numbers. I have one entity for PhoneNumber and one for Person. There's a link table that expresses the link (if any) between a PhoneNumber and a Person. The link table also has a field for DisplayOrder. When accessing my domain model, I have several use cases for viewing a Person. I can look at them without any PhoneNumber information. I can look at them for a specific PhoneNumber. I can look at them and all of their current (or past) PhoneNumbers. I'm trying to model Person not only for the standard CRUD operations, but also for the (un)assignment of PhoneNumbers to a Person. I'm having trouble expressing the relationship between the two, especially with respect to the DisplayOrder property. I can think of several solutions but I'm not sure which (if any) would be best. A PhoneNumberPerson class that has a Person and a PhoneNumber property (most closely resembles the database design) A PhoneCarryingPerson class that inherits from Person and has a PhoneNumber property. A PhoneNumber and/or PhoneNumbers property on Person (and vice versa, a Person property on PhoneNumber) What would be a good way to model this that makes sense from a domain model perspective? How do I avoid misplaced properties (DisplayOrder on Person) or conditionally populated properties?
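    For what it's worth, a sketch of the first option with an explicit association object, so DisplayOrder lives on the link rather than on Person or PhoneNumber, and the collection can simply stay empty for use cases that don't load phone data. All type and member names here are illustrative, not taken from an existing code base.

      // Sketch of option 1: the link table becomes its own domain type.
      using System.Collections.Generic;

      public class PhoneNumber
      {
          public string Number { get; set; }
      }

      public class PhoneNumberAssignment
      {
          public Person Person { get; set; }
          public PhoneNumber PhoneNumber { get; set; }
          public int DisplayOrder { get; set; }   // belongs to the link, not to Person
      }

      public class Person
      {
          private readonly List<PhoneNumberAssignment> _phoneNumbers = new List<PhoneNumberAssignment>();

          // Left empty for use cases that don't need phone information.
          public IEnumerable<PhoneNumberAssignment> PhoneNumbers
          {
              get { return _phoneNumbers; }
          }

          public void AssignPhoneNumber(PhoneNumber number, int displayOrder)
          {
              _phoneNumbers.Add(new PhoneNumberAssignment
              {
                  Person = this,
                  PhoneNumber = number,
                  DisplayOrder = displayOrder
              });
          }

          public void UnassignPhoneNumber(PhoneNumber number)
          {
              _phoneNumbers.RemoveAll(a => a.PhoneNumber == number);
          }
      }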

    Read the article

  • How to save to a Django Model that has multiple Foreign Key Fields

    - by Spikie
    I have models for a business app: class staff_name(models.Model): TITLE_CHOICES = ( ('Mr', 'Mr'), ('Miss', 'Miss'), ( 'Mrs', 'Mrs'), ( 'chief', 'chief'), ) titlename = models.CharField(max_length=10,choices=TITLE_CHOICES) firstname = models.CharField(max_length=150) surname = models.CharField(max_length=150) date = models.DateTimeField(auto_now=True) class meta: ordering = ["date"] get_latest_by = "date" class inventory_transaction(models.Model): stock_in = models.DecimalField(blank=True, null=True,max_digits=8, decimal_places=2) stock_out = models.DecimalField(blank=True,null=True,max_digits=8, decimal_places=2) Number_container = models.ForeignKey(container_identity) staffs = models.ForeignKey(staff_name) goods_details = models.ForeignKey(departments) balance = models.DecimalField(max_digits=8, decimal_places=2) date = models.DateTimeField(auto_now=True) What I want to accomplish is to check whether the staff member has already made an entry in the table; if yes, add the stock_in value to the last balance and assign the result to balance; if no, just assign the stock_in value to the balance field and save. These are my codes: try: s = staffname.staffs_set.all().order_by("-date").latest() # staffname is the instance of the model class staff_name e = s.staffs_set.create(stockin=vdataz,balance=s.balance + vdataz ) # e is the instance of the model class inventory_transaction e.save e.staffs.add(s) e.from_container.add(containersno) e.goods_details.add(department) except ObjectDoesNotExist: e = staff_name.objects.create(stockin=vdataz,balance=vdataz ) e.save e.staffs.add(staffname) e.from_container.add(containersno) e.goods_details.add(department) I will really appreciate a solution. Thanks, I hope it makes more sense now. I am online, so if you need more explanation just ask in the comments. Thank you for your help.
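    A hedged sketch of how the save logic might look with the models above: ForeignKey values are set by plain assignment (add() only exists on many-to-many managers), save() has to be called as a method, and the previous balance can be looked up with an ordered query on the reverse relation. The record_stock_in helper name is illustrative; the field names come from the question.

      # Sketch only, assuming the staff_name and inventory_transaction
      # models from the question.
      def record_stock_in(staff, container, department, amount):
          # Most recent transaction for this staff member, if any.
          previous = staff.inventory_transaction_set.order_by('-date')[:1]
          if previous:
              balance = previous[0].balance + amount
          else:
              balance = amount

          entry = inventory_transaction(
              stock_in=amount,
              balance=balance,
              staffs=staff,                  # ForeignKey: assign, don't add()
              Number_container=container,
              goods_details=department,
          )
          entry.save()                       # save() is a call, not an attribute
          return entry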

    Read the article

  • MVC View Model IntelliSense / Compile error

    - by Marty Trenouth
    I have one library with my ORM and am working with an MVC application. I have a problem where the pages won't compile because the views can't see the model's properties (which are inherited from lower-level base classes). The system throws a compile error saying that 'object' does not contain a definition for 'ID' and no extension method 'ID' accepting a first argument of type 'object' could be found (are you missing a using directive or an assembly reference?), implying that the view is not seeing the model. In the controller I have full access to the model, and I have checked the Inherits portion of the view to validate that the correct type is being passed. Controller: return View(new TeraViral_Blog()); View: <%@ Page Title="" Language="C#" MasterPageFile="~/Views/Shared/Site.Master" Inherits="System.Web.Mvc.ViewPage<com.models.TeraViral_Blog>" %> <asp:Content ID="Content1" ContentPlaceHolderID="TitleContent" runat="server"> Index2 </asp:Content> <asp:Content ID="Content2" ContentPlaceHolderID="MainContent" runat="server"> <h2>Index2</h2> <fieldset> <legend>Fields</legend> <p> ID: <%= Html.Encode(Model.ID) %> </p> </fieldset> </asp:Content>

    Read the article

  • Reverse mapping from a table to a model in SQLAlchemy

    - by Jace
    To provide an activity log in my SQLAlchemy-based app, I have a model like this: class ActivityLog(Base): __tablename__ = 'activitylog' id = Column(Integer, primary_key=True) activity_by_id = Column(Integer, ForeignKey('users.id'), nullable=False) activity_by = relation(User, primaryjoin=activity_by_id == User.id) activity_at = Column(DateTime, default=datetime.utcnow, nullable=False) activity_type = Column(SmallInteger, nullable=False) target_table = Column(Unicode(20), nullable=False) target_id = Column(Integer, nullable=False) target_title = Column(Unicode(255), nullable=False) The log contains entries for multiple tables, so I can't use ForeignKey relations. Log entries are made like this: doc = Document(name=u'mydoc', title=u'My Test Document', created_by=user, edited_by=user) session.add(doc) session.flush() # See note below log = ActivityLog(activity_by=user, activity_type=ACTIVITY_ADD, target_table=Document.__table__.name, target_id=doc.id, target_title=doc.title) session.add(log) This leaves me with three problems: I have to flush the session before my doc object gets an id. If I had used a ForeignKey column and a relation mapper, I could have simply called ActivityLog(target=doc) and let SQLAlchemy do the work. Is there any way to work around needing to flush by hand? The target_table parameter is too verbose. I suppose I could solve this with a target property setter in ActivityLog that automatically retrieves the table name and id from a given instance. Biggest of all, I'm not sure how to retrieve a model instance from the database. Given an ActivityLog instance log, calling self.session.query(log.target_table).get(log.target_id) does not work, as query() expects a model as parameter. One workaround appears to be to use polymorphism and derive all my models from a base model which ActivityLog recognises. Something like this: class Entity(Base): __tablename__ = 'entities' id = Column(Integer, primary_key=True) title = Column(Unicode(255), nullable=False) edited_at = Column(DateTime, onupdate=datetime.utcnow, nullable=False) entity_type = Column(Unicode(20), nullable=False) __mapper_args__ = {'polymorphic_on': entity_type} class Document(Entity): __tablename__ = 'documents' __mapper_args__ = {'polymorphic_identity': 'document'} body = Column(UnicodeText, nullable=False) class ActivityLog(Base): __tablename__ = 'activitylog' id = Column(Integer, primary_key=True) ... target_id = Column(Integer, ForeignKey('entities.id'), nullable=False) target = relation(Entity) If I do this, ActivityLog(...).target will give me a Document instance when it refers to a Document, but I'm not sure it's worth the overhead of having two tables for everything. Should I go ahead and do it this way?
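    As a middle ground before committing to the polymorphic Entity base, a hedged sketch of a small registry keyed by table name, with target exposed as a property on ActivityLog. MODEL_REGISTRY and register_model are illustrative helpers (not SQLAlchemy API), the classes are trimmed to the relevant columns, and this still leaves the manual flush from the first problem unresolved.

      # Sketch only; Base is the declarative base from the question.
      from sqlalchemy import Column, Integer, Unicode
      from sqlalchemy.orm import object_session

      MODEL_REGISTRY = {}

      def register_model(cls):
          """Remember which mapped class owns which table name."""
          MODEL_REGISTRY[cls.__table__.name] = cls
          return cls

      @register_model
      class Document(Base):
          __tablename__ = 'documents'
          id = Column(Integer, primary_key=True)
          title = Column(Unicode(255), nullable=False)

      class ActivityLog(Base):
          __tablename__ = 'activitylog'
          id = Column(Integer, primary_key=True)
          target_table = Column(Unicode(20), nullable=False)
          target_id = Column(Integer, nullable=False)

          def _get_target(self):
              cls = MODEL_REGISTRY[self.target_table]
              return object_session(self).query(cls).get(self.target_id)

          def _set_target(self, obj):
              # Still needs obj.id, so the session has to be flushed first.
              self.target_table = obj.__table__.name
              self.target_id = obj.id

          target = property(_get_target, _set_target)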

    Read the article
