
  • Problem with Google Chrome

    - by user365559
    Hi. I have a JavaScript file for history management. It is not supported by Google Chrome: when I try to navigate to the previous page with the browser's back button, I can see the URL change, but the page doesn't actually go back to the preceding one.

      BrowserHistoryUtils = {
        addEvent: function(elm, evType, fn, useCapture) {
          useCapture = useCapture || false;
          if (elm.addEventListener) {
            elm.addEventListener(evType, fn, useCapture);
            return true;
          } else if (elm.attachEvent) {
            var r = elm.attachEvent('on' + evType, fn);
            return r;
          } else {
            elm['on' + evType] = fn;
          }
        }
      }

      BrowserHistory = (function() {
        // type of browser
        var browser = { ie: false, firefox: false, safari: false, opera: false, version: -1 };

        // if setDefaultURL has been called, our first clue
        // that the SWF is ready and listening
        //var swfReady = false;

        // the URL we'll send to the SWF once it is ready
        //var pendingURL = '';

        // Default app state URL to use when no fragment ID present
        var defaultHash = '';

        // Last-known app state URL
        var currentHref = document.location.href;

        // Initial URL (used only by IE)
        var initialHref = document.location.href;

        // Initial URL (used only by IE)
        var initialHash = document.location.hash;

        // History frame source URL prefix (used only by IE)
        var historyFrameSourcePrefix = 'history/historyFrame.html?';

        // History maintenance (used only by Safari)
        var currentHistoryLength = -1;

        var historyHash = [];

        var initialState = createState(initialHref, initialHref + '#' + initialHash, initialHash);

        var backStack = [];
        var forwardStack = [];
        var currentObjectId = null;

        //UserAgent detection
        var useragent = navigator.userAgent.toLowerCase();

        if (useragent.indexOf("opera") != -1) {
          browser.opera = true;
        } else if (useragent.indexOf("msie") != -1) {
          browser.ie = true;
          browser.version = parseFloat(useragent.substring(useragent.indexOf('msie') + 4));
        } else if (useragent.indexOf("safari") != -1) {
          browser.safari = true;
          browser.version = parseFloat(useragent.substring(useragent.indexOf('safari') + 7));
        } else if (useragent.indexOf("gecko") != -1) {
          browser.firefox = true;
        }

        if (browser.ie == true && browser.version == 7) {
          window["_ie_firstload"] = false;
        }

        // Accessor functions for obtaining specific elements of the page.
        function getHistoryFrame() { return document.getElementById('ie_historyFrame'); }
        function getAnchorElement() { return document.getElementById('firefox_anchorDiv'); }
        function getFormElement() { return document.getElementById('safari_formDiv'); }
        function getRememberElement() { return document.getElementById("safari_remember_field"); }

        // Get the Flash player object for performing ExternalInterface callbacks.
        // Updated for changes to SWFObject2.
        function getPlayer(id) {
          if (id && document.getElementById(id)) {
            var r = document.getElementById(id);
            if (typeof r.SetVariable != "undefined") {
              return r;
            } else {
              var o = r.getElementsByTagName("object");
              var e = r.getElementsByTagName("embed");
              if (o.length > 0 && typeof o[0].SetVariable != "undefined") {
                return o[0];
              } else if (e.length > 0 && typeof e[0].SetVariable != "undefined") {
                return e[0];
              }
            }
          } else {
            var o = document.getElementsByTagName("object");
            var e = document.getElementsByTagName("embed");
            if (e.length > 0 && typeof e[0].SetVariable != "undefined") {
              return e[0];
            } else if (o.length > 0 && typeof o[0].SetVariable != "undefined") {
              return o[0];
            } else if (o.length > 1 && typeof o[1].SetVariable != "undefined") {
              return o[1];
            }
          }
          return undefined;
        }

        function getPlayers() {
          var players = [];
          if (players.length == 0) {
            var tmp = document.getElementsByTagName('object');
            players = tmp;
          }
          if (players.length == 0 || players[0].object == null) {
            var tmp = document.getElementsByTagName('embed');
            players = tmp;
          }
          return players;
        }

        function getIframeHash() {
          var doc = getHistoryFrame().contentWindow.document;
          var hash = String(doc.location.search);
          if (hash.length == 1 && hash.charAt(0) == "?") {
            hash = "";
          } else if (hash.length >= 2 && hash.charAt(0) == "?") {
            hash = hash.substring(1);
          }
          return hash;
        }

        /* Get the current location hash excluding the '#' symbol. */
        function getHash() {
          // It would be nice if we could use document.location.hash here,
          // but it's faulty sometimes.
          var idx = document.location.href.indexOf('#');
          return (idx >= 0) ? document.location.href.substr(idx + 1) : '';
        }

        /* Set the current location hash excluding the '#' symbol. */
        function setHash(hash) {
          // It would be nice if we could use document.location.hash here,
          // but it's faulty sometimes.
          if (hash == '') hash = '#';
          document.location.hash = hash;
        }

        function createState(baseUrl, newUrl, flexAppUrl) {
          return { 'baseUrl': baseUrl, 'newUrl': newUrl, 'flexAppUrl': flexAppUrl, 'title': null };
        }

        /* Add a history entry to the browser.
         * baseUrl: the portion of the location prior to the '#'
         * newUrl: the entire new URL, including '#' and following fragment
         * flexAppUrl: the portion of the location following the '#' only
         */
        function addHistoryEntry(baseUrl, newUrl, flexAppUrl) {
          //delete all the history entries
          forwardStack = [];

          if (browser.ie) {
            //Check to see if we are being asked to do a navigate for the first
            //history entry, and if so ignore, because it's coming from the creation
            //of the history iframe
            if (flexAppUrl == defaultHash && document.location.href == initialHref && window['_ie_firstload']) {
              currentHref = initialHref;
              return;
            }
            if ((!flexAppUrl || flexAppUrl == defaultHash) && window['_ie_firstload']) {
              newUrl = baseUrl + '#' + defaultHash;
              flexAppUrl = defaultHash;
            } else {
              // for IE, tell the history frame to go somewhere without a '#'
              // in order to get this entry into the browser history.
              getHistoryFrame().src = historyFrameSourcePrefix + flexAppUrl;
            }
            setHash(flexAppUrl);
          } else {
            //ADR
            if (backStack.length == 0 && initialState.flexAppUrl == flexAppUrl) {
              initialState = createState(baseUrl, newUrl, flexAppUrl);
            } else if (backStack.length > 0 && backStack[backStack.length - 1].flexAppUrl == flexAppUrl) {
              backStack[backStack.length - 1] = createState(baseUrl, newUrl, flexAppUrl);
            }

            if (browser.safari) {
              // for Safari, submit a form whose action points to the desired URL
              if (browser.version <= 419.3) {
                var file = window.location.pathname.toString();
                file = file.substring(file.lastIndexOf("/") + 1);
                getFormElement().innerHTML = '<form name="historyForm" action="' + file + '#' + flexAppUrl + '" method="GET"></form>';
                //get the current elements and add them to the form
                var qs = window.location.search.substring(1);
                var qs_arr = qs.split("&");
                for (var i = 0; i < qs_arr.length; i++) {
                  var tmp = qs_arr[i].split("=");
                  var elem = document.createElement("input");
                  elem.type = "hidden";
                  elem.name = tmp[0];
                  elem.value = tmp[1];
                  document.forms.historyForm.appendChild(elem);
                }
                document.forms.historyForm.submit();
              } else {
                top.location.hash = flexAppUrl;
              }
              // We also have to maintain the history by hand for Safari
              historyHash[history.length] = flexAppUrl;
              _storeStates();
            } else {
              // Otherwise, write an anchor into the page and tell the browser to go there
              addAnchor(flexAppUrl);
              setHash(flexAppUrl);
            }
          }
          backStack.push(createState(baseUrl, newUrl, flexAppUrl));
        }

        function _storeStates() {
          if (browser.safari) {
            getRememberElement().value = historyHash.join(",");
          }
        }

        function handleBackButton() {
          //The "current" page is always at the top of the history stack.
          var current = backStack.pop();
          if (!current) { return; }
          var last = backStack[backStack.length - 1];
          if (!last && backStack.length == 0) {
            last = initialState;
          }
          forwardStack.push(current);
        }

        function handleForwardButton() {
          //summary: private method. Do not call this directly.
          var last = forwardStack.pop();
          if (!last) { return; }
          backStack.push(last);
        }

        function handleArbitraryUrl() {
          //delete all the history entries
          forwardStack = [];
        }

        /* Called periodically to poll to see if we need to detect navigation that has occurred */
        function checkForUrlChange() {
          if (browser.ie) {
            if (currentHref != document.location.href && currentHref + '#' != document.location.href) {
              //This occurs when the user has navigated to a specific URL
              //within the app, and didn't use browser back/forward
              //IE seems to have a bug where it stops updating the URL it
              //shows the end-user at this point, but programatically it
              //appears to be correct. Do a full app reload to get around
              //this issue.
              if (browser.version < 7) {
                currentHref = document.location.href;
                document.location.reload();
              } else {
                if (getHash() != getIframeHash()) {
                  // this.iframe.src = this.blankURL + hash;
                  var sourceToSet = historyFrameSourcePrefix + getHash();
                  getHistoryFrame().src = sourceToSet;
                }
              }
            }
          }

          if (browser.safari) {
            // For Safari, we have to check to see if history.length changed.
            if (currentHistoryLength >= 0 && history.length != currentHistoryLength) {
              //alert("did change: " + history.length + ", " + historyHash.length + "|" + historyHash[history.length] + "|>" + historyHash.join("|"));
              // If it did change, then we have to look the old state up
              // in our hand-maintained array since document.location.hash
              // won't have changed, then call back into BrowserManager.
              currentHistoryLength = history.length;
              var flexAppUrl = historyHash[currentHistoryLength];
              if (flexAppUrl == '') {
                //flexAppUrl = defaultHash;
              }
              //ADR: to fix multiple
              if (typeof BrowserHistory_multiple != "undefined" && BrowserHistory_multiple == true) {
                var pl = getPlayers();
                for (var i = 0; i < pl.length; i++) {
                  pl[i].browserURLChange(flexAppUrl);
                }
              } else {
                getPlayer().browserURLChange(flexAppUrl);
              }
              _storeStates();
            }
          }

          if (browser.firefox) {
            if (currentHref != document.location.href) {
              var bsl = backStack.length;
              var urlActions = { back: false, forward: false, set: false }

              if ((window.location.hash == initialHash || window.location.href == initialHref) && (bsl == 1)) {
                urlActions.back = true;
                // FIXME: could this ever be a forward button?
                // we can't clear it because we still need to check for forwards. Ugg.
                // clearInterval(this.locationTimer);
                handleBackButton();
              }

              // first check to see if we could have gone forward. We always halt on
              // a no-hash item.
              if (forwardStack.length > 0) {
                if (forwardStack[forwardStack.length - 1].flexAppUrl == getHash()) {
                  urlActions.forward = true;
                  handleForwardButton();
                }
              }

              // ok, that didn't work, try someplace back in the history stack
              if ((bsl >= 2) && (backStack[bsl - 2])) {
                if (backStack[bsl - 2].flexAppUrl == getHash()) {
                  urlActions.back = true;
                  handleBackButton();
                }
              }

              if (!urlActions.back && !urlActions.forward) {
                var foundInStacks = { back: -1, forward: -1 }
                for (var i = 0; i < backStack.length; i++) {
                  if (backStack[i].flexAppUrl == getHash() && i != (bsl - 2)) {
                    arbitraryUrl = true;
                    foundInStacks.back = i;
                  }
                }
                for (var i = 0; i < forwardStack.length; i++) {
                  if (forwardStack[i].flexAppUrl == getHash() && i != (bsl - 2)) {
                    arbitraryUrl = true;
                    foundInStacks.forward = i;
                  }
                }
                handleArbitraryUrl();
              }

              // Firefox changed; do a callback into BrowserManager to tell it.
              currentHref = document.location.href;
              var flexAppUrl = getHash();
              if (flexAppUrl == '') {
                //flexAppUrl = defaultHash;
              }
              //ADR: to fix multiple
              if (typeof BrowserHistory_multiple != "undefined" && BrowserHistory_multiple == true) {
                var pl = getPlayers();
                for (var i = 0; i < pl.length; i++) {
                  pl[i].browserURLChange(flexAppUrl);
                }
              } else {
                getPlayer().browserURLChange(flexAppUrl);
              }
            }
          }
          //setTimeout(checkForUrlChange, 50);
        }

        /* Write an anchor into the page to legitimize it as a URL for Firefox et al. */
        function addAnchor(flexAppUrl) {
          if (document.getElementsByName(flexAppUrl).length == 0) {
            getAnchorElement().innerHTML += "<a name='" + flexAppUrl + "'>" + flexAppUrl + "</a>";
          }
        }

        var _initialize = function() {
          if (browser.ie) {
            var scripts = document.getElementsByTagName('script');
            for (var i = 0, s; s = scripts[i]; i++) {
              if (s.src.indexOf("history.js") > -1) {
                var iframe_location = (new String(s.src)).replace("history.js", "historyFrame.html");
              }
            }
            historyFrameSourcePrefix = iframe_location + "?";
            var src = historyFrameSourcePrefix;
            var iframe = document.createElement("iframe");
            iframe.id = 'ie_historyFrame';
            iframe.name = 'ie_historyFrame';
            //iframe.src = historyFrameSourcePrefix;
            try {
              document.body.appendChild(iframe);
            } catch(e) {
              setTimeout(function() {
                document.body.appendChild(iframe);
              }, 0);
            }
          }

          if (browser.safari) {
            var rememberDiv = document.createElement("div");
            rememberDiv.id = 'safari_rememberDiv';
            document.body.appendChild(rememberDiv);
            rememberDiv.innerHTML = '<input type="text" id="safari_remember_field" style="width: 500px;">';
            var formDiv = document.createElement("div");
            formDiv.id = 'safari_formDiv';
            document.body.appendChild(formDiv);
            var reloader_content = document.createElement('div');
            reloader_content.id = 'safarireloader';
            var scripts = document.getElementsByTagName('script');
            for (var i = 0, s; s = scripts[i]; i++) {
              if (s.src.indexOf("history.js") > -1) {
                html = (new String(s.src)).replace(".js", ".html");
              }
            }
            reloader_content.innerHTML = '<iframe id="safarireloader-iframe" src="about:blank" frameborder="no" scrolling="no"></iframe>';
            document.body.appendChild(reloader_content);
            reloader_content.style.position = 'absolute';
            reloader_content.style.left = reloader_content.style.top = '-9999px';
            iframe = reloader_content.getElementsByTagName('iframe')[0];
            if (document.getElementById("safari_remember_field").value != "") {
              historyHash = document.getElementById("safari_remember_field").value.split(",");
            }
          }

          if (browser.firefox) {
            var anchorDiv = document.createElement("div");
            anchorDiv.id = 'firefox_anchorDiv';
            document.body.appendChild(anchorDiv);
          }

          //setTimeout(checkForUrlChange, 50);
        }

        return {
          historyHash: historyHash,
          backStack: function() { return backStack; },
          forwardStack: function() { return forwardStack },
          getPlayer: getPlayer,
          initialize: function(src) {
            _initialize(src);
          },
          setURL: function(url) {
            document.location.href = url;
          },
          getURL: function() {
            return document.location.href;
          },
          getTitle: function() {
            return document.title;
          },
          setTitle: function(title) {
            try {
              backStack[backStack.length - 1].title = title;
            } catch(e) { }
            //if on safari, set the title to be the empty string.
            if (browser.safari) {
              if (title == "") {
                try {
                  var tmp = window.location.href.toString();
                  title = tmp.substring((tmp.lastIndexOf("/") + 1), tmp.lastIndexOf("#"));
                } catch(e) {
                  title = "";
                }
              }
            }
            document.title = title;
          },
          setDefaultURL: function(def) {
            defaultHash = def;
            def = getHash();
            //trailing ? is important else an extra frame gets added to the history
            //when navigating back to the first page. Alternatively could check
            //in history frame navigation to compare # and ?.
            if (browser.ie) {
              window['_ie_firstload'] = true;
              var sourceToSet = historyFrameSourcePrefix + def;
              var func = function() {
                getHistoryFrame().src = sourceToSet;
                window.location.replace("#" + def);
                setInterval(checkForUrlChange, 50);
              }
              try {
                func();
              } catch(e) {
                window.setTimeout(function() { func(); }, 0);
              }
            }

            if (browser.safari) {
              currentHistoryLength = history.length;
              if (historyHash.length == 0) {
                historyHash[currentHistoryLength] = def;
                var newloc = "#" + def;
                window.location.replace(newloc);
              } else {
                //alert(historyHash[historyHash.length-1]);
              }
              //setHash(def);
              setInterval(checkForUrlChange, 50);
            }

            if (browser.firefox || browser.opera) {
              var reg = new RegExp("#" + def + "$");
              if (window.location.toString().match(reg)) {
              } else {
                var newloc = "#" + def;
                window.location.replace(newloc);
              }
              setInterval(checkForUrlChange, 50);
              //setHash(def);
            }
          },

          /* Set the current browser URL; called from inside BrowserManager to propagate
           * the application state out to the container. */
          setBrowserURL: function(flexAppUrl, objectId) {
            if (browser.ie && typeof objectId != "undefined") {
              currentObjectId = objectId;
            }
            //fromIframe = fromIframe || false;
            //fromFlex = fromFlex || false;
            //alert("setBrowserURL: " + flexAppUrl);
            //flexAppUrl = (flexAppUrl == "") ? defaultHash : flexAppUrl;
            var pos = document.location.href.indexOf('#');
            var baseUrl = pos != -1 ? document.location.href.substr(0, pos) : document.location.href;
            var newUrl = baseUrl + '#' + flexAppUrl;
            if (document.location.href != newUrl && document.location.href + '#' != newUrl) {
              currentHref = newUrl;
              addHistoryEntry(baseUrl, newUrl, flexAppUrl);
              currentHistoryLength = history.length;
            }
            return false;
          },

          browserURLChange: function(flexAppUrl) {
            var objectId = null;
            if (browser.ie && currentObjectId != null) {
              objectId = currentObjectId;
            }
            pendingURL = '';
            if (typeof BrowserHistory_multiple != "undefined" && BrowserHistory_multiple == true) {
              var pl = getPlayers();
              for (var i = 0; i < pl.length; i++) {
                try {
                  pl[i].browserURLChange(flexAppUrl);
                } catch(e) { }
              }
            } else {
              try {
                getPlayer(objectId).browserURLChange(flexAppUrl);
              } catch(e) { }
            }
            currentObjectId = null;
          }
        }
      })();

      // Initialization

      // Automated unit testing and other diagnostics
      function setURL(url) { document.location.href = url; }
      function backButton() { history.back(); }
      function forwardButton() { history.forward(); }
      function goForwardOrBackInHistory(step) { history.go(step); }

      //BrowserHistoryUtils.addEvent(window, "load", function() { BrowserHistory.initialize(); });
      (function(i) {
        var u = navigator.userAgent;
        var e = /*@cc_on!@*/false;
        var st = setTimeout;
        if (/webkit/i.test(u)) {
          st(function() {
            var dr = document.readyState;
            if (dr == "loaded" || dr == "complete") { i() }
            else { st(arguments.callee, 10); }
          }, 10);
        } else if ((/mozilla/i.test(u) && !/(compati)/.test(u)) || (/opera/i.test(u))) {
          document.addEventListener("DOMContentLoaded", i, false);
        } else if (e) {
          (function() {
            var t = document.createElement('doc:rdy');
            try { t.doScroll('left'); i(); t = null; }
            catch(e) { st(arguments.callee, 0); }
          })();
        } else {
          window.onload = i;
        }
      })(function() { BrowserHistory.initialize(); });
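
    Since the script polls the URL on a timer and manages the hash by hand, one thing worth trying in Chrome is the browser's native hashchange event, which fires on back/forward navigation in browsers that the script's userAgent sniffing never anticipated. This is only a hedged sketch reusing the snippet's own BrowserHistoryUtils and BrowserHistory objects, not a drop-in fix:

      // Prefer the native hashchange event where available (Chrome, Safari 5+,
      // Firefox 3.6+, IE8+) instead of polling the URL on an interval.
      if ('onhashchange' in window) {
        BrowserHistoryUtils.addEvent(window, 'hashchange', function() {
          var flexAppUrl = document.location.hash.replace(/^#/, '');
          // Notify the embedded app exactly as the polling code path does.
          BrowserHistory.browserURLChange(flexAppUrl);
        });
      }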


  • DirectX11 CreateWICTextureFromMemory Using PNG

    - by seethru
    I've currently got textures loading using CreateWICTextureFromFile; however, I'd like a little more control over it, and I'd like to store images in their byte form in a resource loader. Below are two sets of test code that return two separate results, and I'm looking for any insight into a possible solution.

      ID3D11ShaderResourceView* srv;
      std::basic_ifstream<unsigned char> file("image.png", std::ios::binary);
      file.seekg(0, std::ios::end);
      int length = file.tellg();
      file.seekg(0, std::ios::beg);
      unsigned char* buffer = new unsigned char[length];
      file.read(&buffer[0], length);
      file.close();

      HRESULT hr;
      hr = DirectX::CreateWICTextureFromMemory(_D3D->GetDevice(), _D3D->GetDeviceContext(), &buffer[0], sizeof(buffer), nullptr, &srv, NULL);

    As a return for the above code I get "Component not found."

      std::ifstream file;
      ID3D11ShaderResourceView* srv;
      file.open("../Assets/Textures/osg.png", std::ios::binary);
      file.seekg(0, std::ios::end);
      int length = file.tellg();
      file.seekg(0, std::ios::beg);
      std::vector<char> buffer(length);
      file.read(&buffer[0], length);
      file.close();

      HRESULT hr;
      hr = DirectX::CreateWICTextureFromMemory(_D3D->GetDevice(), _D3D->GetDeviceContext(), (const uint8_t*)&buffer[0], sizeof(buffer), nullptr, &srv, NULL);

    The above code returns that the image format is unknown. I'm clearly doing something wrong here; any help is greatly appreciated. I tried finding anything even similar on Stack Overflow and Google, to no avail.
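
    The likely culprit in both snippets is sizeof(buffer): applied to a raw pointer it yields the pointer size, and applied to a std::vector it yields the size of the vector object itself, never the byte count of the image, so WIC only ever sees a handful of bytes and cannot match a codec (hence "component not found" and "unknown format"). A hedged sketch of the corrected call, assuming the DirectX Tool Kit loader and the question's _D3D wrapper:

      // Read the whole file, then hand the loader the real byte count.
      std::ifstream file("image.png", std::ios::binary | std::ios::ate);
      std::streamsize length = file.tellg();   // opened at end: tellg() == file size
      file.seekg(0, std::ios::beg);
      std::vector<uint8_t> buffer(static_cast<size_t>(length));
      file.read(reinterpret_cast<char*>(buffer.data()), length);
      file.close();

      ID3D11ShaderResourceView* srv = nullptr;
      HRESULT hr = DirectX::CreateWICTextureFromMemory(
          _D3D->GetDevice(), _D3D->GetDeviceContext(),
          buffer.data(), buffer.size(),        // byte count, not sizeof(buffer)
          nullptr, &srv);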


  • Why is there a /etc/init.d/mysql file on this Slackware machine? How could it have gotten there?

    - by jasonspiro
    A client of my IT-consulting service owns a web-development shop. He's been having problems with a Slackware 12.0 server running MySQL 5.0.67. The machine was set up by the client's sysadmin, who left on bad terms. My client no longer employs a sysadmin. As far as I can tell, the only copy of MySQL that's installed is the one described in /var/log/packages/mysql-5.0.67-i486-1: PACKAGE NAME: mysql-5.0.67-i486-1 COMPRESSED PACKAGE SIZE: 16828 K UNCOMPRESSED PACKAGE SIZE: 33840 K PACKAGE LOCATION: /var/slapt-get/archives/./slackware/ap/mysql-5.0.67-i486-1.tgz PACKAGE DESCRIPTION: mysql: mysql (SQL-based relational database server) mysql: mysql: MySQL is a fast, multi-threaded, multi-user, and robust SQL mysql: (Structured Query Language) database server. It comes with a nice API mysql: which makes it easy to integrate into other applications. mysql: mysql: The home page for MySQL is http://www.mysql.com/ mysql: mysql: mysql: mysql: FILE LIST: ./ var/ var/lib/ var/lib/mysql/ var/run/ var/run/mysql/ install/ install/doinst.sh install/slack-desc usr/ usr/include/ usr/include/mysql/ usr/include/mysql/my_alloc.h usr/include/mysql/sql_common.h usr/include/mysql/my_dbug.h usr/include/mysql/errmsg.h usr/include/mysql/my_pthread.h usr/include/mysql/my_list.h usr/include/mysql/mysql.h usr/include/mysql/sslopt-vars.h usr/include/mysql/my_config.h usr/include/mysql/mysql_com.h usr/include/mysql/m_string.h usr/include/mysql/sslopt-case.h usr/include/mysql/my_xml.h usr/include/mysql/sql_state.h usr/include/mysql/my_global.h usr/include/mysql/my_sys.h usr/include/mysql/mysqld_ername.h usr/include/mysql/mysqld_error.h usr/include/mysql/sslopt-longopts.h usr/include/mysql/keycache.h usr/include/mysql/my_net.h usr/include/mysql/mysql_version.h usr/include/mysql/my_no_pthread.h usr/include/mysql/decimal.h usr/include/mysql/readline.h usr/include/mysql/my_attribute.h usr/include/mysql/typelib.h usr/include/mysql/my_dir.h usr/include/mysql/raid.h usr/include/mysql/m_ctype.h usr/include/mysql/mysql_embed.h usr/include/mysql/mysql_time.h usr/include/mysql/my_getopt.h usr/lib/ usr/lib/mysql/ usr/lib/mysql/libmysqlclient_r.so.15.0.0 usr/lib/mysql/libmysqlclient_r.la usr/lib/mysql/libmyisammrg.a usr/lib/mysql/libmystrings.a usr/lib/mysql/libmyisam.a usr/lib/mysql/libmysqlclient.so.15.0.0 usr/lib/mysql/libmysqlclient_r.a usr/lib/mysql/libmysqlclient.a usr/lib/mysql/libheap.a usr/lib/mysql/libvio.a usr/lib/mysql/libmysqlclient.la usr/lib/mysql/libmysys.a usr/lib/mysql/libdbug.a usr/bin/ usr/bin/comp_err usr/bin/my_print_defaults usr/bin/resolve_stack_dump usr/bin/msql2mysql usr/bin/mysqltestmanager-pwgen usr/bin/myisampack usr/bin/replace usr/bin/mysqld_multi usr/bin/mysqlaccess usr/bin/mysql_install_db usr/bin/innochecksum usr/bin/myisam_ftdump usr/bin/mysqlcheck usr/bin/mysqltest usr/bin/mysql_upgrade_shell usr/bin/mysql_secure_installation usr/bin/mysql_fix_extensions usr/bin/mysqld_safe usr/bin/mysql_explain_log usr/bin/mysqlimport usr/bin/myisamlog usr/bin/mysql_tzinfo_to_sql usr/bin/mysql_upgrade usr/bin/mysqltestmanager usr/bin/mysql_fix_privilege_tables usr/bin/mysql_find_rows usr/bin/mysql_convert_table_format usr/bin/mysqltestmanagerc usr/bin/mysqlhotcopy usr/bin/mysqldump usr/bin/mysqlshow usr/bin/mysqlbug usr/bin/mysql_config usr/bin/mysqldumpslow usr/bin/mysql_waitpid usr/bin/mysqlbinlog usr/bin/mysql_client_test usr/bin/perror usr/bin/mysql usr/bin/myisamchk usr/bin/mysql_setpermission usr/bin/mysqladmin usr/bin/mysql_zap usr/bin/mysql_tableinfo usr/bin/resolveip usr/share/ usr/share/mysql/ 
usr/share/mysql/errmsg.txt usr/share/mysql/swedish/ usr/share/mysql/swedish/errmsg.sys usr/share/mysql/mysql_system_tables_data.sql usr/share/mysql/mysql.server usr/share/mysql/hungarian/ usr/share/mysql/hungarian/errmsg.sys usr/share/mysql/norwegian/ usr/share/mysql/norwegian/errmsg.sys usr/share/mysql/slovak/ usr/share/mysql/slovak/errmsg.sys usr/share/mysql/spanish/ usr/share/mysql/spanish/errmsg.sys usr/share/mysql/polish/ usr/share/mysql/polish/errmsg.sys usr/share/mysql/ukrainian/ usr/share/mysql/ukrainian/errmsg.sys usr/share/mysql/danish/ usr/share/mysql/danish/errmsg.sys usr/share/mysql/romanian/ usr/share/mysql/romanian/errmsg.sys usr/share/mysql/english/ usr/share/mysql/english/errmsg.sys usr/share/mysql/charsets/ usr/share/mysql/charsets/latin2.xml usr/share/mysql/charsets/greek.xml usr/share/mysql/charsets/koi8r.xml usr/share/mysql/charsets/latin1.xml usr/share/mysql/charsets/cp866.xml usr/share/mysql/charsets/geostd8.xml usr/share/mysql/charsets/cp1250.xml usr/share/mysql/charsets/koi8u.xml usr/share/mysql/charsets/cp852.xml usr/share/mysql/charsets/hebrew.xml usr/share/mysql/charsets/latin7.xml usr/share/mysql/charsets/README usr/share/mysql/charsets/ascii.xml usr/share/mysql/charsets/cp1251.xml usr/share/mysql/charsets/macce.xml usr/share/mysql/charsets/latin5.xml usr/share/mysql/charsets/Index.xml usr/share/mysql/charsets/macroman.xml usr/share/mysql/charsets/cp1256.xml usr/share/mysql/charsets/keybcs2.xml usr/share/mysql/charsets/swe7.xml usr/share/mysql/charsets/armscii8.xml usr/share/mysql/charsets/dec8.xml usr/share/mysql/charsets/cp1257.xml usr/share/mysql/charsets/hp8.xml usr/share/mysql/charsets/cp850.xml usr/share/mysql/korean/ usr/share/mysql/korean/errmsg.sys usr/share/mysql/german/ usr/share/mysql/german/errmsg.sys usr/share/mysql/mi_test_all.res usr/share/mysql/greek/ usr/share/mysql/greek/errmsg.sys usr/share/mysql/french/ usr/share/mysql/french/errmsg.sys usr/share/mysql/mysql_fix_privilege_tables.sql usr/share/mysql/dutch/ usr/share/mysql/dutch/errmsg.sys usr/share/mysql/serbian/ usr/share/mysql/serbian/errmsg.sys usr/share/mysql/mysql_system_tables.sql usr/share/mysql/my-huge.cnf usr/share/mysql/portuguese/ usr/share/mysql/portuguese/errmsg.sys usr/share/mysql/japanese/ usr/share/mysql/japanese/errmsg.sys usr/share/mysql/mysql_test_data_timezone.sql usr/share/mysql/russian/ usr/share/mysql/russian/errmsg.sys usr/share/mysql/czech/ usr/share/mysql/czech/errmsg.sys usr/share/mysql/fill_help_tables.sql usr/share/mysql/estonian/ usr/share/mysql/estonian/errmsg.sys usr/share/mysql/my-medium.cnf usr/share/mysql/norwegian-ny/ usr/share/mysql/norwegian-ny/errmsg.sys usr/share/mysql/my-small.cnf usr/share/mysql/mysql-log-rotate usr/share/mysql/italian/ usr/share/mysql/italian/errmsg.sys usr/share/mysql/my-large.cnf usr/share/mysql/ndb-config-2-node.ini usr/share/mysql/binary-configure usr/share/mysql/mi_test_all usr/share/mysql/mysqld_multi.server usr/share/mysql/my-innodb-heavy-4G.cnf usr/doc/ usr/doc/mysql-5.0.67/ usr/doc/mysql-5.0.67/README usr/doc/mysql-5.0.67/Docs/ usr/doc/mysql-5.0.67/Docs/INSTALL-BINARY usr/doc/mysql-5.0.67/COPYING usr/info/ usr/info/mysql.info.gz usr/libexec/ usr/libexec/mysqld usr/libexec/mysqlmanager usr/man/ usr/man/man8/ usr/man/man8/mysqlmanager.8.gz usr/man/man8/mysqld.8.gz usr/man/man1/ usr/man/man1/mysql_zap.1.gz usr/man/man1/mysql_setpermission.1.gz usr/man/man1/mysql_tzinfo_to_sql.1.gz usr/man/man1/msql2mysql.1.gz usr/man/man1/mysql_tableinfo.1.gz usr/man/man1/mysql_explain_log.1.gz usr/man/man1/mysqlcheck.1.gz 
usr/man/man1/comp_err.1.gz usr/man/man1/my_print_defaults.1.gz usr/man/man1/mysqlbinlog.1.gz usr/man/man1/myisam_ftdump.1.gz usr/man/man1/mysql_upgrade.1.gz usr/man/man1/mysql.1.gz usr/man/man1/mysql_client_test.1.gz usr/man/man1/resolve_stack_dump.1.gz usr/man/man1/mysql_fix_extensions.1.gz usr/man/man1/mysqlmanagerc.1.gz usr/man/man1/mysql_config.1.gz usr/man/man1/mysqlshow.1.gz usr/man/man1/myisamlog.1.gz usr/man/man1/replace.1.gz usr/man/man1/mysqlmanager-pwgen.1.gz usr/man/man1/mysqltest.1.gz usr/man/man1/innochecksum.1.gz usr/man/man1/mysqladmin.1.gz usr/man/man1/perror.1.gz usr/man/man1/mysql_waitpid.1.gz usr/man/man1/mysql_convert_table_format.1.gz usr/man/man1/mysqlman.1.gz usr/man/man1/mysqlimport.1.gz usr/man/man1/mysqlbug.1.gz usr/man/man1/mysql_find_rows.1.gz usr/man/man1/myisampack.1.gz usr/man/man1/myisamchk.1.gz usr/man/man1/mysql_fix_privilege_tables.1.gz usr/man/man1/mysql-stress-test.pl.1.gz usr/man/man1/resolveip.1.gz usr/man/man1/make_win_bin_dist.1.gz usr/man/man1/mysqlhotcopy.1.gz usr/man/man1/mysqld_multi.1.gz usr/man/man1/safe_mysqld.1.gz usr/man/man1/mysql_secure_installation.1.gz usr/man/man1/mysql_install_db.1.gz usr/man/man1/mysqldump.1.gz usr/man/man1/mysql-test-run.pl.1.gz usr/man/man1/mysqld_safe.1.gz usr/man/man1/mysqlaccess.1.gz usr/man/man1/mysql.server.1.gz usr/man/man1/make_win_src_distribution.1.gz etc/ etc/rc.d/ etc/rc.d/rc.mysqld.new etc/my-huge.cnf etc/my-medium.cnf etc/my-small.cnf etc/my-large.cnf /etc/rc.d/rc.mysqld is an ordinary Slackware-type start/stop script: #!/bin/sh # Start/stop/restart mysqld. # # Copyright 2003 Patrick J. Volkerding, Concord, CA # Copyright 2003 Slackware Linux, Inc., Concord, CA # # This program comes with NO WARRANTY, to the extent permitted by law. # You may redistribute copies of this program under the terms of the # GNU General Public License. # To start MySQL automatically at boot, be sure this script is executable: # chmod 755 /etc/rc.d/rc.mysqld # Before you can run MySQL, you must have a database. To install an initial # database, do this as root: # # su - mysql # mysql_install_db # # Note that step one is becoming the mysql user. It's important to do this # before making any changes to the database, or mysqld won't be able to write # to it later (this can be fixed with 'chown -R mysql.mysql /var/lib/mysql'). # To allow outside connections to the database comment out the next line. # If you don't need incoming network connections, then leave the line # uncommented to improve system security. #SKIP="--skip-networking" # Start mysqld: mysqld_start() { if [ -x /usr/bin/mysqld_safe ]; then # If there is an old PID file (no mysqld running), clean it up: if [ -r /var/run/mysql/mysql.pid ]; then if ! ps axc | grep mysqld 1> /dev/null 2> /dev/null ; then echo "Cleaning up old /var/run/mysql/mysql.pid." rm -f /var/run/mysql/mysql.pid fi fi /usr/bin/mysqld_safe --datadir=/var/lib/mysql --pid-file=/var/run/mysql/mysql.pid $SKIP & fi } # Stop mysqld: mysqld_stop() { # If there is no PID file, ignore this request... if [ -r /var/run/mysql/mysql.pid ]; then killall mysqld # Wait at least one minute for it to exit, as we don't know how big the DB is... for second in 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 \ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 60 ; do if [ ! -r /var/run/mysql/mysql.pid ]; then break; fi sleep 1 done if [ "$second" = "60" ]; then echo "WARNING: Gave up waiting for mysqld to exit!" 
sleep 15 fi fi } # Restart mysqld: mysqld_restart() { mysqld_stop mysqld_start } case "$1" in 'start') mysqld_start ;; 'stop') mysqld_stop ;; 'restart') mysqld_restart ;; *) echo "usage $0 start|stop|restart" esac But there's also an unexpected init script on the machine, named /etc/init.d/mysql: #!/bin/sh # Copyright Abandoned 1996 TCX DataKonsult AB & Monty Program KB & Detron HB # This file is public domain and comes with NO WARRANTY of any kind # MySQL daemon start/stop script. # Usually this is put in /etc/init.d (at least on machines SYSV R4 based # systems) and linked to /etc/rc3.d/S99mysql and /etc/rc0.d/K01mysql. # When this is done the mysql server will be started when the machine is # started and shut down when the systems goes down. # Comments to support chkconfig on RedHat Linux # chkconfig: 2345 64 36 # description: A very fast and reliable SQL database engine. # Comments to support LSB init script conventions ### BEGIN INIT INFO # Provides: mysql # Required-Start: $local_fs $network $remote_fs # Should-Start: ypbind nscd ldap ntpd xntpd # Required-Stop: $local_fs $network $remote_fs # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: start and stop MySQL # Description: MySQL is a very fast and reliable SQL database engine. ### END INIT INFO # If you install MySQL on some other places than /usr, then you # have to do one of the following things for this script to work: # # - Run this script from within the MySQL installation directory # - Create a /etc/my.cnf file with the following information: # [mysqld] # basedir=<path-to-mysql-installation-directory> # - Add the above to any other configuration file (for example ~/.my.ini) # and copy my_print_defaults to /usr/bin # - Add the path to the mysql-installation-directory to the basedir variable # below. # # If you want to affect other MySQL variables, you should make your changes # in the /etc/my.cnf, ~/.my.cnf or other MySQL configuration files. # If you change base dir, you must also change datadir. These may get # overwritten by settings in the MySQL configuration files. #basedir= #datadir= # Default value, in seconds, afterwhich the script should timeout waiting # for server start. # Value here is overriden by value in my.cnf. # 0 means don't wait at all # Negative numbers mean to wait indefinitely service_startup_timeout=900 # The following variables are only set for letting mysql.server find things. # Set some defaults pid_file=/var/run/mysql/mysql.pid server_pid_file=/var/run/mysql/mysql.pid use_mysqld_safe=1 user=mysql if test -z "$basedir" then basedir=/usr bindir=/usr/bin if test -z "$datadir" then datadir=/var/lib/mysql fi sbindir=/usr/sbin libexecdir=/usr/libexec else bindir="$basedir/bin" if test -z "$datadir" then datadir="$basedir/data" fi sbindir="$basedir/sbin" libexecdir="$basedir/libexec" fi # datadir_set is used to determine if datadir was set (and so should be # *not* set inside of the --basedir= handler.) datadir_set= # # Use LSB init script functions for printing messages, if possible # lsb_functions="/lib/lsb/init-functions" if test -f $lsb_functions ; then . $lsb_functions else log_success_msg() { echo " SUCCESS! $@" } log_failure_msg() { echo " ERROR! 
$@" } fi PATH=/sbin:/usr/sbin:/bin:/usr/bin:$basedir/bin export PATH mode=$1 # start or stop shift other_args="$*" # uncommon, but needed when called from an RPM upgrade action # Expected: "--skip-networking --skip-grant-tables" # They are not checked here, intentionally, as it is the resposibility # of the "spec" file author to give correct arguments only. case `echo "testing\c"`,`echo -n testing` in *c*,-n*) echo_n= echo_c= ;; *c*,*) echo_n=-n echo_c= ;; *) echo_n= echo_c='\c' ;; esac parse_server_arguments() { for arg do case "$arg" in --basedir=*) basedir=`echo "$arg" | sed -e 's/^[^=]*=//'` bindir="$basedir/bin" if test -z "$datadir_set"; then datadir="$basedir/data" fi sbindir="$basedir/sbin" libexecdir="$basedir/libexec" ;; --datadir=*) datadir=`echo "$arg" | sed -e 's/^[^=]*=//'` datadir_set=1 ;; --user=*) user=`echo "$arg" | sed -e 's/^[^=]*=//'` ;; --pid-file=*) server_pid_file=`echo "$arg" | sed -e 's/^[^=]*=//'` ;; --service-startup-timeout=*) service_startup_timeout=`echo "$arg" | sed -e 's/^[^=]*=//'` ;; --use-mysqld_safe) use_mysqld_safe=1;; --use-manager) use_mysqld_safe=0;; esac done } parse_manager_arguments() { for arg do case "$arg" in --pid-file=*) pid_file=`echo "$arg" | sed -e 's/^[^=]*=//'` ;; --user=*) user=`echo "$arg" | sed -e 's/^[^=]*=//'` ;; esac done } wait_for_pid () { verb="$1" manager_pid="$2" # process ID of the program operating on the pid-file i=0 avoid_race_condition="by checking again" while test $i -ne $service_startup_timeout ; do case "$verb" in 'created') # wait for a PID-file to pop into existence. test -s $pid_file && i='' && break ;; 'removed') # wait for this PID-file to disappear test ! -s $pid_file && i='' && break ;; *) echo "wait_for_pid () usage: wait_for_pid created|removed manager_pid" exit 1 ;; esac # if manager isn't running, then pid-file will never be updated if test -n "$manager_pid"; then if kill -0 "$manager_pid" 2>/dev/null; then : # the manager still runs else # The manager may have exited between the last pid-file check and now. if test -n "$avoid_race_condition"; then avoid_race_condition="" continue # Check again. fi # there's nothing that will affect the file. log_failure_msg "Manager of pid-file quit without updating file." return 1 # not waiting any more. fi fi echo $echo_n ".$echo_c" i=`expr $i + 1` sleep 1 done if test -z "$i" ; then log_success_msg return 0 else log_failure_msg return 1 fi } # Get arguments from the my.cnf file, # the only group, which is read from now on is [mysqld] if test -x ./bin/my_print_defaults then print_defaults="./bin/my_print_defaults" elif test -x $bindir/my_print_defaults then print_defaults="$bindir/my_print_defaults" elif test -x $bindir/mysql_print_defaults then print_defaults="$bindir/mysql_print_defaults" else # Try to find basedir in /etc/my.cnf conf=/etc/my.cnf print_defaults= if test -r $conf then subpat='^[^=]*basedir[^=]*=\(.*\)$' dirs=`sed -e "/$subpat/!d" -e 's//\1/' $conf` for d in $dirs do d=`echo $d | sed -e 's/[ ]//g'` if test -x "$d/bin/my_print_defaults" then print_defaults="$d/bin/my_print_defaults" break fi if test -x "$d/bin/mysql_print_defaults" then print_defaults="$d/bin/mysql_print_defaults" break fi done fi # Hope it's in the PATH ... but I doubt it test -z "$print_defaults" && print_defaults="my_print_defaults" fi # # Read defaults file from 'basedir'. 
If there is no defaults file there # check if it's in the old (depricated) place (datadir) and read it from there # extra_args="" if test -r "$basedir/my.cnf" then extra_args="-e $basedir/my.cnf" else if test -r "$datadir/my.cnf" then extra_args="-e $datadir/my.cnf" fi fi parse_server_arguments `$print_defaults $extra_args mysqld server mysql_server mysql.server` # Look for the pidfile parse_manager_arguments `$print_defaults $extra_args manager` # # Set pid file if not given # if test -z "$pid_file" then pid_file=$datadir/mysqlmanager-`/bin/hostname`.pid else case "$pid_file" in /* ) ;; * ) pid_file="$datadir/$pid_file" ;; esac fi if test -z "$server_pid_file" then server_pid_file=$datadir/`/bin/hostname`.pid else case "$server_pid_file" in /* ) ;; * ) server_pid_file="$datadir/$server_pid_file" ;; esac fi case "$mode" in 'start') # Start daemon # Safeguard (relative paths, core dumps..) cd $basedir manager=$bindir/mysqlmanager if test -x $libexecdir/mysqlmanager then manager=$libexecdir/mysqlmanager elif test -x $sbindir/mysqlmanager then manager=$sbindir/mysqlmanager fi echo $echo_n "Starting MySQL" if test -x $manager -a "$use_mysqld_safe" = "0" then if test -n "$other_args" then log_failure_msg "MySQL manager does not support options '$other_args'" exit 1 fi # Give extra arguments to mysqld with the my.cnf file. This script may # be overwritten at next upgrade. $manager --user=$user --pid-file=$pid_file >/dev/null 2>&1 & wait_for_pid created $!; return_value=$? # Make lock for RedHat / SuSE if test -w /var/lock/subsys then touch /var/lock/subsys/mysqlmanager fi exit $return_value elif test -x $bindir/mysqld_safe then # Give extra arguments to mysqld with the my.cnf file. This script # may be overwritten at next upgrade. pid_file=$server_pid_file $bindir/mysqld_safe --datadir=$datadir --pid-file=$server_pid_file $other_args >/dev/null 2>&1 & wait_for_pid created $!; return_value=$? # Make lock for RedHat / SuSE if test -w /var/lock/subsys then touch /var/lock/subsys/mysql fi exit $return_value else log_failure_msg "Couldn't find MySQL manager ($manager) or server ($bindir/mysqld_safe)" fi ;; 'stop') # Stop daemon. We use a signal here to avoid having to know the # root password. # The RedHat / SuSE lock directory to remove lock_dir=/var/lock/subsys/mysqlmanager # If the manager pid_file doesn't exist, try the server's if test ! -s "$pid_file" then pid_file=$server_pid_file lock_dir=/var/lock/subsys/mysql fi if test -s "$pid_file" then mysqlmanager_pid=`cat $pid_file` echo $echo_n "Shutting down MySQL" kill $mysqlmanager_pid # mysqlmanager should remove the pid_file when it exits, so wait for it. wait_for_pid removed "$mysqlmanager_pid"; return_value=$? # delete lock for RedHat / SuSE if test -f $lock_dir then rm -f $lock_dir fi exit $return_value else log_failure_msg "MySQL manager or server PID file could not be found!" fi ;; 'restart') # Stop the service and regardless of whether it was # running or not, start it again. if $0 stop $other_args; then $0 start $other_args else log_failure_msg "Failed to stop running server, so refusing to try to start." exit 1 fi ;; 'reload'|'force-reload') if test -s "$server_pid_file" ; then read mysqld_pid < $server_pid_file kill -HUP $mysqld_pid && log_success_msg "Reloading service MySQL" touch $server_pid_file else log_failure_msg "MySQL PID file could not be found!" 
exit 1 fi ;; 'status') # First, check to see if pid file exists if test -s "$server_pid_file" ; then read mysqld_pid < $server_pid_file if kill -0 $mysqld_pid 2>/dev/null ; then log_success_msg "MySQL running ($mysqld_pid)" exit 0 else log_failure_msg "MySQL is not running, but PID file exists" exit 1 fi else # Try to find appropriate mysqld process mysqld_pid=`pidof $sbindir/mysqld` if test -z $mysqld_pid ; then if test "$use_mysqld_safe" = "0" ; then lockfile=/var/lock/subsys/mysqlmanager else lockfile=/var/lock/subsys/mysql fi if test -f $lockfile ; then log_failure_msg "MySQL is not running, but lock exists" exit 2 fi log_failure_msg "MySQL is not running" exit 3 else log_failure_msg "MySQL is running but PID file could not be found" exit 4 fi fi ;; *) # usage echo "Usage: $0 {start|stop|restart|reload|force-reload|status} [ MySQL server options ]" exit 1 ;; esac exit 0 An unimportant aside: The previous users of the machine kept a messy home directory. Their home directory was /root. I've pasted a copy at http://www.pastebin.ca/2167496. My question: Why is there a /etc/init.d/mysql file on this Slackware machine? How could it have gotten there? P.S. This question is far from perfect. Please feel free to edit it.
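
    Slackware itself only ships /etc/rc.d/rc.mysqld; the script above looks like the stock mysql.server script that the same package installs as /usr/share/mysql/mysql.server, so a plausible origin is that the departed sysadmin copied it into /etc/init.d to get SysV-style rc links. Two quick checks, sketched for a stock Slackware layout:

        # If this diff is empty, /etc/init.d/mysql is just a copy of the
        # mysql.server script the package already ships.
        diff /usr/share/mysql/mysql.server /etc/init.d/mysql

        # Ask the package database which installed package, if any, claims the
        # file (Slackware file lists omit the leading slash).
        grep -l 'etc/init.d/mysql' /var/log/packages/*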


  • Importing a vCard into the Address Book, Objective-C [migrated]

    - by user1044771
    I am designing a QR code reader, and it needs to detect and import contact cards in vCard format. Is there a way to add the card data to the system Address Book directly, or do I need to parse the vCard myself and add each field individually? I will be getting the vCard as an NSString. I tried the code below (from a different post) and it didn't work:

      -(IBAction)saveContacts {
        NSString *vCardString = @"vCardDataHere";
        CFDataRef vCardData = (__bridge_retained CFDataRef)[vCardString dataUsingEncoding:NSUTF8StringEncoding];
        ABAddressBookRef book = ABAddressBookCreate();
        ABRecordRef defaultSource = ABAddressBookCopyDefaultSource(book);
        CFArrayRef vCardPeople = ABPersonCreatePeopleInSourceWithVCardRepresentation(defaultSource, vCardData);
        for (CFIndex index = 0; index < CFArrayGetCount(vCardPeople); index++) {
          ABRecordRef person = CFArrayGetValueAtIndex(vCardPeople, index);
          ABAddressBookAddRecord(book, person, NULL);
          CFRelease(person);
        }
        CFRelease(vCardPeople);
        CFRelease(defaultSource);
        ABAddressBookSave(book, NULL);
        CFRelease(book);
      }

    I have since searched a bit and fixed the code; this is how it looks now. It doesn't crash anymore, but it still doesn't save the vCard (NSString format) to the address book. Any clues?
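
    Two things stand out in the snippet. On iOS 6 and later the address book requires explicit authorization, and without it ABAddressBookSave silently does nothing; also, the CFRelease(person) inside the loop over-releases records that the code does not own (they are released along with the vCardPeople array). A hedged sketch with both addressed, keeping the question's vCardString variable:

      CFErrorRef error = NULL;
      ABAddressBookRef book = ABAddressBookCreateWithOptions(NULL, &error);
      ABAddressBookRequestAccessWithCompletion(book, ^(bool granted, CFErrorRef err) {
        if (!granted) { CFRelease(book); return; }
        CFDataRef vCardData = (__bridge CFDataRef)[vCardString dataUsingEncoding:NSUTF8StringEncoding];
        ABRecordRef defaultSource = ABAddressBookCopyDefaultSource(book);
        CFArrayRef people = ABPersonCreatePeopleInSourceWithVCardRepresentation(defaultSource, vCardData);
        for (CFIndex i = 0; i < CFArrayGetCount(people); i++) {
          // No CFRelease here: the records are owned by the people array.
          ABAddressBookAddRecord(book, CFArrayGetValueAtIndex(people, i), NULL);
        }
        ABAddressBookSave(book, NULL);
        CFRelease(people);
        CFRelease(defaultSource);
        CFRelease(book);
      });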


  • Direct2D off-screen rendering and hardware acceleration

    - by Goran
    I'm trying to use Direct2D to render images off-screen using the Windows API Code Pack. This is easily achieved using WicBitmapRenderTarget, but sadly it's not hardware accelerated. So I'm trying this route:

    1. Create a Direct3D device.
    2. Create a Texture2D.
    3. Use the texture surface to create a render target using CreateDxgiSurfaceRenderTarget.
    4. Draw some shapes.

    While this renders the image, it appears the GPU isn't being used at all while the CPU is used heavily. Am I doing something wrong? Is there a way to check whether hardware or software rendering is used? Code sample:

      var device = D3DDevice1.CreateDevice1(
          null,
          DriverType.Hardware,
          null,
          CreateDeviceOptions.SupportBgra,
          FeatureLevel.Ten
      );

      var txd = new Texture2DDescription();
      txd.Width = 256;
      txd.Height = 256;
      txd.MipLevels = 1;
      txd.ArraySize = 1;
      txd.Format = Format.B8G8R8A8UNorm; //DXGI_FORMAT_R32G32B32A32_FLOAT;
      txd.SampleDescription = new SampleDescription(1, 0);
      txd.Usage = Usage.Default;
      txd.BindingOptions = BindingOptions.RenderTarget | BindingOptions.ShaderResource;
      txd.MiscellaneousResourceOptions = MiscellaneousResourceOptions.None;
      txd.CpuAccessOptions = CpuAccessOptions.None;

      var tx = device.CreateTexture2D(txd);
      var srfc = tx.GraphicsSurface;

      var d2dFactory = D2DFactory.CreateFactory();
      var renderTargetProperties = new RenderTargetProperties
      {
          PixelFormat = new PixelFormat(Format.Unknown, AlphaMode.Premultiplied),
          DpiX = 96,
          DpiY = 96,
          RenderTargetType = RenderTargetType.Default,
      };

      using (var renderTarget = d2dFactory.CreateGraphicsSurfaceRenderTarget(srfc, renderTargetProperties))
      {
          renderTarget.BeginDraw();
          var clearColor = new ColorF(1f, 1f, 1f, 1f);
          renderTarget.Clear(clearColor);
          using (var strokeBrush = renderTarget.CreateSolidColorBrush(new ColorF(0.2f, 0.2f, 0.2f, 1f)))
          {
              for (var i = 0; i < 100000; i++)
              {
                  renderTarget.DrawEllipse(new Ellipse(new Point2F(i, i), 10, 10), strokeBrush, 2);
              }
          }
          var hr = renderTarget.EndDraw();
      }
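
    On the hardware-or-software question: RenderTargetType.Default lets Direct2D fall back to software rasterization silently. One hedged way to find out, assuming the Code Pack wrapper mirrors the native D2D1_RENDER_TARGET_TYPE enum, is to request hardware explicitly, so that creation fails instead of falling back when the GPU path is unavailable:

      // Demand hardware rendering: if the DXGI-surface target cannot be
      // hardware accelerated, CreateGraphicsSurfaceRenderTarget fails
      // instead of silently using the software rasterizer.
      var hwProperties = new RenderTargetProperties
      {
          PixelFormat = new PixelFormat(Format.Unknown, AlphaMode.Premultiplied),
          DpiX = 96,
          DpiY = 96,
          RenderTargetType = RenderTargetType.Hardware,
      };

      using (var renderTarget = d2dFactory.CreateGraphicsSurfaceRenderTarget(srfc, hwProperties))
      {
          // ...same drawing code as above...
      }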


  • Entity Association Mapping with Code First, Part 1: Mapping Complex Types

    - by mortezam
    Last week the CTP5 build of the new Entity Framework Code First was released by the data team at Microsoft. Entity Framework Code First provides a pretty powerful code-centric way to work with databases, and when it comes to associations, it brings ultimate flexibility. I'm a big fan of the EF Code First approach and am planning to explain association mapping with Code First in a series of blog posts; this one is dedicated to complex types. If you are new to the Code First approach, you can find a great walkthrough here. In order to build a solid foundation for our discussion, we will start by learning about some of the core concepts around relationship mapping.

    What is Mapping?
    Mapping is the act of determining how objects and their relationships are persisted in permanent data storage, in our case, relational databases.

    What is Relationship Mapping?
    A mapping that describes how to persist a relationship (association, aggregation, or composition) between two or more objects.

    Types of Relationships
    There are two categories of object relationships that we need to be concerned with when mapping associations. The first category is based on multiplicity and it includes three types:

    - One-to-one relationships: This is a relationship where the maximum of each of its multiplicities is one.
    - One-to-many relationships: Also known as a many-to-one relationship, this occurs when the maximum of one multiplicity is one and the other is greater than one.
    - Many-to-many relationships: This is a relationship where the maximum of both multiplicities is greater than one.

    The second category is based on directionality and it contains two types:

    - Uni-directional relationships: When an object knows about the object(s) it is related to, but the other object(s) do not know of the original object. To put this in EF terminology, a navigation property exists only on one of the association ends, not on both.
    - Bi-directional relationships: When the objects on both ends of the relationship know of each other (i.e. a navigation property is defined on both ends).

    How Object Relationships Are Implemented in POCO Domain Models
    When the multiplicity is one (e.g. 0..1 or 1), the relationship is implemented by defining a navigation property that references the other object (e.g. an Address property on the User class). When the multiplicity is many (e.g. 0..*, 1..*), the relationship is implemented via an ICollection of the type of the other object.

    How Relational Database Relationships Are Implemented
    Relationships in relational databases are maintained through the use of foreign keys. A foreign key is a data attribute (or attributes) that appears in one table and must be the primary key or another candidate key in another table. With a one-to-one relationship, the foreign key needs to be implemented by one of the tables. To implement a one-to-many relationship, we implement a foreign key from the "one table" to the "many table". We could also choose to implement a one-to-many relationship via an associative table (aka a join table), effectively making it a many-to-many relationship.

    Introducing the Model
    Now, let's review the model that we are going to use in order to implement a complex type with Code First. It's a simple object model consisting of two classes: User and Address. Each user can have one billing address. The address information of a User is modeled as a separate class, as you can see in the UML model below. In object-modeling terms, this association is a kind of aggregation—a part-of relationship.
    Aggregation is a strong form of association; it has some additional semantics with regard to the lifecycle of objects. In this case, we have an even stronger form, composition, where the lifecycle of the part is fully dependent upon the lifecycle of the whole.

    Fine-Grained Domain Models
    The motivation behind this design was to achieve a fine-grained domain model. In crude terms, fine-grained means "more classes than tables". For example, a user may have both a billing address and a home address. In the database, you may have a single User table with the columns BillingStreet, BillingCity, and BillingPostalCode along with HomeStreet, HomeCity, and HomePostalCode. There are good reasons to use this somewhat denormalized relational model (performance, for one). In our object model, we can use the same approach, representing the two addresses as six string-valued properties of the User class. But it's much better to model this using an Address class, where User has the BillingAddress and HomeAddress properties. This object model achieves improved cohesion and greater code reuse, and is more understandable.

    Complex Types: Splitting a Table Across Multiple Types
    Back to our model: there is no difference between this composition and other, weaker styles of association when it comes to the actual C# implementation. But in the context of ORM, there is a big difference: a composed class is often a candidate complex type. C# has no concept of composition—a class or property can't be marked as a composition. The only difference is the object identifier: a complex type has no individual identity (i.e. no AddressId defined on the Address class), which makes sense because when it comes to the database, everything is going to be saved into one single table.

    How to Implement a Complex Type with Code First
    Code First has a concept of complex type discovery that works based on a set of conventions. The convention is that if Code First discovers a class where a primary key cannot be inferred, and no primary key is registered through Data Annotations or the fluent API, then the type will be automatically registered as a complex type. Complex type detection also requires that the type does not have properties that reference entity types (i.e. all the properties must be scalar types) and is not referenced from a collection property on another type. Here is the implementation:

      public class User
      {
          public int UserId { get; set; }
          public string FirstName { get; set; }
          public string LastName { get; set; }
          public string Username { get; set; }
          public Address Address { get; set; }
      }

      public class Address
      {
          public string Street { get; set; }
          public string City { get; set; }
          public string PostalCode { get; set; }
      }

      public class EntityMappingContext : DbContext
      {
          public DbSet<User> Users { get; set; }
      }

    With Code First, this is all of the code we need to write to create a complex type; we do not need to configure any additional database schema mapping information through Data Annotations or the fluent API.

    Database Schema
    The mapping result for this object model is as follows:

    Limitations of This Mapping
    There are two important limitations to classes mapped as complex types:

    - Shared references are not possible: The Address complex type doesn't have its own database identity (primary key) and so can't be referred to by any object other than the containing instance of User (e.g. a Shipping class that also needs to reference the same User Address).
    - No elegant way to represent a null reference: There is no elegant way to represent a null reference to an Address. When reading from the database, EF Code First always initializes the Address object, even if the values in all mapped columns of the complex type are null. This means that if you store a complex type object with all null property values, EF Code First returns an initialized complex type when the owning entity object is retrieved from the database.

    Summary
    In this post we learned about fine-grained domain models, of which complex types are just one example. Fine-grained modeling is fully supported by EF Code First and is known as the most important requirement for a rich domain model. A complex type is usually the simplest way to represent a one-to-one relationship, and because the lifecycle is almost always dependent in such a case, it's either an aggregation or a composition in UML. In the next posts we will revisit the same domain model and learn about other ways to map a one-to-one association that do not have the limitations of complex types.
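
    For readers who prefer the mapping to be explicit rather than convention-driven, the same model can also be registered by hand. This is a hedged sketch against the CTP5-era API (the [ComplexType] data annotation and the DbModelBuilder fluent registration):

      // Option 1: data annotation on the composed class.
      [ComplexType]
      public class Address
      {
          public string Street { get; set; }
          public string City { get; set; }
          public string PostalCode { get; set; }
      }

      // Option 2: fluent API registration inside the context.
      public class EntityMappingContext : DbContext
      {
          public DbSet<User> Users { get; set; }

          protected override void OnModelCreating(DbModelBuilder modelBuilder)
          {
              // Registers Address as a complex type explicitly instead of
              // relying on convention-based discovery.
              modelBuilder.ComplexType<Address>();
          }
      }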


  • C++ FBX Animation Importer Using the FBX SDK

    - by Mike Sawayda
    Does anyone have any experience using the FBX SDK to load in animations. I got the meshes loaded in correctly with all of their verts, indices, UV's, and normals. I am just now trying to get the Animations working correctly. I have looked at the FBX SDK documentation with little help. If someone could just help me get started or point me in the right direction I would greatly appreciate it. I added some code so you can kinda get an idea of what I am doing. I should be able to place that code anywhere in the load FBX function and have it work. //GETTING ANIMAION DATA for(int i = 0; i < scene->GetSrcObjectCount<FbxAnimStack>(); ++i) { FbxAnimStack* lAnimStack = scene->GetSrcObject<FbxAnimStack>(i); FbxString stackName = "Animation Stack Name: "; stackName += lAnimStack->GetName(); string sStackName = stackName; int numLayers = lAnimStack->GetMemberCount<FbxAnimLayer>(); for(int j = 0; j < numLayers; ++j) { FbxAnimLayer* lAnimLayer = lAnimStack->GetMember<FbxAnimLayer>(j); FbxString layerName = "Animation Stack Name: "; layerName += lAnimLayer->GetName(); string sLayerName = layerName; queue<FbxNode*> nodes; FbxNode* tempNode = scene->GetRootNode(); while(tempNode != NULL) { FbxAnimCurve* lAnimCurve = tempNode->LclTranslation.GetCurve(lAnimLayer, FBXSDK_CURVENODE_COMPONENT_X); if(lAnimCurve != NULL) { //I know something needs to be done here but I dont know what. } for(int i = 0; i < tempNode->GetChildCount(false); ++i) { nodes.push(tempNode->GetChild(i)); } if(nodes.size() > 0) { tempNode = nodes.front(); nodes.pop(); } else { tempNode = NULL; } } } } Here is the full function bool FBXLoader::LoadFBX(ParentMeshObject* _parentMesh, char* _filePath, bool _hasTexture) { FbxManager* fbxManager = FbxManager::Create(); if(!fbxManager) { printf( "ERROR %s : %d failed creating FBX Manager!\n", __FILE__, __LINE__ ); } FbxIOSettings* ioSettings = FbxIOSettings::Create(fbxManager, IOSROOT); fbxManager->SetIOSettings(ioSettings); FbxString filePath = FbxGetApplicationDirectory(); fbxManager->LoadPluginsDirectory(filePath.Buffer()); FbxScene* scene = FbxScene::Create(fbxManager, ""); int fileMinor, fileRevision; int sdkMajor, sdkMinor, sdkRevision; int fileFormat; FbxManager::GetFileFormatVersion(sdkMajor, sdkMinor, sdkRevision); FbxImporter* importer = FbxImporter::Create(fbxManager, ""); if(!fbxManager->GetIOPluginRegistry()->DetectReaderFileFormat(_filePath, fileFormat)) { //Unrecognizable file format. 
Try to fall back on FbxImorter::eFBX_BINARY fileFormat = fbxManager->GetIOPluginRegistry()->FindReaderIDByDescription("FBX binary (*.fbx)"); } bool importStatus = importer->Initialize(_filePath, fileFormat, fbxManager->GetIOSettings()); importer->GetFileVersion(fileMinor, fileMinor, fileRevision); if(!importStatus) { printf( "ERROR %s : %d FbxImporter Initialize failed!\n", __FILE__, __LINE__ ); return false; } importStatus = importer->Import(scene); if(!importStatus) { printf( "ERROR %s : %d FbxImporter failed to import the file to the scene!\n", __FILE__, __LINE__ ); return false; } FbxAxisSystem sceneAxisSystem = scene->GetGlobalSettings().GetAxisSystem(); FbxAxisSystem axisSystem( FbxAxisSystem::eYAxis, FbxAxisSystem::eParityOdd, FbxAxisSystem::eLeftHanded ); if(sceneAxisSystem != axisSystem) { axisSystem.ConvertScene(scene); } TriangulateRecursive(scene->GetRootNode()); FbxArray<FbxMesh*> meshes; FillMeshArray(scene, meshes); unsigned short vertexCount = 0; unsigned short triangleCount = 0; unsigned short faceCount = 0; unsigned short materialCount = 0; int numberOfVertices = 0; for(int i = 0; i < meshes.GetCount(); ++i) { numberOfVertices += meshes[i]->GetPolygonVertexCount(); } Face face; vector<Face> faces; int indicesCount = 0; int ptrMove = 0; float wValue = 0.0f; if(!_hasTexture) { wValue = 1.0f; } for(int i = 0; i < meshes.GetCount(); ++i) { int vertexCount = 0; vertexCount = meshes[i]->GetControlPointsCount(); if(vertexCount == 0) continue; VertexType* vertices; vertices = new VertexType[vertexCount]; int triangleCount = meshes[i]->GetPolygonVertexCount() / 3; indicesCount = meshes[i]->GetPolygonVertexCount(); FbxVector4* fbxVerts = new FbxVector4[vertexCount]; int arrayIndex = 0; memcpy(fbxVerts, meshes[i]->GetControlPoints(), vertexCount * sizeof(FbxVector4)); for(int j = 0; j < triangleCount; ++j) { int index = 0; FbxVector4 fbxNorm(0, 0, 0, 0); FbxVector2 fbxUV(0, 0); bool texCoordFound = false; face.indices[0] = index = meshes[i]->GetPolygonVertex(j, 0); vertices[index].position.x = (float)fbxVerts[index][0]; vertices[index].position.y = (float)fbxVerts[index][1]; vertices[index].position.z = (float)fbxVerts[index][2]; vertices[index].position.w = wValue; meshes[i]->GetPolygonVertexNormal(j, 0, fbxNorm); vertices[index].normal.x = (float)fbxNorm[0]; vertices[index].normal.y = (float)fbxNorm[1]; vertices[index].normal.z = (float)fbxNorm[2]; texCoordFound = meshes[i]->GetPolygonVertexUV(j, 0, "map1", fbxUV); vertices[index].texture.x = (float)fbxUV[0]; vertices[index].texture.y = (float)fbxUV[1]; face.indices[1] = index = meshes[i]->GetPolygonVertex(j, 1); vertices[index].position.x = (float)fbxVerts[index][0]; vertices[index].position.y = (float)fbxVerts[index][1]; vertices[index].position.z = (float)fbxVerts[index][2]; vertices[index].position.w = wValue; meshes[i]->GetPolygonVertexNormal(j, 1, fbxNorm); vertices[index].normal.x = (float)fbxNorm[0]; vertices[index].normal.y = (float)fbxNorm[1]; vertices[index].normal.z = (float)fbxNorm[2]; texCoordFound = meshes[i]->GetPolygonVertexUV(j, 1, "map1", fbxUV); vertices[index].texture.x = (float)fbxUV[0]; vertices[index].texture.y = (float)fbxUV[1]; face.indices[2] = index = meshes[i]->GetPolygonVertex(j, 2); vertices[index].position.x = (float)fbxVerts[index][0]; vertices[index].position.y = (float)fbxVerts[index][1]; vertices[index].position.z = (float)fbxVerts[index][2]; vertices[index].position.w = wValue; meshes[i]->GetPolygonVertexNormal(j, 2, fbxNorm); vertices[index].normal.x = (float)fbxNorm[0]; 
            vertices[index].normal.y = (float)fbxNorm[1];
            vertices[index].normal.z = (float)fbxNorm[2];
            texCoordFound = meshes[i]->GetPolygonVertexUV(j, 2, "map1", fbxUV);
            vertices[index].texture.x = (float)fbxUV[0];
            vertices[index].texture.y = (float)fbxUV[1];

            faces.push_back(face);
        }

        delete [] fbxVerts; //the temporary control-point copy is no longer needed
        meshes[i]->Destroy();
        meshes[i] = NULL;

        int indexCount = faces.size() * 3;
        unsigned long* indices = new unsigned long[faces.size() * 3];
        int indicie = 0;
        for(unsigned int i = 0; i < faces.size(); ++i)
        {
            indices[indicie++] = faces[i].indices[0];
            indices[indicie++] = faces[i].indices[1];
            indices[indicie++] = faces[i].indices[2];
        }
        faces.clear();

        _parentMesh->AddChild(vertices, indices, vertexCount, indexCount);
    }

    return true;
}

    Read the article

  • Reusable VS clean code - where's the balance?

    - by Radek Šimko
    Let's say I have a data model for blog posts and two use-cases of that model: getting all blog posts, and getting only the blog posts written by a specific author. There are basically two ways I can realize that.

    1st model:

    class Articles
    {
        public function getPosts()
        {
            return $this->connection->find()
                ->sort(array('creation_time' => -1));
        }

        public function getPostsByAuthor( $authorUid )
        {
            return $this->connection->find(array('author_uid' => $authorUid))
                ->sort(array('creation_time' => -1));
        }
    }

    1st usage (presenter/controller):

    if ( $_GET['author_uid'] ) {
        $posts = $articles->getPostsByAuthor($_GET['author_uid']);
    } else {
        $posts = $articles->getPosts();
    }

    2nd one:

    class Articles
    {
        public function getPosts( $authorUid = NULL )
        {
            $query = array();
            if( $authorUid !== NULL ) {
                $query = array('author_uid' => $authorUid);
            }
            return $this->connection->find($query)
                ->sort(array('creation_time' => -1));
        }
    }

    2nd usage (presenter/controller):

    $posts = $articles->getPosts( $_GET['author_uid'] );

    To sum up the (dis)advantages: 1) cleaner code, 2) more reusable code. Which one do you think is better and why? Is there any kind of compromise between those two?
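    If it helps to see the trade-off outside PHP, here is a minimal C# sketch of the second approach (one method with an optional filter); the Article type, the IQueryable source and the field names are hypothetical illustrations, not from the question:

    using System;
    using System.Linq;

    public class Article
    {
        public string AuthorUid { get; set; }
        public DateTime CreationTime { get; set; }
    }

    public class Articles
    {
        private readonly IQueryable<Article> _source;

        public Articles(IQueryable<Article> source) { _source = source; }

        // Passing null means "all posts", mirroring the PHP version above.
        public IQueryable<Article> GetPosts(string authorUid = null)
        {
            var query = _source;
            if (authorUid != null)
                query = query.Where(a => a.AuthorUid == authorUid);
            return query.OrderByDescending(a => a.CreationTime);
        }
    }

    Either shape produces the same result set, since the filter is applied before the sort; the difference is purely in how explicit and discoverable the API is.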

    Read the article

  • Internationalize WebCenter Portal - Content Presenter

    - by Stefan Krantz
    Lately we have been involved in engagements where internationalization has been holding the project back from success. In this post we are going to explain how to get Content Presenter and its editorials to comply with the currently selected locale for the WebCenter Portal session. As you probably know by now, WebCenter Portal leverages the localization support from JavaServer Faces (JSF); in this post we will assume that localization is controlled and enforced by switching the current browser's locale between English and Spanish. There are two main scenarios in internationalizing content-enabled pages. Since Content Presenter offers both presentation of information and contribution of information, in this post we will look at how to enable seamless integration of the correct localized version of the back-end content file, and how to enable the editor/author to edit the correct localized version of the file based on the current browser locale.

    Solution Scenario 1 - Localization aware content presentation

    Due to the number of steps required to implement the enclosed solution proposal, I have decided to share the solution with you in grouped components for each facet of the solution. If you want more details on each step, you can review the enclosed components. This post will guide you through the steps of enabling each component and what it enables/changes in each section of the system.

    Enable Content Presenter Customization

    By leveraging a predictable naming convention for the data files used to hold the content for the Content Presenter instance, we can easily develop a component that dynamically switches the name out before presenting the information. The naming convention we have leveraged follows industry best practice: a shared identifier as prefix (ContentABC) and a language-enabled suffix (_EN) (_ES). So the assumption is that each file pair in the above example should look like the following:
    - English version - (ContentABC_EN)
    - Spanish version - (ContentABC_ES)

    Based on the above, we can now easily switch the language out by using the localization support from JSF, regardless of the primary version assigned to the Content Presenter instance.
    The Java bean below (oracle.webcenter.doclib.internal.view.presenter.NLSHelperBean) is enclosed in the customization project available for download at the bottom of the post:

    public static final String CP_D_DOCNAME_FORMAT = "%s_%s";
    public static final int CP_UNIQUE_ID_INDEX = 0;
    private ContentPresenter presenter = null;

    public NLSHelperBean() {
        super();
    }

    /**
     * This method updates the configuration for the pageFlowScope to have the correct datafile
     * for the current Locale
     */
    public void initLocaleForDataFile() {
        String dataFile = null;
        // Check that the state of the presenter is present, and make sure the item is
        // eligible for localization by locating the "_" in the name
        if(presenter.getConfiguration().getDatasource() != null &&
           presenter.getConfiguration().getDatasource().isNodeDatasource() &&
           presenter.getConfiguration().getDatasource().getNodeIdDatasource() != null &&
           !presenter.getConfiguration().getDatasource().getNodeIdDatasource().equals("") &&
           presenter.getConfiguration().getDatasource().getNodeIdDatasource().indexOf("_") > 0) {
            dataFile = presenter.getConfiguration().getDatasource().getNodeIdDatasource();
            FacesContext fc = FacesContext.getCurrentInstance();
            // Leverage the current faces context to get the current localization language
            String currentLocale = fc.getViewRoot().getLocale().getLanguage().toUpperCase();
            String newDataFile = dataFile;
            String [] uniqueIdArr = dataFile.split("_");
            if(uniqueIdArr.length > 0) {
                newDataFile = String.format(CP_D_DOCNAME_FORMAT, uniqueIdArr[CP_UNIQUE_ID_INDEX], currentLocale);
            }
            // Replace the current node datasource with the localized datafile.
            presenter.getConfiguration().getDatasource().setNodeIdDatasource(newDataFile);
        }
    }

    With this bean available to our WebCenter Portal implementation, we can start the next step: overriding the standard behavior in Content Presenter by applying an MDS taskflow customization to the Content Presenter taskflow. The following taskflow customization has been applied in the customization project attached to this post:
    - Library: WebCenter Document Library Service View
    - Path: oracle.webcenter.doclib.view.jsf.taskflows.presenter
    - File: contentPresenter.xml

    Changes made in the above customization view:
    1. A new method invocation activity has been added (initLocaleForDataFile)
    2. The method invocation invokes the new NLSHelperBean
    3. The default activity is moved to the new method invocation (initLocaleForDataFile)
    4. The outcome from the method invocation goes to determine-navigation (the original default activity)

    The above changes conclude the presentation modifications needed to support a localization scenario for a content-driven page. In addition, this customization does not limit or disable the out-of-the-box capabilities of WebCenter Portal.
    Steps to enable the above customization:
    1. Start JDeveloper and open your WebCenter Portal application
    2. Select "Open Project" and include the extracted project you downloaded (CPNLSCustomizations.zip)
    3. Make sure the build output from the CPNLSCustomizations project is a dependency of your Portal project
    4. Deploy your Portal application to your WC_CustomPortal managed server
    5. Make sure the naming convention of the two data files follows the above recommendation

    Example result of the solution:

    Solution Scenario 2 - Localization aware content creation and authoring

    As you could see from Solution Scenario 1, we require the naming convention to be strictly followed; in the hands of a user with limited technology knowledge, this can be one of the weak links in the solution. Therefore I strongly recommend that you also follow this part, since it will eliminate that risk and considerably improve the editor's/author's experience. The current WebCenter Portal architecture leverages WebCenter Content to maintain, publish and manage content, so we need to make a few efforts to bring this part of the architecture on board with our new naming practice and to simplify content creation for our end users.

    As you probably remember, the naming convention requires the prefix to be common, so I propose we enable a new component that helps auto-name the content item's dDocName (this means that the readable title can still be in a human-readable format). The new component (WCP-LocalizationSupport.zip) built for this scenario will enable a couple of things:

    1. A new service where a sequential number can be generated on request - service name: GET_WCP_LOCALE_CONTENTID

    2. Content Presenter leverages a specific function when launching the content creation wizard from within Content Presenter. The assumption is that users will create content by clicking the "Create Web Content" button. When clicking the button, the wizard that opens actually runs inside the WebCenter Content server; the file executed is (contentwizard.hcsp).
    This file uses JSON commands to generate operations in the content server; I have extended it to create two identical data files instead of one:
    - First it creates the English version, leveraging the new service and a global rule to set the dDocName on the original check-in screen; this global rule is available in a configuration package attached to this blog (NLSContentProfileRule.zip)
    - Secondly it runs a set of JSON javascripts to create the Spanish version with the same details, except for the name, where the suffix is replaced with (_ES)
    - Then the content creation wizard ends with its out-of-the-box behavior and assigns the Content Presenter instance the English version

    See the JavaScript markup below - this can be changed in (WCP-LocalizationSupport.zip/component/WCP-LocalizationSupport/publish/webcenter):

    //---------------------------------------A-TEAM---------------------------------------
    WCM.ContentWizard.CheckinContentPage.OnCheckinComplete = function(returnParams)
    {
        var callback = WCM.ContentWizard.CheckinContentPage.checkinCompleteCallback;
        WCM.ContentWizard.ChooseContentPage.OnSelectionComplete(returnParams, callback);
        // Load latest DOC_INFO_SIMPLE
        var cgiPath = DOCLIB.config.httpCgiPath;
        var jsonBinder = new WCM.Idc.JSONBinder();
        jsonBinder.SetLocalDataValue('IdcService', 'DOC_INFO_SIMPLE');
        jsonBinder.SetLocalDataValue('dID', returnParams.dID);
        jsonBinder.Send(cgiPath, $CB(this, function(http) {
            var ret = http.GetResponseText();
            var binder = new WCM.Idc.JSONBinder(ret);
            var dDocName = binder.GetResultSetValue('DOC_INFO', 'dDocName', 0);
            if(dDocName.indexOf("_") > 0){
                var ssBinder = new WCM.Idc.JSONBinder();
                ssBinder.SetLocalDataValue('IdcService', 'SS_CHECKIN_NEW');
                //Additional localized dDocName generated
                ssBinder.SetLocalDataValue('dDocName', getLocalizedDocName(dDocName, "es"));
                ssBinder.SetLocalDataValue('primaryFile', 'default.xml');
                ssBinder.SetLocalDataValue('ssDefaultDocumentToken', 'SSContributorDataFile');

                for(var n = 0 ; n < binder.GetResultSetFields('DOC_INFO').length ; n++) {
                    var field = binder.GetResultSetFields('DOC_INFO')[n];
                    if(field != 'dID' &&
                       field != 'dDocName' &&
                       field != 'dReleaseState' &&
                       field != 'dRevClassID' &&
                       field != 'dRevisionID' &&
                       field != 'dRevLabel') {
                        ssBinder.SetLocalDataValue(field, binder.GetResultSetValue('DOC_INFO', field, 0));
                    }
                }
                ssBinder.Send(cgiPath, $CB(this, function(http) {}));
            }
        }));
    }

    //Support function to create localized dDocNames
    function getLocalizedDocName(dDocName, lang) {
        var result = dDocName.replace("_EN", ("_" + lang));
        return result;
    }
    //---------------------------------------A-TEAM---------------------------------------
    3. By applying the enclosed NLSContentProfileRule.zip, the check-in screen for data file creation will have auto-naming enabled with a localization suffix (the default is English). You can change the default language by updating the GlobalNlsRule and assigning your preferred suffix.

    See the rule markup for the dDocName field below:

    <$executeService("GET_WCP_LOCALE_CONTENTID")$><$dprDefaultValue=WCP_LOCALE.LocaleContentId & "_EN"$>

    Steps to enable the above extensions and configurations:
    1. Install the WebCenter component (WCP-LocalizationSupport.zip) via the AdminServer in the WebCenter Content administration menus
    2. Enable the component and restart the content server
    3. Apply the configuration bundle to enable the new global rule (GlobalNlsRule), via WebCenter Content Administration/Config Migration Admin

    New Content Creation Experience Result

    Content Editing
    Content editing will by default be enabled for authoring in the currently selected locale, since the content file is selected by (Solution Scenario 1); this means that a user can switch his browser locale and then get an editing experience adapted to the currently selected locale.

    Notes
    A-Team is planning to post a solution on how to switch the locale of the WebCenter Portal session inline, so that Content Presenter, the navigation model and other Faces-related features are localized accordingly.

    The Content Presenter examples used in this post are an extension of the following post:
    https://blogs.oracle.com/ATEAM_WEBCENTER/entry/enable_content_editing_of_iterative

    Downloads
    - CPNLSCustomizations.zip - WebCenter Portal, Content Presenter customization
      https://blogs.oracle.com/ATEAM_WEBCENTER/resource/stefan.krantz/CPNLSCustomizations.zip
    - WCP-LocalizationSupport.zip - WebCenter Content, extension component to enable localized creation of files with compliant auto-naming
      https://blogs.oracle.com/ATEAM_WEBCENTER/resource/stefan.krantz/WCP-LocalizationSupport.zip
    - NLSContentProfileRule.zip - WebCenter Content, configuration update bundle to enable the global rule for new check-in naming of data files
      https://blogs.oracle.com/ATEAM_WEBCENTER/resource/stefan.krantz/NLSContentProfileRule.zip

    Read the article

  • PowerShell Remoting w/ Exchange 2010

    - by pk.
    I'm having difficulty running Exchange 2010 cmdlets through remote PowerShell sessions. I start my local PowerShell session as Administrator and issue the following commands:

    PS C:\Windows\system32> $mailcred = Get-Credential
    PS C:\Windows\system32> $mailSession = New-PSSession -ComputerName MAILSRV -Credential $mailcred
    PS C:\Windows\system32> Enter-PSSession $mailSession
    [MAILSRV]: PS C:\Users\jdoe\Documents> Add-PSSnapin Microsoft.Exchange.Management.PowerShell.E2010
    [MAILSRV]: PS C:\Users\jdoe\Documents> hostname
    MAILSRV
    [MAILSRV]: PS C:\Users\jdoe\Documents> Get-ExchangeServer
    Value cannot be null.
    Parameter name: serverSettings
        + CategoryInfo          :
        + FullyQualifiedErrorId : System.ArgumentNullException,Microsoft.Exchange.Management.SystemConfigurationTasks.GetExchangeServer
    [MAILSRV]: PS C:\Users\jdoe\Documents> get-mailbox
    Value cannot be null.
    Parameter name: serverSettings
        + CategoryInfo          :
        + FullyQualifiedErrorId : System.ArgumentNullException,Microsoft.Exchange.Management.RecipientTasks.GetMailbox

    As you can see, none of the Exchange cmdlets are working. What could be the issue?

    Read the article

  • Method extension for safely type convert

    - by outcoldman
    Recently I read a good Russian post with many interesting extension methods, and it reminded me that I have one good extension method too: "safely type convert". I got the idea for this method at my last job. We often write code like this:

    int intValue;
    if (obj == null || !int.TryParse(obj.ToString(), out intValue))
        intValue = 0;

    This is how to safely parse an object to an int. Of course, it would be good to have a unified method for safe casting. I found that the better way is to create extension methods and use them as follows:

    int i;
    i = "1".To<int>();   // i == 1
    i = "1a".To<int>();  // i == 0 (default value of int)
    i = "1a".To(10);     // i == 10 (set as default value 10)
    i = "1".To(10);      // i == 1

    // ********** Nullable sample **************
    int? j;
    j = "1".To<int?>();    // j == 1
    j = "1a".To<int?>();   // j == null
    j = "1a".To<int?>(10); // j == 10
    j = "1".To<int?>(10);  // j == 1

    Read more... (redirect to http://outcoldman.ru)
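    The post leaves the implementation behind the link, but a minimal sketch of such a To<T>() extension might look like the following; the use of Convert.ChangeType and the exact exception handling here are assumptions, not necessarily the author's actual code:

    using System;

    public static class SafeConvertExtensions
    {
        // Safely converts an arbitrary object to T, falling back to a default
        // value when the input is null or not convertible.
        public static T To<T>(this object value, T defaultValue = default(T))
        {
            if (value == null)
                return defaultValue;

            // Unwrap Nullable<T> so Convert.ChangeType receives a concrete type.
            Type targetType = Nullable.GetUnderlyingType(typeof(T)) ?? typeof(T);

            try
            {
                return (T)Convert.ChangeType(value, targetType);
            }
            catch (FormatException)      { return defaultValue; }
            catch (InvalidCastException) { return defaultValue; }
            catch (OverflowException)    { return defaultValue; }
        }
    }

    This reproduces the behavior of the samples above: "1a".To<int>() returns 0, "1a".To<int?>() returns null, and "1a".To(10) returns 10.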

    Read the article

  • OpenGL 2 on Android: native window

    - by ThreaderSlash
    According to the OpenGL ES specification, we have the following definition:

    EGLSurface eglCreateWindowSurface(EGLDisplay display, EGLConfig config, NativeWindowType native_window, EGLint const * attrib_list)

    More details here: http://www.khronos.org/opengles/documentation/opengles1_0/html/eglCreateWindowSurface.html

    And also by definition:

    int32_t ANativeWindow_setBuffersGeometry(ANativeWindow* window, int32_t width, int32_t height, int32_t format);

    More details here: http://mobilepearls.com/labs/native-android-api

    I am running a native Android app on OpenGL ES 2 and debugging it on a Samsung Nexus device. For setting up the 3D scene graph environment, the following variables are defined:

    struct android_app {
        ...
        ANativeWindow* window;
    };

    android_app* mApplication;
    ...
    mApplication = &pApplication;

    And to initialize the app, we run the following:

    ANativeWindow_setBuffersGeometry(mApplication->window, 0, 0, lFormat);
    mSurface = eglCreateWindowSurface(mDisplay, lConfig, mApplication->window, NULL);

    The funny thing is that ANativeWindow_setBuffersGeometry behaves as expected and works fine according to its definition, accepting all the parameters sent to it. But eglCreateWindowSurface does not accept the parameter mApplication->window as it should according to its definition. Instead, it looks for the following input:

    EGLNativeWindowType hWnd;
    mSurface = eglCreateWindowSurface(mDisplay, lConfig, hWnd, NULL);

    As an alternative, I considered using this instead:

    NativeWindowType hWnd = android_createDisplaySurface();

    But the debugger says:

    Function 'android_createDisplaySurface' could not be resolved

    Can someone tell me if there is a way to convert mApplication->window, so that the data from the android_app gets accepted as the window surface?

    Read the article

  • ScrollViewer.EnsureVisible for Windows Phone

    - by Daniel Moth
    In my Translator By Moth app, on both the current and saved pivot pages, the need arose to programmatically scroll to the bottom. In the former case, it is when a translation takes place (if the text is too long, I want to scroll to the bottom of the translation so the user can focus on that, and not their input text for translation). In the latter case, it is when a new translation is saved (it is added to the bottom of the list, so scrolling is required to make it visible). On both pages a ScrollViewer is used. In my exploration of the APIs through IntelliSense and MSDN I could not find a method that auto-scrolls to the bottom. So I hacked together a solution where I added a blank TextBlock to the bottom of each page (within the ScrollViewer, but above the translated TextBlock and the saved list) and tried to make it scroll into view from code. After searching the web I found a little algorithm that did most of what I wanted (sorry, I do not have the reference handy, but thank you whoever it was) that, after minor tweaking, I turned into an extension method for the ScrollViewer that is very easy to use:

    this.Scroller.EnsureVisible(this.BlankText);

    The method itself I share with you here:

    public static void EnsureVisible(this System.Windows.Controls.ScrollViewer scroller, System.Windows.UIElement uiElem)
    {
        System.Diagnostics.Debug.Assert(scroller != null);
        System.Diagnostics.Debug.Assert(uiElem != null);

        scroller.UpdateLayout();

        double maxScrollPos = scroller.ExtentHeight - scroller.ViewportHeight;
        double scrollPos = scroller.VerticalOffset - scroller.TransformToVisual(uiElem).Transform(new System.Windows.Point(0, 0)).Y;

        if (scrollPos > maxScrollPos)
            scrollPos = maxScrollPos;
        else if (scrollPos < 0)
            scrollPos = 0;

        scroller.ScrollToVerticalOffset(scrollPos);
    }

    I am sure there are better ways, but this "worked for me" :-) Comments about this post by Daniel Moth welcome at the original blog.
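    If all you need is "scroll to the very bottom" (rather than bringing an arbitrary element into view), a shorter variant should also work; this is an untested sketch, not from the original post:

    public static void ScrollToBottom(this System.Windows.Controls.ScrollViewer scroller)
    {
        // Recompute ExtentHeight so newly added content is included.
        scroller.UpdateLayout();

        // Offsets past the maximum are clamped by the control, so scrolling
        // to the full extent height lands on the last line of content.
        scroller.ScrollToVerticalOffset(scroller.ExtentHeight);
    }

    This would remove the need for the blank placeholder TextBlock, at the cost of losing the generic "ensure this element is visible" behavior.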

    Read the article

  • Auto-Suggest via &lsquo;Trie&rsquo; (Pre-fix Tree)

    - by Strenium
    Auto-Suggest (Auto-Complete) “thing” has been around for a few years. Here’s my little snippet on the subject. For one of my projects, I had to deal with a non-trivial set of items to be pulled via auto-suggest, used by multiple concurrent users. Simple, dumb iteration through a list in local cache or back-end access didn’t quite cut it. Enter a nifty little structure, perfectly suited for storing and matching verbal data: the “Trie” (http://tinyurl.com/db56g), also known as a Pre-fix Tree:

    “Unlike a binary search tree, no node in the tree stores the key associated with that node; instead, its position in the tree defines the key with which it is associated. All the descendants of a node have a common prefix of the string associated with that node, and the root is associated with the empty string. Values are normally not associated with every node, only with leaves and some inner nodes that correspond to keys of interest.”

    This is a very scalable, performant structure. Though, as usual, something ‘fast’ comes at a cost of ‘size’; fortunately RAM is more plentiful today so I can live with that. I won’t bore you with the detailed algorithmic performance here - Google can do a better job of such. So, here’s a C# implementation of all this. Let’s start with an individual node:

    Trie Node

    /// <summary>
    /// Contains datum of a single trie node.
    /// </summary>
    public class AutoSuggestTrieNode
    {
        public char Value { get; set; }

        /// <summary>
        /// Gets a value indicating whether this instance is a leaf node.
        /// </summary>
        /// <value>
        ///     <c>true</c> if this instance is a leaf node; otherwise, a prefix node <c>false</c>.
        /// </value>
        public bool IsLeafNode { get; private set; }

        public List<AutoSuggestTrieNode> DescendantNodes { get; private set; }

        /// <summary>
        /// Initializes a new instance of the <see cref="AutoSuggestTrieNode"/> class.
        /// </summary>
        /// <param name="value">The phonetic value.</param>
        /// <param name="isLeafNode">if set to <c>true</c> [is leaf node].</param>
        public AutoSuggestTrieNode(char value = ' ', bool isLeafNode = false)
        {
            Value = value;
            IsLeafNode = isLeafNode;

            DescendantNodes = new List<AutoSuggestTrieNode>();
        }

        /// <summary>
        /// Gets the descendant of the pre-fix node with the given value, if any.
        /// </summary>
        /// <param name="descendantValue">The descendant value.</param>
        /// <returns></returns>
        public AutoSuggestTrieNode GetDescendant(char descendantValue)
        {
            return DescendantNodes.FirstOrDefault(descendant => descendant.Value == descendantValue);
        }
    }

    Quite self-explanatory, imho. A node is either a “pre-fix” or a “leaf” node. A “leaf” contains the full “word”, while the “pre-fix” nodes act as indices used for matching the results.

    Ok, now the Trie:

    Trie Structure

    /// <summary>
    /// Contains structure and functionality of an AutoSuggest Trie (Pre-fix Tree)
    /// </summary>
    public class AutoSuggestTrie
    {
        private readonly AutoSuggestTrieNode _root = new AutoSuggestTrieNode();

        /// <summary>
        /// Adds the word to the trie by breaking it up into pre-fix nodes + a leaf node.
        /// </summary>
        /// <param name="word">Phonetic value.</param>
        public void AddWord(string word)
        {
            var currentNode = _root;
            word = word.Trim().ToLower();

            for (int i = 0; i < word.Length; i++)
            {
                var child = currentNode.GetDescendant(word[i]);

                if (child == null) /* this character hasn't yet been indexed in the trie */
                {
                    var newNode = new AutoSuggestTrieNode(word[i], word.Count() - 1 == i);

                    currentNode.DescendantNodes.Add(newNode);
                    currentNode = newNode;
                }
                else
                    currentNode = child; /* this character is already indexed, move down the trie */
            }
        }

        /// <summary>
        /// Gets the suggested matches.
        /// </summary>
        /// <param name="word">The phonetic search value.</param>
        /// <returns></returns>
        public List<string> GetSuggestedMatches(string word)
        {
            var currentNode = _root;
            word = word.Trim().ToLower();

            var indexedNodesValues = new StringBuilder();
            var resultBag = new ConcurrentBag<string>();

            for (int i = 0; i < word.Trim().Length; i++)  /* traverse the trie collecting the closest indexed parent (parent can't be a leaf, obviously) */
            {
                var child = currentNode.GetDescendant(word[i]);

                if (child == null || word.Count() - 1 == i)
                    break; /* done looking, the rest of the characters aren't indexed in the trie */

                indexedNodesValues.Append(word[i]);
                currentNode = child;
            }

            Action<AutoSuggestTrieNode, string> collectAllMatches = null;
            collectAllMatches = (node, aggregatedValue) => /* traverse the trie collecting matching leaf nodes (i.e. "full words") */
                {
                    if (node.IsLeafNode) /* full word */
                        resultBag.Add(aggregatedValue); /* thread-safe write */

                    Parallel.ForEach(node.DescendantNodes, descendantNode => /* asynchronous recursive traversal */
                    {
                        collectAllMatches(descendantNode, String.Format("{0}{1}", aggregatedValue, descendantNode.Value));
                    });
                };

            collectAllMatches(currentNode, indexedNodesValues.ToString());

            return resultBag.OrderBy(o => o).ToList();
        }

        /// <summary>
        /// Gets the total words (leafs) in the trie. Recursive traversal.
        /// </summary>
        public int TotalWords
        {
            get
            {
                int runningCount = 0;

                Action<AutoSuggestTrieNode> traverseAllDecendants = null;
                traverseAllDecendants = n => { runningCount += n.DescendantNodes.Count(o => o.IsLeafNode); n.DescendantNodes.ForEach(traverseAllDecendants); };
                traverseAllDecendants(this._root);

                return runningCount;
            }
        }
    }

    Matching operations and inserts involve traversing the nodes before the right “spot” is found. Inserts need to be synchronous, since the ordering of data matters here. However, matching can be done with parallel traversal using recursion (see collectAllMatches above). Here’s sample usage:

    [TestMethod]
    public void AutoSuggestTest()
    {
        var autoSuggestCache = new AutoSuggestTrie();

        var testInput = @"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Integer nec odio. Praesent libero.
                Sed cursus ante dapibus diam. Sed nisi. Nulla quis sem at nibh elementum imperdiet. Duis sagittis ipsum. Praesent mauris.
Fusce nec tellus sed augue semper porta. Mauris massa. Vestibulum lacinia arcu eget nulla. Class aptent taciti sociosqu ad                 litora torquent per conubia nostra, per inceptos himenaeos. Curabitur sodales ligula in libero. Sed dignissim lacinia nunc.                 Curabitur tortor. Pellentesque nibh. Aenean quam. In scelerisque sem at dolor. Maecenas mattis. Sed convallis tristique sem.                 Proin ut ligula vel nunc egestas porttitor. Morbi lectus risus, iaculis vel, suscipit quis, luctus non, massa. Fusce ac                 turpis quis ligula lacinia aliquet. Mauris ipsum. Nulla metus metus, ullamcorper vel, tincidunt sed, euismod in, nibh. Quisque                 volutpat condimentum velit. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Nam                 nec ante. Sed lacinia, urna non tincidunt mattis, tortor neque adipiscing diam, a cursus ipsum ante quis turpis. Nulla                 facilisi. Ut fringilla. Suspendisse potenti. Nunc feugiat mi a tellus consequat imperdiet. Vestibulum sapien. Proin quam. Etiam                 ultrices. Suspendisse in justo eu magna luctus suscipit. Sed lectus. Integer euismod lacus luctus magna. Quisque cursus, metus                 vitae pharetra auctor, sem massa mattis sem, at interdum magna augue eget diam. Vestibulum ante ipsum primis in faucibus orci                 luctus et ultrices posuere cubilia Curae; Morbi lacinia molestie dui. Praesent blandit dolor. Sed non quam. In vel mi sit amet                 augue congue elementum. Morbi in ipsum sit amet pede facilisis laoreet. Donec lacus nunc, viverra nec.";       testInput.Split(' ').ToList().ForEach(word => autoSuggestCache.AddWord(word));       var testMatches = autoSuggestCache.GetSuggestedMatches("le"); }   ..and the result: That’s it!

    Read the article

  • ASP.NET MVC Validation Complete

    - by Ricardo Peres
    OK, so let’s talk about validation. Most people are probably familiar with the out-of-the-box validation attributes that MVC knows about, from the System.ComponentModel.DataAnnotations namespace, such as EnumDataTypeAttribute, RequiredAttribute, StringLengthAttribute, RangeAttribute, RegularExpressionAttribute, and CompareAttribute from the System.Web.Mvc namespace. All of these validators inherit from ValidationAttribute and perform server-side as well as client-side validation. In order to use them, you must include the JavaScript files MicrosoftMvcValidation.js, jquery.validate.js or jquery.validate.unobtrusive.js, depending on whether you want to use Microsoft’s own library or jQuery. No significant difference exists, but jQuery is more extensible. You can also create your own attribute by inheriting from ValidationAttribute, but if you want to have client-side behavior, you must also implement IClientValidatable (all of the out-of-the-box validation attributes implement it) and supply your own JavaScript validation function that mimics its server-side counterpart. Of course, you must reference the JavaScript file where the declaration function is. Let’s see an example, validating even numbers. First, the validation attribute:

    [Serializable]
    [AttributeUsage(AttributeTargets.Property, AllowMultiple = false, Inherited = true)]
    public class IsEvenAttribute : ValidationAttribute, IClientValidatable
    {
        protected override ValidationResult IsValid(Object value, ValidationContext validationContext)
        {
            Int32 v = Convert.ToInt32(value);

            if (v % 2 == 0)
            {
                return (ValidationResult.Success);
            }
            else
            {
                return (new ValidationResult("Value is not even"));
            }
        }

        #region IClientValidatable Members

        public IEnumerable<ModelClientValidationRule> GetClientValidationRules(ModelMetadata metadata, ControllerContext context)
        {
            yield return (new ModelClientValidationRule() { ValidationType = "iseven", ErrorMessage = "Value is not even" });
        }

        #endregion
    }

    The iseven validation function is declared like this in JavaScript, using jQuery validation:

    jQuery.validator.addMethod('iseven', function (value, element, params)
    {
        return ((parseInt(value) % 2) == 0);
    });

    jQuery.validator.unobtrusive.adapters.add('iseven', [], function (options)
    {
        options.rules['iseven'] = options.params;
        options.messages['iseven'] = options.message;
    });

    Do keep in mind that this is a simple example; for instance, we are not using parameters, which may be required for some more advanced scenarios. As a side note, if you implement a custom validator that also requires a JavaScript function, you’ll probably want to keep them together. One way to achieve this is by including the JavaScript file as an embedded resource in the same assembly where the custom attribute is declared.
    You do this by setting its Build Action to Embedded Resource inside Visual Studio. Then you have to declare an attribute at assembly level, perhaps in the AssemblyInfo.cs file:

    [assembly: WebResource("SomeNamespace.IsEven.js", "text/javascript")]

    In your views, if you want to include a JavaScript file from an embedded resource, you can use this code:

    public static class UrlExtensions
    {
        private static readonly MethodInfo getResourceUrlMethod = typeof(AssemblyResourceLoader).GetMethod("GetWebResourceUrlInternal", BindingFlags.NonPublic | BindingFlags.Static);

        public static IHtmlString Resource<TType>(this UrlHelper url, String resourceName)
        {
            return (Resource(url, typeof(TType).Assembly.FullName, resourceName));
        }

        public static IHtmlString Resource(this UrlHelper url, String assemblyName, String resourceName)
        {
            String resourceUrl = getResourceUrlMethod.Invoke(null, new Object[] { Assembly.Load(assemblyName), resourceName, false, false, null }).ToString();
            return (new HtmlString(resourceUrl));
        }
    }

    And on the view:

    <script src="<%: this.Url.Resource("SomeAssembly", "SomeNamespace.IsEven.js") %>" type="text/javascript"></script>

    Then there’s the CustomValidationAttribute. It allows externalizing your validation logic to another class, so you have to tell it which type and method to use. The method can be static as well as instance; if it is instance, the class cannot be abstract and must have a public parameterless constructor. It can be applied to a property as well as a class. It does not, however, support client-side validation. Let’s see an example declaration:

    [CustomValidation(typeof(ProductValidator), "OnValidateName")]
    public String Name
    {
        get;
        set;
    }

    The validation method needs this signature:

    public static ValidationResult OnValidateName(String name)
    {
        if ((String.IsNullOrWhiteSpace(name) == false) && (name.Length <= 50))
        {
            return (ValidationResult.Success);
        }
        else
        {
            return (new ValidationResult(String.Format("The name has an invalid value: {0}", name), new String[] { "Name" }));
        }
    }

    Note that it can be either static or instance, and it must return a ValidationResult-derived class. ValidationResult.Success is null, so any non-null value is considered a validation error. The single method argument must match the property type to which the attribute is attached, or the class, in case it is applied to a class:

    [CustomValidation(typeof(ProductValidator), "OnValidateProduct")]
    public class Product
    {
    }

    The signature must thus be:

    public static ValidationResult OnValidateProduct(Product product)
    {
    }

    Continuing with attribute-based validation, another possibility is RemoteAttribute. This allows specifying a controller and an action method just for performing the validation of a property or set of properties. This works in a client-side AJAX way and can be very useful. Let’s see an example, starting with the attribute declaration and proceeding to the action method implementation:

    [Remote("Validate", "Validation")]
    public String Username
    {
        get;
        set;
    }

    The controller action method must contain an argument that can be bound to the property:

    public ActionResult Validate(String username)
    {
        return (this.Json(true, JsonRequestBehavior.AllowGet));
    }

    If in your result JSON object you include a string instead of the true value, it will be considered an error, and the validation will fail.
    This string will be displayed as the error message, if you have included it in your view. You can also use the remote validation approach for validating your entire entity, by including all of its properties as included fields in the attribute and having an action method that receives an entity instead of a single property:

    [Remote("Validate", "Validation", AdditionalFields = "Price")]
    public String Name
    {
        get;
        set;
    }

    public Decimal Price
    {
        get;
        set;
    }

    The action method will then be:

    public ActionResult Validate(Product product)
    {
        return (this.Json("Product is not valid", JsonRequestBehavior.AllowGet));
    }

    Only the property to which the attribute is applied and the additional properties referenced by AdditionalFields will be populated in the entity instance received by the validation method. The same rule previously stated applies: if you return anything other than true, it will be used as the validation error message for the entity. The remote validation is triggered automatically, but you can also call it explicitly. In the next example, I am causing the full entity validation; see the call to serialize():

    function validate()
    {
        var form = $('form');
        var data = form.serialize();
        var url = '<%: this.Url.Action("Validate", "Validation") %>';

        var result = $.ajax
        (
            {
                type: 'POST',
                url: url,
                data: data,
                async: false
            }
        ).responseText;

        if (result)
        {
            //error
        }
    }

    Finally, by implementing IValidatableObject, you can implement your validation logic on the object itself; that is, you make it self-validatable. This will only work server-side: the ModelState.IsValid property will be set to false on the controller’s action method if the validation is unsuccessful. Let’s see how to implement it:

    public class Product : IValidatableObject
    {
        public String Name
        {
            get;
            set;
        }

        public Decimal Price
        {
            get;
            set;
        }

        #region IValidatableObject Members

        public IEnumerable<ValidationResult> Validate(ValidationContext validationContext)
        {
            if ((String.IsNullOrWhiteSpace(this.Name) == true) || (this.Name.Length > 50))
            {
                yield return (new ValidationResult(String.Format("The name has an invalid value: {0}", this.Name), new String[] { "Name" }));
            }

            if ((this.Price <= 0) || (this.Price > 100))
            {
                yield return (new ValidationResult(String.Format("The price has an invalid value: {0}", this.Price), new String[] { "Price" }));
            }
        }

        #endregion
    }

    The errors returned will be matched against the model properties through the MemberNames property of the ValidationResult class and will be displayed in their proper labels, if present on the view. On the controller action method, you can check for model validity by looking at ModelState.IsValid, and you can get the actual error messages and related properties by examining all of the entries in the ModelState dictionary:

    Dictionary<String, String> errors = new Dictionary<String, String>();

    foreach (KeyValuePair<String, ModelState> keyValue in this.ModelState)
    {
        String key = keyValue.Key;
        ModelState modelState = keyValue.Value;

        foreach (ModelError error in modelState.Errors)
        {
            errors[key] = error.ErrorMessage;
        }
    }

    And these are the ways to perform data validation in ASP.NET MVC. Don’t forget to use them!

    Read the article

  • Exception Handling

    - by raghu.yadav
    Here are a few links where Andre demonstrated differences-of-handling-jboexception-in and handling-exceptions-in-oracle-ui-shell. In this post, however, we will see how to display the exception in a popup while staying on the same page. I use a similar use case to Andre's, but instead of using the Exception Handling property of the taskflow, we use a popup and invoke it programmatically. This is a dynamic region example where the user can select the jobs or locations links to edit the records of the corresponding tables while staying on the same page, and click commit to save the changes. To generate an exception, we deliberately change commit to CommitAction in the commit action binding code created in the bean (same as Andre), catch the exception, and add a brief description of the exception to #{pageFlowScope.message}. Drop a Popup component after the Commit button, add a dialog within the popup, bind the popup component to the backing bean, and invoke it in the catch clause as shown below.

    public String Commit()
    {
        try
        {
            BindingContainer bindings = getBindings();
            OperationBinding operationBinding = bindings.getOperationBinding("CommitAction");
            Object result = operationBinding.execute();
            if (!operationBinding.getErrors().isEmpty())
            {
                return null;
            }
        }
        catch (NullPointerException e)
        {
            setELValue("#{pageFlowScope.message}", "NullPointerException...");
            e.printStackTrace();
            String popupId = this.getPopup().getClientId(FacesContext.getCurrentInstance());
            PatternsPublicUtil.invokePopup(popupId);
        }
        return null;
    }

    private void setELValue(String el, String value)
    {
        FacesContext facesContext = FacesContext.getCurrentInstance();
        ELContext elContext = facesContext.getELContext();
        ExpressionFactory expressionFactory = facesContext.getApplication().getExpressionFactory();
        ValueExpression valueExp = expressionFactory.createValueExpression(elContext, el, Object.class);
        valueExp.setValue(elContext, value);
    }

    Read the article

  • Entity Framework v1 &ndash; tips and Tricks Part 3

    - by Rohit Gupta
    General Tips on Entity Framework v1 & LINQ to Entities:

    ToTraceString()
    If you need to know the underlying SQL that EF generates for a LINQ to Entities query, use the ToTraceString() method of the ObjectQuery class (or use LINQPad). Note that you need to cast the LINQ to Entities query to ObjectQuery before calling ToTraceString(), as follows:

    string efSQL = ((ObjectQuery)from c in ctx.Contact
                                 where c.Address.Any(a => a.CountryRegion == "US")
                                 select c.ContactID).ToTraceString();

    ================================================================================

    MARS or MultipleActiveResultSets
    When you create an EDM model (EDMX file) from the database using Visual Studio, it generates a connection string with the same name as the EntityContainer in the CSDL. In the connection string so generated, it sets the MultipleActiveResultSets attribute to true by default. So if you are running the following query, it streams multiple readers over the same connection:

    using (BAEntities context = new BAEntities())
    {
        var cons =
            from con in context.Contacts
            where con.FirstName == "Jose"
            select con;
        foreach (var c in cons)
        {
            if (c.AddDate < new System.DateTime(2007, 1, 1))
            {
                c.Addresses.Load();
            }
        }
    }

    =================================================================================

    Explicitly opening and closing the EntityConnection
    When you call ToList() or foreach on a LINQ to Entities query, EF automatically closes the connection after all the records from the query have been consumed. Thus, if you need to run many LINQ to Entities queries over the same connection, explicitly open and close the connection as follows:

    using (BAEntities context = new BAEntities())
    {
        context.Connection.Open();
        var cons = from con in context.Contacts where con.FirstName == "Jose"
                   select con;
        var conList = cons.ToList();
        var allCustomers = from con in context.Contacts.OfType<Customer>()
                           select con;
        var allcustList = allCustomers.ToList();
        context.Connection.Close();
    }

    ======================================================================

    Dispose the ObjectContext only if required
    After you retrieve entities using the ObjectContext, and you are not explicitly disposing of the ObjectContext, ensure that your code consumes all the records from the LINQ to Entities query by calling ToList() or a foreach statement; otherwise, the database connection will remain open until the garbage collector disposes of the ObjectContext. Secondly, if you are making updates to the entities retrieved using LINQ to Entities, ensure that you don't inadvertently dispose of the ObjectContext after the entities are retrieved and before calling SaveChanges(), since you need the SAME ObjectContext to keep track of changes made to the entities (by using ObjectStateEntry objects). So if you do need to explicitly dispose of the ObjectContext, do so only after calling SaveChanges(), and only if you don't need to change-track the entities retrieved any further.

    =======================================================================

    SQL injection attacks under control with EFv1
    LINQ to Entities and LINQ to SQL queries are parameterized before they are sent to the DB, hence they are not vulnerable to SQL injection attacks. Entity SQL may be slightly vulnerable to attacks, since it does not use parameterized queries. However, Entity SQL demands that the query be valid Entity SQL syntax and valid native SQL syntax at the same time.
    So the only way one can do a SQL injection attack is by knowing the SSDL of the EDM model, being able to write correct Entity SQL (note that one cannot append regular SQL, since then the query wouldn't be valid Entity SQL syntax), and appending it to a parameter.

    ======================================================================

    Improving performance
    You can convert the EntitySets and AssociationSets in an EDM model into precompiled views using the edmgen utility. For example, the Customer entity can be converted into a precompiled view using edmgen, and all LINQ to Entities queries against the context.Customer EntitySet will use the precompiled view instead of the EntitySet itself (the same being true for relationships, i.e. the EntityReference and EntityCollection properties of an entity). The advantage is that with precompiled views the performance will be much better. The syntax for generating precompiled views for an existing EF project is:

    edmgen /mode:ViewGeneration /inssdl:BAModel.ssdl /incsdl:BAModel.csdl /inmsl:BAModel.msl /p:Chap14.csproj

    Note that this will only generate precompiled views for EntitySets and associations, not for existing LINQ to Entities queries in the project (for that, use CompiledQuery.Compile<>). Secondly, if you have a LINQ to Entities query that you need to run multiple times, you should precompile the query using the CompiledQuery.Compile method. CompiledQuery.Compile<> accepts a lambda expression as a parameter, which denotes the LINQ to Entities query that you need to precompile. The following is an example of a lambda that we can pass into the CompiledQuery.Compile() method:

    Expression<Func<BAEntities, string, IQueryable<Customer>>> expr = (BAEntities ctx1, string loc) =>
        from c in ctx1.Contacts.OfType<Customer>()
        where c.Reservations.Any(r => r.Trip.Destination.DestinationName == loc)
        select c;

    Then we call the compiled query as follows:

    var query = CompiledQuery.Compile<BAEntities, string, IQueryable<Customer>>(expr);

    using (BAEntities ctx = new BAEntities())
    {
        var loc = "Malta";
        IQueryable<Customer> custs = query.Invoke(ctx, loc);
        var custlist = custs.ToList();
        foreach (var item in custlist)
        {
            Console.WriteLine(item.FullName);
        }
    }

    Note that if you create an ObjectQuery or an Entity SQL query instead of the LINQ to Entities query, you don't need precompilation. For example:

    An example of an Entity SQL query:

    string esql = "SELECT VALUE c from Contacts AS c where c is of(BAGA.Customer) and c.LastName = 'Gupta'";
    ObjectQuery<Customer> custs = CreateQuery<Customer>(esql);

    An example of an ObjectQuery built using query-builder methods:

    from c in Contacts.OfType<Customer>().Where("it.LastName == 'Gupta'")
    select c

    This is because the query plan is cached, and thus the performance improves a bit; however, since the ObjectQuery or Entity SQL query still needs to materialize the results into entities, it will take the same performance hit as LINQ to Entities. However, note that not ALL Entity SQL based or query-builder based ObjectQuery plans are cached. So if you are in doubt, always create a LINQ to Entities compiled query and use that instead.

    ============================================================

    GetObjectStateEntry versus GetObjectByKey
    We can get to the entity referenced by an ObjectStateEntry via its Entity property, and there are helper methods on the ObjectStateManager (osm.TryGetObjectStateEntry) to get the ObjectStateEntry for an entity (for which we know the EntityKey).
    Similarly, the ObjectContext has helper methods to get an entity, i.e. TryGetObjectByKey(). TryGetObjectByKey() uses the GetObjectStateEntry method under the covers to find the object. However, one important difference between these two methods is that TryGetObjectByKey queries the database if it is unable to find the object in the context, whereas TryGetObjectStateEntry only looks in the context for existing entries; it will not make a trip to the database (see the sketch at the end of this post).

    =============================================================

    POCO objects with EFv1
    To create POCO objects that can be used with EFv1, we need to implement 3 key interfaces:
    - IEntityWithKey
    - IEntityWithRelationships
    - IEntityWithChangeTracker

    Implementing IEntityWithKey is not mandatory, but if you don't, you need to explicitly provide values for the EntityKey for various functions (e.g. the functions needed to implement IEntityWithChangeTracker and IEntityWithRelationships). Implementing IEntityWithKey involves exposing a property named EntityKey which returns an EntityKey object. Implementing IEntityWithChangeTracker involves implementing a method named SetChangeTracker, since there can be multiple change trackers (ObjectContexts) existing in memory at the same time:

    public void SetChangeTracker(IEntityChangeTracker changeTracker)
    {
        _changeTracker = changeTracker;
    }

    Additionally, each property in the POCO object needs to notify the change tracker (ObjectContext) that it is updating itself, by calling the EntityMemberChanged and EntityMemberChanging methods on the change tracker. For example:

    public EntityKey EntityKey
    {
        get { return _entityKey; }
        set
        {
            if (_changeTracker != null)
            {
                _changeTracker.EntityMemberChanging("EntityKey");
                _entityKey = value;
                _changeTracker.EntityMemberChanged("EntityKey");
            }
            else
                _entityKey = value;
        }
    }

    ===================== Custom Property ====================================

    [EdmScalarPropertyAttribute(IsNullable = false)]
    public System.DateTime OrderDate
    {
        get { return _orderDate; }
        set
        {
            if (_changeTracker != null)
            {
                _changeTracker.EntityMemberChanging("OrderDate");
                _orderDate = value;
                _changeTracker.EntityMemberChanged("OrderDate");
            }
            else
                _orderDate = value;
        }
    }

    Finally, you also need to create the EntityState property as follows:

    public EntityState EntityState
    {
        get { return _changeTracker.EntityState; }
    }

    IEntityWithRelationships involves creating a property that returns a RelationshipManager object:

    public RelationshipManager RelationshipManager
    {
        get
        {
            if (_relManager == null)
                _relManager = RelationshipManager.Create(this);
            return _relManager;
        }
    }

    ============================================================

    Tip: ProviderManifestToken - change the EDMX file to use SQL 2008 instead of SQL 2005
    To use EF with SQL Server 2008, edit the EDMX file (the raw XML), changing the ProviderManifestToken in the SSDL attributes from "2005" to "2008".

    =============================================================
    With EFv1, we cannot use structs to replace an anonymous type when doing projections in a LINQ to Entities query. While this is supported by LINQ to SQL, it is not by LINQ to Entities, since only parameterless constructors and initializers are supported in LINQ to Entities. For example, the following is not supported with LINQ to Entities (the same works with LINQ to SQL):

    public struct CompanyInfo
    {
        public int ID { get; set; }
        public string Name { get; set; }
    }

    var companies = (from c in dc.Companies
                     where c.CompanyIcon == null
                     select new CompanyInfo { Name = c.CompanyName, ID = c.CompanyId }).ToList();
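    Tying back to the GetObjectStateEntry versus GetObjectByKey section above, here is a minimal sketch contrasting the two lookups; the entity set name and key property are illustrative assumptions based on the BAEntities model used throughout this post:

    using System;
    using System.Data;
    using System.Data.Objects;

    class Program
    {
        static void Main()
        {
            using (BAEntities ctx = new BAEntities())
            {
                var key = new EntityKey("BAEntities.Contacts", "ContactID", 1);

                // Looks in the context first, then falls back to the database.
                object entity;
                if (ctx.TryGetObjectByKey(key, out entity))
                {
                    Console.WriteLine("Found (context or database): {0}", entity);
                }

                // Looks only at entries already tracked by the context;
                // never makes a trip to the database.
                ObjectStateEntry entry;
                if (ctx.ObjectStateManager.TryGetObjectStateEntry(key, out entry))
                {
                    Console.WriteLine("Tracked entity state: {0}", entry.State);
                }
            }
        }
    }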

    Read the article

  • passwd ldap request to ActiveDirectory fails on half of 2500 users

    - by groovehunter
    We just set up Active Directory in my company and imported all Linux users and groups. On the Linux client (configured to ask LDAP in nsswitch.conf): if I do a common ldapsearch against the AD LDAP server, I get the complete number of about 2580 users. But if I do this, it only gets a part of all users, 1221 in number:

    getent passwd | wc -l

    Running it with strace shows what looks like an attempt to reconnect. My ideas were: does the Linux authentication procedure run ldapsearch with a parameter incompatible with AD LDAP? Or perhaps it is an encoding issue; the Windows users are entered in AD with all kinds of characters. Maybe someone could shed light on this and give a hint how to debug it further!?

    Here's our ldap.conf:

    host audc01.mycompany.de audc03.mycompany.de
    base ou=location,dc=mycompany,dc=de
    ldap_version 3
    binddn cn=manager,ou=location,dc=mycompany,dc=de
    bindpw Password
    timelimit 120
    idle_timelimit 3600
    nss_base_passwd cn=users,cn=import,ou=location,dc=mycompany,dc=de?sub
    nss_base_group ou=location,dc=mycompany,dc=de?sub

    # RFC 2307 (AD) mappings
    nss_map_objectclass posixAccount User
    # nss_map_objectclass shadowAccount User
    nss_map_objectclass posixGroup Group
    nss_map_attribute uid sAMAccountName
    nss_map_attribute cn sAMAccountName
    # Display Name
    nss_map_attribute gecos cn
    ## nss_map_attribute homeDirectory unixHomeDirectory
    nss_map_attribute loginShell msSFU30LoginShell

    # PAM attributes
    pam_login_attribute sAMAccountName
    # Location based login
    pam_groupdn CN=Location-AU-Login,OU=au,OU=Location,DC=mycompany,DC=de
    pam_member_attribute msSFU30PosixMember
    ## pam_lookup_policy yes
    pam_filter objectclass=User

    nss_initgroups_ignoreusers avahi,avahi-autoipd,backup,bin,couchdb,daemon,games,gdm,gnats,haldaemon,hplip,irc,kernoops,libuuid,list,lp,mail,man,messagebus,news,proxy,pulse,root,rtkit,saned,speech-dispatcher,statd,sync,sys,syslog,usbmux,uucp,www-data

    And here is the strace output from getent passwd:

    poll([{fd=4, events=POLLIN|POLLPRI|POLLERR|POLLHUP}], 1, 120000) = 1 ([{fd=4, revents=POLLIN}])
    read(4, "0\204\0\0\0A\2\1", 8) = 8
    read(4, "\4e\204\0\0\0\7\n\1\0\4\0\4\0\240\204\0\0\0+0\204\0\0\0%\4\0261.2."..., 63) = 63
    stat64("/etc/ldap.conf", {st_mode=S_IFREG|0644, st_size=1151, ...}) = 0
    geteuid32() = 12560
    getsockname(4, {sa_family=AF_INET, sin_port=htons(60334), sin_addr=inet_addr("10.1.35.51")}, [16]) = 0
    getpeername(4, {sa_family=AF_INET, sin_port=htons(389), sin_addr=inet_addr("10.1.5.81")}, [16]) = 0
    time(NULL) = 1297684722
    rt_sigaction(SIGPIPE, {SIG_DFL, [], 0}, NULL, 8) = 0
    munmap(0xb7617000, 1721) = 0
    close(3) = 0
    rt_sigaction(SIGPIPE, {SIG_IGN, [], 0}, {SIG_DFL, [], 0}, 8) = 0
    rt_sigaction(SIGPIPE, {SIG_DFL, [], 0}, NULL, 8) = 0
    rt_sigaction(SIGPIPE, {SIG_IGN, [], 0}, {SIG_DFL, [], 0}, 8) = 0
    write(4, "0\5\2\1\5B\0", 7) = 7
    shutdown(4, 2 /* send and receive */) = 0
    close(4) = 0
    shutdown(-1, 2 /* send and receive */) = -1 EBADF (Bad file descriptor)
    close(-1) = -1 EBADF (Bad file descriptor)
    exit_group(0) = ?

    Read the article

  • Practical mysql schema advice for eCommerce store - Products & Attributes

    - by Gravy
    I am currently planning my first eCommerce application (MySQL & the Laravel framework). I have various products, which all have different attributes. Describing products very simply: some will have a manufacturer, some will not; some will have a diameter, others will have a width, height and depth, and others will have a volume.

    Option 1: Create a master products table and separate tables for specific product types (polymorphic relations). That way, I will not have any unnecessary NULL fields in the products table.
    Option 2: Create a products table with all possible fields, despite the fact that there will be a lot of NULLs.
    Option 3: Normalise so that each attribute type has its own table.
    Option 4: Create an attributes table, as well as an attribute_values table with the value stored as varchar regardless of the actual data type. The products table would have a many:many relationship with the attributes table.
    Option 5: Put attributes common to all or most products in the products table, and attach attributes specific to a particular category of product to the categories table.

    My thoughts are that I would like to allow easy product filtering and sorting by these attributes. I also want the frontend to be fast, and I am less concerned about the performance of inserting and updating product records. I'm a bit overwhelmed by the range of implementation options and cannot find a suitable answer as to the best approach. Could somebody point me in the right direction? In an ideal world, I would like to offer the kind of functionality seen at http://www.glassesdirect.co.uk/products/ in my eCommerce store. As can be seen in the sidebar there, you can select an attribute to filter the glasses, e.g. male / female or plastic / metal / titanium, etc. Alternatively, should I just dump the MySQL relational database idea and learn MongoDB?

    Read the article

  • Disable Ethernet permanently to speed up boot time

    - by Anwar Shah
    I do not use the wired Ethernet card. It seems to me that Ubuntu always tries at boot time to bring up the network via eth0, which takes some time, and I guess this may slow down the boot process a bit. My dmesg output is below (partial):
    2012-06-11 23:06:47 Ubuntu-KDE kernel [ 1.985592] input: Video Bus as /devices/LNXSYSTM:00/device:00/PNP0A08:00/LNXVIDEO:01/input/input5
    2012-06-11 23:06:47 Ubuntu-KDE kernel [ 1.985651] ACPI: Video Device [GFX0] (multi-head: yes rom: no post: no)
    2012-06-11 23:06:47 Ubuntu-KDE kernel [ 1.985693] [drm] Initialized i915 1.6.0 20080730 for 0000:00:02.0 on minor 0
    2012-06-11 23:06:47 Ubuntu-KDE kernel [ 2.056261] firewire_core: created device fw0: GUID 00023f87af41fd7d, S400
    2012-06-11 23:06:47 Ubuntu-KDE kernel [ 3.710435] EXT4-fs (sda9): mounted filesystem with ordered data mode. Opts: (null)
    A big time gap here.....
    2012-06-11 23:06:47 Ubuntu-KDE kernel [ 13.466642] ADDRCONF(NETDEV_UP): eth0: link is not ready
    2012-06-11 23:06:47 Ubuntu-KDE kernel [ 14.125296] Adding 1050620k swap on /dev/sda6. Priority:-1 extents:1 across:1050620k
    2012-06-11 23:06:47 Ubuntu-KDE kernel [ 14.226952] EXT4-fs (sda9): re-mounted. Opts: (null)
    2012-06-11 23:06:47 Ubuntu-KDE kernel [ 14.335012] snd_hda_intel 0000:00:1b.0: PCI INT A - GSI 22 (level, low) - IRQ 22
    2012-06-11 23:06:47 Ubuntu-KDE kernel [ 14.335091] snd_hda_intel 0000:00:1b.0: irq 45 for MSI/MSI-X
    2012-06-11 23:06:47 Ubuntu-KDE kernel [ 14.335128] snd_hda_intel 0000:00:1b.0: setting latency timer to 64
    2012-06-11 23:06:47 Ubuntu-KDE kernel [ 14.346410] input: Ideapad extra buttons as /devices/platform/ideapad/input/input6
    2012-06-11 23:06:47 Ubuntu-KDE kernel [ 14.428551] input: HDA Intel Headphone as /devices/pci0000:00/0000:00:1b.0/sound/card0/input7
    2012-06-11 23:06:47 Ubuntu-KDE kernel [ 14.436958] cfg80211: Calling CRDA to update world regulatory domain
    2012-06-11 23:06:47 Ubuntu-KDE kernel [ 14.476550] Linux video capture interface: v2.00
    2012-06-11 23:06:47 Ubuntu-KDE kernel [ 14.486385] uvcvideo: Found UVC 1.00 device USB 2.0 Camera (04f2:b008)
    So, my question is: how can I disable the Ethernet card completely, so that the kernel will not try to use it?
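    If the goal is for the kernel never to touch the card, the usual route is to blacklist its driver module. A sketch - e1000e below is a placeholder, substitute whatever driver lspci reports for your NIC:
    # find out which kernel module drives the Ethernet card
    lspci -k | grep -iA3 ethernet
    # blacklist it so it is never loaded at boot (module name is an assumption)
    echo "blacklist e1000e" | sudo tee /etc/modprobe.d/blacklist-ethernet.conf
    sudo update-initramfs -u
    That said, the ten-second gap before ADDRCONF(NETDEV_UP) is more likely network configuration waiting on a link than the driver load itself, so marking eth0 as unmanaged or not auto-started may recover the same seconds with less surgery.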

    Read the article

  • Siebel Troubleshooting : An ODBC error occurred; SBL-GEN-03006: Error calling function: DICFindTable m_pReqTbl

    - by Giri Mandalika
    Symptom: A newly installed Siebel application server fails to start despite successful ODBC connectivity to the database. The SRProc process logs ODBC error messages similar to the following:
    Message: GEN-13, Additional Message: dict-ERR-1109: Unable to read value from export file (Data length (32) Column definition (3)).
    Message: GEN-13, Additional Message: dict-ERR-1107: Unable to read row 0 from export file (UTLDataValRead pBuf, col 4 ).
    GenericLog GenericError 1 0002157.. 11-11-18 13:28 Message: Generated SQL statement:, Additional Message: SQLFetch:
    SELECT RDOBJ.DOCK_ID, RDOBJ.RELATED_DOCK_ID, RDOBJ.SQL_STATEMENT, RDOBJ.CHECK_VISIBILITY, 'N', RDOBJ.COMMENTS, RDOBJ.ACTIVE, RDOBJ.SEQUENCE, RDOBJ.VIS_STRENGTH, RDOBJ.REL_VIS_STRENGTH, RDOBJ.VIS_EVT_COLS
    FROM ORAPERF.S_DOCK_REL_DOBJ RDOBJ, ORAPERF.S_DOCK_OBJECT DOBJ
    WHERE RDOBJ.REPOSITORY_ID = (SELECT ROW_ID FROM ORAPERF.S_REPOSITORY WHERE NAME = ?)
    AND DOBJ.ROW_ID = RDOBJ.DOCK_ID
    AND (DOBJ.INACTIVE_FLG = 'N' OR DOBJ.INACTIVE_FLG IS NULL)
    AND (RDOBJ.INACTIVE_FLG = 'N' OR RDOBJ.INACTIVE_FLG IS NULL)
    Message: Error: An ODBC error occurred, Additional Message: Function: DICGetRDObjects; ODBC operation: SQLFetch
    Message: GEN-13, Additional Message: dict-ERR-1109: Unable to read value from export file (UTLCompressFRead (fseek)).
    Message: GEN-13, Additional Message: dict-ERR-1107: Unable to read row 0 from export file (UTLDataValRead pBuf, col 0 ).
    Message: GEN-10, Additional Message: Calling Function: DICLoadDObjectInfo; Called Function: Calling DICGetRDObjects
    Message: GEN-10, Additional Message: Calling Function: DICLoadDict; Called Function: DICLoadDObjectInfo
    GenericError (srpdb.cpp (860) err=3006 sys=2) SBL-GEN-03006: Error calling function: DICFindTable m_pReqTbl
    (srpsmech.cpp (74) err=3006 sys=0) SBL-GEN-03006: Error calling function: DICFindTable m_pReqTbl
    (srpmtsrv.cpp (107) err=3006 sys=0) SBL-GEN-03006: Error calling function: DICFindTable m_pReqTbl
    (smimtsrv.cpp (1203) err=3006 sys=0) SBL-GEN-03006: Error calling function: DICFindTable m_pReqTbl
    SmiLayerLog Error Terminate process due to unrecoverable error: 3006. (Main Thread)
    An inconsistent or corrupted dictionary file "diccache.dat" is the likely cause.
    Solution:
    1. Stop the application server and manually kill the remaining Siebel-specific processes, e.g.:
       stop_server all
       pkill siebmtsh
       pkill siebproc
       ..
    2. Remove the $SIEBEL_HOME/bin/diccache.dat file. It will be regenerated during application server startup.
    3. Start the application server:
       start_server all

    Read the article

  • Custom Profile Provider with Web Deployment Project

    - by Ben Griswold
    I wrote about implementing a custom profile provider inside of your ASP.NET MVC application yesterday. If you haven’t read the article, don’t sweat it.  Most of the stuff I write is rubbish anyway. Since you have joined me today, though, I might as well offer up a little tip: you can run into trouble, like I did, if you enable your custom profile provider inside of an application which is deployed using a Web Deployment Project.  Everything will run great on your local machine and you’ll probably take an early lunch because you got the code running in no time flat and the build server is happy and all tests pass and, gosh, maybe you’ll just cut out early because it is Friday after all.  But then the first user hits the integration machine and, that’s right, yellow screen of death. Lucky you, just as you’re walking out the door, the user kindly sends the exception message and stack trace:
    Value cannot be null.
    Parameter name: type
    Description: An unhandled exception occurred during the execution of the current web request. Please review the stack trace for more information about the error and where it originated in the code.
    Stack Trace:
    [ArgumentNullException: Value cannot be null. Parameter name: type]
    System.Activator.CreateInstance(Type type, Boolean nonPublic) +2796915
    System.Web.Profile.ProfileBase.CreateMyInstance(String username, Boolean isAuthenticated) +76
    System.Web.Profile.ProfileBase.Create(String username, Boolean isAuthenticated) +312
    User error?  Not this time. Damn! One hour later… you notice the harmless “Treat as library component (remove the App_Code.compiled file)” setting on the Output Assemblies tab of your Web Deployment Project. You have no idea why, but you uncheck it.  You test and everything works great both locally and on the integration machine.  Application users think you’re the best and you’re still going to catch the last half hour of happy hour.  Happy Friday.
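    For context, a rough sketch of the web.config wiring such a setup assumes - the class and type names here are hypothetical, not from the article:
    <profile enabled="true" defaultProvider="CustomProfileProvider"
             inherits="MyApp.Models.MyProfile">
      <providers>
        <clear />
        <add name="CustomProfileProvider"
             type="MyApp.Models.CustomProfileProvider" />
      </providers>
    </profile>
    A plausible reading of the failure, though not verified here: ProfileBase.Create resolves the profile type through the App_Code compilation machinery, so stripping App_Code.compiled during deployment leaves it with a null Type - hence the ArgumentNullException - and keeping the file restores the lookup.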

    Read the article

  • My IIS server won't serve SSL sites to some browsers

    - by sbleon
    (Update: This is now cross-posted at http://stackoverflow.com/questions/3355000. This is the more appropriate forum, but StackOverflow gets a lot more traffic.) I've got an IIS 6.0 server that won't serve pages over SSL to some browsers. In WebKit-based browsers on OS X 10.6, I can't load pages at all. In MSIE 8 on Windows XP SP3, I can load pages, but it will sometimes hang downloading images or sending POSTs.
    Working: Firefox 3.6 (OS X + Windows), Chrome (Windows)
    Partially working: MSIE 8 (works sometimes, but hangs, especially on POSTs)
    Not working: Chrome 5 (OS X), Safari 5 (OS X), Mobile Safari (iOS 4)
    On OS X (the easiest platform for me to test on), Chrome and Firefox both negotiate the same TLS cipher, but Chrome hangs on or after the post-negotiation handshake. Chrome packet capture (via ssldump):
    1 1 0.0485 (0.0485) C>S Handshake ClientHello Version 3.1 cipher suites Unknown value 0xc00a Unknown value 0xc009 Unknown value 0xc007 Unknown value 0xc008 Unknown value 0xc013 Unknown value 0xc014 Unknown value 0xc011 Unknown value 0xc012 Unknown value 0xc004 Unknown value 0xc005 Unknown value 0xc002 Unknown value 0xc003 Unknown value 0xc00e Unknown value 0xc00f Unknown value 0xc00c Unknown value 0xc00d Unknown value 0x2f TLS_RSA_WITH_RC4_128_SHA TLS_RSA_WITH_RC4_128_MD5 Unknown value 0x35 TLS_RSA_WITH_3DES_EDE_CBC_SHA Unknown value 0x32 Unknown value 0x33 Unknown value 0x38 Unknown value 0x39 TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA compression methods NULL
    1 2 0.3106 (0.2620) S>C Handshake ServerHello Version 3.1 session_id[32]= bb 0e 00 00 7a 7e 07 50 5e 78 48 cf 43 5a f7 4d d2 ed 72 8f ff 1d 9e 74 66 74 03 b3 bb 92 8d eb cipherSuite TLS_RSA_WITH_RC4_128_MD5 compressionMethod NULL Certificate ServerHelloDone
    1 3 0.3196 (0.0090) C>S Handshake ClientKeyExchange
    1 4 0.3197 (0.0000) C>S ChangeCipherSpec
    1 5 0.3197 (0.0000) C>S Handshake
    [hang, no more data transmitted]
    Firefox packet capture:
    1 1 0.0485 (0.0485) C>S Handshake ClientHello Version 3.1 resume [32]= 14 03 00 00 4e 28 de aa da 7a 25 87 25 32 f3 a7 ae 4c 2d a0 e4 57 cc dd d7 0e d7 82 19 f7 8f b9 cipher suites Unknown value 0xff Unknown value 0xc00a Unknown value 0xc014 Unknown value 0x88 Unknown value 0x87 Unknown value 0x39 Unknown value 0x38 Unknown value 0xc00f Unknown value 0xc005 Unknown value 0x84 Unknown value 0x35 Unknown value 0xc007 Unknown value 0xc009 Unknown value 0xc011 Unknown value 0xc013 Unknown value 0x45 Unknown value 0x44 Unknown value 0x33 Unknown value 0x32 Unknown value 0xc00c Unknown value 0xc00e Unknown value 0xc002 Unknown value 0xc004 Unknown value 0x96 Unknown value 0x41 TLS_RSA_WITH_RC4_128_MD5 TLS_RSA_WITH_RC4_128_SHA Unknown value 0x2f Unknown value 0xc008 Unknown value 0xc012 TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA Unknown value 0xc00d Unknown value 0xc003 Unknown value 0xfeff TLS_RSA_WITH_3DES_EDE_CBC_SHA compression methods NULL
    1 2 0.0983 (0.0497) S>C Handshake ServerHello Version 3.1 session_id[32]= 14 03 00 00 4e 28 de aa da 7a 25 87 25 32 f3 a7 ae 4c 2d a0 e4 57 cc dd d7 0e d7 82 19 f7 8f b9 cipherSuite TLS_RSA_WITH_RC4_128_MD5 compressionMethod NULL
    1 3 0.0983 (0.0000) S>C ChangeCipherSpec
    1 4 0.0983 (0.0000) S>C Handshake
    1 5 0.1019 (0.0035) C>S ChangeCipherSpec
    1 6 0.1019 (0.0000) C>S Handshake
    1 7 0.1019 (0.0000) C>S application_data
    1 8 0.2460 (0.1440) S>C application_data
    1 9 0.3108 (0.0648) S>C application_data
    1 10 0.3650 (0.0542) S>C application_data
    1 11 0.4188 (0.0537) S>C application_data
    1 12 0.4580 (0.0392) S>C application_data
    1 13 0.4831 (0.0251) S>C application_data
    [etc]
    Update: Here's a Wireshark capture from the server end. What's going on with those two much-delayed RST packets? Is that just IIS terminating what it perceives as a non-responsive connection?
    19 10.129450 67.249.xxx.xxx 10.100.xxx.xx TCP 50653 > https [SYN] Seq=0 Win=65535 Len=0 MSS=1460 WS=3 TSV=699250189 TSER=0
    20 10.129517 10.100.xxx.xx 67.249.xxx.xxx TCP https > 50653 [SYN, ACK] Seq=0 Ack=1 Win=16384 Len=0 MSS=1460 WS=0 TSV=0 TSER=0
    21 10.168596 67.249.xxx.xxx 10.100.xxx.xx TCP 50653 > https [ACK] Seq=1 Ack=1 Win=524280 Len=0 TSV=699250189 TSER=0
    22 10.172950 67.249.xxx.xxx 10.100.xxx.xx TLSv1 Client Hello
    23 10.173267 10.100.xxx.xx 67.249.xxx.xxx TCP [TCP segment of a reassembled PDU]
    24 10.173297 10.100.xxx.xx 67.249.xxx.xxx TCP [TCP segment of a reassembled PDU]
    25 10.385180 67.249.xxx.xxx 10.100.xxx.xx TCP 50653 > https [ACK] Seq=148 Ack=2897 Win=524280 Len=0 TSV=699250191 TSER=163006
    26 10.385235 10.100.xxx.xx 67.249.xxx.xxx TLSv1 Server Hello, Certificate, Server Hello Done
    27 10.424682 67.249.xxx.xxx 10.100.xxx.xx TCP 50653 > https [ACK] Seq=148 Ack=4215 Win=524280 Len=0 TSV=699250192 TSER=163008
    28 10.435245 67.249.xxx.xxx 10.100.xxx.xx TLSv1 Client Key Exchange
    29 10.438522 67.249.xxx.xxx 10.100.xxx.xx TLSv1 Change Cipher Spec
    30 10.438553 10.100.xxx.xx 67.249.xxx.xxx TCP https > 50653 [ACK] Seq=4215 Ack=421 Win=65115 Len=0 TSV=163008 TSER=699250192
    31 10.449036 67.249.xxx.xxx 10.100.xxx.xx TLSv1 Encrypted Handshake Message
    32 10.580652 10.100.xxx.xx 67.249.xxx.xxx TCP https > 50653 [ACK] Seq=4215 Ack=458 Win=65078 Len=0 TSV=163010 TSER=699250192
    7312 57.315338 10.100.xxx.xx 67.249.xxx.xxx TCP https > 50644 [RST, ACK] Seq=1 Ack=1 Win=0 Len=0
    19531 142.316425 10.100.xxx.xx 67.249.xxx.xxx TCP https > 50653 [RST, ACK] Seq=4215 Ack=458 Win=0 Len=0
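    Since only some TLS stacks hang, it helps to take the browsers out of the picture and replay the handshake with the exact cipher they negotiated. A quick probe from an affected OS X box - the hostname is a placeholder:
    # RC4-MD5 is OpenSSL's name for TLS_RSA_WITH_RC4_128_MD5, the suite both captures show
    openssl s_client -connect your-iis-host:443 -tls1 -cipher RC4-MD5
    If that also stalls after the ClientKeyExchange/Finished exchange, the problem is server-side rather than a Chrome bug; the two late RSTs in the Wireshark trace would then just be IIS reaping connections whose handshakes never completed - a guess consistent with, but not proven by, the captures.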

    Read the article

  • Glassfish V3 won't start

    - by Zakaria
    Hi everybody, I installed NetBeans 6.8 and tried to run the GlassFish V3 server. I'm working under Windows Vista, 32 bits. At first, it wouldn't run. Then I modified the c:\Windows\System32\drivers\etc\hosts file and put the following line into it:
    127.0.0.1 localhost
    Now when I run the GlassFish V3 server, no error is shown, but only "INFO" lines are displayed:
    3 avr. 2010 19:23:19 com.sun.enterprise.glassfish.bootstrap.ASMain main
    INFO: Launching GlassFish on Felix platform
    Welcome to Felix
    ================
    INFO: Perform lazy SSL initialization for the listener 'http-listener-2'
    INFO: Starting Grizzly Framework 1.9.18-k - Sat Apr 03 19:23:24 CEST 2010
    INFO: Starting Grizzly Framework 1.9.18-k - Sat Apr 03 19:23:25 CEST 2010
    INFO: Grizzly Framework 1.9.18-k started in: 423ms listening on port 35127
    INFO: GlassFish v3 (74.2) startup time : Felix(4456ms) startup services(1709ms) total(6165ms)
    INFO: Grizzly Framework 1.9.18-k started in: 459ms listening on port 35116
    INFO: Grizzly Framework 1.9.18-k started in: 428ms listening on port 35155
    INFO: Grizzly Framework 1.9.18-k started in: 470ms listening on port 35160
    INFO: Grizzly Framework 1.9.18-k started in: 513ms listening on port 35159
    INFO: javassist.util.proxy.ProxyFactory.classLoaderProvider = org.glassfish.weld.WeldActivator$GlassFishClassLoaderProvider@5be8f4
    INFO: Hibernate Validator bean-validator-3.0-JBoss-4.0.2
    INFO: Binding RMI port to *:35165
    INFO: Instantiated an instance of org.hibernate.validator.engine.resolver.JPATraversableResolver.
    INFO: JMXStartupService: Started JMXConnector, JMXService URL = service:jmx:rmi://PC-de-Charlotte:35165/jndi/rmi://PC-de-Charlotte:35165/jmxrmi
    INFO: Using com.sun.enterprise.transaction.jts.JavaEETransactionManagerJTSDelegate as the delegate
    INFO: [Thread[GlassFish Kernel Main Thread,5,main]] started
    INFO: Grizzly Framework 1.9.18-k started in: 150ms listening on port 35159
    INFO: Perform lazy SSL initialization for the listener 'http-listener-2'
    INFO: {felix.fileinstall.poll (ms) = 5000, felix.fileinstall.dir = C:\Program Files\sges-v3\glassfish\modules\autostart, felix.fileinstall.debug = 1, felix.fileinstall.bundles.new.start = true, felix.fileinstall.tmpdir = C:\Users\CHARLO~1\AppData\Local\Temp\fileinstall-330907148519261411, felix.fileinstall.filter = null}
    INFO: {felix.fileinstall.poll (ms) = 5000, felix.fileinstall.dir = C:\Users\Charlotte\.netbeans\6.8\GlassFish_v3\autodeploy\bundles, felix.fileinstall.debug = 1, felix.fileinstall.bundles.new.start = true, felix.fileinstall.tmpdir = C:\Users\CHARLO~1\AppData\Local\Temp\fileinstall-2938963288421854459, felix.fileinstall.filter = null}
    INFO: Grizzly Framework 1.9.18-k started in: 95ms listening on port 35160
    INFO: Updating configuration from org.apache.felix.fileinstall-autodeploy-bundles.cfg
    INFO: Installed C:\Program Files\sges-v3\glassfish\modules\autostart\org.apache.felix.fileinstall-autodeploy-bundles.cfg
    INFO: {felix.fileinstall.poll (ms) = 5000, felix.fileinstall.dir = C:\Users\Charlotte\.netbeans\6.8\GlassFish_v3\autodeploy\bundles, felix.fileinstall.debug = 1, felix.fileinstall.bundles.new.start = true, felix.fileinstall.tmpdir = C:\Users\CHARLO~1\AppData\Local\Temp\fileinstall-6474085409014899009, felix.fileinstall.filter = null}
    There is no message such as "GlassFish started"! So when I try to access the admin web interface at localhost:4848 (or localhost:8080 or localhost:8181), it doesn't work. What should I do? Thank you very much. Regards.
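    One way to see past NetBeans' filtered console (a suggestion, not from the original post): start the domain by hand with verbose output, which keeps the server in the foreground and prints the full server.log, including any listener bind failures. A sketch assuming the default domain1 and the install path visible in the log:
    cd "C:\Program Files\sges-v3\glassfish\bin"
    asadmin start-domain --verbose domain1
    Note that the Grizzly listeners above bind to ports 351xx rather than 8080/4848, which suggests the NetBeans-registered domain overrides the standard ports; the domain's domain.xml (under glassfish\domains\domain1\config) records which ports it actually uses.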

    Read the article

  • Can't add network printer with system-config-printer package

    - by Erick David Ruiz Coronel
    Hello, I'm new here and I don't know if I'm doing this right, but I hope so. I have a printer connected to a Windows 8 machine. I also had Ubuntu 13.04, and it worked fine when I printed from Linux to Windows, but when I upgraded to 13.10 my printer didn't work. I removed it thinking that would fix it, but then I couldn't add the printer again. I reinstalled CUPS and the system-config-printer-gnome package, but that didn't work either. Here is the terminal log:
    erick@Tauro:~$ system-config-printer
    Caught non-fatal exception.  Traceback:
    File "/usr/share/system-config-printer/probe_printer.py", line 255, in _do_find
      fn ()
    File "/usr/share/system-config-printer/probe_printer.py", line 367, in _probe_hplip
      stderr=null)
    File "/usr/lib/python2.7/subprocess.py", line 709, in __init__
      errread, errwrite)
    File "/usr/lib/python2.7/subprocess.py", line 1326, in _execute_child
      raise child_exception
    OSError: [Errno 2] No existe el archivo o el directorio
    Continuing anyway..
    Traceback (most recent call last):
    File "/usr/share/system-config-printer/newprinter.py", line 912, in on_btnNPForward_clicked
      self.nextNPTab()
    File "/usr/share/system-config-printer/newprinter.py", line 1064, in nextNPTab
      stderr=file("/dev/null"))
    File "/usr/lib/python2.7/subprocess.py", line 709, in __init__
      errread, errwrite)
    File "/usr/lib/python2.7/subprocess.py", line 1326, in _execute_child
      raise child_exception
    OSError: [Errno 2] No existe el archivo o el directorio
    Any suggestions please? C:
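    The OSError is ENOENT - "No existe el archivo o el directorio" is Spanish for "No such file or directory" - raised when subprocess tries to exec a helper binary that isn't installed. The first traceback is in _probe_hplip, so a plausible guess (not confirmed by the log) is that one of HPLIP's probe tools is missing, and the SMB browse path needs smbclient as well. A sketch of a first fix, assuming Ubuntu 13.10 package names:
    sudo apt-get install --reinstall hplip smbclient python-smbc
    # then rerun from a terminal and watch which command it tries to exec
    system-config-printer
    If the traceback persists, running strace -f -e trace=execve system-config-printer would name the exact missing binary.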

    Read the article
