Search Results

Search found 6054 results on 243 pages for 'git extensions'.

  • CCSpriteHole in cocos2d 2.0?

    - by rakkarage
    i was using this cocos2d class CCSpriteHole in cocos2d 1.0 fine... http://jpsarda.tumblr.com/post/15779708304/new-cocos2d-iphone-extensions-a-progress-bar-and-a i am trying to convert it to cocos2d 2.0... i got it to compile by changing glVertexPointer to glVertexAttribPointer like in the 2.0 version of CCSpriteScale9 here http://jpsarda.tumblr.com/post/9162433577/scale9grid-for-cocos2d and changing contentSizeInPixels_ to contentSize_... -(id) init { if( (self=[super init]) ) { opacityModifyRGB_ = YES; opacity_ = 255; color_ = colorUnmodified_ = ccWHITE; capSize=capSizeInPixels=CGSizeZero; //Not used blendFunc_.src = CC_BLEND_SRC; blendFunc_.dst = CC_BLEND_DST; // update texture (calls updateBlendFunc) [self setTexture:nil]; // default transform anchor anchorPoint_ = ccp(0.5f, 0.5f); vertexDataCount=24; vertexData = (ccV2F_C4F_T2F*) malloc(vertexDataCount * sizeof(ccV2F_C4F_T2F)); [self setTextureRectInPixels:CGRectZero untrimmedSize:CGSizeZero]; } return self; } -(id) initWithTexture:(CCTexture2D*)texture rect:(CGRect)rect { NSAssert(texture!=nil, @"Invalid texture for sprite"); // IMPORTANT: [self init] and not [super init]; if( (self = [self init]) ) { [self setTexture:texture]; [self setTextureRect:rect]; } return self; } -(id) initWithTexture:(CCTexture2D*)texture { NSAssert(texture!=nil, @"Invalid texture for sprite"); CGRect rect = CGRectZero; rect.size = texture.contentSize; return [self initWithTexture:texture rect:rect]; } -(id) initWithFile:(NSString*)filename { NSAssert(filename!=nil, @"Invalid filename for sprite"); CCTexture2D *texture = [[CCTextureCache sharedTextureCache] addImage: filename]; if( texture ) return [self initWithTexture:texture]; return nil; } +(id)spriteWithFile:(NSString*)f { return [[self alloc] initWithFile:f]; } - (void) dealloc { if (vertexData) free(vertexData); } -(void) updateColor { ccColor4F color4; color4.r=(float)color_.r/255.0f; color4.g=(float)color_.g/255.0f; color4.b=(float)color_.b/255.0f; color4.a=(float)opacity_/255.0f; for (int i=0; i<vertexDataCount; i++) { vertexData[i].colors=color4; } } -(void)updateTextureCoords:(CGRect)rect { CCTexture2D *tex = texture_; if(!tex) return; float atlasWidth = (float)tex.pixelsWide; float atlasHeight = (float)tex.pixelsHigh; float left,right,top,bottom; left = rect.origin.x/atlasWidth; right = left + rect.size.width/atlasWidth; top = rect.origin.y/atlasHeight; bottom = top + rect.size.height/atlasHeight; // // |/|/|/| // CGSize capTexCoordsSize=CGSizeMake(capSizeInPixels.width/atlasWidth, capSizeInPixels.height/atlasHeight); // From left to right //Top band // Left vertexData[0].texCoords=(ccTex2F){left,top}; vertexData[1].texCoords=(ccTex2F){left,top+capTexCoordsSize.height}; vertexData[2].texCoords=(ccTex2F){left+capTexCoordsSize.width,top}; vertexData[3].texCoords=(ccTex2F){left+capTexCoordsSize.width,top+capTexCoordsSize.height}; // Center vertexData[4].texCoords=(ccTex2F){right-capTexCoordsSize.width,top}; vertexData[5].texCoords=(ccTex2F){right-capTexCoordsSize.width,top+capTexCoordsSize.height}; // Right vertexData[6].texCoords=(ccTex2F){right,top}; vertexData[7].texCoords=(ccTex2F){right,top+capTexCoordsSize.height}; //Center band // Left vertexData[8].texCoords=(ccTex2F){left,bottom-capTexCoordsSize.height}; vertexData[9].texCoords=(ccTex2F){left,top+capTexCoordsSize.height}; vertexData[10].texCoords=(ccTex2F){left+capTexCoordsSize.width,bottom-capTexCoordsSize.height}; vertexData[11].texCoords=(ccTex2F){left+capTexCoordsSize.width,top+capTexCoordsSize.height}; // Center 
vertexData[12].texCoords=(ccTex2F){right-capTexCoordsSize.width,bottom-capTexCoordsSize.height}; vertexData[13].texCoords=(ccTex2F){right-capTexCoordsSize.width,top+capTexCoordsSize.height}; // Right vertexData[14].texCoords=(ccTex2F){right,bottom-capTexCoordsSize.height}; vertexData[15].texCoords=(ccTex2F){right,top+capTexCoordsSize.height}; //Bottom band //Left vertexData[16].texCoords=(ccTex2F){left,bottom}; vertexData[17].texCoords=(ccTex2F){left,bottom-capTexCoordsSize.height}; vertexData[18].texCoords=(ccTex2F){left+capTexCoordsSize.width,bottom}; vertexData[19].texCoords=(ccTex2F){left+capTexCoordsSize.width,bottom-capTexCoordsSize.height}; // Center vertexData[20].texCoords=(ccTex2F){right-capTexCoordsSize.width,bottom}; vertexData[21].texCoords=(ccTex2F){right-capTexCoordsSize.width,bottom-capTexCoordsSize.height}; // Right vertexData[22].texCoords=(ccTex2F){right,bottom}; vertexData[23].texCoords=(ccTex2F){right,bottom-capTexCoordsSize.height}; } -(void) updateVertices { float left=0; //-spriteSizeInPixels.width*0.5f; float right=left+contentSize_.width; float bottom=0; //-spriteSizeInPixels.height*0.5f; float top=bottom+contentSize_.height; float holeLeft=holeRect.origin.x*CC_CONTENT_SCALE_FACTOR(); float holeRight=holeLeft+holeRect.size.width*CC_CONTENT_SCALE_FACTOR(); float holeBottom=holeRect.origin.y*CC_CONTENT_SCALE_FACTOR(); float holeTop=holeBottom+holeRect.size.height*CC_CONTENT_SCALE_FACTOR(); // // |/|/|/| // // From left to right //Top band // Left vertexData[0].vertices=(ccVertex2F){left,top}; vertexData[1].vertices=(ccVertex2F){left,holeTop}; vertexData[2].vertices=(ccVertex2F){holeLeft,top}; vertexData[3].vertices=(ccVertex2F){holeLeft,holeTop}; // Center vertexData[4].vertices=(ccVertex2F){holeRight,top}; vertexData[5].vertices=(ccVertex2F){holeRight,holeTop}; // Right vertexData[6].vertices=(ccVertex2F){right,top}; vertexData[7].vertices=(ccVertex2F){right,holeTop}; //Center band // Left vertexData[8].vertices=(ccVertex2F){left,holeBottom}; vertexData[9].vertices=(ccVertex2F){left,holeTop}; vertexData[10].vertices=(ccVertex2F){holeLeft,holeBottom}; vertexData[11].vertices=(ccVertex2F){holeLeft,holeTop}; // Center vertexData[12].vertices=(ccVertex2F){holeRight,holeBottom}; vertexData[13].vertices=(ccVertex2F){holeRight,holeTop}; // Right vertexData[14].vertices=(ccVertex2F){right,holeBottom}; vertexData[15].vertices=(ccVertex2F){right,holeTop}; //Bottom band //Left vertexData[16].vertices=(ccVertex2F){left,bottom}; vertexData[17].vertices=(ccVertex2F){left,holeBottom}; vertexData[18].vertices=(ccVertex2F){holeLeft,bottom}; vertexData[19].vertices=(ccVertex2F){holeLeft,holeBottom}; // Center vertexData[20].vertices=(ccVertex2F){holeRight,bottom}; vertexData[21].vertices=(ccVertex2F){holeRight,holeBottom}; // Right vertexData[22].vertices=(ccVertex2F){right,bottom}; vertexData[23].vertices=(ccVertex2F){right,holeBottom}; } -(void) setHole:(CGRect)r inRect:(CGRect)totalSurface { holeRect=r; self.contentSize=totalSurface.size; holeRect.origin=ccpSub(holeRect.origin,totalSurface.origin); CGPoint holeCenter=ccp(holeRect.origin.x+holeRect.size.width*0.5f,holeRect.origin.y+holeRect.size.height*0.5f); self.anchorPoint=ccp(holeCenter.x/contentSize_.width,holeCenter.y/contentSize_.height); //[self updateTextureCoords:rectInPixels_]; [self updateVertices]; [self updateColor]; } -(void) draw { BOOL newBlend = NO; if( blendFunc_.src != CC_BLEND_SRC || blendFunc_.dst != CC_BLEND_DST ) { newBlend = YES; glBlendFunc( blendFunc_.src, blendFunc_.dst ); } glBindTexture(GL_TEXTURE_2D, 
[texture_ name]); glVertexAttribPointer(kCCVertexAttrib_Position, 2, GL_FLOAT, GL_FALSE, sizeof(ccV2F_C4F_T2F), &vertexData[0].vertices); glVertexAttribPointer(kCCVertexAttrib_TexCoords, 2, GL_FLOAT, GL_FALSE, sizeof(ccV2F_C4F_T2F), &vertexData[0].texCoords); glVertexAttribPointer(kCCVertexAttrib_Color, 4, GL_FLOAT, GL_FALSE, sizeof(ccV2F_C4F_T2F), &vertexData[0].colors); glDrawArrays(GL_TRIANGLE_STRIP, 0, 8); glVertexAttribPointer(kCCVertexAttrib_Position, 2, GL_FLOAT, GL_FALSE, sizeof(ccV2F_C4F_T2F), &vertexData[8].vertices); glVertexAttribPointer(kCCVertexAttrib_TexCoords, 2, GL_FLOAT, GL_FALSE, sizeof(ccV2F_C4F_T2F), &vertexData[8].texCoords); glVertexAttribPointer(kCCVertexAttrib_Color, 4, GL_FLOAT, GL_FALSE, sizeof(ccV2F_C4F_T2F), &vertexData[8].colors); glDrawArrays(GL_TRIANGLE_STRIP, 0, 8); glVertexAttribPointer(kCCVertexAttrib_Position, 2, GL_FLOAT, GL_FALSE, sizeof(ccV2F_C4F_T2F), &vertexData[16].vertices); glVertexAttribPointer(kCCVertexAttrib_TexCoords, 2, GL_FLOAT, GL_FALSE, sizeof(ccV2F_C4F_T2F), &vertexData[16].texCoords); glVertexAttribPointer(kCCVertexAttrib_Color, 4, GL_FLOAT, GL_FALSE, sizeof(ccV2F_C4F_T2F), &vertexData[16].colors); glDrawArrays(GL_TRIANGLE_STRIP, 0, 8); if( newBlend ) glBlendFunc(CC_BLEND_SRC, CC_BLEND_DST); } -(void)setTextureRectInPixels:(CGRect)rect untrimmedSize:(CGSize)untrimmedSize { rectInPixels_ = rect; rect_ = CC_RECT_PIXELS_TO_POINTS( rect ); //[self setContentSizeInPixels:untrimmedSize]; [self updateTextureCoords:rectInPixels_]; } -(void)setTextureRect:(CGRect)rect { CGRect rectInPixels = CC_RECT_POINTS_TO_PIXELS( rect ); [self setTextureRectInPixels:rectInPixels untrimmedSize:rectInPixels.size]; } // // RGBA protocol // #pragma mark CCSpriteHole - RGBA protocol -(GLubyte) opacity { return opacity_; } -(void) setOpacity:(GLubyte) anOpacity { opacity_ = anOpacity; // special opacity for premultiplied textures if( opacityModifyRGB_ ) [self setColor: (opacityModifyRGB_ ? colorUnmodified_ : color_ )]; [self updateColor]; } - (ccColor3B) color { if(opacityModifyRGB_){ return colorUnmodified_; } return color_; } -(void) setColor:(ccColor3B)color3 { color_ = colorUnmodified_ = color3; if( opacityModifyRGB_ ){ color_.r = color3.r * opacity_/255; color_.g = color3.g * opacity_/255; color_.b = color3.b * opacity_/255; } [self updateColor]; } -(void) setOpacityModifyRGB:(BOOL)modify { ccColor3B oldColor = self.color; opacityModifyRGB_ = modify; self.color = oldColor; } -(BOOL) doesOpacityModifyRGB { return opacityModifyRGB_; } #pragma mark CCSpriteHole - CocosNodeTexture protocol -(void) updateBlendFunc { if( !texture_ || ! [texture_ hasPremultipliedAlpha] ) { blendFunc_.src = GL_SRC_ALPHA; blendFunc_.dst = GL_ONE_MINUS_SRC_ALPHA; [self setOpacityModifyRGB:NO]; } else { blendFunc_.src = CC_BLEND_SRC; blendFunc_.dst = CC_BLEND_DST; [self setOpacityModifyRGB:YES]; } } -(void) setTexture:(CCTexture2D*)texture { // accept texture==nil as argument NSAssert( !texture || [texture isKindOfClass:[CCTexture2D class]], @"setTexture expects a CCTexture2D. Invalid argument"); texture_ = texture; [self updateBlendFunc]; } -(CCTexture2D*) texture { return texture_; } @end but now positioning and scaling seem to not work? and it starts in the wrong position... but changing the opacity still works. so i was wondering if anyone can see why my 2.0 version is not working? or if maybe there is a better way to do a sprite hole with cocos2d/opengl 2.0? shaders? thanks
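
    A likely culprit, offered as a hedged sketch: in cocos2d 2.0 the fixed-function matrix stack is gone, so a custom draw method must bind a GL program and upload the node's transform itself. That would explain why position and scale stop working (they were applied through the 1.x matrix stack) while opacity still works (updateColor writes it straight into the vertex colors). A minimal sketch, assuming the stock position/texture/color shader that ships with cocos2d 2.x (CC_NODE_DRAW_SETUP, ccGLBindTexture2D and kCCShader_PositionTextureColor come from the 2.x headers):

        // In init, give the node a shader program:
        // self.shaderProgram = [[CCShaderCache sharedShaderCache]
        //                           programForKey:kCCShader_PositionTextureColor];

        -(void) draw
        {
            CC_NODE_DRAW_SETUP();  // binds the shader and uploads the MVP matrix

            ccGLBlendFunc(blendFunc_.src, blendFunc_.dst);
            ccGLBindTexture2D([texture_ name]);
            ccGLEnableVertexAttribs(kCCVertexAttribFlag_PosColorTex);

            // Same three-band triangle-strip layout as the code above.
            for (int band = 0; band < 3; band++) {
                int i = band * 8;
                glVertexAttribPointer(kCCVertexAttrib_Position, 2, GL_FLOAT, GL_FALSE,
                                      sizeof(ccV2F_C4F_T2F), &vertexData[i].vertices);
                glVertexAttribPointer(kCCVertexAttrib_TexCoords, 2, GL_FLOAT, GL_FALSE,
                                      sizeof(ccV2F_C4F_T2F), &vertexData[i].texCoords);
                glVertexAttribPointer(kCCVertexAttrib_Color, 4, GL_FLOAT, GL_FALSE,
                                      sizeof(ccV2F_C4F_T2F), &vertexData[i].colors);
                glDrawArrays(GL_TRIANGLE_STRIP, 0, 8);
            }
        }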

  • CSS Dropdown Menu issues

    - by Simon Hume
    Can anyone help with a small problem. I've got a nice simple CSS dropdown menu http://www.cinderellahair.co.uk/new/CSSDropdown.html The problem I have is when you rollover a menu item that has children which are wider than the content, it pushes the whole menu right. Aside of shortening the child menu links down, is there any tweak I can make to my CSS to stop this happening? CSS Code: /* General */ #cssdropdown, #cssdropdown ul { list-style: none; } #cssdropdown, #cssdropdown * { padding: 0; margin: 0; } #cssdropdown {padding:43px 0px 0px 0px;} /* Head links */ #cssdropdown li.headlink { margin:0px 40px 0px -1px; float: left; background-color: #e9e9e9;} #cssdropdown li.headlink a { display: block; padding: 0px 0px 0px 5px; text-decoration:none; } #cssdropdown li.headlink a:hover { text-decoration:underline; } /* Child lists and links */ #cssdropdown li.headlink ul { display: none; text-align: left; padding:10px 0px 0px 0px; font-size:12px; float:left;} #cssdropdown li.headlink:hover ul { display: block; } #cssdropdown li.headlink ul li a { padding: 5px; height: 17px; } #cssdropdown li.headlink ul li a:hover { background-color: #333; } /* Pretty styling */ body { font-family:Georgia, "Times New Roman", Times, serif; font-size: 16px; } #cssdropdown a { color: grey; } #cssdropdown ul li a:hover { text-decoration: none; } #cssdropdown li.headlink { background-color: white; } #cssdropdown li.headlink ul { padding-bottom: 10px;} HTML: <ul id="cssdropdown"> <li class="headlink"><a href="http://www.cinderellahair.co.uk/new/index.php">HOME</a></li> <li class="headlink"><a href="http://www.cinderellahair.co.uk/new/gallery/gallery.php">GALLERY</a> <ul> <li><a href="http://amazon.com/">CELEBRITY</a></li> <li><a href="http://ebay.com/">BEFORE &amp; AFTER</a></li> <li><a href="http://craigslist.com/">HAIR TYPES</a></li> </ul> </li> <li class="headlink"><a href="http://www.cinderellahair.co.uk/new/about-cinderella-hair-extensions/about-us.php">ABOUT US</a> <ul> <li><a href="http://amazon.com/">WHY CHOOSE CINDERELLA</a></li> <li><a href="http://ebay.com/">TESTIMONIALS</a></li> <li><a href="http://craigslist.com/">MINI VIDEO CLIPS</a></li> <li><a href="http://craigslist.com/">OUR HAIR PRODUCTS</a></li> </ul> </li> <li class="headlink"><a href="http://www.cinderellahair.co.uk/new/news-and-offers/news.php">NEWS &amp; OFFERS</a> <ul> <li><a href="http://amazon.com/">VERA WANG FREE GIVEAWAY</a></li> <li><a href="http://ebay.com/">CINDERELLA ON TV</a></li> <li><a href="http://craigslist.com/">CINDERELLA IN THE PRESS</a></li> <li><a href="http://craigslist.com/">CINDRELLA NEWSLETTERS</a></li> </ul> </li> <li class="headlink"><a href="http://www.cinderellahair.co.uk/new/cinderella-salon/salon-finder.php">SALON FINDER</a></li> </ul> JS Code: $(document).ready(function(){ $('#cssdropdown li.headlink').hover( function() { $('ul', this).css('display', 'block'); }, function() { $('ul', this).css('display', 'none'); }); }); Full code is on the link above, just view source.
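
    One common fix, sketched here rather than tested against the live page: take the submenu out of normal flow so its width can no longer push the header row. Position each headlink relatively and the child ul absolutely beneath it:

        #cssdropdown li.headlink { position: relative; }
        #cssdropdown li.headlink ul {
            display: none;
            position: absolute;  /* out of normal flow: no longer widens the bar */
            top: 100%;           /* open directly below the head link */
            left: 0;
            white-space: nowrap; /* optional: keep long child links on one line */
        }
        #cssdropdown li.headlink:hover ul { display: block; }

    With the submenu absolutely positioned, its width is resolved against its own content and it simply overlaps whatever sits below the menu bar instead of reflowing it.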

  • Unexpected start of already-running services on the primary node when heartbeat on the secondary is stopped

    - by vorik
    Hi, I've got an active-passive Heartbeat cluster with Apache, MySQL, ActiveMQ and DRBD. Today, I wanted to perform hardware-maintenance on the secondary node (node04), so I stopped the heartbeat service before shutting it down. Then, the primary node (node03) received a shutdown notice from the secondary node (node04). This logging comes from the primary node: node03 heartbeat[4458]: 2010/03/08_08:52:56 info: Received shutdown notice from 'node04.companydomain.nl'. heartbeat[4458]: 2010/03/08_08:52:56 info: Resources being acquired from node04.companydomain.nl. harc[27522]: 2010/03/08_08:52:56 info: Running /etc/ha.d/rc.d/status status heartbeat[27523]: 2010/03/08_08:52:56 info: Local Resource acquisition completed. mach_down[27567]: 2010/03/08_08:52:56 info: /usr/share/heartbeat/mach_down: nice_failback: foreign resources acquired mach_down[27567]: 2010/03/08_08:52:56 info: mach_down takeover complete for node node04.companydomain.nl. heartbeat[4458]: 2010/03/08_08:52:56 info: mach_down takeover complete. harc[27620]: 2010/03/08_08:52:56 info: Running /etc/ha.d/rc.d/ip-request-resp ip-request-resp ip-request-resp[27620]: 2010/03/08_08:52:56 received ip-request-resp drbddisk OK yes ResourceManager[27645]: 2010/03/08_08:52:56 info: Acquiring resource group: node03.companydomain.nl drbddisk Filesystem::/dev/drbd0::/data::ext3 mysql apache::/etc/httpd/conf/httpd.conf LVSSyncDaemonSwap::master monitor activemq tivoli-cluster MailTo::[email protected]::DRBDFailureDrisAcc MailTo::[email protected]::DRBDFailureDrisAcc 1.2.3.212 ResourceManager[27645]: 2010/03/08_08:52:56 info: Running /etc/ha.d/resource.d/drbddisk start Filesystem[27700]: 2010/03/08_08:52:57 INFO: Running OK ResourceManager[27645]: 2010/03/08_08:52:57 info: Running /etc/ha.d/resource.d/mysql start mysql[27783]: 2010/03/08_08:52:57 Starting MySQL[ OK ] apache[27853]: 2010/03/08_08:52:57 INFO: Running OK ResourceManager[27645]: 2010/03/08_08:52:57 info: Running /etc/ha.d/resource.d/monitor start monitor[28160]: 2010/03/08_08:52:58 ResourceManager[27645]: 2010/03/08_08:52:58 info: Running /etc/ha.d/resource.d/activemq start activemq[28210]: 2010/03/08_08:52:58 Starting ActiveMQ Broker... ActiveMQ Broker is already running. 
ResourceManager[27645]: 2010/03/08_08:52:58 ERROR: Return code 1 from /etc/ha.d/resource.d/activemq ResourceManager[27645]: 2010/03/08_08:52:58 CRIT: Giving up resources due to failure of activemq ResourceManager[27645]: 2010/03/08_08:52:58 info: Releasing resource group: node03.companydomain.nl drbddisk Filesystem::/dev/drbd0::/data::ext3 mysql apache::/etc/httpd/conf/httpd.conf LVSSyncDaemonSwap::master monitor activemq tivoli-cluster MailTo::[email protected]::DRBDFailureDrisAcc MailTo::[email protected]::DRBDFailureDrisAcc 1.2.3.212 ResourceManager[27645]: 2010/03/08_08:52:58 info: Running /etc/ha.d/resource.d/IPaddr 1.2.3.212 stop IPaddr[28329]: 2010/03/08_08:52:58 INFO: ifconfig eth0:0 down IPaddr[28312]: 2010/03/08_08:52:58 INFO: Success ResourceManager[27645]: 2010/03/08_08:52:58 info: Running /etc/ha.d/resource.d/MailTo [email protected] DRBDFailureDrisAcc stop MailTo[28378]: 2010/03/08_08:52:58 INFO: Success ResourceManager[27645]: 2010/03/08_08:52:58 info: Running /etc/ha.d/resource.d/MailTo [email protected] DRBDFailureDrisAcc stop MailTo[28433]: 2010/03/08_08:52:58 INFO: Success ResourceManager[27645]: 2010/03/08_08:52:58 info: Running /etc/ha.d/resource.d/tivoli-cluster stop ResourceManager[27645]: 2010/03/08_08:52:58 info: Running /etc/ha.d/resource.d/activemq stop activemq[28503]: 2010/03/08_08:53:01 Stopping ActiveMQ Broker... Stopped ActiveMQ Broker. ResourceManager[27645]: 2010/03/08_08:53:01 info: Running /etc/ha.d/resource.d/monitor stop monitor[28681]: 2010/03/08_08:53:01 ResourceManager[27645]: 2010/03/08_08:53:01 info: Running /etc/ha.d/resource.d/LVSSyncDaemonSwap master stop LVSSyncDaemonSwap[28714]: 2010/03/08_08:53:02 info: ipvs_syncmaster down LVSSyncDaemonSwap[28714]: 2010/03/08_08:53:02 info: ipvs_syncbackup up LVSSyncDaemonSwap[28714]: 2010/03/08_08:53:02 info: ipvs_syncmaster released ResourceManager[27645]: 2010/03/08_08:53:02 info: Running /etc/ha.d/resource.d/apache /etc/httpd/conf/httpd.conf stop apache[28782]: 2010/03/08_08:53:03 INFO: Killing apache PID 18390 apache[28782]: 2010/03/08_08:53:03 INFO: apache stopped. apache[28771]: 2010/03/08_08:53:03 INFO: Success ResourceManager[27645]: 2010/03/08_08:53:03 info: Running /etc/ha.d/resource.d/mysql stop mysql[28851]: 2010/03/08_08:53:24 Shutting down MySQL.....................[ OK ] ResourceManager[27645]: 2010/03/08_08:53:24 info: Running /etc/ha.d/resource.d/Filesystem /dev/drbd0 /data ext3 stop Filesystem[29010]: 2010/03/08_08:53:25 INFO: Running stop for /dev/drbd0 on /data Filesystem[29010]: 2010/03/08_08:53:25 INFO: Trying to unmount /data Filesystem[29010]: 2010/03/08_08:53:25 ERROR: Couldn't unmount /data; trying cleanup with SIGTERM Filesystem[29010]: 2010/03/08_08:53:25 INFO: Some processes on /data were signalled Filesystem[29010]: 2010/03/08_08:53:27 INFO: unmounted /data successfully Filesystem[28999]: 2010/03/08_08:53:27 INFO: Success ResourceManager[27645]: 2010/03/08_08:53:27 info: Running /etc/ha.d/resource.d/drbddisk stop heartbeat[4458]: 2010/03/08_08:53:29 WARN: node node04.companydomain.nl: is dead heartbeat[4458]: 2010/03/08_08:53:29 info: Dead node node04.companydomain.nl gave up resources. heartbeat[4458]: 2010/03/08_08:53:29 info: Link node04.companydomain.nl:eth0 dead. heartbeat[4458]: 2010/03/08_08:53:29 info: Link node04.companydomain.nl:eth1 dead. hb_standby[29193]: 2010/03/08_08:53:57 Going standby [foreign]. heartbeat[4458]: 2010/03/08_08:53:57 info: node03.companydomain.nl wants to go standby [foreign] Soo... What just happened here??? 
    Heartbeat on node04 stopped and told node03, which was the active node at the time. Somehow, node03 decided to start the cluster processes that were already running. (For the processes that are not critical, I always return 0 from the startup script so that a failing non-essential part does not stop the entire cluster.) When starting ActiveMQ, it returns status 1 because it is already running. This fails the node and shuts everything down. As heartbeat is not running on the secondary node, it cannot fail over to it. When I tried to run ha_takeover to restart the resources, absolutely nothing happened. Only after I restarted heartbeat on the primary node could the resources be started (after a delay of 2 minutes).

    These are my questions:
    - Why does heartbeat on the primary node try to start the cluster processes again?
    - Why did ha_takeover not work?
    - What can I do to prevent this from happening?

    Server configuration:

    DRBD:
        version: 8.3.7 (api:88/proto:86-91)
        GIT-hash: ea9e28dbff98e331a62bcbcc63a6135808fe2917 build by [email protected], 2010-01-20 09:14:48
        0: cs:Connected ro:Secondary/Primary ds:UpToDate/UpToDate B r----
            ns:0 nr:6459432 dw:6459432 dr:0 al:0 bm:301 lo:0 pe:0 ua:0 ap:0 ep:1 wo:d oos:0

    uname -a:
        Linux node04 2.6.18-164.11.1.el5 #1 SMP Wed Jan 6 13:26:04 EST 2010 x86_64 x86_64 x86_64 GNU/Linux

    haresources:
        node03.companydomain.nl \
            drbddisk \
            Filesystem::/dev/drbd0::/data::ext3 \
            mysql \
            apache::/etc/httpd/conf/httpd.conf \
            LVSSyncDaemonSwap::master \
            monitor \
            activemq \
            tivoli-cluster \
            MailTo::[email protected]::DRBDFailureDrisAcc \
            MailTo::[email protected]::DRBDFailureDrisAcc \
            1.2.3.212

    ha.cf:
        debugfile /var/log/ha-debug
        logfile /var/log/ha-log
        keepalive 500ms
        deadtime 30
        warntime 10
        initdead 120
        udpport 694
        mcast eth0 225.0.0.3 694 1 0
        mcast eth1 225.0.0.4 694 1 0
        auto_failback off
        node node03.companydomain.nl
        node node04.companydomain.nl
        respawn hacluster /usr/lib64/heartbeat/dopd
        apiauth dopd gid=haclient uid=hacluster

    Thank you very much in advance, Ger Apeldoorn
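
    On the "why does it try to start them again" question: with R1-style haresources configurations, heartbeat treats resource scripts like LSB init scripts, so "start" on an already-running resource is expected to return 0, not 1. A minimal idempotent wrapper, sketched under the assumption of a hypothetical underlying /etc/init.d/activemq script (paths illustrative, matching the resource.d layout in the logs):

        #!/bin/sh
        # /etc/ha.d/resource.d/activemq -- make "start" idempotent so a
        # spurious re-acquisition does not fail the whole resource group.
        case "$1" in
          start)
            /etc/init.d/activemq status >/dev/null 2>&1 && exit 0  # already up: report success
            exec /etc/init.d/activemq start
            ;;
          stop)
            exec /etc/init.d/activemq stop
            ;;
          status)
            exec /etc/init.d/activemq status
            ;;
        esac
        exit 0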

  • BizTalk server problem

    - by WtFudgE
    Hi, we have a BizTalk server (a virtual one (1!)...) at our company, and an SQL server where the data is kept. We have a lot of data traffic; I'm talking about hundreds of thousands of messages. So I'm actually not even sure one server is safe, but our company is not that easy to convince. Recently we have been having a lot of problems. Let me describe the situation in detail, so I'm not missing anything.

    Our server has 5 applications:
    - One with 3 orchestrations, 12 send ports, 16 receive locations.
    - One with 4 orchestrations, 32 send ports, 20 receive locations.
    - One with 4 orchestrations, 24 send ports, 20 receive locations.
    - One with 47 (yes, 47) orchestrations, 37 send ports, 6 receive locations.
    - One common application with a couple of resources.

    Our problems have occurred since we deployed the application with the 47 orchestrations. A lot of these orchestrations use assign shapes with C# code to do the mapping. This is because we use HL7 extensions, which are somewhat special, and with C# code and XPath the mapping was much easier to do since a lot of these schemas look alike. The C# reads in XmlNodes received through XPath and returns XmlNodes which are then assigned back to BizTalk messages. I'm not sure if this could be the cause, but I thought I'd mention it.

    The send and receive ports have a lot of different types: File, MQSeries, SQL, MLLP, FTP. Each of these types has its own host instance, to balance out the load. Our orchestrations use the BiztalkApplication host. A couple of scripts are also running on this server, mostly FTP upload scripts, plus a zipper script which zips files every half hour into a daily zip and deletes the zip files after a month. We use this zip script on our backup files (we back up a lot; the backups are also on this server). We did this because the server had problems sending files to a location that contained a lot (A LOT) of files; after the files were reduced to zips, it went better.

    The problems we have been having recently are mainly two major ones.

    Our most important problem is the following. We kept a receive location with a lot of messages on a queue for testing. After we start this receive location, which uses the 47 orchestrations, the running service instances start to skyrocket. OK, this is pretty normal; let's say about 10000. Then we stop the receive location to see how BizTalk handles these 10000 instances. Normally they would go down pretty fast, and sometimes they do, but after a while processing starts to "throttle": the instances just stop being processed and the count stays at the same number. For example, in 30 seconds it goes down from 10000 to 4000, then it stays at 4000 and lowers very, very slowly, like 30 instances in 5 minutes. This means that all the service instances of the other applications are also stuck in there, and they are not processed either. We noticed that after restarting our host instances the instance number went down fast again, so we tried selectively restarting different host instances to locate the problem. Eventually, restarting the file send/receive host instance would do the trick, so we thought file sends were the problem, considering that we make a lot of backups. So we replaced the file-type backups with MQSeries backups. The same problem occurred, and, funny thing, restarting the file send/receive host still fixes it. No errors can be found in the Event Viewer either.

    A second problem we are having is that sometimes, at around 6 am, all or part of the host instances are stopped. In the Event Viewer we noticed the following errors (there is more than one):

    - The receive location "MdnBericht SQL" with URL "SQL://ZNACDBPEG/mdnd0001/" is shutting down. Details: "The error threshold has been exceeded. The receive location is shutting down.".
    - The Messaging Engine failed to add a receive location "M2m Othello Export Start Bestand" with URL "\m2mservices\Othello_import$\DataFilter Start*.xml" to the adapter "FILE". Reason: "The FILE adapter cannot access the folder \m2mservices\Othello_import$\DataFilter Start. Verify this folder exists. Error: Logon failure: unknown user name or bad password.".
    - The FILE adapter cannot access the folder \m2mservices\Othello_import$\DataFilter Start. Verify this folder exists. Error: Logon failure: unknown user name or bad password.
    - An attempt to connect to "BizTalkMsgBoxDb" SQL Server database on server "ZNACDBBTS" failed. Error: "Login failed for user ''. The user is not associated with a trusted SQL Server connection."

    It would seem that there is a login failure at this time, and that because of it other services also experience problems and are eventually shut down. The thing is, our user is an admin, and it's impossible that its password is wrong "sometimes". We have considered that the problem could be due to an infrastructure issue, but that's not really our department. I know it's a long post, but we're not sure anymore what to do. Would adding another server and balancing the load solve our problems? Is there a way to measure our load and know where to start splitting? What are normal load numbers, etc.? I appreciate any answers, because these issues are getting worse and we're also on a deadline. Thanks a lot for any replies!
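
    A hedged diagnostic sketch (it assumes the standard root\MicrosoftBizTalkServer WMI namespace and the "BizTalk:Message Agent" performance counters are installed on the box) for watching host instance state and the throttling trigger while the backlog drains:

        # Host instance states (PowerShell; MSBTS_HostInstance is the documented WMI class)
        Get-WmiObject -Namespace 'root\MicrosoftBizTalkServer' -Class MSBTS_HostInstance |
            Select-Object HostName, ServiceState

        # 0 means "not throttled"; non-zero values identify which condition tripped
        Get-Counter '\BizTalk:Message Agent(*)\Message delivery throttling state'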

  • Too many Apache processes killing the CPU

    - by RULE101
    I noticed that too many Apache processes are killing the CPU on my dedicated server:

        14193 (Trace) (Kill) nobody 0 66.1 0.0 /usr/local/apache/bin/httpd -k start -DSSL
        14128 (Trace) (Kill) nobody 0 65.9 0.0 /usr/local/apache/bin/httpd -k start -DSSL
        14136 (Trace) (Kill) nobody 0 65.9 0.0 /usr/local/apache/bin/httpd -k start -DSSL
        14129 (Trace) (Kill) nobody 0 65.8 0.0 /usr/local/apache/bin/httpd -k start -DSSL
        13419 (Trace) (Kill) nobody 0 65.7 0.0 /usr/local/apache/bin/httpd -k start -DSSL
        13421 (Trace) (Kill) nobody 0 65.7 0.0 /usr/local/apache/bin/httpd -k start -DSSL
        13426 (Trace) (Kill) nobody 0 65.7 0.0 /usr/local/apache/bin/httpd -k start -DSSL
        13428 (Trace) (Kill) nobody 0 65.7 0.0 /usr/local/apache/bin/httpd -k start -DSSL
        13429 (Trace) (Kill) nobody 0 65.7 0.0 /usr/local/apache/bin/httpd -k start -DSSL
        12173 (Trace) (Kill) nobody 0 65.5 0.0 /usr/local/apache/bin/httpd -k start -DSSL
        14073 (Trace) (Kill) nobody 0 65.5 0.0 /usr/local/apache/bin/httpd -k start -DSSL

    I am getting high-load email notifications from cPanel during the day. From httpd.conf:

        Include "/usr/local/apache/conf/includes/pre_main_global.conf"
        Include "/usr/local/apache/conf/includes/pre_main_2.conf"
        LoadModule bwlimited_module modules/mod_bwlimited.so
        LoadModule h264_streaming_module /usr/local/apache/modules/mod_h264_streaming.so
        AddHandler h264-streaming.extensions .mp4
        Include "/usr/local/apache/conf/php.conf"
        Include "/usr/local/apache/conf/includes/errordocument.conf"
        ErrorLog "logs/error_log"
        ScriptAliasMatch ^/?controlpanel/?$ /usr/local/cpanel/cgi-sys/redirect.cgi
        ScriptAliasMatch ^/?cpanel/?$ /usr/local/cpanel/cgi-sys/redirect.cgi
        ScriptAliasMatch ^/?kpanel/?$ /usr/local/cpanel/cgi-sys/redirect.cgi
        ScriptAliasMatch ^/?securecontrolpanel/?$ /usr/local/cpanel/cgi-sys/sredirect.cgi
        ScriptAliasMatch ^/?securecpanel/?$ /usr/local/cpanel/cgi-sys/sredirect.cgi
        ScriptAliasMatch ^/?securewhm/?$ /usr/local/cpanel/cgi-sys/swhmredirect.cgi
        ScriptAliasMatch ^/?webmail/?$ /usr/local/cpanel/cgi-sys/wredirect.cgi
        ScriptAliasMatch ^/?whm/?$ /usr/local/cpanel/cgi-sys/whmredirect.cgi
        RewriteEngine on
        AddType text/html .shtml
        Alias /akopia /usr/local/cpanel/3rdparty/interchange/share/akopia/
        Alias /bandwidth /usr/local/bandmin/htdocs/
        Alias /img-sys /usr/local/cpanel/img-sys/
        Alias /interchange /usr/local/cpanel/3rdparty/interchange/share/interchange/
        Alias /interchange-5 /usr/local/cpanel/3rdparty/interchange/share/interchange-5/
        Alias /java-sys /usr/local/cpanel/java-sys/
        Alias /mailman/archives /usr/local/cpanel/3rdparty/mailman/archives/public/
        Alias /pipermail /usr/local/cpanel/3rdparty/mailman/archives/public/
        Alias /sys_cpanel /usr/local/cpanel/sys_cpanel/
        ScriptAlias /cgi-sys /usr/local/cpanel/cgi-sys/
        ScriptAlias /mailman /usr/local/cpanel/3rdparty/mailman/cgi-bin/
        <Directory "/">
            AllowOverride All
            Options All
        </Directory>
        <Directory "/usr/local/apache/htdocs">
            Options All
            AllowOverride None
            Require all granted
        </Directory>
        <Files ~ "^error_log$">
            Order allow,deny
            Deny from all
            Satisfy All
        </Files>
        <Files ".ht*">
            Require all denied
        </Files>
        <IfModule log_config_module>
            LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" combined
            LogFormat "%h %l %u %t \"%r\" %>s %b" common
            CustomLog "logs/access_log" common
            <IfModule logio_module>
                LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\" %I %O" combinedio
            </IfModule>
        </IfModule>
        <IfModule alias_module>
            ScriptAlias /cgi-bin/ "/usr/local/apache/cgi-bin/"
        </IfModule>
        <Directory "/usr/local/apache/cgi-bin">
            AllowOverride None
            Options All
            Require all granted
        </Directory>
        <IfModule mime_module>
            TypesConfig conf/mime.types
            AddType application/x-compress .Z
            AddType application/x-gzip .gz .tgz
        </IfModule>
        <IfModule prefork.c>
            Mutex default mpm-accept
        </IfModule>
        <IfModule mod_log_config.c>
            LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" combined
            LogFormat "%h %l %u %t \"%r\" %>s %b" common
            LogFormat "%{Referer}i -> %U" referer
            LogFormat "%{User-agent}i" agent
            CustomLog logs/access_log common
        </IfModule>
        <IfModule worker.c>
            Mutex default mpm-accept
        </IfModule>
        # Direct modifications to the Apache configuration file may be lost upon subsequent regeneration
        # of the configuration file. To have modifications retained, all modifications must be checked
        # into the configuration system by running:
        #     /usr/local/cpanel/bin/apache_conf_distiller --update
        # To see if your changes will be conserved, regenerate the Apache configuration file by running:
        #     /usr/local/cpanel/bin/build_apache_conf
        # and check the configuration file for your alterations. If your changes have been ignored, then
        # they will need to be added directly to their respective template files.
        # It is also possible to add custom directives to the various "Include" files loaded by this
        # httpd.conf. For detailed instructions on using Include files and the apache_conf_distiller with
        # the new configuration system refer to the documentation at:
        # http://www.cpanel.net/support/docs/ea/ea3/customdirectives.html
        # This configuration file was built from the following templates:
        #     /var/cpanel/templates/apache2/main.default
        #     /var/cpanel/templates/apache2/main.local
        #     /var/cpanel/templates/apache2/vhost.default
        #     /var/cpanel/templates/apache2/vhost.local
        #     /var/cpanel/templates/apache2/ssl_vhost.default
        #     /var/cpanel/templates/apache2/ssl_vhost.local
        # Templates with the '.local' extension will be preferred over templates with the '.default'
        # extension. The only template updated by the apache_conf_distiller is main.default.
        PidFile logs/httpd.pid
        # Defined in /var/cpanel/cpanel.config: apache_port
        Listen 0.0.0.0:80
        User nobody
        Group nobody
        ExtendedStatus On
        ServerAdmin [email protected]
        ServerName server.powerlabel.net
        LogLevel warn
        # These can be set in WHM under 'Apache Global Configuration'
        Timeout 300
        ServerSignature On
        <IfModule prefork.c>
        </IfModule>
        RewriteEngine on
        RewriteMap LeechProtect prg:/usr/local/cpanel/bin/leechprotect
        Mutex file:/usr/local/apache/logs rewrite-map
        <IfModule !mod_ruid2.c>
            UserDir public_html
        </IfModule>
        <IfModule mod_ruid2.c>
            UserDir disabled
        </IfModule>
        # DirectoryIndex is set via the WHM -> Service Configuration -> Apache Setup -> DirectoryIndex Priority
        DirectoryIndex index.html.var index.htm index.html index.shtml index.xhtml index.wml index.perl index.pl index.plx index.ppl index.cgi index.jsp index.js index.jp index.php4 index.php3 index.php index.phtml default.htm default.html home.htm index.php5 Default.html Default.htm home.html
        # SSLCipherSuite can be set in WHM under 'Apache Global Configuration'
        SSLPassPhraseDialog builtin
        SSLUseStapling on
        SSLStaplingCache shmcb:/usr/local/apache/logs/stapling_cache_shmcb(256000)
        SSLSessionCache shmcb:/usr/local/apache/logs/ssl_gcache_data_shmcb(1024000)
        SSLSessionCacheTimeout 300
        Mutex file:/usr/local/apache/logs ssl-cache
        SSLRandomSeed startup builtin
        SSLRandomSeed connect builtin
        # Defined in /var/cpanel/cpanel.config: apache_ssl_port
        Listen 0.0.0.0:443
        AddType application/x-x509-ca-cert .crt
        AddType application/x-pkcs7-crl .crl
        AddHandler cgi-script .cgi .pl .plx .ppl .perl
        AddHandler server-parsed .shtml
        AddType text/html .shtml
        AddType application/x-tar .tgz
        AddType text/vnd.wap.wml .wml
        AddType image/vnd.wap.wbmp .wbmp
        AddType text/vnd.wap.wmlscript .wmls
        AddType application/vnd.wap.wmlc .wmlc
        AddType application/vnd.wap.wmlscriptc .wmlsc
        <Location /whm-server-status>
            SetHandler server-status
            Order deny,allow
            Deny from all
            Allow from 127.0.0.1
        </Location>
        # SUEXEC is supported
        Include "/usr/local/apache/conf/includes/pre_virtualhost_global.conf"
        Include "/usr/local/apache/conf/includes/pre_virtualhost_2.conf"

    What can cause this, and how can I fix it?
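
    Under the prefork MPM, a runaway httpd count is normally capped with the MPM limits, and the second <IfModule prefork.c> block in the dump above is empty, so the compiled-in defaults apply. A hedged example follows: the directive names are standard Apache, but the numbers are illustrative and not tuned for this box. It pairs well with the /whm-server-status handler already configured, which shows what the busy workers are actually doing:

        <IfModule prefork.c>
            StartServers             5
            MinSpareServers          5
            MaxSpareServers         10
            ServerLimit            150
            MaxRequestWorkers      150    # called MaxClients on Apache 2.2
            MaxConnectionsPerChild 2000   # recycle children to contain leaks
        </IfModule>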

  • Why do I get this error when I try to push my SQLite3 database to PostgreSQL (via Taps) on the Cedar stack?

    - by rhodee
    I've done quite a bit of research on the Heroku Dev Center and I am now looking to the community for help. Here is my problem: I cannot push my db to the Heroku Cedar stack. I am trying to migrate a SQLite database to PostgreSQL via the Taps gem. When I am ready to deploy I run:

        bundle install --without production
        heroku run db:push

    I get the following result:

        Running db:seed attached to terminal... up, run.17
        sh: db:seed: not found

    And when I run the migration:

        heroku run rake db:migrate

    I get the following:

        Running rake db:migrate attached to terminal... up, run.18
        rake aborted!
        No Rakefile found (looking for: rakefile, Rakefile, rakefile.rb, Rakefile.rb)
        /usr/local/lib/ruby/1.9.1/rake.rb:2367:in `raw_load_rakefile'
        /usr/local/lib/ruby/1.9.1/rake.rb:2007:in `block in load_rakefile'
        /usr/local/lib/ruby/1.9.1/rake.rb:2058:in `standard_exception_handling'
        /usr/local/lib/ruby/1.9.1/rake.rb:2006:in `load_rakefile'
        /usr/local/lib/ruby/1.9.1/rake.rb:1991:in `run'
        /usr/local/bin/rake:31:in `<main>'

    Every time I push to Heroku (git push heroku master) it fails because my Gemfile attempts to install the sqlite3 gem, even though it is inside the development and test groups in my Gemfile. My database.yml production environment still points to the sqlite adapter, even after I have run the following command successfully:

        heroku config:add BUNDLE_WITHOUT="test development" --app app_name_on_heroku

    Out of ideas. Please help. If it's useful I can post the results of my Gemfile, heroku ps and logs. Cheers.

    UPDATE: After following @John's direction I now receive the following terminal message:

        Sending schema
        Schema: 100% |==========================================| Time: 00:00:07
        Sending indexes
        schema_migrat: 100% |==========================================| Time: 00:00:00
        Sending data
        4 tables, 6 records
        schema_migrat: 0% | | ETA: --:--:--
        Saving session to push_201111070749.dat.. !!!
Caught Server Exception HTTP CODE: 500 Taps Server Error: LoadError: no such file to load -- sequel/adapters/ And the following warnings: ["/app/.bundle/gems/ruby/1.9.1/gems/sequel-3.20.0/lib/sequel/core.rb:249:in require'", "/app/.bundle/gems/ruby/1.9.1/gems/sequel-3.20.0/lib/sequel/core.rb:249:inblock in tsk_require'", "/app/.bundle/gems/ruby/1.9.1/gems/sequel-3.20.0/lib/sequel/core.rb:72:in block in check_requiring_thread'", "<internal:prelude>:10:insynchronize'", "/app/.bundle/gems/ruby/1.9.1/gems/sequel-3.20.0/lib/sequel/core.rb:69:in check_requiring_thread'", "/app/.bundle/gems/ruby/1.9.1/gems/sequel-3.20.0/lib/sequel/core.rb:249:intsk_require'", "/app/.bundle/gems/ruby/1.9.1/gems/sequel-3.20.0/lib/sequel/database/connecting.rb:25:in adapter_class'", "/app/.bundle/gems/ruby/1.9.1/gems/sequel-3.20.0/lib/sequel/database/connecting.rb:54:inconnect'", "/app/.bundle/gems/ruby/1.9.1/gems/sequel-3.20.0/lib/sequel/core.rb:119:in connect'", "/app/lib/taps/db_session.rb:14:inconn'", "/app/lib/taps/server.rb:91:in block in <class:Server>'", "/app/.bundle/gems/ruby/1.9.1/gems/sinatra-1.0/lib/sinatra/base.rb:865:incall'", "/app/.bundle/gems/ruby/1.9.1/gems/sinatra-1.0/lib/sinatra/base.rb:865:in block in route'", "/app/.bundle/gems/ruby/1.9.1/gems/sinatra-1.0/lib/sinatra/base.rb:521:ininstance_eval'", "/app/.bundle/gems/ruby/1.9.1/gems/sinatra-1.0/lib/sinatra/base.rb:521:in route_eval'", "/app/.bundle/gems/ruby/1.9.1/gems/sinatra-1.0/lib/sinatra/base.rb:500:inblock (2 levels) in route!'", "/app/.bundle/gems/ruby/1.9.1/gems/sinatra-1.0/lib/sinatra/base.rb:497:in catch'", "/app/.bundle/gems/ruby/1.9.1/gems/sinatra-1.0/lib/sinatra/base.rb:497:inblock in route!'", "/app/.bundle/gems/ruby/1.9.1/gems/sinatra-1.0/lib/sinatra/base.rb:476:in each'", "/app/.bundle/gems/ruby/1.9.1/gems/sinatra-1.0/lib/sinatra/base.rb:476:inroute!'", "/app/.bundle/gems/ruby/1.9.1/gems/sinatra-1.0/lib/sinatra/base.rb:601:in dispatch!'", "/app/.bundle/gems/ruby/1.9.1/gems/sinatra-1.0/lib/sinatra/base.rb:411:inblock in call!'", "/app/.bundle/gems/ruby/1.9.1/gems/sinatra-1.0/lib/sinatra/base.rb:566:in instance_eval'", "/app/.bundle/gems/ruby/1.9.1/gems/sinatra-1.0/lib/sinatra/base.rb:566:inblock in invoke'", "/app/.bundle/gems/ruby/1.9.1/gems/sinatra-1.0/lib/sinatra/base.rb:566:in catch'", "/app/.bundle/gems/ruby/1.9.1/gems/sinatra-1.0/lib/sinatra/base.rb:566:ininvoke'", "/app/.bundle/gems/ruby/1.9.1/gems/sinatra-1.0/lib/sinatra/base.rb:411:in call!'", "/app/.bundle/gems/ruby/1.9.1/gems/sinatra-1.0/lib/sinatra/base.rb:399:incall'", "/app/.bundle/gems/ruby/1.9.1/gems/rack-1.2.1/lib/rack/auth/basic.rb:25:in call'", "/app/.bundle/gems/ruby/1.9.1/gems/sinatra-1.0/lib/sinatra/base.rb:979:inblock in call'", "/app/.bundle/gems/ruby/1.9.1/gems/sinatra-1.0/lib/sinatra/base.rb:1005:in synchronize'", "/app/.bundle/gems/ruby/1.9.1/gems/sinatra-1.0/lib/sinatra/base.rb:979:incall'", "/home/heroku_rack/lib/static_assets.rb:9:in call'", "/home/heroku_rack/lib/last_access.rb:15:incall'", "/app/.bundle/gems/ruby/1.9.1/gems/rack-1.2.1/lib/rack/urlmap.rb:47:in block in call'", "/app/.bundle/gems/ruby/1.9.1/gems/rack-1.2.1/lib/rack/urlmap.rb:41:ineach'", "/app/.bundle/gems/ruby/1.9.1/gems/rack-1.2.1/lib/rack/urlmap.rb:41:in call'", "/home/heroku_rack/lib/date_header.rb:14:incall'", "/app/.bundle/gems/ruby/1.9.1/gems/rack-1.2.1/lib/rack/builder.rb:77:in call'", "/app/.bundle/gems/ruby/1.9.1/gems/thin-1.2.7/lib/thin/connection.rb:76:inblock in pre_process'", "/app/.bundle/gems/ruby/1.9.1/gems/thin-1.2.7/lib/thin/connection.rb:74:in catch'", 
"/app/.bundle/gems/ruby/1.9.1/gems/thin-1.2.7/lib/thin/connection.rb:74:inpre_process'", "/app/.bundle/gems/ruby/1.9.1/gems/thin-1.2.7/lib/thin/connection.rb:57:in process'", "/app/.bundle/gems/ruby/1.9.1/gems/thin-1.2.7/lib/thin/connection.rb:42:inreceive_data'", "/app/.bundle/gems/ruby/1.9.1/gems/eventmachine-0.12.10/lib/eventmachine.rb:256:in run_machine'", "/app/.bundle/gems/ruby/1.9.1/gems/eventmachine-0.12.10/lib/eventmachine.rb:256:inrun'", "/app/.bundle/gems/ruby/1.9.1/gems/thin-1.2.7/lib/thin/backends/base.rb:57:in start'", "/app/.bundle/gems/ruby/1.9.1/gems/thin-1.2.7/lib/thin/server.rb:156:instart'", "/app/.bundle/gems/ruby/1.9.1/gems/thin-1.2.7/lib/thin/controllers/controller.rb:80:in start'", "/app/.bundle/gems/ruby/1.9.1/gems/thin-1.2.7/lib/thin/runner.rb:177:inrun_command'", "/app/.bundle/gems/ruby/1.9.1/gems/thin-1.2.7/lib/thin/runner.rb:143:in run!'", "/app/.bundle/gems/ruby/1.9.1/gems/thin-1.2.7/bin/thin:6:in'", "/usr/ruby1.9.2/bin/thin:19:in load'", "/usr/ruby1.9.2/bin/thin:19:in'"]

  • Am I going about this the right way?

    - by Psytronic
    Hey Guys, I'm starting a WPF project, and just finished the base of the UI, it seems very convoluted though, so I'm not sure if I've gone around laying it out in the right way. I don't want to get to start developing the back-end and realise that I've done the front wrong, and make life harder for myself. Coming from a background of <DIV's and CSS to style this is a lot different, and really want to get it right from the start. Essentially it's a one week calendar (7 days, Mon-Sunday, defaulting to the current week.) Which will eventually link up to a DB and if I have an appointment for something on this day it will show it in the relevant day. I've opted for a Grid rather than ListView because of the way it will work I will not be binding the results to a collection or anything along those lines. Rather I will be filling out a Combo box within the canvas for each day (yet to be placed in the code) for each event and on selection it will show me further details. XAML: <Window x:Class="WOW_Widget.Window1" xmlns="http://schemas.microsoft.com/winfx/2006/xaml/presentation" xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml" xmlns:s="clr-namespace:System;assembly=mscorlib" xmlns:Extensions="clr-namespace:WOW_Widget" DataContext="{Binding RelativeSource={RelativeSource Self}}" Title="Window1" Height="239" Width="831" <Window.Resources <LinearGradientBrush x:Key="NormalBrush" StartPoint="0,0" EndPoint="0,1" <GradientBrush.GradientStops <GradientStopCollection <GradientStop Offset="1.0" Color="White"/ <GradientStop Offset="0.0" Color="LightSlateGray"/ </GradientStopCollection </GradientBrush.GradientStops </LinearGradientBrush <LinearGradientBrush x:Key="grdDayHeader" StartPoint="0,0" EndPoint="0,1" <GradientBrush.GradientStops <GradientStopCollection <GradientStop Offset="0.0" Color="Peru" / <GradientStop Offset="1.0" Color="White" / </GradientStopCollection </GradientBrush.GradientStops </LinearGradientBrush <LinearGradientBrush x:Key="grdToday" StartPoint="0,0" EndPoint="0,1" <GradientBrush.GradientStops <GradientStopCollection <GradientStop Offset="0.0" Color="LimeGreen"/ <GradientStop Offset="1.0" Color="DarkGreen" / </GradientStopCollection </GradientBrush.GradientStops </LinearGradientBrush <Style TargetType="{x:Type GridViewColumnHeader}" <Setter Property="Background" Value="Khaki" / </Style <Style x:Key="DayHeader" TargetType="{x:Type Label}" <Setter Property="Background" Value="{StaticResource grdDayHeader}" / <Setter Property="Width" Value="111" / <Setter Property="Height" Value="25" / <Setter Property="HorizontalContentAlignment" Value="Center" / </Style <Style x:Key="DayField" <Setter Property="Canvas.Width" Value="111" / <Setter Property="Canvas.Height" Value="60" / <Setter Property="Canvas.Background" Value="White" / </Style <Style x:Key="Today" <Setter Property="Canvas.Background" Value="{StaticResource grdToday}" / </Style <Style x:Key="CalendarColSpacer" <Setter Property="Canvas.Width" Value="1" / <Setter Property="Canvas.Background" Value="Black" / </Style <Style x:Key="CalendarRowSpacer" <Setter Property="Canvas.Height" Value="1" / <Setter Property="Canvas.Background" Value="Black" / </Style </Window.Resources <Grid Background="{StaticResource NormalBrush}" <Border BorderBrush="Black" BorderThickness="1" Width="785" Height="86" Margin="12,12,12,104" <Canvas Height="86" Width="785" VerticalAlignment="Top" <Grid <Grid.ColumnDefinitions <ColumnDefinition / <ColumnDefinition / <ColumnDefinition / <ColumnDefinition / <ColumnDefinition / <ColumnDefinition / <ColumnDefinition / 
<ColumnDefinition / <ColumnDefinition / <ColumnDefinition / <ColumnDefinition / <ColumnDefinition / <ColumnDefinition / </Grid.ColumnDefinitions <Grid.RowDefinitions <RowDefinition / <RowDefinition / <RowDefinition / </Grid.RowDefinitions <Label Grid.Column="0" Grid.Row="0" Content="Monday" Style="{StaticResource DayHeader}" / <Canvas Grid.Column="1" Grid.RowSpan="3" Grid.Row="0" Style="{StaticResource CalendarColSpacer}" / <Label Grid.Column="2" Grid.Row="0" Content="Tuesday" Style="{StaticResource DayHeader}" / <Canvas Grid.Column="3" Grid.RowSpan="3" Grid.Row="0" Style="{StaticResource CalendarColSpacer}" / <Label Grid.Column="4" Grid.Row="0" Content="Wednesday" Style="{StaticResource DayHeader}" / <Canvas Grid.Column="5" Grid.RowSpan="3" Grid.Row="0" Style="{StaticResource CalendarColSpacer}" / <Label Grid.Column="6" Grid.Row="0" Content="Thursday" Style="{StaticResource DayHeader}" / <Canvas Grid.Column="7" Grid.RowSpan="3" Grid.Row="0" Style="{StaticResource CalendarColSpacer}" / <Label Grid.Column="8" Grid.Row="0" Content="Friday" Style="{StaticResource DayHeader}" / <Canvas Grid.Column="9" Grid.RowSpan="3" Grid.Row="0" Style="{StaticResource CalendarColSpacer}" / <Label Grid.Column="10" Grid.Row="0" Content="Saturday" Style="{StaticResource DayHeader}" / <Canvas Grid.Column="11" Grid.RowSpan="3" Grid.Row="0" Style="{StaticResource CalendarColSpacer}" / <Label Grid.Column="12" Grid.Row="0" Content="Sunday" Style="{StaticResource DayHeader}" / <Canvas Grid.Column="0" Grid.ColumnSpan="13" Grid.Row="1" Style="{StaticResource CalendarRowSpacer}" / <Canvas Grid.Column="0" Grid.Row="2" Margin="0" Style="{StaticResource DayField}" <Label Name="lblMondayDate" / </Canvas <Canvas Grid.Column="2" Grid.Row="2" Margin="0" Style="{StaticResource DayField}" <Label Name="lblTuesdayDate" / </Canvas <Canvas Grid.Column="4" Grid.Row="2" Margin="0" Style="{StaticResource DayField}" <Label Name="lblWednesdayDate" / </Canvas <Canvas Grid.Column="6" Grid.Row="2" Margin="0" Style="{StaticResource DayField}" <Label Name="lblThursdayDate" / </Canvas <Canvas Grid.Column="8" Grid.Row="2" Margin="0" Style="{StaticResource DayField}" <Label Name="lblFridayDate" / </Canvas <Canvas Grid.Column="10" Grid.Row="2" Margin="0" Style="{StaticResource DayField}" <Label Name="lblSaturdayDate" / </Canvas <Canvas Grid.Column="12" Grid.Row="2" Margin="0" Style="{StaticResource DayField}" <Label Name="lblSundayDate" / </Canvas </Grid </Canvas </Border <Canvas Height="86" HorizontalAlignment="Right" Margin="0,0,12,12" Name="canvas1" VerticalAlignment="Bottom" Width="198"</Canvas </Grid </Window CS: public partial class Window1 : Window { private DateTime today = new DateTime(); private Label[] Dates = new Label[7]; public Window1() { DateTime start = today = DateTime.Now; int day = (int)today.DayOfWeek; while (day != 1) { start = start.Subtract(new TimeSpan(1, 0, 0, 0)); day--; } InitializeComponent(); Dates[0] = lblMondayDate; Dates[1] = lblTuesdayDate; Dates[2] = lblWednesdayDate; Dates[3] = lblThursdayDate; Dates[4] = lblFridayDate; Dates[5] = lblSaturdayDate; Dates[6] = lblSundayDate; FillWeek(start); } private void FillWeek(DateTime start) { for (int d = 0; d < Dates.Length; d++) { TimeSpan td = new TimeSpan(d, 0, 0, 0); DateTime _day = start.Add(td); if (_day.Date == today.Date) { Canvas dayCanvas = (Canvas)Dates[d].Parent; dayCanvas.Style = (Style)this.Resources["Today"]; } Dates[d].Content = (int)start.Add(td).Day; } } } Thanks for any tips you guys can give Psytronic
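
    One way to shrink the hand-built grid, sketched under the assumption of a small view-model ("Days" and a DayViewModel with Name, Date and IsToday properties are hypothetical names, not part of the code above): let an ItemsControl with a UniformGrid generate the seven columns, and move the "today" highlight into a style trigger instead of code-behind.

        <!-- Sketch only: "Days" is an assumed collection of 7 day view-models. -->
        <ItemsControl ItemsSource="{Binding Days}">
            <ItemsControl.ItemsPanel>
                <ItemsPanelTemplate>
                    <UniformGrid Rows="1" Columns="7"/>
                </ItemsPanelTemplate>
            </ItemsControl.ItemsPanel>
            <ItemsControl.ItemTemplate>
                <DataTemplate>
                    <StackPanel>
                        <StackPanel.Style>
                            <Style TargetType="StackPanel">
                                <Style.Triggers>
                                    <DataTrigger Binding="{Binding IsToday}" Value="True">
                                        <Setter Property="Background" Value="{StaticResource grdToday}"/>
                                    </DataTrigger>
                                </Style.Triggers>
                            </Style>
                        </StackPanel.Style>
                        <Label Content="{Binding Name}" Style="{StaticResource DayHeader}"/>
                        <Label Content="{Binding Date}"/>
                    </StackPanel>
                </DataTemplate>
            </ItemsControl.ItemTemplate>
        </ItemsControl>

    With that in place, FillWeek reduces to building the seven-item collection, and the lblMondayDate..lblSundayDate fields and their parent-canvas lookups disappear.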

  • How do I install PHP with JSON and OAuth on Mac Snow Leopard?

    - by meilas
    I want to use the Dropbox API via this library, http://code.google.com/p/dropbox-php/. I installed MAMP, and then I tried sudo pecl install oauth but I got the following. >>>> downloading oauth-1.0.0.tgz ... Starting to download oauth-1.0.0.tgz (42,834 bytes) ............done: 42,834 bytes 6 source files, building running: phpize Configuring for: PHP Api Version: 20090626 Zend Module Api No: 20090626 Zend Extension Api No: 220090626 building in /var/tmp/pear-build-root/oauth-1.0.0 running: /private/var/tmp/apache_mod_php/apache_mod_php-53~1/Build/tmp/pear/temp/oauth/configure checking for grep that handles long lines and -e... /usr/bin/grep checking for egrep... /usr/bin/grep -E checking for a sed that does not truncate output... /opt/local/bin/gsed checking for cc... cc checking for C compiler default output file name... a.out checking whether the C compiler works... yes checking whether we are cross compiling... no checking for suffix of executables... checking for suffix of object files... o checking whether we are using the GNU C compiler... yes checking whether cc accepts -g... yes checking for cc option to accept ISO C89... none needed checking how to run the C preprocessor... cc -E checking for icc... no checking for suncc... no checking whether cc understands -c and -o together... yes checking for system library directory... lib checking if compiler supports -R... no checking if compiler supports -Wl,-rpath,... yes checking build system type... i686-apple-darwin10.4.0 checking host system type... i686-apple-darwin10.4.0 checking target system type... i686-apple-darwin10.4.0 checking for PHP prefix... /usr checking for PHP includes... -I/usr/include/php -I/usr/include/php/main -I/usr/include/php/TSRM -I/usr/include/php/Zend -I/usr/include/php/ext -I/usr/include/php/ext/date/lib checking for PHP extension directory... /usr/lib/php/extensions/no-debug-non-zts-20090626 checking for PHP installed headers prefix... /usr/include/php checking if debug is enabled... no checking if zts is enabled... no checking for re2c... no configure: WARNING: You will need re2c 0.13.4 or later if you want to regenerate PHP parsers. checking for gawk... gawk checking for oauth support... yes, shared checking for cURL in default path... found in /usr checking for ld used by cc... /usr/libexec/gcc/i686-apple-darwin10/4.2.1/ld checking if the linker (/usr/libexec/gcc/i686-apple-darwin10/4.2.1/ld) is GNU ld... no checking for /usr/libexec/gcc/i686-apple-darwin10/4.2.1/ld option to reload object files... -r checking for BSD-compatible nm... /usr/bin/nm checking whether ln -s works... yes checking how to recognise dependent libraries... pass_all checking for ANSI C header files... yes checking for sys/types.h... yes checking for sys/stat.h... yes checking for stdlib.h... yes checking for string.h... yes checking for memory.h... yes checking for strings.h... yes checking for inttypes.h... yes checking for stdint.h... yes checking for unistd.h... yes checking dlfcn.h usability... yes checking dlfcn.h presence... yes checking for dlfcn.h... yes checking the maximum length of command line arguments... 196608 checking command to parse /usr/bin/nm output from cc object... rm: conftest.dSYM: is a directory rm: conftest.dSYM: is a directory rm: conftest.dSYM: is a directory rm: conftest.dSYM: is a directory ok checking for objdir... .libs checking for ar... ar checking for ranlib... ranlib checking for strip... 
strip rm: conftest.dSYM: is a directory rm: conftest.dSYM: is a directory checking if cc static flag works... rm: conftest.dSYM: is a directory yes checking if cc supports -fno-rtti -fno-exceptions... rm: conftest.dSYM: is a directory no checking for cc option to produce PIC... -fno-common checking if cc PIC flag -fno-common works... rm: conftest.dSYM: is a directory yes checking if cc supports -c -o file.o... rm: conftest.dSYM: is a directory yes checking whether the cc linker (/usr/libexec/gcc/i686-apple-darwin10/4.2.1/ld) supports shared libraries... yes checking dynamic linker characteristics... darwin10.4.0 dyld checking how to hardcode library paths into programs... immediate checking whether stripping libraries is possible... yes checking if libtool supports shared libraries... yes checking whether to build shared libraries... yes checking whether to build static libraries... no >>>> creating libtool appending configuration tag "CXX" to libtool configure: creating ./config.status config.status: creating config.h running: make /bin/sh /private/var/tmp/pear-build-root/oauth-1.0.0/libtool --mode=compile cc -I. -I/private/var/tmp/apache_mod_php/apache_mod_php-53~1/Build/tmp/pear/temp/oauth -DPHP_ATOM_INC -I/private/var/tmp/pear-build-root/oauth-1.0.0/include -I/private/var/tmp/pear-build-root/oauth-1.0.0/main -I/private/var/tmp/apache_mod_php/apache_mod_php-53~1/Build/tmp/pear/temp/oauth -I/usr/include/php -I/usr/include/php/main -I/usr/include/php/TSRM -I/usr/include/php/Zend -I/usr/include/php/ext -I/usr/include/php/ext/date/lib -DHAVE_CONFIG_H -g -O2 -Wall -g -c /private/var/tmp/apache_mod_php/apache_mod_php-53~1/Build/tmp/pear/temp/oauth/oauth.c -o oauth.lo mkdir .libs cc -I. "-I/private/var/tmp/apache_mod_php/apache_mod_php-53~1/Build/tmp/pear/temp/oauth" -DPHP_ATOM_INC -I/private/var/tmp/pear-build-root/oauth-1.0.0/include -I/private/var/tmp/pear-build-root/oauth-1.0.0/main "-I/private/var/tmp/apache_mod_php/apache_mod_php-53~1/Build/tmp/pear/temp/oauth" -I/usr/include/php -I/usr/include/php/main -I/usr/include/php/TSRM -I/usr/include/php/Zend -I/usr/include/php/ext -I/usr/include/php/ext/date/lib -DHAVE_CONFIG_H -g -O2 -Wall -g -c "/private/var/tmp/apache_mod_php/apache_mod_php-53~1/Build/tmp/pear/temp/oauth/oauth.c" -fno-common -DPIC -o .libs/oauth.o In file included from /private/var/tmp/apache_mod_php/apache_mod_php-53~1/Build/tmp/pear/temp/oauth/php_oauth.h:47, from /private/var/tmp/apache_mod_php/apache_mod_php-53~1/Build/tmp/pear/temp/oauth/oauth.c:14: /usr/include/php/ext/pcre/php_pcre.h:29:18: error: pcre.h: No such file or directory In file included from /private/var/tmp/apache_mod_php/apache_mod_php-53~1/Build/tmp/pear/temp/oauth/php_oauth.h:47, from /private/var/tmp/apache_mod_php/apache_mod_php-53~1/Build/tmp/pear/temp/oauth/oauth.c:14: /usr/include/php/ext/pcre/php_pcre.h:37: error: expected '=', ',', ';', 'asm' or '__attribute__' before '*' token /usr/include/php/ext/pcre/php_pcre.h:38: error: expected '=', ',', ';', 'asm' or '__attribute__' before '*' token /usr/include/php/ext/pcre/php_pcre.h:44: error: expected specifier-qualifier-list before 'pcre' make: *** [oauth.lo] Error 1 ERROR: `make' failed
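    The failing header is pcre.h: Apple's bundled PHP headers reference it from php_pcre.h but do not ship it. A hedged sketch of one commonly suggested workaround on Snow Leopard (the MacPorts package name and paths are assumptions, not taken from the question):

      # Provide a pcre.h where php_pcre.h expects to find it, then retry the build:
      sudo port install pcre
      sudo cp /opt/local/include/pcre.h /usr/include/php/ext/pcre/pcre.h
      sudo pecl install oauth

    If MacPorts is not available, copying pcre.h.generic out of a PCRE source tarball to the same location should serve the same purpose.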

    Read the article

  • Injection with google guice does not work anymore after obfuscation with proguard

    - by sme
    Has anyone ever tried to combine the use of google guice with obfuscation (in particular proguard)? The obfuscated version of my code does not work with google guice as guice complains about missing type parameters. This information seems to be erased by the transformation step that proguard does, even when the relevant classes are excluded from the obfuscation. The stack trace looks like this: com.google.inject.CreationException: Guice creation errors: 1) Cannot inject a Provider that has no type parameter while locating com.google.inject.Provider for parameter 0 at de.repower.lvs.client.admin.user.administration.AdminUserCommonPanel.setPasswordPanelProvider(SourceFile:499) at de.repower.lvs.client.admin.user.administration.AdminUserCommonPanel.setPasswordPanelProvider(SourceFile:499) while locating de.repower.lvs.client.admin.user.administration.AdminUserCommonPanel for parameter 0 at de.repower.lvs.client.admin.user.administration.b.k.setParentPanel(SourceFile:65) at de.repower.lvs.client.admin.user.administration.b.k.setParentPanel(SourceFile:65) at de.repower.lvs.client.admin.user.administration.o.a(SourceFile:38) 2) Cannot inject a Provider that has no type parameter while locating com.google.inject.Provider for parameter 0 at de.repower.lvs.client.admin.user.administration.AdminUserCommonPanel.setWindTurbineAccessGroupProvider(SourceFile:509) at de.repower.lvs.client.admin.user.administration.AdminUserCommonPanel.setWindTurbineAccessGroupProvider(SourceFile:509) while locating de.repower.lvs.client.admin.user.administration.AdminUserCommonPanel for parameter 0 at de.repower.lvs.client.admin.user.administration.b.k.setParentPanel(SourceFile:65) at de.repower.lvs.client.admin.user.administration.b.k.setParentPanel(SourceFile:65) at de.repower.lvs.client.admin.user.administration.o.a(SourceFile:38) 2 errors at com.google.inject.internal.Errors.throwCreationExceptionIfErrorsExist(Errors.java:354) at com.google.inject.InjectorBuilder.initializeStatically(InjectorBuilder.java:152) at com.google.inject.InjectorBuilder.build(InjectorBuilder.java:105) at com.google.inject.Guice.createInjector(Guice.java:92) at com.google.inject.Guice.createInjector(Guice.java:69) at com.google.inject.Guice.createInjector(Guice.java:59) I tried to create a small example (without using guice) that seems to reproduce the problem: package de.repower.common; import java.lang.reflect.Method; import java.lang.reflect.ParameterizedType; import java.lang.reflect.Type; class SomeClass<S> { } public class ParameterizedTypeTest { public void someMethod(SomeClass<Integer> param) { System.out.println("value: " + param); System.setProperty("my.dummmy.property", "hallo"); } private static void checkParameterizedMethod(ParameterizedTypeTest testObject) { System.out.println("checking parameterized method ..."); Method[] methods = testObject.getClass().getMethods(); for (Method method : methods) { if (method.getName().equals("someMethod")) { System.out.println("Found method " + method.getName()); Type[] types = method.getGenericParameterTypes(); Type parameterType = types[0]; if (parameterType instanceof ParameterizedType) { Type parameterizedType = ((ParameterizedType) parameterType).getActualTypeArguments()[0]; System.out.println("Parameter: " + parameterizedType); System.out.println("Class: " + ((Class) parameterizedType).getName()); } else { System.out.println("Failed: type ist not instance of ParameterizedType"); } } } } public static void main(String[] args) { System.out.println("Starting ..."); try { 
ParameterizedTypeTest someInstance = new ParameterizedTypeTest(); checkParameterizedMethod(someInstance); } catch (SecurityException e) { e.printStackTrace(); } } } If you run this code unobfuscated, the output looks like this: Starting ... checking parameterized method ... Found method someMethod Parameter: class java.lang.Integer Class: java.lang.Integer But running the version obfuscated with proguard yields: Starting ... checking parameterized method ... Found method someMethod Failed: type ist not instance of ParameterizedType These are the options I used for obfuscation: -injars classes_eclipse\methodTest.jar -outjars classes_eclipse\methodTestObfuscated.jar -libraryjars 'C:\Program Files\Java\jre6\lib\rt.jar' -dontskipnonpubliclibraryclasses -dontskipnonpubliclibraryclassmembers -dontshrink -printusage classes_eclipse\shrink.txt -dontoptimize -dontpreverify -verbose -keep class **.ParameterizedTypeTest.class { <fields>; <methods>; } -keep class ** { <fields>; <methods>; } # Keep - Applications. Keep all application classes, along with their 'main' # methods. -keepclasseswithmembers public class * { public static void main(java.lang.String[]); } # Also keep - Enumerations. Keep the special static methods that are required in # enumeration classes. -keepclassmembers enum * { public static **[] values(); public static ** valueOf(java.lang.String); } # Also keep - Database drivers. Keep all implementations of java.sql.Driver. -keep class * extends java.sql.Driver # Also keep - Swing UI L&F. Keep all extensions of javax.swing.plaf.ComponentUI, # along with the special 'createUI' method. -keep class * extends javax.swing.plaf.ComponentUI { public static javax.swing.plaf.ComponentUI createUI(javax.swing.JComponent); } # Keep names - Native method names. Keep all native class/method names. -keepclasseswithmembers,allowshrinking class * { native <methods>; } # Keep names - _class method names. Keep all .class method names. This may be # useful for libraries that will be obfuscated again with different obfuscators. -keepclassmembers,allowshrinking class * { java.lang.Class class$(java.lang.String); java.lang.Class class$(java.lang.String,boolean); } Does anyone have an idea of how to solve this (apart from the obvious workaround to put the relevant files into a separate jar and not obfuscate it)? Best regards, Stefan
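    Generic type parameters live in the class file's Signature attribute, which ProGuard strips by default even for classes excluded from obfuscation; Guice (and the reflection test above) can only see type parameters if that attribute survives. A minimal sketch of the directives that should preserve the needed metadata (the exact attribute list is an assumption to verify against your ProGuard version):

      # Keep the metadata that reflection-based DI relies on:
      -keepattributes Signature,*Annotation*,InnerClasses,EnclosingMethod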

    Read the article

  • Rails + Nginx + Unicorn multiple apps

    - by Mikhail Nikalyukin
    I got a server that currently has two apps installed, and I need to add another one. Here are my configs. nginx.conf user www-data www-data; worker_processes 4; pid /var/run/nginx.pid; events { worker_connections 768; # multi_accept on; } http { sendfile on; tcp_nopush on; tcp_nodelay on; keepalive_timeout 65; types_hash_max_size 2048; include /etc/nginx/mime.types; default_type application/octet-stream; ## # Logging Settings ## access_log /var/log/nginx/access.log; error_log /var/log/nginx/error.log; ## # Disable unknown domains ## server { listen 80 default; server_name _; return 444; } ## # Virtual Host Configs ## include /home/ruby/apps/*/shared/config/nginx.conf; } unicorn.rb deploy_to = "/home/ruby/apps/staging.domain.com" rails_root = "#{deploy_to}/current" pid_file = "#{deploy_to}/shared/pids/unicorn.pid" socket_file= "#{deploy_to}/shared/sockets/.sock" log_file = "#{rails_root}/log/unicorn.log" err_log = "#{rails_root}/log/unicorn_error.log" old_pid = pid_file + '.oldbin' timeout 30 worker_processes 10 listen socket_file, :backlog => 1024 pid pid_file stderr_path err_log stdout_path log_file preload_app true GC.copy_on_write_friendly = true if GC.respond_to?(:copy_on_write_friendly=) before_exec do |server| ENV["BUNDLE_GEMFILE"] = "#{rails_root}/Gemfile" end before_fork do |server, worker| defined?(ActiveRecord::Base) and ActiveRecord::Base.connection.disconnect! if File.exists?(old_pid) && server.pid != old_pid begin Process.kill("QUIT", File.read(old_pid).to_i) rescue Errno::ENOENT, Errno::ESRCH end end end after_fork do |server, worker| defined?(ActiveRecord::Base) and ActiveRecord::Base.establish_connection end Also, I added Capistrano to the project. deploy.rb # encoding: utf-8 require 'capistrano/ext/multistage' require 'rvm/capistrano' require 'bundler/capistrano' set :stages, %w(staging production) set :default_stage, "staging" default_run_options[:pty] = true ssh_options[:paranoid] = false ssh_options[:forward_agent] = true set :scm, "git" set :user, "ruby" set :runner, "ruby" set :use_sudo, false set :deploy_via, :remote_cache set :rvm_ruby_string, '1.9.2' # Create uploads directory and link task :configure, :roles => :app do run "cp #{shared_path}/config/database.yml #{release_path}/config/database.yml" # run "ln -s #{shared_path}/db/sphinx #{release_path}/db/sphinx" # run "ln -s #{shared_path}/config/unicorn.rb #{release_path}/config/unicorn.rb" end namespace :deploy do task :restart do run "if [ -f #{unicorn_pid} ] && [ -e /proc/$(cat #{unicorn_pid}) ]; then kill -s USR2 `cat #{unicorn_pid}`; else cd #{deploy_to}/current && bundle exec unicorn_rails -c #{unicorn_conf} -E #{rails_env} -D; fi" end task :start do run "cd #{deploy_to}/current && bundle exec unicorn_rails -c #{unicorn_conf} -E #{rails_env} -D" end task :stop do run "if [ -f #{unicorn_pid} ] && [ -e /proc/$(cat #{unicorn_pid}) ]; then kill -QUIT `cat #{unicorn_pid}`; fi" end end before 'deploy:finalize_update', 'configure' after "deploy:update", "deploy:migrate", "deploy:cleanup" require './config/boot' nginx.conf in app shared path upstream staging_whotracker { server unix:/home/ruby/apps/staging.whotracker.com/shared/sockets/.sock; } server { listen 209.105.242.45; server_name beta.whotracker.com; rewrite ^/(.*) http://www.beta.whotracker.com/$1 permanent; } server { listen 209.105.242.45; server_name www.beta.hotracker.com; root /home/ruby/apps/staging.whotracker.com/current/public; location ~ ^/sitemaps/ { root
/home/ruby/apps/staging.whotracker.com/current/system; if (!-f $request_filename) { break; } if (-f $request_filename) { expires -1; break; } } # cache static files :P location ~ ^/(images|javascripts|stylesheets)/ { root /home/ruby/apps/staging.whotracker.com/current/public; if ($query_string ~* "^[0-9a-zA-Z]{40}$") { expires max; break; } if (!-f $request_filename) { break; } } if ( -f /home/ruby/apps/staging.whotracker.com/shared/offline ) { return 503; } location /blog { index index.php index.html index.htm; try_files $uri $uri/ /blog/index.php?q=$uri; } location ~ \.php$ { try_files $uri =404; include /etc/nginx/fastcgi_params; fastcgi_pass unix:/var/run/php-fastcgi/php-fastcgi.socket; fastcgi_index index.php; fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name; } location / { proxy_set_header HTTP_REFERER $http_referer; proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header Host $http_host; proxy_redirect off; proxy_max_temp_file_size 0; # If the file exists as a static file serve it directly without # running all the other rewrite tests on it if (-f $request_filename) { break; } if (!-f $request_filename) { proxy_pass http://staging_whotracker; break; } } error_page 502 =503 @maintenance; error_page 500 504 /500.html; error_page 503 @maintenance; location @maintenance { rewrite ^(.*)$ /503.html break; } } unicorn.log executing ["/home/ruby/apps/staging.whotracker.com/shared/bundle/ruby/1.9.1/bin/unicorn_rails", "-c", "/home/ruby/apps/staging.whotracker.com/current/config/unicorn.rb", "-E", "staging", "-D", {5=>#<Kgio::UNIXServer:/home/ruby/apps/staging.whotracker.com/shared/sockets/.sock>}] (in /home/ruby/apps/staging.whotracker.com/releases/20120517114413) I, [2012-05-17T06:43:48.111717 #14636] INFO -- : inherited addr=/home/ruby/apps/staging.whotracker.com/shared/sockets/.sock fd=5 I, [2012-05-17T06:43:48.111938 #14636] INFO -- : Refreshing Gem list worker=0 ready ... master process ready ... reaped #<Process::Status: pid 2590 exit 0> worker=6 ... master complete The deploy goes successfully, but when I try to access beta.whotracker.com or the IP address I get a SERVER NOT FOUND error, while the other apps work great. Nothing shows up in the error logs. Can you please point out where my mistake is?
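    Two things seem worth double-checking in the config above, offered as hedged observations rather than a confirmed diagnosis. First, the beta.whotracker.com block issues a permanent redirect to www.beta.whotracker.com, so that exact name must resolve in DNS — a missing record produces precisely a SERVER NOT FOUND error in the browser. Second, the second server block is named www.beta.hotracker.com (note the missing "w" in whotracker), and nginx matches blocks by exact server_name, so a request for www.beta.whotracker.com would fall through to the default server, which returns 444. A sketch of the presumably intended pair:

      server {
          listen 209.105.242.45;
          server_name beta.whotracker.com;
          rewrite ^/(.*) http://www.beta.whotracker.com/$1 permanent;
      }
      server {
          listen 209.105.242.45;
          server_name www.beta.whotracker.com;  # was www.beta.hotracker.com
          root /home/ruby/apps/staging.whotracker.com/current/public;
          # ... rest of the block unchanged ...
      }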

    Read the article

  • Root certificate authority works windows/linux but not mac osx - (malformed)

    - by AKwhat
    I have created a self-signed root certificate authority which if I install onto windows, linux, or even using the certificate store in firefox (windows/linux/macosx) will work perfectly with my terminating proxy. I have installed it into the system keychain and I have set the certificate to always trust. Within the chrome browser details it says "The certificate that Chrome received during this connection attempt is not formatted correctly, so Chrome cannot use it to protect your information. Error type: Malformed certificate" I used this code to create the certificate: openssl genrsa -des3 -passout pass:***** -out private/server.key 4096 openssl req -batch -passin pass:***** -new -x509 -nodes -sha1 -days 3600 -key private/server.key -out server.crt -config ../openssl.cnf If the issue is NOT that it is malformed (because it works everywhere else) then what else could it be? Am I installing it incorrectly? To be clear: Within the windows/linux OS, all browsers work perfectly. Within mac only firefox works if it uses its internal certificate store and not the keychain. It's the keychain method of importing a certificate that causes the issue. Thus, all browsers using the keychain will not work. Root CA Cert: -----BEGIN CERTIFICATE----- **some base64 stuff** -----END CERTIFICATE----- Intermediate CA Cert: Certificate: Data: Version: 3 (0x2) Serial Number: 1 (0x1) Signature Algorithm: sha1WithRSAEncryption Issuer: C=*****, ST=*******, L=******, O=*******, CN=******/emailAddress=****** Validity Not Before: May 21 13:57:32 2014 GMT Not After : Jun 20 13:57:32 2014 GMT Subject: C=*****, ST=********, O=*******, CN=*******/emailAddress=******* Subject Public Key Info: Public Key Algorithm: rsaEncryption RSA Public Key: (4096 bit) Modulus (4096 bit): 00:e7:2d:75:38:23:02:8e:b9:8d:2f:33:4c:2a:11: 6d:d4:f8:29:ab:f3:fc:12:00:0f:bb:34:ec:35:ed: a5:38:10:1e:f3:54:c2:69:ae:3b:22:c0:0d:00:97: 08:da:b9:c9:32:c0:c6:b1:8b:22:7e:53:ea:69:e2: 6d:0f:bd:f5:96:b2:d0:0d:b2:db:07:ba:f1:ce:53: 8a:5e:e0:22:ce:3e:36:ed:51:63:21:e7:45:ad:f9: 4d:9b:8f:7f:33:4c:ed:fc:a6:ac:16:70:f5:96:36: 37:c8:65:47:d1:d3:12:70:3e:8d:2f:fb:9f:94:e0: c9:5f:d0:8c:30:e0:04:23:38:22:e5:d9:84:15:b8: 31:e7:a7:28:51:b8:7f:01:49:fb:88:e9:6c:93:0e: 63:eb:66:2b:b4:a0:f0:31:33:8b:b4:04:84:1f:9e: d5:ed:23:cc:bf:9b:8e:be:9a:5c:03:d6:4f:1a:6f: 2d:8f:47:60:6c:89:c5:f0:06:df:ac:cb:26:f8:1a: 48:52:5e:51:a0:47:6a:30:e8:bc:88:8b:fd:bb:6b: c9:03:db:c2:46:86:c0:c5:a5:45:5b:a9:a3:61:35: 37:e9:fc:a1:7b:ae:71:3a:5c:9c:52:84:dd:b2:86: b3:2e:2e:7a:5b:e1:40:34:4a:46:f0:f8:43:26:58: 30:87:f9:c6:c9:bc:b4:73:8b:fc:08:13:33:cc:d0: b7:8a:31:e9:38:a3:a9:cc:01:e2:d4:c2:a5:c1:55: 52:72:52:2b:06:a3:36:30:0c:5c:29:1a:dd:14:93: 2b:9d:bf:ac:c1:2d:cd:3f:89:1f:bc:ad:a4:f2:bd: 81:77:a9:f4:f0:b9:50:9e:fb:f5:da:ee:4e:b7:66: e5:ab:d1:00:74:29:6f:01:28:32:ea:7d:3f:b3:d7: 97:f2:60:63:41:0f:30:6a:aa:74:f4:63:4f:26:7b: 71:ed:57:f1:d4:99:72:61:f4:69:ad:31:82:76:67: 21:e1:32:2f:e8:46:d3:28:61:b1:10:df:4c:02:e5: d3:cc:22:30:a4:bb:81:10:dc:7d:49:94:b2:02:2d: 96:7f:e5:61:fa:6b:bd:22:21:55:97:82:18:4e:b5: a0:67:2b:57:93:1c:ef:e5:d2:fb:52:79:95:13:11: 20:06:8c:fb:e7:0b:fd:96:08:eb:17:e6:5b:b5:a0: 8d:dd:22:63:99:af:ad:ce:8c:76:14:9a:31:55:d7: 95:ea:ff:10:6f:7c:9c:21:00:5e:be:df:b0:87:75: 5d:a6:87:ca:18:94:e7:6a:15:fe:27:dd:28:5e:c0: ad:d2:91:d3:2d:8e:c3:c0:9f:fb:ff:c0:36:7e:e2: d7:bc:41 Exponent: 65537 (0x10001) X509v3 extensions: X509v3 Subject Alternative Name: DNS:localhost, DNS:dropbox.com, DNS:*.dropbox.com, DNS:filedropper.com, DNS:*.filedropper.com X509v3 Subject Key Identifier: 
F3:E5:38:5B:3C:AF:1C:73:C1:4C:7D:8B:C8:A1:03:82:65:0D:FF:45 X509v3 Authority Key Identifier: keyid:2B:37:39:7B:9F:45:14:FE:F8:BC:CA:E0:6E:B4:5F:D6:1A:2B:D7:B0 DirName:/C=****/ST=******/L=*******/O=*******/CN=******/emailAddress=******* serial:EE:8C:A3:B4:40:90:B0:62 X509v3 Basic Constraints: CA:TRUE Signature Algorithm: sha1WithRSAEncryption 46:2a:2c:e0:66:e3:fa:c6:80:b6:81:e7:db:c3:29:ab:e7:1c: f0:d9:a0:b7:a9:57:8c:81:3e:30:8f:7d:ef:f7:ed:3c:5f:1e: a5:f6:ae:09:ab:5e:63:b4:f6:d6:b6:ac:1c:a0:ec:10:19:ce: dd:5a:62:06:b4:88:5a:57:26:81:8e:38:b9:0f:26:cd:d9:36: 83:52:ec:df:f4:63:ce:a1:ba:d4:1c:ec:b6:66:ed:f0:32:0e: 25:87:79:fa:95:ee:0f:a0:c6:2d:8f:e9:fb:11:de:cf:26:fa: 59:fa:bd:0b:74:76:a6:5d:41:0d:cd:35:4e:ca:80:58:2a:a8: 5d:e4:d8:cf:ef:92:8d:52:f9:f2:bf:65:50:da:a8:10:1b:5e: 50:a7:7e:57:7b:94:7f:5c:74:2e:80:ae:1e:24:5f:0b:7b:7e: 19:b6:b5:bd:9d:46:5a:e8:47:43:aa:51:b3:4b:3f:12:df:7f: ef:65:21:85:c2:f6:83:84:d0:8d:8b:d9:6d:a8:f9:11:d4:65: 7d:8f:28:22:3c:34:bb:99:4e:14:89:45:a4:62:ed:52:b1:64: 9a:fd:08:cd:ff:ca:9e:3b:51:81:33:e6:37:aa:cb:76:01:90: d1:39:6f:6a:8b:2d:f5:07:f8:f4:2a:ce:01:37:ba:4b:7f:d4: 62:d7:d6:66:b8:78:ad:0b:23:b6:2e:b0:9a:fc:0f:8c:4c:29: 86:a0:bc:33:71:e5:7f:aa:3e:0e:ca:02:e1:f6:88:f0:ff:a2: 04:5a:f5:d7:fe:7d:49:0a:d2:63:9c:24:ed:02:c7:4d:63:e6: 0c:e1:04:cd:a4:bf:a8:31:d3:10:db:b4:71:48:f7:1a:1b:d9: eb:a7:2e:26:00:38:bd:a8:96:b4:83:09:c9:3d:79:90:e1:61: 2c:fc:a0:2c:6b:7d:46:a8:d7:17:7f:ae:60:79:c1:b6:5c:f9: 3c:84:64:7b:7f:db:e9:f1:55:04:6e:b5:d3:5e:d3:e3:13:29: 3f:0b:03:f2:d7:a8:30:02:e1:12:f4:ae:61:6f:f5:4b:e9:ed: 1d:33:af:cd:9b:43:42:35:1a:d4:f6:b9:fb:bf:c9:8d:6c:30: 25:33:43:49:32:43:a5:a8:d8:82:ef:b0:a6:bd:8b:fb:b6:ed: 72:fd:9a:8f:00:3b:97:a3:35:a4:ad:26:2f:a9:7d:74:08:82: 26:71:40:f9:9b:01:14:2e:82:fb:2f:c0:11:51:00:51:07:f9: e1:f6:1f:13:6e:03:ee:d7:85:c2:64:ce:54:3f:15:d4:d7:92: 5f:87:aa:1e:b4:df:51:77:12:04:d2:a5:59:b3:26:87:79:ce: ee:be:60:4e:87:20:5c:7f -----BEGIN CERTIFICATE----- **some base64 stuff** -----END CERTIFICATE-----
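    Two command-line checks can help narrow down what "malformed" means here (a sketch; the file name is assumed from the openssl commands above). A root created with openssl req -x509 against a config without v3_ca extensions comes out as an X.509 v1 certificate with no Basic Constraints, which is one common reason OS X keychain consumers reject a CA that other platforms accept. Dump the root and look for "Version: 3" and "CA:TRUE", and try importing from the command line instead of by double-clicking:

      # Inspect the root; a v1 certificate or missing CA:TRUE is suspect:
      openssl x509 -in server.crt -noout -text
      # Add it to the System keychain as a trusted root:
      sudo security add-trusted-cert -d -r trustRoot \
          -k /Library/Keychains/System.keychain server.crt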

    Read the article

  • Enterprise Manager will not start on WebLogic after ADF install

    - by retrodev
    I just built a WebLogic 10.3.6 cluster with EM and JRF checked in the domain extensions. Next I installed ADF 11.1.1.7 by first installing ADF 11.1.1.6, then patching the environment and running upgradeADF in wlst. All seems well except I cannot start EM. The application transitions to STATE_ADMIN, but then fails with the exception below. Any advice would be appreciated. <[ACTIVE] ExecuteThread: '6' for queue: 'weblogic.kernel.Default (self-tuning)' < < <1372081430346 java.lang.RuntimeException: com.sun.faces.config.ConfigurationException: CONFIGURATION FAILED! null at com.sun.faces.config.ConfigureListener.contextInitialized(ConfigureListener.java:293) at weblogic.servlet.internal.EventsManager$FireContextListenerAction.run(EventsManager.java:481) at weblogic.security.acl.internal.AuthenticatedSubject.doAs(AuthenticatedSubject.java:321) at weblogic.security.service.SecurityManager.runAs(SecurityManager.java:120) at weblogic.servlet.internal.EventsManager.notifyContextCreatedEvent(EventsManager.java:181) at weblogic.servlet.internal.WebAppServletContext.preloadResources(WebAppServletContext.java:1870) at weblogic.servlet.internal.WebAppServletContext.start(WebAppServletContext.java:3155) at weblogic.servlet.internal.WebAppModule.startContexts(WebAppModule.java:1518) at weblogic.servlet.internal.WebAppModule.start(WebAppModule.java:487) at weblogic.application.internal.flow.ModuleStateDriver$3.next(ModuleStateDriver.java:427) at weblogic.application.utils.StateMachineDriver.nextState(StateMachineDriver.java:52) at weblogic.application.internal.flow.ModuleStateDriver.start(ModuleStateDriver.java:119) at weblogic.application.internal.flow.ScopedModuleDriver.start(ScopedModuleDriver.java:201) at weblogic.application.internal.flow.ModuleListenerInvoker.start(ModuleListenerInvoker.java:249) at weblogic.application.internal.flow.ModuleStateDriver$3.next(ModuleStateDriver.java:427) at weblogic.application.utils.StateMachineDriver.nextState(StateMachineDriver.java:52) at weblogic.application.internal.flow.ModuleStateDriver.start(ModuleStateDriver.java:119) at weblogic.application.internal.flow.StartModulesFlow.activate(StartModulesFlow.java:28) at weblogic.application.internal.BaseDeployment$2.next(BaseDeployment.java:672) at weblogic.application.utils.StateMachineDriver.nextState(StateMachineDriver.java:52) at weblogic.application.internal.BaseDeployment.activate(BaseDeployment.java:212) at weblogic.application.internal.EarDeployment.activate(EarDeployment.java:59) at weblogic.application.internal.DeploymentStateChecker.activate(DeploymentStateChecker.java:161) at weblogic.deploy.internal.targetserver.AppContainerInvoker.activate(AppContainerInvoker.java:79) at weblogic.deploy.internal.targetserver.operations.AbstractOperation.activate(AbstractOperation.java:569) at weblogic.deploy.internal.targetserver.operations.ActivateOperation.activateDeployment(ActivateOperation.java:150) at weblogic.deploy.internal.targetserver.operations.ActivateOperation.doCommit(ActivateOperation.java:116) at weblogic.deploy.internal.targetserver.operations.StartOperation.doCommit(StartOperation.java:149) at weblogic.deploy.internal.targetserver.operations.AbstractOperation.commit(AbstractOperation.java:323) at weblogic.deploy.internal.targetserver.DeploymentManager.handleDeploymentCommit(DeploymentManager.java:844) at weblogic.deploy.internal.targetserver.DeploymentManager.activateDeploymentList(DeploymentManager.java:1249) at
weblogic.deploy.internal.targetserver.DeploymentManager.handleCommit(DeploymentManager.java:440) at weblogic.deploy.internal.targetserver.DeploymentServiceDispatcher.commit(DeploymentServiceDispatcher.java:164) at weblogic.deploy.service.internal.targetserver.DeploymentReceiverCallbackDeliverer.doCommitCallback(DeploymentReceiverCallbackDeliverer.java:195) at weblogic.deploy.service.internal.targetserver.DeploymentReceiverCallbackDeliverer.access$100(DeploymentReceiverCallbackDeliverer.java:13) at weblogic.deploy.service.internal.targetserver.DeploymentReceiverCallbackDeliverer$2.run(DeploymentReceiverCallbackDeliverer.java:69) at weblogic.work.SelfTuningWorkManagerImpl$WorkAdapterImpl.run(SelfTuningWorkManagerImpl.java:545) at weblogic.work.ExecuteThread.execute(ExecuteThread.java:256) at weblogic.work.ExecuteThread.run(ExecuteThread.java:221) Caused By: com.sun.faces.config.ConfigurationException: CONFIGURATION FAILED! null at com.sun.faces.config.ConfigManager.initialize(ConfigManager.java:357) at com.sun.faces.config.ConfigureListener.contextInitialized(ConfigureListener.java:227) at weblogic.servlet.internal.EventsManager$FireContextListenerAction.run(EventsManager.java:481) at weblogic.security.acl.internal.AuthenticatedSubject.doAs(AuthenticatedSubject.java:321) at weblogic.security.service.SecurityManager.runAs(SecurityManager.java:120) at weblogic.servlet.internal.EventsManager.notifyContextCreatedEvent(EventsManager.java:181) at weblogic.servlet.internal.WebAppServletContext.preloadResources(WebAppServletContext.java:1870) at weblogic.servlet.internal.WebAppServletContext.start(WebAppServletContext.java:3155) at weblogic.servlet.internal.WebAppModule.startContexts(WebAppModule.java:1518) at weblogic.servlet.internal.WebAppModule.start(WebAppModule.java:487) at weblogic.application.internal.flow.ModuleStateDriver$3.next(ModuleStateDriver.java:427) at weblogic.application.utils.StateMachineDriver.nextState(StateMachineDriver.java:52) at weblogic.application.internal.flow.ModuleStateDriver.start(ModuleStateDriver.java:119) at weblogic.application.internal.flow.ScopedModuleDriver.start(ScopedModuleDriver.java:201) at weblogic.application.internal.flow.ModuleListenerInvoker.start(ModuleListenerInvoker.java:249) at weblogic.application.internal.flow.ModuleStateDriver$3.next(ModuleStateDriver.java:427) at weblogic.application.utils.StateMachineDriver.nextState(StateMachineDriver.java:52) at weblogic.application.internal.flow.ModuleStateDriver.start(ModuleStateDriver.java:119) at weblogic.application.internal.flow.StartModulesFlow.activate(StartModulesFlow.java:28) at weblogic.application.internal.BaseDeployment$2.next(BaseDeployment.java:672) at weblogic.application.utils.StateMachineDriver.nextState(StateMachineDriver.java:52) at weblogic.application.internal.BaseDeployment.activate(BaseDeployment.java:212) at weblogic.application.internal.EarDeployment.activate(EarDeployment.java:59) at weblogic.application.internal.DeploymentStateChecker.activate(DeploymentStateChecker.java:161) at weblogic.deploy.internal.targetserver.AppContainerInvoker.activate(AppContainerInvoker.java:79) at weblogic.deploy.internal.targetserver.operations.AbstractOperation.activate(AbstractOperation.java:569) at weblogic.deploy.internal.targetserver.operations.ActivateOperation.activateDeployment(ActivateOperation.java:150) at weblogic.deploy.internal.targetserver.operations.ActivateOperation.doCommit(ActivateOperation.java:116) at 
weblogic.deploy.internal.targetserver.operations.StartOperation.doCommit(StartOperation.java:149) at weblogic.deploy.internal.targetserver.operations.AbstractOperation.commit(AbstractOperation.java:323) at weblogic.deploy.internal.targetserver.DeploymentManager.handleDeploymentCommit(DeploymentManager.java:844) at weblogic.deploy.internal.targetserver.DeploymentManager.activateDeploymentList(DeploymentManager.java:1249) at weblogic.deploy.internal.targetserver.DeploymentManager.handleCommit(DeploymentManager.java:440) at weblogic.deploy.internal.targetserver.DeploymentServiceDispatcher.commit(DeploymentServiceDispatcher.java:164) at weblogic.deploy.service.internal.targetserver.DeploymentReceiverCallbackDeliverer.doCommitCallback(DeploymentReceiverCallbackDeliverer.java:195) at weblogic.deploy.service.internal.targetserver.DeploymentReceiverCallbackDeliverer.access$100(DeploymentReceiverCallbackDeliverer.java:13) at weblogic.deploy.service.internal.targetserver.DeploymentReceiverCallbackDeliverer$2.run(DeploymentReceiverCallbackDeliverer.java:69) at weblogic.work.SelfTuningWorkManagerImpl$WorkAdapterImpl.run(SelfTuningWorkManagerImpl.java:545) at weblogic.work.ExecuteThread.execute(ExecuteThread.java:256) at weblogic.work.ExecuteThread.run(ExecuteThread.java:221) Caused By: java.lang.NullPointerException at oracle.adfinternal.view.faces.unified.renderkit.UnifiedRenderKit.(UnifiedRenderKit.java:129) at oracle.adfinternal.view.faces.unified.renderkit.UnifiedRenderKit.createRenderKit(UnifiedRenderKit.java:111) at oracle.adfinternal.view.faces.unified.renderkit.UnifiedRenderKitFactory.getRenderKit(UnifiedRenderKitFactory.java:59) at org.apache.myfaces.trinidadinternal.renderkit.CoreRenderKitFactory.getRenderKit(CoreRenderKitFactory.java:55) at com.sun.faces.config.processor.RenderKitConfigProcessor.addRenderKits(RenderKitConfigProcessor.java:240) at com.sun.faces.config.processor.RenderKitConfigProcessor.process(RenderKitConfigProcessor.java:159) at com.sun.faces.config.processor.AbstractConfigProcessor.invokeNext(AbstractConfigProcessor.java:114) at com.sun.faces.config.processor.ManagedBeanConfigProcessor.process(ManagedBeanConfigProcessor.java:270) at com.sun.faces.config.processor.AbstractConfigProcessor.invokeNext(AbstractConfigProcessor.java:114) at com.sun.faces.config.processor.ValidatorConfigProcessor.process(ValidatorConfigProcessor.java:120) at com.sun.faces.config.processor.AbstractConfigProcessor.invokeNext(AbstractConfigProcessor.java:114) at com.sun.faces.config.processor.ConverterConfigProcessor.process(ConverterConfigProcessor.java:126) at com.sun.faces.config.processor.AbstractConfigProcessor.invokeNext(AbstractConfigProcessor.java:114) at com.sun.faces.config.processor.ComponentConfigProcessor.process(ComponentConfigProcessor.java:117) at com.sun.faces.config.processor.AbstractConfigProcessor.invokeNext(AbstractConfigProcessor.java:114) at com.sun.faces.config.processor.ApplicationConfigProcessor.process(ApplicationConfigProcessor.java:341) at com.sun.faces.config.processor.AbstractConfigProcessor.invokeNext(AbstractConfigProcessor.java:114) at com.sun.faces.config.processor.LifecycleConfigProcessor.process(LifecycleConfigProcessor.java:116) at com.sun.faces.config.processor.AbstractConfigProcessor.invokeNext(AbstractConfigProcessor.java:114) at com.sun.faces.config.processor.FactoryConfigProcessor.process(FactoryConfigProcessor.java:216) at com.sun.faces.config.ConfigManager.initialize(ConfigManager.java:338) at 
com.sun.faces.config.ConfigureListener.contextInitialized(ConfigureListener.java:227) at weblogic.servlet.internal.EventsManager$FireContextListenerAction.run(EventsManager.java:481) at weblogic.security.acl.internal.AuthenticatedSubject.doAs(AuthenticatedSubject.java:321) at weblogic.security.service.SecurityManager.runAs(SecurityManager.java:120) at weblogic.servlet.internal.EventsManager.notifyContextCreatedEvent(EventsManager.java:181) at weblogic.servlet.internal.WebAppServletContext.preloadResources(WebAppServletContext.java:1870) at weblogic.servlet.internal.WebAppServletContext.start(WebAppServletContext.java:3155) at weblogic.servlet.internal.WebAppModule.startContexts(WebAppModule.java:1518) at weblogic.servlet.internal.WebAppModule.start(WebAppModule.java:487) at weblogic.application.internal.flow.ModuleStateDriver$3.next(ModuleStateDriver.java:427) at weblogic.application.utils.StateMachineDriver.nextState(StateMachineDriver.java:52) at weblogic.application.internal.flow.ModuleStateDriver.start(ModuleStateDriver.java:119) at weblogic.application.internal.flow.ScopedModuleDriver.start(ScopedModuleDriver.java:201) at weblogic.application.internal.flow.ModuleListenerInvoker.start(ModuleListenerInvoker.java:249) at weblogic.application.internal.flow.ModuleStateDriver$3.next(ModuleStateDriver.java:427) at weblogic.application.utils.StateMachineDriver.nextState(StateMachineDriver.java:52) at weblogic.application.internal.flow.ModuleStateDriver.start(ModuleStateDriver.java:119) at weblogic.application.internal.flow.StartModulesFlow.activate(StartModulesFlow.java:28) at weblogic.application.internal.BaseDeployment$2.next(BaseDeployment.java:672) at weblogic.application.utils.StateMachineDriver.nextState(StateMachineDriver.java:52) at weblogic.application.internal.BaseDeployment.activate(BaseDeployment.java:212) at weblogic.application.internal.EarDeployment.activate(EarDeployment.java:59) at weblogic.application.internal.DeploymentStateChecker.activate(DeploymentStateChecker.java:161) at weblogic.deploy.internal.targetserver.AppContainerInvoker.activate(AppContainerInvoker.java:79) at weblogic.deploy.internal.targetserver.operations.AbstractOperation.activate(AbstractOperation.java:569) at weblogic.deploy.internal.targetserver.operations.ActivateOperation.activateDeployment(ActivateOperation.java:150) at weblogic.deploy.internal.targetserver.operations.ActivateOperation.doCommit(ActivateOperation.java:116) at weblogic.deploy.internal.targetserver.operations.StartOperation.doCommit(StartOperation.java:149) at weblogic.deploy.internal.targetserver.operations.AbstractOperation.commit(AbstractOperation.java:323) at weblogic.deploy.internal.targetserver.DeploymentManager.handleDeploymentCommit(DeploymentManager.java:844) at weblogic.deploy.internal.targetserver.DeploymentManager.activateDeploymentList(DeploymentManager.java:1249) at weblogic.deploy.internal.targetserver.DeploymentManager.handleCommit(DeploymentManager.java:440) at weblogic.deploy.internal.targetserver.DeploymentServiceDispatcher.commit(DeploymentServiceDispatcher.java:164) at weblogic.deploy.service.internal.targetserver.DeploymentReceiverCallbackDeliverer.doCommitCallback(DeploymentReceiverCallbackDeliverer.java:195) at weblogic.deploy.service.internal.targetserver.DeploymentReceiverCallbackDeliverer.access$100(DeploymentReceiverCallbackDeliverer.java:13) at weblogic.deploy.service.internal.targetserver.DeploymentReceiverCallbackDeliverer$2.run(DeploymentReceiverCallbackDeliverer.java:69) at 
weblogic.work.SelfTuningWorkManagerImpl$WorkAdapterImpl.run(SelfTuningWorkManagerImpl.java:545) at weblogic.work.ExecuteThread.execute(ExecuteThread.java:256) at weblogic.work.ExecuteThread.run(ExecuteThread.java:221)
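    Nothing in the trace names the root cause directly, but the NullPointerException is thrown while constructing UnifiedRenderKit during JSF RenderKit registration, which usually points at mismatched ADF runtime jars rather than a domain configuration error. A hedged way to look for a mixed 11.1.1.6/11.1.1.7 install (the jar name pattern and manifest key are assumptions):

      # List ADF view jars under the middleware home with their manifest versions:
      find "$MW_HOME" -name 'adf-richclient-impl*.jar' \
          -exec sh -c 'echo "$1"; unzip -p "$1" META-INF/MANIFEST.MF | grep -i version' _ {} \;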

    Read the article

  • solved: puppet master REST API returns 403 when running under Passenger, works when master runs from command line

    - by Anadi Misra
    I am using the standard auth.conf provided in the puppet install for the puppet master, which is running through Passenger under Nginx. However, for most of the catalog, file and certificate requests I get a 403 response. ### Authenticated paths - these apply only when the client ### has a valid certificate and is thus authenticated # allow nodes to retrieve their own catalog path ~ ^/catalog/([^/]+)$ method find allow $1 # allow nodes to retrieve their own node definition path ~ ^/node/([^/]+)$ method find allow $1 # allow all nodes to access the certificates services path ~ ^/certificate_revocation_list/ca method find allow * # allow all nodes to store their reports path /report method save allow * # unconditionally allow access to all file services # which means in practice that fileserver.conf will # still be used path /file allow * ### Unauthenticated ACL, for clients for which the current master doesn't ### have a valid certificate; we allow authenticated users, too, because ### there isn't a great harm in letting that request through. # allow access to the master CA path /certificate/ca auth any method find allow * path /certificate/ auth any method find allow * path /certificate_request auth any method find, save allow * path /facts auth any method find, search allow * # this one is not strictly necessary, but it has the merit # of showing the default policy, which is deny everything else path / auth any The puppet master, however, does not seem to be following this, as I get this error on the client [amisr1@blramisr195602 ~]$ sudo puppet agent --no-daemonize --verbose --server bangvmpllda02.XXXXX.com [sudo] password for amisr1: Starting Puppet client version 3.0.1 Warning: Unable to fetch my node definition, but the agent run will continue: Warning: Error 403 on SERVER: Forbidden request: XX.XXX.XX.XX(XX.XXX.XX.XX) access to /certificate_revocation_list/ca [find] at :110 Info: Retrieving plugin Error: /File[/var/lib/puppet/lib]: Failed to generate additional resources using 'eval_generate: Error 403 on SERVER: Forbidden request: XX.XXX.XX.XX(XX.XXX.XX.XX) access to /file_metadata/plugins [search] at :110 Error: /File[/var/lib/puppet/lib]: Could not evaluate: Error 403 on SERVER: Forbidden request: XX.XXX.XX.XX(XX.XXX.XX.XX) access to /file_metadata/plugins [find] at :110 Could not retrieve file metadata for puppet://devops.XXXXX.com/plugins: Error 403 on SERVER: Forbidden request: XX.XXX.XX.XX(XX.XXX.XX.XX) access to /file_metadata/plugins [find] at :110 Error: Could not retrieve catalog from remote server: Error 403 on SERVER: Forbidden request: XX.XXX.XX.XX(XX.XXX.XX.XX) access to /catalog/blramisr195602.XXXXX.com [find] at :110 Using cached catalog Error: Could not retrieve catalog; skipping run Error: Could not send report: Error 403 on SERVER: Forbidden request: XX.XXX.XX.XX(XX.XXX.XX.XX) access to /report/blramisr195602.XXXXX.com [save] at :110 and the server logs show XX.XXX.XX.XX - - [10/Dec/2012:14:46:52 +0530] "GET /production/certificate_revocation_list/ca? HTTP/1.1" 403 102 "-" "Ruby" XX.XXX.XX.XX - - [10/Dec/2012:14:46:52 +0530] "GET /production/file_metadatas/plugins?links=manage&recurse=true&&ignore=---+%0A++-+%22.svn%22%0A++-+CVS%0A++-+%22.git%22&checksum_type=md5 HTTP/1.1" 403 95 "-" "Ruby" XX.XXX.XX.XX - - [10/Dec/2012:14:46:52 +0530] "GET /production/file_metadata/plugins?
HTTP/1.1" 403 93 "-" "Ruby" XX.XXX.XX.XX - - [10/Dec/2012:14:46:53 +0530] "POST /production/catalog/blramisr195602.XXXXX.com HTTP/1.1" 403 106 "-" "Ruby" XX.XXX.XX.XX - - [10/Dec/2012:14:46:53 +0530] "PUT /production/report/blramisr195602.XXXXX.com HTTP/1.1" 403 105 "-" "Ruby" thefile server conf file is as follows (and goin by what they say on puppet site, It is better to regulate access in auth.conf for reaching file server and then allow file server to server all) [files] path /apps/puppet/files allow * [private] path /apps/puppet/private/%H allow * [modules] allow * I am using server and client version 3 Nginx has been compiled using the following options nginx version: nginx/1.3.9 built by gcc 4.4.6 20120305 (Red Hat 4.4.6-4) (GCC) TLS SNI support enabled configure arguments: --prefix=/apps/nginx --conf-path=/apps/nginx/nginx.conf --pid-path=/apps/nginx/run/nginx.pid --error-log-path=/apps/nginx/logs/error.log --http-log-path=/apps/nginx/logs/access.log --with-http_ssl_module --with-http_gzip_static_module --add-module=/usr/lib/ruby/gems/1.8/gems/passenger-3.0.18/ext/nginx --add-module=/apps/Downloads/nginx/nginx-auth-ldap-master/ and the standard nginx puppet master conf server { ssl on; listen 8140 ssl; server_name _; passenger_enabled on; passenger_set_cgi_param HTTP_X_CLIENT_DN $ssl_client_s_dn; passenger_set_cgi_param HTTP_X_CLIENT_VERIFY $ssl_client_verify; passenger_min_instances 5; access_log logs/puppet_access.log; error_log logs/puppet_error.log; root /apps/nginx/html/rack/public; ssl_certificate /var/lib/puppet/ssl/certs/bangvmpllda02.XXXXXX.com.pem; ssl_certificate_key /var/lib/puppet/ssl/private_keys/bangvmpllda02.XXXXXX.com.pem; ssl_crl /var/lib/puppet/ssl/ca/ca_crl.pem; ssl_client_certificate /var/lib/puppet/ssl/certs/ca.pem; ssl_ciphers SSLv2:-LOW:-EXPORT:RC4+RSA; ssl_prefer_server_ciphers on; ssl_verify_client optional; ssl_verify_depth 1; ssl_session_cache shared:SSL:128m; ssl_session_timeout 5m; } Puppet is picking up the correct settings from the files mentioned because config print command points to /etc/puppet [amisr1@bangvmpllDA02 puppet]$ sudo puppet config print | grep conf async_storeconfigs = false authconfig = /etc/puppet/namespaceauth.conf autosign = /etc/puppet/autosign.conf catalog_cache_terminus = store_configs confdir = /etc/puppet config = /etc/puppet/puppet.conf config_file_name = puppet.conf config_version = "" configprint = all configtimeout = 120 dblocation = /var/lib/puppet/state/clientconfigs.sqlite3 deviceconfig = /etc/puppet/device.conf fileserverconfig = /etc/puppet/fileserver.conf genconfig = false hiera_config = /etc/puppet/hiera.yaml localconfig = /var/lib/puppet/state/localconfig name = config rest_authconfig = /etc/puppet/auth.conf storeconfigs = true storeconfigs_backend = puppetdb tagmap = /etc/puppet/tagmail.conf thin_storeconfigs = false I checked the firewall rules on this VM; 80, 443, 8140, 3000 are allowed. Do I still have to tweak any specifics to auth.conf for getting this to work? 
Update I added verbose logging to the puppet master and restarted nginx; here's the additional info I see in the logs Mon Dec 10 18:19:15 +0530 2012 Puppet (err): Could not resolve 10.209.47.31: no name for 10.209.47.31 Mon Dec 10 18:19:15 +0530 2012 access[/] (info): defaulting to no access for 10.209.47.31 Mon Dec 10 18:19:15 +0530 2012 Puppet (warning): Denying access: Forbidden request: 10.209.47.31(10.209.47.31) access to /file_metadata/plugins [find] at :111 Mon Dec 10 18:19:15 +0530 2012 Puppet (err): Forbidden request: 10.209.47.31(10.209.47.31) access to /file_metadata/plugins [find] at :111 10.209.47.31 - - [10/Dec/2012:18:19:15 +0530] "GET /production/file_metadata/plugins? HTTP/1.1" 403 93 "-" "Ruby" On the agent machine, facter fqdn and hostname both return a fully qualified host name [amisr1@blramisr195602 ~]$ sudo facter fqdn blramisr195602.XXXXXXX.com I then updated the agent configuration to add dns_alt_names = 10.209.47.31 cleaned all certificates on master and agent and regenerated the certificates and signed them on master using the option --allow-dns-alt-names [amisr1@bangvmpllDA02 ~]$ sudo puppet cert sign blramisr195602.XXXXXX.com Error: CSR 'blramisr195602.XXXXXX.com' contains subject alternative names (DNS:10.209.47.31, DNS:blramisr195602.XXXXXX.com), which are disallowed. Use `puppet cert --allow-dns-alt-names sign blramisr195602.XXXXXX.com` to sign this request. [amisr1@bangvmpllDA02 ~]$ sudo puppet cert --allow-dns-alt-names sign blramisr195602.XXXXXX.com Signed certificate request for blramisr195602.XXXXXX.com Removing file Puppet::SSL::CertificateRequest blramisr195602.XXXXXX.com at '/var/lib/puppet/ssl/ca/requests/blramisr195602.XXXXXX.com.pem' However, that doesn't help either; I get the same errors as before. I'm not sure why the logs show access rules being compared by IP and not hostname. Is there any Nginx configuration to change this behavior?
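    One hedged reading of the verbose log: the master cannot reverse-resolve 10.209.47.31 and then "defaults to no access", i.e. it is evaluating the ACLs against the agent's IP instead of its certname, which is what happens when the client certificate DN never reaches the Puppet application through Nginx/Passenger. Worth verifying (as assumptions to test, not confirmed fixes): that puppet.conf's ssl_client_header and ssl_client_verify_header name the same headers the Nginx config sets (HTTP_X_CLIENT_DN and HTTP_X_CLIENT_VERIFY above), and that any rule genuinely meant to match by IP uses Puppet 3's allow_ip directive, since allow no longer matches IP addresses there:

      # Sketch, assuming Puppet 3.x auth.conf semantics:
      path ~ ^/catalog/([^/]+)$
      method find
      allow $1
      allow_ip 10.209.47.31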

    Read the article

  • I never really understood: what is CGI?

    - by claws
    CGI is the Common Gateway Interface. As the name says, it is a "common" gateway interface for everything. It sounds so trivial and obvious from the name. I felt that I understood it every time I encountered the word. But frankly, I didn't. I'm still confused. I am a PHP programmer. I did a lot of web development. user (client) request for page --- webserver(-embedded PHP interpreter) ---- Server side(PHP) Script --- MySQL Server. Now say my PHP Script can fetch results from MySQL Server && MATLAB Server && Some other server. So, is my PHP Script now the CGI, because it's the interface between the webserver & all the other servers? I don't know. Sometimes they call CGI a technology & other times they call CGI a program or some other server. What exactly is CGI? What's the big deal with /cgi-bin/*.cgi? What's up with this? I don't know what this cgi-bin directory on the server is for. I don't know why they have *.cgi extensions. Why does Perl always come into the picture? CGI & Perl (language). I also don't know what's up with these two. Almost all the time I keep hearing these two in combination "CGI & Perl". This book is another great example: CGI Programming with Perl. Why not "CGI Programming with PHP/JSP/ASP"? I never saw such things. CGI Programming in C confuses me a lot. In C?? Seriously?? I don't know what to say. I'm just confused. "In C"?? This changes everything. A program needs to be compiled and executed. This entirely changes my view of web programming. When do I compile? How does the program get executed (because it will be machine code, so it must execute as an independent process)? How does it communicate with the web server? IPC? And interfacing with all the servers (in my example MATLAB & MySQL) using socket programming? I'm lost!! They say that CGI is deprecated. It's no longer in use. Is it so? What is its latest update? Once, I ran into a situation where I had to give HTTP PUT request access to a web server (Apache HTTPD). It was a while back. So, as far as I remember, this is what I did: Edited the configuration file of Apache HTTPD to tell the webserver to pass all HTTP PUT requests to some put.php (I had to write this PHP script) Implement put.php to handle the request (save the file to the location mentioned) People said that I wrote a CGI Script. Seriously, I didn't have a clue what they were talking about. Did I really write a CGI Script? I hope you understood what my confusion is. (Because I myself don't know where I'm confused.) I request you guys to keep your answer as simple as possible. I really can't understand any fancy technical terminology. At least not in this case. EDIT: I found this amazing tutorial, "CGI Programming Is Simple!" - CGI Tutorial, which explains the concepts in the simplest possible way. I only have one complaint about this tutorial: just to make whatever he explained complete, he should have shown the C code he used for generating responses to those GET / POST requests. I've also added a link to this tutorial to Wikipedia's article: http://en.wikipedia.org/wiki/Common_Gateway_Interface
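    To make the "in C?? Seriously??" part concrete: a CGI program is just an ordinary executable that the web server spawns once per request, handing it the request through environment variables (plus stdin for POST bodies) and reading the response from stdout — headers, a blank line, then the body. A minimal sketch in C (illustrative; not the code from the tutorial above):

      #include <stdio.h>
      #include <stdlib.h>

      int main(void) {
          /* The server passes request metadata via environment variables. */
          const char *query = getenv("QUERY_STRING");

          /* A CGI response is headers, then a blank line, then the body. */
          printf("Content-Type: text/html\r\n\r\n");
          /* Real code must HTML-escape user input before echoing it. */
          printf("<html><body><p>Query string: %s</p></body></html>\n",
                 query ? query : "(none)");
          return 0;
      }

    Compile it, drop the binary into cgi-bin, and the server does the rest — which is also why the same protocol works for Perl, PHP, or anything else that can read environment variables and write to stdout.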

    Read the article

  • Can't connect to samba

    - by Rick
    Windows 7, connecting to Samba shares I have a follow-up question from the link above. I am running Samba 3.0.23d on FreeBSD release 7.1. I changed the policies as described above but still cannot connect to the samba server with Windows 7 or Server 2008. I feel it is a problem with recognizing the new machines on the network. The Windows machines can see the samba server, but cannot connect to it or view any of the files. After changing the security policies the samba server asked for a network id and password but would not allow the machine to connect, saying unknown username or bad password. Here is my current config file. There is no sign of encryption anywhere; should I just add the line? I'm not sure what that would do elsewhere. Workgroup = WWOFFSET server string = WWO File Server (%v) security = server username map = /usr/local/etc/smb.users hosts allow = 10. 127. # If you want to automatically load your printer list rather # than setting them up individually then you'll need this ; load printers = yes # you may wish to override the location of the printcap file ; printcap name = /etc/printcap # on SystemV system setting printcap name to lpstat should allow # you to automatically obtain a printer list from the SystemV spool # system ; printcap name = lpstat # It should not be necessary to specify the print system type unless # it is non-standard. Currently supported print systems include: # bsd, cups, sysv, plp, lprng, aix, hpux, qnx ; printing = cups # Uncomment this if you want a guest account, you must add this to /etc/passwd # otherwise the user "nobody" is used ; guest account = pcguest # this tells Samba to use a separate log file for each machine # that connects log file = /var/log/samba/log.%m # Put a capping on the size of the log files (in Kb). max log size = 50 # Use password server option only with security = server # The argument list may include: # password server = My_PDC_Name [My_BDC_Name] [My_Next_BDC_Name] # or to auto-locate the domain controller/s # password server = * ; password server = <NT-Server-Name> password server = SERVER0 # Use the realm option only with security = ads # Specifies the Active Directory realm the host is part of ; realm = MY_REALM # Backend to store user information in. New installations should # use either tdbsam or ldapsam. smbpasswd is available for backwards # compatibility. tdbsam requires no further configuration. ; passdb backend = tdbsam ; passdb backend = smbpasswd # Using the following line enables you to customise your configuration # on a per machine basis. The %m gets replaced with the netbios name # of the machine that is connecting. # Note: Consider carefully the location in the configuration file of # this line. The included file is read at that point. ; include = /usr/local/etc/smb.conf.%m # Most people will find that this option gives better performance. # See the chapter 'Samba performance issues' in the Samba HOWTO Collection # and the manual pages for details. # You may want to add the following on a Linux system: # SO_RCVBUF=8192 SO_SNDBUF=8192 socket options = TCP_NODELAY # Configure Samba to use multiple interfaces # If you have multiple network interfaces then you must list them # here. See the man page for details. ; interfaces = 192.168.12.2/24 192.168.13.2/24 # Browser Control Options: # set local master to no if you don't want Samba to become a master # browser on your network.
Otherwise the normal election rules apply ; local master = no # OS Level determines the precedence of this server in master browser # elections. The default value should be reasonable ; os level = 33 # Domain Master specifies Samba to be the Domain Master Browser. This # allows Samba to collate browse lists between subnets. Don't use this # if you already have a Windows NT domain controller doing this job ; domain master = yes # Preferred Master causes Samba to force a local browser election on startup # and gives it a slightly higher chance of winning the election ; preferred master = yes # Enable this if you want Samba to be a domain logon server for # Windows95 workstations. ; domain logons = yes # if you enable domain logons then you may want a per-machine or # per user logon script # run a specific logon batch file per workstation (machine) ; logon script = %m.bat # run a specific logon batch file per username ; logon script = %U.bat # Where to store roving profiles (only for Win95 and WinNT) # %L substitutes for this servers netbios name, %U is username # You must uncomment the [Profiles] share below ; logon path = \\%L\Profiles\%U # Windows Internet Name Serving Support Section: # WINS Support - Tells the NMBD component of Samba to enable it's WINS Server ; wins support = yes # WINS Server - Tells the NMBD components of Samba to be a WINS Client # Note: Samba can be either a WINS Server, or a WINS Client, but NOT both ; wins server = w.x.y.z # WINS Proxy - Tells Samba to answer name resolution queries on # behalf of a non WINS capable client, for this to work there must be # at least one WINS Server on the network. The default is NO. ; wins proxy = yes # DNS Proxy - tells Samba whether or not to try to resolve NetBIOS names # via DNS nslookups. The default is NO. dns proxy = no # charset settings ; display charset = ASCII ; unix charset = ASCII ; dos charset = ASCII # These scripts are used on a domain controller or stand-alone # machine to add or delete corresponding unix accounts ; add user script = /usr/sbin/useradd %u ; add group script = /usr/sbin/groupadd %g ; add machine script = /usr/sbin/adduser -n -g machines -c Machine -d /dev/null -s /bin/false %u ; delete user script = /usr/sbin/userdel %u ; delete user from group script = /usr/sbin/deluser %u %g ; delete group script = /usr/sbin/groupdel %g unix extensions = no
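    On the encryption question raised above: Windows 7 and Server 2008 negotiate NTLMv2 and expect encrypted passwords, while security = server pass-through authentication is deprecated in Samba 3.0.x and commonly fails with exactly this "unknown username or bad password" symptom. A hedged sketch of [global] settings often suggested for this combination, with local accounts then added via smbpasswd -a <user> (treat these as assumptions to test, not a confirmed fix):

      [global]
         security = user
         encrypt passwords = yes
         passdb backend = tdbsam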

    Read the article

  • Windows 7 explorer always crashes, opens small "Personalized Settings" window

    - by Ian Sellar
    My Windows 7 desktop PC, built by me, started acting very weird in the last couple of days. I use it quite often, about half of the time through TeamViewer. Explorer would crash and restart randomly, almost always through TeamViewer. This made me suspect that TeamViewer was the problem but I have reproduced it with and without TeamViewer several times. The only way I can seem to get the problem not to occur is by booting into Safe Mode. I have used CCleaner and Malwarebytes to make sure it wasn't a registry error or malware causing the problem, and I have tried the fix in the seemingly related issue here as well as every other fix I have found online including removing security updates KB980408 and KB2926765 as well as using "sfc /scannow" and a bunch of other things I can't remember. More recently when I try to start explorer it is popping up a small window that says "Personalized Settings" on the top, but is completely empty and crashes instantly. The only way I can get it to disappear is to kill the explorer.exe process. I wish I could take a screenshot but I can't seem to open paint or even find the exe. I have tried restarting it, I have tried starting it while the personalized settings window was open. I have come up with two lists of processes: the first is the list of active processes when I boot into safe mode and explorer seems to work fine. The second is the list of processes that I can narrow it down to in normal boot and still replicate the problem. There is one process that I can't seem to close. NisSrv.exe, which is described as "Microsoft Network Realtime Inspection Service". When I try to close the process NisSrv.exe it says "The operation could not be completed. Access is denied." When I try to close the related service it gives the same message. Image Name PID Session Name Session# Mem Usage ========================= ======== ================ =========== ============ System Idle Process 0 Services 0 24 K System 4 Services 0 2,660 K smss.exe 304 Services 0 1,196 K csrss.exe 408 Services 0 4,156 K wininit.exe 444 Services 0 4,608 K csrss.exe 452 Console 1 8,700 K services.exe 492 Services 0 7,700 K winlogon.exe 524 Console 1 5,756 K lsass.exe 536 Services 0 10,644 K lsm.exe 544 Services 0 4,316 K svchost.exe 652 Services 0 8,976 K MsMpEng.exe 804 Services 0 40,696 K explorer.exe 1332 Console 1 85,220 K ctfmon.exe 1376 Console 1 3,680 K dllhost.exe 1624 Console 1 8,656 K chrome.exe 1408 Console 1 98,504 K WmiPrvSE.exe 2352 Services 0 6,472 K chrome.exe 1744 Console 1 65,116 K taskmgr.exe 372 Console 1 14,948 K cmd.exe 2776 Console 1 2,960 K conhost.exe 1816 Console 1 3,580 K tasklist.exe 2308 Console 1 5,868 K And the list of processes I have narrowed it down to.
    Image Name                PID      Session Name     Session#    Mem Usage
    ========================= ======== ================ =========== ============
    System Idle Process       0        Services         0           24 K
    System                    4        Services         0           2,808 K
    smss.exe                  316      Services         0           1,216 K
    csrss.exe                 484      Services         0           4,532 K
    wininit.exe               596      Services         0           4,604 K
    csrss.exe                 604      Console          1           23,676 K
    services.exe              652      Services         0           11,344 K
    lsass.exe                 668      Services         0           12,692 K
    lsm.exe                   676      Services         0           4,464 K
    MsMpEng.exe               972      Services         0           68,436 K
    winlogon.exe              168      Console          1           7,784 K
    svchost.exe               496      Services         0           19,140 K
    NisSrv.exe                3176     Services         0           808 K
    svchost.exe               1684     Services         0           11,260 K
    taskmgr.exe               4524     Console          1           20,696 K
    cmd.exe                   4764     Console          1           7,224 K
    conhost.exe               4772     Console          1           6,916 K
    sublime_text.exe          2340     Console          1           45,012 K
    dllhost.exe               4476     Console          1           8,736 K
    tasklist.exe              3796     Console          1           5,768 K
    WmiPrvSE.exe              1768     Services         0           6,344 K

    Here is the EventData XML from Event Viewer for the error I am getting:

    <EventData>
      <Data>explorer.exe</Data>
      <Data>6.1.7601.17567</Data>
      <Data>4d672ee4</Data>
      <Data>vrfcore.dll</Data>
      <Data>6.3.9600.16384</Data>
      <Data>5215f8f5</Data>
      <Data>80000003</Data>
      <Data>0000000000003a00</Data>
      <Data>12e4</Data>
      <Data>01cfb84fa70f89dc</Data>
      <Data>C:\Windows\system32\explorer.exe</Data>
      <Data>C:\Windows\SYSTEM32\vrfcore.dll</Data>
      <Data>e5957093-2442-11e4-9f8a-94de806ed9cb</Data>
    </EventData>

    I was looking through the Event Viewer log again and I found this, possibly related:

    <EventData>
      <Data>runonce.exe</Data>
      <Data>6.1.7601.17514</Data>
      <Data>4ce7a253</Data>
      <Data>MSVCR100.dll</Data>
      <Data>10.0.40219.325</Data>
      <Data>4df2bcac</Data>
      <Data>c0000005</Data>
      <Data>000000000003c145</Data>
      <Data>670</Data>
      <Data>01cfb8dabbd85942</Data>
      <Data>C:\Windows\system32\runonce.exe</Data>
      <Data>C:\Windows\system32\MSVCR100.dll</Data>
      <Data>fa6f82b9-24cd-11e4-80a8-94de806ed9cb</Data>
    </EventData>

    And the general error details:

    Faulting application name: Explorer.EXE, version: 6.1.7601.17567, time stamp: 0x4d672ee4
    Faulting module name: vrfcore.dll, version: 6.3.9600.16384, time stamp: 0x5215f8f5
    Exception code: 0x80000003
    Fault offset: 0x0000000000003a00
    Faulting process id: 0xc38
    Faulting application start time: 0x01cfb84e5e852c5f
    Faulting application path: C:\Windows\Explorer.EXE
    Faulting module path: C:\Windows\SYSTEM32\vrfcore.dll
    Report Id: 9dc19e6d-2441-11e4-9f8a-94de806ed9cb

    Another, probably unrelated, error that I seem to be getting pretty often:

    Event filter with query "SELECT * FROM __InstanceModificationEvent WITHIN 60 WHERE TargetInstance ISA "Win32_Processor" AND TargetInstance.LoadPercentage > 99" could not be reactivated in namespace "//./root/CIMV2" because of error 0x80041003. Events cannot be delivered through this filter until the problem is corrected.

    My Explorer tab in Autoruns is shown below, along with the error I get when I try to uncheck something. I should add that I seem to be able to disable shell extensions with ShellExView, but I still can't get explorer to start correctly.

    EXPLORER SHELL UPDATE - See screenshot below

    I can access the explorer right-click menu through a file manager I downloaded called NexusFile, but still no luck starting explorer. Another round of errors that I am getting, regarding the Windows Search Service:

    The search service has detected corrupted data files in the index {id=4700}. The service will attempt to automatically correct this problem by rebuilding the index. Details: The content index catalog is corrupt. (HRESULT : 0xc0041801) (0xc0041801)

    followed by

    The Windows Search Service is being stopped because there is a problem with the indexer: The catalog is corrupt. Details: The content index catalog is corrupt. (HRESULT : 0xc0041801) (0xc0041801)

    and

    The plug-in in <Search.JetPropStore> cannot be initialized. Context: Windows Application, SystemIndex Catalog Details: The content index catalog is corrupt. (HRESULT : 0xc0041801) (0xc0041801)

    and

    The gatherer object cannot be initialized. Context: Windows Application, SystemIndex Catalog Details: The content index catalog is corrupt. (HRESULT : 0xc0041801) (0xc0041801)

    and

    The Windows Search Service cannot load the property store information. Context: Windows Application, SystemIndex Catalog Details: The content index database is corrupt. (HRESULT : 0xc0041800) (0xc0041800)

    WER Log: http://pastebin.com/WXKGDT4Q

    I'll add information as I remember it or people request it.
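    One detail in the crash data above is worth flagging: vrfcore.dll, the faulting module in both Explorer reports, belongs to Microsoft Application Verifier, which suggests verifier instrumentation was at some point enabled for explorer.exe. As a hedged suggestion (the registry path below is the standard Image File Execution Options location, but whether a GlobalFlag value is actually present on the affected machine is an assumption), it may be worth checking for and clearing a leftover verifier flag:

    :: Check whether verifier flags are set for explorer.exe (0x100 enables Application Verifier)
    reg query "HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Image File Execution Options\explorer.exe" /v GlobalFlag

    :: If GlobalFlag is present, remove it and reboot
    reg delete "HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Image File Execution Options\explorer.exe" /v GlobalFlag /f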

    Read the article

  • Prefer extension methods for encapsulation and reusability?

    - by tzaman
    edit4: wikified, since this seems to have morphed more into a discussion than a specific question. In C++ programming, it's generally considered good practice to "prefer non-member non-friend functions" over instance methods. This has been recommended by Scott Meyers in this classic Dr. Dobbs article, and repeated by Herb Sutter and Andrei Alexandrescu in C++ Coding Standards (item 44); the general argument being that if a function can do its job solely by relying on the public interface exposed by the class, it actually increases encapsulation to have it be external. While this complicates the "packaging" of the class to some extent, the benefits are generally considered worth it. Now, ever since I started programming in C#, I've had a feeling that here is the ultimate expression of the concept they're trying to achieve with "non-member, non-friend functions that are part of a class interface". C# adds two crucial components to the mix - the first being interfaces, and the second being extension methods: Interfaces allow a class to formally specify its public contract, the methods and properties that it exposes to the world. Any other class can choose to implement the same interface and fulfill that same contract. Extension methods can be defined on an interface, providing any functionality that can be implemented via the interface to all implementers automatically. And best of all, because of the "instance syntax" sugar and IDE support, they can be called the same way as any other instance method, eliminating the cognitive overhead! So you get the encapsulation benefits of "non-member, non-friend" functions with the convenience of members. Seems like the best of both worlds to me; the .NET library itself provides a shining example in LINQ. However, everywhere I look I see people warning against extension method overuse; even the MSDN page itself states: In general, we recommend that you implement extension methods sparingly and only when you have to. (edit: Even in the current .NET library, I can see places where it would've been useful to have extensions instead of instance methods - for example, all of the utility functions of List<T> (Sort, BinarySearch, FindIndex, etc.) would be incredibly useful if they were lifted up to IList<T> - getting free bonus functionality like that adds a lot more benefit to implementing the interface.) So what's the verdict? Are extension methods the acme of encapsulation and code reuse, or am I just deluding myself? (edit2: In response to Tomas - while C# did start out with Java's (overly, imo) OO mentality, it seems to be embracing more multi-paradigm programming with every new release; the main thrust of this question is whether using extension methods to drive a style change (towards more generic / functional C#) is useful or worthwhile.) edit3: overridable extension methods. The only real problem identified so far with this approach is that you can't specialize extension methods if you need to. I've been thinking about the issue, and I think I've come up with a solution. Suppose I have an interface MyInterface, which I want to extend - I define my extension methods in a MyExtension static class, and pair it with another interface, call it MyExtensionOverrider.
    MyExtension methods are defined according to this pattern:

    public static int MyMethod(this MyInterface obj, int arg, bool attemptCast = true)
    {
        if (attemptCast && obj is MyExtensionOverrider)
        {
            return ((MyExtensionOverrider)obj).MyMethod(arg);
        }
        // regular implementation here
    }

    The override interface mirrors all of the methods defined in MyExtension, except without the this or attemptCast parameters:

    public interface MyExtensionOverrider
    {
        int MyMethod(int arg);
        string MyOtherMethod();
    }

    Now, any class can implement the interface and get the default extension functionality:

    public class MyClass : MyInterface { ... }

    Anyone that wants to override it with specific implementations can additionally implement the override interface:

    public class MySpecializedClass : MyInterface, MyExtensionOverrider
    {
        public int MyMethod(int arg)
        {
            // specialized implementation for one method
        }
        public string MyOtherMethod()
        {
            // fall back to the default for the others
            return MyExtension.MyOtherMethod(this, attemptCast: false);
        }
    }

    And there we go: extension methods provided on an interface, with the option of complete extensibility if needed. It's fully general, too: the interface itself doesn't need to know about the extension / override, and multiple extension / override pairs can be implemented without interfering with each other. I can see three problems with this approach:
    - It's a little bit fragile: the extension methods and the override interface have to be kept synchronized manually.
    - It's a little bit ugly: implementing the override interface involves boilerplate for every function you don't want to specialize.
    - It's a little bit slow: there's an extra bool comparison and cast attempt added to the mainline of every method.
    Still, all those notwithstanding, I think this is the best we can get until there's language support for interface functions. Thoughts?
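    For what it's worth, a minimal sketch of how a call site exercises the pattern above (assuming MyClass and MySpecializedClass as defined; the dispatch behavior follows directly from the attemptCast check):

    // Both calls use identical instance syntax; the cast probe decides the dispatch.
    MyInterface plain = new MyClass();
    MyInterface special = new MySpecializedClass();

    int a = plain.MyMethod(5);   // runs the default body in MyExtension
    int b = special.MyMethod(5); // obj is MyExtensionOverrider, so the override runs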

    Read the article

  • How to upload video on YouTube with Ruby

    - by viatropos
    I am trying to upload a YouTube video using the GData gem (I have seen the youtube_g gem, but would like to make it work with pure GData if possible), but I keep getting this error: GData::Client::BadRequestError in 'MyProject::Google::YouTube should upload the actual video to youtube (once it does, mock this test out)' request error 400: No file found in upload request. I am using this code:

    def metadata
      data = <<-EOF
        <?xml version="1.0"?>
        <entry xmlns="http://www.w3.org/2005/Atom"
          xmlns:media="http://search.yahoo.com/mrss/"
          xmlns:yt="http://gdata.youtube.com/schemas/2007">
          <media:group>
            <media:title type="plain">Bad Wedding Toast</media:title>
            <media:description type="plain">
              I gave a bad toast at my friend's wedding.
            </media:description>
            <media:category scheme="http://gdata.youtube.com/schemas/2007/categories.cat">People</media:category>
            <media:keywords>toast, wedding</media:keywords>
          </media:group>
        </entry>
      EOF
    end

    @yt = GData::Client::YouTube.new
    @yt.clientlogin("name", "pass")
    @yt.developer_key = "myKey"
    url = "http://uploads.gdata.youtube.com/feeds/api/users/name/uploads"
    mime_type = "multipart/related"
    file_path = "sample_upload.mp4"
    @yt.post_file(url, file_path, mime_type, metadata)

    What is the recommended/standard way to upload videos to YouTube with Ruby? What is your method?

    Update: after applying the changes to wrapped_entry, the string it produces looks like this:

    --END_OF_PART_59003
    Content-Type: application/atom+xml; charset=UTF-8

    <?xml version="1.0"?>
    <entry xmlns="http://www.w3.org/2005/Atom"
      xmlns:media="http://search.yahoo.com/mrss/"
      xmlns:yt="http://gdata.youtube.com/schemas/2007">
      <media:group>
        <media:title type="plain">Bad Wedding Toast</media:title>
        <media:description type="plain">
          I gave a bad toast at my friend's wedding.
        </media:description>
        <media:category scheme="http://gdata.youtube.com/schemas/2007/categories.cat">People</media:category>
        <media:keywords>toast, wedding</media:keywords>
      </media:group>
    </entry>
    --END_OF_PART_59003
    Content-Type: multipart/related
    Content-Transfer-Encoding: binary
    ...
    and inspecting the request and response looks like this:

    Request:

    #<GData::HTTP::Request:0x1b8bb44
      @method=:post
      @url="http://uploads.gdata.youtube.com/feeds/api/users/lancejpollard/uploads"
      @body=#<GData::HTTP::MimeBody:0x1b8c738
        @parts=[
          #<GData::HTTP::MimeBodyString:0x1b8c058 @bytes_read=0 @string="--END_OF_PART_30909\r\nContent-Type: application/atom+xml; charset=UTF-8\r\n\r\n <?xml version=\"1.0\"?>\n<entry xmlns=\"http://www.w3.org/2005/Atom\"\n xmlns:media=\"http://search.yahoo.com/mrss/\"\n xmlns:yt=\"http://gdata.youtube.com/schemas/2007\">\n <media:group>\n <media:title type=\"plain\">Bad Wedding Toast</media:title>\n <media:description type=\"plain\">\n I gave a bad toast at my friend's wedding.\n </media:description>\n <media:category scheme=\"http://gdata.youtube.com/schemas/2007/categories.cat\">People</media:category>\n <media:keywords>toast wedding</media:keywords>\n </media:group>\n</entry> \n\r\n--END_OF_PART_30909\r\nContent-Type: multipart/related\r\nContent-Transfer-Encoding: binary\r\n\r\n">
          #<File:/Users/Lance/Documents/Development/git/thing/spec/fixtures/sample_upload.mp4>
          #<GData::HTTP::MimeBodyString:0x1b8c044 @bytes_read=0 @string="\r\n--END_OF_PART_30909--">]
        @current_part=0
        @boundary="END_OF_PART_30909"
      @headers={"Slug"="sample_upload.mp4" "User-Agent"="GoogleDataRubyUtil-AnonymousApp" "GData-Version"="2" "X-GData-Key"="key=AI39si7jkhs_ECjF4unOQz8gpWGSKXgq0KJpm8wywkvBSw4s8oJd5p5vkpvURHBNh-hiYJtoKwQqSfot7KoCkeCE32rNcZqMxA" "Content-Type"="multipart/related; boundary=\"END_OF_PART_30909\"" "MIME-Version"="1.0"}

    Response:

    #<GData::HTTP::Response:0x1b897e0
      @body="No file found in upload request."
      @headers={"cache-control"=>"no-cache no-store must-revalidate" "connection"=>"close" "expires"=>"Fri 01 Jan 1990 00:00:00 GMT" "content-type"=>"text/plain; charset=utf-8" "date"=>"Fri 11 Dec 2009 02:10:25 GMT" "server"=>"Upload Server Built on Nov 30 2009 13:21:18 (1259616078)" "x-xss-protection"=>"0" "content-length"=>"32" "pragma"=>"no-cache"}
      @status_code=400>

    Still not working; I'll have to check it out more with those changes.
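    One hedged observation on the dump above: the file part of the MIME body carries "Content-Type: multipart/related", i.e. the wrapper type that was passed in as mime_type, rather than the video's own type. A sketch of the adjustment (untested here; it assumes post_file uses the given type for the file part while building the multipart envelope itself):

    # Pass the video's MIME type, not the multipart wrapper's;
    # the client constructs the multipart/related body around it.
    mime_type = "video/mp4"
    @yt.post_file(url, file_path, mime_type, metadata)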

    Read the article

  • Migrating Ruby Site from EngineYard to Heroku

    - by user410925
    As part of a larger project, I've been tasked with migrating some existing Ruby on Rails sites (built with an old version of refinerycms, 0.9.6.34 - at least that's the version listed in the Gemfile included with the source). I don't normally work with Ruby, so I'm at a bit of a loss. The previous developers simply handed over the latest git dump as well as a db dump. I'm first trying to get the site working locally on an Ubuntu 11.10 machine before pushing up to a test Heroku install. If it's possible to just push directly to Heroku with the files they gave, then I can try that, but it's my understanding that I need to get everything working and then use Heroku's tools to deploy. The previous devs said they're using ruby 1.8.7, so in Ubuntu I've done the following:

    aptitude install ruby1.8 ruby1.8-dev ruby1.8-full
    aptitude install rubygems1.8

    I've restored the database, and in the config directory I've made changes to database.yml to point to the restored database. When I try to run "bundle install" from the root of the extracted source dir I get:

    Invalid gemspec in [/var/lib/gems/1.8/specifications/mail-2.4.4.gemspec]: invalid date format in specification: "2012-03-14 00:00:00.000000000Z"
    Invalid gemspec in [/var/lib/gems/1.8/specifications/tilt-1.3.3.gemspec]: invalid date format in specification: "2011-08-25 00:00:00.000000000Z"
    Invalid gemspec in [/var/lib/gems/1.8/specifications/mime-types-1.18.gemspec]: invalid date format in specification: "2012-03-21 00:00:00.000000000Z"
    Invalid gemspec in [/var/lib/gems/1.8/specifications/sass-rails-3.2.5.gemspec]: invalid date format in specification: "2012-03-19 00:00:00.000000000Z"
    Invalid gemspec in [/var/lib/gems/1.8/specifications/jquery-rails-2.0.2.gemspec]: invalid date format in specification: "2012-04-03 00:00:00.000000000Z"
    Invalid gemspec in [/var/lib/gems/1.8/specifications/mail-2.4.4.gemspec]: invalid date format in specification: "2012-03-14 00:00:00.000000000Z"
    Invalid gemspec in [/var/lib/gems/1.8/specifications/tilt-1.3.3.gemspec]: invalid date format in specification: "2011-08-25 00:00:00.000000000Z"
    Invalid gemspec in [/var/lib/gems/1.8/specifications/mime-types-1.18.gemspec]: invalid date format in specification: "2012-03-21 00:00:00.000000000Z"
    Invalid gemspec in [/var/lib/gems/1.8/specifications/sass-rails-3.2.5.gemspec]: invalid date format in specification: "2012-03-19 00:00:00.000000000Z"
    Invalid gemspec in [/var/lib/gems/1.8/specifications/jquery-rails-2.0.2.gemspec]: invalid date format in specification: "2012-04-03 00:00:00.000000000Z"
    Invalid gemspec in [/var/lib/gems/1.8/specifications/mail-2.4.4.gemspec]: invalid date format in specification: "2012-03-14 00:00:00.000000000Z"
    Invalid gemspec in [/var/lib/gems/1.8/specifications/tilt-1.3.3.gemspec]: invalid date format in specification: "2011-08-25 00:00:00.000000000Z"
    Invalid gemspec in [/var/lib/gems/1.8/specifications/mime-types-1.18.gemspec]: invalid date format in specification: "2012-03-21 00:00:00.000000000Z"
    Invalid gemspec in [/var/lib/gems/1.8/specifications/sass-rails-3.2.5.gemspec]: invalid date format in specification: "2012-03-19 00:00:00.000000000Z"
    Invalid gemspec in [/var/lib/gems/1.8/specifications/jquery-rails-2.0.2.gemspec]: invalid date format in specification: "2012-04-03 00:00:00.000000000Z"
    Fetching gem metadata from https://rubygems.org/.......
    Fetching gem metadata from https://rubygems.org/..
    Using rake (0.9.2.2)
    Using i18n (0.6.0)
    Using multi_json (1.3.6)
    Using activesupport (3.2.3)
    Using builder (3.0.0)
    Using activemodel (3.2.3)
    Using erubis (2.7.0)
    Using journey (1.0.3)
    Using rack (1.4.1)
    Using rack-cache (1.2)
    Using rack-test (0.6.1)
    Using hike (1.2.1)
    Installing tilt (1.3.3)
    Using sprockets (2.1.3)
    Using actionpack (3.2.3)
    Installing mime-types (1.18)
    Using polyglot (0.3.3)
    Using treetop (1.4.10)
    Installing mail (2.4.4)
    Using actionmailer (3.2.3)
    Using arel (3.0.2)
    Using tzinfo (0.3.33)
    Using activerecord (3.2.3)
    Using activeresource (3.2.3)
    Using acts_as_indexed (0.7.8)
    Using awesome_nested_set (2.1.3)
    Using babosa (0.3.7)
    Using bcrypt-ruby (3.0.1)
    Using coffee-script-source (1.3.3)
    Using execjs (1.4.0)
    Using coffee-script (2.2.0)
    Using rack-ssl (1.3.2)
    Using json (1.7.3)
    Using rdoc (3.12)
    Using thor (0.14.6)
    Using railties (3.2.3)
    Using coffee-rails (3.2.2)
    Using orm_adapter (0.0.7)
    Using warden (1.1.1)
    Using devise (2.0.4)
    Using dragonfly (0.9.12)
    Using friendly_id (4.0.6)
    Using paper_trail (2.6.3)
    Using globalize3 (0.2.0)
    Installing jquery-rails (2.0.2)
    Using bundler (1.1.4)
    Using rails (3.2.3)
    Using sass (3.1.19)
    Installing sass-rails (3.2.5)
    Using truncate_html (0.5.5)
    Using uglifier (1.2.4)
    Using will_paginate (3.0.3)
    Using refinerycms-core (2.0.4)
    Using refinerycms-authentication (2.0.4)
    Using refinerycms-dashboard (2.0.4)
    Using refinerycms-images (2.0.4)
    Using seo_meta (1.3.0)
    Using refinerycms-pages (2.0.4)
    Using refinerycms-resources (2.0.4)
    Using refinerycms (2.0.4)
    Using routing-filter (0.3.1)
    Using refinerycms-i18n (2.0.0)
    Using sqlite3 (1.3.6)
    Your bundle is complete! Use `bundle show [gemname]` to see where a bundled gem is installed.

    Obviously the errors with Invalid gemspec need to be resolved, but the other thing that's troubling me is these lines:

    Using refinerycms-core (2.0.4)
    Using refinerycms-authentication (2.0.4)
    Using refinerycms-dashboard (2.0.4)
    Using refinerycms-images (2.0.4)
    Using seo_meta (1.3.0)
    Using refinerycms-pages (2.0.4)
    Using refinerycms-resources (2.0.4)
    Using refinerycms (2.0.4)
    Using routing-filter (0.3.1)
    Using refinerycms-i18n (2.0.0)

    since the refinerycms version listed in the Gemfile was 0.9.6.34. When it comes to the Ruby world, I'm a bit lost, so any help would be greatly appreciated. Thanks,
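    The "invalid date format in specification" messages were a known incompatibility between older RubyGems versions and gemspecs carrying full timestamps. A hedged workaround sketch (the paths assume the Ubuntu ruby1.8 layout used above; back up the specifications directory before rewriting anything):

    # Strip the time portion that old RubyGems cannot parse
    sudo sed -i 's/ 00:00:00.000000000Z//' /var/lib/gems/1.8/specifications/*.gemspec

    # Alternatively, updating RubyGems itself resolves the parsing incompatibility
    sudo gem update --system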

    Read the article

  • Error when pushing to Heroku - ...appear in group - Ruby on Rails

    - by bgadoci
    I am trying to deploy my first Rails app to Heroku and seem to be having a problem. After git push heroku master and heroku rake db:migrate, I get an error saying:

    SELECT posts.*, count(*) as vote_total FROM "posts" INNER JOIN "votes" ON votes.post_id = posts.id GROUP BY votes.post_id ORDER BY created_at DESC LIMIT 5 OFFSET 0):

    I have included the full error below, and also included PostsController#index, as it seems that is where I am doing the grouping. Lastly, I included my routes.rb file. I am new to Ruby, Rails, and Heroku, so sorry for simple/obvious questions.

    Processing PostsController#index (for 99.7.50.140 at 2010-04-21 12:50:47) [GET]

    ActiveRecord::StatementInvalid (PGError: ERROR: column "posts.id" must appear in the GROUP BY clause or be used in an aggregate function
    : SELECT posts.*, count(*) as vote_total FROM "posts" INNER JOIN "votes" ON votes.post_id = posts.id GROUP BY votes.post_id ORDER BY created_at DESC LIMIT 5 OFFSET 0):

    vendor/gems/will_paginate-2.3.12/lib/will_paginate/finder.rb:82:in `send'
    vendor/gems/will_paginate-2.3.12/lib/will_paginate/finder.rb:82:in `paginate'
    vendor/gems/will_paginate-2.3.12/lib/will_paginate/collection.rb:87:in `create'
    vendor/gems/will_paginate-2.3.12/lib/will_paginate/finder.rb:76:in `paginate'
    app/controllers/posts_controller.rb:28:in `index'
    /home/heroku_rack/lib/static_assets.rb:9:in `call'
    /home/heroku_rack/lib/last_access.rb:25:in `call'
    /home/heroku_rack/lib/date_header.rb:14:in `call'
    thin (1.0.1) lib/thin/connection.rb:80:in `pre_process'
    thin (1.0.1) lib/thin/connection.rb:78:in `catch'
    thin (1.0.1) lib/thin/connection.rb:78:in `pre_process'
    thin (1.0.1) lib/thin/connection.rb:57:in `process'
    thin (1.0.1) lib/thin/connection.rb:42:in `receive_data'
    eventmachine (0.12.6) lib/eventmachine.rb:240:in `run_machine'
    eventmachine (0.12.6) lib/eventmachine.rb:240:in `run'
    thin (1.0.1) lib/thin/backends/base.rb:57:in `start'
    thin (1.0.1) lib/thin/server.rb:150:in `start'
    thin (1.0.1) lib/thin/controllers/controller.rb:80:in `start'
    thin (1.0.1) lib/thin/runner.rb:173:in `send'
    thin (1.0.1) lib/thin/runner.rb:173:in `run_command'
    thin (1.0.1) lib/thin/runner.rb:139:in `run!'
    thin (1.0.1) bin/thin:6
    /usr/local/bin/thin:20:in `load'
    /usr/local/bin/thin:20

    PostsController:

    def index
      @tag_counts = Tag.count(:group => :tag_name, :order => 'count_all DESC', :limit => 20)
      conditions, joins = {}, :votes
      @ugtag_counts = Ugtag.count(:group => :ugctag_name, :order => 'count_all DESC', :limit => 20)
      conditions, joins = {}, :votes
      @vote_counts = Vote.count(:group => :post_title, :order => 'count_all DESC', :limit => 20)
      conditions, joins = {}, :votes
      unless (params[:tag_name] || "").empty?
        conditions = ["tags.tag_name = ? ", params[:tag_name]]
        joins = [:tags, :votes]
      end
      @posts = Post.paginate(
        :select => "posts.*, count(*) as vote_total",
        :joins => joins,
        :conditions => conditions,
        :group => "votes.post_id",
        :order => "created_at DESC",
        :page => params[:page],
        :per_page => 5)
      @popular_posts = Post.paginate(
        :select => "posts.*, count(*) as vote_total",
        :joins => joins,
        :conditions => conditions,
        :group => "votes.post_id",
        :order => "vote_total DESC",
        :page => params[:page],
        :per_page => 3)
      respond_to do |format|
        format.html # index.html.erb
        format.xml { render :xml => @posts }
        format.json { render :json => @posts }
        format.atom
      end
    end

    routes.rb:

    ActionController::Routing::Routes.draw do |map|
      map.resources :ugtags
      map.resources :wysihat_files
      map.resources :users
      map.resources :votes
      map.resources :votes, :belongs_to => :user
      map.resources :tags, :belongs_to => :user
      map.resources :ugtags, :belongs_to => :user
      map.resources :posts, :collection => { :auto_complete_for_tag_tag_name => :get }
      map.resources :posts, :sessions
      map.resources :posts, :has_many => :comments
      map.resources :posts, :has_many => :tags
      map.resources :posts, :has_many => :ugtags
      map.resources :posts, :has_many => :votes
      map.resources :posts, :belongs_to => :user
      map.resources :tags, :collection => { :auto_complete_for_tag_tag_name => :get }
      map.resources :ugtags, :collection => { :auto_complete_for_ugtag_ugctag_name => :get }
      map.login 'login', :controller => 'sessions', :action => 'new'
      map.logout 'logout', :controller => 'sessions', :action => 'destroy'
      map.root :controller => "posts"
      map.connect ':controller/:action/:id'
      map.connect ':controller/:action/:id.:format'
    end

    UPDATE TO SHOW MODEL AND MIGRATION FOR POST

    class Post < ActiveRecord::Base
      has_attached_file :photo
      validates_presence_of :body, :title
      has_many :comments, :dependent => :destroy
      has_many :tags, :dependent => :destroy
      has_many :ugtags, :dependent => :destroy
      has_many :votes, :dependent => :destroy
      belongs_to :user
      after_create :self_vote
      def self_vote
        # I am assuming you have a user_id field in `posts` and `votes` table.
        self.votes.create(:user => self.user)
      end
      cattr_reader :per_page
      @@per_page = 10
    end

    Migrations for Post:

    class CreatePosts < ActiveRecord::Migration
      def self.up
        create_table :posts do |t|
          t.string :title
          t.text :body
          t.timestamps
        end
      end
      def self.down
        drop_table :posts
      end
    end

    class AddUserIdToPost < ActiveRecord::Migration
      def self.up
        add_column :posts, :user_id, :string
      end
      def self.down
        remove_column :posts, :user_id
      end
    end

    Read the article

  • How to add correct cancellation when downloading a file with the example in the samples of the new Parallel Programming team

    - by Mike
    Hello everybody, I have downloaded the latest samples of the Parallel Programming team, and I haven't succeeded in correctly adding the ability to cancel the download of a file. Here is the code I ended up with:

    var wreq = (HttpWebRequest)WebRequest.Create(uri);

    // Fire start event
    DownloadStarted(this, new DownloadStartedEventArgs(remoteFilePath));

    long totalBytes = 0;
    wreq.DownloadDataInFileAsync(tmpLocalFile, cancellationTokenSource.Token, allowResume,
        totalBytesAction => { totalBytes = totalBytesAction; },
        readBytes =>
        {
            Log.Debug("Progression : {0} / {1} => {2}%", readBytes, totalBytes, 100 * (double)readBytes / totalBytes);
            DownloadProgress(this, new DownloadProgressEventArgs(remoteFilePath, readBytes, totalBytes, (int)(100 * readBytes / totalBytes)));
        })
        .ContinueWith(
            (antecedent) =>
            {
                if (antecedent.IsFaulted)
                    Log.Debug(antecedent.Exception.Message);
                // Fire end event
                SetEndDownload(antecedent.IsCanceled, antecedent.Exception, tmpLocalFile, 0);
            }, cancellationTokenSource.Token);

    I want to fire an end event after the download is finished, hence the ContinueWith. I slightly changed the code of the samples to add the CancellationToken and the two delegates that report the size of the file to download and the progression of the download:

    return webRequest.GetResponseAsync()
        .ContinueWith(response =>
        {
            if (totalBytesAction != null)
                totalBytesAction(response.Result.ContentLength);
            response.Result.GetResponseStream().WriteAllBytesAsync(filePath, ct, resumeDownload, progressAction).Wait(ct);
        }, ct);

    I had to add the call to the Wait function because, if I don't, the method exits and the end event is fired too early. Here are the modified extension methods (lots of code, apologies :p):

    public static Task WriteAllBytesAsync(this Stream stream, string filePath, CancellationToken ct, bool resumeDownload = false, Action<long> progressAction = null)
    {
        if (stream == null)
            throw new ArgumentNullException("stream");
        // Copy from the source stream to the memory stream and return the copied data
        return stream.CopyStreamToFileAsync(filePath, ct, resumeDownload, progressAction);
    }

    public static Task CopyStreamToFileAsync(this Stream source, string destinationPath, CancellationToken ct, bool resumeDownload = false, Action<long> progressAction = null)
    {
        if (source == null)
            throw new ArgumentNullException("source");
        if (destinationPath == null)
            throw new ArgumentNullException("destinationPath");
        // Open the output file for writing
        var destinationStream = FileAsync.OpenWrite(destinationPath);
        // Copy the source to the destination stream, then close the output file.
        return CopyStreamToStreamAsync(source, destinationStream, ct, progressAction).ContinueWith(t =>
        {
            var e = t.Exception;
            destinationStream.Close();
            if (e != null)
                throw e;
        }, ct, TaskContinuationOptions.ExecuteSynchronously, TaskScheduler.Current);
    }

    public static Task CopyStreamToStreamAsync(this Stream source, Stream destination, CancellationToken ct, Action<long> progressAction = null)
    {
        if (source == null)
            throw new ArgumentNullException("source");
        if (destination == null)
            throw new ArgumentNullException("destination");
        return Task.Factory.Iterate(CopyStreamIterator(source, destination, ct, progressAction));
    }

    private static IEnumerable<Task> CopyStreamIterator(Stream input, Stream output, CancellationToken ct, Action<long> progressAction = null)
    {
        // Create two buffers. One will be used for the current read operation and one for the current
        // write operation. We'll continually swap back and forth between them.
        byte[][] buffers = new byte[2][] { new byte[BUFFER_SIZE], new byte[BUFFER_SIZE] };
        int filledBufferNum = 0;
        Task writeTask = null;
        int readBytes = 0;

        // Until there's no more data to be read, or cancellation
        while (true)
        {
            ct.ThrowIfCancellationRequested();

            // Read from the input asynchronously
            var readTask = input.ReadAsync(buffers[filledBufferNum], 0, buffers[filledBufferNum].Length);

            // If we have no pending write operations, just yield until the read operation has
            // completed. If we have both a pending read and a pending write, yield until both the read
            // and the write have completed.
            yield return writeTask == null
                ? readTask
                : Task.Factory.ContinueWhenAll(new[] { readTask, writeTask }, tasks => tasks.PropagateExceptions());

            // If no data was read, nothing more to do.
            if (readTask.Result <= 0)
                break;

            readBytes += readTask.Result;
            if (progressAction != null)
                progressAction(readBytes);

            // Otherwise, write the read data out to the file
            writeTask = output.WriteAsync(buffers[filledBufferNum], 0, readTask.Result);

            // Swap buffers
            filledBufferNum ^= 1;
        }
    }

    So basically, at the end of the chain of called methods, I let the CancellationToken throw an OperationCanceledException if a cancel has been requested. What I hoped for was to get IsFaulted == true in the calling code and to fire the end event with the canceled flags and the correct exception. But what I get is an unhandled exception on the line

    response.Result.GetResponseStream().WriteAllBytesAsync(filePath, ct, resumeDownload, progressAction).Wait(ct);

    telling me that I'm not catching an AggregateException. I've tried various things, but I haven't managed to make the whole thing work properly. Has anyone played with that library enough to help me? Thanks in advance, Mike
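    Two hedged observations on the code above, with a sketch (names reused from the snippets; not verified against the Parallel Extensions samples). First, Wait(ct) rethrows any task failure wrapped in an AggregateException on the waiting thread - inside the ContinueWith of GetResponseAsync, where nothing catches it. Unwrapping the cancellation and rethrowing it as an OperationCanceledException carrying the task's own token lets that task end in the Canceled state instead of crashing:

    try
    {
        response.Result.GetResponseStream()
            .WriteAllBytesAsync(filePath, ct, resumeDownload, progressAction)
            .Wait(ct);
    }
    catch (AggregateException ae)
    {
        // Mark only cancellations as handled; anything else is rethrown as a fault.
        ae.Handle(e => e is OperationCanceledException);
        // Rethrow with the task's own token so the task transitions to Canceled.
        throw new OperationCanceledException(ct);
    }

    Second, the final ContinueWith that fires SetEndDownload is created with cancellationTokenSource.Token; a continuation whose token is already canceled is itself canceled before it runs, so an end event that must fire even after cancellation is safer registered without that token.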

    Read the article

  • Concurrent Generation of Sequential Keys

    - by GenTiradentes
    I'm working on a project which generates a very large number of sequential text strings in a very tight loop. My application makes heavy use of SIMD instruction set extensions like SSE and MMX in other parts of the program, but the key generator is plain C++. The way my key generator works is: I have a keyGenerator class, which holds a single char array that stores the current key. To get the next key, there is a function called "incrementKey", which treats the string as a number, adding one to the string and carrying where necessary. Now, the problem is, the keygen is somewhat of a bottleneck. It's fast, but it would be nice if it were faster. One of the biggest problems is that when I'm generating a set of sequential keys to be processed using my SSE2 code, I have to have the entire set stored in an array, which means I have to sequentially generate and copy 12 strings into an array, one by one, like so:

    char* keys[12];
    for(int i = 0; i < 12; i++)
    {
        keys[i] = new char[16];
        strcpy(keys[i], keygen++);
    }

    So how would you efficiently generate these plaintext strings in order? I need some ideas to help move this along. Concurrency would be nice; as my code is right now, each successive key depends on the previous one, which means that the processor can't start work on the next key until the current one has been completely generated. Here is the code relevant to the key generator:

    KeyGenerator.h

    class keyGenerator
    {
    public:
        keyGenerator(unsigned long long location, characterSet* charset) : location(location), charset(charset)
        {
            for(int i = 0; i < 16; i++)
                key[i] = 0;
            charsetStr = charset->getCharsetStr();
            integerToKey();
        }
        ~keyGenerator()
        {
        }
        inline void incrementKey()
        {
            register size_t keyLength = strlen(key);
            for(register char* place = key; place; place++)
            {
                if(*place == charset->maxChar)
                {
                    // Overflow, reset char at place
                    *place = charset->minChar;
                    if(!*(place+1))
                    {
                        // Carry, no space, insert char
                        *(place+1) = charset->minChar;
                        ++keyLength;
                        break;
                    }
                    else
                    {
                        continue;
                    }
                }
                else
                {
                    // Space available, increment char at place
                    if(*place == charset->charSecEnd[0])
                        *place = charset->charSecBegin[0];
                    else if(*place == charset->charSecEnd[1])
                        *place = charset->charSecBegin[1];
                    (*place)++;
                    break;
                }
            }
        }
        inline char* operator++() // Pre-increment
        {
            incrementKey();
            return key;
        }
        inline char* operator++(int) // Post-increment
        {
            memcpy(postIncrementRetval, key, 16);
            incrementKey();
            return postIncrementRetval;
        }
        void integerToKey()
        {
            register unsigned long long num = location;
            if(!num)
            {
                key[0] = charsetStr[0];
            }
            else
            {
                num++;
                while(num)
                {
                    num--;
                    unsigned int remainder = num % charset->length;
                    num /= charset->length;
                    key[strlen(key)] = charsetStr[remainder];
                }
            }
        }
        inline unsigned long long keyToInteger()
        {
            // TODO
            return 0;
        }
        inline char* getKey()
        {
            return key;
        }
    private:
        unsigned long long location;
        characterSet* charset;
        std::string charsetStr;
        char key[16];
        // We need a place to store the key for the post-increment operation.
        char postIncrementRetval[16];
    };

    CharacterSet.h

    struct characterSet
    {
        characterSet()
        {
        }
        characterSet(unsigned int len, int min, int max, int charsec0, int charsec1, int charsec2, int charsec3)
        {
            init(len, min, max, charsec0, charsec1, charsec2, charsec3);
        }
        void init(unsigned int len, int min, int max, int charsec0, int charsec1, int charsec2, int charsec3)
        {
            length = len;
            minChar = min;
            maxChar = max;
            charSecEnd[0] = charsec0;
            charSecBegin[0] = charsec1;
            charSecEnd[1] = charsec2;
            charSecBegin[1] = charsec3;
        }
        std::string getCharsetStr()
        {
            std::string retval;
            for(int chr = minChar; chr != maxChar; chr++)
            {
                for(int i = 0; i < 2; i++)
                    if(chr == charSecEnd[i])
                        chr = charSecBegin[i];
                retval += chr;
            }
            return retval;
        }
        int minChar, maxChar;
        // charSec = character set section
        int charSecEnd[2], charSecBegin[2];
        unsigned int length;
    };
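    Since integerToKey() already derives the key string from an integer location, one hedged direction for concurrency (a sketch building on the classes above, not a drop-in patch) is to give each worker its own generator seeded at a disjoint offset, so blocks of keys are produced independently and no thread waits on another's incrementKey():

    #include <cstring>

    // Worker fills out[0..count) with the keys for locations [start, start + count).
    // Assumes keyGenerator and characterSet as defined in the headers above.
    void generateBlock(unsigned long long start, size_t count,
                       characterSet* charset, char out[][16])
    {
        keyGenerator gen(start, charset); // constructor seeds the first key via integerToKey()
        for (size_t i = 0; i < count; ++i)
            strcpy(out[i], gen++);        // post-increment returns the current key
    }

    // Usage idea: worker w handles [w * BLOCK, (w + 1) * BLOCK) on its own thread,
    // e.g. BLOCK = 12 to match the SSE2 batch size used above.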

    Read the article

  • Use DivX settings to encode to mp4 with ffmpeg

    - by sjngm
    I'm used to using VirtualDub to encode a video to an AVI container with the DivX codec (and MP3 for audio). Now I'm planning to use ffmpeg to encode videos to an MP4 container with the h264 codec. What I've figured out is that I need to use libx264 and one of those presets to make anything work. However, I'm amazed at the video bitrate ffmpeg uses for encoding. What I currently have is this little batch file:

    @ECHO OFF
    SETLOCAL
    SET IN=source.avs
    SET FFMPEG_PATH=C:\Program Files (x86)\ffmpeg
    SET PRESET=-fpre "%FFMPEG_PATH%\presets\libx264-lossless_slow.ffpreset"
    SET AUDIO=-acodec libmp3lame -ab 128000
    SET VIDEO=-vcodec libx264 -vb 1978000
    "%FFMPEG_PATH%\ffmpeg.exe" -i %IN% %AUDIO% %VIDEO% %PRESET% test.mp4
    ENDLOCAL

    With this I tell ffmpeg to use 1978k as the bitrate, but ffmpeg uses 15000k+! I tried other presets, but they don't use my specified bitrate. Here are the presets I have:

    libx264-baseline.ffpreset
    libx264-ipod320.ffpreset
    libx264-ipod640.ffpreset
    libx264-lossless_fast.ffpreset
    libx264-lossless_max.ffpreset
    libx264-lossless_medium.ffpreset
    libx264-lossless_slow.ffpreset
    libx264-lossless_slower.ffpreset
    libx264-lossless_ultrafast.ffpreset

    ffmpeg version: FFmpeg git-N-29181-ga304071
    libavutil 50. 40. 1 / 50. 40. 1
    libavcodec 52.120. 0 / 52.120. 0
    libavformat 52.108. 0 / 52.108. 0
    libavdevice 52. 4. 0 / 52. 4. 0
    libavfilter 1. 79. 0 / 1. 79. 0
    libswscale 0. 13. 0 / 0. 13. 0

    Note that I don't use the latest version, as it has problems with spaces in filenames. Here's what seems to be the full parameter list DivX 6.9.2 uses:

    -bvnn 1978000 -vbv 218691200,100663296,100663296 -dir "C:\Users\sjngm\AppData\Roaming\DivX\DivX Codec" -w -b 1 -use_presets=1 -preset=10 -windowed_fullsearch=2 -thread_delay=1

    What command line parameters would that be for ffmpeg?

    EDIT: Going with slhck's suggestion, I tried a new 32-bit version. I have no idea if that is 0.9 or newer; I can't find that info.

    ffmpeg version N-36890-g67f5650
    libavutil 51. 34.100 / 51. 34.100
    libavcodec 53. 56.105 / 53. 56.105
    libavformat 53. 30.100 / 53. 30.100
    libavdevice 53. 4.100 / 53. 4.100
    libavfilter 2. 59.100 / 2. 59.100
    libswscale 2. 1.100 / 2. 1.100
    libswresample 0. 6.100 / 0. 6.100
    libpostproc 51. 2.100 / 51. 2.100

    I reworked my batch file to look like this (interestingly enough, I can't find the parameter -vprofile in the documentation):

    @ECHO OFF
    SETLOCAL
    SET IN=VTS_01_1.avs
    SET FFMPEG_PATH=C:\Program Files (x86)\ffmpeg
    SET PRESET=-vprofile high -preset veryslow
    SET AUDIO=-acodec libmp3lame -ab 128000
    SET VIDEO=-vcodec libx264 -vb 1978000
    "%FFMPEG_PATH%\ffmpeg.exe" -i %IN% %AUDIO% %PRESET% %VIDEO% test.mp4
    ENDLOCAL

    I see that it now uses the bitrate properly (thanks to LongNeckbeard for pointing out that the lossless stuff ignores the bitrate!). Just in case you wonder how I came up with the 1978000, I'm using this formula, which I found works for DivX files (I'm guessing the bitrate won't change that much for h264):

    width * height * 25 * 0.22 / 1000

    I'm not sure if the 0.22 correlates with the CRF somehow. I forgot to say that I will use a two-pass scenario, which is why I don't use CRF here. I will try to read more about this. Currently I'm just trying to get something running that shows me that I'm doing something right (ffmpeg isn't the easiest tool to understand ;)).
    "C:\Program Files (x86)\ffmpeg\ffmpeg.exe" -i VTS_01_1.avs -acodec libmp3lame -ab 128000 -vcodec libx264 -vb 1978000 -vprofile high -preset veryslow test.mp4

    The output is now:

    ffmpeg version N-36890-g67f5650 Copyright (c) 2000-2012 the FFmpeg developers
      built on Jan 16 2012 21:57:13 with gcc 4.6.2
      configuration: --enable-gpl --enable-version3 --disable-w32threads --enable-runtime-cpudetect --enable-avisynth --enable-bzlib --enable-frei0r --enable-libopencore-amrnb --enable-libopencore-amrwb --enable-libfreetype --enable-libgsm --enable-libmp3lame --enable-libopenjpeg --enable-librtmp --enable-libschroedinger --enable-libspeex --enable-libtheora --enable-libvo-aacenc --enable-libvo-amrwbenc --enable-libvorbis --enable-libvpx --enable-libx264 --enable-libxavs --enable-libxvid --enable-zlib
      libavutil 51. 34.100 / 51. 34.100
      libavcodec 53. 56.105 / 53. 56.105
      libavformat 53. 30.100 / 53. 30.100
      libavdevice 53. 4.100 / 53. 4.100
      libavfilter 2. 59.100 / 2. 59.100
      libswscale 2. 1.100 / 2. 1.100
      libswresample 0. 6.100 / 0. 6.100
      libpostproc 51. 2.100 / 51. 2.100
    Input #0, avs, from 'VTS_01_1.avs':
      Duration: 00:58:46.12, start: 0.000000, bitrate: 0 kb/s
      Stream #0:0: Video: rawvideo (YV12 / 0x32315659), yuv420p, 576x448, 77414 kb/s, 25 tbr, 25 tbn, 25 tbc
      Stream #0:1: Audio: pcm_s16le ([1][0][0][0] / 0x0001), 48000 Hz, 2 channels, s16, 1536 kb/s
    File 'test.mp4' already exists. Overwrite ? [y/N] y
    w:576 h:448 pixfmt:yuv420p tb:1/1000000 sar:0/1 sws_param:
    [libx264 @ 05A2C400] using cpu capabilities: MMX2 SSE2Fast FastShuffle SSEMisalign LZCNT
    [libx264 @ 05A2C400] profile High, level 3.1
    [libx264 @ 05A2C400] 264 - core 120 r2120 0c7dab9 - H.264/MPEG-4 AVC codec - Copyleft 2003-2011 - http://www.videolan.org/x264.html - options: cabac=1 ref=16 deblock=1:0:0 analyse=0x3:0x133 me=umh subme=10 psy=1 psy_rd=1.00:0.00 mixed_ref=1 me_range=24 chroma_me=1 trellis=2 8x8dct=1 cqm=0 deadzone=21,11 fast_pskip=1 chroma_qp_offset=-2 threads=3 sliced_threads=0 nr=0 decimate=1 interlaced=0 bluray_compat=0 constrained_intra=0 bframes=8 b_pyramid=2 b_adapt=2 b_bias=0 direct=3 weightb=1 open_gop=0 weightp=2 keyint=250 keyint_min=25 scenecut=40 intra_refresh=0 rc_lookahead=60 rc=abr mbtree=1 bitrate=1978 ratetol=1.0 qcomp=0.60 qpmin=0 qpmax=69 qpstep=4 ip_ratio=1.40 aq=1:1.00
    Output #0, mp4, to 'test.mp4':
      Metadata:
        encoder : Lavf53.30.100
      Stream #0:0: Video: h264 (![0][0][0] / 0x0021), yuv420p, 576x448, q=-1--1, 1978 kb/s, 25 tbn, 25 tbc
      Stream #0:1: Audio: mp3 (i[0][0][0] / 0x0069), 48000 Hz, 2 channels, s16, 128 kb/s
    Stream mapping:
      Stream #0:0 -> #0:0 (rawvideo -> libx264)
      Stream #0:1 -> #0:1 (pcm_s16le -> libmp3lame)
    Press [q] to stop, [?] for help
    frame= 0 fps= 0 q=0.0 size= 0kB time=00:00:00.00 bitrate= 0.0kbits/s
    frame= 0 fps= 0 q=0.0 size= 0kB time=00:00:00.00 bitrate= 0.0kbits/s
    frame= 0 fps= 0 q=0.0 size= 0kB time=00:00:00.00 bitrate= 0.0kbits/s
    frame= 3 fps= 1 q=22.0 size= 39kB time=00:00:00.04 bitrate=8063.8kbits/
    frame= 8 fps= 2 q=22.0 size= 82kB time=00:00:00.24 bitrate=2801.3kbits/
    frame= 13 fps= 3 q=23.0 size= 120kB time=00:00:00.44 bitrate=2229.5kbits/
    frame= 16 fps= 4 q=23.0 size= 147kB time=00:00:00.56 bitrate=2156.7kbits/
    frame= 20 fps= 4 q=22.0 size= 175kB time=00:00:00.72 bitrate=1987.4kbits/
    :
    video:4387kB audio:273kB global headers:0kB muxing overhead 0.260038%
    [libx264 @ 05A2C400] frame I:2 Avg QP:19.53 size: 29850
    [libx264 @ 05A2C400] frame P:76 Avg QP:22.24 size: 19541
    [libx264 @ 05A2C400] frame B:359 Avg QP:25.93 size: 8210
    [libx264 @ 05A2C400] consecutive B-frames: 0.5% 0.5% 0.0% 8.2% 17.2% 52.2% 16.0% 5.5% 0.0%
    [libx264 @ 05A2C400] mb I I16..4: 5.4% 75.3% 19.3%
    [libx264 @ 05A2C400] mb P I16..4: 1.3% 16.5% 2.2% P16..4: 36.3% 28.6% 12.7% 1.8% 0.2% skip: 0.4%
    [libx264 @ 05A2C400] mb B I16..4: 0.4% 3.8% 0.3% B16..8: 40.0% 18.4% 4.7% direct:18.5% skip:13.9% L0:45.4% L1:38.1% BI:16.5%
    [libx264 @ 05A2C400] final ratefactor: 20.35
    [libx264 @ 05A2C400] 8x8 transform intra:83.1% inter:68.5%
    [libx264 @ 05A2C400] direct mvs spatial:99.2% temporal:0.8%
    [libx264 @ 05A2C400] coded y,uvDC,uvAC intra: 64.9% 83.4% 49.2% inter: 49.0% 50.4% 4.4%
    [libx264 @ 05A2C400] i16 v,h,dc,p: 25% 22% 27% 26%
    [libx264 @ 05A2C400] i8 v,h,dc,ddl,ddr,vr,hd,vl,hu: 10% 7% 23% 9% 10% 10% 10% 10% 13%
    [libx264 @ 05A2C400] i4 v,h,dc,ddl,ddr,vr,hd,vl,hu: 12% 11% 13% 9% 12% 11% 10% 9% 12%
    [libx264 @ 05A2C400] i8c dc,h,v,p: 42% 28% 16% 14%
    [libx264 @ 05A2C400] Weighted P-Frames: Y:18.4% UV:7.9%
    [libx264 @ 05A2C400] ref P L0: 29.1% 11.3% 15.7% 7.3% 6.9% 4.9% 5.1% 3.4% 3.9% 2.7% 2.8% 1.8% 1.7% 1.2% 1.4% 0.9%
    [libx264 @ 05A2C400] ref B L0: 68.8% 11.4% 5.5% 2.9% 2.3% 1.9% 1.5% 1.1% 1.1% 1.0% 0.9% 0.7% 0.5% 0.3% 0.1%
    [libx264 @ 05A2C400] ref B L1: 91.9% 8.1%
    [libx264 @ 05A2C400] kb/s:2055.88

    As far as I'm concerned, it doesn't look that bad to me.
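    Since the end goal is two-pass encoding, here is a hedged sketch of what the eventual batch file might look like (-pass 1/-pass 2 is the standard libx264 two-pass mechanism in ffmpeg, but the exact flag spellings should be verified against this build's documentation):

    @ECHO OFF
    SETLOCAL
    SET IN=VTS_01_1.avs
    SET FFMPEG_PATH=C:\Program Files (x86)\ffmpeg
    SET PRESET=-vprofile high -preset veryslow
    SET VIDEO=-vcodec libx264 -vb 1978000
    REM First pass: analysis only; no audio, output discarded
    "%FFMPEG_PATH%\ffmpeg.exe" -i %IN% %VIDEO% %PRESET% -pass 1 -an -f null NUL
    REM Second pass: reuse the stats file to hit the target bitrate
    "%FFMPEG_PATH%\ffmpeg.exe" -i %IN% -acodec libmp3lame -ab 128000 %VIDEO% %PRESET% -pass 2 test.mp4
    ENDLOCAL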

    Read the article

  • Convert XML namespace prefixes with C#?

    - by jrista
    I have run into an exasperating problem getting a Java service client to communicate successfully with a WCF service. I have overcome many hurdles, and I believe that this is my last one. The problem boils down to how Java Axis + WSS4J seem to handle XML namespaces. The Java platform seems to be very rigid in what it expects for XML namespace prefixes, and as such does not understand the WCF reply messages. My problem in a nutshell is as follows. I have an XML response similar to the following from my WCF service:

    <s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/"
                xmlns:a="http://www.w3.org/2005/08/addressing"
                xmlns:u="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd">
      <s:Header>
        <a:Action s:mustUnderstand="1" u:Id="_3">http://tempuri.org/IProcessor/DoProcessingResponse</a:Action>
        <h:CorrelationID xmlns:h="http://tempuri.org/">1234</h:CorrelationID>
        <a:RelatesTo u:Id="_4">uuid:40f800a0-9613-4f4a-96c5-b9fd98085deb</a:RelatesTo>
        <o:Security s:mustUnderstand="1" xmlns:o="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd">
          <!-- WS-Security header stuff -->
        </o:Security>
      </s:Header>
      <s:Body u:Id="_1">
        <e:EncryptedData Id="_2" Type="http://www.w3.org/2001/04/xmlenc#Content" xmlns:e="http://www.w3.org/2001/04/xmlenc#">
          <e:EncryptionMethod Algorithm="http://www.w3.org/2001/04/xmlenc#aes128-cbc"/>
          <e:CipherData>
            <e:CipherValue>NfA6XunmyLlT2ucA+5QneoawHm+imcaCltDAJC1mRZOSxoB6YGpDLY1FyVykPbPGDoFGUESLsmvvbD62sNnRrgE+AuKPo+1CD3DF4LfurRcEv9A50ba9V+ViqlrhydhK</e:CipherValue>
          </e:CipherData>
        </e:EncryptedData>
      </s:Body>
    </s:Envelope>

    This response uses simple one-character namespace prefixes for most things, such as 's' for the SOAP Envelope, 'a' for WS-Addressing, 'o' for WS-Security, etc. The Java client, namely WSS4J, seems to expect the following:

    <soap:Envelope xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/"
                   xmlns:wsa="http://www.w3.org/2005/08/addressing"
                   xmlns:wsu="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd">
      <soap:Header>
        <wsa:Action soap:mustUnderstand="1" wsu:Id="_3">http://tempuri.org/IProcessor/DoProcessingResponse</wsa:Action>
        <h:CorrelationID xmlns:h="http://tempuri.org/">1234</h:CorrelationID>
        <wsa:RelatesTo wsu:Id="_4">uuid:40f800a0-9613-4f4a-96c5-b9fd98085deb</wsa:RelatesTo>
        <wsse:Security soap:mustUnderstand="1" xmlns:wsse="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd">
          <!-- WS-Security header stuff -->
        </wsse:Security>
      </soap:Header>
      <soap:Body wsu:Id="_1">
        <xenc:EncryptedData Id="_2" Type="http://www.w3.org/2001/04/xmlenc#Content" xmlns:xenc="http://www.w3.org/2001/04/xmlenc#">
          <xenc:EncryptionMethod Algorithm="http://www.w3.org/2001/04/xmlenc#aes128-cbc"/>
          <xenc:CipherData>
            <xenc:CipherValue>NfA6XunmyLlT2ucA+5QneoawHm+imcaCltDAJC1mRZOSxoB6YGpDLY1FyVykPbPGDoFGUESLsmvvbD62sNnRrgE+AuKPo+1CD3DF4LfurRcEv9A50ba9V+ViqlrhydhK</xenc:CipherValue>
          </xenc:CipherData>
        </xenc:EncryptedData>
      </soap:Body>
    </soap:Envelope>

    Upon receipt of my response message, the Java client and WSS4J seem to want to look up elements by their own internal XML aliases, such as 'wsa' for WS-Addressing and 'wsse' for WS-Security Extensions. Since neither of those prefixes is present in the actual response XML, exceptions are thrown. I am wondering if there is any simple way to transform an XML document from one set of namespace prefixes to another using C#, .NET, and the System.Xml namespace. I've poked around with XmlNamespaceManager a bit, but it does not seem to fully support what I need... or at least, I have been unable to find any really useful examples, and I am not fully sure how it works. I am trying to avoid having to write some heavy-duty process to handle this manually myself, as I do not want to drastically impact the performance of our services when called by a Java Axis/WSS4J client.
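    Since prefixes are arbitrary as long as they bind to the same URIs, one lightweight approach in .NET is to control which namespace declarations are in scope when the message is re-serialized. Below is a hedged sketch using LINQ to XML (the prefix-to-URI pairs are taken from the messages above; note also the caveat that rewriting a signed or encrypted message invalidates its signature, so this is only viable before security processing or for diagnosis):

    using System.Linq;
    using System.Xml.Linq;

    static class PrefixRewriter
    {
        public static XDocument RebindPrefixes(XDocument doc)
        {
            // Remove every existing xmlns declaration so the serializer cannot
            // fall back to the original one-letter prefixes.
            doc.Descendants()
               .Attributes()
               .Where(a => a.IsNamespaceDeclaration)
               .Remove();

            // Re-declare the same URIs at the root under the prefixes WSS4J expects;
            // element names keep their namespaces, so serialization picks these up.
            doc.Root.Add(
                new XAttribute(XNamespace.Xmlns + "soap", "http://schemas.xmlsoap.org/soap/envelope/"),
                new XAttribute(XNamespace.Xmlns + "wsa", "http://www.w3.org/2005/08/addressing"),
                new XAttribute(XNamespace.Xmlns + "wsu", "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd"),
                new XAttribute(XNamespace.Xmlns + "wsse", "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd"),
                new XAttribute(XNamespace.Xmlns + "xenc", "http://www.w3.org/2001/04/xmlenc#"));
            return doc;
        }
    }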

    Read the article

< Previous Page | 233 234 235 236 237 238 239 240 241 242 243  | Next Page >