Search Results

Search found 20299 results on 812 pages for 'git log'.

Page 36/812 | < Previous Page | 32 33 34 35 36 37 38 39 40 41 42 43  | Next Page >

  • Using R to Analyze G1GC Log Files

    - by user12620111
N)\\b",e:hljs.IMMEDIATE_RE,r:1},{cN:"identifier",b:"[a-zA-Z.][a-zA-Z0-9._]*\\b",e:hljs.IMMEDIATE_RE,r:0},{cN:"operator",b:"|=||   Using R to Analyze G1GC Log Files   Using R to Analyze G1GC Log Files Introduction Working in Oracle Platform Integration gives an engineer opportunities to work on a wide array of technologies. My team’s goal is to make Oracle applications run best on the Solaris/SPARC platform. When looking for bottlenecks in a modern applications, one needs to be aware of not only how the CPUs and operating system are executing, but also network, storage, and in some cases, the Java Virtual Machine. I was recently presented with about 1.5 GB of Java Garbage First Garbage Collector log file data. If you’re not familiar with the subject, you might want to review Garbage First Garbage Collector Tuning by Monica Beckwith. The customer had been running Java HotSpot 1.6.0_31 to host a web application server. I was told that the Solaris/SPARC server was running a Java process launched using a commmand line that included the following flags: -d64 -Xms9g -Xmx9g -XX:+UseG1GC -XX:MaxGCPauseMillis=200 -XX:InitiatingHeapOccupancyPercent=80 -XX:PermSize=256m -XX:MaxPermSize=256m -XX:+PrintGC -XX:+PrintGCTimeStamps -XX:+PrintHeapAtGC -XX:+PrintGCDateStamps -XX:+PrintFlagsFinal -XX:+DisableExplicitGC -XX:+UnlockExperimentalVMOptions -XX:ParallelGCThreads=8 Several sources on the internet indicate that if I were to print out the 1.5 GB of log files, it would require enough paper to fill the bed of a pick up truck. Of course, it would be fruitless to try to scan the log files by hand. Tools will be required to summarize the contents of the log files. Others have encountered large Java garbage collection log files. There are existing tools to analyze the log files: IBM’s GC toolkit The chewiebug GCViewer gchisto HPjmeter Instead of using one of the other tools listed, I decide to parse the log files with standard Unix tools, and analyze the data with R. Data Cleansing The log files arrived in two different formats. I guess that the difference is that one set of log files was generated using a more verbose option, maybe -XX:+PrintHeapAtGC, and the other set of log files was generated without that option. Format 1 In some of the log files, the log files with the less verbose format, a single trace, i.e. the report of a singe garbage collection event, looks like this: {Heap before GC invocations=12280 (full 61): garbage-first heap total 9437184K, used 7499918K [0xfffffffd00000000, 0xffffffff40000000, 0xffffffff40000000) region size 4096K, 1 young (4096K), 0 survivors (0K) compacting perm gen total 262144K, used 144077K [0xffffffff40000000, 0xffffffff50000000, 0xffffffff50000000) the space 262144K, 54% used [0xffffffff40000000, 0xffffffff48cb3758, 0xffffffff48cb3800, 0xffffffff50000000) No shared spaces configured. 2014-05-14T07:24:00.988-0700: 60586.353: [GC pause (young) 7324M->7320M(9216M), 0.1567265 secs] Heap after GC invocations=12281 (full 61): garbage-first heap total 9437184K, used 7496533K [0xfffffffd00000000, 0xffffffff40000000, 0xffffffff40000000) region size 4096K, 0 young (0K), 0 survivors (0K) compacting perm gen total 262144K, used 144077K [0xffffffff40000000, 0xffffffff50000000, 0xffffffff50000000) the space 262144K, 54% used [0xffffffff40000000, 0xffffffff48cb3758, 0xffffffff48cb3800, 0xffffffff50000000) No shared spaces configured. 
        }

    A simple grep can be used to extract a summary:

        $ grep "\[GC pause (young" g1gc.log
        2014-05-13T13:24:35.091-0700: 3.109: [GC pause (young) 20M->5029K(9216M), 0.0146328 secs]
        2014-05-13T13:24:35.440-0700: 3.459: [GC pause (young) 9125K->6077K(9216M), 0.0086723 secs]
        2014-05-13T13:24:37.581-0700: 5.599: [GC pause (young) 25M->8470K(9216M), 0.0203820 secs]
        2014-05-13T13:24:42.686-0700: 10.704: [GC pause (young) 44M->15M(9216M), 0.0288848 secs]
        2014-05-13T13:24:48.941-0700: 16.958: [GC pause (young) 51M->20M(9216M), 0.0491244 secs]
        2014-05-13T13:24:56.049-0700: 24.066: [GC pause (young) 92M->26M(9216M), 0.0525368 secs]
        2014-05-13T13:25:34.368-0700: 62.383: [GC pause (young) 602M->68M(9216M), 0.1721173 secs]

    But that format wasn't easily read into R, so I needed to be a bit more tricky. I used the following Unix commands to create a summary file that was easy for R to read.

        $ echo "SecondsSinceLaunch BeforeSize AfterSize TotalSize RealTime"
        $ grep "\[GC pause (young" g1gc.log | grep -v mark | sed -e 's/[A-SU-z\(\),]/ /g' -e 's/->/ /' -e 's/: / /g' | more
        SecondsSinceLaunch BeforeSize AfterSize TotalSize RealTime
        2014-05-13T13:24:35.091-0700 3.109 20 5029 9216 0.0146328
        2014-05-13T13:24:35.440-0700 3.459 9125 6077 9216 0.0086723
        2014-05-13T13:24:37.581-0700 5.599 25 8470 9216 0.0203820
        2014-05-13T13:24:42.686-0700 10.704 44 15 9216 0.0288848
        2014-05-13T13:24:48.941-0700 16.958 51 20 9216 0.0491244
        2014-05-13T13:24:56.049-0700 24.066 92 26 9216 0.0525368
        2014-05-13T13:25:34.368-0700 62.383 602 68 9216 0.1721173

    Format 2

    In some of the log files (those with the more verbose format), a single trace, i.e. the report of a single garbage collection event, was more complicated than Format 1. Here is a text file with an example of a single G1GC trace in the second format. As you can see, it is quite complicated. It is nice that there is so much information available, but the level of detail can be overwhelming. I wrote this awk script (download) to summarize each trace on a single line.

        #!/usr/bin/env awk -f

        BEGIN {
            printf("SecondsSinceLaunch IncrementalCount FullCount UserTime SysTime RealTime BeforeSize AfterSize TotalSize\n")
        }

        ######################
        # Save count data from lines that are at the start of each G1GC trace.
        # Each trace starts out like this:
        # {Heap before GC invocations=14 (full 0):
        #  garbage-first heap   total 9437184K, used 325496K [0xfffffffd00000000, 0xffffffff40000000, 0xffffffff40000000)
        ######################
        /{Heap.*full/{
            gsub ( "\\)" , "" );
            nf=split($0,a,"=");
            split(a[2],b," ");
            getline;
            if ( match($0, "first") ) {
                G1GC=1;
                IncrementalCount=b[1];
                FullCount=substr( b[3], 1, length(b[3])-1 );
            } else {
                G1GC=0;
            }
        }

        ######################
        # Pull out time stamps that are in lines with this format:
        # 2014-05-12T14:02:06.025-0700: 94.312: [GC pause (young), 0.08870154 secs]
        ######################
        /GC pause/ {
            DateTime=$1;
            SecondsSinceLaunch=substr($2, 1, length($2)-1);
        }

        ######################
        # Heap sizes are in lines that look like this:
        # [ 4842M->4838M(9216M)]
        ######################
        /\[ .*]$/ {
            gsub ( "\\[" , "" );
            gsub ( "\\]" , "" );
            gsub ( "->" , " " );
            gsub ( "\\(" , " " );
            gsub ( "\\)" , " " );
            split($0,a," ");
            if ( split(a[1],b,"M") > 1 ) {BeforeSize=b[1]*1024;}
            if ( split(a[1],b,"K") > 1 ) {BeforeSize=b[1];}
            if ( split(a[2],b,"M") > 1 ) {AfterSize=b[1]*1024;}
            if ( split(a[2],b,"K") > 1 ) {AfterSize=b[1];}
            if ( split(a[3],b,"M") > 1 ) {TotalSize=b[1]*1024;}
            if ( split(a[3],b,"K") > 1 ) {TotalSize=b[1];}
        }

        ######################
        # Emit an output line when you find input that looks like this:
        # [Times: user=1.41 sys=0.08, real=0.24 secs]
        ######################
        /\[Times/ {
            if (G1GC==1) {
                gsub ( "," , "" );
                split($2,a,"="); UserTime=a[2];
                split($3,a,"="); SysTime=a[2];
                split($4,a,"="); RealTime=a[2];
                print DateTime,SecondsSinceLaunch,IncrementalCount,FullCount,UserTime,SysTime,RealTime,BeforeSize,AfterSize,TotalSize;
                G1GC=0;
            }
        }

    The resulting summary is about 25X smaller than the original file, but still difficult for a human to digest.

        SecondsSinceLaunch IncrementalCount FullCount UserTime SysTime RealTime BeforeSize AfterSize TotalSize
        ...
        2014-05-12T18:36:34.669-0700: 3985.744 561 0 0.57 0.06 0.16 1724416 1720320 9437184
        2014-05-12T18:36:34.839-0700: 3985.914 562 0 0.51 0.06 0.19 1724416 1720320 9437184
        2014-05-12T18:36:35.069-0700: 3986.144 563 0 0.60 0.04 0.27 1724416 1721344 9437184
        2014-05-12T18:36:35.354-0700: 3986.429 564 0 0.33 0.04 0.09 1725440 1722368 9437184
        2014-05-12T18:36:35.545-0700: 3986.620 565 0 0.58 0.04 0.17 1726464 1722368 9437184
        2014-05-12T18:36:35.726-0700: 3986.801 566 0 0.43 0.05 0.12 1726464 1722368 9437184
        2014-05-12T18:36:35.856-0700: 3986.930 567 0 0.30 0.04 0.07 1726464 1723392 9437184
        2014-05-12T18:36:35.947-0700: 3987.023 568 0 0.61 0.04 0.26 1727488 1723392 9437184
        2014-05-12T18:36:36.228-0700: 3987.302 569 0 0.46 0.04 0.16 1731584 1724416 9437184

    Reading the Data into R

    Once the GC log data had been cleansed, either by processing the first format with the shell commands or by processing the second format with the awk script, it was easy to read the data into R.

        g1gc.df = read.csv("summary.txt", row.names = NULL, stringsAsFactors=FALSE, sep="")
        str(g1gc.df)
        ## 'data.frame': 8307 obs. of 10 variables:
        ##  $ row.names         : chr "2014-05-12T14:00:32.868-0700:" "2014-05-12T14:00:33.179-0700:" "2014-05-12T14:00:33.677-0700:" "2014-05-12T14:00:35.538-0700:" ...
        ##  $ SecondsSinceLaunch: num 1.16 1.47 1.97 3.83 6.1 ...
        ##  $ IncrementalCount  : int 0 1 2 3 4 5 6 7 8 9 ...
        ##  $ FullCount         : int 0 0 0 0 0 0 0 0 0 0 ...
        ##  $ UserTime          : num 0.11 0.05 0.04 0.21 0.08 0.26 0.31 0.33 0.34 0.56 ...
        ##  $ SysTime           : num 0.04 0.01 0.01 0.05 0.01 0.06 0.07 0.06 0.07 0.09 ...
        ##  $ RealTime          : num 0.02 0.02 0.01 0.04 0.02 0.04 0.05 0.04 0.04 0.06 ...
        ##  $ BeforeSize        : int 8192 5496 5768 22528 24576 43008 34816 53248 55296 93184 ...
        ##  $ AfterSize         : int 1400 1672 2557 4907 7072 14336 16384 18432 19456 21504 ...
        ##  $ TotalSize         : int 9437184 9437184 9437184 9437184 9437184 9437184 9437184 9437184 9437184 9437184 ...
        head(g1gc.df)
        ##                       row.names SecondsSinceLaunch IncrementalCount
        ## 1 2014-05-12T14:00:32.868-0700:              1.161                0
        ## 2 2014-05-12T14:00:33.179-0700:              1.472                1
        ## 3 2014-05-12T14:00:33.677-0700:              1.969                2
        ## 4 2014-05-12T14:00:35.538-0700:              3.830                3
        ## 5 2014-05-12T14:00:37.811-0700:              6.103                4
        ## 6 2014-05-12T14:00:41.428-0700:              9.720                5
        ##   FullCount UserTime SysTime RealTime BeforeSize AfterSize TotalSize
        ## 1         0     0.11    0.04     0.02       8192      1400   9437184
        ## 2         0     0.05    0.01     0.02       5496      1672   9437184
        ## 3         0     0.04    0.01     0.01       5768      2557   9437184
        ## 4         0     0.21    0.05     0.04      22528      4907   9437184
        ## 5         0     0.08    0.01     0.02      24576      7072   9437184
        ## 6         0     0.26    0.06     0.04      43008     14336   9437184

    Basic Statistics

    Once the data has been read into R, simple statistics are very easy to generate. All of the numbers from high school statistics are available via simple commands. For example, generate a summary of every column:

        summary(g1gc.df)
        ##   row.names         SecondsSinceLaunch IncrementalCount   FullCount
        ##  Length:8307        Min.   :    1      Min.   :   0     Min.   :  0.0
        ##  Class :character   1st Qu.: 9977      1st Qu.:2048     1st Qu.:  0.0
        ##  Mode  :character   Median :12855      Median :4136     Median : 12.0
        ##                     Mean   :12527      Mean   :4156     Mean   : 31.6
        ##                     3rd Qu.:15758      3rd Qu.:6262     3rd Qu.: 61.0
        ##                     Max.   :55484      Max.   :8391     Max.   :113.0
        ##     UserTime        SysTime          RealTime         BeforeSize
        ##  Min.   :0.040   Min.   :0.0000   Min.   :  0.0   Min.   :   5476
        ##  1st Qu.:0.470   1st Qu.:0.0300   1st Qu.:  0.1   1st Qu.:5137920
        ##  Median :0.620   Median :0.0300   Median :  0.1   Median :6574080
        ##  Mean   :0.751   Mean   :0.0355   Mean   :  0.3   Mean   :5841855
        ##  3rd Qu.:0.920   3rd Qu.:0.0400   3rd Qu.:  0.2   3rd Qu.:7084032
        ##  Max.   :3.370   Max.   :1.5600   Max.   :488.1   Max.   :8696832
        ##    AfterSize          TotalSize
        ##  Min.   :   1380   Min.   :9437184
        ##  1st Qu.:5002752   1st Qu.:9437184
        ##  Median :6559744   Median :9437184
        ##  Mean   :5785454   Mean   :9437184
        ##  3rd Qu.:7054336   3rd Qu.:9437184
        ##  Max.   :8482816   Max.   :9437184

    Q: What is the total amount of User CPU time spent in garbage collection?

        sum(g1gc.df$UserTime)
        ## [1] 6236

    As you can see, less than two hours of CPU time was spent in garbage collection. Is that too much? To find the percentage of time spent in garbage collection, divide the number above by total_elapsed_time*CPU_count. In this case, there are a lot of CPUs and it turns out that the overall amount of CPU time spent in garbage collection isn't a problem when viewed in isolation.

    When calculating rates, i.e. events per unit time, you need to ask yourself if the rate is homogeneous across the time period in the log file. Does the log file include spikes of high activity that should be separately analyzed? Averaging in data from nights and weekends with data from business hours may alias problems. If you have a reason to suspect that the garbage collection rates include peaks and valleys that need independent analysis, see the "Time Series" section, below.

    Q: How much garbage is collected on each pass?

    The amount of heap space that is recovered per GC pass is surprisingly low:

    - At least one collection didn't recover any data. ("Min.=0")
    - 25% of the passes recovered 3MB or less. ("1st Qu.=3072")
    - Half of the GC passes recovered 4MB or less. ("Median=4096")
    - The average amount recovered was 56MB. ("Mean=56390")
    - 75% of the passes recovered 36MB or less. ("3rd Qu.=36860")
    - At least one pass recovered 2GB. ("Max.=2121000")

        g1gc.df$Delta = g1gc.df$BeforeSize - g1gc.df$AfterSize
        summary(g1gc.df$Delta)
        ##    Min. 1st Qu.  Median    Mean 3rd Qu.    Max.
        ##       0    3070    4100   56400   36900 2120000

    Q: What is the maximum User CPU time for a single collection?

    The worst garbage collection ("Max.") is many standard deviations away from the mean. The data appears to be right skewed.

        summary(g1gc.df$UserTime)
        ##    Min. 1st Qu.  Median    Mean 3rd Qu.    Max.
        ##   0.040   0.470   0.620   0.751   0.920   3.370
        sd(g1gc.df$UserTime)
        ## [1] 0.3966

    Basic Graphics

    Once the data is in R, it is trivial to plot the data with formats including dot plots, line charts, bar charts (simple, stacked, grouped), pie charts, boxplots, scatter plots, histograms, and kernel density plots.

    Histogram of User CPU Time per Collection

    I don't think that this graph requires any explanation.

        hist(g1gc.df$UserTime,
             main="User CPU Time per Collection",
             xlab="Seconds", ylab="Frequency")

    Box plot to identify outliers

    When the initial data is viewed with a box plot, you can see the one crazy outlier in the real time per GC. Save this data point for future analysis and drop the outlier so that it's not throwing off our statistics. Now the box plot shows many outliers, which will be examined later, using time series analysis. Notice that the scale of the x-axis changes drastically once the crazy outlier is removed.

        par(mfrow=c(2,1))
        boxplot(g1gc.df$UserTime, g1gc.df$SysTime, g1gc.df$RealTime,
                main="Box Plot of Time per GC\n(dominated by a crazy outlier)",
                names=c("usr","sys","elapsed"),
                xlab="Seconds per GC", ylab="Time (Seconds)",
                horizontal = TRUE, outcol="red")
        crazy.outlier.df=g1gc.df[g1gc.df$RealTime > 400,]
        g1gc.df=g1gc.df[g1gc.df$RealTime < 400,]
        boxplot(g1gc.df$UserTime, g1gc.df$SysTime, g1gc.df$RealTime,
                main="Box Plot of Time per GC\n(crazy outlier excluded)",
                names=c("usr","sys","elapsed"),
                xlab="Seconds per GC", ylab="Time (Seconds)",
                horizontal = TRUE, outcol="red")
        box(which = "outer", lty = "solid")

    Here is the crazy outlier for future analysis:

        crazy.outlier.df
        ##                          row.names SecondsSinceLaunch IncrementalCount
        ## 8233 2014-05-12T23:15:43.903-0700:              20741             8316
        ##      FullCount UserTime SysTime RealTime BeforeSize AfterSize TotalSize
        ## 8233       112     0.55    0.42    488.1    8381440   8235008   9437184
        ##       Delta
        ## 8233 146432

    R Time Series Data

    To analyze the garbage collection as a time series, I'll use Z's Ordered Observations (zoo).
    "zoo is the creator for an S3 class of indexed totally ordered observations which includes irregular time series."

        require(zoo)
        ## Loading required package: zoo
        ##
        ## Attaching package: 'zoo'
        ##
        ## The following objects are masked from 'package:base':
        ##
        ##     as.Date, as.Date.numeric
        head(g1gc.df[,1])
        ## [1] "2014-05-12T14:00:32.868-0700:" "2014-05-12T14:00:33.179-0700:"
        ## [3] "2014-05-12T14:00:33.677-0700:" "2014-05-12T14:00:35.538-0700:"
        ## [5] "2014-05-12T14:00:37.811-0700:" "2014-05-12T14:00:41.428-0700:"
        options("digits.secs"=3)
        times=as.POSIXct( g1gc.df[,1], format="%Y-%m-%dT%H:%M:%OS%z:")
        g1gc.z = zoo(g1gc.df[,-c(1)], order.by=times)
        head(g1gc.z)
        ##                         SecondsSinceLaunch IncrementalCount FullCount
        ## 2014-05-12 17:00:32.868              1.161                0         0
        ## 2014-05-12 17:00:33.178              1.472                1         0
        ## 2014-05-12 17:00:33.677              1.969                2         0
        ## 2014-05-12 17:00:35.538              3.830                3         0
        ## 2014-05-12 17:00:37.811              6.103                4         0
        ## 2014-05-12 17:00:41.427              9.720                5         0
        ##                         UserTime SysTime RealTime BeforeSize AfterSize
        ## 2014-05-12 17:00:32.868     0.11    0.04     0.02       8192      1400
        ## 2014-05-12 17:00:33.178     0.05    0.01     0.02       5496      1672
        ## 2014-05-12 17:00:33.677     0.04    0.01     0.01       5768      2557
        ## 2014-05-12 17:00:35.538     0.21    0.05     0.04      22528      4907
        ## 2014-05-12 17:00:37.811     0.08    0.01     0.02      24576      7072
        ## 2014-05-12 17:00:41.427     0.26    0.06     0.04      43008     14336
        ##                         TotalSize Delta
        ## 2014-05-12 17:00:32.868   9437184  6792
        ## 2014-05-12 17:00:33.178   9437184  3824
        ## 2014-05-12 17:00:33.677   9437184  3211
        ## 2014-05-12 17:00:35.538   9437184 17621
        ## 2014-05-12 17:00:37.811   9437184 17504
        ## 2014-05-12 17:00:41.427   9437184 28672

    Example of Two Benchmark Runs in One Log File

    The data in the following graph is from a different log file, not the one of primary interest to this article. I'm including this image because it is an example of idle periods followed by busy periods. It would be uninteresting to average the rate of garbage collection over the entire log file period. More interesting would be the rate of garbage collection in the two busy periods. Are they the same or different? Your production data may be similar, for example, bursts when employees return from lunch and idle times on weekend evenings, etc. Once the data is in an R Time Series, you can analyze isolated time windows.

    Clipping the Time Series data

    Flashing back to our test case... Viewing the data as a time series is interesting. You can see that the work intensive time period is between 9:00 PM and 3:00 AM. Let's clip the data to the interesting period:

        par(mfrow=c(2,1))
        plot(g1gc.z$UserTime, type="h",
             main="User Time per GC\nTime: Complete Log File",
             xlab="Time of Day", ylab="CPU Seconds per GC",
             col="#1b9e77")
        clipped.g1gc.z=window(g1gc.z,
                              start=as.POSIXct("2014-05-12 21:00:00"),
                              end=as.POSIXct("2014-05-13 03:00:00"))
        plot(clipped.g1gc.z$UserTime, type="h",
             main="User Time per GC\nTime: Limited to Benchmark Execution",
             xlab="Time of Day", ylab="CPU Seconds per GC",
             col="#1b9e77")
        box(which = "outer", lty = "solid")

    Cumulative Incremental and Full GC count

    Here is the cumulative incremental and full GC count. When the line is very steep, it indicates that the GCs are repeating very quickly. Notice that the scale on the Y axis is different for full vs. incremental.

        plot(clipped.g1gc.z[,c(2:3)],
             main="Cumulative Incremental and Full GC count",
             xlab="Time of Day",
             col="#1b9e77")

    GC Analysis of Benchmark Execution using Time Series data

    In the following series of 3 graphs:

    The "After Size" graph shows the amount of heap space in use after each garbage collection. Many Java objects are still referenced, i.e. alive, during each garbage collection. This may indicate that the application has a memory leak, or may indicate that the application has a very large memory footprint. Typically, an application's memory footprint plateaus in the early stage of execution. One would expect this graph to have a flat top. The steep decline in the heap space may indicate that the application crashed after 2:00.

    The second graph shows that the outliers in real execution time, discussed above, occur near 2:00, when the Java heap seems to be quite full.

    The third graph shows that Full GCs are infrequent during the first few hours of execution. The rate of Full GCs (the slope of the cumulative Full GC line) changes near midnight.

        plot(clipped.g1gc.z[,c("AfterSize","RealTime","FullCount")],
             xlab="Time of Day",
             col=c("#1b9e77","red","#1b9e77"))

    GC Analysis of heap recovered

    Each GC trace includes the amount of heap space in use before and after the individual GC event. During garbage collection, unreferenced objects are identified, the space holding the unreferenced objects is freed, and thus, the difference in before and after usage indicates how much space has been freed. The following box plot and bar chart both demonstrate the same point - the amount of heap space freed per garbage collection is surprisingly low.

        par(mfrow=c(2,1))
        boxplot(as.vector(clipped.g1gc.z$Delta),
                main="Amount of Heap Recovered per GC Pass",
                xlab="Size in KB",
                horizontal = TRUE,
                col="red")
        hist(as.vector(clipped.g1gc.z$Delta),
             main="Amount of Heap Recovered per GC Pass",
             xlab="Size in KB",
             breaks=100,
             col="red")
        box(which = "outer", lty = "solid")

    This graph is the most interesting. The dark blue area shows how much heap is occupied by referenced Java objects. This represents memory that holds live data. The red fringe at the top shows how much data was recovered after each garbage collection.

        barplot(clipped.g1gc.z[,c("AfterSize","Delta")],
                col=c("#7570b3","#e7298a"),
                xlab="Time of Day",
                border=NA)
        legend("topleft",
               c("Live Objects","Heap Recovered on GC"),
               fill=c("#7570b3","#e7298a"))
        box(which = "outer", lty = "solid")

    When I discuss the data in the log files with the customer, I will ask for an explanation for the large amount of referenced data resident in the Java heap. There are two possibilities:

    - There is a memory leak and the amount of space required to hold referenced objects will continue to grow, limited only by the maximum heap size. After the maximum heap size is reached, the JVM will throw an "Out of Memory" exception every time that the application tries to allocate a new object. If this is the case, the application needs to be debugged to identify why old objects are referenced when they are no longer needed.
    - The application has a legitimate requirement to keep a large amount of data in memory. The customer may want to further increase the maximum heap size. Another possible solution would be to partition the application across multiple cluster nodes, where each node has responsibility for managing a unique subset of the data.

    Conclusion

    In conclusion, R is a very powerful tool for the analysis of Java garbage collection log files. The primary difficulty is data cleansing so that information can be read into an R data frame. Once the data has been read into R, a rich set of tools may be used for thorough evaluation.

    Read the article

  • Configure (or mimic) svn:externals to include code from Github in a svn-hosted project

    - by Dylan Beattie
    We use Subversion locally, and we're working on a project that uses a fork of Fluent NHibernate, which is hosted on Github. I'd like it set up so that a single svn checkout will retrieve everything necessary to build the project, but maintain the ability to fetch HEAD updates from github. Is there any way I can pull code from the Git repository as though it was an svn:external dependency? Can I just check the .git folder into our Subversion repository and just run git fetch when I need to, then svn commit the results?
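
    One possible way to wire this up (a sketch, not something verified against this particular fork): GitHub has historically exposed repositories over the Subversion protocol, so an svn:externals definition can point straight at the fork's trunk and a single svn checkout will then pull it in. The URL and directory name below are placeholders.

        # Run in the root of the Subversion working copy; the GitHub URL is hypothetical.
        svn propset svn:externals "FluentNHibernate https://github.com/yourname/fluent-nhibernate/trunk" .
        svn commit -m "Reference the Fluent NHibernate fork via svn:externals"
        svn update   # a plain checkout/update now fetches the fork as well

    Checking the .git folder itself into Subversion tends to make both tools fight over the same files, so keeping the dependency as an external (or as a plain git clone that svn ignores) is usually the calmer option.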

    Read the article

  • How to understand these lines in apache.log

    - by chefnelone
    I just got 19000 lines like these in the apache.log file for my site example.com. My hosting provider shut down the hosting and notified me that I need to prevent this in order to activate my hosting again. I understand that I got a large number of visits but I don't know how to avoid this.

        88.190.47.233 - - [27/Jun/2013:09:51:34 +0200] "GET / HTTP/1.0" 403 389 "http://example.com/" "Opera/9.80 (Windows NT 6.1; U; ru) Presto/2.10.289 Version/12.02" 417
        88.190.47.233 - - [27/Jun/2013:09:51:34 +0200] "GET / HTTP/1.0" 403 389 "http://example.com/" "Opera/9.80 (Windows NT 6.1; U; ru) Presto/2.10.289 Version/12.02" 417
        175.44.28.155 - - [27/Jun/2013:09:51:44 +0200] "GET /en/user/register HTTP/1.1" 403 503 "http://example.com/en/" "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1;)" 248
        175.44.29.140 - - [27/Jun/2013:09:53:19 +0200] "GET /en/node/1557?page=2 HTTP/1.0" 403 517 "http://example.com/en/node/1557?page=2" "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.12 Safari/535.11" 491

    These are the lines from apache-error.log. There are more than 35000 lines like this.

        [Thu Jun 27 09:50:58 2013] [error] [client 5.39.19.183] (13)Permission denied: access to /index.php denied, referer: http://example.com/
        [Thu Jun 27 09:51:03 2013] [error] [client 125.112.29.105] (13)Permission denied: access to /index.php denied, referer: http://example.com/en/
        [Thu Jun 27 09:51:34 2013] [error] [client 88.190.47.233] (13)Permission denied: access to /index.php denied, referer: http://example.com/en/node/1557?page=1#comment-701
        [Thu Jun 27 09:51:34 2013] [error] [client 88.190.47.233] (13)Permission denied: access to /index.php denied, referer: http://example.com/en/node/1557?page=1#comment-701
        [Thu Jun 27 09:51:34 2013] [error] [client 88.190.47.233] (13)Permission denied: access to /index.html denied, referer: http://example.com/en/node/1557?page=1#comment-701
        [Thu Jun 27 09:51:34 2013] [error] [client 88.190.47.233] (13)Permission denied: access to /index.htm denied, referer: http://example.com/en/node/1557?page=1#comment-701
        [Thu Jun 27 09:51:34 2013] [error] [client 88.190.47.233] (13)Permission denied: access to /index.php denied, referer: http://example.com/
        [Thu Jun 27 09:51:34 2013] [error] [client 88.190.47.233] (13)Permission denied: access to /index.html denied, referer: http://example.com/
        [Thu Jun 27 09:51:34 2013] [error] [client 88.190.47.233] (13)Permission denied: access to /index.htm denied, referer: http://example.com/
        [Thu Jun 27 09:51:34 2013] [error] [client 88.190.47.233] (13)Permission denied: access to /index.php denied, referer: http://example.com/
        [Thu Jun 27 09:51:34 2013] [error] [client 88.190.47.233] (13)Permission denied: access to /index.html denied, referer: http://example.com/
        [Thu Jun 27 09:51:34 2013] [error] [client 88.190.47.233] (13)Permission denied: access to /index.htm denied, referer: http://example.com/
        [Thu Jun 27 09:51:44 2013] [error] [client 175.44.28.155] (13)Permission denied: access to /index.php denied, referer: http://example.com/en/
        [Thu Jun 27 09:53:19 2013] [error] [client 175.44.29.140] (13)Permission denied: access to /index.php denied, referer: http://example.com/en/node/1557?page=2
        [Thu Jun 27 09:53:20 2013] [error] [client 175.44.29.140] (13)Permission denied: access to /index.php denied, referer: http://example.com/en/node/1557?page=2
        [Thu Jun 27 09:53:20 2013] [error] [client 175.44.29.140] (13)Permission denied: access to /index.html denied, referer: http://example.com/en/node/1557?page=2
        [Thu Jun 27 09:53:20 2013] [error] [client 175.44.29.140] (13)Permission denied: access to /index.htm denied, referer: http://example.com/en/node/1557?page=2
        [Thu Jun 27 09:53:21 2013] [error] [client 175.44.29.140] (13)Permission denied: access to /index.php denied, referer: http://example.com/
        [Thu Jun 27 09:53:21 2013] [error] [client 175.44.29.140] (13)Permission denied: access to /index.html denied, referer: http://example.com/
        [Thu Jun 27 09:53:21 2013] [error] [client 175.44.29.140] (13)Permission denied: access to /index.htm denied, referer: http://example.com/
        [Thu Jun 27 09:53:22 2013] [error] [client 175.44.29.140] (13)Permission denied: access to /index.php denied, referer: http://example.com/
        [Thu Jun 27 09:53:22 2013] [error] [client 175.44.29.140] (13)Permission denied: access to /index.html denied, referer: http://example.com/
        [Thu Jun 27 09:53:22 2013] [error] [client 175.44.29.140] (13)Permission denied: access to /index.htm denied, referer: http://example.com/
        [Thu Jun 27 09:56:53 2013] [error] [client 113.246.6.147] (13)Permission denied: access to /index.php denied, referer: http://example.com/en/
        [Thu Jun 27 09:58:58 2013] [error] [client 108.62.71.180] (13)Permission denied: access to /index.php denied, referer: http://example.com/

    Read the article

  • Is Version control with GIT useful to work in small projects for an individual developer? [closed]

    - by chefnelone
    I work as a website developer. I develop with the Drupal CMS. I have a Drupal base installation which has some settings that are sort of a default for all my projects; this installation is my drupal-base folder. Every time I start a new project I just duplicate the drupal-base folder and start coding the new features I need for the new project. The problem is that sometimes I work on more than one project at the same time, and I add a new feature in one of the projects that I'd like to commit to my Drupal base installation and also to the other projects. Keeping all of this in sync is a nightmare. I thought that version control with GIT could help me with this and I went through a tutorial about it, but now I'm not sure if it will be useful for me. My question is: I think that GIT is only useful for big projects where a team is working together on the same files, and that it is not useful for small, individual projects. Am I right?
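
    For what it's worth, git can help with exactly this sync problem even for a single developer. A minimal sketch, assuming the drupal-base folder becomes one repository and each project lives on its own branch (branch names and the commit id are placeholders):

        # One-time setup: turn drupal-base into a repository
        cd drupal-base
        git init
        git add .
        git commit -m "Drupal base installation"

        # Start a new project as a branch of the base
        git checkout -b project-one

        # Later: a feature written on project-one that belongs in the base
        git checkout master                 # the shared base
        git cherry-pick <commit-id>         # copy just that feature commit
        git checkout project-two
        git merge master                    # pull the updated base into another project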

    Read the article

  • How to open-source a project whose git repository has copyrighted media in the history?

    - by phyzome
    I want to release an audio fingerprinting software project under a free license, but the repository contains copyrighted audio files. The test cases also currently use these files. How do I release the code to the public with maximum version history but without violating copyright? Details: The code is versioned under git. We will collapse it all back into one branch before release. There are 400 MB of audio data. Some files are free-licensed music from e.g. Jamendo, others are MP3s from our personal collections. No matter what approach we take, we'll always keep an immutable copy of the original repo, so as not to destroy project history. Main question: How to handle the public release? Expunge all history of the files in question from the git repository and release the altered repo. (v64 pointed out a way to do this.) Alternatively, take a snapshot of the current state of the code and don't even bother having a public history of the pre-release code. Side question: How could we have avoided this dilemma in the first place, given that sometimes private code or media is needed for the early stages of a project?
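
    For the "expunge the history" option, a hedged sketch of the usual approach, run on a throwaway clone because it rewrites every commit (the audio/ path is a placeholder for wherever the copyrighted files live):

        # Work on a fresh clone; the original repo stays untouched.
        git clone /path/to/original repo-public
        cd repo-public
        git filter-branch --index-filter \
            'git rm -r --cached --ignore-unmatch audio/' \
            --prune-empty --tag-name-filter cat -- --all

        # Drop the backup refs and repack so the old blobs really go away.
        rm -rf .git/refs/original
        git reflog expire --expire=now --all
        git gc --prune=now --aggressive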

    Read the article

  • Free APress e-book on GIT!

    - by TATWORTH
    Originally posted on: http://geekswithblogs.net/TATWORTH/archive/2013/07/24/free-apress-e-book-on-git.aspxA free e-book in PDF, mobi and ePub formats is available at http://git-scm.com/book"Programmers or project leaders will learn to use Git, the version control system developed by Linus Torvalds for Linux kernel development. You'll discover the world of distributed version control and learn how to build a Git development workflow, with expert guidance from Scott Chacon."

    Read the article

  • GIT repository layout for server with multiple projects

    - by Paul Alexander
    One of the things I like about the way I have Subversion set up is that I can have a single main repository with multiple projects. When I want to work on a project I can check out just that project. Like this \main \ProductA \ProductB \Shared then svn checkout http://.../main/ProductA As a new user to git I want to explore a bit of best practice in the field before committing to a specific workflow. From what I've read so far, git stores everything in a single .git folder at the root of the project tree. So I could do one of two things. Set up a separate project for each Product. Set up a single massive project and store products in sub folders. There are dependencies between the products, so the single massive project seems appropriate. We'll be using a server where all the developers can share their code. I've already got this working over SSH & HTTP and that part I love. However, the repositories in SVN are already many GB in size so dragging around the entire repository on each machine seems like a bad idea - especially since we're billed for excessive network bandwidth. I'd imagine that the Linux kernel project repositories are equally large so there must be a proper way of handling this with Git but I just haven't figured it out yet. Are there any guidelines or best practices for working with very large multi-project repositories?
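
    One middle ground worth sketching (not the only sane layout): a repository per product plus a thin superproject that ties them together with submodules, so a developer who needs one product clones only that repository while the full tree is still one command away. The server paths below are hypothetical.

        # Each product lives in its own repository on the server.
        git clone ssh://git@server/srv/git/main.git
        cd main
        git submodule add ssh://git@server/srv/git/ProductA.git ProductA
        git submodule add ssh://git@server/srv/git/Shared.git Shared
        git commit -m "Track ProductA and Shared as submodules"

        # A full checkout on another machine:
        git clone ssh://git@server/srv/git/main.git
        cd main && git submodule update --init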

    Read the article

  • git merge with renamed files

    - by Kevin
    I have a large website that I am moving into a new framework and in the process adding git. The current site doesn't have any version control on it. I started by copying the site into a new git repository. I made a new branch and made all of the changes that were needed to make it work with the new framework. One of those steps was changing the file extension of all of the pages. Now, in the time that I have been working on the new site, changes have been made to files on the old site. So I switched to master and copied all of those changes in. The problem is that when I merge the branch with the new framework back onto master, there is a conflict on every file that was changed on the master branch. I wouldn't be too worried about it but there are a couple of hundred files with changes. I have tried git rebase and git rebase --merge with no luck. How can I merge these 2 branches without dealing with every file?
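
    One thing to try before resolving files one by one: git pairs old and new file names at merge time by content similarity, so raising the rename-detection limits and redoing the merge sometimes clears most of the conflicts on its own. A sketch, with placeholder branch names; rename-threshold is spelled find-renames in newer git releases.

        # Don't let rename detection give up on a large tree.
        git config merge.renameLimit 999999

        git checkout master
        # Lower the similarity required to pair renamed files, then merge.
        git merge -X rename-threshold=25 new-framework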

    Read the article

  • Managing large binary files with git

    - by pi
    Hi there. I am looking for opinions on how to handle large binary files on which my source code (a web application) depends. We are currently discussing several alternatives:

    - Copy the binary files by hand. Pro: Not sure. Contra: I am strongly against this, as it increases the likelihood of errors when setting up a new site/migrating the old one. Builds up another hurdle to take.
    - Manage them all with git. Pro: Removes the possibility to 'forget' to copy an important file. Contra: Bloats the repository and decreases the flexibility to manage the code base, and checkouts/clones/etc. will take quite a while.
    - Separate repositories. Pro: Checking out/cloning the source code is fast as ever, and the images are properly archived in their own repository. Contra: Removes the simplicity of having the one and only git repository on the project. Surely introduces some other things I haven't thought about.

    What are your experiences/thoughts regarding this? Also: does anybody have experience with multiple git repositories and managing them in one project?

    Update: The files are images for a program which generates PDFs with those files in it. The files will not change very often (as in years) but are very relevant to the program. The program will not work without the files.

    Update 2: I found a really nice screencast on using git-submodule at GitCasts.
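
    A sketch of the "separate repositories" option, wiring the image repository into the application as a submodule so a single clone plus one command still fetches everything (paths and URLs are hypothetical):

        # In the web application repository
        git submodule add git://server/pdf-images.git assets/pdf-images
        git commit -m "Reference the image archive as a submodule"

        # A fresh checkout that also needs the binaries
        git clone git://server/webapp.git
        cd webapp
        git submodule update --init    # pulls assets/pdf-images

    Day-to-day clones that never touch the PDFs can simply skip the submodule update and stay small.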

    Read the article

  • Git graph with ref logs

    - by Francisco Garcia
    I am trying to improve my custom git log format string. I have almost everything I want except the ref names. I can already get a log similar to what I want:

        > git log --all --source --pretty=oneline --graph
        * b7c7ad3855b54e94ad7ac03f2d2e5b96d6e5ac1d refs/heads/b1 na
        | * 695e1482622a79230fa1d83afb8d70e86847334a refs/heads/master Merge branch 'b1'
        | |\
        | |/
        |/|
        * | ec21f370f82096c0208f43b390da234d92e8c74a refs/heads/b1 beta
        * | c6bc1f55ab3b1bd568493a5de4298dfcb4f66d8d refs/heads/b1 alfa
        * | 762dd868ae87753afc1cbf9803744c76f9a9e121 refs/heads/b1 tango
        | * 57fb27bff06ee9bb569f93ba815e9dcd69521c13 refs/heads/master little last post commit
        |/
        | * 8d613d09b43152a7263b6e02d47ec8a4304f54be refs/heads/b3 the other commit
        | * e1f32b7cb86633351df06e37c2c58ef3f9fafc40 refs/heads/b3 something
        |/
        | * 01b5c6728cf25dd576733211ce75dd3ecc29c7ba refs/heads/b2 this time a

    I am fighting to get a customized output with my own format string like this:

        > git log --pretty=format:'%h - %gD %s' --source -g
        b7c7ad3 - HEAD@{0} na
        ec21f37 - HEAD@{1} beta
        01b5c67 - HEAD@{2} this time a
        01b5c67 - HEAD@{3} this time a
        695e148 - HEAD@{4} Merge branch 'b1'
        57fb27b - HEAD@{5} little last post commit

    My main problem is that I cannot get the ref names I want. I assume it is one of the %g? format strings, but none of them seem to give me the full ref name. Another problem is that the %g? format strings are empty unless I walk the reflogs (-g). However, git refuses to combine --graph with -g. How can I reproduce the first sample with a format string which I can further customize?
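
    A hedged pointer: the ref names printed by --pretty=oneline come from ref decoration rather than from the reflog, so the placeholder to combine with --graph is %d (ref names in parentheses; newer releases also have %D without the wrapping) instead of the %g? family, and no -g is needed.

        # Decorated, graphed, customizable one-line format:
        git log --all --graph --pretty=format:'%h -%d %s'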

    Read the article

  • Web development scheme for staging and production servers using Git Push

    - by ServAce85
    I am using git to manage a dynamic website (PHP + MySQL) and I want to send my files from my localhost to my staging and development servers in the most efficient and hassle-free way. I am currently convinced that the best way for me to approach this problem is to use this git branching model to organize my local git repo. From there, I will use the release branches to push to my staging server for testing. Once I am happy that the release code works on the staging server, I can then merge with my master branch and push that to my production server. Pushing to Staging Server: As noted in many introductory git posts, I could run into problems pushing into a non-bare repo, so, as suggested in this response, I plan to push the release branch to a bare repo on the server and have a post-receive hook that clones the bare repo to a non-bare repo that also acts as the web-hosted directory. Pushing to Production Server: Here's my newest source of confusion... In the response that I cited above, it made me curious as to why @Paul states that it's a completely different story when pushing to a live, development server. I guess I don't see the problem. Would it be safe and hassle-free to follow the same steps as above, but for the master branch? Where are the potential pit-falls? Config Files: With respect to configuration files that are unique to each environment (.htaccess, config.php, etc), it seems simplest to .gitignore each of those files in their respective repos on their respective servers. Can you see anything immediately wrong with this? Better solutions? Accessing Data: Finally, as I initially stated, the site uses MySQL databases to store data. How would you suggest I access that data (for testing purposes) from the staging server and localhost? I realize that I may have asked way too many questions for a single post, but since they're all related to the best way to set up this development scheme, I thought it was necessary.
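
    For the staging push, a minimal post-receive hook of the kind referenced above, assuming a bare repo at /srv/git/site.git and the web root at /var/www/staging (both paths hypothetical); the production box can use the same pattern with the master branch.

        #!/bin/sh
        # /srv/git/site.git/hooks/post-receive  (make it executable)
        # Check the pushed release branch out into the web-hosted directory.
        GIT_WORK_TREE=/var/www/staging git checkout -f release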

    Read the article

  • Outgoing Emails (Git Patches) Blocked by Windows Live

    - by SteveStifler
    Just recently I dove into the VideoLAN open source project. This was my first time using git, and when sending in my first patch (using git send-email --to [email protected] patches), I was sent the following message from my computer's local mail in the terminal (I'm on OSX 10.6 by the way): Mail rejected by Windows Live Hotmail for policy reasons. We generally do not accept email from dynamic IP's as they are not typically used to deliver unauthenticated SMTP e-mail to an Internet mail server. http:/www.spamhaus.org maintains lists of dynamic and residential IP addresses. If you are not an email/network admin please contact your E-mail/Internet Service Provider for help. Email/network admins, please visit http://postmaster.live.com for email delivery information and support They must think I'm a spammer. I have a dynamic IP and my ISP (Charter) won't let me get a static one, so I tried editing git preferences: git config --global user.email "[email protected]" to my gmail account. However I got the exact same message again. My guess is that it has something to do with the native mail's preferences, but I have no idea how to access them or modify them. Anybody have any ideas for solving this? Thanks!
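
    Changing user.email only changes the From header; the mail still leaves through the local SMTP server on a dynamic IP, which is what Hotmail is rejecting. A hedged sketch of routing git send-email through Gmail's authenticated SMTP instead (the account and list address are placeholders):

        git config --global sendemail.smtpserver smtp.gmail.com
        git config --global sendemail.smtpserverport 587
        git config --global sendemail.smtpencryption tls
        git config --global sendemail.smtpuser yourname@gmail.com

        # git send-email prompts for the password at send time
        git send-email --to <list-address> patches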

    Read the article

  • git: remove 2nd commit

    - by cwolves
    I'm trying to remove the 2nd commit to a repo. At this point I could just blow away the .git dir and re-do it, but I'm curious how to do this... I've deleted commits before, but apparently never the 2nd one :) > git log commit c39019e4b08497406c53ceb532f99801793205ca Author: Me Date: Thu Mar 22 14:02:41 2012 -0700 Initializing registry directories commit 535dce28f1c68e8af9d22bc653aca426fb7825d8 Author: Me Date: Tue Jan 31 21:04:13 2012 -0800 First Commit > git rebase -i HEAD~2 fatal: Needed a single revision invalid upstream HEAD~2 > git rebase -i HEAD~1 at which point I get in my editor: pick c39019e Initializing registry directories # Rebase 535dce2..c39019e onto 535dce2 # # Commands: # p, pick = use commit # r, reword = use commit, but edit the commit message # e, edit = use commit, but stop for amending # s, squash = use commit, but meld into previous commit # f, fixup = like "squash", but discard this commit's log message # x, exec = run command (the rest of the line) using shell # # If you remove a line here THAT COMMIT WILL BE LOST. # However, if you remove everything, the rebase will be aborted. # Now my problem is that I can't just blow away this 2nd commit since "if you remove everything, the rebase will be aborted"
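
    Since the commit to drop is the newest one, interactive rebase may be overkill; a sketch of two options (the second needs a reasonably recent git, which added --root for exactly this "not enough parents" case):

        # Throw away the most recent commit entirely, working tree included:
        git reset --hard HEAD~1

        # Or edit the full history, first commit included:
        git rebase -i --root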

    Read the article

  • Git is not using the first editor in my $PATH

    - by GuillaumeA
    I am using OS X 10.8, and I used brew to install a more recent version of emacs than the one shipped with OS X. The newer emacs binary is installed in /usr/local/bin (24.2.1), and the old "shipped-with-osx" one in /usr/bin (22.1.1). I updated my $PATH env variable by prepending /usr/local/bin to it. It works fine in my shell (i.e. typing emacs runs the 24.2.1 version), but when git opens the editor, the emacs version is 22.1.1. Isn't git supposed to use $PATH to find the editor I want to use? Additional information:

        $ type -a emacs
        emacs is /usr/local/bin/emacs
        emacs is /usr/bin/emacs
        emacs is /usr/local/bin/emacs
        $ env
        PATH=/usr/local/bin:/usr/local/sbin:/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/bin
        SHELL=/bin/zsh
        PAGER=most
        EDITOR=emacs -nw
        _=/usr/bin/env

    Please note that I'd prefer not to set the absolute path of my editor directly in my git conf, as I use this conf across multiple systems.

    EDIT: Here's a bit of my .zshrc:

        # Mac OS X
        if [ `uname` = "Darwin" ]; then
            # Brew binaries
            PATH="/usr/local/bin":"/usr/local/sbin":$PATH
        else
            # Everyone else (Linux)
            # snip
        fi

    So, yes, I could add a line export EDITOR='/usr/local/bin/emacs -nw' in the first if, but I'd like to understand why git is not using my PATH variable :)
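
    Without hard-coding an absolute path in the git config, one workaround (a sketch, not an explanation of the underlying cause) is to resolve the editor once per shell and hand it to git explicitly; git consults GIT_EDITOR and core.editor before falling back to EDITOR.

        # In .zshrc, after PATH is set up: resolves to /usr/local/bin/emacs here,
        # and to whatever emacs comes first in PATH on other systems.
        export GIT_EDITOR="$(command -v emacs) -nw"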

    Read the article

  • Expressionengine 2 and git (version control)

    - by Danny
    Hey guys, I'm looking to move over to using git to make my EE development a lot easier and more manageable. I'm already aware of the guides posted on devotee and a few other sites, but after scanning over them they seem a little old and seem to be specifically for EE 1.x, so I was wondering if anyone had been successful with EE 2. I've only recently made the transition from svn to git; previously I found that using EE via svn was a ballache: so many config conflicts, wrong URLs, and all versions of the site were using the same database. I'm basically looking for the best, or should I say the ideal, way to set up both git and EE to work in harmony together. I'd also like to learn how to branch other sites I develop with EE from this, so if anyone has experience with that it'd be great! Also, if it's any use, I'm hosted by Dreamhost. As far as I understand they support git, and I've looked over their knowledge base on how best to set things up. Would anyone recommend their way of doing things, and has anyone had a successful experience whilst doing so? I look forward to hearing your responses! Thanks. (Sent from my iPhone whilst falling asleep, so excuse the possible typos!)

    Read the article

  • Using git (or some other VCS) at your company

    - by supercheetah
    Some friends of mine and I were talking recently about version control, and how they were using VSS at their jobs, and were probably going to be moving off of that soon. One of them said that his company will likely be going with Team Foundation Server. Eventually, the conversation did get around to talking about some of the open source VCSes out there, including git and SVN. None of us really knew of any companies that use either of these internally, although we imagined that a number of them did so for SVN, but we weren't too sure about git. I brought up Google and Android using it, but my friend figured that's only for the public facing source code, and that they may use something different for internal projects. Apparently it's more than just SCM that makes TFS so intriguing:

    - Microsoft sales people and support (although my friend did point out some things to his managers that he thought might be misleading on MS' part)
    - Integration of things beyond SCM, including project management (I'm just finding out that there are tools geared towards the same things for git)
    - Again, it's Microsoft, and the transition from VSS to TFS seems logical (or does it?)

    I'm not much of a fan of SVN, so I didn't really bring it up much, but I am curious about whether or not git is used at your company for internal projects. Have you thought about it, and decided against it? Any reason why?

    Read the article

  • Git repositories on shared hosting with ssh access - multiple users / one ssh account

    - by acp
    I'm part of a small team trying to start coding on a project. I've decided it's time to give git a chance (no more svn) and was trying to see if we could use our shared web hosting to deploy a "public" repository there so that we can easily push/pull to/from it and keep up to date with each other's changes. The problem I'm having now is that we only have a single ssh account for that hosting. Having used svn in the past, I could enforce an svn username on a given pair of ssh keys; however, I don't seem to be able to do something similar with git (in other words, tie the ssh keypair to a specific dev). I don't mind everybody having read/write permissions everywhere, since anything that is private should stay on each developer's own machine. Finally, solutions such as gitosis cannot be used. I guess my question to you is: how is accountability for git pushes established? Is it tied to the ssh account being used, or to the email address given in the git config? Can I create different ssh keys for every developer (for the same ssh account though), and just send them to the devs?
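
    On the accountability point: the author recorded on each commit comes from user.name and user.email in each developer's local configuration, not from the ssh account used to push, so everyone keeps their own identity even though every push arrives as the same shell user. A quick sketch (names are examples):

        # On each developer's machine
        git config --global user.name  "Jane Developer"
        git config --global user.email jane@example.com

        # On the shared host, one bare repository that everybody pushes to
        git init --bare ~/project.git

        # Who did what, later:
        git log --format='%h %an <%ae> %s'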

    Read the article

  • Windows 7 CHKDSK log - What is "Internal Info"?

    - by Ron Klein
    If I run Disk Scan (CHKDSK) on Windows 7, I get the log in the event viewer. If I look inside it, I can see some kind of a binary dump: Internal Info: 00 4f 05 00 53 4a 05 00 ec 46 09 00 00 00 00 00 .O..SJ...F...... fa 03 00 00 5c 00 00 00 00 00 00 00 00 00 00 00 ....\........... 48 93 42 00 50 01 41 00 f8 1f 41 00 00 00 41 00 H.B.P.A...A...A. Is there any meaningful information in that field, other than debug info for the programmers who developed this tool?

    Read the article

  • SQL log shipping for reporting

    - by Patrick J Collins
    I would like to create a read-only copy of my SQL Server 2008 database on a secondary server for reporting and analysis. I've been testing log shipping, configured to run every 5 minutes or so. Alas, there appears to be a stumbling block, for exclusive access is required on the target database during the restore, which in turn requires killing all active connections. This is far from ideal, especially if a user is in the middle of running a report. Any better suggestions? Edit : I'm doing this on the Express edition.

    Read the article

  • transaction log shipping sql server 2005 to 2008

    - by Andrew Jahn
    I have a reporting setup with SSRS on our SQL Server 2005 database. Because SQL Server 2008 is not supported by the main program which populates our database, we are stuck with 2005 on our prod database. Unfortunately, when I run our weekly check reports the web interface constantly times out because the server can't do the conversion to PDF. I've read that SQL Server 2008's SSRS is a lot better with memory management. I was wondering if I can do some kind of transaction log shipping subscription/publication from 2005 to 2008? Am I chasing a dream here? Currently I have to open up the SSRS project in Visual Studio and run the reports inside, because it never times out when doing the PDF conversion there; it only times out if I try to run it through the SSRS web interface.

    Read the article

  • Why are there unknown URLs in router log?

    - by Martin
    I recently looked at my router log. Why do a lot of requests that I didn't send originate from a computer in my home network? They do not look like 3rd-party advertisements / images embedded in a page. The requests have patterns, such as:

        top-visitor.com/look.php
        www.dottip.com/search/result.php?aff=8755&req=nickelodeon+games
        www.placeca.com/search/result.php?aff=3778&req=wireless+cell+phone
        www.bb5a.com/search.php?username=3348&keywords=flights
        www.blazerbox.com/search.php?username=2341&keywords=colorado+springs+real+estate
        www.freeautosource.com/search.php?username=sun100&keywords=vehicle
        www.1sp2.com/search.php?username=20190&keywords=las+the+hotel+vegas
        www.loadgeo.com/search/result.php?aff=10357&req=winamp
        www.exalt123.com/portal.php?ref=seo2007
        www.7catalogs.com/search.php?username=la24&keywords=shutter
        www.theloaninstitute.com/search.php?username=kevin&keywords=webcam
        www.grammt.com/search.php?username=2530&keywords=bob

    And there are hundreds of these requests sent within a second. So what's happening?

    Read the article

  • Strange stuff in apache log

    - by aL3xa
    Hi lads, I'm building some kind of webapp, and currently the whole thing runs on my machine. I was combing through my logs and found several "strange" log entries that made me a bit paranoid. Here goes:

        ***.***.***.** - - [19/Dec/2010:19:47:47 +0100] "\x99\x91g\xca\xa8" 501 1054
        **.***.***.** - - [19/Dec/2010:20:14:58 +0100] "<}\xdbe\x86E\x18\xe7\x8b" 501 1054
        **.**.***.*** - - [21/Dec/2010:15:28:14 +0100] "J\xaa\x9f\xa3\xdd\x9c\x81\\\xbd\xb3\xbe\xf7\xa6A\x92g'\x039\x97\xac,vC\x8d\x12\xec\x80\x06\x10\x8e\xab7e\xa9\x98\x10\xa7" 501 1054

    Bloody hell... what is this?!

    Read the article

  • How can I use apache log files to recreate a usage scenario

    - by daigorocub
    Recently I installed a website that had too many requests and it was too slow. Many improvements have been made to the web site code and we've also bought a new server. I want to test the new server with exactly the same requests that made the old server slow. After that, I will double the requests, make new tests and so on. These requests are logged in the apache log files. So, I can parse those files and make some kind of script to make the same requests. Of course, in this case, the requests will be made only by my computer against the server, but hey, better than nothing. Questions:

    - Is there some app that does this already?
    - Would you use wget? ab? A python script?

    Thanks!
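
    A rough sketch of the do-it-yourself route, assuming the common combined/common log format where the request path is the seventh field of each line (host name and log path are placeholders); the same extraction also feeds ab or siege with the most frequent URLs.

        # Replay every logged request path against the new server, one by one.
        awk '{print $7}' /var/log/apache2/access.log |
        while read path; do
            curl -s -o /dev/null "http://new-server$path"
        done

        # Or rank the most frequent URLs and hammer them with ab:
        awk '{print $7}' /var/log/apache2/access.log | sort | uniq -c | sort -rn | head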

    Read the article

  • Sharepoint 2007 - Transaction log full

    - by Kenny Bones
    So I have this SharePoint 2007 site that is basically trash. I'm supposed to just toss it, but I need to copy out all of the data in the form of traditional files and folders from certain projects. And since the transaction log is full, it's so damn slow. Even opening SharePoint takes up to 15 minutes, or it won't open at all. Copying files is extremely slow. So I'm in need of a quick fix here, just to be able to copy out some files and folders. I don't need to fix the problem per se. What can I do to fix it temporarily to be able to copy out the data?

    Read the article
