Search Results

Search found 7107 results on 285 pages for 'processing efficiency'.


  • Please help me correct the small bugs in this image editor

    - by Alex
    Hi, I'm working on a website that will sell handmade jewelry and I'm finishing the image editor, but it's not behaving quite right. Basically, the user uploads an image, which is saved as a source and then resized to fit the user's screen and saved as a temp. The user then goes to a screen that lets them crop the image and save it to its final versions. All of that works fine, except the final versions have three bugs. First is a black horizontal line at the very bottom of the image. Second is an outline of sorts that follows the edges. I thought it was because I was reducing the quality, but even at 100% it still shows up... And lastly, I've noticed that the cropped image is always a couple of pixels lower than what I'm specifying... Anyway, I'm hoping someone who's got experience editing images with C# can take a look at the code and see where I might be going off the right path. Oh, by the way, this is an ASP.NET MVC application. Here's the code:

        using System;
        using System.Drawing;
        using System.Drawing.Drawing2D;
        using System.Drawing.Imaging;
        using System.IO;
        using System.Linq;
        using System.Web;

        namespace Website.Models.Providers
        {
            public class ImageProvider
            {
                private readonly ProductProvider ProductProvider = null;
                private readonly EncoderParameters HighQualityEncoder = new EncoderParameters();
                private readonly ImageCodecInfo JpegCodecInfo =
                    ImageCodecInfo.GetImageEncoders().Single(c => (c.MimeType == "image/jpeg"));
                private readonly string Path = HttpContext.Current.Server.MapPath("~/Resources/Images/Products");
                private readonly short[][] Dimensions = new short[3][]
                {
                    new short[2] { 640, 480 },
                    new short[2] { 240, 0 },
                    new short[2] { 80, 60 }
                };

                // Constructor
                public ImageProvider(ProductProvider ProductProvider)
                {
                    this.ProductProvider = ProductProvider;
                    HighQualityEncoder.Param[0] = new EncoderParameter(Encoder.Quality, 100L);
                }

                // Crop
                public void Crop(string FileName, Image Image, Crop Crop)
                {
                    using (Bitmap Source = new Bitmap(Image))
                    {
                        using (Bitmap Target = new Bitmap(Crop.Width, Crop.Height))
                        {
                            using (Graphics Graphics = Graphics.FromImage(Target))
                            {
                                Graphics.InterpolationMode = InterpolationMode.HighQualityBicubic;
                                Graphics.SmoothingMode = SmoothingMode.HighQuality;
                                Graphics.PixelOffsetMode = PixelOffsetMode.HighQuality;
                                Graphics.CompositingQuality = CompositingQuality.HighQuality;
                                Graphics.DrawImage(Source,
                                    new Rectangle(0, 0, Target.Width, Target.Height),
                                    new Rectangle(Crop.Left, Crop.Top, Crop.Width, Crop.Height),
                                    GraphicsUnit.Pixel);
                            }
                            Target.Save(FileName, JpegCodecInfo, HighQualityEncoder);
                        }
                    }
                }

                // Crop & Resize
                public void CropAndResize(Product Product, Crop Crop)
                {
                    using (Image Source = Image.FromFile(String.Format("{0}/{1}.source", Path, Product.ProductId)))
                    {
                        using (Image Temp = Image.FromFile(String.Format("{0}/{1}.temp", Path, Product.ProductId)))
                        {
                            float Percent = ((float)Source.Width / (float)Temp.Width);
                            short Width = (short)(Temp.Width * Percent);
                            short Height = (short)(Temp.Height * Percent);
                            Crop.Height = (short)(Crop.Height * Percent);
                            Crop.Left = (short)(Crop.Left * Percent);
                            Crop.Top = (short)(Crop.Top * Percent);
                            Crop.Width = (short)(Crop.Width * Percent);
                            Img Img = new Img();
                            this.ProductProvider.AddImageAndSave(Product, Img);
                            this.Crop(String.Format("{0}/{1}.cropped", Path, Img.ImageId), Source, Crop);
                            using (Image Cropped = Image.FromFile(String.Format("{0}/{1}.cropped", Path, Img.ImageId)))
                            {
                                this.Resize(this.Dimensions[0], String.Format("{0}/{1}-L.jpg", Path, Img.ImageId), Cropped, HighQualityEncoder);
                                this.Resize(this.Dimensions[1], String.Format("{0}/{1}-T.jpg", Path, Img.ImageId), Cropped, HighQualityEncoder);
                                this.Resize(this.Dimensions[2], String.Format("{0}/{1}-S.jpg", Path, Img.ImageId), Cropped, HighQualityEncoder);
                            }
                        }
                    }
                    this.Purge(Product);
                }

                // Queue
                public void QueueFor(Product Product, HttpPostedFileBase PostedFile)
                {
                    using (Image Image = Image.FromStream(PostedFile.InputStream))
                    {
                        this.Resize(new short[2] { 1152, 0 }, String.Format("{0}/{1}.temp", Path, Product.ProductId), Image, HighQualityEncoder);
                    }
                    PostedFile.SaveAs(String.Format("{0}/{1}.source", Path, Product.ProductId));
                }

                // Purge
                private void Purge(Product Product)
                {
                    string Source = String.Format("{0}/{1}.source", Path, Product.ProductId);
                    string Temp = String.Format("{0}/{1}.temp", Path, Product.ProductId);
                    if (File.Exists(Source)) { File.Delete(Source); }
                    if (File.Exists(Temp)) { File.Delete(Temp); }
                    foreach (Img Img in Product.Imgs)
                    {
                        string Cropped = String.Format("{0}/{1}.cropped", Path, Img.ImageId);
                        if (File.Exists(Cropped)) { File.Delete(Cropped); }
                    }
                }

                // Resize
                public void Resize(short[] Dimensions, string FileName, Image Image, EncoderParameters EncoderParameters)
                {
                    if (Dimensions[1] == 0)
                    {
                        Dimensions[1] = (short)(Image.Height / ((float)Image.Width / (float)Dimensions[0]));
                    }
                    using (Bitmap Bitmap = new Bitmap(Dimensions[0], Dimensions[1]))
                    {
                        using (Graphics Graphics = Graphics.FromImage(Bitmap))
                        {
                            Graphics.InterpolationMode = InterpolationMode.HighQualityBicubic;
                            Graphics.SmoothingMode = SmoothingMode.HighQuality;
                            Graphics.PixelOffsetMode = PixelOffsetMode.HighQuality;
                            Graphics.CompositingQuality = CompositingQuality.HighQuality;
                            Graphics.DrawImage(Image, 0, 0, Dimensions[0], Dimensions[1]);
                        }
                        Bitmap.Save(FileName, JpegCodecInfo, EncoderParameters);
                    }
                }
            }
        }

    Here's one of the images this produces:

    Read the article

  • Why do I get completely different results when saving a BitmapSource to bmp, jpeg, and png in WPF

    - by DanM
    I wrote a little utility class that saves BitmapSource objects to image files. The image files can be bmp, jpeg, or png. Here is the code:

        public class BitmapProcessor
        {
            public void SaveAsBmp(BitmapSource bitmapSource, string path)
            {
                Save(bitmapSource, path, new BmpBitmapEncoder());
            }

            public void SaveAsJpg(BitmapSource bitmapSource, string path)
            {
                Save(bitmapSource, path, new JpegBitmapEncoder());
            }

            public void SaveAsPng(BitmapSource bitmapSource, string path)
            {
                Save(bitmapSource, path, new PngBitmapEncoder());
            }

            private void Save(BitmapSource bitmapSource, string path, BitmapEncoder encoder)
            {
                using (var stream = new FileStream(path, FileMode.Create))
                {
                    encoder.Frames.Add(BitmapFrame.Create(bitmapSource));
                    encoder.Save(stream);
                }
            }
        }

    Each of the three Save methods works, but I get unexpected results with bmp and jpeg. Png is the only format that produces an exact reproduction of what I see if I show the BitmapSource on screen using a WPF Image control. Here are the results:

    - BMP - too dark
    - JPEG - too saturated
    - PNG - correct

    Why am I getting completely different results for different file types? I should note that the BitmapSource in my example uses an alpha value of 0.1 (which is why it appears very desaturated), but it should be possible to show the resulting colors in any image format. I know that if I take a screen capture using something like HyperSnap, it will look correct regardless of what file type I save to. Here's a HyperSnap screen capture saved as a bmp: As you can see, this isn't a problem, so there's definitely something strange about WPF's image encoders. Do I have a setting wrong? Am I missing something?

    Read the article

  • Video conversion in java

    - by Maksim
    Is there any framework or open-source project for Java that does video conversion from any video format to any other video format? Something similar to the Panda video conversion framework.

    Read the article

  • C# or C++ game: many 16 color images loaded into RAM. Efficient solution?

    - by user560639
    I am in the planning stages of creating a fighting game and am unsure how to handle one issue relating to memory. Background info:

    - Still debating whether to use C# (XNA) or C++. We do not want to commit to either until we have explored how to solve this problem in both languages.
    - Using a max of 256MB RAM would be great if possible.
    - Two characters will be present at a time, and these characters can only change between battles. There is time to load/free memory between battles, but the game needs to run at a constant 60 drawn frames per second during combat. Each frame is 16.67ms.
    - The total number of images per character is in the low hundreds. Each image is roughly 200x400 pixels. Only one image from each character will be displayed at any given moment.

    Uncompressed, each image takes roughly 300kb by my calculations; upwards of 100MB for a whole character. This is pushing too close to the 256MB limit given that memory will be needed for some other resources as well. Since each image can be made with a total of 16 colors, I should theoretically be able to use 1/8th the space if I can take advantage of this. I've looked around but haven't found any word of native support for paletted images (storing each pixel using fewer bits that each map to a 32-bit RGBA color). I was considering making my own file format with 4 bits per pixel (and some extra palette info), loading all the images of this new format into RAM before battle, and then when drawing any specific image, decompressing only that image into a raw image so it can be rendered properly. I don't know if it's realistic to perform so many assignment operations (approx. 200x400 for each character = 160k) each frame. It sounds very hacky to me. Does anyone have advice on whether my solution sounds reasonable, and whether there is perhaps a better one available? Thanks so much! (I also attempted to use an image with only one channel, then use a shader to perform a series of if statements to translate various values into other colors. Unfortunately, there were too many lines of code for the shader. It is also rather hacky and does not scale well.)
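
    A quick way to sanity-check the 4-bits-per-pixel idea, independent of the C#/XNA versus C++ decision, is to prototype the packing and unpacking. Here is a small NumPy sketch; the palette, frame size, and variable names are made-up example data rather than anything from the question:

        import numpy as np

        # 16-entry RGBA palette and one frame of palette indices (made-up data).
        palette = np.random.randint(0, 256, size=(16, 4), dtype=np.uint8)
        indices = np.random.randint(0, 16, size=(400, 200), dtype=np.uint8)  # 400x200 frame

        def pack4(idx):
            # Two 4-bit indices per byte; assumes an even number of columns.
            return (idx[:, 0::2] << 4) | idx[:, 1::2]

        def unpack4(packed, width):
            # Expand back to one index per pixel.
            out = np.empty((packed.shape[0], width), dtype=np.uint8)
            out[:, 0::2] = packed >> 4
            out[:, 1::2] = packed & 0x0F
            return out

        packed = pack4(indices)               # half the bytes of `indices`
        rgba = palette[unpack4(packed, 200)]  # expand one frame to 400x200x4 RGBA on demand

    The per-frame work is essentially one table lookup per pixel, which is also the kind of operation a pixel shader with a small palette texture can do, so the CPU-side expansion loop may not even be necessary in practice.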

    Read the article

  • Extrapolation using fft in octave

    - by CFP
    Using GNU Octave, I'm computing an fft over a piece of signal, then eliminating some frequencies, and finally reconstructing the signal. This gives me a nice approximation of the signal, but it doesn't give me a way to extrapolate the data. Suppose basically that I have plotted three and a half periods of f: x -> sin(x) + 0.5*sin(3*x) + 1.2*sin(5*x) and then added a piece of low-amplitude, zero-centered random noise. With fft/ifft, I can easily remove most of the noise; but then how do I extrapolate 3 more periods of my signal data (other, of course, than duplicating the signal)? The math way is easy: you have a decomposition of your function as an infinite sum of sines/cosines, and you just need to extract a partial sum and apply it anywhere. But I don't quite get the programmatic way... Thanks!
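
    A minimal sketch of the programmatic step, shown in Python/NumPy rather than Octave (the fft calls correspond directly to Octave's fft): keep only the dominant bins, turn each surviving bin into an explicit amplitude/frequency/phase triple, and evaluate that partial sum on a longer axis. The signal, noise level, and threshold below are example assumptions.

        import numpy as np

        # Example: 3.5 periods of the signal above plus low-amplitude noise (assumed values).
        N = 700
        x = np.linspace(0, 3.5 * 2 * np.pi, N, endpoint=False)
        signal = np.sin(x) + 0.5 * np.sin(3 * x) + 1.2 * np.sin(5 * x) + 0.05 * np.random.randn(N)

        spectrum = np.fft.fft(signal)
        freqs = np.fft.fftfreq(N, d=x[1] - x[0])            # cycles per unit of x

        # Keep only the dominant bins (threshold is an arbitrary choice).
        keep = np.nonzero(np.abs(spectrum) > 0.2 * np.abs(spectrum).max())[0]

        # Evaluate the partial Fourier sum on an extended axis (twice the original span).
        x_ext = np.linspace(0, 7.0 * 2 * np.pi, 2 * N, endpoint=False)
        extrapolated = np.zeros_like(x_ext)
        for k in keep:
            amp = np.abs(spectrum[k]) / N
            phase = np.angle(spectrum[k])
            extrapolated += amp * np.cos(2 * np.pi * freqs[k] * x_ext + phase)

    Because the kept bins come in conjugate pairs for a real signal, summing the cosines over all of them reproduces the real-valued partial sum; note that this extrapolates the periodic extension of the analyzed window, which is the best an fft-based approach can do.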

    Read the article

  • Converting from samplerate/cutoff frequency to pi-radians/sample in a discrete time sampled IIR filter system.

    - by Fake Name
    I am working on doing some digital filter work using Python and Numpy/Scipy. I'm using scipy.signal.iirdesign to generate my filter coefficients, but it requires the filter passband coefficients in a format I am not familiar with:

        wp, ws : float
            Passband and stopband edge frequencies, normalized from 0 to 1
            (1 corresponds to pi radians / sample). For example:
                Lowpass:  wp = 0.2, ws = 0.3
                Highpass: wp = 0.3, ws = 0.2

    (from here) I'm not familiar with digital filters (I'm coming from a hardware design background). In an analog context, I would determine the desired slope and the 3dB down point, and calculate component values from that. In this context, how do I take a known sample rate, a desired corner frequency, and a desired rolloff, and calculate the wp, ws values from that? (This might be more appropriate for math.stackexchange. I'm not sure.)
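
    In short, wp and ws are just frequencies divided by the Nyquist frequency (half the sample rate), and the rolloff is expressed through the width of the wp-to-ws transition band together with the gpass/gstop ripple and attenuation figures. A sketch of the conversion with made-up numbers:

        from scipy import signal

        fs = 48000.0        # sample rate in Hz (example value)
        corner_hz = 1000.0  # desired corner frequency (example value)
        trans_hz = 500.0    # transition-band width; a steeper rolloff needs a narrower band

        nyquist = fs / 2.0
        wp = corner_hz / nyquist                # passband edge, 0..1 (1 == pi rad/sample)
        ws = (corner_hz + trans_hz) / nyquist   # stopband edge (lowpass case)

        # gpass/gstop (dB of passband ripple / stopband attenuation) express the rolloff.
        b, a = signal.iirdesign(wp, ws, gpass=1, gstop=40)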

    Read the article

  • How to use ImageMagick to batch desaturate images?

    - by Zando
    I have an image that is black text with some gray and pale yellowish background. I basically want to keep the text as black as possible, and make the gray and yellow comparatively lighter...at the very least, turn yellow into a light gray. What's the most efficient way to do that in ImageMagick?
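
    One plausible route is ImageMagick's -colorspace Gray (to discard the yellow entirely) followed by a -level adjustment to push the light grays toward white. The level percentages below are guesses to be tuned by eye, and the loop is just a thin Python wrapper around the convert command:

        import glob
        import subprocess

        # Assumption: ImageMagick's `convert` is on PATH and the inputs are .png files.
        for path in glob.glob("scans/*.png"):
            out = path.replace(".png", "-gray.png")
            subprocess.run(
                ["convert", path,
                 "-colorspace", "Gray",   # discard the yellow cast entirely
                 "-level", "0%,80%",      # push light grays toward white, keep the text black
                 out],
                check=True)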

    Read the article

  • User avatar cropping/resizing with paperclip and jquery in rails

    - by Micke
    Hello fellow stackoverflow users. I am building my first Ruby on Rails app, which is going to be my own little community. I have made the User model with several fields and information, but now I am thinking of adding avatar support for the users. I've been researching and found that my best option would be using Paperclip. But then I've encountered a little dilemma: I want my users to be able to resize/crop their avatar image after they have chosen an image. I have googled a lot and, since I am using jQuery, I found that imgAreaSelect would be best in my opinion. And I think MiniMagick looks best. So, to my question: how can I best integrate Paperclip and imgAreaSelect? I am new to Ruby and Rails, so I don't know how I can resize the image. Do you folks know how to integrate the two, or maybe some other options that fit my needs best? I also need to apologize for my bad/misspelled English, since I am not a native English speaker :) Thanking you in advance. Yours sincerely, Micke.

    Read the article

  • Image Comparison

    - by harigm
    How do I compare two images in a Java-based web application? I have installed a biometric thumb reader, and I need to read the user's thumb and compare it with the thumb image captured during the registration process. Initially I am storing the image in MySQL as a Blob, but I could also store the image in a separate folder. Please suggest the best way to do this: 1) Shall I use JavaScript? 2) Is there any built-in Java API to perform this?

    Read the article

  • TMS320C64x quick start reference for programmers

    - by osgx
    Hello. Is there any quickstart guide for programmers on writing DSP-accelerated applications for the TMS320C64x? I have a program with a custom algorithm (not FFT or the usual filtering) and I want to accelerate it using a multi-DSP coprocessor. So, how should I modify the source to move computation from the main CPU to the DSPs? What limitations are there for DSP-running code? I have some experience with CUDA. In CUDA I mark every function as being host, device, or an entry point for the device (kernel). There are also functions to start kernels and to upload/download data to/from the GPU, and there are some limitations for device code, described in the CUDA reference manual. I hope there is a similar interface and documentation for the DSP.

    Read the article

  • perl multiple tasks problem

    - by Alice Wozownik
    I have finished my earlier multithreaded program that uses Perl threads, and it works on my system. The problem is that on some systems it needs to run on, thread support is not compiled into Perl and I cannot install additional packages. I therefore need to use something other than threads, and I am moving my code to using fork(). This works on my Windows system in starting the subtasks. A few problems (see the sketch below for the first one):

    1. How do I determine when a child process exits? I created new threads when the thread count was below a certain value, so I need to keep track of how many are running. For processes, how do I know when one exits, so I can keep track of how many exist at a time, incrementing a counter when one is created and decrementing when one exits?
    2. Is file I/O using handles obtained with open() by the parent process safe in the child process? I need to append to a file from each of the child processes; is this safe on Unix as well?
    3. Is there any alternative to fork and threads? I tried Parallel::ForkManager, but that isn't installed on my system (use Parallel::ForkManager; gave an error) and I absolutely require that my Perl script work on all Unix/Windows systems without installing any additional modules.
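
    For the first question, the usual answer is a non-blocking waitpid loop that reaps finished children and decrements the counter. The sketch below shows that pattern in Python purely for illustration; os.fork and os.waitpid with WNOHANG map one-to-one onto Perl's fork and waitpid with POSIX WNOHANG. Note that os.fork is Unix-only, whereas Perl emulates fork on Windows, so this is only the shape of the Unix-side solution.

        import os
        import time

        MAX_CHILDREN = 4
        running = 0

        def reap_finished():
            # Non-blocking: count any children that have exited since the last call.
            reaped = 0
            while True:
                try:
                    pid, status = os.waitpid(-1, os.WNOHANG)
                except ChildProcessError:
                    break          # no children left at all
                if pid == 0:
                    break          # children exist, but none have exited yet
                reaped += 1
            return reaped

        for task in range(10):
            while running >= MAX_CHILDREN:
                running -= reap_finished()
                time.sleep(0.1)
            pid = os.fork()
            if pid == 0:           # child: do the work, then exit
                time.sleep(1)
                os._exit(0)
            running += 1           # parent: one more child in flight

        while running > 0:         # wait for the stragglers
            running -= reap_finished()
            time.sleep(0.1)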

    Read the article

  • How are you taking advantage of Multicore?

    - by tgamblin
    As someone in the world of HPC who came from the world of enterprise web development, I'm always curious to see how developers back in the "real world" are taking advantage of parallel computing. This is much more relevant now that all chips are going multicore, and it'll be even more relevant when there are thousands of cores on a chip instead of just a few. My questions are:

    - How does this affect your software roadmap? I'm particularly interested in real stories about how multicore is affecting different software domains, so specify what kind of development you do in your answer (e.g. server side, client-side apps, scientific computing, etc).
    - What are you doing with your existing code to take advantage of multicore machines, and what challenges have you faced?
    - Are you using OpenMP, Erlang, Haskell, CUDA, TBB, UPC or something else?
    - What do you plan to do as concurrency levels continue to increase, and how will you deal with hundreds or thousands of cores?
    - If your domain doesn't easily benefit from parallel computation, then explaining why is interesting, too.

    Finally, I've framed this as a multicore question, but feel free to talk about other types of parallel computing. If you're porting part of your app to use MapReduce, or if MPI on large clusters is the paradigm for you, then definitely mention that, too.

    Update: If you do answer #5, mention whether you think things will change if there get to be more cores (100, 1000, etc.) than you can feed with available memory bandwidth (seeing as how bandwidth is getting smaller and smaller per core). Can you still use the remaining cores for your application?

    Read the article

  • C# manipulating video

    - by grey88
    I want to take a folder of pictures and turn it into a slideshow video with music in the background. I have no idea how to do this or where to get help, because this isn't the kind of thing you can easily search for on Google. I don't know if there are APIs for it, or if it can even be done in C#. Maybe I'll have to move the project to C++ or something, but first I need to know where the hell to start. Thanks.
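
    One common route is not to encode the video yourself at all but to shell out to ffmpeg, which can turn a numbered image sequence plus an audio track into a video in one command; from C# that would just be a Process.Start call. The sketch below shows the idea as a small Python wrapper, with file names, frame rate, and codec choices as example assumptions:

        import subprocess

        # Assumptions: pictures are numbered img001.jpg, img002.jpg, ... and music.mp3 exists.
        subprocess.run([
            "ffmpeg",
            "-framerate", "1",       # show each picture for one second
            "-i", "img%03d.jpg",     # the image sequence
            "-i", "music.mp3",       # the background music
            "-c:v", "libx264",
            "-pix_fmt", "yuv420p",   # broadly compatible pixel format
            "-shortest",             # stop when the shorter input runs out
            "slideshow.mp4",
        ], check=True)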

    Read the article

  • Copy & rename file using .bat file language?

    - by flyout
    I want to make a .bat file to copy and rename a file multiple times. I want to have a list of names and an original file; then I want to copy that file and rename it for each name on the list. How can I do this using a .bat file? Also, is it possible to run WinRAR from the .bat to .rar or .zip every file after copying/renaming?
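
    If a small script is acceptable in place of a pure .bat file, the copy/rename part is a couple of lines of Python with shutil, and the standard zipfile module can produce the .zip without invoking WinRAR at all. The file names below are example assumptions:

        import shutil
        import zipfile

        # Assumptions: names.txt holds one target name per line, original.dat is the source file.
        with open("names.txt") as f:
            names = [line.strip() for line in f if line.strip()]

        for name in names:
            copy_name = name + ".dat"
            shutil.copyfile("original.dat", copy_name)   # copy and rename in one step

            # Standard-library zip instead of shelling out to WinRAR.
            with zipfile.ZipFile(copy_name + ".zip", "w", zipfile.ZIP_DEFLATED) as zf:
                zf.write(copy_name)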

    Read the article

  • Calculating length of objects in binary image - algorithm

    - by Jacek
    I need to calculate the length of an object in a binary image (the maximum distance between the pixels inside the object). As it is a binary image, we might consider it a 2D array with values 0 (white) and 1 (black). The thing I need is a clever (and preferably simple) algorithm to perform this operation. Keep in mind there are many objects in the image. The image to clarify: Sample input image:
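
    A direct (brute-force) version of this in Python with SciPy: label the connected components, then for each object take the maximum pairwise distance between its pixel coordinates. For large objects you would normally restrict the pairwise step to the convex-hull points first, but the sketch keeps it simple; it assumes 1 marks object pixels.

        import numpy as np
        from scipy import ndimage
        from scipy.spatial.distance import pdist

        def object_lengths(binary):
            # binary: 2D array with 1 for object pixels, 0 for background.
            labels, count = ndimage.label(binary)
            lengths = {}
            for obj in range(1, count + 1):
                coords = np.argwhere(labels == obj)      # (row, col) pairs of this object
                if len(coords) < 2:
                    lengths[obj] = 0.0
                else:
                    lengths[obj] = pdist(coords).max()   # maximum pixel-to-pixel distance
            return lengths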

    Read the article

  • selecting the pixels from an image in opencv

    - by ajith
    Hi everyone, this is a refined version of my previous question http://stackoverflow.com/questions/2602628/computing-matting-laplacian-matrix-of-an-image. What I actually want to compute is the following sum over all windows that contain the pixel pair (i, j):

        sum over { k | (i,j) ∈ w_k } of (I_i - µ_k) * (I_j - µ_k)

    where w_k is a 3x3 window and µ_k is the mean of w_k. What I don't know is how to select I_i and I_j separately from an image that is two-dimensional [I_ij], or does the equation mean something else? Please, can someone help me?
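
    Reading the equation as "for every window w_k that contains both pixels i and j, add (I_i - µ_k)(I_j - µ_k)", I_i and I_j are just the scalar intensities of two pixels of the same image, addressed by a linear index. A deliberately unoptimized Python sketch of that sum for a grayscale image follows; the full matting Laplacian additionally uses per-window color covariances and a regularizer, which this ignores.

        import numpy as np

        def pairwise_window_sums(I, win=3):
            # For every full 3x3 window w_k: compute mu_k, then add
            # (I_i - mu_k) * (I_j - mu_k) for each ordered pixel pair (i, j) in that window.
            h, w = I.shape
            r = win // 2
            acc = {}                                  # (i, j) linear-index pair -> running sum
            for y in range(r, h - r):
                for x in range(r, w - r):             # border windows skipped for simplicity
                    patch = I[y - r:y + r + 1, x - r:x + r + 1]
                    d = (patch - patch.mean()).ravel()
                    idx = [yy * w + xx
                           for yy in range(y - r, y + r + 1)
                           for xx in range(x - r, x + r + 1)]
                    for a, ia in enumerate(idx):
                        for b, ib in enumerate(idx):
                            acc[(ia, ib)] = acc.get((ia, ib), 0.0) + d[a] * d[b]
            return acc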

    Read the article

  • How to overwrite I-frames of a video?

    - by Dominik
    Hi, I want to destroy all the I-frames of a video. In doing this, I want to check whether encrypting only the I-frames of a video would be sufficient to make it unwatchable. How can I do this? Simply removing them and recompressing the video would not be the same as really overwriting the I-frames in the stream without recalculating the B-frames, etc.

    Read the article

  • Is using the .NET Image Conversion enough?

    - by contactmatt
    I've seen a lot of people try to code their own image conversion techniques. It often seems to be very complicated and ends up using GDI+ function calls and manipulating the bits of the image. This has got me wondering if I am missing something in the simplicity of .NET's image conversion call when saving an image. Here's the code I have:

        Bitmap tempBmp = new Bitmap("c:\temp\img.jpg");
        Bitmap bmp = new Bitmap(tempBmp, 800, 600);
        bmp.Save(c:\temp\img.bmp,  // extension depends on format
            ImageFormat.Bmp)       // These are all the ImageFormats I allow conversion to
                                   // within the program. Ignore the syntax for a second ;)
            ImageFormat.Gif)       // or
            ImageFormat.Jpeg)      // or
            ImageFormat.Png)       // or
            ImageFormat.Tiff)      // or
            ImageFormat.Wmf)       // or
            ImageFormat.Bmp)       // or
        );

    This is all I'm doing in my image conversion: just setting the location where the image should be saved and passing it an ImageFormat type. I've tested it the best I can, but I'm wondering if I am missing anything in this simple format conversion, or if this is sufficient?

    Read the article

  • Simplex noise vs Perlin noise

    - by raRaRa
    I would like to know why Perlin noise is still so popular today after Simplex noise came out. Simplex noise was made by Ken Perlin himself, and it was supposed to take over from his old algorithm, which was slow for higher dimensions, while offering better quality (no visible artifacts). Simplex noise came out in 2001, and over those 10 years I've only seen people talk of Perlin noise when it comes to generating heightmaps for terrains, creating procedural textures, et cetera. Could anyone help me out: is there some downside to Simplex noise? I heard rumors that Perlin noise is faster when it comes to 1D and 2D noise, but I don't know if that's true or not. Thanks!

    Read the article

  • How would I write the following applescript in Obj-C AppScript? ASTranslate was of no help =(

    - by demonslayer319
    The translation tool isn't able to translate this working code. I copied it out of a working script.

        set pathToTemp to (POSIX path of ((path to desktop) as string))
        -- change jpg to pict
        tell application "Image Events"
            try
                launch
                set albumArt to open file (pathToTemp & "albumart.jpg")
                save albumArt as PICT in file (pathToTemp & "albumart.pict")
                -- the first 512 bytes are the PICT header, so it reads from byte 513
                -- this is to allow the image to be added to an iTunes track later
                set albumArt to (read file (pathToTemp & "albumart.pict") from 513 as picture)
                close
            end try
        end tell

    The code is taking a jpg image, converting it to a PICT file, and then reading the file minus the header (the first 512 bytes). Later in the script, albumArt will be added to an iTunes track. I tried translating the code (minus the comments), but ASTranslate froze for a good 2 minutes before giving me this:

        Untranslated event 'earsffdr'

        #import "IEGlue/IEGlue.h"
        IEApplication *imageEvents = [IEApplication applicationWithName: @"Image Events"];
        IELaunchCommand *cmd = [[imageEvents launch] ignoreReply];
        id result = [cmd send];

        #import "IEGlue/IEGlue.h"
        IEApplication *imageEvents = [IEApplication applicationWithName: @"Image Events"];
        IEReference *ref = [[imageEvents files] byName: @"/Users/Doom/Desktop/albumart.jpg"];
        id result = [[ref open] send];

        #import "IEGlue/IEGlue.h"
        IEApplication *imageEvents = [IEApplication applicationWithName: @"Image Events"];
        IEReference *ref = [[imageEvents images] byName: @"albumart.jpg"];
        IESaveCommand *cmd = [[[ref save] in: [[imageEvents files] byName: @"/Users/Doom/Desktop/albumart.pict"]] as: [IEConstant PICT]];
        id result = [cmd send];

        'crdwrread'
        Traceback (most recent call last):
          File "objcrenderer.pyc", line 283, in renderCommand
        KeyError: 'crdwrread'

        'cascrgdut'
        Traceback (most recent call last):
          File "objcrenderer.pyc", line 283, in renderCommand
        KeyError: 'cascrgdut'

        'crdwrread'
        Traceback (most recent call last):
          File "objcrenderer.pyc", line 283, in renderCommand
        KeyError: 'crdwrread'

        Untranslated event 'rdwrread'

    OK, I have no clue how to make sense of this. Thanks for any and all help!

    Read the article

  • convert RGB values to equivalent HSV values using python

    - by sree01
    Hi, I want to convert RGB values to HSV using Python. I found some code samples, but they gave results with the S and V values greater than 100 (example: http://code.activestate.com/recipes/576554-covert-color-space-from-hsv-to-rgb-and-rgb-to-hsv/ ). Has anybody got better code that converts RGB to HSV and vice versa? Thanks.
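
    For what it's worth, Python's standard-library colorsys module already handles both directions. It works on floats in the 0-1 range, so 0-255 values are scaled first, and the hue/saturation/value are rescaled afterwards to the ranges most tools report:

        import colorsys

        def rgb255_to_hsv(r, g, b):
            # RGB in 0-255 -> (hue in degrees, saturation in %, value in %).
            h, s, v = colorsys.rgb_to_hsv(r / 255.0, g / 255.0, b / 255.0)
            return h * 360.0, s * 100.0, v * 100.0

        def hsv_to_rgb255(h, s, v):
            # (hue in degrees, saturation in %, value in %) -> RGB in 0-255.
            r, g, b = colorsys.hsv_to_rgb(h / 360.0, s / 100.0, v / 100.0)
            return round(r * 255), round(g * 255), round(b * 255)

        print(rgb255_to_hsv(255, 128, 0))   # roughly (30.1, 100.0, 100.0)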

    Read the article

  • Loading .png image from array of uint8_t into OpenGL ES texture

    - by unknownthreat
    Normally, when we want to load a texture for OpenGL ES from a .png, we simply add the .png images to Xcode. The .png files are altered by Xcode for optimization, and these altered png files can be loaded into an OpenGL ES texture at runtime. However, what I am trying to do is quite different. I am trying to load a .png file that does not come from the prebuilt/compile step. The png file will be transmitted externally over UDP, and it will be in the form of an array of bytes. I am very sure that the png is transferred correctly, but when it comes to displaying the png image in the form of an OpenGL ES texture, the image shows up incorrectly. The colors that are being sent are present, but their positions are very wrong. However, the positions of the colors still retain some aspects of the original layout. Here: The left image shows the original .png, while the right shows the png being displayed on the iPhone using an OpenGL ES texture. It looks as if the png data is not being decoded or is being incorrectly processed. Below is the OpenGL ES code for turning the image into a texture:

        - (void) setTextureFromImageByte: (uint8_t*)imageByte {
            if (self = [super init]) {
                NSData* imageData = [[NSData alloc] initWithBytes: imageByte length: imageLength];
                UIImage* img = [[UIImage alloc] initWithData: imageData];
                CGImageRef image = img.CGImage;
                int width = 512;
                int height = 512;
                if (image) {
                    int tempWidth = (int)width, tempHeight = (int)height;
                    if ((tempWidth & (tempWidth - 1)) != 0) {
                        NSLog(@"CAUTION! width is not power of 2. width == %d", tempWidth);
                    } else if ((tempHeight & (tempHeight - 1)) != 0) {
                        NSLog(@"CAUTION! height is not power of 2. height == %d", tempHeight);
                    } else {
                        void *spriteData = calloc(width * 4, height * 4);
                        CGContextRef spriteContext = CGBitmapContextCreate(spriteData, width, height, 8, width * 4,
                                                                           CGImageGetColorSpace(image),
                                                                           kCGImageAlphaPremultipliedLast);
                        CGContextDrawImage(spriteContext, CGRectMake(0.0, 0.0, width, height), image);
                        CGContextRelease(spriteContext);
                        glBindTexture(GL_TEXTURE_2D, 1);
                        glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, 320, 435, GL_RGBA, GL_UNSIGNED_BYTE, spriteData);
                        free(spriteData);
                    }
                } else {
                    NSLog(@"ERROR: Image not loaded...");
                }
                [img release];
                [imageData release];
            }
        }

    So does anyone know how to deal with this? Is it because the iPhone only accepts altered pngs from Xcode? What can we do in this case to make the png image display correctly?

    Read the article

  • Need help using libpng to read an image

    - by jonathanasdf
    Here is my function... I don't know why it's not working. The resulting image looks nothing like what the .png looks like, but there are no errors either.

        bool Bullet::read_png(std::string file_name, int pos)
        {
            png_structp png_ptr;
            png_infop info_ptr;
            FILE *fp;

            if ((fp = fopen(file_name.c_str(), "rb")) == NULL) {
                return false;
            }
            png_ptr = png_create_read_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
            if (png_ptr == NULL) {
                fclose(fp);
                return false;
            }
            info_ptr = png_create_info_struct(png_ptr);
            if (info_ptr == NULL) {
                fclose(fp);
                png_destroy_read_struct(&png_ptr, NULL, NULL);
                return false;
            }
            if (setjmp(png_jmpbuf(png_ptr))) {
                png_destroy_read_struct(&png_ptr, &info_ptr, NULL);
                fclose(fp);
                return false;
            }

            png_init_io(png_ptr, fp);
            png_read_png(png_ptr, info_ptr,
                         PNG_TRANSFORM_STRIP_16 | PNG_TRANSFORM_SWAP_ALPHA | PNG_TRANSFORM_EXPAND, NULL);

            png_uint_32 width = png_get_image_width(png_ptr, info_ptr);
            png_uint_32 height = png_get_image_height(png_ptr, info_ptr);
            imageData[pos].width = width;
            imageData[pos].height = height;

            png_bytepp row_pointers;
            row_pointers = png_get_rows(png_ptr, info_ptr);

            imageData[pos].data = new unsigned int[width*height];
            for (unsigned int i=0; i < height; ++i) {
                memcpy(&imageData[pos].data[i*width], &row_pointers[i], width*sizeof(unsigned int));
            }

            png_destroy_read_struct(&png_ptr, &info_ptr, NULL);
            fclose(fp);

            for (unsigned int i=0; i < height; ++i) {
                for (unsigned int j=0; j < width; ++j) {
                    unsigned int val = imageData[pos].data[i*width+j];
                    if (val != 0) {
                        unsigned int a = ((val >> 24));
                        unsigned int r = (((val - (a << 24)) >> 16));
                        unsigned int g = (((val - (a << 24) - (r << 16)) >> 8));
                        unsigned int b = (((val - (a << 24) - (r << 16) - (g << 8))));

                        // for debugging
                        std::string s(AS3_StringValue(AS3_Int(i*width+j)));
                        s += " "; s += AS3_StringValue(AS3_Int(val));
                        s += " "; s += AS3_StringValue(AS3_Int(a));
                        s += " "; s += AS3_StringValue(AS3_Int(r));
                        s += " "; s += AS3_StringValue(AS3_Int(g));
                        s += " "; s += AS3_StringValue(AS3_Int(b));
                        AS3_Trace(AS3_String(s.c_str()));
                    }
                }
            }
            return true;
        }

    ImageData is just a simple struct to keep x, y, width, and height, and imageData is an array of that struct:

        struct ImageData {
            int x;
            int y;
            int width;
            int height;
            unsigned int* data;
        };

    Here is a side by side screenshot of the input and output graphics (something I made in a minute for testing), and this was after setting alpha to 255 in order to make it show up (because the alpha I was getting back was 1). Left side is original, right side is what happened after reading it through this function. Scaled up 400% for visibility.
Here is a log of the traces: 0 16855328 1 1 49 32 1 16855424 1 1 49 128 2 16855456 1 1 49 160 3 16855488 1 1 49 192 4 16855520 1 1 49 224 5 16855552 1 1 50 0 6 16855584 1 1 50 32 7 16855616 1 1 50 64 8 16855424 1 1 49 128 9 16855456 1 1 49 160 10 16855488 1 1 49 192 11 16855520 1 1 49 224 12 16855552 1 1 50 0 13 16855584 1 1 50 32 14 16855616 1 1 50 64 15 16855648 1 1 50 96 16 16855456 1 1 49 160 17 16855488 1 1 49 192 18 16855520 1 1 49 224 19 16855552 1 1 50 0 20 16855584 1 1 50 32 21 16855616 1 1 50 64 22 16855648 1 1 50 96 23 16855680 1 1 50 128 24 16855488 1 1 49 192 25 16855520 1 1 49 224 26 16855552 1 1 50 0 27 16855584 1 1 50 32 28 16855616 1 1 50 64 29 16855648 1 1 50 96 30 16855680 1 1 50 128 31 16855712 1 1 50 160 32 16855520 1 1 49 224 33 16855552 1 1 50 0 34 16855584 1 1 50 32 35 16855616 1 1 50 64 36 16855648 1 1 50 96 37 16855680 1 1 50 128 38 16855712 1 1 50 160 39 16855744 1 1 50 192 40 16855552 1 1 50 0 41 16855584 1 1 50 32 42 16855616 1 1 50 64 43 16855648 1 1 50 96 44 16855680 1 1 50 128 45 16855712 1 1 50 160 46 16855744 1 1 50 192 47 16855776 1 1 50 224 48 16855584 1 1 50 32 49 16855616 1 1 50 64 50 16855648 1 1 50 96 51 16855680 1 1 50 128 52 16855712 1 1 50 160 53 16855744 1 1 50 192 54 16855776 1 1 50 224 55 16855808 1 1 51 0 56 16855616 1 1 50 64 57 16855648 1 1 50 96 58 16855680 1 1 50 128 59 16855712 1 1 50 160 60 16855744 1 1 50 192 61 16855776 1 1 50 224 62 16855808 1 1 51 0 63 16855840 1 1 51 32 64 16855648 1 1 50 96 65 16855680 1 1 50 128 66 16855712 1 1 50 160 67 16855744 1 1 50 192 68 16855776 1 1 50 224 69 16855808 1 1 51 0 70 16855840 1 1 51 32 71 16855872 1 1 51 64 72 16855680 1 1 50 128 73 16855712 1 1 50 160 74 16855744 1 1 50 192 75 16855776 1 1 50 224 76 16855808 1 1 51 0 77 16855840 1 1 51 32 78 16855872 1 1 51 64 79 16855904 1 1 51 96 80 16855712 1 1 50 160 81 16855744 1 1 50 192 82 16855776 1 1 50 224 83 16855808 1 1 51 0 84 16855840 1 1 51 32 85 16855872 1 1 51 64 86 16855904 1 1 51 96 87 16855936 1 1 51 128 88 16855744 1 1 50 192 89 16855776 1 1 50 224 90 16855808 1 1 51 0 91 16855840 1 1 51 32 92 16855872 1 1 51 64 93 16855904 1 1 51 96 94 16855936 1 1 51 128 95 16855968 1 1 51 160 96 16855776 1 1 50 224 97 16855808 1 1 51 0 98 16855840 1 1 51 32 99 16855872 1 1 51 64 100 16855904 1 1 51 96 101 16855936 1 1 51 128 102 16855968 1 1 51 160 103 16856000 1 1 51 192 104 16855808 1 1 51 0 105 16855840 1 1 51 32 106 16855872 1 1 51 64 107 16855904 1 1 51 96 108 16855936 1 1 51 128 109 16855968 1 1 51 160 110 16856000 1 1 51 192 111 16856032 1 1 51 224 112 16855840 1 1 51 32 113 16855872 1 1 51 64 114 16855904 1 1 51 96 115 16855936 1 1 51 128 116 16855968 1 1 51 160 117 16856000 1 1 51 192 118 16856032 1 1 51 224 119 16856064 1 1 52 0 120 16855872 1 1 51 64 121 16855904 1 1 51 96 122 16855936 1 1 51 128 123 16855968 1 1 51 160 124 16856000 1 1 51 192 125 16856032 1 1 51 224 126 16856064 1 1 52 0 127 16856096 1 1 52 32 128 16855904 1 1 51 96 129 16855936 1 1 51 128 130 16855968 1 1 51 160 131 16856000 1 1 51 192 132 16856032 1 1 51 224 133 16856064 1 1 52 0 134 16856096 1 1 52 32 135 16856128 1 1 52 64 136 16855936 1 1 51 128 137 16855968 1 1 51 160 138 16856000 1 1 51 192 139 16856032 1 1 51 224 140 16856064 1 1 52 0 141 16856096 1 1 52 32 142 16856128 1 1 52 64 143 16856160 1 1 52 96 144 16855968 1 1 51 160 145 16856000 1 1 51 192 146 16856032 1 1 51 224 147 16856064 1 1 52 0 148 16856096 1 1 52 32 149 16856128 1 1 52 64 150 16856160 1 1 52 96 151 16856192 1 1 52 128 152 16856000 1 1 51 192 153 16856032 1 1 51 224 154 16856064 1 1 52 0 155 16856096 
1 1 52 32 156 16856128 1 1 52 64 157 16856160 1 1 52 96 158 16856192 1 1 52 128 159 16856224 1 1 52 160 160 16856032 1 1 51 224 161 16856064 1 1 52 0 162 16856096 1 1 52 32 163 16856128 1 1 52 64 164 16856160 1 1 52 96 165 16856192 1 1 52 128 166 16856224 1 1 52 160 167 16856256 1 1 52 192 168 16856064 1 1 52 0 169 16856096 1 1 52 32 170 16856128 1 1 52 64 171 16856160 1 1 52 96 172 16856192 1 1 52 128 173 16856224 1 1 52 160 174 16856256 1 1 52 192 175 16856288 1 1 52 224 176 16856096 1 1 52 32 177 16856128 1 1 52 64 178 16856160 1 1 52 96 179 16856192 1 1 52 128 180 16856224 1 1 52 160 181 16856256 1 1 52 192 182 16856288 1 1 52 224 183 16856320 1 1 53 0 184 16856128 1 1 52 64 185 16856160 1 1 52 96 186 16856192 1 1 52 128 187 16856224 1 1 52 160 188 16856256 1 1 52 192 189 16856288 1 1 52 224 190 16856320 1 1 53 0 192 16856160 1 1 52 96 193 16856192 1 1 52 128 194 16856224 1 1 52 160 195 16856256 1 1 52 192 196 16856288 1 1 52 224 197 16856320 1 1 53 0 200 16856192 1 1 52 128 201 16856224 1 1 52 160 202 16856256 1 1 52 192 203 16856288 1 1 52 224 204 16856320 1 1 53 0 208 16856224 1 1 52 160 209 16856256 1 1 52 192 210 16856288 1 1 52 224 211 16856320 1 1 53 0 216 16856256 1 1 52 192 217 16856288 1 1 52 224 218 16856320 1 1 53 0 224 16856288 1 1 52 224 225 16856320 1 1 53 0 232 16856320 1 1 53 0 Was stuck on this for a couple of days.

    Read the article

  • OpenCV: Shift/Align face image relative to reference Image (Image Registration)

    - by Abhischek
    I am new to OpenCV2 and working on a project in emotion recognition, and I would like to align a facial image in relation to a reference facial image. I would like to get the image translation working before moving to rotation. The current idea is to run a search within a limited range on both x and y coordinates and use the sum of squared differences as the error metric to select the optimal x/y parameters to align the image. I'm using the OpenCV face_cascade function to detect the face images, and all images are resized to a fixed size (128x128). Question: Which parameters of the Mat image do I need to modify to shift the image in a positive/negative direction on both the x and y axis? I believe setImageROI is no longer supported by Mat datatypes? I have the ROIs for both faces available, however I am unsure how to use them.

        void alignImage(vector<Rect> faceROIstore, vector<Mat> faceIMGstore)
        {
            Mat refimg = faceIMGstore[1];    // reference image
            Mat dispimg = faceIMGstore[52];  // "displaced" version of reference image
            //Rect refROI = faceROIstore[1];   // bounding box for face in reference image
            //Rect dispROI = faceROIstore[52]; // bounding box for face in displaced image
            Mat aligned;
            matchTemplate(dispimg, refimg, aligned, CV_TM_SQDIFF_NORMED);
            imshow("Aligned image", aligned);
        }

    The idea for this approach is based on the Image Alignment tutorial by Richard Szeliski. I'm working on Windows with OpenCV 2.4. Any suggestions are much appreciated.
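
    You wouldn't modify the Mat's own fields to shift it; instead you build a 2x3 translation matrix, warp the image, and score each candidate shift against the reference. A sketch of that brute-force SSD search, written with the Python cv2 bindings purely for brevity (the C++ cv::warpAffine call is the direct equivalent; the +/-10 pixel search range is an arbitrary assumption):

        import cv2
        import numpy as np

        def best_translation(ref, img, search=10):
            # Brute-force search over integer shifts; returns the (dx, dy) minimizing SSD.
            h, w = ref.shape[:2]
            best, best_err = (0, 0), np.inf
            for dx in range(-search, search + 1):
                for dy in range(-search, search + 1):
                    m = np.float32([[1, 0, dx], [0, 1, dy]])       # 2x3 translation matrix
                    shifted = cv2.warpAffine(img, m, (w, h))
                    err = np.sum((shifted.astype(np.float32) - ref.astype(np.float32)) ** 2)
                    if err < best_err:
                        best_err, best = err, (dx, dy)
            return best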

    Read the article
