
Make a UIImage from a CMSampleBuffer

This is not the same as the countless questions about converting a CMSampleBuffer to a UIImage. I'm simply wondering why I can't convert it like this:

CVPixelBufferRef pixelBuffer = (CVPixelBufferRef)CMSampleBufferGetImageBuffer(sampleBuffer);
CIImage * imageFromCoreImageLibrary = [CIImage imageWithCVPixelBuffer: pixelBuffer];
UIImage * imageForUI = [UIImage imageWithCIImage: imageFromCoreImageLibrary];


It seems a lot simpler because it works for YCbCr color spaces, as well as RGBA and others. Is there something wrong with that code?

Answer

Use the following code to convert the image from a pixel buffer.

Option 1:

CIImage *ciImage = [CIImage imageWithCVPixelBuffer:pixelBuffer];

// Render the CIImage into a CGImage via a Core Image context.
CIContext *context = [CIContext contextWithOptions:nil];
CGImageRef myImage = [context
                         createCGImage:ciImage
                         fromRect:CGRectMake(0, 0,
                                             CVPixelBufferGetWidth(pixelBuffer),
                                             CVPixelBufferGetHeight(pixelBuffer))];

UIImage *uiImage = [UIImage imageWithCGImage:myImage];
CGImageRelease(myImage); // createCGImage: returns a +1 reference, so release it
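For context, here is a minimal sketch of how Option 1 might be wrapped in a helper and called from an AVCaptureVideoDataOutputSampleBufferDelegate callback. The helper name imageFromSampleBuffer: and the main-queue usage are only assumptions for illustration, not part of any framework API:

// Requires: #import <AVFoundation/AVFoundation.h>
- (UIImage *)imageFromSampleBuffer:(CMSampleBufferRef)sampleBuffer {
    CVPixelBufferRef pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
    CIImage *ciImage = [CIImage imageWithCVPixelBuffer:pixelBuffer];

    CIContext *context = [CIContext contextWithOptions:nil];
    CGImageRef cgImage = [context createCGImage:ciImage
                                       fromRect:CGRectMake(0, 0,
                                                           CVPixelBufferGetWidth(pixelBuffer),
                                                           CVPixelBufferGetHeight(pixelBuffer))];

    UIImage *image = [UIImage imageWithCGImage:cgImage];
    CGImageRelease(cgImage);
    return image;
}

// Delegate callback, invoked on the capture queue for every frame.
- (void)captureOutput:(AVCaptureOutput *)output
didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
       fromConnection:(AVCaptureConnection *)connection {
    UIImage *frame = [self imageFromSampleBuffer:sampleBuffer];
    dispatch_async(dispatch_get_main_queue(), ^{
        // e.g. assign `frame` to an image view here (the image view is assumed).
    });
}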

Option 2:

// Lock the pixel buffer before reading its base address.
CVPixelBufferLockBaseAddress(pixelBuffer, kCVPixelBufferLock_ReadOnly);

size_t w = CVPixelBufferGetWidth(pixelBuffer);
size_t h = CVPixelBufferGetHeight(pixelBuffer);
size_t srcBytesPerRow = CVPixelBufferGetBytesPerRow(pixelBuffer);
size_t bytesPerPixel = srcBytesPerRow / w;

unsigned char *buffer = (unsigned char *)CVPixelBufferGetBaseAddress(pixelBuffer);

UIGraphicsBeginImageContext(CGSizeMake(w, h));

CGContextRef c = UIGraphicsGetCurrentContext();

unsigned char *data = CGBitmapContextGetData(c);
if (data != NULL) {
    size_t dstBytesPerRow = CGBitmapContextGetBytesPerRow(c);
    for (size_t y = 0; y < h; y++) {
        for (size_t x = 0; x < w; x++) {
            // Use each buffer's own row stride; rows may be padded.
            size_t srcOffset = y * srcBytesPerRow + x * bytesPerPixel;
            size_t dstOffset = y * dstBytesPerRow + x * 4;
            data[dstOffset]     = buffer[srcOffset];     // byte 0 (B for BGRA buffers)
            data[dstOffset + 1] = buffer[srcOffset + 1]; // byte 1 (G)
            data[dstOffset + 2] = buffer[srcOffset + 2]; // byte 2 (R)
            data[dstOffset + 3] = buffer[srcOffset + 3]; // byte 3 (A)
        }
    }
}
UIImage *img = UIGraphicsGetImageFromCurrentImageContext();

UIGraphicsEndImageContext();

CVPixelBufferUnlockBaseAddress(pixelBuffer, kCVPixelBufferLock_ReadOnly);
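Option 2 only makes sense when the pixel buffer actually contains packed 32-bit pixels. A minimal sketch of how the capture output might be configured to deliver BGRA frames (the variable name videoOutput is just an assumption for illustration):

// Requires AVFoundation. Ask the video data output for BGRA frames so each
// pixel is 4 contiguous bytes, as the byte copy in Option 2 expects.
AVCaptureVideoDataOutput *videoOutput = [[AVCaptureVideoDataOutput alloc] init];
videoOutput.videoSettings = @{
    (NSString *)kCVPixelBufferPixelFormatTypeKey : @(kCVPixelFormatType_32BGRA)
};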