Cropping a SampleBuffer for Custom Rendering


CDN stream mixing -- cropping the CMSampleBufferRef

A current business scenario requires cropping the mixed video stream coming from TRTC and then rendering it with a custom OpenGL pipeline.

First, a quick look at stream mixing.

In an online-education scenario, students can raise their hands and come "on stage" to interact. There are two ways to render the video of the students on stage:

  1. Connect directly to TRTC real-time audio/video and play the stream. The advantages are low live latency and high image quality; the cost is higher.

  2. Use CDN stream mixing: students who are on stage still connect to the TRTC stream, while students who are not on stage pull the CDN mixed stream, crop it, and render it themselves.

    The CDN mixed stream has higher latency than TRTC and slightly lower image quality, but it is considerably cheaper.


Part 1: Session Capture

In what follows, a camera capture source is used in place of the CDN mixed stream.

Preparation

  1. Import AVFoundation

    #import <AVFoundation/AVFoundation.h>
    
  2. Configure the device permissions in Info.plist.

        Privacy - Camera Usage Description  
        Privacy - Microphone Usage Description
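
    Besides the Info.plist keys, the app also needs runtime authorization before capture can start. A minimal sketch using AVCaptureDevice (the logging here is just illustrative):

        [AVCaptureDevice requestAccessForMediaType:AVMediaTypeVideo completionHandler:^(BOOL granted) {
            if (!granted) {
                NSLog(@"Camera access was denied");
            }
        }];
        [AVCaptureDevice requestAccessForMediaType:AVMediaTypeAudio completionHandler:^(BOOL granted) {
            if (!granted) {
                NSLog(@"Microphone access was denied");
            }
        }];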
    
  1. Initialize the session manager object, AVCaptureSession

        // Initialize the session
        self.session = [[AVCaptureSession alloc] init];
        // Set the resolution preset
        if ([self.session canSetSessionPreset:AVCaptureSessionPresetHigh]) {
            [self.session setSessionPreset:AVCaptureSessionPresetHigh];
        } else {
            [self.session setSessionPreset:AVCaptureSessionPreset1280x720];
        }
    
  2. Configure the audio and video input/output objects

        // Begin configuration
        [self.session beginConfiguration];

        // Set up the video I/O objects and add them to the session
        [self videoInputAndOutput];

        // Set up the audio I/O objects and add them to the session
        [self audioInputAndOutput];

        // Commit the configuration
        [self.session commitConfiguration];
    

    Note: when configuring an AVCaptureSession you must call beginConfiguration before making changes and commitConfiguration when you are done; otherwise the configuration does not take effect.

  3. Configure the video objects

    - (void)videoInputAndOutput{
        // Reset the video device object
        self.videoDevice = nil;

        // Discover the available cameras (front and back, hence an array is returned)
        AVCaptureDeviceDiscoverySession *disSession = [AVCaptureDeviceDiscoverySession discoverySessionWithDeviceTypes:@[AVCaptureDeviceTypeBuiltInWideAngleCamera] mediaType:AVMediaTypeVideo position:AVCaptureDevicePositionUnspecified];

        NSArray *videoDevices = disSession.devices;
        for (AVCaptureDevice *device in videoDevices) {
            // Use the front camera by default
            if (device.position == AVCaptureDevicePositionFront) {
                self.videoDevice = device;
            }
        }

        // Video input
        // Create the video input object from the capture device
        NSError *error;
        self.videoInput = [[AVCaptureDeviceInput alloc] initWithDevice:self.videoDevice error:&error];
        if (error) {
            NSLog(@"Failed to create the camera input: %@", error);
            return;
        }

        // Add the input to the AVCaptureSession,
        // checking first whether it can be added
        if ([self.session canAddInput:self.videoInput]) {
            [self.session addInput:self.videoInput];
        }

        // Video output object
        self.videoOutput = [[AVCaptureVideoDataOutput alloc] init];
        // Whether to drop frames that arrive late; NO keeps every frame
        self.videoOutput.alwaysDiscardsLateVideoFrames = NO;

        if ([self supportsFastTextureUpload]) {
            // YUV (YCbCr) separates luma from chroma, which lets video be compressed
            // without an obvious loss of sharpness; check whether full-range YUV is supported
            BOOL supportFullYUVRange = NO;

            // Pixel formats supported by the output object
            NSArray *supportedPixelFormats = self.videoOutput.availableVideoCVPixelFormatTypes;
            for (NSNumber *currentPixelFormat in supportedPixelFormats) {
                if ([currentPixelFormat integerValue] == kCVPixelFormatType_420YpCbCr8BiPlanarFullRange) {
                    supportFullYUVRange = YES;
                }
            }

            // Pick the output pixel format based on full-range YUV support
            if (supportFullYUVRange) {
                [self.videoOutput setVideoSettings:[NSDictionary dictionaryWithObject:[NSNumber numberWithInt:kCVPixelFormatType_420YpCbCr8BiPlanarFullRange] forKey:(id)kCVPixelBufferPixelFormatTypeKey]];
            } else {
                [self.videoOutput setVideoSettings:[NSDictionary dictionaryWithObject:[NSNumber numberWithInt:kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange] forKey:(id)kCVPixelBufferPixelFormatTypeKey]];
            }
        } else {
            [self.videoOutput setVideoSettings:[NSDictionary dictionaryWithObject:[NSNumber numberWithInt:kCVPixelFormatType_32BGRA] forKey:(id)kCVPixelBufferPixelFormatTypeKey]];
        }

        // The sample buffer delegate must be called on a serial queue so frames are delivered in order
        dispatch_queue_t videoQueue = dispatch_queue_create("video.output.queue", DISPATCH_QUEUE_SERIAL);
        // Set the delegate
        [self.videoOutput setSampleBufferDelegate:self queue:videoQueue];

        // Check whether the session can add the video output object
        if ([self.session canAddOutput:self.videoOutput]) {
            [self.session addOutput:self.videoOutput];

            // Configure the video connection between input and output
            [self connectionVideoInputVideoOutput];
        }
    }
    

    The input object must be an AVCaptureDeviceInput.
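
    The supportsFastTextureUpload method referenced above is not shown here; a minimal sketch, assuming the common GPUImage-style check where the CVOpenGLESTextureCache fast path is unavailable only on the simulator:

        - (BOOL)supportsFastTextureUpload {
        #if TARGET_IPHONE_SIMULATOR
            return NO;  // CVOpenGLESTextureCache is not usable on the simulator
        #else
            return YES; // iOS devices support the texture-cache upload path
        #endif
        }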

  4. Configure the audio objects

    - (void)audioInputAndOutput{
        // Default audio device
        self.audioDevice = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeAudio];

        // Audio input object
        NSError *error;
        self.audioInput = [[AVCaptureDeviceInput alloc] initWithDevice:self.audioDevice error:&error];
        if (error) {
            NSLog(@"Failed to create the microphone input: %@", error);
        }

        // Check whether the session can add the audio input object
        if ([self.session canAddInput:self.audioInput]) {
            [self.session addInput:self.audioInput];
        }

        // Audio output object
        self.audioOutput = [[AVCaptureAudioDataOutput alloc] init];

        // Check whether the session can add the audio output object
        if ([self.session canAddOutput:self.audioOutput]) {
            [self.session addOutput:self.audioOutput];
        }
    }
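
    The audio output above has no sample buffer delegate, so no audio callbacks will arrive. If raw audio frames are needed, a sketch of wiring it up (the queue label is arbitrary; when video and audio share one delegate, the callback should check which output delivered the buffer):

        dispatch_queue_t audioQueue = dispatch_queue_create("audio.output.queue", DISPATCH_QUEUE_SERIAL);
        [self.audioOutput setSampleBufferDelegate:self queue:audioQueue];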
    
  5. Set up the connection object, AVCaptureConnection

        AVCaptureConnection *captureConnection = [self.videoOutput connectionWithMediaType:AVMediaTypeVideo];
        self.captureConnection = captureConnection;
        // Video orientation
        [captureConnection setVideoOrientation:AVCaptureVideoOrientationPortraitUpsideDown];

        // Apply the maximum scale-and-crop (zoom) factor
        captureConnection.videoScaleAndCropFactor = captureConnection.videoMaxScaleAndCropFactor;
        // Video stabilization
        if ([captureConnection isVideoStabilizationSupported]) {
            captureConnection.preferredVideoStabilizationMode = AVCaptureVideoStabilizationModeAuto;
        }
    
  6. Add the preview layer, AVCaptureVideoPreviewLayer

        AVCaptureVideoPreviewLayer *previewLayer = [AVCaptureVideoPreviewLayer layerWithSession:self.session];
        previewLayer.frame = CGRectMake(0, 0, self.view.frame.size.width/2, self.view.frame.size.height);
        [self.view.layer  addSublayer:previewLayer];
    
  7. Start the capture session

     [self.session startRunning];
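
    Note that startRunning is a blocking call, so it is usually kept off the main thread. A minimal sketch, assuming a dedicated serial session queue:

        // Keep the queue in a property if you also need to call stopRunning later
        dispatch_queue_t sessionQueue = dispatch_queue_create("session.queue", DISPATCH_QUEUE_SERIAL);
        dispatch_async(sessionQueue, ^{
            [self.session startRunning];
        });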
    
  8. Receive the capture output

    #pragma mark - AVCaptureVideoDataOutputSampleBufferDelegate
    // Called for every captured frame
    - (void)captureOutput:(AVCaptureOutput *)output didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection {
        // If nothing keeps a strong reference to the capture session, this callback never fires

        // 1. Get the pixel buffer from the sample buffer
        CVPixelBufferRef pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);

        // 2. Read the captured width and height
        size_t width = CVPixelBufferGetWidth(pixelBuffer);
        size_t height = CVPixelBufferGetHeight(pixelBuffer);

        // 3. Crop the sample buffer, then render it on the main queue.
        //    The cropped buffer is released inside the block so it stays alive until rendering finishes.
        CMSampleBufferRef cropSampleBuffer = [self cropSampleBufferByHardware:sampleBuffer];
        if (cropSampleBuffer == NULL) {
            return;
        }
        dispatch_async(dispatch_get_main_queue(), ^{
            CVPixelBufferRef cropPixelBuffer = CMSampleBufferGetImageBuffer(cropSampleBuffer);
            [self.mGLView displayPixelBuffer:cropPixelBuffer];
            CFRelease(cropSampleBuffer);
        });
    }
    

Part 2: Cropping the CMSampleBuffer

  1. Create the crop rectangle

        int _cropX = 0 ;
        int _cropY = 0 ;
        CGFloat g_width_size = 1080/4;//1280;
        CGFloat g_height_size = 1920/4;//720;
        CGRect cropRect    = CGRectMake(_cropX, _cropY, g_width_size, g_height_size);
    
  2. Create the CVPixelBuffer

        OSStatus status;
        
        /* Reset pixbuffer and videoInfo only when the resolution changes, so they are not recreated for every frame */
        static CVPixelBufferRef            pixbuffer = NULL;
        static CMVideoFormatDescriptionRef videoInfo = NULL;
        
        if (pixbuffer == NULL) {
            
            CFDictionaryRef empty; // empty value for attr value.
            CFMutableDictionaryRef attrs;
            empty = CFDictionaryCreate(kCFAllocatorDefault, // our empty IOSurface properties dictionary
                                       NULL,
                                       NULL,
                                       0,
                                       &kCFTypeDictionaryKeyCallBacks,
                                       &kCFTypeDictionaryValueCallBacks);
            attrs = CFDictionaryCreateMutable(kCFAllocatorDefault,
                                              1,
                                              &kCFTypeDictionaryKeyCallBacks,
                                              &kCFTypeDictionaryValueCallBacks);

            CFDictionarySetValue(attrs,
                                 kCVPixelBufferIOSurfacePropertiesKey,
                                 empty);
            status = CVPixelBufferCreate(kCFAllocatorSystemDefault, g_width_size, g_height_size, kCVPixelFormatType_420YpCbCr8BiPlanarFullRange, attrs/*(__bridge CFDictionaryRef)options*/, &pixbuffer);
            if (status != noErr) {
                NSLog(@"Crop CVPixelBufferCreate error %d",(int)status);
                return NULL;
            }
        }
    

    First, to render to a texture, you need an image that is compatible with the OpenGL texture cache. Images created with the camera API are already compatible, and you can immediately map them for inputs. Suppose, though, that you want to create an image to render to and later read out for some other processing. You have to create that image with a special property: the attributes for the image must include kCVPixelBufferIOSurfacePropertiesKey as one of the keys of the dictionary.


CVReturn CVPixelBufferCreate(CFAllocatorRef allocator,
                                size_t width,
                                size_t height,
                                OSType pixelFormatType, 
                                CFDictionaryRef pixelBufferAttributes, 
                                CVPixelBufferRef  _Nullable *pixelBufferOut);

  3. Get the CVImageBufferRef

    CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(buffer);
  4. Crop the image

        CIImage *ciImage = [CIImage imageWithCVImageBuffer:imageBuffer];
        ciImage = [ciImage imageByCroppingToRect:cropRect];
        // After cropping, the CIImage's extent no longer starts at the origin, so translate it back
        ciImage = [ciImage imageByApplyingTransform:CGAffineTransformMakeTranslation(-_cropX, -_cropY)];
    
  5. Render the image into pixbuffer

        static CIContext *ciContext = nil;
        if (ciContext == nil) {
            NSMutableDictionary *options = [[NSMutableDictionary alloc] init];
            [options setObject:[NSNull null] forKey:kCIContextWorkingColorSpace];
            [options setObject:@0            forKey:kCIContextUseSoftwareRenderer];
            EAGLContext *eaglContext = [[EAGLContext alloc] initWithAPI:kEAGLRenderingAPIOpenGLES3];
            ciContext = [CIContext contextWithEAGLContext:eaglContext options:options];
        }
        [ciContext render:ciImage toCVPixelBuffer:pixbuffer bounds:cropRect colorSpace:nil];
    

    Keep a reference to a single CIContext; it is the bridge between our Core Image objects and the OpenGL context. Create it once and reuse it for every frame. The context lets Core Image do optimizations behind the scenes, such as caching and reusing resources like textures, which only pays off if we keep reusing the same context.

  6. Get the video timing info: duration, PTS, and DTS

          CMSampleTimingInfo sampleTime = {
              .duration               = CMSampleBufferGetDuration(buffer),
              .presentationTimeStamp  = CMSampleBufferGetPresentationTimeStamp(buffer),
              .decodeTimeStamp        = CMSampleBufferGetDecodeTimeStamp(buffer)
          };
      
  7. Create the video format description

          if (videoInfo == NULL) {
              status = CMVideoFormatDescriptionCreateForImageBuffer(kCFAllocatorDefault, pixbuffer, &videoInfo);
              if (status != 0) NSLog(@"Crop CMVideoFormatDescriptionCreateForImageBuffer error %d",(int)status);
          }
      
  8. Create the CMSampleBuffer

          CMSampleBufferRef cropBuffer;
          status = CMSampleBufferCreateForImageBuffer(kCFAllocatorDefault, pixbuffer, true, NULL, NULL, videoInfo, &sampleTime, &cropBuffer);
          if (status != 0) NSLog(@"Crop CMSampleBufferCreateForImageBuffer error %d",(int)status);
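
Putting the pieces above together, the cropSampleBufferByHardware: method called from the capture callback might look roughly like this. It is a sketch assembled from the snippets above, not the original implementation: _cropX, _cropY, g_width_size, and g_height_size are the values from step 1, and the caller is expected to CFRelease the returned buffer.

    - (CMSampleBufferRef)cropSampleBufferByHardware:(CMSampleBufferRef)buffer {
        // Crop rectangle (same values as in step 1)
        CGRect cropRect = CGRectMake(_cropX, _cropY, g_width_size, g_height_size);

        // Reused across frames; only created once
        static CVPixelBufferRef pixbuffer = NULL;
        static CMVideoFormatDescriptionRef videoInfo = NULL;
        static CIContext *ciContext = nil;

        OSStatus status;
        if (pixbuffer == NULL) {
            // The target pixel buffer must be IOSurface-backed so it can feed the OpenGL texture cache
            CFDictionaryRef empty = CFDictionaryCreate(kCFAllocatorDefault, NULL, NULL, 0,
                                                       &kCFTypeDictionaryKeyCallBacks, &kCFTypeDictionaryValueCallBacks);
            CFMutableDictionaryRef attrs = CFDictionaryCreateMutable(kCFAllocatorDefault, 1,
                                                                     &kCFTypeDictionaryKeyCallBacks, &kCFTypeDictionaryValueCallBacks);
            CFDictionarySetValue(attrs, kCVPixelBufferIOSurfacePropertiesKey, empty);
            status = CVPixelBufferCreate(kCFAllocatorSystemDefault, g_width_size, g_height_size,
                                         kCVPixelFormatType_420YpCbCr8BiPlanarFullRange, attrs, &pixbuffer);
            CFRelease(attrs);
            CFRelease(empty);
            if (status != noErr) {
                NSLog(@"Crop CVPixelBufferCreate error %d", (int)status);
                return NULL;
            }
        }

        // Crop and translate with Core Image, then render into the reusable pixel buffer
        CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(buffer);
        CIImage *ciImage = [CIImage imageWithCVImageBuffer:imageBuffer];
        ciImage = [ciImage imageByCroppingToRect:cropRect];
        ciImage = [ciImage imageByApplyingTransform:CGAffineTransformMakeTranslation(-_cropX, -_cropY)];

        if (ciContext == nil) {
            EAGLContext *eaglContext = [[EAGLContext alloc] initWithAPI:kEAGLRenderingAPIOpenGLES3];
            ciContext = [CIContext contextWithEAGLContext:eaglContext
                                                  options:@{kCIContextWorkingColorSpace : [NSNull null],
                                                            kCIContextUseSoftwareRenderer : @NO}];
        }
        [ciContext render:ciImage toCVPixelBuffer:pixbuffer bounds:cropRect colorSpace:nil];

        // Wrap the pixel buffer in a new CMSampleBuffer, carrying over the original timing
        CMSampleTimingInfo sampleTime = {
            .duration              = CMSampleBufferGetDuration(buffer),
            .presentationTimeStamp = CMSampleBufferGetPresentationTimeStamp(buffer),
            .decodeTimeStamp       = CMSampleBufferGetDecodeTimeStamp(buffer)
        };
        if (videoInfo == NULL) {
            status = CMVideoFormatDescriptionCreateForImageBuffer(kCFAllocatorDefault, pixbuffer, &videoInfo);
            if (status != 0) NSLog(@"Crop CMVideoFormatDescriptionCreateForImageBuffer error %d", (int)status);
        }
        CMSampleBufferRef cropBuffer = NULL;
        status = CMSampleBufferCreateForImageBuffer(kCFAllocatorDefault, pixbuffer, true, NULL, NULL,
                                                    videoInfo, &sampleTime, &cropBuffer);
        if (status != 0) NSLog(@"Crop CMSampleBufferCreateForImageBuffer error %d", (int)status);
        return cropBuffer;
    }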
      

Part 3: Rendering the CMSampleBuffer with OpenGL

Preparation

Import the headers

#import <QuartzCore/QuartzCore.h>
#import <AVFoundation/AVUtilities.h>
#import <mach/mach_time.h>
#import <GLKit/GLKit.h>

Variable definitions

enum
{
  UNIFORM_Y,
  UNIFORM_UV,
  UNIFORM_COLOR_CONVERSION_MATRIX,
  NUM_UNIFORMS
};
GLint uniforms[NUM_UNIFORMS];

// Attribute index.
enum
{
  ATTRIB_VERTEX,
  ATTRIB_TEXCOORD,
  NUM_ATTRIBUTES
};

// Color conversion constants (YUV to RGB), including the adjustment from 16-235/16-240 (video range).
// BT.601, which is the standard for SDTV.
static const GLfloat kColorConversion601[] = {
    1.164,  1.164, 1.164,
      0.0, -0.392, 2.017,
    1.596, -0.813,   0.0,
};

// BT.709, which is the standard for HDTV.
static const GLfloat kColorConversion709[] = {
    1.164,  1.164, 1.164,
      0.0, -0.213, 2.112,
    1.793, -0.533,   0.0,
};

// BT.601 full range (ref: http://www.equasys.de/colorconversion.html)
const GLfloat kColorConversion601FullRange[] = {
    1.0,    1.0,    1.0,
    0.0,    -0.343, 1.765,
    1.4,    -0.711, 0.0,
};
  1. Initialize the EAGLContext

        CAEAGLLayer *eaglLayer = (CAEAGLLayer *)self.layer;
        eaglLayer.opaque = TRUE;
        eaglLayer.drawableProperties = @{ kEAGLDrawablePropertyRetainedBacking : [NSNumber numberWithBool:NO],
                                          kEAGLDrawablePropertyColorFormat : kEAGLColorFormatRGBA8 };
        _context = [[EAGLContext alloc] initWithAPI:kEAGLRenderingAPIOpenGLES2];

        if (!_context || ![EAGLContext setCurrentContext:_context] || ![self loadShaders]) {
          return nil;
        }

        _preferredConversion = kColorConversion709;
    
  2. Set up the frame buffer and render buffer

    - (void)setupBuffers
    {
      glDisable(GL_DEPTH_TEST);
      
      glEnableVertexAttribArray(ATTRIB_VERTEX);
      glVertexAttribPointer(ATTRIB_VERTEX, 2, GL_FLOAT, GL_FALSE, 2 * sizeof(GLfloat), 0);
      
      glEnableVertexAttribArray(ATTRIB_TEXCOORD);
      glVertexAttribPointer(ATTRIB_TEXCOORD, 2, GL_FLOAT, GL_FALSE, 2 * sizeof(GLfloat), 0);
      
      glGenFramebuffers(1, &_frameBufferHandle);
      glBindFramebuffer(GL_FRAMEBUFFER, _frameBufferHandle);
      
      glGenRenderbuffers(1, &_colorBufferHandle);
      glBindRenderbuffer(GL_RENDERBUFFER, _colorBufferHandle);
      
      [_context renderbufferStorage:GL_RENDERBUFFER fromDrawable:(CAEAGLLayer *)self.layer];
      glGetRenderbufferParameteriv(GL_RENDERBUFFER, GL_RENDERBUFFER_WIDTH, &_backingWidth);
      glGetRenderbufferParameteriv(GL_RENDERBUFFER, GL_RENDERBUFFER_HEIGHT, &_backingHeight);

      glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, _colorBufferHandle);
      if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE) {
        NSLog(@"Failed to make complete framebuffer object %x", glCheckFramebufferStatus(GL_FRAMEBUFFER));
      }
    }
    
  3. Load, compile, and link the shaders

    - (BOOL)loadShaders
    {
      GLuint vertShader, fragShader;
      NSURL *vertShaderURL, *fragShaderURL;
      
      
      self.program = glCreateProgram();
      
      // Create and compile the vertex shader.
      vertShaderURL = [[NSBundle mainBundle] URLForResource:@"Shader" withExtension:@"vsh"];
      if (![self compileShader:&vertShader type:GL_VERTEX_SHADER URL:vertShaderURL]) {
        NSLog(@"Failed to compile vertex shader");
        return NO;
      }
      
      // Create and compile fragment shader.
      fragShaderURL = [[NSBundle mainBundle] URLForResource:@"Shader" withExtension:@"fsh"];
      if (![self compileShader:&fragShader type:GL_FRAGMENT_SHADER URL:fragShaderURL]) {
        NSLog(@"Failed to compile fragment shader");
        return NO;
      }
      
      // Attach vertex shader to program.
      glAttachShader(self.program, vertShader);
      
      // Attach fragment shader to program.
      glAttachShader(self.program, fragShader);
      
      // Bind attribute locations. This needs to be done prior to linking.
      glBindAttribLocation(self.program, ATTRIB_VERTEX, "position");
      glBindAttribLocation(self.program, ATTRIB_TEXCOORD, "texCoord");
      
      // Link the program.
      if (![self linkProgram:self.program]) {
        NSLog(@"Failed to link program: %d", self.program);
        
        if (vertShader) {
          glDeleteShader(vertShader);
          vertShader = 0;
        }
        if (fragShader) {
          glDeleteShader(fragShader);
          fragShader = 0;
        }
        if (self.program) {
          glDeleteProgram(self.program);
          self.program = 0;
        }
        
        return NO;
      }
      
      // Get uniform locations.
      uniforms[UNIFORM_Y] = glGetUniformLocation(self.program, "SamplerY");
      uniforms[UNIFORM_UV] = glGetUniformLocation(self.program, "SamplerUV");
      uniforms[UNIFORM_COLOR_CONVERSION_MATRIX] = glGetUniformLocation(self.program, "colorConversionMatrix");
      
      // Release vertex and fragment shaders.
      if (vertShader) {
        glDetachShader(self.program, vertShader);
        glDeleteShader(vertShader);
      }
      if (fragShader) {
        glDetachShader(self.program, fragShader);
        glDeleteShader(fragShader);
      }
      
      return YES;
    }

    - (BOOL)compileShader:(GLuint *)shader type:(GLenum)type URL:(NSURL *)URL
    {
        NSError *error;
        NSString *sourceString = [[NSString alloc] initWithContentsOfURL:URL encoding:NSUTF8StringEncoding error:&error];
        if (sourceString == nil) {
            NSLog(@"Failed to load shader source: %@", [error localizedDescription]);
            return NO;
        }
        
      GLint status;
      const GLchar *source;
      source = (GLchar *)[sourceString UTF8String];
      
      *shader = glCreateShader(type);
      glShaderSource(*shader, 1, &source, NULL);
      glCompileShader(*shader);
      
    #if defined(DEBUG)
      GLint logLength;
      glGetShaderiv(*shader, GL_INFO_LOG_LENGTH, &logLength);
      if (logLength > 0) {
        GLchar *log = (GLchar *)malloc(logLength);
        glGetShaderInfoLog(*shader, logLength, &logLength, log);
        NSLog(@"Shader compile log:\n%s", log);
        free(log);
      }
    #endif
      
      glGetShaderiv(*shader, GL_COMPILE_STATUS, &status);
      if (status == 0) {
        glDeleteShader(*shader);
        return NO;
      }
      
      return YES;
    }

    - (BOOL)linkProgram:(GLuint)prog
    {
      GLint status;
      glLinkProgram(prog);
      
    #if defined(DEBUG)
      GLint logLength;
      glGetProgramiv(prog, GL_INFO_LOG_LENGTH, &logLength);
      if (logLength > 0) {
        GLchar *log = (GLchar *)malloc(logLength);
        glGetProgramInfoLog(prog, logLength, &logLength, log);
        NSLog(@"Program link log:\n%s", log);
        free(log);
      }
    #endif
      
      glGetProgramiv(prog, GL_LINK_STATUS, &status);
      if (status == 0) {
        return NO;
      }
      
      return YES;
    }

    - (BOOL)validateProgram:(GLuint)prog
    {
      GLint logLength, status;
      
      glValidateProgram(prog);
      glGetProgramiv(prog, GL_INFO_LOG_LENGTH, &logLength);
      if (logLength > 0) {
        GLchar *log = (GLchar *)malloc(logLength);
        glGetProgramInfoLog(prog, logLength, &logLength, log);
        NSLog(@"Program validate log:\n%s", log);
        free(log);
      }
      
      glGetProgramiv(prog, GL_VALIDATE_STATUS, &status);
      if (status == 0) {
        return NO;
      }
      
      return YES;
    }
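
    The Shader.vsh / Shader.fsh files loaded above are not included in the post; a minimal pair matching the attribute and uniform names used here (position, texCoord, SamplerY, SamplerUV, colorConversionMatrix) could look like the following. The fragment shader assumes video-range YUV; for full-range input the 16/255 offset on the Y sample is dropped (matching kColorConversion601FullRange).

        // Shader.vsh
        attribute vec4 position;
        attribute vec2 texCoord;
        varying vec2 texCoordVarying;

        void main()
        {
            gl_Position = position;
            texCoordVarying = texCoord;
        }

        // Shader.fsh
        varying highp vec2 texCoordVarying;
        precision mediump float;

        uniform sampler2D SamplerY;
        uniform sampler2D SamplerUV;
        uniform mat3 colorConversionMatrix;

        void main()
        {
            mediump vec3 yuv;
            lowp vec3 rgb;

            // Subtract the video-range offsets; with a GL_LUMINANCE_ALPHA chroma texture, Cb is in .r and Cr in .a
            yuv.x  = texture2D(SamplerY, texCoordVarying).r - (16.0 / 255.0);
            yuv.yz = texture2D(SamplerUV, texCoordVarying).ra - vec2(0.5, 0.5);
            rgb = colorConversionMatrix * yuv;

            gl_FragColor = vec4(rgb, 1.0);
        }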
    
  4. UseProgram

      glUseProgram(self.program);
      
      glUniform1i(uniforms[UNIFORM_Y], 0);
      glUniform1i(uniforms[UNIFORM_UV], 1);
      
      glUniformMatrix3fv(uniforms[UNIFORM_COLOR_CONVERSION_MATRIX], 1, GL_FALSE, _preferredConversion);
      
      // Create CVOpenGLESTextureCacheRef for optimal CVPixelBufferRef to GLES texture conversion.
      if (!_videoTextureCache) {
        CVReturn err = CVOpenGLESTextureCacheCreate(kCFAllocatorDefault, NULL, _context, NULL, &_videoTextureCache);
        if (err != noErr) {
          NSLog(@"Error at CVOpenGLESTextureCacheCreate %d", err);
          return;
        }
      }
    
  5. Render

 - (void)displayPixelBuffer:(CVPixelBufferRef)pixelBuffer
{
	CVReturn err;
	if (pixelBuffer != NULL) {
		int frameWidth = (int)CVPixelBufferGetWidth(pixelBuffer);
		int frameHeight = (int)CVPixelBufferGetHeight(pixelBuffer);
		
		if (!_videoTextureCache) {
			NSLog(@"No video texture cache");
			return;
		}
        if ([EAGLContext currentContext] != _context) {
            [EAGLContext setCurrentContext:_context]; // Very important: make our context current before touching GL state
        }
		[self cleanUpTextures];
		
		
		/*
		 Use the color attachment of the pixel buffer to determine the appropriate color conversion matrix.
		 */
		CFTypeRef colorAttachments = CVBufferGetAttachment(pixelBuffer, kCVImageBufferYCbCrMatrixKey, NULL);
		
		if (colorAttachments == kCVImageBufferYCbCrMatrix_ITU_R_601_4) {
            if (self.isFullYUVRange) {
                _preferredConversion = kColorConversion601FullRange;
            }
            else {
                _preferredConversion = kColorConversion601;
            }
		}
		else {
			_preferredConversion = kColorConversion709;
		}
		
		/*
         CVOpenGLESTextureCacheCreateTextureFromImage will create GLES texture optimally from CVPixelBufferRef.
         */
		
		/*
         Create Y and UV textures from the pixel buffer. These textures will be drawn on the frame buffer Y-plane.
         */
		glActiveTexture(GL_TEXTURE0);
		err = CVOpenGLESTextureCacheCreateTextureFromImage(kCFAllocatorDefault,
														   _videoTextureCache,
														   pixelBuffer,
														   NULL,
														   GL_TEXTURE_2D,
														   GL_LUMINANCE, // store this plane as luminance texels
														   frameWidth,
														   frameHeight,
														   GL_LUMINANCE,
														   GL_UNSIGNED_BYTE,
														   0, // plane index: 0 is the Y plane of the biplanar buffer (use 0 for non-planar formats)
														   &_lumaTexture);
		if (err) {
			NSLog(@"Error at CVOpenGLESTextureCacheCreateTextureFromImage %d", err);
		}
		
        glBindTexture(CVOpenGLESTextureGetTarget(_lumaTexture), CVOpenGLESTextureGetName(_lumaTexture));
		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
		glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
		glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
		
		// UV-plane.
		glActiveTexture(GL_TEXTURE1);
        
        /** This call does two things:
         *  1. It hands the pixel data of renderTarget to OpenGL ES (similar to glTexImage2D()); the data can be the
         *     zero-filled default produced by CVPixelBufferCreate(), or real pixel data.
         *  2. It creates a CVOpenGLESTextureRef of the requested format, which is a wrapper around the texture id
         *     that glGenTextures() would otherwise produce.
         */
		err = CVOpenGLESTextureCacheCreateTextureFromImage(kCFAllocatorDefault,
														   _videoTextureCache,
														   pixelBuffer,
														   NULL,
                                                           GL_TEXTURE_2D,
                                                           GL_LUMINANCE_ALPHA, // store the CbCr plane as luminance + alpha texels
                                                           frameWidth / 2,
                                                           frameHeight / 2,
                                                           GL_LUMINANCE_ALPHA,
														   GL_UNSIGNED_BYTE,
														   1,
														   &_chromaTexture);
		if (err) {
			NSLog(@"Error at CVOpenGLESTextureCacheCreateTextureFromImage %d", err);
		}
		
		glBindTexture(CVOpenGLESTextureGetTarget(_chromaTexture), CVOpenGLESTextureGetName(_chromaTexture));
//        NSLog(@"id %d", CVOpenGLESTextureGetName(_chromaTexture));
		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
		glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
		glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
		
		glBindFramebuffer(GL_FRAMEBUFFER, _frameBufferHandle);
		
		// Set the view port to the entire view.
		glViewport(0, 0, _backingWidth, _backingHeight);
	}
	
	glClearColor(0.1f, 0.0f, 0.0f, 1.0f);
	glClear(GL_COLOR_BUFFER_BIT);
	
	// Use shader program.
	glUseProgram(self.program);
	glUniformMatrix3fv(uniforms[UNIFORM_COLOR_CONVERSION_MATRIX], 1, GL_FALSE, _preferredConversion);
	
	// Set up the quad vertices with respect to the orientation and aspect ratio of the video.
	CGRect vertexSamplingRect = AVMakeRectWithAspectRatioInsideRect(CGSizeMake(_backingWidth, _backingHeight), self.layer.bounds);
	
	// Compute normalized quad coordinates to draw the frame into.
	CGSize normalizedSamplingSize = CGSizeMake(0.0, 0.0);
	CGSize cropScaleAmount = CGSizeMake(vertexSamplingRect.size.width/self.layer.bounds.size.width, vertexSamplingRect.size.height/self.layer.bounds.size.height);
	
	// Normalize the quad vertices.
	if (cropScaleAmount.width > cropScaleAmount.height) {
		normalizedSamplingSize.width = 1.0;
		normalizedSamplingSize.height = cropScaleAmount.height/cropScaleAmount.width;
	}
	else {
		normalizedSamplingSize.width = 1.0;
		normalizedSamplingSize.height = cropScaleAmount.width/cropScaleAmount.height;
	}
	
	/*
     The quad vertex data defines the region of 2D plane onto which we draw our pixel buffers.
     Vertex data formed using (-1,-1) and (1,1) as the bottom left and top right coordinates respectively, covers the entire screen.
     */
	GLfloat quadVertexData [] = {
		-1 * normalizedSamplingSize.width, -1 * normalizedSamplingSize.height,
			 normalizedSamplingSize.width, -1 * normalizedSamplingSize.height,
		-1 * normalizedSamplingSize.width, normalizedSamplingSize.height,
			 normalizedSamplingSize.width, normalizedSamplingSize.height,
	};
	
	// Update the vertex data
	glVertexAttribPointer(ATTRIB_VERTEX, 2, GL_FLOAT, 0, 0, quadVertexData);
	glEnableVertexAttribArray(ATTRIB_VERTEX);
    
    GLfloat quadTextureData[] = { // default texture coordinates (no rotation or flip)
        0, 0,
        1, 0,
        0, 1,
        1, 1
    };
	
	glVertexAttribPointer(ATTRIB_TEXCOORD, 2, GL_FLOAT, 0, 0, quadTextureData);
	glEnableVertexAttribArray(ATTRIB_TEXCOORD);
	
	glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);

	glBindRenderbuffer(GL_RENDERBUFFER, _colorBufferHandle);
    
    if ([EAGLContext currentContext] == _context) {
        [_context presentRenderbuffer:GL_RENDERBUFFER];
    }
    
}
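
The cleanUpTextures method called near the top of displayPixelBuffer: is not shown in the post; a minimal sketch that releases the previous frame's textures and flushes the texture cache:

    - (void)cleanUpTextures
    {
        if (_lumaTexture) {
            CFRelease(_lumaTexture);
            _lumaTexture = NULL;
        }
        if (_chromaTexture) {
            CFRelease(_chromaTexture);
            _chromaTexture = NULL;
        }
        // Periodic texture cache flush every frame
        CVOpenGLESTextureCacheFlush(_videoTextureCache, 0);
    }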

demo

I'm still a beginner; if anything here is wrong, corrections are welcome.