Android 上学习 FFmpeg 的过程

阅读量 332 · 阅读时长约 1 分钟

1. 使用 FFmpeg 打开并播放视频的处理流程

//1注册组件,注册初始化
av_register_all();
//或者网络初始化
avformat_network_init();
//获取上下文
AVFormatContext	*pFormatCtx = avformat_alloc_context();

//2.打开文件 avformat_open_input
if(avformat_open_input(&pFormatCtx,input_str,NULL,NULL)!=0){
        LOGE("Couldn't open input stream.\n");
        return -1;
}

//3.查找流信息 avformat_find_stream_info
if(avformat_find_stream_info(pFormatCtx,NULL)<0){
        LOGE("Couldn't find stream information.\n");
        return -1;
}

//4.avcode_find_decoder 获取解码器
int videoindex=-1;
//获取视频流的index
for(int i=0; i<pFormatCtx->nb_streams; i++)
        if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO){
                videoindex=i;
                break;
        }
if(videoindex==-1){
        LOGE("Couldn't find a video stream.\n");
        return -1;
}
//获取编码器上下文
AVCodecContext	*pCodecCtx = pFormatCtx->streams[videoindex]->codec;
//获取编码器
AVCodec	*pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
if(pCodec==NULL){
        LOGE("Couldn't find Codec.\n");
        return -1;
}

//5. 打开编码器
if(avcodec_open2(pCodecCtx, pCodec,NULL)<0){
        LOGE("Couldn't open codec.\n");
        return -1;
}

AVFrame	*pFrame,*pFrameYUV;
//帧数据
pFrame=av_frame_alloc();
pFrameYUV=av_frame_alloc();
//读取包
AVPacket *packet = av_packet_alloc();
//    像素数据;初始化一个SwsContext。
SwsContext *sws_ctx = sws_getContext(
        codecContext->width, codecContext->height, codecContext->pix_fmt,
        codecContext->width, codecContext->height, AV_PIX_FMT_RGBA,
        SWS_BILINEAR, 0, 0, 0);


//获取界面传下来的surface,就是java层传来的surfaceview
ANativeWindow *nativeWindow = ANativeWindow_fromSurface(env, surface);
//视频缓冲区
ANativeWindow_Buffer windowBuffer;
if (0 == nativeWindow) {
    LOGE("Couldn't get native window from surface.\n");
    return -1;
}
//初始化显示窗口
ANativeWindow_setBuffersGeometry(nativeWindow, pCodecCtx->width,
                                     pCodecCtx->height,
                                     WINDOW_FORMAT_RGBA_8888);
                                     
//6. 循环读取视频packet,直到结束
while (av_read_frame(pFormatCtx, vPacket) >= 0) {
    if (vPacket->stream_index == videoindex) {
        //按 dts 递增的顺序向解码器送入编码帧 packet
        int ret = avcodec_send_packet(pCodecCtx, vPacket);
        if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
            LOGE("video avcodec_send_packet error %d", ret);
            return -1;
        }
        //解码器按 pts 递增的顺序输出原始帧 frame   [pts和dts后面会讲]
        ret = avcodec_receive_frame(pCodecCtx, vFrame);
        if (ret < 0 && ret != AVERROR_EOF) {
            LOGE("video avcodec_receive_frame error %d", ret);
            av_packet_unref(vPacket);
            continue;
        }
        //根据图片的宽高像素申请内存空间
        uint8_t *dst_data[0];
        int dst_linesize[0];
        av_image_alloc(dst_data, dst_linesize,
                       codecContext->width, codecContext->height, AV_PIX_FMT_RGBA, 1);
       if (packet->stream_index == vidio_stream_idx) {
           //转为指定的YUV420P
           sws_scale(sws_ctx,
                     reinterpret_cast<const uint8_t *const *>(frame->data), frame->linesize, 0,
                     frame->height,
                     dst_data, dst_linesize);
            if (ANativeWindow_lock(nativeWindow, &windowBuffer, NULL) < 0) {
                LOGE("cannot lock window");
            } else {
                av_image_alloc(dst_data, dst_linesize,
                       codecContext->width, codecContext->height, AV_PIX_FMT_RGBA, 1);
                 //将windowBuffer数据传到java层的surfaceview中
                 // [nativeWindow就是android中的surfaceview]
                ANativeWindow_unlockAndPost(nativeWindow);
            }
       }
    }
}


//最后,释放内存
sws_freeContext(sws_ctx);
av_free(vPacket);
av_free(pFrameRGBA);
avcodec_close(pCodecCtx);
avformat_close_input(&pFormatCtx);