ffmpeg_sample解读_remuxing

226 阅读2分钟

title: ffmpeg_sample解读_remuxing date: 2020-10-28 10:15:02 tags: [读书笔记] typora-copy-images-to: ./imgs typora-root-url: ./imgs

总结

就是读取输入文件, 并根据输出文件的后缀选择合适的封装格式, 然后把输入文件的所有流都解封装成 packet, 重新设置每个 packet 的时间参数后写出到输出文件

流程图

graph TB
afoi[avformat_open_input]
-->affsi[avformat_find_stream_info]
-->afaoc[avformat_alloc_output_context2]
-->ama[av_mallocz_array]
-->afns[avformat_new_stream]
-->acpc[avcodec_parameters_copy]
-->avo[avio_open]
-->afwh[avformat_write_header]
-->arf{av_read_frame>0?}
-->|yes|aiwf[av_interleaved_write_frame]
-->arf
arf-->|no|awt[av_write_trailer]
-->release[release]

image-20201113142321447

代码



/**
 * @file
 * libavformat/libavcodec demuxing and muxing API example.
 *
 * Remux streams from one container format to another.
 * @example remuxing.c
 */

#include <libavutil/timestamp.h>
#include <libavformat/avformat.h>

static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt, const char *tag)
{
    AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;

    printf("%s: pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
           tag,
           av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),
           av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),
           av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base),
           pkt->stream_index);
}
 //重新封装数据为完整的格式
 //根据输出文件后缀名来决定使用什么封装格式,

/**
 * Remux a media file from one container format to another without
 * re-encoding: every audio/video/subtitle packet is copied verbatim,
 * only its timestamps are rescaled to the output stream's time base.
 *
 * The output container is guessed from the output file's extension
 * (avformat_alloc_output_context2 with NULL format).
 *
 * @param argc argument count; expects at least 3
 * @param argv argv[1] = input path, argv[2] = output path
 * @return 0 on success, 1 on failure
 */
int remuxing_main(int argc, char **argv)
{
    AVOutputFormat *ofmt = NULL;
    AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
    AVPacket pkt;
    const char *in_filename, *out_filename;
    int ret;
    unsigned int i;                 /* matches the type of nb_streams */
    int stream_index = 0;
    int *stream_mapping = NULL;     /* input index -> output index, -1 = dropped */
    int stream_mapping_size = 0;

    if (argc < 3) {
        printf("usage: %s input output\n"
               "API example program to remux a media file with libavformat and libavcodec.\n"
               "The output format is guessed according to the file extension.\n"
               "\n", argv[0]);
        return 1;
    }

    in_filename  = argv[1];
    out_filename = argv[2];

    /* Open the input file and let libavformat probe its container format. */
    if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) {
        fprintf(stderr, "Could not open input file '%s'", in_filename);
        goto end;
    }

    /* Read a few packets to fill in stream parameters (codec, time base, ...). */
    if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
        fprintf(stderr, "Failed to retrieve input stream information");
        goto end;
    }

    /* Dump input format info to stderr (last arg 0 = treat as input). */
    av_dump_format(ifmt_ctx, 0, in_filename, 0);

    /* Allocate the output context; the muxer is guessed from out_filename. */
    avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
    if (!ofmt_ctx) {
        fprintf(stderr, "Could not create output context\n");
        ret = AVERROR_UNKNOWN;
        goto end;
    }
    /* Grab the muxer immediately: the cleanup path below dereferences
     * `ofmt` whenever `ofmt_ctx` is set, so it must never stay NULL past
     * this point (the original code assigned it only after the mapping
     * allocation, crashing on that allocation's failure path). */
    ofmt = ofmt_ctx->oformat;

    /* One mapping slot per input stream. */
    stream_mapping_size = ifmt_ctx->nb_streams;
    stream_mapping = av_mallocz_array(stream_mapping_size, sizeof(*stream_mapping));
    if (!stream_mapping) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    /* For every kept input stream, create a matching output stream and
     * copy the codec parameters over (no transcoding). */
    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        AVStream *out_stream;
        AVStream *in_stream = ifmt_ctx->streams[i];
        AVCodecParameters *in_codecpar = in_stream->codecpar;

        /* Only audio, video and subtitle streams are remuxed; everything
         * else (data, attachments, ...) is marked dropped. */
        if (in_codecpar->codec_type != AVMEDIA_TYPE_AUDIO &&
            in_codecpar->codec_type != AVMEDIA_TYPE_VIDEO &&
            in_codecpar->codec_type != AVMEDIA_TYPE_SUBTITLE) {
            stream_mapping[i] = -1;
            continue;
        }

        stream_mapping[i] = stream_index++;

        /* The new stream is owned by (and freed with) ofmt_ctx. */
        out_stream = avformat_new_stream(ofmt_ctx, NULL);
        if (!out_stream) {
            fprintf(stderr, "Failed allocating output stream\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }

        ret = avcodec_parameters_copy(out_stream->codecpar, in_codecpar);
        if (ret < 0) {
            fprintf(stderr, "Failed to copy codec parameters\n");
            goto end;
        }
        /* Let the output muxer pick its own codec tag; the input tag may
         * be invalid for the new container. */
        out_stream->codecpar->codec_tag = 0;
    }

    /* Dump output format info (last arg 1 = treat as output). */
    av_dump_format(ofmt_ctx, 0, out_filename, 1);

    if (!(ofmt->flags & AVFMT_NOFILE)) {
        /* Muxer needs a real file: open the output I/O context. */
        ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            fprintf(stderr, "Could not open output file '%s'", out_filename);
            goto end;
        }
    }

    /* Write the container header. */
    ret = avformat_write_header(ofmt_ctx, NULL);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file\n");
        goto end;
    }

    while (1) {
        AVStream *in_stream, *out_stream;

        /* Read the next packet; returns AVERROR_EOF at end of input. */
        ret = av_read_frame(ifmt_ctx, &pkt);
        if (ret < 0)
            break;

        /* Validate the index before any lookup, then drop unmapped streams. */
        if (pkt.stream_index >= stream_mapping_size ||
            stream_mapping[pkt.stream_index] < 0) {
            av_packet_unref(&pkt);
            continue;
        }

        in_stream  = ifmt_ctx->streams[pkt.stream_index];
        /* Remap to the (possibly compacted) output stream index. */
        pkt.stream_index = stream_mapping[pkt.stream_index];
        out_stream = ofmt_ctx->streams[pkt.stream_index];
        log_packet(ifmt_ctx, &pkt, "in");

        /* Rescale timestamps from the input stream's time base to the
         * output stream's. pts = presentation time, dts = decode order. */
        pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
        pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
        pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
        /* Byte position in the old container is meaningless now. */
        pkt.pos = -1;
        log_packet(ofmt_ctx, &pkt, "out");

        /* Interleave-and-write; the muxer buffers packets so streams come
         * out in dts order. On return the packet has been unreferenced. */
        ret = av_interleaved_write_frame(ofmt_ctx, &pkt);
        if (ret < 0) {
            fprintf(stderr, "Error muxing packet\n");
            break;
        }
        av_packet_unref(&pkt);
    }

    /* Finalize the container (index, trailer, ...). */
    av_write_trailer(ofmt_ctx);
end:
    avformat_close_input(&ifmt_ctx);

    /* Close the output file only if we opened one; `ofmt` is non-NULL
     * whenever `ofmt_ctx` is (assigned right after allocation above). */
    if (ofmt_ctx && ofmt && !(ofmt->flags & AVFMT_NOFILE))
        avio_closep(&ofmt_ctx->pb);
    avformat_free_context(ofmt_ctx);

    av_freep(&stream_mapping);

    if (ret < 0 && ret != AVERROR_EOF) {
        fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
        return 1;
    }

    return 0;
}