Picking up where we left off, let's continue with the stream_open function.
1. stream_open
The stream_open function does the following:
- Initializes the packet queues
- Initializes the sync clocks
- Creates the video refresh thread (analyzed later)
- Creates the file-reading thread ff_read
static VideoState *stream_open(FFPlayer *ffp, const char *filename, AVInputFormat *iformat)
{
assert(!ffp->is);
VideoState *is;
// Allocate the VideoState
is = av_mallocz(sizeof(VideoState));
if (!is)
return NULL;
is->filename = av_strdup(filename);
if (!is->filename)
goto fail;
/* start video display */
// Initialize the picture frame queue; pictq_size defaults to 3 and is configurable
if (frame_queue_init(&is->pictq, &is->videoq, ffp->pictq_size, 1) < 0)
goto fail;
if (frame_queue_init(&is->subpq, &is->subtitleq, SUBPICTURE_QUEUE_SIZE, 0) < 0)
goto fail;
if (frame_queue_init(&is->sampq, &is->audioq, SAMPLE_QUEUE_SIZE, 1) < 0)
goto fail;
// Initialize the packet queues
if (packet_queue_init(&is->videoq) < 0 ||
packet_queue_init(&is->audioq) < 0 ||
packet_queue_init(&is->subtitleq) < 0)
goto fail;
// Initialize the sync clocks
init_clock(&is->vidclk, &is->videoq.serial);
init_clock(&is->audclk, &is->audioq.serial);
init_clock(&is->extclk, &is->extclk.serial);
is->audio_clock_serial = -1;
is->muted = 0;
is->av_sync_type = ffp->av_sync_type;
is->play_mutex = SDL_CreateMutex();
is->accurate_seek_mutex = SDL_CreateMutex();
ffp->is = is;
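// when start_on_prepared is off, request a pause so playback waits for an explicit start()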
is->pause_req = !ffp->start_on_prepared;
// Create the video refresh thread
is->video_refresh_tid = SDL_CreateThreadEx(&is->_video_refresh_tid, video_refresh_thread, ffp, "ff_vout");
if (!is->video_refresh_tid) {
av_freep(&ffp->is);
return NULL;
}
is->initialized_decoder = 0;
// Create the file-reading thread
is->read_tid = SDL_CreateThreadEx(&is->_read_tid, read_thread, ffp, "ff_read");
if (!is->read_tid) {
av_log(NULL, AV_LOG_FATAL, "SDL_CreateThread(): %s\n", SDL_GetError());
goto fail;
}
// async_init_decoder does not affect the main flow; ignore it for now
if (ffp->async_init_decoder && !ffp->video_disable && ffp->video_mime_type && strlen(ffp->video_mime_type) > 0
&& ffp->mediacodec_default_name && strlen(ffp->mediacodec_default_name) > 0) {
if (ffp->mediacodec_all_videos || ffp->mediacodec_avc || ffp->mediacodec_hevc || ffp->mediacodec_mpeg2) {
decoder_init(&is->viddec, NULL, &is->videoq, is->continue_read_thread);
ffp->node_vdec = ffpipeline_init_video_decoder(ffp->pipeline, ffp);
}
}
is->initialized_decoder = 1;
return is;
fail:
is->initialized_decoder = 1;
is->abort_request = true;
if (is->video_refresh_tid)
SDL_WaitThread(is->video_refresh_tid, NULL);
stream_close(ffp);
return NULL;
}
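The queue and clock helpers called above are small; ijkplayer's ff_ffplay.c follows ffplay closely here. As a reference sketch (taken from ffplay; ijkplayer's versions may carry a few extra fields):
static int packet_queue_init(PacketQueue *q)
{
    memset(q, 0, sizeof(PacketQueue));
    q->mutex = SDL_CreateMutex();
    if (!q->mutex) {
        av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
        return AVERROR(ENOMEM);
    }
    q->cond = SDL_CreateCond();
    if (!q->cond) {
        av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
        return AVERROR(ENOMEM);
    }
    q->abort_request = 1; // queue starts aborted; packet_queue_start() clears this
    return 0;
}
static void init_clock(Clock *c, int *queue_serial)
{
    c->speed = 1.0;
    c->paused = 0;
    c->queue_serial = queue_serial; // points at the owning packet queue's serial
    set_clock(c, NAN, -1);          // no valid pts yet
}
Note that each clock keeps a pointer to its packet queue's serial: after a seek bumps the queue serial, timestamps taken under the old serial can be recognized as stale.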
2. read_thread
This function is quite long, so we'll analyze it in segments.
- Step 1: create the AVFormatContext and open the file.
// Create the AVFormatContext
ic = avformat_alloc_context();
if (!ic) {
    av_log(NULL, AV_LOG_FATAL, "Could not allocate context.\n");
    ret = AVERROR(ENOMEM);
    goto fail;
}
ic->interrupt_callback.callback = decode_interrupt_cb;
ic->interrupt_callback.opaque = is;
if (ffp->iformat_name)
    is->iformat = av_find_input_format(ffp->iformat_name);
// Open the input
err = avformat_open_input(&ic, is->filename, is->iformat, &ffp->format_opts);
- Step 2: find the audio, video, and subtitle stream info.
int video_stream_count = 0;
int h264_stream_count = 0;
int first_h264_stream = -1;
for (i = 0; i < ic->nb_streams; i++) {
    AVStream *st = ic->streams[i];
    enum AVMediaType type = st->codecpar->codec_type;
    st->discard = AVDISCARD_ALL;
    if (type >= 0 && ffp->wanted_stream_spec[type] && st_index[type] == -1)
        if (avformat_match_stream_specifier(ic, st, ffp->wanted_stream_spec[type]) > 0)
            st_index[type] = i;
    // choose first h264
    if (type == AVMEDIA_TYPE_VIDEO) {
        enum AVCodecID codec_id = st->codecpar->codec_id;
        video_stream_count++;
        if (codec_id == AV_CODEC_ID_H264) {
            h264_stream_count++;
            if (first_h264_stream < 0)
                first_h264_stream = i;
        }
    }
}
if (video_stream_count > 1 && st_index[AVMEDIA_TYPE_VIDEO] < 0) {
    st_index[AVMEDIA_TYPE_VIDEO] = first_h264_stream;
    av_log(NULL, AV_LOG_WARNING, "multiple video stream found, prefer first h264 stream: %d\n", first_h264_stream);
}
// Find the video stream
if (!ffp->video_disable)
    st_index[AVMEDIA_TYPE_VIDEO] = av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
                                                       st_index[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
// Find the audio stream
if (!ffp->audio_disable)
    st_index[AVMEDIA_TYPE_AUDIO] = av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
                                                       st_index[AVMEDIA_TYPE_AUDIO],
                                                       st_index[AVMEDIA_TYPE_VIDEO], NULL, 0);
// Find the subtitle stream
if (!ffp->video_disable && !ffp->subtitle_disable)
    st_index[AVMEDIA_TYPE_SUBTITLE] = av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
                                                          st_index[AVMEDIA_TYPE_SUBTITLE],
                                                          (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
                                                           st_index[AVMEDIA_TYPE_AUDIO] :
                                                           st_index[AVMEDIA_TYPE_VIDEO]),
                                                          NULL, 0);
- Initialize the streams that were found.
if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
    stream_component_open(ffp, st_index[AVMEDIA_TYPE_AUDIO]);
} else {
    // no audio stream: sync to the video clock instead
    ffp->av_sync_type = AV_SYNC_VIDEO_MASTER;
    is->av_sync_type = ffp->av_sync_type;
}
ret = -1;
if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
    ret = stream_component_open(ffp, st_index[AVMEDIA_TYPE_VIDEO]);
}
if (is->show_mode == SHOW_MODE_NONE)
    is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
    stream_component_open(ffp, st_index[AVMEDIA_TYPE_SUBTITLE]);
}
ffp_notify_msg1(ffp, FFP_MSG_COMPONENT_OPEN);
- Open the audio and video streams.
First obtain the corresponding AVCodecContext from the stream info, then find a decoder, configure the decoding options, and call avcodec_open2 to open the decoder.
static int stream_component_open(FFPlayer *ffp, int stream_index)
{
    VideoState *is = ffp->is;
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;
    AVCodec *codec = NULL;
    const char *forced_codec_name = NULL;
    AVDictionary *opts = NULL;
    AVDictionaryEntry *t = NULL;
    int sample_rate, nb_channels;
    int64_t channel_layout;
    int ret = 0;
    int stream_lowres = ffp->lowres;
    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    avctx = avcodec_alloc_context3(NULL);
    if (!avctx)
        return AVERROR(ENOMEM);
    // Fill the AVCodecContext from the stream's codec parameters
    ret = avcodec_parameters_to_context(avctx, ic->streams[stream_index]->codecpar);
    if (ret < 0)
        goto fail;
    av_codec_set_pkt_timebase(avctx, ic->streams[stream_index]->time_base);
    // Find a decoder for the codec_id that was just filled in
    codec = avcodec_find_decoder(avctx->codec_id);
    // Configure the decoding options
    opts = filter_codec_opts(ffp->codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
    if (!av_dict_get(opts, "threads", NULL, 0))
        av_dict_set(&opts, "threads", "auto", 0);
    if (stream_lowres)
        av_dict_set_int(&opts, "lowres", stream_lowres, 0);
    if (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO)
        av_dict_set(&opts, "refcounted_frames", "1", 0);
    // Open the decoder
    if ((ret = avcodec_open2(avctx, codec, &opts)) < 0) {
        goto fail;
    }
After avcodec_open2 succeeds, stream_component_open switches on avctx->codec_type to do per-type setup; the audio and video branches are shown in the next two sections. The rest of read_thread is the frame-reading loop, which we'll leave for the next article. Next, let's look at how the audio and video streams are initialized.
3. Initializing the audio stream
case AVMEDIA_TYPE_AUDIO:
sample_rate = avctx->sample_rate;
nb_channels = avctx->channels;
channel_layout = avctx->channel_layout;
/* prepare audio output */
// Initialize the audio output
if ((ret = audio_open(ffp, channel_layout, nb_channels, sample_rate, &is->audio_tgt)) < 0)
goto fail;
ffp_set_audio_codec_info(ffp, AVCODEC_MODULE_NAME, avcodec_get_name(avctx->codec_id));
is->audio_hw_buf_size = ret;
is->audio_src = is->audio_tgt;
is->audio_buf_size = 0;
is->audio_buf_index = 0;
/* init averaging filter */
is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
is->audio_diff_avg_count = 0;
/* since we do not have a precise enough audio FIFO fullness,
we correct audio sync only if larger than this threshold */
is->audio_diff_threshold = 2.0 * is->audio_hw_buf_size / is->audio_tgt.bytes_per_sec;
is->audio_stream = stream_index;
is->audio_st = ic->streams[stream_index];
decoder_init(&is->auddec, avctx, &is->audioq, is->continue_read_thread);
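// if the format cannot seek (and has no read_seek), pass the stream's start time
// to the decoder so samples before it can be dropped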
if ((is->ic->iformat->flags & (AVFMT_NOBINSEARCH | AVFMT_NOGENSEARCH | AVFMT_NO_BYTE_SEEK)) && !is->ic->iformat->read_seek) {
is->auddec.start_pts = is->audio_st->start_time;
is->auddec.start_pts_tb = is->audio_st->time_base;
}
// Start the audio decoding thread
if ((ret = decoder_start(&is->auddec, audio_thread, ffp, "ff_audio_dec")) < 0)
goto out;
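// un-pause the audio device so sdl_audio_callback starts getting invoked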
SDL_AoutPauseAudio(ffp->aout, 0);
Initializing the audio output: prepare the desired parameters, then call the open_audio function of the SDL_Aout that was created earlier in func_open_audio_output. For convenience, the code that creates the SDL_Aout is included below as well.
static int audio_open(FFPlayer *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
{
FFPlayer *ffp = opaque;
VideoState *is = ffp->is;
SDL_AudioSpec wanted_spec, spec;
const char *env;
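// fallback table: when opening with N channels fails, try next_nb_channels[N] next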
static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
#ifdef FFP_MERGE
static const int next_sample_rates[] = {0, 44100, 48000, 96000, 192000};
#endif
static const int next_sample_rates[] = {0, 44100, 48000};
int next_sample_rate_idx = FF_ARRAY_ELEMS(next_sample_rates) - 1;
env = SDL_getenv("SDL_AUDIO_CHANNELS");
if (env) {
wanted_nb_channels = atoi(env);
wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
}
if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
}
// Get the number of channels
wanted_nb_channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
wanted_spec.channels = wanted_nb_channels;
wanted_spec.freq = wanted_sample_rate;
if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
av_log(NULL, AV_LOG_ERROR, "Invalid sample rate or channel count!\n");
return -1;
}
while (next_sample_rate_idx && next_sample_rates[next_sample_rate_idx] >= wanted_spec.freq)
next_sample_rate_idx--;
wanted_spec.format = AUDIO_S16SYS;
wanted_spec.silence = 0;
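// buffer size: samples per callback (freq / callbacks-per-second), rounded up to
// a power of two, with a lower bound of SDL_AUDIO_MIN_BUFFER_SIZE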
wanted_spec.samples = FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE, 2 << av_log2(wanted_spec.freq / SDL_AoutGetAudioPerSecondCallBacks(ffp->aout)));
wanted_spec.callback = sdl_audio_callback;
wanted_spec.userdata = opaque;
while (SDL_AoutOpenAudio(ffp->aout, &wanted_spec, &spec) < 0) {
/* avoid infinite loop on exit. --by bbcallen */
if (is->abort_request)
return -1;
av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels, %d Hz): %s\n",
wanted_spec.channels, wanted_spec.freq, SDL_GetError());
wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
if (!wanted_spec.channels) {
wanted_spec.freq = next_sample_rates[next_sample_rate_idx--];
wanted_spec.channels = wanted_nb_channels;
if (!wanted_spec.freq) {
av_log(NULL, AV_LOG_ERROR,
"No more combinations to try, audio open failed\n");
return -1;
}
}
wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
}
if (spec.format != AUDIO_S16SYS) {
av_log(NULL, AV_LOG_ERROR,
"SDL advised audio format %d is not supported!\n", spec.format);
return -1;
}
if (spec.channels != wanted_spec.channels) {
wanted_channel_layout = av_get_default_channel_layout(spec.channels);
if (!wanted_channel_layout) {
av_log(NULL, AV_LOG_ERROR,
"SDL advised channel count %d is not supported!\n", spec.channels);
return -1;
}
}
audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
audio_hw_params->freq = spec.freq;
audio_hw_params->channel_layout = wanted_channel_layout;
audio_hw_params->channels = spec.channels;
audio_hw_params->frame_size = av_samples_get_buffer_size(NULL, audio_hw_params->channels, 1, audio_hw_params->fmt, 1);
audio_hw_params->bytes_per_sec = av_samples_get_buffer_size(NULL, audio_hw_params->channels, audio_hw_params->freq, audio_hw_params->fmt, 1);
if (audio_hw_params->bytes_per_sec <= 0 || audio_hw_params->frame_size <= 0) {
av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size failed\n");
return -1;
}
SDL_AoutSetDefaultLatencySeconds(ffp->aout, ((double)(2 * spec.size)) / audio_hw_params->bytes_per_sec);
return spec.size;
}
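To make the latency math concrete (with hypothetical numbers): at 44100 Hz stereo S16 output, bytes_per_sec = 44100 * 2 channels * 2 bytes = 176400. If SDL settles on 2048 samples per callback, then spec.size = 2048 * 2 * 2 = 8192 bytes, and the default latency registered above is 2 * 8192 / 176400 ≈ 0.093 s, i.e. about two callback buffers of audio.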
int SDL_AoutOpenAudio(SDL_Aout *aout, const SDL_AudioSpec *desired, SDL_AudioSpec *obtained)
{
if (aout && desired && aout->open_audio)
return aout->open_audio(aout, desired, obtained);
return -1;
}
// Create the SDL_Aout
static SDL_Aout *func_open_audio_output(IJKFF_Pipeline *pipeline, FFPlayer *ffp)
{
SDL_Aout *aout = NULL;
if (ffp->use_oboe) {
aout = SDL_AoutAndroid_CreateForOboe();
} else if (ffp->opensles) {
// Use the OpenSL ES implementation
aout = SDL_AoutAndroid_CreateForOpenSLES();
} else {
// Use the AudioTrack implementation
aout = SDL_AoutAndroid_CreateForAudioTrack();
}
if (aout)
SDL_AoutSetStereoVolume(aout, pipeline->opaque->left_volume, pipeline->opaque->right_volume);
return aout;
}
Starting the audio decoding thread:
static int decoder_start(Decoder *d, int (*fn)(void *), void *arg, const char *name)
{
packet_queue_start(d->queue);
d->decoder_tid = SDL_CreateThreadEx(&d->_decoder_tid, fn, arg, name);
if (!d->decoder_tid) {
av_log(NULL, AV_LOG_ERROR, "SDL_CreateThread(): %s\n", SDL_GetError());
return AVERROR(ENOMEM);
}
return 0;
}
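decoder_start is paired with decoder_init, which was called during stream setup but isn't shown above. In ffplay it is plain field initialization (ijkplayer's version adds a few extra bookkeeping fields, omitted in this sketch):
static void decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond)
{
    memset(d, 0, sizeof(Decoder));
    d->avctx = avctx;                        // may be NULL for the async MediaCodec path seen earlier
    d->queue = queue;                        // packet queue this decoder will drain
    d->empty_queue_cond = empty_queue_cond;  // signaled to wake the read thread when the queue runs dry
    d->start_pts = AV_NOPTS_VALUE;
}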
4. Initializing the video stream
case AVMEDIA_TYPE_VIDEO:
is->video_stream = stream_index;
is->video_st = ic->streams[stream_index];
if (ffp->async_init_decoder) {
while (!is->initialized_decoder) {
SDL_Delay(5);
}
if (ffp->node_vdec) {
is->viddec.avctx = avctx;
ret = ffpipeline_config_video_decoder(ffp->pipeline, ffp);
}
if (ret || !ffp->node_vdec) {
decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread);
ffp->node_vdec = ffpipeline_open_video_decoder(ffp->pipeline, ffp);
if (!ffp->node_vdec)
goto fail;
}
} else {
decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread);
// Initialize the decoder: use hardware decoding if supported, otherwise fall back to software
ffp->node_vdec = ffpipeline_open_video_decoder(ffp->pipeline, ffp);
if (!ffp->node_vdec)
goto fail;
}
// Create the video decoding thread
if ((ret = decoder_start(&is->viddec, video_thread, ffp, "ff_video_dec")) < 0)
goto out;
is->queue_attachments_req = 1;
if (ffp->max_fps >= 0) {
if(is->video_st->avg_frame_rate.den && is->video_st->avg_frame_rate.num) {
double fps = av_q2d(is->video_st->avg_frame_rate);
SDL_ProfilerReset(&is->viddec.decode_profiler, fps + 0.5);
if (fps > ffp->max_fps && fps < 130.0) {
is->is_video_high_fps = 1;
av_log(ffp, AV_LOG_WARNING, "fps: %lf (too high)\n", fps);
} else {
av_log(ffp, AV_LOG_WARNING, "fps: %lf (normal)\n", fps);
}
}
if(is->video_st->r_frame_rate.den && is->video_st->r_frame_rate.num) {
double tbr = av_q2d(is->video_st->r_frame_rate);
if (tbr > ffp->max_fps && tbr < 130.0) {
is->is_video_high_fps = 1;
av_log(ffp, AV_LOG_WARNING, "fps: %lf (too high)\n", tbr);
} else {
av_log(ffp, AV_LOG_WARNING, "fps: %lf (normal)\n", tbr);
}
}
}
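// for high-fps streams, let the decoder skip non-reference frames, loop filtering, and IDCT to keep up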
if (is->is_video_high_fps) {
avctx->skip_frame = FFMAX(avctx->skip_frame, AVDISCARD_NONREF);
avctx->skip_loop_filter = FFMAX(avctx->skip_loop_filter, AVDISCARD_NONREF);
avctx->skip_idct = FFMAX(avctx->skip_idct, AVDISCARD_NONREF);
}
break;
Initializing the video decoder: ffpipeline_open_video_decoder dispatches to the func_open_video_decoder function that was installed on the IJKFF_Pipeline when it was created, and that function produces the IJKFF_Pipenode.
IJKFF_Pipenode* ffpipeline_open_video_decoder(IJKFF_Pipeline *pipeline, FFPlayer *ffp)
{
return pipeline->func_open_video_decoder(pipeline, ffp);
}
IJKFF_Pipeline *ffpipeline_create_from_android(FFPlayer *ffp)
{
ALOGD("ffpipeline_create_from_android()\n");
IJKFF_Pipeline *pipeline = ffpipeline_alloc(&g_pipeline_class, sizeof(IJKFF_Pipeline_Opaque));
if (!pipeline)
return pipeline;
IJKFF_Pipeline_Opaque *opaque = pipeline->opaque;
opaque->ffp = ffp;
opaque->surface_mutex = SDL_CreateMutex();
opaque->left_volume = 1.0f;
opaque->right_volume = 1.0f;
if (!opaque->surface_mutex) {
ALOGE("ffpipeline-android:create SDL_CreateMutex failed\n");
goto fail;
}
pipeline->func_destroy = func_destroy;
// Open the video decoder
pipeline->func_open_video_decoder = func_open_video_decoder;
// Construct the SDL_Aout
pipeline->func_open_audio_output = func_open_audio_output;
// Pre-initialize the video decoder
pipeline->func_init_video_decoder = func_init_video_decoder;
// Configure the video decoder
pipeline->func_config_video_decoder = func_config_video_decoder;
return pipeline;
fail:
ffpipeline_free_p(&pipeline);
return NULL;
}
static IJKFF_Pipenode *func_open_video_decoder(IJKFF_Pipeline *pipeline, FFPlayer *ffp)
{
IJKFF_Pipeline_Opaque *opaque = pipeline->opaque;
IJKFF_Pipenode *node = NULL;
// Prefer the hardware decoder: it first checks whether the video stream's format is supported;
// if it isn't, or hardware decoding isn't enabled, fall back to software decoding
if (ffp->mediacodec_all_videos || ffp->mediacodec_avc || ffp->mediacodec_hevc || ffp->mediacodec_mpeg2)
node = ffpipenode_create_video_decoder_from_android_mediacodec(ffp, pipeline, opaque->weak_vout);
if (!node) {
node = ffpipenode_create_video_decoder_from_ffplay(ffp);
}
return node;
}
We won't dig into the concrete stream operations here; in the next article we'll look directly at how the audio and video streams are actually played.