Usage overview: whenever a chunk of the H.264 byte stream arrives, call the H264::appendData method to hand the data to the H264 instance. Once a frame has been decoded, the imageData signal is emitted; in the slot connected to imageData, simply render the received QImage onto the UI.
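For reference, here is a minimal sketch of the feeding side, assuming the raw H.264 stream arrives over a QUdpSocket; the members m_socket and m_h264 are placeholder names used only for this illustration and are not part of the original code:

// Sketch only: push every received datagram straight into the decoder.
// appendData() locks internally, so it is safe to call from the GUI thread.
connect(&m_socket, &QUdpSocket::readyRead, this, [this]() {
    while (m_socket.hasPendingDatagrams()) {
        QByteArray chunk(int(m_socket.pendingDatagramSize()), 0);
        m_socket.readDatagram(chunk.data(), chunk.size());
        m_h264.appendData(chunk);
    }
});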
Code Implementation
Header file
#ifndef H264_H
#define H264_H
#include <QObject>
#include <QByteArray>
#include <QMutex>
#include <QThread>
#include <QImage>

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libavutil/imgutils.h>
}

class H264 : public QThread
{
    Q_OBJECT
public:
    explicit H264(QObject *parent = nullptr);
    bool init();
    bool genImageObj();
    void appendData(const QByteArray& data);
    void decode();
    void close() { m_isRun = false; wait(); }

private:
    AVPacket *m_avPacket = nullptr;
    AVFrame *m_avFrameInput = nullptr;
    AVFrame *m_avFramePicture = nullptr;
    AVCodec *m_pCodec = nullptr;
    AVCodecContext *m_avCodecCtx = nullptr;
    AVCodecParserContext *m_pCodecParserCtx = nullptr;
    SwsContext *m_pSwsContext = nullptr;
    int m_nVideoWidth = 0;
    int m_nVideoHeight = 0;
    uint8_t *m_pPicBuffer = nullptr;
    QByteArray m_buffer;
    QMutex m_mutex;
    bool m_isFirst = true;
    bool m_isRun = false;

signals:
    void imageData(QImage image);

protected:
    void run() override;
};
#endif // H264_H
Source file
#include "h264.h"
#include <QMutexLocker>
#include <QDebug>
H264::H264(QObject *parent)
    : QThread(parent)
    , m_isFirst(true)
{
}
bool H264::init()
{
    // av_packet_alloc() already returns an initialized packet, so the
    // deprecated av_init_packet() call is no longer needed.
    m_avPacket = av_packet_alloc();
    m_avFrameInput = av_frame_alloc();
    m_avFramePicture = av_frame_alloc();
    m_pCodec = (AVCodec*)avcodec_find_decoder(AV_CODEC_ID_H264);
    if (!m_pCodec) return false;
    m_avCodecCtx = avcodec_alloc_context3(m_pCodec);
    if (!m_avCodecCtx) return false;
    m_pCodecParserCtx = av_parser_init(AV_CODEC_ID_H264);
    if (!m_pCodecParserCtx) return false;
    if (avcodec_open2(m_avCodecCtx, m_pCodec, nullptr) < 0) {
        return false;
    }
    return true;
}
bool H264::genImageObj()
{
    // Convert from the decoder's native pixel format (usually YUV420P) to RGB32,
    // which maps directly onto QImage::Format_ARGB32.
    AVPixelFormat srcFmt = m_avCodecCtx->pix_fmt;
    AVPixelFormat dstFmt = AV_PIX_FMT_RGB32;
    int bytes = av_image_get_buffer_size(dstFmt, m_nVideoWidth, m_nVideoHeight, 1);
    m_pPicBuffer = (uint8_t*)av_malloc(bytes * sizeof(uint8_t));
    av_image_fill_arrays(m_avFramePicture->data, m_avFramePicture->linesize, m_pPicBuffer, dstFmt, m_nVideoWidth, m_nVideoHeight, 1);
    m_pSwsContext = sws_getContext(m_nVideoWidth, m_nVideoHeight, srcFmt,
                                   m_nVideoWidth, m_nVideoHeight, dstFmt,
                                   SWS_FAST_BILINEAR, nullptr, nullptr, nullptr);
    return m_pPicBuffer && m_pSwsContext;
}
void H264::appendData(const QByteArray &data)
{
    QMutexLocker _(&m_mutex);
    m_buffer += data;
}
void H264::decode()
{
    QMutexLocker _(&m_mutex);
    while (m_buffer.size() > 0) {
        // The parser may read slightly past the end of the input, so hand it a
        // zero-filled buffer with the padding FFmpeg requires.
        const int nBufferSize = m_buffer.size() + AV_INPUT_BUFFER_PADDING_SIZE;
        QByteArray buf;
        buf.fill(0, nBufferSize);
        memcpy(buf.data(), m_buffer.constData(), m_buffer.size());
        // Split the raw byte stream into complete packets.
        int nLength = av_parser_parse2(
                    m_pCodecParserCtx,
                    m_avCodecCtx,
                    &m_avPacket->data,
                    &m_avPacket->size,
                    (const uint8_t*)buf.constData(),
                    m_buffer.size(),
                    AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
        m_buffer.remove(0, nLength);
        if (m_avPacket->size == 0) {
            // Not enough data for a complete packet yet; wait for more input.
            return;
        }
        // Hook point: the parsed picture type is available here if needed.
        switch (m_pCodecParserCtx->pict_type) {
        case AV_PICTURE_TYPE_I: break;
        case AV_PICTURE_TYPE_P: break;
        case AV_PICTURE_TYPE_B: break;
        default: break;
        }
        avcodec_send_packet(m_avCodecCtx, m_avPacket);
        int ret = avcodec_receive_frame(m_avCodecCtx, m_avFrameInput);
        if (ret < 0) continue; // usually AVERROR(EAGAIN): the decoder needs more packets
        if (m_isFirst) {
            // The stream dimensions are only known after the first decoded frame.
            m_nVideoWidth = m_avCodecCtx->width;
            m_nVideoHeight = m_avCodecCtx->height;
            genImageObj();
            m_isFirst = false;
        }
        sws_scale(m_pSwsContext, m_avFrameInput->data, m_avFrameInput->linesize, 0, m_nVideoHeight, m_avFramePicture->data, m_avFramePicture->linesize);
        // Deep-copy the pixels before emitting: the QImage constructor only wraps the
        // buffer, which is reused for the next frame and read from another thread.
        QImage image(m_avFramePicture->data[0], m_nVideoWidth, m_nVideoHeight,
                     m_avFramePicture->linesize[0], QImage::Format_ARGB32);
        emit imageData(image.copy());
    }
}
void H264::run()
{
    m_isRun = true;
    while (m_isRun) {
        decode();
        msleep(1); // avoid spinning at 100% CPU while the buffer is empty
    }
    // Release FFmpeg resources when the thread exits.
    av_packet_free(&m_avPacket);
    av_frame_free(&m_avFrameInput);
    av_frame_free(&m_avFramePicture);
    avcodec_free_context(&m_avCodecCtx);
    av_parser_close(m_pCodecParserCtx);
    if (m_pSwsContext) sws_freeContext(m_pSwsContext);
    if (m_pPicBuffer) av_free(m_pPicBuffer);
}
Usage
void Widget::onVideoFrame(QImage img){}
// Initialize the H264 decoder
connect(&m_h264, &H264::imageData, this, &Widget::onVideoFrame);
m_h264.init();
m_h264.start();
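For completeness, here is a possible body for the slot and a clean shutdown path, assuming the widget displays frames on a QLabel member named m_videoLabel (a placeholder name, not part of the original code):

// Sketch only: show each decoded frame and stop the decoder thread cleanly.
void Widget::onVideoFrame(QImage img)
{
    // The decoder emits a deep copy, so the image is safe to use in the GUI thread.
    m_videoLabel->setPixmap(QPixmap::fromImage(img));
}

Widget::~Widget()
{
    m_h264.close(); // sets m_isRun to false and waits for run() to return
}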