main.c
#include <stdio.h>
#include <stdlib.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/hwcontext.h>
#include <libavutil/mem.h>
#include <libavutil/opt.h>
#include <libavutil/imgutils.h>
const char *filter_descr =
"hwupload,scale_rkrga=format=rgb24,hwdownload";
static AVFilterContext *buffersink_ctx;
static AVFilterContext *buffersrc_ctx;
static AVFilterContext *hwupload_ctx;
static AVFilterGraph *filter_graph;
const int width = 1280;
const int height = 720;
static int init_filters(const char *filters_descr) {
int ret = 0;
const AVFilter *buffersrc = avfilter_get_by_name("buffer");
const AVFilter *buffersink = avfilter_get_by_name("buffersink");
AVFilterInOut *outputs = avfilter_inout_alloc();
AVFilterInOut *inputs = avfilter_inout_alloc();
if (!buffersrc) {
fprintf(stderr, "buffersrc not found\n");
return -1;
}
if (!buffersink) {
fprintf(stderr, "buffersink not found\n");
return -1;
}
// Create the rkmpp hardware device context
AVBufferRef *hw_device_ctx = NULL;
if (av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_RKMPP, "rkmpp",
NULL, 0) < 0) {
fprintf(stderr, "Failed to create rkmpp hardware device context\n");
return -1;
}
filter_graph = avfilter_graph_alloc();
if (!outputs || !inputs || !filter_graph) {
ret = AVERROR(ENOMEM);
goto end;
}
char args[512];
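// The buffer source has no decoder upstream, so the raw frame parameters
// (size, pixel format passed as its numeric enum value, time base and sample
// aspect ratio) must be supplied explicitly.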
snprintf(args, sizeof(args),
"video_size=%dx%d:pix_fmt=%d:time_base=1/25:pixel_aspect=1/1",
width, height, AV_PIX_FMT_YUYV422);
fprintf(stderr, "args: %s\n", args);
ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in", args,
NULL, filter_graph);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
goto end;
}
ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out", NULL,
NULL, filter_graph);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
goto end;
}
// Restrict the pixel formats the buffersink filter may output
enum AVPixelFormat pix_fmts[] = {AV_PIX_FMT_RGB24, AV_PIX_FMT_NONE};
ret = av_opt_set_int_list(buffersink_ctx, "pix_fmts", pix_fmts,
AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
goto end;
}
outputs->name = av_strdup("in");
outputs->filter_ctx = buffersrc_ctx;
outputs->pad_idx = 0;
outputs->next = NULL;
inputs->name = av_strdup("out");
inputs->filter_ctx = buffersink_ctx;
inputs->pad_idx = 0;
inputs->next = NULL;
// avfilter_graph_parse_ptr() parses the string description and builds the filter graph.
// "inputs" is the linked list describing the open inputs of the parsed graph,
// "outputs" the list describing its open outputs.
// Its advantage is that it simplifies graph construction: instead of allocating and
// linking every filter by hand, a complex graph can be built from a single string.
if ((ret = avfilter_graph_parse_ptr(filter_graph, filters_descr, &inputs,
&outputs, NULL)) < 0) {
fprintf(stderr, "avfilter_graph_parse_ptr failed\n");
goto end;
}
printf("avfilter_graph_parse_ptr success\n");
avfilter_graph_set_auto_convert(filter_graph, AVFILTER_AUTO_CONVERT_ALL);
hwupload_ctx = avfilter_graph_get_filter(filter_graph, "Parsed_hwupload_0");
if (hwupload_ctx) {
// Hand the rkmpp device context to the hwupload filter; the filter takes over the reference
hwupload_ctx->hw_device_ctx = hw_device_ctx;
fprintf(stderr, "hwupload filter found: %s\n", hwupload_ctx->filter->name);
}
// Print how many filters the graph contains
fprintf(stderr, "nb_filters: %u\n", filter_graph->nb_filters);
// Dump the filter graph for debugging (the returned string must be freed)
char *graph_dump = avfilter_graph_dump(filter_graph, NULL);
printf("%s\n", graph_dump);
av_free(graph_dump);
// avfilter_graph_config() validates the graph and configures all links and formats
// between the filters; it is called once the graph has been built and connected.
if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0) {
fprintf(stderr, "avfilter_graph_config failed\n");
goto end;
}
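// At this point the graph is fully linked and negotiated:
// buffer(in) -> hwupload -> scale_rkrga -> hwdownload -> buffersink(out),
// plus any conversion filters auto-inserted by AVFILTER_AUTO_CONVERT_ALL.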
end:
avfilter_inout_free(&inputs);
avfilter_inout_free(&outputs);
return ret;
}
int yuyv422_to_rgb(AVFrame *yuyv422_frame, AVFrame *rgb_frame)
{
int ret;
while (1) {
/* Push the raw frame into the filter graph */
if (av_buffersrc_write_frame(buffersrc_ctx, yuyv422_frame) < 0) {
av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
return -1;
}
/* Pull the filtered frame out of the filter graph */
ret = av_buffersink_get_frame(buffersink_ctx, rgb_frame);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
continue;
}
return ret;
}
}
int read_yuyv422_frame(const char *input_file, AVFrame *yuyv422_frame)
{
long file_size;
// Open the input file and read its contents
FILE *input_file_ptr = fopen(input_file, "rb");
if (!input_file_ptr) {
perror("Cannot open input file");
return -1;
}
// Seek to the end of the file
fseek(input_file_ptr, 0, SEEK_END);
// Get the file size
file_size = ftell(input_file_ptr);
if (file_size == -1) {
perror("Cannot get file size");
fclose(input_file_ptr);
return -1;
}
// Seek back to the beginning of the file
fseek(input_file_ptr, 0, SEEK_SET);
// Print the file size
printf("File size: %ld bytes\n", file_size);
// Allocate memory for the file contents
uint8_t *file_content = malloc(file_size);
if (!file_content) {
perror("Failed to allocate memory for file content");
fclose(input_file_ptr);
return -1;
}
if (fread(file_content, 1, file_size, input_file_ptr) != (size_t)file_size) {
perror("Failed to read the entire file");
fclose(input_file_ptr);
free(file_content);
return -1;
}
// Close the file
fclose(input_file_ptr);
const int yuyv422_linesize = width * 2; // bytes per line: each YUYV422 pixel takes 2 bytes
// Set the frame properties
yuyv422_frame->format = AV_PIX_FMT_YUYV422;
yuyv422_frame->width = width;
yuyv422_frame->height = height;
// Point data[0] at the file contents and set the linesize
yuyv422_frame->data[0] = file_content;
yuyv422_frame->linesize[0] = yuyv422_linesize;
return 0;
}
int main(int argc, char **argv) {
int ret;
AVFrame *yuyv422_frame = NULL;
AVFrame *rgb_frame = NULL;
char *input_file;
char *output_file;
if (argc != 3) {
fprintf(stderr, "Usage: %s input_file output_file\n", argv[0]);
exit(1);
}
input_file = argv[1];
output_file = argv[2];
if ((ret = init_filters(filter_descr)) < 0) {
fprintf(stderr, "init_filters failed\n");
return -1;
}
// Allocate the AVFrames
yuyv422_frame = av_frame_alloc();
rgb_frame = av_frame_alloc();
if (!yuyv422_frame || !rgb_frame) {
av_log(NULL, AV_LOG_ERROR, "Could not allocate video frame\n");
return -1;
}
// Read the image data from the YUYV422 file and attach it to yuyv422_frame
ret = read_yuyv422_frame(input_file, yuyv422_frame);
if (ret < 0) {
fprintf(stderr, "read_yuyv422_frame failed\n");
return -1;
}
// Convert the YUYV422 frame to RGB24 through the filter graph
ret = yuyv422_to_rgb(yuyv422_frame, rgb_frame);
if (ret < 0) {
fprintf(stderr, "yuyv422_to_rgb failed\n");
return -1;
}
// Compute the size of one RGB frame
int frame_size = av_image_get_buffer_size(rgb_frame->format, rgb_frame->width, rgb_frame->height, 1);
if (frame_size < 0) {
fprintf(stderr, "Cannot compute frame size\n");
return -1;
}
printf("Frame size: %d bytes\n", frame_size);
printf("rgb_frame->format: %d, AV_PIX_FMT_RGB24: %d\n", rgb_frame->format, AV_PIX_FMT_RGB24);
printf("rgb_frame->data[0]: %p\n", rgb_frame->data[0]);
printf("rgb_frame->linesize[0]: %d\n", rgb_frame->linesize[0]);
printf("rgb_frame->width: %d\n", rgb_frame->width);
printf("rgb_frame->height: %d\n", rgb_frame->height);
// Save the RGB data to output_file, row by row to skip any linesize padding
FILE *fp = fopen(output_file, "wb");
if (!fp) { perror("Cannot open output file"); return -1; }
for (int y = 0; y < rgb_frame->height; y++)
fwrite(rgb_frame->data[0] + y * rgb_frame->linesize[0], 1, rgb_frame->width * 3, fp);
fclose(fp);
return 0;
}
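If scale_rkrga is not available, a pure-software reference conversion with libswscale (which the Makefile below already links) can be used to produce an output to compare against. This is only a sketch under the same 1280x720 assumptions; the function yuyv422_to_rgb_sw is not part of the program above.
#include <libswscale/swscale.h>
// Software fallback: convert one YUYV422 frame to packed RGB24 with libswscale.
// yuyv holds width*height*2 bytes of YUYV422, rgb must hold width*height*3 bytes.
static int yuyv422_to_rgb_sw(const uint8_t *yuyv, uint8_t *rgb, int w, int h) {
struct SwsContext *sws = sws_getContext(w, h, AV_PIX_FMT_YUYV422,
w, h, AV_PIX_FMT_RGB24,
SWS_BILINEAR, NULL, NULL, NULL);
if (!sws)
return -1;
const uint8_t *src[4] = {yuyv, NULL, NULL, NULL};
int src_linesize[4] = {w * 2, 0, 0, 0}; // 2 bytes per YUYV422 pixel
uint8_t *dst[4] = {rgb, NULL, NULL, NULL};
int dst_linesize[4] = {w * 3, 0, 0, 0}; // 3 bytes per RGB24 pixel
sws_scale(sws, src, src_linesize, 0, h, dst, dst_linesize);
sws_freeContext(sws);
return 0;
}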
Makefile
C_COMPILER = aarch64-rockchip1031-linux-gnu-gcc
CFLAGS = -I/home/lingke/mnt/usr/include
LDFLAGS = -L/home/lingke/mnt/usr/lib \
-lrockchip_mpp -ldrm -lrga \
-lavcodec -lavformat -lavutil -lavfilter -lswscale -lswresample -lavdevice -lpostproc -lpthread -lm -lz
all: main.exe
main.exe: main.c
$(C_COMPILER) $(CFLAGS) -o $@ $^ $(LDFLAGS)
clean:
-rm -rf main.exe main.o
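Build it with the cross toolchain configured above:
make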
Prepare a YUYV422 image
Since the camera's default output format is YUYV422, capture one frame and preview it:
ffmpeg -f v4l2 -i /dev/video81 -vframes 1 -pix_fmt yuyv422 -s 1280x720 snapshot.yuv
ffplay -f rawvideo -pixel_format yuyv422 -video_size 1280x720 snapshot.yuv
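Each YUYV422 pixel takes 2 bytes, so the raw frame should be exactly 1280 x 720 x 2 = 1843200 bytes:
ls -l snapshot.yuv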
Rename it to input.yuyv422
mv snapshot.yuv input.yuyv422
Conversion example
./main.exe input.yuyv422 output.rgb24
ffplay -f rawvideo -pixel_format rgb24 -video_size 1280x720 -framerate 1 output.rgb24
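The output should be 1280 x 720 x 3 = 2764800 bytes of packed RGB24. Assuming the host ffmpeg has a PNG encoder, it can also be saved as a viewable image:
ffmpeg -f rawvideo -pixel_format rgb24 -video_size 1280x720 -i output.rgb24 output.png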