A Practical Guide to FFmpeg's AVFilter Module

A typical audio/video development pipeline looks roughly like the flow shown below. The encoding and decoding stages are fairly fixed, and FFmpeg handles them well. Frame processing (for both audio and video data) is much more varied: resizing video, normalizing audio volume, beautification filters in live streaming, compositing multiple streams, and so on all belong to this stage. This article introduces frame processing with FFmpeg's AVFilter module, which provides a clean abstraction for this kind of work. Its filter graph concept maps naturally onto multi-stage filtering, and because the filter interface is standardized, adding custom filters later is straightforward. Most material online about AVFilter covers the ffmpeg command line; code-level examples are rare. I recently had to use AVFilter in a project, so here is a short write-up that I hope helps anyone with the same need.

Raw audio/video --> decode --> frame processing --> encode --> output audio/video

1. Key Structures and APIs

// Manages the whole set of filters (the filter graph)
struct AVFilterGraph
{
    AVFilterContext **filters;
    unsigned nb_filters;
};
// Describes a filter itself: its capabilities, its pads, and its callback interfaces
struct AVFilter
{
    const char *name;
    const AVFilterPad *inputs;
    const AVFilterPad *outputs;
};
// A filter instance; manages the filter's connections to the outside world
struct AVFilterContext
{
    const AVFilter *filter;
    char *name;
    
    AVFilterPad *input_pads;
    AVFilterLink **inputs;
    unsigned nb_inputs;
    
    AVFilterPad *output_pads;
    AVFilterLink **outputs;
    unsigned nb_outputs;
    
    struct AVFilterGraph *graph;
};
// A link between two filters
struct AVFilterLink
{
    AVFilterContext *src;
    AVFilterPad *srcpad;
    
    AVFilterContext *dst;
    AVFilterPad *dstpad;
    
    struct AVFilterGraph *graph;
};
// An input or output pad of a filter
struct AVFilterPad
{
    const char *name;
    AVFrame *(*get_video_buffer)(AVFilterLink *link, int w, int h);
    AVFrame *(*get_audio_buffer)(AVFilterLink *link, int nb_samples);
    int (*filter_frame)(AVFilterLink *link, AVFrame *frame);
    int (*request_frame)(AVFilterLink *link);
};
// A list node describing an unconnected input/output of a filter graph
struct AVFilterInOut
{
    char *name;
    AVFilterContext *filter_ctx;
    int pad_idx;
    struct AVFilterInOut *next;
};

The AVFilter module defines the AVFilter structure. Each AVFilter is a node with a self-contained function: the scale filter resizes images, the overlay filter composites one image onto another, and so on. Two filters deserve special mention: buffer and buffersink. The buffer filter is the source of the filter graph, where raw frames are fed in; the buffersink filter is the output node of the graph, from which processed frames are pulled out.

// Look up one of FFmpeg's filters by name; avfilter_register_all() must be called first to register the filters
AVFilter *avfilter_get_by_name(const char *name);

// Feed a frame to be processed into the buffer source filter
int av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame);

// Pull a processed frame out of the buffersink filter
int av_buffersink_get_frame(AVFilterContext *ctx, AVFrame *frame);

// Allocate a filter graph
AVFilterGraph *avfilter_graph_alloc(void);

// Create a filter instance (AVFilterContext) and add it to the AVFilterGraph
int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt,
                                 const char *name, const char *args, void *opaque,
                                 AVFilterGraph *graph_ctx);

// Link two filter nodes
int avfilter_link(AVFilterContext *src, unsigned srcpad,
                  AVFilterContext *dst, unsigned dstpad);
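
Putting these calls together, the usual order is: register the filters, allocate a graph, create the filter instances, link them, and validate everything with avfilter_graph_config(). Below is a minimal sketch that wires a buffer source straight into a buffersink (a pass-through graph). It assumes the same legacy API level as the full example in section 3 (avfilter_register_all(), which newer FFmpeg releases have deprecated) and the same 480x272 yuv420p, 25 fps input description; per-frame processing would then use av_buffersrc_add_frame() and av_buffersink_get_frame() as shown later.

extern "C" {
#include <libavfilter/avfilter.h>
#include <libavfilter/avfiltergraph.h>
}
#include <cstdio>

// Sketch only: build and validate a pass-through graph (buffer -> buffersink).
int build_passthrough_graph(void)
{
    avfilter_register_all();                       // legacy registration call

    AVFilterGraph *graph = avfilter_graph_alloc();
    if (!graph)
        return -1;

    // Same input description as the full example: 480x272 yuv420p at 25 fps.
    const char *args =
        "video_size=480x272:pix_fmt=0:time_base=1/25:pixel_aspect=1/1";

    AVFilterContext *src_ctx = NULL, *sink_ctx = NULL;
    int ret = avfilter_graph_create_filter(&src_ctx,
                  avfilter_get_by_name("buffer"), "in", args, NULL, graph);
    if (ret >= 0)
        ret = avfilter_graph_create_filter(&sink_ctx,
                  avfilter_get_by_name("buffersink"), "out", NULL, NULL, graph);
    if (ret >= 0)
        ret = avfilter_link(src_ctx, 0, sink_ctx, 0);  // source pad 0 -> sink pad 0
    if (ret >= 0)
        ret = avfilter_graph_config(graph, NULL);      // validate links and formats

    if (ret < 0)
        printf("Failed to build the filter graph: %d\n", ret);

    avfilter_graph_free(&graph);
    return ret;
}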

2. The Overall AVFilter Workflow

Before processing audio/video data with AVFilter, it helps to sketch the processing flow first. The following example is taken from the official FFmpeg filter documentation.

                [main]
input --> split ---------------------> overlay --> output
            |                             ^
            |[tmp]                  [flip]|
            +-----> crop --> vflip -------+

The processing flow of this example is shown above. First the split filter splits the input stream into two streams (main and tmp), which are then processed separately. The tmp stream first goes through the crop filter to be cropped, then through the vflip filter to be flipped vertically; the result of this branch is labeled flip. The main stream and the flip stream are then fed into the overlay filter and composited. The input node above is the buffer source filter mentioned earlier, and the output node is the buffersink filter. Every node in the diagram is an AVFilterContext, and every connecting line is an AVFilterLink. All of this is managed by the AVFilterGraph.
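
For reference, the FFmpeg filtering documentation writes this same graph as a single filtergraph string on the command line:

ffmpeg -i INPUT -vf "split [main][tmp]; [tmp] crop=iw:ih/2:0:0, vflip [flip]; [main][flip] overlay=0:H/2" OUTPUT

Section 3 builds exactly this topology in code, creating each filter and each link explicitly.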


3. Example Implementation

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavfilter/avfiltergraph.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/opt.h>
#include <libavutil/imgutils.h>
}

int main(int argc, char* argv[])
{
	int ret = 0;

	// input yuv
	FILE* inFile = NULL;
	const char* inFileName = "sintel_480x272_yuv420p.yuv";
	fopen_s(&inFile, inFileName, "rb+");
	if (!inFile) {
		printf("Fail to open file\n");
		return -1;
	}

	int in_width = 480;
	int in_height = 272;

	// output yuv
	FILE* outFile = NULL;
	const char* outFileName = "out_crop_vfilter.yuv";
	fopen_s(&outFile, outFileName, "wb");
	if (!outFile) {
		printf("Fail to create file for output\n");
		return -1;
	}

	avfilter_register_all();

	AVFilterGraph* filter_graph = avfilter_graph_alloc();
	if (!filter_graph) {
		printf("Fail to create filter graph!\n");
		return -1;
	}

	// source filter
	char args[512];
	_snprintf_s(args, sizeof(args),
		"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
		in_width, in_height, AV_PIX_FMT_YUV420P,
		1, 25, 1, 1);
	AVFilter* bufferSrc = avfilter_get_by_name("buffer");
	AVFilterContext* bufferSrc_ctx;
	ret = avfilter_graph_create_filter(&bufferSrc_ctx, bufferSrc, "in", args, NULL, filter_graph);
	if (ret < 0) {
		printf("Fail to create filter bufferSrc\n");
		return -1;
	}

	// sink filter
	AVBufferSinkParams *bufferSink_params;
	AVFilterContext* bufferSink_ctx;
	AVFilter* bufferSink = avfilter_get_by_name("buffersink");
	enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
	bufferSink_params = av_buffersink_params_alloc();
	bufferSink_params->pixel_fmts = pix_fmts;
	ret = avfilter_graph_create_filter(&bufferSink_ctx, bufferSink, "out", NULL, bufferSink_params, filter_graph);
	av_free(bufferSink_params);
	if (ret < 0) {
		printf("Fail to create filter sink filter\n");
		return -1;
	}

	// split filter
	AVFilter *splitFilter = avfilter_get_by_name("split");
	AVFilterContext *splitFilter_ctx;
	ret = avfilter_graph_create_filter(&splitFilter_ctx, splitFilter, "split", "outputs=2", NULL, filter_graph);
	if (ret < 0) {
		printf("Fail to create split filter\n");
		return -1;
	}

	// crop filter
	AVFilter *cropFilter = avfilter_get_by_name("crop");
	AVFilterContext *cropFilter_ctx;
	ret = avfilter_graph_create_filter(&cropFilter_ctx, cropFilter, "crop", "out_w=iw:out_h=ih/2:x=0:y=0", NULL, filter_graph);
	if (ret < 0) {
		printf("Fail to create crop filter\n");
		return -1;
	}

	// vflip filter
	AVFilter *vflipFilter = avfilter_get_by_name("vflip");
	AVFilterContext *vflipFilter_ctx;
	ret = avfilter_graph_create_filter(&vflipFilter_ctx, vflipFilter, "vflip", NULL, NULL, filter_graph);
	if (ret < 0) {
		printf("Fail to create vflip filter\n");
		return -1;
	}

	// overlay filter
	AVFilter *overlayFilter = avfilter_get_by_name("overlay");
	AVFilterContext *overlayFilter_ctx;
	ret = avfilter_graph_create_filter(&overlayFilter_ctx, overlayFilter, "overlay", "y=0:H/2", NULL, filter_graph);
	if (ret < 0) {
		printf("Fail to create overlay filter\n");
		return -1;
	}

	// src filter to split filter
	ret = avfilter_link(bufferSrc_ctx, 0, splitFilter_ctx, 0);
	if (ret != 0) {
		printf("Fail to link src filter and split filter\n");
		return -1;
	}
	// split filter's first pad to overlay filter's main pad
	ret = avfilter_link(splitFilter_ctx, 0, overlayFilter_ctx, 0);
	if (ret != 0) {
		printf("Fail to link split filter and overlay filter main pad\n");
		return -1;
	}
	// split filter's second pad to crop filter
	ret = avfilter_link(splitFilter_ctx, 1, cropFilter_ctx, 0);
	if (ret != 0) {
		printf("Fail to link split filter's second pad and crop filter\n");
		return -1;
	}
	// crop filter to vflip filter
	ret = avfilter_link(cropFilter_ctx, 0, vflipFilter_ctx, 0);
	if (ret != 0) {
		printf("Fail to link crop filter and vflip filter\n");
		return -1;
	}
	// vflip filter to overlay filter's second pad
	ret = avfilter_link(vflipFilter_ctx, 0, overlayFilter_ctx, 1);
	if (ret != 0) {
		printf("Fail to link vflip filter and overlay filter's second pad\n");
		return -1;
	}
	// overlay filter to sink filter
	ret = avfilter_link(overlayFilter_ctx, 0, bufferSink_ctx, 0);
	if (ret != 0) {
		printf("Fail to link overlay filter and sink filter\n");
		return -1;
	}

	// check filter graph
	ret = avfilter_graph_config(filter_graph, NULL);
	if (ret < 0) {
		printf("Fail in filter graph\n");
		return -1;
	}

	char *graph_str = avfilter_graph_dump(filter_graph, NULL);
	FILE* graphFile = NULL;
	fopen_s(&graphFile, "graphFile.txt", "w");
	if (graphFile) {
		fprintf(graphFile, "%s", graph_str);
		fclose(graphFile);
	}
	av_free(graph_str);

	AVFrame *frame_in = av_frame_alloc();
	unsigned char *frame_buffer_in = (unsigned char *)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P, in_width, in_height, 1));
	av_image_fill_arrays(frame_in->data, frame_in->linesize, frame_buffer_in,
		AV_PIX_FMT_YUV420P, in_width, in_height, 1);

	AVFrame *frame_out = av_frame_alloc();
	unsigned char *frame_buffer_out = (unsigned char *)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P, in_width, in_height, 1));
	av_image_fill_arrays(frame_out->data, frame_out->linesize, frame_buffer_out,
		AV_PIX_FMT_YUV420P, in_width, in_height, 1);

	frame_in->width = in_width;
	frame_in->height = in_height;
	frame_in->format = AV_PIX_FMT_YUV420P;

	while (1) {

		if (fread(frame_buffer_in, 1, in_width*in_height * 3 / 2, inFile) != in_width*in_height * 3 / 2) {
			break;
		}
		//input Y,U,V
		frame_in->data[0] = frame_buffer_in;
		frame_in->data[1] = frame_buffer_in + in_width*in_height;
		frame_in->data[2] = frame_buffer_in + in_width*in_height * 5 / 4;

		if (av_buffersrc_add_frame(bufferSrc_ctx, frame_in) < 0) {
			printf("Error while add frame.\n");
			break;
		}

		/* pull filtered pictures from the filtergraph */
		ret = av_buffersink_get_frame(bufferSink_ctx, frame_out);
		if (ret < 0)
			break;

		//output Y,U,V
		if (frame_out->format == AV_PIX_FMT_YUV420P) {
			for (int i = 0; i < frame_out->height; i++) {
				fwrite(frame_out->data[0] + frame_out->linesize[0] * i, 1, frame_out->width, outFile);
			}
			for (int i = 0; i < frame_out->height / 2; i++) {
				fwrite(frame_out->data[1] + frame_out->linesize[1] * i, 1, frame_out->width / 2, outFile);
			}
			for (int i = 0; i < frame_out->height / 2; i++) {
				fwrite(frame_out->data[2] + frame_out->linesize[2] * i, 1, frame_out->width / 2, outFile);
			}
		}
		printf("Process 1 frame!\n");
		av_frame_unref(frame_out);
	}

	fclose(inFile);
	fclose(outFile);

	av_frame_free(&frame_in);
	av_frame_free(&frame_out);
	avfilter_graph_free(&filter_graph);
	return 0;
}
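
The output file is raw planar YUV at the same 480x272 size as the input, so it can be inspected with ffplay:

ffplay -f rawvideo -video_size 480x272 -pixel_format yuv420p out_crop_vfilter.yuv

Creating and linking every filter by hand is verbose. The same split/crop/vflip/overlay chain can also be described as a string and parsed with avfilter_graph_parse_ptr(), following the pattern of FFmpeg's doc/examples/filtering_video.c. A sketch, assuming only filter_graph, bufferSrc_ctx, and bufferSink_ctx have been created as above; the string then creates and links split, crop, vflip, and overlay, replacing both the manual avfilter_graph_create_filter() calls for those filters and all of the avfilter_link() calls:

// Sketch: build the same chain from a description string instead of
// explicit filter creation and avfilter_link() calls.
const char *filters_descr =
    "split[main][tmp];"
    "[tmp]crop=iw:ih/2:0:0,vflip[flip];"
    "[main][flip]overlay=0:H/2";

// "outputs" describes where the graph's open input ("in") is fed from:
// the buffer source created above. "inputs" describes where the graph's
// open output ("out") is consumed: the buffersink created above.
AVFilterInOut *outputs = avfilter_inout_alloc();
AVFilterInOut *inputs  = avfilter_inout_alloc();

outputs->name       = av_strdup("in");
outputs->filter_ctx = bufferSrc_ctx;
outputs->pad_idx    = 0;
outputs->next       = NULL;

inputs->name        = av_strdup("out");
inputs->filter_ctx  = bufferSink_ctx;
inputs->pad_idx     = 0;
inputs->next        = NULL;

ret = avfilter_graph_parse_ptr(filter_graph, filters_descr,
                               &inputs, &outputs, NULL);
if (ret >= 0)
    ret = avfilter_graph_config(filter_graph, NULL);

avfilter_inout_free(&inputs);
avfilter_inout_free(&outputs);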

GitHub code repository

Original article: https://www.cnblogs.com/lidabo/p/15407936.html