FFmpeg: converting a decoded YUV frame to RGB24 (YUV2RGB)

/*
 * Convert a decoded YUV frame to a packed RGB24 frame.
 *
 * avctx  - codec context describing the source frame (width, height, pix_fmt).
 * frame  - decoded source frame in avctx->pix_fmt.
 *
 * Returns a newly allocated AVFrame holding RGB24 pixel data, or NULL on
 * allocation/conversion-setup failure. Ownership transfers to the caller:
 * the caller must av_free() both pFrameRGB->data[0] (the pixel buffer) and
 * the frame itself when done.
 *
 * Note: uses the legacy (pre-2.x) FFmpeg API (avcodec_alloc_frame,
 * avpicture_*, PIX_FMT_*), kept for consistency with the original code.
 */
AVFrame* YUV2RGB( AVCodecContext * avctx, AVFrame * frame )
{
	AVFrame *pFrameRGB = avcodec_alloc_frame();
	if (pFrameRGB == NULL)
		return NULL;

	/* Determine required buffer size and allocate the pixel buffer.
	 * Use PIX_FMT_RGB24 consistently: the original code allocated with
	 * PIX_FMT_BGR24 but converted into PIX_FMT_RGB24, mislabelling the
	 * output frame's channel order. */
	int numBytes = avpicture_get_size(PIX_FMT_RGB24, avctx->width, avctx->height);
	uint8_t *buffer = (uint8_t *)av_malloc(numBytes * sizeof(uint8_t));
	if (buffer == NULL) {
		av_free(pFrameRGB);
		return NULL;
	}

	/* Assign appropriate parts of buffer to the image planes in pFrameRGB.
	 * pFrameRGB is an AVFrame, but AVFrame is a superset of AVPicture. */
	avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24,
		avctx->width, avctx->height);

	/* Passing NULL as the cached context means a fresh context is created
	 * on every call, so it must be freed below to avoid a leak. */
	struct SwsContext *img_convert_ctx = sws_getCachedContext(NULL,
		avctx->width, avctx->height, avctx->pix_fmt,
		avctx->width, avctx->height, PIX_FMT_RGB24,
		SWS_BICUBIC, NULL, NULL, NULL);
	if (!img_convert_ctx) {
		/* Original code fell through and called sws_scale() with a NULL
		 * context, which crashes. Clean up and fail instead. */
		fprintf(stderr, "Cannot initialize sws conversion context\n");
		av_free(buffer);
		av_free(pFrameRGB);
		return NULL;
	}

	sws_scale(img_convert_ctx, (const uint8_t* const*)frame->data,
		frame->linesize, 0, avctx->height, pFrameRGB->data,
		pFrameRGB->linesize);

	sws_freeContext(img_convert_ctx);

	return pFrameRGB;
}

  

Note: the returned pFrameRGB (and the pixel buffer attached to it) must be freed by the caller when no longer needed.
原文地址:https://www.cnblogs.com/ahuo/p/3421416.html