AVFrame and cv::Mat

extern "C" {  
#include "libavutil/avutil.h"  
#include "libavcodec/avcodec.h"  
#include "libavformat/avformat.h"  
#include "libswscale/swscale.h"  
#include <libavutil/imgutils.h>  
}  
#include <opencv2/core.hpp>  


// Convert an AVFrame (in any pixel format) to a BGR24 cv::Mat.
cv::Mat avframeToCvmat(const AVFrame *frame)
{
    int width = frame->width;
    int height = frame->height;
    cv::Mat image(height, width, CV_8UC3);
    int cvLinesizes[1];
    cvLinesizes[0] = (int)image.step1();
    // Convert from the frame's native pixel format to BGR24, OpenCV's default channel order.
    SwsContext *conversion = sws_getContext(width, height, (AVPixelFormat)frame->format,
                                            width, height, AV_PIX_FMT_BGR24,
                                            SWS_FAST_BILINEAR, NULL, NULL, NULL);
    sws_scale(conversion, frame->data, frame->linesize, 0, height, &image.data, cvLinesizes);
    sws_freeContext(conversion);
    return image;
}
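Note that the cv::Mat created inside avframeToCvmat owns its own BGR buffer (sws_scale copies the pixels into it), so the source AVFrame can be unreferenced as soon as the call returns. A small sketch of how this could sit in a decode loop, not from the original post; dec_ctx stands for an AVCodecContext opened elsewhere, and packets are assumed to be fed in with avcodec_send_packet() somewhere else:

// Sketch of a receive loop; dec_ctx is a decoder context set up elsewhere.
AVFrame *frm = av_frame_alloc();
while (avcodec_receive_frame(dec_ctx, frm) == 0) {
    cv::Mat bgr = avframeToCvmat(frm);   // pixels are copied into the Mat's own buffer
    av_frame_unref(frm);                 // safe: bgr no longer depends on frm
    // ... work with bgr ...
}
av_frame_free(&frm);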


// Convert a BGR24 cv::Mat to an AVFrame.
// If frame is NULL, a new YUV420P frame of the same size is allocated;
// otherwise the Mat is converted into whatever pixel format the frame already carries.
AVFrame* cvmatToAvframe(cv::Mat *image, AVFrame *frame)
{
    int width = image->cols;
    int height = image->rows;
    int cvLinesizes[1];
    cvLinesizes[0] = (int)image->step1();
    if (frame == NULL) {
        frame = av_frame_alloc();
        // av_frame_alloc() leaves format/width/height unset, so fill them in before
        // allocating the image buffer; otherwise frame->format below is invalid.
        frame->format = AV_PIX_FMT_YUV420P;
        frame->width = width;
        frame->height = height;
        av_image_alloc(frame->data, frame->linesize, width, height, AV_PIX_FMT_YUV420P, 1);
    }
    SwsContext *conversion = sws_getContext(width, height, AV_PIX_FMT_BGR24,
                                            width, height, (AVPixelFormat)frame->format,
                                            SWS_FAST_BILINEAR, NULL, NULL, NULL);
    sws_scale(conversion, &image->data, cvLinesizes, 0, height, frame->data, frame->linesize);
    sws_freeContext(conversion);
    return frame;
}
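The two helpers can be used as a round trip. The following is a minimal usage sketch (not from the original post): it assumes an already-decoded AVFrame, here called decoded as a placeholder name, converts it to a Mat, runs an arbitrary OpenCV operation on it, and converts the result back into a newly allocated YUV420P frame.

// Minimal round-trip sketch; `decoded` is a placeholder for a frame obtained from a decoder.
void roundTripExample(AVFrame *decoded)
{
    // AVFrame -> BGR24 Mat (deep copy)
    cv::Mat bgr = avframeToCvmat(decoded);

    // Any OpenCV processing; here the image is simply inverted.
    cv::bitwise_not(bgr, bgr);

    // Mat -> new YUV420P AVFrame (passing NULL makes the helper allocate one)
    AVFrame *out = cvmatToAvframe(&bgr, NULL);

    // ... encode or display out, then release it.
    av_freep(&out->data[0]);   // the buffer was allocated by av_image_alloc()
    av_frame_free(&out);
}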
// Convert a decoded frame to BGR24, wrap it in a cv::Mat for processing,
// and then convert it back to the stream's native pixel format.
// ifmt_ctx is assumed to be an AVFormatContext* opened elsewhere.
AVFrame* ProcessFrame(AVFrame *frame, int stream_index)
{
    // First part: AVFrame -> BGR24 cv::Mat
    AVStream *in_stream = ifmt_ctx->streams[stream_index];
    AVCodecParameters *par = in_stream->codecpar;   // AVStream::codec is deprecated/removed
    int width = par->width;
    int height = par->height;
    AVPixelFormat pix_fmt = (AVPixelFormat)par->format;

    SwsContext *img_convert_ctx = sws_getContext(width, height, pix_fmt,
                                                 width, height, AV_PIX_FMT_BGR24,
                                                 SWS_BICUBIC, NULL, NULL, NULL);

    AVFrame *pFrameRGB = av_frame_alloc();
    int size = av_image_get_buffer_size(AV_PIX_FMT_BGR24, width, height, 1);
    uint8_t *out_bufferRGB = (uint8_t *)av_malloc(size);
    av_image_fill_arrays(pFrameRGB->data, pFrameRGB->linesize, out_bufferRGB,
                         AV_PIX_FMT_BGR24, width, height, 1);
    sws_scale(img_convert_ctx, frame->data, frame->linesize, 0, height,
              pFrameRGB->data, pFrameRGB->linesize);
    sws_freeContext(img_convert_ctx);

    // The Mat wraps out_bufferRGB directly; no pixel data is copied here.
    cv::Mat imageFrame = cv::Mat(height, width, CV_8UC3, out_bufferRGB);

    // ... process imageFrame with OpenCV here ...

    // Second part: cv::Mat -> AVFrame in the original pixel format.
    // Re-point pFrameRGB at the Mat's data in case processing replaced the buffer.
    av_image_fill_arrays(pFrameRGB->data, pFrameRGB->linesize, imageFrame.data,
                         AV_PIX_FMT_BGR24, width, height, 1);

    SwsContext *convert_ctx = sws_getContext(width, height, AV_PIX_FMT_BGR24,
                                             width, height, pix_fmt,
                                             SWS_BICUBIC, NULL, NULL, NULL);

    AVFrame *srcFrame = av_frame_alloc();
    size = av_image_get_buffer_size(pix_fmt, width, height, 1);
    uint8_t *out_buffer = (uint8_t *)av_malloc(size);   // backing buffer for srcFrame
    av_image_fill_arrays(srcFrame->data, srcFrame->linesize, out_buffer,
                         pix_fmt, width, height, 1);
    sws_scale(convert_ctx, pFrameRGB->data, pFrameRGB->linesize, 0, height,
              srcFrame->data, srcFrame->linesize);
    sws_freeContext(convert_ctx);

    av_frame_free(&pFrameRGB);
    av_free(out_bufferRGB);

    srcFrame->width = frame->width;
    srcFrame->height = frame->height;
    srcFrame->format = frame->format;
    av_frame_copy_props(srcFrame, frame);

    return srcFrame;
}
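ProcessFrame builds and tears down two SwsContext objects for every frame, which gets expensive over a long stream. One way to avoid that (not shown in the original post) is sws_getCachedContext(), which returns the existing context unchanged when the parameters are the same and only reallocates it when they differ. A rough sketch, assuming the same width/height/pixel-format values as above:

// Sketch: reuse one scaler across calls instead of rebuilding it per frame.
void scaleToBgrCached(AVFrame *frame, AVFrame *pFrameRGB,
                      int width, int height, AVPixelFormat pix_fmt)
{
    static SwsContext *cached = NULL;   // lives across calls; free with sws_freeContext() at shutdown
    // Returns the old context if nothing changed, otherwise frees it and builds a new one.
    cached = sws_getCachedContext(cached,
                                  width, height, pix_fmt,
                                  width, height, AV_PIX_FMT_BGR24,
                                  SWS_BICUBIC, NULL, NULL, NULL);
    sws_scale(cached, frame->data, frame->linesize, 0, height,
              pFrameRGB->data, pFrameRGB->linesize);
}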
extern "C" {  
#include "libavutil/avutil.h"  
#include "libavcodec/avcodec.h"  
#include "libavformat/avformat.h"  
#include "libswscale/swscale.h"  
#include <libavutil/imgutils.h>  
}  
#include <opencv2/core.hpp>  


//AVFrame 转 cv::mat  
cv::Mat avframeToCvmat(const AVFrame * frame)  
{  
    int width = frame->width;  
    int height = frame->height;  
    cv::Mat image(height, width, CV_8UC3);  
    int cvLinesizes[1];  
    cvLinesizes[0] = image.step1();  
    SwsContext* conversion = sws_getContext(width, height, (AVPixelFormat) frame->format, width, height, AVPixelFormat::AV_PIX_FMT_BGR24, SWS_FAST_BILINEAR, NULL, NULL, NULL);  
    sws_scale(conversion, frame->data, frame->linesize, 0, height, &image.data, cvLinesizes);  
    sws_freeContext(conversion);  
    return image;  
}  
Original source: https://www.cnblogs.com/yangwithtao/p/11857704.html