FFmpeg Primer: Tutorial01 (Category: ffmpeg-SDL-VLC-Live555, 2013-08-22 17:54)


#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <stdio.h>

void SaveFrame(AVFrame *pFrame, int width, int height, int iFrame)
{
  FILE *pFile;
  char szFilename[32];
  int  y;
 
  // Open file
  sprintf(szFilename, "frame%d.ppm", iFrame);
  pFile=fopen(szFilename, "wb");
  if(pFile==NULL)
    return;
 
  // Write header
  fprintf(pFile, "P6 %d %d 255 ", width, height);
 
  // Write pixel data
  for(y=0; y<height; y++)
    fwrite(pFrame->data[0]+y*pFrame->linesize[0], 1, width*3, pFile);
 
  // Close file
  fclose(pFile);
}
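
/* The "P6" PPM format written above is just an ASCII header followed by raw
   RGB triplets. For example, a 640x480 frame starts with the bytes
   "P6\n640 480\n255\n" and is followed by 640*480*3 bytes of pixel data,
   one R, G, B byte per pixel, written row by row. */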

int main(int argc, char *argv[])
{
  AVFormatContext *pFormatCtx;
  int             i, videoStream;
  AVCodecContext  *pCodecCtx;
  AVCodec         *pCodec;
  AVFrame         *pFrame;
  AVFrame         *pFrameRGB;
  AVPacket        packet;
  int             frameFinished;
  int             numBytes;
  uint8_t         *buffer;
  static int sws_flags = SWS_BICUBIC;
  struct SwsContext *img_convert_ctx;
   AVPicture pict; 
  // Hard-code the input file for debugging; remove these two lines to use the command line
  argc = 2;
  argv[1] = "d:\\temp\\test.264";
  if(argc < 2) {
    printf("Please provide a movie file ");
    return -1;
  }
  // Register all available formats and codecs
  av_register_all();
 
  // Open video file
  // Opens the media (source) file for input. The codecs are not opened here;
  // only the file header is read.
  if(av_open_input_file(&pFormatCtx, argv[1], NULL, 0, NULL)!=0)
    return -1; // Couldn't open file
 
  // Retrieve stream information
/* Reads packets from the media file to obtain information about its streams.
   This is very useful for formats without a header (e.g. raw MPEG); it
   typically recomputes the real frame rate for MPEG-2-style repeated-frame
   modes and does not change the logical file position. */
  if(av_find_stream_info(pFormatCtx)<0)
    return -1; // Couldn't find stream information
 
  // Dump information about file onto standard error
// Prints a human-readable summary of the container and its streams to stderr

  dump_format(pFormatCtx, 0, argv[1], 0);
 
  // Find the first video stream
  videoStream=-1;
  for(i=0; i<pFormatCtx->nb_streams; i++)
    if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_VIDEO&&videoStream < 0)
 {
      videoStream=i;
      break;
    }
  if(videoStream==-1)
    return -1; // Didn't find a video stream
 
  // Get a pointer to the codec context for the video stream
  pCodecCtx=pFormatCtx->streams[videoStream]->codec;
 
  // Find the decoder for the video stream
  pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
/* Looks up a registered decoder by codec ID. av_register_all() must have been
   called beforehand so that all supported codecs are registered. The codecs
   are kept in a linked list, which this function walks from head to tail,
   comparing codec IDs until a match is found. */
  if(pCodec==NULL) {
    fprintf(stderr, "Unsupported codec! ");
    return -1; // Codec not found
  }
  // Open codec
// Initializes the AVCodecContext with the given AVCodec


  if(avcodec_open(pCodecCtx, pCodec)<0)
    return -1; // Could not open codec
 
  // Allocate video frame
  pFrame=avcodec_alloc_frame();
 
  // Allocate an AVFrame structure
  pFrameRGB=avcodec_alloc_frame();
  if(pFrameRGB==NULL)
    return -1;
 
  // Determine required buffer size and allocate buffer
  numBytes=avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width,
         pCodecCtx->height);
  buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
 
  // Assign appropriate parts of buffer to image planes in pFrameRGB
  // Note that pFrameRGB is an AVFrame, but AVFrame is a superset
  // of AVPicture
  avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24,
   pCodecCtx->width, pCodecCtx->height);
 
  // Read frames and save five of them (frames 96-100) to disk
  i=0;
  while(av_read_frame(pFormatCtx, &packet)>=0)
  {
    // Is this a packet from the video stream?
    if(packet.stream_index==videoStream)
 {
    // Decode video frame
    avcodec_decode_video(pCodecCtx, pFrame, &frameFinished,
       packet.data, packet.size);
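     // avcodec_decode_video consumes one compressed packet; frameFinished is
     // set non-zero only when a complete picture is available (some packets
     // produce no output because of codec delay/reordering).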
     
    // Did we get a video frame?
    if(frameFinished)
    {
  // Convert the image from its native format to RGB

    #if 0
     // Convert the image into YUV format that SDL uses
     img_convert(&pict, PIX_FMT_YUV420P,
         (AVPicture *)pFrame, pCodecCtx->pix_fmt,
       pCodecCtx->width, pCodecCtx->height);
    #else
      /*   img_convert(&pict, dst_pix_fmt,
      (AVPicture *)pFrame, is->video_st->codec->pix_fmt,
      is->video_st->codec->width, is->video_st->codec->height);
      */


        // Convert the decoded frame to RGB24, the format that buffer,
        // pFrameRGB and SaveFrame (PPM output) all expect
        img_convert_ctx = sws_getContext(pCodecCtx->width,
               pCodecCtx->height,
               pCodecCtx->pix_fmt,
               pCodecCtx->width,
               pCodecCtx->height,
               PIX_FMT_RGB24,
               sws_flags, NULL, NULL, NULL);

        sws_scale(img_convert_ctx,(const uint8_t* const*)pFrame->data,pFrame->linesize,0,pCodecCtx->height,pFrameRGB->data,pFrameRGB->linesize);  
        sws_freeContext(img_convert_ctx);
    #endif
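     // Note: creating and freeing the SwsContext for every frame works, but it
     // is wasteful; normally the context is created once before the decode loop
     // (or obtained via sws_getCachedContext) and freed after the loop ends.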
     
     // Save the frame to disk
     if((++i<=100)&&(i>95))
       SaveFrame(pFrameRGB, pCodecCtx->width, pCodecCtx->height, i);
    }
    }
   
    // Free the packet that was allocated by av_read_frame
    av_free_packet(&packet);
  }
 
  // Free the RGB image
  av_free(buffer);
  av_free(pFrameRGB);
 
  // Free the YUV frame
  av_free(pFrame);
 
  // Close the codec
  avcodec_close(pCodecCtx);
 
  // Close the video file
  av_close_input_file(pFormatCtx);
 
  return 0;
}
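
To build this against an FFmpeg release from the same era (the pre-0.8 API used above: av_open_input_file, avcodec_decode_video, and friends), the link line is typically something like the following; library names and order can differ between systems, so treat it as a sketch rather than an exact recipe:

  gcc -o tutorial01 tutorial01.c -lavformat -lavcodec -lswscale -lavutil -lz -lm

Run it as tutorial01 <file> (or rely on the hard-coded path set at the top of main) and look for frame96.ppm through frame100.ppm in the working directory.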

Copyright notice: This is the author's original post; do not reproduce without the author's permission.

Original article: https://www.cnblogs.com/mao0504/p/4706833.html