Decoding a video stream with FFmpeg

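The original post does not show its headers; a minimal set for the listing below, assuming the pre-1.0 FFmpeg C API that the code uses and a plain using-directive for the bare vector/ofstream names, would look roughly like this:

//headers assumed by the listing below (not part of the original post)
extern "C"
{
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libswscale/swscale.h>
}
#include <vector>
#include <fstream>
#include <cstdio>
using namespace std;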
//initialize and register all codecs, (de)muxers and network support
avcodec_init();
av_register_all();
avformat_network_init();

//pick the test file
const char* FileName = "test.rmvb";
AVFormatContext *pFormatCtx;
if(av_open_input_file(&pFormatCtx, FileName, NULL, 0, NULL)!=0)
{
	printf("打开文件失败
");
	return -1;
}
dump_format(pFormatCtx, 0, FileName, 0);    //print the container and stream parameters

//probe the streams and fill in the format context
int err = av_find_stream_info(pFormatCtx);
if(err < 0)
{
	printf("查看流信息失败");
	return 0;
}

//collect the indices of all audio and video streams
vector<int> v_video_index;
vector<int> v_audio_index;
for(unsigned int i = 0;i < pFormatCtx->nb_streams; i++)
{
	if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
	{	
		v_audio_index.push_back(i);
	}
	if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
	{
		v_video_index.push_back(i);
	}
}
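As an aside, FFmpeg releases of roughly the same era already provide av_find_best_stream(), which picks a stream of a given type without the manual scan above; a hedged sketch (the exact const-ness of the decoder argument varies between versions):

//alternative to the manual scan above: let FFmpeg choose the video stream
AVCodec *dec = NULL;
int best_video = av_find_best_stream(pFormatCtx, AVMEDIA_TYPE_VIDEO, -1, -1, &dec, 0);
if(best_video < 0)
{
	printf("no video stream found\n");
	return -1;
}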

 //grab the first video stream's codec context and find a matching decoder
 AVCodecContext *pAVCodecCtx = pFormatCtx->streams[v_video_index[0]]->codec;
 AVCodec	   *pAVCodec = avcodec_find_decoder(pAVCodecCtx->codec_id);  
 if(!pAVCodec)
 {
  printf("Unsupported codec!\n");
  return -1;
 }
 if(avcodec_open(pAVCodecCtx, pAVCodec) < 0)    //open the decoder and check for failure
 {
  printf("failed to open codec\n");
  return -1;
 }

 
//initialize the packet
AVPacket packet;
av_init_packet(&packet);

int  got_picture_ptr=0;
int  len;
int  width;
int  height;
int  count=1;
ofstream o("frame_type.txt");    //the 'o' stream used below is not declared in the original post; the file name is assumed

//allocate frames to receive the decoded pictures
AVFrame  *picture = avcodec_alloc_frame();
AVFrame  *pFrameRGB = avcodec_alloc_frame();   
if ( (picture == NULL)||(pFrameRGB == NULL) )   
{   
	printf("avcodec alloc frame failed!
");   
	exit (1);   
}        

 //process only the first video stream
while(av_read_frame(pFormatCtx,&packet)>=0)
{    
	uint8_t* buff = packet.data;    //remember the pointer av_read_frame allocated, so the packet can be freed correctly below
	if(packet.stream_index==v_video_index[0])
	{ 
	  int iReadTime = 0; 
	  while(packet.size > 0) 
	  {
		  iReadTime++;
		  len = avcodec_decode_video2(pAVCodecCtx, picture, &got_picture_ptr, &packet);
		  if(len==0)  
		  {
			  got_picture_ptr=0;
		  }
		  if(len<0)
		  {
			  break;
		  }
		  if(got_picture_ptr)
		  { 
			  #if 1     
			  if(picture->key_frame==1)
			  {
				  printf("关键帧
");
			  }
			  else
			  {
				  printf("非关键帧
");
			  }
			  #endif

			  #if 1        //classify I/B/P frames and log dts/pts
			  if(FF_I_TYPE == picture->pict_type)
			  {
				  o<<"I Frame:"<<"packet.dts:"<<packet.dts<<"        "<<"packet.pts:"<<packet.pts<<endl;
			  }
			  else if(FF_B_TYPE==picture->pict_type)
			  {
				  o<<"B Frame:"<<"packet.dts:"<<packet.dts<<"        "<<"packet.pts:"<<packet.pts<<endl;
			  }
			  else
			  {
				  o<<"P Frame:"<<"packet.dts:"<<packet.dts<<"        "<<"packet.pts:"<<packet.pts<<endl;
			  }
			  #endif

			  width = pAVCodecCtx->width;
			  height = pAVCodecCtx->height;
			if(count)
			 {
			  //save the scaled frame as YUV420P
			  #if 1   
			  int  dstwidth = 800;
			  int  dstheight = 600;
			  //uint8_t *BufferForSws = (uint8_t*)av_malloc(MAX_FRAME_SIZE);
			  int size = avpicture_get_size(PIX_FMT_YUV420P, dstwidth, dstheight);
			  uint8_t *BufferForSwsCtx = (uint8_t*)av_malloc(size);
			  AVFrame     *dstframe = avcodec_alloc_frame();                  
			  avpicture_fill((AVPicture*)dstframe, BufferForSwsCtx, PIX_FMT_YUV420P, dstwidth, dstheight);   
			  SwsContext* pSwsCtx = sws_getContext(pAVCodecCtx->width, pAVCodecCtx->height, pAVCodecCtx->pix_fmt, dstwidth, dstheight, PIX_FMT_YUV420P, SWS_BILINEAR,NULL,NULL,NULL);                   
			  int ret = sws_scale(pSwsCtx,picture->data, picture->linesize, 0, pAVCodecCtx->height, dstframe->data, dstframe->linesize);
			  dstframe->width = dstwidth;
			  dstframe->height = dstheight;
			  SaveYUV(dstframe, dstwidth, dstheight, count);    //SaveYUV is the author's own helper (see the sketch after the listing)
			  av_free(BufferForSwsCtx);                          //the buffer backing dstframe must be freed separately
			  av_free(dstframe);
			  sws_freeContext(pSwsCtx);
			  #endif			 
			}			  
			  count++;
			  break;
		  }			
		  packet.size -= len;
		  packet.data += len;
	   }
	   packet.data = buff;            //restore the pointer before freeing; the decode loop advanced it
	}
	av_free_packet(&packet);          //free every packet, audio and video alike
}
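One caveat: decoders that reorder frames (B-frames) keep a few pictures buffered, and the loop above never drains them. With this same old API the usual flush is to keep feeding an empty packet until no more pictures come out; a sketch, not part of the original post:

//drain frames still buffered inside the decoder after the file is exhausted
packet.data = NULL;
packet.size = 0;
do
{
	got_picture_ptr = 0;
	avcodec_decode_video2(pAVCodecCtx, picture, &got_picture_ptr, &packet);
	//handle 'picture' here the same way as inside the loop above
} while(got_picture_ptr > 0);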
 av_free(picture);
 av_free(pFrameRGB);
 
 avcodec_close(pAVCodecCtx);
 av_close_input_file(pFormatCtx);
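The SaveYUV() helper called in the loop is the author's own and is not shown in the original post; a minimal sketch that dumps one planar YUV420P frame per call, with an assumed signature and output file name, could look like this:

//hypothetical SaveYUV: append the Y, Cb and Cr planes of a YUV420P frame to outN.yuv
void SaveYUV(AVFrame *frame, int width, int height, int index)
{
	char name[64];
	sprintf(name, "out%d.yuv", index);
	FILE *fp = fopen(name, "wb");
	if(fp == NULL)
		return;
	for(int y = 0; y < height; y++)          //luma plane, full resolution
		fwrite(frame->data[0] + y * frame->linesize[0], 1, width, fp);
	for(int y = 0; y < height / 2; y++)      //Cb plane, half width and half height
		fwrite(frame->data[1] + y * frame->linesize[1], 1, width / 2, fp);
	for(int y = 0; y < height / 2; y++)      //Cr plane, half width and half height
		fwrite(frame->data[2] + y * frame->linesize[2], 1, width / 2, fp);
	fclose(fp);
}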
Original article: https://www.cnblogs.com/welen/p/3666160.html