Kinect Development: Saving Video Footage

Rather than just displaying the image stream, it would be better if we could save the recorded footage to disk. Recording video, however, takes a bit of technique. There are plenty of examples online showing how to save individual frames captured from the Kinect as image files, and an earlier post covered that as well, but you rarely see a demonstration of saving a complete video clip to the local hard drive. Fortunately, the Emgu library provides a VideoWriter type that helps us do exactly that.
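Before walking through the full example, here is a minimal sketch of the VideoWriter usage this post relies on: the constructor takes a file name, a FOURCC compression code (0 means uncompressed), the frame rate, the frame size, and a color flag, and each frame is then written with WriteFrame. The SaveFrames helper name and the hard-coded 30 fps / 640x480 values are illustrative assumptions, not part of the original code:

using System.Collections.Generic;
using Emgu.CV;
using Emgu.CV.Structure;

static class AviSketch
{
    // Illustrative helper: write a list of Emgu images to an uncompressed AVI file.
    public static void SaveFrames(string path, IList<Image<Rgb, byte>> frames)
    {
        // 0 = no compression, 30 frames per second, 640x480 frames, color output
        using (VideoWriter writer = new VideoWriter(path, 0, 30, 640, 480, true))
        {
            foreach (Image<Rgb, byte> frame in frames)
                writer.WriteFrame<Rgb, byte>(frame);
        }
    }
}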

   The code below shows how the Record and StopRecording methods save the data stream produced by the Kinect color camera to an AVI file. We created a vids folder on the D drive; that folder must exist before the AVI file can be written. When recording starts, we build a file name from the current time, and we keep a list that collects the frames pulled from the color stream. When recording stops, the series of Emgu images in that list is passed to a VideoWriter object, which converts them to AVI format and saves the file to disk. This code does not encode the AVI, so the resulting files are very large. We could compress the AVI before saving it to disk, but that would increase the computational load.
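Since the code assumes D:\vids already exists, one small safeguard (not in the original post) is to create the folder up front, for example when the window loads; Directory.CreateDirectory does nothing if the folder is already there:

using System.IO;

// Added suggestion: make sure the output folder exists before recording starts.
Directory.CreateDirectory(@"d:\vids\");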

The last piece of the motion-detection example simply modifies the logic that "pulls" data from the color stream, so that when motion is detected the frame is not only displayed on the UI but the Record method is called as well. When no motion is detected, nothing is shown on the UI and StopRecording is called. This code also serves as a prototype for analyzing the raw data stream and detecting changes in the Kinect's field of view, changes that can carry very useful information.


// Using directives were omitted from the original listing; these are the ones the code below
// needs, assuming Emgu CV 2.x and the Kinect for Windows SDK. ToOpenCVImage and ToBitmapSource
// are extension methods defined in the earlier post in this series.
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Linq;
using System.Windows;
using Emgu.CV;
using Emgu.CV.Structure;
using Emgu.CV.VideoSurveillance;
using Microsoft.Kinect;

namespace KinectWriteToVideo
{
    /// <summary>
    /// Interaction logic for MainWindow.xaml
    /// </summary>
    public partial class MainWindow : Window
    {
        KinectSensor _kinectSensor;
        private MotionHistory _motionHistory;
        private IBGFGDetector<Bgr> _forgroundDetector;
        bool _isTracking = false;


        public MainWindow()
        {
            InitializeComponent();

            this.Unloaded += delegate
            {
                _kinectSensor.ColorStream.Disable();
            };

            this.Loaded += delegate
            {

                _motionHistory = new MotionHistory(
                    1.0, //in seconds, the duration of motion history you want to keep
                    0.05, //in seconds, parameter for cvCalcMotionGradient
                    0.5); //in seconds, parameter for cvCalcMotionGradient

                _kinectSensor = KinectSensor.KinectSensors[0];

                _kinectSensor.ColorStream.Enable();
                _kinectSensor.Start();

                BackgroundWorker bw = new BackgroundWorker();
                bw.DoWork += (a, b) => Pulse();
                bw.RunWorkerCompleted += (c, d) => bw.RunWorkerAsync();
                bw.RunWorkerAsync();
            };
        }


        private void Pulse()
        {
            using (ColorImageFrame imageFrame = _kinectSensor.ColorStream.OpenNextFrame(200))
            {
                if (imageFrame == null)
                    return;

                using (Image<Bgr, byte> image = imageFrame.ToOpenCVImage<Bgr, byte>())
                using (MemStorage storage = new MemStorage()) //create storage for motion components
                {
                    if (_forgroundDetector == null)
                    {
                        _forgroundDetector = new BGStatModel<Bgr>(image
                            , Emgu.CV.CvEnum.BG_STAT_TYPE.GAUSSIAN_BG_MODEL);
                    }

                    _forgroundDetector.Update(image);

                    //update the motion history
                    _motionHistory.Update(_forgroundDetector.ForegroundMask);

                    //get a copy of the motion mask and enhance its color
                    double[] minValues, maxValues;
                    System.Drawing.Point[] minLoc, maxLoc;
                    _motionHistory.Mask.MinMax(out minValues, out maxValues
                        , out minLoc, out maxLoc);
                    Image<Gray, Byte> motionMask = _motionHistory.Mask
                        .Mul(255.0 / maxValues[0]);

                    //create the motion image 
                    Image<Bgr, Byte> motionImage = new Image<Bgr, byte>(motionMask.Size);
                    motionImage[0] = motionMask;

                    //Threshold to define a motion area
                    //reduce the value to detect smaller motion
                    double minArea = 100;

                    storage.Clear(); //clear the storage
                    Seq<MCvConnectedComp> motionComponents = _motionHistory.GetMotionComponents(storage);
                    bool isMotionDetected = false;
                    //iterate through each of the motion component
                    for (int c = 0; c < motionComponents.Count(); c++)
                    {
                        MCvConnectedComp comp = motionComponents[c];
                        //reject the components that have small area;
                        if (comp.area < minArea) continue;

                        OnDetection();
                        isMotionDetected = true;
                        break;
                    }
                    if (isMotionDetected == false)
                    {
                        OnDetectionStopped();
                        this.Dispatcher.Invoke(new Action(() => rgbImage.Source = null));
                        StopRecording();
                        return;
                    }

                    this.Dispatcher.Invoke(
                        new Action(() => rgbImage.Source = imageFrame.ToBitmapSource())
                        );

                    Record(imageFrame);
                }
            }
        }

        DateTime _lastTracked = DateTime.Now;
        private void OnDetection()
        {
            _lastTracked = DateTime.Now;
            if (!_isTracking)
                _isTracking = true;
        }

        private void OnDetectionStopped()
        {
            var waitTime = 2;
            if (DateTime.Now.Subtract(_lastTracked) > TimeSpan.FromSeconds(waitTime))
                _isTracking = false;
        }

        bool _isRecording = false;
        string _baseDirectory = @"d:vids";
        string _fileName;
        List<Image<Rgb, Byte>> _videoArray = new List<Image<Rgb, Byte>>();

        void Record(ColorImageFrame image)
        {
            if (!_isRecording)
            {
                _fileName = string.Format("{0}{1}{2}", _baseDirectory, DateTime.Now.ToString("MMddyyyyHmmss"), ".avi");
                _isRecording = true;
            }
            _videoArray.Add(image.ToOpenCVImage<Rgb, Byte>());
        }

        void StopRecording()
        {
            if (!_isRecording)
                return;

            // The FOURCC codes below are listed only for reference; the VideoWriter created
            // further down uses compression code 0, i.e. an uncompressed AVI.
            CvInvoke.CV_FOURCC('P', 'I', 'M', '1');   //= MPEG-1 codec
            CvInvoke.CV_FOURCC('M', 'J', 'P', 'G');  //= motion-jpeg codec (does not work well)
            CvInvoke.CV_FOURCC('M', 'P', '4', '2');//= MPEG-4.2 codec
            CvInvoke.CV_FOURCC('D', 'I', 'V', '3'); //= MPEG-4.3 codec
            CvInvoke.CV_FOURCC('D', 'I', 'V', 'X'); //= MPEG-4 codec
            CvInvoke.CV_FOURCC('U', '2', '6', '3'); //= H263 codec
            CvInvoke.CV_FOURCC('I', '2', '6', '3'); //= H263I codec
            CvInvoke.CV_FOURCC('F', 'L', 'V', '1'); //= FLV1 codec

            using (VideoWriter vw = new VideoWriter(_fileName, 0, 30, 640, 480, true))
            {
                for (int i = 0; i < _videoArray.Count(); i++)
                    vw.WriteFrame<Rgb, Byte>(_videoArray[i]);
            }
            _fileName = string.Empty;
            _videoArray.Clear();
            _isRecording = false;

        }

    }
}
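If disk space matters more than CPU time, the same VideoWriter constructor accepts one of the FOURCC codes listed in StopRecording in place of 0. A sketch of that variant (assuming the chosen codec, here MPEG-4.2, is actually installed on the machine):

            // Sketch only: replace the compression code 0 with a FOURCC value to get a compressed AVI.
            int codec = CvInvoke.CV_FOURCC('M', 'P', '4', '2');   // MPEG-4.2
            using (VideoWriter vw = new VideoWriter(_fileName, codec, 30, 640, 480, true))
            {
                for (int i = 0; i < _videoArray.Count; i++)
                    vw.WriteFrame<Rgb, Byte>(_videoArray[i]);
            }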
Original post: https://www.cnblogs.com/sprint1989/p/3860041.html