Based on tornadomeet's "Kinect + OpenNI Study Notes, Part 8 (An Analysis of Robert Walter's Hand-Extraction Code)", adapted for OpenNI2 / NiTE2

// skeletonHand.cpp : Defines the entry point for the console application.
//

// Precompiled header
#include "stdafx.h"

// STL Header
#include <iostream>

#include <vector>
  
// OpenCV Header
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
  
// o1. OpenNI Header
#include <OpenNI.h>
  
// n1. NiTE Header
#include <NiTE.h>
  
// namespace
using namespace std;
using namespace openni;
using namespace nite;

const unsigned int ROI_OFFSET        = 70;
const unsigned int XRES              = 640;
const unsigned int YRES              = 480;
const unsigned int BIN_THRESH_OFFSET = 5;
const unsigned int MEDIAN_BLUR_K     = 5;
// must be floating-point: as an unsigned int, 0.9 would truncate to 0
const float        GRASPING_THRESH   = 0.9f;
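// ROI_OFFSET is the half-size of the square hand window (140x140 px);
// BIN_THRESH_OFFSET is the +/- band, in 8-bit depth units, kept around the
// hand when segmenting; MEDIAN_BLUR_K is the median-filter kernel size.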
// drawing colors (BGR order)
const cv::Scalar COLOR_DARK_GREEN  = cv::Scalar(   0, 128,   0 );
const cv::Scalar COLOR_YELLOW      = cv::Scalar(   0, 128, 200 );
const cv::Scalar COLOR_BLUE        = cv::Scalar( 240,  40,   0 );
const cv::Scalar COLOR_LIGHT_GREEN = cv::Scalar(   0, 255,   0 );
const cv::Scalar COLOR_RED         = cv::Scalar(   0,   0, 255 );
struct ConvexityDefect
{
    cv::Point start;
    cv::Point end;
    cv::Point depth_point;
    float depth;
};

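// Returns true when the hand joint lies within ROI_OFFSET pixels of the frame
// edge, i.e. when the full 140x140 ROI would run past the image border; the
// caller then keeps the previous ROI position.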
bool handApproachingDisplayPerimeter(float x, float y)
{
    return (x > (XRES - ROI_OFFSET)) || (x < (ROI_OFFSET)) ||
           (y > (YRES - ROI_OFFSET)) || (y < (ROI_OFFSET));
}
//
//// Thanks to Jose Manuel Cabrera for part of this C++ wrapper function
//// ("convexity defect" = an indentation in the contour relative to its
//// convex hull). Kept commented out for reference only, since OpenCV
//// already provides an equivalent, cv::convexityDefects, used in main().
//    void findConvexityDefects(vector<cv::Point>& contour, vector<int>& hull, vector<ConvexityDefect>& convexDefects)
//    {
//        if(hull.size() > 0 && contour.size() > 0)
//        {    
//        CvSeq* contourPoints;
//        CvSeq* defects;
//        CvMemStorage* storage;
//        CvMemStorage* strDefects;
//        CvMemStorage* contourStr;
//        CvConvexityDefect *defectArray = 0;
//
//        strDefects = cvCreateMemStorage();
//        defects = cvCreateSeq( CV_SEQ_KIND_GENERIC|CV_32SC2, sizeof(CvSeq),sizeof(CvPoint), strDefects );
//
//        //We transform our vector<Point> into a CvSeq* object of CvPoint.
//        contourStr = cvCreateMemStorage();
//        contourPoints = cvCreateSeq(CV_SEQ_KIND_GENERIC|CV_32SC2, sizeof(CvSeq), sizeof(CvPoint), contourStr);
//        for(int i = 0; i < (int)contour.size(); i++) {
//            CvPoint cp = {contour[i].x,  contour[i].y};
//            cvSeqPush(contourPoints, &cp);
//        }
//
//        //Now, we do the same thing with the hull index
//        int count = (int) hull.size();
//        //int hullK[count];
//        int* hullK = (int*) malloc(count*sizeof(int));
//        for(int i = 0; i < count; i++) { hullK[i] = hull.at(i); }
//        CvMat hullMat = cvMat(1, count, CV_32SC1, hullK);
//
//        // calculate convexity defects
//        storage = cvCreateMemStorage(0);
//        defects = cvConvexityDefects(contourPoints, &hullMat, storage);
//        defectArray = (CvConvexityDefect*)malloc(sizeof(CvConvexityDefect)*defects->total);
//        cvCvtSeqToArray(defects, defectArray, CV_WHOLE_SEQ);
//        //printf("DefectArray %i %i\n",defectArray->end->x, defectArray->end->y);
//
//        //We store defects points in the convexDefects parameter.
//        for(int i = 0; i<defects->total; i++){
//            ConvexityDefect def;
//            def.start       = cv::Point(defectArray[i].start->x, defectArray[i].start->y);
//            def.end         = cv::Point(defectArray[i].end->x, defectArray[i].end->y);
//            def.depth_point = cv::Point(defectArray[i].depth_point->x, defectArray[i].depth_point->y);
//            def.depth       = defectArray[i].depth;
//            convexDefects.push_back(def);
//        }
//
//    // release memory
//    cvReleaseMemStorage(&contourStr);
//    cvReleaseMemStorage(&strDefects);
//    cvReleaseMemStorage(&storage);
//    free(hullK);
//    free(defectArray);
//
//    }
//}
int main( int argc, char **argv )
{
  // o2. Initialize OpenNI
  OpenNI::initialize();
  
  // o3. Open Device
  Device  mDevice;
  mDevice.open( ANY_DEVICE );
  
  // o4. create depth stream
  VideoStream mDepthStream;
  mDepthStream.create( mDevice, SENSOR_DEPTH );
  // o4a. set video mode
  VideoMode mDMode;
  mDMode.setResolution( 640, 480 );
  mDMode.setFps( 30 );
  mDMode.setPixelFormat( PIXEL_FORMAT_DEPTH_1_MM );
  mDepthStream.setVideoMode( mDMode);
  
  // o5. Create color stream
  VideoStream mColorStream;
  mColorStream.create( mDevice, SENSOR_COLOR );
  // o5a. set video mode
  VideoMode mCMode;
  mCMode.setResolution( 640, 480 );
  mCMode.setFps( 30 );
  mCMode.setPixelFormat( PIXEL_FORMAT_RGB888 );
  mColorStream.setVideoMode( mCMode);
   
  // o6. image registration
  mDevice.setImageRegistrationMode( IMAGE_REGISTRATION_DEPTH_TO_COLOR );
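  // (with registration enabled, each depth pixel is aligned to the same
  // pixel in the color image, so coordinates match across both streams)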
  
  // n2. Initialize NiTE
  NiTE::initialize();
  
  // n3. create user tracker
  UserTracker mUserTracker;
  mUserTracker.create( &mDevice );
  mUserTracker.setSkeletonSmoothingFactor( 0.1f );
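  // (smoothing factor is in [0, 1]: 0 means raw joint positions, values
  // near 1 heavily damp movement; 0.1 keeps the skeleton responsive)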
  
  //set ROI
  cv::Rect ROI;
  ROI.width = ROI_OFFSET * 2;
  ROI.height = ROI_OFFSET * 2;

  // create OpenCV Window
  cv::namedWindow( "User Image",  CV_WINDOW_AUTOSIZE );
  cv::namedWindow( "Depth Image", CV_WINDOW_AUTOSIZE );
  cv::namedWindow( "leftHandFrame Image", CV_WINDOW_AUTOSIZE );
  cv::namedWindow( "rightHandFrame Image", CV_WINDOW_AUTOSIZE );
  // p1. start
  mColorStream.start();
  mDepthStream.start();
  
   int iMaxDepth = mDepthStream.getMaxPixelValue();
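   // (the max pixel value is typically 10000, i.e. 10 m, for
   // PIXEL_FORMAT_DEPTH_1_MM; it is used below to scale depth to 8 bits)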

  while( true )
  {
    // main loop

    cv::Mat cImageBGR;
    cv::Mat cImageDepth;
    cv::Mat handDebug;

    vector< cv::Mat > debugFrames;

    // p2a. get color and depth frames
    VideoFrameRef mColorFrame;
    VideoFrameRef mDepthFrame;

    mColorStream.readFrame( &mColorFrame );
    mDepthStream.readFrame( &mDepthFrame );

    // p2b. convert data to OpenCV format
    const cv::Mat mImageRGB( mColorFrame.getHeight(), mColorFrame.getWidth(), CV_8UC3, (void*)mColorFrame.getData() );
    // p2c. convert from RGB to BGR
    cv::cvtColor( mImageRGB, cImageBGR, CV_RGB2BGR );
    
    // p2d. do the same for the depth frame
    const cv::Mat mImageDepth( mDepthFrame.getHeight(), mDepthFrame.getWidth(), CV_16UC1, (void*)mDepthFrame.getData() );
    // p2e. re-map depth data from [0, iMaxDepth] to [0, 255]
    mImageDepth.convertTo( cImageDepth, CV_8U, 255.0 / iMaxDepth );
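    // (one 8-bit unit then corresponds to iMaxDepth/255 mm of real depth,
    // roughly 39 mm when iMaxDepth is 10000)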
 
    // p3. get user frame
    UserTrackerFrameRef  mUserFrame;
    mUserTracker.readFrame( &mUserFrame );
 
    // p4. get users data
    const nite::Array<UserData>& aUsers = mUserFrame.getUsers();
    for( int i = 0; i < aUsers.getSize(); ++ i )
    {
        const UserData& rUser = aUsers[i];
 
        // p4a. check user status
        if( rUser.isNew() )
        {
            // start tracking for new user
            mUserTracker.startSkeletonTracking( rUser.getId() );
        }
 
        if( rUser.isVisible() )
        {
            // p4b. get user skeleton
            const Skeleton& rSkeleton = rUser.getSkeleton();
            
            int handDepth = -1;
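            // handDepth stays -1 until a hand joint with sufficient
            // confidence provides a depth value to threshold against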

            if ( rSkeleton.getState() == SKELETON_TRACKED )
            {
                for ( int handI = 0; handI < 2; handI++)
                {
                    // get the hand joint; nite::SkeletonJoint exposes
                    // getPosition() and getPositionConfidence()
                    SkeletonJoint hand;

                    if ( handI == 0)
                    {
                        hand = rSkeleton.getJoint( JOINT_RIGHT_HAND );
                    }
                    else
                    {
                        hand = rSkeleton.getJoint( JOINT_LEFT_HAND );
                    }

                    if ( hand.getPositionConfidence() >= 0.5 )
                    {
                        const Point3f& handPosition = hand.getPosition();

                        // joint positions are in real-world millimeters, so
                        // project them into depth-map pixel coordinates first
                        float handX = 0.0f, handY = 0.0f;
                        mUserTracker.convertJointCoordinatesToDepth(
                            handPosition.x, handPosition.y, handPosition.z,
                            &handX, &handY );

                        // re-map the hand depth to the same 8-bit scale used
                        // for cImageDepth
                        handDepth = handPosition.z * 255.0 / iMaxDepth;

                        // update the ROI only while the hand stays far enough
                        // from the frame border
                        if( !handApproachingDisplayPerimeter( handX, handY ) )
                        {
                            ROI.x = handX - ROI_OFFSET;
                            ROI.y = handY - ROI_OFFSET;
                        }
                    }

                    // crop the hand ROI out of the depth image (deep copy)
                    cv::Mat handCpy( cImageDepth, ROI );
                    cv::Mat handMat = handCpy.clone ();
                    
                    //binary threshold
                    if ( handDepth != -1)
                        handMat = (handMat > (handDepth - BIN_THRESH_OFFSET)) & (handMat < (handDepth + BIN_THRESH_OFFSET));
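                    // this keeps only pixels whose scaled depth lies within
                    // +/- BIN_THRESH_OFFSET (5 units, roughly 200 mm) of the
                    // hand depth, isolating a thin slab around the hand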

                    //median blur
                    medianBlur(handMat, handMat, MEDIAN_BLUR_K);

                    handDebug = handMat.clone();
                    debugFrames.push_back(handDebug);
                    cvtColor(debugFrames[handI], debugFrames[handI], CV_GRAY2RGB);

                    std::vector< std::vector<cv::Point> > contours;
                    std::vector<cv::Vec4i> hierarchy;

                    findContours(handMat, contours,  hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);

                    if (contours.size())
                    {
                        for (size_t c = 0; c < contours.size(); c++)
                        {
                            vector<cv::Point> contour = contours[c];
                            // wrap the point vector in a Mat: a single-column,
                            // two-channel matrix sharing the same data
                            cv::Mat contourMat = cv::Mat(contour);
                            // area enclosed by the contour
                            double cArea = contourArea(contourMat);
                            
                            if(cArea > 2000)
                            {
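                                // (contours under ~2000 px^2 are treated as
                                // noise; at this ROI size only the hand blob
                                // normally passes)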
                                cv::Scalar center = mean(contourMat);
                                cv::Point centerPoint = cv::Point(center.val[0], center.val[1]);

                                // approximate the contour by a simple curve
                                vector<cv::Point> approxCurve;
                                // (approxCurve becomes a closed polygon whose
                                // maximum deviation from the contour is 10 px)
                                approxPolyDP(contourMat, approxCurve, 10, true);

                                vector< vector<cv::Point> > debugContourV;
                                debugContourV.push_back(approxCurve);
                                // drawContours takes a collection of contours
                                // (vector< vector<Point> >), so wrap the curve
                                // first; dark green marks the approximated
                                // polygon
                                drawContours(debugFrames[handI], debugContourV, 0, COLOR_DARK_GREEN, 3);

                                vector<int> hull;
                                // compute the convex hull of approxCurve; with
                                // returnPoints = false, hull holds indices
                                // into approxCurve rather than points
                                convexHull(cv::Mat(approxCurve), hull, false, false);

                                // draw the hull points
                                for(int j = 0; j < hull.size(); j++)
                                {
                                    int index = hull[j];
                                    // hull vertices are drawn in yellow
                                    circle(debugFrames[handI], approxCurve[index], 3, COLOR_YELLOW, 2);
                                }

                                // find convexity defects
                                vector<cv::Vec4i> convexDefects;
                                cv::convexityDefects(cv::Mat(approxCurve), hull, convexDefects);
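                                // each Vec4i holds (start index, end index,
                                // farthest-point index, fixed-point depth:
                                // the defect depth multiplied by 256)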
                                printf("Number of defects: %d.\n", (int) convexDefects.size());

                                for(int j = 0; j < convexDefects.size(); j++)
                                {
                                    // the farthest point of each defect
                                    // (element 2 of the Vec4i) is drawn in blue
                                    circle(debugFrames[handI], approxCurve[convexDefects[j][2]], 3, COLOR_BLUE, 2);
                                }


                                // assemble the convex hull as actual point
                                // coordinates (convert indices back to points)
                                vector<cv::Point> hullPoints;
                                for(int k = 0; k < hull.size(); k++)
                                {
                                    int curveIndex = hull[k];
                                    cv::Point p = approxCurve[curveIndex];
                                    hullPoints.push_back(p);
                                }

                                // area of hull and curve
                                double hullArea  = contourArea(cv::Mat(hullPoints));
                                double curveArea = contourArea(cv::Mat(approxCurve));
                                double handRatio = curveArea/hullArea;
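                                // an open hand has deep gaps between the
                                // fingers, so its contour area falls well
                                // below its hull area (low ratio); a fist
                                // fills its hull almost completely, pushing
                                // the ratio toward 1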
                        
                                // hand is grasping when the ratio exceeds
                                // GRASPING_THRESH (0.9)
                                if(handRatio > GRASPING_THRESH)
                                {
                                    // a fist gets a green center dot
                                    circle(debugFrames[handI], centerPoint, 5, COLOR_LIGHT_GREEN, 5);
                                }
                                else
                                {
                                    // an open hand gets a red center dot
                                    circle(debugFrames[handI], centerPoint, 5, COLOR_RED, 5);
                                }
                            }
                        }
                    }
                }
            }
        }
    }

    // p5. show image
    cv::imshow( "User Image", cImageBGR );
    cv::imshow( "Depth Image", cImageDepth );
    
    // debugFrames holds one image per hand: index 0 was pushed for the right
    // hand (handI == 0), index 1 for the left hand
    if(debugFrames.size() >= 2 )
    {
        // enlarge both dimensions 3x for easier viewing
        resize(debugFrames[0], debugFrames[0], cv::Size(), 3, 3);
        resize(debugFrames[1], debugFrames[1], cv::Size(), 3, 3);
        imshow("rightHandFrame Image", debugFrames[0]);
        imshow("leftHandFrame Image",  debugFrames[1]);
        debugFrames.clear();
    }

    // p6. check keyboard (waitKey also services the HighGUI event loop)
    if( cv::waitKey( 20 ) == 'q' )
      break;
  }
  
  // p7. stop
  mUserTracker.destroy();
  mColorStream.destroy();
  mDepthStream.destroy();
  mDevice.close();
  NiTE::shutdown();
  OpenNI::shutdown();
  
  return 0;
}
Original post: https://www.cnblogs.com/hqqxyy/p/2970345.html