V-LOAM Source Code Walkthrough (Part 1)

Please credit the source when reposting: this article is from zhch_pan's blog, http://www.cnblogs.com/zhchp-blog/

This post collects the source-code reading notes I made for an earlier project. I am posting them in the hope that they help others better understand the engineering implementation of V-LOAM. Some of the code has been modified and may differ from the original project, but this does not affect understanding of the overall workflow.

Source code: https://github.com/Jinqiang/demo_lidar


Node name: featureTracking

Subscribed topic: <sensor_msgs::Image> "/camera/image_raw"

Published topics: 1. <sensor_msgs::PointCloud2> "/image_points_last" (a cloud of ImagePoint features; see the sketch below)

                  2. <sensor_msgs::Image> "/image/show"
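
The features travel between nodes as a pcl::PointCloud<ImagePoint> serialized into the PointCloud2 message above. Judging from how point.u, point.v, and point.ind are used in the listing below, ImagePoint is roughly the following struct; this is a sketch for orientation only, and the authoritative definition (presumably also registered as a PCL point type so that pcl::toROSMsg works) lives in pointDefinition.h of the demo_lidar repository:

// Sketch only -- see pointDefinition.h in the demo_lidar repo for the real definition.
struct ImagePoint {
  float u, v;   // feature position in normalized camera coordinates: u = -(x - cx)/fx, v = -(y - cy)/fy
  int ind;      // feature ID, assigned from the monotonically increasing featuresIndFromStart
};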

#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <ros/ros.h>

#include "cameraParameters.h"
#include "pointDefinition.h"

using namespace std;
using namespace cv;

bool systemInited = false;
double timeCur, timeLast;

const int imagePixelNum = imageHeight * imageWidth;
CvSize imgSize = cvSize(imageWidth, imageHeight);

IplImage *imageCur = cvCreateImage(imgSize, IPL_DEPTH_8U, 1);
IplImage *imageLast = cvCreateImage(imgSize, IPL_DEPTH_8U, 1);

int showCount = 0;
const int showSkipNum = 2;
const int showDSRate = 2;
CvSize showSize = cvSize(imageWidth / showDSRate, imageHeight / showDSRate);

IplImage *imageShow = cvCreateImage(showSize, IPL_DEPTH_8U, 1);
IplImage *harrisLast = cvCreateImage(showSize, IPL_DEPTH_32F, 1);

CvMat kMat = cvMat(3, 3, CV_64FC1, kImage);
CvMat dMat = cvMat(4, 1, CV_64FC1, dImage);

IplImage *mapx, *mapy;

const int maxFeatureNumPerSubregion = 2;
const int xSubregionNum = 12;
const int ySubregionNum = 8;
const int totalSubregionNum = xSubregionNum * ySubregionNum;
const int MAXFEATURENUM = maxFeatureNumPerSubregion * totalSubregionNum;

const int xBoundary = 20;
const int yBoundary = 20;
const double subregionWidth = (double)(imageWidth - 2 * xBoundary) / (double)xSubregionNum;
const double subregionHeight = (double)(imageHeight - 2 * yBoundary) / (double)ySubregionNum;
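
// [Note added for this walkthrough, not in the original source] Worked example of the
// subregion layout, using a hypothetical 640x480 image (the real dimensions come from
// cameraParameters.h): subregionWidth = (640 - 2*20) / 12 = 50 px,
// subregionHeight = (480 - 2*20) / 8 = 55 px, and at most
// MAXFEATURENUM = 2 * 12 * 8 = 192 features are maintained per frame.
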
const double maxTrackDis = 100;
const int winSize = 15;

IplImage *imageEig, *imageTmp, *pyrCur, *pyrLast;

CvPoint2D32f *featuresCur = new CvPoint2D32f[2 * MAXFEATURENUM];
CvPoint2D32f *featuresLast = new CvPoint2D32f[2 * MAXFEATURENUM];
char featuresFound[2 * MAXFEATURENUM];
float featuresError[2 * MAXFEATURENUM];

int featuresIndFromStart = 0;
int featuresInd[2 * MAXFEATURENUM] = {0};

int totalFeatureNum = 0;
//maxFeatureNumPerSubregion=2
int subregionFeatureNum[2 * totalSubregionNum] = {0};

pcl::PointCloud<ImagePoint>::Ptr imagePointsCur(new pcl::PointCloud<ImagePoint>());
pcl::PointCloud<ImagePoint>::Ptr imagePointsLast(new pcl::PointCloud<ImagePoint>());

ros::Publisher *imagePointsLastPubPointer;
ros::Publisher *imageShowPubPointer;
cv_bridge::CvImage bridge;

void imageDataHandler(const sensor_msgs::Image::ConstPtr& imageData)
{
  timeLast = timeCur;
  timeCur = imageData->header.stamp.toSec() - 0.1163;

  IplImage *imageTemp = imageLast;
  imageLast = imageCur;
  imageCur = imageTemp;

  for (int i = 0; i < imagePixelNum; i++) {
    imageCur->imageData[i] = (char)imageData->data[i];
  }

  IplImage *t = cvCloneImage(imageCur);
  // undistort the image using the precomputed remap tables
  cvRemap(t, imageCur, mapx, mapy);
  //cvEqualizeHist(imageCur, imageCur);
  cvReleaseImage(&t);

  // downsample the last image a bit; corner detection is faster on the smaller image
  cvResize(imageLast, imageShow);
  cvCornerHarris(imageShow, harrisLast, 3);

  CvPoint2D32f *featuresTemp = featuresLast;
  featuresLast = featuresCur;
  featuresCur = featuresTemp;

  pcl::PointCloud<ImagePoint>::Ptr imagePointsTemp = imagePointsLast;
  imagePointsLast = imagePointsCur;
  imagePointsCur = imagePointsTemp;
  imagePointsCur->clear();

  if (!systemInited) {
    systemInited = true;
    return;
  }

  int recordFeatureNum = totalFeatureNum;
  for (int i = 0; i < ySubregionNum; i++) {
    for (int j = 0; j < xSubregionNum; j++) {
      // ind is the index of the current subregion
      int ind = xSubregionNum * i + j;
      int numToFind = maxFeatureNumPerSubregion - subregionFeatureNum[ind];

      if (numToFind > 0) {
        int subregionLeft = xBoundary + (int)(subregionWidth * j);
        int subregionTop = yBoundary + (int)(subregionHeight * i);
        // crop out the current subregion as the region of interest
        CvRect subregion = cvRect(subregionLeft, subregionTop, (int)subregionWidth, (int)subregionHeight);
        cvSetImageROI(imageLast, subregion);
        // detect good features to track inside the cropped subregion
        cvGoodFeaturesToTrack(imageLast, imageEig, imageTmp, featuresLast + totalFeatureNum,
                              &numToFind, 0.1, 5.0, NULL, 3, 1, 0.04);

        int numFound = 0;
        for(int k = 0; k < numToFind; k++) {
          featuresLast[totalFeatureNum + k].x += subregionLeft;
          featuresLast[totalFeatureNum + k].y += subregionTop;

          int xInd = (featuresLast[totalFeatureNum + k].x + 0.5) / showDSRate;
          int yInd = (featuresLast[totalFeatureNum + k].y + 0.5) / showDSRate;
          // keep a candidate only if its Harris response on the downsampled image is strong enough
          if (((float*)(harrisLast->imageData + harrisLast->widthStep * yInd))[xInd] > 1e-7) {
            featuresLast[totalFeatureNum + numFound].x = featuresLast[totalFeatureNum + k].x;
            featuresLast[totalFeatureNum + numFound].y = featuresLast[totalFeatureNum + k].y;
            featuresInd[totalFeatureNum  + numFound]   = featuresIndFromStart;

            numFound++;
            featuresIndFromStart++;
          }
        }
        totalFeatureNum += numFound;
        subregionFeatureNum[ind] += numFound;

        cvResetImageROI(imageLast);
      }
    }
  }

  // track the features of the last frame into the current frame with pyramidal Lucas-Kanade
  cvCalcOpticalFlowPyrLK(imageLast, imageCur, pyrLast, pyrCur,
                         featuresLast, featuresCur, totalFeatureNum, cvSize(winSize, winSize),
                         3, featuresFound, featuresError,
                         cvTermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 30, 0.01), 0);

  for (int i = 0; i < totalSubregionNum; i++) {
    subregionFeatureNum[i] = 0;
  }

  ImagePoint point;
  int featureCount = 0;
  double meanShiftX = 0, meanShiftY = 0;
  for (int i = 0; i < totalFeatureNum; i++) {
    double trackDis = sqrt((featuresLast[i].x - featuresCur[i].x) 
                    * (featuresLast[i].x - featuresCur[i].x)
                    + (featuresLast[i].y - featuresCur[i].y) 
                    * (featuresLast[i].y - featuresCur[i].y));

    if (!(trackDis > maxTrackDis || featuresCur[i].x < xBoundary || 
      featuresCur[i].x > imageWidth - xBoundary || featuresCur[i].y < yBoundary || 
      featuresCur[i].y > imageHeight - yBoundary)) {
      // work out which subregion this feature was detected in; ind is the subregion index
      int xInd = (int)((featuresLast[i].x - xBoundary) / subregionWidth);
      int yInd = (int)((featuresLast[i].y - yBoundary) / subregionHeight);
      int ind = xSubregionNum * yInd + xInd;

      if (subregionFeatureNum[ind] < maxFeatureNumPerSubregion) {
        // keep only the optical-flow matches that pass the checks above; featureCount starts
        // from 0, so featuresCur[] and featuresLast[] only hold features from the two adjacent
        // frames, and older features are not kept
        featuresCur[featureCount].x = featuresCur[i].x;
        featuresCur[featureCount].y = featuresCur[i].y;
        featuresLast[featureCount].x = featuresLast[i].x;
        featuresLast[featureCount].y = featuresLast[i].y;
        // some features are filtered out, so featureCount is not necessarily equal to i
        featuresInd[featureCount] = featuresInd[i];
        /* This step transforms a feature point [u,v] from image coordinates into camera
         * coordinates, i.e. [u,v] -> [X/Z, Y/Z, 1]; see eq. (5.5) of "14 Lectures on Visual SLAM".
         * Note the added minus sign. By default the camera frame has z forward, x right, y down,
         * and the image coordinate origin is at the top-left corner of the image.
         * featuresCur[featureCount].x - kImage[2] first shifts the origin from the top-left
         * corner back to the image center; the minus sign then takes the negative x direction
         * of the default camera frame as positive (and likewise for y), so the resulting
         * camera frame has z forward, x left, y up.
         */
        point.u = -(featuresCur[featureCount].x - kImage[2]) / kImage[0];
        point.v = -(featuresCur[featureCount].y - kImage[5]) / kImage[4];
        point.ind = featuresInd[featureCount];
        imagePointsCur->push_back(point);

        if (i >= recordFeatureNum) {
          point.u = -(featuresLast[featureCount].x - kImage[2]) / kImage[0];
          point.v = -(featuresLast[featureCount].y - kImage[5]) / kImage[4];
          imagePointsLast->push_back(point);
        }

        meanShiftX += fabs((featuresCur[featureCount].x - featuresLast[featureCount].x) / kImage[0]);
        meanShiftY += fabs((featuresCur[featureCount].y - featuresLast[featureCount].y) / kImage[4]);

        featureCount++;
        // subregionFeatureNum counts the feature matches between the current and the last frame
        subregionFeatureNum[ind]++;
      }
    }
  }
  totalFeatureNum = featureCount;
  meanShiftX /= totalFeatureNum;
  meanShiftY /= totalFeatureNum;

  // publish the surviving features of the last frame, stamped with the last frame's time
  sensor_msgs::PointCloud2 imagePointsLast2;
  pcl::toROSMsg(*imagePointsLast, imagePointsLast2);
  imagePointsLast2.header.stamp = ros::Time().fromSec(timeLast);
  imagePointsLastPubPointer->publish(imagePointsLast2);

  // publish only every third image, e.g. frames 0 and 1 are skipped, 2 is published,
  // 3 and 4 are skipped, 5 is published, ...
  showCount = (showCount + 1) % (showSkipNum + 1);
  if (showCount == showSkipNum) {
    Mat imageShowMat(imageShow);
    bridge.image = imageShowMat;
    bridge.encoding = "mono8";
    sensor_msgs::Image::Ptr imageShowPointer = bridge.toImageMsg();
    imageShowPubPointer->publish(imageShowPointer);
  }
}

int main(int argc, char** argv)
{
  ros::init(argc, argv, "featureTracking");
  ros::NodeHandle nh;

  mapx = cvCreateImage(imgSize, IPL_DEPTH_32F, 1);
  mapy = cvCreateImage(imgSize, IPL_DEPTH_32F, 1);
  cvInitUndistortMap(&kMat, &dMat, mapx, mapy);

  CvSize subregionSize = cvSize((int)subregionWidth, (int)subregionHeight);
  imageEig = cvCreateImage(subregionSize, IPL_DEPTH_32F, 1);
  imageTmp = cvCreateImage(subregionSize, IPL_DEPTH_32F, 1);

  CvSize pyrSize = cvSize(imageWidth + 8, imageHeight / 3);
  pyrCur = cvCreateImage(pyrSize, IPL_DEPTH_32F, 1);
  pyrLast = cvCreateImage(pyrSize, IPL_DEPTH_32F, 1);

  ros::Subscriber imageDataSub = nh.subscribe<sensor_msgs::Image>("/camera/image_raw", 1, imageDataHandler);

  ros::Publisher imagePointsLastPub = nh.advertise<sensor_msgs::PointCloud2> ("/image_points_last", 5);
  imagePointsLastPubPointer = &imagePointsLastPub;

  ros::Publisher imageShowPub = nh.advertise<sensor_msgs::Image>("/image/show", 1);
  imageShowPubPointer = &imageShowPub;

  ros::spin();

  return 0;
}
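
The listing above uses OpenCV's legacy C API (IplImage, cvGoodFeaturesToTrack, cvCalcOpticalFlowPyrLK), which matches the original project but is deprecated and largely removed in recent OpenCV releases. For readers who want to map the core detect-then-track step onto the current C++ API, here is a minimal sketch; it is not part of V-LOAM, the function detectAndTrack and its parameter values are illustrative only, and it omits the subregion bookkeeping, Harris filtering, and ROS plumbing:

#include <opencv2/imgproc.hpp>
#include <opencv2/video/tracking.hpp>
#include <vector>

// Detect corners in the last frame and track them into the current frame,
// mirroring cvGoodFeaturesToTrack + cvCalcOpticalFlowPyrLK in the node above.
void detectAndTrack(const cv::Mat& imageLast, const cv::Mat& imageCur)
{
  std::vector<cv::Point2f> featuresLast, featuresCur;

  // Quality level 0.1, min distance 5.0, and the 192-feature cap follow the listing's parameters.
  cv::goodFeaturesToTrack(imageLast, featuresLast, 192, 0.1, 5.0);

  std::vector<unsigned char> status;
  std::vector<float> error;
  // 15x15 window, 3 pyramid levels, 30 iterations or eps = 0.01, as in the listing.
  cv::calcOpticalFlowPyrLK(imageLast, imageCur, featuresLast, featuresCur, status, error,
                           cv::Size(15, 15), 3,
                           cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 30, 0.01));

  // status[i] != 0 means feature i was tracked successfully; the node would then apply its
  // track-distance and boundary checks before publishing the surviving features.
}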
Original post: https://www.cnblogs.com/zhchp-blog/p/8735132.html