
cv_core, cv_proc, Kalmanfilter

youchen 5 years ago
Commit
f0c615b637

+ 2 - 0
cpp_pkgs/src/basicTest.cpp

@@ -79,6 +79,8 @@ int main(){
     trace(x, c);
     trace2(1);
     std::cout<<int(-2.5)<<std::endl;
+    //std::cout.setf(std::ios::hex, std::ios::basefield);
+    //std::cout<< ( -2 )<<std::endl;
 }
 
 #endif
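
The two commented-out lines above switch std::cout into hexadecimal and print -2, which exposes the two's-complement bit pattern; that same pattern is what the new DFT code in cv_core.cpp relies on when it crops the spectrum with magI.cols & -2 (the comment "-2 = fffffffe, ignore the LSB"). A minimal sketch of both effects, assuming a 32-bit int:

#include <iostream>

int main()
{
    std::cout << int(-2.5) << std::endl;                 // -2: float-to-int conversion truncates toward zero
    std::cout.setf(std::ios::hex, std::ios::basefield);
    std::cout << -2 << std::endl;                        // fffffffe on a 32-bit int (two's complement)
    std::cout << (641 & -2) << std::endl;                // 280 (hex for 640): AND with -2 clears the lowest bit
    return 0;
}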

+ 3 - 0
cv_test/CMakeLists.txt

@@ -134,6 +134,9 @@ include_directories(
 ## The recommended prefix ensures that target names across packages don't collide
 add_executable(${PROJECT_NAME}_node src/cv_core.cpp)
 target_link_libraries(${PROJECT_NAME}_node ${catkin_LIBRARIES} ${OpenCV_LIBS})
+
+add_executable(${PROJECT_NAME}_improc src/cv_improc.cpp)
+target_link_libraries(${PROJECT_NAME}_improc ${catkin_LIBRARIES} ${OpenCV_LIBS})
 ## Rename C++ executable without prefix
 ## The above recommended prefix causes long target names, the following renames the
 ## target back to the shorter version for ease of user use

BIN
cv_test/foreGround.png


+ 24 - 0
cv_test/my.yaml

@@ -0,0 +1,24 @@
+%YAML:1.0
+---
+iterationNr: 100
+strings:
+   - "image1.jpg"
+   - Awesomeness
+   - "./src/cv_test/baboon.jpg"
+Mapping:
+   One: 1
+   Two: 2
+R: !!opencv-matrix
+   rows: 3
+   cols: 3
+   dt: u
+   data: [ 1, 0, 0, 0, 1, 0, 0, 0, 1 ]
+T: !!opencv-matrix
+   rows: 3
+   cols: 1
+   dt: d
+   data: [ 0., 0., 0. ]
+MyData:
+   A: 97
+   X: 3.1415926535897931e+00
+   id: mydata1234
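
This file is what the (currently commented-out) cv::FileStorage write block added to cv_core.cpp below produces. Reading it back follows the mirror pattern; a minimal sketch of the read side, assuming the same relative path used elsewhere in this commit:

#include <iostream>
#include <opencv2/core.hpp>

int main()
{
    cv::FileStorage fs("./src/cv_test/my.yaml", cv::FileStorage::READ);
    if (!fs.isOpened()) { std::cerr << "Failed to open my.yaml" << std::endl; return 1; }

    int iterationNr = (int)fs["iterationNr"];   // 100
    cv::Mat R, T;
    fs["R"] >> R;                               // 3x3 identity, dt: u (CV_8U)
    fs["T"] >> T;                               // 3x1 zeros,    dt: d (CV_64F)

    cv::FileNode strings = fs["strings"];       // sequence node
    for (cv::FileNodeIterator it = strings.begin(); it != strings.end(); ++it)
        std::cout << (std::string)*it << std::endl;

    std::cout << "iterationNr = " << iterationNr << "\nR = " << R << "\nT = " << T << std::endl;
    return 0;
}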

+ 437 - 10
cv_test/src/cv_core.cpp

@@ -1,28 +1,455 @@
 #include <stdio.h>
 #include <opencv2/opencv.hpp>
 
+/***************** Mat to vector **********************/
+template<typename _Tp>
+std::vector<_Tp> convertMat2Vector(const cv::Mat &mat)
+{
+    return (std::vector<_Tp>)(mat.reshape(1, 1)); // keep the channel count, flatten row-wise into a single row
+}
+ 
+/****************** vector to Mat *********************/
+template<typename _Tp>
+cv::Mat convertVector2Mat(std::vector<_Tp> v, int channels, int rows)
+{
+    cv::Mat mat = cv::Mat(v); // wrap the vector as a single-column Mat
+    cv::Mat dest = mat.reshape(channels, rows).clone(); // NOTE: must clone(); otherwise the returned Mat references data owned by the temporary
+	return dest;
+}
+
+// Sharpening kernel
+//              j-1                     -1
+//  j-nChannels  j  j+nChannels     -1   5  -1
+//              j+1                     -1
+void Sharpen(const cv::Mat& myImage,cv::Mat& Result)
+{
+    CV_Assert(myImage.depth() == CV_8U);  // accept only uchar images
+    const int nChannels = myImage.channels();
+    Result.create(myImage.size(),myImage.type());
+    for(int j = 1 ; j < myImage.rows-1; ++j)
+    {
+        const uchar* previous = myImage.ptr<uchar>(j - 1);
+        const uchar* current  = myImage.ptr<uchar>(j    );
+        const uchar* next     = myImage.ptr<uchar>(j + 1);
+        uchar* output = Result.ptr<uchar>(j);
+        for(int i= nChannels;i < nChannels*(myImage.cols-1); ++i)
+        {
+            *output++ = cv::saturate_cast<uchar>(5*current[i]
+                         -current[i-nChannels] - current[i+nChannels] - previous[i] - next[i]);
+        }
+    }
+    Result.row(0).setTo(cv::Scalar(0));
+    Result.row(Result.rows-1).setTo(cv::Scalar(0));
+    Result.col(0).setTo(cv::Scalar(0));
+    Result.col(Result.cols-1).setTo(cv::Scalar(0));
+}
+
+class MyData
+{
+public:
+    MyData() : A(0), X(0), id()
+    {
+    }
+    explicit MyData(int) : A(97), X(CV_PI), id("mydata1234") // explicit to avoid implicit conversion
+    {
+    }
+    void write(cv::FileStorage &fs) const //Write serialization for this class
+    {
+        fs << "{"
+           << "A" << A << "X" << X << "id" << id << "}";
+    }
+    void read(const cv::FileNode &node) //Read serialization for this class
+    {
+        A = (int)node["A"];
+        X = (double)node["X"];
+        id = (std::string)node["id"];
+    }
+
+public: // Data Members
+    int A;
+    double X;
+    std::string id;
+};
+//These write and read functions must be defined for the serialization in FileStorage to work
+static void write(cv::FileStorage &fs, const std::string &, const MyData &x)
+{
+    x.write(fs);
+}
+static void read(const cv::FileNode &node, MyData &x, const MyData &default_value = MyData())
+{
+    if (node.empty())
+        x = default_value;
+    else
+        x.read(node);
+}
+// This function will print our custom class to the console
+static std::ostream &operator<<(std::ostream &out, const MyData &m)
+{
+    out << "{ id = " << m.id << ", ";
+    out << "X = " << m.X << ", ";
+    out << "A = " << m.A << "}";
+    return out;
+}
+
 int main(int argc, char** argv )
 {
     char * addr = "./src/cv_test/backGround.jpg";
     cv::String imageName(addr);
     //create the Mat
-    cv::Mat image;
+    cv::Mat image, foreground;
     //read the images
     image = cv::imread(imageName, 1 );
+    foreground = cv::imread("./src/cv_test/foreGround.png", 1);
     //check that the images were loaded
-    if(!image.empty() && image.data){
-        //create a window
-        cv::namedWindow("Display Image", cv::WINDOW_AUTOSIZE );
-        //show the image
-        cv::imshow("Display Image", image);
+    if(!image.empty() && image.data && !foreground.empty()){
+        // //create a window
+        // cv::namedWindow("Display Image", cv::WINDOW_AUTOSIZE );
+        // //show the image
+        // cv::imshow("Display Image", image);
         //convert the color image to grayscale
         cv::Mat gray_image;
         cv::cvtColor(image, gray_image, cv::COLOR_BGR2GRAY);
-        cv::namedWindow("Display Image2", cv::WINDOW_AUTOSIZE );
+        cv::namedWindow("Display Image2", cv::WINDOW_FREERATIO );
         cv::imshow("Display Image2", gray_image);
-        //save the image
-        cv::imwrite( "./src/cv_test/Gray_Image.jpg", gray_image);
-        //wait for a key press
+        // //save the image
+        // cv::imwrite( "./src/cv_test/Gray_Image.jpg", gray_image);
+
+        {//Mat <-> vector conversion
+            // //https://blog.csdn.net/guyuealian/article/details/80253066
+            // std::vector<uchar> v = convertMat2Vector<uchar>(gray_image);
+            // cv::Mat dest = convertVector2Mat<uchar>(v, 1, 352); //rebuild as a 1-channel Mat with 352 rows
+            // cv::namedWindow("Display Image dest", cv::WINDOW_KEEPRATIO );
+            // cv::imshow("Display Image dest", dest);
+        }
+
+        {//creating Mats
+            // //shallow copies
+            // cv::Mat C(gray_image);
+            // cv::Mat D(C, cv::Rect(0, 0, 400, 400));
+            // cv::Mat E = C(cv::Range::all(), cv::Range(1,100));
+            // std::cout<<*C.data<<", "<<*D.data<<std::endl;
+            // // cv::namedWindow("Display Image D", cv::WINDOW_AUTOSIZE );
+            // // cv::imshow("Display Image D", D);
+            // // cv::namedWindow("Display Image E", cv::WINDOW_AUTOSIZE );
+            // // cv::imshow("Display Image E", E);
+            // //deep copies
+            // cv::Mat F = D.clone();
+            // cv::Mat G; E.copyTo(G);
+            // std::cout<<*D.data<<", "<<*F.data<<std::endl;
+            // // cv::namedWindow("Display Image F", cv::WINDOW_AUTOSIZE);
+            // // cv::imshow("Display Image F", F);
+            // // cv::namedWindow("Display Image G", cv::WINDOW_AUTOSIZE );
+            // // cv::imshow("Display Image G", G);
+
+            // cv::Mat M(6, 6, CV_8UC3, cv::Scalar(255, 0, 255));//BGR
+            // std::cout << "M = " << M << std::endl;
+            // cv::namedWindow("Display Image M", cv::WINDOW_KEEPRATIO );
+            // cv::imshow("Display Image M", M);
+            // M.create(12, 3, CV_8UC3);
+            // std::cout << "M = " << M << std::endl;
+            // cv::namedWindow("Display Image N", cv::WINDOW_KEEPRATIO );
+            // cv::imshow("Display Image N", M);
+
+            // cv::Mat K = cv::Mat::eye(4, 4, CV_64F);
+            // std::cout << "K = " << K << std::endl;
+            // cv::Mat O = cv::Mat::ones(5, 5, CV_32F);
+            // std::cout << "O = " << O << std::endl;
+            // cv::Mat Z = cv::Mat::zeros(6, 6, CV_8UC1);
+            // std::cout << "Z = " << Z << std::endl;
+        }
+
+        {//sharpening
+            // cv::Mat result, result2;
+            // //manual sharpening with the hand-rolled kernel loop
+            // Sharpen(image, result);
+            // cv::namedWindow("Display sharpen 1", cv::WINDOW_KEEPRATIO);
+            // cv::imshow("Display sharpen 1", result);
+            // //the library routine: filter2D with an explicit kernel
+            // cv::Mat kernel = (cv::Mat_<char>(3, 3) << 1, 1, 1,
+            //                                         1, -9, 1,
+            //                                         1, 1, 1);
+            // cv::filter2D( image, result2, image.depth(), kernel );
+            // cv::namedWindow("Display sharpen 2", cv::WINDOW_KEEPRATIO);
+            // cv::imshow("Display sharpen 2", result2);
+        }
+
+        {//per-pixel image operations
+            // // //timing
+            // // double t = (double)cv::getTickCount();
+            // // int x = 5, y = 2;
+            // // cv::Scalar intensity = gray_image.at<uchar>(y, x);
+            // // cv::Scalar intensity2 = gray_image.at<uchar>(cv::Point(x, y));
+            // // cv::Vec3b intensity3 = image.at<cv::Vec3b>(y, x);
+            // // uchar blue = intensity3.val[0];
+            // // uchar green = intensity3.val[1];
+            // // uchar red = intensity3.val[2];
+            // // std::cout << "intensity: " << intensity << ", " << intensity2 << ", " << intensity3 << std::endl;
+            // // std::cout << "color: " << int(blue) << ", " << int(green) << ", " << int(red) << std::endl;
+            // if (image.channels() == 3)
+            // {
+            //     for (int j = 0; j < image.rows; j++)
+            //     {
+            //         for (int i = 0; i < image.cols; i++)
+            //         {
+            //             cv::Vec3b point = image.at<cv::Vec3b>(j, i);
+            //             uchar temp = point.val[2];
+            //             point.val[2] = point.val[1];
+            //             point.val[1] = point.val[0];
+            //             point.val[0] = temp;
+            //             image.at<cv::Vec3b>(j, i) = point;
+            //         }
+            //     }
+            //     cv::namedWindow("Display manipulated image", cv::WINDOW_KEEPRATIO);
+            //     cv::imshow("Display manipulated image", image);
+            // }
+            // // t = ((double)cv::getTickCount() - t) / cv::getTickFrequency();
+            // // std::cout << "Times passed in seconds: " << t << std::endl;
+        }
+
+        {//linear blending
+            // double alpha = 0.5;
+            // cv::Mat result;
+            // cv::addWeighted(image,alpha, foreground, 1-alpha, 0.0, result);
+            // cv::imshow("linear blend", result);
+        }
+
+        {//contrast, brightness and gamma correction
+            // // When γ<1, the original dark regions will be brighter and 
+            // // the histogram will be shifted to the right whereas it will be the opposite with γ>1
+            // double alpha = 1.25, beta = -50, gamma = 2, temp = 0;
+            // cv::Mat result = cv::Mat::zeros(image.size(), image.type());
+            // for (int i = 0; i < image.rows; i++)
+            // {
+            //     for (int j = 0; j < image.cols; j++)
+            //     {
+            //         for (int ch = 0; ch < image.channels(); ch++)
+            //         {
+            //             temp = alpha * image.at<cv::Vec3b>(i, j)[ch] + beta;
+            //             //std::cout<<temp<<std::endl;
+            //             result.at<cv::Vec3b>(i, j)[ch] = cv::saturate_cast<uchar>( pow( temp/255, gamma)*255 );
+            //         }
+            //     }
+            // }
+            // cv::imshow("contrast & lightness",result);
+        }
+
+        {//basic drawing primitives
+            // // cv::Point p0(50,50), p1(100,100); 
+            // // double b = 255, g = 0, r = 255;
+            // // cv::Scalar vector4(b,g,r);
+            // int width = 500;
+            // cv::Mat atom = cv::Mat::zeros(width, width, CV_8UC3);
+            // // cv::line(atom,
+            // //      p0,
+            // //      p1,
+            // //      cv::Scalar(255, 0, 0),
+            // //      2,
+            // //      cv::LINE_8);
+            // // cv::rectangle(atom,
+            // //           cv::Point(210,200),
+            // //           p1,
+            // //           cv::Scalar(0, 255, 255),
+            // //           cv::FILLED,
+            // //           cv::LINE_8);
+            // // cv::circle(atom,
+            // //        p1,
+            // //        width / 32,
+            // //        cv::Scalar(0, 0, 255),
+            // //        cv::FILLED,
+            // //        cv::LINE_8);
+            // // cv::ellipse(atom,
+            // //         p0,
+            // //         cv::Size(width / 4, width / 16),
+            // //         M_PI_4,
+            // //         0,
+            // //         360,
+            // //         cv::Scalar(255, 0, 0),
+            // //         2,
+            // //         cv::LINE_8);
+            // // cv::Point rook_points[1][20];
+            // // rook_points[0][0] = cv::Point(width / 4, 7 * width / 8);
+            // // rook_points[0][1] = cv::Point(3 * width / 4, 7 * width / 8);
+            // // rook_points[0][2] = cv::Point(3 * width / 4, 13 * width / 16);
+            // // rook_points[0][3] = cv::Point(11 * width / 16, 13 * width / 16);
+            // // rook_points[0][4] = cv::Point(19 * width / 32, 3 * width / 8);
+            // // rook_points[0][5] = cv::Point(3 * width / 4, 3 * width / 8);
+            // // rook_points[0][6] = cv::Point(3 * width / 4, width / 8);
+            // // rook_points[0][7] = cv::Point(26 * width / 40, width / 8);
+            // // rook_points[0][8] = cv::Point(26 * width / 40, width / 4);
+            // // rook_points[0][9] = cv::Point(22 * width / 40, width / 4);
+            // // rook_points[0][10] = cv::Point(22 * width / 40, width / 8);
+            // // rook_points[0][11] = cv::Point(18 * width / 40, width / 8);
+            // // rook_points[0][12] = cv::Point(18 * width / 40, width / 4);
+            // // rook_points[0][13] = cv::Point(14 * width / 40, width / 4);
+            // // rook_points[0][14] = cv::Point(14 * width / 40, width / 8);
+            // // rook_points[0][15] = cv::Point(width / 4, width / 8);
+            // // rook_points[0][16] = cv::Point(width / 4, 3 * width / 8);
+            // // rook_points[0][17] = cv::Point(13 * width / 32, 3 * width / 8);
+            // // rook_points[0][18] = cv::Point(5 * width / 16, 13 * width / 16);
+            // // rook_points[0][19] = cv::Point(width / 4, 13 * width / 16);
+            // // const cv::Point *ppt[1] = {rook_points[0]};
+            // // int npt[] = {20};
+            // // fillPoly(atom,
+            // //          ppt,
+            // //          npt,
+            // //          1,
+            // //          cv::Scalar(255, 255, 255),
+            // //          cv::LINE_8);
+
+            // cv::RNG rnd;
+            // cv::Size textsize = cv::getTextSize("OpenCV forever!", cv::FONT_HERSHEY_COMPLEX, 3, 5, 0);
+            // cv::Point org(150, 150);
+
+            // cv::putText(atom, "OpenCV forever!", org, cv::FONT_HERSHEY_COMPLEX, 1,
+            //         cv::Scalar(rnd.uniform(0,255), rnd.uniform(0,255), rnd.uniform(0,255)), 3, cv::LINE_8);
+            // cv::namedWindow("atom", cv::WINDOW_FREERATIO);
+            // cv::moveWindow("atom", 50, 50);
+            // cv::imshow("atom", atom);
+        }
+
+        {//DFT & IDFT
+            // https://blog.csdn.net/abcjennifer/article/details/7622228
+            cv::Mat padded; //expand input image to optimal size
+            int m = cv::getOptimalDFTSize(gray_image.rows);
+            int n = cv::getOptimalDFTSize(gray_image.cols); // on the border add zero values
+            cv::copyMakeBorder(gray_image, padded, 0, m - gray_image.rows, 0, n - gray_image.cols, cv::BORDER_CONSTANT, cv::Scalar::all(0));
+            cv::Mat planes[] = {cv::Mat_<float>(padded), cv::Mat::zeros(padded.size(), CV_32F)};
+            cv::Mat complexI;
+            cv::merge(planes, 2, complexI); // Add to the expanded another plane with zeros
+            cv::dft(complexI, complexI);    // this way the result may fit in the source matrix
+            
+            // compute the magnitude and switch to logarithmic scale
+            // => log(1 + sqrt(Re(DFT(I))^2 + Im(DFT(I))^2))
+            cv::split(complexI, planes);                    // planes[0] = Re(DFT(I)), planes[1] = Im(DFT(I))
+            cv::Mat magI;
+            cv::magnitude(planes[0], planes[1], magI); // planes[0] = magnitude
+            magI += cv::Scalar::all(1); // switch to logarithmic scale
+            cv::log(magI, magI);
+            // crop the spectrum, if it has an odd number of rows or columns, -2 = fffffffe, ignore the LSB
+            magI = magI(cv::Rect(0, 0, magI.cols & -2, magI.rows & -2));
+            // rearrange the quadrants of Fourier image  so that the origin is at the image center
+            int cx = magI.cols / 2;
+            int cy = magI.rows / 2;
+            //  q0 q1
+            //  q2 q3
+            cv::Mat q0(magI, cv::Rect(0, 0, cx, cy));   // Top-Left - Create a ROI per quadrant
+            cv::Mat q1(magI, cv::Rect(cx, 0, cx, cy));  // Top-Right
+            cv::Mat q2(magI, cv::Rect(0, cy, cx, cy));  // Bottom-Left
+            cv::Mat q3(magI, cv::Rect(cx, cy, cx, cy)); // Bottom-Right
+            cv::Mat tmp;                            // swap quadrants (Top-Left with Bottom-Right)
+            q0.copyTo(tmp);
+            q3.copyTo(q0);
+            tmp.copyTo(q3);
+            q1.copyTo(tmp); // swap quadrant (Top-Right with Bottom-Left)
+            q2.copyTo(q1);
+            tmp.copyTo(q2);
+            //  q3 q2
+            //  q1 q0
+            cv::normalize(magI, magI, 0, 1, cv::NORM_MINMAX); // Transform the matrix with float values into a
+                                                      // viewable image form (float between values 0 and 1).
+            cv::Mat mask = cv::Mat::zeros(magI.size(), CV_8UC1);
+            cv::Mat img, img2;
+            // center: overall outline / global intensity information
+            // inner ring: block and region features
+            // next ring out: edges, lines and texture detail
+            // outer ring: high-frequency noise
+            double ratio = 3.0;
+            // keep the high-frequency detail
+            // mask.setTo(255);
+            // mask(cv::Rect(magI.cols/ratio,magI.rows/ratio,magI.cols*(ratio-2.0)/ratio, magI.rows*(ratio-2.0)/ratio)).setTo(0);
+            // keep the low-frequency content
+            mask(cv::Rect(magI.cols / ratio, magI.rows / ratio, magI.cols * (ratio - 2.0) / ratio, magI.rows * (ratio - 2.0) / ratio)).setTo(255);
+            //cv::imshow("mask",mask);
+
+            magI.copyTo(img, mask);
+            cv::namedWindow("spectrum magnitude", cv::WINDOW_FREERATIO);
+            cv::imshow("spectrum magnitude", img);
+            // std::cout<<img(cv::Rect(img.cols/2.0-5,img.rows/2.0-5,10,10))<<std::endl;
+
+            complexI.copyTo(img2, mask);
+            cv::idft(img2, img2);
+            cv::split(img2, planes);
+            cv::Mat magR = planes[0];
+            magR += cv::Scalar::all(1); // switch to logarithmic scale
+            cv::log(magR, magR);
+            cv::normalize(magR, magR, -1.0, 0.5, cv::NORM_MINMAX);
+            cv::namedWindow("spatial domain", cv::WINDOW_FREERATIO);
+            cv::imshow("spatial domain", magR);
+        }
+
+        {//yaml & xml
+            // std::string filename = "./src/cv_test/my.yaml";
+            // { //write
+            //     cv::Mat R = cv::Mat_<uchar>::eye(3, 3),
+            //         T = cv::Mat_<double>::zeros(3, 1);
+            //     MyData m(1);
+            //     cv::FileStorage fs(filename, cv::FileStorage::WRITE);
+            //     fs << "iterationNr" << 100;
+            //     fs << "strings"
+            //        << "["; // text - string sequence
+            //     fs << "image1.jpg"
+            //        << "Awesomeness"
+            //        << "./src/cv_test/baboon.jpg";
+            //     fs << "]";       // close sequence
+            //     fs << "Mapping"; // text - mapping
+            //     fs << "{"
+            //        << "One" << 1;
+            //     fs << "Two" << 2 << "}";
+            //     fs << "R" << R; // cv::Mat
+            //     fs << "T" << T;
+            //     fs << "MyData" << m; // your own data structures
+            //     fs.release();        // explicit close
+            //     std::cout << "Write Done." << std::endl;
+            // }
+            // { //read
+            //     std::cout << std::endl
+            //          << "Reading: " << std::endl;
+            //     cv::FileStorage fs;
+            //     fs.open(filename, cv::FileStorage::READ);
+            //     int itNr;
+            //     //fs["iterationNr"] >> itNr;
+            //     itNr = (int)fs["iterationNr"];
+            //     std::cout << itNr;
+            //     if (!fs.isOpened())
+            //     {
+            //         std::cerr << "Failed to open " << filename << std::endl;
+            //         return 1;
+            //     }
+            //     cv::FileNode n = fs["strings"]; // Read string sequence - Get node
+            //     if (n.type() != cv::FileNode::SEQ)
+            //     {
+            //         std::cerr << "strings is not a sequence! FAIL" << std::endl;
+            //         return 1;
+            //     }
+            //     cv::FileNodeIterator it = n.begin(), it_end = n.end(); // Go through the node
+            //     for (; it != it_end; ++it)
+            //         std::cout << (std::string)*it << std::endl;
+            //     n = fs["Mapping"]; // Read mappings from a sequence
+            //     std::cout << "Two  " << (int)(n["Two"]) << "; ";
+            //     std::cout << "One  " << (int)(n["One"]) << std::endl
+            //          << std::endl;
+            //     MyData m;
+            //     cv::Mat R, T;
+            //     fs["R"] >> R; // Read cv::Mat
+            //     fs["T"] >> T;
+            //     fs["MyData"] >> m; // Read your own structure_
+            //     std::cout << std::endl
+            //          << "R = " << R << std::endl;
+            //     std::cout << "T = " << T << std::endl
+            //          << std::endl;
+            //     std::cout << "MyData = " << std::endl
+            //          << m << std::endl
+            //          << std::endl;
+            //     //Show default behavior for non existing nodes
+            //     std::cout << "Attempt to read NonExisting (should initialize the data structure with its default).";
+            //     fs["NonExisting"] >> m;
+            //     std::cout << std::endl
+            //          << "NonExisting = " << std::endl
+            //          << m << std::endl;
+            // }
+        }
+
+        //wait for a key press
         cv::waitKey(0);
     }else
     {
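
A side note on the contrast/brightness/gamma block above: it evaluates pow((alpha*p + beta)/255, gamma) per pixel in a triple loop. Because the mapping depends only on the 8-bit input value, the usual OpenCV idiom is to precompute a 256-entry lookup table once and apply it with cv::LUT; a sketch of that alternative (same meaning of alpha/beta/gamma as in the loop above, plus a clamp so non-integer gammas stay well defined):

#include <algorithm>
#include <cmath>
#include <opencv2/opencv.hpp>

// Build a 1x256 lookup table for y = ((alpha*x + beta)/255)^gamma * 255 and apply it
// to every pixel and channel at once. Works for CV_8UC1 and CV_8UC3 input.
cv::Mat applyGammaContrast(const cv::Mat &src, double alpha, double beta, double gamma)
{
    cv::Mat lut(1, 256, CV_8U);
    uchar *p = lut.ptr<uchar>();
    for (int x = 0; x < 256; ++x)
    {
        double t = std::max(0.0, alpha * x + beta);              // clamp negative intermediates
        p[x] = cv::saturate_cast<uchar>(std::pow(t / 255.0, gamma) * 255.0);
    }
    cv::Mat dst;
    cv::LUT(src, lut, dst);                                      // table lookup instead of the per-pixel loop
    return dst;
}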

+ 104 - 0
cv_test/src/cv_improc.cpp

@@ -0,0 +1,104 @@
+#include <iostream>
+#include "opencv2/imgproc.hpp"
+#include "opencv2/imgcodecs.hpp"
+#include "opencv2/highgui.hpp"
+
+int DELAY_CAPTION = 1500;
+int DELAY_BLUR = 500;
+int MAX_KERNEL_LENGTH = 31;
+
+cv::Mat src; cv::Mat dst;
+char window_name[] = "Smoothing Demo";
+//display the working image
+int display_dst( int delay )
+{
+    cv::imshow( window_name, dst );
+    int c = cv::waitKey ( delay );
+    if( c >= 0 ) { return -1; }
+    return 0;
+}
+//display a caption
+int display_caption( const char* caption )
+{
+    dst = cv::Mat::zeros( src.size(), src.type() );
+    putText( dst, caption,
+             cv::Point( src.cols/4, src.rows/2),
+             cv::FONT_HERSHEY_COMPLEX, 1, cv::Scalar(255, 255, 255) );
+    return display_dst(DELAY_CAPTION);
+}
+
+int main(){
+    src = cv::imread("./src/cv_test/Gray_Image.jpg");
+    if (src.empty())
+    {
+        std::cout << "empty image" << std::endl;
+        return -1;
+    }
+
+    { //smoothing
+        // if (display_caption("Original Image") != 0)
+        // {
+        //     return 0;
+        // }
+        // dst = src.clone();
+        // if (display_dst(DELAY_CAPTION) != 0)
+        // {
+        //     return 0;
+        // }
+        // if (display_caption("Homogeneous Blur") != 0)
+        // {
+        //     return 0;
+        // }
+        // for (int i = 1; i < MAX_KERNEL_LENGTH; i = i + 2)
+        // {
+        //     blur(src, dst, cv::Size(i, i), cv::Point(-1, -1));
+        //     if (display_dst(DELAY_BLUR) != 0)
+        //     {
+        //         return 0;
+        //     }
+        // }
+        // if (display_caption("Gaussian Blur") != 0)
+        // {
+        //     return 0;
+        // }
+        // for (int i = 1; i < MAX_KERNEL_LENGTH; i = i + 2)
+        // {
+        //     GaussianBlur(src, dst, cv::Size(i, i), 0, 0);
+        //     if (display_dst(DELAY_BLUR) != 0)
+        //     {
+        //         return 0;
+        //     }
+        // }
+        // if (display_caption("Median Blur") != 0)
+        // {
+        //     return 0;
+        // }
+        // for (int i = 1; i < MAX_KERNEL_LENGTH; i = i + 2)
+        // {
+        //     medianBlur(src, dst, i);
+        //     if (display_dst(DELAY_BLUR) != 0)
+        //     {
+        //         return 0;
+        //     }
+        // }
+        // if (display_caption("Bilateral Blur") != 0)
+        // {
+        //     return 0;
+        // }
+        // for (int i = 1; i < MAX_KERNEL_LENGTH; i = i + 2)
+        // {
+        //     bilateralFilter(src, dst, i, i * 2, i / 2);
+        //     if (display_dst(DELAY_BLUR) != 0)
+        //     {
+        //         return 0;
+        //     }
+        // }
+        // display_caption("Done!");
+    }
+
+    {
+        
+    }
+
+    return 0;
+}
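
The commented-out demo above sweeps kernel sizes for the four classic OpenCV smoothers. As a compact reference, a sketch that applies each of them once with a fixed kernel size (the kernel size and sigma values here are illustrative, not taken from the commit):

#include <opencv2/imgproc.hpp>

// Assumes img is a valid CV_8UC1 or CV_8UC3 image, e.g. the src loaded in main() above.
void smoothOnce(const cv::Mat &img)
{
    cv::Mat box, gauss, median, bilateral;
    int k = 7;                                                   // odd kernel size, illustrative
    cv::blur(img, box, cv::Size(k, k));                          // normalized box filter: cheapest, blurs edges
    cv::GaussianBlur(img, gauss, cv::Size(k, k), 0);             // Gaussian weights, sigma derived from k
    cv::medianBlur(img, median, k);                              // order statistic: robust to salt-and-pepper noise
    cv::bilateralFilter(img, bilateral, k, k * 2.0, k / 2.0);    // edge-preserving, slowest of the four
}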

+ 3 - 2
scan_matcher_ICP/CMakeLists.txt

@@ -30,7 +30,7 @@ find_package(catkin REQUIRED COMPONENTS
   pcl_conversions
   PCL REQUIRED
 )
-
+find_package(Eigen3 REQUIRED)
 ## System dependencies are found with CMake's conventions
 # find_package(Boost REQUIRED COMPONENTS system)
 
@@ -43,6 +43,7 @@ include_directories(
   ${catkin_INCLUDE_DIRS} 
   ${csm_INCLUDE_DIRS} 
   ${PCL_INCLUDE_DIRS}
+  ${EIGEN3_INCLUDE_DIRS}
   ./src/
   )
 
@@ -170,7 +171,7 @@ add_library(ndt_omp
   src/pclomp/gicp_omp.cpp
 )
 #Create node
-add_executable(scan_matcher_ICP_node src/laser_scan_matcher_node.cpp src/laser_scan_matcher.cpp)
+add_executable(scan_matcher_ICP_node src/laser_scan_matcher_node.cpp src/laser_scan_matcher.cpp src/kalmanFilter.cpp)
 add_dependencies(scan_matcher_ICP_node
   ndt_omp
 )

+ 54 - 46
scan_matcher_ICP/scripts/demo.rviz

@@ -6,8 +6,8 @@ Panels:
       Expanded:
         - /Global Options1
         - /Status1
+        - /TF1
         - /TF1/Frames1
-        - /PointCloud23
       Splitter Ratio: 0.5
     Tree Height: 522
   - Class: rviz/Selection
@@ -86,55 +86,33 @@ Visualization Manager:
       Enabled: true
       Frame Timeout: 15
       Frames:
-        All Enabled: true
+        All Enabled: false
+        ICP:
+          Value: true
+        NDT:
+          Value: true
         base_link:
           Value: true
         laser:
-          Value: true
+          Value: false
         map:
-          Value: true
-      Marker Scale: 1.5
+          Value: false
+      Marker Scale: 0.100000001
       Name: TF
       Show Arrows: true
       Show Axes: true
       Show Names: true
       Tree:
         map:
+          ICP:
+            {}
+          NDT:
+            {}
           base_link:
             laser:
               {}
       Update Interval: 0
       Value: true
-    - Alpha: 1
-      Axes Length: 1
-      Axes Radius: 0.100000001
-      Class: rviz/PoseWithCovariance
-      Color: 255; 25; 0
-      Covariance:
-        Orientation:
-          Alpha: 0.5
-          Color: 255; 255; 127
-          Color Style: Unique
-          Frame: Local
-          Offset: 1
-          Scale: 1
-          Value: true
-        Position:
-          Alpha: 0.300000012
-          Color: 204; 51; 204
-          Scale: 1
-          Value: true
-        Value: true
-      Enabled: false
-      Head Length: 0.300000012
-      Head Radius: 0.100000001
-      Name: PoseWithCovariance
-      Shaft Length: 1
-      Shaft Radius: 0.0500000007
-      Shape: Arrow
-      Topic: /initialpose
-      Unreliable: false
-      Value: false
     - Alpha: 1
       Autocompute Intensity Bounds: true
       Autocompute Value Bounds:
@@ -177,7 +155,7 @@ Visualization Manager:
       Color: 85; 255; 127
       Color Transformer: FlatColor
       Decay Time: 0
-      Enabled: false
+      Enabled: true
       Invert Rainbow: false
       Max Color: 255; 255; 255
       Max Intensity: 4096
@@ -194,7 +172,7 @@ Visualization Manager:
       Unreliable: false
       Use Fixed Frame: true
       Use rainbow: true
-      Value: false
+      Value: true
     - Alpha: 1
       Autocompute Intensity Bounds: true
       Autocompute Value Bounds:
@@ -220,11 +198,41 @@ Visualization Manager:
       Size (Pixels): 2
       Size (m): 0.00999999978
       Style: Points
-      Topic: /current_cloud_pcl
+      Topic: /current_cloud_icp
       Unreliable: false
       Use Fixed Frame: true
       Use rainbow: false
       Value: true
+    - Alpha: 1
+      Autocompute Intensity Bounds: true
+      Autocompute Value Bounds:
+        Max Value: 10
+        Min Value: -10
+        Value: true
+      Axis: Z
+      Channel Name: intensity
+      Class: rviz/PointCloud2
+      Color: 255; 85; 255
+      Color Transformer: FlatColor
+      Decay Time: 0
+      Enabled: true
+      Invert Rainbow: false
+      Max Color: 255; 255; 255
+      Max Intensity: 4096
+      Min Color: 0; 0; 0
+      Min Intensity: 0
+      Name: PointCloud2
+      Position Transformer: XYZ
+      Queue Size: 10
+      Selectable: true
+      Size (Pixels): 2
+      Size (m): 0.00999999978
+      Style: Points
+      Topic: /current_cloud_ndt
+      Unreliable: false
+      Use Fixed Frame: true
+      Use rainbow: true
+      Value: true
   Enabled: true
   Global Options:
     Background Color: 0; 0; 0
@@ -250,25 +258,25 @@ Visualization Manager:
   Views:
     Current:
       Class: rviz/Orbit
-      Distance: 9.26748085
+      Distance: 1.92956924
       Enable Stereo Rendering:
         Stereo Eye Separation: 0.0599999987
         Stereo Focal Distance: 1
         Swap Stereo Eyes: false
         Value: false
       Focal Point:
-        X: 1.92733037
-        Y: -0.596374393
-        Z: 0.233556822
+        X: -0.188358277
+        Y: -0.24325183
+        Z: 0.224161208
       Focal Shape Fixed Size: true
       Focal Shape Size: 0.0500000007
       Invert Z Axis: false
       Name: Current View
       Near Clip Distance: 0.00999999978
-      Pitch: 1.56479633
+      Pitch: 1.55979633
       Target Frame: <Fixed Frame>
       Value: Orbit (rviz)
-      Yaw: 3.08356786
+      Yaw: 3.09856772
     Saved: ~
 Window Geometry:
   Displays:
@@ -286,5 +294,5 @@ Window Geometry:
   Views:
     collapsed: true
   Width: 1311
-  X: 578
-  Y: 98
+  X: 197
+  Y: 87

+ 156 - 0
scan_matcher_ICP/src/kalmanFilter.cpp

@@ -0,0 +1,156 @@
+#include "kalmanFilter.h"
+
+KalmanFilter::KalmanFilter()
+{
+    dt_ = 1.0/20.0; //20hz
+    int n = 3; // Number of states
+    int m = 2; // Number of measurements
+    Eigen::MatrixXd A(n, n); //3x3
+    Eigen::MatrixXd C(m, n); //2x3
+    Eigen::MatrixXd Q(n, n); //3x3
+    Eigen::MatrixXd R(m, m); //2x2
+    Eigen::MatrixXd P(n, n); //3x3
+    A << 1, dt_, 0, 0, 1, dt_, 0, 0, 1;
+    C << 1, 0, 0,
+        0, 1, 0;
+    Q << 1e-4, 1e-4, 1e-4,
+        1e-4, 1e-2, 1e-3,
+        1e-4, 1e-3, 1e-1;
+    R << 1e-4, 1e-3,
+        1e-3, 1e-2;
+    P << 1e-3, 1e-3, 1e-3,
+        1e-3, 1e1, 1e-1,
+        1e-3, 1e-1, 1e3;
+    init(A, C, Q, R, P);
+}
+
+void KalmanFilter::init(
+    const Eigen::MatrixXd& A,
+    const Eigen::MatrixXd& C,
+    const Eigen::MatrixXd& Q,
+    const Eigen::MatrixXd& R,
+    const Eigen::MatrixXd& P)
+{
+    m_ = C.rows();
+    n_ = A.rows();
+    tf::Quaternion quat(0,0,0,1);
+    tf::Vector3 T(0,0,0);
+    m_last_pose=tf::Transform(quat,T);
+    m_last_time=clock();
+    velocity.setZero();
+    for (int i = 0; i < 3; i++)
+    {
+        A_[i] = A;
+        C_[i] = C;
+        Q_[i] = Q;
+        R_[i] = R;
+        P_[i] = P;
+        Eigen::MatrixXd I(n_, n_);
+        I_[i] = I;
+        I_[i].setIdentity();
+        Eigen::MatrixXd temp1(n_, 1), temp2(n_, 1);
+        x_hat[i] = temp1;
+        x_hat[i].setZero();
+        x_hat_new[i] = temp2;
+    }
+}
+
+KalmanFilter::~KalmanFilter()
+{
+}
+
+void KalmanFilter::posesCombine()
+{
+    tf::Vector3 weightedPosition(0,0,0);
+    tf::Quaternion weightedQuat(0,0,0,1);
+    double weightedTheta = 0;
+    double sum=0, angSum=0;
+    for (int i = 0; i < poses_vec.size(); i++)
+    {
+        // weightedPosition += poses_vec[i].pose.getOrigin() * (1.0 / poses_vec[i].error);
+        // weightedTheta += poses_vec[i].theta * (1.0 / poses_vec[i].error);
+        // sum += 1.0 / poses_vec[i].error;
+
+        double dist = (poses_vec[i].pose.getOrigin() - m_last_pose.getOrigin()).length2();
+        double angDist = poses_vec[i].error;
+
+        // double dist = (poses_vec[i].pose.getOrigin() - m_last_pose.getOrigin()).length2();
+        // double angDist = (poses_vec[i].pose.getRotation() - m_last_pose.getRotation()).length2();
+
+        if(dist==0)
+            dist = 1.0 / 1e-7;
+        else
+            dist = 1.0 / dist;
+
+        if(angDist==0)
+            angDist = 1.0 / 1e-7;
+        else
+            angDist = 1.0 / angDist;
+        
+        weightedPosition += poses_vec[i].pose.getOrigin() * dist;
+        weightedTheta += poses_vec[i].theta * angDist;
+        sum += dist;
+        angSum += angDist;
+        // std::cout<<"--trans: "<<poses_vec[i].pose.getOrigin()[0]<<","<<poses_vec[i].pose.getOrigin()[1]<<", "<<poses_vec[i].pose.getOrigin()[2]<<", "<<dist<<std::endl;
+        std::cout<<"--theta: "<<poses_vec[i].theta<<", "<<angDist<<std::endl;
+        // printf("pose: %.3f, %.3f, %.3f\n",poses_vec[i].pose.getOrigin()[0], poses_vec[i].pose.getOrigin()[1], poses_vec[i].pose.getOrigin()[2]);
+        // std::cout<<"pose: \n"<<poses_vec[i].pose.getOrigin()<<std::endl;
+    }
+    weightedPosition /= (sum==0?1e-7:sum);
+    weightedTheta /= (angSum==0?1e-7:angSum);
+    printf("pose: %.3f, %.3f, %.3f %.3f\n",weightedPosition[0],weightedPosition[1],weightedPosition[2],weightedTheta);
+    weightedQuat.setW(cos(weightedTheta/2.0));
+    weightedQuat.setZ(sin(weightedTheta/2.0));
+    m_last_pose.setRotation(weightedQuat);
+    m_last_pose.setOrigin(weightedPosition);
+    poses_vec.clear();
+    m_last_time=clock();
+}
+
+void KalmanFilter::addData(tf::Pose pose, double error)
+{
+    Eigen::MatrixXd y(m_,1);
+    // std::cout<<"add 0 "<<std::endl;
+    
+    for (int i = 0; i < 3; i++)
+    {
+        P_[i] = A_[i] * P_[i] * A_[i].transpose() + Q_[i];
+        K_[i] = P_[i] * C_[i].transpose() * (C_[i] * P_[i] * C_[i].transpose() + R_[i]).inverse();
+        //std::cout << "K_: " << K_[i] << std::endl;
+        y(0,0) = pose.getOrigin()[i];
+        y(1,0) = (y(0,0)-m_last_pose.getOrigin()[i])/dt_;
+        //std::cout<<"hat "<<i<<" : "<<x_hat[i]<<std::endl;
+        x_hat_new[i] = A_[i] * x_hat[i] + K_[i] * (y - C_[i] * A_[i] * x_hat[i]);
+        // std::cout<<"part1: "<<A_ * x_hat[i]<<std::endl;
+        // std::cout<<"part2: "<<K_ * (y - C_ * A_ * x_hat[i])<<std::endl;
+        // std::cout<<"inner: "<<y - C_ * A_ * x_hat[i]<<std::endl;
+        // std::cout<<"y: "<<y<<std::endl;
+        // std::cout<<"inner: "<<C_ * A_ * x_hat[i]<<std::endl;
+        // std::cout<<x_hat[i].rows()<<x_hat[i].cols()<<std::endl;
+        P_[i] = (I_[i] - K_[i] * C_[i]) * P_[i];
+        x_hat[i] = x_hat_new[i];
+    }
+    // std::cout<<"add 2"<<std::endl;
+    tf::Vector3 Position(x_hat[0](0,0), x_hat[1](0,0), x_hat[2](0,0));
+    // tf::Quaternion Quat(0, 0, 0, 1);
+    // Quat.setRPY(0, 0, atan2(x_hat[1](0,0), x_hat[0](0,0)));
+    // std::cout<<"add 3"<<std::endl;
+    poseInfo pInfo;
+    // printf("quat: %.3f, %.3f, %.3f, %.3f\n",Quat.getX(),Quat.getY(),Quat.getZ(),Quat.getW());
+    pInfo.pose.setOrigin(Position);
+    pInfo.pose.setRotation(pose.getRotation());
+    pInfo.theta = atan2(pose.getRotation().getZ(), pose.getRotation().getW());
+    pInfo.error = error==0?1e-10:error;
+    poses_vec.push_back(pInfo);
+    printf("input: %.3f, %.3f, %.3f, %.3f\n",pose.getOrigin()[0], pose.getOrigin()[1], pose.getOrigin()[2], pInfo.theta);
+    // printf("origin: %.3f, %.3f, %.3f\n",x_hat[0](0,0), x_hat[1](0,0), x_hat[2](0,0));
+    // std::cout<<"add 4"<<std::endl;
+}
+
+tf::Pose KalmanFilter::filter()
+{
+    posesCombine();
+    return m_last_pose;
+
+}
+
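
For reference, the per-axis update performed inside addData() is the standard Kalman predict-plus-measurement update written with the member matrices of this class:

\begin{aligned}
P &\leftarrow A P A^{\top} + Q \\
K &= P C^{\top}\,(C P C^{\top} + R)^{-1} \\
\hat{x} &\leftarrow A\hat{x} + K\,(y - C A \hat{x}) \\
P &\leftarrow (I - K C)\,P
\end{aligned}

with per-axis state \hat{x} = (\text{position}, \text{velocity}, \text{acceleration})^{\top}, A the constant-acceleration transition built from dt_ = 1/20 s, C selecting position and velocity, and y stacking the measured position with the finite-difference velocity (pos - last_pos)/dt computed in addData().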

+ 51 - 0
scan_matcher_ICP/src/kalmanFilter.h

@@ -0,0 +1,51 @@
+#ifndef __KALMAN__FILTER__H
+#define __KALMAN__FILTER__H
+
+#include <tf/transform_datatypes.h>
+#include <ctime>
+#include <vector>
+#include <Eigen/Dense>
+
+class KalmanFilter
+{
+private:
+
+    typedef struct POSEINFO{
+        tf::Pose pose;
+        double theta;
+        double error;
+    } poseInfo;
+    tf::Pose                    m_last_pose;
+    std::vector<poseInfo>       poses_vec;
+    tf::Vector3                 velocity;
+    clock_t                     m_last_time;
+    // Discrete time step
+    double dt_;
+    // Matrices for computation
+    Eigen::MatrixXd A_[3], C_[3], Q_[3], R_[3], P_[3], K_[3], I_[3], x_hat[3], x_hat_new[3];
+    // System dimensions
+    void posesCombine();
+
+public:
+    int m_, n_;
+    /**
+     * Create a Kalman filter with the specified matrices.
+     *   A - System dynamics (state transition) matrix
+     *   C - Output (measurement selection) matrix
+     *   Q - Process noise covariance (prediction noise)
+     *   R - Measurement noise covariance
+     *   P - Estimate error covariance (prediction)
+     */
+    void init(
+        const Eigen::MatrixXd &A,
+        const Eigen::MatrixXd &C,
+        const Eigen::MatrixXd &Q,
+        const Eigen::MatrixXd &R,
+        const Eigen::MatrixXd &P);
+    KalmanFilter();
+    ~KalmanFilter();
+    void addData(tf::Pose pose1,double error1);
+    tf::Pose filter();
+};
+
+#endif
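
A minimal usage sketch of this class as it is driven from laser_scan_matcher.cpp in this commit: the ICP and NDT results are pushed in with their error scores, and filter() returns the fused pose (the function name below and the input values are placeholders; the 500.0 scaling mirrors the caller):

#include <tf/transform_datatypes.h>
#include "kalmanFilter.h"

// tf_icp / tf_ndt come from the two scan matchers; the error values are their respective fit scores.
tf::Pose fusePoses(const tf::Pose &tf_icp, double icp_error,
                   const tf::Pose &tf_ndt, double ndt_fitness)
{
    static KalmanFilter kf;                   // default ctor fixes dt = 1/20 s and the A, C, Q, R, P matrices
    kf.addData(tf_icp, icp_error);            // per-axis Kalman update, then buffered for weighting
    kf.addData(tf_ndt, ndt_fitness * 500.0);  // same scaling used in laser_scan_matcher.cpp
    return kf.filter();                       // inverse-error weighted combination of the buffered poses
}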

+ 71 - 38
scan_matcher_ICP/src/laser_scan_matcher.cpp

@@ -82,7 +82,8 @@ LaserScanMatcher::LaserScanMatcher(ros::NodeHandle nh, ros::NodeHandle nh_privat
   // *** publishers
   origin_cloud_publisher_ = nh_.advertise<sensor_msgs::PointCloud2>("origin_cloud", 10);
   current_cloud_publisher_ = nh_.advertise<sensor_msgs::PointCloud2>("current_cloud", 10);
-  current_pcl_cloud_publisher_ = nh_.advertise<sensor_msgs::PointCloud2>("current_cloud_pcl", 10);
+  current_icp_cloud_publisher_ = nh_.advertise<sensor_msgs::PointCloud2>("current_cloud_icp", 10);
+  current_ndt_cloud_publisher_ = nh_.advertise<sensor_msgs::PointCloud2>("current_cloud_ndt", 10);
 }
 
 LaserScanMatcher::~LaserScanMatcher()
@@ -286,7 +287,7 @@ void LaserScanMatcher::scanCallback (const sensor_msgs::LaserScan::ConstPtr& sca
 
     //printf("min_ang, max_ang, ang_inc: %.3f %.3f %.3f\n",scan_msg->angle_min, scan_msg->angle_max, scan_msg->angle_increment);
   }
-  laserScanToLDP(scan_msg, curr_ldp_scan_, -M_PI_2, M_PI_2);
+  laserScanToLDP(scan_msg, curr_ldp_scan_, -M_PI, M_PI);
  
   //for cloud display
   pc_curr_.width = curr_ldp_scan_->nrays;
@@ -499,10 +500,10 @@ void LaserScanMatcher::processScan(LDP& curr_ldp_scan, const ros::Time& time)
   ndt_omp->setInputTarget(ori_ptr);
   ndt_omp->setInputSource(cur_ptr);
   ndt_omp->setNumThreads(n);
-  ndt_omp->setTransformationEpsilon(0.00001);
-  ndt_omp->setStepSize(0.1);
-  ndt_omp->setResolution(0.5);
-  ndt_omp->setMaximumIterations(40);
+  ndt_omp->setTransformationEpsilon(1e-10);
+  ndt_omp->setStepSize(0.03);
+  ndt_omp->setResolution(0.2);
+  ndt_omp->setMaximumIterations(50);
   ndt_omp->setNeighborhoodSearchMethod(pclomp::DIRECT7);
 
   // Set initial alignment estimate found using robot odometry.
@@ -540,41 +541,54 @@ void LaserScanMatcher::processScan(LDP& curr_ldp_scan, const ros::Time& time)
   {
 
     // the correction of the laser's position, in the laser frame
-    tf::Transform corr_ch_l;
-    createTfFromXYTheta(pcl_trans(0,3),pcl_trans(1,3), atan2(-pcl_trans(0,1), pcl_trans(0,0)), corr_ch_l);
+    tf::Transform tf_icp, tf_ndt, tf_laser;
+    createTfFromXYTheta(output_.x[0],output_.x[1], output_.x[2], tf_icp);
+    createTfFromXYTheta(pcl_trans(0,3),pcl_trans(1,3), atan2(-pcl_trans(0,1), pcl_trans(0,0)), tf_ndt);
+    kf.addData(tf_icp, output_.error);
+    kf.addData(tf_ndt, ndt_omp->getFitnessScore() * 500.0);
+    tf_laser = kf.filter();
+    // kf.filter();
+    // tf_laser = tf_ndt;
+    printf("errors icp ndt: %.4f, %.4f\n", output_.error, ndt_omp->getFitnessScore());
+    corr_ch = laser_to_base_ * tf_laser * base_to_laser_;
+    trans[0] = tf_laser.getOrigin()[0];
+    trans[1] = tf_laser.getOrigin()[1];
+    trans[2] = atan2(tf_laser.getRotation().getZ() , tf_laser.getRotation().getW())*2.0;
+    // printf("laser quat: %.3f, %.3f, %.3f, %.3f\n",tf_laser.getRotation().getX(), tf_laser.getRotation().getY(),tf_laser.getRotation().getZ(),tf_laser.getRotation().getW());
+    printf("laser tf: %.3f, %.3f, %.3f\n",trans[0], trans[1], trans[2]);
+
     // trans[0] = pcl_trans(0,3);
     // trans[1] = pcl_trans(1,3);
     // trans[2] = atan2(-pcl_trans(0,1), pcl_trans(0,0));
     // createTfFromXYTheta(output_.x[0], output_.x[1], output_.x[2], corr_ch_l);
-    trans[0] = output_.x[0];
-    trans[1] = output_.x[1];
-    trans[2] = output_.x[2];
+    // trans[0] = output_.x[0];
+    // trans[1] = output_.x[1];
+    // trans[2] = output_.x[2];
     // printf("csm transform x,y,theta: %.3f, %.3f, %.3f\n",trans[0],trans[1],trans[2]);
-
-    //theta [-pi,pi]
-    //std::cout<<"theta: "<<trans[2]<<std::endl;
-    // the correction of the base's position, in the base frame
-    //corr_ch = base_to_laser_ * corr_ch_l * laser_to_base_;
-    corr_ch = laser_to_base_ * corr_ch_l * base_to_laser_;
-
+    
     // update the pose in the world frame
     f2b_ = f2b_kf_ * corr_ch;
 
     if (publish_tf_)
     {
+      //********* publish the TF frames *********
+      tf::StampedTransform transform_msg_icp(tf_icp, time, fixed_frame_, "ICP");
+      tf_broadcaster_.sendTransform(transform_msg_icp);
+
+      tf::StampedTransform transform_msg_ndt(tf_ndt, time, fixed_frame_, "NDT");
+      tf_broadcaster_.sendTransform(transform_msg_ndt);
+
       tf::StampedTransform transform_msg (f2b_, time, fixed_frame_, base_frame_);
       tf_broadcaster_.sendTransform(transform_msg);
 
-      //define the message to publish
+      //********* publish the three point clouds *********
+      //publish the original (reference) cloud
       sensor_msgs::PointCloud2 output_origin;
-      //convert the point cloud to a ROS message
       pcl::toROSMsg(pc_origin_, output_origin);
       output_origin.header.frame_id = fixed_frame_;
       origin_cloud_publisher_.publish(output_origin);
 
-      //define the message to publish
-      sensor_msgs::PointCloud2 output_curr;
-      // //point cloud rotation
+      // //point cloud rotation, method 1
       // Eigen::Matrix4f transform_1 = Eigen::Matrix4f::Identity(); //identity matrix
       // float theta = M_PI / 20;
       // //fill in the matrix
@@ -586,28 +600,46 @@ void LaserScanMatcher::processScan(LDP& curr_ldp_scan, const ros::Time& time)
       // transform_1(0, 3) = 0.01;
       // printf("Method #1: using a Matrix4f\n");
       // std::cout << transform_1 << std::endl;
+
+      // point cloud rotation, method 2
+      sensor_msgs::PointCloud2 output_curr;
       pcl::PointCloud<pcl::PointXYZ> curr1;
-      Eigen::Affine3f transform_1 = Eigen::Affine3f::Identity();
-      transform_1.translation() << trans[0], trans[1], 0.0;
-      transform_1.rotate (Eigen::AngleAxisf (trans[2], Eigen::Vector3f::UnitZ()));
-      pcl::transformPointCloud(pc_curr_, curr1, transform_1);
+      Eigen::Affine3f transform_kf = Eigen::Affine3f::Identity();
+      transform_kf.translation() << trans[0], trans[1], 0.0;
+      transform_kf.rotate (Eigen::AngleAxisf (trans[2], Eigen::Vector3f::UnitZ()));
+      pcl::transformPointCloud(pc_curr_, curr1, transform_kf);
       //convert the point cloud to a ROS message
       pcl::toROSMsg(curr1, output_curr);
       output_curr.header.frame_id = fixed_frame_;
       current_cloud_publisher_.publish(output_curr);
       // std::cout<<"----"<<std::endl;
 
+      //ICP cloud
+      sensor_msgs::PointCloud2 output_curr_icp;
       pcl::PointCloud<pcl::PointXYZ> curr2;
-      sensor_msgs::PointCloud2 output_curr2;
-      Eigen::Affine3f transform_2 = Eigen::Affine3f::Identity();
-      transform_2.translation() << pcl_trans(0,3),pcl_trans(1,3), 0.0;
-      transform_2.rotate (Eigen::AngleAxisf (atan2(-pcl_trans(0,1), pcl_trans(0,0)), Eigen::Vector3f::UnitZ()));
-      pcl::transformPointCloud(pc_curr_, curr2, transform_2);
-      //把点云转化为ros消息
-      pcl::toROSMsg(curr2, output_curr2);
-      output_curr2.header.frame_id = fixed_frame_;
-      current_pcl_cloud_publisher_.publish(output_curr2);
+      Eigen::Affine3f transform_icp = Eigen::Affine3f::Identity();
+      transform_icp.translation() << output_.x[0], output_.x[1], 0.0;
+      transform_icp.rotate (Eigen::AngleAxisf (output_.x[2], Eigen::Vector3f::UnitZ()));
+      pcl::transformPointCloud(pc_curr_, curr2, transform_icp);
+      pcl::toROSMsg(curr2, output_curr_icp);
+      output_curr_icp.header.frame_id = fixed_frame_;
+      current_icp_cloud_publisher_.publish(output_curr_icp);
+
+      //NDT cloud
+      sensor_msgs::PointCloud2 output_curr_ndt;
+      pcl::PointCloud<pcl::PointXYZ> curr3;
+      Eigen::Affine3f transform_ndt = Eigen::Affine3f::Identity();
+      transform_ndt.translation() << pcl_trans(0,3),pcl_trans(1,3), 0.0;
+      transform_ndt.rotate (Eigen::AngleAxisf (atan2(-pcl_trans(0,1), pcl_trans(0,0)), Eigen::Vector3f::UnitZ()));
+      pcl::transformPointCloud(pc_curr_, curr3, transform_ndt);
+      pcl::toROSMsg(curr3, output_curr_ndt);
+      output_curr_ndt.header.frame_id = fixed_frame_;
+      current_ndt_cloud_publisher_.publish(output_curr_ndt);
       // std::cout<<"****"<<std::endl;
+
+      // printf("total transform x,y,theta: %.3f, %.3f, %.3f\n",trans[0],trans[1],trans[2]);
+      // printf("icp   transform x,y,theta: %.3f, %.3f, %.3f\n",output_.x[0],output_.x[1],output_.x[2]);
+      // printf("ndt   transform x,y,theta: %.3f, %.3f, %.3f\n",pcl_trans(0,3),pcl_trans(1,3),atan2(-pcl_trans(0,1), pcl_trans(0,0)));
     }
   }
   else
@@ -628,7 +660,8 @@ void LaserScanMatcher::processScan(LDP& curr_ldp_scan, const ros::Time& time)
   // }
   // else
   // {
-    ld_free(curr_ldp_scan);
+    //does this still need to be freed???
+    //ld_free(curr_ldp_scan);
     // ld_free(ldp_prev);
   // }
 
@@ -637,7 +670,7 @@ void LaserScanMatcher::processScan(LDP& curr_ldp_scan, const ros::Time& time)
   // **** statistics
   ros::WallTime end3 = ros::WallTime::now();
   double dur3 = (end3 - start).toSec() * 1e3;
-  // std::cout<<"time consumption(csm, p1 p2 pcl and total): "<<dur1<<", "/* <<dur1_1<<", "<<dur1_2<<", "*/<<dur2<<", "<<dur3<<std::endl;
+  std::cout<<"time consumption(csm, p1 p2 pcl and total): "<<dur1<<", "/* <<dur1_1<<", "<<dur1_2<<", "*/<<dur2<<", "<<dur3<<std::endl;
 }
 
 bool LaserScanMatcher::newKeyframeNeeded(const tf::Transform& d)
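
One line in the hunk above is easy to misread: tf_laser, the filtered correction expressed in the laser frame, is converted into a correction of the base pose by sandwiching it between the fixed laser/base transforms, which is exactly what corr_ch = laser_to_base_ * tf_laser * base_to_laser_ writes out:

T^{base}_{corr} \;=\; T_{laser \rightarrow base}\; T^{laser}_{corr}\; T_{base \rightarrow laser}

and f2b_ = f2b_kf_ * corr_ch then accumulates that correction onto the last keyframe pose.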

+ 5 - 2
scan_matcher_ICP/src/laser_scan_matcher.h

@@ -55,7 +55,8 @@
 #include <pcl/filters/voxel_grid.h>
 #include <pcl_ros/point_cloud.h>
 
-#include <csm/csm_all.h>  // csm defines min and max, but Eigen complains
+#include <csm/csm_all.h>  // csm defines min and max, which Eigen already defines
+#include <kalmanFilter.h>
 #undef min
 #undef max
 
@@ -71,6 +72,7 @@ class LaserScanMatcher
 
   private:
 
+    KalmanFilter kf;
     // **** ros
 
     ros::NodeHandle nh_;
@@ -124,7 +126,8 @@ class LaserScanMatcher
 
     ros::Publisher  origin_cloud_publisher_;
     ros::Publisher  current_cloud_publisher_;
-    ros::Publisher  current_pcl_cloud_publisher_;
+    ros::Publisher  current_ndt_cloud_publisher_;
+    ros::Publisher  current_icp_cloud_publisher_;
     pcl::PointCloud<pcl::PointXYZ> pc_origin_;
     pcl::PointCloud<pcl::PointXYZ> pc_curr_;