This implementation addresses the problem of projecting 3D LiDAR point cloud data onto 2D image planes for visualization and multi-sensor data fusion applications. The projection transforms spatial point coordinates from the LiDAR frame to the camera frame using extrinsic calibration parameters, then applies camera intrinsic parameters and distortion coefficients to compute corresponding pixel locations.
The approach handles the coordinate transformation from LiDAR coordinates to image coordinates. Because the sensor mounting geometry (elevation and azimuth angles) and the camera field of view are captured entirely by the extrinsic and intrinsic parameters, the same pipeline adapts to different sensor setups.
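As a reference for what the full programs below implement, here is a minimal sketch of the two-step math: a rigid-body transform into the camera frame, followed by pinhole projection. Distortion is omitted, and all numeric values are placeholder assumptions rather than the calibration used later.

#include <Eigen/Dense>
#include <iostream>

int main() {
    // LiDAR-to-camera extrinsics (placeholder values, not a real calibration)
    Eigen::Matrix3d R = Eigen::Matrix3d::Identity();
    Eigen::Vector3d t(0.05, 0.0, -0.2);
    // Pinhole intrinsics: focal lengths and principal point (placeholders)
    const double fx = 800.0, fy = 800.0, cx = 640.0, cy = 360.0;

    // A sample point in the LiDAR frame
    Eigen::Vector3d p_lidar(0.5, 0.2, 5.0);

    // Step 1: rigid transform into the camera frame
    Eigen::Vector3d p_cam = R * p_lidar + t;

    // Step 2: perspective divide, then apply intrinsics (distortion omitted)
    double u = fx * p_cam.x() / p_cam.z() + cx;
    double v = fy * p_cam.y() / p_cam.z() + cy;
    std::cout << "pixel: (" << u << ", " << v << ")" << std::endl;
    return 0;
}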
Environment Setup
sudo apt install libpcl-dev
sudo apt install libopencv-dev
sudo apt install libeigen3-dev
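The example programs below can be built against PCL, OpenCV, and Eigen with a short CMake file; the project, target, and source file names here are illustrative assumptions, not part of the original setup:

cmake_minimum_required(VERSION 3.10)
project(lidar_projection)

# Locate the dependencies installed above
find_package(PCL REQUIRED COMPONENTS common io)
find_package(OpenCV REQUIRED)

add_executable(projection main.cpp)
target_include_directories(projection PRIVATE ${PCL_INCLUDE_DIRS})
target_link_libraries(projection ${PCL_LIBRARIES} ${OpenCV_LIBS})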
Camera Intrinsic Calibration
Camera intrinsic parameters (focal lengths, principal point, distortion coefficients) can be obtained using checkerboard calibration patterns. Tools such as the MATLAB Camera Calibrator or OpenCV's calibration routines provide these parameters.
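For reference, a condensed sketch of OpenCV's checkerboard calibration flow is shown below; the image file pattern, board dimensions, and square size are placeholder assumptions:

#include <opencv2/opencv.hpp>
#include <vector>
#include <iostream>

int main() {
    const cv::Size boardSize(9, 6);   // inner-corner count (placeholder)
    const float squareSize = 0.025f;  // square edge length in meters (placeholder)

    // Ideal corner positions in the board's own coordinate frame
    std::vector<cv::Point3f> boardCorners;
    for (int r = 0; r < boardSize.height; ++r)
        for (int c = 0; c < boardSize.width; ++c)
            boardCorners.emplace_back(c * squareSize, r * squareSize, 0.0f);

    std::vector<std::vector<cv::Point3f>> objectPoints;
    std::vector<std::vector<cv::Point2f>> imagePoints;
    cv::Size imageSize;

    // Assumes calibration images named calib_00.png ... calib_19.png
    for (int i = 0; i < 20; ++i) {
        cv::Mat img = cv::imread(cv::format("calib_%02d.png", i), cv::IMREAD_GRAYSCALE);
        if (img.empty()) continue;
        imageSize = img.size();
        std::vector<cv::Point2f> corners;
        if (cv::findChessboardCorners(img, boardSize, corners)) {
            cv::cornerSubPix(img, corners, cv::Size(11, 11), cv::Size(-1, -1),
                cv::TermCriteria(cv::TermCriteria::EPS + cv::TermCriteria::COUNT, 30, 0.001));
            imagePoints.push_back(corners);
            objectPoints.push_back(boardCorners);
        }
    }
    if (imagePoints.empty()) {
        std::cerr << "No checkerboard detections" << std::endl;
        return -1;
    }

    // Estimate the intrinsic matrix K and distortion coefficients D
    cv::Mat K, D;
    std::vector<cv::Mat> rvecs, tvecs;
    double rms = cv::calibrateCamera(objectPoints, imagePoints, imageSize, K, D, rvecs, tvecs);
    std::cout << "RMS reprojection error: " << rms
              << "\nK =\n" << K << "\nD = " << D.t() << std::endl;
    return 0;
}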
Camera-LiDAR Extrinsic Calibration
The transformation matrix T_C^L maps points from LiDAR coordinates to camera coordinates. Academic resources on this topic include:
- Reference: http://arxiv.org/abs/2103.01627
- Reference Implementation: https://github.com/hku-mars/livox_camera_calib
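Note that some calibration tools report the inverse transform (camera to LiDAR) instead. A rigid-body transform [R t; 0 1] can be inverted in closed form as [R^T -R^T t; 0 1]; a small Eigen sketch with placeholder values:

#include <Eigen/Dense>
#include <iostream>

int main() {
    // LiDAR-to-camera transform T_C^L (placeholder values)
    Eigen::Matrix4f T_CL = Eigen::Matrix4f::Identity();
    T_CL.block<3, 1>(0, 3) << 0.075f, 0.037f, -0.692f;

    // Closed-form inverse of a rigid transform: [R^T, -R^T * t]
    Eigen::Matrix3f R = T_CL.block<3, 3>(0, 0);
    Eigen::Vector3f t = T_CL.block<3, 1>(0, 3);
    Eigen::Matrix4f T_LC = Eigen::Matrix4f::Identity();
    T_LC.block<3, 3>(0, 0) = R.transpose();
    T_LC.block<3, 1>(0, 3) = -R.transpose() * t;

    std::cout << "Camera-to-LiDAR transform:\n" << T_LC << std::endl;
    return 0;
}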
Implementation: Intensity-Based Grayscale Projection
#include <iostream>
#include <opencv2/opencv.hpp>
#include <pcl/io/pcd_io.h>
#include <pcl/point_types.h>
#include <Eigen/Dense> // required for the Eigen::Matrix4f extrinsic below
#include <algorithm>
#include <limits>
int main() {
// Load point cloud with intensity values
pcl::PointCloud<pcl::PointXYZI>::Ptr scanData(new pcl::PointCloud<pcl::PointXYZI>);
if (pcl::io::loadPCDFile<pcl::PointXYZI>("input_scan.pcd", *scanData) == -1) {
std::cerr << "Failed to load PCD file" << std::endl;
return -1;
}
// Filter valid 3D points
std::vector<pcl::PointXYZI> validScan;
for (const auto& point : scanData->points) {
if (std::isfinite(point.x) && std::isfinite(point.y) && std::isfinite(point.z)) {
validScan.push_back(point);
}
}
if (validScan.empty()) {
std::cerr << "No valid points found" << std::endl;
return -1;
}
// Prepare 3D points for projection
std::vector<cv::Point3f> worldPoints;
worldPoints.reserve(validScan.size());
for (const auto& pt : validScan) {
worldPoints.emplace_back(pt.x, pt.y, pt.z);
}
// Camera intrinsic matrix
cv::Mat K = (cv::Mat_<double>(3, 3) <<
783.890724, 0.198476, 628.151868,
0.0, 783.350933, 382.160525,
0.0, 0.0, 1.0
);
// Distortion coefficients
cv::Mat D = (cv::Mat_<double>(5, 1) <<
-0.413786, 0.249752, 0.00262, -0.00224, 0.0
);
// Extrinsic transformation: LiDAR to Camera
Eigen::Matrix4f transformMat;
transformMat << -0.013604, -0.999425, -0.031069, 0.075062,
-0.024031, 0.031390, -0.999218, 0.037218,
0.999619, -0.012847, -0.024444, -0.692299,
0.0, 0.0, 0.0, 1.0;
// Extract rotation and translation components
cv::Mat R = (cv::Mat_<double>(3, 3) <<
transformMat(0,0), transformMat(0,1), transformMat(0,2),
transformMat(1,0), transformMat(1,1), transformMat(1,2),
transformMat(2,0), transformMat(2,1), transformMat(2,2)
);
cv::Mat t = (cv::Mat_<double>(3, 1) <<
transformMat(0,3), transformMat(1,3), transformMat(2,3)
);
// Convert rotation matrix to rotation vector
cv::Mat rotVec;
cv::Rodrigues(R, rotVec);
// Project 3D points to 2D image coordinates
std::vector<cv::Point2f> pixelCoords;
cv::projectPoints(worldPoints, rotVec, t, K, D, pixelCoords);
// Initialize output image
int imageWidth = 1280;
int imageHeight = 720;
cv::Mat outputImg = cv::Mat::zeros(imageHeight, imageWidth, CV_8UC3);
// Compute intensity range for normalization
float iMin = std::numeric_limits<float>::max();
float iMax = std::numeric_limits<float>::lowest();
for (const auto& pt : validScan) {
iMin = std::min(iMin, pt.intensity);
iMax = std::max(iMax, pt.intensity);
}
float iRange = iMax - iMin;
if (iRange <= 0) iRange = 1.0f;
// Render points as grayscale
for (size_t idx = 0; idx < pixelCoords.size(); ++idx) {
    // Discard points behind the camera: projectPoints would otherwise
    // map them to spurious mirrored pixel locations
    const pcl::PointXYZI& src = validScan[idx];
    float depth = transformMat(2, 0) * src.x + transformMat(2, 1) * src.y +
                  transformMat(2, 2) * src.z + transformMat(2, 3);
    if (depth <= 0.0f) continue;
    const cv::Point2f& coord = pixelCoords[idx];
    if (coord.x >= 0 && coord.x < imageWidth && coord.y >= 0 && coord.y < imageHeight) {
        float normalizedIntensity = (src.intensity - iMin) / iRange;
        int grayVal = static_cast<int>(normalizedIntensity * 255.0f);
        cv::circle(outputImg,
                   cv::Point(cvRound(coord.x), cvRound(coord.y)),
                   2, cv::Scalar(grayVal, grayVal, grayVal), -1);
    }
}
// Save result
std::string outputPath = "projection_output.png";
if (cv::imwrite(outputPath, outputImg)) {
std::cout << "Projection saved: " << outputPath << std::endl;
std::cout << "Intensity range: [" << iMin << ", " << iMax << "]" << std::endl;
}
return 0;
}
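In this first variant, overlapping projections are drawn in point order, so a later point overwrites an earlier one regardless of intensity. The second implementation below instead keeps the maximum intensity at each pixel and applies a colormap for easier visual inspection.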
Implementation: Pseudo-Color Projection with JET Colormap
#include <pcl/point_types.h>
#include <pcl/io/pcd_io.h>
#include <pcl/point_cloud.h>
#include <opencv2/opencv.hpp>
#include <Eigen/Dense> // required for the Eigen::Matrix4f extrinsic below
#include <iostream>
#include <algorithm>
#include <limits>
#include <cstdint>
int main() {
// Load intensity-enabled point cloud
pcl::PointCloud<pcl::PointXYZI>::Ptr cloudData(new pcl::PointCloud<pcl::PointXYZI>);
if (pcl::io::loadPCDFile<pcl::PointXYZI>("../Config/PCD/SLIDER.pcd", *cloudData) == -1) {
std::cerr << "Error loading point cloud" << std::endl;
return -1;
}
// Collect valid points
std::vector<pcl::PointXYZI> validPoints;
for (const auto& p : cloudData->points) {
if (std::isfinite(p.x) && std::isfinite(p.y) && std::isfinite(p.z)) {
validPoints.push_back(p);
}
}
if (validPoints.empty()) {
std::cerr << "Empty point cloud" << std::endl;
return -1;
}
// Convert to OpenCV Point3f format
std::vector<cv::Point3f> pt3D;
pt3D.reserve(validPoints.size());
for (const auto& p : validPoints) {
pt3D.emplace_back(p.x, p.y, p.z);
}
// Camera intrinsic parameters
cv::Mat K = (cv::Mat_<double>(3, 3) <<
783.890724, 0.198476, 628.151868,
0.0, 783.350933, 382.160525,
0.0, 0.0, 1.0
);
// Lens distortion
cv::Mat D = (cv::Mat_<double>(5, 1) <<
-0.413786, 0.249752, 0.00262, -0.00224, 0.0
);
// Extrinsic transformation matrix
Eigen::Matrix4f T;
T << -0.013604, -0.999425, -0.031069, 0.075062,
-0.024031, 0.031390, -0.999218, 0.037218,
0.999619, -0.012847, -0.024444, -0.692299,
0.0, 0.0, 0.0, 1.0;
// Decompose into rotation and translation
cv::Mat R = (cv::Mat_<double>(3, 3) <<
T(0,0), T(0,1), T(0,2),
T(1,0), T(1,1), T(1,2),
T(2,0), T(2,1), T(2,2)
);
cv::Mat trans = (cv::Mat_<double>(3, 1) <<
T(0,3), T(1,3), T(2,3)
);
// Convert to rotation vector format
cv::Mat rvec;
cv::Rodrigues(R, rvec);
// Compute 2D image coordinates
std::vector<cv::Point2f> imgPts;
cv::projectPoints(pt3D, rvec, trans, K, D, imgPts);
// Output image dimensions
const int outW = 1280;
const int outH = 720;
// Determine intensity normalization bounds
float iMin = std::numeric_limits<float>::max();
float iMax = std::numeric_limits<float>::lowest();
for (const auto& pt : validPoints) {
iMin = std::min(iMin, pt.intensity);
iMax = std::max(iMax, pt.intensity);
}
float iRange = iMax - iMin;
if (iRange <= 0) iRange = 1.0f;
// Create single-channel grayscale image
cv::Mat grayCanvas = cv::Mat::zeros(outH, outW, CV_8UC1);
for (size_t i = 0; i < imgPts.size(); ++i) {
    // Discard points behind the camera: projectPoints would otherwise
    // map them to spurious mirrored pixel locations
    const pcl::PointXYZI& src = validPoints[i];
    float depth = T(2, 0) * src.x + T(2, 1) * src.y + T(2, 2) * src.z + T(2, 3);
    if (depth <= 0.0f) continue;
    const cv::Point2f& pt = imgPts[i];
    int px = cvRound(pt.x);
    int py = cvRound(pt.y);
    if (px >= 0 && px < outW && py >= 0 && py < outH) {
        float normalized = (src.intensity - iMin) / iRange;
        uint8_t intensityVal = static_cast<uint8_t>(
            std::min(255.0f, std::max(0.0f, normalized * 255.0f))
        );
        // Keep the maximum intensity at each pixel (strongest return wins)
        auto& current = grayCanvas.at<uint8_t>(py, px);
        if (intensityVal > current) {
            current = intensityVal;
        }
    }
}
// Apply JET colormap for visualization
cv::Mat coloredOutput;
cv::applyColorMap(grayCanvas, coloredOutput, cv::COLORMAP_JET);
// Mask empty regions to black
cv::Mat emptyMask = (grayCanvas == 0);
coloredOutput.setTo(cv::Scalar(0, 0, 0), emptyMask);
// Save colored projection
std::string resultPath = "color_projection.png";
if (cv::imwrite(resultPath, coloredOutput)) {
std::cout << "Saved: " << resultPath << std::endl;
std::cout << "Range: [" << iMin << ", " << iMax << "]" << std::endl;
}
return 0;
}
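COLORMAP_JET maps low intensity values toward blue and high values toward red. Masking zero-valued pixels afterwards keeps regions with no projected points black rather than rendering them as dark blue, which makes the sparse projection much easier to read against the empty background.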