pointcloud
File: examples/rs-pointcloud.cpp
// Declare pointcloud object, for calculating pointclouds and texture mappings
rs2::pointcloud pc;
// We want the points object to be persistent so we can display the last cloud when a frame drops
rs2::points points;

// Declare RealSense pipeline, encapsulating the actual device and sensors
rs2::pipeline pipe;
// Start streaming with default recommended configuration
pipe.start();

while (app) // Application still alive?
{
    // Wait for the next set of frames from the camera
    auto frames = pipe.wait_for_frames();
    auto depth = frames.get_depth_frame();
    auto color = frames.get_color_frame();

    // Generate the pointcloud and texture mappings
    points = pc.calculate(depth);

    // Tell pointcloud object to map to this color frame
    pc.map_to(color);

    // Upload the color frame to OpenGL
    app_state.tex.upload(color);

    // Draw the pointcloud
    draw_pointcloud(app, app_state, points);
}
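The resulting rs2::points object exposes the computed vertices directly. As a minimal sketch (the helper name is illustrative), an application could walk the buffer like this:

```cpp
#include <librealsense2/rs.hpp>
#include <cstdio>

// Illustrative helper: print every valid vertex of a computed point cloud.
void print_valid_vertices(const rs2::points& points)
{
    const rs2::vertex* vertices = points.get_vertices(); // x, y, z in meters
    for (size_t i = 0; i < points.size(); ++i)
    {
        // A zero z component marks a pixel that had no depth reading
        if (vertices[i].z)
            std::printf("%f %f %f\n", vertices[i].x, vertices[i].y, vertices[i].z);
    }
}
```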
The rs2::pointcloud wrapper class (in librealsense2/hpp/rs_processing.hpp) is:
193 class pointcloud
194 {
195 public:
196 pointcloud() : _queue(1)
197 {
198 rs2_error* e = nullptr;
199
200 _block = std::make_shared<processing_block>(
201 std::shared_ptr<rs2_processing_block>(
202 rs2_create_pointcloud(&e),
203 rs2_delete_processing_block));
204
205 error::handle(e);
206
207 _block->start(_queue);
208 }
209
210 points calculate(frame depth)
211 {
212 _block->invoke(std::move(depth));
213 return _queue.wait_for_frame();
214 }
215
216 void map_to(frame mapped)
217 {
218 _block->invoke(std::move(mapped));
219 }
220 private:
221 friend class context;
222
223 std::shared_ptr<processing_block> _block;
224 frame_queue _queue;
225 };
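The wrapper follows librealsense's generic processing-block pattern: invoke() pushes a frame into the block, and the block delivers its output into the _queue (capacity 1) registered via _block->start(). The same pattern is available through the public API; a sketch, here using rs2::colorizer as the block and assuming a valid rs2::frame depth:

```cpp
rs2::frame_queue queue(1);   // capacity 1, like pointcloud's _queue
rs2::colorizer block;        // any processing block is driven the same way
block.start(queue);          // route the block's output into the queue
block.invoke(depth);         // push the input frame (asynchronous)
rs2::frame result = queue.wait_for_frame(); // block until the output arrives
```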
At line 202, rs2_create_pointcloud(&e) is called; this C API function is defined in src/rs.cpp:
1596 rs2_processing_block* rs2_create_pointcloud(rs2_error** error) BEGIN_API_CALL
1597 {
1598 auto block = std::make_shared<librealsense::pointcloud>();
1599
1600 return new rs2_processing_block { block };
1601 }
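For reference, the same block can be driven through the C API directly; a sketch (valid C++, assuming a rs2_frame* depth whose reference we hand over to the block):

```cpp
rs2_error* e = nullptr;
rs2_processing_block* block = rs2_create_pointcloud(&e);
rs2_frame_queue* queue = rs2_create_frame_queue(1, &e);
rs2_start_processing_queue(block, queue, &e); // outputs land in the queue
rs2_process_frame(block, depth, &e);          // takes ownership of the frame
rs2_frame* points = rs2_wait_for_frame(queue, 5000, &e); // timeout in ms
// (error checks via rs2_get_error_message(e) omitted for brevity)
```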
rs2_create_pointcloud() wraps a shared pointer to a librealsense::pointcloud in a new rs2_processing_block. That class is defined in src/align.h:
61 class pointcloud : public processing_block
62 {
63 public:
64 pointcloud();
65
66 private:
67 std::mutex _mutex;
68
69 const rs2_intrinsics* _depth_intrinsics_ptr;
70 const float* _depth_units_ptr;
71 const rs2_intrinsics* _mapped_intrinsics_ptr;
72 const rs2_extrinsics* _extrinsics_ptr;
73
74 rs2_intrinsics _depth_intrinsics;
75 rs2_intrinsics _mapped_intrinsics;
76 float _depth_units;
77 rs2_extrinsics _extrinsics;
78
79 std::shared_ptr<stream_profile_interface> _stream, _mapped;
80 stream_profile_interface* _depth_stream = nullptr;
81 };
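The cached intrinsics, extrinsics, and depth units above are the same quantities an application can query through the public API; a sketch, assuming a started rs2::pipeline pipe:

```cpp
rs2::pipeline_profile profile = pipe.get_active_profile();
auto depth_sp = profile.get_stream(RS2_STREAM_DEPTH).as<rs2::video_stream_profile>();
auto color_sp = profile.get_stream(RS2_STREAM_COLOR).as<rs2::video_stream_profile>();

rs2_intrinsics depth_intrin = depth_sp.get_intrinsics();              // _depth_intrinsics
rs2_intrinsics color_intrin = color_sp.get_intrinsics();              // _mapped_intrinsics
rs2_extrinsics depth_to_color = depth_sp.get_extrinsics_to(color_sp); // _extrinsics
float depth_units = profile.get_device()
                           .first<rs2::depth_sensor>()
                           .get_depth_scale();                        // _depth_units
```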
In rs-pointcloud.cpp, the call pc.calculate(depth) invokes the processing block that pointcloud::pointcloud() (src/align.cpp) sets up. Its depth-processing lambda, which uses depth_to_points(), starts from line 319:
319 auto process_depth_frame = [this](const rs2::depth_frame& depth)
320 {
321 frame_holder res = get_source().allocate_points(_stream, (frame_interface*)depth.get());
322
323 auto pframe = (points*)(res.frame);
324
325 auto depth_data = (const uint16_t*)depth.get_data();
326 //auto original_depth = ((depth_frame*)depth.get())->get_original_depth();
327 //if (original_depth) depth_data = (const uint16_t*)original_depth->get_frame_data();
328
329 auto points = depth_to_points((uint8_t*)pframe->get_vertices(), *_depth_intrinsics_ptr, depth_data, *_depth_units_ptr);
330
......
372
373 get_source().frame_ready(std::move(res));
374 };
depth_to_points() is defined in align.cpp:
28 const float3 * depth_to_points(uint8_t* image, const rs2_intrinsics &depth_intrinsics, const uint16_t * depth_image, float depth_scale)
29 {
30 deproject_depth(reinterpret_cast<float *>(image), depth_intrinsics, depth_image, [depth_scale](uint16_t z) { return depth_scale * z; });
31
32 return reinterpret_cast<float3 *>(image);
33 }
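The returned buffer holds one float3 vertex per depth pixel in row-major order, so the vertex of pixel (x, y) can be indexed directly (a sketch with illustrative names):

```cpp
// points was returned by depth_to_points(); intrin is the depth intrinsics
const float3& v = points[y * intrin.width + x];
```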
deproject_depth() is defined in the same file:
15 template<class MAP_DEPTH> void deproject_depth(float * points, const rs2_intrinsics & intrin, const uint16_t * depth, MAP_DEPTH map_depth)
16 {
17 for (int y = 0; y<intrin.height; ++y)
18 {
19 for (int x = 0; x<intrin.width; ++x)
20 {
21 const float pixel[] = { (float)x, (float)y };
22 rs2_deproject_pixel_to_point(points, &intrin, pixel, map_depth(*depth++));
23 points += 3;
24 }
25 }
26 }
- Notice that `map_depth` is a lambda function.
- Before a pixel's 3D coordinate is computed, its raw depth value must be multiplied by `depth_scale` (converting device units to meters) before the deprojection computation; see the sketch below.
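A minimal sketch of this scale-then-deproject step for a single pixel, using the public helpers and the frames from the loop above (variable names are illustrative):

```cpp
#include <librealsense2/rs.hpp>
#include <librealsense2/rsutil.h>

rs2::depth_frame depth = frames.get_depth_frame();
rs2_intrinsics intrin = depth.get_profile()
                             .as<rs2::video_stream_profile>()
                             .get_intrinsics();

float pixel[2] = { intrin.width / 2.0f, intrin.height / 2.0f }; // image center
// get_distance() applies the depth scale internally and returns meters
float depth_m = depth.get_distance((int)pixel[0], (int)pixel[1]);

float point[3];
rs2_deproject_pixel_to_point(point, &intrin, pixel, depth_m);
```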
The projection and deprojection helpers used above are declared in the librealsense2 rsutil header:
1 /* License: Apache 2.0. See LICENSE file in root directory.
2 Copyright(c) 2015 Intel Corporation. All Rights Reserved. */
3
4 #ifndef LIBREALSENSE_RSUTIL2_H
5 #define LIBREALSENSE_RSUTIL2_H
6
7 #include "librealsense2/h/rs_types.h"
8 #include "assert.h"
9
10 #include <math.h>
11
12
13 /* Given a point in 3D space, compute the corresponding pixel coordinates in an image
14 * with no distortion or forward distortion coefficients produced by the same camera
15 */
16 static void rs2_project_point_to_pixel(float pixel[2], const struct rs2_intrinsics * intrin, const float point[3])
17 {
18 //assert(intrin->model != RS2_DISTORTION_INVERSE_BROWN_CONRADY); // Cannot project to an inverse-distorted image
19
20 float x = point[0] / point[2], y = point[1] / point[2];
21
22 if(intrin->model == RS2_DISTORTION_MODIFIED_BROWN_CONRADY)
23 {
24
25 float r2 = x*x + y*y;
26 float f = 1 + intrin->coeffs[0]*r2 + intrin->coeffs[1]*r2*r2 + intrin->coeffs[4]*r2*r2*r2;
27 x *= f;
28 y *= f;
29 float dx = x + 2*intrin->coeffs[2]*x*y + intrin->coeffs[3]*(r2 + 2*x*x);
30 float dy = y + 2*intrin->coeffs[3]*x*y + intrin->coeffs[2]*(r2 + 2*y*y);
31 x = dx;
32 y = dy;
33 }
34 if (intrin->model == RS2_DISTORTION_FTHETA)
35 {
36 float r = sqrt(x*x + y*y);
37 auto rd = (1.0f / intrin->coeffs[0] * atan(2 * r* tan(intrin->coeffs[0] / 2.0f)));
38 x *= rd / r;
39 y *= rd / r;
40 }
41
42 pixel[0] = x * intrin->fx + intrin->ppx;
43 pixel[1] = y * intrin->fy + intrin->ppy;
44 }
46 /* Given pixel coordinates and depth in an image with no distortion or inverse distortion
47 * coefficients, compute the corresponding point in 3D space relative to the same camera
48 */
49 static void rs2_deproject_pixel_to_point(float point[3], const struct rs2_intrinsics * intrin, const float pixel[2], float depth)
50 {
51 assert(intrin->model != RS2_DISTORTION_MODIFIED_BROWN_CONRADY); // Cannot deproject from a forward-distorted image
52 assert(intrin->model != RS2_DISTORTION_FTHETA); // Cannot deproject to an ftheta image
53 //assert(intrin->model != RS2_DISTORTION_BROWN_CONRADY); // Cannot deproject to an brown conrady model
54
55 float x = (pixel[0] - intrin->ppx) / intrin->fx;
56 float y = (pixel[1] - intrin->ppy) / intrin->fy;
57 if(intrin->model == RS2_DISTORTION_INVERSE_BROWN_CONRADY)
58 {
59 float r2 = x*x + y*y;
60 float f = 1 + intrin->coeffs[0]*r2 + intrin->coeffs[1]*r2*r2 + intrin->coeffs[4]*r2*r2*r2;
61 float ux = x*f + 2*intrin->coeffs[2]*x*y + intrin->coeffs[3]*(r2 + 2*x*x);
62 float uy = y*f + 2*intrin->coeffs[3]*x*y + intrin->coeffs[2]*(r2 + 2*y*y);
63 x = ux;
64 y = uy;
65 }
66 point[0] = depth * x;
67 point[1] = depth * y;
68 point[2] = depth;
69 }
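Note the asymmetry between the two helpers: rs2_project_point_to_pixel applies the forward distortion models (RS2_DISTORTION_MODIFIED_BROWN_CONRADY, RS2_DISTORTION_FTHETA), while rs2_deproject_pixel_to_point only undoes RS2_DISTORTION_INVERSE_BROWN_CONRADY; the asserts guard against calling either one in the unsupported direction.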
71 /* Transform 3D coordinates relative to one sensor to 3D coordinates relative to another viewpoint */
72 static void rs2_transform_point_to_point(float to_point[3], const struct rs2_extrinsics * extrin, const float from_point[3])
73 {
74 to_point[0] = extrin->rotation[0] * from_point[0] + extrin->rotation[3] * from_point[1] + extrin->rotation[6] * from_point[2] + extrin->translation[0];
75 to_point[1] = extrin->rotation[1] * from_point[0] + extrin->rotation[4] * from_point[1] + extrin->rotation[7] * from_point[2] + extrin->translation[1];
76 to_point[2] = extrin->rotation[2] * from_point[0] + extrin->rotation[5] * from_point[1] + extrin->rotation[8] * from_point[2] + extrin->translation[2];
77 }
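Chaining the three helpers maps a depth pixel to its corresponding color pixel, which is what pc.map_to(color) needs for texture mapping; a sketch reusing depth_intrin, color_intrin, depth_to_color, and depth_m from the earlier sketches:

```cpp
float depth_pixel[2] = { 320.0f, 240.0f };   // illustrative coordinates
float depth_point[3], color_point[3], color_pixel[2];

rs2_deproject_pixel_to_point(depth_point, &depth_intrin, depth_pixel, depth_m);
rs2_transform_point_to_point(color_point, &depth_to_color, depth_point);
rs2_project_point_to_pixel(color_pixel, &color_intrin, color_point);
```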
79 /* Calculate horizontal and vertical field of view, based on video intrinsics */
80 static void rs2_fov(const struct rs2_intrinsics * intrin, float to_fov[2])
81 {
82 to_fov[0] = (atan2f(intrin->ppx + 0.5f, intrin->fx) + atan2f(intrin->width - (intrin->ppx + 0.5f), intrin->fx)) * 57.2957795f;
83 to_fov[1] = (atan2f(intrin->ppy + 0.5f, intrin->fy) + atan2f(intrin->height - (intrin->ppy + 0.5f), intrin->fy)) * 57.2957795f;
84 }
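The constant 57.2957795 is 180/π, converting the field-of-view angles from radians to degrees.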