// [azure kinect] map color operation to depth to 3d space [WIP]
/**
 * NOTE: breakpoints were not triggering under VS2015 while debugging this,
 * and we do not fully understand the Azure Kinect SDK yet.
 *
 * Below is a simple working example that maps a color-image position to a
 * 3D point in depth-camera space.
 */
#pragma region kinect
#include <k4a\k4a.h>
#pragma comment(lib,"k4a.lib")
#pragma endregion
#include <assert.h>
#include <iostream>
#include <opencv\cv.hpp>
//just like js
#define ConsoleLog(msg) std::cout<<msg<<std::endl;
/**
 * Warp the color image into the geometry of the depth camera.
 *
 * Allocates a BGRA32 image with the depth image's dimensions, fills it via
 * k4a_transformation_color_image_to_depth_camera, and releases it again
 * (the result is currently only computed, not consumed).
 *
 * @param transformation_handle transformation created from the device calibration.
 * @param depth_image           depth frame defining the target geometry.
 * @param color_image           color frame to be warped.
 * @return true on success, false if allocation or the transform failed.
 */
static bool point_cloud_color_to_depth(k4a_transformation_t transformation_handle,
                                       const k4a_image_t depth_image,
                                       const k4a_image_t color_image)
{
    int depth_image_width_pixels = k4a_image_get_width_pixels(depth_image);
    int depth_image_height_pixels = k4a_image_get_height_pixels(depth_image);
    k4a_image_t transformed_color_image = NULL;
    if (K4A_RESULT_SUCCEEDED != k4a_image_create(K4A_IMAGE_FORMAT_COLOR_BGRA32,
                                                 depth_image_width_pixels,
                                                 depth_image_height_pixels,
                                                 depth_image_width_pixels * 4 * (int)sizeof(uint8_t),
                                                 &transformed_color_image))
    {
        printf("Failed to create transformed color image\n");
        return false;
    }
    // NOTE(fix): the original also created a K4A_IMAGE_FORMAT_CUSTOM
    // point-cloud image here that was never used and never released (leak);
    // that allocation has been removed.
    if (K4A_RESULT_SUCCEEDED != k4a_transformation_color_image_to_depth_camera(transformation_handle,
                                                                               depth_image,
                                                                               color_image,
                                                                               transformed_color_image))
    {
        printf("Failed to compute transformed color image\n");
        // fix: this image was leaked on the failure path.
        k4a_image_release(transformed_color_image);
        return false;
    }
    k4a_image_release(transformed_color_image);
    return true;
}
static void generate_point_cloud(const k4a_image_t depth_image,
const k4a_image_t xy_table,
k4a_image_t point_cloud,
int *point_count)
{
int width = k4a_image_get_width_pixels(depth_image);
int height = k4a_image_get_height_pixels(depth_image);
uint16_t *depth_data = (uint16_t *)(void *)k4a_image_get_buffer(depth_image);
k4a_float2_t *xy_table_data = (k4a_float2_t *)(void *)k4a_image_get_buffer(xy_table);
k4a_float3_t *point_cloud_data = (k4a_float3_t *)(void *)k4a_image_get_buffer(point_cloud);
*point_count = 0;
for (int i = 0; i < width * height; i++)
{
if (depth_data[i] != 0 && !isnan(xy_table_data[i].xy.x) && !isnan(xy_table_data[i].xy.y))
{
point_cloud_data[i].xyz.x = xy_table_data[i].xy.x * (float)depth_data[i];
point_cloud_data[i].xyz.y = xy_table_data[i].xy.y * (float)depth_data[i];
point_cloud_data[i].xyz.z = (float)depth_data[i];
(*point_count)++;
}
else
{
point_cloud_data[i].xyz.x = nanf("");
point_cloud_data[i].xyz.y = nanf("");
point_cloud_data[i].xyz.z = nanf("");
}
}
}
#include <fstream>
static void write_point_cloud(const char *file_name, const k4a_image_t point_cloud, int point_count)
{
int width = k4a_image_get_width_pixels(point_cloud);
int height = k4a_image_get_height_pixels(point_cloud);
k4a_float3_t *point_cloud_data = (k4a_float3_t *)(void *)k4a_image_get_buffer(point_cloud);
// save to the ply file
std::ofstream ofs(file_name); // text mode first
ofs << "ply" << std::endl;
ofs << "format ascii 1.0" << std::endl;
ofs << "element vertex"
<< " " << point_count << std::endl;
ofs << "property float x" << std::endl;
ofs << "property float y" << std::endl;
ofs << "property float z" << std::endl;
ofs << "end_header" << std::endl;
ofs.close();
std::stringstream ss;
for (int i = 0; i < width * height; i++)
{
if (isnan(point_cloud_data[i].xyz.x) || isnan(point_cloud_data[i].xyz.y) || isnan(point_cloud_data[i].xyz.z))
{
continue;
}
ss << (float)point_cloud_data[i].xyz.x << " " << (float)point_cloud_data[i].xyz.y << " "
<< (float)point_cloud_data[i].xyz.z << std::endl;
}
std::ofstream ofs_text(file_name, std::ios::out | std::ios::app);
ofs_text.write(ss.str().c_str(), (std::streamsize)ss.str().length());
}
static void create_xy_table(const k4a_calibration_t *calibration, k4a_image_t xy_table)
{
k4a_float2_t *table_data = (k4a_float2_t *)(void *)k4a_image_get_buffer(xy_table);
int width = calibration->depth_camera_calibration.resolution_width;
int height = calibration->depth_camera_calibration.resolution_height;
k4a_float2_t p;
k4a_float3_t ray;
int valid;
for (int y = 0, idx = 0; y < height; y++)
{
p.xy.y = (float)y;
for (int x = 0; x < width; x++, idx++)
{
p.xy.x = (float)x;
k4a_calibration_2d_to_3d(
calibration, &p, 1.f, K4A_CALIBRATION_TYPE_DEPTH, K4A_CALIBRATION_TYPE_DEPTH, &ray, &valid);
if (valid)
{
table_data[idx].xy.x = ray.xyz.x;
table_data[idx].xy.y = ray.xyz.y;
}
else
{
table_data[idx].xy.x = nanf("");
table_data[idx].xy.y = nanf("");
}
}
}
}
/**
 * Open Azure Kinect device `deviceId`, start depth+color streaming, and run a
 * short capture loop that per frame:
 *   - maps the fixed color pixel (300, 400) into depth-image coordinates,
 *   - generates a 3D point cloud from the depth frame via the xy ray table,
 *   - prints the 3D point behind that color pixel (when the mapping is valid),
 *   - shows the live depth image with OpenCV,
 *   - writes one snapshot to "test.ply" halfway through, then exits.
 *
 * Fixes vs. original: per-iteration image/capture handles are now released
 * (previously up to ~50 frames leaked), xy_table/point_cloud are released at
 * Exit, cameras are stopped before closing the device, k4a_image_create
 * results are checked, and the point-cloud lookup no longer indexes with an
 * uninitialized pt_depth when the color->depth mapping is invalid.
 *
 * @param deviceId index of the device to open (default K4A_DEVICE_DEFAULT).
 * @return 1 (legacy behavior: the value does not distinguish success from
 *         failure; the caller currently ignores it).
 */
static int capture(uint8_t deviceId = K4A_DEVICE_DEFAULT)
{
    int returnCode = 1;
    k4a_device_t device = NULL;
    const int32_t TIMEOUT_IN_MS = 1000;
    k4a_transformation_t transformation = NULL;
    k4a_transformation_t transformation_color_downscaled = NULL;
    k4a_capture_t capture = NULL;
    uint32_t device_count = 0;
    k4a_device_configuration_t config = K4A_DEVICE_CONFIG_INIT_DISABLE_ALL;
    k4a_image_t depth_image = NULL;
    k4a_image_t color_image = NULL;
    k4a_image_t xy_table = NULL;
    k4a_image_t point_cloud = NULL;
    k4a_calibration_t calibration;
    bool cameras_started = false;
    // Declared up front (before any goto) so jumps to Exit never bypass an
    // initialized declaration; no longer `static`, so the function is
    // reusable across calls.
    int count = 100;
    device_count = k4a_device_get_installed_count();
    assert(device_count != 0);
    if (K4A_RESULT_SUCCEEDED != k4a_device_open(deviceId, &device))
    {
        printf("Failed to open device\n");
        goto Exit;
    }
    config.color_format = K4A_IMAGE_FORMAT_COLOR_BGRA32;
    config.color_resolution = K4A_COLOR_RESOLUTION_720P;
    config.depth_mode = K4A_DEPTH_MODE_WFOV_2X2BINNED;
    config.camera_fps = K4A_FRAMES_PER_SECOND_30;
    config.synchronized_images_only = true; // ensures that depth and color images are both available in the capture
    if (K4A_RESULT_SUCCEEDED !=
        k4a_device_get_calibration(device, config.depth_mode, config.color_resolution, &calibration))
    {
        printf("Failed to get calibration\n");
        goto Exit;
    }
    transformation = k4a_transformation_create(&calibration);
    if (K4A_RESULT_SUCCEEDED != k4a_device_start_cameras(device, &config))
    {
        printf("Failed to start cameras\n");
        goto Exit;
    }
    cameras_started = true;
    // Per-pixel unit-depth ray table; multiplied by measured depth it yields
    // a 3D point per pixel (see create_xy_table / generate_point_cloud).
    if (K4A_RESULT_SUCCEEDED != k4a_image_create(K4A_IMAGE_FORMAT_CUSTOM,
                                                 calibration.depth_camera_calibration.resolution_width,
                                                 calibration.depth_camera_calibration.resolution_height,
                                                 calibration.depth_camera_calibration.resolution_width * (int)sizeof(k4a_float2_t),
                                                 &xy_table))
    {
        printf("Failed to create xy table image\n");
        goto Exit;
    }
    create_xy_table(&calibration, xy_table);
    if (K4A_RESULT_SUCCEEDED != k4a_image_create(K4A_IMAGE_FORMAT_CUSTOM,
                                                 calibration.depth_camera_calibration.resolution_width,
                                                 calibration.depth_camera_calibration.resolution_height,
                                                 calibration.depth_camera_calibration.resolution_width * (int)sizeof(k4a_float3_t),
                                                 &point_cloud))
    {
        printf("Failed to create point cloud image\n");
        goto Exit;
    }
    while (count-- > 0)
    {
        // Get a capture (blocks up to TIMEOUT_IN_MS).
        switch (k4a_device_get_capture(device, &capture, TIMEOUT_IN_MS))
        {
        case K4A_WAIT_RESULT_SUCCEEDED:
            break;
        case K4A_WAIT_RESULT_TIMEOUT:
            printf("Timed out waiting for a capture\n");
            goto Exit;
        case K4A_WAIT_RESULT_FAILED:
            printf("Failed to read a capture\n");
            goto Exit;
        }
        // Get a depth image
        depth_image = k4a_capture_get_depth_image(capture);
        if (depth_image == NULL)
        {
            printf("Failed to get depth image from capture\n");
            goto Exit;
        }
        // Get a color image
        color_image = k4a_capture_get_color_image(capture);
        if (color_image == NULL)
        {
            printf("Failed to get color image from capture\n");
            goto Exit;
        }
        // Map a fixed color-image pixel into depth-image coordinates.
        k4a_float2_t pt_color;
        pt_color.v[0] = 300;
        pt_color.v[1] = 400;
        k4a_float2_t pt_depth;
        int b_valid;
        k4a_calibration_color_2d_to_depth_2d(&calibration, &pt_color, depth_image, &pt_depth, &b_valid);
        std::cout << "valid: " << b_valid << std::endl;
        if (b_valid)
        {
            //output the right vals;
            printf("color to depth pos: %f # %f\n", pt_depth.v[0], pt_depth.v[1]);
        }
        int point_count;
        generate_point_cloud(depth_image, xy_table, point_cloud, &point_count);
        if (count == 50)
        {
            // Persist one snapshot halfway through, then stop.
            write_point_cloud("test.ply", point_cloud, point_count);
            goto Exit;
        }
        std::cout << point_count << std::endl;
        {
            const int width = k4a_image_get_width_pixels(depth_image);
            const int height = k4a_image_get_height_pixels(depth_image);
            // fix: only index the point cloud when the color->depth mapping
            // succeeded — pt_depth is uninitialized otherwise (was an
            // out-of-bounds read with a garbage index).
            if (b_valid)
            {
                k4a_float3_t *point_cloud_data = (k4a_float3_t *)(void *)k4a_image_get_buffer(point_cloud);
                const k4a_float3_t pt = point_cloud_data[((int)pt_depth.v[1]) * width + ((int)pt_depth.v[0])];
                printf("point[%.2f-%.2f-%.2f]", pt.v[0], pt.v[1], pt.v[2]);
            }
            // Wrap the raw DEPTH16 buffer in a cv::Mat (no copy) and display it.
            uint8_t *depth_image_data = k4a_image_get_buffer(depth_image);
            const cv::Mat depthimage = cv::Mat(height, width, CV_16U, (void *)depth_image_data, cv::Mat::AUTO_STEP);
            cv::namedWindow("depthbar", cv::WINDOW_AUTOSIZE);
            cv::imshow("depthbar", depthimage);
            cv::waitKey(3);
        }
        // fix: release per-iteration handles so the loop does not leak one
        // capture plus two images per frame.
        k4a_image_release(depth_image);
        depth_image = NULL;
        k4a_image_release(color_image);
        color_image = NULL;
        k4a_capture_release(capture);
        capture = NULL;
    }
    // Compute color point cloud by warping color image into depth camera
    // geometry. Only reachable if the loop runs to exhaustion (it normally
    // exits at count == 50); guarded because the frames were released above.
    if (depth_image != NULL && color_image != NULL &&
        point_cloud_color_to_depth(transformation, depth_image, color_image) == false)
    {
        goto Exit;
    }
Exit:
    if (depth_image != NULL)
    {
        k4a_image_release(depth_image);
    }
    if (color_image != NULL)
    {
        k4a_image_release(color_image);
    }
    if (capture != NULL)
    {
        k4a_capture_release(capture);
    }
    if (xy_table != NULL)
    {
        k4a_image_release(xy_table); // fix: was never released
    }
    if (point_cloud != NULL)
    {
        k4a_image_release(point_cloud); // fix: was never released
    }
    if (transformation != NULL)
    {
        k4a_transformation_destroy(transformation);
    }
    if (transformation_color_downscaled != NULL)
    {
        k4a_transformation_destroy(transformation_color_downscaled);
    }
    if (device != NULL)
    {
        if (cameras_started)
        {
            k4a_device_stop_cameras(device); // fix: cameras were never stopped
        }
        k4a_device_close(device);
    }
    return returnCode;
}
/// Entry point: print a debug marker, run the Kinect capture demo, and keep
/// the console window open (Windows `pause`) until a key is pressed.
int main()
{
    std::cout << "debug point" << std::endl;
    capture();
    system("pause");
    return 0;
}