-
Notifications
You must be signed in to change notification settings - Fork 135
/
Copy path: rgb_depth_aligned.cpp
148 lines (124 loc) · 5.26 KB
/
rgb_depth_aligned.cpp
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
#include <cstdio>
#include <iostream>
#include "utility.hpp"
// Includes common necessary includes for development using depthai library
#include "depthai/depthai.hpp"
// Optional. If set (true), the ColorCamera is downscaled from 1080p to 720p.
// Otherwise (false), the aligned depth is automatically upscaled to 1080p
static std::atomic<bool> downscaleColor{true};
// Frame rate applied to the color camera and both mono cameras so the
// streams stay in step for alignment/blending.
static constexpr int fps = 30;
// The disparity is computed at this resolution, then upscaled to RGB resolution
static constexpr auto monoRes = dai::MonoCameraProperties::SensorResolution::THE_720_P;
// Blend weights for the rgb/depth overlay; rgbWeight + depthWeight == 1.
static float rgbWeight = 0.4f;
static float depthWeight = 0.6f;

/// Trackbar callback: converts the slider position (percent of RGB in the
/// blend) into the two blend weights used by cv::addWeighted.
/// @param percentRgb slider value; clamped to [0, 100] because OpenCV's
///        setTrackbarPos (or a direct call) can deliver out-of-range values,
///        which would yield weights outside [0, 1].
/// @param ctx user-data pointer required by the cv::TrackbarCallback
///        signature; unused here.
static void updateBlendWeights(int percentRgb, void* ctx) {
    (void)ctx;  // required by the callback signature, intentionally unused
    if(percentRgb < 0) percentRgb = 0;
    if(percentRgb > 100) percentRgb = 100;
    rgbWeight = float(percentRgb) / 100.f;
    depthWeight = 1.f - rgbWeight;
}
int main() {
using namespace std;
// Create pipeline
dai::Pipeline pipeline;
dai::Device device;
std::vector<std::string> queueNames;
// Define sources and outputs
auto camRgb = pipeline.create<dai::node::ColorCamera>();
auto left = pipeline.create<dai::node::MonoCamera>();
auto right = pipeline.create<dai::node::MonoCamera>();
auto stereo = pipeline.create<dai::node::StereoDepth>();
auto rgbOut = pipeline.create<dai::node::XLinkOut>();
auto depthOut = pipeline.create<dai::node::XLinkOut>();
rgbOut->setStreamName("rgb");
queueNames.push_back("rgb");
depthOut->setStreamName("depth");
queueNames.push_back("depth");
// Properties
camRgb->setBoardSocket(dai::CameraBoardSocket::CAM_A);
camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P);
camRgb->setFps(fps);
if(downscaleColor) camRgb->setIspScale(2, 3);
// For now, RGB needs fixed focus to properly align with depth.
// This value was used during calibration
try {
auto calibData = device.readCalibration2();
auto lensPosition = calibData.getLensPosition(dai::CameraBoardSocket::CAM_A);
if(lensPosition) {
camRgb->initialControl.setManualFocus(lensPosition);
}
} catch(const std::exception& ex) {
std::cout << ex.what() << std::endl;
return 1;
}
left->setResolution(monoRes);
left->setCamera("left");
left->setFps(fps);
right->setResolution(monoRes);
right->setCamera("right");
right->setFps(fps);
stereo->setDefaultProfilePreset(dai::node::StereoDepth::PresetMode::HIGH_DENSITY);
// LR-check is required for depth alignment
stereo->setLeftRightCheck(true);
stereo->setDepthAlign(dai::CameraBoardSocket::CAM_A);
// Linking
camRgb->isp.link(rgbOut->input);
left->out.link(stereo->left);
right->out.link(stereo->right);
stereo->disparity.link(depthOut->input);
// Connect to device and start pipeline
device.startPipeline(pipeline);
// Sets queues size and behavior
for(const auto& name : queueNames) {
device.getOutputQueue(name, 4, false);
}
std::unordered_map<std::string, cv::Mat> frame;
auto rgbWindowName = "rgb";
auto depthWindowName = "depth";
auto blendedWindowName = "rgb-depth";
cv::namedWindow(rgbWindowName);
cv::namedWindow(depthWindowName);
cv::namedWindow(blendedWindowName);
int defaultValue = (int)(rgbWeight * 100);
cv::createTrackbar("RGB Weight %", blendedWindowName, &defaultValue, 100, updateBlendWeights);
while(true) {
std::unordered_map<std::string, std::shared_ptr<dai::ImgFrame>> latestPacket;
auto queueEvents = device.getQueueEvents(queueNames);
for(const auto& name : queueEvents) {
auto packets = device.getOutputQueue(name)->tryGetAll<dai::ImgFrame>();
auto count = packets.size();
if(count > 0) {
latestPacket[name] = packets[count - 1];
}
}
for(const auto& name : queueNames) {
if(latestPacket.find(name) != latestPacket.end()) {
if(name == depthWindowName) {
frame[name] = latestPacket[name]->getFrame();
auto maxDisparity = stereo->initialConfig.getMaxDisparity();
// Optional, extend range 0..95 -> 0..255, for a better visualisation
if(1) frame[name].convertTo(frame[name], CV_8UC1, 255. / maxDisparity);
// Optional, apply false colorization
if(1) cv::applyColorMap(frame[name], frame[name], cv::COLORMAP_HOT);
} else {
frame[name] = latestPacket[name]->getCvFrame();
}
cv::imshow(name, frame[name]);
}
}
// Blend when both received
if(frame.find(rgbWindowName) != frame.end() && frame.find(depthWindowName) != frame.end()) {
// Need to have both frames in BGR format before blending
if(frame[depthWindowName].channels() < 3) {
cv::cvtColor(frame[depthWindowName], frame[depthWindowName], cv::COLOR_GRAY2BGR);
}
cv::Mat blended;
cv::addWeighted(frame[rgbWindowName], rgbWeight, frame[depthWindowName], depthWeight, 0, blended);
cv::imshow(blendedWindowName, blended);
frame.clear();
}
int key = cv::waitKey(1);
if(key == 'q' || key == 'Q') {
return 0;
}
}
return 0;
}