import pyrealsense2 as rs
import numpy as np
import cv2
W = 640
H = 480
pc = rs.pointcloud()
points = rs.points()
pipeline = rs.pipeline()  # create a pipeline
config = rs.config()      # create a config and set up the streams to transmit
config.enable_stream(rs.stream.depth, W, H, rs.format.z16, 15)
config.enable_stream(rs.stream.color, W, H, rs.format.bgr8, 15)
'''
enable_stream signature:
self: pyrealsense2.pyrealsense2.config,
stream_type: pyrealsense2.pyrealsense2.stream,
stream_index: int, width: int, height: int,
format: pyrealsense2.pyrealsense2.format = format.any,
framerate: int = 0
'''
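# Note: the requested width/height/fps combination must be one the attached camera
# actually supports (640x480 @ 15 fps is a common mode on the D400 series); an
# unsupported combination makes pipeline.start(config) fail with a RuntimeError.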
# Explicitly enable the device streams with the selected parameters
# Start streaming
pipe_profile = pipeline.start(config)
'''
The pipeline loop captures samples from the device and, according to each module's
requirements and threading model, delivers them to the attached computer-vision and
processing modules. While the loop is running, the application can access the camera
streams by calling wait_for_frames() or poll_for_frames(). The loop runs until the
pipeline is stopped.
'''
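# A sketch of the non-blocking alternative mentioned above (poll_for_frames()
# returns immediately, with an empty frameset when nothing new has arrived;
# the surrounding loop structure here is an assumption):
#   frames = pipeline.poll_for_frames()
#   if frames.size() > 0:
#       pass  # process the new frameset
#   else:
#       pass  # no new frames yet; do other work and poll again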
# Create an align object (to align the streams).
# rs.align allows us to perform alignment of depth frames to other frames.
# "align_to" is the stream type to which we plan to align depth frames.
align_to = rs.stream.color
align = rs.align(align_to)  # align the depth stream to the color stream
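# After align.process(), the depth frame is re-projected into the color camera's
# viewpoint, so pixel (u, v) in the color image and pixel (u, v) in the aligned
# depth image refer to the same point in the scene.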
def led_practice(x_axis, y_axis):
    """(x_axis, y_axis) is the pixel position to monitor."""
    while True:
        frames = pipeline.wait_for_frames()     # block until a new frameset is available
        aligned_frames = align.process(frames)  # align the depth frame to the color frame
        depth_frame = aligned_frames.get_depth_frame()  # aligned depth frame
        color_frame = aligned_frames.get_color_frame()  # aligned color frame
        img_color = np.asanyarray(color_frame.get_data())  # color frame as a numpy array
        img_depth = np.asanyarray(depth_frame.get_data())  # depth frame as a numpy array
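        # Shapes after conversion: img_color is (H, W, 3) uint8 in BGR order;
        # img_depth is (H, W) uint16 holding raw depth units.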
        # intrinsics: the intrinsic parameters of the video stream profile
        # (stream_profile -> rs2::video_stream_profile)
        depth_intrin = depth_frame.profile.as_video_stream_profile().intrinsics
        color_intrin = color_frame.profile.as_video_stream_profile().intrinsics
        # get_extrinsics_to: the extrinsic transformation between two stream profiles
        # (representing physical sensors)
        depth_to_color_extrin = depth_frame.profile.get_extrinsics_to(color_frame.profile)
        # Query the depth sensor's depth scale (meters per raw depth unit)
        depth_sensor = pipe_profile.get_device().first_depth_sensor()
        depth_scale = depth_sensor.get_depth_scale()  # typically 0.001 m per unit
        print("scale:", depth_scale)
        # From depth to color
        depth_pixel = [x_axis, y_axis]  # the monitored pixel
        # rs2_deproject_pixel_to_point expects the depth at that pixel in meters
        # (not the depth scale), so query it with get_distance()
        depth_value = depth_frame.get_distance(x_axis, y_axis)
        depth_point = rs.rs2_deproject_pixel_to_point(depth_intrin, depth_pixel, depth_value)
        print(depth_point)
        # Transform the 3-D point into the color camera's coordinate system
        color_point = rs.rs2_transform_point_to_point(depth_to_color_extrin, depth_point)
        # Project the 3-D point back to color-image pixel coordinates
        color_pixel = rs.rs2_project_point_to_pixel(color_intrin, color_point)
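        # For the no-distortion case, rs2_deproject_pixel_to_point reduces to the
        # pinhole model, with (fx, fy) the focal lengths and (ppx, ppy) the
        # principal point from the intrinsics:
        #   X = (u - ppx) / fx * depth
        #   Y = (v - ppy) / fy * depth
        #   Z = depth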
        pc.map_to(color_frame)              # map the point cloud to the given color frame
        points = pc.calculate(depth_frame)  # generate the point cloud and texture mapping
        # points.get_vertices() returns the point-cloud vertices; np.asanyarray wraps
        # the buffer as an array of (x, y, z) coordinates, one vertex per depth pixel
        vtx = np.asanyarray(points.get_vertices())
        # tex = np.asanyarray(points.get_texture_coordinates())  # texture map
        # The vertex array is flattened row-major (line by line), so pixel
        # (x_axis, y_axis) lands at index W * y_axis + x_axis
        i = W * y_axis + x_axis
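        # Worked example: with W = 640, the center pixel (320, 240) lands at
        # i = 640 * 240 + 320 = 153920 in the flattened vertex array.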
        cv2.circle(img_color, (x_axis, y_axis), 8, [255, 0, 255], thickness=-1)
        # numpy indexing is [row, col] = [y, x]; the raw value is in depth units
        # (millimeters at the default scale)
        cv2.putText(img_color, "Distance/mm:" + str(img_depth[y_axis, x_axis]),
                    (40, 40), cv2.FONT_HERSHEY_SIMPLEX, 1.2, [255, 0, 255])
        cv2.putText(img_color, "X:" + str(np.float64(vtx[i][0])), (80, 80),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, [255, 0, 255])
        cv2.putText(img_color, "Y:" + str(np.float64(vtx[i][1])), (80, 120),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, [255, 0, 255])
        cv2.putText(img_color, "Z:" + str(np.float64(vtx[i][2])), (80, 160),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, [255, 0, 255])
        print('Distance/cm:', img_depth[y_axis, x_axis] / 10)
        cv2.imshow('color_frame', img_color)
        # Scale the 16-bit depth image down so it is visible in an 8-bit window
        cv2.imshow('depth_frame', cv2.convertScaleAbs(img_depth, alpha=0.03))
        key = cv2.waitKey(1)
        if key == 27:  # Esc exits the loop
            cv2.destroyAllWindows()
            break
led_practice(int(W / 2), int(H / 2))  # monitor the image center
cv2.waitKey(0)
cv2.destroyAllWindows()
pipeline.stop()