# ----------------------------------------------------------------------------
# - Open3D: www.open3d.org -
# ----------------------------------------------------------------------------
# Copyright (c) 2018-2024 www.open3d.org
# SPDX-License-Identifier: MIT
# ----------------------------------------------------------------------------
# examples/python/reconstruction_system/sensors/realsense_pcd_visualizer.py
# pyrealsense2 is required.
# Please see instructions in https://github.com/IntelRealSense/librealsense/tree/master/wrappers/python
import pyrealsense2 as rs
import numpy as np
from enum import IntEnum
from datetime import datetime
import open3d as o3d
from os.path import abspath, dirname
import sys

# Make the sibling `realsense_helper` module importable regardless of the
# current working directory.  Note: we must append this script's *directory*;
# the original appended abspath(__file__) (the file itself), which is never a
# valid sys.path entry.
sys.path.append(dirname(abspath(__file__)))
from realsense_helper import get_profiles
# Visual-preset identifiers accepted by the depth sensor's
# rs.option.visual_preset option.  Built with the IntEnum functional API;
# members compare equal to their plain-int values.
Preset = IntEnum(
    "Preset",
    [
        ("Custom", 0),
        ("Default", 1),
        ("Hand", 2),
        ("HighAccuracy", 3),
        ("HighDensity", 4),
        ("MediumDensity", 5),
    ],
)
def get_intrinsic_matrix(frame):
    """Build an Open3D pinhole intrinsic from a RealSense video frame.

    Args:
        frame: a pyrealsense2 frame whose profile can be cast to a
            video stream profile (e.g. an aligned color frame).

    Returns:
        o3d.camera.PinholeCameraIntrinsic initialized with the frame's
        actual image size and focal/principal-point parameters.
    """
    intrinsics = frame.profile.as_video_stream_profile().intrinsics
    # Use the width/height reported by the stream profile instead of a
    # hard-coded 640x480: the profile chosen in __main__ is simply
    # color_profiles[0], whose resolution is not guaranteed to be 640x480.
    out = o3d.camera.PinholeCameraIntrinsic(intrinsics.width,
                                            intrinsics.height, intrinsics.fx,
                                            intrinsics.fy, intrinsics.ppx,
                                            intrinsics.ppy)
    return out
if __name__ == "__main__":
    # Create a pipeline
    pipeline = rs.pipeline()

    # Create a config and configure the pipeline to stream
    # different resolutions of color and depth streams
    config = rs.config()

    color_profiles, depth_profiles = get_profiles()
    print('Using the default profiles: \n color:{}, depth:{}'.format(
        color_profiles[0], depth_profiles[0]))
    w, h, fps, fmt = depth_profiles[0]
    config.enable_stream(rs.stream.depth, w, h, fmt, fps)
    w, h, fps, fmt = color_profiles[0]
    config.enable_stream(rs.stream.color, w, h, fmt, fps)

    # Start streaming
    profile = pipeline.start(config)
    depth_sensor = profile.get_device().first_depth_sensor()

    # Using preset HighAccuracy for recording
    depth_sensor.set_option(rs.option.visual_preset, Preset.HighAccuracy)

    # Getting the depth sensor's depth scale (see rs-align example for
    # explanation).  depth_scale converts raw depth units to meters.
    depth_scale = depth_sensor.get_depth_scale()

    # We will not display the background of objects more than
    # clipping_distance_in_meters meters away; the truncation is applied
    # below via the depth_trunc argument of create_from_color_and_depth.
    clipping_distance_in_meters = 3  # 3 meter

    # Create an align object.
    # rs.align allows us to perform alignment of depth frames to other
    # frames; here we align depth to the color stream.
    align = rs.align(rs.stream.color)

    vis = o3d.visualization.Visualizer()
    vis.create_window()
    pcd = o3d.geometry.PointCloud()
    # Flip about the x-axis (negate y and z) so the cloud appears upright
    # in Open3D's default camera orientation.
    flip_transform = [[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0],
                      [0, 0, 0, 1]]

    # Streaming loop
    frame_count = 0
    try:
        while True:
            dt0 = datetime.now()

            # Get frameset of color and depth, aligned to the color frame.
            frames = pipeline.wait_for_frames()
            aligned_frames = align.process(frames)
            aligned_depth_frame = aligned_frames.get_depth_frame()
            color_frame = aligned_frames.get_color_frame()

            # Validate that both frames are valid BEFORE using them.
            # (Previously the intrinsics were read from color_frame above
            # this check, dereferencing a possibly-dropped frame.)
            if not aligned_depth_frame or not color_frame:
                continue

            intrinsic = o3d.camera.PinholeCameraIntrinsic(
                get_intrinsic_matrix(color_frame))

            depth_image = o3d.geometry.Image(
                np.array(aligned_depth_frame.get_data()))
            color_image = o3d.geometry.Image(
                np.asarray(color_frame.get_data()))

            # Open3D expects depth_scale in units-per-meter, i.e. the
            # reciprocal of the sensor's meters-per-unit depth_scale.
            rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth(
                color_image,
                depth_image,
                depth_scale=1.0 / depth_scale,
                depth_trunc=clipping_distance_in_meters,
                convert_rgb_to_intensity=False)
            temp = o3d.geometry.PointCloud.create_from_rgbd_image(
                rgbd_image, intrinsic)
            temp.transform(flip_transform)
            pcd.points = temp.points
            pcd.colors = temp.colors

            # The geometry must be added to the visualizer exactly once;
            # later frames only update the existing geometry buffers.
            if frame_count == 0:
                vis.add_geometry(pcd)
            vis.update_geometry(pcd)
            vis.poll_events()
            vis.update_renderer()

            process_time = datetime.now() - dt0
            print("\rFPS: " + str(1 / process_time.total_seconds()), end='')
            frame_count += 1

    finally:
        # Always release the camera and the window, even on Ctrl-C.
        pipeline.stop()
        vis.destroy_window()