print("For help type --help")
exit()
if os.path.splitext(args.input)[1] != ".bag":
    print("The given file is not of correct file format.")
    print("Only .bag files are accepted")
    exit()
align = rs.align(rs.stream.color)
pipeline = rs.pipeline()
config = rs.config()
rs.config.enable_device_from_file(config, args.input)
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 640, 480, rs.format.rgb8, 30)
profile = pipeline.start(config)
intr = profile.get_stream(rs.stream.color).as_video_stream_profile().get_intrinsics()
cv2.namedWindow("Depth Stream", cv2.WINDOW_AUTOSIZE)
cv2.namedWindow("Color Stream", cv2.WINDOW_AUTOSIZE)
pinhole_camera_intrinsic = o3d.camera.PinholeCameraIntrinsic(intr.width, intr.height, intr.fx, intr.fy, intr.ppx, intr.ppy)
geometrie_added = False
vis = o3d.visualization.VisualizerWithKeyCallback()
vis.create_window("Pointcloud")
pointcloud = o3d.geometry.PointCloud()
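# Illustrative sketch (not from the original snippet): one common way the aligned frames,
# `pinhole_camera_intrinsic` and the empty `pointcloud` above are combined into an Open3D
# point cloud. Assumes `import numpy as np` and `import open3d as o3d`.
frames = align.process(pipeline.wait_for_frames())
depth = o3d.geometry.Image(np.asanyarray(frames.get_depth_frame().get_data()))
color = o3d.geometry.Image(np.asanyarray(frames.get_color_frame().get_data()))
rgbd = o3d.geometry.RGBDImage.create_from_color_and_depth(color, depth, convert_rgb_to_intensity=False)
pcd = o3d.geometry.PointCloud.create_from_rgbd_image(rgbd, pinhole_camera_intrinsic)
pointcloud.points = pcd.points
pointcloud.colors = pcd.colors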
print("For help type --help")
exit()
# Check that the given file has a .bag extension
if os.path.splitext(args.input)[1] != ".bag":
    print("The given file is not of correct file format.")
    print("Only .bag files are accepted")
    exit()
align = rs.align(rs.stream.color)
pipeline = rs.pipeline()
config = rs.config()
# Tell config that we will use a recorded device from file to be used by the pipeline through playback.
rs.config.enable_device_from_file(config, args.input)
# Configure the pipeline to stream the depth and color streams
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 640, 480, rs.format.rgb8, 30)
# Start streaming from file
profile = pipeline.start(config)
intr = profile.get_stream(rs.stream.color).as_video_stream_profile().get_intrinsics()
# print( 'camera_intrinsic', intr.width, intr.height, intr.fx, intr.fy, intr.ppx, intr.ppy)
# Create opencv window to render image in
cv2.namedWindow("Depth Stream", cv2.WINDOW_AUTOSIZE)
cv2.namedWindow("Color Stream", cv2.WINDOW_AUTOSIZE)
pinhole_camera_intrinsic = PinholeCameraIntrinsic(intr.width, intr.height, intr.fx, intr.fy, intr.ppx, intr.ppy)
geometrie_added = False
vis = Visualizer()
vis.create_window("Pointcloud")
"""
_____ _ _
|_ _|___ ___ | |_ (_) _ __ __ _
| | / _ \/ __|| __|| || '_ \ / _` |
| || __/\__ \| |_ | || | | || (_| |
|_| \___||___/ \__||_||_| |_| \__, |
|___/
"""
if __name__ == "__main__":
try:
c = rs.config()
c.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 6)
c.enable_stream(rs.stream.infrared, 1, 1280, 720, rs.format.y8, 6)
c.enable_stream(rs.stream.infrared, 2, 1280, 720, rs.format.y8, 6)
c.enable_stream(rs.stream.color, 1280, 720, rs.format.rgb8, 6)
device_manager = DeviceManager(rs.context(), c)
device_manager.enable_all_devices()
for k in range(150):
frames = device_manager.poll_frames()
device_manager.enable_emitter(True)
device_extrinsics = device_manager.get_depth_to_color_extrinsics(frames)
finally:
device_manager.disable_streams()
import pyrealsense2 as rs
import numpy as np
import cv2
import time
from open3d import *
import os
if __name__=="__main__":
align = rs.align(rs.stream.color)
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 15)
config.enable_stream(rs.stream.color, 640, 480, rs.format.rgb8, 15)
pipeline = rs.pipeline()
profile = pipeline.start(config)
# get camera intrinsics
intr = profile.get_stream(rs.stream.color).as_video_stream_profile().get_intrinsics()
print(intr.width, intr.height, intr.fx, intr.fy, intr.ppx, intr.ppy)
pinhole_camera_intrinsic = PinholeCameraIntrinsic(intr.width, intr.height, intr.fx, intr.fy, intr.ppx, intr.ppy)
# print(type(pinhole_camera_intrinsic))
geometrie_added = False
vis = Visualizer()
vis.create_window("Pointcloud",640,480)
pointcloud = PointCloud()
i = 0
    # Key callback (bound to "K" below) that switches the visualizer background to white
    def change_background_color(vis):
        opt = vis.get_render_option()
        opt.background_color = np.asarray([1, 1, 1])
        backgroundColorFlag = 1
        # background_color ~= backgroundColorFlag
        return False

    key_to_callback = {}
    key_to_callback[ord(" ")] = saveCurrentRGBD
    key_to_callback[ord("Q")] = breakLoop
    key_to_callback[ord("K")] = change_background_color
if __name__=="__main__":
align = rs.align(rs.stream.color)
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 15)
config.enable_stream(rs.stream.color, 640, 480, rs.format.rgb8, 15)
pipeline = rs.pipeline()
profile = pipeline.start(config)
# get camera intrinsics
intr = profile.get_stream(rs.stream.color).as_video_stream_profile().get_intrinsics()
# print(intr.width, intr.height, intr.fx, intr.fy, intr.ppx, intr.ppy)
pinhole_camera_intrinsic = o3d.camera.PinholeCameraIntrinsic(intr.width, intr.height, intr.fx, intr.fy, intr.ppx, intr.ppy)
# print(type(pinhole_camera_intrinsic))
geometrie_added = False
vis = o3d.visualization.VisualizerWithKeyCallback()
vis.create_window("Pointcloud",640,480)
pointcloud = o3d.geometry.PointCloud()
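# Illustrative sketch (not from the original file): the usual per-frame update pattern for
# the non-blocking visualizer created above - add the point cloud once, then refresh it.
# In older Open3D releases update_geometry() takes no argument.
if not geometrie_added:
    vis.add_geometry(pointcloud)
    geometrie_added = True
vis.update_geometry(pointcloud)
vis.poll_events()
vis.update_renderer()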
import numpy as np
from scipy import optimize
import open3d as o3d
import pyrealsense2 as rs
import cv2
import copy
import sys
import os
import time
if __name__=="__main__":
align = rs.align(rs.stream.color)
#align = rs.align(rs.stream.depth)
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 6)
config.enable_stream(rs.stream.color, 640, 480, rs.format.rgb8, 6)
pipeline = rs.pipeline()
profile = pipeline.start(config)
# get camera intrinsics
intr = profile.get_stream(rs.stream.color).as_video_stream_profile().get_intrinsics()
# print(intr.width, intr.height, intr.fx, intr.fy, intr.ppx, intr.ppy)
pinhole_camera_intrinsic = o3d.camera.PinholeCameraIntrinsic(intr.width, intr.height, intr.fx, intr.fy, intr.ppx, intr.ppy)
RealSense = rgbdTools.Camera(fx = intr.fx, fy = intr.fy, cx = intr.ppx, cy = intr.ppy)
TablePlane = keyPoints.Plane()
# print(type(pinhole_camera_intrinsic))
cv2.namedWindow('Color Stream', cv2.WINDOW_AUTOSIZE)
# cv2.namedWindow('Depth Stream', cv2.WINDOW_AUTOSIZE)
time_start = time.time()
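# Illustrative sketch (not from the original file): deprojecting a single pixel to a 3D point
# with the color intrinsics obtained above. The pixel and depth values here are hypothetical;
# pyrealsense2 exposes rs2_deproject_pixel_to_point for exactly this.
u, v = 320, 240               # hypothetical pixel coordinates
depth_in_meters = 0.75        # hypothetical depth value at (u, v)
x, y, z = rs.rs2_deproject_pixel_to_point(intr, [u, v], depth_in_meters)
# Equivalent by hand: x = (u - intr.ppx) * depth / intr.fx, y = (v - intr.ppy) * depth / intr.fy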
import pyrealsense2 as rs
import numpy as np
import cv2
import time
from open3d import *
import os
if __name__=="__main__":
align = rs.align(rs.stream.color)
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 640, 480, rs.format.rgb8, 30)
config.enable_record_to_file('object_detection2.bag')
pipeline = rs.pipeline()
profile = pipeline.start(config)
# get camera intrinsics
intr = profile.get_stream(rs.stream.color).as_video_stream_profile().get_intrinsics()
print(intr.width, intr.height, intr.fx, intr.fy, intr.ppx, intr.ppy)
pinhole_camera_intrinsic = PinholeCameraIntrinsic(intr.width, intr.height, intr.fx, intr.fy, intr.ppx, intr.ppy)
# print(type(pinhole_camera_intrinsic))
cv2.namedWindow('Color Stream', cv2.WINDOW_AUTOSIZE)
cv2.namedWindow('Depth Stream', cv2.WINDOW_AUTOSIZE)
geometrie_added = False
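# Illustrative sketch (not from the original file): the pyrealsense2 colorizer is a simple
# way to turn z16 depth frames into a displayable image for the 'Depth Stream' window above
# while the bag is being recorded. Assumes `import numpy as np`.
colorizer = rs.colorizer()
frames = pipeline.wait_for_frames()
depth_frame = frames.get_depth_frame()
if depth_frame:
    depth_colormap = np.asanyarray(colorizer.colorize(depth_frame).get_data())
    cv2.imshow('Depth Stream', depth_colormap)
    cv2.waitKey(1)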
import pyrealsense2 as rs
import numpy as np
import cv2
from datetime import datetime
from open3d import *
import os
if __name__=="__main__":
align = rs.align(rs.stream.color)
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 15)
config.enable_stream(rs.stream.color, 640, 480, rs.format.rgb8, 15)
pipeline = rs.pipeline()
profile = pipeline.start(config)
# get camera intrinsics
intr = profile.get_stream(rs.stream.color).as_video_stream_profile().get_intrinsics()
print(intr.width, intr.height, intr.fx, intr.fy, intr.ppx, intr.ppy)
pinhole_camera_intrinsic = PinholeCameraIntrinsic(intr.width, intr.height, intr.fx, intr.fy, intr.ppx, intr.ppy)
# print(type(pinhole_camera_intrinsic))
geometrie_added = False
vis = Visualizer()
vis.create_window("Pointcloud",640,480)
pointcloud = PointCloud()
i = 0
    # Key callback that switches the visualizer background to white
    def change_background_color(vis):
        opt = vis.get_render_option()
        opt.background_color = np.asarray([1, 1, 1])
        backgroundColorFlag = 1
        # background_color ~= backgroundColorFlag
        return False
if __name__=="__main__":
bag_name = time.strftime('%Y-%m-%d_%H:%M:%S',time.localtime(time.time()))
output_path = bag_name + '.bag'
align = rs.align(rs.stream.color)
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 640, 480, rs.format.rgb8, 30)
config.enable_record_to_file(output_path)
pipeline = rs.pipeline()
profile = pipeline.start(config)
# get camera intrinsics
intr = profile.get_stream(rs.stream.color).as_video_stream_profile().get_intrinsics()
print(intr.width, intr.height, intr.fx, intr.fy, intr.ppx, intr.ppy)
pinhole_camera_intrinsic = o3d.camera.PinholeCameraIntrinsic(intr.width, intr.height, intr.fx, intr.fy, intr.ppx, intr.ppy)
# print(type(pinhole_camera_intrinsic))
cv2.namedWindow('Color Stream', cv2.WINDOW_AUTOSIZE)
cv2.namedWindow('Depth Stream', cv2.WINDOW_AUTOSIZE)
geometrie_added = False
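# Illustrative sketch (not from the original file): with enable_record_to_file() set above,
# frames are written to the bag for as long as the pipeline runs, so a minimal recorder just
# keeps pulling frames and stops the pipeline to finalize the file. Assumes `import numpy as np`.
try:
    while True:
        frames = pipeline.wait_for_frames()
        color_frame = frames.get_color_frame()
        if not color_frame:
            continue
        color_image = np.asanyarray(color_frame.get_data())
        cv2.imshow('Color Stream', cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR))
        if cv2.waitKey(1) == 27:  # Esc ends the recording
            break
finally:
    pipeline.stop()  # closes the .bag file cleanly
    cv2.destroyAllWindows()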
# Helper (name assumed) returning a frame's intrinsics as an Open3D PinholeCameraIntrinsic.
def get_intrinsic_matrix(frame):
    intrinsics = frame.profile.as_video_stream_profile().get_intrinsics()
    out = o3d.camera.PinholeCameraIntrinsic(intrinsics.width, intrinsics.height, intrinsics.fx,
                                            intrinsics.fy, intrinsics.ppx, intrinsics.ppy)
    return out
if __name__ == "__main__":
    # Create a pipeline
    pipeline = rs.pipeline()
    # Create a config and configure the pipeline to stream
    # different resolutions of color and depth streams
    config = rs.config()
    config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, 640, 480, rs.format.rgb8, 30)
    # Start streaming
    profile = pipeline.start(config)
    depth_sensor = profile.get_device().first_depth_sensor()
    # Using preset HighAccuracy for recording
    depth_sensor.set_option(rs.option.visual_preset, Preset.HighAccuracy)
    # Getting the depth sensor's depth scale (see rs-align example for explanation)
    depth_scale = depth_sensor.get_depth_scale()
    # We will not display the background of objects more than
    # clipping_distance_in_meters meters away
    clipping_distance_in_meters = 3  # 3 meters
    clipping_distance = clipping_distance_in_meters / depth_scale
    # print(depth_scale)
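# Illustrative sketch (not from the original file): how a clipping_distance computed this way
# is typically applied, as in Intel's rs-align example - pixels beyond the threshold (or with
# invalid depth 0) are replaced by grey. `depth_image` and `color_image` are assumed to be the
# aligned depth/color frames as numpy arrays; assumes `import numpy as np`.
grey_color = 153
depth_image_3d = np.dstack((depth_image, depth_image, depth_image))
bg_removed = np.where((depth_image_3d > clipping_distance) | (depth_image_3d <= 0),
                      grey_color, color_image)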