# configure the color stream
self._cfg.enable_stream(
    rs.stream.color,
    RealSenseSensor.COLOR_IM_WIDTH,
    RealSenseSensor.COLOR_IM_HEIGHT,
    rs.format.bgr8,
    RealSenseSensor.FPS
)

# configure the depth stream
self._cfg.enable_stream(
    rs.stream.depth,
    RealSenseSensor.DEPTH_IM_WIDTH,
    360 if self._depth_align else RealSenseSensor.DEPTH_IM_HEIGHT,
    rs.format.z16,
    RealSenseSensor.FPS
)
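# Illustrative sketch (assumption, not part of the snippet above): a minimal
# standalone equivalent of the stream configuration above, started on its own
# pipeline and used to read one color/depth pair into NumPy arrays. The concrete
# resolution/FPS values stand in for the RealSenseSensor class constants.
import numpy as np
import pyrealsense2 as rs

cfg = rs.config()
cfg.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
cfg.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)

pipe = rs.pipeline()
pipe.start(cfg)
try:
    frames = pipe.wait_for_frames()
    color_image = np.asanyarray(frames.get_color_frame().get_data())
    depth_image = np.asanyarray(frames.get_depth_frame().get_data())
    print("color:", color_image.shape, "depth:", depth_image.shape)
finally:
    pipe.stop()
#####################################################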
def __init__(self, configuration_path):
    super(RealsenseController, self).__init__()
    self._cfg = readConfiguration(configuration_path)
    self.width = self._cfg["FRAME_ARGS"]["width"]
    self.height = self._cfg["FRAME_ARGS"]["height"]
    self.fps = self._cfg["FRAME_ARGS"]["fps"]
    self.serial_id = self._cfg["DEVICE_CONFIGURATION"]["serial_id"]
    self.points = rs.points()
    self.pipeline = rs.pipeline()
    config = rs.config()
    if self.serial_id != '':
        config.enable_device(serial=self.serial_id)
    config.enable_stream(rs.stream.infrared, 1, 1280, 720, rs.format.y8, self.fps)
    config.enable_stream(rs.stream.infrared, 2, 1280, 720, rs.format.y8, self.fps)
    config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, self.fps)
    config.enable_stream(rs.stream.color, self.width, self.height, rs.format.bgr8, self.fps)
    self.profile = self.pipeline.start(config)
    align_to = rs.stream.color
    self.align = rs.align(align_to)
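# Illustrative sketch (assumption, not part of the controller above): how the
# serial_id that __init__ passes to enable_device() can be discovered at
# runtime by enumerating the connected RealSense devices.
import pyrealsense2 as rs

ctx = rs.context()
for dev in ctx.query_devices():
    print(dev.get_info(rs.camera_info.name),
          dev.get_info(rs.camera_info.serial_number))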
#####################################################
# First import the library
import pyrealsense2 as rs
# Import Numpy for easy array manipulation
import numpy as np
# Import OpenCV for easy image rendering
import cv2
# Create a pipeline
pipeline = rs.pipeline()
# Create a config and configure the pipeline to stream
# different resolutions of color and depth streams
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
# Start streaming
profile = pipeline.start(config)
# Getting the depth sensor's depth scale (see rs-align example for explanation)
depth_sensor = profile.get_device().first_depth_sensor()
depth_scale = depth_sensor.get_depth_scale()
print("Depth Scale is: " , depth_scale)
# We will be removing the background of objects more than
# clipping_distance_in_meters meters away
clipping_distance_in_meters = 1 #1 meter
clipping_distance = clipping_distance_in_meters / depth_scale
# Create an align object
# rs.align allows us to perform alignment of depth frames to other frames
# The "align_to" is the stream type to which we plan to align depth frames
align_to = rs.stream.color
align = rs.align(align_to)
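# Illustrative continuation (assumption, condensed from the usual alignment
# workflow rather than copied from this snippet): apply the align object and
# the clipping_distance computed above to grey out every pixel farther than
# one meter from the camera.
grey_color = 153
frames = pipeline.wait_for_frames()
aligned_frames = align.process(frames)
aligned_depth_frame = aligned_frames.get_depth_frame()
color_frame = aligned_frames.get_color_frame()
depth_image = np.asanyarray(aligned_depth_frame.get_data())
color_image = np.asanyarray(color_frame.get_data())
# Depth is single-channel; stack it so the mask broadcasts over the BGR image.
depth_image_3d = np.dstack((depth_image, depth_image, depth_image))
bg_removed = np.where((depth_image_3d > clipping_distance) | (depth_image_3d <= 0),
                      grey_color, color_image)
cv2.imshow("Background removed", bg_removed)
cv2.waitKey(0)
pipeline.stop()
#####################################################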
def _config_pipe(self):
    """Configures pipeline to stream color and depth.
    """
    self._cfg.enable_device(self._serial)

    # configure color stream
    self._cfg.enable_stream(
        rs.stream.color, self._width, self._height, rs.format.rgb8, self._fps,
    )

    # configure depth stream
    self._cfg.enable_stream(
        rs.stream.depth, self._width, self._height, rs.format.z16, self._fps,
    )
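# Illustrative sketch (assumption, not part of the class above): once a pipeline
# configured like _config_pipe() has been started, the depth stream's intrinsics
# can be used to deproject a pixel into a 3D point in camera coordinates.
# The values 640x480 @ 30 FPS stand in for self._width / self._height / self._fps.
import pyrealsense2 as rs

cfg = rs.config()
cfg.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)

pipe = rs.pipeline()
profile = pipe.start(cfg)
try:
    depth_profile = profile.get_stream(rs.stream.depth).as_video_stream_profile()
    intrinsics = depth_profile.get_intrinsics()
    depth_frame = pipe.wait_for_frames().get_depth_frame()
    # Distance in meters at the image centre, then its 3D camera-space point.
    d = depth_frame.get_distance(320, 240)
    point = rs.rs2_deproject_pixel_to_point(intrinsics, [320, 240], d)
    print("3D point at image centre:", point)
finally:
    pipe.stop()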