How to use the pyrealsense2.stream enumeration in pyrealsense2

To help you get started, we've selected a few pyrealsense2 examples, based on popular ways it is used in public projects.

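Before the project snippets, here is a minimal, self-contained sketch of the basic pattern. rs.stream is an enumeration of stream types (depth, color, infrared, fisheye, gyro, accel, pose) that you pass to config.enable_stream to tell the pipeline what to capture; the resolutions, formats, and frame rate below are illustrative choices, not requirements.

import pyrealsense2 as rs

pipeline = rs.pipeline()
config = rs.config()

# Request a depth stream and a color stream (parameters are illustrative)
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)

pipeline.start(config)
try:
    frames = pipeline.wait_for_frames()
    depth_frame = frames.get_depth_frame()  # frame produced by rs.stream.depth
    color_frame = frames.get_color_frame()  # frame produced by rs.stream.color
finally:
    pipeline.stop()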

Example from intel-isl/Open3D (examples/Python/ReconstructionSystem/sensors/realsense_recorder.py):
    if exists(path_bag):
        user_input = input("%s exists. Overwrite? (y/n) : " % path_bag)
        if user_input.lower() == 'n':
            exit()

    # Create a pipeline
    pipeline = rs.pipeline()

    # Create a config and configure the pipeline to stream
    # different resolutions of color and depth streams
    config = rs.config()

    if args.record_imgs or args.record_rosbag:
        # notes: using a 640 x 480 depth resolution produces smooth depth boundaries
        #        (the depth stream below requests 1280 x 720 instead);
        #        rs.format.bgr8 is used for the color image so OpenCV can display it directly
        config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 30)
        config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
        if args.record_rosbag:
            config.enable_record_to_file(path_bag)
    if args.playback_rosbag:
        config.enable_device_from_file(path_bag, repeat_playback=True)

    # Start streaming
    profile = pipeline.start(config)
    depth_sensor = profile.get_device().first_depth_sensor()

    # Use the HighAccuracy depth preset for recording
    # (Preset is an enum of visual presets defined elsewhere in the full recorder script)
    if args.record_rosbag or args.record_imgs:
        depth_sensor.set_option(rs.option.visual_preset, Preset.HighAccuracy)

    # Getting the depth sensor's depth scale (see rs-align example for explanation)
    depth_scale = depth_sensor.get_depth_scale()
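
The depth scale retrieved at the end converts raw z16 depth units to meters. An illustrative follow-up (depth_frame, y, and x are hypothetical names, not part of the recorder script):

import numpy as np

depth_image = np.asanyarray(depth_frame.get_data())
distance_in_meters = depth_image[y, x] * depth_scale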

Example from IntelRealSense/librealsense (wrappers/python/examples/box_dimensioner_multicam/realsense_device_manager.py):
        Parameters:
        -----------
        frames : dict
                 Mapping from device serial number to the frameset grabbed from
                 that Intel RealSense device

        Return:
        -----------
        device_extrinsics : dict
        keys  : serial
                Serial number of the device
        values: rs.extrinsics
                Depth-to-color extrinsics of the corresponding device
        """
        device_extrinsics = {}
        for (serial, frameset) in frames.items():
            depth_profile = frameset[rs.stream.depth].get_profile().as_video_stream_profile()
            color_profile = frameset[rs.stream.color].get_profile()
            device_extrinsics[serial] = depth_profile.get_extrinsics_to(color_profile)
        return device_extrinsics
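
The rs.extrinsics value produced by get_extrinsics_to holds a rotation (a column-major list of nine floats) and a translation in meters. A hypothetical look-up into the dictionary built above (the serial string is made up):

extr = device_extrinsics["012345678901"]  # hypothetical device serial
print(extr.rotation)     # 3x3 rotation as a flat, column-major list of 9 floats
print(extr.translation)  # translation vector in meters (3 floats)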

Example from IntelRealSense/librealsense (wrappers/python/examples/export_ply_example.py):

# First import the library
import pyrealsense2 as rs


# Declare pointcloud object, for calculating pointclouds and texture mappings
pc = rs.pointcloud()
# We want the points object to be persistent so we can display the last cloud when a frame drops
points = rs.points()

# Declare RealSense pipeline, encapsulating the actual device and sensors
pipe = rs.pipeline()
config = rs.config()
# Enable depth stream
config.enable_stream(rs.stream.depth)

# Start streaming with chosen configuration
pipe.start(config)

# We'll use the colorizer to generate texture for our PLY
# (alternatively, texture can be obtained from color or infrared stream)
colorizer = rs.colorizer()

try:
    # Wait for the next set of frames from the camera
    frames = pipe.wait_for_frames()
    colorized = colorizer.process(frames)

    # Create save_to_ply object and export the colorized frameset
    ply = rs.save_to_ply("1.ply")

    print("Saving to 1.ply...")
    ply.process(colorized)
    print("Done")
finally:
    pipe.stop()
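
Note that enable_stream here is called with only rs.stream.depth; when the resolution, format, and frame rate are omitted, librealsense picks a default depth profile for the connected device.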

Example from pupil-labs/pupil (pupil_src/shared_modules/video_capture/realsense2_backend.py):
"_get_valid_frame_rate: self._available_modes not set yet. Returning default fps."
            )
            if stream_type == rs.stream.color:
                return DEFAULT_COLOR_FPS
            elif stream_type == rs.stream.depth:
                return DEFAULT_DEPTH_FPS
            else:
                raise ValueError("Unexpected `stream_type`: {}".format(stream_type))

        if frame_size not in self._available_modes[stream_type]:
            logger.error(
                "Frame size not supported for {}: {}. Returning default fps".format(
                    stream_type, frame_size
                )
            )
            if stream_type == rs.stream.color:
                return DEFAULT_COLOR_FPS
            elif stream_type == rs.stream.depth:
                return DEFAULT_DEPTH_FPS

        if fps not in self._available_modes[stream_type][frame_size]:
            old_fps = fps
            rates = [
                abs(r - fps) for r in self._available_modes[stream_type][frame_size]
            ]
            best_rate_idx = rates.index(min(rates))
            fps = self._available_modes[stream_type][frame_size][best_rate_idx]
            logger.warning(
                "{} fps is not supported for {} at frame size {}. Falling back to {} fps".format(
                    old_fps, stream_type, frame_size, fps
                )
            )

        return fps
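
The _available_modes mapping this backend consults (stream type, then frame size, then the supported frame rates) can be built by walking each sensor's stream profiles. A hedged sketch of that enumeration, not taken from the pupil-labs source:

import pyrealsense2 as rs

ctx = rs.context()
device = ctx.query_devices()[0]  # assumes at least one connected device

available_modes = {}
for sensor in device.query_sensors():
    for profile in sensor.get_stream_profiles():
        if not profile.is_video_stream_profile():
            continue  # skip motion/pose profiles, which have no frame size
        vp = profile.as_video_stream_profile()
        size = (vp.width(), vp.height())
        available_modes.setdefault(profile.stream_type(), {}).setdefault(size, []).append(profile.fps())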

Example from IntelRealSense/librealsense (wrappers/python/examples/t265_stereo.py):
    window_size = 5
    min_disp = 0
    # num_disp must be divisible by 16
    num_disp = 112 - min_disp
    max_disp = min_disp + num_disp
    stereo = cv2.StereoSGBM_create(minDisparity = min_disp,
                                   numDisparities = num_disp,
                                   blockSize = 16,
                                   P1 = 8*3*window_size**2,
                                   P2 = 32*3*window_size**2,
                                   disp12MaxDiff = 1,
                                   uniquenessRatio = 10,
                                   speckleWindowSize = 100,
                                   speckleRange = 32)

    # Retrieve the stream and intrinsic properties for both cameras
    profiles = pipe.get_active_profile()
    streams = {"left"  : profiles.get_stream(rs.stream.fisheye, 1).as_video_stream_profile(),
               "right" : profiles.get_stream(rs.stream.fisheye, 2).as_video_stream_profile()}
    intrinsics = {"left"  : streams["left"].get_intrinsics(),
                  "right" : streams["right"].get_intrinsics()}

    # Print information about both cameras
    print("Left camera:",  intrinsics["left"])
    print("Right camera:", intrinsics["right"])

    # Translate the intrinsics from librealsense into OpenCV
    K_left  = camera_matrix(intrinsics["left"])
    D_left  = fisheye_distortion(intrinsics["left"])
    K_right = camera_matrix(intrinsics["right"])
    D_right = fisheye_distortion(intrinsics["right"])
    (width, height) = (intrinsics["left"].width, intrinsics["left"].height)

    # Get the relative extrinsics between the left and right camera
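
The excerpt is cut off at this point. A minimal sketch of fetching the left-to-right pose from the stream profiles above (assuming numpy is imported as np):

extr = streams["left"].get_extrinsics_to(streams["right"])
R = np.reshape(extr.rotation, (3, 3)).T  # librealsense stores rotation column-major
T = np.array(extr.translation)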

Example from IntelRealSense/librealsense (tools/rs-imu-calibration/rs-imu-calibration.py):
                if self.status == self.Status.idle:
                    return
                pr = frame.get_profile()
                data = frame.as_motion_frame().get_motion_data()
                data_np = np.array([data.x, data.y, data.z])
                elapsed_time = time.time() - self.step_start_time

                ## Status.collect_data
                if self.status == self.Status.collect_data:
                    sys.stdout.write('\r %15s' % self.status)
                    part_done = len(self.collected_data_accel) / float(self.samples_to_collect)
                    # sys.stdout.write(': %-3.1f (secs)' % (self.time_to_collect - elapsed_time))

                    color = COLOR_GREEN
                    if pr.stream_type() == rs.stream.gyro:
                        self.collected_data_gyro.append(np.append(frame.get_timestamp(), data_np))
                        is_moving = any(abs(data_np) > self.rotating_threshold)
                    else:
                        is_in_norm = np.linalg.norm(data_np - self.crnt_bucket) < self.max_norm
                        if is_in_norm:
                            self.collected_data_accel.append(np.append(frame.get_timestamp(), data_np))
                        else:
                            color = COLOR_RED
                        # g is the gravitational acceleration magnitude, defined elsewhere in the script
                        is_moving = abs(np.linalg.norm(data_np) - g) / g > self.moving_threshold_factor

                        sys.stdout.write(color)
                        sys.stdout.write('['+'.'*int(part_done*self.line_length)+' '*int((1-part_done)*self.line_length) + ']')
                        sys.stdout.write(COLOR_RESET)

                    if is_moving:
                        print('WARNING: MOVING')
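
Since this callback receives motion frames, pr.stream_type() distinguishes gyro samples from accel samples. A minimal sketch, not from rs-imu-calibration.py, of enabling the motion streams that feed such a callback (the frame rates are illustrative values supported by D435i-class devices):

import pyrealsense2 as rs

config = rs.config()
config.enable_stream(rs.stream.accel, rs.format.motion_xyz32f, 250)
config.enable_stream(rs.stream.gyro, rs.format.motion_xyz32f, 200)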

Example from nateroblin33/3d-bone-tracking-tool (files/speedcam.py):
    vsbool = 1
    
    # OPTIONAL code for using the default webcam/FaceTime camera
    # NOTE: If used, comment out the later "vs = color_image" assignment
    #vs = cv2.VideoCapture(args["video"])
    #vs = cv2.VideoCapture(-1)
 
# Allow the camera or video file to warm up
time.sleep(2.0)

# Print start time for log
dt = datetime.now()
print("Time Started: " + str(dt))

# Align depth frames to the color stream
align_to = rs.stream.color
align = rs.align(align_to)

# Function to normalize vector
def normalize(v):
    norm = np.linalg.norm(v)
    if norm == 0:
        return v
    return v / norm

# Function to calculate Roll
def roll(p1, p2, p3, baseline):
    vecx = [(p1[0]+p2[0])/2 - p3[0], (p1[1]+p2[1])/2 - p3[1], (p1[2]+p2[2])/2 - p3[2]]
    v1 = [p2[0]-p1[0], p2[1]-p1[1], p2[2]-p1[2]]
    v2 = [p3[0]-p1[0], p3[1]-p1[1], p3[2]-p1[2]]
    X = normalize(vecx)
    vecz = [(v1[1]*v2[2])-(v1[2]*v2[1]), (v1[2]*v2[0])-(v1[0]*v2[2]), (v1[0]*v2[1])-(v1[1]*v2[0])]
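
The roll() helper is cut off in this excerpt. The rs.stream usage to note is the align object created above, which re-projects depth frames onto the color stream. A hedged per-frame usage sketch (pipeline is an assumed, already-started rs.pipeline):

frames = pipeline.wait_for_frames()
aligned_frames = align.process(frames)
depth_frame = aligned_frames.get_depth_frame()  # depth re-projected into the color viewpoint
color_frame = aligned_frames.get_color_frame()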

Example from BerkeleyAutomation/perception (perception/realsense_sensor.py):
def _config_pipe(self):
        """Configures the pipeline to stream color and depth.
        """
        self._cfg.enable_device(self.id)

        # configure the color stream
        self._cfg.enable_stream(
            rs.stream.color,
            RealSenseSensor.COLOR_IM_WIDTH,
            RealSenseSensor.COLOR_IM_HEIGHT,
            rs.format.bgr8,
            RealSenseSensor.FPS
        )

        # configure the depth stream
        self._cfg.enable_stream(
            rs.stream.depth,
            RealSenseSensor.DEPTH_IM_WIDTH,
            360 if self._depth_align else RealSenseSensor.DEPTH_IM_HEIGHT,
            rs.format.z16,
            RealSenseSensor.FPS
        )
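
Here rs.stream.color and rs.stream.depth are configured from class-level constants, and enable_device pins the config to a single camera by serial number. A hypothetical way to discover serial numbers to pass in as self.id (not from the perception source):

import pyrealsense2 as rs

ctx = rs.context()
for dev in ctx.query_devices():
    print(dev.get_info(rs.camera_info.serial_number))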