Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# NOTE(review): pasted snippet — the bodies of `while`/`if`/`elif` below lost
# their indentation, so this fragment is not runnable verbatim. Code is left
# byte-identical; comments only describe the apparent intent.
#
# Per-frame loop: grab a RealSense frameset, align depth to color, run the
# librealsense post-processing chain, detect circles in the color image, and
# on a second pass (i == 1) fit a table plane from the circles' depth samples.
while True:
# Pt2 = []
Point2 = o3d.geometry.PointCloud()  # fresh Open3D point cloud each iteration
frames = pipeline.wait_for_frames()  # block until a coherent frameset arrives
aligned_frames = align.process(frames)  # reproject depth into the color viewpoint
color_frame = aligned_frames.get_color_frame()
color_image = np.asanyarray(color_frame.get_data())
depth_frame = aligned_frames.get_depth_frame()
# Post-processing chain: decimate -> depth->disparity -> spatial smoothing
# -> temporal smoothing -> disparity->depth (no hole filling here).
depth_frame = rs.decimation_filter(1).process(depth_frame)
depth_frame = rs.disparity_transform(True).process(depth_frame)
depth_frame = rs.spatial_filter().process(depth_frame)
depth_frame = rs.temporal_filter().process(depth_frame)
depth_frame = rs.disparity_transform(False).process(depth_frame)
depth_image = np.asanyarray(depth_frame.get_data())
color_image1 = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR)  # RGB -> BGR for OpenCV
# Presumably x-centers, y-centers and radii of detected circles —
# TODO confirm against the keyPoints module.
xl,yl,rl = keyPoints.getCircles(color_image1)
old_xyr = []
currentImage = color_image1.copy()  # draw on a copy, keep the original intact
if i == 0:
# First pass: paint each detected circle filled in green (thickness -1).
for ind,x in enumerate(xl):
cv2.circle(currentImage, (xl[ind],yl[ind]), rl[ind], (0, 255, 0), -1)
elif i == 1:
# Second pass: fit a plane from the circles' depth values. (a, b, c, d)
# presumably are plane coefficients a*x + b*y + c*z + d = 0 —
# NOTE(review): confirm against keyPoints.calculatePlane.
a,b,c,d = keyPoints.calculatePlane(RealSense,depth_image,xl,yl,rl)
TablePlane.getParam(a,b,c,d)  # store the fitted plane parameters
print('calculating plane done!')
i = 0
# NOTE(review): truncated snippet — this `try:` has no visible matching
# `except`/`finally`, and loop-body indentation was lost in the paste.
#
# Capture loop: align depth to color, filter depth, show both streams in
# OpenCV windows, wrap them as images, and wire visualizer key bindings.
try:
while True:
time_start = time.time()  # per-iteration timing start
pointcloud.clear()  # reuse a single point-cloud object across frames
frames = pipeline.wait_for_frames()
aligned_frames = align.process(frames)
color_frame = aligned_frames.get_color_frame()
color_image = np.asanyarray(color_frame.get_data())
depth_frame = aligned_frames.get_depth_frame()
# Filter chain: decimate -> disparity -> spatial -> temporal -> depth;
# hole filling is deliberately commented out below.
depth_frame = rs.decimation_filter(1).process(depth_frame)
depth_frame = rs.disparity_transform(True).process(depth_frame)
depth_frame = rs.spatial_filter().process(depth_frame)
depth_frame = rs.temporal_filter().process(depth_frame)
depth_frame = rs.disparity_transform(False).process(depth_frame)
# depth_frame = rs.hole_filling_filter().process(depth_frame)
depth_image = np.asanyarray(depth_frame.get_data())
color_image1 = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR)
cv2.namedWindow('color image', cv2.WINDOW_AUTOSIZE)
cv2.imshow('color image', cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR))
cv2.namedWindow('depth image', cv2.WINDOW_AUTOSIZE)
cv2.imshow('depth image', depth_image )
# `Image` is unqualified here — presumably the legacy open3d API imported
# via a star import; TODO confirm against the file's import block.
depth = Image(depth_image)
color = Image(color_image)
vis.register_key_callback(ord("Q"), breakLoop)  # Q: exit the visualizer loop
vis.register_key_callback(ord("K"), change_background_color)  # K: toggle background
# NOTE(review): truncated snippet — `try:` without a visible handler;
# indentation lost. Comments only; code byte-identical.
#
# Capture loop: filter depth, display color + colorized depth with OpenCV,
# then build an Open3D RGBD image and back-project it to a point cloud.
try:
while True:
dt0 = time.time()  # frame-time measurement start
frames = pipeline.wait_for_frames()
aligned_frames = align.process(frames)
color_frame = aligned_frames.get_color_frame()
color_image = np.asanyarray(color_frame.get_data())
depth_frame = aligned_frames.get_depth_frame()
# Standard post-processing chain (hole filling disabled below).
depth_frame = rs.decimation_filter(1).process(depth_frame)
depth_frame = rs.disparity_transform(True).process(depth_frame)
depth_frame = rs.spatial_filter().process(depth_frame)
depth_frame = rs.temporal_filter().process(depth_frame)
depth_frame = rs.disparity_transform(False).process(depth_frame)
# depth_frame = rs.hole_filling_filter().process(depth_frame)
depth_image = np.asanyarray(depth_frame.get_data())
color_image1 = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR)
depth_color_frame = rs.colorizer().colorize(depth_frame)  # false-color depth for display
depth_color_image = np.asanyarray(depth_color_frame.get_data())
cv2.imshow('Color Stream', color_image1)
cv2.imshow('Depth Stream', depth_color_image )
depth = o3d.geometry.Image(depth_image)
color = o3d.geometry.Image(color_image)
# keep true RGB colors in the cloud (no grayscale conversion)
rgbd = o3d.geometry.RGBDImage.create_rgbd_image_from_color_and_depth(color, depth, convert_rgb_to_intensity = False)
# NOTE(review): these factory names match an older Open3D release (modern
# API is create_from_color_and_depth / create_from_rgbd_image) — confirm
# the pinned Open3D version before touching this.
pcd = o3d.geometry.PointCloud.create_point_cloud_from_rgbd_image(rgbd, pinhole_camera_intrinsic)
# NOTE(review): start of a different pasted snippet — filter configuration
# plus the beginning of an OpenCV trackbar callback, cut off mid-body.
PRESET_FILE = "../cfg/d4xx-default.json"
# List of filters to be applied, in this order.
# Depth Frame (input)
# >> Decimation Filter (reduces depth frame density)
# >> Threshold Filter (removes values outside recommended range)
# >> Depth2Disparity Transform** (transform the scene into disparity domain)
# >> Spatial Filter (edge-preserving spatial smoothing)
# >> Temporal Filter (reduces temporal noise)
# >> Hole Filling Filter (rectify missing data in the resulting image)
# >> Disparity2Depth Transform** (revert the results back to depth)
# >> Filtered Depth (output)
# Each entry is [enabled?, human-readable label, filter instance].
filters = [
[True, "Decimation Filter", rs.decimation_filter()],
[True, "Threshold Filter", rs.threshold_filter()],
[True, "Depth to Disparity", rs.disparity_transform(True)],
[True, "Spatial Filter", rs.spatial_filter()],
[True, "Temporal Filter", rs.temporal_filter()],
[True, "Hole Filling Filter", rs.hole_filling_filter()],
[True, "Disparity to Depth", rs.disparity_transform(False)]
]
######################################################
## Functions to change filtering options online ##
## Description for each option can be found at: ##
## https://github.com/IntelRealSense/librealsense/blob/master/doc/post-processing-filters.md
######################################################
# Allowed range for the decimation filter's magnitude option.
decimation_magnitude_min = 2
decimation_magnitude_max = 8
def on_trackbar_decimation(val):
# OpenCV trackbar callback: clamp `val` into the valid magnitude range.
# NOTE(review): body is truncated here — the clamping/apply logic is missing.
# Sanity check
if val < decimation_magnitude_min:
# NOTE(review): another pasted snippet — configuration constants for a
# RealSense-to-ArduPilot bridge script.
FPS = 30 # Defines the rate of frames per second
DEPTH_RANGE = [0.1, 8.0] # Replace with your sensor's specifics, in meter
USE_PRESET_FILE = True
PRESET_FILE = "../cfg/d4xx-default.json"
# List of filters to be applied, in this order.
# https://github.com/IntelRealSense/librealsense/blob/master/doc/post-processing-filters.md
# Each entry is [enabled?, human-readable label, filter instance];
# note hole filling is disabled (False) in this configuration.
filters = [
[True, "Decimation Filter", rs.decimation_filter()],
[True, "Threshold Filter", rs.threshold_filter()],
[True, "Depth to Disparity", rs.disparity_transform(True)],
[True, "Spatial Filter", rs.spatial_filter()],
[True, "Temporal Filter", rs.temporal_filter()],
[False, "Hole Filling Filter", rs.hole_filling_filter()],
[True, "Disparity to Depth", rs.disparity_transform(False)]
]
######################################################
## ArduPilot-related parameters - reconfigurable ##
######################################################
# Default configurations for connection to the FCU
connection_string_default = '/dev/ttyUSB0'
connection_baudrate_default = 921600
connection_timeout_sec_default = 5
# Use this to rotate all processed data
camera_facing_angle_degree = 0
# Enable/disable each message/function individually
enable_msg_obstacle_distance = True
# NOTE(review): pasted snippet with lost indentation — not runnable verbatim.
#
# Capture loop: filter depth, show color/depth windows, then build an RGBD
# image and point cloud using the *unqualified* legacy Open3D API
# (Image / create_rgbd_image_from_color_and_depth / PointCloud) —
# presumably via a star import from an old open3d release; TODO confirm.
while True:
time_start = time.time()
pointcloud.clear()  # reuse one point-cloud object across frames
frames = pipeline.wait_for_frames()
aligned_frames = align.process(frames)
color_frame = aligned_frames.get_color_frame()
color_image = np.asanyarray(color_frame.get_data())
depth_frame = aligned_frames.get_depth_frame()
# Filter chain: decimate -> disparity -> spatial -> temporal -> depth.
depth_frame = rs.decimation_filter(1).process(depth_frame)
depth_frame = rs.disparity_transform(True).process(depth_frame)
depth_frame = rs.spatial_filter().process(depth_frame)
depth_frame = rs.temporal_filter().process(depth_frame)
depth_frame = rs.disparity_transform(False).process(depth_frame)
# depth_frame = rs.hole_filling_filter().process(depth_frame)
depth_image = np.asanyarray(depth_frame.get_data())
color_image1 = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR)
cv2.namedWindow('color image', cv2.WINDOW_AUTOSIZE)
cv2.imshow('color image', cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR))
cv2.namedWindow('depth image', cv2.WINDOW_AUTOSIZE)
cv2.imshow('depth image', depth_image )
depth = Image(depth_image)
color = Image(color_image)
rgbd = create_rgbd_image_from_color_and_depth(color, depth, convert_rgb_to_intensity = False)
pcd = create_point_cloud_from_rgbd_image(rgbd, pinhole_camera_intrinsic)
# NOTE(review): rebinding `pointcloud` here discards the object cleared above.
pointcloud = PointCloud()
i = 0
# NOTE(review): truncated snippet — `try:` without a visible handler;
# indentation lost in the paste. Comments only; code byte-identical.
#
# Capture loop: filter depth, show color + colorized depth streams, then
# build an RGBD image / point cloud via the legacy unqualified Open3D API.
try:
while True:
dt0 = datetime.now()  # frame-time measurement start (datetime-based here)
frames = pipeline.wait_for_frames()
aligned_frames = align.process(frames)
color_frame = aligned_frames.get_color_frame()
color_image = np.asanyarray(color_frame.get_data())
depth_frame = aligned_frames.get_depth_frame()
# Filter chain: decimate -> disparity -> spatial -> temporal -> depth.
depth_frame = rs.decimation_filter(1).process(depth_frame)
depth_frame = rs.disparity_transform(True).process(depth_frame)
depth_frame = rs.spatial_filter().process(depth_frame)
depth_frame = rs.temporal_filter().process(depth_frame)
depth_frame = rs.disparity_transform(False).process(depth_frame)
# depth_frame = rs.hole_filling_filter().process(depth_frame)
depth_image = np.asanyarray(depth_frame.get_data())
color_image1 = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR)
depth_color_frame = rs.colorizer().colorize(depth_frame)  # false-color depth for display
depth_color_image = np.asanyarray(depth_color_frame.get_data())
cv2.imshow('Color Stream', color_image1)
cv2.imshow('Depth Stream', depth_color_image )
depth = Image(depth_image)
color = Image(color_image)
rgbd = create_rgbd_image_from_color_and_depth(color, depth, convert_rgb_to_intensity = False)
pcd = create_point_cloud_from_rgbd_image(rgbd, pinhole_camera_intrinsic)
pointcloud = PointCloud()
i = 0
# NOTE(review): pasted snippet with lost indentation — not runnable verbatim.
#
# Capture loop: clear the shared point cloud, grab and filter a frameset,
# display color and raw depth images, and wrap them as (legacy API) images.
while True:
dt0 = datetime.now()
pointcloud.clear()
frames = pipeline.wait_for_frames()
aligned_frames = align.process(frames)
color_frame = aligned_frames.get_color_frame()
color_image = np.asanyarray(color_frame.get_data())
depth_frame = aligned_frames.get_depth_frame()
# Filter chain: decimate -> disparity -> spatial -> temporal -> depth.
depth_frame = rs.decimation_filter(1).process(depth_frame)
depth_frame = rs.disparity_transform(True).process(depth_frame)
depth_frame = rs.spatial_filter().process(depth_frame)
depth_frame = rs.temporal_filter().process(depth_frame)
depth_frame = rs.disparity_transform(False).process(depth_frame)
# depth_frame = rs.hole_filling_filter().process(depth_frame)
depth_image = np.asanyarray(depth_frame.get_data())
color_image1 = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR)
cv2.namedWindow('color image', cv2.WINDOW_AUTOSIZE)
cv2.imshow('color image', cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR))
cv2.namedWindow('depth image', cv2.WINDOW_AUTOSIZE)
cv2.imshow('depth image', depth_image )
depth = Image(depth_image)
color = Image(color_image)
# NOTE(review): final pasted snippet, cut off mid-block — indentation lost.
#
# Workspace bounds start unset and are presumably filled in later from the
# detected plane / point cloud — TODO confirm in the full source.
z_min = None
z_max = None
x_min = None
x_max = None
# Capture loop: filter depth, then (while plane_flag == 0) look for a 9x6
# chessboard in the color image to calibrate a reference plane.
while True:
dt0 = datetime.now()
frames = pipeline.wait_for_frames()
aligned_frames = align.process(frames)
color_frame = aligned_frames.get_color_frame()
color_image = np.asanyarray(color_frame.get_data())
depth_frame = aligned_frames.get_depth_frame()
# Filter chain: decimate -> disparity -> spatial -> temporal -> depth.
depth_frame = rs.decimation_filter(1).process(depth_frame)
depth_frame = rs.disparity_transform(True).process(depth_frame)
depth_frame = rs.spatial_filter().process(depth_frame)
depth_frame = rs.temporal_filter().process(depth_frame)
depth_frame = rs.disparity_transform(False).process(depth_frame)
# depth_frame = rs.hole_filling_filter().process(depth_frame)
depth_image = np.asanyarray(depth_frame.get_data())
color_image1 = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR)
if plane_flag == 0:
# Detect interior corners of a 9x6 chessboard pattern.
chessboard_found1, corners1 = cv2.findChessboardCorners(color_image1, (9, 6))
# NOTE(review): squeeze() runs before checking chessboard_found1 — if
# detection failed, corners1 may be None and this could misbehave.
corners = np.asanyarray(corners1).squeeze()
if chessboard_found1:
# cv2.drawChessboardCorners(color_image1,(9,6),corners1,chessboard_found1)
Points = []