# define poses of camera, table, object, etc.
T_world = stf.SimilarityTransform3D(from_frame='world', to_frame='world')
R_table_world = np.eye(3)
T_table_world = stf.SimilarityTransform3D(pose=tfx.pose(R_table_world, np.zeros(3)), from_frame='world', to_frame='table')
R_camera_table = np.load('data/calibration/rotation_camera_cb.npy')
t_camera_table = np.load('data/calibration/translation_camera_cb.npy')
cb_points_camera = np.load('data/calibration/corners_cb.npy')
T_camera_table = stf.SimilarityTransform3D(tfx.pose(R_camera_table, t_camera_table), from_frame='table', to_frame='camera')
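# compose the chessboard calibration with the table pose to get the camera pose in the world frame, then invert it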
T_camera_world = T_camera_table.dot(T_table_world)
T_world_camera = T_camera_world.inverse()
R_stp_obj = stable_pose.r
T_obj_stp = stf.SimilarityTransform3D(pose=tfx.pose(R_stp_obj.T, np.zeros(3)), from_frame='stp', to_frame='obj')
# z offset of the object above the table, computed from the stable-pose bounding box below
t_stp_table = np.array([0, 0, z])
T_stp_table = stf.SimilarityTransform3D(pose=tfx.pose(np.eye(3), t_stp_table), from_frame='table', to_frame='stp')
# the registered object-to-camera transform must be inverted before chaining with the camera pose
T_camera_obj.to_frame = 'camera'
T_obj_camera = T_camera_obj.inverse()
T_obj_world = T_obj_camera.dot(T_camera_world)
# visualize the robot's understanding of the world
logging.info('Displaying robot world state')
mv.clf()
mvis.MayaviVisualizer.plot_table(T_table_world, d=table_extent)
mvis.MayaviVisualizer.plot_pose(T_world, alpha=alpha, tube_radius=tube_radius, center_scale=center_scale)
mvis.MayaviVisualizer.plot_pose(T_obj_world, alpha=alpha, tube_radius=tube_radius, center_scale=center_scale)
mvis.MayaviVisualizer.plot_pose(T_camera_world, alpha=alpha, tube_radius=tube_radius, center_scale=center_scale)
mvis.MayaviVisualizer.plot_mesh(object_mesh, T_obj_world)
mvis.MayaviVisualizer.plot_point_cloud(cb_points_camera, T_world_camera, color=(1,1,0))
mvis.MayaviVisualizer.plot_point_cloud(points_3d, T_world_camera, color=(0,1,0), scale=0.0025)
mv.view(focalpoint=(0,0,0))
mv.show()
# save depth and color images
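# linearly rescale depth values to [0, 255] so the image can be written as an 8-bit grayscale PNG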
min_d = np.min(depth_im)
max_d = np.max(depth_im)
depth_im2 = 255.0 * (depth_im - min_d) / (max_d - min_d)
depth_im2 = Image.fromarray(depth_im2.astype(np.uint8))
filename = 'depth.png'
depth_im2.save(os.path.join(logging_dir, filename))
color_im2 = Image.fromarray(color_im)
filename = 'color.png'
color_im2.save(os.path.join(logging_dir, filename))
# transform the mesh to the stable pose to get a z offset from the table
T_obj_stp = stf.SimilarityTransform3D(pose=tfx.pose(stable_pose.r))
object_mesh = graspable.mesh
object_mesh_tf = object_mesh.transform(T_obj_stp)
mn, mx = object_mesh_tf.bounding_box()
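# lowest z coordinate of the mesh in the stable-pose frame, used as the z offset from the table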
z = mn[2]
def test(s, n, phi=45):
    # command the gripper to a fixed target pose and run n test grips at speed s
    target = pose((0.05, 0.05, 0.05), rotation_tb(phi, 90, 0), frame=DexConstants.WORLD_FRAME)
    t = DexGripTester(s, s)
    t.testGrip(target, n)
    t._ctrl.plot_approach_angle()
    t._ctrl._zeke.plot()
    t._ctrl.stop()
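# example invocation (hypothetical values): five grips at half speed with a 30 degree rotation
# test(0.5, 5, phi=30)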
def image_shift_to_transform(source_depth_im, target_depth_im, camera_params, diff_px):
    """ Converts a 2D pixel shift between two depth images into a 3D transformation in the camera frame. """
    # pick an arbitrary pixel with valid depth in the source image
    nonzero_source_depth_px = np.where(source_depth_im > 0)
    source_px = np.array([nonzero_source_depth_px[0][0], nonzero_source_depth_px[1][0]])
    source_depth = source_depth_im[source_px[0], source_px[1]]
    target_px = source_px + diff_px
    # deproject the source pixel and the shifted pixel at the same depth using the camera intrinsics
    source_point = source_depth * np.linalg.inv(camera_params.K_).dot(np.array([source_px[1], source_px[0], 1]))
    target_point = source_depth * np.linalg.inv(camera_params.K_).dot(np.array([target_px[1], target_px[0], 1]))
    # keep only the in-plane translation between the two deprojected points
    translation_source_target = target_point - source_point
    translation_source_target[2] = 0
    tf_source_target = tfx.pose(np.eye(3), translation_source_target)
    return stf.SimilarityTransform3D(pose=tfx.pose(tf_source_target), from_frame='camera', to_frame='camera')
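# example (hypothetical inputs): turn a (row, col) shift of (5, -3) pixels into a camera-frame translation
# T_shift = image_shift_to_transform(depth_im_a, depth_im_b, camera_params, np.array([5, -3]))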
gripper_mesh.center_vertices_bb()
#gripper_mesh.rescale(0.9) # to make fingertips at the wide 0.67 distance
oof = objf.ObjFile(mesh_filename)
oof.write(gripper_mesh)
T_mesh_world = stf.SimilarityTransform3D(pose=tfx.pose(np.eye(4)), from_frame='world', to_frame='mesh')
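# fixed rotation and translation offsets relating the canonical grasp frame, the gripper frame, and the gripper mesh frame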
R_grasp_gripper = np.array([[0, 0, -1],
                            [0, 1, 0],
                            [1, 0, 0]])
R_mesh_gripper = np.array([[1, 0, 0],
                           [0, 1, 0],
                           [0, 0, 1]])
t_mesh_gripper = np.array([0.005, 0.0, 0.055])
T_mesh_gripper = stf.SimilarityTransform3D(pose=tfx.pose(R_mesh_gripper, t_mesh_gripper),
                                            from_frame='gripper', to_frame='mesh')
T_gripper_world = T_mesh_gripper.inverse().dot(T_mesh_world)
T_grasp_gripper = stf.SimilarityTransform3D(pose=tfx.pose(R_grasp_gripper), from_frame='gripper', to_frame='grasp')
T_mesh_gripper.save('/home/jmahler/jeff_working/GPIS/data/grippers/baxter/T_mesh_gripper.stf')
T_grasp_gripper.save('/home/jmahler/jeff_working/GPIS/data/grippers/baxter/T_grasp_gripper.stf')
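# gripper opening width limits (presumably in meters)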
gripper_params = {}
gripper_params['min_width'] = 0.026
gripper_params['max_width'] = 0.060
with open('/home/jmahler/jeff_working/GPIS/data/grippers/baxter/params.json', 'w') as f:
    json.dump(gripper_params, f)
MayaviVisualizer.plot_pose(T_mesh_world, alpha=0.05, tube_radius=0.0025, center_scale=0.005)
MayaviVisualizer.plot_pose(T_gripper_world, alpha=0.05, tube_radius=0.0025, center_scale=0.005)
MayaviVisualizer.plot_mesh(gripper_mesh, T_mesh_world, style='surface', color=(1,1,1))
mv.axes()
grasp_set = generate_candidate_grasps_random(obj, stable_pose, dataset, num_random_grasps, config, grasp_set=grasp_set)
# plot a histogram for each metric
grasp_metrics = dataset.grasp_metrics(obj.key, grasp_set, gripper=config['gripper'])
for metric, min_q, max_q in zip(metrics, min_q_vals, max_q_vals):
    metric_vals = [grasp_metrics[g.grasp_id][metric] for g in grasp_set]
    plotting.plot_grasp_histogram(metric_vals, num_bins=config['num_bins'], min_q=min_q, max_q=max_q)
    figname = 'obj_%s_metric_%s_histogram.pdf' %(obj.key, metric)
    plt.savefig(os.path.join(config['output_dir'], figname), dpi=config['dpi'])
grasp_ids = np.array([g.grasp_id for g in grasp_set])
id_filename = 'obj_%s_%s_grasp_ids.npy' %(obj.key, stable_pose.id)
np.save(os.path.join(config['output_dir'], id_filename), grasp_ids)
# display grasps
T_table_world = stf.SimilarityTransform3D(tfx.pose(np.eye(4)), from_frame='world', to_frame='table')
gripper = gr.RobotGripper.load(config['gripper'])
mlab.figure(bgcolor=(0.5,0.5,0.5), size=(1000,1000))
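# angular spacing (in degrees) between the saved camera views of each grasp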
delta_view = 360.0 / config['num_views']
for i, grasp in enumerate(grasp_set):
    logging.info('Displaying grasp %d (%d of %d)' %(grasp.grasp_id, i, len(grasp_set)))
    for metric in metrics:
        logging.info('Metric %s = %.5f' %(metric.rjust(75), grasp_metrics[grasp.grasp_id][metric]))
    logging.info('')
    mlab.clf()
    T_obj_world = mv.MayaviVisualizer.plot_stable_pose(obj.mesh, stable_pose, T_table_world, d=0.1, style='surface')
    mv.MayaviVisualizer.plot_gripper(grasp, T_obj_world, gripper=gripper, color=(1,1,1))
    for j in range(config['num_views']):
        az = j * delta_view
table_x0 = T_stp_camera.apply(T_camera_p_camera_c.inverse().apply(t_camera_table))
table_x0[2] = table_x0[2] - config['chessboard_thickness']
camera_optical_axis = -T_stp_camera.rotation[:,2]
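# points with the largest projection onto this axis are treated as the ones closest to the camera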
source_ip = source_object_points.dot(camera_optical_axis)
closest_ind = np.where(source_ip == np.max(source_ip))[0]
source_x0_closest = source_object_points[closest_ind[0],:]
target_ip = target_object_points.dot(camera_optical_axis)
closest_ind = np.where(target_ip == np.max(target_ip))[0]
target_x0_closest = target_object_points[closest_ind[0],:]
t_stp_stp_p = source_x0_closest - target_x0_closest
t_stp_stp_p[2] = source_x0_highest[2] - min(target_x0_highest[2] - config['table_surface_tol'], table_x0[2])
T_align_closest = stf.SimilarityTransform3D(pose=tfx.pose(np.eye(3), t_stp_stp_p), from_frame='stp', to_frame='stp')
target_object_points = T_align_closest.apply(target_object_points.T).T
target_object_normals = T_align_closest.apply(target_object_normals.T, direction=True).T
T_stp_stp_p = T_align_closest.dot(T_stp_stp_p)
# display the points relative to one another
if debug:
    subsample_inds3 = np.arange(orig_source_object_points.shape[0])[::10]
    subsample_inds2 = np.arange(source_object_points.shape[0])[::10]
    subsample_inds = np.arange(target_object_points.shape[0])[::10]
    T_table_world = stf.SimilarityTransform3D(pose=tfx.pose(np.eye(4)), from_frame='world', to_frame='table')
    mlab.figure()
    #mlab.points3d(orig_source_object_points[subsample_inds3,0], orig_source_object_points[subsample_inds3,1], orig_source_object_points[subsample_inds3,2], color=(1,0,1), scale_factor=0.005)
    mlab.points3d(source_object_points[subsample_inds2,0], source_object_points[subsample_inds2,1], source_object_points[subsample_inds2,2], color=(1,0,0), scale_factor=0.005)
    mlab.points3d(target_object_points[subsample_inds,0], target_object_points[subsample_inds,1], target_object_points[subsample_inds,2], color=(0,1,0), scale_factor=0.005)
    #mlab.points3d(x0_table[0], x0_table[1], x0_table[2], color=(1,1,0), scale_factor=0.015)
    #mlab.points3d(source_x0_closest[0], source_x0_closest[1], source_x0_closest[2], color=(1,0,1), scale_factor=0.025)
time.sleep(2)
frame = get_frame(np.ravel(self.get_current_cartesian_position().position) + np.array([0,0.018,0.01]), 0)
# self.move_cartesian_frame_linear_interpolation(frame, 0.1)
time.sleep(2)
self.home()
time.sleep(1)
self.gripper.reset()
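# small positional offsets applied to the target point before cutting (assumed tool-calibration compensation)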
pt = np.array(pt)
pt[0] -= 0.00
pt[2] += 0.0005
print pt
notch.cut_notch_angle(pt, self, angle)
print "WAT"
time.sleep(2)
# self.gripper.execute_action((0, 0, 2))
frame = tfx.pose(np.ravel(self.get_current_cartesian_position().position) + np.array([0,0,0.005]), np.array(self.get_current_cartesian_position().orientation))
# self.move_cartesian_frame_linear_interpolation(frame, 0.1)
# time.sleep(2)
# frame = get_frame(np.ravel(self.get_current_cartesian_position().position), -50)
# self.move_cartesian_frame_linear_interpolation(frame, 0.04)
time.sleep(2)
self.open_gripper(1)
time.sleep(2)
self.open_gripper(75)
time.sleep(2)
return