import numpy as np
import open3d as o3d


def global_registration(v1, v2, global_voxel_size):
    # See http://www.open3d.org/docs/release/tutorial/Advanced/global_registration.html#global-registration
    # to_point_cloud, downscale_point_cloud and estimate_points_features are
    # helpers from the snippet's source project.
    source = to_point_cloud(v1)
    target = to_point_cloud(v2)
    source_down = downscale_point_cloud(source, global_voxel_size)
    target_down = downscale_point_cloud(target, global_voxel_size)
    source_fpfh = estimate_points_features(source_down, global_voxel_size)
    target_fpfh = estimate_points_features(target_down, global_voxel_size)
    distance_threshold = global_voxel_size * 2.0
    # Cap RANSAC validation at half the size of the smaller cloud.
    max_validation = min(len(source_down.points), len(target_down.points)) // 2
    global_registration_result = o3d.registration.registration_ransac_based_on_feature_matching(
        source_down, target_down, source_fpfh, target_fpfh, distance_threshold,
        o3d.registration.TransformationEstimationPointToPoint(False), 4,
        [
            o3d.registration.CorrespondenceCheckerBasedOnEdgeLength(0.9),
            o3d.registration.CorrespondenceCheckerBasedOnDistance(distance_threshold)
        ],
        o3d.registration.RANSACConvergenceCriteria(4000000, max_validation))
    return source_down, target_down, global_registration_result
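# A minimal usage sketch (not from the original source): assumes v1 and v2 are
# Nx3 NumPy vertex arrays and that the project helpers used above
# (to_point_cloud, downscale_point_cloud, estimate_points_features) are in scope.
v1 = np.random.rand(2000, 3)
v2 = v1 + 0.005 * np.random.randn(2000, 3)  # jittered copy of v1
src_down, tgt_down, result = global_registration(v1, v2, global_voxel_size=0.05)
print(result.transformation)               # 4x4 transform estimated by RANSAC
print(result.fitness, result.inlier_rmse)  # alignment quality measures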
def run_ransac(xyz0, xyz1, feat0, feat1, voxel_size):
    distance_threshold = voxel_size * 1.5
    result_ransac = o3d.registration.registration_ransac_based_on_feature_matching(
        xyz0, xyz1, feat0, feat1, distance_threshold,
        o3d.registration.TransformationEstimationPointToPoint(False), 4,
        [
            o3d.registration.CorrespondenceCheckerBasedOnEdgeLength(0.9),
            o3d.registration.CorrespondenceCheckerBasedOnDistance(distance_threshold)
        ],
        o3d.registration.RANSACConvergenceCriteria(4000000, 500))
    return result_ransac.transformation
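# run_ransac expects already-downsampled clouds plus their FPFH features. A
# hedged sketch of preparing those inputs with Open3D's feature pipeline; the
# search radii below are the common voxel-size multiples from the Open3D
# tutorial, used here as assumptions rather than tuned values.
def preprocess_for_ransac(pcd, voxel_size):
    # Downsample, estimate normals, then compute 33-dimensional FPFH features.
    down = pcd.voxel_down_sample(voxel_size)
    down.estimate_normals(
        o3d.geometry.KDTreeSearchParamHybrid(radius=voxel_size * 2.0, max_nn=30))
    fpfh = o3d.registration.compute_fpfh_feature(
        down,
        o3d.geometry.KDTreeSearchParamHybrid(radius=voxel_size * 5.0, max_nn=100))
    return down, fpfh

source = o3d.io.read_point_cloud("source.pcd")  # hypothetical input files
target = o3d.io.read_point_cloud("target.pcd")
voxel_size = 0.05
xyz0, feat0 = preprocess_for_ransac(source, voxel_size)
xyz1, feat1 = preprocess_for_ransac(target, voxel_size)
T = run_ransac(xyz0, xyz1, feat0, feat1, voxel_size)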
# Perturb each camera position by a random per-axis factor in
# [1 - randomvar / 2, 1 + randomvar / 2]; randomvar, corres, rr, traj_pcd_col
# and traj_to_register_pcd come from the enclosing (truncated) function.
nr_of_cam_pos = len(traj_to_register_pcd.points)
rand_number_added = np.asanyarray(traj_to_register_pcd.points) * (
    np.random.rand(nr_of_cam_pos, 3) * randomvar - randomvar / 2.0 + 1)
traj_to_register_pcd_rand = o3d.geometry.PointCloud()
traj_to_register_pcd_rand.points = o3d.utility.Vector3dVector(rand_number_added)
# Rough registration based on aligned COLMAP SfM data.
reg = o3d.registration.registration_ransac_based_on_correspondence(
    traj_to_register_pcd_rand, traj_pcd_col, corres, 0.2,
    o3d.registration.TransformationEstimationPointToPoint(True), 6, rr)
return reg.transformation
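# The fragment above assumes corres (index-pair correspondences) and rr (RANSAC
# convergence criteria) were built earlier in the truncated function. A hedged
# sketch of constructing them; the identity pairing below is a placeholder, not
# the pairing used by the original code.
n = min(len(traj_to_register_pcd_rand.points), len(traj_pcd_col.points))
corres = o3d.utility.Vector2iVector(
    np.column_stack([np.arange(n), np.arange(n)]))  # pair point i with point i
rr = o3d.registration.RANSACConvergenceCriteria(100000, 100000)  # generous budget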
def multiscale_icp(source,  # function name and the first two parameters are
                   target,  # reconstructed; the original snippet is truncated
                   voxel_size,
                   max_iter,
                   config,
                   init_transformation=np.identity(4)):
    current_transformation = init_transformation
    for scale in range(len(max_iter)):  # multi-scale approach, coarse to fine
        num_iter = max_iter[scale]
        distance_threshold = config["voxel_size"] * 1.4
        print("voxel_size %f" % voxel_size[scale])
        source_down = source.voxel_down_sample(voxel_size[scale])
        target_down = target.voxel_down_sample(voxel_size[scale])
        if config["icp_method"] == "point_to_point":
            result_icp = o3d.registration.registration_icp(
                source_down, target_down, distance_threshold,
                current_transformation,
                o3d.registration.TransformationEstimationPointToPoint(),
                o3d.registration.ICPConvergenceCriteria(max_iteration=num_iter))
        else:
            # Point-to-plane ICP needs per-point normals on both clouds.
            source_down.estimate_normals(
                o3d.geometry.KDTreeSearchParamHybrid(
                    radius=voxel_size[scale] * 2.0, max_nn=30))
            target_down.estimate_normals(
                o3d.geometry.KDTreeSearchParamHybrid(
                    radius=voxel_size[scale] * 2.0, max_nn=30))
            if config["icp_method"] == "point_to_plane":
                result_icp = o3d.registration.registration_icp(
                    source_down, target_down, distance_threshold,
                    current_transformation,
                    o3d.registration.TransformationEstimationPointToPlane(),
                    o3d.registration.ICPConvergenceCriteria(max_iteration=num_iter))
        # Feed each scale's result into the next, finer one (assumed; the
        # original snippet is truncated at this point).
        current_transformation = result_icp.transformation
    return result_icp
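# A hedged usage sketch for the multi-scale routine above, reusing the source
# and target clouds loaded in the earlier sketch; the per-scale voxel sizes and
# iteration budgets are illustrative, loosely following the Open3D
# reconstruction-system defaults.
config = {"voxel_size": 0.05, "icp_method": "point_to_plane"}
result = multiscale_icp(
    source, target,
    voxel_size=[0.05, 0.025, 0.0125],  # coarse to fine
    max_iter=[50, 30, 14],             # fewer iterations at finer scales
    config=config)
print(result.transformation)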
def registration_unif(source,  # signature reconstructed from the body; the
                      gt_target,  # original snippet begins mid-parameter-list
                      init_trans,
                      crop_volume,
                      threshold,
                      max_itr,
                      verbose=True):
    if verbose:
        print("[Registration] threshold: %f" % threshold)
        o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Debug)
    # Crop both clouds to the evaluation volume and uniformly downsample; the
    # source is pre-aligned with init_trans before cropping.
    s = crop_and_downsample(
        source, crop_volume, down_sample_method="uniform", trans=init_trans)
    t = crop_and_downsample(gt_target, crop_volume, down_sample_method="uniform")
    reg = o3d.registration.registration_icp(
        s, t, threshold, np.identity(4),
        o3d.registration.TransformationEstimationPointToPoint(True),
        o3d.registration.ICPConvergenceCriteria(1e-6, max_itr))
    # Compose the ICP refinement with the initial alignment.
    reg.transformation = np.matmul(reg.transformation, init_trans)
    return reg
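# A hedged sketch of invoking this evaluation-style refinement: crop_volume is
# assumed to be a SelectionPolygonVolume loaded from a (hypothetical) cropfile,
# and crop_and_downsample is a helper from the snippet's source repository.
crop_volume = o3d.visualization.read_selection_polygon_volume("scene_crop.json")
source = o3d.io.read_point_cloud("estimate.ply")       # hypothetical inputs
gt_target = o3d.io.read_point_cloud("ground_truth.ply")
reg = registration_unif(source, gt_target,
                        init_trans=np.identity(4),
                        crop_volume=crop_volume,
                        threshold=0.05, max_itr=20)
print(reg.transformation)
print(reg.inlier_rmse)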
source = o3d.io.read_point_cloud("../../TestData/ICP/cloud_bin_0.pcd")
target = o3d.io.read_point_cloud("../../TestData/ICP/cloud_bin_2.pcd")
print("Visualization of two point clouds before manual alignment")
draw_registration_result(source, target, np.identity(4))
# Pick points from the two point clouds and build correspondences.
picked_id_source = pick_points(source)
picked_id_target = pick_points(target)
assert len(picked_id_source) >= 3 and len(picked_id_target) >= 3
assert len(picked_id_source) == len(picked_id_target)
corr = np.zeros((len(picked_id_source), 2))
corr[:, 0] = picked_id_source
corr[:, 1] = picked_id_target
# Estimate a rough transformation from the picked correspondences.
print("Compute a rough transform using the correspondences given by user")
p2p = o3d.registration.TransformationEstimationPointToPoint()
trans_init = p2p.compute_transformation(source, target,
                                        o3d.utility.Vector2iVector(corr))
# Refine with point-to-point ICP.
print("Perform point-to-point ICP refinement")
threshold = 0.03  # 3 cm distance threshold
reg_p2p = o3d.registration.registration_icp(
    source, target, threshold, trans_init,
    o3d.registration.TransformationEstimationPointToPoint())
draw_registration_result(source, target, reg_p2p.transformation)
print("")
if visualize:
    draw_geometries([pcloud_transformed, down_model, down_scene])
# Apply the rigid transform estimated earlier to the model points.
matrix = np.loadtxt(os.path.join(TMP_PATH, 'final_rigid_matrix.txt'))
transformed = np.dot(model, matrix[:3, :3].T) + matrix[:3, 3].T
pcloud_transformed.points = Vector3dVector(transformed)
pcloud_transformed.paint_uniform_color([0, 0, 1])  # blue
draw_geometries([pcloud_transformed, pcloud_scene])
if icp_refine:
    print("Apply point-to-point ICP")
    threshold = 0.02
    trans_init = matrix
    t1 = time.time()
    reg_p2p = registration_icp(
        down_model, down_scene, threshold, trans_init,
        TransformationEstimationPointToPoint())
    t2 = time.time()
    print("ICP Run time : %s seconds" % (t2 - t1))
    print(reg_p2p)
    print("Transformation by ICP is:")
    print(reg_p2p.transformation)
    print("")
    # Rotation error between ground truth and the ICP estimate:
    # theta = arccos((trace(R_gt^T R_est) - 1) / 2).
    R = np.dot(gt[:3, :3].T, reg_p2p.transformation[:3, :3])
    theta = np.arccos((np.trace(R) - 1) * 0.5)
    print("pose difference (in degrees) after icp-refinement:",
          theta * 180 / np.pi)
    if visualize:
        draw_registration_result(pcloud_model, pcloud_scene,
                                 reg_p2p.transformation)
return res, theta_before * 180 / np.pi, theta_after * 180 / np.pi
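# The angle computed above is the geodesic distance between two rotations. A
# small self-contained helper capturing the same formula (names illustrative):
def rotation_error_deg(T_gt, T_est):
    """Angle, in degrees, between the rotation parts of two 4x4 transforms."""
    R = np.dot(T_gt[:3, :3].T, T_est[:3, :3])
    # Clip guards against arccos domain errors from floating-point round-off.
    cos_theta = np.clip((np.trace(R) - 1.0) * 0.5, -1.0, 1.0)
    return np.degrees(np.arccos(cos_theta))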
def register_point_cloud_fpfh(source, target, source_fpfh, target_fpfh, config):
    distance_threshold = config["voxel_size"] * 1.4
    if config["global_registration"] == "fgr":
        result = o3d.registration.registration_fast_based_on_feature_matching(
            source, target, source_fpfh, target_fpfh,
            o3d.registration.FastGlobalRegistrationOption(
                maximum_correspondence_distance=distance_threshold))
    elif config["global_registration"] == "ransac":
        result = o3d.registration.registration_ransac_based_on_feature_matching(
            source, target, source_fpfh, target_fpfh, distance_threshold,
            o3d.registration.TransformationEstimationPointToPoint(False), 4,
            [
                o3d.registration.CorrespondenceCheckerBasedOnEdgeLength(0.9),
                o3d.registration.CorrespondenceCheckerBasedOnDistance(
                    distance_threshold)
            ],
            o3d.registration.RANSACConvergenceCriteria(4000000, 500))
    else:
        raise ValueError("unknown global_registration: %s" %
                         config["global_registration"])
    # A trace of 4.0 means the result is the identity, i.e. registration failed.
    if result.transformation.trace() == 4.0:
        return (False, np.identity(4), np.zeros((6, 6)))
    information = o3d.registration.get_information_matrix_from_point_clouds(
        source, target, distance_threshold, result.transformation)
    # Reject alignments whose correspondence count (information[5, 5]) covers
    # less than 30% of the smaller cloud.
    if information[5, 5] / min(len(source.points), len(target.points)) < 0.3:
        return (False, np.identity(4), np.zeros((6, 6)))
    return (True, result.transformation, information)
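# A hedged end-to-end sketch of feeding register_point_cloud_fpfh: the FPFH
# radii are the usual voxel-size multiples from the Open3D pipeline, and the
# fragment filenames are hypothetical.
voxel_size = 0.05
config = {"voxel_size": voxel_size, "global_registration": "ransac"}

def fpfh_of(pcd):
    pcd.estimate_normals(
        o3d.geometry.KDTreeSearchParamHybrid(radius=voxel_size * 2.0, max_nn=30))
    return o3d.registration.compute_fpfh_feature(
        pcd,
        o3d.geometry.KDTreeSearchParamHybrid(radius=voxel_size * 5.0, max_nn=100))

source = o3d.io.read_point_cloud("fragment_000.ply")
target = o3d.io.read_point_cloud("fragment_001.ply")
source = source.voxel_down_sample(voxel_size)
target = target.voxel_down_sample(voxel_size)
success, transformation, information = register_point_cloud_fpfh(
    source, target, fpfh_of(source), fpfh_of(target), config)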