def test_jit(self):
    @torch.jit.script
    def op_script(transform, points):
        return kornia.transform_points(transform, points)

    points = torch.ones(1, 2, 2)
    transform = torch.eye(3)[None]
    actual = op_script(transform, points)
    expected = kornia.transform_points(transform, points)
    assert_allclose(actual, expected)
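# For context, a minimal standalone sketch of the transform_points contract
# (an illustration assuming the same kornia API as in the snippet above; not part of the test suite):
# a (B, 3, 3) homography applied to (B, N, 2) points returns (B, N, 2) points.
import torch
import kornia

transform = torch.eye(3)[None]      # (1, 3, 3) homography
transform[..., 0, 2] = 2.0          # add a translation of +2 along x
points = torch.zeros(1, 3, 2)       # (1, N=3, 2) 2D points at the origin
out = kornia.transform_points(transform, points)
assert out.shape == (1, 3, 2)
assert torch.allclose(out[..., 0], torch.full((1, 3), 2.0))  # x shifted by 2
assert torch.allclose(out[..., 1], torch.zeros(1, 3))        # y unchanged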
def test_transform_points(
        self, batch_size, num_points, num_dims, device_type):
    # generate input data
    eye_size = num_dims + 1
    points_src = torch.rand(batch_size, num_points, num_dims)
    points_src = points_src.to(torch.device(device_type))
    dst_homo_src = utils.create_random_homography(batch_size, eye_size)
    dst_homo_src = dst_homo_src.to(torch.device(device_type))
    # transform the points from dst to ref
    points_dst = kornia.transform_points(dst_homo_src, points_src)
    # transform the points from ref to dst
    src_homo_dst = torch.inverse(dst_homo_src)
    points_dst_to_src = kornia.transform_points(src_homo_dst, points_dst)
    # the reprojected points should be equal to the initial ones
    assert_allclose(points_src, points_dst_to_src)
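# utils.create_random_homography is a test helper; a hypothetical stand-in
# (an assumption for illustration, not the actual helper used by the tests)
# is an identity matrix plus a small random perturbation, which keeps the
# batch of homographies invertible in practice so the round trip above is well defined.
import torch

def create_random_homography(batch_size, eye_size, std=0.1):
    eye = torch.eye(eye_size).expand(batch_size, eye_size, eye_size)
    return eye + std * torch.rand(batch_size, eye_size, eye_size)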
# build a batch of source quadrilaterals and jitter them to obtain the destination points
h = torch.ceil(h_max * torch.rand(batch_size)).to(device)
w = torch.ceil(w_max * torch.rand(batch_size)).to(device)
norm = torch.rand(batch_size, 4, 2).to(device)
points_src = torch.zeros_like(norm)
points_src[:, 1, 0] = h
points_src[:, 2, 1] = w
points_src[:, 3, 0] = h
points_src[:, 3, 1] = w
points_dst = points_src + norm
# compute transform from source to target
dst_homo_src = kornia.get_perspective_transform(points_src, points_dst)
assert_allclose(
    kornia.transform_points(dst_homo_src, points_src), points_dst)
# compute gradient check
points_src = utils.tensor_to_gradcheck_var(points_src)  # to var
points_dst = utils.tensor_to_gradcheck_var(points_dst)  # to var
assert gradcheck(
    kornia.get_perspective_transform, (points_src, points_dst),
    raise_exception=True)
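# A minimal usage sketch of get_perspective_transform with concrete corner points
# (assumes the kornia API shown above; not part of the original tests):
# four (B, 4, 2) source/destination correspondences yield a (B, 3, 3) homography.
import torch
import kornia

points_src = torch.tensor([[[0., 0.], [1., 0.], [1., 1.], [0., 1.]]])   # unit square, (1, 4, 2)
points_dst = points_src + 0.1                                           # pure translation
dst_homo_src = kornia.get_perspective_transform(points_src, points_dst)  # (1, 3, 3)
warped = kornia.transform_points(dst_homo_src, points_src)
assert torch.allclose(warped, points_dst, atol=1e-4)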
batch_size = 1
height, width = 2, 4
# create points grid
grid_norm = kornia.utils.create_meshgrid(
    height, width, normalized_coordinates=True)
grid_norm = torch.unsqueeze(grid_norm, dim=0)
grid_pix = kornia.utils.create_meshgrid(
    height, width, normalized_coordinates=False)
grid_pix = torch.unsqueeze(grid_pix, dim=0)
# grid from pixel space to normalized
norm_trans_pix = kornia.normal_transform_pixel(height, width)  # 1x3x3
pix_trans_norm = torch.inverse(norm_trans_pix)  # 1x3x3
# transform grids
grid_pix_to_norm = kornia.transform_points(norm_trans_pix, grid_pix)
grid_norm_to_pix = kornia.transform_points(pix_trans_norm, grid_norm)
assert_allclose(grid_pix, grid_norm_to_pix)
assert_allclose(grid_norm, grid_pix_to_norm)
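# For reference, normal_transform_pixel(height, width) maps pixel coordinates in
# [0, width - 1] x [0, height - 1] to the normalized range [-1, 1]; a sketch of the
# expected matrix (an assumption about the values, not the library implementation):
import torch
import kornia

height, width = 2, 4
norm_trans_pix = kornia.normal_transform_pixel(height, width)  # (1, 3, 3)
expected = torch.tensor([[[2.0 / (width - 1), 0.0, -1.0],
                          [0.0, 2.0 / (height - 1), -1.0],
                          [0.0, 0.0, 1.0]]])
assert torch.allclose(norm_trans_pix, expected)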
def test_jit_trace(self):
    @torch.jit.script
    def op_script(transform, points):
        return kornia.transform_points(transform, points)

    points = torch.ones(1, 2, 2)
    transform = torch.eye(3)[None]
    op_script_trace = torch.jit.trace(op_script, (transform, points))
    actual = op_script_trace(transform, points)
    expected = kornia.transform_points(transform, points)
    assert_allclose(actual, expected)
def draw_rectangle(image, dst_homo_src):
    height, width = image.shape[:2]
    pts_src = torch.FloatTensor([[
        [-1, -1],  # top-left
        [1, -1],   # top-right
        [1, 1],    # bottom-right
        [-1, 1],   # bottom-left
    ]]).to(dst_homo_src.device)
    # transform points
    pts_dst = dgm.transform_points(torch.inverse(dst_homo_src), pts_src)

    def compute_factor(size):
        return 1.0 * size / 2

    def convert_coordinates_to_pixel(coordinates, factor):
        return factor * (coordinates + 1.0)

    # compute conversion factors from normalized [-1, 1] to pixel coordinates
    x_factor = compute_factor(width - 1)
    y_factor = compute_factor(height - 1)
    pts_dst = pts_dst.cpu().squeeze().detach().numpy()
    pts_dst[..., 0] = convert_coordinates_to_pixel(
        pts_dst[..., 0], x_factor)
    pts_dst[..., 1] = convert_coordinates_to_pixel(
        pts_dst[..., 1], y_factor)
    # do the actual drawing
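# The drawing step itself is elided above; one possible completion using OpenCV
# (an assumption for illustration; the original example may draw the rectangle differently):
import cv2
import numpy as np

def draw_polygon(image_np, pts_dst, color=(255, 0, 0), thickness=2):
    # pts_dst: (4, 2) array of pixel coordinates, as computed in draw_rectangle above
    pts = np.round(pts_dst).astype(np.int32).reshape(-1, 1, 2)
    return cv2.polylines(image_np.copy(), [pts], isClosed=True, color=color, thickness=thickness)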