How to use the imutils.translate function in imutils

To help you get started, we’ve selected a few imutils.translate examples based on popular ways it is used in public projects.
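
imutils.translate(image, x, y) shifts an image by x pixels horizontally and y pixels vertically; under the hood it builds the 2x3 affine matrix [[1, 0, x], [0, 1, y]] and calls cv2.warpAffine. A minimal sketch (the image path is a placeholder, not from any of the projects below):

import cv2
import imutils

# Load a test image (placeholder path; substitute your own file)
image = cv2.imread("example.jpg")

# Positive x shifts right, positive y shifts down; negative values go left/up
shifted = imutils.translate(image, 25, 50)

cv2.imshow("Shifted Down and Right", shifted)
cv2.waitKey(0)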


github BerkeleyAutomation / perception / tools / generate_siamese_dataset.py
def normalize(color_im, crop_size=(512, 512)):

    # Center the object in the frame: translate so the centroid of the
    # non-zero pixels lands on the image center (the centroid is in
    # (row, col) order, while translate takes (x, y) offsets, hence the swap)
    color_data = color_im.data
    nzp = color_im.nonzero_pixels().astype(np.int32)
    centroid = np.mean(nzp, axis=0)
    cx, cy = color_data.shape[1] // 2, color_data.shape[0] // 2
    color_data = imutils.translate(color_data, cx - round(centroid[1]), cy - round(centroid[0]))
    color_im = ColorImage(color_data, color_im.frame)

    # Crop about center to 512x512
    cx, cy = color_data.shape[1] // 2, color_data.shape[0] // 2
    crop_x = crop_size[0] // 2
    crop_y = crop_size[1] // 2
    color_data = imcrop(color_data, (cx - crop_x, cy - crop_y, cx + crop_x, cy + crop_y))
    color_im = ColorImage(color_data, color_im.frame)

    return color_im
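
In this example, imutils.translate recenters the object before cropping: the translation offsets are the differences between the image center (cx, cy) and the centroid of the non-zero pixels, so the object ends up centered in the frame prior to the 512x512 crop.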
github IcedDoggie / Micro-Expression-with-Deep-Learning / augmentation.py
def translation(move_x, move_y, image):
	# M = np.float32([[ 1, 0, move_x],[ 0, 1, move_y]])
	# dst = cv2.warpAffine( image, M, (image.shape[0], image.shape[1]) )
	dst = imutils.translate(image, move_x, move_y)
	return dst
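
The commented-out lines show roughly what imutils.translate does internally, except that cv2.warpAffine expects the output size as (width, height), i.e. (image.shape[1], image.shape[0]). A quick sketch of the equivalence, using a random array as a stand-in image (an assumption for illustration, not the project's data):

import cv2
import imutils
import numpy as np

# Stand-in image: 100x120 BGR array of random noise
image = np.random.randint(0, 255, (100, 120, 3), dtype=np.uint8)

# Manual affine translation: 10 px right, 20 px down
M = np.float32([[1, 0, 10], [0, 1, 20]])
manual = cv2.warpAffine(image, M, (image.shape[1], image.shape[0]))

# The imutils wrapper builds the same matrix internally
wrapped = imutils.translate(image, 10, 20)

print(np.array_equal(manual, wrapped))  # expected: True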
github jameschen00 / PracticalPythonAndOpenCV_Book / Chapter06 / translation.py
# Import the required packages
import cv2
import imutils
import numpy as np

# Load the image and show it
image = cv2.imread('C:/PythonProjects/PracticalPythonAndOpenCV_Book/images/trex.png')
cv2.imshow("Original", image)

# Translate the image 25 pixels to the right and 50 pixels down
matrix = np.float32([[1, 0, 25], [0, 1, 50]])
shifted = cv2.warpAffine(image, matrix, (image.shape[1], image.shape[0]))
cv2.imshow("Shifted Down and Right", shifted)

# Shift the image 50 pixels to the left and 90 pixels up
M = np.float32([[1, 0, -50], [0, 1, -90]])
shifted = cv2.warpAffine(image, M, (image.shape[1], image.shape[0]))
cv2.imshow("Shifted Up and Left", shifted)

# Use our own function in imutils to shift the image down 100 pixels
shifted = imutils.translate(image, 0, 100)
cv2.imshow("Shifted Down", shifted)
cv2.waitKey(0)
github apollos / opencv-practice / translation / translate.py
# You simply need to specify how many pixels you want to shift the image
# in the X and Y direction -- let's translate the image 25 pixels to the
# right and 50 pixels down
M = np.float32([[1, 0, 25], [0, 1, 50]])
shifted = cv2.warpAffine(image, M, (image.shape[1], image.shape[0]))
cv2.imshow("Shifted Down and Right", shifted)

# now, let's shift the image 50 pixels to the left and 90 pixels up; we
# accomplish this using negative values
M = np.float32([[1, 0, -50], [0, 1, -90]])
shifted = cv2.warpAffine(image, M, (image.shape[1], image.shape[0]))
cv2.imshow("Shifted Up and Left", shifted)

# finally, let's use our helper function in imutils to shift the image down
# 100 pixels
shifted = imutils.translate(image, 0, 100)
cv2.imshow("Shifted Down", shifted)
cv2.waitKey(0)
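
Both of the last two scripts illustrate the same pattern: the 2x3 matrix [[1, 0, tx], [0, 1, ty]] shifts the image tx pixels horizontally and ty pixels vertically, and imutils.translate simply builds that matrix for you. Positive arguments move the image right and down, negative arguments move it left and up, and the pixels shifted in from outside the frame are filled with black (cv2.warpAffine's default border value).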
github LamUong / FacialExpressionRecognition / dataprocessing.py
def shiftedUp20(data):
    translated = imutils.translate(data, 0, -5)
    translated2 = translated.reshape(2304).tolist()
    return translated2
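
This snippet operates on 48x48 grayscale faces (48 * 48 = 2304): the image is shifted up by 5 pixels with imutils.translate and then re-flattened into the 2304-element list format the rest of the pipeline expects. A self-contained illustration of the same round-trip, using random dummy data rather than the project's dataset:

import imutils
import numpy as np

# Dummy 48x48 grayscale face (random placeholder data for illustration)
face = np.random.randint(0, 255, (48, 48), dtype=np.uint8)

# Shift up by 5 pixels, as in shiftedUp20 above
translated = imutils.translate(face, 0, -5)

# Flatten back into the 2304-element list format
flat = translated.reshape(2304).tolist()
print(len(flat))  # 2304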

imutils

A series of convenience functions that make basic image processing operations such as translation, rotation, resizing, skeletonization, displaying Matplotlib images, sorting contours, and edge detection easier with OpenCV, on both Python 2.7 and Python 3.

MIT
Latest version published 4 years ago
