How to use the imutils.perspective.four_point_transform function in imutils

To help you get started, we've selected a few imutils examples based on popular ways the library is used in public projects.

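Before the project examples, here is a minimal, self-contained sketch of the function itself. The filename and corner coordinates are placeholders, not taken from any of the projects below.

import cv2
import numpy as np
from imutils.perspective import four_point_transform

# load an image and define the four corners of the region to warp;
# the points may be listed in any order, because four_point_transform
# orders them (top-left, top-right, bottom-right, bottom-left) itself
image = cv2.imread("receipt.png")
pts = np.array([(73, 239), (356, 117), (475, 265), (187, 443)])

# obtain a top-down, "bird's-eye view" of the region
warped = four_point_transform(image, pts)
cv2.imshow("Warped", warped)
cv2.waitKey(0)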

github apollos / opencv-practice / gathering_character_examples / pyimagesearch / license_plate / license_plate.py
def detectCharacterCandidates(self, region):
		# apply a 4-point transform to extract the license plate
		plate = perspective.four_point_transform(self.image, region)
		cv2.imshow("Perspective Transform", imutils.resize(plate, width=400))

		# extract the Value component from the HSV color space and apply adaptive thresholding
		# to reveal the characters on the license plate
		V = cv2.split(cv2.cvtColor(plate, cv2.COLOR_BGR2HSV))[2]
		T = threshold_local(V, 29, offset=15, method="gaussian")
		thresh = (V > T).astype("uint8") * 255
		thresh = cv2.bitwise_not(thresh)

		# resize the license plate region to a canonical size
		plate = imutils.resize(plate, width=400)
		thresh = imutils.resize(thresh, width=400)
		cv2.imshow("LP Threshold", thresh)

		# perform a connected components analysis and initialize the mask to store the locations
		# of the character candidates
		labels = measure.label(thresh, connectivity=2, background=0)
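The transform accepts the four corners in any order because it sorts them internally with imutils.perspective.order_points. A quick sketch of that helper (the coordinates are made up for illustration):

import numpy as np
from imutils import perspective

# four corners of a quadrilateral, deliberately shuffled
pts = np.array([[310, 220], [40, 60], [30, 230], [300, 50]], dtype="float32")

# order_points returns them as top-left, top-right, bottom-right,
# bottom-left -- the ordering four_point_transform uses to build
# the destination rectangle for the warp
print(perspective.order_points(pts))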
github mythrex / OMR-Scanner / server / bin / module / grader.py
        # compute the contour perimeter and approximate its shape
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02*peri, True)
        if len(approx) == 4:
            docCnts = approx
            break
try:
    docArea = cv2.contourArea(docCnts)
    if docArea <= 300000 or docArea >= 1000000:
        raise ge.PaperDetectionError(
            'Error in finding paper contour. Area of docCnts is {}'.format(docArea))
except ge.PaperDetectionError as e:
    sys.exit(str(e))

# apply perspective transform to the shape
paper = four_point_transform(image, docCnts.reshape(4, 2))
warped = four_point_transform(gray, docCnts.reshape(4, 2))
# binarize the image; adaptive thresholding is used here
# instead of Otsu's thresholding

thresh = cv2.adaptiveThreshold(
    warped, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 11, 2)

# find contours in the thresholded image
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                        cv2.CHAIN_APPROX_SIMPLE)

# grab the correct contour list regardless of the OpenCV version
cnts = imutils.grab_contours(cnts)
# raise an error if grading cannot proceed
try:
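The excerpt above begins mid-loop; a plausible preamble (the filename and Canny thresholds are assumptions, not part of the project) would edge-detect the scan and sort candidate contours by area before the loop runs:

import cv2
import imutils

# load the scanned sheet, convert to grayscale, blur, and edge-detect
image = cv2.imread("omr_sheet.png")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
edged = cv2.Canny(blurred, 75, 200)

# find contours and examine the largest ones first, so the first
# four-vertex approximation found is the paper's outline
cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
                        cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)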
github EdgarNg1024 / PaperHelper / main.py
    # sort the contours by area in descending order
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)

    # loop over the sorted contours
    for c in cnts:
        # approximate the contour
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)

        # if the approximated contour has four vertices, assume we have found the answer sheet
        if len(approx) == 4:
            docCnt = approx
            break
# apply a four-point perspective transform to both the original image and the grayscale image
paper = four_point_transform(image, docCnt.reshape(4, 2))
warped = four_point_transform(gray, docCnt.reshape(4, 2))

# # apply Otsu's thresholding to the grayscale image
# thresh = cv2.threshold(warped, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
#
# # find contours in the binary image, then initialize the list of question contours
# cnts = cv2.findContours(thresh.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
# cnts = cnts[0] if imutils.is_cv2() else cnts[1]
# questionCnts = []
#
# # loop over each of the contours
# for q, c in enumerate(cnts):
#
#     # compute the bounding box of the contour, then use it to derive the aspect ratio
#     (x, y, w, h) = cv2.boundingRect(c)
#     ar = w / float(h)
#     # print 'w' + str(w), 'h' + str(h)
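The commented-out continuation filters the contours down to bubble candidates by size and aspect ratio; a sketch of that filter (the numeric thresholds follow the classic OMR tutorial and are assumptions here):

import cv2

def filterQuestionContours(cnts):
    # keep contours that are sufficiently large and roughly square,
    # i.e. likely to be answer bubbles
    questionCnts = []
    for c in cnts:
        (x, y, w, h) = cv2.boundingRect(c)
        ar = w / float(h)
        if w >= 20 and h >= 20 and 0.9 <= ar <= 1.1:
            questionCnts.append(c)
    return questionCnts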
github nikgens / TankRobotProject / signRecognition / detectWithWebCamera.py
                area = sideOne * sideTwo
                # find the largest rectangle within all contours
                if area > largestArea:
                    largestArea = area
                    largestRect = box
            

        # draw the contour of the found rectangle on the original image
        if largestArea > frameArea * 0.02:
            cv2.drawContours(frame, [largestRect], 0, (0, 0, 255), 2)

            # cut out and warp the interesting area to a top-down view
            warped = four_point_transform(mask, largestRect)
            
            # show an image if rectangle was found
            #cv2.imshow("Warped", cv2.bitwise_not(warped))
            
            # use the helper function to identify the sign in the warped region
            detectedTrafficSign = identifyTrafficSign(warped)
            #print(detectedTrafficSign)


            # write the description of the sign on the original image
            cv2.putText(frame, detectedTrafficSign, tuple(largestRect[0]), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 255, 0), 2)
        
        # show original image
        cv2.imshow("Original", frame)
        
        # if the `q` key was pressed, break from the loop
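In this project the corners come from cv2.minAreaRect rather than from contour approximation. A standalone sketch of that pattern (the frame and contour are synthetic stand-ins):

import cv2
import numpy as np
from imutils.perspective import four_point_transform

# a synthetic frame and a stand-in contour for the detected sign
frame = np.zeros((300, 400, 3), dtype="uint8")
cnt = np.array([[50, 80], [260, 60], [280, 200], [60, 220]], dtype="int32")

# fit the tightest rotated rectangle and recover its four corners
rect = cv2.minAreaRect(cnt)
box = cv2.boxPoints(rect)

# boxPoints returns the corners in an arbitrary rotation order,
# which is fine: four_point_transform re-orders them itself
warped = four_point_transform(frame, box)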
github ghostbbbmt / Traffic-Sign-Detection / resources / signRecognition.py
        for c in cnts:
                # approximate the contour
                peri = cv2.arcLength(c, True)
                approx = cv2.approxPolyDP(c, 0.02 * peri, True)

                # if the contour has four vertices, then we have found
                # the sign's border
                if len(approx) == 4:
                        displayCnt = approx
                        break

        # extract the sign borders and apply a perspective transform to them;
        # a common task in computer vision is to perform a 4-point perspective
        # transform of an ROI and obtain a top-down, "bird's-eye view" of it
        warped = four_point_transform(gray, displayCnt.reshape(4, 2))
        output = four_point_transform(image, displayCnt.reshape(4, 2))

        # draw a red square on the image
        cv2.drawContours(image, [displayCnt], -1, (0, 0, 255), 5)

        # threshold the warped image, then apply a series of morphological
        # operations to clean up the thresholded image; cv2.THRESH_OTSU
        # automatically calculates the threshold value from the image
        # histogram, assuming a bimodal image
        thresh = cv2.threshold(warped, 0, 255, 
                cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (1, 5))
        thresh = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)


        # (roiH, roiW) = roi.shape
github offsouza / parking_lot_opencv / ferramentas.py
def getRotateRect(img, coordinate_lists, WIDTH=100, HEIGHT=100):

        warped_img_lists = []

        for i, coordinate in enumerate(coordinate_lists):
            # warp each parking spot to a top-down view
            warped = perspective.four_point_transform(img, coordinate)

            warped_resize = cv2.resize(warped, (WIDTH, HEIGHT), interpolation=cv2.INTER_CUBIC)

            # plt.imshow(warped, cmap = 'gray', interpolation = 'bicubic')
            # plt.xticks([]), plt.yticks([])
            # plt.show()
            cv2.imshow("Spot - %d" % i, warped_resize)

            warped_img_lists.append(warped_resize)

        return warped_img_lists
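A hypothetical call, assuming frame is a BGR image of the lot and each spot is given as a 4x2 array of corner points (the filename and coordinates below are invented):

import cv2
import numpy as np

frame = cv2.imread("parking_lot.png")

# two parking-space quadrilaterals, corners in any order
spots = [
    np.array([[10, 10], [120, 12], [118, 90], [12, 88]]),
    np.array([[130, 15], [240, 18], [238, 95], [132, 92]]),
]

crops = getRotateRect(frame, spots, WIDTH=100, HEIGHT=100)
cv2.waitKey(0)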
github nikgens / TankRobotProject / moveToTheObject / moveToTheObject.py
            else:
                # compute the steering error with a PID controller
                # to move toward the sign's direction
                pid = pidController(center[0], halfFrameWidth, 0.5, 0, 0)

                # if the error is negative, slow down the left motor;
                # otherwise slow down the right one
                if pid < 0:
                    moveMotors(MAX_MOTOR_SPEED + pid, MAX_MOTOR_SPEED + pid * 0.1)
                else:
                    moveMotors(MAX_MOTOR_SPEED - pid * 0.1, MAX_MOTOR_SPEED - pid)
            
            # draw the contour of the found rectangle on the original image
            cv2.drawContours(frame, [largestRect], 0, (0, 0, 255), 2)

            # cut out and warp the interesting area to a top-down view
            warped = four_point_transform(mask, largestRect)

            # show the warped region, since a rectangle was found
            cv2.imshow("Warped", cv2.bitwise_not(warped))

            # use the helper function to identify the sign in the warped region
            detectedTrafficSign = identifyTrafficSign(warped)
            print('detectedTrafficSign', detectedTrafficSign)
            if detectedTrafficSign is not None:
                lastDetectedTrafficSign = detectedTrafficSign
            print('lastDetectedTrafficSign', lastDetectedTrafficSign)

            # write the description of the sign on the original image
            cv2.putText(frame, detectedTrafficSign, tuple(largestRect[0]), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 255, 0), 2)
        
        # if there is no blue rectangle in the frame, then stop
        else:
            moveMotors(127, 127)  # 127 is mapped to 0 on the Arduino
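pidController and moveMotors are defined elsewhere in the project; a hypothetical, proportional-only reconstruction of the controller consistent with the call above (the gain handling and sign convention are assumptions):

def pidController(measured, setpoint, kp, ki, kd):
    # proportional term only: the call above passes ki = kd = 0,
    # so the integral and derivative terms are omitted in this sketch
    return kp * (measured - setpoint)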
github vitali84 / pdf-to-csv-table-extactor / pdf-to-csv-cv.py
            blurred = cv2.GaussianBlur(table_cell, (5, 5), 0)
            #cv2.rectangle(color_image,(vx_p,hy_p),(vx_c+vw_c,hy_c+hh_c),(255,0,0),2)

            thresholded = cv2.threshold(blurred, 128, 255,
                                cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]

            contours = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            # grab the correct contour list regardless of the OpenCV version
            contours = imutils.grab_contours(contours)
            contours = sorted(contours, key=cv2.contourArea, reverse=True)

            rect = cv2.minAreaRect(contours[0])
            box = cv2.boxPoints(rect)
            box = box.astype(int)
            extracted = four_point_transform(table_cell.copy(), box.reshape(4, 2))[1:-1, 1:-1]  # trim 1 px from each side
            ret,extracted = cv2.threshold(extracted,165,255,cv2.THRESH_BINARY)
            extracted_columns.append(extracted)

            # cv2.drawContours(color_image, [contours[0]], -1, (0,255,0), 3)


        extracted_rows_columns.append(extracted_columns)

    #show_wait_destroy("horizontal_lines_img",color_image)
    return extracted_rows_columns
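One detail worth copying from this snippet: slicing the warped cell with [1:-1, 1:-1] drops a 1-pixel border so residue from the table's ruling lines does not leak into the extracted crop. A generalized helper (hypothetical, not part of the project):

def trim_border(img, n=1):
    # drop an n-pixel frame from every side of a 2-D image
    return img[n:-n, n:-n]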

imutils

A series of convenience functions to make basic image processing operations such as translation, rotation, resizing, skeletonization, displaying Matplotlib images, sorting contours, and detecting edges easier with OpenCV on both Python 2.7 and Python 3.

License: MIT