How to use the imutils.video.FileVideoStream function in imutils

To help you get started, we’ve selected a few imutils examples based on popular ways it is used in public projects.
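
At its core, FileVideoStream wraps cv2.VideoCapture in a daemon thread that decodes frames into a queue ahead of time, so the main loop never blocks on disk I/O. A minimal sketch of the canonical pattern (the video path is a placeholder):

import time

import cv2
from imutils.video import FileVideoStream

# start the threaded decoder and give its queue a moment to fill
fvs = FileVideoStream("video.mp4").start()
time.sleep(1.0)

while fvs.more():
    frame = fvs.read()
    if frame is None:
        break
    cv2.imshow("Frame", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

fvs.stop()
cv2.destroyAllWindows()

The examples below show how several public projects build on this pattern.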

github jeffskinnerbox / people-counter / test / videostream_test_4.py
    def __init__(self, source='file', path=None, queueSize=128, src=0,
                 resolution=(640, 480), framerate=30):
        # initialize the video stream along with the boolean
        # used to indicate if the thread should be stopped or not
        self.vsource = source
        if self.vsource == 'file':
            self.stream = FileVideoStream(path, queueSize=queueSize).start()
        elif self.vsource == 'usbcamera':
            self.stream = VideoStream(src=src, usePiCamera=False,
                                      resolution=resolution,
                                      framerate=framerate).start()
        elif self.vsource == 'picamera':
            self.stream = VideoStream(src=src, usePiCamera=True,
                                      resolution=resolution,
                                      framerate=framerate).start()
github DetectingHumanEmotion / detecting-human-emotion-webapp / emotion-detection / blink / blink-detect.py
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(args["shape_predictor"])

    # grab the indexes of the facial landmarks for the left and
    # right eye, respectively
    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

    # start the video stream thread
    print("[INFO] starting video stream thread...")
    print("[INFO] print q to quit...")
    if args['video'] == "camera":
        vs = VideoStream(src=0).start()
        fileStream = False
    else:
        vs = FileVideoStream(args["video"]).start()
        fileStream = True

    time.sleep(1.0)

    # loop over frames from the video stream
    while True:
        # if this is a file video stream, then we need to check if
        # there are any more frames left in the buffer to process
        if fileStream and not vs.more():
            break

        # grab the frame from the threaded video file stream, resize
        # it, and convert it to grayscale (single channel)
        frame = vs.read()
        frame = imutils.resize(frame, width=450)
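
The excerpt stops after the resize; in the eye-aspect-ratio (EAR) blink technique this script follows, the grayscale frame is then searched for faces, and each eye's six landmarks are reduced to a single openness ratio. A sketch of the standard EAR helper (not this repository's exact code):

from scipy.spatial import distance as dist

def eye_aspect_ratio(eye):
    # vertical distances between the upper and lower eyelid landmarks
    A = dist.euclidean(eye[1], eye[5])
    B = dist.euclidean(eye[2], eye[4])
    # horizontal distance between the eye corners
    C = dist.euclidean(eye[0], eye[3])
    # the ratio collapses toward zero as the eye closes
    return (A + B) / (2.0 * C)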
github grasslandnetwork / node_lite / multi_object_tracking.py
if not args.get("video", False):
    framerate = 30
        
    print("[INFO] starting camera stream...")    
    vs = VideoStream(usePiCamera=args["picamera"],
                     resolution=(detection_frame_width,
                                 int(detection_frame_width * frame_ratio)),
                     framerate=framerate).start()
    print("[INFO] Warming up camera...")
    time.sleep(3)
    
    if args["picamera"] == 1 or args["picamera"] == True:
        vs.camera.rotation = args["rotation"]
    
    
else: # otherwise, grab a reference to the video file
    framerate = 30
    print("[INFO] starting video file stream...")    
    vs = FileVideoStream(args["video"], queueSize=15).start()

    # loop over frames from the video stream

'''
Here we calculate and set the linear map (transformation matrix) used to turn the pixel coordinates of objects on the frame into their corresponding lat/lng coordinates in the real world. It's a computationally expensive calculation and requires inputs from the camera's calibration (its frame of reference in the real world), so we do it once here instead of every time we need to transform pixels to lat/lng.
'''
rw = RealWorldCoordinates({"height": tracking_frame_width*frame_ratio, "width": tracking_frame_width})
if args['mode'] == 'CALIBRATING':
    rw.set_transform(calibrating=True)
    print("set calibration")
else:
    rw.node_update()
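
RealWorldCoordinates is internal to this project, but the one-time "linear map" its docstring describes is typically a homography: four pixel-to-lat/lng correspondences from the camera calibration yield a 3x3 matrix that is cheap to apply per frame afterwards. A sketch with hypothetical calibration points:

import cv2
import numpy as np

# four calibration correspondences (values are made up)
pixel_pts = np.float32([[0, 0], [1280, 0], [1280, 720], [0, 720]])
latlng_pts = np.float32([[45.5017, -73.5673], [45.5018, -73.5660],
                         [45.5010, -73.5659], [45.5009, -73.5672]])

# computed once, as the docstring recommends
M = cv2.getPerspectiveTransform(pixel_pts, latlng_pts)

# map an object's pixel position to lat/lng on any later frame
obj = np.float32([[[640, 360]]])
lat, lng = cv2.perspectiveTransform(obj, M)[0][0]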
github jeffskinnerbox / people-counter / vstream.py
    def __init__(self, source='file', path=None, qs=128, src=0,
                 resolution=(640, 480), fr=30):
        """
        Only the PiCamera will allow you to set its resolution at creation
        time.  In other cases (i.e. usb camera or file), the function
        VideoCapture.set() needs to be used post-creation to set resolution.
        But this will not work uniformly for all types of cameras.  As a
        result, the frame must be resized manually to your desired resolution.
        """
        self.vsource = source
        self.target_width = resolution[0]
        self.target_height = resolution[1]

        if self.vsource == 'file':
            self.stream = FileVideoStream(path, queueSize=qs).start()
        elif self.vsource == 'usbcamera':
            self.stream = VideoStream(src=src, usePiCamera=False).start()
        elif self.vsource == 'picamera':
            self.stream = VideoStream(usePiCamera=True, resolution=resolution,
                                      framerate=fr).start()

        if self.vsource == 'picamera':
            # read one frame to determine the resolution of the camera
            #frame = self.read()
            #self.native_width = frame.shape[0]
            #self.native_height = frame.shape[1]
            # this isn't right but can't figure out the picamera
            self.native_width = resolution[0]
            self.native_height = resolution[1]
        else:
            self.native_width = self.get(cv2.CAP_PROP_FRAME_WIDTH)
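
The docstring's caveat is easy to make concrete: for a USB camera or a file, cv2.VideoCapture accepts a resolution request, but the driver is free to ignore it, so resizing each frame manually is the reliable fallback. A short sketch:

import cv2
import imutils

cap = cv2.VideoCapture(0)
# a request, not a guarantee -- many drivers silently ignore it
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

ok, frame = cap.read()
if ok and frame.shape[1] != 640:
    # fall back to resizing in software to the target width
    frame = imutils.resize(frame, width=640)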
github mans-men / eye-blink-detection-demo / detect_blinks.py
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(args["shape_predictor"])
 
    # grab the indexes of the facial landmarks for the left and
    # right eye, respectively
    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
    
    # start the video stream thread
    print("[INFO] starting video stream thread...")
    print("[INFO] print q to quit...")
    if args['video'] == "camera":
        vs = VideoStream(src=0).start()
        fileStream = False
    else:
        vs = FileVideoStream(args["video"]).start()
        fileStream = True
   
    time.sleep(1.0)
    
    # loop over frames from the video stream
    while True:
        # if this is a file video stream, then we need to check if
        # there are any more frames left in the buffer to process
        if fileStream and not vs.more():
            break

        # grab the frame from the threaded video file stream, resize
        # it, and convert it to grayscale (single channel)
        frame = vs.read()
        frame = imutils.resize(frame, width=450)
github forensic-architecture / mtriage / src / lib / analysers / __deprecated / frames_opencv / main.py
def opencv_frames(out_folder, fp, rate, threshold, sequential):
    fvs = FileVideoStream(str(fp)).start()
    file_fps = fvs.stream.get(cv2.CAP_PROP_FPS)

    last_frame = None
    num_output = 0
    num_considered = 0
    time_elapsed = 0
    use_every = 1.0 / float(rate)
    frame_duration = 1.0 / file_fps
    while fvs.more():
        frame = fvs.read()
        if frame is None:
            break

        if num_considered == 0:
            use_frame = True
        elif (time_elapsed + frame_duration) >= use_every:
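            # (sketch continuation, not the repository's exact code: the
            # excerpt is truncated above; a plausible completion of the
            # rate-based sampling, ignoring the threshold/last_frame
            # de-duplication implied by the function's other parameters,
            # and assuming `import os` plus a string out_folder)
            use_frame = True
        else:
            use_frame = False

        if use_frame:
            # the output filename scheme here is hypothetical
            cv2.imwrite(os.path.join(out_folder, "%06d.jpg" % num_output), frame)
            num_output += 1
            time_elapsed = 0.0
        else:
            time_elapsed += frame_duration
        num_considered += 1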
github the-house-of-black-and-white / opencv-dnn-demo / play.py
default='yolo', help='yolo, mtcnn or vj')
    parser.add_argument('-thresh', '--threshold', dest='min_confidence', type=float,
                        default=0.5, help='Min confidence threshold.')
    parser.add_argument('-codec', '--codec', dest='codec', type=str,
                        default='XVID', help='codec MJPG or XVID')
    parser.add_argument('-save', '--save', dest='save', type=str,
                        default='output', help='Save video.')
    parser.add_argument('-fps', '--fps', dest='fps', type=float,
                        default=30, help='FPS.')

    args = parser.parse_args()

    # start the file video stream thread and allow the buffer to
    # start to fill
    print("[INFO] starting video file thread...")
    fvs = FileVideoStream(args.video_source).start()
    face_detector = new_face_detector(args.detector, args.min_confidence)
    time.sleep(1.0)
    # Define the codec and create VideoWriter object
    fourcc = cv2.VideoWriter_fourcc(*args.codec)
    writer = None

    # start the FPS timer
    fps = FPS().start()

    # loop over frames from the video file stream
    while fvs.more():
        frame = fvs.read()

        # check if the writer is None
        if writer is None:
            # store the image dimensions, initialize the video writer
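            # (sketch continuation, not the repository's exact code: the
            # excerpt is truncated above; dimensions come from the first
            # decoded frame, and the output filename built from args.save
            # is an assumption)
            (h, w) = frame.shape[:2]
            writer = cv2.VideoWriter(args.save + ".avi", fourcc, args.fps,
                                     (w, h), True)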
github kimyoon-young / centerNet-deep-sort / demo_centernet_deepsort_thread.py
    def open(self):

        if opt.input_type == 'webcam':
            self.vdo = FileVideoStream(opt.webcam_ind).start()

        elif opt.input_type == 'ipcam':
            # load cam key, secret
            with open("cam_secret.txt") as f:
                lines = f.readlines()
                key = lines[0].strip()
                secret = lines[1].strip()

            self.vdo = FileVideoStream(opt.ipcam_url.format(key, secret, opt.ipcam_no)).start()

        # video
        else:
            assert os.path.isfile(opt.vid_path), "Error: path error"
            self.vdo = FileVideoStream(opt.vid_path).start()


        self.im_width = int(self.vdo.stream.get(cv2.CAP_PROP_FRAME_WIDTH))
        self.im_height = int(self.vdo.stream.get(cv2.CAP_PROP_FRAME_HEIGHT))


        self.area = 0, 0, self.im_width, self.im_height
        if self.write_video:
            fourcc = cv2.VideoWriter_fourcc(*'MJPG')
            self.output = cv2.VideoWriter("demo1.avi", fourcc, 20, (self.im_width, self.im_height))
        #return self.vdo.isOpened()
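
Note the webcam branch above works because FileVideoStream hands its first argument straight to cv2.VideoCapture, which accepts an integer device index as well as a file path. Also, none of the excerpts shows teardown; FileVideoStream keeps a daemon thread and an open capture handle, so a typical cleanup at the end of any of these scripts (a sketch, assuming the fvs/writer names used above) is:

# stop the decode thread, then release writers and windows
fvs.stop()
if writer is not None:
    writer.release()
cv2.destroyAllWindows()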

imutils

A series of convenience functions that make basic image processing tasks (translation, rotation, resizing, skeletonization, displaying Matplotlib images, sorting contours, edge detection, and more) easier with OpenCV on both Python 2.7 and Python 3.

License: MIT