How to use the imutils.video.VideoStream class in imutils

To help you get started, we’ve selected a few imutils examples based on popular ways it is used in public projects.
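
The examples below come from a range of projects, but they all follow the same lifecycle: start the threaded stream, read frames in a loop, then stop it. A minimal sketch of that pattern (the webcam index src=0 is an assumption):

import time
import cv2
from imutils.video import VideoStream

# start the background frame-polling thread (src=0 assumes the default webcam;
# pass usePiCamera=True instead to use the Raspberry Pi camera module)
vs = VideoStream(src=0).start()
time.sleep(2.0)  # allow the camera sensor to warm up

while True:
    frame = vs.read()  # returns the most recent frame without blocking
    cv2.imshow("Frame", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

vs.stop()  # stop the polling thread and release the camera
cv2.destroyAllWindows()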

github jeffbass / imagezmq / tests / timing_send_jpg_buf.py
# use either of the formats below to specify the address of the display computer
sender = imagezmq.ImageSender(connect_to='tcp://jeff-macbook:5555')
# sender = imagezmq.ImageSender(connect_to='tcp://192.168.1.190:5555')

# optionally, turn on the LED area lighting
use_led = False  # set to True or False as needed
# optionally, flip the image vertically
flip = True  # set to True or False as needed

if use_led:
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(18, GPIO.OUT)
    GPIO.output(18, True)  # turn on LEDs

rpi_name = socket.gethostname()  # send RPi hostname with each image
picam = VideoStream(usePiCamera=True).start()
time.sleep(2.0)  # allow camera sensor to warm up
jpeg_quality = 95  # 0 to 100, higher is better quality, 95 is cv2 default
try:
    while True:  # send images as stream until Ctrl-C
        image = picam.read()
        if flip:
            image = cv2.flip(image, -1)
        ret_code, jpg_buffer = cv2.imencode(
            ".jpg", image, [int(cv2.IMWRITE_JPEG_QUALITY), jpeg_quality])
        sender.send_jpg(rpi_name, jpg_buffer)
except (KeyboardInterrupt, SystemExit):
    pass  # Ctrl-C was pressed to end program
except Exception as ex:
    print('Python error with no Exception handler:')
    print('Traceback error:', ex)
    traceback.print_exc()
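
For context, the sender above pairs with an imagezmq.ImageHub on the display computer that receives and decodes the jpg buffers; a minimal sketch of that receiving side (an assumption, not part of the original file):

import cv2
import numpy as np
import imagezmq

# minimal receiving-side sketch: collect the jpg buffers sent above and
# decode them back into OpenCV images
image_hub = imagezmq.ImageHub()  # listens on tcp://*:5555 by default
while True:
    rpi_name, jpg_buffer = image_hub.recv_jpg()
    image = cv2.imdecode(np.frombuffer(jpg_buffer, dtype='uint8'), -1)
    cv2.imshow(rpi_name, image)  # one display window per sending hostname
    cv2.waitKey(1)
    image_hub.send_reply(b'OK')  # REQ/REP pattern: acknowledge each frame
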
github NarendrenSaravanan / Emotion-Recognition-using-Python / emotion recognition.py
h=1
                #os.startfile('chrome')
                #os.system("%systemroot%\system32\scrnsave.scr /s")
                #exit()

        cv2.imshow("Frame", frame)
       
        key = cv2.waitKey(1) & 0xFF
        #if j==15:
        #        break
        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
                break
 
# do a bit of cleanup
VideoStream(src=0).stop()
cv2.destroyAllWindows()
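
Note that the cleanup line above constructs a brand-new VideoStream and stops that object rather than the stream that was actually started earlier in the script; the usual pairing keeps a reference to the started stream, roughly as follows (the variable name vs is illustrative):

import cv2
from imutils.video import VideoStream

vs = VideoStream(src=0).start()   # start once, keep the reference
# ... read frames from vs inside the processing loop ...
vs.stop()                         # stop the same instance that was started
cv2.destroyAllWindows()
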
github zhanghanbin3159 / MobileNetV2-SSD / infer / filter_object_detection.py
#     "sofa", "train", "tvmonitor"]
CLASSES = ["background", "person"
           # ,"face"
           ]
# IGNORE = set(["person"])
IGNORE = set()
COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))

# load our serialized model from disk
print("[INFO] loading model...")
net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])

# initialize the video stream, allow the camera sensor to warm up,
# and initialize the FPS counter
print("[INFO] starting video stream...")
vs = VideoStream(src=0).start()
time.sleep(2.0)
fps = FPS().start()

# loop over the frames from the video stream
while True:
    # grab the frame from the threaded video stream and resize it
    # to have a maximum width of 400 pixels
    frame = vs.read()
    # frame = imutils.resize(frame, width=400)

    # grab the frame dimensions and convert it to a blob
    (h, w) = frame.shape[:2]
    blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)),
        0.007843, (300, 300), 127.5)

    # pass the blob through the network and obtain the detections and
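
The excerpt breaks off here; in the standard cv2.dnn detection loop the blob is fed to the network and weak detections are filtered out. A sketch of that continuation (the 0.2 confidence threshold is an assumption, and this is not necessarily the original file):

    # assumed continuation of the loop: forward pass, then filter weak detections
    net.setInput(blob)
    detections = net.forward()

    for i in np.arange(0, detections.shape[2]):
        confidence = detections[0, 0, i, 2]
        if confidence < 0.2:  # assumed confidence threshold
            continue
        idx = int(detections[0, 0, i, 1])
        if CLASSES[idx] in IGNORE:
            continue
        box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
        (startX, startY, endX, endY) = box.astype("int")
        cv2.rectangle(frame, (startX, startY), (endX, endY), COLORS[idx], 2)
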
github mayank408 / Mousely / eye_detection.py
# initialize dlib's face detector (HOG-based) and then create
# the facial landmark predictor
print("Loading facial landmark predictor...")
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(args["shape_predictor"])

# grab the indexes of the facial landmarks for the left and
# right eye, respectively
(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]


print("Starting live video stream...")

vs = VideoStream(src=0).start()

fileStream = False
time.sleep(1.0)
currentCount = 0

mouse = Mouse()
face_cascade = cv2.CascadeClassifier('res/haarcascade_frontalface_default.xml')

while True:

    # if this is a file video stream, then we need to check if
    # there any more frames left in the buffer to process
    if fileStream and not vs.more():
        break

    # grab the frame from the threaded video file stream, resize
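
This excerpt also breaks off at the top of its processing loop; the usual continuation of this dlib landmark pattern (a sketch, not the original file; the resize width of 450 pixels is an assumption) resizes the frame, converts it to grayscale, and slices out the eye coordinates:

    # assumed continuation of the loop above (typical dlib facial-landmark flow)
    frame = vs.read()
    frame = imutils.resize(frame, width=450)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # detect faces in the grayscale frame, then locate the landmarks for each
    rects = detector(gray, 0)
    for rect in rects:
        shape = predictor(gray, rect)
        shape = face_utils.shape_to_np(shape)  # dlib shape -> NumPy (x, y) array

        # slice out the left and right eye coordinates using the indexes above
        leftEye = shape[lStart:lEnd]
        rightEye = shape[rStart:rEnd]
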
github pablovela5620 / Hand-Detection-and-Distance-Estimation / hand_detection.py
from utils import detector_utils as detector_utils

ap = argparse.ArgumentParser()
ap.add_argument('-d', '--display', dest='display', type=int,
                        default=1, help='Display the detected images using OpenCV. This reduces FPS')
args = vars(ap.parse_args())

detection_graph, sess = detector_utils.load_inference_graph()

if __name__ == '__main__':
    # Detection confidence threshold to draw bounding box
    score_thresh = 0.60

    # Get stream from webcam and set parameters
    vs = VideoStream().start()

    # max number of hands we want to detect/track
    num_hands_detect = 1

    # Used to calculate fps
    start_time = datetime.datetime.now()
    num_frames = 0

    im_height, im_width = (None, None)

    try:
        while True:
            # Read Frame and process
            frame = vs.read()
            frame = cv2.resize(frame, (320, 240))
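
start_time and num_frames above are typically combined into a running FPS estimate at the end of each loop iteration; a minimal sketch of that step (an assumption, not shown in the excerpt):

            # assumed continuation: update the elapsed-time FPS estimate
            num_frames += 1
            elapsed_time = (datetime.datetime.now() - start_time).total_seconds()
            fps = num_frames / elapsed_time
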
github nateroblin33 / 3d-bone-tracking-tool / files / speedcam.py
scaphoidBaseline = [0,0,0]
scaphoidBasecalc = [0,0,0]
trapezoidBaseline = [0,0,0]
trapezoidBasecalc = [0,0,0]
triquetrumBaseline = [0,0,0]
triquetrumBasecalc = [0,0,0]

# Counter to name Point Cloud files for each frame
framePC = 1

# Boolean to determine if a frame is bad (missing at least one dot)
badframe = 0

# If a video path was not supplied, grab the reference to the Intel RealSense D435 stream
if not args.get("video", False):
    vs = VideoStream(src=0).start()
 
    # Otherwise, grab a reference to the video file
else:
    vsbool = 1
    
    # OPTIONAL code for using Default Webcam/Facetime Camera
    # NOTE: If used, comment out the later "vs = color_image" assignment (the line number originally referenced here has changed and is no longer correct)
    #vs = cv2.VideoCapture(args["video"])
    #vs = cv2.VideoCapture(-1)
 
    # Allow the camera or video file to warm up
time.sleep(2.0)

# Print start time for log
dt = datetime.now()
print("Time Started: " + str(dt))
github DetectingHumanEmotion / detecting-human-emotion-webapp / emotion-detection / blink / blink-detect.py
# initialize dlib's face detector (HOG-based) and then create
    # the facial landmark predictor
    print("[INFO] loading facial landmark predictor...")
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(args["shape_predictor"])

    # grab the indexes of the facial landmarks for the left and
    # right eye, respectively
    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

    # start the video stream thread
    print("[INFO] starting video stream thread...")
    print("[INFO] print q to quit...")
    if args['video'] == "camera":
        vs = VideoStream(src=0).start()
        fileStream = False
    else:
        vs = FileVideoStream(args["video"]).start()
        fileStream = True

    time.sleep(1.0)

    # loop over frames from the video stream
    while True:
        # if this is a file video stream, then we need to check if
        # there any more frames left in the buffer to process
        if fileStream and not vs.more():
            break

        # grab the frame from the threaded video file stream, resize
        # it, and convert it to grayscale
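
Given the eye landmark indexes grabbed above, blink detection normally hinges on the eye aspect ratio (EAR), which drops sharply while the eye is closed; a sketch of that helper (an assumption, not shown in this excerpt):

# sketch of the eye-aspect-ratio helper usually paired with this loop
from scipy.spatial import distance as dist

def eye_aspect_ratio(eye):
    # vertical distances between the two pairs of vertical eye landmarks
    A = dist.euclidean(eye[1], eye[5])
    B = dist.euclidean(eye[2], eye[4])
    # horizontal distance between the eye-corner landmarks
    C = dist.euclidean(eye[0], eye[3])
    return (A + B) / (2.0 * C)
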
github jeffskinnerbox / people-counter / vstream.py
resolution=(640, 480), fr=30):
        """
        Only the PiCamera will allow you to set its resolution at creation
        time.  In other cases (i.e. usb camera or file), the function
        VideoCapture.set() needs to be used post-creation to set resolution.
        But this will not work uniformly for all types of cameras.  As a
        result, the frame must be resized manually to your desired resolution.
        """
        self.vsource = source
        self.target_width = resolution[0]
        self.target_height = resolution[1]

        if self.vsource == 'file':
            self.stream = FileVideoStream(path, queueSize=qs).start()
        elif self.vsource == 'usbcamera':
            self.stream = VideoStream(src=src, usePiCamera=False).start()
        elif self.vsource == 'picamera':
            self.stream = VideoStream(usePiCamera=True, resolution=resolution,
                                      framerate=fr).start()

        if self.vsource == 'picamera':
            # read one frame to determine the resolution of the camera
            #frame = self.read()
            #self.native_width = frame.shape[0]
            #self.native_height = frame.shape[1]
            # this isn't right, but can't figure out the picamera
            self.native_width = resolution[0]
            self.native_height = resolution[1]
        else:
            self.native_width = self.get(cv2.CAP_PROP_FRAME_WIDTH)
            self.native_height = self.get(cv2.CAP_PROP_FRAME_HEIGHT)
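
Per the docstring above, frames from the usb camera and file sources still arrive at the camera's native resolution, so they have to be resized manually; a read() wrapper along these lines is one way to do it (a sketch, not necessarily the original class):

    def read(self):
        # sketch: return a frame resized to the requested resolution
        # (the picamera source was already configured at creation time)
        frame = self.stream.read()
        if self.vsource != 'picamera' and frame is not None:
            frame = cv2.resize(frame, (self.target_width, self.target_height))
        return frame
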
github esimov / pigo / examples / web / capture.py
#!/usr/bin/env python2
import cv2
import imutils
from imutils.video import VideoStream
import time, sys

vs = VideoStream(resolution=(320, 240)).start()
time.sleep(1.0)
 
while(True):
    frame = vs.read()
    frame = imutils.resize(frame, width=640, height=480)
    
    #cv2.imshow('frame',frame)
    res = bytearray(cv2.imencode(".jpeg", frame)[1])
    size = str(len(res))

    sys.stdout.write("Content-Type: image/jpeg\r\n")
    sys.stdout.write("Content-Length: " + size + "\r\n\r\n")
    sys.stdout.write( res )
    sys.stdout.write("\r\n")
    sys.stdout.write("--informs\r\n")
github zenika-open-source / zevision / lib / predict.py
def recognize_camera (src=0,method="hog",encoding_path=default_path_encodings,record_path=None):
    # initialize the video stream, then allow the camera sensor to warm up
    print("[INFO] starting video stream...")
    vs = VideoStream(src).start()
    writer = None
    time.sleep(2.0)
    # start the FPS throughput estimator
    #fps = FPS().start()
    fps = 1
    #iterator for the object detection to be activated
    #i = 0
    #q = Queue()
    frame = vs.read()
    if record_path is not None:
        fourcc = cv2.VideoWriter_fourcc(*"MJPG")
        writer = cv2.VideoWriter(record_path, fourcc, fps,(frame.shape[1], frame.shape[0]), True)
    # loop over frames from the video file stream
    while True:
        # grab the frame from the threaded video stream
        frame = vs.read()
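
When record_path is supplied, the VideoWriter created above is normally fed each processed frame inside this loop and released once the loop exits; a minimal sketch of that step (an assumption, not shown in the excerpt):

        # assumed continuation: persist the processed frame when recording
        if writer is not None:
            writer.write(frame)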

imutils

A series of convenience functions that make basic image processing operations (translation, rotation, resizing, skeletonization, displaying Matplotlib images, sorting contours, detecting edges, and much more) easier with OpenCV, for both Python 2.7 and Python 3.

License: MIT