picar/DecisionSystem/CentralisedDecision/cameraserver.py
print("It's happening!")
from DecisionSystem.CentralisedDecision.ballotvoter import BallotVoter
from DecisionSystem.CentralisedDecision.messenger import MqttMessenger
import numpy as np
import cv2
import time
import argparse
import os.path
import sys
from GestureRecognition.simplehandrecogniser import SimpleHandRecogniser
from threading import Thread
from queue import Queue
print("Parsing args")
parser = argparse.ArgumentParser(description="Runs a file with OpenCV and gets consensus from the swarm.")
parser.add_argument('-V', '--video', help="Path to video file.")
args = parser.parse_args()
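
# Example invocation (the video path below is illustrative only):
#   python cameraserver.py --video /path/to/gesture_clip.mp4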
recogniser = SimpleHandRecogniser(None)
# Check that a video file is specified and that the file exists.
if args.video:
    print('finding video')
    if not os.path.isfile(args.video):
        print("Input video file", args.video, "doesn't exist")
        sys.exit(1)
else:
    # Exit if no video file is specified - we aren't using the webcam here.
    sys.exit(1)


class VideoGet:
    '''
    Code taken from Najam R Syed, available here:
    https://github.com/nrsyed/computer-vision/tree/master/multithread
    '''
    def __init__(self, q, src):
        '''
        Must provide a source so we don't accidentally start the camera at work.
        '''
        self._stream = cv2.VideoCapture(src)
        (self.grabbed, self.frame) = self._stream.read()
        self.stopped = False
        self.q = q
        self.q.put(np.copy(self.frame))
        self.src = src

    def start(self):
        Thread(target=self.get, args=()).start()
        return self

    def get(self):
        while not self.stopped:
            if not self.grabbed:
                # self.stopped = True
                print('frame not grabbed')
                # Start a new feed from the same source.
                self._stream.release()
                self._stream = cv2.VideoCapture(self.src)
                # time.sleep(2)
                self.grabbed, self.frame = self._stream.read()
            else:
                (self.grabbed, self.frame) = self._stream.read()
                if self.q.full():
                    self.q.get()
                self.q.put(np.copy(self.frame))
            time.sleep(0.03)

    def stop(self):
        self.stopped = True


def on_vote():
    # Get the current frame from the camera and work out which hand
    # gesture is currently being seen.
    print('getting frame')
    # Need to copy rather than just take a reference, as the frame will
    # constantly be changing.
    global vd
    recogniser.setFrame(np.copy(vd.frame))
    print('Got frame, voting with recogniser')
    return recogniser.get_gesture()
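
# Wire the gesture callback into the voting system. It's assumed here that
# BallotVoter invokes on_vote() whenever the swarm requests a ballot and
# publishes the result through the MqttMessenger.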
mqtt = MqttMessenger()
v = BallotVoter(on_vote, mqtt)
mqtt.connect()
# Start the video capture at the next whole minute.
# current_time_sec = time.gmtime(time.time()).tm_sec
# if current_time_sec < 40:
#     time.sleep(60 - current_time_sec)
# else:
#     time.sleep(60 - current_time_sec + 60)
print('loading video')
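# Buffer up to the 5 most recent frames from the source; VideoGet drops the
# oldest frame whenever the queue is full.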
q = Queue(5)
vd = VideoGet(q, args.video)
vd.start()

stay_alive = input("Press q to stop, anything else to see what the camera is seeing.")
while stay_alive != 'q':
    if vd.frame is not None:
        frame = np.copy(vd.frame)
        cv2.imshow('Frame', frame)
        cv2.waitKey(1) & 0xFF
    stay_alive = input("Press q to stop, anything else to see what the camera is seeing.")

# Shut down the capture thread and close the preview window before exiting.
vd.stop()
cv2.destroyAllWindows()