diff --git a/DecisionSystem/CentralisedDecision/cameraserver.py b/DecisionSystem/CentralisedDecision/cameraserver.py index c2330be..e9254ff 100644 --- a/DecisionSystem/CentralisedDecision/cameraserver.py +++ b/DecisionSystem/CentralisedDecision/cameraserver.py @@ -11,6 +11,12 @@ from GestureRecognition.simplehandrecogniser import SimpleHandRecogniser from threading import Thread from queue import Queue +import MyRaft.node as raft +import MyRaft.leader as leader +import DecisionSystem.CentralisedDecision.commander as commander +import DecisionSystem.CentralisedDecision.messenger as messenger +import DecisionSystem.CentralisedDecision.ballotvoter as voter + print("Parsing args") parser = argparse.ArgumentParser(description="Runs a file with OpenCV and gets consensus from the swarm.") @@ -30,46 +36,6 @@ else: # Exit if no video file specified - we aren't using webcam here. sys.exit(1) -class VideoGet: - ''' - Code taken from Najam R Syed, available here: - https://github.com/nrsyed/computer-vision/tree/master/multithread - ''' - def __init__(self, q, src): - ''' - Must provide a source so we don't accidently start camera at work. - ''' - self._stream = cv2.VideoCapture(src) - (self.grabbed, self.frame) = self._stream.read() - self.stopped = False - self.q = q - self.q.put(np.copy(self.frame)) - self.src = src - - def start(self): - Thread(target=self.get, args=()).start() - return self - - def get(self): - while not self.stopped: - if not self.grabbed: - # self.stopped = True - print('frame not grabbed') - self._stream.release() - self._stream = cv2.VideoCapture(self.src) - # time.sleep(2) - self.grabbed, self.frame = self._stream.read() - else: - (self.grabbed, self.frame) = self._stream.read() - if self.q.full(): - self.q.get() - self.q.put(np.copy(self.frame)) - time.sleep(0.03) - # Start a new feed. - - def stop(self): - self.stopped = True - def on_vote(): # Get the current frame of the camera and process what hand # is currently being seen. 
@@ -77,32 +43,54 @@ def on_vote(): # Need to copy rather than just take a reference, as frame will # constantly be changing. global vd - recogniser.setFrame(np.copy(vd.frame)) + recogniser.set_frame(np.copy(vd.frame)) print('Got frame, voting with recogniser') return recogniser.get_gesture() +def connect_to_broker(mqtt): + print("Connecting to broker") + max_collisions = 100 + collisions = 1 + while not mqtt.connect() and collisions <= max_collisions: + time.sleep(2 ** collisions - 1) + print("Reconnecting in %s" %(2 ** collisions - 1)) + collisions += 1 + mqtt = MqttMessenger() v = BallotVoter(on_vote, mqtt) -mqtt.connect() + +def on_disconnect(rc): + print("Client disconnected from broker") + i = input("Would you like to reconnect? (y|n)") + if i == 'y': + global mqtt + connect_to_broker(mqtt) + +mqtt.add_disconnect_callback(on_disconnect) +connect_to_broker(mqtt) # Start the video capture at the next whole minute. -# current_time_sec = time.gmtime(time.time()).tm_sec -# if current_time_sec < 40: -# time.sleep(60 - current_time_sec) -# else: -# time.sleep(60 - current_time_sec + 60) +current_time_sec = time.gmtime(time.time()).tm_sec +if current_time_sec < 40: + time.sleep(60 - current_time_sec) +else: + time.sleep(60 - current_time_sec + 60) print('loading video') -q = Queue(5) -vd = VideoGet(q, args.video) -vd.start() -stay_alive = input("Press q to stop, anything else to see what the camera is seeing.") -while stay_alive: +print('Press q to quit the server, g to get votes/consensus') + +while True: if vd.frame is None: continue frame = np.copy(vd.frame) cv2.imshow('Frame', frame) - cv2.waitKey(1) & 0xFF - stay_alive = input("Press q to stop, anything else to see what the camera is seeing.") \ No newline at end of file + k = cv2.waitKey(33) + if k == ord('q'): + break + elif k == -1: + continue + elif k == ord('g'): + # Get votes + pass