Add old stuff from DST

Piv committed 2020-02-21 21:06:05 +10:30
parent 46073b4d7b
commit 18a5a33b5f

6 changed files with 116 additions and 4 deletions

.dockerignore (new file, 8 lines)

@@ -0,0 +1,8 @@
**/*.jpg
**/*.png
tests
MotorControl
Messaging
Web
**/*.mdj
**/*.pdf


@@ -2,6 +2,7 @@
 from queue import Queue
 import json
+import argparse
 import numpy as np
 import cv2
@@ -97,7 +98,9 @@ class Instance:
         self.kaleid = False
+parser = argparse.ArgumentParser(description="An instance of CAIDE")
 if __name__ == "__main__":
-    instance = Instance()
+    instance = Instance(video_file="/Users/piv/Documents/Projects/Experiments/Camera1/video.mp4")
     instance.start()
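As committed, the hunk above wires in argparse and builds a parser but still hard-codes the video path. A minimal sketch, assuming a hypothetical --video flag and module name, of how the parser could feed Instance instead (not part of the commit):

# Sketch only (not in the commit): wire the new ArgumentParser into Instance.
import argparse

from caide import Instance  # hypothetical module name; the diff does not show the file name

parser = argparse.ArgumentParser(description="An instance of CAIDE")
parser.add_argument("--video", dest="video_file", default=None,
                    help="optional path to a video file instead of the live camera")

if __name__ == "__main__":
    args = parser.parse_args()
    instance = Instance(video_file=args.video_file)
    instance.start()

Run as, for example, python caide.py --video /path/to/video.mp4, or with no flag to fall back to the default camera (file and flag names are illustrative).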


@@ -0,0 +1,73 @@
import numpy as np
import cv2


def make_triangle(start_img):
    h, w, d = start_img.shape
    # crop to a centred square
    inset = int((max(w, h) - min(w, h)) / 2)
    # sqrimg = start_img.crop(inset, inset, h-inset, w-inset)
    insetW = inset if w > h else 0
    insetH = inset if h > w else 0
    sqrimg = start_img[insetH:h-insetH, insetW:w-insetW]
    # solve equilateral triangle (image is square here, so rows == cols)
    w, h, d = sqrimg.shape
    print((w, h))
    mask = np.zeros((w, h, d))
    t_height = w/2 * np.tan(60)  # note: np.tan works in radians, so this is not exactly tan(60 degrees)
    pts = np.array([[0, w], [h/2, t_height], [h, w]], np.int32)
    pts = pts.reshape((-1, 1, 2))
    mask = cv2.fillPoly(mask, [pts], (255, 0, 0))
    # With mask, get the triangle from the original image.
    sqrimg[:, :, 0] = np.where(mask[:, :, 0] == 255, sqrimg[:, :, 0], 0)
    sqrimg[:, :, 1] = np.where(mask[:, :, 0] == 255, sqrimg[:, :, 1], 0)
    sqrimg[:, :, 2] = np.where(mask[:, :, 0] == 255, sqrimg[:, :, 2], 0)
    return sqrimg


def rotate(im, rotation):
    # rotate each channel about the image centre by `rotation` degrees
    M = cv2.getRotationMatrix2D((im.shape[1]/2, im.shape[0]/2), rotation, 1)
    im[:, :, 0] = cv2.warpAffine(im[:, :, 0], M, (im.shape[1], im.shape[0]))
    im[:, :, 1] = cv2.warpAffine(im[:, :, 1], M, (im.shape[1], im.shape[0]))
    im[:, :, 2] = cv2.warpAffine(im[:, :, 2], M, (im.shape[1], im.shape[0]))
    return im


def make_kaleidoscope(img):
    triangle = make_triangle(img)

    # Old PIL-based trapezoid code carried over from DST; Image, rotateIm and
    # filename are not defined in this file, so this path does not run yet.
    def make_trapezoid(triangle, save=False):
        w, h = triangle.size
        can_w, can_h = w*3, h
        output = np.array((can_w, can_h, 3))
        output = Image.new('RGBA', (can_w, can_h), color=255)

        def mirror_paste(last_img, coords):
            mirror = rotate(cv2.flip(last_img, 1), 60)
            output.paste(mirror, (coords), mirror)
            return mirror, coords

        # paste in bottom left corner
        output.paste(triangle, (0, can_h-h), triangle)
        last_img, coords = mirror_paste(triangle, (int(w/4.4), -int(h/2.125)))
        last_img, coords = mirror_paste(rotateIm(last_img, 120), (int(can_w/7.3), -228))
        output = output.crop((0, 15, w*2-22, h))
        if save:
            path = 'output/trapezoid_{}'.format(filename.split('/')[1])
            output.save(path)
            return output, path
        return output


if __name__ == "__main__":
    img = cv2.imread("/Users/piv/Documents/Projects/car/GestureRecognition/IMG_0818.png")
    triangle = make_triangle(img)
    triangle = cv2.resize(triangle, None, fx=0.3, fy=0.3, interpolation=cv2.INTER_AREA)
    triangle = rotate(triangle, 180)
    cv2.imshow("", triangle)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
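
The make_trapezoid path above still depends on PIL-era names (Image, rotateIm, filename) that this file never defines, so only the triangle extraction runs. A rough numpy/cv2-only sketch of the mirroring idea, with an assumed tile count and a synthetic gradient input so it runs standalone:

# Sketch only: numpy/cv2 mirroring in place of the PIL paste calls above.
import numpy as np
import cv2

def mirror_strip(triangle, copies=4):
    # lay the triangle and its horizontal mirror side by side, `copies` times
    h, w, _ = triangle.shape
    strip = np.zeros((h, w * copies, 3), dtype=triangle.dtype)
    for i in range(copies):
        tile = triangle if i % 2 == 0 else cv2.flip(triangle, 1)
        strip[:, i * w:(i + 1) * w] = tile
    return strip

if __name__ == "__main__":
    # synthetic gradient image so the sketch does not depend on a local file
    gradient = np.tile(np.linspace(0, 255, 300, dtype=np.uint8), (300, 1))
    demo = cv2.merge([gradient, 255 - gradient, gradient])
    tri = make_triangle(demo)  # make_triangle from the module above
    cv2.imshow("strip", mirror_strip(tri))
    cv2.waitKey(0)
    cv2.destroyAllWindows()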


@@ -0,0 +1,23 @@
import cv2 as cv

# SSD-style detector exported from TensorFlow (frozen graph + generated pbtxt)
cvNet = cv.dnn.readNetFromTensorflow('frozen_inference_graph.pb', 'graph.pbtxt')

img = cv.imread('IMG_0825.jpg')
img = cv.resize(img, None, fx=0.1, fy=0.1, interpolation=cv.INTER_AREA)
rows = img.shape[0]
cols = img.shape[1]
print(str(rows) + " " + str(cols))

cvNet.setInput(cv.dnn.blobFromImage(img, size=(300, 300), swapRB=True, crop=False))
cvOut = cvNet.forward()

# each detection is [batch_id, class_id, score, left, top, right, bottom]
# with the box coordinates normalised to 0..1
for detection in cvOut[0, 0, :, :]:
    score = float(detection[2])
    if score > 0.6:
        left = detection[3] * cols
        top = detection[4] * rows
        right = detection[5] * cols
        bottom = detection[6] * rows
        cv.rectangle(img, (int(left), int(top)), (int(right), int(bottom)), (23, 230, 210), thickness=2)

cv.imshow('img', img)
cv.waitKey()
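
The same inference can also be run through OpenCV's higher-level dnn_DetectionModel wrapper (OpenCV 4.1.2 or newer); a sketch reusing the graph files and threshold from the script above:

# Sketch only: the same SSD inference via OpenCV's dnn_DetectionModel wrapper.
import cv2 as cv

model = cv.dnn_DetectionModel('frozen_inference_graph.pb', 'graph.pbtxt')
model.setInputSize(300, 300)
model.setInputSwapRB(True)

img = cv.imread('IMG_0825.jpg')
img = cv.resize(img, None, fx=0.1, fy=0.1, interpolation=cv.INTER_AREA)

# detect() returns class ids, confidences and boxes as (x, y, w, h) in pixels
class_ids, confidences, boxes = model.detect(img, confThreshold=0.6)
for (x, y, w, h) in boxes:
    cv.rectangle(img, (int(x), int(y)), (int(x + w), int(y + h)), (23, 230, 210), thickness=2)

cv.imshow('img', img)
cv.waitKey()

detect() bundles the blob creation, forward pass and score thresholding, so the manual blobFromImage/forward loop and box denormalisation above are no longer needed.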

requirements.txt (new file, 5 lines)

@@ -0,0 +1,5 @@
numpy
opencv-python
six
paho-mqtt
u-msgpack-python


@@ -22,9 +22,9 @@ class TestSimpleHandRecogniser(unittest.TestCase):
         img_5 = cv2.resize(img_5, None, fx=0.1, fy=0.1, interpolation = cv2.INTER_AREA)
         self.recogniser_5 = SimpleHandRecogniser(img_5)
-        img_s = cv2.imread("/Users/piv/Documents/Projects/car/GestureRecognition/Screen Shot hand.png")
-        img_s = cv2.resize(img_s, None, fx=0.5, fy=0.5, interpolation = cv2.INTER_AREA)
-        self.recogniser_s = SimpleHandRecogniser(img_s)
+        # img_s = cv2.imread("/Users/piv/Documents/Projects/car/GestureRecognition/Screen Shot hand.png")
+        # img_s = cv2.resize(img_s, None, fx=0.5, fy=0.5, interpolation = cv2.INTER_AREA)
+        # self.recogniser_s = SimpleHandRecogniser(img_s)
 if __name__ == '__main__':
     unittest.main()