Add sample openvino inference
openvino_inference.py · 74 lines · Normal file
@@ -0,0 +1,74 @@
import argparse
import time

import cv2
import matplotlib.pyplot as plt
import numpy as np
from openvino.inference_engine import IECore


def parse_args() -> argparse.Namespace:
    """Parse and return command line arguments"""
    parser = argparse.ArgumentParser(add_help=False)
    args = parser.add_argument_group('Options')
    # fmt: off
    args.add_argument('-h', '--help', action='help', help='Show this help message and exit.')
    args.add_argument('-m', '--model', required=True, type=str,
                      help='Required. Path to an .xml or .onnx file with a trained model.')
    args.add_argument('-i', '--input', required=True, type=str, help='Required. Path to an image file.')
    args.add_argument('-d', '--device', default='CPU', type=str,
                      help='Optional. Specify the target device to infer on; CPU, GPU, MYRIAD, HDDL or HETERO: '
                           'is acceptable. The sample will look for a suitable plugin for device specified. '
                           'Default value is CPU.')
    # fmt: on
    return parser.parse_args()


def sample(model_location, image_location, device='CPU'):
    ie = IECore()

    net = ie.read_network(model=model_location)
    input_blob = next(iter(net.input_info))
    output_blob = next(iter(net.outputs))

    b, c, h, w = net.input_info[input_blob].input_data.shape  # network expects NCHW input
    image = cv2.imread(image_location)
    input_ratio = h / w  # aspect ratio (height / width) expected by the network
    target_ratio = image.shape[0] / image.shape[1]  # aspect ratio of the source image
    crop_axis = 0 if target_ratio > input_ratio else 1  # axis that has to be cropped
    crop_factor = min(input_ratio / target_ratio, target_ratio / input_ratio) / 2  # half the crop span as a fraction of the cropped axis
    center = [image.shape[0] / 2, image.shape[1] / 2]
    x1 = int(center[0] - image.shape[0] * crop_factor) if crop_axis == 0 else 0
    x2 = int(center[0] + image.shape[0] * crop_factor) if crop_axis == 0 else image.shape[0]
    y1 = int(center[1] - image.shape[1] * crop_factor) if crop_axis == 1 else 0
    y2 = int(center[1] + image.shape[1] * crop_factor) if crop_axis == 1 else image.shape[1]
    # Crop to target aspect ratio
    image = image[x1:x2, y1:y2]
    if image.shape[:-1] != (h, w):
        image = cv2.resize(image, (w, h))  # cv2.resize expects (width, height)

    image = image.transpose((2, 0, 1))  # HWC -> CHW
    # For batching
    image = np.expand_dims(image, axis=0)

    exec_net = ie.load_network(network=net, device_name=device)
    start = time.time()
    res = exec_net.infer(inputs={input_blob: image})  # first run is usually the slowest (warm-up)
    print('First Inference Time Seconds: ' + str(time.time() - start))
    start = time.time()
    res = exec_net.infer(inputs={input_blob: image})
    print('Second Inference Time Seconds: ' + str(time.time() - start))
    start = time.time()
    res = exec_net.infer(inputs={input_blob: image})
    print('Third Inference Time Seconds: ' + str(time.time() - start))
    res = res[output_blob]
    depth = res[0][0]  # drop the batch and channel dimensions
    fig = plt.figure()
    ii = plt.imshow(depth, interpolation='nearest')
    fig.colorbar(ii)
    plt.show()


if __name__ == '__main__':
    parsed_args = parse_args()
    sample(parsed_args.model, parsed_args.input, parsed_args.device)
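
For reference, the sample can also be exercised directly from Python instead of the CLI flags defined above. This is only a minimal sketch: the model and image paths below are hypothetical placeholders, not files included in this commit.

    # Sketch only: 'model/depth_net.xml' and 'images/room.jpg' are placeholder paths.
    # The equivalent command line would be:
    #   python openvino_inference.py -m model/depth_net.xml -i images/room.jpg -d CPU
    from openvino_inference import sample

    sample('model/depth_net.xml', 'images/room.jpg', device='CPU')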