# Define Model
import os
import sys
import logging as log
import cv2
import numpy as np
from openvino.inference_engine import IENetwork, IECore
class Model:
    """Thin wrapper around ``Network`` handling pre-processing and inference.

    Loads an OpenVINO IR model and exposes helpers to resize frames to the
    model's expected spatial size and to run inference on them.
    """

    def __init__(self, xml_path, device='CPU', cpu_extension=None):
        """Load the IR model at *xml_path* onto *device* (default CPU)."""
        self.model = Network()
        self.model.load_model(xml_path, device, cpu_extension)

    def get_input_shape(self):
        """Return the model's spatial input size as ``(width, height)``.

        NOTE(review): assumes the network input layout is NCHW
        (batch, channels, height, width) — confirm against the IR.
        """
        input_shape = self.model.get_input_shape()
        height = int(input_shape[2])
        width = int(input_shape[3])
        return (width, height)

    def infer(self, frame):
        """Run inference on *frame* and return the raw network output.

        The frame is resized to the network input size, converted from
        HWC to CHW, and given a batch dimension before being submitted
        as an asynchronous request.  Returns ``None`` if the request
        did not complete with status 0.
        """
        # Reuse the resize() helper instead of duplicating cv2.resize here.
        formatted_frame = self.resize(frame)
        formatted_frame = formatted_frame.transpose((2, 0, 1))  # HWC -> CHW
        formatted_frame = np.expand_dims(formatted_frame, axis=0)  # add batch dim
        # async_inference() returns nothing; the old code bound its None
        # result to an unused variable.
        self.model.async_inference(formatted_frame)
        if self.model.wait() == 0:
            return self.model.extract_output()
        return None  # make the failure path explicit

    def resize(self, frame):
        """Return *frame* resized to the network's (width, height)."""
        return cv2.resize(frame, self.get_input_shape())
class Network:
    """
    Load and store information for working with the Inference Engine,
    and any loaded models.
    """

    def __init__(self):
        # Inference Engine core object and the network read through it.
        self.plugin = None
        self.network = None
        # Names of the first input / output layers of the network.
        self.input_blob = None
        self.output_blob = None
        # Executable network and the most recently started async request.
        self.exec_network = None
        self.infer_request = None

    def load_model(self, model, device="CPU", cpu_extension=None):
        """
        Load the model given IR files.

        *model* is the path to the ``.xml`` IR file; the ``.bin`` weights
        file is assumed to sit next to it with the same stem.
        Defaults to CPU as device for use in the workspace.
        """
        model_xml = model
        model_bin = os.path.splitext(model_xml)[0] + ".bin"
        # Initialize the plugin.
        self.plugin = IECore()
        # Add a CPU extension, if applicable.
        if cpu_extension and "CPU" in device:
            self.plugin.add_extension(cpu_extension, device)
        # Read the IR as an IENetwork.
        self.network = IENetwork(model=model_xml, weights=model_bin)
        # Load the IENetwork into the plugin.
        self.exec_network = self.plugin.load_network(self.network, device)
        # Cache the names of the (first) input and output layers.
        self.input_blob = next(iter(self.network.inputs))
        self.output_blob = next(iter(self.network.outputs))

    def get_input_shape(self):
        """Return the shape of the network's input layer."""
        return self.network.inputs[self.input_blob].shape

    def async_inference(self, image):
        """Start an asynchronous inference request, given an input image."""
        # start_async() returns a handle to the InferRequest; keep it so
        # wait()/extract_output() operate on the same request we started.
        self.infer_request = self.exec_network.start_async(
            request_id=0, inputs={self.input_blob: image})

    def wait(self):
        """Block until the pending request completes; return its status."""
        # Use the handle stored by async_inference() instead of
        # re-indexing exec_network.requests[0], so the two stay in sync.
        return self.infer_request.wait(-1)

    def extract_output(self):
        """Return the results for the output layer of the network."""
        return self.infer_request.outputs[self.output_blob]