Here are examples of the Python API sys.exit taken from open-source projects. By voting up you can indicate which examples are most useful and appropriate.
1 Example
0
View Complete Implementation : run_onnx_dldt.py
Copyright MIT License
Author : pfnet-research
def inference(args, model_xml, model_bin, inputs, outputs):
    """Run an OpenVINO IR model, verify its outputs, and benchmark it.

    Loads the network described by ``model_xml``/``model_bin`` into an
    ``IEPlugin`` for ``args.device``, checks that every output matches the
    expected values within tolerance, then times repeated inference.

    Args:
        args: parsed command-line options; this function reads ``device``,
            ``plugin_dir``, ``cpu_extension`` and ``iterations``.
        model_xml: path to the IR network topology (.xml file).
        model_bin: path to the IR network weights (.bin file).
        inputs: sequence of ``(name, array)`` pairs fed to the network.
        outputs: sequence of ``(name, expected_array)`` pairs to verify.

    Returns:
        Whatever ``run_onnx_util.run_benchmark`` returns for the inference
        callable (presumably timing statistics — confirm against that helper).

    Exits:
        Calls ``sys.exit(1)`` when the CPU plugin cannot handle some layers.
    """
    # Imported lazily so the module can be loaded without OpenVINO installed.
    from openvino.inference_engine import IENetwork
    from openvino.inference_engine import IEPlugin

    plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)
    if args.cpu_extension and 'CPU' in args.device:
        plugin.add_cpu_extension(args.cpu_extension)

    log.info('Loading network files:\n\t{}\n\t{}'.format(model_xml, model_bin))
    net = IENetwork(model=model_xml, weights=model_bin)

    # On CPU, verify every layer is supported before loading the network;
    # otherwise report the offenders and abort with a --cpu-extension hint.
    if plugin.device == 'CPU':
        supported_layers = plugin.get_supported_layers(net)
        not_supported_layers = [l for l in net.layers.keys() if
                                l not in supported_layers]
        if not_supported_layers:
            # Fixed typo in message: was 'Folowing'.
            log.error('Following layers are not supported by the plugin for '
                      'specified device {}:\n {}'.format(
                          plugin.device, ', '.join(not_supported_layers)))
            log.error('Please try to specify cpu extensions library path in '
                      'sample\'s command line parameters using '
                      '--cpu-extension command line argument')
            # Fixed: original (scraped) text had the non-existent 'sys.exis'.
            sys.exit(1)

    # Map each provided input to the network's declared input blobs.
    # Fixed: scraper had mangled 'assert' into 'astert' throughout.
    assert len(net.inputs) == len(inputs)
    ie_inputs = {}
    for item in inputs:
        assert item[0] in set(net.inputs.keys())
        ie_inputs[item[0]] = item[1]

    log.info('Loading model to the plugin')
    exec_net = plugin.load(network=net)

    # One verification pass: every expected output must be present and close.
    res = exec_net.infer(inputs=ie_inputs)
    assert len(res) == len(outputs)
    for name, output in outputs:
        assert name in res
        actual_output = res[name]
        np.testing.assert_allclose(output, actual_output, rtol=1e-3, atol=1e-4)
        log.info('{}: OK'.format(name))
    log.info('ALL OK')

    def compute():
        # Benchmark body: inference only, no verification overhead.
        exec_net.infer(inputs=ie_inputs)

    return run_onnx_util.run_benchmark(compute, args.iterations)