#!/usr/bin/env python3
from __future__ import print_function
import sys
import os
from argparse import ArgumentParser, SUPPRESS
import cv2
import time
import logging as log
from openvino.inference_engine import IECore


def build_argparser():
    parser = ArgumentParser(add_help=False)
    args = parser.add_argument_group('Options')
    args.add_argument('-h', '--help', action='help', default=SUPPRESS,
                      help='Show this help message and exit.')
    args.add_argument("-m", "--model",
                      help="Required. Path to an .xml file with a trained model.",
                      default="./model/face-detection-adas-0001.xml", type=str)
    args.add_argument("-i", "--input",
                      help="Required. Path to video file or image. 'cam' for capturing video stream from camera",
                      required=False, type=str)
    args.add_argument("-l", "--cpu_extension",
                      help="Optional. Required for CPU custom layers. Absolute path to a shared library with the "
                           "kernels implementations.", type=str, default=None)
    args.add_argument("-d", "--device",
                      help="Optional. Specify the target device to infer on; CPU, GPU, FPGA, HDDL or MYRIAD is "
                           "acceptable. The demo will look for a suitable plugin for the device specified.",
                      default="MYRIAD", type=str)
    args.add_argument("--labels", help="Optional. Path to labels mapping file", default=None, type=str)
    args.add_argument("-pt", "--prob_threshold",
                      help="Optional. Probability threshold for detections filtering",
                      default=0.5, type=float)
    args.add_argument("--no_show", help="Optional. Don't show output", action='store_true')
    return parser


def main():
    log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
    args = build_argparser().parse_args()

    log.info("Creating Inference Engine...")
    ie = IECore()
    if args.cpu_extension and 'CPU' in args.device:
        ie.add_extension(args.cpu_extension, "CPU")

    # Read the IR (the .bin weights file is expected next to the .xml)
    log.info("Loading network")
    net = ie.read_network(args.model, os.path.splitext(args.model)[0] + ".bin")

    if "CPU" in args.device:
        supported_layers = ie.query_network(net, "CPU")
        not_supported_layers = [l for l in net.layers.keys() if l not in supported_layers]
        if len(not_supported_layers) != 0:
            log.error("Following layers are not supported by the plugin for specified device {}:\n {}"
                      .format(args.device, ', '.join(not_supported_layers)))
            log.error("Please try to specify cpu extensions library path in sample's command line parameters "
                      "using -l or --cpu_extension command line argument")
            sys.exit(1)

    img_info_input_blob = None
    feed_dict = {}
    for blob_name in net.inputs:
        if len(net.inputs[blob_name].shape) == 4:
            input_blob = blob_name
        elif len(net.inputs[blob_name].shape) == 2:
            img_info_input_blob = blob_name
        else:
            raise RuntimeError("Unsupported {}D input layer '{}'. Only 2D and 4D input layers are supported"
                               .format(len(net.inputs[blob_name].shape), blob_name))

    assert len(net.outputs) == 1, "Demo supports only single output topologies"
    out_blob = next(iter(net.outputs))
    log.info("Loading IR to the plugin...")
    exec_net = ie.load_network(network=net, num_requests=2, device_name=args.device)

    # Read and pre-process input image
    n, c, h, w = net.inputs[input_blob].shape
    if img_info_input_blob:
        feed_dict[img_info_input_blob] = [h, w, 1]

    # Default to the first camera when no input is given
    if args.input == 'cam' or args.input is None:
        input_stream = 0
    else:
        input_stream = args.input

    cap = cv2.VideoCapture(input_stream)
    assert cap.isOpened(), "Can't open " + str(input_stream)

    if args.labels:
        with open(args.labels, 'r') as f:
            labels_map = [x.strip() for x in f]
    else:
        labels_map = None

    cur_request_id = 0
    next_request_id = 1

    log.info("Starting inference in async mode...")
    is_async_mode = True
    render_time = 0
    if is_async_mode:
        ret, frame = cap.read()
        frame_h, frame_w = frame.shape[:2]

    print("To close the application, press 'CTRL+C' here or switch to the output window and press ESC key")
    print("To switch between sync/async modes, press TAB key in the output window")

    while cap.isOpened():
        if is_async_mode:
            ret, next_frame = cap.read()
        else:
            ret, frame = cap.read()
            if ret:
                frame_h, frame_w = frame.shape[:2]
        if not ret:
            break  # abandons the last frame in case of async_mode

        # Main sync point:
        # in the truly Async mode we start the NEXT infer request while waiting for the CURRENT to complete;
        # in the regular mode we start the CURRENT request and immediately wait for its completion
        inf_start = time.time()
        if is_async_mode:
            in_frame = cv2.resize(next_frame, (w, h))
            in_frame = in_frame.transpose((2, 0, 1))  # Change data layout from HWC to CHW
            in_frame = in_frame.reshape((n, c, h, w))
            feed_dict[input_blob] = in_frame
            exec_net.start_async(request_id=next_request_id, inputs=feed_dict)
        else:
            in_frame = cv2.resize(frame, (w, h))
            in_frame = in_frame.transpose((2, 0, 1))  # Change data layout from HWC to CHW
            in_frame = in_frame.reshape((n, c, h, w))
            feed_dict[input_blob] = in_frame
            exec_net.start_async(request_id=cur_request_id, inputs=feed_dict)

        if exec_net.requests[cur_request_id].wait(-1) == 0:
            inf_end = time.time()
            det_time = inf_end - inf_start

            # Parse detection results of the current request
            res = exec_net.requests[cur_request_id].outputs[out_blob]
            for obj in res[0][0]:
                # Draw only objects with probability above the specified threshold
                if obj[2] > args.prob_threshold:
                    xmin = int(obj[3] * frame_w)
                    ymin = int(obj[4] * frame_h)
                    xmax = int(obj[5] * frame_w)
                    ymax = int(obj[6] * frame_h)
                    class_id = int(obj[1])
                    # Per-class BGR colors; face-detection-adas-0001 reports faces as class 1 (green)
                    color = [(0, 0, 0), (0, 255, 0), (255, 0, 0), (255, 255, 255),
                             (0, 0, 0), (203, 192, 255), (238, 130, 238), (0, 69, 255)]
                    cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), color[class_id], 2)
                    det_label = labels_map[class_id] if labels_map else str(class_id)
                    cv2.putText(frame, 'face' + ' ' + str(round(obj[2] * 100, 1)) + ' %',
                                (xmin, ymin - 7), cv2.FONT_HERSHEY_COMPLEX, 0.6, (0, 255, 0), 1)

            # Draw performance stats
            inf_time_message = "Inference time: N/A for async mode" if is_async_mode else \
                "Inference time: {:.3f} ms".format(det_time * 1000)
            render_time_message = "OpenCV rendering time: {:.3f} ms".format(render_time * 1000)
            async_mode_message = "Async mode is on. Processing request {}".format(cur_request_id) if is_async_mode else \
                "Async mode is off. Processing request {}".format(cur_request_id)

            cv2.putText(frame, inf_time_message, (15, 15), cv2.FONT_HERSHEY_COMPLEX, 0.5, (200, 10, 10), 1)
            cv2.putText(frame, render_time_message, (15, 30), cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)
            cv2.putText(frame, async_mode_message, (10, int(frame_h - 20)), cv2.FONT_HERSHEY_COMPLEX, 0.5,
                        (10, 10, 200), 1)

        render_start = time.time()
        if not args.no_show:
            cv2.imshow("Detection Results", frame)
        render_end = time.time()
        render_time = render_end - render_start

        if is_async_mode:
            cur_request_id, next_request_id = next_request_id, cur_request_id
            frame = next_frame
            frame_h, frame_w = frame.shape[:2]

        if not args.no_show:
            key = cv2.waitKey(1)
            if key == 27:  # ESC quits
                break
            if key == 9:  # TAB toggles sync/async
                is_async_mode = not is_async_mode
                log.info("Switched to {} mode".format("async" if is_async_mode else "sync"))

    cv2.destroyAllWindows()


if __name__ == '__main__':
    sys.exit(main() or 0)
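Since --input is optional and the device defaults to MYRIAD, the script can be started with no arguments once the model files are in ./model/. The core of the async mode is a two-request "ping-pong": while the result of request cur_request_id is being parsed and drawn, request next_request_id is already running inference on the next frame, and the two IDs are swapped each iteration so the Neural Compute Stick never sits idle. A minimal sketch of that pattern, reusing the cap, exec_net, feed_dict, input_blob and out_blob names from the script above (preprocess is a hypothetical helper wrapping the resize / HWC-to-CHW / reshape steps):

cur_id, next_id = 0, 1
while True:
    ret, next_frame = cap.read()
    if not ret:
        break
    feed_dict[input_blob] = preprocess(next_frame)               # hypothetical helper: resize + HWC->CHW + reshape
    exec_net.start_async(request_id=next_id, inputs=feed_dict)   # kick off inference on the NEXT frame
    if exec_net.requests[cur_id].wait(-1) == 0:                  # block only on the PREVIOUS frame's request
        detections = exec_net.requests[cur_id].outputs[out_blob]
    cur_id, next_id = next_id, cur_id                            # swap the two request slots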
Run result:
Problem encountered:
The openvino module is missing:
Traceback (most recent call last):
  File "./demo/OpenVINO/python/openvino_inference.py", line 15, in <module>
    from openvino.inference_engine import IECore
ModuleNotFoundError: No module named 'openvino'
Installing the OpenVINO™ toolkit on a Raspberry Pi 4B: https://blog.csdn.net/qq_44989881/article/details/119792769?spm=1001.2014.3001.5501
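After following that guide, the toolkit's environment script has to be sourced in every new shell before the Python bindings become importable (for the 2021.x Raspbian archive this is typically source /opt/intel/openvino/bin/setupvars.sh; the exact path depends on where the archive was unpacked). A quick sanity check, assuming an NCS2 stick is plugged in:

# Verify that the OpenVINO Python bindings resolve and that the MYRIAD
# plugin can see the Neural Compute Stick 2. Assumes setupvars.sh has
# already been sourced in this shell.
from openvino.inference_engine import IECore

ie = IECore()
print(ie.available_devices)  # expected to include 'MYRIAD' when the stick is attached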