Commit 9144fcd1 authored by jack's avatar jack

add for hat model

parent 4bef3bcb
第一:环境
# 运行该项目需要安装 Python 版 OpenCV 4.4.0 以上版本。
如需卸载:sudo pip3 uninstall opencv-python
安装最新版本:sudo pip3 install opencv-python
查看版本:运行 python3 进入交互环境后执行
>>>import cv2
>>>cv2.__version__
>>>quit()
第二:运行
python3 test_darknet.py --image 6.jpg
python3 test_darknet.py --video 0904.mp4
yolo.getdetresults(frame, False)  # 接口:hasdraw 参数设为 False 时返回检测结果,设为 True 时在图像上绘制并显示(测试用)
rtsp://admin:admin12345@192.168.77.19:554/h264/ch1/main/av_stream
import cv2
import numpy as np
import os.path
class DarknetYolo:
    """YOLO object detector backed by OpenCV's DNN Darknet importer (CPU target)."""

    def __init__(self, configPath, weightPath, metaPath, thresh):
        """Load the Darknet network and class-name list.

        Args:
            configPath: path to the Darknet .cfg file.
            weightPath: path to the trained .weights file.
            metaPath: path to a text file with one class name per line.
            thresh: confidence threshold in (0, 1); also used as the NMS
                score threshold in getdetresults().

        Raises:
            ValueError: if thresh is out of range or any path does not exist.
        """
        # Raise instead of `assert`: asserts are stripped under `python -O`,
        # which would silently disable this validation.
        if not 0 < thresh < 1:
            raise ValueError("Threshold should be a float between zero and one (non-inclusive)")
        if not os.path.exists(configPath):
            raise ValueError("Invalid config path `" + os.path.abspath(configPath) + "`")
        if not os.path.exists(weightPath):
            raise ValueError("Invalid weight path `" + os.path.abspath(weightPath) + "`")
        if not os.path.exists(metaPath):
            raise ValueError("Invalid data file path `" + os.path.abspath(metaPath) + "`")
        netMain = cv2.dnn.readNetFromDarknet(configPath, weightPath)
        netMain.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
        netMain.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)
        # `with` guarantees the file handle is closed (the original leaked it).
        with open(metaPath) as f:
            metaMain = f.read().strip().split("\n")
        # Fixed seed so each class keeps the same colour across runs.
        np.random.seed(666)
        COLORS = np.random.randint(0, 255, size=(len(metaMain), 3), dtype="uint8")
        self.netMain = netMain
        self.metaMain = metaMain
        self.thresh = thresh
        self.COLORS = COLORS

    def getdetresults(self, frame, hasdraw):
        """Run YOLO detection on one frame.

        Args:
            frame: image as a numpy array (BGR, as read by OpenCV).
            hasdraw: if True, draw boxes/labels on `frame` and show them in
                a window (test mode) and return None; if False, return the
                raw detection results.

        Returns:
            When hasdraw is False: (idxs, boxes, confidences, classIDs,
            metaMain), where `idxs` holds the indices kept by NMS (empty
            when nothing was detected), and `boxes` are [x, y, w, h] lists
            in pixel coordinates. When hasdraw is True: None.
        """
        netMain, metaMain, thresh, COLORS = self.netMain, self.metaMain, self.thresh, self.COLORS
        # Create a 4D blob from the frame: scale pixel values to [0, 1],
        # resize to the 416x416 network input, swap BGR -> RGB.
        blob = cv2.dnn.blobFromImage(frame, 1 / 255, (416, 416), swapRB=True, crop=False)
        netMain.setInput(blob)
        # Forward pass through the output layers only.
        layer = netMain.getUnconnectedOutLayersNames()
        layerOutputs = netMain.forward(layer)
        (H, W) = frame.shape[:2]
        # Keep only boxes whose best class score exceeds the threshold.
        boxes = []
        confidences = []
        classIDs = []
        for output in layerOutputs:
            for detection in output:
                scores = detection[5:]
                classID = np.argmax(scores)
                confidence = scores[classID]
                if confidence > thresh:
                    # Detections are centre-based fractions of the input
                    # size; scale to pixels and convert to top-left corner.
                    box = detection[0:4] * np.array([W, H, W, H])
                    (centerX, centerY, width, height) = box.astype("int")
                    x = int(centerX - (width / 2))
                    y = int(centerY - (height / 2))
                    boxes.append([x, y, int(width), int(height)])
                    confidences.append(float(confidence))
                    classIDs.append(classID)
        # Non-maximum suppression drops overlapping lower-confidence boxes.
        idxs = cv2.dnn.NMSBoxes(boxes, confidences, thresh, 0.3)
        if len(idxs) == 0:
            print("Can't detect objects, please check image!")
            if not hasdraw:
                # Always return a 5-tuple so callers can unpack safely
                # (the original implicitly returned None here, which
                # crashed callers doing a 5-value unpack).
                return np.empty((0,), dtype=int), boxes, confidences, classIDs, metaMain
            return None
        if not hasdraw:
            return idxs, boxes, confidences, classIDs, metaMain
        # Test mode: draw each kept detection onto the frame and display it.
        for i in np.asarray(idxs).flatten():
            (x, y) = (boxes[i][0], boxes[i][1])
            (w, h) = (boxes[i][2], boxes[i][3])
            color = [int(c) for c in COLORS[classIDs[i]]]
            cv2.rectangle(frame, (x, y), (x + w, y + h), color, 1, lineType=cv2.LINE_AA)
            text = "{}: {:.4f}".format(metaMain[classIDs[i]], confidences[i])
            cv2.putText(frame, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX,
                        0.5, color, 1, lineType=cv2.LINE_AA)
        cv2.imshow("Tag", frame)
        return None
This diff is collapsed.
import argparse
import os.path
import sys

import cv2 as cv

import darknet as interface
# --- Model configuration -------------------------------------------------
configPath = 'hat-obj-test.cfg'
weightPath = 'hat-obj_last.weights'
metaPath = 'hat.names'
thresh = 0.9

yolo = interface.DarknetYolo(configPath, weightPath, metaPath, thresh)

# --- Input source selection ----------------------------------------------
parser = argparse.ArgumentParser(description='Object Detection using YOLO in OPENCV')
parser.add_argument('--image', help='Path to image file.')
parser.add_argument('--video', help='Path to video file.')
parser.add_argument('--rtsp', help='RTSP stream URL.')
args = parser.parse_args()

if args.image:
    # Open the image file.
    if not os.path.isfile(args.image):
        print("Input image file ", args.image, " doesn't exist")
        sys.exit(1)
    cap = cv.VideoCapture(args.image)
elif args.video:
    # Open the video file.
    if not os.path.isfile(args.video):
        print("Input video file ", args.video, " doesn't exist")
        sys.exit(1)
    cap = cv.VideoCapture(args.video)
elif args.rtsp:
    # An RTSP URL is not a local file, so no os.path.isfile() check here
    # (the original check always failed, making --rtsp unusable).
    cap = cv.VideoCapture(args.rtsp)
else:
    # Default: first local webcam.
    cap = cv.VideoCapture(0)

# --- Detection loop: one frame per iteration until EOF or a key press ----
while cv.waitKey(1) < 0:
    hasFrame, frame = cap.read()
    # Stop the program at the end of the stream.
    if not hasFrame:
        print("Done processing !!!")
        cv.waitKey(0)
        # Release the capture device before exiting the loop.
        cap.release()
        break
    results = yolo.getdetresults(frame, False)
    if results is None:
        # Nothing detected in this frame (the detector may return None);
        # still display the raw frame and keep going instead of crashing
        # on the 5-value unpack below.
        cv.imshow("Tag", frame)
        continue
    idxs, boxes, confidences, classIDs, metaMain = results
    for i in idxs.flatten():
        # Box geometry kept by non-maximum suppression.
        (x, y) = (boxes[i][0], boxes[i][1])
        (w, h) = (boxes[i][2], boxes[i][3])
        cv.rectangle(frame, (x, y), (x + w, y + h), (255, 255, 255), 1, lineType=cv.LINE_AA)
        text = "{}: {:.4f}".format(metaMain[classIDs[i]], confidences[i])
        cv.putText(frame, text, (x, y - 5), cv.FONT_HERSHEY_SIMPLEX,
                   0.5, (255, 255, 255), 1, lineType=cv.LINE_AA)
    cv.imshow("Tag", frame)
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment