Commit 16e097d0 authored by jiajunjie's avatar jiajunjie

improve the face multi-tracking code

parent 7634806d
@@ -31,5 +31,9 @@ typedef enum {
///< Out of memory
FACE_ERR_NO_FREE_MEMORY,
///< Invalid input image
FACE_ERR_INVALID_IMAGE
FACE_ERR_INVALID_IMAGE,
///< No face detected
FACE_ERR_NO_FACE,
///< Internal error inside the interface
FACE_ERR_INTERNAL_ERROR
} FACE_ERROR_E;
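Since these codes cross the C ABI and are consumed from Python via ctypes elsewhere in this commit, a minimal checking sketch follows. It assumes the enum's success value, which this hunk does not show, is 0; `FACE_OK` is a hypothetical name for it.

# Hedged sketch: mirror the visible tail of FACE_ERROR_E on the Python side.
# FACE_OK = 0 is an assumption; the hunk does not show the enum's first values.
FACE_OK = 0  # assumed success value

def check(ret):
    # Raise if a face-library call returned a non-success FACE_ERROR_E value
    if ret != FACE_OK:
        raise RuntimeError("face library error code: %d" % ret)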
@@ -15,19 +15,26 @@
#include "common.h"
#include <opencv2/opencv.hpp>
#include <opencv2/objdetect.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/tracking.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/ml.hpp>
using namespace cv;
using namespace std;
extern "C"
{
struct tracker_info
{
vector<Rect2d> objects;
vector<Rect2d> boxes;
vector<int> trackerIds;
tracker_info()
{
objects.clear();
tracker_info() {
boxes.clear();
trackerIds.clear();
}
};
@@ -36,15 +43,14 @@ extern "C"
/**
* @brief Initialize the multi-object tracking feature
*
* @param objects tracking target objects
* @param trackerType tracking method to use
* @param pmodel_cascade path of the face detection cascade model file
*
* @return muti_tracker_instance pointer to the instance handle
*/
muti_tracker_instance* muti_tracker_init( vector<Rect2d> objects,
/*std::string trackerType = "KCF",*/
int rows,int cols, unsigned char* imgdata);
muti_tracker_instance* muti_tracker_init(
const char* pmodel_cascade
);
/**
* @brief Run multi-object tracking estimation on a frame
@@ -52,20 +58,24 @@
* @param muti_tracker_instance instance handle pointer (input)
* @param rows row count of the image to process
* @param cols column count of the image to process
* @param imgdata image data to process
* @param rate box growth/shrink margin used when matching
* @param imgdata image data to process
* @param trackerType tracking method to use
* @param extend box growth/shrink margin
* @param updatetime detector re-calibration interval (in milliseconds)
* @param ptracker receives the tracked face boxes and IDs
*
* @return int result
* @return FACE_ERROR_E error code
*/
int muti_tracker_estimate(
FACE_ERROR_E muti_tracker_estimate(
muti_tracker_instance *instance,
int rows,
int cols,
unsigned char* imgdata,
int rate,
struct tracker_info* ptracker
const char* trackerType,
const int extend,
const int updatetime/*,
struct tracker_info* ptracker*/
);
/**
@@ -73,10 +83,10 @@ extern "C"
*
* @param instance handle to destroy
*
* @return bool True on success
* @return FACE_ERROR_E error code
*/
bool muti_tracker_destory(
FACE_ERROR_E muti_tracker_destory(
muti_tracker_instance *instance
);
}
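Because this header is driven from Python through ctypes later in the commit, here is a hedged binding sketch for the three entry points. The argtypes/restype declarations follow the signatures above; without them ctypes defaults every argument and return value to a 32-bit int, which truncates the returned instance pointer on 64-bit systems. The library path is an assumption.

import ctypes
from ctypes import POINTER, c_char_p, c_int, c_ubyte, c_void_p

lib = ctypes.cdll.LoadLibrary("./libtracker.so")  # assumed library path

# muti_tracker_instance* muti_tracker_init(const char* pmodel_cascade)
lib.muti_tracker_init.restype = c_void_p
lib.muti_tracker_init.argtypes = [c_char_p]

# FACE_ERROR_E muti_tracker_estimate(instance, rows, cols, imgdata,
#                                    trackerType, extend, updatetime)
lib.muti_tracker_estimate.restype = c_int
lib.muti_tracker_estimate.argtypes = [c_void_p, c_int, c_int,
                                      POINTER(c_ubyte), c_char_p, c_int, c_int]

# FACE_ERROR_E muti_tracker_destory(muti_tracker_instance* instance)
lib.muti_tracker_destory.restype = c_int
lib.muti_tracker_destory.argtypes = [c_void_p]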
@@ -8,13 +8,14 @@ top_kp = -4
offset_dead_block = 0.1
so = ctypes.cdll.LoadLibrary
pwm = so("./libPCA9685.so")
so = ctypes.cdll.LoadLibrary
library_path="/home/uboo/project/uboo/face_detect_git/libx64/"
pwm = so(library_path+"libPCA9685.so")
# Set frequency to 50hz, good for servos.
pwm.setPWMFreq(50)
FaceCascade = cv2.CascadeClassifier('./haarcascade_frontalface_alt2.xml')
model_path="/home/uboo/project/uboo/face_detect_git/models/"
FaceCascade = cv2.CascadeClassifier(model_path+"haarcascade_frontalface_alt2.xml")
cv2.namedWindow('FaceDetect',flags=cv2.WINDOW_NORMAL)
cap = cv2.VideoCapture(0)
import time
from ctypes import *
import ctypes
import cv2

last_btm_degree = 100  # last recorded angle of the bottom servo
last_top_degree = 100  # last recorded angle of the top servo
btm_kp = 5  # proportional gain (Kp) of the bottom servo
top_kp = 5  # proportional gain (Kp) of the top servo
offset_dead_block = 0.1  # dead zone for the offset

so = ctypes.cdll.LoadLibrary
pwm = so("./libPCA9685.so")
# Set frequency to 50hz, good for servos.
pwm.setPWMFreq(50)

# Load the cascade model for face detection
FaceCascade = cv2.CascadeClassifier('./haarcascade_frontalface_default.xml')
# Create a window named FaceDetect
cv2.namedWindow('FaceDetect', flags=cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO | cv2.WINDOW_GUI_EXPANDED)
# Create a VideoCapture
cap = cv2.VideoCapture(0)
# Set the capture buffer size
cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)
print('IP camera opened: {}'.format(cap.isOpened()))

def btm_servo_control(offset_x):
    '''
    Proportional (open-loop) control of the bottom servo
    '''
    global offset_dead_block  # dead-zone size for the offset
    global btm_kp  # proportional gain of the servo rotation
    global last_btm_degree  # previous angle of the bottom servo

    # Ignore offsets below the dead-zone threshold
    if abs(offset_x) < offset_dead_block:
        offset_x = 0

    # offset_x is in [-1, 1] (see calculate_offset)
    delta_degree = offset_x * btm_kp
    # Compute the new angle of the bottom servo
    next_btm_degree = last_btm_degree + delta_degree
    # Clamp to the valid range
    if next_btm_degree < 0:
        next_btm_degree = 0
    elif next_btm_degree > 180:
        next_btm_degree = 180
    return int(next_btm_degree)

def top_servo_control(offset_y):
    '''
    Proportional (open-loop) control of the top servo
    '''
    global offset_dead_block
    global top_kp  # proportional gain of the servo rotation
    global last_top_degree  # previous angle of the top servo

    # Ignore offsets below the dead-zone threshold
    if abs(offset_y) < offset_dead_block:
        offset_y = 0

    # offset_y *= -1
    # offset_y is in [-1, 1] (see calculate_offset)
    delta_degree = offset_y * top_kp
    # New angle of the top servo
    next_top_degree = last_top_degree + delta_degree
    # Clamp to the valid range
    if next_top_degree < 0:
        next_top_degree = 0
    elif next_top_degree > 180:
        next_top_degree = 180
    return int(next_top_degree)

def face_filter(faces):
    '''
    Filter the detected faces
    '''
    if len(faces) == 0:
        return None
    # Currently keeps the face with the largest area in the frame
    max_face = max(faces, key=lambda face: face[2]*face[3])
    (x, y, w, h) = max_face
    if w < 10 or h < 10:
        return None
    return max_face

def calculate_offset(img_width, img_height, face):
    '''
    Compute the offset of the face within the frame.
    Offsets range over [-1, 1].
    '''
    (x, y, w, h) = face
    face_x = float(x + w/2.0)
    face_y = float(y + h/2.0)
    # Offset of the face from the frame center along the X axis
    offset_x = float(face_x / img_width - 0.5) * 2
    # Offset of the face from the frame center along the Y axis
    offset_y = float(face_y / img_height - 0.5) * 2
    return (offset_x, offset_y)

def update_btm_kp(value):
    # Update the proportional gain of the bottom servo
    global btm_kp
    btm_kp = value

def update_top_kp(value):
    # Update the proportional gain of the top servo
    global top_kp
    top_kp = value

def set_servo_angle(channel, angle):
    # Pulse width is 500 + 11*angle microseconds within a 20000 us (50 Hz)
    # frame, scaled to the PCA9685's 4096 ticks
    date = 4096*((angle*11)+500)/20000
    pwm.setPWM(channel, int(date))

def get_servo_angle(channel):
    date = pwm.getPWM(channel+1)
    angle = (20000*date - 500*11)/(4096*11) - 45
    print(str(channel) + ':' + str(int(angle)))
    return angle

# Initialize the servo angles
last_btm_degree = get_servo_angle(1)
last_top_degree = get_servo_angle(2)

while cap.isOpened():
    # TODO read the most recent frame
    ret, img = cap.read()
    # Flip the frame horizontally
    img = cv2.flip(img, 1)
    # Convert the color image to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Detect faces in the frame
    faces = FaceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5
    )
    # Filter the faces
    face = face_filter(faces)
    if face is not None:
        # There is a face in the current frame
        (x, y, w, h) = face
        # Draw a rectangle on the original color image
        cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 4)
        img_height, img_width, _ = img.shape
        print("img h:{} w:{}".format(img_height, img_width))
        # Compute the offsets along the X and Y axes
        (offset_x, offset_y) = calculate_offset(img_width, img_height, face)
        # Compute the next angle for each servo
        next_btm_degree = btm_servo_control(offset_x)
        next_top_degree = top_servo_control(offset_y)
        # Move the servos
        #set_cloud_platform_degree(next_btm_degree, next_top_degree)
        set_servo_angle(1, next_btm_degree)
        set_servo_angle(2, next_top_degree)
        # Record the new angles
        last_btm_degree = next_btm_degree
        last_top_degree = next_top_degree
        print("X offset: {} Y offset: {}".format(offset_x, offset_y))
        print('bottom angle: {} top angle: {}'.format(next_btm_degree, next_top_degree))
    # Show the image img in the FaceDetect window
    cv2.imshow('FaceDetect', img)
    # Wait for a keyboard event
    key = cv2.waitKey(1)
    if key == ord('q'):
        # Quit the program
        break
    elif key == ord('r'):
        print('servo reset')
        # Reset the servos to their initial angles
        last_btm_degree = 100
        last_top_degree = 100
        #set_cloud_platform_degree(last_btm_degree, last_top_degree)
        set_servo_angle(1, last_btm_degree)
        set_servo_angle(2, last_top_degree)

# Release the VideoCapture
cap.release()
# Close all windows
cv2.destroyAllWindows()
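For reference, a worked instance of the pulse math used by set_servo_angle above, under the script's own constants (50 Hz frame, 500 us base pulse, 11 us per degree):

# At 50 Hz one PWM frame lasts 20000 us and is divided into 4096 ticks.
angle = 90
pulse_us = angle * 11 + 500        # 1490 us pulse at 90 degrees
ticks = 4096 * pulse_us // 20000   # the value set_servo_angle sends to setPWM
print(ticks)                       # -> 305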
import numpy as np
import cv2
import sys
import datetime

class item:
    def __init__(self):
        self.trackid = 0
        self.age = 0
        self.gender = 0
        self.centerx = 0
        self.centery = 0
        self.rectx = 0
        self.recty = 0
        self.rectw = 0
        self.recth = 0
        self.p1 = []
        self.p2 = []

def CatVideo():
    cv2.namedWindow("CaptureFace")
    cap = cv2.VideoCapture(0)
    classfier = cv2.CascadeClassifier("haarcascade_frontalface_alt2.xml")
    mtTracker = cv2.MultiTracker_create()
    isdetected = False
    face_object_flag = False
    facelist = []
    facelist_save = []
    idCount = 0
    recordtime = datetime.datetime.now()
    while cap.isOpened():
        ok, frame = cap.read()
        if not ok:
            print('Failed to read video')
            break
        curtime = datetime.datetime.now()
        # Force a fresh detection pass every 2 seconds to re-calibrate the trackers
        if (curtime - recordtime).seconds >= 2:
            isdetected = False
            print("update detection box")
        if not isdetected:
            grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            faceRects = classfier.detectMultiScale(grey, scaleFactor=1.2,
                                                   minNeighbors=3, minSize=(32, 32))
            facelist = []
            if len(faceRects) > 0:
                # Rebuild the multi-tracker from the fresh detections
                mtTracker = cv2.MultiTracker_create()
                for faceRect in faceRects:
                    x, y, w, h = faceRect
                    #cv2.rectangle(frame, (x - 10, y - 10), (x + w + 10, y + h + 10), (0,255,0), 3)
                    # Define an initial bounding box
                    bbox = (x, y, w, h)
                    ok = mtTracker.add(cv2.TrackerKCF_create(), frame, bbox)
                    isdetected = True
                    recordtime = curtime
                    cx = int(x + w/2)
                    cy = int(y + h/2)
                    p1 = (int(x), int(y))  # top-left point
                    p2 = (int(x + w), int(y + h))
                    data = item()
                    data.centerx = cx
                    data.centery = cy
                    data.rectx = x
                    data.recty = y
                    data.recth = h
                    data.rectw = w
                    data.p1 = p1
                    data.p2 = p2
                    #print("test : " + str(cx))
                    facelist.append(data)
                print(len(facelist))
                # Assign a track ID to each detection by matching it against
                # the faces saved from the previous pass
                for i in range(len(facelist)):
                    face_object_flag = False
                    cv2.rectangle(frame, facelist[i].p1, facelist[i].p2, (200, 0, 0))
                    for j in range(len(facelist_save)):
                        rate = 50
                        max_centerx = facelist_save[j].p2[0] + rate
                        max_centery = facelist_save[j].p2[1] + rate
                        min_centerx = facelist_save[j].p1[0] - rate
                        min_centery = facelist_save[j].p1[1] - rate
                        if (min_centerx < facelist[i].centerx < max_centerx) or \
                           (min_centery < facelist[i].centery < max_centery):
                            # Same face as the previous pass: keep its track ID
                            face_object_flag = True
                            facelist[i].trackid = facelist_save[j].trackid
                            break
                    if not face_object_flag:
                        # Unmatched face: assign a fresh track ID
                        idCount += 1
                        facelist[i].trackid = idCount
                    cv2.putText(frame, "ID : " + str(facelist[i].trackid),
                                facelist[i].p1, cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
                facelist_save = facelist
        # Start timer
        timer = cv2.getTickCount()
        # Update tracker
        ok, boxes = mtTracker.update(frame)
        print(ok, boxes)
        # Calculate Frames per second (FPS)
        fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)
        # Draw bounding boxes
        if ok:
            # Tracking success
            for newbox in boxes:
                p1 = (int(newbox[0]), int(newbox[1]))  # top-left point
                p2 = (int(newbox[0] + newbox[2]), int(newbox[1] + newbox[3]))
                #cv2.rectangle(frame, p1, p2, (200,0,0))
                cx = int(newbox[0] + newbox[2]/2)
                cy = int(newbox[1] + newbox[3]/2)
                data = item()
                data.centerx = cx
                data.centery = cy
                data.rectx = newbox[0]
                data.recty = newbox[1]
                data.rectw = newbox[2]
                data.recth = newbox[3]
                data.p1 = p1
                data.p2 = p2
                facelist.append(data)
        else:
            # Tracking failure
            cv2.putText(frame, "Tracking failure detected", (100, 80),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)
        # Display FPS on frame
        cv2.putText(frame, "FPS : " + str(int(fps)), (100, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)
        cv2.imshow("CaptureFace", frame)
        if cv2.waitKey(10) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()

CatVideo()
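To make the ID-matching gate in CatVideo concrete, here is a small illustration with made-up coordinates; rate widens the saved face's box by 50 pixels on each side, and the `or` means a match on either axis is enough:

rate = 50
p1, p2 = (100, 100), (200, 200)  # saved face box corners from the last pass
center = (230, 400)              # center of a new detection
same_face = (p1[0] - rate < center[0] < p2[0] + rate) or \
            (p1[1] - rate < center[1] < p2[1] + rate)
print(same_face)  # -> True: x=230 falls inside (50, 250) even though y does not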
@@ -2,14 +2,15 @@ from ctypes import *
import ctypes
import cv2
so = ctypes.cdll.LoadLibrary
lib = so("/home/uboo/project/uboo/face_detect/libx64/libbeauty.so")
so = ctypes.cdll.LoadLibrary
Library_path="/home/uboo/project/uboo/face_detect/libx64/"
lib = so(Library_path+"libbeauty.so")
src=cv2.imread('/home/uboo/project/uboo/face_detect/image/image_0001.png')
cv2.imshow("src", src)
print 'start'
instance = lib.face_beauty_init("/home/uboo/project/uboo/face_detect/models/haarcascade_frontalface_alt.xml",50)
print 'start'
models_path= "/home/uboo/project/uboo/face_detect/models/"
instance = lib.face_beauty_init(models_path+"haarcascade_frontalface_alt.xml",50)
(rows, cols) = (src.shape[0], src.shape[1])
print (rows, cols)
from ctypes import *
import ctypes
import cv2

so = ctypes.cdll.LoadLibrary
Library_path = "/home/uboo/project/uboo/face_detect_git/libx64/"
lib = so(Library_path + "libtracker.so")

# Declare the signatures so the instance pointer is not truncated to a
# 32-bit int on 64-bit systems
lib.muti_tracker_init.restype = c_void_p
lib.muti_tracker_init.argtypes = [c_char_p]
lib.muti_tracker_estimate.argtypes = [c_void_p, c_int, c_int,
                                      POINTER(c_ubyte), c_char_p, c_int, c_int]
lib.muti_tracker_destory.argtypes = [c_void_p]

cap = cv2.VideoCapture(0)
cap.set(3, 640)  # frame width
cap.set(4, 480)  # frame height

print('start')
models_path = "/home/uboo/project/uboo/face_detect_git/models/"
# c_char_p expects bytes, so encode the model path
instance = lib.muti_tracker_init((models_path + "haarcascade_frontalface_alt.xml").encode())

while cap.isOpened():
    ret, img = cap.read()
    img = cv2.flip(img, 1)
    (rows, cols) = (img.shape[0], img.shape[1])
    print(rows, cols)
    ret = lib.muti_tracker_estimate(instance, rows, cols,
                                    img.ctypes.data_as(POINTER(c_ubyte)),
                                    b"KCF", 50, 100)
    print('result: ' + str(ret))
    cv2.imshow("FaceDetect", img)
    k = cv2.waitKey(1)
    if k == ord('q'):
        break

lib.muti_tracker_destory(instance)
print("end")