Commit 42a5d815 authored by jiajunjie's avatar jiajunjie

improve face tracking modules, insert deep learning model

parent ea98e345
......@@ -7,7 +7,7 @@ SET(LIBRARY_OUTPUT_PATH ${PROJECT_BINARY_DIR})
# vars
set(OPENCV_INCLUDE /usr/local/include/)
set(OPENCV_LIBPATH /usr/local/lib/)
set(OPENCV_LIBS opencv_highgui opencv_core opencv_imgproc opencv_videoio opencv_tracking opencv_ml opencv_objdetect opencv_imgcodecs)
set(OPENCV_LIBS opencv_highgui opencv_core opencv_imgproc opencv_videoio opencv_tracking opencv_dnn opencv_objdetect opencv_imgcodecs)
# compile flags
......
本目录下存放人脸跟踪接口代码
2019-5-28 优化人脸跟踪接口,加入人脸检测器,增加错误码,简化上层调用逻辑,持续优化中...
2019-5-29 修改计数不清除bug
2019-5-30 人脸跟踪重大升级,加入深度学习人脸检测模型,提高准确率到99%,降低CPU使用率50%
构建方法:
1、进入相应目录下
2、mkdir build #构建build目录
......
......@@ -3,7 +3,7 @@
*
* @brief
*
* 定义戴口罩检测接口文件
* 定义多目标跟踪接口文件
*
* @copyright
*
......@@ -21,9 +21,6 @@
#include "tracker.h"
#define IMG_WIDHT_SIZE 128
#define IMG_HEIGHT_SIZE 128
struct tracker_param
{
bool bParamIsOk;
......@@ -43,6 +40,8 @@ struct tracker_param
std::vector<Ptr<Tracker> > algorithms;
dnn::Net net;
tracker_param()
{
bParamIsOk = false;
......@@ -51,7 +50,6 @@ struct tracker_param
detect_time = clock();
algorithms.clear();
}
};
struct muti_tracker_instance
......@@ -115,6 +113,38 @@ muti_tracker_instance * muti_tracker_init(
return instance;
}
muti_tracker_instance * muti_tracker_init_dnn(
const char* pmodel_prototxt,
const char* pmodel_binary
)
{
if( NULL == pmodel_prototxt || NULL == pmodel_binary ) {
cout << "The prototxt or model file can not be empty!" << endl;
return NULL;
}
muti_tracker_instance *instance = new muti_tracker_instance();
if (NULL == instance) {
return NULL;
}
instance->_param = std::make_shared<tracker_param>();
//! [Initialize network]
//Reads a network model stored in Caffe model in memory
instance->_param->net = readNetFromCaffe(pmodel_prototxt, pmodel_binary);
if (instance->_param->net.empty())
{
cout << "Can't load network by using the following files: " << endl;
cout << "prototxt: " << pmodel_prototxt << endl;
cout << "caffemodel: " << pmodel_binary << endl;
delete instance;
return NULL;
}
instance->_param->bParamIsOk = true;
return instance;
}
FACE_ERROR_E muti_tracker_estimate(
muti_tracker_instance *instance,
......@@ -235,6 +265,144 @@ FACE_ERROR_E muti_tracker_estimate(
return FACE_OK;
}
/**
 * @brief Run one frame of face tracking with periodic re-detection by the
 *        DNN face detector.
 *
 * Every call advances the tracker on the supplied frame; when more than
 * `updatetime` milliseconds have elapsed since the last detection, the SSD
 * network is re-run to refresh the tracked boxes, and tracker IDs are
 * carried over from the previous set of boxes by centre-point proximity.
 * Draws rectangles and ID labels directly into `imgdata`.
 *
 * @param instance    handle returned by muti_tracker_init_dnn()
 * @param rows        image height in pixels
 * @param cols        image width in pixels
 * @param imgdata     pixel buffer; wrapped as CV_8UC3, so it is assumed to
 *                    be 3-channel BGR of rows*cols*3 bytes — TODO confirm
 * @param trackerType tracker algorithm name passed to createTrackerByName
 * @param extend      margin (pixels) used when matching boxes to old IDs
 * @param updatetime  detector refresh interval in milliseconds
 * @param threshold   detection confidence threshold (0~1)
 *
 * @return FACE_OK on success, or a FACE_ERR_* code
 */
FACE_ERROR_E muti_tracker_estimate_dnn(
muti_tracker_instance *instance,
int rows,
int cols,
unsigned char* imgdata,
const char* trackerType,
const int extend,
const int updatetime,
const float threshold/*,
struct tracker_info* ptracker*/
)
{
if (!instance || !instance->_param->bParamIsOk) {
cout << "param is NULL" << endl;
return FACE_ERR_INVALID_PARAM;
}
if (NULL == imgdata) {
cout << "imgdata is empty" << endl;
return FACE_ERR_INVALID_IMAGE;
}
// Wrap the caller's buffer without copying; `frame` aliases imgdata, so
// the rectangles/labels drawn below are visible to the caller.
cv::Mat frame(rows, cols, CV_8UC3, (void *)imgdata);
if (frame.empty())
{
cout << "input image can not be empty!" << endl;
return FACE_ERR_INVALID_IMAGE;
}
// Elapsed time since the last detection, measured with the clock value
// stored on the PREVIOUS call (now_time is refreshed further below).
clock_t detect_time = instance->_param->detect_time;
clock_t now_time = instance->_param->now_time;
double diffticks = now_time - detect_time;
double diffms = diffticks / (CLOCKS_PER_SEC / 1000);
if(diffms > updatetime )
{
// NOTE(review): frame was constructed as CV_8UC3 above, so channels()
// can never be 4 here — this conversion looks unreachable; confirm.
if (frame.channels() == 4)
cvtColor(frame, frame, COLOR_BGRA2BGR);
// SSD face detector preprocessing: 300x300 input, mean subtraction.
const double inScaleFactor = 1.0;
const Scalar meanVal(104.0, 117.0, 123.0);
Mat inputBlob = blobFromImage(frame, inScaleFactor,
Size(300, 300), meanVal, false, false);
instance->_param->net.setInput(inputBlob, "data"); //set the network input
cv::Mat detection = instance->_param->net.forward("detection_out");
// Reshape the 4-D detection blob into a [numDetections x 7] matrix.
Mat detectionMat(detection.size[2], detection.size[3], CV_32F, detection.ptr<float>());
vector<Rect> facesvec;
float confidenceThreshold = threshold;
for (int i = 0; i < detectionMat.rows; i++)
{
float confidence = detectionMat.at<float>(i, 2); // column 2 holds the confidence score
if (confidence > confidenceThreshold) // keep detections above the threshold
{
// Columns 3..6 are normalised box corners; scale to pixel coordinates.
int xLeftBottom = static_cast<int>(detectionMat.at<float>(i, 3) * frame.cols);
int yLeftBottom = static_cast<int>(detectionMat.at<float>(i, 4) * frame.rows);
int xRightTop = static_cast<int>(detectionMat.at<float>(i, 5) * frame.cols);
int yRightTop = static_cast<int>(detectionMat.at<float>(i, 6) * frame.rows);
cv::Rect facerect((int)xLeftBottom, (int)yLeftBottom,
(int)(xRightTop - xLeftBottom),
(int)(yRightTop - yLeftBottom));
facesvec.push_back(facerect);
}
}
// Rebuild the tracker set from scratch with the fresh detections.
instance->_param->algorithms.clear();
instance->_param->objects.boxes.clear();
instance->_param->objects.trackerIds.clear();
int face_num = facesvec.size();
// NOTE(review): returning here leaves now_time/detect_time untouched, so
// the detector runs again on the very next call — confirm this is intended.
if(face_num == 0)
return FACE_ERR_NO_FACE;
for(int i =0; i<face_num; i++ ) {
rectangle(frame, facesvec[i], Scalar(255, 0, 0), 2, 1);
instance->_param->objects.boxes.push_back(facesvec[i]);//push boxes
instance->_param->algorithms.push_back( createTrackerByName(trackerType) );
}
// Replace the old MultiTracker with a fresh one seeded from the new boxes.
MultiTracker trackers;
instance->_param->mttrackers = trackers;
instance->_param->mttrackers.add(instance->_param->algorithms, frame, instance->_param->objects.boxes);
// NOTE(review): detect_time is set to the PREVIOUS call's clock value,
// not clock(); presumably clock() was intended — verify the refresh cadence.
instance->_param->detect_time = now_time;
}
// Record this call's clock for the next invocation's elapsed-time check.
instance->_param->now_time = clock();
// Carry tracker IDs over: a current box keeps an old ID when its centre
// falls inside the old box grown by `extend` pixels on every side.
vector<Rect2d> objects_boxes = instance->_param->objects.boxes;
vector<Rect2d> objects_save_boxes = instance->_param->objects_save.boxes;
for(int i=0; i< objects_boxes.size(); i++)
{
bool track_object_flag =false;
int cx = objects_boxes[i].x + objects_boxes[i].width/2;
int cy = objects_boxes[i].y + objects_boxes[i].height/2;
for(int j=0; j< objects_save_boxes.size(); j++)
{
int max_objects_save_x =
(int)(objects_save_boxes[j].x + objects_save_boxes[j].width) +extend;
int max_objects_save_y =
(int)(objects_save_boxes[j].y + objects_save_boxes[j].height) +extend;
int min_objects_save_x =(int)(objects_save_boxes[j].x - extend);
int min_objects_save_y =(int)(objects_save_boxes[j].y - extend);
if( cx > min_objects_save_x && cx < max_objects_save_x
&& cy > min_objects_save_y && cy < max_objects_save_y )
{
track_object_flag = true;
instance->_param->objects.trackerIds.push_back( instance->_param->objects_save.trackerIds[j] );
break;
}
}//end of for j
if( !track_object_flag ) {//add objects: unmatched box gets a brand-new ID
instance->_param->counts++;
instance->_param->objects.trackerIds.push_back( instance->_param->counts );
}
// Label the box with its tracker ID just above the top-left corner.
string strIdnum = " ID: "+ Int_String(instance->_param->objects.trackerIds[i]);
putText(frame, strIdnum,cv::Point(instance->_param->objects.boxes[i].x,
instance->_param->objects.boxes[i].y-5),FONT_HERSHEY_PLAIN,2,Scalar(0,0,255),1,8);
}//end of for i
// Persist the current boxes/IDs for ID matching on the next call, then
// advance all trackers on this frame.
instance->_param->objects_save = instance->_param->objects;
instance->_param->mttrackers.update(frame, instance->_param->objects.boxes);
return FACE_OK;
}
FACE_ERROR_E muti_tracker_destory(
muti_tracker_instance *instance
)
......
......@@ -3,7 +3,7 @@
*
* @brief
*
* 定义多目标跟踪接口
* 定义多目标跟踪接口
*
* @copyright
*
......@@ -15,16 +15,13 @@
#include "common.h"
#include <opencv2/opencv.hpp>
#include <opencv2/objdetect.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/tracking.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/ml.hpp>
#include <opencv2/dnn.hpp>
using namespace cv;
using namespace std;
using namespace cv::dnn;
extern "C"
{
......@@ -53,7 +50,21 @@ extern "C"
);
/**
* @brief 执行戴口罩检测判断
* @brief 初始化多目标追踪功能-深度学习
*
* @param pmodel_prototxt 输入人脸检测深度学习模型配置文件
* @param pmodel_binary 输入人脸检测深度学习模型文件
*
* @return muti_tracker_instance 返回窗口句柄指针
*/
muti_tracker_instance* muti_tracker_init_dnn(
const char* pmodel_prototxt,
const char* pmodel_binary
);
/**
* @brief 执行人脸跟踪
*
* @param muti_tracker_instance 输入窗口句柄指针
* @param rows 输入需要检测的图像行
......@@ -78,6 +89,33 @@ extern "C"
struct tracker_info* ptracker*/
);
/**
* @brief 执行人脸跟踪-深度学习
*
* @param muti_tracker_instance 输入窗口句柄指针
* @param rows 输入需要检测的图像行
* @param cols 输入需要检测的图像列
* @param imgdata 输入需要检测的图像数据
* @param trackerType 采用的跟踪方式
* @param extend 对边框增减的大小
* @param updatetime 检测器的校准时间(毫秒级)
* @param threshold 预测置信度(0~1),推荐值0.9
*
* @return FACE_ERROR_E 返回错误码
*/
FACE_ERROR_E muti_tracker_estimate_dnn(
muti_tracker_instance *instance,
int rows,
int cols,
unsigned char* imgdata,
const char* trackerType,
const int extend,
const int updatetime,
const float threshold/*,
struct tracker_info* ptracker*/
);
/**
* @brief 销毁句柄
*
......
No preview for this file type
This diff is collapsed.
# Demo: drive libtracker.so's DNN face tracker from a webcam via ctypes.
from ctypes import *
import ctypes
import cv2

so = ctypes.cdll.LoadLibrary
#library_path="/home/uboo/project/uboo/face_detect_git/c++/tracker/build/"
library_path="/home/uboo/project/uboo/face_detect_git/libx64/"
lib = so(library_path+"libtracker.so")

# Declare the C signatures explicitly.  Without restype=c_void_p the handle
# returned by muti_tracker_init_dnn is truncated to a C int on 64-bit
# platforms, corrupting the pointer that is later passed back in.
lib.muti_tracker_init_dnn.restype = c_void_p
lib.muti_tracker_init_dnn.argtypes = [c_char_p, c_char_p]
lib.muti_tracker_estimate_dnn.restype = c_int
lib.muti_tracker_estimate_dnn.argtypes = [c_void_p, c_int, c_int,
                                          POINTER(c_ubyte), c_char_p,
                                          c_int, c_int, c_float]
lib.muti_tracker_destory.argtypes = [c_void_p]

cap = cv2.VideoCapture(0)
cap.set(3,640)   # frame width
cap.set(4,480)   # frame height
print('start')

models_path= "/home/uboo/project/uboo/face_detect_git/models/"
instance = lib.muti_tracker_init_dnn(
    (models_path+"res10_300x300_ssd_deploy.prototxt").encode(),
    (models_path+"res10_300x300_ssd_iter_140000_fp16.caffemodel").encode())

while cap.isOpened():
    ret, img = cap.read()
    img = cv2.flip(img, 1)
    (rows, cols) = (img.shape[0], img.shape[1])
    print (rows, cols)
    # extend=50 px ID-match margin, updatetime=100 ms detector refresh,
    # confidence threshold 0.9 (recommended by the C API docs).
    ret = lib.muti_tracker_estimate_dnn(instance, rows, cols,
                                        img.ctypes.data_as(POINTER(c_ubyte)),
                                        b"KCF", 50, 100, c_float(0.9))
    print('result: ' +str(ret))
    cv2.imshow("FaceDetect", img)
    k = cv2.waitKey(1)
    if( k== ord('q')):
        break

lib.muti_tracker_destory(instance)
# Release the camera and close the preview window on exit.
cap.release()
cv2.destroyAllWindows()
print("end")
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment