add YuNet C++ demo (#138)
* add cpp demo for yunet
* add a new line before end of file
* new line before eof
- CMakeLists.txt +11 -0
- README.md +19 -0
- demo.cpp +220 -0
CMakeLists.txt
ADDED
@@ -0,0 +1,11 @@
cmake_minimum_required(VERSION 3.24.0)
project(opencv_zoo_face_detection_yunet)

set(OPENCV_VERSION "4.7.0")
set(OPENCV_INSTALLATION_PATH "" CACHE PATH "Where to look for OpenCV installation")

# Find OpenCV
find_package(OpenCV ${OPENCV_VERSION} REQUIRED HINTS ${OPENCV_INSTALLATION_PATH})

add_executable(demo demo.cpp)
target_link_libraries(demo ${OpenCV_LIBS})
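# Example configure step (matches the README below): pass the installation path
# as a cache variable, e.g.
#   cmake -B build -D OPENCV_INSTALLATION_PATH=/path/to/opencv/installation .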
README.md
CHANGED
@@ -20,6 +20,8 @@ Results of accuracy evaluation with [tools/eval](../../tools/eval).
 
 ## Demo
 
+### Python
+
 Run the following command to try the demo:
 
 ```shell
@@ -32,6 +34,23 @@ python demo.py --input /path/to/image
 python demo.py --help
 ```
 
+### C++
+
+Install the latest OpenCV and CMake >= 3.24.0 to get started with:
+
+```shell
+# A typical and default installation path of OpenCV is /usr/local
+cmake -B build -D OPENCV_INSTALLATION_PATH=/path/to/opencv/installation .
+cmake --build build
+
+# detect on camera input
+./build/demo
+# detect on an image
+./build/demo -i=/path/to/image
+# get help messages
+./build/demo -h
+```
+
 ### Example outputs
 
 
demo.cpp
ADDED
@@ -0,0 +1,220 @@
#include "opencv2/opencv.hpp"

#include <map>
#include <vector>
#include <string>
#include <iostream>

const std::map<std::string, int> str2backend{
    {"opencv", cv::dnn::DNN_BACKEND_OPENCV}, {"cuda", cv::dnn::DNN_BACKEND_CUDA},
    {"timvx",  cv::dnn::DNN_BACKEND_TIMVX},  {"cann", cv::dnn::DNN_BACKEND_CANN}
};
const std::map<std::string, int> str2target{
    {"cpu", cv::dnn::DNN_TARGET_CPU}, {"cuda",      cv::dnn::DNN_TARGET_CUDA},
    {"npu", cv::dnn::DNN_TARGET_NPU}, {"cuda_fp16", cv::dnn::DNN_TARGET_CUDA_FP16}
};

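// The map keys above are the accepted values of the -b/-t command-line flags
// defined in main(), e.g. `./build/demo -b=cuda -t=cuda` selects the CUDA pair.
// Non-default pairs require an OpenCV build with the corresponding support.
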
class YuNet
{
public:
    YuNet(const std::string& model_path,
          const cv::Size& input_size = cv::Size(320, 320),
          float conf_threshold = 0.6f,
          float nms_threshold = 0.3f,
          int top_k = 5000,
          int backend_id = 0,
          int target_id = 0)
        : model_path_(model_path), input_size_(input_size),
          conf_threshold_(conf_threshold), nms_threshold_(nms_threshold),
          top_k_(top_k), backend_id_(backend_id), target_id_(target_id)
    {
        model = cv::FaceDetectorYN::create(model_path_, "", input_size_, conf_threshold_, nms_threshold_, top_k_, backend_id_, target_id_);
    }

    void setBackendAndTarget(int backend_id, int target_id)
    {
        backend_id_ = backend_id;
        target_id_ = target_id;
        model = cv::FaceDetectorYN::create(model_path_, "", input_size_, conf_threshold_, nms_threshold_, top_k_, backend_id_, target_id_);
    }

    /* Overwrite the input size when creating the model. Size format: [Width, Height]. */
    void setInputSize(const cv::Size& input_size)
    {
        input_size_ = input_size;
        model->setInputSize(input_size_);
    }

    cv::Mat infer(const cv::Mat& image)
    {
        cv::Mat res;
        model->detect(image, res);
        return res;
    }

private:
    cv::Ptr<cv::FaceDetectorYN> model;

    std::string model_path_;
    cv::Size input_size_;
    float conf_threshold_;
    float nms_threshold_;
    int top_k_;
    int backend_id_;
    int target_id_;
};

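// Layout of a `faces` row as produced by cv::FaceDetectorYN (15 floats per detection):
// [0..3] bounding box x, y, w, h; [4..13] five (x, y) landmark pairs
// (right eye, left eye, nose tip, right/left mouth corners); [14] confidence score.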
cv::Mat visualize(const cv::Mat& image, const cv::Mat& faces, float fps = -1.f)
{
    static cv::Scalar box_color{0, 255, 0};
    static std::vector<cv::Scalar> landmark_color{
        cv::Scalar(255,   0,   0), // right eye
        cv::Scalar(  0,   0, 255), // left eye
        cv::Scalar(  0, 255,   0), // nose tip
        cv::Scalar(255,   0, 255), // right mouth corner
        cv::Scalar(  0, 255, 255)  // left mouth corner
    };
    static cv::Scalar text_color{0, 255, 0};

    auto output_image = image.clone();

    if (fps >= 0)
    {
        cv::putText(output_image, cv::format("FPS: %.2f", fps), cv::Point(0, 15), cv::FONT_HERSHEY_SIMPLEX, 0.5, text_color, 2);
    }

    for (int i = 0; i < faces.rows; ++i)
    {
        // Draw bounding boxes
        int x1 = static_cast<int>(faces.at<float>(i, 0));
        int y1 = static_cast<int>(faces.at<float>(i, 1));
        int w = static_cast<int>(faces.at<float>(i, 2));
        int h = static_cast<int>(faces.at<float>(i, 3));
        cv::rectangle(output_image, cv::Rect(x1, y1, w, h), box_color, 2);

        // Confidence as text
        float conf = faces.at<float>(i, 14);
        cv::putText(output_image, cv::format("%.4f", conf), cv::Point(x1, y1 + 12), cv::FONT_HERSHEY_DUPLEX, 0.5, text_color);

        // Draw landmarks
        for (int j = 0; j < static_cast<int>(landmark_color.size()); ++j)
        {
            int x = static_cast<int>(faces.at<float>(i, 2 * j + 4));
            int y = static_cast<int>(faces.at<float>(i, 2 * j + 5));
            cv::circle(output_image, cv::Point(x, y), 2, landmark_color[j], 2);
        }
    }
    return output_image;
}

int main(int argc, char** argv)
{
    cv::CommandLineParser parser(argc, argv,
        "{help h | | Print this message}"
        "{input i | | Set input to a certain image, omit if using camera}"
        "{model m | face_detection_yunet_2022mar.onnx | Set path to the model}"
        "{backend b | opencv | Set DNN backend}"
        "{target t | cpu | Set DNN target}"
        "{save s | false | Whether to save result image or not}"
        "{vis v | false | Whether to visualize result image or not}"
        /* model params below */
        "{conf_threshold | 0.9 | Set the minimum confidence for the model to identify a face. Filter out faces with conf < conf_threshold}"
        "{nms_threshold | 0.3 | Set the threshold to suppress overlapped boxes. Suppress boxes if IoU(box1, box2) >= nms_threshold; the one with the higher score is kept.}"
        "{top_k | 5000 | Keep top_k bounding boxes before NMS. Setting a lower value may help speed up postprocessing.}"
    );
    if (parser.has("help"))
    {
        parser.printMessage();
        return 0;
    }

    std::string input_path = parser.get<std::string>("input");
    std::string model_path = parser.get<std::string>("model");
    std::string backend = parser.get<std::string>("backend");
    std::string target = parser.get<std::string>("target");
    bool save_flag = parser.get<bool>("save");
    bool vis_flag = parser.get<bool>("vis");

    // model params
    float conf_threshold = parser.get<float>("conf_threshold");
    float nms_threshold = parser.get<float>("nms_threshold");
    int top_k = parser.get<int>("top_k");
    const int backend_id = str2backend.at(backend);
    const int target_id = str2target.at(target);

    // Instantiate YuNet
    YuNet model(model_path, cv::Size(320, 320), conf_threshold, nms_threshold, top_k, backend_id, target_id);

    // If input is an image
    if (!input_path.empty())
    {
        auto image = cv::imread(input_path);
        if (image.empty())
        {
            std::cout << "Cannot read image from " << input_path << "\n";
            return -1;
        }

        // Inference
        model.setInputSize(image.size());
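        // FaceDetectorYN requires the configured input size to match the size
        // of the image passed to detect(), hence the setInputSize() call above.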
        auto faces = model.infer(image);

        // Print faces
        std::cout << cv::format("%d faces detected:\n", faces.rows);
        for (int i = 0; i < faces.rows; ++i)
        {
            int x1 = static_cast<int>(faces.at<float>(i, 0));
            int y1 = static_cast<int>(faces.at<float>(i, 1));
            int w = static_cast<int>(faces.at<float>(i, 2));
            int h = static_cast<int>(faces.at<float>(i, 3));
            float conf = faces.at<float>(i, 14);
            std::cout << cv::format("%d: x1=%d, y1=%d, w=%d, h=%d, conf=%.4f\n", i, x1, y1, w, h, conf);
        }

        // Draw results on the input image
        if (save_flag || vis_flag)
        {
            auto res_image = visualize(image, faces);
            if (save_flag)
            {
                std::cout << "Results are saved to result.jpg\n";
                cv::imwrite("result.jpg", res_image);
            }
            if (vis_flag)
            {
                cv::namedWindow(input_path, cv::WINDOW_AUTOSIZE);
                cv::imshow(input_path, res_image);
                cv::waitKey(0);
            }
        }
    }
    else // Call default camera
    {
        int device_id = 0;
        auto cap = cv::VideoCapture(device_id);
        int w = static_cast<int>(cap.get(cv::CAP_PROP_FRAME_WIDTH));
        int h = static_cast<int>(cap.get(cv::CAP_PROP_FRAME_HEIGHT));
        model.setInputSize(cv::Size(w, h));

        auto tick_meter = cv::TickMeter();
        cv::Mat frame;
        while (cv::waitKey(1) < 0)
        {
            bool has_frame = cap.read(frame);
            if (!has_frame)
            {
                std::cout << "No frames grabbed! Exiting ...\n";
                break;
            }

            // Inference
            tick_meter.start();
            cv::Mat faces = model.infer(frame);
            tick_meter.stop();

            // Draw results on the input image
            auto res_image = visualize(frame, faces, static_cast<float>(tick_meter.getFPS()));
            // Visualize in a new window
            cv::imshow("YuNet Demo", res_image);

            tick_meter.reset();
        }
    }

    return 0;
}