-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathmain.cpp
68 lines (48 loc) · 1.87 KB
/
main.cpp
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
#include <iostream>
#include <vector>
#include <getopt.h>
#include <opencv2/opencv.hpp>
#include <opencv2/videoio.hpp>
#include <opencv2/core.hpp>
#include <inference.h>
using namespace std;
using namespace cv;
std::vector<Detection> detectImage(cv::Mat &img,Inference &model);
void drawDetections(cv::Mat &frame,std::vector<Detection> &Detections);
int main(int argc, char **argv)
{
// yolo export model=yolov8n.pt format=onnx opset=12
Inference inf("../model/yolov8n.onnx", cv::Size(640, 640), "", true);
cv::String videoUrl = "../videos/example.mp4";
cv::VideoCapture cap(videoUrl);
cv::Mat frame;
cv::Size showSize(640,480);
bool state;
for(;;){
state = cap.read(frame);
if(!state) break;
std::vector<Detection> detections = detectImage(frame,inf);
drawDetections(frame,detections);
cv::resize(frame,frame,showSize);
cv::imshow("yolo-cpp", frame);
char key = cv::waitKey(40);
if(key == 'q') break;
}
cv::destroyAllWindows();
cap.release();
}
// Run the YOLO model on a single frame and hand back its detections.
// img:   frame to run inference on (not modified here).
// model: loaded Inference instance wrapping the ONNX network.
std::vector<Detection> detectImage(cv::Mat &img,Inference &model){
    return model.runInference(img);
}
// Annotate img in place: draw each detection's bounding box plus a filled
// label background with "<class> <confidence>" text on top of it.
// img:        frame to draw on (modified in place).
// Detections: results from Inference::runInference.
void drawDetections(cv::Mat &img,std::vector<Detection> &Detections){
for(const Detection &detection : Detections){ // const ref: avoid copying each Detection
const cv::Rect &box = detection.box;
const cv::Scalar &color = detection.color;
cv::rectangle(img, box, color, 2);
// Label text, e.g. "person 0.87" (confidence truncated to 4 chars).
std::string classString = detection.className + ' ' + std::to_string(detection.confidence).substr(0, 4);
cv::Size textSize = cv::getTextSize(classString, cv::FONT_HERSHEY_DUPLEX, 1, 2, nullptr);
cv::Rect textBox(box.x, box.y - 40, textSize.width + 10, textSize.height + 20);
// Bug fix: textBox was computed but never drawn, so the label had no
// background and the original color-on-anything text was hard to read.
// Draw the filled background in the class color, then black text over it
// (matching the upstream YOLOv8 C++ inference example).
cv::rectangle(img, textBox, color, cv::FILLED);
cv::putText(img, classString, cv::Point(box.x + 5, box.y - 10), cv::FONT_HERSHEY_DUPLEX, 1, cv::Scalar(0, 0, 0), 3, 0);
}
}