-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathmain.cpp
41 lines (32 loc) · 1.42 KB
/
main.cpp
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
#include "engine.hpp"
#include <opencv2/opencv.hpp>
#include <iostream>
#include <vector>
#include <string>
// Basically everything has been stolen from https://github.com/FourierMourier/yolov8-onnx-cpp
// I've just reformatted and simplified everything for this project
// You should be able to run Infer in a loop at max speed, which was an issue in the original project
// Additionally, I removed all but Detection tasks, as this was what's important to me :)
// Entry point: runs YOLO detection on a single image and shows the result.
// Usage: ./app [image_path] [model_path]
//   image_path  defaults to "test.jpg"
//   model_path  defaults to "best.onnx"
int main(int argc, char* argv[])
{
    // Allow overriding the previously hard-coded paths from the command line;
    // with no arguments, behavior matches the original defaults.
    const std::string imagePath = (argc > 1) ? argv[1] : "test.jpg";
    std::wstring modelPath = L"best.onnx";
    if (argc > 2) {
        // Naive char->wchar_t widening; fine for ASCII file names.
        const std::string narrow = argv[2];
        modelPath.assign(narrow.begin(), narrow.end());
    }

    // Load the image BEFORE constructing the inferencer so a bad path
    // fails fast instead of after a (potentially slow) ONNX session init.
    cv::Mat image = cv::imread(imagePath);
    if (image.empty()) {
        std::cerr << "Error: Unable to load image '" << imagePath << "'!" << std::endl;
        return -1;
    }

    const char* logid = "yolo_inference";
    const char* provider = "CPU"; // or "CUDA"
    YoloInferencer inferencer(modelPath, logid, provider);

    // Detection thresholds: minimum confidence to keep a box, and the
    // IoU threshold used for non-maximum suppression.
    constexpr float kConfThreshold = 0.1f;
    constexpr float kIouThreshold = 0.5f;
    std::vector<Detection> detections = inferencer.infer(image, kConfThreshold, kIouThreshold);

    // Draw each detection and log its class, score, and box geometry.
    for (const auto& detection : detections) {
        cv::rectangle(image, detection.box, cv::Scalar(0, 255, 0), 2);
        std::cout << "Detection: Class=" << detection.class_id << ", Confidence=" << detection.confidence
                  << ", x=" << detection.box.x << ", y=" << detection.box.y
                  << ", width=" << detection.box.width << ", height=" << detection.box.height << std::endl;
    }

    cv::imshow("output", image);
    cv::waitKey(0); // block until a key is pressed so the window stays open
    return 0;
}