| 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240 |
-
#include "YoloFeatureExtractor.h"

#include <algorithm>
#include <chrono>
#include <fstream>
#include <iostream>
#include <stdexcept>

#include <opencv2/core/ocl.hpp>
- YoloFeatureExtractor::YoloFeatureExtractor(const std::string & modelPath, const std::string & classesPath)
- : inputWidth(640), inputHeight(640)
- {
- net = cv::dnn::readNetFromONNX(modelPath);
- loadClassNames(classesPath);
- }
- void YoloFeatureExtractor::initOpenCL()
- {
- if (cv::ocl::haveOpenCL())
- {
- std::cout << "OpenCL is available. Enabling OpenCL for DNN module." << std::endl;
- //net.setPreferableBackend(cv::dnn::DNN_BACKEND_DEFAULT);
- //net.setPreferableTarget(cv::dnn::DNN_TARGET_OPENCL);
- }
- else
- {
- std::cout << "OpenCL is not available. Using default backend and target." << std::endl;
- net.setPreferableBackend(cv::dnn::DNN_BACKEND_DEFAULT);
- net.setPreferableTarget(cv::dnn::DNN_TARGET_CPU);
- }
- }
- void YoloFeatureExtractor::loadClassNames(const std::string & file)
- {
- std::ifstream ifs(file);
- std::string line;
- while (std::getline(ifs, line))
- {
- classNames.push_back(line);
- }
- }
- std::vector<float> YoloFeatureExtractor::extractFeatures(const std::string & imagePath)
- {
- auto start_time = std::chrono::high_resolution_clock::now();
- cv::Mat image = cv::imread(imagePath);
- if (image.empty())
- {
- throw std::runtime_error("Could not load image: " + imagePath);
- }
- cv::Mat blob;
- cv::dnn::blobFromImage(image, blob, 1.0 / 255.0, cv::Size(inputWidth, inputHeight), cv::Scalar(0, 0, 0), true, false);
- net.setInput(blob);
- auto start_time2 = std::chrono::high_resolution_clock::now();
- std::vector<cv::Mat> outputs;
- net.forward(outputs, net.getUnconnectedOutLayersNames());
- auto start_time3 = std::chrono::high_resolution_clock::now();
- std::vector<float> features;
- for (size_t i = 0; i < outputs.size(); ++i)
- {
- float * data = (float *)outputs[i].data;
- int totalElements = outputs[i].total();
- for (int idx = 0; idx < totalElements; ++idx)
- {
- features.push_back(data[idx]);
- }
- }
-
- auto duration1 = std::chrono::duration_cast<std::chrono::milliseconds>(start_time2 - start_time);
- auto duration2 = std::chrono::duration_cast<std::chrono::milliseconds>(start_time3 - start_time2);
- auto totalDuration = std::chrono::duration_cast<std::chrono::milliseconds>(start_time3 - start_time);
- std::cout << "图片前期处理耗时: " << duration1.count() << " 毫秒" << std::endl;
- std::cout << "模型推理耗时: " << duration2.count() << " 毫秒" << std::endl;
- std::cout << "总耗时: " << totalDuration.count() << " 毫秒" << std::endl;
- return features;
- }
- std::vector<float> YoloFeatureExtractor::extractBackboneFeatures(const std::string & imagePath)
- {
- cv::Mat image = cv::imread(imagePath);
- if (image.empty())
- {
- throw std::runtime_error("Could not load image: " + imagePath);
- }
- cv::Mat blob;
- cv::dnn::blobFromImage(image, blob, 1.0 / 255.0, cv::Size(inputWidth, inputHeight), cv::Scalar(0, 0, 0), true, false);
- net.setInput(blob);
- std::vector<cv::String> layerNames = net.getLayerNames();
- std::vector<cv::String> backboneLayers;
- for (const auto & name : layerNames)
- {
- if (name.find("backbone") != std::string::npos ||
- name.find("conv") != std::string::npos ||
- name.find("stage") != std::string::npos)
- {
- backboneLayers.push_back(name);
- }
- }
- if (backboneLayers.empty())
- {
- backboneLayers.push_back(layerNames[layerNames.size() / 2]);
- }
- std::vector<cv::Mat> outputs;
- net.forward(outputs, backboneLayers);
- std::vector<float> features;
- for (size_t i = 0; i < outputs.size(); ++i)
- {
- cv::Mat output = outputs[i];
- features.reserve(features.size() + output.total());
- for (int j = 0; j < output.total(); ++j)
- {
- features.push_back(output.at<float>(j));
- }
- }
- return features;
- }
- std::vector<std::vector<float>> YoloFeatureExtractor::extractROIFeatures(const std::string & imagePath)
- {
- cv::Mat image = cv::imread(imagePath);
- if (image.empty())
- {
- throw std::runtime_error("Could not load image: " + imagePath);
- }
- cv::Mat blob;
- cv::dnn::blobFromImage(image, blob, 1.0 / 255.0, cv::Size(inputWidth, inputHeight), cv::Scalar(0, 0, 0), true, false);
- net.setInput(blob);
- std::vector<cv::Mat> outputs;
- net.forward(outputs, net.getUnconnectedOutLayersNames());
- const float CONFIDENCE_THRESHOLD = 0.5;
- const float NMS_THRESHOLD = 0.4;
- std::vector<int> classIds;
- std::vector<float> confidences;
- std::vector<cv::Rect> boxes;
- float x_factor = static_cast<float>(image.cols) / inputWidth;
- float y_factor = static_cast<float>(image.rows) / inputHeight;
- for (size_t outputIdx = 0; outputIdx < outputs.size(); ++outputIdx)
- {
- float * data = (float *)outputs[outputIdx].data;
- int rows = outputs[outputIdx].rows;
- int dimensions = outputs[outputIdx].cols;
- for (int i = 0; i < rows; ++i)
- {
- float objectness = data[4];
- if (objectness >= CONFIDENCE_THRESHOLD)
- {
- std::vector<float> probs;
- for (int c = 5; c < dimensions; ++c)
- {
- probs.push_back(data[c]);
- }
- int maxClassId = 0;
- float maxScore = probs[0];
- for (size_t p = 1; p < probs.size(); ++p)
- {
- if (probs[p] > maxScore)
- {
- maxScore = probs[p];
- maxClassId = static_cast<int>(p);
- }
- }
- if (maxScore > CONFIDENCE_THRESHOLD)
- {
- confidences.push_back(objectness * maxScore);
- classIds.push_back(maxClassId);
- float x = data[0];
- float y = data[1];
- float w = data[2];
- float h = data[3];
- int left = static_cast<int>((x - 0.5 * w) * x_factor);
- int top = static_cast<int>((y - 0.5 * h) * y_factor);
- int width = static_cast<int>(w * x_factor);
- int height = static_cast<int>(h * y_factor);
- boxes.push_back(cv::Rect(left, top, width, height));
- }
- }
- data += dimensions;
- }
- }
- std::vector<int> nms_result;
- cv::dnn::NMSBoxes(boxes, confidences, CONFIDENCE_THRESHOLD, NMS_THRESHOLD, nms_result);
- std::vector<std::vector<float>> roiFeatures;
- for (size_t i = 0; i < nms_result.size(); ++i)
- {
- int idx = nms_result[i];
- cv::Rect box = boxes[idx];
- box.x = std::max(0, std::min(box.x, image.cols - 1));
- box.y = std::max(0, std::min(box.y, image.rows - 1));
- box.width = std::max(0, std::min(box.width, image.cols - box.x));
- box.height = std::max(0, std::min(box.height, image.rows - box.y));
- std::vector<float> roiFeature;
- roiFeature.push_back(static_cast<float>(box.x) / image.cols);
- roiFeature.push_back(static_cast<float>(box.y) / image.rows);
- roiFeature.push_back(static_cast<float>(box.width) / image.cols);
- roiFeature.push_back(static_cast<float>(box.height) / image.rows);
- roiFeature.push_back(confidences[idx]);
- roiFeature.push_back(static_cast<float>(classIds[idx]));
- roiFeatures.push_back(roiFeature);
- }
- return roiFeatures;
- }
|