#include "YoloFeatureExtractor.h"

#include <algorithm>
#include <chrono>
#include <fstream>
#include <iostream>
#include <stdexcept>
#include <string>
#include <vector>

// Constructs the extractor: loads the ONNX model into an OpenCV DNN net and
// reads the class-name list. Network input size is fixed at 640x640.
// Throws cv::Exception if the model cannot be parsed.
YoloFeatureExtractor::YoloFeatureExtractor(const std::string& modelPath,
                                           const std::string& classesPath)
    : inputWidth(640), inputHeight(640) {
    net = cv::dnn::readNetFromONNX(modelPath);
    loadClassNames(classesPath);
}

// Chooses the DNN compute backend/target based on OpenCL availability.
// NOTE(review): when OpenCL IS available this currently only logs — the
// setPreferable* calls below are commented out (presumably disabled during
// debugging); confirm whether the OpenCL target should actually be enabled.
void YoloFeatureExtractor::initOpenCL() {
    if (cv::ocl::haveOpenCL()) {
        std::cout << "OpenCL is available. Enabling OpenCL for DNN module." << std::endl;
        //net.setPreferableBackend(cv::dnn::DNN_BACKEND_DEFAULT);
        //net.setPreferableTarget(cv::dnn::DNN_TARGET_OPENCL);
    } else {
        std::cout << "OpenCL is not available. Using default backend and target." << std::endl;
        net.setPreferableBackend(cv::dnn::DNN_BACKEND_DEFAULT);
        net.setPreferableTarget(cv::dnn::DNN_TARGET_CPU);
    }
}

// Reads one class name per line from `file` into classNames.
// A missing/unreadable file silently yields an empty list (stream just
// fails on the first getline).
void YoloFeatureExtractor::loadClassNames(const std::string& file) {
    std::ifstream ifs(file);
    std::string line;
    while (std::getline(ifs, line)) {
        classNames.push_back(line);
    }
}

// Runs a full forward pass on the image at `imagePath` and returns every
// value of every unconnected output layer, flattened into one vector.
// Logs per-phase timings (pre-processing, inference, total) in milliseconds.
// Throws std::runtime_error when the image cannot be read.
std::vector<float> YoloFeatureExtractor::extractFeatures(const std::string& imagePath) {
    auto start_time = std::chrono::high_resolution_clock::now();

    cv::Mat image = cv::imread(imagePath);
    if (image.empty()) {
        throw std::runtime_error("Could not load image: " + imagePath);
    }

    // Scale to [0,1], resize to network input, BGR->RGB swap, no center crop.
    cv::Mat blob;
    cv::dnn::blobFromImage(image, blob, 1.0 / 255.0,
                           cv::Size(inputWidth, inputHeight),
                           cv::Scalar(0, 0, 0), true, false);
    net.setInput(blob);

    auto start_time2 = std::chrono::high_resolution_clock::now();
    std::vector<cv::Mat> outputs;
    net.forward(outputs, net.getUnconnectedOutLayersNames());
    auto start_time3 = std::chrono::high_resolution_clock::now();

    // Flatten all output tensors (CV_32F blobs) into a single feature vector.
    std::vector<float> features;
    for (size_t i = 0; i < outputs.size(); ++i) {
        const float* data = reinterpret_cast<const float*>(outputs[i].data);
        const size_t totalElements = outputs[i].total();
        features.insert(features.end(), data, data + totalElements);
    }

    using std::chrono::duration_cast;
    using std::chrono::milliseconds;
    auto duration1 = duration_cast<milliseconds>(start_time2 - start_time);
    auto duration2 = duration_cast<milliseconds>(start_time3 - start_time2);
    auto totalDuration = duration_cast<milliseconds>(start_time3 - start_time);
    std::cout << "图片前期处理耗时: " << duration1.count() << " 毫秒" << std::endl;
    std::cout << "模型推理耗时: " << duration2.count() << " 毫秒" << std::endl;
    std::cout << "总耗时: " << totalDuration.count() << " 毫秒" << std::endl;

    return features;
}

// Runs the net and returns flattened activations from layers whose names
// suggest they belong to the backbone ("backbone", "conv" or "stage");
// falls back to the model's middle layer when no name matches.
// Throws std::runtime_error when the image cannot be read or the model
// exposes no layers at all.
std::vector<float> YoloFeatureExtractor::extractBackboneFeatures(const std::string& imagePath) {
    cv::Mat image = cv::imread(imagePath);
    if (image.empty()) {
        throw std::runtime_error("Could not load image: " + imagePath);
    }

    cv::Mat blob;
    cv::dnn::blobFromImage(image, blob, 1.0 / 255.0,
                           cv::Size(inputWidth, inputHeight),
                           cv::Scalar(0, 0, 0), true, false);
    net.setInput(blob);

    std::vector<std::string> layerNames = net.getLayerNames();
    if (layerNames.empty()) {
        // Guard: the fallback below would index an empty vector (UB).
        throw std::runtime_error("Model exposes no layers: " + imagePath);
    }

    // Heuristic name-based selection of backbone layers.
    std::vector<std::string> backboneLayers;
    for (const auto& name : layerNames) {
        if (name.find("backbone") != std::string::npos ||
            name.find("conv") != std::string::npos ||
            name.find("stage") != std::string::npos) {
            backboneLayers.push_back(name);
        }
    }
    if (backboneLayers.empty()) {
        backboneLayers.push_back(layerNames[layerNames.size() / 2]);
    }

    std::vector<cv::Mat> outputs;
    net.forward(outputs, backboneLayers);

    // Flatten via the raw data pointer: Mat::at<T>(int) is not valid on the
    // N-dimensional blobs forward() produces, and this matches how
    // extractFeatures() flattens its outputs.
    std::vector<float> features;
    for (size_t i = 0; i < outputs.size(); ++i) {
        const cv::Mat& output = outputs[i];
        const float* data = reinterpret_cast<const float*>(output.data);
        features.reserve(features.size() + output.total());
        features.insert(features.end(), data, data + output.total());
    }
    return features;
}

// Detects objects and returns one 6-element vector per NMS-surviving box:
// [x, y, width, height] normalized to the original image size, then the
// combined confidence (objectness * best class score), then the class id.
// Assumes YOLOv5-style output rows [cx, cy, w, h, objectness, class scores…]
// expressed in network-input (640x640) coordinates — TODO(review): confirm
// for this model; a 3-D output blob would also report rows/cols as -1 here.
// Throws std::runtime_error when the image cannot be read.
std::vector<std::vector<float>> YoloFeatureExtractor::extractROIFeatures(const std::string& imagePath) {
    cv::Mat image = cv::imread(imagePath);
    if (image.empty()) {
        throw std::runtime_error("Could not load image: " + imagePath);
    }

    cv::Mat blob;
    cv::dnn::blobFromImage(image, blob, 1.0 / 255.0,
                           cv::Size(inputWidth, inputHeight),
                           cv::Scalar(0, 0, 0), true, false);
    net.setInput(blob);

    std::vector<cv::Mat> outputs;
    net.forward(outputs, net.getUnconnectedOutLayersNames());

    const float CONFIDENCE_THRESHOLD = 0.5f;
    const float NMS_THRESHOLD = 0.4f;

    std::vector<int> classIds;
    std::vector<float> confidences;
    std::vector<cv::Rect> boxes;

    // Scale factors mapping network-input coordinates back to the image.
    const float x_factor = static_cast<float>(image.cols) / inputWidth;
    const float y_factor = static_cast<float>(image.rows) / inputHeight;

    for (size_t outputIdx = 0; outputIdx < outputs.size(); ++outputIdx) {
        const float* data = reinterpret_cast<const float*>(outputs[outputIdx].data);
        const int rows = outputs[outputIdx].rows;
        const int dimensions = outputs[outputIdx].cols;
        for (int i = 0; i < rows; ++i) {
            const float objectness = data[4];
            // Guard dimensions > 5 so the class-score scan never reads an
            // empty range (the original indexed probs[0] unconditionally).
            if (objectness >= CONFIDENCE_THRESHOLD && dimensions > 5) {
                // Best class score among columns [5, dimensions).
                int maxClassId = 0;
                float maxScore = data[5];
                for (int c = 6; c < dimensions; ++c) {
                    if (data[c] > maxScore) {
                        maxScore = data[c];
                        maxClassId = c - 5;
                    }
                }
                if (maxScore > CONFIDENCE_THRESHOLD) {
                    confidences.push_back(objectness * maxScore);
                    classIds.push_back(maxClassId);
                    // Center/size -> top-left corner rect, rescaled.
                    const float x = data[0];
                    const float y = data[1];
                    const float w = data[2];
                    const float h = data[3];
                    const int left = static_cast<int>((x - 0.5 * w) * x_factor);
                    const int top = static_cast<int>((y - 0.5 * h) * y_factor);
                    const int width = static_cast<int>(w * x_factor);
                    const int height = static_cast<int>(h * y_factor);
                    boxes.push_back(cv::Rect(left, top, width, height));
                }
            }
            data += dimensions;  // advance to the next candidate row
        }
    }

    std::vector<int> nms_result;
    cv::dnn::NMSBoxes(boxes, confidences, CONFIDENCE_THRESHOLD, NMS_THRESHOLD, nms_result);

    std::vector<std::vector<float>> roiFeatures;
    roiFeatures.reserve(nms_result.size());
    for (size_t i = 0; i < nms_result.size(); ++i) {
        const int idx = nms_result[i];
        cv::Rect box = boxes[idx];
        // Clamp the box to the image bounds before normalizing.
        box.x = std::max(0, std::min(box.x, image.cols - 1));
        box.y = std::max(0, std::min(box.y, image.rows - 1));
        box.width = std::max(0, std::min(box.width, image.cols - box.x));
        box.height = std::max(0, std::min(box.height, image.rows - box.y));

        std::vector<float> roiFeature;
        roiFeature.push_back(static_cast<float>(box.x) / image.cols);
        roiFeature.push_back(static_cast<float>(box.y) / image.rows);
        roiFeature.push_back(static_cast<float>(box.width) / image.cols);
        roiFeature.push_back(static_cast<float>(box.height) / image.rows);
        roiFeature.push_back(confidences[idx]);
        roiFeature.push_back(static_cast<float>(classIds[idx]));
        roiFeatures.push_back(roiFeature);
    }
    return roiFeatures;
}