Step 1 — Train the model and export it to ONNX (Python)
# Train a YOLO11 classification model and export it to ONNX with dynamic
# axes so the exported graph accepts a variable batch size at inference time.
from ultralytics import YOLO

# Build the model from its YAML config and initialize with pretrained weights.
model = YOLO("yolo11n-cls.yaml").load("yolo11n-cls.pt")

# Train briefly on MNIST160 at 64x64 — this must match the inference code,
# which feeds 64x64 RGB inputs. (model.train returns metrics, not a trainer.)
train_results = model.train(data="mnist160", epochs=10, imgsz=64)

# Quick sanity-check inference before exporting.
results = model.predict("bus.jpg")

# dynamic=True makes the input dimensions symbolic (variable batch size).
model.export(format="onnx", dynamic=True)
Step 2 — Batched inference with ONNX Runtime (Python)
import cv2
import numpy as np
import onnxruntime as ort
import glob
def load_image(filename, sizeX, sizeY):
    """Load an image and prepare it for NCHW model input.

    Reads the file, converts BGR->RGB, resizes to (sizeX, sizeY), scales
    pixel values to [0, 1], and returns the image flattened in CHW
    (planar) order as a float32 vector.

    Returns None when the file cannot be read.
    """
    image = cv2.imread(filename)
    if image is None:
        # Fixed: the original message was a placeholder-less f-string and
        # never showed which file failed.
        print(f"No image found: {filename}")
        return None
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image = cv2.resize(image, (sizeX, sizeY))
    image = image.astype(np.float32) / 255.0
    # HWC -> CHW, then flatten so the caller can stack 1-D vectors into a batch.
    image = image.transpose(2, 0, 1).flatten()
    return image
# ---- Batched ONNX Runtime inference over a folder of PNG images ----
model_path = "yolo11n-cls.onnx"
image_path = "./images/*.png"
# Model geometry: 64x64 RGB input, 10 classes (MNIST-style).
num_channels, width, height, num_classes = 3, 64, 64, 10

image_files = glob.glob(image_path)

image_batch = []
for filename in image_files:
    img_vec = load_image(filename, width, height)
    if img_vec is not None and len(img_vec) == num_channels * width * height:
        image_batch.append(img_vec)
    else:
        # Fixed: interpolate the offending filename (the original f-string
        # had no placeholder).
        print(f"Invalid image format for {filename}")

batch_size = len(image_batch)
if batch_size == 0:
    print("No valid images found.")
    exit(1)

# Stack the flattened CHW vectors into one NCHW tensor.
input_data = np.array(image_batch, dtype=np.float32).reshape((batch_size, num_channels, height, width))
print(f"Batch size: {input_data.shape}")
input_shape = (batch_size, num_channels, height, width)
print(f"Input shape: {input_shape}")

session = ort.InferenceSession(model_path, providers=['CUDAExecutionProvider'])
input_name = session.get_inputs()[0].name
output_name = session.get_outputs()[0].name

# The dynamic-axes export accepts any batch size here.
output = session.run([output_name], {input_name: input_data})[0]

batch_size = output.shape[0]
for i in range(batch_size):
    single_output = output[i]
    # np.argmax on the score vector replaces the cv2.minMaxLoc detour —
    # same result, no OpenCV dependency for a plain 1-D argmax.
    predicted_class = int(np.argmax(single_output))
    max_val = float(single_output[predicted_class])
    print(f'Sample {i}: Predicted class: {predicted_class}, Confidence: {max_val}')
Step 3 — The same inference in C++ with the ONNX Runtime C++ API
#include <iostream>
#include <opencv2/opencv.hpp>
#include <onnxruntime_cxx_api.h>
// Load an image and prepare it for NCHW model input: BGR->RGB, resize to
// sizeX x sizeY, scale to [0, 1], and return it flattened in CHW (planar)
// order. Returns an empty vector when the file cannot be read.
static std::vector<float> loadImage(const std::string& filename, int sizeX, int sizeY)
{
    cv::Mat image = cv::imread(filename);
    if (image.empty()) {
        // Fixed: bail out on a failed read. The original fell through and
        // called cvtColor on an empty Mat, which throws.
        std::cout << "No image found.";
        return {};
    }
    cv::cvtColor(image, image, cv::COLOR_BGR2RGB);
    cv::resize(image, image, cv::Size(sizeX, sizeY));

    // Collapse to a single row of interleaved HWC values, then convert
    // to float in [0, 1].
    image = image.reshape(1, 1);
    std::vector<float> vec;
    image.convertTo(vec, CV_32FC1, 1. / 255);

    // De-interleave HWC -> CHW: gather every 3rd value for each channel.
    std::vector<float> output;
    output.reserve(vec.size());
    for (size_t ch = 0; ch < 3; ++ch) {
        for (size_t i = ch; i < vec.size(); i += 3) {
            output.emplace_back(vec[i]);
        }
    }
    return output;
}
int main()
{
Ort::Env env;
Ort::RunOptions runOptions;
Ort::Session session(nullptr);
constexpr int64_t numChannels = 3;
constexpr int64_t width = 64;
constexpr int64_t height = 64;
constexpr int64_t numClasses = 10;
constexpr int64_t numInputElements = numChannels * height * width;
auto modelPath = L"yolo11n-cls.onnx";
const std::string imagePath = "./images/";
std::string _strPattern = imagePath + "*.png";
std::vector<cv::String> filesVec;
cv::glob(_strPattern, filesVec);
int Batchsize = 0;
std::vector<std::vector<float>> ImageBatch;
for (int i = 0; i < filesVec.size(); i++)
{
const std::vector<float> imageVec = loadImage(filesVec[i], 64, 64);
if (imageVec.empty()) {
std::cout << "Failed to load image: " << filesVec[i] << std::endl;
return 1;
}
if (imageVec.size() != numInputElements) {
std::cout << "Invalid image format. Must be 224x224 RGB image." << std::endl;
return 1;
}
Batchsize++;
ImageBatch.push_back(imageVec);
}
Ort::SessionOptions ort_session_options;
OrtCUDAProviderOptions options;
options.device_id = 0;
OrtSessionOptionsAppendExecutionProvider_CUDA(ort_session_options, options.device_id);
session = Ort::Session(env, modelPath, ort_session_options);
const std::array<int64_t, 4> inputShape = { Batchsize, numChannels, height, width };
const std::array<int64_t, 2> outputShape = { Batchsize, numClasses };
std::vector<float> input(Batchsize * numInputElements);
std::vector<float> results(Batchsize * numClasses);
auto memory_info = Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU);
auto inputTensor = Ort::Value::CreateTensor<float>(memory_info, input.data(), input.size(), inputShape.data(), inputShape.size());
auto outputTensor = Ort::Value::CreateTensor<float>(memory_info, results.data(), results.size(), outputShape.data(), outputShape.size());
for (int i = 0; i < Batchsize; ++i) {
std::copy(ImageBatch[i].begin(), ImageBatch[i].end(), input.begin() + i * numInputElements);
}
Ort::AllocatorWithDefaultOptions ort_alloc;
Ort::AllocatedStringPtr inputName = session.GetInputNameAllocated(0, ort_alloc);
Ort::AllocatedStringPtr outputName = session.GetOutputNameAllocated(0, ort_alloc);
const std::array<const char*, 1> inputNames = { inputName.get() };
const std::array<const char*, 1> outputNames = { outputName.get() };
inputName.release();
outputName.release();
std::vector<Ort::Value> output_tensors;
try
{
output_tensors = session.Run(Ort::RunOptions{ nullptr }, inputNames.data(), &inputTensor, 1, outputNames.data(), 1);
}
catch (const std::exception& e)
{
std::cerr << "Error: " << e.what() << std::endl;
return 1;
}
const float* pdata = output_tensors[0].GetTensorMutableData<float>();
cv::Mat classify_outputs(Batchsize, numClasses, CV_32F, (float*)pdata);
for (int i = 0; i < Batchsize; ++i) {
cv::Mat scores = classify_outputs.row(i);
double minVal, maxVal;
cv::Point minLoc, maxLoc;
cv::minMaxLoc(scores, &minVal, &maxVal, &minLoc, &maxLoc);
int classId = maxLoc.x;
std::cout << "Sample " << i << ": Class ID = " << classId << ", Score = " << maxVal << std::endl;
}
return 0;
}
Step 4 — CMakeLists.txt to build the C++ program (Windows/MSVC paths shown)
cmake_minimum_required(VERSION 3.18)
project(Dynamic)

# Local third-party roots — adjust these for your machine.
set(OpenCV_DIR "E:\\Opencv\\opencv_vs\\build")
set(ONNXRUNTIME_DIR "E:\\Onnxruntime\\gpu\\1.15")

# Manual OpenCV layout (MSVC prebuilt package).
set(OpenCV_INCLUDE_DIRS ${OpenCV_DIR}\\include)
set(OpenCV_LIB_DIRS ${OpenCV_DIR}\\x64\\vc16\\lib)
set(OpenCV_LIBS "opencv_world480d.lib" "opencv_world480.lib")

set(CMAKE_CXX_STANDARD 14)
set(CMAKE_CXX_STANDARD_REQUIRED ON)

include_directories(${OpenCV_INCLUDE_DIRS})
link_directories(${OpenCV_LIB_DIRS})
find_package(OpenCV QUIET)
link_libraries(${OpenCV_LIBS})

add_executable(Dynamic main.cpp)
target_compile_features(Dynamic PRIVATE cxx_std_14)

# Fixed: removed `find_library(PATH ${ONNXRUNTIME_DIR})` — find_library's
# first argument is the result *variable* and no library name was given, so
# the call did nothing useful and polluted the PATH variable. ONNX Runtime
# is linked by its full path below instead.
target_include_directories(Dynamic PRIVATE "${ONNXRUNTIME_DIR}/include")
target_link_libraries(Dynamic "${ONNXRUNTIME_DIR}/lib/onnxruntime.lib")