To use a deep learning model in a mobile app, the model usually needs to be converted to an on-device inference format such as MNN or NCNN.
The following walks through converting an ONNX model to MNN.
Model Conversion
1. Install MNN
git clone https://github.com/alibaba/MNN
cd MNN
2. Generate the custom FlatBuffers data structures
./schema/generate.sh
3. Create the build directory
mkdir build
cd build
4. Enable the converter build option and compile the project
cmake .. -DMNN_BUILD_CONVERTER=true && make -j4
5. Convert ONNX to MNN
./MNNConvert -f ONNX --modelFile XXX.onnx --MNNModel XXX.mnn --bizCode MNN
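For example, converting a hypothetical MNIST classifier (the file names are placeholders):
./MNNConvert -f ONNX --modelFile mnist.onnx --MNNModel mnist.mnn --bizCode MNN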
Model Inference
The function below loads the converted .mnn model with the MNN C++ API, preprocesses a grayscale image with OpenCV, runs a forward pass, and returns the predicted class together with its softmax probability.
#include <MNN/Interpreter.hpp>
#include <MNN/Tensor.hpp>
#include <opencv2/opencv.hpp>
#include <cmath>
#include <iostream>
#include <memory>
#include <string>

#define IMAGE_VERIFY_SIZE 28 // input image size (28 x 28)
#define CLASSES_SIZE 10      // number of classes (digits 0-9)
#define INPUT_NAME "input_0"
#define OUTPUT_NAME "output_0"
std::string MNNAdapterInference::startTest(const std::string &model, const std::string &imgPath) {
    // Create the MNN interpreter from the model file
    auto mnnNet = std::shared_ptr<MNN::Interpreter>(MNN::Interpreter::createFromFile(model.c_str()));
    // Configure scheduling: run on the CPU with 4 inference threads
    MNN::ScheduleConfig netConfig;
    netConfig.type = MNN_FORWARD_CPU;
    netConfig.numThread = 4;
    // Create the inference session
    auto session = mnnNet->createSession(netConfig);
    // Get the input tensor by name
    auto input = mnnNet->getSessionInput(session, INPUT_NAME);
    // If the model was exported without a concrete input shape, resize the
    // input tensor to 1 x 1 x 28 x 28 and let the session re-allocate
    if (input->elementSize() <= 4) {
        mnnNet->resizeTensor(input, {1, 1, IMAGE_VERIFY_SIZE, IMAGE_VERIFY_SIZE});
        mnnNet->resizeSession(session);
    }
// std::cout << "input shape: " << input->shape()[0] << " " << input->shape()[1] << " " << input->shape()[2] << " "
// << input->shape()[3] << std::endl;
// preprocess image
MNN::Tensor givenTensor(input, MNN::Tensor::CAFFE);
// const int inputSize = givenTensor.elementSize();
// std::cout << inputSize << std::endl;
auto inputData = givenTensor.host<float>();
    // Image preprocessing: load as grayscale and resize to 28 x 28
    cv::Mat gray_image = cv::imread(imgPath, cv::IMREAD_GRAYSCALE);
    cv::Mat norm_image;
    cv::resize(gray_image, norm_image, cv::Size(IMAGE_VERIFY_SIZE, IMAGE_VERIFY_SIZE));
    // (For a 3-channel RGB model you would instead iterate over the channels and
    // normalize with the ImageNet means/stds: 0.485/0.229, 0.456/0.224, 0.406/0.225.)
    // Normalize with the MNIST mean/std (0.1307 / 0.3081) and fill the input buffer
    for (int i = 0; i < norm_image.rows; ++i) {
        for (int j = 0; j < norm_image.cols; ++j) {
            const auto src = norm_image.at<uchar>(i, j);
            float dst = (src / 255.0f - 0.1307f) / 0.3081f;
            inputData[i * IMAGE_VERIFY_SIZE + j] = dst;
        }
    }
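    // Sanity check of the formula above: a white pixel (255) maps to
    // (1.0 - 0.1307) / 0.3081 ≈ 2.82, a black pixel (0) to about -0.42.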
    // Copy the staged data into the session's input tensor
    input->copyFromHostTensor(&givenTensor);
    // Run inference
    mnnNet->runSession(session);
    // Get the output tensor and copy it back to a host-side tensor
    auto output = mnnNet->getSessionOutput(session, OUTPUT_NAME);
    // (Debug) std::cout << "output shape: " << output->shape()[0] << " " << output->shape()[1] << std::endl;
    auto output_host = std::make_shared<MNN::Tensor>(output, MNN::Tensor::CAFFE);
    output->copyToHostTensor(output_host.get());
    // Parse the classification result: find the index with the largest logit
    auto values = output_host->host<float>();
    int max_index = 0;
    for (int i = 0; i < CLASSES_SIZE; i++) {
        if (values[i] > values[max_index]) max_index = i;
    }
    // Convert the winning logit to a softmax probability:
    // prob = exp(values[max_index]) / sum_i exp(values[i])
    float exp_sum = 0.0f;
    for (int i = 0; i < CLASSES_SIZE; i++) {
        exp_sum += std::exp(values[i]);
    }
    float prob = std::exp(values[max_index]) / exp_sum;
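    // Note: with large logits one would usually subtract values[max_index]
    // inside std::exp for numerical stability; the shift cancels in the ratio.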
std::cout << "cls id: " << max_index << std::endl; // 类别ID
std::cout << "cls prob: " << prob << std::endl; // 置信度
    // Build the result string and return it
    std::string total = "cls id: " + std::to_string(max_index) + ", cls prob: " + std::to_string(prob);
    std::cout << "total: " << total << std::endl;
    return total;
}
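A minimal sketch of how the function might be called; the default-constructed MNNAdapterInference and the file paths are assumptions for illustration, not part of the original:

int main() {
    MNNAdapterInference adapter;
    // Hypothetical paths: a converted model and a 28 x 28 test image
    std::string result = adapter.startTest("mnist.mnn", "digit.png");
    return 0;
}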
Execution Result
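Given the logging above, a run prints the class id, its softmax probability, and the combined string, e.g. (values here are purely illustrative, not a recorded run):

cls id: 7
cls prob: 0.998765
total: cls id: 7, cls prob: 0.998765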