安装libtorch
X86平台直接下载官方编译的版本,解压后可直接用, arm平台需要下载源码编译
libtorch库的安装参考:https://pytorch.org/cppdocs/installing.html。
下载:libtorch c++
GPU版本
Libtorch1.3.1 cuda10.0
https://download.pytorch.org/libtorch/cu100/libtorch-shared-with-deps-1.3.1%2Bcu100.zip
https://download.pytorch.org/libtorch/cu100/libtorch-cxx11-abi-shared-with-deps-1.3.1%2Bcu100.zip (使用这个链接)
unzip libtorch-cxx11-abi-shared-with-deps-1.3.1%2Bcu100.zip
解压后就可以使用
一个简单的 example-app
example-app/
CMakeLists.txt
example-app.cpp
example-app.cpp
#include <torch/torch.h>
#include <iostream>

// Smoke test for a libtorch installation: build a random 2x3 tensor
// and print it. If this links and runs, the toolchain is set up correctly.
int main() {
  const auto tensor = torch::rand({2, 3});
  std::cout << tensor << std::endl;
  return 0;
}
CMakeLists.txt
# Minimal build script for the libtorch smoke-test program.
cmake_minimum_required(VERSION 3.0 FATAL_ERROR)
project(example-app)
# Torch is located through TorchConfig.cmake; point CMAKE_PREFIX_PATH at the
# unpacked libtorch directory when invoking cmake.
find_package(Torch REQUIRED)
add_executable(example-app example-app.cpp)
target_link_libraries(example-app "${TORCH_LIBRARIES}")
# libtorch 1.3 headers require at least C++11.
set_property(TARGET example-app PROPERTY CXX_STANDARD 11)
编译example-app
mkdir build
cd build
cmake -DCMAKE_PREFIX_PATH=/absolute/path/to/libtorch .. //(=后面不能有空格)
cmake --build . --config Release 或者 make
libTorch调用预训练好的模型
转换一个简单的分类模型resnet18
python代码 , 将.pth模型转为.pt模型, 环境配置 torch1.3.1 libtorch1.3.1
opencv4+
demo.py
#!/usr/bin/env python
#coding:utf-8
"""Export torchvision resnet18 to a TorchScript module via tracing, then
sanity-check the traced module on one image.

Expects in the working directory:
  - resnet18-5c106cde.pth : pretrained weights, downloaded beforehand
  - cat.jpg               : any RGB test image
  - synset_words.txt      : 1000 ImageNet class names, one per line
Produces model.pt, to be consumed by the C++ demo.
"""
import torch
import torchvision
from torchvision import transforms
from PIL import Image
from time import time
import numpy as np

# Build the architecture without triggering a download, then load the
# locally cached state dict.
model = torchvision.models.resnet18(pretrained=False)
state_dict = torch.load('./resnet18-5c106cde.pth')
model.load_state_dict(state_dict)
model.eval()  # inference mode: freeze batchnorm/dropout before tracing

# Trace with a dummy input of the expected shape (N, C, H, W).
example = torch.rand(1, 3, 224, 224)
traced_script_module = torch.jit.trace(model, example)
traced_script_module.save("model.pt")

# Rough timing of a batch-64 forward pass. no_grad() keeps autograd from
# recording the graph, which would otherwise skew the timing and waste memory.
batch = torch.rand(64, 3, 224, 224)
with torch.no_grad():
    start = time()
    output = traced_script_module(batch)
    stop = time()
print(str(stop-start) + "s")

# Preprocess the test image the same way the C++ demo does: RGB, resize to
# 224x224, scale to [0, 1], then ImageNet mean/std normalization.
image = Image.open('cat.jpg').convert('RGB')
default_transform = transforms.Compose([
    transforms.Resize([224, 224]),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225])
])
image = default_transform(image)

# Forward a single image (unsqueeze adds the batch dimension).
with torch.no_grad():
    output = traced_script_module(image.unsqueeze(0))
print(output[0, :10])

# Print the top-5 predicted labels; 1-based ("top-1".."top-5") to match the
# C++ demo's output.
labels = np.loadtxt('synset_words.txt', dtype=str, delimiter='\n')
data_out = output[0].data.numpy()
sorted_idxs = np.argsort(-data_out)
for i, idx in enumerate(sorted_idxs[:5]):
    print('top-%d label: %s, score: %f' % (i + 1, labels[idx], data_out[idx]))
将.pt模型用c++调用
example-app.cpp
#include "torch/script.h"
#include "torch/torch.h"
#include "opencv2/opencv.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/imgproc/types_c.h"
#include <cstdio>
#include <fstream>   // std::ifstream — was missing; needed for the label file
#include <iostream>
#include <memory>
#include <string>
#include <vector>

// Loads a traced TorchScript model (model.pt from demo.py), times a dummy
// batch-64 forward pass, then classifies one image and prints the top-5
// ImageNet labels.
//
// usage: example-app <model.pt> <image> <synset_words.txt>
int main(int argc, const char* argv[]) {
  if (argc < 4) {
    std::cerr << "usage: example-app <path-to-exported-script-module> "
              << "<path-to-image> <path-to-category-text>\n";
    return -1;
  }

  // Deserialize the ScriptModule. Since libtorch 1.2, torch::jit::load
  // returns a Module by value (not a shared_ptr) and throws c10::Error on
  // failure, so catch instead of null-checking.
  torch::jit::script::Module module;
  try {
    module = torch::jit::load(argv[1]);
  } catch (const c10::Error&) {
    std::cerr << "error loading the model: " << argv[1] << '\n';
    return -1;
  }
  std::cout << "load model ok\n";

  // Time one forward pass on a random NCHW batch of 64.
  std::vector<torch::jit::IValue> inputs;
  inputs.push_back(torch::rand({64, 3, 224, 224}));
  double t = (double)cv::getTickCount();
  module.forward(inputs).toTensor();
  t = (double)cv::getTickCount() - t;
  printf("execution time = %gs\n", t / cv::getTickFrequency());
  inputs.pop_back();

  // Load the image and mimic the Python-side preprocessing:
  // BGR->RGB, float in [0,1], resize to 224x224, ImageNet normalization.
  cv::Mat image = cv::imread(argv[2], 1);
  if (image.empty()) {  // imread returns an empty Mat on failure; cvtColor would crash
    std::cerr << "failed to read image: " << argv[2] << '\n';
    return -1;
  }
  cv::cvtColor(image, image, CV_BGR2RGB);
  cv::Mat img_float;
  image.convertTo(img_float, CV_32F, 1.0/255);
  cv::resize(img_float, img_float, cv::Size(224, 224));

  // from_blob wraps the Mat's buffer without copying (img_float must stay
  // alive while the tensor is used). OpenCV layout is NHWC; permute to the
  // NCHW layout the model expects.
  auto img_tensor = torch::from_blob(img_float.data, {1, 224, 224, 3});
  img_tensor = img_tensor.permute({0, 3, 1, 2});
  // Per-channel (x - mean) / std, in place.
  img_tensor[0][0] = img_tensor[0][0].sub_(0.485).div_(0.229);
  img_tensor[0][1] = img_tensor[0][1].sub_(0.456).div_(0.224);
  img_tensor[0][2] = img_tensor[0][2].sub_(0.406).div_(0.225);
  inputs.push_back(img_tensor);

  // Execute the model on the real image and show the first 10 logits.
  torch::Tensor out_tensor = module.forward(inputs).toTensor();
  std::cout << out_tensor.slice(/*dim=*/1, /*start=*/0, /*end=*/10) << '\n';

  // Load labels, one class name per line. The original used glog's CHECK()
  // macro, which is not included here; fail explicitly instead.
  std::string label_file = argv[3];
  std::ifstream rf(label_file.c_str());
  if (!rf) {
    std::cerr << "Unable to open labels file " << label_file << '\n';
    return -1;
  }
  std::string line;
  std::vector<std::string> labels;
  while (std::getline(rf, line))
    labels.push_back(line);

  // Sort scores descending along the class dimension and print the top-5.
  std::tuple<torch::Tensor, torch::Tensor> result = out_tensor.sort(-1, true);
  torch::Tensor top_scores = std::get<0>(result)[0];
  torch::Tensor top_idxs = std::get<1>(result)[0].toType(torch::kInt32);
  auto top_scores_a = top_scores.accessor<float, 1>();
  auto top_idxs_a = top_idxs.accessor<int, 1>();
  for (int i = 0; i < 5; ++i) {
    int idx = top_idxs_a[i];
    if (idx < 0 || idx >= (int)labels.size()) {
      // Guard against a labels file shorter than the model's class count.
      std::cerr << "label index " << idx << " out of range\n";
      continue;
    }
    std::cout << "top-" << i+1 << " label: ";
    std::cout << labels[idx] << ", score: " << top_scores_a[i] << std::endl;
  }
  return 0;
}
CMakeLists.txt
# Build script for the image-classification demo; it needs both libtorch
# (inference) and OpenCV (image loading and preprocessing).
cmake_minimum_required(VERSION 3.0 FATAL_ERROR)
project(custom_ops)
# Torch is found via CMAKE_PREFIX_PATH pointing at the libtorch directory.
find_package(Torch REQUIRED)
find_package( OpenCV REQUIRED )
include_directories( ${OpenCV_INCLUDE_DIRS} )
add_executable(example-app example-app.cpp)
target_link_libraries(example-app ${TORCH_LIBRARIES} ${OpenCV_LIBS})
# libtorch 1.3 headers require at least C++11.
set_property(TARGET example-app PROPERTY CXX_STANDARD 11)
编译 与运行
mkdir build
cd build
cmake -DCMAKE_PREFIX_PATH=/home/jonado/soft/libtorch/libtorch ..
make
//运行
./example-app ../model.pt ../dog.png ../synset_words.txt
../model.pt 为运行demo.py程序生成的模型文件
../dog.png 任意一张测试图片
../synset_words.txt 为分类标签文件(即googlenet使用的synset_words.txt), 可以在github搜索下载, 里面放的是1000个类的名称
以下为遇到的相关错误及处理:
fatal error: torch/Tensor.h: 没有那个文件或目录
屏蔽该行
//#include <torch/Tensor.h>
std::shared_ptr<torch::jit::script::Module> module = torch::jit::load("../xxx.pt");
修改为:
torch::jit::script::Module module = torch::jit::load("../xxx.pt");
现在的Module已经不是指针,这个断言没有存在的必要了,删掉就行
assert(module != nullptr);
torch::Tensor output = module->forward(std::move(inputs)).toTensor();
修改为
torch::Tensor output = module.forward(std::move(inputs)).toTensor();
error: ‘class at::DeprecatedTypeProperties’ has no member named ‘tensorFromBlob’
auto img_tensor = torch::CPU(torch::kFloat32).tensorFromBlob(img_float.data, {1, 224, 224, 3});
错误代码:
auto img_tensor = torch::CPU(torch::kFloat32).tensorFromBlob(img_float.data, { 1, input_image_size, input_image_size, 3 });
原因:libtorch 1.1的接口改了,
正确代码:
auto img_tensor = torch::from_blob(image.data, {1, inp_dim[0], inp_dim[1], 3}).permute({0, 3, 1, 2}).to(torch::kCUDA);
terminate called after throwing an instance of 'c10::Error'
what(): Must not create a new variable from a variable, use its .tensor_data() (make_variable at /home/jonado/soft/libtorch/libtorch/include/torch/csrc/autograd/variable.h:545)
frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) + 0x6a (0x7fecf0a1dc4a in /home/jonado/soft/libtorch/libtorch/lib/libc10.so)
frame #1: torch::autograd::make_variable(at::Tensor, bool, bool) + 0x10c (0x5575108ced11 in ./example-app)
frame #2: main + 0xa76 (0x5575108c64aa in ./example-app)
frame #3: __libc_start_main + 0xe7 (0x7fecee847b97 in /lib/x86_64-linux-gnu/libc.so.6)
frame #4: _start + 0x2a (0x5575108c43aa in ./example-app)
定位错误代码行: auto img_var = torch::autograd::make_variable(img_tensor, false);
img_tensor已经是tensor类型数据了, 删掉这一行, 直接把img_tensor放入到inputs容器 inputs.push_back(img_tensor);