LibTorch Backend 🔥

The LibTorch backend is the right choice when:

  • You plan to host the model on servers as remote services. If you can accept the binary-size overhead of LibTorch Mobile, you can also deploy the model to mobile devices.

  • Your serving platform doesn’t provide a Python interpreter.

  • You care about throughput and need to avoid the Python GIL.

  • You want to embed the model into your own C/C++/Java/C# applications.

Author Model with Python

As an example, we will build a rule-based model. The model tells whether a user query is weather related by checking whether it appears in an allow-list. It supports two languages: English and Chinese.

import torch
from pyis.torch import ops

class Model(torch.nn.Module):
    def __init__(self) -> None:
        super().__init__()
        self.trie1 = ops.CedarTrie()
        self.trie1.insert('what is the weather')
        self.trie1.insert('will it rain')

        self.trie2 = ops.CedarTrie()
        self.trie2.insert('今天天气怎么样')
        self.trie2.insert('明天会下雨吗')

    def forward(self, query: str, locale: str) -> bool:
        if locale == 'en-us':
            return self.trie1.contains(query)
        elif locale == 'zh-cn':
            return self.trie2.contains(query)
        return False

model = Model()
is_weather = model.forward('what is the weather', 'en-us') 
print(is_weather) # True
is_weather = model.forward('what is the answer', 'en-us') 
print(is_weather) # False
is_weather = model.forward('明天会下雨吗', 'zh-cn') 
print(is_weather) # True

Save Model

The model object, together with all its dependent files, is serialized to a .pt (TorchScript) file.

import os
from pyis.torch import save

# serialize the model to a TorchScript file
os.makedirs('tmp', exist_ok=True)
model_file = 'tmp/model.pt'
scripted_model = torch.jit.script(model)
save(scripted_model, model_file)

Load Model and Validate in Python

Load the model with PyTorch and run the loaded model.

from pyis.torch import load

# de-serialize the model
loaded_model = load(model_file)
is_weather = loaded_model.forward('what is the weather', 'en-us') 
print(is_weather) # True
is_weather = loaded_model.forward('what is the answer', 'en-us') 
print(is_weather) # False
is_weather = loaded_model.forward('明天会下雨吗', 'zh-cn') 
print(is_weather) # True

Load Model and Run without Python

The final step is to deploy the generated TorchScript model to a production environment (normally C++). Below is sample code that loads and runs the model in C++. Refer to the PyTorch tutorial on loading a TorchScript model in C++ for more details.

#include "pyis/pyis_c_api.h"
#include <torch/script.h>

#include <iostream>

int main() {
    // create and activate a pyis model context for the model file
    // before loading it with LibTorch
    auto* pyis_api = GetPyisApi();
    auto* context = pyis_api->ModelContextCreate("[PATH_TO_PT_MODEL_FILE]", "");
    pyis_api->ModelContextActivate(context);

    torch::jit::script::Module module;
    try {
        module = torch::jit::load("[PATH_TO_PT_MODEL_FILE]");

        auto query = torch::IValue("what is the weather");
        auto locale = torch::IValue("en-us");
        
        auto output = module.forward({query, locale}).toBool();
        // the bool prints as 1 (true) or 0 (false)
        std::cout << "Weather intent : " << output << std::endl;
    }
    catch (const std::exception& e) {
        std::cerr << "error loading or running the model: " << e.what() << std::endl;
        return -1;
    }
    return 0;
}

Along with a CMakeLists.txt file to build the sample:

cmake_minimum_required(VERSION 3.12 FATAL_ERROR)

project(pyis_torch_test VERSION 0.1)

include(FetchContent)
FetchContent_Declare(
    pyis
    GIT_REPOSITORY https://github.com/microsoft/python-inference-script
    GIT_TAG main)
set(PYTHON_BACKEND
    OFF
    CACHE BOOL "" FORCE)
set(TORCH_BACKEND
    ON
    CACHE BOOL "" FORCE)
set(ONNX_BACKEND
    OFF
    CACHE BOOL "" FORCE)

FetchContent_MakeAvailable(pyis)

add_executable(pyis_torch_test)
target_sources(pyis_torch_test PRIVATE test.cpp)

# link libtorch library
find_package(Torch REQUIRED PATHS ${TORCH_INSTALL_DIR})
target_link_libraries(pyis_torch_test "${TORCH_LIBRARIES}")
# link the pyis torch backend; --no-as-needed keeps the library from being
# dropped by the linker even though none of its symbols are referenced directly
target_link_libraries(pyis_torch_test -Wl,--no-as-needed pyis_torch)

if (MSVC)
    file(GLOB TORCH_DLLS "${TORCH_INSTALL_PREFIX}/lib/*.dll")
    add_custom_command(TARGET pyis_torch_test
                       POST_BUILD
                       COMMAND ${CMAKE_COMMAND} -E copy_if_different ${TORCH_DLLS} $<TARGET_FILE_DIR:pyis_torch_test>)
endif (MSVC)

add_custom_command(TARGET pyis_torch_test
                   POST_BUILD
                   COMMAND ${CMAKE_COMMAND} -E copy_if_different $<TARGET_FILE:pyis_torch> $<TARGET_FILE_DIR:pyis_torch_test>)

# include pyis c api header
target_include_directories(pyis_torch_test PRIVATE ${pyis_SOURCE_DIR}/pyis/c_api/include)

Build the Sample

cmake -S . -B build -A x64
cmake --build build
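
The CMakeLists.txt above locates LibTorch via find_package(Torch REQUIRED PATHS ${TORCH_INSTALL_DIR}). If TORCH_INSTALL_DIR is not already defined by the pyis build, you can pass it on the configure line. Below is a minimal sketch assuming a local LibTorch distribution; the path is only an example and should be adjusted to your setup.

# point CMake at a local LibTorch distribution (example path)
cmake -S . -B build -A x64 -DTORCH_INSTALL_DIR="C:/libtorch"
cmake --build build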

Once the build succeeds, we can run the executable:

> .\build\Debug\pyis_torch_test.exe
Weather intent : 1