onnxruntime helpers#

Frequent functions#

onnxruntime.get_device() → str#

Return the device used to compute the prediction (CPU, MKL, …)

<<<

import onnxruntime
print(onnxruntime.get_device())

>>>

    CPU
onnxruntime.get_all_providers() → List[str]#

Return the list of Execution Providers that this version of Onnxruntime supports. The order of elements represents the default priority order of Execution Providers, from highest to lowest.

<<<

import pprint
import onnxruntime
pprint.pprint(onnxruntime.get_all_providers())

>>>

    ['TensorrtExecutionProvider',
     'CUDAExecutionProvider',
     'MIGraphXExecutionProvider',
     'ROCMExecutionProvider',
     'OpenVINOExecutionProvider',
     'DnnlExecutionProvider',
     'TvmExecutionProvider',
     'VitisAIExecutionProvider',
     'NnapiExecutionProvider',
     'CoreMLExecutionProvider',
     'ArmNNExecutionProvider',
     'ACLExecutionProvider',
     'DmlExecutionProvider',
     'RknpuExecutionProvider',
     'XnnpackExecutionProvider',
     'CANNExecutionProvider',
     'CPUExecutionProvider']
onnxruntime.get_available_providers() → List[str]#

Return the list of Execution Providers available in this installed version of Onnxruntime. The order of elements represents the default priority order of Execution Providers, from highest to lowest.

<<<

import onnxruntime
import pprint
pprint.pprint(onnxruntime.get_available_providers())

>>>

    ['CPUExecutionProvider']
onnxruntime.set_default_logger_severity(arg0: int) → None#

Sets the default logging severity. 0:Verbose, 1:Info, 2:Warning, 3:Error, 4:Fatal
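
For example, to keep only error and fatal messages (severity values as listed above):

<<<

import onnxruntime
# 3:Error suppresses Verbose, Info and Warning messages from the default logger.
onnxruntime.set_default_logger_severity(3)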

onnxruntime.set_seed(arg0: int) → None#

Sets the seed used for random number generation in Onnxruntime.
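
For example:

<<<

import onnxruntime
# Fix the seed so operations that draw random numbers are reproducible.
onnxruntime.set_seed(42)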

Python Wrapper OrtDevice#

class onnxruntime.OrtDevice(c_ort_device)#

A data structure that exposes the underlying C++ OrtDevice

Internal constructor

__init__(c_ort_device)#

Internal constructor

_get_c_device()#

Internal accessor to underlying object

device_id()#
device_type()#
static make(ort_device_name, device_id)#
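
A minimal sketch of building a device through this wrapper; the device name 'cpu' is an assumption about the accepted values:

<<<

import onnxruntime
# Build the wrapper around the underlying C++ OrtDevice for CPU device 0.
device = onnxruntime.OrtDevice.make('cpu', 0)
print(device.device_id(), device.device_type())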

C class, OrtDevice or C_OrtDevice#

class onnxruntime.capi._pybind_state.OrtDevice(self: onnxruntime.capi.onnxruntime_pybind11_state.OrtDevice, arg0: int, arg1: int, arg2: int)#

ONNXRuntime device information.

__init__(self: onnxruntime.capi.onnxruntime_pybind11_state.OrtDevice, arg0: int, arg1: int, arg2: int) → None#
static cpu() → int#
static cuda() → int#
static default_memory() → int#
device_id(self: onnxruntime.capi.onnxruntime_pybind11_state.OrtDevice) → int#

Device Id.

device_type(self: onnxruntime.capi.onnxruntime_pybind11_state.OrtDevice) → int#

Device Type.
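
The constructor arguments are unnamed above; judging from the static helpers and accessors, they are plausibly device type, memory type and device id, in that order. A hedged sketch under that assumption:

<<<

from onnxruntime.capi._pybind_state import OrtDevice as C_OrtDevice
# Assumed argument order: (device_type, memory_type, device_id).
device = C_OrtDevice(C_OrtDevice.cpu(), C_OrtDevice.default_memory(), 0)
print(device.device_type(), device.device_id())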

OrtMemoryInfo#

class onnxruntime.capi._pybind_state.OrtMemoryInfo(self: onnxruntime.capi.onnxruntime_pybind11_state.OrtMemoryInfo, arg0: str, arg1: onnxruntime.capi.onnxruntime_pybind11_state.OrtAllocatorType, arg2: int, arg3: onnxruntime.capi.onnxruntime_pybind11_state.OrtMemType)#
__init__(self: onnxruntime.capi.onnxruntime_pybind11_state.OrtMemoryInfo, arg0: str, arg1: onnxruntime.capi.onnxruntime_pybind11_state.OrtAllocatorType, arg2: int, arg3: onnxruntime.capi.onnxruntime_pybind11_state.OrtMemType) → None#
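
A sketch of constructing one for a CPU arena allocator; the name 'Cpu' and the argument meanings (name, allocator type, device id, memory type) are assumptions read off the signature:

<<<

from onnxruntime.capi._pybind_state import OrtAllocatorType, OrtMemoryInfo, OrtMemType
# Assumed arguments: (name, allocator_type, device_id, mem_type).
mem_info = OrtMemoryInfo('Cpu', OrtAllocatorType.ORT_ARENA_ALLOCATOR, 0, OrtMemType.DEFAULT)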

C classes, frequent types#

class onnxruntime.capi._pybind_state.ModelMetadata#

Pre-defined and custom metadata about the model. It is usually used to identify the model used to run the prediction, and to facilitate comparisons.

__init__(*args, **kwargs)#
property custom_metadata_map#

additional metadata

property description#

description of the model

property domain#

ONNX domain

property graph_description#

description of the graph hosted in the model

property graph_name#

graph name

property producer_name#

producer name

property version#

version of the model
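
A ModelMetadata is typically obtained from an InferenceSession rather than constructed directly. A sketch, with 'model.onnx' standing in for any model path:

<<<

import onnxruntime
sess = onnxruntime.InferenceSession('model.onnx', providers=['CPUExecutionProvider'])
meta = sess.get_modelmeta()
print(meta.producer_name, meta.domain, meta.version)
print(meta.custom_metadata_map)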

class onnxruntime.capi._pybind_state.OrtMemType(self: onnxruntime.capi.onnxruntime_pybind11_state.OrtMemType, value: int)#

Members:

CPU_INPUT

CPU_OUTPUT

CPU

DEFAULT

__eq__(self: object, other: object) → bool#
__getstate__(self: object) → int#
__hash__(self: object) → int#
__index__(self: onnxruntime.capi.onnxruntime_pybind11_state.OrtMemType) → int#
__init__(self: onnxruntime.capi.onnxruntime_pybind11_state.OrtMemType, value: int) → None#
__int__(self: onnxruntime.capi.onnxruntime_pybind11_state.OrtMemType) → int#
__members__ = {'CPU': <OrtMemType.CPU_OUTPUT: -1>, 'CPU_INPUT': <OrtMemType.CPU_INPUT: -2>, 'CPU_OUTPUT': <OrtMemType.CPU_OUTPUT: -1>, 'DEFAULT': <OrtMemType.DEFAULT: 0>}#
__ne__(self: object, other: object) → bool#
__repr__(self: object) → str#
__setstate__(self: onnxruntime.capi.onnxruntime_pybind11_state.OrtMemType, state: int) → None#
__str__()#

name(self: handle) -> str

property name#
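
The __members__ mapping above shows CPU as an alias of CPU_OUTPUT; a quick check:

<<<

from onnxruntime.capi._pybind_state import OrtMemType
print(int(OrtMemType.DEFAULT))
print(OrtMemType.CPU == OrtMemType.CPU_OUTPUT)

>>>

    0
    True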

Rare functions#

onnxruntime.capi._pybind_state.clear_training_ep_instances() → None#

Clean up the execution provider instances used in the ORT training module.

onnxruntime.capi._pybind_state.create_and_register_allocator(arg0: OrtMemoryInfo, arg1: OrtArenaCfg) → None#
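
No description is given for create_and_register_allocator; the signature pairs an OrtMemoryInfo with an OrtArenaCfg. A hedged sketch in which the OrtArenaCfg argument order is an assumption:

<<<

from onnxruntime.capi import _pybind_state as C
mem_info = C.OrtMemoryInfo('Cpu', C.OrtAllocatorType.ORT_ARENA_ALLOCATOR, 0, C.OrtMemType.DEFAULT)
# Assumed OrtArenaCfg arguments: (max_mem, arena_extend_strategy,
# initial_chunk_size_bytes, max_dead_bytes_per_chunk); 0 and -1 request defaults.
arena_cfg = C.OrtArenaCfg(0, -1, -1, -1)
C.create_and_register_allocator(mem_info, arena_cfg)
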
onnxruntime.capi._pybind_state.enable_telemetry_events() → None#

Enables platform-specific telemetry collection where applicable.

onnxruntime.capi._pybind_state.disable_telemetry_events() → None#

Disables platform-specific telemetry collection.
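
Both functions take no arguments; for example:

<<<

from onnxruntime.capi._pybind_state import disable_telemetry_events
# Opt this process out of platform telemetry collection.
disable_telemetry_events()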

onnxruntime.capi._pybind_state.get_session_initializer() → onnxruntime::python::SessionObjectInitializer#

Return a default session object initializer.

onnxruntime.capi._pybind_state.is_dlpack_uint8_tensor(arg0: capsule) → bool#

Tells if a DLPack structure is a uint8 tensor.

Note: Boolean tensors are also uint8 tensors once converted with the DLPack protocol.
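
A sketch using PyTorch as the DLPack producer (any library implementing the DLPack protocol would do; torch here is an assumption, not a requirement of the API):

<<<

import torch
from torch.utils.dlpack import to_dlpack
from onnxruntime.capi._pybind_state import is_dlpack_uint8_tensor
# A uint8 tensor exported through DLPack should report True.
print(is_dlpack_uint8_tensor(to_dlpack(torch.zeros(4, dtype=torch.uint8))))
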
onnxruntime.capi._pybind_state.unregister_python_functions() → None#

Rare functions for training#

onnxruntime.tools.have_torch()#

Tells whether the PyTorch package is available in the current environment.

onnxruntime.tools.infer_input_info(module: Module, *inputs, **kwargs)#

Infer the input names and order from the arguments used to execute a PyTorch module, for use when exporting the model via torch.onnx.export. Assumes the model is on CPU. Use module.to(torch.device('cpu')) if it isn't.

Example usage:

    input_names, inputs_as_tuple = infer_input_info(module, …)
    torch.onnx.export(module, inputs_as_tuple, 'model.onnx', input_names=input_names, output_names=[…], …)

Parameters:
  • module – Module

  • inputs – Positional inputs

  • kwargs – Keyword argument inputs

Returns:

Tuple of ordered input names and input values. These can be used directly with torch.onnx.export as the input_names and inputs arguments.
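
A fuller sketch of the usage above; the toy module, the inputs and the output names are illustrative, and the import path is assumed from the entry heading:

<<<

import torch
from onnxruntime.tools import infer_input_info  # assumed import path

class AddMul(torch.nn.Module):
    def forward(self, x, y):
        return x + y, x * y

module = AddMul()  # already on CPU, as the function assumes
x, y = torch.randn(2, 3), torch.randn(2, 3)

input_names, inputs_as_tuple = infer_input_info(module, x, y)
torch.onnx.export(module, inputs_as_tuple, 'model.onnx',
                  input_names=input_names, output_names=['add', 'mul'])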

onnxruntime.tools.optimize_onnx_model.optimize_model(model_path: pathlib.Path, output_path: pathlib.Path, level: onnxruntime.capi.onnxruntime_pybind11_state.GraphOptimizationLevel = <GraphOptimizationLevel.ORT_ENABLE_BASIC: 1>, log_level: int = 3)#

Optimize an ONNX model using ONNX Runtime to the specified level.

Parameters:
  • model_path – Path to the ONNX model.

  • output_path – Path to save the optimized model to.

  • level – onnxruntime.GraphOptimizationLevel to use. Default is ORT_ENABLE_BASIC.

  • log_level – Log level. Defaults to Error (3) so we don't get output about unused initializers being removed. Warning (2) or Info (1) may be desirable in some scenarios.
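
For example, where 'model.onnx' is a placeholder path:

<<<

from pathlib import Path
import onnxruntime
from onnxruntime.tools.optimize_onnx_model import optimize_model

optimize_model(Path('model.onnx'), Path('model.optimized.onnx'),
               level=onnxruntime.GraphOptimizationLevel.ORT_ENABLE_BASIC)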

onnxruntime.tools.pytorch_export_helpers()#