module training._base#

Inheritance diagram of onnxcustom.training._base

Short summary#

module onnxcustom.training._base

Base class for BaseEstimator and BaseOnnxFunction.

source on GitHub

Classes#

class

truncated documentation

BaseOnnxClass

Base class with common functions to handle attributes in classes owning ONNX graphs.

Static Methods#

staticmethod

truncated documentation

_get_param_names

Extracts all parameters to serialize.

Methods#

method

truncated documentation

save_onnx_graph

Saves all ONNX files stored in this class.

Documentation#

Base class for BaseEstimator and BaseOnnxFunction.

source on GitHub

class onnxcustom.training._base.BaseOnnxClass#

Bases: object

Base class with common functions to handle attributes in classes owning ONNX graphs.

source on GitHub

classmethod _get_param_names()#

Extracts all parameters to serialize.
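
As an illustration only, the method can be called on any class deriving from BaseOnnxClass. The sketch below uses a hypothetical subclass MyTrainer and assumes the parameter names are taken from the constructor signature, following the scikit-learn convention; the exact behaviour depends on the subclass.

    from onnxcustom.training._base import BaseOnnxClass

    class MyTrainer(BaseOnnxClass):
        # hypothetical subclass, only meant to illustrate the call
        def __init__(self, learning_rate=0.01, max_iter=100):
            self.learning_rate = learning_rate
            self.max_iter = max_iter

    # expected to return the constructor parameters to serialize,
    # e.g. ['learning_rate', 'max_iter'] (assumed, not verified here)
    print(MyTrainer._get_param_names())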

save_onnx_graph(folder, prefix=None, suffix=None)#

Saves all ONNX files stored in this class.

Parameters:
  • folder – folder where to save (it must exist), or bytes if the ONNX graphs must be returned as bytes instead of files

  • prefix – prefix to add to the name

  • suffix – suffix to add to the name

Returns:

dictionary of saved files ({ attribute: filename or nested dictionary }); the values are bytes when folder is bytes

The function raises a warning if a file already exists. File names are composed from the class name and the attribute names; names of frequent classes are shortened:

  • ‘Learning’ -> ‘L’

  • ‘OrtGradient’ -> ‘Grad’

  • ‘ForwardBackward’ -> ‘FB’
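
As a complement to the bytes variant used in the example below, here is a minimal sketch of saving to a folder; the folder name onnx_dump and the prefix demo are illustrative only, and train_session is assumed to be built as in the example:

    import os
    import pprint

    folder = "onnx_dump"
    os.makedirs(folder, exist_ok=True)  # the target folder must exist

    # writes one file per ONNX graph owned by the object
    files = train_session.save_onnx_graph(folder, prefix="demo")
    pprint.pprint(files)  # {attribute: filename or nested dictionary}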

<<<

import io
import numpy
import onnx
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from skl2onnx import to_onnx
from mlprodict.plotting.text_plot import onnx_simple_text_plot
from onnxcustom.training.optimizers_partial import (
    OrtGradientForwardBackwardOptimizer)
from onnxcustom.training.sgd_learning_rate import (
    LearningRateSGDNesterov)
from onnxcustom.training.sgd_learning_penalty import (
    ElasticLearningPenalty)


def walk_through(obj, prefix="", only_name=True):
    "Walks through the dictionary returned by save_onnx_graph and prints names or graphs."
    for k, v in obj.items():
        if isinstance(v, dict):
            p = prefix + "." + k if prefix else k
            walk_through(v, prefix=p, only_name=only_name)
        elif only_name:
            name = "%s.%s" % (prefix, k) if prefix else k
            print('+', name)
        else:
            name = "%s.%s" % (prefix, k) if prefix else k
            print('\n++++++', name)
            print()
            bf = io.BytesIO(v)
            onx = onnx.load(bf)
            print(onnx_simple_text_plot(onx))


# Builds a small regression dataset and fits a linear model.
X, y = make_regression(  # pylint: disable=W0632
    100, n_features=3, bias=2, random_state=0)
X = X.astype(numpy.float32)
y = y.astype(numpy.float32)
X_train, _, y_train, __ = train_test_split(X, y)
reg = LinearRegression()
reg.fit(X_train, y_train)
reg.coef_ = reg.coef_.reshape((1, -1))
opset = 15
# LinearRegressor is blacklisted so that the converted graph only uses
# standard operators (MatMul, Add, Reshape).
onx = to_onnx(reg, X_train, target_opset=opset,
              black_op={'LinearRegressor'})
# initializers (weights) to train
inits = ['coef', 'intercept']

# Training session: stochastic gradient descent with momentum
# (LearningRateSGDNesterov with nesterov=False) and an elastic L1/L2 penalty.
train_session = OrtGradientForwardBackwardOptimizer(
    onx, inits,
    learning_rate=LearningRateSGDNesterov(
        1e-4, nesterov=False, momentum=0.9),
    learning_penalty=ElasticLearningPenalty(l1=1e-3, l2=1e-4),
    warm_start=False, max_iter=100, batch_size=10)

# Passing bytes instead of a folder name makes save_onnx_graph return
# the serialized graphs instead of writing files.
onxs = train_session.save_onnx_graph(bytes)

print("+ all onnx graphs")
walk_through(onxs, only_name=True)
walk_through(onxs, only_name=False)

>>>

    + all onnx graphs
    + model_onnx
    + learning_rate.axpyw_onnx_
    + learning_loss.loss_grad_onnx_
    + learning_loss.loss_score_onnx_
    + learning_penalty.penalty_onnx_
    + learning_penalty.penalty_grad_onnx_
    + zero_onnx_
    
    ++++++ model_onnx
    
    opset: domain='' version=13
    input: name='X' type=dtype('float32') shape=[None, 3]
    init: name='coef' type=dtype('float32') shape=(3, 1) -- array([39.843, 62.89 ,  6.271], dtype=float32)
    init: name='intercept' type=dtype('float32') shape=(1,) -- array([2.], dtype=float32)
    init: name='shape_tensor' type=dtype('int64') shape=(2,) -- array([-1,  1])
    MatMul(X, coef) -> multiplied
      Add(multiplied, intercept) -> resh
        Reshape(resh, shape_tensor) -> variable
    output: name='variable' type=dtype('float32') shape=[None, 1]
    
    ++++++ learning_rate.axpyw_onnx_
    
    opset: domain='' version=14
    input: name='X1' type=dtype('float32') shape=None
    input: name='X2' type=dtype('float32') shape=None
    input: name='G' type=dtype('float32') shape=None
    input: name='alpha' type=dtype('float32') shape=[1]
    input: name='beta' type=dtype('float32') shape=[1]
    Mul(X1, alpha) -> Mu_C0
    Mul(G, beta) -> Mu_C02
      Add(Mu_C0, Mu_C02) -> Z
        Add(Z, X2) -> Y
    output: name='Y' type=dtype('float32') shape=None
    output: name='Z' type=dtype('float32') shape=None
    
    ++++++ learning_loss.loss_grad_onnx_
    
    opset: domain='' version=13
    input: name='X1' type=dtype('float32') shape=[None, None]
    input: name='X2' type=dtype('float32') shape=[None, None]
    init: name='Mu_Mulcst' type=dtype('float32') shape=(1,) -- array([0.5], dtype=float32)
    init: name='Re_Reshapecst' type=dtype('int64') shape=(1,) -- array([-1])
    init: name='Mu_Mulcst1' type=dtype('float32') shape=(1,) -- array([-1.], dtype=float32)
    Sub(X1, X2) -> Su_C0
      ReduceSumSquare(Su_C0) -> Re_reduced0
        Mul(Re_reduced0, Mu_Mulcst) -> Mu_C0
          Reshape(Mu_C0, Re_Reshapecst) -> Y
      Mul(Su_C0, Mu_Mulcst1) -> Y_grad
    output: name='Y' type=dtype('float32') shape=None
    output: name='Y_grad' type=dtype('float32') shape=None
    
    ++++++ learning_loss.loss_score_onnx_
    
    opset: domain='' version=13
    input: name='X1' type=dtype('float32') shape=[None, None]
    input: name='X2' type=dtype('float32') shape=[None, None]
    Sub(X1, X2) -> Su_C0
      Mul(Su_C0, Su_C0) -> Y
    output: name='Y' type=dtype('float32') shape=[None, 1]
    
    ++++++ learning_penalty.penalty_onnx_
    
    opset: domain='' version=13
    input: name='loss' type=dtype('float32') shape=None
    input: name='W0' type=dtype('float32') shape=None
    input: name='W1' type=dtype('float32') shape=None
    init: name='Mu_Mulcst' type=dtype('float32') shape=(1,) -- array([0.001], dtype=float32)
    init: name='Mu_Mulcst1' type=dtype('float32') shape=(1,) -- array([1.e-04], dtype=float32)
    init: name='Re_Reshapecst' type=dtype('int64') shape=(1,) -- array([-1])
    Abs(W0) -> Ab_Y0
      ReduceSum(Ab_Y0) -> Re_reduced0
        Mul(Re_reduced0, Mu_Mulcst) -> Mu_C0
    ReduceSumSquare(W1) -> Re_reduced04
    ReduceSumSquare(W0) -> Re_reduced02
      Mul(Re_reduced02, Mu_Mulcst1) -> Mu_C02
        Add(Mu_C0, Mu_C02) -> Ad_C02
    Identity(Mu_Mulcst) -> Mu_Mulcst2
    Abs(W1) -> Ab_Y02
      ReduceSum(Ab_Y02) -> Re_reduced03
      Mul(Re_reduced03, Mu_Mulcst2) -> Mu_C03
    Identity(Mu_Mulcst1) -> Mu_Mulcst3
      Mul(Re_reduced04, Mu_Mulcst3) -> Mu_C04
        Add(Mu_C03, Mu_C04) -> Ad_C03
          Add(Ad_C02, Ad_C03) -> Ad_C01
            Add(loss, Ad_C01) -> Ad_C0
              Reshape(Ad_C0, Re_Reshapecst) -> Y
    output: name='Y' type=dtype('float32') shape=[None]
    
    ++++++ learning_penalty.penalty_grad_onnx_
    
    opset: domain='' version=13
    input: name='X' type=dtype('float32') shape=None
    init: name='Mu_Mulcst' type=dtype('float32') shape=(1,) -- array([1.], dtype=float32)
    init: name='Mu_Mulcst1' type=dtype('float32') shape=(1,) -- array([0.001], dtype=float32)
    Mul(X, Mu_Mulcst) -> Mu_C0
    Sign(X) -> Si_output0
      Mul(Si_output0, Mu_Mulcst1) -> Mu_C02
      Sub(Mu_C0, Mu_C02) -> Y
    output: name='Y' type=dtype('float32') shape=None
    
    ++++++ zero_onnx_
    
    opset: domain='' version=14
    input: name='X' type=dtype('float32') shape=None
    init: name='Mu_Mulcst' type=dtype('float32') shape=(1,) -- array([0.], dtype=float32)
    Mul(X, Mu_Mulcst) -> Y
    output: name='Y' type=dtype('float32') shape=None
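
Each value in the returned dictionary holds a serialized graph and can be turned back into a ModelProto with onnx.load, as walk_through does above. For a single graph, using the model_onnx key printed in the list above:

    import io
    import onnx

    model = onnx.load(io.BytesIO(onxs["model_onnx"]))
    print(model.graph.input)  # same inputs as in the text plot above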

source on GitHub