Convert a pipeline with a XGBoost model#

sklearn-onnx only converts scikit-learn models into ONNX, but many libraries implement the scikit-learn API so that their models can be included in a scikit-learn pipeline. This example considers a pipeline including an XGBoost model. sklearn-onnx can convert the whole pipeline as long as it knows the converter associated with XGBClassifier. Let's see how to do it.

Train a XGBoost classifier#

from pyquickhelper.helpgen.graphviz_helper import plot_graphviz
from mlprodict.onnxrt import OnnxInference
import numpy
import onnxruntime as rt
from sklearn.datasets import load_iris, load_diabetes, make_classification
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from xgboost import XGBClassifier, XGBRegressor, DMatrix, train as train_xgb
from skl2onnx.common.data_types import FloatTensorType
from skl2onnx import convert_sklearn, to_onnx, update_registered_converter
from skl2onnx.common.shape_calculator import (
    calculate_linear_classifier_output_shapes,
    calculate_linear_regressor_output_shapes)
from onnxmltools.convert.xgboost.operator_converters.XGBoost import (
    convert_xgboost)
from onnxmltools.convert import convert_xgboost as convert_xgboost_booster

data = load_iris()
X = data.data[:, :2]
y = data.target

ind = numpy.arange(X.shape[0])
X = X[ind, :].copy()
y = y[ind].copy()

pipe = Pipeline([('scaler', StandardScaler()),
                 ('xgb', XGBClassifier(n_estimators=3))])
pipe.fit(X, y)

# The conversion fails but it is expected.

try:
    convert_sklearn(pipe, 'pipeline_xgboost',
                    [('input', FloatTensorType([None, 2]))],
                    target_opset={'': 14, 'ai.onnx.ml': 2})
except Exception as e:
    print(e)

# The error message tells us that no converter was found
# for :epkg:`XGBoost` models. By default, :epkg:`sklearn-onnx`
# only handles models from :epkg:`scikit-learn`, but it can
# be extended to any model following the :epkg:`scikit-learn`
# API as long as the module knows there exists a converter
# for every model used in the pipeline. That's why
# we need to register a converter.

Register the converter for XGBClassifier#

The converter is implemented in onnxmltools (onnxmltools…), and so is the shape calculator (onnxmltools…).

update_registered_converter(
    XGBClassifier, 'XGBoostXGBClassifier',
    calculate_linear_classifier_output_shapes, convert_xgboost,
    options={'nocl': [True, False], 'zipmap': [True, False, 'columns']})
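
For reference, a shape calculator only declares the type and shape of the outputs that the converter will produce. Below is a minimal, hypothetical sketch of what calculate_linear_classifier_output_shapes does for a fitted classifier; it is illustrative only, the real implementation in skl2onnx handles many more cases.

from skl2onnx.common.data_types import Int64TensorType

def my_classifier_shape_calculator(operator):
    # Hypothetical simplified shape calculator: one label per row,
    # one probability per class, with a dynamic batch dimension.
    n = operator.inputs[0].type.shape[0]
    n_classes = len(operator.raw_operator.classes_)
    operator.outputs[0].type = Int64TensorType([n])
    operator.outputs[1].type = FloatTensorType([n, n_classes])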

Convert again#

model_onnx = convert_sklearn(
    pipe, 'pipeline_xgboost',
    [('input', FloatTensorType([None, 2]))],
    target_opset={'': 14, 'ai.onnx.ml': 2})

# And save.
with open("pipeline_xgboost.onnx", "wb") as f:
    f.write(model_onnx.SerializeToString())
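
The saved file can be reloaded and validated with the onnx package before running it. This is an optional sanity check, not part of the original script:

import onnx

# Reload the serialized model and run ONNX's structural checker.
loaded = onnx.load("pipeline_xgboost.onnx")
onnx.checker.check_model(loaded)
print("opsets:", {d.domain: d.version for d in loaded.opset_import})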

Compare the predictions#

Predictions with XGBoost.

print("predict", pipe.predict(X[:5]))
print("predict_proba", pipe.predict_proba(X[:1]))
predict [0 2 0 2 1]
predict_proba [[0.69600695 0.1526681  0.15132491]]

Predictions with onnxruntime.

sess = rt.InferenceSession("pipeline_xgboost.onnx",
pred_onx =, {"input": X[:5].astype(numpy.float32)})
print("predict", pred_onx[0])
print("predict_proba", pred_onx[1][:1])
predict [0 2 0 2 1]
predict_proba [{0: 0.6960069537162781, 1: 0.15266810357570648, 2: 0.15132491290569305}]
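
The second output is a list of dictionaries because the converter appends a ZipMap operator by default. Since the converter was registered with a 'zipmap' option, it can be disabled at conversion time to get the probabilities back as a plain float matrix. The sketch below assumes the per-model option syntax options={id(model): {...}} documented by sklearn-onnx:

# Convert again with ZipMap disabled on the XGBClassifier step.
model_onnx_nozipmap = convert_sklearn(
    pipe, 'pipeline_xgboost_nozipmap',
    [('input', FloatTensorType([None, 2]))],
    target_opset={'': 14, 'ai.onnx.ml': 2},
    options={id(pipe.named_steps['xgb']): {'zipmap': False}})

sess_nz = rt.InferenceSession(model_onnx_nozipmap.SerializeToString(),
                              providers=["CPUExecutionProvider"])
print(sess_nz.run(None, {"input": X[:1].astype(numpy.float32)})[1])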

Final graph#

oinf = OnnxInference(model_onnx)
ax = plot_graphviz(oinf.to_dot())

Same example with XGBRegressor#

update_registered_converter(
    XGBRegressor, 'XGBoostXGBRegressor',
    calculate_linear_regressor_output_shapes, convert_xgboost)

data = load_diabetes()
x = data.data
y = data.target
X_train, X_test, y_train, _ = train_test_split(x, y, test_size=0.5)

pipe = Pipeline([('scaler', StandardScaler()),
                 ('xgb', XGBRegressor(n_estimators=3))])
pipe.fit(X_train, y_train)

print("predict", pipe.predict(X_test[:5]))
predict [ 60.626884  39.222885 121.9652   126.65325   37.804596]


onx = to_onnx(pipe, X_train.astype(numpy.float32),
              target_opset={'': 14, 'ai.onnx.ml': 2})

sess = rt.InferenceSession(onx.SerializeToString(),
                           providers=["CPUExecutionProvider"])
pred_onx = sess.run(None, {"X": X_test[:5].astype(numpy.float32)})
print("predict", pred_onx[0].ravel())
predict [ 60.626884  39.222885 121.9652   126.65325   37.804596]

Some discrepancies may appear. In that case, you should read Issues when switching to float.
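
A quick way to quantify such discrepancies is to compare both sets of predictions with a tolerance, for instance with numpy (a simple check, not part of the original script):

# Compare scikit-learn and onnxruntime predictions on the same rows.
skl_pred = pipe.predict(X_test[:5])
onx_pred = pred_onx[0].ravel()
print("max absolute difference:", numpy.abs(skl_pred - onx_pred).max())
# Raise if the gap exceeds a loose float32 tolerance.
numpy.testing.assert_allclose(skl_pred, onx_pred, rtol=1e-3, atol=1e-3)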

Same with a Booster#

A Booster cannot be inserted in a pipeline. It requires a different conversion function because it does not follow the scikit-learn API.

x, y = make_classification(n_classes=2, n_features=5,
                           random_state=42, n_informative=3)
X_train, X_test, y_train, _ = train_test_split(x, y, test_size=0.5,
                                                random_state=42)

dtrain = DMatrix(X_train, label=y_train)

param = {'objective': 'multi:softmax', 'num_class': 3}
bst = train_xgb(param, dtrain, 10)

initial_type = [('float_input', FloatTensorType([None, X_train.shape[1]]))]

try:
    onx = convert_xgboost_booster(bst, "name", initial_types=initial_type)
    cont = True
except AssertionError as e:
    print("XGBoost is too recent or onnxmltools too old.", e)
    cont = False

if cont:
    sess = rt.InferenceSession(onx.SerializeToString(),
                               providers=["CPUExecutionProvider"])
    input_name = sess.get_inputs()[0].name
    label_name = sess.get_outputs()[0].name
    pred_onx = sess.run(
        [label_name], {input_name: X_test.astype(numpy.float32)})[0]
    print(pred_onx)
XGBoost is too recent or onnxmltools too old. Missing required property "tree_info".
