Coverage for onnxcustom/utils/onnx_function.py: 98%

240 statements  

coverage.py v7.0.5, created at 2023-01-17 01:42 +0100

# pylint: disable=C0415,E0611
"""
@file
@brief Onnx helper.
"""
import numpy
from .onnx_helper import dtype_to_var_type, add_initializer
from .. import __max_supported_opset__ as SUPPORTED_OPSET


def get_supported_functions():
    """
    Returns the list of functions supported by @see fn function_onnx_graph.
    """
    glo = globals()
    res = {}
    for k, v in glo.items():
        if k.startswith('_onnx_'):
            res[k[6:]] = v.__doc__
    return res
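

# Example (illustrative sketch, not part of the original module): the mapping
# returned by get_supported_functions doubles as a catalog of every '_onnx_*'
# helper defined below, keyed by the name expected by function_onnx_graph.
def _example_list_functions():  # hypothetical helper for illustration
    # Print one line per supported function with the first docstring line.
    for name, doc in sorted(get_supported_functions().items()):
        first_line = (doc or '').strip().split('\n')[0]
        print(name, '-', first_line)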


def function_onnx_graph(name, target_opset=None, dtype=numpy.float32,
                        weight_name=None, **kwargs):
    """
    Returns the ONNX graph corresponding to a function.

    :param name: name
    :param target_opset: opset version; if None, *target_opset*
        is replaced by the latest supported opset defined
        in the main `__init__.py` of this package in
        `__max_supported_opset__`
    :param dtype: computation type
    :param weight_name: weight name if any
    :param kwargs: additional parameters
    :return: ONNX graph

    A wrong name raises an exception listing all the
    supported functions. One example with function `square_error`:

    .. gdot::
        :script: DOT-SECTION

        from mlprodict.onnxrt import OnnxInference
        from onnxcustom.utils.onnx_function import function_onnx_graph

        model_onnx = function_onnx_graph('square_error')
        oinf = OnnxInference(model_onnx, inplace=False)

        print("DOT-SECTION", oinf.to_dot())

    An example on how to use it:

    .. runpython::
        :showcode:

        import numpy
        from onnxruntime import InferenceSession
        from onnxcustom.utils.onnx_function import function_onnx_graph

        model_onnx = function_onnx_graph('square_error')
        sess = InferenceSession(model_onnx.SerializeToString())
        res = sess.run(None, {
            'X1': numpy.array([[0, 1]], dtype=numpy.float32).T,
            'X2': numpy.array([[1, 2]], dtype=numpy.float32).T})
        print(res[0])

    List of supported functions:

    .. runpython::
        :showcode:
        :warningout: DeprecationWarning

        from onnxcustom.utils.onnx_function import get_supported_functions
        print("\\n".join(sorted(get_supported_functions())))
    """
    if target_opset is None:
        target_opset = SUPPORTED_OPSET
    glo = globals()
    full_name = "_onnx_" + name
    if full_name in glo:
        if weight_name is None:
            return glo[full_name](target_opset=target_opset,
                                  dtype=dtype, **kwargs)
        return glo[full_name](target_opset=target_opset, dtype=dtype,
                              weight_name=weight_name, **kwargs)
    raise ValueError(
        "Unable to find function %r in %r." % (
            full_name, sorted(
                k for k in glo if k.startswith('_onnx_'))))
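

# Example (illustrative sketch, not part of the original module): an unknown
# name raises a ValueError listing every registered '_onnx_*' helper, which
# is a cheap way to discover what is available at runtime.
def _example_unknown_name():  # hypothetical helper for illustration
    try:
        function_onnx_graph('does_not_exist')  # hypothetical wrong name
    except ValueError as e:
        print(e)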


def _onnx_axpy(target_opset=None, dtype=numpy.float32):
    """
    Returns the ONNX graph for function
    :math:`Y = f(X1, X2, \\alpha) = \\alpha X1 + X2`.

    .. gdot::
        :script: DOT-SECTION

        from mlprodict.onnxrt import OnnxInference
        from onnxcustom.utils.onnx_function import function_onnx_graph

        model_onnx = function_onnx_graph('axpy')
        oinf = OnnxInference(model_onnx, inplace=False)

        print("DOT-SECTION", oinf.to_dot())
    """
    from skl2onnx.algebra.onnx_ops import OnnxAdd, OnnxMul
    res = OnnxAdd(OnnxMul('X1', 'alpha', op_version=target_opset),
                  'X2', op_version=target_opset, output_names=['Y'])
    var_type = dtype_to_var_type(dtype)
    varsx = [('X1', var_type()), ('X2', var_type()),
             ('alpha', var_type([1]))]
    onx = res.to_onnx(varsx, outputs=[('Y', var_type())],
                      target_opset=target_opset)
    return onx
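

# Example (illustrative sketch, not part of the original module): evaluate
# the 'axpy' graph with onnxruntime and check Y = alpha * X1 + X2.
def _example_axpy():  # hypothetical helper for illustration
    from onnxruntime import InferenceSession
    model = function_onnx_graph('axpy')
    sess = InferenceSession(model.SerializeToString())
    x1 = numpy.array([[1.0], [2.0]], dtype=numpy.float32)
    x2 = numpy.array([[10.0], [20.0]], dtype=numpy.float32)
    alpha = numpy.array([0.5], dtype=numpy.float32)
    got = sess.run(None, {'X1': x1, 'X2': x2, 'alpha': alpha})[0]
    assert numpy.allclose(got, alpha * x1 + x2)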


def _onnx_axpyw(target_opset=None, dtype=numpy.float32):
    """
    Returns the ONNX graph for function
    :math:`Y, Z = f(X1, X2, G, \\alpha, \\beta) = (Y, Z)`
    where :math:`Z = \\beta G + \\alpha X1` and
    :math:`Y = Z + X2`.

    .. gdot::
        :script: DOT-SECTION

        from mlprodict.onnxrt import OnnxInference
        from onnxcustom.utils.onnx_function import function_onnx_graph

        model_onnx = function_onnx_graph('axpyw')
        oinf = OnnxInference(model_onnx, inplace=False)

        print("DOT-SECTION", oinf.to_dot())
    """
    from skl2onnx.algebra.onnx_ops import OnnxAdd, OnnxMul
    s1 = OnnxMul('X1', 'alpha', op_version=target_opset)
    s2 = OnnxMul('G', 'beta', op_version=target_opset)
    Z = OnnxAdd(s1, s2, op_version=target_opset,
                output_names=['Z'])
    Y = OnnxAdd(Z, 'X2', op_version=target_opset, output_names=['Y'])
    var_type = dtype_to_var_type(dtype)
    varsx = [('X1', var_type()), ('X2', var_type()),
             ('G', var_type()),
             ('alpha', var_type([1])), ('beta', var_type([1]))]
    onx = Y.to_onnx(
        varsx, outputs=[('Y', var_type()), ('Z', var_type())],
        target_opset=target_opset, other_outputs=[Z])
    return onx
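

# Example (illustrative sketch, not part of the original module): 'axpyw'
# returns two outputs; this checks Z = beta * G + alpha * X1 and Y = Z + X2.
def _example_axpyw():  # hypothetical helper for illustration
    from onnxruntime import InferenceSession
    model = function_onnx_graph('axpyw')
    sess = InferenceSession(model.SerializeToString())
    x1 = numpy.array([1.0, 2.0], dtype=numpy.float32)
    x2 = numpy.array([3.0, 4.0], dtype=numpy.float32)
    g = numpy.array([0.5, 0.5], dtype=numpy.float32)
    alpha = numpy.array([0.1], dtype=numpy.float32)
    beta = numpy.array([0.9], dtype=numpy.float32)
    y, z = sess.run(None, {'X1': x1, 'X2': x2, 'G': g,
                           'alpha': alpha, 'beta': beta})
    assert numpy.allclose(z, beta * g + alpha * x1)
    assert numpy.allclose(y, z + x2)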


def _onnx_axpyw2(target_opset=None, dtype=numpy.float32):
    """
    Returns the ONNX graph for function
    :math:`Y, Z = f(X1, X2, G, \\alpha, \\beta) = (Y, Z)`
    where :math:`Z = \\beta G + \\alpha X1` and
    :math:`Y = \\beta Z + \\alpha X1 + X2`.

    .. gdot::
        :script: DOT-SECTION

        from mlprodict.onnxrt import OnnxInference
        from onnxcustom.utils.onnx_function import function_onnx_graph

        model_onnx = function_onnx_graph('axpyw2')
        oinf = OnnxInference(model_onnx, inplace=False)

        print("DOT-SECTION", oinf.to_dot())
    """
    from skl2onnx.algebra.onnx_ops import OnnxAdd, OnnxMul
    s1 = OnnxMul('X1', 'alpha', op_version=target_opset)
    s2 = OnnxMul('G', 'beta', op_version=target_opset)
    Z = OnnxAdd(s1, s2, op_version=target_opset,
                output_names=['Z'])
    s2_2 = OnnxMul(Z, 'beta', op_version=target_opset)
    s2_3 = OnnxAdd(s1, s2_2, op_version=target_opset)
    Y = OnnxAdd(s2_3, 'X2', op_version=target_opset, output_names=['Y'])
    var_type = dtype_to_var_type(dtype)
    varsx = [('X1', var_type()), ('X2', var_type()),
             ('G', var_type()),
             ('alpha', var_type([1])), ('beta', var_type([1]))]
    onx = Y.to_onnx(
        varsx, outputs=[('Y', var_type()), ('Z', var_type())],
        target_opset=target_opset, other_outputs=[Z])
    return onx


def _onnx_square_error(target_opset=None, dtype=numpy.float32,
                       weight_name=None):
    """
    Returns the ONNX graph for function
    :math:`Y = f(X1, X2) = \\lVert X1 - X2 \\rVert ^2` or
    :math:`Y = f(X1, X2) = \\lVert \\sqrt{w} (X1 - X2) \\rVert ^2` if
    *weight_name* is not None.

    .. gdot::
        :script: DOT-SECTION

        from mlprodict.onnxrt import OnnxInference
        from onnxcustom.utils.onnx_function import function_onnx_graph

        model_onnx = function_onnx_graph('square_error')
        oinf = OnnxInference(model_onnx, inplace=False)

        print("DOT-SECTION", oinf.to_dot())
    """
    from skl2onnx.algebra.onnx_ops import (
        OnnxSub, OnnxReduceSumSquare, OnnxReshape,
        OnnxReduceSum, OnnxMul)
    diff = OnnxSub('X1', 'X2', op_version=target_opset)
    if weight_name is None:
        res = OnnxReduceSumSquare(diff, op_version=target_opset)
    else:
        mul = OnnxMul(
            OnnxMul(diff, diff, op_version=target_opset),
            OnnxReshape(weight_name,
                        numpy.array([-1, 1], dtype=numpy.int64),
                        op_version=target_opset),
            op_version=target_opset)
        res = OnnxReduceSum(mul, op_version=target_opset)
    res = OnnxReshape(res, numpy.array([-1], numpy.int64),
                      op_version=target_opset,
                      output_names=['Y'])
    var_type = dtype_to_var_type(dtype)
    varsx = [('X1', var_type([None, None])),
             ('X2', var_type([None, None]))]
    if weight_name is not None:
        varsx.append((weight_name, var_type([None])))
    onx = res.to_onnx(varsx, outputs=[('Y', var_type())],
                      target_opset=target_opset)
    if weight_name is not None:
        onx = add_initializer(
            onx, weight_name, numpy.array([1], dtype=dtype))
    return onx
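

# Example (illustrative sketch, not part of the original module): with
# weight_name set, the weight becomes a graph input backed by a default
# initializer; here we feed it explicitly and compare with numpy.
def _example_square_error_weighted():  # hypothetical helper for illustration
    from onnxruntime import InferenceSession
    model = function_onnx_graph('square_error', weight_name='weight')
    sess = InferenceSession(model.SerializeToString())
    x1 = numpy.array([[0.0], [1.0]], dtype=numpy.float32)
    x2 = numpy.array([[1.0], [2.0]], dtype=numpy.float32)
    w = numpy.array([0.5, 2.0], dtype=numpy.float32)
    got = sess.run(None, {'X1': x1, 'X2': x2, 'weight': w})[0]
    expected = ((x1 - x2) ** 2 * w.reshape((-1, 1))).sum()
    assert numpy.allclose(got, expected)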


def _onnx_grad_square_error(target_opset=None, dtype=numpy.float32,
                            weight_name=None):
    """
    Returns the ONNX graph for the gradient of function
    :math:`Y = f(X1, X2) = \\lVert X1 - X2 \\rVert ^2` or
    :math:`Y = f(X1, X2) = \\lVert \\sqrt{w} (X1 - X2) \\rVert ^2` if
    *weight_name* is not None.

    .. gdot::
        :script: DOT-SECTION

        from mlprodict.onnxrt import OnnxInference
        from onnxcustom.utils.onnx_function import function_onnx_graph

        model_onnx = function_onnx_graph('grad_square_error')
        oinf = OnnxInference(model_onnx, inplace=False)

        print("DOT-SECTION", oinf.to_dot())
    """
    from skl2onnx.algebra.onnx_ops import OnnxSub, OnnxMul, OnnxReshape
    diff = OnnxSub('X1', 'X2', op_version=target_opset)
    if weight_name is None:
        res = OnnxMul(diff, numpy.array([-2], dtype=dtype),
                      op_version=target_opset, output_names=['Y_grad'])
    else:
        res = OnnxMul(
            OnnxMul(diff, numpy.array([-2], dtype=dtype),
                    op_version=target_opset),
            OnnxReshape(weight_name,
                        numpy.array([-1, 1], dtype=numpy.int64),
                        op_version=target_opset),
            op_version=target_opset, output_names=['Y_grad'])
    var_type = dtype_to_var_type(dtype)
    varsx = [('X1', var_type([None, None])), ('X2', var_type([None, None]))]
    if weight_name is not None:
        varsx.append((weight_name, var_type([None])))
    onx = res.to_onnx(varsx, outputs=[('Y_grad', var_type())],
                      target_opset=target_opset)
    if weight_name is not None:
        onx = add_initializer(
            onx, weight_name, numpy.array([1], dtype=dtype))
    return onx
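

# Example (illustrative sketch, not part of the original module): the
# gradient of ||X1 - X2||^2 with respect to X2 is -2 (X1 - X2), which is
# what the graph returns.
def _example_grad_square_error():  # hypothetical helper for illustration
    from onnxruntime import InferenceSession
    model = function_onnx_graph('grad_square_error')
    sess = InferenceSession(model.SerializeToString())
    x1 = numpy.array([[0.0], [1.0]], dtype=numpy.float32)
    x2 = numpy.array([[1.0], [3.0]], dtype=numpy.float32)
    grad = sess.run(None, {'X1': x1, 'X2': x2})[0]
    assert numpy.allclose(grad, -2 * (x1 - x2))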


def _onnx_copy(target_opset=None, dtype=numpy.float32):
    """
    Returns the ONNX graph for function
    :math:`Y = X`.

    .. gdot::
        :script: DOT-SECTION

        from mlprodict.onnxrt import OnnxInference
        from onnxcustom.utils.onnx_function import function_onnx_graph

        model_onnx = function_onnx_graph('copy')
        oinf = OnnxInference(model_onnx, inplace=False)

        print("DOT-SECTION", oinf.to_dot())
    """
    from skl2onnx.algebra.onnx_ops import OnnxIdentity
    res = OnnxIdentity('X', op_version=target_opset,
                       output_names=['Y'])
    var_type = dtype_to_var_type(dtype)
    varsx = [('X', var_type())]
    onx = res.to_onnx(varsx, outputs=[('Y', var_type())],
                      target_opset=target_opset)
    return onx


def _onnx_zero(target_opset=None, dtype=numpy.float32):
    """
    Returns the ONNX graph for function
    :math:`Y = X * 0`.

    .. gdot::
        :script: DOT-SECTION

        from mlprodict.onnxrt import OnnxInference
        from onnxcustom.utils.onnx_function import function_onnx_graph

        model_onnx = function_onnx_graph('zero')
        oinf = OnnxInference(model_onnx, inplace=False)

        print("DOT-SECTION", oinf.to_dot())
    """
    from skl2onnx.algebra.onnx_ops import OnnxMul
    res = OnnxMul('X', numpy.array([0], dtype=dtype),
                  op_version=target_opset,
                  output_names=['Y'])
    var_type = dtype_to_var_type(dtype)
    varsx = [('X', var_type())]
    onx = res.to_onnx(varsx, outputs=[('Y', var_type())],
                      target_opset=target_opset)
    return onx


def _onnx_linear_regression(target_opset=None, dtype=numpy.float32):
    """
    Returns the ONNX graph for function
    :math:`Y = f(X, A, B) = X A + B`.

    .. gdot::
        :script: DOT-SECTION

        from mlprodict.onnxrt import OnnxInference
        from onnxcustom.utils.onnx_function import function_onnx_graph

        model_onnx = function_onnx_graph('linear_regression')
        oinf = OnnxInference(model_onnx, inplace=False)

        print("DOT-SECTION", oinf.to_dot())
    """
    from skl2onnx.algebra.onnx_ops import (
        OnnxMatMul, OnnxAdd)
    res = OnnxAdd(
        OnnxMatMul('X', 'A', op_version=target_opset),
        'B', op_version=target_opset, output_names=['Y'])

    var_type = dtype_to_var_type(dtype)
    varsx = [('X', var_type([None, None])),
             ('A', var_type([None, None])),
             ('B', var_type([None, None]))]
    onx = res.to_onnx(
        varsx, outputs=[('Y', var_type())],
        target_opset=target_opset, other_outputs=[res])
    return onx
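

# Example (illustrative sketch, not part of the original module): run the
# linear regression graph on a small problem and compare with numpy's
# X @ A + B.
def _example_linear_regression():  # hypothetical helper for illustration
    from onnxruntime import InferenceSession
    model = function_onnx_graph('linear_regression')
    sess = InferenceSession(model.SerializeToString())
    x = numpy.array([[1.0, 2.0], [3.0, 4.0]], dtype=numpy.float32)
    a = numpy.array([[1.0], [0.5]], dtype=numpy.float32)
    b = numpy.array([[0.25]], dtype=numpy.float32)
    got = sess.run(None, {'X': x, 'A': a, 'B': b})[0]
    assert numpy.allclose(got, x @ a + b)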


def _onnx_grad_loss_square_error(target_opset=None, dtype=numpy.float32,
                                 weight_name=None, multiply=2):
    """
    Returns the ONNX graph for function
    :math:`Y = f(X1, X2) = \\lVert X1 - X2 \\rVert ^2` or
    :math:`Y = f(X1, X2) = \\lVert \\sqrt{w} (X1 - X2) \\rVert ^2` if
    *weight_name* is not None, and its gradient.

    .. gdot::
        :script: DOT-SECTION

        from mlprodict.onnxrt import OnnxInference
        from onnxcustom.utils.onnx_function import function_onnx_graph

        model_onnx = function_onnx_graph('grad_loss_square_error')
        oinf = OnnxInference(model_onnx, inplace=False)

        print("DOT-SECTION", oinf.to_dot())
    """
    from skl2onnx.algebra.onnx_ops import (
        OnnxSub, OnnxReduceSumSquare, OnnxMul,
        OnnxReduceSum, OnnxReshape)
    diff = OnnxSub('X1', 'X2', op_version=target_opset)
    if weight_name is None:
        res = OnnxMul(OnnxReduceSumSquare(diff, op_version=target_opset),
                      numpy.array([multiply * 0.5], dtype=dtype),
                      op_version=target_opset)
        res2 = OnnxMul(diff, numpy.array([-multiply], dtype=dtype),
                       op_version=target_opset, output_names=['Y_grad'])
    else:
        resh = OnnxReshape(weight_name,
                           numpy.array([-1, 1], dtype=numpy.int64),
                           op_version=target_opset)
        mul = OnnxMul(
            OnnxMul(diff, diff, op_version=target_opset),
            resh, op_version=target_opset)
        res = OnnxMul(OnnxReduceSum(mul, op_version=target_opset),
                      numpy.array([multiply * 0.5], dtype=dtype),
                      op_version=target_opset)

        res2 = OnnxMul(
            OnnxMul(diff, numpy.array([-multiply], dtype=dtype),
                    op_version=target_opset),
            resh, op_version=target_opset, output_names=['Y_grad'])

    res = OnnxReshape(res, numpy.array([-1], numpy.int64),
                      op_version=target_opset,
                      output_names=['Y'])

    var_type = dtype_to_var_type(dtype)
    varsx = [('X1', var_type([None, None])),
             ('X2', var_type([None, None]))]
    if weight_name is not None:
        varsx.append((weight_name, var_type([None])))
    onx = res.to_onnx(
        varsx, outputs=[('Y', var_type()), ('Y_grad', var_type())],
        target_opset=target_opset, other_outputs=[res2])
    if weight_name is not None:
        onx = add_initializer(
            onx, weight_name, numpy.array([1], dtype=dtype))
    return onx
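

# Example (illustrative sketch, not part of the original module): with the
# default multiply=2, Y is the sum of squared differences and Y_grad is
# -2 (X1 - X2), i.e. the gradient with respect to X2.
def _example_grad_loss_square_error():  # hypothetical helper for illustration
    from onnxruntime import InferenceSession
    model = function_onnx_graph('grad_loss_square_error')
    sess = InferenceSession(model.SerializeToString())
    x1 = numpy.array([[0.0], [1.0]], dtype=numpy.float32)
    x2 = numpy.array([[1.0], [3.0]], dtype=numpy.float32)
    loss, grad = sess.run(None, {'X1': x1, 'X2': x2})
    assert numpy.allclose(loss, ((x1 - x2) ** 2).sum())
    assert numpy.allclose(grad, -2 * (x1 - x2))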


def _onnx_grad_loss_absolute_error(target_opset=None, dtype=numpy.float32,
                                   weight_name=None):
    """
    Returns the ONNX graph for function
    :math:`Y = f(X1, X2) = \\lVert X1 - X2 \\rVert_1` or
    :math:`Y = f(X1, X2) = \\lVert w (X1 - X2) \\rVert_1` if
    *weight_name* is not None, and its gradient.

    .. gdot::
        :script: DOT-SECTION

        from mlprodict.onnxrt import OnnxInference
        from onnxcustom.utils.onnx_function import function_onnx_graph

        model_onnx = function_onnx_graph('grad_loss_absolute_error')
        oinf = OnnxInference(model_onnx, inplace=False)

        print("DOT-SECTION", oinf.to_dot())
    """
    from skl2onnx.algebra.onnx_ops import (
        OnnxSub, OnnxMul, OnnxReduceSum, OnnxReshape,
        OnnxSign, OnnxAbs)
    diff = OnnxSub('X1', 'X2', op_version=target_opset)
    abs_diff = OnnxAbs(diff, op_version=target_opset)
    if weight_name is None:
        res = OnnxReduceSum(abs_diff, op_version=target_opset)
        res2 = OnnxSign(diff, op_version=target_opset,
                        output_names=['Y_grad'])
    else:
        resh = OnnxReshape(weight_name,
                           numpy.array([-1, 1], dtype=numpy.int64),
                           op_version=target_opset)
        mul = OnnxMul(abs_diff, resh, op_version=target_opset)
        res = OnnxReduceSum(mul, op_version=target_opset)
        res2 = OnnxMul(
            OnnxSign(diff, op_version=target_opset),
            resh, op_version=target_opset, output_names=['Y_grad'])

    res = OnnxReshape(res, numpy.array([-1], numpy.int64),
                      op_version=target_opset,
                      output_names=['Y'])
    var_type = dtype_to_var_type(dtype)
    varsx = [('X1', var_type([None, None])),
             ('X2', var_type([None, None]))]
    if weight_name is not None:
        varsx.append((weight_name, var_type([None])))
    onx = res.to_onnx(
        varsx, outputs=[('Y', var_type()), ('Y_grad', var_type())],
        target_opset=target_opset, other_outputs=[res2])
    if weight_name is not None:
        onx = add_initializer(
            onx, weight_name, numpy.array([1], dtype=dtype))
    return onx


def _onnx_grad_loss_elastic_error(target_opset=None, dtype=numpy.float32,
                                  weight_name=None,
                                  l1_weight=0.01, l2_weight=0.01):
    """
    Returns the ONNX graph for function
    :math:`Y = f(X1, X2) = \\beta \\lVert X1 - X2 \\rVert_1 +
    \\alpha \\lVert X1 - X2 \\rVert^2` or
    :math:`Y = f(X1, X2) = \\beta \\lVert w(X1 - X2) \\rVert_1 +
    \\alpha \\lVert \\sqrt{w}(X1 - X2) \\rVert^2` if
    *weight_name* is not None, and its gradient.
    *l1_weight* is :math:`\\beta` and
    *l2_weight* is :math:`\\alpha`.

    .. gdot::
        :script: DOT-SECTION

        from mlprodict.onnxrt import OnnxInference
        from onnxcustom.utils.onnx_function import function_onnx_graph

        model_onnx = function_onnx_graph('grad_loss_elastic_error')
        oinf = OnnxInference(model_onnx, inplace=False)

        print("DOT-SECTION", oinf.to_dot())
    """
    from skl2onnx.algebra.onnx_ops import (
        OnnxSub, OnnxMul, OnnxAdd, OnnxIdentity,
        OnnxReduceSum, OnnxReshape, OnnxSign, OnnxAbs)
    diff = OnnxSub('X1', 'X2', op_version=target_opset)
    abs_diff = OnnxAbs(diff, op_version=target_opset)

    # loss
    abs_diff_l1 = OnnxMul(
        abs_diff, numpy.array([l1_weight], dtype=dtype),
        op_version=target_opset)
    diff_l2 = OnnxMul(
        OnnxMul(diff, diff, op_version=target_opset),
        numpy.array([l2_weight], dtype=dtype),
        op_version=target_opset)
    score = OnnxAdd(abs_diff_l1, diff_l2, op_version=target_opset)

    # gradient
    grad_l1 = OnnxMul(
        OnnxSign(diff, op_version=target_opset),
        numpy.array([l1_weight], dtype=dtype),
        op_version=target_opset)
    grad_l2 = OnnxMul(
        diff, numpy.array([l2_weight * -2], dtype=dtype),
        op_version=target_opset)
    grad = OnnxAdd(grad_l1, grad_l2, op_version=target_opset)

    if weight_name is None:
        res = OnnxReduceSum(score, op_version=target_opset)
        res2 = OnnxIdentity(grad, op_version=target_opset,
                            output_names=['Y_grad'])
    else:
        resh = OnnxReshape(weight_name,
                           numpy.array([-1, 1], dtype=numpy.int64),
                           op_version=target_opset)
        res = OnnxReduceSum(
            OnnxMul(score, resh, op_version=target_opset),
            op_version=target_opset)
        res2 = OnnxMul(grad, resh, op_version=target_opset,
                       output_names=['Y_grad'])

    res = OnnxReshape(res, numpy.array([-1], numpy.int64),
                      op_version=target_opset,
                      output_names=['Y'])

    var_type = dtype_to_var_type(dtype)
    varsx = [('X1', var_type([None, None])),
             ('X2', var_type([None, None]))]
    if weight_name is not None:
        varsx.append((weight_name, var_type([None])))
    onx = res.to_onnx(
        varsx, outputs=[('Y', var_type()), ('Y_grad', var_type())],
        target_opset=target_opset, other_outputs=[res2])
    if weight_name is not None:
        onx = add_initializer(
            onx, weight_name, numpy.array([1], dtype=dtype))
    return onx
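

# Example (illustrative sketch, not part of the original module): the
# elastic loss combines L1 and L2 terms; check against a numpy computation
# with the default l1_weight = l2_weight = 0.01.
def _example_grad_loss_elastic_error():  # hypothetical helper
    from onnxruntime import InferenceSession
    model = function_onnx_graph('grad_loss_elastic_error')
    sess = InferenceSession(model.SerializeToString())
    x1 = numpy.array([[0.0], [1.0]], dtype=numpy.float32)
    x2 = numpy.array([[1.0], [3.0]], dtype=numpy.float32)
    loss, grad = sess.run(None, {'X1': x1, 'X2': x2})
    d = x1 - x2
    assert numpy.allclose(loss, 0.01 * numpy.abs(d).sum()
                          + 0.01 * (d ** 2).sum())
    assert numpy.allclose(grad, 0.01 * numpy.sign(d) - 0.02 * d)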


def _onnx_grad_penalty_elastic_error(target_opset=None, dtype=numpy.float32,
                                     l1_weight=0.01, l2_weight=0.01):
    """
    Returns the ONNX graph for function
    :math:`Y = f(W) = \\beta \\lVert W \\rVert_1 +
    \\alpha \\lVert W \\rVert^2`.
    *l1_weight* is :math:`\\beta` and
    *l2_weight* is :math:`\\alpha`.

    .. gdot::
        :script: DOT-SECTION

        from mlprodict.onnxrt import OnnxInference
        from onnxcustom.utils.onnx_function import function_onnx_graph

        model_onnx = function_onnx_graph('grad_penalty_elastic_error')
        oinf = OnnxInference(model_onnx, inplace=False)

        print("DOT-SECTION", oinf.to_dot())
    """
    from skl2onnx.algebra.onnx_ops import (
        OnnxMul, OnnxAdd, OnnxReduceSumSquare,
        OnnxReduceSum, OnnxSign, OnnxAbs, OnnxReshape)
    diff = 'X'
    abs_diff = OnnxAbs(diff, op_version=target_opset)
    res_l1 = OnnxReduceSum(abs_diff, op_version=target_opset)
    res2_l1 = OnnxSign(diff, op_version=target_opset)
    res_l2 = OnnxReduceSumSquare(diff, op_version=target_opset)
    res2_l2 = diff

    res = OnnxAdd(
        OnnxMul(res_l1, numpy.array([l1_weight], dtype=dtype),
                op_version=target_opset),
        OnnxMul(res_l2, numpy.array([l2_weight], dtype=dtype),
                op_version=target_opset),
        op_version=target_opset)
    res = OnnxReshape(res, numpy.array([-1], numpy.int64),
                      op_version=target_opset,
                      output_names=['Y'])

    res2 = OnnxAdd(
        OnnxMul(res2_l1, numpy.array([l1_weight], dtype=dtype),
                op_version=target_opset),
        OnnxMul(res2_l2, numpy.array([l2_weight * 2], dtype=dtype),
                op_version=target_opset),
        op_version=target_opset, output_names=['Y_grad'])

    var_type = dtype_to_var_type(dtype)
    varsx = [('X', var_type([None, None]))]
    onx = res.to_onnx(
        varsx, outputs=[('Y', var_type([None])), ('Y_grad', var_type())],
        target_opset=target_opset, other_outputs=[res2])
    return onx
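

# Example (illustrative sketch, not part of the original module): the penalty
# graph returns both the elastic penalty of a weight tensor and its gradient
# l1_weight * sign(W) + 2 * l2_weight * W.
def _example_grad_penalty_elastic_error():  # hypothetical helper
    from onnxruntime import InferenceSession
    model = function_onnx_graph('grad_penalty_elastic_error')
    sess = InferenceSession(model.SerializeToString())
    w = numpy.array([[-1.0, 2.0]], dtype=numpy.float32)
    penalty, grad = sess.run(None, {'X': w})
    assert numpy.allclose(penalty, 0.01 * numpy.abs(w).sum()
                          + 0.01 * (w ** 2).sum())
    assert numpy.allclose(grad, 0.01 * numpy.sign(w) + 0.02 * w)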


def _onnx_n_penalty_elastic_error(target_opset=None, dtype=numpy.float32,
                                  weight_name=None,
                                  l1_weight=0.01, l2_weight=0.01, n_tensors=1,
                                  loss_shape=(1, 1)):
    """
    Returns the ONNX graph for function
    :math:`Y = f(W) = \\beta \\lVert W \\rVert_1 +
    \\alpha \\lVert W \\rVert^2`.
    *l1_weight* is :math:`\\beta` and
    *l2_weight* is :math:`\\alpha`.
    It computes this penalty for *n_tensors* tensors and adds
    all the results to an input loss.

    .. gdot::
        :script: DOT-SECTION

        from mlprodict.onnxrt import OnnxInference
        from onnxcustom.utils.onnx_function import function_onnx_graph

        model_onnx = function_onnx_graph(
            'n_penalty_elastic_error', n_tensors=2)
        oinf = OnnxInference(model_onnx, inplace=False)

        print("DOT-SECTION", oinf.to_dot())
    """
    from skl2onnx.algebra.onnx_ops import (
        OnnxMul, OnnxAdd, OnnxReduceSumSquare,
        OnnxReduceSum, OnnxAbs, OnnxReshape)

    if n_tensors <= 0:
        raise ValueError(  # pragma: no cover
            "This function is useless if the number of tensors is null.")

    var_type = dtype_to_var_type(dtype)
    varsx = [('loss', var_type(loss_shape))]
    names = ['loss']
    for n in range(n_tensors):
        name = 'W%d' % n
        abs_diff = OnnxAbs(name, op_version=target_opset)
        res_l1 = OnnxReduceSum(abs_diff, op_version=target_opset)
        res_l2 = OnnxReduceSumSquare(name, op_version=target_opset)
        res = OnnxAdd(
            OnnxMul(res_l1, numpy.array([l1_weight], dtype=dtype),
                    op_version=target_opset),
            OnnxMul(res_l2, numpy.array([l2_weight], dtype=dtype),
                    op_version=target_opset),
            op_version=target_opset)
        names.append(res)
        varsx.append(('W%d' % n, var_type()))

    if len(names) == 2:
        res = OnnxAdd(*names, op_version=target_opset)
    else:
        res = OnnxAdd(names[1], names[2], op_version=target_opset)
        for i in range(3, len(names)):
            res = OnnxAdd(res, names[i], op_version=target_opset)
        res = OnnxAdd(names[0], res, op_version=target_opset)

    res = OnnxReshape(res, numpy.array([-1], numpy.int64),
                      op_version=target_opset,
                      output_names=['Y'])
    onx = res.to_onnx(
        varsx, outputs=[('Y', var_type([None]))],
        target_opset=target_opset)
    return onx
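

# Example (illustrative sketch, not part of the original module): with
# n_tensors=2 the graph takes an existing loss plus two weight tensors
# W0, W1 and returns loss + penalty(W0) + penalty(W1).
def _example_n_penalty_elastic_error():  # hypothetical helper
    from onnxruntime import InferenceSession
    model = function_onnx_graph('n_penalty_elastic_error', n_tensors=2)
    sess = InferenceSession(model.SerializeToString())
    loss = numpy.array([[1.0]], dtype=numpy.float32)
    w0 = numpy.array([-1.0, 2.0], dtype=numpy.float32)
    w1 = numpy.array([0.5], dtype=numpy.float32)
    got = sess.run(None, {'loss': loss, 'W0': w0, 'W1': w1})[0]
    penalty = sum(0.01 * numpy.abs(w).sum() + 0.01 * (w ** 2).sum()
                  for w in (w0, w1))
    assert numpy.allclose(got, 1.0 + penalty)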


def _onnx_update_penalty_elastic_error(target_opset=None, dtype=numpy.float32,
                                       l1=1e-4, l2=1e-4):
    """
    Returns the ONNX graph for function
    :math:`Y = f(W) = W - 2 \\alpha W - \\beta sign(W)`.
    *l1* is :math:`\\beta` and
    *l2* is :math:`\\alpha`.

    .. gdot::
        :script: DOT-SECTION

        from mlprodict.onnxrt import OnnxInference
        from onnxcustom.utils.onnx_function import function_onnx_graph

        model_onnx = function_onnx_graph(
            'update_penalty_elastic_error')
        oinf = OnnxInference(model_onnx, inplace=False)

        print("DOT-SECTION", oinf.to_dot())
    """
    from skl2onnx.algebra.onnx_ops import (
        OnnxSub, OnnxMul, OnnxSign)

    res = OnnxSub(
        OnnxMul('X', numpy.array([1 - 2 * l2], dtype=dtype),
                op_version=target_opset),
        OnnxMul(OnnxSign('X', op_version=target_opset),
                numpy.array([l1], dtype=dtype),
                op_version=target_opset),
        op_version=target_opset,
        output_names=['Y'])

    var_type = dtype_to_var_type(dtype)
    varsx = [('X', var_type())]
    onx = res.to_onnx(
        varsx, outputs=[('Y', var_type())],
        target_opset=target_opset)
    return onx
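

# Example (illustrative sketch, not part of the original module): one update
# step shrinks the weights, W * (1 - 2 * l2) - l1 * sign(W), with the
# default l1 = l2 = 1e-4.
def _example_update_penalty_elastic_error():  # hypothetical helper
    from onnxruntime import InferenceSession
    model = function_onnx_graph('update_penalty_elastic_error')
    sess = InferenceSession(model.SerializeToString())
    w = numpy.array([-1.0, 2.0], dtype=numpy.float32)
    got = sess.run(None, {'X': w})[0]
    expected = w * (1 - 2e-4) - 1e-4 * numpy.sign(w)
    assert numpy.allclose(got, expected)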


def _onnx_grad_sigmoid_neg_log_loss_error(target_opset=None,
                                          dtype=numpy.float32,
                                          eps=1e-5,
                                          weight_name=None):
    """
    The function takes the raw scores from a classifier, applies the
    sigmoid function to compute probabilities, then the log function
    to compute the loss. It creates the ONNX graph for this function
    and the associated gradient of the loss against the raw scores.

    Probabilities (class 1): :math:`p(s) = \\frac{1}{1 + \\exp(-s)}`.
    Loss (for two classes): :math:`L(y, s) = -(1 - y)\\log(1 - p(s)) -
    y \\log(p(s))`.
    Gradient: :math:`\\frac{dL(y, s)}{ds} = p(s) - y`.
    To avoid nan values, probabilities are clipped:
    :math:`p(s) = \\max(\\min(p(s), 1 - \\epsilon), \\epsilon)`.
    :math:`y \\in \\{0, 1\\}` (integer). *s* is a float.

    :param eps: to clip probabilities and avoid computing `log(0)`

    .. gdot::
        :script: DOT-SECTION

        from mlprodict.onnxrt import OnnxInference
        from onnxcustom.utils.onnx_function import function_onnx_graph

        model_onnx = function_onnx_graph('grad_sigmoid_neg_log_loss_error')
        oinf = OnnxInference(model_onnx, inplace=False)

        print("DOT-SECTION", oinf.to_dot())
    """
    try:
        from onnx.helper import np_dtype_to_tensor_dtype
    except ImportError:
        # older versions of onnx do not expose np_dtype_to_tensor_dtype
        from onnx.mapping import NP_TYPE_TO_TENSOR_TYPE

        def np_dtype_to_tensor_dtype(dtype):
            return NP_TYPE_TO_TENSOR_TYPE[dtype]

    from skl2onnx.algebra.onnx_ops import (
        OnnxSub, OnnxMul, OnnxSigmoid, OnnxLog, OnnxNeg,
        OnnxReduceSum, OnnxReshape, OnnxAdd, OnnxCast, OnnxClip)

    p1c = OnnxSigmoid('X2', op_version=target_opset)
    p1 = OnnxClip(p1c, numpy.array([eps], dtype=dtype),
                  numpy.array([1 - eps], dtype=dtype),
                  op_version=target_opset)
    p0 = OnnxSub(numpy.array([1], dtype=dtype), p1,
                 op_version=target_opset)
    y1 = OnnxCast('X1', to=np_dtype_to_tensor_dtype(numpy.dtype(dtype)),
                  op_version=target_opset)
    y0 = OnnxSub(numpy.array([1], dtype=dtype), y1,
                 op_version=target_opset)
    loss_obs = OnnxAdd(
        OnnxMul(y0, OnnxLog(p0, op_version=target_opset),
                op_version=target_opset),
        OnnxMul(y1, OnnxLog(p1, op_version=target_opset),
                op_version=target_opset),
        op_version=target_opset)

    loss_neg = OnnxNeg(loss_obs, op_version=target_opset)
    if weight_name is None:
        loss = OnnxReduceSum(loss_neg, op_version=target_opset)
        grad = OnnxSub(p1, y1, op_version=target_opset,
                       output_names=['Y_grad'])
    else:
        loss = OnnxReduceSum(
            OnnxMul(loss_neg,
                    OnnxReshape(
                        weight_name, numpy.array([-1, 1], dtype=numpy.int64),
                        op_version=target_opset),
                    op_version=target_opset),
            op_version=target_opset)
        grad = OnnxMul(
            OnnxSub(p1, y1, op_version=target_opset),
            OnnxReshape(weight_name, numpy.array([-1, 1], dtype=numpy.int64),
                        op_version=target_opset),
            output_names=['Y_grad'], op_version=target_opset)

    res = OnnxReshape(loss, numpy.array([-1], numpy.int64),
                      op_version=target_opset,
                      output_names=['Y'])

    var_type_int64 = dtype_to_var_type(numpy.int64)
    var_type = dtype_to_var_type(dtype)
    varsx = [('X1', var_type_int64([None, None])),
             ('X2', var_type([None, None]))]
    if weight_name is not None:
        varsx.append((weight_name, var_type([None])))
    onx = res.to_onnx(
        varsx, outputs=[('Y', var_type()), ('Y_grad', var_type())],
        target_opset=target_opset, other_outputs=[grad])
    if weight_name is not None:
        onx = add_initializer(
            onx, weight_name, numpy.array([1], dtype=dtype))
    return onx
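

# Example (illustrative sketch, not part of the original module): compare the
# graph's negative log loss and gradient with a direct numpy computation;
# the eps clipping does not matter for these moderate scores.
def _example_grad_sigmoid_neg_log_loss_error():  # hypothetical helper
    from onnxruntime import InferenceSession
    model = function_onnx_graph('grad_sigmoid_neg_log_loss_error')
    sess = InferenceSession(model.SerializeToString())
    y = numpy.array([[0], [1]], dtype=numpy.int64)
    s = numpy.array([[-0.5], [0.25]], dtype=numpy.float32)
    loss, grad = sess.run(None, {'X1': y, 'X2': s})
    p = 1.0 / (1.0 + numpy.exp(-s))
    expected = -((1 - y) * numpy.log(1 - p) + y * numpy.log(p)).sum()
    assert numpy.allclose(loss, expected, rtol=1e-4)
    assert numpy.allclose(grad, p - y, rtol=1e-4)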