Coverage for mlprodict/onnxrt/ops_cpu/op_adagrad.py: 100%

28 statements  

coverage.py v7.1.0, created at 2023-02-04 02:28 +0100

# -*- encoding: utf-8 -*-
# pylint: disable=E0203,E1101,C0111
"""
@file
@brief Runtime operator.
"""
import numpy
from ._op import OpRun


def _apply_adagrad(r, t, x, g, h, norm_coefficient,
                   epsilon, decay_factor):
    # Compute the adjusted learning rate.
    r_ = r / (1 + t * decay_factor)
    # Add the gradient of the regularization term.
    g_regularized = norm_coefficient * x + g
    # Update the squared accumulated gradient.
    h_new = h + g_regularized * g_regularized
    # Compute ADAGRAD's gradient scaling factor.
    h_sqrt = numpy.sqrt(h_new) + epsilon
    # Apply the ADAGRAD update rule.
    x_new = x - r_ * g_regularized / h_sqrt
    return (x_new, h_new)
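
As a quick sanity check of the update rule, the following sketch runs one
step of _apply_adagrad on a two-element tensor with the default attribute
values from Adagrad.atts below; the input values are made up for
illustration:

    r = numpy.array(0.1, dtype=numpy.float32)   # learning rate R
    t = numpy.array(0, dtype=numpy.int64)       # training step T
    x = numpy.array([1., 2.], dtype=numpy.float32)
    g = numpy.array([.5, -.5], dtype=numpy.float32)
    h = numpy.zeros(2, dtype=numpy.float32)     # no accumulated gradient yet

    x_new, h_new = _apply_adagrad(r, t, x, g, h,
                                  norm_coefficient=0.,
                                  epsilon=9.999999974752427e-07,
                                  decay_factor=0.)
    # With decay_factor=0 the adjusted rate stays 0.1,
    # h_new = g * g = [0.25, 0.25], and
    # x_new = x - 0.1 * g / (0.5 + epsilon) ~= [0.9, 2.1].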

class Adagrad(OpRun):

    atts = {'decay_factor': 0.,
            'epsilon': 9.999999974752427e-07,
            'norm_coefficient': 0.}

    def __init__(self, onnx_node, desc=None, **options):
        OpRun.__init__(self, onnx_node, desc=desc,
                       expected_attributes=Adagrad.atts,
                       **options)

    def _run(self, *data, attributes=None, verbose=0, fLOG=None):  # pylint: disable=W0221
        # Single-tensor case: data is (R, T, X, G, H).
        if len(data) == 5:
            return self._run1(*data)
        # Multi-tensor case: data is (R, T, X1..Xn, G1..Gn, H1..Hn),
        # i.e. n groups of (X, G, H) follow the shared R and T.
        n = (len(data) - 2) // 3
        xs = []
        hs = []
        for i in range(n):
            a, b = self._run1(*data[:2], data[2 + i],
                              data[2 + n + i], data[2 + n * 2 + i])
            xs.append(a)
            hs.append(b)
        # Outputs are the updated tensors followed by the updated accumulators.
        return tuple(xs + hs)

    def _run1(self, r, t, x, g, h):  # pylint: disable=W0221
        x_new, h_new = _apply_adagrad(
            r, t, x, g, h, self.norm_coefficient, self.epsilon, self.decay_factor)
        return x_new, h_new
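
The variadic layout handled by _run follows the ONNX Adagrad signature:
after the shared learning rate R and step count T, the remaining inputs come
as n tensors X1..Xn, then their gradients G1..Gn, then their accumulators
H1..Hn, and the outputs are X1_new..Xn_new followed by H1_new..Hn_new. The
sketch below mirrors that slicing with plain numpy calls to _apply_adagrad;
run_adagrad_sketch is a hypothetical helper written for this illustration,
not part of the module:

    def run_adagrad_sketch(*data, norm_coefficient=0., epsilon=1e-6,
                           decay_factor=0.):
        # data = (R, T, X1..Xn, G1..Gn, H1..Hn), as in Adagrad._run.
        r, t = data[:2]
        n = (len(data) - 2) // 3
        xs, hs = [], []
        for i in range(n):
            x_new, h_new = _apply_adagrad(
                r, t, data[2 + i], data[2 + n + i], data[2 + 2 * n + i],
                norm_coefficient, epsilon, decay_factor)
            xs.append(x_new)
            hs.append(h_new)
        return tuple(xs + hs)

    # Two optimized tensors (n=2): inputs are R, T, X1, X2, G1, G2, H1, H2.
    r = numpy.array(0.1, dtype=numpy.float32)
    t = numpy.array(0, dtype=numpy.int64)
    x1 = numpy.array([1.], dtype=numpy.float32)
    x2 = numpy.array([2., 3.], dtype=numpy.float32)
    g1 = numpy.array([.5], dtype=numpy.float32)
    g2 = numpy.array([-1., 1.], dtype=numpy.float32)
    h1 = numpy.ones(1, dtype=numpy.float32)
    h2 = numpy.ones(2, dtype=numpy.float32)
    x1_new, x2_new, h1_new, h2_new = run_adagrad_sketch(
        r, t, x1, x2, g1, g2, h1, h2)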