import numpy as np
from .base import OdeSolver, DenseOutput
from .common import (validate_max_step, validate_tol, select_initial_step,
                     norm, warn_extraneous, validate_first_step)
from . import dop853_coefficients

# Multiply steps computed from asymptotic behaviour of errors by this.
SAFETY = 0.9

MIN_FACTOR = 0.2  # Minimum allowed decrease in a step size.
MAX_FACTOR = 10  # Maximum allowed increase in a step size.
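

# Illustrative sketch (not part of the public API): the constants above bound
# the step-size update used by the solvers below. Combining the accepted and
# rejected branches of `_step_impl` into one formula, the multiplier applied
# to the step is clip(SAFETY * error_norm ** (-1 / (q + 1)), MIN_FACTOR,
# MAX_FACTOR), where q is the order of the embedded error estimator. The
# helper name `_example_step_factor` is ours, for illustration only.
def _example_step_factor(error_norm, error_estimator_order):
    """Return the step-size multiplier implied by a scaled error norm."""
    if error_norm == 0:
        return MAX_FACTOR
    factor = SAFETY * error_norm ** (-1 / (error_estimator_order + 1))
    return min(MAX_FACTOR, max(MIN_FACTOR, factor))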


def rk_step(fun, t, y, f, h, A, B, C, K):
    """Perform a single Runge-Kutta step.

    This function computes a prediction of an explicit Runge-Kutta method and
    also estimates the error of a less accurate method.

    Notation for Butcher tableau is as in [1]_.

    Parameters
    ----------
    fun : callable
        Right-hand side of the system.
    t : float
        Current time.
    y : ndarray, shape (n,)
        Current state.
    f : ndarray, shape (n,)
        Current value of the derivative, i.e., ``fun(t, y)``.
    h : float
        Step to use.
    A : ndarray, shape (n_stages, n_stages)
        Coefficients for combining previous RK stages to compute the next
        stage. For explicit methods the coefficients at and above the main
        diagonal are zeros.
    B : ndarray, shape (n_stages,)
        Coefficients for combining RK stages for computing the final
        prediction.
    C : ndarray, shape (n_stages,)
        Coefficients for incrementing time for consecutive RK stages.
        The value for the first stage is always zero.
    K : ndarray, shape (n_stages + 1, n)
        Storage array for putting RK stages here. Stages are stored in rows.
        The last row stores ``fun(t + h, y_new)``, the derivative at the
        updated solution.

    Returns
    -------
    y_new : ndarray, shape (n,)
        Solution at t + h computed with a higher accuracy.
    f_new : ndarray, shape (n,)
        Derivative ``fun(t + h, y_new)``.

    References
    ----------
    .. [1] E. Hairer, S. P. Norsett, G. Wanner, "Solving Ordinary Differential
           Equations I: Nonstiff Problems", Sec. II.4.
    """
    K[0] = f
    for s, (a, c) in enumerate(zip(A[1:], C[1:]), start=1):
        dy = np.dot(K[:s].T, a[:s]) * h
        K[s] = fun(t + c * h, y + dy)

    y_new = y + h * np.dot(K[:-1].T, B)
    f_new = fun(t + h, y_new)

    K[-1] = f_new

    return y_new, f_new
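

# Illustrative sketch (not part of the public API): `rk_step` can be driven
# directly with a hand-written Butcher tableau. Here we use Heun's two-stage
# method, chosen only as a compact example and not one of the tableaus defined
# below. For dy/dt = -y with y(0) = 1, one step of size 0.1 lands close to
# exp(-0.1).
def _example_rk_step_heun():
    def fun(t, y):
        return -y

    t, y, h = 0.0, np.array([1.0]), 0.1
    A = np.array([[0.0, 0.0],
                  [1.0, 0.0]])
    B = np.array([0.5, 0.5])
    C = np.array([0.0, 1.0])
    K = np.empty((3, 1))  # n_stages + 1 rows, n columns.
    f = fun(t, y)
    y_new, f_new = rk_step(fun, t, y, f, h, A, B, C, K)
    return y_new  # Approximately exp(-0.1) ~= 0.9048.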


class RungeKutta(OdeSolver):
    """Base class for explicit Runge-Kutta methods."""
    C: np.ndarray = NotImplemented
    A: np.ndarray = NotImplemented
    B: np.ndarray = NotImplemented
    E: np.ndarray = NotImplemented
    P: np.ndarray = NotImplemented
    order: int = NotImplemented
    error_estimator_order: int = NotImplemented
    n_stages: int = NotImplemented

    def __init__(self, fun, t0, y0, t_bound, max_step=np.inf,
                 rtol=1e-3, atol=1e-6, vectorized=False,
                 first_step=None, **extraneous):
        warn_extraneous(extraneous)
        super().__init__(fun, t0, y0, t_bound, vectorized,
                         support_complex=True)
        self.y_old = None
        self.max_step = validate_max_step(max_step)
        self.rtol, self.atol = validate_tol(rtol, atol, self.n)
        self.f = self.fun(self.t, self.y)
        if first_step is None:
            self.h_abs = select_initial_step(
                self.fun, self.t, self.y, self.f, self.direction,
                self.error_estimator_order, self.rtol, self.atol)
        else:
            self.h_abs = validate_first_step(first_step, t0, t_bound)
        self.K = np.empty((self.n_stages + 1, self.n), dtype=self.y.dtype)
        self.error_exponent = -1 / (self.error_estimator_order + 1)
        self.h_previous = None

    def _estimate_error(self, K, h):
        return np.dot(K.T, self.E) * h

    def _estimate_error_norm(self, K, h, scale):
        return norm(self._estimate_error(K, h) / scale)

    def _step_impl(self):
        t = self.t
        y = self.y

        max_step = self.max_step
        rtol = self.rtol
        atol = self.atol

        min_step = 10 * np.abs(np.nextafter(t, self.direction * np.inf) - t)

        if self.h_abs > max_step:
            h_abs = max_step
        elif self.h_abs < min_step:
            h_abs = min_step
        else:
            h_abs = self.h_abs

        step_accepted = False
        step_rejected = False

        while not step_accepted:
            if h_abs < min_step:
                return False, self.TOO_SMALL_STEP

            h = h_abs * self.direction
            t_new = t + h

            if self.direction * (t_new - self.t_bound) > 0:
                t_new = self.t_bound

            h = t_new - t
            h_abs = np.abs(h)

            y_new, f_new = rk_step(self.fun, t, y, self.f, h, self.A,
                                   self.B, self.C, self.K)
            scale = atol + np.maximum(np.abs(y), np.abs(y_new)) * rtol
            error_norm = self._estimate_error_norm(self.K, h, scale)

            if error_norm < 1:
                if error_norm == 0:
                    factor = MAX_FACTOR
                else:
                    factor = min(MAX_FACTOR,
                                 SAFETY * error_norm ** self.error_exponent)

                if step_rejected:
                    factor = min(1, factor)

                h_abs *= factor

                step_accepted = True
            else:
                h_abs *= max(MIN_FACTOR,
                             SAFETY * error_norm ** self.error_exponent)
                step_rejected = True

        self.h_previous = h
        self.y_old = y

        self.t = t_new
        self.y = y_new

        self.h_abs = h_abs
        self.f = f_new

        return True, None

    def _dense_output_impl(self):
        Q = self.K.T.dot(self.P)
        return RkDenseOutput(self.t_old, self.t, self.y_old, Q)
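

# Illustrative sketch (not part of the public API): the acceptance test in
# `_step_impl` scales the error estimate componentwise by
# atol + rtol * max(|y|, |y_new|) and accepts the step when the scaled error
# norm (the `norm` helper from .common) is below 1. The helper name
# `_example_step_accepted` is ours, for illustration only.
def _example_step_accepted(error, y, y_new, rtol=1e-3, atol=1e-6):
    scale = atol + np.maximum(np.abs(y), np.abs(y_new)) * rtol
    return norm(error / scale) < 1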


class RK23(RungeKutta):
    """Explicit Runge-Kutta method of order 3(2).

    This uses the Bogacki-Shampine pair of formulas [1]_. The error is controlled
    assuming accuracy of the second-order method, but steps are taken using the
    third-order accurate formula (local extrapolation is done). A cubic Hermite
    polynomial is used for the dense output.

    Can be applied in the complex domain.

    Parameters
    ----------
    fun : callable
        Right-hand side of the system. The calling signature is ``fun(t, y)``.
        Here ``t`` is a scalar and there are two options for ndarray ``y``.
        It can either have shape (n,), then ``fun`` must return array_like with
        shape (n,). Or alternatively it can have shape (n, k), then ``fun``
        must return array_like with shape (n, k), i.e. each column
        corresponds to a single column in ``y``. The choice between the two
        options is determined by the `vectorized` argument (see below).
    t0 : float
        Initial time.
    y0 : array_like, shape (n,)
        Initial state.
    t_bound : float
        Boundary time - the integration won't continue beyond it. It also
        determines the direction of the integration.
    first_step : float or None, optional
        Initial step size. Default is ``None`` which means that the algorithm
        should choose.
    max_step : float, optional
        Maximum allowed step size. Default is np.inf, i.e., the step size is not
        bounded and determined solely by the solver.
    rtol, atol : float and array_like, optional
        Relative and absolute tolerances. The solver keeps the local error
        estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
        relative accuracy (number of correct digits), while `atol` controls
        absolute accuracy (number of correct decimal places). To achieve the
        desired `rtol`, set `atol` to be smaller than the smallest value that
        can be expected from ``rtol * abs(y)`` so that `rtol` dominates the
        allowable error. If `atol` is larger than ``rtol * abs(y)`` the
        number of correct digits is not guaranteed. Conversely, to achieve the
        desired `atol` set `rtol` such that ``rtol * abs(y)`` is always smaller
        than `atol`. If components of y have different scales, it might be
        beneficial to set different `atol` values for different components by
        passing array_like with shape (n,) for `atol`. Default values are
        1e-3 for `rtol` and 1e-6 for `atol`.
    vectorized : bool, optional
        Whether `fun` is implemented in a vectorized fashion. Default is False.

    Attributes
    ----------
    n : int
        Number of equations.
    status : string
        Current status of the solver: 'running', 'finished' or 'failed'.
    t_bound : float
        Boundary time.
    direction : float
        Integration direction: +1 or -1.
    t : float
        Current time.
    y : ndarray
        Current state.
    t_old : float
        Previous time. None if no steps were made yet.
    step_size : float
        Size of the last successful step. None if no steps were made yet.
    nfev : int
        Number of evaluations of the system's right-hand side.
    njev : int
        Number of evaluations of the Jacobian. Is always 0 for this solver
        as it does not use the Jacobian.
    nlu : int
        Number of LU decompositions. Is always 0 for this solver.

    References
    ----------
    .. [1] P. Bogacki, L.F. Shampine, "A 3(2) Pair of Runge-Kutta Formulas",
           Appl. Math. Lett. Vol. 2, No. 4, pp. 321-325, 1989.
    """
    order = 3
    error_estimator_order = 2
    n_stages = 3
    C = np.array([0, 1/2, 3/4])
    A = np.array([
        [0, 0, 0],
        [1/2, 0, 0],
        [0, 3/4, 0]
    ])
    B = np.array([2/9, 1/3, 4/9])
    E = np.array([5/72, -1/12, -1/9, 1/8])
    P = np.array([[1, -4/3, 5/9],
                  [0, 1, -2/3],
                  [0, 4/3, -8/9],
                  [0, -1, 1]])
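

# Illustrative usage sketch (not part of the module): RK23 follows the
# OdeSolver stepping interface, so it can be driven step by step. For
# dy/dt = -y the state at t_bound should be close to exp(-t_bound).
def _example_rk23_usage():
    def fun(t, y):
        return -y

    solver = RK23(fun, 0.0, np.array([1.0]), 10.0, rtol=1e-6, atol=1e-9)
    while solver.status == 'running':
        solver.step()
    return solver.t, solver.y  # t == 10.0, y approximately exp(-10).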


class RK45(RungeKutta):
    """Explicit Runge-Kutta method of order 5(4).

    This uses the Dormand-Prince pair of formulas [1]_. The error is controlled
    assuming accuracy of the fourth-order method, but steps are taken
    using the fifth-order accurate formula (local extrapolation is done).
    A quartic interpolation polynomial is used for the dense output [2]_.

    Can be applied in the complex domain.

    Parameters
    ----------
    fun : callable
        Right-hand side of the system. The calling signature is ``fun(t, y)``.
        Here ``t`` is a scalar, and there are two options for the ndarray ``y``:
        It can either have shape (n,); then ``fun`` must return array_like with
        shape (n,). Alternatively it can have shape (n, k); then ``fun``
        must return an array_like with shape (n, k), i.e., each column
        corresponds to a single column in ``y``. The choice between the two
        options is determined by the `vectorized` argument (see below).
    t0 : float
        Initial time.
    y0 : array_like, shape (n,)
        Initial state.
    t_bound : float
        Boundary time - the integration won't continue beyond it. It also
        determines the direction of the integration.
    first_step : float or None, optional
        Initial step size. Default is ``None`` which means that the algorithm
        should choose.
    max_step : float, optional
        Maximum allowed step size. Default is np.inf, i.e., the step size is not
        bounded and determined solely by the solver.
    rtol, atol : float and array_like, optional
        Relative and absolute tolerances. The solver keeps the local error
        estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
        relative accuracy (number of correct digits), while `atol` controls
        absolute accuracy (number of correct decimal places). To achieve the
        desired `rtol`, set `atol` to be smaller than the smallest value that
        can be expected from ``rtol * abs(y)`` so that `rtol` dominates the
        allowable error. If `atol` is larger than ``rtol * abs(y)`` the
        number of correct digits is not guaranteed. Conversely, to achieve the
        desired `atol` set `rtol` such that ``rtol * abs(y)`` is always smaller
        than `atol`. If components of y have different scales, it might be
        beneficial to set different `atol` values for different components by
        passing array_like with shape (n,) for `atol`. Default values are
        1e-3 for `rtol` and 1e-6 for `atol`.
    vectorized : bool, optional
        Whether `fun` is implemented in a vectorized fashion. Default is False.

    Attributes
    ----------
    n : int
        Number of equations.
    status : string
        Current status of the solver: 'running', 'finished' or 'failed'.
    t_bound : float
        Boundary time.
    direction : float
        Integration direction: +1 or -1.
    t : float
        Current time.
    y : ndarray
        Current state.
    t_old : float
        Previous time. None if no steps were made yet.
    step_size : float
        Size of the last successful step. None if no steps were made yet.
    nfev : int
        Number of evaluations of the system's right-hand side.
    njev : int
        Number of evaluations of the Jacobian. Is always 0 for this solver
        as it does not use the Jacobian.
    nlu : int
        Number of LU decompositions. Is always 0 for this solver.

    References
    ----------
    .. [1] J. R. Dormand, P. J. Prince, "A family of embedded Runge-Kutta
           formulae", Journal of Computational and Applied Mathematics, Vol. 6,
           No. 1, pp. 19-26, 1980.
    .. [2] L. F. Shampine, "Some Practical Runge-Kutta Formulas", Mathematics
           of Computation, Vol. 46, No. 173, pp. 135-150, 1986.
    """
    order = 5
    error_estimator_order = 4
    n_stages = 6
    C = np.array([0, 1/5, 3/10, 4/5, 8/9, 1])
    A = np.array([
        [0, 0, 0, 0, 0],
        [1/5, 0, 0, 0, 0],
        [3/40, 9/40, 0, 0, 0],
        [44/45, -56/15, 32/9, 0, 0],
        [19372/6561, -25360/2187, 64448/6561, -212/729, 0],
        [9017/3168, -355/33, 46732/5247, 49/176, -5103/18656]
    ])
    B = np.array([35/384, 0, 500/1113, 125/192, -2187/6784, 11/84])
    E = np.array([-71/57600, 0, 71/16695, -71/1920, 17253/339200, -22/525,
                  1/40])
    # Corresponds to the optimum value of c_6 from [2]_.
    P = np.array([
        [1, -8048581381/2820520608, 8663915743/2820520608,
         -12715105075/11282082432],
        [0, 0, 0, 0],
        [0, 131558114200/32700410799, -68118460800/10900136933,
         87487479700/32700410799],
        [0, -1754552775/470086768, 14199869525/1410260304,
         -10690763975/1880347072],
        [0, 127303824393/49829197408, -318862633887/49829197408,
         701980252875/199316789632],
        [0, -282668133/205662961, 2019193451/616988883, -1453857185/822651844],
        [0, 40617522/29380423, -110615467/29380423, 69997945/29380423]])
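

# Illustrative usage sketch (not part of the module): after an accepted RK45
# step, `dense_output()` (inherited from OdeSolver) returns an interpolant
# that can be evaluated anywhere inside the last step, here at its midpoint.
def _example_rk45_dense_output():
    def fun(t, y):
        return -y

    solver = RK45(fun, 0.0, np.array([1.0]), 10.0)
    solver.step()
    sol = solver.dense_output()
    t_mid = 0.5 * (solver.t_old + solver.t)
    return sol(t_mid)  # Approximately exp(-t_mid).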


class DOP853(RungeKutta):
    """Explicit Runge-Kutta method of order 8.

    This is a Python implementation of the "DOP853" algorithm originally written
    in Fortran [1]_, [2]_. Note that this is not a literal translation, but
    the algorithmic core and coefficients are the same.

    Can be applied in the complex domain.

    Parameters
    ----------
    fun : callable
        Right-hand side of the system. The calling signature is ``fun(t, y)``.
        Here, ``t`` is a scalar, and there are two options for the ndarray ``y``:
        It can either have shape (n,); then ``fun`` must return array_like with
        shape (n,). Alternatively it can have shape (n, k); then ``fun``
        must return an array_like with shape (n, k), i.e. each column
        corresponds to a single column in ``y``. The choice between the two
        options is determined by the `vectorized` argument (see below).
    t0 : float
        Initial time.
    y0 : array_like, shape (n,)
        Initial state.
    t_bound : float
        Boundary time - the integration won't continue beyond it. It also
        determines the direction of the integration.
    first_step : float or None, optional
        Initial step size. Default is ``None`` which means that the algorithm
        should choose.
    max_step : float, optional
        Maximum allowed step size. Default is np.inf, i.e. the step size is not
        bounded and determined solely by the solver.
    rtol, atol : float and array_like, optional
        Relative and absolute tolerances. The solver keeps the local error
        estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
        relative accuracy (number of correct digits), while `atol` controls
        absolute accuracy (number of correct decimal places). To achieve the
        desired `rtol`, set `atol` to be smaller than the smallest value that
        can be expected from ``rtol * abs(y)`` so that `rtol` dominates the
        allowable error. If `atol` is larger than ``rtol * abs(y)`` the
        number of correct digits is not guaranteed. Conversely, to achieve the
        desired `atol` set `rtol` such that ``rtol * abs(y)`` is always smaller
        than `atol`. If components of y have different scales, it might be
        beneficial to set different `atol` values for different components by
        passing array_like with shape (n,) for `atol`. Default values are
        1e-3 for `rtol` and 1e-6 for `atol`.
    vectorized : bool, optional
        Whether `fun` is implemented in a vectorized fashion. Default is False.

    Attributes
    ----------
    n : int
        Number of equations.
    status : string
        Current status of the solver: 'running', 'finished' or 'failed'.
    t_bound : float
        Boundary time.
    direction : float
        Integration direction: +1 or -1.
    t : float
        Current time.
    y : ndarray
        Current state.
    t_old : float
        Previous time. None if no steps were made yet.
    step_size : float
        Size of the last successful step. None if no steps were made yet.
    nfev : int
        Number of evaluations of the system's right-hand side.
    njev : int
        Number of evaluations of the Jacobian. Is always 0 for this solver
        as it does not use the Jacobian.
    nlu : int
        Number of LU decompositions. Is always 0 for this solver.

    References
    ----------
    .. [1] E. Hairer, S. P. Norsett, G. Wanner, "Solving Ordinary Differential
           Equations I: Nonstiff Problems", Sec. II.
    .. [2] `Page with original Fortran code of DOP853
           <http://www.unige.ch/~hairer/software.html>`_.
    """
    n_stages = dop853_coefficients.N_STAGES
    order = 8
    error_estimator_order = 7
    A = dop853_coefficients.A[:n_stages, :n_stages]
    B = dop853_coefficients.B
    C = dop853_coefficients.C[:n_stages]
    E3 = dop853_coefficients.E3
    E5 = dop853_coefficients.E5
    D = dop853_coefficients.D

    A_EXTRA = dop853_coefficients.A[n_stages + 1:]
    C_EXTRA = dop853_coefficients.C[n_stages + 1:]

    def __init__(self, fun, t0, y0, t_bound, max_step=np.inf,
                 rtol=1e-3, atol=1e-6, vectorized=False,
                 first_step=None, **extraneous):
        super().__init__(fun, t0, y0, t_bound, max_step, rtol, atol,
                         vectorized, first_step, **extraneous)
        self.K_extended = np.empty((dop853_coefficients.N_STAGES_EXTENDED,
                                    self.n), dtype=self.y.dtype)
        self.K = self.K_extended[:self.n_stages + 1]

    def _estimate_error(self, K, h):  # Left for testing purposes.
        err5 = np.dot(K.T, self.E5)
        err3 = np.dot(K.T, self.E3)
        denom = np.hypot(np.abs(err5), 0.1 * np.abs(err3))
        correction_factor = np.ones_like(err5)
        mask = denom > 0
        correction_factor[mask] = np.abs(err5[mask]) / denom[mask]
        return h * err5 * correction_factor

    def _estimate_error_norm(self, K, h, scale):
        err5 = np.dot(K.T, self.E5) / scale
        err3 = np.dot(K.T, self.E3) / scale
        err5_norm_2 = np.linalg.norm(err5)**2
        err3_norm_2 = np.linalg.norm(err3)**2
        if err5_norm_2 == 0 and err3_norm_2 == 0:
            return 0.0
        denom = err5_norm_2 + 0.01 * err3_norm_2
        return np.abs(h) * err5_norm_2 / np.sqrt(denom * len(scale))

    def _dense_output_impl(self):
        K = self.K_extended
        h = self.h_previous
        for s, (a, c) in enumerate(zip(self.A_EXTRA, self.C_EXTRA),
                                   start=self.n_stages + 1):
            dy = np.dot(K[:s].T, a[:s]) * h
            K[s] = self.fun(self.t_old + c * h, self.y_old + dy)

        F = np.empty((dop853_coefficients.INTERPOLATOR_POWER, self.n),
                     dtype=self.y_old.dtype)

        f_old = K[0]
        delta_y = self.y - self.y_old

        F[0] = delta_y
        F[1] = h * f_old - delta_y
        F[2] = 2 * delta_y - h * (self.f + f_old)
        F[3:] = h * np.dot(self.D, K)

        return Dop853DenseOutput(self.t_old, self.t, self.y_old, F)
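

# Illustrative usage sketch (not part of the module): DOP853 exposes the same
# stepping interface as the other solvers. Its dense output is built on demand
# in `_dense_output_impl` above from the extra stages A_EXTRA/C_EXTRA, so
# requesting an interpolant costs a few additional right-hand-side
# evaluations per step.
def _example_dop853_usage():
    def fun(t, y):
        return -y

    solver = DOP853(fun, 0.0, np.array([1.0]), 2.0, rtol=1e-10, atol=1e-12)
    solver.step()
    sol = solver.dense_output()
    return sol(0.5 * (solver.t_old + solver.t))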


class RkDenseOutput(DenseOutput):
    def __init__(self, t_old, t, y_old, Q):
        super().__init__(t_old, t)
        self.h = t - t_old
        self.Q = Q
        self.order = Q.shape[1] - 1
        self.y_old = y_old

    def _call_impl(self, t):
        x = (t - self.t_old) / self.h
        if t.ndim == 0:
            p = np.tile(x, self.order + 1)
            p = np.cumprod(p)
        else:
            p = np.tile(x, (self.order + 1, 1))
            p = np.cumprod(p, axis=0)
        y = self.h * np.dot(self.Q, p)
        if y.ndim == 2:
            y += self.y_old[:, None]
        else:
            y += self.y_old

        return y
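

# Illustrative sketch (not part of the module): `_call_impl` above evaluates
# y(t) = y_old + h * Q @ [x, x**2, ..., x**(order + 1)] with x = (t - t_old) / h.
# For the P matrices defined in this file the interpolant reproduces both step
# endpoints, which this check verifies up to rounding.
def _example_dense_output_endpoints():
    def fun(t, y):
        return -y

    solver = RK23(fun, 0.0, np.array([1.0]), 5.0)
    solver.step()
    sol = solver.dense_output()
    return (np.allclose(sol(solver.t_old), solver.y_old)
            and np.allclose(sol(solver.t), solver.y))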


class Dop853DenseOutput(DenseOutput):
    def __init__(self, t_old, t, y_old, F):
        super().__init__(t_old, t)
        self.h = t - t_old
        self.F = F
        self.y_old = y_old

    def _call_impl(self, t):
        x = (t - self.t_old) / self.h

        if t.ndim == 0:
            y = np.zeros_like(self.y_old)
        else:
            x = x[:, None]
            y = np.zeros((len(x), len(self.y_old)), dtype=self.y_old.dtype)

        for i, f in enumerate(reversed(self.F)):
            y += f
            if i % 2 == 0:
                y *= x
            else:
                y *= 1 - x
        y += self.y_old

        return y.T