import numpy as np
from scipy.linalg import lu_factor, lu_solve
from scipy.sparse import issparse, csc_matrix, eye
from scipy.sparse.linalg import splu
from scipy.optimize._numdiff import group_columns
from .common import (validate_max_step, validate_tol, select_initial_step,
                     norm, EPS, num_jac, validate_first_step,
                     warn_extraneous)
from .base import OdeSolver, DenseOutput


MAX_ORDER = 5
NEWTON_MAXITER = 4
MIN_FACTOR = 0.2
MAX_FACTOR = 10


def compute_R(order, factor):
    """Compute the matrix for changing the differences array."""
    # For i, j >= 1 the cumulative product below gives
    # R[i, j] = prod_{m=1..i} (m - 1 - factor * j) / m, while row 0 is all ones.
    I = np.arange(1, order + 1)[:, None]
    J = np.arange(1, order + 1)
    M = np.zeros((order + 1, order + 1))
    M[1:, 1:] = (I - 1 - factor * J) / I
    M[0] = 1
    return np.cumprod(M, axis=0)


def change_D(D, order, factor):
    """Change differences array in-place when step size is changed."""
    R = compute_R(order, factor)
    U = compute_R(order, 1)
    RU = R.dot(U)
    D[:order + 1] = np.dot(RU.T, D[:order + 1])
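
# Note on `change_D`: when the step size changes from ``h`` to ``factor * h``,
# the backward differences of the current interpolating polynomial, taken on
# the new equally spaced grid, are obtained by the linear map
# ``D[:order + 1] <- (R(factor) @ R(1)).T @ D[:order + 1]`` implemented above
# (see reference [2] in the BDF class docstring). For ``order == 1`` this
# reduces to keeping ``D[0]`` and scaling ``D[1]`` by ``factor``.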


def solve_bdf_system(fun, t_new, y_predict, c, psi, LU, solve_lu, scale, tol):
    """Solve the algebraic system resulting from BDF method."""
    d = 0
    y = y_predict.copy()
    dy_norm_old = None
    converged = False
    for k in range(NEWTON_MAXITER):
        f = fun(t_new, y)
        if not np.all(np.isfinite(f)):
            break

        dy = solve_lu(LU, c * f - psi - d)
        dy_norm = norm(dy / scale)

        if dy_norm_old is None:
            rate = None
        else:
            rate = dy_norm / dy_norm_old

        if (rate is not None and (rate >= 1 or
                rate ** (NEWTON_MAXITER - k) / (1 - rate) * dy_norm > tol)):
            break

        y += dy
        d += dy

        if (dy_norm == 0 or
                rate is not None and rate / (1 - rate) * dy_norm < tol):
            converged = True
            break

        dy_norm_old = dy_norm

    return converged, k + 1, y, d
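
# Note on `solve_bdf_system`: this is a simplified Newton iteration with a
# frozen Jacobian (the LU factorization of ``I - c * J`` is reused across
# iterations). With ``rate = ||dy_k|| / ||dy_{k-1}||`` as the observed
# contraction factor, a geometric-series bound on the remaining error yields
# the acceptance test ``rate / (1 - rate) * ||dy_k|| < tol``, while
# ``rate ** (NEWTON_MAXITER - k) / (1 - rate) * ||dy_k|| > tol`` predicts that
# the tolerance cannot be met in the remaining iterations and aborts early.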


class BDF(OdeSolver):
    """Implicit method based on backward-differentiation formulas.

    This is a variable order method with the order varying automatically from
    1 to 5. The general framework of the BDF algorithm is described in [1]_.
    This class implements a quasi-constant step size as explained in [2]_.
    The error estimation strategy for the constant-step BDF is derived in
    [3]_. An accuracy enhancement using modified formulas (NDF) [2]_ is also
    implemented.

    Can be applied in the complex domain.

    Parameters
    ----------
    fun : callable
        Right-hand side of the system. The calling signature is ``fun(t, y)``.
        Here ``t`` is a scalar, and there are two options for the ndarray
        ``y``: It can either have shape (n,); then ``fun`` must return
        array_like with shape (n,). Alternatively it can have shape (n, k);
        then ``fun`` must return an array_like with shape (n, k), i.e. each
        column corresponds to a single column in ``y``. The choice between
        the two options is determined by the `vectorized` argument (see
        below). The vectorized implementation allows a faster approximation
        of the Jacobian by finite differences (required for this solver).
    t0 : float
        Initial time.
    y0 : array_like, shape (n,)
        Initial state.
    t_bound : float
        Boundary time - the integration won't continue beyond it. It also
        determines the direction of the integration.
    first_step : float or None, optional
        Initial step size. Default is ``None`` which means that the algorithm
        should choose.
    max_step : float, optional
        Maximum allowed step size. Default is np.inf, i.e., the step size is
        not bounded and determined solely by the solver.
    rtol, atol : float and array_like, optional
        Relative and absolute tolerances. The solver keeps the local error
        estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
        relative accuracy (number of correct digits), while `atol` controls
        absolute accuracy (number of correct decimal places). To achieve the
        desired `rtol`, set `atol` to be smaller than the smallest value that
        can be expected from ``rtol * abs(y)`` so that `rtol` dominates the
        allowable error. If `atol` is larger than ``rtol * abs(y)``, the
        number of correct digits is not guaranteed. Conversely, to achieve
        the desired `atol`, set `rtol` such that ``rtol * abs(y)`` is always
        smaller than `atol`. If components of y have different scales, it
        might be beneficial to set different `atol` values for different
        components by passing array_like with shape (n,) for `atol`. Default
        values are 1e-3 for `rtol` and 1e-6 for `atol`.
    jac : {None, array_like, sparse_matrix, callable}, optional
        Jacobian matrix of the right-hand side of the system with respect to
        y, required by this method. The Jacobian matrix has shape (n, n) and
        its element (i, j) is equal to ``d f_i / d y_j``.
        There are three ways to define the Jacobian:

            * If array_like or sparse_matrix, the Jacobian is assumed to
              be constant.
            * If callable, the Jacobian is assumed to depend on both
              t and y; it will be called as ``jac(t, y)`` as necessary.
              For the 'Radau' and 'BDF' methods, the return value might be
              a sparse matrix.
            * If None (default), the Jacobian will be approximated by
              finite differences.

        It is generally recommended to provide the Jacobian rather than
        relying on a finite-difference approximation.
    jac_sparsity : {None, array_like, sparse matrix}, optional
        Defines a sparsity structure of the Jacobian matrix for a
        finite-difference approximation. Its shape must be (n, n). This
        argument is ignored if `jac` is not `None`. If the Jacobian has only
        a few non-zero elements in *each* row, providing the sparsity
        structure will greatly speed up the computations [4]_. A zero entry
        means that a corresponding element in the Jacobian is always zero.
        If None (default), the Jacobian is assumed to be dense.
    vectorized : bool, optional
        Whether `fun` is implemented in a vectorized fashion. Default is
        False.

    Attributes
    ----------
    n : int
        Number of equations.
    status : string
        Current status of the solver: 'running', 'finished' or 'failed'.
    t_bound : float
        Boundary time.
    direction : float
        Integration direction: +1 or -1.
    t : float
        Current time.
    y : ndarray
        Current state.
    t_old : float
        Previous time. None if no steps were made yet.
    step_size : float
        Size of the last successful step. None if no steps were made yet.
    nfev : int
        Number of evaluations of the right-hand side.
    njev : int
        Number of evaluations of the Jacobian.
    nlu : int
        Number of LU decompositions.

    References
    ----------
    .. [1] G. D. Byrne, A. C. Hindmarsh, "A Polyalgorithm for the Numerical
           Solution of Ordinary Differential Equations", ACM Transactions on
           Mathematical Software, Vol. 1, No. 1, pp. 71-96, March 1975.
    .. [2] L. F. Shampine, M. W. Reichelt, "The MATLAB ODE Suite", SIAM
           Journal on Scientific Computing, Vol. 18, No. 1, pp. 1-22,
           January 1997.
    .. [3] E. Hairer, G. Wanner, "Solving Ordinary Differential Equations I:
           Nonstiff Problems", Sec. III.2.
    .. [4] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
           sparse Jacobian matrices", Journal of the Institute of Mathematics
           and its Applications, 13, pp. 117-120, 1974.
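
    Examples
    --------
    This class is normally selected through ``scipy.integrate.solve_ivp``
    with ``method='BDF'``. A minimal sketch of driving the solver directly
    (the decaying test problem below is purely illustrative):

    >>> import numpy as np
    >>> from scipy.integrate import BDF
    >>> def fun(t, y):
    ...     return -0.5 * y
    >>> solver = BDF(fun, 0.0, [10.0], 10.0)
    >>> while solver.status == 'running':
    ...     message = solver.step()
    >>> interpolant = solver.dense_output()  # interpolant over the last step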
    """

    def __init__(self, fun, t0, y0, t_bound, max_step=np.inf,
                 rtol=1e-3, atol=1e-6, jac=None, jac_sparsity=None,
                 vectorized=False, first_step=None, **extraneous):
        warn_extraneous(extraneous)
        super().__init__(fun, t0, y0, t_bound, vectorized,
                         support_complex=True)
        self.max_step = validate_max_step(max_step)
        self.rtol, self.atol = validate_tol(rtol, atol, self.n)
        f = self.fun(self.t, self.y)
        if first_step is None:
            self.h_abs = select_initial_step(self.fun, self.t, self.y, f,
                                             self.direction, 1,
                                             self.rtol, self.atol)
        else:
            self.h_abs = validate_first_step(first_step, t0, t_bound)
        self.h_abs_old = None
        self.error_norm_old = None

        self.newton_tol = max(10 * EPS / rtol, min(0.03, rtol ** 0.5))

        self.jac_factor = None
        self.jac, self.J = self._validate_jac(jac, jac_sparsity)
        if issparse(self.J):
            def lu(A):
                self.nlu += 1
                return splu(A)

            def solve_lu(LU, b):
                return LU.solve(b)

            I = eye(self.n, format='csc', dtype=self.y.dtype)
        else:
            def lu(A):
                self.nlu += 1
                return lu_factor(A, overwrite_a=True)

            def solve_lu(LU, b):
                return lu_solve(LU, b, overwrite_b=True)

            I = np.identity(self.n, dtype=self.y.dtype)

        self.lu = lu
        self.solve_lu = solve_lu
        self.I = I

        # NDF modification coefficients kappa and the method constants derived
        # from them (see reference [2] in the class docstring).
        kappa = np.array([0, -0.1850, -1/9, -0.0823, -0.0415, 0])
        self.gamma = np.hstack((0, np.cumsum(1 / np.arange(1, MAX_ORDER + 1))))
        self.alpha = (1 - kappa) * self.gamma
        self.error_const = kappa * self.gamma + 1 / np.arange(1, MAX_ORDER + 2)

        D = np.empty((MAX_ORDER + 3, self.n), dtype=self.y.dtype)
        D[0] = self.y
        D[1] = f * self.h_abs * self.direction
        self.D = D

        self.order = 1
        self.n_equal_steps = 0
        self.LU = None

    def _validate_jac(self, jac, sparsity):
        t0 = self.t
        y0 = self.y

        if jac is None:
            if sparsity is not None:
                if issparse(sparsity):
                    sparsity = csc_matrix(sparsity)
                groups = group_columns(sparsity)
                sparsity = (sparsity, groups)

            def jac_wrapped(t, y):
                self.njev += 1
                f = self.fun_single(t, y)
                J, self.jac_factor = num_jac(self.fun_vectorized, t, y, f,
                                             self.atol, self.jac_factor,
                                             sparsity)
                return J
            J = jac_wrapped(t0, y0)
        elif callable(jac):
            J = jac(t0, y0)
            self.njev += 1
            if issparse(J):
                J = csc_matrix(J, dtype=y0.dtype)

                def jac_wrapped(t, y):
                    self.njev += 1
                    return csc_matrix(jac(t, y), dtype=y0.dtype)
            else:
                J = np.asarray(J, dtype=y0.dtype)

                def jac_wrapped(t, y):
                    self.njev += 1
                    return np.asarray(jac(t, y), dtype=y0.dtype)

            if J.shape != (self.n, self.n):
                raise ValueError("`jac` is expected to have shape {}, but "
                                 "actually has {}."
                                 .format((self.n, self.n), J.shape))
        else:
            if issparse(jac):
                J = csc_matrix(jac, dtype=y0.dtype)
            else:
                J = np.asarray(jac, dtype=y0.dtype)

            if J.shape != (self.n, self.n):
                raise ValueError("`jac` is expected to have shape {}, but "
                                 "actually has {}."
                                 .format((self.n, self.n), J.shape))
            jac_wrapped = None

        return jac_wrapped, J

    def _step_impl(self):
        t = self.t
        D = self.D

        max_step = self.max_step
        min_step = 10 * np.abs(np.nextafter(t, self.direction * np.inf) - t)
        if self.h_abs > max_step:
            h_abs = max_step
            change_D(D, self.order, max_step / self.h_abs)
            self.n_equal_steps = 0
        elif self.h_abs < min_step:
            h_abs = min_step
            change_D(D, self.order, min_step / self.h_abs)
            self.n_equal_steps = 0
        else:
            h_abs = self.h_abs

        atol = self.atol
        rtol = self.rtol
        order = self.order

        alpha = self.alpha
        gamma = self.gamma
        error_const = self.error_const

        J = self.J
        LU = self.LU
        current_jac = self.jac is None

        step_accepted = False
        while not step_accepted:
            if h_abs < min_step:
                return False, self.TOO_SMALL_STEP

            h = h_abs * self.direction
            t_new = t + h

            if self.direction * (t_new - self.t_bound) > 0:
                t_new = self.t_bound
                change_D(D, order, np.abs(t_new - t) / h_abs)
                self.n_equal_steps = 0
                LU = None

            h = t_new - t
            h_abs = np.abs(h)

            y_predict = np.sum(D[:order + 1], axis=0)

            scale = atol + rtol * np.abs(y_predict)
            psi = np.dot(D[1: order + 1].T, gamma[1: order + 1]) / alpha[order]

            converged = False
            c = h / alpha[order]
            while not converged:
                if LU is None:
                    LU = self.lu(self.I - c * J)

                converged, n_iter, y_new, d = solve_bdf_system(
                    self.fun, t_new, y_predict, c, psi, LU, self.solve_lu,
                    scale, self.newton_tol)

                if not converged:
                    if current_jac:
                        break
                    J = self.jac(t_new, y_predict)
                    LU = None
                    current_jac = True

            if not converged:
                factor = 0.5
                h_abs *= factor
                change_D(D, order, factor)
                self.n_equal_steps = 0
                LU = None
                continue

            safety = 0.9 * (2 * NEWTON_MAXITER + 1) / (2 * NEWTON_MAXITER
                                                       + n_iter)

            scale = atol + rtol * np.abs(y_new)
            error = error_const[order] * d
            error_norm = norm(error / scale)

            if error_norm > 1:
                factor = max(MIN_FACTOR,
                             safety * error_norm ** (-1 / (order + 1)))
                h_abs *= factor
                change_D(D, order, factor)
                self.n_equal_steps = 0
                # As we didn't have problems with convergence, we don't
                # reset LU here.
            else:
                step_accepted = True

        self.n_equal_steps += 1

        self.t = t_new
        self.y = y_new

        self.h_abs = h_abs
        self.J = J
        self.LU = LU

        # Update differences. The principal relation here is
        # D^{j + 1} y_n = D^{j} y_n - D^{j} y_{n - 1}. Keep in mind that D
        # contained difference for previous interpolating polynomial and
        # d = D^{k + 1} y_n. Thus this elegant code follows.
        D[order + 2] = d - D[order + 1]
        D[order + 1] = d
        for i in reversed(range(order + 1)):
            D[i] += D[i + 1]

        if self.n_equal_steps < order + 1:
            return True, None

        if order > 1:
            error_m = error_const[order - 1] * D[order]
            error_m_norm = norm(error_m / scale)
        else:
            error_m_norm = np.inf

        if order < MAX_ORDER:
            error_p = error_const[order + 1] * D[order + 2]
            error_p_norm = norm(error_p / scale)
        else:
            error_p_norm = np.inf

        error_norms = np.array([error_m_norm, error_norm, error_p_norm])
        with np.errstate(divide='ignore'):
            factors = error_norms ** (-1 / np.arange(order, order + 3))

        # Pick the order (order - 1, order or order + 1) that allows the
        # largest step-size factor, then rescale the step accordingly.
        delta_order = np.argmax(factors) - 1
        order += delta_order
        self.order = order

        factor = min(MAX_FACTOR, safety * np.max(factors))
        self.h_abs *= factor
        change_D(D, order, factor)
        self.n_equal_steps = 0
        self.LU = None

        return True, None

    def _dense_output_impl(self):
        return BdfDenseOutput(self.t_old, self.t, self.h_abs * self.direction,
                              self.order, self.D[:self.order + 1].copy())


class BdfDenseOutput(DenseOutput):
    """Local interpolant over the last successful BDF step.

    The solution is stored as scaled backward differences ``D`` and evaluated
    in Newton form, with factors ``(t - (t_n - m * h)) / ((m + 1) * h)`` for
    ``m = 0, ..., order - 1``.
    """
    def __init__(self, t_old, t, h, order, D):
        super().__init__(t_old, t)
        self.order = order
        self.t_shift = self.t - h * np.arange(self.order)
        self.denom = h * (1 + np.arange(self.order))
        self.D = D

    def _call_impl(self, t):
        if t.ndim == 0:
            x = (t - self.t_shift) / self.denom
            p = np.cumprod(x)
        else:
            x = (t - self.t_shift[:, None]) / self.denom[:, None]
            p = np.cumprod(x, axis=0)

        y = np.dot(self.D[1:].T, p)
        if y.ndim == 1:
            y += self.D[0]
        else:
            y += self.D[0, :, None]

        return y