from itertools import groupby
from warnings import warn

import numpy as np
from scipy.sparse import find, coo_matrix

EPS = np.finfo(float).eps


def validate_first_step(first_step, t0, t_bound):
    """Assert that first_step is valid and return it."""
    if first_step <= 0:
        raise ValueError("`first_step` must be positive.")
    if first_step > np.abs(t_bound - t0):
        raise ValueError("`first_step` exceeds bounds.")
    return first_step


def validate_max_step(max_step):
    """Assert that max_step is valid and return it."""
    if max_step <= 0:
        raise ValueError("`max_step` must be positive.")
    return max_step


def warn_extraneous(extraneous):
    """Display a warning for extraneous keyword arguments.

    The initializer of each solver class is expected to collect keyword
    arguments that it doesn't understand and warn about them. This function
    issues a single warning listing every key in the supplied dictionary.

    Parameters
    ----------
    extraneous : dict
        Extraneous keyword arguments
    """
    if extraneous:
        warn("The following arguments have no effect for a chosen solver: {}."
             .format(", ".join("`{}`".format(x) for x in extraneous)))


def validate_tol(rtol, atol, n):
    """Validate tolerance values."""
    if np.any(rtol < 100 * EPS):
        warn("At least one element of `rtol` is too small. "
             f"Setting `rtol = np.maximum(rtol, {100 * EPS})`.")
        rtol = np.maximum(rtol, 100 * EPS)

    atol = np.asarray(atol)
    if atol.ndim > 0 and atol.shape != (n,):
        raise ValueError("`atol` has wrong shape.")

    if np.any(atol < 0):
        raise ValueError("`atol` must be positive.")

    return rtol, atol


def norm(x):
    """Compute RMS norm."""
    return np.linalg.norm(x) / x.size ** 0.5
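
# Note: this is the Euclidean norm scaled by sqrt(x.size), so e.g.
# norm(np.ones(4)) evaluates to 1.0 rather than 2.0. The scaling makes the
# tolerance tests above independent of the number of equations.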


def select_initial_step(fun, t0, y0, f0, direction, order, rtol, atol):
    """Empirically select a good initial step.

    The algorithm is described in [1]_.

    Parameters
    ----------
    fun : callable
        Right-hand side of the system.
    t0 : float
        Initial value of the independent variable.
    y0 : ndarray, shape (n,)
        Initial value of the dependent variable.
    f0 : ndarray, shape (n,)
        Initial value of the derivative, i.e., ``fun(t0, y0)``.
    direction : float
        Integration direction.
    order : float
        Error estimator order. It means that the error controlled by the
        algorithm is proportional to ``step_size ** (order + 1)``.
    rtol : float
        Desired relative tolerance.
    atol : float
        Desired absolute tolerance.

    Returns
    -------
    h_abs : float
        Absolute value of the suggested initial step.

    References
    ----------
    .. [1] E. Hairer, S. P. Norsett and G. Wanner, "Solving Ordinary
           Differential Equations I: Nonstiff Problems", Sec. II.4.
    """
    if y0.size == 0:
        return np.inf

    scale = atol + np.abs(y0) * rtol
    d0 = norm(y0 / scale)
    d1 = norm(f0 / scale)
    if d0 < 1e-5 or d1 < 1e-5:
        h0 = 1e-6
    else:
        h0 = 0.01 * d0 / d1

    y1 = y0 + h0 * direction * f0
    f1 = fun(t0 + h0 * direction, y1)
    d2 = norm((f1 - f0) / scale) / h0

    if d1 <= 1e-15 and d2 <= 1e-15:
        h1 = max(1e-6, h0 * 1e-3)
    else:
        h1 = (0.01 / max(d1, d2)) ** (1 / (order + 1))

    return min(100 * h0, h1)
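
# A minimal usage sketch (illustrative only; the decaying scalar ODE is
# hypothetical and order=4 mimics an RK45-style error estimator):
#
#     fun = lambda t, y: -y
#     y0 = np.array([1.0])
#     h_abs = select_initial_step(fun, 0.0, y0, fun(0.0, y0), direction=1.0,
#                                 order=4, rtol=1e-3, atol=1e-6)
#
# h_abs is a positive trial step; the solver is still free to shrink it
# after the first error estimate.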


class OdeSolution:
    """Continuous ODE solution.

    It is organized as a collection of `DenseOutput` objects which represent
    local interpolants. It provides an algorithm to select the right
    interpolant for each given point.

    The interpolants cover the range between `t_min` and `t_max` (see
    Attributes below). Evaluation outside this interval is not forbidden, but
    the accuracy is not guaranteed.

    When evaluating at a breakpoint (one of the values in `ts`) a segment with
    the lower index is selected.

    Parameters
    ----------
    ts : array_like, shape (n_segments + 1,)
        Time instants between which local interpolants are defined. Must
        be strictly increasing or decreasing (a zero-length segment with two
        points is also allowed).
    interpolants : list of DenseOutput with n_segments elements
        Local interpolants. The i-th interpolant is assumed to be defined
        between ``ts[i]`` and ``ts[i + 1]``.

    Attributes
    ----------
    t_min, t_max : float
        Time range of the interpolation.
    """
    def __init__(self, ts, interpolants):
        ts = np.asarray(ts)
        d = np.diff(ts)
        # The first case covers integration on a zero-length segment.
        if not ((ts.size == 2 and ts[0] == ts[-1])
                or np.all(d > 0) or np.all(d < 0)):
            raise ValueError("`ts` must be strictly increasing or decreasing.")

        self.n_segments = len(interpolants)
        if ts.shape != (self.n_segments + 1,):
            raise ValueError("Numbers of time stamps and interpolants "
                             "don't match.")

        self.ts = ts
        self.interpolants = interpolants
        if ts[-1] >= ts[0]:
            self.t_min = ts[0]
            self.t_max = ts[-1]
            self.ascending = True
            self.ts_sorted = ts
        else:
            self.t_min = ts[-1]
            self.t_max = ts[0]
            self.ascending = False
            self.ts_sorted = ts[::-1]

    def _call_single(self, t):
        # Here we preserve a certain symmetry that when t is in self.ts,
        # then we prioritize a segment with a lower index.
        if self.ascending:
            ind = np.searchsorted(self.ts_sorted, t, side='left')
        else:
            ind = np.searchsorted(self.ts_sorted, t, side='right')

        segment = min(max(ind - 1, 0), self.n_segments - 1)
        if not self.ascending:
            segment = self.n_segments - 1 - segment

        return self.interpolants[segment](t)

    def __call__(self, t):
        """Evaluate the solution.

        Parameters
        ----------
        t : float or array_like with shape (n_points,)
            Points to evaluate at.

        Returns
        -------
        y : ndarray, shape (n_states,) or (n_states, n_points)
            Computed values. Shape depends on whether `t` is a scalar or a
            1-D array.
        """
        t = np.asarray(t)

        if t.ndim == 0:
            return self._call_single(t)

        order = np.argsort(t)
        reverse = np.empty_like(order)
        reverse[order] = np.arange(order.shape[0])
        t_sorted = t[order]

        # See comment in self._call_single.
        if self.ascending:
            segments = np.searchsorted(self.ts_sorted, t_sorted, side='left')
        else:
            segments = np.searchsorted(self.ts_sorted, t_sorted, side='right')
        segments -= 1
        segments[segments < 0] = 0
        segments[segments > self.n_segments - 1] = self.n_segments - 1
        if not self.ascending:
            segments = self.n_segments - 1 - segments

        ys = []
        group_start = 0
        for segment, group in groupby(segments):
            group_end = group_start + len(list(group))
            y = self.interpolants[segment](t_sorted[group_start:group_end])
            ys.append(y)
            group_start = group_end

        ys = np.hstack(ys)
        ys = ys[:, reverse]

        return ys
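
# A minimal usage sketch: OdeSolution instances are normally obtained through
# the public `scipy.integrate.solve_ivp` interface (which builds the
# DenseOutput interpolants) rather than constructed directly:
#
#     from scipy.integrate import solve_ivp
#     res = solve_ivp(lambda t, y: -y, (0, 5), [1.0], dense_output=True)
#     t = np.linspace(0, 5, 11)
#     y = res.sol(t)      # OdeSolution.__call__ with an array, shape (1, 11)
#     y0 = res.sol(2.5)   # scalar argument, shape (1,)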


NUM_JAC_DIFF_REJECT = EPS ** 0.875
NUM_JAC_DIFF_SMALL = EPS ** 0.75
NUM_JAC_DIFF_BIG = EPS ** 0.25
NUM_JAC_MIN_FACTOR = 1e3 * EPS
NUM_JAC_FACTOR_INCREASE = 10
NUM_JAC_FACTOR_DECREASE = 0.1


def num_jac(fun, t, y, f, threshold, factor, sparsity=None):
    """Finite differences Jacobian approximation tailored for ODE solvers.

    This function computes a finite difference approximation to the Jacobian
    matrix of `fun` with respect to `y` using forward differences.
    The Jacobian matrix has shape (n, n) and its element (i, j) is equal to
    ``d f_i / d y_j``.

    A special feature of this function is the ability to correct the step
    size from iteration to iteration. The main idea is to keep the finite
    difference significantly separated from its round-off error, which
    approximately equals ``EPS * np.abs(f)``. It reduces the possibility of a
    huge error and ensures that the estimated derivatives are reasonably close
    to the true values (i.e., the finite difference approximation at least
    qualitatively reflects the structure of the true Jacobian).

    Parameters
    ----------
    fun : callable
        Right-hand side of the system implemented in a vectorized fashion.
    t : float
        Current time.
    y : ndarray, shape (n,)
        Current state.
    f : ndarray, shape (n,)
        Value of the right hand side at (t, y).
    threshold : float
        Threshold for `y` value used for computing the step size as
        ``factor * np.maximum(np.abs(y), threshold)``. Typically, the value of
        absolute tolerance (atol) for a solver should be passed as `threshold`.
    factor : ndarray with shape (n,) or None
        Factor to use for computing the step size. Pass None for the very
        first evaluation, then use the value returned from this function.
    sparsity : tuple (structure, groups) or None
        Sparsity structure of the Jacobian, `structure` must be csc_matrix.

    Returns
    -------
    J : ndarray or csc_matrix, shape (n, n)
        Jacobian matrix.
    factor : ndarray, shape (n,)
        Suggested `factor` for the next evaluation.
    """
    y = np.asarray(y)
    n = y.shape[0]
    if n == 0:
        return np.empty((0, 0)), factor

    if factor is None:
        factor = np.full(n, EPS ** 0.5)
    else:
        factor = factor.copy()

    # Direct the step as ODE dictates, hoping that such a step won't lead to
    # a problematic region. For complex ODEs it makes sense to use the real
    # part of f as we use steps along real axis.
    f_sign = 2 * (np.real(f) >= 0).astype(float) - 1
    y_scale = f_sign * np.maximum(threshold, np.abs(y))
    h = (y + factor * y_scale) - y

    # Make sure that the step is not 0 to start with. Not likely it will be
    # executed often.
    for i in np.nonzero(h == 0)[0]:
        while h[i] == 0:
            factor[i] *= 10
            h[i] = (y[i] + factor[i] * y_scale[i]) - y[i]

    if sparsity is None:
        return _dense_num_jac(fun, t, y, f, h, factor, y_scale)
    else:
        structure, groups = sparsity
        return _sparse_num_jac(fun, t, y, f, h, factor, y_scale,
                               structure, groups)
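
# A minimal usage sketch for the dense path (a hypothetical 2x2 linear
# system; matrix multiplication keeps `rhs` vectorized, as `num_jac`
# requires):
#
#     A = np.array([[-2.0, 0.0],
#                   [3.0, -1.0]])
#
#     def rhs(t, y):
#         return A @ y  # works for y of shape (n,) and (n, n_columns)
#
#     y0 = np.array([1.0, 1.0])
#     J, factor = num_jac(rhs, 0.0, y0, rhs(0.0, y0),
#                         threshold=1e-8, factor=None)
#
# J approximates A; pass the returned `factor` back on the next call so the
# step sizes keep adapting.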


def _dense_num_jac(fun, t, y, f, h, factor, y_scale):
    n = y.shape[0]
    h_vecs = np.diag(h)
    f_new = fun(t, y[:, None] + h_vecs)
    diff = f_new - f[:, None]
    max_ind = np.argmax(np.abs(diff), axis=0)
    r = np.arange(n)
    max_diff = np.abs(diff[max_ind, r])
    scale = np.maximum(np.abs(f[max_ind]), np.abs(f_new[max_ind, r]))

    # Columns whose difference is dominated by round-off are retried with an
    # increased step; the new result is kept only where it is actually better.
    diff_too_small = max_diff < NUM_JAC_DIFF_REJECT * scale
    if np.any(diff_too_small):
        ind, = np.nonzero(diff_too_small)
        new_factor = NUM_JAC_FACTOR_INCREASE * factor[ind]
        h_new = (y[ind] + new_factor * y_scale[ind]) - y[ind]
        h_vecs[ind, ind] = h_new
        f_new = fun(t, y[:, None] + h_vecs[:, ind])
        diff_new = f_new - f[:, None]
        max_ind = np.argmax(np.abs(diff_new), axis=0)
        r = np.arange(ind.shape[0])
        max_diff_new = np.abs(diff_new[max_ind, r])
        scale_new = np.maximum(np.abs(f[max_ind]), np.abs(f_new[max_ind, r]))

        update = max_diff[ind] * scale_new < max_diff_new * scale[ind]
        if np.any(update):
            update, = np.nonzero(update)
            update_ind = ind[update]
            factor[update_ind] = new_factor[update]
            h[update_ind] = h_new[update]
            diff[:, update_ind] = diff_new[:, update]
            scale[update_ind] = scale_new[update]
            max_diff[update_ind] = max_diff_new[update]

    diff /= h

    factor[max_diff < NUM_JAC_DIFF_SMALL * scale] *= NUM_JAC_FACTOR_INCREASE
    factor[max_diff > NUM_JAC_DIFF_BIG * scale] *= NUM_JAC_FACTOR_DECREASE
    factor = np.maximum(factor, NUM_JAC_MIN_FACTOR)

    return diff, factor


def _sparse_num_jac(fun, t, y, f, h, factor, y_scale, structure, groups):
    n = y.shape[0]
    n_groups = np.max(groups) + 1
    # Perturb all columns of a group simultaneously; grouped columns share no
    # nonzero rows in `structure`, so one evaluation per group suffices.
    h_vecs = np.empty((n_groups, n))
    for group in range(n_groups):
        e = np.equal(group, groups)
        h_vecs[group] = h * e
    h_vecs = h_vecs.T

    f_new = fun(t, y[:, None] + h_vecs)
    df = f_new - f[:, None]

    i, j, _ = find(structure)
    diff = coo_matrix((df[i, groups[j]], (i, j)), shape=(n, n)).tocsc()
    max_ind = np.array(abs(diff).argmax(axis=0)).ravel()
    r = np.arange(n)
    max_diff = np.asarray(np.abs(diff[max_ind, r])).ravel()
    scale = np.maximum(np.abs(f[max_ind]),
                       np.abs(f_new[max_ind, groups[r]]))

    diff_too_small = max_diff < NUM_JAC_DIFF_REJECT * scale
    if np.any(diff_too_small):
        ind, = np.nonzero(diff_too_small)
        new_factor = NUM_JAC_FACTOR_INCREASE * factor[ind]
        h_new = (y[ind] + new_factor * y_scale[ind]) - y[ind]
        h_new_all = np.zeros(n)
        h_new_all[ind] = h_new

        groups_unique = np.unique(groups[ind])
        groups_map = np.empty(n_groups, dtype=int)
        h_vecs = np.empty((groups_unique.shape[0], n))
        for k, group in enumerate(groups_unique):
            e = np.equal(group, groups)
            h_vecs[k] = h_new_all * e
            groups_map[group] = k
        h_vecs = h_vecs.T

        f_new = fun(t, y[:, None] + h_vecs)
        df = f_new - f[:, None]
        i, j, _ = find(structure[:, ind])
        diff_new = coo_matrix((df[i, groups_map[groups[ind[j]]]],
                               (i, j)), shape=(n, ind.shape[0])).tocsc()

        max_ind_new = np.array(abs(diff_new).argmax(axis=0)).ravel()
        r = np.arange(ind.shape[0])
        max_diff_new = np.asarray(np.abs(diff_new[max_ind_new, r])).ravel()
        scale_new = np.maximum(
            np.abs(f[max_ind_new]),
            np.abs(f_new[max_ind_new, groups_map[groups[ind]]]))

        update = max_diff[ind] * scale_new < max_diff_new * scale[ind]
        if np.any(update):
            update, = np.nonzero(update)
            update_ind = ind[update]
            factor[update_ind] = new_factor[update]
            h[update_ind] = h_new[update]
            diff[:, update_ind] = diff_new[:, update]
            scale[update_ind] = scale_new[update]
            max_diff[update_ind] = max_diff_new[update]

    diff.data /= np.repeat(h, np.diff(diff.indptr))

    factor[max_diff < NUM_JAC_DIFF_SMALL * scale] *= NUM_JAC_FACTOR_INCREASE
    factor[max_diff > NUM_JAC_DIFF_BIG * scale] *= NUM_JAC_FACTOR_DECREASE
    factor = np.maximum(factor, NUM_JAC_MIN_FACTOR)

    return diff, factor
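
# A hedged sketch of the sparse path. The `groups` array is typically built
# with `scipy.optimize._numdiff.group_columns` (a private SciPy helper used
# by the stiff solvers); the 2x2 system below is hypothetical:
#
#     from scipy.sparse import csc_matrix
#     from scipy.optimize._numdiff import group_columns
#
#     A = np.array([[-2.0, 0.0],
#                   [3.0, -1.0]])
#     rhs = lambda t, y: A @ y
#     structure = csc_matrix((A != 0).astype(int))
#     groups = np.asarray(group_columns(structure))
#     y0 = np.array([1.0, 1.0])
#     J, factor = num_jac(rhs, 0.0, y0, rhs(0.0, y0), threshold=1e-8,
#                         factor=None, sparsity=(structure, groups))
#
# J is returned as a csc_matrix with the same sparsity pattern as
# `structure`.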