_rbfinterp.py

  1. """Module for RBF interpolation."""
  2. import warnings
  3. from itertools import combinations_with_replacement
  4. import numpy as np
  5. from numpy.linalg import LinAlgError
  6. from scipy.spatial import KDTree
  7. from scipy.special import comb
  8. from scipy.linalg.lapack import dgesv # type: ignore[attr-defined]
  9. from ._rbfinterp_pythran import (_build_system,
  10. _build_evaluation_coefficients,
  11. _polynomial_matrix)
  12. __all__ = ["RBFInterpolator"]

# These RBFs are implemented.
_AVAILABLE = {
    "linear",
    "thin_plate_spline",
    "cubic",
    "quintic",
    "multiquadric",
    "inverse_multiquadric",
    "inverse_quadratic",
    "gaussian"
}

# The shape parameter does not need to be specified when using these RBFs.
_SCALE_INVARIANT = {"linear", "thin_plate_spline", "cubic", "quintic"}

# For RBFs that are conditionally positive definite of order m, the
# interpolant should include polynomial terms with degree >= m - 1. Define the
# minimum degrees here. These values are from Chapter 8 of Fasshauer's
# "Meshfree Approximation Methods with MATLAB". The RBFs that are not in this
# dictionary are positive definite and do not need polynomial terms.
_NAME_TO_MIN_DEGREE = {
    "multiquadric": 0,
    "linear": 0,
    "thin_plate_spline": 1,
    "cubic": 1,
    "quintic": 2
}
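
# As a concrete instance of the rule above (illustrative, following Fasshauer):
# the thin plate spline is conditionally positive definite of order m = 2, so
# the attached polynomial must have degree at least m - 1 = 1, which is the
# value recorded for "thin_plate_spline" in `_NAME_TO_MIN_DEGREE`.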


def _monomial_powers(ndim, degree):
    """Return the powers for each monomial in a polynomial.

    Parameters
    ----------
    ndim : int
        Number of variables in the polynomial.
    degree : int
        Degree of the polynomial.

    Returns
    -------
    (nmonos, ndim) int ndarray
        Array where each row contains the powers for each variable in a
        monomial.

    """
    nmonos = comb(degree + ndim, ndim, exact=True)
    out = np.zeros((nmonos, ndim), dtype=int)
    count = 0
    for deg in range(degree + 1):
        for mono in combinations_with_replacement(range(ndim), deg):
            # `mono` is a tuple of variables in the current monomial with
            # multiplicity indicating power (e.g., (0, 1, 1) represents
            # x*y**2).
            for var in mono:
                out[count, var] += 1

            count += 1

    return out


def _build_and_solve_system(y, d, smoothing, kernel, epsilon, powers):
    """Build and solve the RBF interpolation system of equations.

    Parameters
    ----------
    y : (P, N) float ndarray
        Data point coordinates.
    d : (P, S) float ndarray
        Data values at `y`.
    smoothing : (P,) float ndarray
        Smoothing parameter for each data point.
    kernel : str
        Name of the RBF.
    epsilon : float
        Shape parameter.
    powers : (R, N) int ndarray
        The exponents for each monomial in the polynomial.

    Returns
    -------
    shift : (N,) float ndarray
        Domain shift used to create the polynomial matrix.
    scale : (N,) float ndarray
        Domain scaling used to create the polynomial matrix.
    coeffs : (P + R, S) float ndarray
        Coefficients for each RBF and monomial.

    """
    lhs, rhs, shift, scale = _build_system(
        y, d, smoothing, kernel, epsilon, powers
    )
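    # `dgesv` solves the square system with an LU factorization. It returns
    # (lu, piv, x, info); only the solution and the LAPACK status flag are
    # needed here.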
    _, _, coeffs, info = dgesv(lhs, rhs, overwrite_a=True, overwrite_b=True)
    if info < 0:
        raise ValueError(f"The {-info}-th argument had an illegal value.")
    elif info > 0:
        msg = "Singular matrix."
        nmonos = powers.shape[0]
        if nmonos > 0:
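            # A rank-deficient polynomial matrix is a common cause of a
            # singular system (e.g., `degree` is 1 but the data points are all
            # collinear in 2-D), so check for that case and report it
            # specifically.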
            pmat = _polynomial_matrix((y - shift)/scale, powers)
            rank = np.linalg.matrix_rank(pmat)
            if rank < nmonos:
                msg = (
                    "Singular matrix. The matrix of monomials evaluated at "
                    "the data point coordinates does not have full column "
                    f"rank ({rank}/{nmonos})."
                )

        raise LinAlgError(msg)

    return shift, scale, coeffs


class RBFInterpolator:
    """Radial basis function (RBF) interpolation in N dimensions.

    Parameters
    ----------
    y : (P, N) array_like
        Data point coordinates.
    d : (P, ...) array_like
        Data values at `y`.
    neighbors : int, optional
        If specified, the value of the interpolant at each evaluation point
        will be computed using only this many nearest data points. All the
        data points are used by default.
    smoothing : float or (P,) array_like, optional
        Smoothing parameter. The interpolant perfectly fits the data when this
        is set to 0. For large values, the interpolant approaches a least
        squares fit of a polynomial with the specified degree. Default is 0.
    kernel : str, optional
        Type of RBF. This should be one of

            - 'linear' : ``-r``
            - 'thin_plate_spline' : ``r**2 * log(r)``
            - 'cubic' : ``r**3``
            - 'quintic' : ``-r**5``
            - 'multiquadric' : ``-sqrt(1 + r**2)``
            - 'inverse_multiquadric' : ``1/sqrt(1 + r**2)``
            - 'inverse_quadratic' : ``1/(1 + r**2)``
            - 'gaussian' : ``exp(-r**2)``

        Default is 'thin_plate_spline'.
    epsilon : float, optional
        Shape parameter that scales the input to the RBF. If `kernel` is
        'linear', 'thin_plate_spline', 'cubic', or 'quintic', this defaults to
        1 and can be ignored because it has the same effect as scaling the
        smoothing parameter. Otherwise, this must be specified.
    degree : int, optional
        Degree of the added polynomial. For some RBFs the interpolant may not
        be well-posed if the polynomial degree is too small. Those RBFs and
        their corresponding minimum degrees are

            - 'multiquadric' : 0
            - 'linear' : 0
            - 'thin_plate_spline' : 1
            - 'cubic' : 1
            - 'quintic' : 2

        The default value is the minimum degree for `kernel` or 0 if there is
        no minimum degree. Set this to -1 for no added polynomial.

    Notes
    -----
    An RBF is a scalar valued function in N-dimensional space whose value at
    :math:`x` can be expressed in terms of :math:`r=||x - c||`, where
    :math:`c` is the center of the RBF.

    An RBF interpolant for the vector of data values :math:`d`, which are from
    locations :math:`y`, is a linear combination of RBFs centered at :math:`y`
    plus a polynomial with a specified degree. The RBF interpolant is written
    as

    .. math::
        f(x) = K(x, y) a + P(x) b,

    where :math:`K(x, y)` is a matrix of RBFs with centers at :math:`y`
    evaluated at the points :math:`x`, and :math:`P(x)` is a matrix of
    monomials, which span polynomials with the specified degree, evaluated at
    :math:`x`. The coefficients :math:`a` and :math:`b` are the solution to
    the linear equations

    .. math::
        (K(y, y) + \\lambda I) a + P(y) b = d

    and

    .. math::
        P(y)^T a = 0,

    where :math:`\\lambda` is a non-negative smoothing parameter that controls
    how well we want to fit the data. The data are fit exactly when the
    smoothing parameter is 0.

    The above system is uniquely solvable if the following requirements are
    met:

        - :math:`P(y)` must have full column rank. :math:`P(y)` always has
          full column rank when `degree` is -1 or 0. When `degree` is 1,
          :math:`P(y)` has full column rank if the data point locations are
          not all collinear (N=2), coplanar (N=3), etc.
        - If `kernel` is 'multiquadric', 'linear', 'thin_plate_spline',
          'cubic', or 'quintic', then `degree` must not be lower than the
          minimum value listed above.
        - If `smoothing` is 0, then each data point location must be distinct.

    When using an RBF that is not scale invariant ('multiquadric',
    'inverse_multiquadric', 'inverse_quadratic', or 'gaussian'), an
    appropriate shape parameter must be chosen (e.g., through cross
    validation). Smaller values for the shape parameter correspond to wider
    RBFs. The problem can become ill-conditioned or singular when the shape
    parameter is too small.

    The memory required to solve for the RBF interpolation coefficients
    increases quadratically with the number of data points, which can become
    impractical when interpolating more than about a thousand data points.

    To overcome memory limitations for large interpolation problems, the
    `neighbors` argument can be specified to compute an RBF interpolant for
    each evaluation point using only the nearest data points.

    .. versionadded:: 1.7.0

    See Also
    --------
    NearestNDInterpolator
    LinearNDInterpolator
    CloughTocher2DInterpolator

    References
    ----------
    .. [1] Fasshauer, G., 2007. Meshfree Approximation Methods with Matlab.
        World Scientific Publishing Co.

    .. [2] http://amadeus.math.iit.edu/~fass/603_ch3.pdf

    .. [3] Wahba, G., 1990. Spline Models for Observational Data. SIAM.

    .. [4] http://pages.stat.wisc.edu/~wahba/stat860public/lect/lect8/lect8.pdf

    Examples
    --------
    Demonstrate interpolating scattered data to a grid in 2-D.

    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from scipy.interpolate import RBFInterpolator
    >>> from scipy.stats.qmc import Halton

    >>> rng = np.random.default_rng()
    >>> xobs = 2*Halton(2, seed=rng).random(100) - 1
    >>> yobs = np.sum(xobs, axis=1)*np.exp(-6*np.sum(xobs**2, axis=1))

    >>> xgrid = np.mgrid[-1:1:50j, -1:1:50j]
    >>> xflat = xgrid.reshape(2, -1).T
    >>> yflat = RBFInterpolator(xobs, yobs)(xflat)
    >>> ygrid = yflat.reshape(50, 50)

    >>> fig, ax = plt.subplots()
    >>> ax.pcolormesh(*xgrid, ygrid, vmin=-0.25, vmax=0.25, shading='gouraud')
    >>> p = ax.scatter(*xobs.T, c=yobs, s=50, ec='k', vmin=-0.25, vmax=0.25)
    >>> fig.colorbar(p)
    >>> plt.show()
  228. """

    def __init__(self, y, d,
                 neighbors=None,
                 smoothing=0.0,
                 kernel="thin_plate_spline",
                 epsilon=None,
                 degree=None):
        y = np.asarray(y, dtype=float, order="C")
        if y.ndim != 2:
            raise ValueError("`y` must be a 2-dimensional array.")

        ny, ndim = y.shape

        d_dtype = complex if np.iscomplexobj(d) else float
        d = np.asarray(d, dtype=d_dtype, order="C")
        if d.shape[0] != ny:
            raise ValueError(
                f"Expected the first axis of `d` to have length {ny}."
            )

        d_shape = d.shape[1:]
        d = d.reshape((ny, -1))
        # If `d` is complex, convert it to a float array with twice as many
        # columns. Otherwise, the LHS matrix would need to be converted to
        # complex and take up 2x more memory than necessary.
        d = d.view(float)

        if np.isscalar(smoothing):
            smoothing = np.full(ny, smoothing, dtype=float)
        else:
            smoothing = np.asarray(smoothing, dtype=float, order="C")
            if smoothing.shape != (ny,):
                raise ValueError(
                    "Expected `smoothing` to be a scalar or have shape "
                    f"({ny},)."
                )

        kernel = kernel.lower()
        if kernel not in _AVAILABLE:
            raise ValueError(f"`kernel` must be one of {_AVAILABLE}.")

        if epsilon is None:
            if kernel in _SCALE_INVARIANT:
                epsilon = 1.0
            else:
                raise ValueError(
                    "`epsilon` must be specified if `kernel` is not one of "
                    f"{_SCALE_INVARIANT}."
                )
        else:
            epsilon = float(epsilon)

        min_degree = _NAME_TO_MIN_DEGREE.get(kernel, -1)
        if degree is None:
            degree = max(min_degree, 0)
        else:
            degree = int(degree)
            if degree < -1:
                raise ValueError("`degree` must be at least -1.")
            elif degree < min_degree:
                warnings.warn(
                    f"`degree` should not be below {min_degree} when `kernel` "
                    f"is '{kernel}'. The interpolant may not be uniquely "
                    "solvable, and the smoothing parameter may have an "
                    "unintuitive effect.",
                    UserWarning
                )

        if neighbors is None:
            nobs = ny
        else:
            # Make sure the number of nearest neighbors used for interpolation
            # does not exceed the number of observations.
            neighbors = int(min(neighbors, ny))
            nobs = neighbors

        powers = _monomial_powers(ndim, degree)
        # The polynomial matrix must have full column rank in order for the
        # interpolant to be well-posed, which is not possible if there are
        # fewer observations than monomials.
        if powers.shape[0] > nobs:
            raise ValueError(
                f"At least {powers.shape[0]} data points are required when "
                f"`degree` is {degree} and the number of dimensions is {ndim}."
            )

        if neighbors is None:
            shift, scale, coeffs = _build_and_solve_system(
                y, d, smoothing, kernel, epsilon, powers
            )

            # Make these attributes private since they do not always exist.
            self._shift = shift
            self._scale = scale
            self._coeffs = coeffs

        else:
            self._tree = KDTree(y)

        self.y = y
        self.d = d
        self.d_shape = d_shape
        self.d_dtype = d_dtype
        self.neighbors = neighbors
        self.smoothing = smoothing
        self.kernel = kernel
        self.epsilon = epsilon
        self.powers = powers

    def _chunk_evaluator(
            self,
            x,
            y,
            shift,
            scale,
            coeffs,
            memory_budget=1000000
    ):
        """
        Evaluate the interpolation while controlling memory consumption.
        We chunk the input if we need more memory than specified.

        Parameters
        ----------
        x : (Q, N) float ndarray
            Array of points at which to evaluate the interpolant.
        y : (P, N) float ndarray
            Array of points at which the function values are known.
        shift : (N,) float ndarray
            Domain shift used to create the polynomial matrix.
        scale : (N,) float ndarray
            Domain scaling used to create the polynomial matrix.
        coeffs : (P + R, S) float ndarray
            Coefficients in front of the basis functions.
        memory_budget : int
            Total amount of memory (in units of sizeof(float)) we wish to
            devote to storing the array of coefficients for the interpolated
            points. If we need more memory than that, we chunk the input.

        Returns
        -------
        (Q, S) float ndarray
            Interpolated array.

        """
        nx, ndim = x.shape
        if self.neighbors is None:
            nnei = len(y)
        else:
            nnei = self.neighbors
        # In each chunk we consume the same space we already occupy.
        chunksize = memory_budget // ((self.powers.shape[0] + nnei)) + 1
        if chunksize <= nx:
            out = np.empty((nx, self.d.shape[1]), dtype=float)
            for i in range(0, nx, chunksize):
                vec = _build_evaluation_coefficients(
                    x[i:i + chunksize, :],
                    y,
                    self.kernel,
                    self.epsilon,
                    self.powers,
                    shift,
                    scale)
                out[i:i + chunksize, :] = np.dot(vec, coeffs)
        else:
            vec = _build_evaluation_coefficients(
                x,
                y,
                self.kernel,
                self.epsilon,
                self.powers,
                shift,
                scale)
            out = np.dot(vec, coeffs)
        return out

    def __call__(self, x):
        """Evaluate the interpolant at `x`.

        Parameters
        ----------
        x : (Q, N) array_like
            Evaluation point coordinates.

        Returns
        -------
        (Q, ...) ndarray
            Values of the interpolant at `x`.

        """
        x = np.asarray(x, dtype=float, order="C")
        if x.ndim != 2:
            raise ValueError("`x` must be a 2-dimensional array.")

        nx, ndim = x.shape
        if ndim != self.y.shape[1]:
            raise ValueError("Expected the second axis of `x` to have length "
                             f"{self.y.shape[1]}.")

        # Our memory budget for storing RBF coefficients is based on the
        # number of floats we already have in memory. If that number is below
        # 1e6, we use 1e6 instead. The budget is used to decide how to chunk
        # the inputs.
        memory_budget = max(x.size + self.y.size + self.d.size, 1000000)

        if self.neighbors is None:
            out = self._chunk_evaluator(
                x,
                self.y,
                self._shift,
                self._scale,
                self._coeffs,
                memory_budget=memory_budget)
        else:
            # Get the indices of the k nearest observation points to each
            # evaluation point.
            _, yindices = self._tree.query(x, self.neighbors)
            if self.neighbors == 1:
                # `KDTree` squeezes the output when neighbors=1.
                yindices = yindices[:, None]

            # Multiple evaluation points may have the same neighborhood of
            # observation points. Make the neighborhoods unique so that we
            # only compute the interpolation coefficients once for each
            # neighborhood.
            yindices = np.sort(yindices, axis=1)
            yindices, inv = np.unique(yindices, return_inverse=True, axis=0)
            # `inv` tells us which neighborhood will be used by each
            # evaluation point. Now we find which evaluation points will be
            # using each neighborhood.
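            # For example (hypothetical indices): if the sorted neighborhoods
            # are [[0, 1], [2, 3], [0, 1]], then after `np.unique` the rows of
            # `yindices` are [[0, 1], [2, 3]] and `inv` is [0, 1, 0], so
            # evaluation points 0 and 2 share the first neighborhood.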
            xindices = [[] for _ in range(len(yindices))]
            for i, j in enumerate(inv):
                xindices[j].append(i)

            out = np.empty((nx, self.d.shape[1]), dtype=float)
            for xidx, yidx in zip(xindices, yindices):
                # `yidx` are the indices of the observations in this
                # neighborhood. `xidx` are the indices of the evaluation
                # points that are using this neighborhood.
                xnbr = x[xidx]
                ynbr = self.y[yidx]
                dnbr = self.d[yidx]
                snbr = self.smoothing[yidx]
                shift, scale, coeffs = _build_and_solve_system(
                    ynbr,
                    dnbr,
                    snbr,
                    self.kernel,
                    self.epsilon,
                    self.powers,
                )
                out[xidx] = self._chunk_evaluator(
                    xnbr,
                    ynbr,
                    shift,
                    scale,
                    coeffs,
                    memory_budget=memory_budget)

        out = out.view(self.d_dtype)
        out = out.reshape((nx, ) + self.d_shape)
        return out