# _minpack_py.py

import warnings

from . import _minpack

import numpy as np
from numpy import (atleast_1d, dot, take, triu, shape, eye,
                   transpose, zeros, prod, greater,
                   asarray, inf,
                   finfo, inexact, issubdtype, dtype)
from scipy import linalg
from scipy.linalg import svd, cholesky, solve_triangular, LinAlgError, inv
from scipy._lib._util import _asarray_validated, _lazywhere
from scipy._lib._util import getfullargspec_no_self as _getfullargspec
from ._optimize import OptimizeResult, _check_unknown_options, OptimizeWarning
from ._lsq import least_squares
# from ._lsq.common import make_strictly_feasible
from ._lsq.least_squares import prepare_bounds
from scipy.optimize._minimize import Bounds

error = _minpack.error

__all__ = ['fsolve', 'leastsq', 'fixed_point', 'curve_fit']
def _check_func(checker, argname, thefunc, x0, args, numinputs,
                output_shape=None):
    res = atleast_1d(thefunc(*((x0[:numinputs],) + args)))
    if (output_shape is not None) and (shape(res) != output_shape):
        if (output_shape[0] != 1):
            if len(output_shape) > 1:
                if output_shape[1] == 1:
                    return shape(res)
            msg = "%s: there is a mismatch between the input and output " \
                  "shape of the '%s' argument" % (checker, argname)
            func_name = getattr(thefunc, '__name__', None)
            if func_name:
                msg += " '%s'." % func_name
            else:
                msg += "."
            msg += ' Shape should be %s but it is %s.' % (output_shape,
                                                          shape(res))
            raise TypeError(msg)
    if issubdtype(res.dtype, inexact):
        dt = res.dtype
    else:
        dt = dtype(float)
    return shape(res), dt
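
# Illustrative sketch (a hypothetical helper, not part of the public API):
# `_check_func` evaluates the callable once at `x0` and returns the output
# shape and a floating dtype, raising TypeError on a shape mismatch.
def _check_func_demo():
    # A function mapping a length-3 vector to a length-3 vector, so the
    # expected output shape (3,) is satisfied.
    shape_, dt = _check_func('fsolve', 'func', lambda x: x**2,
                             np.ones(3), (), 3, (3,))
    return shape_, dt  # ((3,), dtype('float64'))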

def fsolve(func, x0, args=(), fprime=None, full_output=0,
           col_deriv=0, xtol=1.49012e-8, maxfev=0, band=None,
           epsfcn=None, factor=100, diag=None):
    """
    Find the roots of a function.

    Return the roots of the (non-linear) equations defined by
    ``func(x) = 0`` given a starting estimate.

    Parameters
    ----------
    func : callable ``f(x, *args)``
        A function that takes at least one (possibly vector) argument,
        and returns a value of the same length.
    x0 : ndarray
        The starting estimate for the roots of ``func(x) = 0``.
    args : tuple, optional
        Any extra arguments to `func`.
    fprime : callable ``f(x, *args)``, optional
        A function to compute the Jacobian of `func` with derivatives
        across the rows. By default, the Jacobian will be estimated.
    full_output : bool, optional
        If True, return optional outputs.
    col_deriv : bool, optional
        Specify whether the Jacobian function computes derivatives down
        the columns (faster, because there is no transpose operation).
    xtol : float, optional
        The calculation will terminate if the relative error between two
        consecutive iterates is at most `xtol`.
    maxfev : int, optional
        The maximum number of calls to the function. If zero, the default
        is ``200*(N+1)`` when the Jacobian is estimated and ``100*(N+1)``
        when `fprime` is supplied, where N is the number of elements
        in `x0`.
    band : tuple, optional
        If set to a two-sequence containing the number of sub- and
        super-diagonals within the band of the Jacobi matrix, the
        Jacobi matrix is considered banded (only for ``fprime=None``).
    epsfcn : float, optional
        A suitable step length for the forward-difference
        approximation of the Jacobian (for ``fprime=None``). If
        `epsfcn` is less than the machine precision, it is assumed
        that the relative errors in the functions are of the order of
        the machine precision.
    factor : float, optional
        A parameter determining the initial step bound
        (``factor * || diag * x||``). Should be in the interval
        ``(0.1, 100)``.
    diag : sequence, optional
        N positive entries that serve as scale factors for the
        variables.

    Returns
    -------
    x : ndarray
        The solution (or the result of the last iteration for
        an unsuccessful call).
    infodict : dict
        A dictionary of optional outputs with the keys:

        ``nfev``
            number of function calls
        ``njev``
            number of Jacobian calls
        ``fvec``
            function evaluated at the output
        ``fjac``
            the orthogonal matrix, q, produced by the QR
            factorization of the final approximate Jacobian
            matrix, stored column wise
        ``r``
            upper triangular matrix produced by QR factorization
            of the same matrix
        ``qtf``
            the vector ``(transpose(q) * fvec)``

    ier : int
        An integer flag. Set to 1 if a solution was found, otherwise refer
        to `mesg` for more information.
    mesg : str
        If no solution is found, `mesg` details the cause of failure.

    See Also
    --------
    root : Interface to root finding algorithms for multivariate
           functions. See ``method='hybr'`` in particular.

    Notes
    -----
    ``fsolve`` is a wrapper around MINPACK's hybrd and hybrj algorithms.

    Examples
    --------
    Find a solution to the system of equations:
    ``x0*cos(x1) = 4, x1*x0 - x1 = 5``.

    >>> import numpy as np
    >>> from scipy.optimize import fsolve
    >>> def func(x):
    ...     return [x[0] * np.cos(x[1]) - 4,
    ...             x[1] * x[0] - x[1] - 5]
    >>> root = fsolve(func, [1, 1])
    >>> root
    array([6.50409711, 0.90841421])
    >>> np.isclose(func(root), [0.0, 0.0])  # func(root) should be almost 0.0.
    array([ True,  True])

    """
    options = {'col_deriv': col_deriv,
               'xtol': xtol,
               'maxfev': maxfev,
               'band': band,
               'eps': epsfcn,
               'factor': factor,
               'diag': diag}

    res = _root_hybr(func, x0, args, jac=fprime, **options)
    if full_output:
        x = res['x']
        info = dict((k, res.get(k))
                    for k in ('nfev', 'njev', 'fjac', 'r', 'qtf') if k in res)
        info['fvec'] = res['fun']
        return x, info, res['status'], res['message']
    else:
        status = res['status']
        msg = res['message']
        if status == 0:
            raise TypeError(msg)
        elif status == 1:
            pass
        elif status in [2, 3, 4, 5]:
            warnings.warn(msg, RuntimeWarning)
        else:
            raise TypeError(msg)
        return res['x']
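
# A minimal usage sketch (hypothetical, not part of the public API) showing
# how an analytic Jacobian can be supplied to `fsolve` via `fprime`, using
# the same system as the docstring example. With derivatives across the
# rows, entry [i, j] is d(func_i)/d(x_j).
def _fsolve_jacobian_demo():
    def func(x):
        return [x[0] * np.cos(x[1]) - 4,
                x[1] * x[0] - x[1] - 5]

    def jac(x):
        return [[np.cos(x[1]), -x[0] * np.sin(x[1])],
                [x[1], x[0] - 1]]

    # Passing `fprime` avoids the forward-difference Jacobian estimate.
    return fsolve(func, [1, 1], fprime=jac)  # approx [6.50409711, 0.90841421]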

def _root_hybr(func, x0, args=(), jac=None,
               col_deriv=0, xtol=1.49012e-08, maxfev=0, band=None, eps=None,
               factor=100, diag=None, **unknown_options):
    """
    Find the roots of a multivariate function using MINPACK's hybrd and
    hybrj routines (modified Powell method).

    Options
    -------
    col_deriv : bool
        Specify whether the Jacobian function computes derivatives down
        the columns (faster, because there is no transpose operation).
    xtol : float
        The calculation will terminate if the relative error between two
        consecutive iterates is at most `xtol`.
    maxfev : int
        The maximum number of calls to the function. If zero, the default
        is ``200*(N+1)`` when the Jacobian is estimated and ``100*(N+1)``
        when `jac` is supplied, where N is the number of elements in `x0`.
    band : tuple
        If set to a two-sequence containing the number of sub- and
        super-diagonals within the band of the Jacobi matrix, the
        Jacobi matrix is considered banded (only for ``fprime=None``).
    eps : float
        A suitable step length for the forward-difference
        approximation of the Jacobian (for ``fprime=None``). If
        `eps` is less than the machine precision, it is assumed
        that the relative errors in the functions are of the order of
        the machine precision.
    factor : float
        A parameter determining the initial step bound
        (``factor * || diag * x||``). Should be in the interval
        ``(0.1, 100)``.
    diag : sequence
        N positive entries that serve as scale factors for the
        variables.

    """
    _check_unknown_options(unknown_options)
    epsfcn = eps

    x0 = asarray(x0).flatten()
    n = len(x0)
    if not isinstance(args, tuple):
        args = (args,)
    shape, dtype = _check_func('fsolve', 'func', func, x0, args, n, (n,))
    if epsfcn is None:
        epsfcn = finfo(dtype).eps
    Dfun = jac
    if Dfun is None:
        if band is None:
            ml, mu = -10, -10
        else:
            ml, mu = band[:2]
        if maxfev == 0:
            maxfev = 200 * (n + 1)
        retval = _minpack._hybrd(func, x0, args, 1, xtol, maxfev,
                                 ml, mu, epsfcn, factor, diag)
    else:
        _check_func('fsolve', 'fprime', Dfun, x0, args, n, (n, n))
        if (maxfev == 0):
            maxfev = 100 * (n + 1)
        retval = _minpack._hybrj(func, Dfun, x0, args, 1,
                                 col_deriv, xtol, maxfev, factor, diag)

    x, status = retval[0], retval[-1]

    errors = {0: "Improper input parameters were entered.",
              1: "The solution converged.",
              2: "The number of calls to function has "
                 "reached maxfev = %d." % maxfev,
              3: "xtol=%f is too small, no further improvement "
                 "in the approximate\n  solution "
                 "is possible." % xtol,
              4: "The iteration is not making good progress, as measured "
                 "by the \n  improvement from the last five "
                 "Jacobian evaluations.",
              5: "The iteration is not making good progress, "
                 "as measured by the \n  improvement from the last "
                 "ten iterations.",
              'unknown': "An error occurred."}

    info = retval[1]
    info['fun'] = info.pop('fvec')
    sol = OptimizeResult(x=x, success=(status == 1), status=status)
    sol.update(info)
    try:
        sol['message'] = errors[status]
    except KeyError:
        sol['message'] = errors['unknown']

    return sol

LEASTSQ_SUCCESS = [1, 2, 3, 4]
LEASTSQ_FAILURE = [5, 6, 7, 8]

def leastsq(func, x0, args=(), Dfun=None, full_output=0,
            col_deriv=0, ftol=1.49012e-8, xtol=1.49012e-8,
            gtol=0.0, maxfev=0, epsfcn=None, factor=100, diag=None):
    """
    Minimize the sum of squares of a set of equations.

    ::

        x = arg min(sum(func(y)**2, axis=0))
             y

    Parameters
    ----------
    func : callable
        Should take at least one (possibly length ``N`` vector) argument and
        return ``M`` floating point numbers. It must not return NaNs or
        fitting might fail. ``M`` must be greater than or equal to ``N``.
    x0 : ndarray
        The starting estimate for the minimization.
    args : tuple, optional
        Any extra arguments to func are placed in this tuple.
    Dfun : callable, optional
        A function or method to compute the Jacobian of func with derivatives
        across the rows. If this is None, the Jacobian will be estimated.
    full_output : bool, optional
        non-zero to return all optional outputs.
    col_deriv : bool, optional
        non-zero to specify that the Jacobian function computes derivatives
        down the columns (faster, because there is no transpose operation).
    ftol : float, optional
        Relative error desired in the sum of squares.
    xtol : float, optional
        Relative error desired in the approximate solution.
    gtol : float, optional
        Orthogonality desired between the function vector and the columns of
        the Jacobian.
    maxfev : int, optional
        The maximum number of calls to the function. If `Dfun` is provided,
        then the default `maxfev` is 100*(N+1) where N is the number of
        elements in x0, otherwise the default `maxfev` is 200*(N+1).
    epsfcn : float, optional
        A variable used in determining a suitable step length for the forward-
        difference approximation of the Jacobian (for Dfun=None).
        Normally the actual step length will be sqrt(epsfcn)*x.
        If epsfcn is less than the machine precision, it is assumed that the
        relative errors are of the order of the machine precision.
    factor : float, optional
        A parameter determining the initial step bound
        (``factor * || diag * x||``). Should be in interval ``(0.1, 100)``.
    diag : sequence, optional
        N positive entries that serve as scale factors for the variables.

    Returns
    -------
    x : ndarray
        The solution (or the result of the last iteration for an unsuccessful
        call).
    cov_x : ndarray
        The inverse of the Hessian. `fjac` and `ipvt` are used to construct an
        estimate of the Hessian. A value of None indicates a singular matrix,
        which means the curvature in parameters `x` is numerically flat. To
        obtain the covariance matrix of the parameters `x`, `cov_x` must be
        multiplied by the variance of the residuals -- see curve_fit.
    infodict : dict
        a dictionary of optional outputs with the keys:

        ``nfev``
            The number of function calls
        ``fvec``
            The function evaluated at the output
        ``fjac``
            A permutation of the R matrix of a QR
            factorization of the final approximate
            Jacobian matrix, stored column wise.
            Together with ipvt, the covariance of the
            estimate can be approximated.
        ``ipvt``
            An integer array of length N which defines
            a permutation matrix, p, such that
            fjac*p = q*r, where r is upper triangular
            with diagonal elements of nonincreasing
            magnitude. Column j of p is column ipvt(j)
            of the identity matrix.
        ``qtf``
            The vector (transpose(q) * fvec).

    mesg : str
        A string message giving information about the cause of failure.
    ier : int
        An integer flag. If it is equal to 1, 2, 3 or 4, the solution was
        found. Otherwise, the solution was not found. In either case, the
        optional output variable 'mesg' gives more information.

    See Also
    --------
    least_squares : Newer interface to solve nonlinear least-squares problems
        with bounds on the variables. See ``method='lm'`` in particular.

    Notes
    -----
    "leastsq" is a wrapper around MINPACK's lmdif and lmder algorithms.

    cov_x is a Jacobian approximation to the Hessian of the least squares
    objective function.
    This approximation assumes that the objective function is based on the
    difference between some observed target data (ydata) and a (non-linear)
    function of the parameters `f(xdata, params)` ::

        func(params) = ydata - f(xdata, params)

    so that the objective function is ::

        min   sum((ydata - f(xdata, params))**2, axis=0)
        params

    The solution, `x`, is always a 1-D array, regardless of the shape of `x0`,
    or whether `x0` is a scalar.

    Examples
    --------
    >>> from scipy.optimize import leastsq
    >>> def func(x):
    ...     return 2*(x-3)**2+1
    >>> leastsq(func, 0)
    (array([2.99999999]), 1)

    """
    x0 = asarray(x0).flatten()
    n = len(x0)
    if not isinstance(args, tuple):
        args = (args,)
    shape, dtype = _check_func('leastsq', 'func', func, x0, args, n)
    m = shape[0]

    if n > m:
        raise TypeError(f"Improper input: func input vector length N={n} must"
                        f" not exceed func output vector length M={m}")

    if epsfcn is None:
        epsfcn = finfo(dtype).eps

    if Dfun is None:
        if maxfev == 0:
            maxfev = 200*(n + 1)
        retval = _minpack._lmdif(func, x0, args, full_output, ftol, xtol,
                                 gtol, maxfev, epsfcn, factor, diag)
    else:
        if col_deriv:
            _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n, m))
        else:
            _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m, n))
        if maxfev == 0:
            maxfev = 100 * (n + 1)
        retval = _minpack._lmder(func, Dfun, x0, args, full_output,
                                 col_deriv, ftol, xtol, gtol, maxfev,
                                 factor, diag)

    errors = {0: ["Improper input parameters.", TypeError],
              1: ["Both actual and predicted relative reductions "
                  "in the sum of squares\n  are at most %f" % ftol, None],
              2: ["The relative error between two consecutive "
                  "iterates is at most %f" % xtol, None],
              3: ["Both actual and predicted relative reductions in "
                  "the sum of squares\n  are at most %f and the "
                  "relative error between two consecutive "
                  "iterates is at \n  most %f" % (ftol, xtol), None],
              4: ["The cosine of the angle between func(x) and any "
                  "column of the\n  Jacobian is at most %f in "
                  "absolute value" % gtol, None],
              5: ["Number of calls to function has reached "
                  "maxfev = %d." % maxfev, ValueError],
              6: ["ftol=%f is too small, no further reduction "
                  "in the sum of squares\n  is possible." % ftol,
                  ValueError],
              7: ["xtol=%f is too small, no further improvement in "
                  "the approximate\n  solution is possible." % xtol,
                  ValueError],
              8: ["gtol=%f is too small, func(x) is orthogonal to the "
                  "columns of\n  the Jacobian to machine "
                  "precision." % gtol, ValueError]}

    # The FORTRAN return value (possible return values are >= 0 and <= 8)
    info = retval[-1]

    if full_output:
        cov_x = None
        if info in LEASTSQ_SUCCESS:
            # This was
            # perm = take(eye(n), retval[1]['ipvt'] - 1, 0)
            # r = triu(transpose(retval[1]['fjac'])[:n, :])
            # R = dot(r, perm)
            # cov_x = inv(dot(transpose(R), R))
            # but the explicit dot product was not necessary and sometimes
            # the result was not symmetric positive definite. See gh-4555.
            perm = retval[1]['ipvt'] - 1
            n = len(perm)
            r = triu(transpose(retval[1]['fjac'])[:n, :])
            inv_triu = linalg.get_lapack_funcs('trtri', (r,))
            try:
                # inverse of permuted matrix is a permutation of matrix inverse
                invR, trtri_info = inv_triu(r)  # default: upper, non-unit diag
                if trtri_info != 0:  # explicit comparison for readability
                    raise LinAlgError(f'trtri returned info {trtri_info}')
                invR[perm] = invR.copy()
                cov_x = invR @ invR.T
            except (LinAlgError, ValueError):
                pass
        return (retval[0], cov_x) + retval[1:-1] + (errors[info][0], info)
    else:
        if info in LEASTSQ_FAILURE:
            warnings.warn(errors[info][0], RuntimeWarning)
        elif info == 0:
            raise errors[info][1](errors[info][0])
        return retval[0], info
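
# A hedged usage sketch (hypothetical helper, not part of the public API):
# as the `leastsq` docstring notes, `cov_x` must be multiplied by the
# variance of the residuals to obtain the covariance of the parameters.
# `func` maps N parameters to M residuals; `m` is M.
def _leastsq_covariance_demo(func, x0, m):
    x, cov_x, infodict, mesg, ier = leastsq(func, x0, full_output=True)
    # Residual variance with M - N degrees of freedom, as in `curve_fit`.
    s_sq = (infodict['fvec'] ** 2).sum() / (m - len(np.atleast_1d(x)))
    return x, None if cov_x is None else cov_x * s_sq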

def _wrap_func(func, xdata, ydata, transform):
    if transform is None:
        def func_wrapped(params):
            return func(xdata, *params) - ydata
    elif transform.ndim == 1:
        def func_wrapped(params):
            return transform * (func(xdata, *params) - ydata)
    else:
        # Chisq = (y - yd)^T C^{-1} (y-yd)
        # transform = L such that C = L L^T
        # C^{-1} = L^{-T} L^{-1}
        # Chisq = (y - yd)^T L^{-T} L^{-1} (y-yd)
        # Define (y-yd)' = L^{-1} (y-yd)
        # by solving
        # L (y-yd)' = (y-yd)
        # and minimize (y-yd)'^T (y-yd)'
        def func_wrapped(params):
            return solve_triangular(transform, func(xdata, *params) - ydata,
                                    lower=True)
    return func_wrapped
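
# A small numerical sketch (illustrative only) of the whitening identity
# used above: with transform = L from C = L L^T, solving L r' = r gives
# r'^T r' == r^T C^{-1} r, so minimizing the transformed residuals
# minimizes the generalized chi-square.
def _whitening_identity_demo():
    rng = np.random.default_rng(0)
    A = rng.standard_normal((4, 4))
    C = A @ A.T + 4 * np.eye(4)  # a positive definite covariance
    r = rng.standard_normal(4)   # some residual vector
    L = cholesky(C, lower=True)
    r_prime = solve_triangular(L, r, lower=True)
    return np.allclose(r_prime @ r_prime, r @ np.linalg.solve(C, r))  # True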

def _wrap_jac(jac, xdata, transform):
    if transform is None:
        def jac_wrapped(params):
            return jac(xdata, *params)
    elif transform.ndim == 1:
        def jac_wrapped(params):
            return transform[:, np.newaxis] * np.asarray(jac(xdata, *params))
    else:
        def jac_wrapped(params):
            return solve_triangular(transform,
                                    np.asarray(jac(xdata, *params)),
                                    lower=True)
    return jac_wrapped

def _initialize_feasible(lb, ub):
    p0 = np.ones_like(lb)
    lb_finite = np.isfinite(lb)
    ub_finite = np.isfinite(ub)

    mask = lb_finite & ub_finite
    p0[mask] = 0.5 * (lb[mask] + ub[mask])

    mask = lb_finite & ~ub_finite
    p0[mask] = lb[mask] + 1

    mask = ~lb_finite & ub_finite
    p0[mask] = ub[mask] - 1

    return p0
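
# Illustrative sketch of `_initialize_feasible` (a hypothetical demo; the
# values follow directly from the code above): midpoint for two finite
# bounds, an offset of 1 for a single finite bound, and 1.0 when both
# bounds are infinite.
def _initialize_feasible_demo():
    lb = np.array([0.0, 2.0, -np.inf, -np.inf])
    ub = np.array([4.0, np.inf, 3.0, np.inf])
    return _initialize_feasible(lb, ub)  # array([2., 3., 2., 1.])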

def curve_fit(f, xdata, ydata, p0=None, sigma=None, absolute_sigma=False,
              check_finite=True, bounds=(-np.inf, np.inf), method=None,
              jac=None, *, full_output=False, **kwargs):
    """
    Use non-linear least squares to fit a function, f, to data.

    Assumes ``ydata = f(xdata, *params) + eps``.

    Parameters
    ----------
    f : callable
        The model function, f(x, ...). It must take the independent
        variable as the first argument and the parameters to fit as
        separate remaining arguments.
    xdata : array_like
        The independent variable where the data is measured.
        Should usually be an M-length sequence or a (k,M)-shaped array for
        functions with k predictors, and each element should be float
        convertible if it is an array-like object.
    ydata : array_like
        The dependent data, a length M array - nominally ``f(xdata, ...)``.
    p0 : array_like, optional
        Initial guess for the parameters (length N). If None, then the
        initial values will all be 1 (if the number of parameters for the
        function can be determined using introspection, otherwise a
        ValueError is raised).
    sigma : None or M-length sequence or MxM array, optional
        Determines the uncertainty in `ydata`. If we define residuals as
        ``r = ydata - f(xdata, *popt)``, then the interpretation of `sigma`
        depends on its number of dimensions:

        - A 1-D `sigma` should contain values of standard deviations of
          errors in `ydata`. In this case, the optimized function is
          ``chisq = sum((r / sigma) ** 2)``.
        - A 2-D `sigma` should contain the covariance matrix of
          errors in `ydata`. In this case, the optimized function is
          ``chisq = r.T @ inv(sigma) @ r``.

          .. versionadded:: 0.19

        None (default) is equivalent to a 1-D `sigma` filled with ones.
    absolute_sigma : bool, optional
        If True, `sigma` is used in an absolute sense and the estimated
        parameter covariance `pcov` reflects these absolute values.

        If False (default), only the relative magnitudes of the `sigma`
        values matter. The returned parameter covariance matrix `pcov` is
        based on scaling `sigma` by a constant factor. This constant is set
        by demanding that the reduced `chisq` for the optimal parameters
        `popt` when using the *scaled* `sigma` equals unity. In other words,
        `sigma` is scaled to match the sample variance of the residuals
        after the fit. Default is False.
        Mathematically,
        ``pcov(absolute_sigma=False) = pcov(absolute_sigma=True) * chisq(popt)/(M-N)``
    check_finite : bool, optional
        If True, check that the input arrays do not contain nans or infs,
        and raise a ValueError if they do. Setting this parameter to
        False may silently produce nonsensical results if the input arrays
        do contain nans. Default is True.
    bounds : 2-tuple of array_like or `Bounds`, optional
        Lower and upper bounds on parameters. Defaults to no bounds.
        There are two ways to specify the bounds:

        - Instance of `Bounds` class.
        - 2-tuple of array_like: Each element of the tuple must be either
          an array with the length equal to the number of parameters, or a
          scalar (in which case the bound is taken to be the same for all
          parameters). Use ``np.inf`` with an appropriate sign to disable
          bounds on all or some parameters.

    method : {'lm', 'trf', 'dogbox'}, optional
        Method to use for optimization. See `least_squares` for more details.
        Default is 'lm' for unconstrained problems and 'trf' if `bounds` are
        provided. The method 'lm' won't work when the number of observations
        is less than the number of variables; use 'trf' or 'dogbox' in this
        case.

        .. versionadded:: 0.17
    jac : callable, string or None, optional
        Function with signature ``jac(x, ...)`` which computes the Jacobian
        matrix of the model function with respect to parameters as a dense
        array_like structure. It will be scaled according to provided `sigma`.
        If None (default), the Jacobian will be estimated numerically.
        String keywords for 'trf' and 'dogbox' methods can be used to select
        a finite difference scheme, see `least_squares`.

        .. versionadded:: 0.18
    full_output : boolean, optional
        If True, this function returns additional information: `infodict`,
        `mesg`, and `ier`.

        .. versionadded:: 1.9
    **kwargs
        Keyword arguments passed to `leastsq` for ``method='lm'`` or
        `least_squares` otherwise.

    Returns
    -------
    popt : array
        Optimal values for the parameters so that the sum of the squared
        residuals of ``f(xdata, *popt) - ydata`` is minimized.
    pcov : 2-D array
        The estimated covariance of popt. The diagonals provide the variance
        of the parameter estimate. To compute one standard deviation errors
        on the parameters use ``perr = np.sqrt(np.diag(pcov))``.

        How the `sigma` parameter affects the estimated covariance
        depends on `absolute_sigma` argument, as described above.

        If the Jacobian matrix at the solution doesn't have full rank, then
        the 'lm' method returns a matrix filled with ``np.inf``; on the
        other hand, the 'trf' and 'dogbox' methods use the Moore-Penrose
        pseudoinverse to compute the covariance matrix.
    infodict : dict (returned only if `full_output` is True)
        a dictionary of optional outputs with the keys:

        ``nfev``
            The number of function calls. Methods 'trf' and 'dogbox' do not
            count function calls for numerical Jacobian approximation,
            as opposed to 'lm' method.
        ``fvec``
            The function values evaluated at the solution.
        ``fjac``
            A permutation of the R matrix of a QR
            factorization of the final approximate
            Jacobian matrix, stored column wise.
            Together with ipvt, the covariance of the
            estimate can be approximated.
            Method 'lm' only provides this information.
        ``ipvt``
            An integer array of length N which defines
            a permutation matrix, p, such that
            fjac*p = q*r, where r is upper triangular
            with diagonal elements of nonincreasing
            magnitude. Column j of p is column ipvt(j)
            of the identity matrix.
            Method 'lm' only provides this information.
        ``qtf``
            The vector (transpose(q) * fvec).
            Method 'lm' only provides this information.

        .. versionadded:: 1.9
    mesg : str (returned only if `full_output` is True)
        A string message giving information about the solution.

        .. versionadded:: 1.9
    ier : int (returned only if `full_output` is True)
        An integer flag. If it is equal to 1, 2, 3 or 4, the solution was
        found. Otherwise, the solution was not found. In either case, the
        optional output variable `mesg` gives more information.

        .. versionadded:: 1.9

    Raises
    ------
    ValueError
        if either `ydata` or `xdata` contain NaNs, or if incompatible options
        are used.
    RuntimeError
        if the least-squares minimization fails.
    OptimizeWarning
        if covariance of the parameters can not be estimated.

    See Also
    --------
    least_squares : Minimize the sum of squares of nonlinear functions.
    scipy.stats.linregress : Calculate a linear least squares regression for
                             two sets of measurements.

    Notes
    -----
    Users should ensure that inputs `xdata`, `ydata`, and the output of `f`
    are ``float64``, or else the optimization may return incorrect results.

    With ``method='lm'``, the algorithm uses the Levenberg-Marquardt
    algorithm through `leastsq`. Note that this algorithm can only deal with
    unconstrained problems.

    Box constraints can be handled by methods 'trf' and 'dogbox'. Refer to
    the docstring of `least_squares` for more information.

    Examples
    --------
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from scipy.optimize import curve_fit

    >>> def func(x, a, b, c):
    ...     return a * np.exp(-b * x) + c

    Define the data to be fit with some noise:

    >>> xdata = np.linspace(0, 4, 50)
    >>> y = func(xdata, 2.5, 1.3, 0.5)
    >>> rng = np.random.default_rng()
    >>> y_noise = 0.2 * rng.normal(size=xdata.size)
    >>> ydata = y + y_noise
    >>> plt.plot(xdata, ydata, 'b-', label='data')

    Fit for the parameters a, b, c of the function `func`:

    >>> popt, pcov = curve_fit(func, xdata, ydata)
    >>> popt
    array([2.56274217, 1.37268521, 0.47427475])
    >>> plt.plot(xdata, func(xdata, *popt), 'r-',
    ...          label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt))

    Constrain the optimization to the region of ``0 <= a <= 3``,
    ``0 <= b <= 1`` and ``0 <= c <= 0.5``:

    >>> popt, pcov = curve_fit(func, xdata, ydata, bounds=(0, [3., 1., 0.5]))
    >>> popt
    array([2.43736712, 1.        , 0.34463856])
    >>> plt.plot(xdata, func(xdata, *popt), 'g--',
    ...          label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt))

    >>> plt.xlabel('x')
    >>> plt.ylabel('y')
    >>> plt.legend()
    >>> plt.show()

    """
    if p0 is None:
        # determine number of parameters by inspecting the function
        sig = _getfullargspec(f)
        args = sig.args
        if len(args) < 2:
            raise ValueError("Unable to determine number of fit parameters.")
        n = len(args) - 1
    else:
        p0 = np.atleast_1d(p0)
        n = p0.size

    if isinstance(bounds, Bounds):
        lb, ub = bounds.lb, bounds.ub
    else:
        lb, ub = prepare_bounds(bounds, n)
    if p0 is None:
        p0 = _initialize_feasible(lb, ub)

    bounded_problem = np.any((lb > -np.inf) | (ub < np.inf))
    if method is None:
        if bounded_problem:
            method = 'trf'
        else:
            method = 'lm'

    if method == 'lm' and bounded_problem:
        raise ValueError("Method 'lm' only works for unconstrained problems. "
                         "Use 'trf' or 'dogbox' instead.")

    # optimization may produce garbage for float32 inputs, cast them to float64
    # NaNs cannot be handled
    if check_finite:
        ydata = np.asarray_chkfinite(ydata, float)
    else:
        ydata = np.asarray(ydata, float)

    if isinstance(xdata, (list, tuple, np.ndarray)):
        # `xdata` is passed straight to the user-defined `f`, so allow
        # non-array_like `xdata`.
        if check_finite:
            xdata = np.asarray_chkfinite(xdata, float)
        else:
            xdata = np.asarray(xdata, float)

    if ydata.size == 0:
        raise ValueError("`ydata` must not be empty!")

    # Determine type of sigma
    if sigma is not None:
        sigma = np.asarray(sigma)

        # if 1-D, sigma are errors, define transform = 1/sigma
        if sigma.shape == (ydata.size, ):
            transform = 1.0 / sigma
        # if 2-D, sigma is the covariance matrix,
        # define transform = L such that L L^T = C
        elif sigma.shape == (ydata.size, ydata.size):
            try:
                # scipy.linalg.cholesky requires lower=True to return L L^T = A
                transform = cholesky(sigma, lower=True)
            except LinAlgError as e:
                raise ValueError("`sigma` must be positive definite.") from e
        else:
            raise ValueError("`sigma` has incorrect shape.")
    else:
        transform = None

    func = _wrap_func(f, xdata, ydata, transform)
    if callable(jac):
        jac = _wrap_jac(jac, xdata, transform)
    elif jac is None and method != 'lm':
        jac = '2-point'

    if 'args' in kwargs:
        # The specification for the model function `f` does not support
        # additional arguments. Refer to the `curve_fit` docstring for
        # acceptable call signatures of `f`.
        raise ValueError("'args' is not a supported keyword argument.")

    if method == 'lm':
        # if ydata.size == 1, this might be used for broadcast.
        if ydata.size != 1 and n > ydata.size:
            raise TypeError(f"The number of func parameters={n} must not"
                            f" exceed the number of data points={ydata.size}")
        res = leastsq(func, p0, Dfun=jac, full_output=1, **kwargs)
        popt, pcov, infodict, errmsg, ier = res
        ysize = len(infodict['fvec'])
        cost = np.sum(infodict['fvec'] ** 2)
        if ier not in [1, 2, 3, 4]:
            raise RuntimeError("Optimal parameters not found: " + errmsg)
    else:
        # Rename maxfev (leastsq) to max_nfev (least_squares), if specified.
        if 'max_nfev' not in kwargs:
            kwargs['max_nfev'] = kwargs.pop('maxfev', None)

        res = least_squares(func, p0, jac=jac, bounds=bounds, method=method,
                            **kwargs)

        if not res.success:
            raise RuntimeError("Optimal parameters not found: " + res.message)

        infodict = dict(nfev=res.nfev, fvec=res.fun)
        ier = res.status
        errmsg = res.message

        ysize = len(res.fun)
        cost = 2 * res.cost  # res.cost is half sum of squares!
        popt = res.x

        # Do Moore-Penrose inverse discarding zero singular values.
        _, s, VT = svd(res.jac, full_matrices=False)
        threshold = np.finfo(float).eps * max(res.jac.shape) * s[0]
        s = s[s > threshold]
        VT = VT[:s.size]
        pcov = np.dot(VT.T / s**2, VT)

    warn_cov = False
    if pcov is None:
        # indeterminate covariance
        pcov = zeros((len(popt), len(popt)), dtype=float)
        pcov.fill(inf)
        warn_cov = True
    elif not absolute_sigma:
        if ysize > p0.size:
            s_sq = cost / (ysize - p0.size)
            pcov = pcov * s_sq
        else:
            pcov.fill(inf)
            warn_cov = True

    if warn_cov:
        warnings.warn('Covariance of the parameters could not be estimated',
                      category=OptimizeWarning)

    if full_output:
        return popt, pcov, infodict, errmsg, ier
    else:
        return popt, pcov
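
# A hedged usage sketch (hypothetical, for illustration): fitting a line
# with per-point uncertainties. With `absolute_sigma=True` the returned
# `pcov` treats `sigma` as absolute standard deviations rather than
# relative weights.
def _curve_fit_sigma_demo():
    def line(x, a, b):
        return a * x + b

    rng = np.random.default_rng(1)
    xdata = np.linspace(0, 1, 20)
    sigma = np.full(20, 0.1)
    ydata = line(xdata, 2.0, -1.0) + sigma * rng.standard_normal(20)
    popt, pcov = curve_fit(line, xdata, ydata, sigma=sigma,
                           absolute_sigma=True)
    perr = np.sqrt(np.diag(pcov))  # one-standard-deviation parameter errors
    return popt, perr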

def check_gradient(fcn, Dfcn, x0, args=(), col_deriv=0):
    """Perform a simple check on the gradient for correctness.

    """
    x = atleast_1d(x0)
    n = len(x)
    x = x.reshape((n,))
    fvec = atleast_1d(fcn(x, *args))
    m = len(fvec)
    fvec = fvec.reshape((m,))
    ldfjac = m
    fjac = atleast_1d(Dfcn(x, *args))
    fjac = fjac.reshape((m, n))
    if col_deriv == 0:
        fjac = transpose(fjac)

    xp = zeros((n,), float)
    err = zeros((m,), float)
    fvecp = None
    _minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 1, err)

    fvecp = atleast_1d(fcn(xp, *args))
    fvecp = fvecp.reshape((m,))
    _minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 2, err)

    good = (prod(greater(err, 0.5), axis=0))

    return (good, err)
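
# Illustrative sketch (a hypothetical demo, not part of the public API):
# `check_gradient` returns (good, err), where entries of `err` close to 1
# indicate agreement between the supplied Jacobian and the function, and
# entries close to 0 indicate disagreement.
def _check_gradient_demo():
    def fcn(x):
        return np.array([x[0]**2 + x[1], np.sin(x[0])])

    def dfcn(x):
        # Row i holds the derivatives of fcn_i with respect to each x_j.
        return np.array([[2 * x[0], 1.0],
                         [np.cos(x[0]), 0.0]])

    good, err = check_gradient(fcn, dfcn, [1.0, 2.0])
    return good, err  # err close to 1 for a correct Jacobian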

def _del2(p0, p1, d):
    return p0 - np.square(p1 - p0) / d


def _relerr(actual, desired):
    return (actual - desired) / desired
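
# A small numerical sketch (illustrative only) of the Aitken ``Del^2`` step
# implemented by `_del2`: given three successive fixed-point iterates
# p0, p1, p2, the accelerated value is p0 - (p1 - p0)**2 / (p2 - 2*p1 + p0).
def _del2_demo():
    f = np.cos  # fixed point near 0.739085...
    p0 = 1.0
    p1 = f(p0)
    p2 = f(p1)
    # Returns about 0.728, closer to the fixed point than p2 (about 0.858).
    return _del2(p0, p1, p2 - 2.0 * p1 + p0)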

def _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel):
    p0 = x0
    for i in range(maxiter):
        p1 = func(p0, *args)
        if use_accel:
            p2 = func(p1, *args)
            d = p2 - 2.0 * p1 + p0
            p = _lazywhere(d != 0, (p0, p1, d), f=_del2, fillvalue=p2)
        else:
            p = p1
        relerr = _lazywhere(p0 != 0, (p, p0), f=_relerr, fillvalue=p)
        if np.all(np.abs(relerr) < xtol):
            return p
        p0 = p
    msg = "Failed to converge after %d iterations, value is %s" % (maxiter, p)
    raise RuntimeError(msg)

def fixed_point(func, x0, args=(), xtol=1e-8, maxiter=500, method='del2'):
    """
    Find a fixed point of the function.

    Given a function of one or more variables and a starting point, find a
    fixed point of the function: i.e., where ``func(x0) == x0``.

    Parameters
    ----------
    func : function
        Function to evaluate.
    x0 : array_like
        Initial guess for the fixed point of the function.
    args : tuple, optional
        Extra arguments to `func`.
    xtol : float, optional
        Convergence tolerance, defaults to 1e-08.
    maxiter : int, optional
        Maximum number of iterations, defaults to 500.
    method : {"del2", "iteration"}, optional
        Method of finding the fixed-point, defaults to "del2",
        which uses Steffensen's Method with Aitken's ``Del^2``
        convergence acceleration [1]_. The "iteration" method simply iterates
        the function until convergence is detected, without attempting to
        accelerate the convergence.

    References
    ----------
    .. [1] Burden, Faires, "Numerical Analysis", 5th edition, pg. 80

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import optimize
    >>> def func(x, c1, c2):
    ...     return np.sqrt(c1/(x+c2))
    >>> c1 = np.array([10, 12.])
    >>> c2 = np.array([3, 5.])
    >>> optimize.fixed_point(func, [1.2, 1.3], args=(c1, c2))
    array([1.4920333 , 1.37228132])

    """
    use_accel = {'del2': True, 'iteration': False}[method]
    x0 = _asarray_validated(x0, as_inexact=True)
    return _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel)