  1. """
  2. Unit tests for optimization routines from minpack.py.
  3. """
  4. import warnings
  5. import pytest
  6. from numpy.testing import (assert_, assert_almost_equal, assert_array_equal,
  7. assert_array_almost_equal, assert_allclose,
  8. assert_warns, suppress_warnings)
  9. from pytest import raises as assert_raises
  10. import numpy as np
  11. from numpy import array, float64
  12. from multiprocessing.pool import ThreadPool
  13. from scipy import optimize, linalg
  14. from scipy.special import lambertw
  15. from scipy.optimize._minpack_py import leastsq, curve_fit, fixed_point
  16. from scipy.optimize import OptimizeWarning
  17. from scipy.optimize._minimize import Bounds


class ReturnShape:
    """This class exists to create a callable that does not have a
    '__name__' attribute.

    __init__ takes the argument 'shape', which should be a tuple of ints.
    When an instance is called with a single argument 'x', it returns
    numpy.ones(shape).
    """
    def __init__(self, shape):
        self.shape = shape

    def __call__(self, x):
        return np.ones(self.shape)


def dummy_func(x, shape):
    """A function that returns an array of ones of the given shape.
    `x` is ignored.
    """
    return np.ones(shape)


def sequence_parallel(fs):
    with ThreadPool(len(fs)) as pool:
        return pool.map(lambda f: f(), fs)


# Function and Jacobian for tests of solvers for systems of nonlinear
# equations


def pressure_network(flow_rates, Qtot, k):
    """Evaluate non-linear equation system representing
    the pressures and flows in a system of n parallel pipes::

        f_i = P_i - P_0, for i = 1..n-1
        f_0 = sum(Q_i) - Qtot

    where Q_i is the flow rate in pipe i and P_i the pressure in that pipe.
    The pressure drop in each pipe is modeled as P = k*Q**2, where k is a
    valve coefficient and Q is the flow rate.

    Parameters
    ----------
    flow_rates : ndarray
        A 1-D array of n flow rates [kg/s].
    Qtot : float
        A scalar, the total input flow rate [kg/s].
    k : ndarray
        A 1-D array of n valve coefficients [1/kg m].

    Returns
    -------
    F : ndarray
        A 1-D array, F[i] == f_i.
    """
    P = k * flow_rates**2
    F = np.hstack((P[1:] - P[0], flow_rates.sum() - Qtot))
    return F
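

# Illustrative check (a sketch, not part of the original suite): with four
# equal pipes (k = 0.5 each) and Qtot = 4, the symmetric solution is one
# unit of flow per pipe, and the residual vector vanishes there.
def _demo_pressure_network_residual():
    residual = pressure_network(np.ones(4), 4, np.full(4, 0.5))
    assert np.allclose(residual, 0.0)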


def pressure_network_jacobian(flow_rates, Qtot, k):
    """Return the jacobian of the equation system F(flow_rates)
    computed by `pressure_network` with respect to
    *flow_rates*. See `pressure_network` for the detailed
    description of parameters.

    Returns
    -------
    jac : ndarray
        *n* by *n* matrix ``df_i/dQ_j`` where ``n = len(flow_rates)``
        and *f_i* and *Q_i* are described in the doc for `pressure_network`
    """
    n = len(flow_rates)
    pdiff = np.diag(flow_rates[1:] * 2 * k[1:])

    jac = np.empty((n, n))
    # Rows 0..n-2: df_i/dQ_j for f_i = k_i*Q_i**2 - k_0*Q_0**2, i.e. the
    # diagonal terms 2*k_i*Q_i in columns 1..n-1 and -2*k_0*Q_0 in column 0.
    jac[:n-1, 1:] = pdiff
    jac[:n-1, 0] = -flow_rates[0] * 2 * k[0]
    # Last row: the flow-conservation equation sum(Q_i) - Qtot.
    jac[n-1, :] = np.ones(n)

    return jac
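

# Sketch (illustrative, not part of the original suite): the analytic
# Jacobian can be cross-checked against a forward-difference estimate at a
# generic point; the helper name and step size below are arbitrary choices.
def _demo_pressure_network_jacobian_fd(eps=1e-7):
    flow_rates = np.array([1.5, 0.5, 1.0, 1.0])
    Qtot, k = 4, np.full(4, 0.5)
    f0 = pressure_network(flow_rates, Qtot, k)
    fd = np.empty((flow_rates.size, flow_rates.size))
    for j in range(flow_rates.size):
        dx = np.zeros_like(flow_rates)
        dx[j] = eps
        fd[:, j] = (pressure_network(flow_rates + dx, Qtot, k) - f0) / eps
    assert np.allclose(fd, pressure_network_jacobian(flow_rates, Qtot, k),
                       atol=1e-5)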


def pressure_network_fun_and_grad(flow_rates, Qtot, k):
    return (pressure_network(flow_rates, Qtot, k),
            pressure_network_jacobian(flow_rates, Qtot, k))
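

# Note (added for clarity): the (F, jac) pair returned above is what
# optimize.root consumes when called with jac=True; see
# TestRootHybr.test_pressure_network_with_gradient_combined below.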


class TestFSolve:
    def test_pressure_network_no_gradient(self):
        # fsolve without gradient, equal pipes -> equal flows.
        k = np.full(4, 0.5)
        Qtot = 4
        initial_guess = array([2., 0., 2., 0.])
        final_flows, info, ier, mesg = optimize.fsolve(
            pressure_network, initial_guess, args=(Qtot, k),
            full_output=True)
        assert_array_almost_equal(final_flows, np.ones(4))
        assert_(ier == 1, mesg)

    def test_pressure_network_with_gradient(self):
        # fsolve with gradient, equal pipes -> equal flows
        k = np.full(4, 0.5)
        Qtot = 4
        initial_guess = array([2., 0., 2., 0.])
        final_flows = optimize.fsolve(
            pressure_network, initial_guess, args=(Qtot, k),
            fprime=pressure_network_jacobian)
        assert_array_almost_equal(final_flows, np.ones(4))

    def test_wrong_shape_func_callable(self):
        func = ReturnShape(1)
        # x0 is a list of two elements, but func will return an array with
        # length 1, so this should result in a TypeError.
        x0 = [1.5, 2.0]
        assert_raises(TypeError, optimize.fsolve, func, x0)

    def test_wrong_shape_func_function(self):
        # x0 is a list of two elements, but func will return an array with
        # length 1, so this should result in a TypeError.
        x0 = [1.5, 2.0]
        assert_raises(TypeError, optimize.fsolve, dummy_func, x0,
                      args=((1,),))

    def test_wrong_shape_fprime_callable(self):
        func = ReturnShape(1)
        deriv_func = ReturnShape((2, 2))
        assert_raises(TypeError, optimize.fsolve, func, x0=[0, 1],
                      fprime=deriv_func)

    def test_wrong_shape_fprime_function(self):
        func = lambda x: dummy_func(x, (2,))
        deriv_func = lambda x: dummy_func(x, (3, 3))
        assert_raises(TypeError, optimize.fsolve, func, x0=[0, 1],
                      fprime=deriv_func)

    def test_func_can_raise(self):
        def func(*args):
            raise ValueError('I raised')

        with assert_raises(ValueError, match='I raised'):
            optimize.fsolve(func, x0=[0])

    def test_Dfun_can_raise(self):
        func = lambda x: x - np.array([10])

        def deriv_func(*args):
            raise ValueError('I raised')

        with assert_raises(ValueError, match='I raised'):
            optimize.fsolve(func, x0=[0], fprime=deriv_func)

    def test_float32(self):
        func = lambda x: np.array([x[0] - 100, x[1] - 1000],
                                  dtype=np.float32)**2
        p = optimize.fsolve(func, np.array([1, 1], np.float32))
        assert_allclose(func(p), [0, 0], atol=1e-3)

    def test_reentrant_func(self):
        def func(*args):
            self.test_pressure_network_no_gradient()
            return pressure_network(*args)

        # fsolve without gradient, equal pipes -> equal flows.
        k = np.full(4, 0.5)
        Qtot = 4
        initial_guess = array([2., 0., 2., 0.])
        final_flows, info, ier, mesg = optimize.fsolve(
            func, initial_guess, args=(Qtot, k),
            full_output=True)
        assert_array_almost_equal(final_flows, np.ones(4))
        assert_(ier == 1, mesg)

    def test_reentrant_Dfunc(self):
        def deriv_func(*args):
            self.test_pressure_network_with_gradient()
            return pressure_network_jacobian(*args)

        # fsolve with gradient, equal pipes -> equal flows
        k = np.full(4, 0.5)
        Qtot = 4
        initial_guess = array([2., 0., 2., 0.])
        final_flows = optimize.fsolve(
            pressure_network, initial_guess, args=(Qtot, k),
            fprime=deriv_func)
        assert_array_almost_equal(final_flows, np.ones(4))

    def test_concurrent_no_gradient(self):
        v = sequence_parallel([self.test_pressure_network_no_gradient] * 10)
        assert all([result is None for result in v])

    def test_concurrent_with_gradient(self):
        v = sequence_parallel([self.test_pressure_network_with_gradient] * 10)
        assert all([result is None for result in v])


class TestRootHybr:
    def test_pressure_network_no_gradient(self):
        # root/hybr without gradient, equal pipes -> equal flows
        k = np.full(4, 0.5)
        Qtot = 4
        initial_guess = array([2., 0., 2., 0.])
        final_flows = optimize.root(pressure_network, initial_guess,
                                    method='hybr', args=(Qtot, k)).x
        assert_array_almost_equal(final_flows, np.ones(4))

    def test_pressure_network_with_gradient(self):
        # root/hybr with gradient, equal pipes -> equal flows
        k = np.full(4, 0.5)
        Qtot = 4
        initial_guess = array([[2., 0., 2., 0.]])
        final_flows = optimize.root(pressure_network, initial_guess,
                                    args=(Qtot, k), method='hybr',
                                    jac=pressure_network_jacobian).x
        assert_array_almost_equal(final_flows, np.ones(4))

    def test_pressure_network_with_gradient_combined(self):
        # root/hybr with gradient and function combined, equal pipes ->
        # equal flows
        k = np.full(4, 0.5)
        Qtot = 4
        initial_guess = array([2., 0., 2., 0.])
        final_flows = optimize.root(pressure_network_fun_and_grad,
                                    initial_guess, args=(Qtot, k),
                                    method='hybr', jac=True).x
        assert_array_almost_equal(final_flows, np.ones(4))


class TestRootLM:
    def test_pressure_network_no_gradient(self):
        # root/lm without gradient, equal pipes -> equal flows
        k = np.full(4, 0.5)
        Qtot = 4
        initial_guess = array([2., 0., 2., 0.])
        final_flows = optimize.root(pressure_network, initial_guess,
                                    method='lm', args=(Qtot, k)).x
        assert_array_almost_equal(final_flows, np.ones(4))


class TestLeastSq:
    def setup_method(self):
        x = np.linspace(0, 10, 40)
        a, b, c = 3.1, 42, -304.2
        self.x = x
        self.abc = a, b, c
        y_true = a*x**2 + b*x + c
        np.random.seed(0)
        self.y_meas = y_true + 0.01*np.random.standard_normal(y_true.shape)

    def residuals(self, p, y, x):
        a, b, c = p
        err = y - (a*x**2 + b*x + c)
        return err

    def residuals_jacobian(self, _p, _y, x):
        return -np.vstack([x**2, x, np.ones_like(x)]).T
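
    # Sketch (illustrative, not part of the original suite): the residuals
    # are linear in (a, b, c), so a direct linear least-squares solve
    # against the design matrix gives a reference value that the leastsq
    # fits below should approach. The helper name is hypothetical.
    def _linear_reference_fit(self):
        A = np.vstack([self.x**2, self.x, np.ones_like(self.x)]).T
        coef, *_ = np.linalg.lstsq(A, self.y_meas, rcond=None)
        return coef  # close to self.abc, up to the injected noise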

    def test_basic(self):
        p0 = array([0, 0, 0])
        params_fit, ier = leastsq(self.residuals, p0,
                                  args=(self.y_meas, self.x))
        assert_(ier in (1, 2, 3, 4), 'solution not found (ier=%d)' % ier)
        # low precision due to random
        assert_array_almost_equal(params_fit, self.abc, decimal=2)

    def test_basic_with_gradient(self):
        p0 = array([0, 0, 0])
        params_fit, ier = leastsq(self.residuals, p0,
                                  args=(self.y_meas, self.x),
                                  Dfun=self.residuals_jacobian)
        assert_(ier in (1, 2, 3, 4), 'solution not found (ier=%d)' % ier)
        # low precision due to random
        assert_array_almost_equal(params_fit, self.abc, decimal=2)

    def test_full_output(self):
        p0 = array([[0, 0, 0]])
        full_output = leastsq(self.residuals, p0,
                              args=(self.y_meas, self.x),
                              full_output=True)
        params_fit, cov_x, infodict, mesg, ier = full_output
        assert_(ier in (1, 2, 3, 4), 'solution not found: %s' % mesg)

    def test_input_untouched(self):
        p0 = array([0, 0, 0], dtype=float64)
        p0_copy = array(p0, copy=True)
        full_output = leastsq(self.residuals, p0,
                              args=(self.y_meas, self.x),
                              full_output=True)
        params_fit, cov_x, infodict, mesg, ier = full_output
        assert_(ier in (1, 2, 3, 4), 'solution not found: %s' % mesg)
        assert_array_equal(p0, p0_copy)

    def test_wrong_shape_func_callable(self):
        func = ReturnShape(1)
        # x0 is a list of two elements, but func will return an array with
        # length 1, so this should result in a TypeError.
        x0 = [1.5, 2.0]
        assert_raises(TypeError, optimize.leastsq, func, x0)

    def test_wrong_shape_func_function(self):
        # x0 is a list of two elements, but func will return an array with
        # length 1, so this should result in a TypeError.
        x0 = [1.5, 2.0]
        assert_raises(TypeError, optimize.leastsq, dummy_func, x0,
                      args=((1,),))

    def test_wrong_shape_Dfun_callable(self):
        func = ReturnShape(1)
        deriv_func = ReturnShape((2, 2))
        assert_raises(TypeError, optimize.leastsq, func, x0=[0, 1],
                      Dfun=deriv_func)

    def test_wrong_shape_Dfun_function(self):
        func = lambda x: dummy_func(x, (2,))
        deriv_func = lambda x: dummy_func(x, (3, 3))
        assert_raises(TypeError, optimize.leastsq, func, x0=[0, 1],
                      Dfun=deriv_func)

    def test_float32(self):
        # Regression test for gh-1447
        def func(p, x, y):
            q = p[0]*np.exp(-(x-p[1])**2/(2.0*p[2]**2))+p[3]
            return q - y

        x = np.array([1.475, 1.429, 1.409, 1.419, 1.455, 1.519, 1.472,
                      1.368, 1.286, 1.231], dtype=np.float32)
        y = np.array([0.0168, 0.0193, 0.0211, 0.0202, 0.0171, 0.0151,
                      0.0185, 0.0258, 0.034, 0.0396], dtype=np.float32)
        p0 = np.array([1.0, 1.0, 1.0, 1.0])
        p1, success = optimize.leastsq(func, p0, args=(x, y))
        assert_(success in [1, 2, 3, 4])
        assert_((func(p1, x, y)**2).sum() < 1e-4 * (func(p0, x, y)**2).sum())

    def test_func_can_raise(self):
        def func(*args):
            raise ValueError('I raised')

        with assert_raises(ValueError, match='I raised'):
            optimize.leastsq(func, x0=[0])

    def test_Dfun_can_raise(self):
        func = lambda x: x - np.array([10])

        def deriv_func(*args):
            raise ValueError('I raised')

        with assert_raises(ValueError, match='I raised'):
            optimize.leastsq(func, x0=[0], Dfun=deriv_func)

    def test_reentrant_func(self):
        def func(*args):
            self.test_basic()
            return self.residuals(*args)

        p0 = array([0, 0, 0])
        params_fit, ier = leastsq(func, p0,
                                  args=(self.y_meas, self.x))
        assert_(ier in (1, 2, 3, 4), 'solution not found (ier=%d)' % ier)
        # low precision due to random
        assert_array_almost_equal(params_fit, self.abc, decimal=2)

    def test_reentrant_Dfun(self):
        def deriv_func(*args):
            self.test_basic()
            return self.residuals_jacobian(*args)

        p0 = array([0, 0, 0])
        params_fit, ier = leastsq(self.residuals, p0,
                                  args=(self.y_meas, self.x),
                                  Dfun=deriv_func)
        assert_(ier in (1, 2, 3, 4), 'solution not found (ier=%d)' % ier)
        # low precision due to random
        assert_array_almost_equal(params_fit, self.abc, decimal=2)

    def test_concurrent_no_gradient(self):
        v = sequence_parallel([self.test_basic] * 10)
        assert all([result is None for result in v])

    def test_concurrent_with_gradient(self):
        v = sequence_parallel([self.test_basic_with_gradient] * 10)
        assert all([result is None for result in v])

    def test_func_input_output_length_check(self):
        def func(x):
            return 2 * (x[0] - 3) ** 2 + 1

        with assert_raises(TypeError,
                           match='Improper input: func input vector length N='):
            optimize.leastsq(func, x0=[0, 1])


class TestCurveFit:
    def setup_method(self):
        self.y = array([1.0, 3.2, 9.5, 13.7])
        self.x = array([1.0, 2.0, 3.0, 4.0])

    def test_one_argument(self):
        def func(x, a):
            return x**a

        popt, pcov = curve_fit(func, self.x, self.y)
        assert_(len(popt) == 1)
        assert_(pcov.shape == (1, 1))
        assert_almost_equal(popt[0], 1.9149, decimal=4)
        assert_almost_equal(pcov[0, 0], 0.0016, decimal=4)

        # Test if we get the same with full_output. Regression test for #1415.
        # Also test if check_finite can be turned off.
        res = curve_fit(func, self.x, self.y,
                        full_output=1, check_finite=False)
        (popt2, pcov2, infodict, errmsg, ier) = res
        assert_array_almost_equal(popt, popt2)

    def test_two_argument(self):
        def func(x, a, b):
            return b*x**a

        popt, pcov = curve_fit(func, self.x, self.y)
        assert_(len(popt) == 2)
        assert_(pcov.shape == (2, 2))
        assert_array_almost_equal(popt, [1.7989, 1.1642], decimal=4)
        assert_array_almost_equal(pcov, [[0.0852, -0.1260],
                                         [-0.1260, 0.1912]], decimal=4)

    def test_func_is_classmethod(self):
        class test_self:
            """This class tests if curve_fit passes the correct number of
            arguments when the model function is a class instance method.
            """
            def func(self, x, a, b):
                return b * x**a

        test_self_inst = test_self()
        popt, pcov = curve_fit(test_self_inst.func, self.x, self.y)
        assert_(pcov.shape == (2, 2))
        assert_array_almost_equal(popt, [1.7989, 1.1642], decimal=4)
        assert_array_almost_equal(pcov, [[0.0852, -0.1260],
                                         [-0.1260, 0.1912]], decimal=4)

    def test_regression_2639(self):
        # This test fails if epsfcn in leastsq is too large.
        x = [574.14200000000005, 574.154, 574.16499999999996,
             574.17700000000002, 574.18799999999999, 574.19899999999996,
             574.21100000000001, 574.22199999999998, 574.23400000000004,
             574.245]
        y = [859.0, 997.0, 1699.0, 2604.0, 2013.0, 1964.0, 2435.0,
             1550.0, 949.0, 841.0]
        guess = [574.1861428571428, 574.2155714285715, 1302.0, 1302.0,
                 0.0035019999999983615, 859.0]
        good = [5.74177150e+02, 5.74209188e+02, 1.74187044e+03,
                1.58646166e+03, 1.0068462e-02, 8.57450661e+02]

        def f_double_gauss(x, x0, x1, A0, A1, sigma, c):
            return (A0*np.exp(-(x-x0)**2/(2.*sigma**2))
                    + A1*np.exp(-(x-x1)**2/(2.*sigma**2)) + c)

        popt, pcov = curve_fit(f_double_gauss, x, y, guess, maxfev=10000)
        assert_allclose(popt, good, rtol=1e-5)

    def test_pcov(self):
        xdata = np.array([0, 1, 2, 3, 4, 5])
        ydata = np.array([1, 1, 5, 7, 8, 12])
        sigma = np.array([1, 2, 1, 2, 1, 2])

        def f(x, a, b):
            return a*x + b

        for method in ['lm', 'trf', 'dogbox']:
            popt, pcov = curve_fit(f, xdata, ydata, p0=[2, 0], sigma=sigma,
                                   method=method)
            perr_scaled = np.sqrt(np.diag(pcov))
            assert_allclose(perr_scaled, [0.20659803, 0.57204404], rtol=1e-3)

            popt, pcov = curve_fit(f, xdata, ydata, p0=[2, 0], sigma=3*sigma,
                                   method=method)
            perr_scaled = np.sqrt(np.diag(pcov))
            assert_allclose(perr_scaled, [0.20659803, 0.57204404], rtol=1e-3)

            popt, pcov = curve_fit(f, xdata, ydata, p0=[2, 0], sigma=sigma,
                                   absolute_sigma=True, method=method)
            perr = np.sqrt(np.diag(pcov))
            assert_allclose(perr, [0.30714756, 0.85045308], rtol=1e-3)

            popt, pcov = curve_fit(f, xdata, ydata, p0=[2, 0], sigma=3*sigma,
                                   absolute_sigma=True, method=method)
            perr = np.sqrt(np.diag(pcov))
            assert_allclose(perr, [3*0.30714756, 3*0.85045308], rtol=1e-3)

        # infinite variances

        def f_flat(x, a, b):
            return a*x

        pcov_expected = np.array([np.inf]*4).reshape(2, 2)

        with suppress_warnings() as sup:
            sup.filter(OptimizeWarning,
                       "Covariance of the parameters could not be estimated")
            popt, pcov = curve_fit(f_flat, xdata, ydata,
                                   p0=[2, 0], sigma=sigma)
            popt1, pcov1 = curve_fit(f, xdata[:2], ydata[:2], p0=[2, 0])

        assert_(pcov.shape == (2, 2))
        assert_array_equal(pcov, pcov_expected)
        assert_(pcov1.shape == (2, 2))
        assert_array_equal(pcov1, pcov_expected)

    def test_array_like(self):
        # Test sequence input. Regression test for gh-3037.
        def f_linear(x, a, b):
            return a*x + b

        x = [1, 2, 3, 4]
        y = [3, 5, 7, 9]
        assert_allclose(curve_fit(f_linear, x, y)[0], [2, 1], atol=1e-10)

    def test_indeterminate_covariance(self):
        # Test that a warning is returned when pcov is indeterminate
        xdata = np.array([1, 2, 3, 4, 5, 6])
        ydata = np.array([1, 2, 3, 4, 5.5, 6])
        assert_warns(OptimizeWarning, curve_fit,
                     lambda x, a, b: a*x, xdata, ydata)

    def test_NaN_handling(self):
        # Test for correct handling of NaNs in input data: gh-3422
        # create input with NaNs
        xdata = np.array([1, np.nan, 3])
        ydata = np.array([1, 2, 3])

        assert_raises(ValueError, curve_fit,
                      lambda x, a, b: a*x + b, xdata, ydata)
        assert_raises(ValueError, curve_fit,
                      lambda x, a, b: a*x + b, ydata, xdata)
        assert_raises(ValueError, curve_fit, lambda x, a, b: a*x + b,
                      xdata, ydata, **{"check_finite": True})

    def test_empty_inputs(self):
        # Test both with and without bounds (regression test for gh-9864)
        assert_raises(ValueError, curve_fit, lambda x, a: a*x, [], [])
        assert_raises(ValueError, curve_fit, lambda x, a: a*x, [], [],
                      bounds=(1, 2))
        assert_raises(ValueError, curve_fit, lambda x, a: a*x, [1], [])
        assert_raises(ValueError, curve_fit, lambda x, a: a*x, [2], [],
                      bounds=(1, 2))

    def test_function_zero_params(self):
        # The model function takes no fit parameters, so curve_fit raises
        # "Unable to determine number of fit parameters."
        assert_raises(ValueError, curve_fit, lambda x: x, [1, 2], [3, 4])

    def test_None_x(self):   # Added in GH10196
        popt, pcov = curve_fit(lambda _, a: a * np.arange(10),
                               None, 2 * np.arange(10))
        assert_allclose(popt, [2.])

    def test_method_argument(self):
        def f(x, a, b):
            return a * np.exp(-b*x)

        xdata = np.linspace(0, 1, 11)
        ydata = f(xdata, 2., 2.)

        for method in ['trf', 'dogbox', 'lm', None]:
            popt, pcov = curve_fit(f, xdata, ydata, method=method)
            assert_allclose(popt, [2., 2.])

        assert_raises(ValueError, curve_fit, f, xdata, ydata,
                      method='unknown')

    def test_full_output(self):
        def f(x, a, b):
            return a * np.exp(-b * x)

        xdata = np.linspace(0, 1, 11)
        ydata = f(xdata, 2., 2.)

        for method in ['trf', 'dogbox', 'lm', None]:
            popt, pcov, infodict, errmsg, ier = curve_fit(
                f, xdata, ydata, method=method, full_output=True)
            assert_allclose(popt, [2., 2.])
            assert "nfev" in infodict
            assert "fvec" in infodict
            if method == 'lm' or method is None:
                assert "fjac" in infodict
                assert "ipvt" in infodict
                assert "qtf" in infodict
            assert isinstance(errmsg, str)
            assert ier in (1, 2, 3, 4)

    def test_bounds(self):
        def f(x, a, b):
            return a * np.exp(-b*x)

        xdata = np.linspace(0, 1, 11)
        ydata = f(xdata, 2., 2.)

        # The minimum w/out bounds is at [2., 2.],
        # and with bounds it's at [1.5, smth].
        lb = [1., 0]
        ub = [1.5, 3.]

        # Test that both variants of the bounds yield the same result
        bounds = (lb, ub)
        bounds_class = Bounds(lb, ub)
        for method in [None, 'trf', 'dogbox']:
            popt, pcov = curve_fit(f, xdata, ydata, bounds=bounds,
                                   method=method)
            assert_allclose(popt[0], 1.5)

            popt_class, pcov_class = curve_fit(f, xdata, ydata,
                                               bounds=bounds_class,
                                               method=method)
            assert_allclose(popt_class, popt)

        # With bounds, the starting estimate is feasible.
        popt, pcov = curve_fit(f, xdata, ydata, method='trf',
                               bounds=([0., 0], [0.6, np.inf]))
        assert_allclose(popt[0], 0.6)

        # method='lm' doesn't support bounds.
        assert_raises(ValueError, curve_fit, f, xdata, ydata, bounds=bounds,
                      method='lm')

    def test_bounds_p0(self):
        # This test is for issue #5719. The problem was that an initial guess
        # was ignored when 'trf' or 'dogbox' methods were invoked.
        def f(x, a):
            return np.sin(x + a)

        xdata = np.linspace(-2*np.pi, 2*np.pi, 40)
        ydata = np.sin(xdata)
        bounds = (-3 * np.pi, 3 * np.pi)
        for method in ['trf', 'dogbox']:
            popt_1, _ = curve_fit(f, xdata, ydata, p0=2.1*np.pi)
            popt_2, _ = curve_fit(f, xdata, ydata, p0=2.1*np.pi,
                                  bounds=bounds, method=method)

            # If the initial guess is ignored, then popt_2 would be close
            # to 0.
            assert_allclose(popt_1, popt_2)

    def test_jac(self):
        # Test that Jacobian callable is handled correctly and
        # weighted if sigma is provided.
        def f(x, a, b):
            return a * np.exp(-b*x)

        def jac(x, a, b):
            e = np.exp(-b*x)
            return np.vstack((e, -a * x * e)).T

        xdata = np.linspace(0, 1, 11)
        ydata = f(xdata, 2., 2.)

        # Test numerical options for least_squares backend.
        for method in ['trf', 'dogbox']:
            for scheme in ['2-point', '3-point', 'cs']:
                popt, pcov = curve_fit(f, xdata, ydata, jac=scheme,
                                       method=method)
                assert_allclose(popt, [2, 2])

        # Test the analytic option.
        for method in ['lm', 'trf', 'dogbox']:
            popt, pcov = curve_fit(f, xdata, ydata, method=method, jac=jac)
            assert_allclose(popt, [2, 2])

        # Now add an outlier and provide sigma.
        ydata[5] = 100
        sigma = np.ones(xdata.shape[0])
        sigma[5] = 200
        for method in ['lm', 'trf', 'dogbox']:
            popt, pcov = curve_fit(f, xdata, ydata, sigma=sigma,
                                   method=method, jac=jac)
            # Still the optimization process is influenced somehow,
            # have to set rtol=1e-3.
            assert_allclose(popt, [2, 2], rtol=1e-3)

    def test_maxfev_and_bounds(self):
        # gh-6340: with no bounds, curve_fit accepts parameter maxfev
        # (via leastsq), but with bounds, the parameter is max_nfev
        # (via least_squares)
        x = np.arange(0, 10)
        y = 2*x
        popt1, _ = curve_fit(lambda x, p: p*x, x, y, bounds=(0, 3),
                             maxfev=100)
        popt2, _ = curve_fit(lambda x, p: p*x, x, y, bounds=(0, 3),
                             max_nfev=100)

        assert_allclose(popt1, 2, atol=1e-14)
        assert_allclose(popt2, 2, atol=1e-14)

    def test_curvefit_simplecovariance(self):
        def func(x, a, b):
            return a * np.exp(-b*x)

        def jac(x, a, b):
            e = np.exp(-b*x)
            return np.vstack((e, -a * x * e)).T

        np.random.seed(0)
        xdata = np.linspace(0, 4, 50)
        y = func(xdata, 2.5, 1.3)
        ydata = y + 0.2 * np.random.normal(size=len(xdata))

        sigma = np.zeros(len(xdata)) + 0.2
        covar = np.diag(sigma**2)

        for jac1, jac2 in [(jac, jac), (None, None)]:
            for absolute_sigma in [False, True]:
                popt1, pcov1 = curve_fit(func, xdata, ydata, sigma=sigma,
                                         jac=jac1,
                                         absolute_sigma=absolute_sigma)
                popt2, pcov2 = curve_fit(func, xdata, ydata, sigma=covar,
                                         jac=jac2,
                                         absolute_sigma=absolute_sigma)

                assert_allclose(popt1, popt2, atol=1e-14)
                assert_allclose(pcov1, pcov2, atol=1e-14)
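
    # Note (added for clarity): the equivalence above reflects curve_fit's
    # documented sigma convention. A 1-D sigma holds per-point error
    # standard deviations, while a 2-D sigma is the full covariance matrix
    # of the errors, so sigma and np.diag(sigma**2) encode the same
    # weighting.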

    def test_curvefit_covariance(self):
        def funcp(x, a, b):
            rotn = np.array([[1./np.sqrt(2), -1./np.sqrt(2), 0],
                             [1./np.sqrt(2), 1./np.sqrt(2), 0],
                             [0, 0, 1.0]])
            return rotn.dot(a * np.exp(-b*x))

        def jacp(x, a, b):
            rotn = np.array([[1./np.sqrt(2), -1./np.sqrt(2), 0],
                             [1./np.sqrt(2), 1./np.sqrt(2), 0],
                             [0, 0, 1.0]])
            e = np.exp(-b*x)
            return rotn.dot(np.vstack((e, -a * x * e)).T)

        def func(x, a, b):
            return a * np.exp(-b*x)

        def jac(x, a, b):
            e = np.exp(-b*x)
            return np.vstack((e, -a * x * e)).T

        np.random.seed(0)
        xdata = np.arange(1, 4)
        y = func(xdata, 2.5, 1.0)
        ydata = y + 0.2 * np.random.normal(size=len(xdata))

        sigma = np.zeros(len(xdata)) + 0.2
        covar = np.diag(sigma**2)
        # Get a rotation matrix, and obtain ydatap = R ydata
        # Chisq = ydata^T C^{-1} ydata
        #       = ydata^T R^T R C^{-1} R^T R ydata
        #       = ydatap^T Cp^{-1} ydatap
        # Cp^{-1} = R C^{-1} R^T
        # Cp = R C R^T, since R^-1 = R^T
        rotn = np.array([[1./np.sqrt(2), -1./np.sqrt(2), 0],
                         [1./np.sqrt(2), 1./np.sqrt(2), 0],
                         [0, 0, 1.0]])
        ydatap = rotn.dot(ydata)
        covarp = rotn.dot(covar).dot(rotn.T)

        for jac1, jac2 in [(jac, jacp), (None, None)]:
            for absolute_sigma in [False, True]:
                popt1, pcov1 = curve_fit(func, xdata, ydata, sigma=sigma,
                                         jac=jac1,
                                         absolute_sigma=absolute_sigma)
                popt2, pcov2 = curve_fit(funcp, xdata, ydatap, sigma=covarp,
                                         jac=jac2,
                                         absolute_sigma=absolute_sigma)

                assert_allclose(popt1, popt2, rtol=1.2e-7, atol=1e-14)
                assert_allclose(pcov1, pcov2, rtol=1.2e-7, atol=1e-14)

    def test_dtypes(self):
        # regression test for gh-9581: curve_fit fails if x and y dtypes
        # differ
        x = np.arange(-3, 5)
        y = 1.5*x + 3.0 + 0.5*np.sin(x)

        def func(x, a, b):
            return a*x + b

        for method in ['lm', 'trf', 'dogbox']:
            for dtx in [np.float32, np.float64]:
                for dty in [np.float32, np.float64]:
                    x = x.astype(dtx)
                    y = y.astype(dty)

                    with warnings.catch_warnings():
                        warnings.simplefilter("error", OptimizeWarning)
                        p, cov = curve_fit(func, x, y, method=method)

                        assert np.isfinite(cov).all()
                        assert not np.allclose(p, 1)  # curve_fit's initial value

    def test_dtypes2(self):
        # regression test for gh-7117: curve_fit fails if
        # both inputs are float32
        def hyperbola(x, s_1, s_2, o_x, o_y, c):
            b_2 = (s_1 + s_2) / 2
            b_1 = (s_2 - s_1) / 2
            return o_y + b_1*(x-o_x) + b_2*np.sqrt((x-o_x)**2 + c**2/4)

        min_fit = np.array([-3.0, 0.0, -2.0, -10.0, 0.0])
        max_fit = np.array([0.0, 3.0, 3.0, 0.0, 10.0])
        guess = np.array([-2.5/3.0, 4/3.0, 1.0, -4.0, 0.5])

        params = [-2, .4, -1, -5, 9.5]
        xdata = np.array([-32, -16, -8, 4, 4, 8, 16, 32])
        ydata = hyperbola(xdata, *params)

        # run optimization twice, with xdata being float32 and float64
        popt_64, _ = curve_fit(f=hyperbola, xdata=xdata, ydata=ydata,
                               p0=guess, bounds=(min_fit, max_fit))

        xdata = xdata.astype(np.float32)
        ydata = hyperbola(xdata, *params)

        popt_32, _ = curve_fit(f=hyperbola, xdata=xdata, ydata=ydata,
                               p0=guess, bounds=(min_fit, max_fit))

        assert_allclose(popt_32, popt_64, atol=2e-5)

    def test_broadcast_y(self):
        xdata = np.arange(10)
        target = 4.7 * xdata ** 2 + 3.5 * xdata + np.random.rand(len(xdata))
        fit_func = lambda x, a, b: a*x**2 + b*x - target
        for method in ['lm', 'trf', 'dogbox']:
            popt0, pcov0 = curve_fit(fit_func,
                                     xdata=xdata,
                                     ydata=np.zeros_like(xdata),
                                     method=method)
            popt1, pcov1 = curve_fit(fit_func,
                                     xdata=xdata,
                                     ydata=0,
                                     method=method)
            assert_allclose(pcov0, pcov1)

    def test_args_in_kwargs(self):
        # Ensure that `args` cannot be passed as keyword argument to
        # `curve_fit`
        def func(x, a, b):
            return a * x + b

        with assert_raises(ValueError):
            curve_fit(func,
                      xdata=[1, 2, 3, 4],
                      ydata=[5, 9, 13, 17],
                      p0=[1],
                      args=(1,))

    def test_data_point_number_validation(self):
        def func(x, a, b, c, d, e):
            return a * np.exp(-b * x) + c + d + e

        with assert_raises(TypeError, match="The number of func parameters="):
            curve_fit(func,
                      xdata=[1, 2, 3, 4],
                      ydata=[5, 9, 13, 17])

    @pytest.mark.filterwarnings('ignore::RuntimeWarning')
    def test_gh4555(self):
        # gh-4555 reported that covariance matrices returned by `leastsq`
        # can have negative diagonal elements and eigenvalues. (In fact,
        # they can also be asymmetric.) This shows up in the output of
        # `scipy.optimize.curve_fit`. Check that it has been resolved.
        def f(x, a, b, c, d, e):
            return a*np.log(x + 1 + b) + c*np.log(x + 1 + d) + e

        rng = np.random.default_rng(408113519974467917)
        n = 100
        x = np.arange(n)
        y = np.linspace(2, 7, n) + rng.random(n)
        p, cov = optimize.curve_fit(f, x, y, maxfev=100000)
        assert np.all(np.diag(cov) > 0)
        eigs = linalg.eigh(cov)[0]  # separate line for debugging
        # some platforms see a small negative eigenvalue
        assert np.all(eigs > -1e-2)
        assert_allclose(cov, cov.T)

    def test_gh4555b(self):
        # check that PR gh-17247 did not significantly change covariance
        # matrix for simple cases
        rng = np.random.default_rng(408113519974467917)

        def func(x, a, b, c):
            return a * np.exp(-b * x) + c

        xdata = np.linspace(0, 4, 50)
        y = func(xdata, 2.5, 1.3, 0.5)
        y_noise = 0.2 * rng.normal(size=xdata.size)
        ydata = y + y_noise
        _, res = curve_fit(func, xdata, ydata)
        # reference from commit 1d80a2f254380d2b45733258ca42eb6b55c8755b
        ref = [[+0.0158972536486215, 0.0069207183284242, -0.0007474400714749],
               [+0.0069207183284242, 0.0205057958128679, +0.0053997711275403],
               [-0.0007474400714749, 0.0053997711275403, +0.0027833930320877]]
        # Linux_Python_38_32bit_full fails with default tolerance
        assert_allclose(res, ref, 2e-7)


class TestFixedPoint:
    def test_scalar_trivial(self):
        # f(x) = 2x; fixed point should be x=0
        def func(x):
            return 2.0*x
        x0 = 1.0
        x = fixed_point(func, x0)
        assert_almost_equal(x, 0.0)

    def test_scalar_basic1(self):
        # f(x) = x**2; x0=1.05; fixed point should be x=1
        def func(x):
            return x**2
        x0 = 1.05
        x = fixed_point(func, x0)
        assert_almost_equal(x, 1.0)

    def test_scalar_basic2(self):
        # f(x) = x**0.5; x0=1.05; fixed point should be x=1
        def func(x):
            return x**0.5
        x0 = 1.05
        x = fixed_point(func, x0)
        assert_almost_equal(x, 1.0)

    def test_array_trivial(self):
        def func(x):
            return 2.0*x
        x0 = [0.3, 0.15]
        with np.errstate(all='ignore'):
            x = fixed_point(func, x0)
        assert_almost_equal(x, [0.0, 0.0])

    def test_array_basic1(self):
        # f(x) = c * x**2; fixed point should be x=1/c
        def func(x, c):
            return c * x**2
        c = array([0.75, 1.0, 1.25])
        x0 = [1.1, 1.15, 0.9]
        with np.errstate(all='ignore'):
            x = fixed_point(func, x0, args=(c,))
        assert_almost_equal(x, 1.0/c)

    def test_array_basic2(self):
        # f(x) = c * x**0.5; fixed point should be x=c**2
        def func(x, c):
            return c * x**0.5
        c = array([0.75, 1.0, 1.25])
        x0 = [0.8, 1.1, 1.1]
        x = fixed_point(func, x0, args=(c,))
        assert_almost_equal(x, c**2)

    def test_lambertw(self):
        # python-list/2010-December/594592.html
        xxroot = fixed_point(lambda xx: np.exp(-2.0*xx)/2.0, 1.0,
                             args=(), xtol=1e-12, maxiter=500)
        assert_allclose(xxroot, np.exp(-2.0*xxroot)/2.0)
        assert_allclose(xxroot, lambertw(1)/2)
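
    # Derivation sketch (added for clarity): the fixed point satisfies
    # x = exp(-2*x)/2, i.e. 2*x*exp(2*x) = 1, so 2*x = W(1) and the root
    # equals lambertw(1)/2.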

    def test_no_acceleration(self):
        # github issue 5460
        ks = 2
        kl = 6
        m = 1.3
        n0 = 1.001
        i0 = ((m-1)/m)*(kl/ks/m)**(1/(m-1))

        def func(n):
            return np.log(kl/ks/n) / np.log((i0*n/(n - 1))) + 1
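
        # Use plain iteration here: the default 'del2' (Steffensen)
        # acceleration fails to converge for this map (gh-5460).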
        n = fixed_point(func, n0, method='iteration')
        assert_allclose(n, m)