  1. """
  2. Functions
  3. ---------
  4. .. autosummary::
  5. :toctree: generated/
  6. fmin_l_bfgs_b
  7. """
## License for the Python wrapper
## ==============================

## Copyright (c) 2004 David M. Cooke <cookedm@physics.mcmaster.ca>

## Permission is hereby granted, free of charge, to any person obtaining a
## copy of this software and associated documentation files (the "Software"),
## to deal in the Software without restriction, including without limitation
## the rights to use, copy, modify, merge, publish, distribute, sublicense,
## and/or sell copies of the Software, and to permit persons to whom the
## Software is furnished to do so, subject to the following conditions:

## The above copyright notice and this permission notice shall be included in
## all copies or substantial portions of the Software.

## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
## FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
## DEALINGS IN THE SOFTWARE.

## Modifications by Travis Oliphant and Enthought, Inc. for inclusion in SciPy

import numpy as np
from numpy import array, asarray, float64, zeros

from . import _lbfgsb
from ._optimize import (MemoizeJac, OptimizeResult, _check_unknown_options,
                        _prepare_scalar_function)
from ._constraints import old_bound_to_new
from scipy.sparse.linalg import LinearOperator

__all__ = ['fmin_l_bfgs_b', 'LbfgsInvHessProduct']


def fmin_l_bfgs_b(func, x0, fprime=None, args=(),
                  approx_grad=0,
                  bounds=None, m=10, factr=1e7, pgtol=1e-5,
                  epsilon=1e-8,
                  iprint=-1, maxfun=15000, maxiter=15000, disp=None,
                  callback=None, maxls=20):
  41. """
  42. Minimize a function func using the L-BFGS-B algorithm.
  43. Parameters
  44. ----------
  45. func : callable f(x,*args)
  46. Function to minimize.
  47. x0 : ndarray
  48. Initial guess.
  49. fprime : callable fprime(x,*args), optional
  50. The gradient of `func`. If None, then `func` returns the function
  51. value and the gradient (``f, g = func(x, *args)``), unless
  52. `approx_grad` is True in which case `func` returns only ``f``.
  53. args : sequence, optional
  54. Arguments to pass to `func` and `fprime`.
  55. approx_grad : bool, optional
  56. Whether to approximate the gradient numerically (in which case
  57. `func` returns only the function value).
  58. bounds : list, optional
  59. ``(min, max)`` pairs for each element in ``x``, defining
  60. the bounds on that parameter. Use None or +-inf for one of ``min`` or
  61. ``max`` when there is no bound in that direction.
  62. m : int, optional
  63. The maximum number of variable metric corrections
  64. used to define the limited memory matrix. (The limited memory BFGS
  65. method does not store the full hessian but uses this many terms in an
  66. approximation to it.)
  67. factr : float, optional
  68. The iteration stops when
  69. ``(f^k - f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= factr * eps``,
  70. where ``eps`` is the machine precision, which is automatically
  71. generated by the code. Typical values for `factr` are: 1e12 for
  72. low accuracy; 1e7 for moderate accuracy; 10.0 for extremely
  73. high accuracy. See Notes for relationship to `ftol`, which is exposed
  74. (instead of `factr`) by the `scipy.optimize.minimize` interface to
  75. L-BFGS-B.
  76. pgtol : float, optional
  77. The iteration will stop when
  78. ``max{|proj g_i | i = 1, ..., n} <= pgtol``
  79. where ``pg_i`` is the i-th component of the projected gradient.
  80. epsilon : float, optional
  81. Step size used when `approx_grad` is True, for numerically
  82. calculating the gradient
  83. iprint : int, optional
  84. Controls the frequency of output. ``iprint < 0`` means no output;
  85. ``iprint = 0`` print only one line at the last iteration;
  86. ``0 < iprint < 99`` print also f and ``|proj g|`` every iprint iterations;
  87. ``iprint = 99`` print details of every iteration except n-vectors;
  88. ``iprint = 100`` print also the changes of active set and final x;
  89. ``iprint > 100`` print details of every iteration including x and g.
  90. disp : int, optional
  91. If zero, then no output. If a positive number, then this over-rides
  92. `iprint` (i.e., `iprint` gets the value of `disp`).
  93. maxfun : int, optional
  94. Maximum number of function evaluations. Note that this function
  95. may violate the limit because of evaluating gradients by numerical
  96. differentiation.
  97. maxiter : int, optional
  98. Maximum number of iterations.
  99. callback : callable, optional
  100. Called after each iteration, as ``callback(xk)``, where ``xk`` is the
  101. current parameter vector.
  102. maxls : int, optional
  103. Maximum number of line search steps (per iteration). Default is 20.
    Returns
    -------
    x : array_like
        Estimated position of the minimum.
    f : float
        Value of `func` at the minimum.
    d : dict
        Information dictionary.

        * d['warnflag'] is

          - 0 if converged,
          - 1 if too many function evaluations or too many iterations,
          - 2 if stopped for another reason, given in d['task']

        * d['grad'] is the gradient at the minimum (should be close to 0)
        * d['funcalls'] is the number of function calls made.
        * d['nit'] is the number of iterations.

    See also
    --------
    minimize: Interface to minimization algorithms for multivariate
        functions. See the 'L-BFGS-B' `method` in particular. Note that the
        `ftol` option is made available via that interface, while `factr` is
        provided via this interface, where `factr` is the factor multiplying
        the default machine floating-point precision to arrive at `ftol`:
        ``ftol = factr * numpy.finfo(float).eps``.

    Notes
    -----
    License of L-BFGS-B (FORTRAN code):

    The version included here (in fortran code) is 3.0
    (released April 25, 2011). It was written by Ciyou Zhu, Richard Byrd,
    and Jorge Nocedal <nocedal@ece.nwu.edu>. It carries the following
    condition for use:

    This software is freely available, but we expect that all publications
    describing work using this software, or all commercial products using it,
    quote at least one of the references given below. This software is
    released under the BSD License.

    References
    ----------
    * R. H. Byrd, P. Lu and J. Nocedal. A Limited Memory Algorithm for Bound
      Constrained Optimization, (1995), SIAM Journal on Scientific and
      Statistical Computing, 16, 5, pp. 1190-1208.
    * C. Zhu, R. H. Byrd and J. Nocedal. L-BFGS-B: Algorithm 778: L-BFGS-B,
      FORTRAN routines for large scale bound constrained optimization (1997),
      ACM Transactions on Mathematical Software, 23, 4, pp. 550 - 560.
    * J.L. Morales and J. Nocedal. L-BFGS-B: Remark on Algorithm 778: L-BFGS-B,
      FORTRAN routines for large scale bound constrained optimization (2011),
      ACM Transactions on Mathematical Software, 38, 1.
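
    Examples
    --------
    As a short illustrative sketch, minimize a bound-constrained quadratic
    with an analytic gradient (the objective and bounds are made up for this
    example):

    >>> import numpy as np
    >>> from scipy.optimize import fmin_l_bfgs_b
    >>> def f(x):
    ...     return (x[0] - 1.0) ** 2 + (x[1] - 2.5) ** 2
    >>> def grad(x):
    ...     return np.array([2.0 * (x[0] - 1.0), 2.0 * (x[1] - 2.5)])
    >>> x, fval, info = fmin_l_bfgs_b(f, x0=np.zeros(2), fprime=grad,
    ...                               bounds=[(0, None), (0, 2)])
    >>> np.allclose(x, [1.0, 2.0])
    True
    >>> info['warnflag']  # 0 indicates convergence
    0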
  149. """
    # handle fprime/approx_grad
    if approx_grad:
        fun = func
        jac = None
    elif fprime is None:
        fun = MemoizeJac(func)
        jac = fun.derivative
    else:
        fun = func
        jac = fprime

    # build options
    opts = {'disp': disp,
            'iprint': iprint,
            'maxcor': m,
            'ftol': factr * np.finfo(float).eps,
            'gtol': pgtol,
            'eps': epsilon,
            'maxfun': maxfun,
            'maxiter': maxiter,
            'callback': callback,
            'maxls': maxls}

    res = _minimize_lbfgsb(fun, x0, args=args, jac=jac, bounds=bounds,
                           **opts)
    d = {'grad': res['jac'],
         'task': res['message'],
         'funcalls': res['nfev'],
         'nit': res['nit'],
         'warnflag': res['status']}
    f = res['fun']
    x = res['x']
    return x, f, d


def _minimize_lbfgsb(fun, x0, args=(), jac=None, bounds=None,
                     disp=None, maxcor=10, ftol=2.2204460492503131e-09,
                     gtol=1e-5, eps=1e-8, maxfun=15000, maxiter=15000,
                     iprint=-1, callback=None, maxls=20,
                     finite_diff_rel_step=None, **unknown_options):
  186. """
  187. Minimize a scalar function of one or more variables using the L-BFGS-B
  188. algorithm.
  189. Options
  190. -------
  191. disp : None or int
  192. If `disp is None` (the default), then the supplied version of `iprint`
  193. is used. If `disp is not None`, then it overrides the supplied version
  194. of `iprint` with the behaviour you outlined.
  195. maxcor : int
  196. The maximum number of variable metric corrections used to
  197. define the limited memory matrix. (The limited memory BFGS
  198. method does not store the full hessian but uses this many terms
  199. in an approximation to it.)
  200. ftol : float
  201. The iteration stops when ``(f^k -
  202. f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= ftol``.
  203. gtol : float
  204. The iteration will stop when ``max{|proj g_i | i = 1, ..., n}
  205. <= gtol`` where ``pg_i`` is the i-th component of the
  206. projected gradient.
  207. eps : float or ndarray
  208. If `jac is None` the absolute step size used for numerical
  209. approximation of the jacobian via forward differences.
  210. maxfun : int
  211. Maximum number of function evaluations. Note that this function
  212. may violate the limit because of evaluating gradients by numerical
  213. differentiation.
  214. maxiter : int
  215. Maximum number of iterations.
  216. iprint : int, optional
  217. Controls the frequency of output. ``iprint < 0`` means no output;
  218. ``iprint = 0`` print only one line at the last iteration;
  219. ``0 < iprint < 99`` print also f and ``|proj g|`` every iprint iterations;
  220. ``iprint = 99`` print details of every iteration except n-vectors;
  221. ``iprint = 100`` print also the changes of active set and final x;
  222. ``iprint > 100`` print details of every iteration including x and g.
  223. maxls : int, optional
  224. Maximum number of line search steps (per iteration). Default is 20.
  225. finite_diff_rel_step : None or array_like, optional
  226. If `jac in ['2-point', '3-point', 'cs']` the relative step size to
  227. use for numerical approximation of the jacobian. The absolute step
  228. size is computed as ``h = rel_step * sign(x) * max(1, abs(x))``,
  229. possibly adjusted to fit into the bounds. For ``method='3-point'``
  230. the sign of `h` is ignored. If None (default) then step is selected
  231. automatically.
  232. Notes
  233. -----
  234. The option `ftol` is exposed via the `scipy.optimize.minimize` interface,
  235. but calling `scipy.optimize.fmin_l_bfgs_b` directly exposes `factr`. The
  236. relationship between the two is ``ftol = factr * numpy.finfo(float).eps``.
  237. I.e., `factr` multiplies the default machine floating-point precision to
  238. arrive at `ftol`.
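
    For illustration, passing ``factr=1e7`` to `fmin_l_bfgs_b` is equivalent
    to the following `minimize` call (the one-dimensional objective here is
    made up for the example):

    >>> import numpy as np
    >>> from scipy.optimize import minimize
    >>> res = minimize(lambda x: (x[0] - 2.0) ** 2, x0=[0.0],
    ...                method='L-BFGS-B',
    ...                options={'ftol': 1e7 * np.finfo(float).eps})
    >>> bool(np.isclose(res.x[0], 2.0))
    True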
  239. """
    _check_unknown_options(unknown_options)
    m = maxcor
    pgtol = gtol
    factr = ftol / np.finfo(float).eps

    x0 = asarray(x0).ravel()
    n, = x0.shape

    if bounds is None:
        bounds = [(None, None)] * n
    if len(bounds) != n:
        raise ValueError('length of x0 != length of bounds')

    # unbounded variables must use None, not +-inf, for optimizer to work properly
    bounds = [(None if l == -np.inf else l, None if u == np.inf else u)
              for l, u in bounds]

    # LBFGSB is sent 'old-style' bounds, 'new-style' bounds are required by
    # approx_derivative and ScalarFunction
    new_bounds = old_bound_to_new(bounds)

    # check bounds
    if (new_bounds[0] > new_bounds[1]).any():
        raise ValueError("LBFGSB - one of the lower bounds is greater than an "
                         "upper bound.")

    # initial vector must lie within the bounds. Otherwise ScalarFunction and
    # approx_derivative will cause problems
    x0 = np.clip(x0, new_bounds[0], new_bounds[1])
    if disp is not None:
        if disp == 0:
            iprint = -1
        else:
            iprint = disp

    sf = _prepare_scalar_function(fun, x0, jac=jac, args=args, epsilon=eps,
                                  bounds=new_bounds,
                                  finite_diff_rel_step=finite_diff_rel_step)

    func_and_grad = sf.fun_and_grad
    fortran_int = _lbfgsb.types.intvar.dtype

    nbd = zeros(n, fortran_int)
    low_bnd = zeros(n, float64)
    upper_bnd = zeros(n, float64)
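    # Encode each (lower, upper) pair as the integer code the Fortran routine
    # expects in ``nbd``: 0 = unbounded, 1 = lower bound only, 2 = both
    # bounds, 3 = upper bound only.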
    bounds_map = {(None, None): 0,
                  (1, None): 1,
                  (1, 1): 2,
                  (None, 1): 3}
    for i in range(0, n):
        l, u = bounds[i]
        if l is not None:
            low_bnd[i] = l
            l = 1
        if u is not None:
            upper_bnd[i] = u
            u = 1
        nbd[i] = bounds_map[l, u]

    if not maxls > 0:
        raise ValueError('maxls must be positive.')

    x = array(x0, float64)
    f = array(0.0, float64)
    g = zeros((n,), float64)
    wa = zeros(2*m*n + 5*n + 11*m*m + 8*m, float64)
    iwa = zeros(3*n, fortran_int)
    task = zeros(1, 'S60')
    csave = zeros(1, 'S60')
    lsave = zeros(4, fortran_int)
    isave = zeros(44, fortran_int)
    dsave = zeros(29, float64)

    task[:] = 'START'

    n_iterations = 0
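
    # setulb uses a reverse-communication interface: each call updates
    # ``task`` either to request new information ('FG' asks for the objective
    # and gradient at ``x``, 'NEW_X' signals a completed iteration) or to
    # report convergence or an error, and this loop reacts accordingly.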
    while 1:
        # x, f, g, wa, iwa, task, csave, lsave, isave, dsave = \
        _lbfgsb.setulb(m, x, low_bnd, upper_bnd, nbd, f, g, factr,
                       pgtol, wa, iwa, task, iprint, csave, lsave,
                       isave, dsave, maxls)
        task_str = task.tobytes()
        if task_str.startswith(b'FG'):
            # The minimization routine wants f and g at the current x.
            # Note that interruptions due to maxfun are postponed
            # until the completion of the current minimization iteration.
            # Overwrite f and g:
            f, g = func_and_grad(x)
        elif task_str.startswith(b'NEW_X'):
            # new iteration
            n_iterations += 1
            if callback is not None:
                callback(np.copy(x))

            if n_iterations >= maxiter:
                task[:] = 'STOP: TOTAL NO. of ITERATIONS REACHED LIMIT'
            elif sf.nfev > maxfun:
                task[:] = ('STOP: TOTAL NO. of f AND g EVALUATIONS '
                           'EXCEEDS LIMIT')
        else:
            break
    task_str = task.tobytes().strip(b'\x00').strip()
    if task_str.startswith(b'CONV'):
        warnflag = 0
    elif sf.nfev > maxfun or n_iterations >= maxiter:
        warnflag = 1
    else:
        warnflag = 2

    # These two portions of the workspace are described in the mainlb
    # subroutine in lbfgsb.f. See line 363.
    s = wa[0: m*n].reshape(m, n)
    y = wa[m*n: 2*m*n].reshape(m, n)

    # See lbfgsb.f line 160 for this portion of the workspace.
    # isave(31) = the total number of BFGS updates prior the current iteration;
    n_bfgs_updates = isave[30]

    n_corrs = min(n_bfgs_updates, maxcor)
    hess_inv = LbfgsInvHessProduct(s[:n_corrs], y[:n_corrs])

    task_str = task_str.decode()
    return OptimizeResult(fun=f, jac=g, nfev=sf.nfev,
                          njev=sf.ngev,
                          nit=n_iterations, status=warnflag, message=task_str,
                          x=x, success=(warnflag == 0), hess_inv=hess_inv)


class LbfgsInvHessProduct(LinearOperator):
    """Linear operator for the L-BFGS approximate inverse Hessian.

    This operator computes the product of a vector with the approximate inverse
    of the Hessian of the objective function, using the L-BFGS limited
    memory approximation to the inverse Hessian, accumulated during the
    optimization.

    Objects of this class implement the ``scipy.sparse.linalg.LinearOperator``
    interface.

    Parameters
    ----------
    sk : array_like, shape=(n_corr, n)
        Array of `n_corr` most recent updates to the solution vector.
        (See [1]).
    yk : array_like, shape=(n_corr, n)
        Array of `n_corr` most recent updates to the gradient. (See [1]).

    References
    ----------
    .. [1] Nocedal, Jorge. "Updating quasi-Newton matrices with limited
       storage." Mathematics of computation 35.151 (1980): 773-782.
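
    Examples
    --------
    The operator is normally obtained as the ``hess_inv`` attribute of the
    result returned by the L-BFGS-B solver; a small illustrative run (the
    objective here is made up for the example):

    >>> import numpy as np
    >>> from scipy.optimize import minimize
    >>> res = minimize(lambda x: np.sum((x - 3.0) ** 2), x0=np.zeros(4),
    ...                method='L-BFGS-B')
    >>> res.hess_inv.dot(np.ones(4)).shape
    (4,)
    >>> res.hess_inv.todense().shape
    (4, 4)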
  365. """

    def __init__(self, sk, yk):
        """Construct the operator."""
        if sk.shape != yk.shape or sk.ndim != 2:
            raise ValueError('sk and yk must have matching shape, (n_corrs, n)')
        n_corrs, n = sk.shape

        super().__init__(dtype=np.float64, shape=(n, n))

        self.sk = sk
        self.yk = yk
        self.n_corrs = n_corrs
        self.rho = 1 / np.einsum('ij,ij->i', sk, yk)

    def _matvec(self, x):
        """Efficient matrix-vector multiply with the BFGS matrices.

        This calculation is described in Section (4) of [1].

        Parameters
        ----------
        x : ndarray
            An array with shape (n,) or (n,1).

        Returns
        -------
        y : ndarray
            The matrix-vector product
        """
        s, y, n_corrs, rho = self.sk, self.yk, self.n_corrs, self.rho
        q = np.array(x, dtype=self.dtype, copy=True)
        if q.ndim == 2 and q.shape[1] == 1:
            q = q.reshape(-1)

        alpha = np.empty(n_corrs)
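        # First loop of the two-loop recursion ([1], Section 4): walk
        # backwards through the stored correction pairs, accumulating the
        # coefficients alpha and downdating q.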
        for i in range(n_corrs-1, -1, -1):
            alpha[i] = rho[i] * np.dot(s[i], q)
            q = q - alpha[i]*y[i]

        r = q
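        # Second loop: walk forwards, adding back the curvature corrections.
        # On exit, r is the product of the inverse-Hessian approximation
        # (seeded with the identity here) and the input vector.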
        for i in range(n_corrs):
            beta = rho[i] * np.dot(y[i], r)
            r = r + s[i] * (alpha[i] - beta)

        return r

    def todense(self):
        """Return a dense array representation of this operator.

        Returns
        -------
        arr : ndarray, shape=(n, n)
            An array with the same shape and containing
            the same data represented by this `LinearOperator`.
        """
        s, y, n_corrs, rho = self.sk, self.yk, self.n_corrs, self.rho
        I = np.eye(*self.shape, dtype=self.dtype)
        Hk = I
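
        # Apply the recursive BFGS update
        #   H <- (I - rho*s*y^T) H (I - rho*y*s^T) + rho*s*s^T
        # for each stored correction pair, starting from the identity.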
        for i in range(n_corrs):
            A1 = I - s[i][:, np.newaxis] * y[i][np.newaxis, :] * rho[i]
            A2 = I - y[i][:, np.newaxis] * s[i][np.newaxis, :] * rho[i]
            Hk = np.dot(A1, np.dot(Hk, A2)) + (rho[i] * s[i][:, np.newaxis] *
                                               s[i][np.newaxis, :])

        return Hk