  1. """
  2. Dogleg algorithm with rectangular trust regions for least-squares minimization.
  3. The description of the algorithm can be found in [Voglis]_. The algorithm does
  4. trust-region iterations, but the shape of trust regions is rectangular as
  5. opposed to conventional elliptical. The intersection of a trust region and
  6. an initial feasible region is again some rectangle. Thus, on each iteration a
  7. bound-constrained quadratic optimization problem is solved.
  8. A quadratic problem is solved by well-known dogleg approach, where the
  9. function is minimized along piecewise-linear "dogleg" path [NumOpt]_,
  10. Chapter 4. If Jacobian is not rank-deficient then the function is decreasing
  11. along this path, and optimization amounts to simply following along this
  12. path as long as a point stays within the bounds. A constrained Cauchy step
  13. (along the anti-gradient) is considered for safety in rank deficient cases,
  14. in this situations the convergence might be slow.
  15. If during iterations some variable hit the initial bound and the component
  16. of anti-gradient points outside the feasible region, then a next dogleg step
  17. won't make any progress. At this state such variables satisfy first-order
  18. optimality conditions and they are excluded before computing a next dogleg
  19. step.
  20. Gauss-Newton step can be computed exactly by `numpy.linalg.lstsq` (for dense
  21. Jacobian matrices) or by iterative procedure `scipy.sparse.linalg.lsmr` (for
  22. dense and sparse matrices, or Jacobian being LinearOperator). The second
  23. option allows to solve very large problems (up to couple of millions of
  24. residuals on a regular PC), provided the Jacobian matrix is sufficiently
  25. sparse. But note that dogbox is not very good for solving problems with
  26. large number of constraints, because of variables exclusion-inclusion on each
  27. iteration (a required number of function evaluations might be high or accuracy
  28. of a solution will be poor), thus its large-scale usage is probably limited
  29. to unconstrained problems.
  30. References
  31. ----------
  32. .. [Voglis] C. Voglis and I. E. Lagaris, "A Rectangular Trust Region Dogleg
  33. Approach for Unconstrained and Bound Constrained Nonlinear
  34. Optimization", WSEAS International Conference on Applied
  35. Mathematics, Corfu, Greece, 2004.
  36. .. [NumOpt] J. Nocedal and S. J. Wright, "Numerical optimization, 2nd edition".
  37. """
import numpy as np
from numpy.linalg import lstsq, norm

from scipy.sparse.linalg import LinearOperator, aslinearoperator, lsmr
from scipy.optimize import OptimizeResult

from .common import (
    step_size_to_bound, in_bounds, update_tr_radius, evaluate_quadratic,
    build_quadratic_1d, minimize_quadratic_1d, compute_grad,
    compute_jac_scale, check_termination, scale_for_robust_loss_function,
    print_header_nonlinear, print_iteration_nonlinear)


def lsmr_operator(Jop, d, active_set):
    """Compute LinearOperator to use in LSMR by dogbox algorithm.

    `active_set` mask is used to exclude active variables from computations
    of matrix-vector products.
    """
    m, n = Jop.shape

    def matvec(x):
        # Zero the active components so that the product acts only on the
        # free variables (the original code computed `x_free` but then
        # multiplied by `x`, which ignored the mask).
        x_free = x.ravel().copy()
        x_free[active_set] = 0
        return Jop.matvec(x_free * d)

    def rmatvec(x):
        r = d * Jop.rmatvec(x)
        r[active_set] = 0
        return r

    return LinearOperator((m, n), matvec=matvec, rmatvec=rmatvec, dtype=float)
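
# Toy illustration (hypothetical numbers, not part of the algorithm): with
# J = [[1., 2.]], d = [1., 1.], and active_set = [False, True], the wrapped
# operator behaves as if the second column of J were removed, without slicing:
#
#     >>> import numpy as np
#     >>> from scipy.sparse.linalg import aslinearoperator
#     >>> op = lsmr_operator(aslinearoperator(np.array([[1., 2.]])),
#     ...                    np.array([1., 1.]), np.array([False, True]))
#     >>> op.matvec(np.array([1., 1.]))  # J @ [1, 0]
#     array([1.])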


def find_intersection(x, tr_bounds, lb, ub):
    """Find intersection of trust-region bounds and initial bounds.

    Returns
    -------
    lb_total, ub_total : ndarray with shape of x
        Lower and upper bounds of the intersection region.
    orig_l, orig_u : ndarray of bool with shape of x
        True means that an original bound is taken as a corresponding bound
        in the intersection region.
    tr_l, tr_u : ndarray of bool with shape of x
        True means that a trust-region bound is taken as a corresponding bound
        in the intersection region.
    """
    lb_centered = lb - x
    ub_centered = ub - x

    lb_total = np.maximum(lb_centered, -tr_bounds)
    ub_total = np.minimum(ub_centered, tr_bounds)

    orig_l = np.equal(lb_total, lb_centered)
    orig_u = np.equal(ub_total, ub_centered)

    tr_l = np.equal(lb_total, -tr_bounds)
    tr_u = np.equal(ub_total, tr_bounds)

    return lb_total, ub_total, orig_l, orig_u, tr_l, tr_u
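
# Worked example (toy numbers for illustration): for x = [0.5], trust-region
# half-width 1.0, and original bounds [0, 3], the intersection in step space
# is [-0.5, 1.0]: the lower side comes from the original bound (orig_l is
# True) and the upper side from the trust region (tr_u is True):
#
#     >>> import numpy as np
#     >>> lb_t, ub_t, orig_l, orig_u, tr_l, tr_u = find_intersection(
#     ...     np.array([0.5]), np.array([1.0]),
#     ...     np.array([0.0]), np.array([3.0]))
#     >>> lb_t, ub_t
#     (array([-0.5]), array([1.]))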


def dogleg_step(x, newton_step, g, a, b, tr_bounds, lb, ub):
    """Find dogleg step in a rectangular region.

    Returns
    -------
    step : ndarray, shape (n,)
        Computed dogleg step.
    bound_hits : ndarray of int, shape (n,)
        Each component shows whether a corresponding variable hits the
        initial bound after the step is taken:
            *  0 - a variable doesn't hit the bound.
            * -1 - lower bound is hit.
            *  1 - upper bound is hit.
    tr_hit : bool
        Whether the step hit the boundary of the trust region.
    """
    lb_total, ub_total, orig_l, orig_u, tr_l, tr_u = find_intersection(
        x, tr_bounds, lb, ub
    )
    bound_hits = np.zeros_like(x, dtype=int)

    if in_bounds(newton_step, lb_total, ub_total):
        return newton_step, bound_hits, False

    to_bounds, _ = step_size_to_bound(np.zeros_like(x), -g, lb_total, ub_total)

    # The classical dogleg algorithm would check if the Cauchy step fits into
    # the bounds, and just return its constrained version if not. But in a
    # rectangular trust region it makes sense to try to improve the
    # constrained Cauchy step too. Thus, we don't distinguish these two cases.

    cauchy_step = -minimize_quadratic_1d(a, b, 0, to_bounds)[0] * g

    step_diff = newton_step - cauchy_step
    step_size, hits = step_size_to_bound(cauchy_step, step_diff,
                                         lb_total, ub_total)
    bound_hits[(hits < 0) & orig_l] = -1
    bound_hits[(hits > 0) & orig_u] = 1
    tr_hit = np.any((hits < 0) & tr_l | (hits > 0) & tr_u)

    return cauchy_step + step_size * step_diff, bound_hits, tr_hit


def dogbox(fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, x_scale,
           loss_function, tr_solver, tr_options, verbose):
    f = f0
    f_true = f.copy()
    nfev = 1

    J = J0
    njev = 1

    if loss_function is not None:
        rho = loss_function(f)
        cost = 0.5 * np.sum(rho[0])
        J, f = scale_for_robust_loss_function(J, f, rho)
    else:
        cost = 0.5 * np.dot(f, f)

    g = compute_grad(J, f)

    jac_scale = isinstance(x_scale, str) and x_scale == 'jac'
    if jac_scale:
        scale, scale_inv = compute_jac_scale(J)
    else:
        scale, scale_inv = x_scale, 1 / x_scale

    Delta = norm(x0 * scale_inv, ord=np.inf)
    if Delta == 0:
        Delta = 1.0

    on_bound = np.zeros_like(x0, dtype=int)
    on_bound[np.equal(x0, lb)] = -1
    on_bound[np.equal(x0, ub)] = 1

    x = x0
    step = np.empty_like(x0)

    if max_nfev is None:
        max_nfev = x0.size * 100

    termination_status = None
    iteration = 0
    step_norm = None
    actual_reduction = None

    if verbose == 2:
        print_header_nonlinear()

    while True:
        active_set = on_bound * g < 0
        free_set = ~active_set

        g_free = g[free_set]
        g_full = g.copy()
        g[active_set] = 0
        g_norm = norm(g, ord=np.inf)
        if g_norm < gtol:
            termination_status = 1

        if verbose == 2:
            print_iteration_nonlinear(iteration, nfev, cost, actual_reduction,
                                      step_norm, g_norm)

        if termination_status is not None or nfev == max_nfev:
            break

        x_free = x[free_set]
        lb_free = lb[free_set]
        ub_free = ub[free_set]
        scale_free = scale[free_set]

        # Compute the (Gauss-)Newton step and build a quadratic model for the
        # Cauchy step.
        if tr_solver == 'exact':
            J_free = J[:, free_set]
            newton_step = lstsq(J_free, -f, rcond=-1)[0]

            # Coefficients for the quadratic model along the anti-gradient.
            a, b = build_quadratic_1d(J_free, g_free, -g_free)
        elif tr_solver == 'lsmr':
            Jop = aslinearoperator(J)

            # We compute the lsmr step in scaled variables and then transform
            # it back to normal variables. If lsmr gave the exact lsq
            # solution, this would be equivalent to not doing any
            # transformations, but from experience it's better this way.

            # We pass active_set to make computations as if we selected
            # the free subset of J columns, but without actually doing any
            # slicing, which is expensive for sparse matrices and impossible
            # for LinearOperator.

            lsmr_op = lsmr_operator(Jop, scale, active_set)
            newton_step = -lsmr(lsmr_op, f, **tr_options)[0][free_set]
            newton_step *= scale_free

            # Components of g for active variables were zeroed, so this call
            # is correct and equivalent to using J_free and g_free.
            a, b = build_quadratic_1d(Jop, g, -g)

        actual_reduction = -1.0
        while actual_reduction <= 0 and nfev < max_nfev:
            tr_bounds = Delta * scale_free

            step_free, on_bound_free, tr_hit = dogleg_step(
                x_free, newton_step, g_free, a, b, tr_bounds, lb_free, ub_free)

            step.fill(0.0)
            step[free_set] = step_free

            if tr_solver == 'exact':
                predicted_reduction = -evaluate_quadratic(J_free, g_free,
                                                          step_free)
            elif tr_solver == 'lsmr':
                predicted_reduction = -evaluate_quadratic(Jop, g, step)

            # gh-11403: ensure that the solution is fully within bounds.
            x_new = np.clip(x + step, lb, ub)

            f_new = fun(x_new)
            nfev += 1

            step_h_norm = norm(step * scale_inv, ord=np.inf)

            if not np.all(np.isfinite(f_new)):
                Delta = 0.25 * step_h_norm
                continue

            # Usual trust-region step quality estimation.
            if loss_function is not None:
                cost_new = loss_function(f_new, cost_only=True)
            else:
                cost_new = 0.5 * np.dot(f_new, f_new)
            actual_reduction = cost - cost_new

            Delta, ratio = update_tr_radius(
                Delta, actual_reduction, predicted_reduction,
                step_h_norm, tr_hit
            )

            step_norm = norm(step)
            termination_status = check_termination(
                actual_reduction, cost, step_norm, norm(x), ratio, ftol, xtol)

            if termination_status is not None:
                break

        if actual_reduction > 0:
            on_bound[free_set] = on_bound_free

            x = x_new
            # Set variables exactly at the boundary.
            mask = on_bound == -1
            x[mask] = lb[mask]
            mask = on_bound == 1
            x[mask] = ub[mask]

            f = f_new
            f_true = f.copy()

            cost = cost_new

            J = jac(x, f)
            njev += 1

            if loss_function is not None:
                rho = loss_function(f)
                J, f = scale_for_robust_loss_function(J, f, rho)

            g = compute_grad(J, f)

            if jac_scale:
                scale, scale_inv = compute_jac_scale(J, scale_inv)
        else:
            step_norm = 0
            actual_reduction = 0

        iteration += 1

    if termination_status is None:
        termination_status = 0

    return OptimizeResult(
        x=x, cost=cost, fun=f_true, jac=J, grad=g_full, optimality=g_norm,
        active_mask=on_bound, nfev=nfev, njev=njev, status=termination_status)