  1. """
  2. A top-level linear programming interface.
  3. .. versionadded:: 0.15.0
  4. Functions
  5. ---------
  6. .. autosummary::
  7. :toctree: generated/
  8. linprog
  9. linprog_verbose_callback
  10. linprog_terse_callback
  11. """
import numpy as np

from ._optimize import OptimizeResult, OptimizeWarning
from warnings import warn
from ._linprog_highs import _linprog_highs
from ._linprog_ip import _linprog_ip
from ._linprog_simplex import _linprog_simplex
from ._linprog_rs import _linprog_rs
from ._linprog_doc import (_linprog_highs_doc, _linprog_ip_doc,
                           _linprog_rs_doc, _linprog_simplex_doc,
                           _linprog_highs_ipm_doc, _linprog_highs_ds_doc)
from ._linprog_util import (
    _parse_linprog, _presolve, _get_Abc, _LPProblem, _autoscale,
    _postsolve, _check_result, _display_summary)
from copy import deepcopy

__all__ = ['linprog', 'linprog_verbose_callback', 'linprog_terse_callback']

__docformat__ = "restructuredtext en"

LINPROG_METHODS = ['simplex', 'revised simplex', 'interior-point',
                   'highs', 'highs-ds', 'highs-ipm']


def linprog_verbose_callback(res):
    """
    A sample callback function demonstrating the linprog callback interface.
    This callback produces detailed output to sys.stdout before each
    iteration and after the final iteration of the simplex algorithm.

    Parameters
    ----------
    res : A `scipy.optimize.OptimizeResult` consisting of the following fields:

        x : 1-D array
            The independent variable vector which optimizes the linear
            programming problem.
        fun : float
            Value of the objective function.
        success : bool
            True if the algorithm succeeded in finding an optimal solution.
        slack : 1-D array
            The values of the slack variables. Each slack variable
            corresponds to an inequality constraint. If the slack is zero,
            then the corresponding constraint is active.
        con : 1-D array
            The (nominally zero) residuals of the equality constraints,
            that is, ``b - A_eq @ x``.
        phase : int
            The phase of the optimization being executed. In phase 1 a
            basic feasible solution is sought and the tableau has an
            additional row representing an alternate objective function.
        status : int
            An integer representing the exit status of the optimization::

                 0 : Optimization terminated successfully
                 1 : Iteration limit reached
                 2 : Problem appears to be infeasible
                 3 : Problem appears to be unbounded
                 4 : Serious numerical difficulties encountered

        nit : int
            The number of iterations performed.
        message : str
            A string descriptor of the exit status of the optimization.
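
    Examples
    --------
    A minimal sketch of wiring this callback into `linprog` (illustrative
    data; a legacy method is used because the HiGHS methods do not support
    callbacks)::

        from scipy.optimize import linprog, linprog_verbose_callback

        c = [-1, 4]
        A_ub = [[-3, 1], [1, 2]]
        b_ub = [6, 4]
        # The callback is invoked with an OptimizeResult at each iteration.
        res = linprog(c, A_ub=A_ub, b_ub=b_ub, method='simplex',
                      callback=linprog_verbose_callback)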
  66. """
  67. x = res['x']
  68. fun = res['fun']
  69. phase = res['phase']
  70. status = res['status']
  71. nit = res['nit']
  72. message = res['message']
  73. complete = res['complete']
  74. saved_printoptions = np.get_printoptions()
  75. np.set_printoptions(linewidth=500,
  76. formatter={'float': lambda x: "{0: 12.4f}".format(x)})
  77. if status:
  78. print('--------- Simplex Early Exit -------\n')
  79. print('The simplex method exited early with status {0:d}'.format(status))
  80. print(message)
  81. elif complete:
  82. print('--------- Simplex Complete --------\n')
  83. print('Iterations required: {}'.format(nit))
  84. else:
  85. print('--------- Iteration {0:d} ---------\n'.format(nit))
  86. if nit > 0:
  87. if phase == 1:
  88. print('Current Pseudo-Objective Value:')
  89. else:
  90. print('Current Objective Value:')
  91. print('f = ', fun)
  92. print()
  93. print('Current Solution Vector:')
  94. print('x = ', x)
  95. print()
  96. np.set_printoptions(**saved_printoptions)
def linprog_terse_callback(res):
    """
    A sample callback function demonstrating the linprog callback interface.
    This callback produces brief output to sys.stdout before each iteration
    and after the final iteration of the simplex algorithm.

    Parameters
    ----------
    res : A `scipy.optimize.OptimizeResult` consisting of the following fields:

        x : 1-D array
            The independent variable vector which optimizes the linear
            programming problem.
        fun : float
            Value of the objective function.
        success : bool
            True if the algorithm succeeded in finding an optimal solution.
        slack : 1-D array
            The values of the slack variables. Each slack variable
            corresponds to an inequality constraint. If the slack is zero,
            then the corresponding constraint is active.
        con : 1-D array
            The (nominally zero) residuals of the equality constraints,
            that is, ``b - A_eq @ x``.
        phase : int
            The phase of the optimization being executed. In phase 1 a
            basic feasible solution is sought and the tableau has an
            additional row representing an alternate objective function.
        status : int
            An integer representing the exit status of the optimization::

                 0 : Optimization terminated successfully
                 1 : Iteration limit reached
                 2 : Problem appears to be infeasible
                 3 : Problem appears to be unbounded
                 4 : Serious numerical difficulties encountered

        nit : int
            The number of iterations performed.
        message : str
            A string descriptor of the exit status of the optimization.
    """
    nit = res['nit']
    x = res['x']

    if nit == 0:
        print("Iter:   X:")
    print("{0: <5d}   ".format(nit), end="")
    print(x)


def linprog(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None,
            bounds=None, method='highs', callback=None,
            options=None, x0=None, integrality=None):
    r"""
    Linear programming: minimize a linear objective function subject to
    linear equality and inequality constraints.

    Linear programming solves problems of the following form:

    .. math::

        \min_x \ & c^T x \\
        \mbox{such that} \ & A_{ub} x \leq b_{ub},\\
        & A_{eq} x = b_{eq},\\
        & l \leq x \leq u ,

    where :math:`x` is a vector of decision variables; :math:`c`,
    :math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and
    :math:`A_{ub}` and :math:`A_{eq}` are matrices.

    Alternatively, that's:

    minimize::

        c @ x

    such that::

        A_ub @ x <= b_ub
        A_eq @ x == b_eq
        lb <= x <= ub

    Note that by default ``lb = 0`` and ``ub = None`` unless specified with
    ``bounds``.
    Parameters
    ----------
    c : 1-D array
        The coefficients of the linear objective function to be minimized.
    A_ub : 2-D array, optional
        The inequality constraint matrix. Each row of ``A_ub`` specifies the
        coefficients of a linear inequality constraint on ``x``.
    b_ub : 1-D array, optional
        The inequality constraint vector. Each element represents an
        upper bound on the corresponding value of ``A_ub @ x``.
    A_eq : 2-D array, optional
        The equality constraint matrix. Each row of ``A_eq`` specifies the
        coefficients of a linear equality constraint on ``x``.
    b_eq : 1-D array, optional
        The equality constraint vector. Each element of ``A_eq @ x`` must
        equal the corresponding element of ``b_eq``.
    bounds : sequence, optional
        A sequence of ``(min, max)`` pairs for each element in ``x``,
        defining the minimum and maximum values of that decision variable.
        Use ``None`` to indicate that there is no bound. By default, bounds
        are ``(0, None)`` (all decision variables are non-negative).
        If a single tuple ``(min, max)`` is provided, then ``min`` and
        ``max`` will serve as bounds for all decision variables.
    method : str, optional
        The algorithm used to solve the standard form problem.
        :ref:`'highs' <optimize.linprog-highs>` (default),
        :ref:`'highs-ds' <optimize.linprog-highs-ds>`,
        :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`,
        :ref:`'interior-point' <optimize.linprog-interior-point>` (legacy),
        :ref:`'revised simplex' <optimize.linprog-revised_simplex>` (legacy),
        and
        :ref:`'simplex' <optimize.linprog-simplex>` (legacy) are supported.
        The legacy methods are deprecated and will be removed in SciPy
        1.11.0.
    callback : callable, optional
        If a callback function is provided, it will be called at least once
        per iteration of the algorithm. The callback function must accept a
        single `scipy.optimize.OptimizeResult` consisting of the following
        fields:

        x : 1-D array
            The current solution vector.
        fun : float
            The current value of the objective function ``c @ x``.
        success : bool
            ``True`` when the algorithm has completed successfully.
        slack : 1-D array
            The (nominally positive) values of the slack,
            ``b_ub - A_ub @ x``.
        con : 1-D array
            The (nominally zero) residuals of the equality constraints,
            ``b_eq - A_eq @ x``.
        phase : int
            The phase of the algorithm being executed.
        status : int
            An integer representing the status of the algorithm.

            ``0`` : Optimization proceeding nominally.

            ``1`` : Iteration limit reached.

            ``2`` : Problem appears to be infeasible.

            ``3`` : Problem appears to be unbounded.

            ``4`` : Numerical difficulties encountered.

        nit : int
            The current iteration number.
        message : str
            A string descriptor of the algorithm status.

        Callback functions are not currently supported by the HiGHS methods.
    options : dict, optional
        A dictionary of solver options. All methods accept the following
        options:

        maxiter : int
            Maximum number of iterations to perform.
            Default: see method-specific documentation.
        disp : bool
            Set to ``True`` to print convergence messages.
            Default: ``False``.
        presolve : bool
            Set to ``False`` to disable automatic presolve.
            Default: ``True``.

        All methods except the HiGHS solvers also accept:

        tol : float
            A tolerance which determines when a residual is "close enough"
            to zero to be considered exactly zero.
        autoscale : bool
            Set to ``True`` to automatically perform equilibration.
            Consider using this option if the numerical values in the
            constraints are separated by several orders of magnitude.
            Default: ``False``.
        rr : bool
            Set to ``False`` to disable automatic redundancy removal.
            Default: ``True``.
        rr_method : string
            Method used to identify and remove redundant rows from the
            equality constraint matrix after presolve. For problems with
            dense input, the available methods for redundancy removal are:

            "SVD":
                Repeatedly performs singular value decomposition on
                the matrix, detecting redundant rows based on nonzeros
                in the left singular vectors that correspond with
                zero singular values. May be fast when the matrix is
                nearly full rank.
            "pivot":
                Uses the algorithm presented in [5]_ to identify
                redundant rows.
            "ID":
                Uses a randomized interpolative decomposition.
                Identifies columns of the matrix transpose not used in
                a full-rank interpolative decomposition of the matrix.
            None:
                Uses "SVD" if the matrix is nearly full rank, that is,
                the difference between the matrix rank and the number
                of rows is less than five. If not, uses "pivot". The
                behavior of this default is subject to change without
                prior notice.

            Default: None.
            For problems with sparse input, this option is ignored, and the
            pivot-based algorithm presented in [5]_ is used.

        For method-specific options, see
        :func:`show_options('linprog') <show_options>`.
    x0 : 1-D array, optional
        Guess values of the decision variables, which will be refined by
        the optimization algorithm. This argument is currently used only by
        the 'revised simplex' method, and can only be used if `x0`
        represents a basic feasible solution.
    integrality : 1-D array or int, optional
        Indicates the type of integrality constraint on each decision
        variable.

        ``0`` : Continuous variable; no integrality constraint.

        ``1`` : Integer variable; decision variable must be an integer
        within `bounds`.

        ``2`` : Semi-continuous variable; decision variable must be within
        `bounds` or take value ``0``.

        ``3`` : Semi-integer variable; decision variable must be an integer
        within `bounds` or take value ``0``.

        By default, all variables are continuous.

        For mixed integrality constraints, supply an array of shape
        `c.shape`. To infer a constraint on each decision variable from
        shorter inputs, the argument will be broadcast to `c.shape` using
        `np.broadcast_to`.

        This argument is currently used only by the ``'highs'`` method and
        ignored otherwise.
    Returns
    -------
    res : OptimizeResult
        A :class:`scipy.optimize.OptimizeResult` consisting of the fields
        below. Note that the return types of the fields may depend on
        whether the optimization was successful, therefore it is
        recommended to check `OptimizeResult.status` before relying on the
        other fields:

        x : 1-D array
            The values of the decision variables that minimize the
            objective function while satisfying the constraints.
        fun : float
            The optimal value of the objective function ``c @ x``.
        slack : 1-D array
            The (nominally positive) values of the slack variables,
            ``b_ub - A_ub @ x``.
        con : 1-D array
            The (nominally zero) residuals of the equality constraints,
            ``b_eq - A_eq @ x``.
        success : bool
            ``True`` when the algorithm succeeds in finding an optimal
            solution.
        status : int
            An integer representing the exit status of the algorithm.

            ``0`` : Optimization terminated successfully.

            ``1`` : Iteration limit reached.

            ``2`` : Problem appears to be infeasible.

            ``3`` : Problem appears to be unbounded.

            ``4`` : Numerical difficulties encountered.

        nit : int
            The total number of iterations performed in all phases.
        message : str
            A string descriptor of the exit status of the algorithm.
    See Also
    --------
    show_options : Additional options accepted by the solvers.

    Notes
    -----
    This section describes the available solvers that can be selected by
    the 'method' parameter.

    `'highs-ds'` and `'highs-ipm'` are interfaces to the HiGHS simplex and
    interior-point method solvers [13]_, respectively. `'highs'` (default)
    chooses between the two automatically. These are the fastest linear
    programming solvers in SciPy, especially for large, sparse problems;
    which of these two is faster is problem-dependent. The other solvers
    (`'interior-point'`, `'revised simplex'`, and `'simplex'`) are legacy
    methods and will be removed in SciPy 1.11.0.

    Method *highs-ds* is a wrapper of the C++ high performance dual
    revised simplex implementation (HSOL) [13]_, [14]_. Method *highs-ipm*
    is a wrapper of a C++ implementation of an **i**\ nterior-\ **p**\ oint
    **m**\ ethod [13]_; it features a crossover routine, so it is as
    accurate as a simplex solver. Method *highs* chooses between the two
    automatically. For new code involving `linprog`, we recommend
    explicitly choosing one of these three method values.

    .. versionadded:: 1.6.0

    Method *interior-point* uses the primal-dual path following algorithm
    as outlined in [4]_. This algorithm supports sparse constraint matrices
    and is typically faster than the simplex methods, especially for large,
    sparse problems. Note, however, that the solution returned may be
    slightly less accurate than those of the simplex methods and will not,
    in general, correspond with a vertex of the polytope defined by the
    constraints.

    .. versionadded:: 1.0.0

    Method *revised simplex* uses the revised simplex method as described
    in [9]_, except that a factorization [11]_ of the basis matrix, rather
    than its inverse, is efficiently maintained and used to solve the
    linear systems at each iteration of the algorithm.

    .. versionadded:: 1.3.0

    Method *simplex* uses a traditional, full-tableau implementation of
    Dantzig's simplex algorithm [1]_, [2]_ (*not* the
    Nelder-Mead simplex). This algorithm is included for backwards
    compatibility and educational purposes.

    .. versionadded:: 0.15.0

    Before applying *interior-point*, *revised simplex*, or *simplex*,
    a presolve procedure based on [8]_ attempts
    to identify trivial infeasibilities, trivial unboundedness, and
    potential problem simplifications. Specifically, it checks for:

    - rows of zeros in ``A_eq`` or ``A_ub``, representing trivial
      constraints;
    - columns of zeros in ``A_eq`` `and` ``A_ub``, representing
      unconstrained variables;
    - column singletons in ``A_eq``, representing fixed variables; and
    - column singletons in ``A_ub``, representing simple bounds.

    If presolve reveals that the problem is unbounded (e.g., an
    unconstrained and unbounded variable has negative cost) or infeasible
    (e.g., a row of zeros in ``A_eq`` corresponds with a nonzero in
    ``b_eq``), the solver terminates with the appropriate status code. Note
    that presolve terminates as soon as any sign of unboundedness is
    detected; consequently, a problem may be reported as unbounded when in
    reality the problem is infeasible (but infeasibility has not been
    detected yet). Therefore, if it is important to know whether the
    problem is actually infeasible, solve the problem again with option
    ``presolve=False``.

    If neither infeasibility nor unboundedness is detected in a single pass
    of the presolve, bounds are tightened where possible and fixed
    variables are removed from the problem. Then, linearly dependent rows
    of the ``A_eq`` matrix are removed (unless they represent an
    infeasibility) to avoid numerical difficulties in the primary solve
    routine. Note that rows that are nearly linearly dependent (within a
    prescribed tolerance) may also be removed, which can change the optimal
    solution in rare cases. If this is a concern, eliminate redundancy from
    your problem formulation and run with option ``rr=False`` or
    ``presolve=False``.

    Several potential improvements can be made here: additional presolve
    checks outlined in [8]_ should be implemented, the presolve routine
    should be run multiple times (until no further simplifications can be
    made), and more of the efficiency improvements from [5]_ should be
    implemented in the redundancy removal routines.

    After presolve, the problem is transformed to standard form by
    converting the (tightened) simple bounds to upper bound constraints,
    introducing non-negative slack variables for inequality constraints,
    and expressing unbounded variables as the difference between two
    non-negative variables. Optionally, the problem is automatically scaled
    via equilibration [12]_. The selected algorithm solves the standard
    form problem, and a postprocessing routine converts the result to a
    solution to the original problem.
    References
    ----------
    .. [1] Dantzig, George B., Linear programming and extensions. Rand
           Corporation Research Study Princeton Univ. Press, Princeton, NJ,
           1963
    .. [2] Hillier, S.H. and Lieberman, G.J. (1995), "Introduction to
           Mathematical Programming", McGraw-Hill, Chapter 4.
    .. [3] Bland, Robert G. New finite pivoting rules for the simplex
           method. Mathematics of Operations Research (2), 1977:
           pp. 103-107.
    .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior
           point optimizer for linear programming: an implementation of the
           homogeneous algorithm." High performance optimization. Springer
           US, 2000. 197-232.
    .. [5] Andersen, Erling D. "Finding all linearly dependent rows in
           large-scale linear programming." Optimization Methods and
           Software 6.3 (1995): 219-227.
    .. [6] Freund, Robert M. "Primal-Dual Interior-Point Methods for Linear
           Programming based on Newton's Method." Unpublished Course Notes,
           March 2004. Available 2/25/2017 at
           https://ocw.mit.edu/courses/sloan-school-of-management/15-084j-nonlinear-programming-spring-2004/lecture-notes/lec14_int_pt_mthd.pdf
    .. [7] Fourer, Robert. "Solving Linear Programs by Interior-Point
           Methods." Unpublished Course Notes, August 26, 2005. Available
           2/25/2017 at http://www.4er.org/CourseNotes/Book%20B/B-III.pdf
    .. [8] Andersen, Erling D., and Knud D. Andersen. "Presolving in linear
           programming." Mathematical Programming 71.2 (1995): 221-245.
    .. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear
           programming." Athena Scientific 1 (1997): 997.
    .. [10] Andersen, Erling D., et al. Implementation of interior point
            methods for large scale linear programming. HEC/Universite de
            Geneve, 1996.
    .. [11] Bartels, Richard H. "A stabilization of the simplex method."
            Numerische Mathematik 16.5 (1971): 414-434.
    .. [12] Tomlin, J. A. "On scaling linear programming problems."
            Mathematical Programming Study 4 (1975): 146-166.
    .. [13] Huangfu, Q., Galabova, I., Feldmeier, M., and Hall, J. A. J.
            "HiGHS - high performance software for linear optimization."
            https://highs.dev/
    .. [14] Huangfu, Q. and Hall, J. A. J. "Parallelizing the dual revised
            simplex method." Mathematical Programming Computation, 10 (1),
            119-142, 2018. DOI: 10.1007/s12532-017-0130-5
    Examples
    --------
    Consider the following problem:

    .. math::

        \min_{x_0, x_1} \ -x_0 + 4x_1 & \\
        \mbox{such that} \ -3x_0 + x_1 & \leq 6,\\
        -x_0 - 2x_1 & \geq -4,\\
        x_1 & \geq -3.

    The problem is not presented in the form accepted by `linprog`. This is
    easily remedied by converting the "greater than" inequality
    constraint to a "less than" inequality constraint by
    multiplying both sides by a factor of :math:`-1`. Note also that the
    last constraint is really the simple bound
    :math:`-3 \leq x_1 \leq \infty`. Finally, since there are no bounds on
    :math:`x_0`, we must explicitly specify the bounds
    :math:`-\infty \leq x_0 \leq \infty`, as the default is for variables
    to be non-negative. After collecting coefficients into arrays and
    tuples, the input for this problem is:

    >>> from scipy.optimize import linprog
    >>> c = [-1, 4]
    >>> A = [[-3, 1], [1, 2]]
    >>> b = [6, 4]
    >>> x0_bounds = (None, None)
    >>> x1_bounds = (-3, None)
    >>> res = linprog(c, A_ub=A, b_ub=b, bounds=[x0_bounds, x1_bounds])
    >>> res.fun
    -22.0
    >>> res.x
    array([10., -3.])
    >>> res.message
    'Optimization terminated successfully. (HiGHS Status 7: Optimal)'

    The marginals (AKA dual values / shadow prices / Lagrange multipliers)
    and residuals (slacks) are also available.

    >>> res.ineqlin
      residual: [ 3.900e+01  0.000e+00]
     marginals: [-0.000e+00 -1.000e+00]

    For example, because the marginal associated with the second inequality
    constraint is -1, we expect the optimal value of the objective function
    to decrease by ``eps`` if we add a small amount ``eps`` to the right
    hand side of the second inequality constraint:

    >>> eps = 0.05
    >>> b[1] += eps
    >>> linprog(c, A_ub=A, b_ub=b, bounds=[x0_bounds, x1_bounds]).fun
    -22.05

    Also, because the residual on the first inequality constraint is 39, we
    can decrease the right hand side of the first constraint by 39 without
    affecting the optimal solution.

    >>> b = [6, 4]  # reset to original values
    >>> b[0] -= 39
    >>> linprog(c, A_ub=A, b_ub=b, bounds=[x0_bounds, x1_bounds]).fun
    -22.0
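
    The ``integrality`` argument, supported only by the HiGHS methods, can
    restrict decision variables to integer values. A minimal sketch with
    illustrative data (unrelated to the problem above)::

        from scipy.optimize import linprog

        c = [-1, -2]      # minimize -x0 - 2*x1
        A_ub = [[1, 1]]   # subject to x0 + x1 <= 3.5
        b_ub = [3.5]
        # integrality=1 requires each decision variable to take an
        # integer value within its bounds (non-negative by default)
        res = linprog(c, A_ub=A_ub, b_ub=b_ub, integrality=[1, 1],
                      method='highs')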
  504. """
    meth = method.lower()
    methods = {"highs", "highs-ds", "highs-ipm",
               "simplex", "revised simplex", "interior-point"}

    if meth not in methods:
        raise ValueError(f"Unknown solver '{method}'")

    if x0 is not None and meth != "revised simplex":
        warning_message = "x0 is used only when method is 'revised simplex'. "
        warn(warning_message, OptimizeWarning)

    if np.any(integrality) and not meth == "highs":
        integrality = None
        warning_message = ("Only `method='highs'` supports integer "
                           "constraints. Ignoring `integrality`.")
        warn(warning_message, OptimizeWarning)
    elif np.any(integrality):
        integrality = np.broadcast_to(integrality, np.shape(c))

    lp = _LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0, integrality)
    lp, solver_options = _parse_linprog(lp, options, meth)
    tol = solver_options.get('tol', 1e-9)

    # Give unmodified problem to HiGHS
    if meth.startswith('highs'):
        if callback is not None:
            raise NotImplementedError("HiGHS solvers do not support the "
                                      "callback interface.")
        highs_solvers = {'highs-ipm': 'ipm', 'highs-ds': 'simplex',
                         'highs': None}

        sol = _linprog_highs(lp, solver=highs_solvers[meth],
                             **solver_options)
        sol['status'], sol['message'] = (
            _check_result(sol['x'], sol['fun'], sol['status'], sol['slack'],
                          sol['con'], lp.bounds, tol, sol['message']))
        sol['success'] = sol['status'] == 0
        return OptimizeResult(sol)

    warn(f"`method='{meth}'` is deprecated and will be removed in SciPy "
         "1.11.0. Please use one of the HiGHS solvers (e.g. "
         "`method='highs'`) in new code.", DeprecationWarning, stacklevel=2)

    iteration = 0
    complete = False  # will become True if solved in presolve
    undo = []

    # Keep the original arrays to calculate slack/residuals for original
    # problem.
    lp_o = deepcopy(lp)

    # Solve trivial problem, eliminate variables, tighten bounds, etc.
    rr_method = solver_options.pop('rr_method', None)  # need to pop these;
    rr = solver_options.pop('rr', True)  # they're not passed to methods
    c0 = 0  # we might get a constant term in the objective
    if solver_options.pop('presolve', True):
        (lp, c0, x, undo, complete, status, message) = _presolve(lp, rr,
                                                                 rr_method,
                                                                 tol)

    C, b_scale = 1, 1  # for trivial unscaling if autoscale is not used
    postsolve_args = (lp_o._replace(bounds=lp.bounds), undo, C, b_scale)

    if not complete:
        A, b, c, c0, x0 = _get_Abc(lp, c0)
        if solver_options.pop('autoscale', False):
            A, b, c, x0, C, b_scale = _autoscale(A, b, c, x0)
            postsolve_args = postsolve_args[:-2] + (C, b_scale)

        if meth == 'simplex':
            x, status, message, iteration = _linprog_simplex(
                c, c0=c0, A=A, b=b, callback=callback,
                postsolve_args=postsolve_args, **solver_options)
        elif meth == 'interior-point':
            x, status, message, iteration = _linprog_ip(
                c, c0=c0, A=A, b=b, callback=callback,
                postsolve_args=postsolve_args, **solver_options)
        elif meth == 'revised simplex':
            x, status, message, iteration = _linprog_rs(
                c, c0=c0, A=A, b=b, x0=x0, callback=callback,
                postsolve_args=postsolve_args, **solver_options)

    # Eliminate artificial variables, re-introduce presolved variables, etc.
    disp = solver_options.get('disp', False)

    x, fun, slack, con = _postsolve(x, postsolve_args, complete)

    status, message = _check_result(x, fun, status, slack, con, lp_o.bounds,
                                    tol, message)

    if disp:
        _display_summary(message, status, fun, iteration)

    sol = {
        'x': x,
        'fun': fun,
        'slack': slack,
        'con': con,
        'status': status,
        'message': message,
        'nit': iteration,
        'success': status == 0}

    return OptimizeResult(sol)