# eigen.py

from types import FunctionType
from collections import Counter

from mpmath import mp, workprec
from mpmath.libmp.libmpf import prec_to_dps

from sympy.core.sorting import default_sort_key
from sympy.core.evalf import DEFAULT_MAXPREC, PrecisionExhausted
from sympy.core.logic import fuzzy_and, fuzzy_or
from sympy.core.numbers import Float
from sympy.core.sympify import _sympify
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.polys import roots, CRootOf, ZZ, QQ, EX
from sympy.polys.matrices import DomainMatrix
from sympy.polys.matrices.eigen import dom_eigenvects, dom_eigenvects_to_sympy
from sympy.polys.polytools import gcd

from .common import MatrixError, NonSquareMatrixError
from .determinant import _find_reasonable_pivot
from .utilities import _iszero, _simplify


def _eigenvals_eigenvects_mpmath(M):
    norm2 = lambda v: mp.sqrt(sum(i**2 for i in v))

    v1 = None
    prec = max([x._prec for x in M.atoms(Float)])
    eps = 2**-prec

    while prec < DEFAULT_MAXPREC:
        with workprec(prec):
            A = mp.matrix(M.evalf(n=prec_to_dps(prec)))
            E, ER = mp.eig(A)
            v2 = norm2([i for e in E for i in (mp.re(e), mp.im(e))])
            if v1 is not None and mp.fabs(v1 - v2) < eps:
                return E, ER
            v1 = v2
        prec *= 2

    # we get here because the next step would have taken us
    # past MAXPREC or because we never took a step; in case
    # of the latter, we refuse to send back a solution since
    # it would not have been verified; we also resist taking
    # a small step to arrive exactly at MAXPREC since then
    # the two calculations might be artificially close.
    raise PrecisionExhausted


def _eigenvals_mpmath(M, multiple=False):
    """Compute eigenvalues using mpmath"""
    E, _ = _eigenvals_eigenvects_mpmath(M)
    result = [_sympify(x) for x in E]
    if multiple:
        return result
    return dict(Counter(result))


def _eigenvects_mpmath(M):
    E, ER = _eigenvals_eigenvects_mpmath(M)
    result = []
    for i in range(M.rows):
        eigenval = _sympify(E[i])
        eigenvect = _sympify(ER[:, i])
        result.append((eigenval, 1, [eigenvect]))

    return result
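
# Illustrative sketch (not part of the original module): a matrix whose entries
# are all numeric and contain at least one Float is routed through the mpmath
# helpers above, so eigenvalues come back as numeric approximations.  The values
# shown are approximate and the exact printed precision may differ:
#
#     >>> from sympy import Matrix
#     >>> M = Matrix([[1.0, 2.0], [2.0, 1.0]])
#     >>> M.eigenvals()          # dispatches to _eigenvals_mpmath internally
#     {-1.0: 1, 3.0: 1}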


# This function is a candidate for caching if it gets implemented for matrices.
def _eigenvals(
    M, error_when_incomplete=True, *, simplify=False, multiple=False,
    rational=False, **flags):
    r"""Compute eigenvalues of the matrix.

    Parameters
    ==========

    error_when_incomplete : bool, optional
        If it is set to ``True``, it will raise an error if not all
        eigenvalues are computed. This is caused by ``roots`` not returning
        a full list of eigenvalues.

    simplify : bool or function, optional
        If it is set to ``True``, it attempts to return the most
        simplified form of expressions returned by applying default
        simplification method in every routine.

        If it is set to ``False``, it will skip simplification in this
        particular routine to save computation resources.

        If a function is passed, it will be applied as the
        simplification method.

    rational : bool, optional
        If it is set to ``True``, every floating point number will be
        replaced with a rational before computation. It can solve some
        issues of the ``roots`` routine not working well with floats.

    multiple : bool, optional
        If it is set to ``True``, the result will be in the form of a
        list.

        If it is set to ``False``, the result will be in the form of a
        dictionary.

    Returns
    =======

    eigs : list or dict
        Eigenvalues of a matrix. The return format is specified by
        the key ``multiple``.

    Raises
    ======

    MatrixError
        If not enough roots could be computed.

    NonSquareMatrixError
        If attempting to compute eigenvalues of a non-square matrix.

    Examples
    ========

    >>> from sympy import Matrix
    >>> M = Matrix(3, 3, [0, 1, 1, 1, 0, 0, 1, 1, 1])
    >>> M.eigenvals()
    {-1: 1, 0: 1, 2: 1}

    See Also
    ========

    MatrixDeterminant.charpoly
    eigenvects

    Notes
    =====

    Eigenvalues of a matrix $A$ can be computed by solving the matrix
    equation $\det(A - \lambda I) = 0$.

    It is not always possible to return radical solutions for
    eigenvalues of matrices larger than $4 \times 4$ due to the
    Abel-Ruffini theorem.

    If no radical solution is found for an eigenvalue, it may be
    returned in the form of
    :class:`sympy.polys.rootoftools.ComplexRootOf`.
    """
    if not M:
        if multiple:
            return []
        return {}

    if not M.is_square:
        raise NonSquareMatrixError("{} must be a square matrix.".format(M))

    if M._rep.domain not in (ZZ, QQ):
        # Skip this check for ZZ/QQ because it can be slow
        if all(x.is_number for x in M) and M.has(Float):
            return _eigenvals_mpmath(M, multiple=multiple)

    if rational:
        from sympy.simplify import nsimplify
        M = M.applyfunc(
            lambda x: nsimplify(x, rational=True) if x.has(Float) else x)

    if multiple:
        return _eigenvals_list(
            M, error_when_incomplete=error_when_incomplete, simplify=simplify,
            **flags)
    return _eigenvals_dict(
        M, error_when_incomplete=error_when_incomplete, simplify=simplify,
        **flags)


eigenvals_error_message = \
"It is not always possible to express the eigenvalues of a matrix " + \
"of size 5x5 or higher in radicals. " + \
"We have CRootOf, but domains other than the rationals are not " + \
"currently supported. " + \
"If there are no symbols in the matrix, " + \
"it should still be possible to compute numeric approximations " + \
"of the eigenvalues using " + \
"M.evalf().eigenvals() or M.charpoly().nroots()."


def _eigenvals_list(
    M, error_when_incomplete=True, simplify=False, **flags):
    iblocks = M.strongly_connected_components()
    all_eigs = []
    is_dom = M._rep.domain in (ZZ, QQ)
    for b in iblocks:

        # Fast path for a 1x1 block:
        if is_dom and len(b) == 1:
            index = b[0]
            val = M[index, index]
            all_eigs.append(val)
            continue

        block = M[b, b]

        if isinstance(simplify, FunctionType):
            charpoly = block.charpoly(simplify=simplify)
        else:
            charpoly = block.charpoly()

        eigs = roots(charpoly, multiple=True, **flags)

        if len(eigs) != block.rows:
            degree = int(charpoly.degree())
            f = charpoly.as_expr()
            x = charpoly.gen
            try:
                eigs = [CRootOf(f, x, idx) for idx in range(degree)]
            except NotImplementedError:
                if error_when_incomplete:
                    raise MatrixError(eigenvals_error_message)
                else:
                    eigs = []

        all_eigs += eigs

    if not simplify:
        return all_eigs
    if not isinstance(simplify, FunctionType):
        simplify = _simplify
    return [simplify(value) for value in all_eigs]
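
# Illustrative sketch (not part of the original module): splitting the matrix
# into strongly connected components amounts to working block by block on a
# block-triangular permutation of M, so the eigenvalues of M are the union of
# the eigenvalues of the diagonal blocks.  Schematically (dict order may differ):
#
#     >>> from sympy import Matrix
#     >>> M = Matrix([[2, 1], [0, 3]])   # block triangular, 1x1 blocks [2] and [3]
#     >>> M.eigenvals()
#     {2: 1, 3: 1}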


def _eigenvals_dict(
    M, error_when_incomplete=True, simplify=False, **flags):
    iblocks = M.strongly_connected_components()
    all_eigs = {}
    is_dom = M._rep.domain in (ZZ, QQ)
    for b in iblocks:

        # Fast path for a 1x1 block:
        if is_dom and len(b) == 1:
            index = b[0]
            val = M[index, index]
            all_eigs[val] = all_eigs.get(val, 0) + 1
            continue

        block = M[b, b]

        if isinstance(simplify, FunctionType):
            charpoly = block.charpoly(simplify=simplify)
        else:
            charpoly = block.charpoly()

        eigs = roots(charpoly, multiple=False, **flags)

        if sum(eigs.values()) != block.rows:
            degree = int(charpoly.degree())
            f = charpoly.as_expr()
            x = charpoly.gen
            try:
                eigs = {CRootOf(f, x, idx): 1 for idx in range(degree)}
            except NotImplementedError:
                if error_when_incomplete:
                    raise MatrixError(eigenvals_error_message)
                else:
                    eigs = {}

        for k, v in eigs.items():
            if k in all_eigs:
                all_eigs[k] += v
            else:
                all_eigs[k] = v

    if not simplify:
        return all_eigs
    if not isinstance(simplify, FunctionType):
        simplify = _simplify
    return {simplify(key): value for key, value in all_eigs.items()}


def _eigenspace(M, eigenval, iszerofunc=_iszero, simplify=False):
    """Get a basis for the eigenspace for a particular eigenvalue"""
    m = M - M.eye(M.rows) * eigenval
    ret = m.nullspace(iszerofunc=iszerofunc)

    # The nullspace for a real eigenvalue should be non-trivial.
    # If we didn't find an eigenvector, try once more a little harder
    if len(ret) == 0 and simplify:
        ret = m.nullspace(iszerofunc=iszerofunc, simplify=True)
    if len(ret) == 0:
        raise NotImplementedError(
            "Can't evaluate eigenvector for eigenvalue {}".format(eigenval))
    return ret
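
# Illustrative sketch (not part of the original module): the eigenspace for an
# eigenvalue lam is the nullspace of (M - lam*I), which is exactly what
# _eigenspace computes.  For the identity matrix every vector is an eigenvector
# for lam = 1:
#
#     >>> from sympy import Matrix, eye
#     >>> M = Matrix([[1, 0], [0, 1]])
#     >>> (M - 1*eye(2)).nullspace()     # same computation as _eigenspace(M, 1)
#     [Matrix([
#     [1],
#     [0]]), Matrix([
#     [0],
#     [1]])]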


def _eigenvects_DOM(M, **kwargs):
    DOM = DomainMatrix.from_Matrix(M, field=True, extension=True)
    DOM = DOM.to_dense()

    if DOM.domain != EX:
        rational, algebraic = dom_eigenvects(DOM)
        eigenvects = dom_eigenvects_to_sympy(
            rational, algebraic, M.__class__, **kwargs)
        eigenvects = sorted(eigenvects, key=lambda x: default_sort_key(x[0]))

        return eigenvects
    return None


def _eigenvects_sympy(M, iszerofunc, simplify=True, **flags):
    eigenvals = M.eigenvals(rational=False, **flags)

    # Make sure that we have all roots in radical form
    for x in eigenvals:
        if x.has(CRootOf):
            raise MatrixError(
                "Eigenvector computation is not implemented if the matrix has "
                "eigenvalues in CRootOf form")

    eigenvals = sorted(eigenvals.items(), key=default_sort_key)
    ret = []
    for val, mult in eigenvals:
        vects = _eigenspace(M, val, iszerofunc=iszerofunc, simplify=simplify)
        ret.append((val, mult, vects))
    return ret


# This function is a candidate for caching if it gets implemented for matrices.
def _eigenvects(M, error_when_incomplete=True, iszerofunc=_iszero, *, chop=False, **flags):
    """Compute eigenvectors of the matrix.

    Parameters
    ==========

    error_when_incomplete : bool, optional
        Raise an error when not all eigenvalues are computed. This is
        caused by ``roots`` not returning a full list of eigenvalues.

    iszerofunc : function, optional
        Specifies a zero testing function to be used in ``rref``.

        Default value is ``_iszero``, which uses SymPy's naive and fast
        default assumption handler.

        It can also accept any user-specified zero testing function, if it
        is formatted as a function which accepts a single symbolic argument
        and returns ``True`` if it is tested as zero, ``False`` if it
        is tested as non-zero, and ``None`` if it is undecidable.

    simplify : bool or function, optional
        If ``True``, ``as_content_primitive()`` will be used to tidy up
        normalization artifacts.

        It will also be used by the ``nullspace`` routine.

    chop : bool or positive number, optional
        If the matrix contains any Floats, they will be changed to Rationals
        for computation purposes, but the answers will be returned after
        being evaluated with evalf. The ``chop`` flag is passed to ``evalf``.
        When ``chop=True`` a default precision will be used; a number will
        be interpreted as the desired level of precision.

    Returns
    =======

    ret : [(eigenval, multiplicity, eigenspace), ...]
        A ragged list containing tuples of data obtained by ``eigenvals``
        and ``nullspace``.

        ``eigenspace`` is a list containing the ``eigenvector`` for each
        eigenvalue.

        ``eigenvector`` is a vector in the form of a ``Matrix``, e.g.
        a vector of length 3 is returned as ``Matrix([a_1, a_2, a_3])``.

    Raises
    ======

    NotImplementedError
        If the nullspace could not be computed.

    Examples
    ========

    >>> from sympy import Matrix
    >>> M = Matrix(3, 3, [0, 1, 1, 1, 0, 0, 1, 1, 1])
    >>> M.eigenvects()
    [(-1, 1, [Matrix([
    [-1],
    [ 1],
    [ 0]])]), (0, 1, [Matrix([
    [ 0],
    [-1],
    [ 1]])]), (2, 1, [Matrix([
    [2/3],
    [1/3],
    [ 1]])])]

    See Also
    ========

    eigenvals
    MatrixSubspaces.nullspace
    """
    simplify = flags.get('simplify', True)
    primitive = flags.get('simplify', False)
    flags.pop('simplify', None)  # remove this if it's there
    flags.pop('multiple', None)  # remove this if it's there

    # use a passed-in simplification function directly; otherwise fall back
    # to _simplify (or a no-op when simplification is disabled)
    if isinstance(simplify, FunctionType):
        simpfunc = simplify
    else:
        simpfunc = _simplify if simplify else lambda x: x

    has_floats = M.has(Float)
    if has_floats:
        if all(x.is_number for x in M):
            return _eigenvects_mpmath(M)
        from sympy.simplify import nsimplify
        M = M.applyfunc(lambda x: nsimplify(x, rational=True))

    ret = _eigenvects_DOM(M)
    if ret is None:
        ret = _eigenvects_sympy(M, iszerofunc, simplify=simplify, **flags)

    if primitive:
        # if the primitive flag is set, get rid of any common
        # integer denominators
        def denom_clean(l):
            return [(v / gcd(list(v))).applyfunc(simpfunc) for v in l]

        ret = [(val, mult, denom_clean(es)) for val, mult, es in ret]

    if has_floats:
        # if we had floats to start with, turn the eigenvectors to floats
        ret = [(val.evalf(chop=chop), mult, [v.evalf(chop=chop) for v in es])
               for val, mult, es in ret]

    return ret


def _is_diagonalizable_with_eigen(M, reals_only=False):
    """See _is_diagonalizable. This function returns the bool along with the
    eigenvectors to avoid calculating them again in functions like
    ``diagonalize``."""

    if not M.is_square:
        return False, []

    eigenvecs = M.eigenvects(simplify=True)

    for val, mult, basis in eigenvecs:
        if reals_only and not val.is_real:  # if we have a complex eigenvalue
            return False, eigenvecs
        if mult != len(basis):  # if the geometric multiplicity doesn't equal the algebraic
            return False, eigenvecs

    return True, eigenvecs


def _is_diagonalizable(M, reals_only=False, **kwargs):
    """Returns ``True`` if a matrix is diagonalizable.

    Parameters
    ==========

    reals_only : bool, optional
        If ``True``, it tests whether the matrix can be diagonalized
        to contain only real numbers on the diagonal.

        If ``False``, it tests whether the matrix can be diagonalized
        at all, even with numbers that may not be real.

    Examples
    ========

    Example of a diagonalizable matrix:

    >>> from sympy import Matrix
    >>> M = Matrix([[1, 2, 0], [0, 3, 0], [2, -4, 2]])
    >>> M.is_diagonalizable()
    True

    Example of a non-diagonalizable matrix:

    >>> M = Matrix([[0, 1], [0, 0]])
    >>> M.is_diagonalizable()
    False

    Example of a matrix that is diagonalized in terms of non-real entries:

    >>> M = Matrix([[0, 1], [-1, 0]])
    >>> M.is_diagonalizable(reals_only=False)
    True
    >>> M.is_diagonalizable(reals_only=True)
    False

    See Also
    ========

    is_diagonal
    diagonalize
    """
    if not M.is_square:
        return False

    if all(e.is_real for e in M) and M.is_symmetric():
        return True

    if all(e.is_complex for e in M) and M.is_hermitian:
        return True

    return _is_diagonalizable_with_eigen(M, reals_only=reals_only)[0]


# G&VL, Matrix Computations, Algo 5.4.2
def _householder_vector(x):
    if not x.cols == 1:
        raise ValueError("Input must be a column matrix")
    v = x.copy()
    v_plus = x.copy()
    v_minus = x.copy()
    q = x[0, 0] / abs(x[0, 0])
    norm_x = x.norm()
    v_plus[0, 0] = x[0, 0] + q * norm_x
    v_minus[0, 0] = x[0, 0] - q * norm_x
    if x[1:, 0].norm() == 0:
        bet = 0
        v[0, 0] = 1
    else:
        if v_plus.norm() <= v_minus.norm():
            v = v_plus
        else:
            v = v_minus
        v = v / v[0]
        bet = 2 / (v.norm() ** 2)
    return v, bet
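
# Illustrative sketch (not part of the original module): the pair (v, bet)
# returned above defines the Householder reflector H = I - bet*v*v.H, which
# maps x onto a multiple of the first unit vector, i.e. H*x is zero below its
# first entry.  The import path assumes this module lives at sympy.matrices.eigen:
#
#     >>> from sympy import Matrix, eye
#     >>> from sympy.matrices.eigen import _householder_vector
#     >>> x = Matrix([3, 4])
#     >>> v, bet = _householder_vector(x)
#     >>> (eye(2) - bet * v * v.H) * x    # x is reflected onto the first axis
#     Matrix([
#     [5],
#     [0]])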


def _bidiagonal_decmp_hholder(M):
    m = M.rows
    n = M.cols
    A = M.as_mutable()
    U, V = A.eye(m), A.eye(n)

    for i in range(min(m, n)):
        v, bet = _householder_vector(A[i:, i])
        hh_mat = A.eye(m - i) - bet * v * v.H
        A[i:, i:] = hh_mat * A[i:, i:]
        temp = A.eye(m)
        temp[i:, i:] = hh_mat
        U = U * temp
        if i + 1 <= n - 2:
            v, bet = _householder_vector(A[i, i+1:].T)
            hh_mat = A.eye(n - i - 1) - bet * v * v.H
            A[i:, i+1:] = A[i:, i+1:] * hh_mat
            temp = A.eye(n)
            temp[i+1:, i+1:] = hh_mat
            V = temp * V
    return U, A, V


def _eval_bidiag_hholder(M):
    m = M.rows
    n = M.cols
    A = M.as_mutable()

    for i in range(min(m, n)):
        v, bet = _householder_vector(A[i:, i])
        hh_mat = A.eye(m - i) - bet * v * v.H
        A[i:, i:] = hh_mat * A[i:, i:]
        if i + 1 <= n - 2:
            v, bet = _householder_vector(A[i, i+1:].T)
            hh_mat = A.eye(n - i - 1) - bet * v * v.H
            A[i:, i+1:] = A[i:, i+1:] * hh_mat
    return A


def _bidiagonal_decomposition(M, upper=True):
    """
    Returns $(U, B, V.H)$ for

    $$A = UBV^{H}$$

    where $A$ is the input matrix, and $B$ is its bidiagonalized form.

    Note: Bidiagonal computation can hang for symbolic matrices.

    Parameters
    ==========

    upper : bool. Whether to do upper bidiagonalization or lower.
                True for upper and False for lower.

    References
    ==========

    .. [1] Algorithm 5.4.2, Matrix Computations by Golub and Van Loan, 4th edition
    .. [2] Complex Matrix Bidiagonalization, https://github.com/vslobody/Householder-Bidiagonalization
    """
    if not isinstance(upper, bool):
        raise ValueError("upper must be a boolean")

    if upper:
        return _bidiagonal_decmp_hholder(M)

    X = _bidiagonal_decmp_hholder(M.H)
    return X[2].H, X[1].H, X[0].H
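
# Illustrative sketch (not part of the original module): the three factors
# multiply back to the original matrix, with B upper (or lower) bidiagonal.
# This assumes the public ``bidiagonal_decomposition`` matrix method that
# wraps this helper:
#
#     >>> from sympy import Matrix, simplify
#     >>> A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
#     >>> U, B, Vh = A.bidiagonal_decomposition()
#     >>> simplify(U * B * Vh - A)    # the product reconstructs A (zero matrix)
#     Matrix([
#     [0, 0, 0],
#     [0, 0, 0],
#     [0, 0, 0],
#     [0, 0, 0]])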


def _bidiagonalize(M, upper=True):
    """
    Returns $B$, the bidiagonalized form of the input matrix.

    Note: Bidiagonal computation can hang for symbolic matrices.

    Parameters
    ==========

    upper : bool. Whether to do upper bidiagonalization or lower.
                True for upper and False for lower.

    References
    ==========

    .. [1] Algorithm 5.4.2, Matrix Computations by Golub and Van Loan, 4th edition
    .. [2] Complex Matrix Bidiagonalization : https://github.com/vslobody/Householder-Bidiagonalization
    """
    if not isinstance(upper, bool):
        raise ValueError("upper must be a boolean")

    if upper:
        return _eval_bidiag_hholder(M)
    return _eval_bidiag_hholder(M.H).H


def _diagonalize(M, reals_only=False, sort=False, normalize=False):
    """
    Return (P, D), where D is diagonal and

        D = P^-1 * M * P

    where M is the current matrix.

    Parameters
    ==========

    reals_only : bool. Whether to throw an error if complex numbers are needed
                    to diagonalize. (Default: False)

    sort : bool. Sort the eigenvalues along the diagonal. (Default: False)

    normalize : bool. If True, normalize the columns of P. (Default: False)

    Examples
    ========

    >>> from sympy import Matrix
    >>> M = Matrix(3, 3, [1, 2, 0, 0, 3, 0, 2, -4, 2])
    >>> M
    Matrix([
    [1,  2, 0],
    [0,  3, 0],
    [2, -4, 2]])
    >>> (P, D) = M.diagonalize()
    >>> D
    Matrix([
    [1, 0, 0],
    [0, 2, 0],
    [0, 0, 3]])
    >>> P
    Matrix([
    [-1, 0, -1],
    [ 0, 0, -1],
    [ 2, 1,  2]])
    >>> P.inv() * M * P
    Matrix([
    [1, 0, 0],
    [0, 2, 0],
    [0, 0, 3]])

    See Also
    ========

    is_diagonal
    is_diagonalizable
    """
    if not M.is_square:
        raise NonSquareMatrixError()

    is_diagonalizable, eigenvecs = _is_diagonalizable_with_eigen(M,
                reals_only=reals_only)

    if not is_diagonalizable:
        raise MatrixError("Matrix is not diagonalizable")

    if sort:
        eigenvecs = sorted(eigenvecs, key=default_sort_key)

    p_cols, diag = [], []

    for val, mult, basis in eigenvecs:
        diag += [val] * mult
        p_cols += basis

    if normalize:
        p_cols = [v / v.norm() for v in p_cols]

    return M.hstack(*p_cols), M.diag(*diag)


def _fuzzy_positive_definite(M):
    positive_diagonals = M._has_positive_diagonals()
    if positive_diagonals is False:
        return False

    if positive_diagonals and M.is_strongly_diagonally_dominant:
        return True

    return None


def _fuzzy_positive_semidefinite(M):
    nonnegative_diagonals = M._has_nonnegative_diagonals()
    if nonnegative_diagonals is False:
        return False

    if nonnegative_diagonals and M.is_weakly_diagonally_dominant:
        return True

    return None


def _is_positive_definite(M):
    if not M.is_hermitian:
        if not M.is_square:
            return False
        M = M + M.H

    fuzzy = _fuzzy_positive_definite(M)
    if fuzzy is not None:
        return fuzzy

    return _is_positive_definite_GE(M)


def _is_positive_semidefinite(M):
    if not M.is_hermitian:
        if not M.is_square:
            return False
        M = M + M.H

    fuzzy = _fuzzy_positive_semidefinite(M)
    if fuzzy is not None:
        return fuzzy

    return _is_positive_semidefinite_cholesky(M)


def _is_negative_definite(M):
    return _is_positive_definite(-M)


def _is_negative_semidefinite(M):
    return _is_positive_semidefinite(-M)


def _is_indefinite(M):
    if M.is_hermitian:
        eigen = M.eigenvals()
        args1 = [x.is_positive for x in eigen.keys()]
        any_positive = fuzzy_or(args1)
        args2 = [x.is_negative for x in eigen.keys()]
        any_negative = fuzzy_or(args2)

        return fuzzy_and([any_positive, any_negative])

    elif M.is_square:
        return (M + M.H).is_indefinite

    return False


def _is_positive_definite_GE(M):
    """A division-free gaussian elimination method for testing
    positive-definiteness."""
    M = M.as_mutable()
    size = M.rows

    for i in range(size):
        is_positive = M[i, i].is_positive
        if is_positive is not True:
            return is_positive
        for j in range(i+1, size):
            M[j, i+1:] = M[i, i] * M[j, i+1:] - M[j, i] * M[i, i+1:]
    return True
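
# Illustrative sketch (not part of the original module): the division-free
# elimination above keeps each pivot as a positively scaled leading principal
# minor, so the matrix is reported positive definite exactly when every pivot
# tests positive.  For example:
#
#     >>> from sympy import Matrix
#     >>> Matrix([[2, 1], [1, 2]]).is_positive_definite    # minors 2 and 3 are positive
#     True
#     >>> Matrix([[1, 2], [2, 1]]).is_positive_definite    # second minor is -3
#     False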


def _is_positive_semidefinite_cholesky(M):
    """Uses Cholesky factorization with complete pivoting

    References
    ==========

    .. [1] http://eprints.ma.man.ac.uk/1199/1/covered/MIMS_ep2008_116.pdf

    .. [2] https://www.value-at-risk.net/cholesky-factorization/
    """
    M = M.as_mutable()
    for k in range(M.rows):
        diags = [M[i, i] for i in range(k, M.rows)]
        pivot, pivot_val, nonzero, _ = _find_reasonable_pivot(diags)

        if nonzero:
            return None

        if pivot is None:
            for i in range(k+1, M.rows):
                for j in range(k, M.cols):
                    iszero = M[i, j].is_zero
                    if iszero is None:
                        return None
                    elif iszero is False:
                        return False
            return True

        if M[k, k].is_negative or pivot_val.is_negative:
            return False
        elif not (M[k, k].is_nonnegative and pivot_val.is_nonnegative):
            return None

        if pivot > 0:
            M.col_swap(k, k+pivot)
            M.row_swap(k, k+pivot)

        M[k, k] = sqrt(M[k, k])
        M[k, k+1:] /= M[k, k]
        M[k+1:, k+1:] -= M[k, k+1:].H * M[k, k+1:]

    return M[-1, -1].is_nonnegative
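
# Illustrative sketch (not part of the original module): pivoted Cholesky
# factorization succeeds with nonnegative pivots exactly for positive
# semidefinite matrices, which is what the helper above exploits:
#
#     >>> from sympy import Matrix
#     >>> Matrix([[1, 1], [1, 1]]).is_positive_semidefinite   # eigenvalues 2 and 0
#     True
#     >>> Matrix([[1, 2], [2, 1]]).is_positive_semidefinite   # eigenvalue -1 < 0
#     False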


_doc_positive_definite = \
    r"""Finds out the definiteness of a matrix.

    Explanation
    ===========

    A square real matrix $A$ is:

    - A positive definite matrix if $x^T A x > 0$
      for all non-zero real vectors $x$.
    - A positive semidefinite matrix if $x^T A x \geq 0$
      for all non-zero real vectors $x$.
    - A negative definite matrix if $x^T A x < 0$
      for all non-zero real vectors $x$.
    - A negative semidefinite matrix if $x^T A x \leq 0$
      for all non-zero real vectors $x$.
    - An indefinite matrix if there exist non-zero real vectors
      $x, y$ with $x^T A x > 0 > y^T A y$.

    A square complex matrix $A$ is:

    - A positive definite matrix if $\text{re}(x^H A x) > 0$
      for all non-zero complex vectors $x$.
    - A positive semidefinite matrix if $\text{re}(x^H A x) \geq 0$
      for all non-zero complex vectors $x$.
    - A negative definite matrix if $\text{re}(x^H A x) < 0$
      for all non-zero complex vectors $x$.
    - A negative semidefinite matrix if $\text{re}(x^H A x) \leq 0$
      for all non-zero complex vectors $x$.
    - An indefinite matrix if there exist non-zero complex vectors
      $x, y$ with $\text{re}(x^H A x) > 0 > \text{re}(y^H A y)$.

    A matrix need not be symmetric or hermitian to be positive definite.

    - A real non-symmetric matrix is positive definite if and only if
      $\frac{A + A^T}{2}$ is positive definite.
    - A complex non-hermitian matrix is positive definite if and only if
      $\frac{A + A^H}{2}$ is positive definite.

    This extension applies to all of the definitions above.

    However, for complex cases, you can restrict the definition of
    $\text{re}(x^H A x) > 0$ to $x^H A x > 0$ and require the matrix
    to be hermitian.
    But we do not present this restriction for computation because you
    can check ``M.is_hermitian`` independently and use the same
    procedure.

    Examples
    ========

    An example of symmetric positive definite matrix:

    .. plot::
        :context: reset
        :format: doctest
        :include-source: True

        >>> from sympy import Matrix, symbols
        >>> from sympy.plotting import plot3d
        >>> a, b = symbols('a b')
        >>> x = Matrix([a, b])

        >>> A = Matrix([[1, 0], [0, 1]])
        >>> A.is_positive_definite
        True
        >>> A.is_positive_semidefinite
        True

        >>> p = plot3d((x.T*A*x)[0, 0], (a, -1, 1), (b, -1, 1))

    An example of symmetric positive semidefinite matrix:

    .. plot::
        :context: close-figs
        :format: doctest
        :include-source: True

        >>> A = Matrix([[1, -1], [-1, 1]])
        >>> A.is_positive_definite
        False
        >>> A.is_positive_semidefinite
        True

        >>> p = plot3d((x.T*A*x)[0, 0], (a, -1, 1), (b, -1, 1))

    An example of symmetric negative definite matrix:

    .. plot::
        :context: close-figs
        :format: doctest
        :include-source: True

        >>> A = Matrix([[-1, 0], [0, -1]])
        >>> A.is_negative_definite
        True
        >>> A.is_negative_semidefinite
        True
        >>> A.is_indefinite
        False

        >>> p = plot3d((x.T*A*x)[0, 0], (a, -1, 1), (b, -1, 1))

    An example of symmetric indefinite matrix:

    .. plot::
        :context: close-figs
        :format: doctest
        :include-source: True

        >>> A = Matrix([[1, 2], [2, -1]])
        >>> A.is_indefinite
        True

        >>> p = plot3d((x.T*A*x)[0, 0], (a, -1, 1), (b, -1, 1))

    An example of non-symmetric positive definite matrix:

    .. plot::
        :context: close-figs
        :format: doctest
        :include-source: True

        >>> A = Matrix([[1, 2], [-2, 1]])
        >>> A.is_positive_definite
        True
        >>> A.is_positive_semidefinite
        True

        >>> p = plot3d((x.T*A*x)[0, 0], (a, -1, 1), (b, -1, 1))

    Notes
    =====

    Although some people restrict the definition of positive definite
    matrices to symmetric or hermitian matrices, this restriction does
    not classify all instances of positive definite matrices arising
    from the definition $x^T A x > 0$ or $\text{re}(x^H A x) > 0$.

    For instance, ``Matrix([[1, 2], [-2, 1]])`` presented in
    the example above is a real positive definite matrix
    that is not symmetric.

    However, since the following formula holds:

    .. math::

        \text{re}(x^H A x) > 0 \iff
        \text{re}(x^H \frac{A + A^H}{2} x) > 0

    we can classify all positive definite matrices that may or may not
    be symmetric or hermitian by transforming the matrix to
    $\frac{A + A^T}{2}$ or $\frac{A + A^H}{2}$
    (which is guaranteed to be always real symmetric or complex
    hermitian) and defer most of the study to symmetric or
    hermitian positive definite matrices.

    The existence of a Cholesky decomposition is a different matter:
    even though a non-symmetric or non-hermitian matrix can be positive
    definite, the Cholesky and LDL decompositions do not exist for it,
    because those decompositions require the matrix to be symmetric or
    hermitian.

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Definiteness_of_a_matrix#Eigenvalues

    .. [2] https://mathworld.wolfram.com/PositiveDefiniteMatrix.html

    .. [3] Johnson, C. R. "Positive Definite Matrices." Amer.
        Math. Monthly 77, 259-264 1970.
    """

_is_positive_definite.__doc__ = _doc_positive_definite
_is_positive_semidefinite.__doc__ = _doc_positive_definite
_is_negative_definite.__doc__ = _doc_positive_definite
_is_negative_semidefinite.__doc__ = _doc_positive_definite
_is_indefinite.__doc__ = _doc_positive_definite


def _jordan_form(M, calc_transform=True, *, chop=False):
    """Return $(P, J)$ where $J$ is a Jordan block
    matrix and $P$ is a matrix such that $M = P J P^{-1}$

    Parameters
    ==========

    calc_transform : bool
        If ``False``, then only $J$ is returned.

    chop : bool
        All matrices are converted to exact types when computing
        eigenvalues and eigenvectors. As a result, there may be
        approximation errors. If ``chop==True``, these errors
        will be truncated.

    Examples
    ========

    >>> from sympy import Matrix
    >>> M = Matrix([[ 6, 5, -2, -3], [-3, -1, 3, 3], [ 2, 1, -2, -3], [-1, 1, 5, 5]])
    >>> P, J = M.jordan_form()
    >>> J
    Matrix([
    [2, 1, 0, 0],
    [0, 2, 0, 0],
    [0, 0, 2, 1],
    [0, 0, 0, 2]])

    See Also
    ========

    jordan_block
    """
    if not M.is_square:
        raise NonSquareMatrixError("Only square matrices have Jordan forms")

    mat = M
    has_floats = M.has(Float)

    if has_floats:
        try:
            max_prec = max(term._prec for term in M.values() if isinstance(term, Float))
        except ValueError:
            # if no term in the matrix is explicitly a Float calling max()
            # will throw an error so setting max_prec to default value of 53
            max_prec = 53

        # setting minimum max_dps to 15 to prevent loss of precision in
        # matrix containing non evaluated expressions
        max_dps = max(prec_to_dps(max_prec), 15)

    def restore_floats(*args):
        """If ``has_floats`` is ``True``, cast all ``args`` as
        matrices of floats."""
        if has_floats:
            args = [m.evalf(n=max_dps, chop=chop) for m in args]
        if len(args) == 1:
            return args[0]
        return args

    # cache calculations for some speedup
    mat_cache = {}

    def eig_mat(val, pow):
        """Cache computations of ``(M - val*I)**pow`` for quick
        retrieval"""
        if (val, pow) in mat_cache:
            return mat_cache[(val, pow)]

        if (val, pow - 1) in mat_cache:
            mat_cache[(val, pow)] = mat_cache[(val, pow - 1)].multiply(
                    mat_cache[(val, 1)], dotprodsimp=None)
        else:
            mat_cache[(val, pow)] = (mat - val*M.eye(M.rows)).pow(pow)

        return mat_cache[(val, pow)]

    # helper functions
    def nullity_chain(val, algebraic_multiplicity):
        """Calculate the sequence [0, nullity(E), nullity(E**2), ...]
        until it is constant where ``E = M - val*I``"""
        # mat.rank() is faster than computing the null space,
        # so use the rank-nullity theorem
        cols = M.cols
        ret = [0]
        nullity = cols - eig_mat(val, 1).rank()
        i = 2

        while nullity != ret[-1]:
            ret.append(nullity)

            if nullity == algebraic_multiplicity:
                break

            nullity = cols - eig_mat(val, i).rank()
            i += 1

            # Due to issues like #7146 and #15872, SymPy sometimes
            # gives the wrong rank. In this case, raise an error
            # instead of returning an incorrect matrix
            if nullity < ret[-1] or nullity > algebraic_multiplicity:
                raise MatrixError(
                    "SymPy had encountered an inconsistent "
                    "result while computing Jordan block: "
                    "{}".format(M))

        return ret

    def blocks_from_nullity_chain(d):
        """Return a list of the size of each Jordan block.
        If d_n is the nullity of E**n, then the number
        of Jordan blocks of size n is

            2*d_n - d_(n-1) - d_(n+1)"""
        # d[0] is always the number of columns, so skip past it
        mid = [2*d[n] - d[n - 1] - d[n + 1] for n in range(1, len(d) - 1)]
        # d is assumed to plateau with "d[ len(d) ] == d[-1]", so
        # 2*d_n - d_(n-1) - d_(n+1) == d_n - d_(n-1)
        end = [d[-1] - d[-2]] if len(d) > 1 else [d[0]]

        return mid + end

    def pick_vec(small_basis, big_basis):
        """Picks a vector from big_basis that isn't in
        the subspace spanned by small_basis"""
        if len(small_basis) == 0:
            return big_basis[0]

        for v in big_basis:
            _, pivots = M.hstack(*(small_basis + [v])).echelon_form(
                    with_pivots=True)

            if pivots[-1] == len(small_basis):
                return v

    # roots doesn't like Floats, so replace them with Rationals
    if has_floats:
        from sympy.simplify import nsimplify
        mat = mat.applyfunc(lambda x: nsimplify(x, rational=True))

    # first calculate the jordan block structure
    eigs = mat.eigenvals()

    # Make sure that we have all roots in radical form
    for x in eigs:
        if x.has(CRootOf):
            raise MatrixError(
                "Jordan normal form is not implemented if the matrix has "
                "eigenvalues in CRootOf form")

    # most matrices have distinct eigenvalues
    # and so are diagonalizable. In this case, don't
    # do extra work!
    if len(eigs.keys()) == mat.cols:
        blocks = sorted(eigs.keys(), key=default_sort_key)
        jordan_mat = mat.diag(*blocks)

        if not calc_transform:
            return restore_floats(jordan_mat)

        jordan_basis = [eig_mat(eig, 1).nullspace()[0]
                for eig in blocks]
        basis_mat = mat.hstack(*jordan_basis)

        return restore_floats(basis_mat, jordan_mat)

    block_structure = []

    for eig in sorted(eigs.keys(), key=default_sort_key):
        algebraic_multiplicity = eigs[eig]
        chain = nullity_chain(eig, algebraic_multiplicity)
        block_sizes = blocks_from_nullity_chain(chain)

        # if block_sizes == [a, b, c, ...], then the number of
        # Jordan blocks of size 1 is a, of size 2 is b, etc.
        # create an array that has (eig, block_size) with one
        # entry for each block
        size_nums = [(i+1, num) for i, num in enumerate(block_sizes)]

        # we expect larger Jordan blocks to come earlier
        size_nums.reverse()

        block_structure.extend(
            [(eig, size) for size, num in size_nums for _ in range(num)])

    jordan_form_size = sum(size for eig, size in block_structure)

    if jordan_form_size != M.rows:
        raise MatrixError(
            "SymPy had encountered an inconsistent result while "
            "computing Jordan block: {}".format(M))

    blocks = (mat.jordan_block(size=size, eigenvalue=eig) for eig, size in block_structure)
    jordan_mat = mat.diag(*blocks)

    if not calc_transform:
        return restore_floats(jordan_mat)

    # For each generalized eigenspace, calculate a basis.
    # We start by looking for a vector in null( (A - eig*I)**n )
    # which isn't in null( (A - eig*I)**(n-1) ) where n is
    # the size of the Jordan block
    #
    # Ideally we'd just loop through block_structure and
    # compute each generalized eigenspace. However, this
    # causes a lot of unneeded computation. Instead, we
    # go through the eigenvalues separately, since we know
    # their generalized eigenspaces must have bases that
    # are linearly independent.
    jordan_basis = []

    for eig in sorted(eigs.keys(), key=default_sort_key):
        eig_basis = []

        for block_eig, size in block_structure:
            if block_eig != eig:
                continue

            null_big = (eig_mat(eig, size)).nullspace()
            null_small = (eig_mat(eig, size - 1)).nullspace()

            # we want to pick something that is in the big basis
            # and not the small, but also something that is independent
            # of any other generalized eigenvectors from a different
            # generalized eigenspace sharing the same eigenvalue.
            vec = pick_vec(null_small + eig_basis, null_big)
            new_vecs = [eig_mat(eig, i).multiply(vec, dotprodsimp=None)
                    for i in range(size)]

            eig_basis.extend(new_vecs)
            jordan_basis.extend(reversed(new_vecs))

    basis_mat = mat.hstack(*jordan_basis)

    return restore_floats(basis_mat, jordan_mat)


def _left_eigenvects(M, **flags):
    """Returns left eigenvectors and eigenvalues.

    This function returns the list of triples (eigenval, multiplicity,
    basis) for the left eigenvectors. Options are the same as for
    eigenvects(), i.e. the ``**flags`` arguments get passed directly to
    eigenvects().

    Examples
    ========

    >>> from sympy import Matrix
    >>> M = Matrix([[0, 1, 1], [1, 0, 0], [1, 1, 1]])
    >>> M.eigenvects()
    [(-1, 1, [Matrix([
    [-1],
    [ 1],
    [ 0]])]), (0, 1, [Matrix([
    [ 0],
    [-1],
    [ 1]])]), (2, 1, [Matrix([
    [2/3],
    [1/3],
    [ 1]])])]
    >>> M.left_eigenvects()
    [(-1, 1, [Matrix([[-2, 1, 1]])]), (0, 1, [Matrix([[-1, -1, 1]])]), (2,
    1, [Matrix([[1, 1, 1]])])]
    """
    eigs = M.transpose().eigenvects(**flags)

    return [(val, mult, [l.transpose() for l in basis]) for val, mult, basis in eigs]


def _singular_values(M):
    """Compute the singular values of a Matrix

    Examples
    ========

    >>> from sympy import Matrix, Symbol
    >>> x = Symbol('x', real=True)
    >>> M = Matrix([[0, 1, 0], [0, x, 0], [-1, 0, 0]])
    >>> M.singular_values()
    [sqrt(x**2 + 1), 1, 0]

    See Also
    ========

    condition_number
    """
    if M.rows >= M.cols:
        valmultpairs = M.H.multiply(M).eigenvals()
    else:
        valmultpairs = M.multiply(M.H).eigenvals()

    # Expands result from eigenvals into a simple list
    vals = []
    for k, v in valmultpairs.items():
        vals += [sqrt(k)] * v  # dangerous! same k in several spots!

    # Pad with zeros if singular values are computed in reverse way,
    # to give consistent format.
    if len(vals) < M.cols:
        vals += [M.zero] * (M.cols - len(vals))

    # sort them in descending order
    vals.sort(reverse=True, key=default_sort_key)

    return vals