linalg.py 87 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
7277827792780278127822783278427852786278727882789279027912792279327942795
  1. """Lite version of scipy.linalg.
  2. Notes
  3. -----
  4. This module is a lite version of the linalg.py module in SciPy which
  5. contains high-level Python interface to the LAPACK library. The lite
  6. version only accesses the following LAPACK functions: dgesv, zgesv,
  7. dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,
  8. zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.
  9. """
  10. __all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
  11. 'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',
  12. 'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
  13. 'LinAlgError', 'multi_dot']
  14. import functools
  15. import operator
  16. import warnings
  17. from numpy.core import (
  18. array, asarray, zeros, empty, empty_like, intc, single, double,
  19. csingle, cdouble, inexact, complexfloating, newaxis, all, Inf, dot,
  20. add, multiply, sqrt, sum, isfinite,
  21. finfo, errstate, geterrobj, moveaxis, amin, amax, product, abs,
  22. atleast_2d, intp, asanyarray, object_, matmul,
  23. swapaxes, divide, count_nonzero, isnan, sign, argsort, sort,
  24. reciprocal
  25. )
  26. from numpy.core.multiarray import normalize_axis_index
  27. from numpy.core.overrides import set_module
  28. from numpy.core import overrides
  29. from numpy.lib.twodim_base import triu, eye
  30. from numpy.linalg import _umath_linalg
  31. array_function_dispatch = functools.partial(
  32. overrides.array_function_dispatch, module='numpy.linalg')
  33. fortran_int = intc
@set_module('numpy.linalg')
class LinAlgError(Exception):
    """
    Generic Python-exception-derived object raised by linalg functions.

    General purpose exception class, derived from Python's exception.Exception
    class, programmatically raised in linalg functions when a Linear
    Algebra-related condition would prevent further correct execution of the
    function.

    Parameters
    ----------
    None

    Examples
    --------
    >>> from numpy import linalg as LA
    >>> LA.inv(np.zeros((2,2)))
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
      File "...linalg.py", line 350,
        in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
      File "...linalg.py", line 249,
        in solve
        raise LinAlgError('Singular matrix')
    numpy.linalg.LinAlgError: Singular matrix
    """
def _determine_error_states():
    # Build the template "extobj" used to configure the _umath_linalg
    # gufuncs: keep the current ufunc error-buffer size, and capture the
    # error mask that routes 'invalid' results to a Python callback (so the
    # gufuncs can raise LinAlgError) while silencing over/divide/under.
    errobj = geterrobj()
    bufsize = errobj[0]

    with errstate(invalid='call', over='ignore',
                  divide='ignore', under='ignore'):
        invalid_call_errmask = geterrobj()[1]

    # Third slot is the callback; filled in per-call by
    # get_linalg_error_extobj.
    return [bufsize, invalid_call_errmask, None]

# Dealing with errors in _umath_linalg
_linalg_error_extobj = _determine_error_states()
del _determine_error_states
def _raise_linalgerror_singular(err, flag):
    # Error callback (installed via get_linalg_error_extobj) invoked by the
    # _umath_linalg gufuncs when a matrix is singular.
    raise LinAlgError("Singular matrix")
def _raise_linalgerror_nonposdef(err, flag):
    # Error callback for Cholesky: the input was not positive definite.
    raise LinAlgError("Matrix is not positive definite")
def _raise_linalgerror_eigenvalues_nonconvergence(err, flag):
    # Error callback for the eigenvalue gufuncs: iteration did not converge.
    raise LinAlgError("Eigenvalues did not converge")
def _raise_linalgerror_svd_nonconvergence(err, flag):
    # Error callback for the SVD gufuncs: iteration did not converge.
    raise LinAlgError("SVD did not converge")
def _raise_linalgerror_lstsq(err, flag):
    # Error callback for lstsq: the underlying SVD failed to converge.
    raise LinAlgError("SVD did not converge in Linear Least Squares")
def _raise_linalgerror_qr(err, flag):
    # Error callback for the QR gufuncs: LAPACK reported a bad argument.
    raise LinAlgError("Incorrect argument found while performing "
                      "QR factorization")
  81. def get_linalg_error_extobj(callback):
  82. extobj = list(_linalg_error_extobj) # make a copy
  83. extobj[2] = callback
  84. return extobj
  85. def _makearray(a):
  86. new = asarray(a)
  87. wrap = getattr(a, "__array_prepare__", new.__array_wrap__)
  88. return new, wrap
  89. def isComplexType(t):
  90. return issubclass(t, complexfloating)
# Map each supported inexact scalar type to its real counterpart
# (csingle -> single, cdouble -> double); used by _realType/_commonType
# to decide output precision.
_real_types_map = {single : single,
                   double : double,
                   csingle : single,
                   cdouble : double}

# Map each supported inexact scalar type to its complex counterpart
# (single -> csingle, double -> cdouble); used by _complexType/_commonType.
_complex_types_map = {single : csingle,
                      double : cdouble,
                      csingle : csingle,
                      cdouble : cdouble}
  99. def _realType(t, default=double):
  100. return _real_types_map.get(t, default)
  101. def _complexType(t, default=cdouble):
  102. return _complex_types_map.get(t, default)
  103. def _commonType(*arrays):
  104. # in lite version, use higher precision (always double or cdouble)
  105. result_type = single
  106. is_complex = False
  107. for a in arrays:
  108. if issubclass(a.dtype.type, inexact):
  109. if isComplexType(a.dtype.type):
  110. is_complex = True
  111. rt = _realType(a.dtype.type, default=None)
  112. if rt is None:
  113. # unsupported inexact scalar
  114. raise TypeError("array type %s is unsupported in linalg" %
  115. (a.dtype.name,))
  116. else:
  117. rt = double
  118. if rt is double:
  119. result_type = double
  120. if is_complex:
  121. t = cdouble
  122. result_type = _complex_types_map[result_type]
  123. else:
  124. t = double
  125. return t, result_type
  126. def _to_native_byte_order(*arrays):
  127. ret = []
  128. for arr in arrays:
  129. if arr.dtype.byteorder not in ('=', '|'):
  130. ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('=')))
  131. else:
  132. ret.append(arr)
  133. if len(ret) == 1:
  134. return ret[0]
  135. else:
  136. return ret
  137. def _assert_2d(*arrays):
  138. for a in arrays:
  139. if a.ndim != 2:
  140. raise LinAlgError('%d-dimensional array given. Array must be '
  141. 'two-dimensional' % a.ndim)
  142. def _assert_stacked_2d(*arrays):
  143. for a in arrays:
  144. if a.ndim < 2:
  145. raise LinAlgError('%d-dimensional array given. Array must be '
  146. 'at least two-dimensional' % a.ndim)
  147. def _assert_stacked_square(*arrays):
  148. for a in arrays:
  149. m, n = a.shape[-2:]
  150. if m != n:
  151. raise LinAlgError('Last 2 dimensions of the array must be square')
  152. def _assert_finite(*arrays):
  153. for a in arrays:
  154. if not isfinite(a).all():
  155. raise LinAlgError("Array must not contain infs or NaNs")
  156. def _is_empty_2d(arr):
  157. # check size first for efficiency
  158. return arr.size == 0 and product(arr.shape[-2:]) == 0
  159. def transpose(a):
  160. """
  161. Transpose each matrix in a stack of matrices.
  162. Unlike np.transpose, this only swaps the last two axes, rather than all of
  163. them
  164. Parameters
  165. ----------
  166. a : (...,M,N) array_like
  167. Returns
  168. -------
  169. aT : (...,N,M) ndarray
  170. """
  171. return swapaxes(a, -1, -2)
  172. # Linear equations
  173. def _tensorsolve_dispatcher(a, b, axes=None):
  174. return (a, b)
  175. @array_function_dispatch(_tensorsolve_dispatcher)
  176. def tensorsolve(a, b, axes=None):
  177. """
  178. Solve the tensor equation ``a x = b`` for x.
  179. It is assumed that all indices of `x` are summed over in the product,
  180. together with the rightmost indices of `a`, as is done in, for example,
  181. ``tensordot(a, x, axes=x.ndim)``.
  182. Parameters
  183. ----------
  184. a : array_like
  185. Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals
  186. the shape of that sub-tensor of `a` consisting of the appropriate
  187. number of its rightmost indices, and must be such that
  188. ``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be
  189. 'square').
  190. b : array_like
  191. Right-hand tensor, which can be of any shape.
  192. axes : tuple of ints, optional
  193. Axes in `a` to reorder to the right, before inversion.
  194. If None (default), no reordering is done.
  195. Returns
  196. -------
  197. x : ndarray, shape Q
  198. Raises
  199. ------
  200. LinAlgError
  201. If `a` is singular or not 'square' (in the above sense).
  202. See Also
  203. --------
  204. numpy.tensordot, tensorinv, numpy.einsum
  205. Examples
  206. --------
  207. >>> a = np.eye(2*3*4)
  208. >>> a.shape = (2*3, 4, 2, 3, 4)
  209. >>> b = np.random.randn(2*3, 4)
  210. >>> x = np.linalg.tensorsolve(a, b)
  211. >>> x.shape
  212. (2, 3, 4)
  213. >>> np.allclose(np.tensordot(a, x, axes=3), b)
  214. True
  215. """
  216. a, wrap = _makearray(a)
  217. b = asarray(b)
  218. an = a.ndim
  219. if axes is not None:
  220. allaxes = list(range(0, an))
  221. for k in axes:
  222. allaxes.remove(k)
  223. allaxes.insert(an, k)
  224. a = a.transpose(allaxes)
  225. oldshape = a.shape[-(an-b.ndim):]
  226. prod = 1
  227. for k in oldshape:
  228. prod *= k
  229. if a.size != prod ** 2:
  230. raise LinAlgError(
  231. "Input arrays must satisfy the requirement \
  232. prod(a.shape[b.ndim:]) == prod(a.shape[:b.ndim])"
  233. )
  234. a = a.reshape(prod, prod)
  235. b = b.ravel()
  236. res = wrap(solve(a, b))
  237. res.shape = oldshape
  238. return res
  239. def _solve_dispatcher(a, b):
  240. return (a, b)
  241. @array_function_dispatch(_solve_dispatcher)
  242. def solve(a, b):
  243. """
  244. Solve a linear matrix equation, or system of linear scalar equations.
  245. Computes the "exact" solution, `x`, of the well-determined, i.e., full
  246. rank, linear matrix equation `ax = b`.
  247. Parameters
  248. ----------
  249. a : (..., M, M) array_like
  250. Coefficient matrix.
  251. b : {(..., M,), (..., M, K)}, array_like
  252. Ordinate or "dependent variable" values.
  253. Returns
  254. -------
  255. x : {(..., M,), (..., M, K)} ndarray
  256. Solution to the system a x = b. Returned shape is identical to `b`.
  257. Raises
  258. ------
  259. LinAlgError
  260. If `a` is singular or not square.
  261. See Also
  262. --------
  263. scipy.linalg.solve : Similar function in SciPy.
  264. Notes
  265. -----
  266. .. versionadded:: 1.8.0
  267. Broadcasting rules apply, see the `numpy.linalg` documentation for
  268. details.
  269. The solutions are computed using LAPACK routine ``_gesv``.
  270. `a` must be square and of full-rank, i.e., all rows (or, equivalently,
  271. columns) must be linearly independent; if either is not true, use
  272. `lstsq` for the least-squares best "solution" of the
  273. system/equation.
  274. References
  275. ----------
  276. .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
  277. FL, Academic Press, Inc., 1980, pg. 22.
  278. Examples
  279. --------
  280. Solve the system of equations ``x0 + 2 * x1 = 1`` and ``3 * x0 + 5 * x1 = 2``:
  281. >>> a = np.array([[1, 2], [3, 5]])
  282. >>> b = np.array([1, 2])
  283. >>> x = np.linalg.solve(a, b)
  284. >>> x
  285. array([-1., 1.])
  286. Check that the solution is correct:
  287. >>> np.allclose(np.dot(a, x), b)
  288. True
  289. """
  290. a, _ = _makearray(a)
  291. _assert_stacked_2d(a)
  292. _assert_stacked_square(a)
  293. b, wrap = _makearray(b)
  294. t, result_t = _commonType(a, b)
  295. # We use the b = (..., M,) logic, only if the number of extra dimensions
  296. # match exactly
  297. if b.ndim == a.ndim - 1:
  298. gufunc = _umath_linalg.solve1
  299. else:
  300. gufunc = _umath_linalg.solve
  301. signature = 'DD->D' if isComplexType(t) else 'dd->d'
  302. extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
  303. r = gufunc(a, b, signature=signature, extobj=extobj)
  304. return wrap(r.astype(result_t, copy=False))
  305. def _tensorinv_dispatcher(a, ind=None):
  306. return (a,)
  307. @array_function_dispatch(_tensorinv_dispatcher)
  308. def tensorinv(a, ind=2):
  309. """
  310. Compute the 'inverse' of an N-dimensional array.
  311. The result is an inverse for `a` relative to the tensordot operation
  312. ``tensordot(a, b, ind)``, i. e., up to floating-point accuracy,
  313. ``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the
  314. tensordot operation.
  315. Parameters
  316. ----------
  317. a : array_like
  318. Tensor to 'invert'. Its shape must be 'square', i. e.,
  319. ``prod(a.shape[:ind]) == prod(a.shape[ind:])``.
  320. ind : int, optional
  321. Number of first indices that are involved in the inverse sum.
  322. Must be a positive integer, default is 2.
  323. Returns
  324. -------
  325. b : ndarray
  326. `a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``.
  327. Raises
  328. ------
  329. LinAlgError
  330. If `a` is singular or not 'square' (in the above sense).
  331. See Also
  332. --------
  333. numpy.tensordot, tensorsolve
  334. Examples
  335. --------
  336. >>> a = np.eye(4*6)
  337. >>> a.shape = (4, 6, 8, 3)
  338. >>> ainv = np.linalg.tensorinv(a, ind=2)
  339. >>> ainv.shape
  340. (8, 3, 4, 6)
  341. >>> b = np.random.randn(4, 6)
  342. >>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b))
  343. True
  344. >>> a = np.eye(4*6)
  345. >>> a.shape = (24, 8, 3)
  346. >>> ainv = np.linalg.tensorinv(a, ind=1)
  347. >>> ainv.shape
  348. (8, 3, 24)
  349. >>> b = np.random.randn(24)
  350. >>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
  351. True
  352. """
  353. a = asarray(a)
  354. oldshape = a.shape
  355. prod = 1
  356. if ind > 0:
  357. invshape = oldshape[ind:] + oldshape[:ind]
  358. for k in oldshape[ind:]:
  359. prod *= k
  360. else:
  361. raise ValueError("Invalid ind argument.")
  362. a = a.reshape(prod, -1)
  363. ia = inv(a)
  364. return ia.reshape(*invshape)
  365. # Matrix inversion
  366. def _unary_dispatcher(a):
  367. return (a,)
  368. @array_function_dispatch(_unary_dispatcher)
  369. def inv(a):
  370. """
  371. Compute the (multiplicative) inverse of a matrix.
  372. Given a square matrix `a`, return the matrix `ainv` satisfying
  373. ``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.
  374. Parameters
  375. ----------
  376. a : (..., M, M) array_like
  377. Matrix to be inverted.
  378. Returns
  379. -------
  380. ainv : (..., M, M) ndarray or matrix
  381. (Multiplicative) inverse of the matrix `a`.
  382. Raises
  383. ------
  384. LinAlgError
  385. If `a` is not square or inversion fails.
  386. See Also
  387. --------
  388. scipy.linalg.inv : Similar function in SciPy.
  389. Notes
  390. -----
  391. .. versionadded:: 1.8.0
  392. Broadcasting rules apply, see the `numpy.linalg` documentation for
  393. details.
  394. Examples
  395. --------
  396. >>> from numpy.linalg import inv
  397. >>> a = np.array([[1., 2.], [3., 4.]])
  398. >>> ainv = inv(a)
  399. >>> np.allclose(np.dot(a, ainv), np.eye(2))
  400. True
  401. >>> np.allclose(np.dot(ainv, a), np.eye(2))
  402. True
  403. If a is a matrix object, then the return value is a matrix as well:
  404. >>> ainv = inv(np.matrix(a))
  405. >>> ainv
  406. matrix([[-2. , 1. ],
  407. [ 1.5, -0.5]])
  408. Inverses of several matrices can be computed at once:
  409. >>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]])
  410. >>> inv(a)
  411. array([[[-2. , 1. ],
  412. [ 1.5 , -0.5 ]],
  413. [[-1.25, 0.75],
  414. [ 0.75, -0.25]]])
  415. """
  416. a, wrap = _makearray(a)
  417. _assert_stacked_2d(a)
  418. _assert_stacked_square(a)
  419. t, result_t = _commonType(a)
  420. signature = 'D->D' if isComplexType(t) else 'd->d'
  421. extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
  422. ainv = _umath_linalg.inv(a, signature=signature, extobj=extobj)
  423. return wrap(ainv.astype(result_t, copy=False))
  424. def _matrix_power_dispatcher(a, n):
  425. return (a,)
  426. @array_function_dispatch(_matrix_power_dispatcher)
  427. def matrix_power(a, n):
  428. """
  429. Raise a square matrix to the (integer) power `n`.
  430. For positive integers `n`, the power is computed by repeated matrix
  431. squarings and matrix multiplications. If ``n == 0``, the identity matrix
  432. of the same shape as M is returned. If ``n < 0``, the inverse
  433. is computed and then raised to the ``abs(n)``.
  434. .. note:: Stacks of object matrices are not currently supported.
  435. Parameters
  436. ----------
  437. a : (..., M, M) array_like
  438. Matrix to be "powered".
  439. n : int
  440. The exponent can be any integer or long integer, positive,
  441. negative, or zero.
  442. Returns
  443. -------
  444. a**n : (..., M, M) ndarray or matrix object
  445. The return value is the same shape and type as `M`;
  446. if the exponent is positive or zero then the type of the
  447. elements is the same as those of `M`. If the exponent is
  448. negative the elements are floating-point.
  449. Raises
  450. ------
  451. LinAlgError
  452. For matrices that are not square or that (for negative powers) cannot
  453. be inverted numerically.
  454. Examples
  455. --------
  456. >>> from numpy.linalg import matrix_power
  457. >>> i = np.array([[0, 1], [-1, 0]]) # matrix equiv. of the imaginary unit
  458. >>> matrix_power(i, 3) # should = -i
  459. array([[ 0, -1],
  460. [ 1, 0]])
  461. >>> matrix_power(i, 0)
  462. array([[1, 0],
  463. [0, 1]])
  464. >>> matrix_power(i, -3) # should = 1/(-i) = i, but w/ f.p. elements
  465. array([[ 0., 1.],
  466. [-1., 0.]])
  467. Somewhat more sophisticated example
  468. >>> q = np.zeros((4, 4))
  469. >>> q[0:2, 0:2] = -i
  470. >>> q[2:4, 2:4] = i
  471. >>> q # one of the three quaternion units not equal to 1
  472. array([[ 0., -1., 0., 0.],
  473. [ 1., 0., 0., 0.],
  474. [ 0., 0., 0., 1.],
  475. [ 0., 0., -1., 0.]])
  476. >>> matrix_power(q, 2) # = -np.eye(4)
  477. array([[-1., 0., 0., 0.],
  478. [ 0., -1., 0., 0.],
  479. [ 0., 0., -1., 0.],
  480. [ 0., 0., 0., -1.]])
  481. """
  482. a = asanyarray(a)
  483. _assert_stacked_2d(a)
  484. _assert_stacked_square(a)
  485. try:
  486. n = operator.index(n)
  487. except TypeError as e:
  488. raise TypeError("exponent must be an integer") from e
  489. # Fall back on dot for object arrays. Object arrays are not supported by
  490. # the current implementation of matmul using einsum
  491. if a.dtype != object:
  492. fmatmul = matmul
  493. elif a.ndim == 2:
  494. fmatmul = dot
  495. else:
  496. raise NotImplementedError(
  497. "matrix_power not supported for stacks of object arrays")
  498. if n == 0:
  499. a = empty_like(a)
  500. a[...] = eye(a.shape[-2], dtype=a.dtype)
  501. return a
  502. elif n < 0:
  503. a = inv(a)
  504. n = abs(n)
  505. # short-cuts.
  506. if n == 1:
  507. return a
  508. elif n == 2:
  509. return fmatmul(a, a)
  510. elif n == 3:
  511. return fmatmul(fmatmul(a, a), a)
  512. # Use binary decomposition to reduce the number of matrix multiplications.
  513. # Here, we iterate over the bits of n, from LSB to MSB, raise `a` to
  514. # increasing powers of 2, and multiply into the result as needed.
  515. z = result = None
  516. while n > 0:
  517. z = a if z is None else fmatmul(z, z)
  518. n, bit = divmod(n, 2)
  519. if bit:
  520. result = z if result is None else fmatmul(result, z)
  521. return result
  522. # Cholesky decomposition
  523. @array_function_dispatch(_unary_dispatcher)
  524. def cholesky(a):
  525. """
  526. Cholesky decomposition.
  527. Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`,
  528. where `L` is lower-triangular and .H is the conjugate transpose operator
  529. (which is the ordinary transpose if `a` is real-valued). `a` must be
  530. Hermitian (symmetric if real-valued) and positive-definite. No
  531. checking is performed to verify whether `a` is Hermitian or not.
  532. In addition, only the lower-triangular and diagonal elements of `a`
  533. are used. Only `L` is actually returned.
  534. Parameters
  535. ----------
  536. a : (..., M, M) array_like
  537. Hermitian (symmetric if all elements are real), positive-definite
  538. input matrix.
  539. Returns
  540. -------
  541. L : (..., M, M) array_like
  542. Lower-triangular Cholesky factor of `a`. Returns a matrix object if
  543. `a` is a matrix object.
  544. Raises
  545. ------
  546. LinAlgError
  547. If the decomposition fails, for example, if `a` is not
  548. positive-definite.
  549. See Also
  550. --------
  551. scipy.linalg.cholesky : Similar function in SciPy.
  552. scipy.linalg.cholesky_banded : Cholesky decompose a banded Hermitian
  553. positive-definite matrix.
  554. scipy.linalg.cho_factor : Cholesky decomposition of a matrix, to use in
  555. `scipy.linalg.cho_solve`.
  556. Notes
  557. -----
  558. .. versionadded:: 1.8.0
  559. Broadcasting rules apply, see the `numpy.linalg` documentation for
  560. details.
  561. The Cholesky decomposition is often used as a fast way of solving
  562. .. math:: A \\mathbf{x} = \\mathbf{b}
  563. (when `A` is both Hermitian/symmetric and positive-definite).
  564. First, we solve for :math:`\\mathbf{y}` in
  565. .. math:: L \\mathbf{y} = \\mathbf{b},
  566. and then for :math:`\\mathbf{x}` in
  567. .. math:: L.H \\mathbf{x} = \\mathbf{y}.
  568. Examples
  569. --------
  570. >>> A = np.array([[1,-2j],[2j,5]])
  571. >>> A
  572. array([[ 1.+0.j, -0.-2.j],
  573. [ 0.+2.j, 5.+0.j]])
  574. >>> L = np.linalg.cholesky(A)
  575. >>> L
  576. array([[1.+0.j, 0.+0.j],
  577. [0.+2.j, 1.+0.j]])
  578. >>> np.dot(L, L.T.conj()) # verify that L * L.H = A
  579. array([[1.+0.j, 0.-2.j],
  580. [0.+2.j, 5.+0.j]])
  581. >>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?
  582. >>> np.linalg.cholesky(A) # an ndarray object is returned
  583. array([[1.+0.j, 0.+0.j],
  584. [0.+2.j, 1.+0.j]])
  585. >>> # But a matrix object is returned if A is a matrix object
  586. >>> np.linalg.cholesky(np.matrix(A))
  587. matrix([[ 1.+0.j, 0.+0.j],
  588. [ 0.+2.j, 1.+0.j]])
  589. """
  590. extobj = get_linalg_error_extobj(_raise_linalgerror_nonposdef)
  591. gufunc = _umath_linalg.cholesky_lo
  592. a, wrap = _makearray(a)
  593. _assert_stacked_2d(a)
  594. _assert_stacked_square(a)
  595. t, result_t = _commonType(a)
  596. signature = 'D->D' if isComplexType(t) else 'd->d'
  597. r = gufunc(a, signature=signature, extobj=extobj)
  598. return wrap(r.astype(result_t, copy=False))
  599. # QR decomposition
  600. def _qr_dispatcher(a, mode=None):
  601. return (a,)
@array_function_dispatch(_qr_dispatcher)
def qr(a, mode='reduced'):
    """
    Compute the qr factorization of a matrix.

    Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
    upper-triangular.

    Parameters
    ----------
    a : array_like, shape (..., M, N)
        An array-like object with the dimensionality of at least 2.
    mode : {'reduced', 'complete', 'r', 'raw'}, optional
        If K = min(M, N), then

        * 'reduced'  : returns q, r with dimensions
                       (..., M, K), (..., K, N) (default)
        * 'complete' : returns q, r with dimensions (..., M, M), (..., M, N)
        * 'r'        : returns r only with dimensions (..., K, N)
        * 'raw'      : returns h, tau with dimensions (..., N, M), (..., K,)

        The options 'reduced', 'complete, and 'raw' are new in numpy 1.8,
        see the notes for more information. The default is 'reduced', and to
        maintain backward compatibility with earlier versions of numpy both
        it and the old default 'full' can be omitted. Note that array h
        returned in 'raw' mode is transposed for calling Fortran. The
        'economic' mode is deprecated.  The modes 'full' and 'economic' may
        be passed using only the first letter for backwards compatibility,
        but all others must be spelled out. See the Notes for more
        explanation.

    Returns
    -------
    q : ndarray of float or complex, optional
        A matrix with orthonormal columns. When mode = 'complete' the
        result is an orthogonal/unitary matrix depending on whether or not
        a is real/complex. The determinant may be either +/- 1 in that
        case. In case the number of dimensions in the input array is
        greater than 2 then a stack of the matrices with above properties
        is returned.
    r : ndarray of float or complex, optional
        The upper-triangular matrix or a stack of upper-triangular
        matrices if the number of dimensions in the input array is greater
        than 2.
    (h, tau) : ndarrays of np.double or np.cdouble, optional
        The array h contains the Householder reflectors that generate q
        along with r. The tau array contains scaling factors for the
        reflectors. In the deprecated 'economic' mode only h is returned.

    Raises
    ------
    LinAlgError
        If factoring fails.

    See Also
    --------
    scipy.linalg.qr : Similar function in SciPy.
    scipy.linalg.rq : Compute RQ decomposition of a matrix.

    Notes
    -----
    This is an interface to the LAPACK routines ``dgeqrf``, ``zgeqrf``,
    ``dorgqr``, and ``zungqr``.

    For more information on the qr factorization, see for example:
    https://en.wikipedia.org/wiki/QR_factorization

    Subclasses of `ndarray` are preserved except for the 'raw' mode. So if
    `a` is of type `matrix`, all the return values will be matrices too.

    New 'reduced', 'complete', and 'raw' options for mode were added in
    NumPy 1.8.0 and the old option 'full' was made an alias of 'reduced'.  In
    addition the options 'full' and 'economic' were deprecated.  Because
    'full' was the previous default and 'reduced' is the new default,
    backward compatibility can be maintained by letting `mode` default.
    The 'raw' option was added so that LAPACK routines that can multiply
    arrays by q using the Householder reflectors can be used. Note that in
    this case the returned arrays are of type np.double or np.cdouble and
    the h array is transposed to be FORTRAN compatible.  No routines using
    the 'raw' return are currently exposed by numpy, but some are available
    in lapack_lite and just await the necessary work.

    Examples
    --------
    >>> a = np.random.randn(9, 6)
    >>> q, r = np.linalg.qr(a)
    >>> np.allclose(a, np.dot(q, r))  # a does equal qr
    True
    >>> r2 = np.linalg.qr(a, mode='r')
    >>> np.allclose(r, r2)  # mode='r' returns the same r as mode='full'
    True
    >>> a = np.random.normal(size=(3, 2, 2)) # Stack of 2 x 2 matrices as input
    >>> q, r = np.linalg.qr(a)
    >>> q.shape
    (3, 2, 2)
    >>> r.shape
    (3, 2, 2)
    >>> np.allclose(a, np.matmul(q, r))
    True

    Example illustrating a common use of `qr`: solving of least squares
    problems

    What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for
    the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points
    and you'll see that it should be y0 = 0, m = 1.)  The answer is provided
    by solving the over-determined matrix equation ``Ax = b``, where::

      A = array([[0, 1], [1, 1], [1, 1], [2, 1]])
      x = array([[y0], [m]])
      b = array([[1], [0], [2], [1]])

    If A = qr such that q is orthonormal (which is always possible via
    Gram-Schmidt), then ``x = inv(r) * (q.T) * b``.  (In numpy practice,
    however, we simply use `lstsq`.)

    >>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]])
    >>> A
    array([[0, 1],
           [1, 1],
           [1, 1],
           [2, 1]])
    >>> b = np.array([1, 2, 2, 3])
    >>> q, r = np.linalg.qr(A)
    >>> p = np.dot(q.T, b)
    >>> np.dot(np.linalg.inv(r), p)
    array([  1.,   1.])

    """
    # Normalize/validate `mode`, translating the deprecated single-letter
    # and legacy spellings ('f'/'full' -> 'reduced', 'e'/'economic').
    if mode not in ('reduced', 'complete', 'r', 'raw'):
        if mode in ('f', 'full'):
            # 2013-04-01, 1.8
            msg = "".join((
                "The 'full' option is deprecated in favor of 'reduced'.\n",
                "For backward compatibility let mode default."))
            warnings.warn(msg, DeprecationWarning, stacklevel=3)
            mode = 'reduced'
        elif mode in ('e', 'economic'):
            # 2013-04-01, 1.8
            msg = "The 'economic' option is deprecated."
            warnings.warn(msg, DeprecationWarning, stacklevel=3)
            mode = 'economic'
        else:
            raise ValueError(f"Unrecognized mode '{mode}'")

    a, wrap = _makearray(a)
    _assert_stacked_2d(a)
    m, n = a.shape[-2:]
    t, result_t = _commonType(a)
    # Work on a native-byte-order copy: the gufunc factors `a` in place.
    a = a.astype(t, copy=True)
    a = _to_native_byte_order(a)
    mn = min(m, n)

    if m <= n:
        gufunc = _umath_linalg.qr_r_raw_m
    else:
        gufunc = _umath_linalg.qr_r_raw_n

    signature = 'D->D' if isComplexType(t) else 'd->d'
    extobj = get_linalg_error_extobj(_raise_linalgerror_qr)
    # After this call `a` holds the Householder reflectors (with R in its
    # upper triangle) and `tau` the reflector scaling factors.
    tau = gufunc(a, signature=signature, extobj=extobj)

    # handle modes that don't return q
    if mode == 'r':
        r = triu(a[..., :mn, :])
        r = r.astype(result_t, copy=False)
        return wrap(r)

    if mode == 'raw':
        # Transposed so the result is FORTRAN-compatible (see docstring).
        q = transpose(a)
        q = q.astype(result_t, copy=False)
        tau = tau.astype(result_t, copy=False)
        return wrap(q), tau

    if mode == 'economic':
        a = a.astype(result_t, copy=False)
        return wrap(a)

    # mc is the number of columns in the resulting q
    # matrix. If the mode is complete then it is
    # same as number of rows, and if the mode is reduced,
    # then it is the minimum of number of rows and columns.
    if mode == 'complete' and m > n:
        mc = m
        gufunc = _umath_linalg.qr_complete
    else:
        mc = mn
        gufunc = _umath_linalg.qr_reduced

    signature = 'DD->D' if isComplexType(t) else 'dd->d'
    extobj = get_linalg_error_extobj(_raise_linalgerror_qr)
    # Assemble q explicitly from the reflectors and tau.
    q = gufunc(a, tau, signature=signature, extobj=extobj)
    r = triu(a[..., :mc, :])

    q = q.astype(result_t, copy=False)
    r = r.astype(result_t, copy=False)

    return wrap(q), wrap(r)
  772. # Eigenvalues
@array_function_dispatch(_unary_dispatcher)
def eigvals(a):
    """
    Compute the eigenvalues of a general matrix.

    Main difference between `eigvals` and `eig`: the eigenvectors aren't
    returned.

    Parameters
    ----------
    a : (..., M, M) array_like
        A complex- or real-valued matrix whose eigenvalues will be computed.

    Returns
    -------
    w : (..., M,) ndarray
        The eigenvalues, each repeated according to its multiplicity.
        They are not necessarily ordered, nor are they necessarily
        real for real matrices.

    Raises
    ------
    LinAlgError
        If the eigenvalue computation does not converge.

    See Also
    --------
    eig : eigenvalues and right eigenvectors of general arrays
    eigvalsh : eigenvalues of real symmetric or complex Hermitian
               (conjugate symmetric) arrays.
    eigh : eigenvalues and eigenvectors of real symmetric or complex
           Hermitian (conjugate symmetric) arrays.
    scipy.linalg.eigvals : Similar function in SciPy.

    Notes
    -----
    .. versionadded:: 1.8.0

    Broadcasting rules apply, see the `numpy.linalg` documentation for
    details.

    This is implemented using the ``_geev`` LAPACK routines which compute
    the eigenvalues and eigenvectors of general square arrays.

    Examples
    --------
    Illustration, using the fact that the eigenvalues of a diagonal matrix
    are its diagonal elements, that multiplying a matrix on the left
    by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose
    of `Q`), preserves the eigenvalues of the "middle" matrix.  In other words,
    if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as
    ``A``:

    >>> from numpy import linalg as LA
    >>> x = np.random.random()
    >>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]])
    >>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :])
    (1.0, 1.0, 0.0)

    Now multiply a diagonal matrix by ``Q`` on one side and by ``Q.T`` on the other:

    >>> D = np.diag((-1,1))
    >>> LA.eigvals(D)
    array([-1.,  1.])
    >>> A = np.dot(Q, D)
    >>> A = np.dot(A, Q.T)
    >>> LA.eigvals(A)
    array([ 1., -1.]) # random

    """
    a, wrap = _makearray(a)
    _assert_stacked_2d(a)
    _assert_stacked_square(a)
    # _geev does not converge on non-finite input, so reject it up front.
    _assert_finite(a)
    t, result_t = _commonType(a)
    extobj = get_linalg_error_extobj(
        _raise_linalgerror_eigenvalues_nonconvergence)
    # Real input still maps to complex output ('d->D'): eigenvalues of a
    # general real matrix may be complex.
    signature = 'D->D' if isComplexType(t) else 'd->D'
    w = _umath_linalg.eigvals(a, signature=signature, extobj=extobj)

    if not isComplexType(t):
        # Downcast to a real result only when every eigenvalue has a zero
        # imaginary part.
        if all(w.imag == 0):
            w = w.real
            result_t = _realType(result_t)
        else:
            result_t = _complexType(result_t)

    return w.astype(result_t, copy=False)
  846. def _eigvalsh_dispatcher(a, UPLO=None):
  847. return (a,)
@array_function_dispatch(_eigvalsh_dispatcher)
def eigvalsh(a, UPLO='L'):
    """
    Compute the eigenvalues of a complex Hermitian or real symmetric matrix.

    Main difference from eigh: the eigenvectors are not computed.

    Parameters
    ----------
    a : (..., M, M) array_like
        A complex- or real-valued matrix whose eigenvalues are to be
        computed.
    UPLO : {'L', 'U'}, optional
        Specifies whether the calculation is done with the lower triangular
        part of `a` ('L', default) or the upper triangular part ('U').
        Irrespective of this value only the real parts of the diagonal will
        be considered in the computation to preserve the notion of a Hermitian
        matrix. It therefore follows that the imaginary part of the diagonal
        will always be treated as zero.

    Returns
    -------
    w : (..., M,) ndarray
        The eigenvalues in ascending order, each repeated according to
        its multiplicity.

    Raises
    ------
    LinAlgError
        If the eigenvalue computation does not converge.

    See Also
    --------
    eigh : eigenvalues and eigenvectors of real symmetric or complex Hermitian
           (conjugate symmetric) arrays.
    eigvals : eigenvalues of general real or complex arrays.
    eig : eigenvalues and right eigenvectors of general real or complex
          arrays.
    scipy.linalg.eigvalsh : Similar function in SciPy.

    Notes
    -----
    .. versionadded:: 1.8.0

    Broadcasting rules apply, see the `numpy.linalg` documentation for
    details.

    The eigenvalues are computed using LAPACK routines ``_syevd``, ``_heevd``.

    Examples
    --------
    >>> from numpy import linalg as LA
    >>> a = np.array([[1, -2j], [2j, 5]])
    >>> LA.eigvalsh(a)
    array([ 0.17157288,  5.82842712]) # may vary

    >>> # demonstrate the treatment of the imaginary part of the diagonal
    >>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]])
    >>> a
    array([[5.+2.j, 9.-2.j],
           [0.+2.j, 2.-1.j]])
    >>> # with UPLO='L' this is numerically equivalent to using LA.eigvals()
    >>> # with:
    >>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]])
    >>> b
    array([[5.+0.j, 0.-2.j],
           [0.+2.j, 2.+0.j]])
    >>> wa = LA.eigvalsh(a)
    >>> wb = LA.eigvals(b)
    >>> wa; wb
    array([1., 6.])
    array([6.+0.j, 1.+0.j])

    """
    UPLO = UPLO.upper()
    if UPLO not in ('L', 'U'):
        raise ValueError("UPLO argument must be 'L' or 'U'")

    extobj = get_linalg_error_extobj(
        _raise_linalgerror_eigenvalues_nonconvergence)
    # Pick the gufunc that reads the requested triangle of `a`.
    if UPLO == 'L':
        gufunc = _umath_linalg.eigvalsh_lo
    else:
        gufunc = _umath_linalg.eigvalsh_up

    a, wrap = _makearray(a)
    _assert_stacked_2d(a)
    _assert_stacked_square(a)
    t, result_t = _commonType(a)
    # Hermitian/symmetric matrices always have real eigenvalues ('d' out).
    signature = 'D->d' if isComplexType(t) else 'd->d'
    w = gufunc(a, signature=signature, extobj=extobj)
    return w.astype(_realType(result_t), copy=False)
  927. def _convertarray(a):
  928. t, result_t = _commonType(a)
  929. a = a.astype(t).T.copy()
  930. return a, t, result_t
  931. # Eigenvectors
@array_function_dispatch(_unary_dispatcher)
def eig(a):
    """
    Compute the eigenvalues and right eigenvectors of a square array.

    Parameters
    ----------
    a : (..., M, M) array
        Matrices for which the eigenvalues and right eigenvectors will
        be computed

    Returns
    -------
    w : (..., M) array
        The eigenvalues, each repeated according to its multiplicity.
        The eigenvalues are not necessarily ordered. The resulting
        array will be of complex type, unless the imaginary part is
        zero in which case it will be cast to a real type. When `a`
        is real the resulting eigenvalues will be real (0 imaginary
        part) or occur in conjugate pairs
    v : (..., M, M) array
        The normalized (unit "length") eigenvectors, such that the
        column ``v[:,i]`` is the eigenvector corresponding to the
        eigenvalue ``w[i]``.

    Raises
    ------
    LinAlgError
        If the eigenvalue computation does not converge.

    See Also
    --------
    eigvals : eigenvalues of a non-symmetric array.
    eigh : eigenvalues and eigenvectors of a real symmetric or complex
           Hermitian (conjugate symmetric) array.
    eigvalsh : eigenvalues of a real symmetric or complex Hermitian
               (conjugate symmetric) array.
    scipy.linalg.eig : Similar function in SciPy that also solves the
                       generalized eigenvalue problem.
    scipy.linalg.schur : Best choice for unitary and other non-Hermitian
                         normal matrices.

    Notes
    -----
    .. versionadded:: 1.8.0

    Broadcasting rules apply, see the `numpy.linalg` documentation for
    details.

    This is implemented using the ``_geev`` LAPACK routines which compute
    the eigenvalues and eigenvectors of general square arrays.

    The number `w` is an eigenvalue of `a` if there exists a vector
    `v` such that ``a @ v = w * v``. Thus, the arrays `a`, `w`, and
    `v` satisfy the equations ``a @ v[:,i] = w[i] * v[:,i]``
    for :math:`i \\in \\{0,...,M-1\\}`.

    The array `v` of eigenvectors may not be of maximum rank, that is, some
    of the columns may be linearly dependent, although round-off error may
    obscure that fact. If the eigenvalues are all different, then theoretically
    the eigenvectors are linearly independent and `a` can be diagonalized by
    a similarity transformation using `v`, i.e, ``inv(v) @ a @ v`` is diagonal.

    For non-Hermitian normal matrices the SciPy function `scipy.linalg.schur`
    is preferred because the matrix `v` is guaranteed to be unitary, which is
    not the case when using `eig`. The Schur factorization produces an
    upper triangular matrix rather than a diagonal matrix, but for normal
    matrices only the diagonal of the upper triangular matrix is needed, the
    rest is roundoff error.

    Finally, it is emphasized that `v` consists of the *right* (as in
    right-hand side) eigenvectors of `a`.  A vector `y` satisfying
    ``y.T @ a = z * y.T`` for some number `z` is called a *left*
    eigenvector of `a`, and, in general, the left and right eigenvectors
    of a matrix are not necessarily the (perhaps conjugate) transposes
    of each other.

    References
    ----------
    G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL,
    Academic Press, Inc., 1980, Various pp.

    Examples
    --------
    >>> from numpy import linalg as LA

    (Almost) trivial example with real e-values and e-vectors.

    >>> w, v = LA.eig(np.diag((1, 2, 3)))
    >>> w; v
    array([1., 2., 3.])
    array([[1., 0., 0.],
           [0., 1., 0.],
           [0., 0., 1.]])

    Real matrix possessing complex e-values and e-vectors; note that the
    e-values are complex conjugates of each other.

    >>> w, v = LA.eig(np.array([[1, -1], [1, 1]]))
    >>> w; v
    array([1.+1.j, 1.-1.j])
    array([[0.70710678+0.j        , 0.70710678-0.j        ],
           [0.        -0.70710678j, 0.        +0.70710678j]])

    Complex-valued matrix with real e-values (but complex-valued e-vectors);
    note that ``a.conj().T == a``, i.e., `a` is Hermitian.

    >>> a = np.array([[1, 1j], [-1j, 1]])
    >>> w, v = LA.eig(a)
    >>> w; v
    array([2.+0.j, 0.+0.j])
    array([[ 0.        +0.70710678j,  0.70710678+0.j        ], # may vary
           [ 0.70710678+0.j        , -0.        +0.70710678j]])

    Be careful about round-off error!

    >>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]])
    >>> # Theor. e-values are 1 +/- 1e-9
    >>> w, v = LA.eig(a)
    >>> w; v
    array([1., 1.])
    array([[1., 0.],
           [0., 1.]])

    """
    a, wrap = _makearray(a)
    _assert_stacked_2d(a)
    _assert_stacked_square(a)
    # _geev does not converge on non-finite input, so reject it up front.
    _assert_finite(a)
    t, result_t = _commonType(a)

    extobj = get_linalg_error_extobj(
        _raise_linalgerror_eigenvalues_nonconvergence)
    # Real input still maps to complex outputs ('d->DD'); a general real
    # matrix can have complex eigenvalues/eigenvectors.
    signature = 'D->DD' if isComplexType(t) else 'd->DD'
    w, vt = _umath_linalg.eig(a, signature=signature, extobj=extobj)

    if not isComplexType(t) and all(w.imag == 0.0):
        # Everything came out real: downcast both results to a real type.
        w = w.real
        vt = vt.real
        result_t = _realType(result_t)
    else:
        result_t = _complexType(result_t)

    vt = vt.astype(result_t, copy=False)
    return w.astype(result_t, copy=False), wrap(vt)
@array_function_dispatch(_eigvalsh_dispatcher)
def eigh(a, UPLO='L'):
    """
    Return the eigenvalues and eigenvectors of a complex Hermitian
    (conjugate symmetric) or a real symmetric matrix.

    Returns two objects, a 1-D array containing the eigenvalues of `a`, and
    a 2-D square array or matrix (depending on the input type) of the
    corresponding eigenvectors (in columns).

    Parameters
    ----------
    a : (..., M, M) array
        Hermitian or real symmetric matrices whose eigenvalues and
        eigenvectors are to be computed.
    UPLO : {'L', 'U'}, optional
        Specifies whether the calculation is done with the lower triangular
        part of `a` ('L', default) or the upper triangular part ('U').
        Irrespective of this value only the real parts of the diagonal will
        be considered in the computation to preserve the notion of a Hermitian
        matrix. It therefore follows that the imaginary part of the diagonal
        will always be treated as zero.

    Returns
    -------
    w : (..., M) ndarray
        The eigenvalues in ascending order, each repeated according to
        its multiplicity.
    v : {(..., M, M) ndarray, (..., M, M) matrix}
        The column ``v[:, i]`` is the normalized eigenvector corresponding
        to the eigenvalue ``w[i]``.  Will return a matrix object if `a` is
        a matrix object.

    Raises
    ------
    LinAlgError
        If the eigenvalue computation does not converge.

    See Also
    --------
    eigvalsh : eigenvalues of real symmetric or complex Hermitian
               (conjugate symmetric) arrays.
    eig : eigenvalues and right eigenvectors for non-symmetric arrays.
    eigvals : eigenvalues of non-symmetric arrays.
    scipy.linalg.eigh : Similar function in SciPy (but also solves the
                        generalized eigenvalue problem).

    Notes
    -----
    .. versionadded:: 1.8.0

    Broadcasting rules apply, see the `numpy.linalg` documentation for
    details.

    The eigenvalues/eigenvectors are computed using LAPACK routines ``_syevd``,
    ``_heevd``.

    The eigenvalues of real symmetric or complex Hermitian matrices are
    always real. [1]_ The array `v` of (column) eigenvectors is unitary
    and `a`, `w`, and `v` satisfy the equations
    ``dot(a, v[:, i]) = w[i] * v[:, i]``.

    References
    ----------
    .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
           FL, Academic Press, Inc., 1980, pg. 222.

    Examples
    --------
    >>> from numpy import linalg as LA
    >>> a = np.array([[1, -2j], [2j, 5]])
    >>> a
    array([[ 1.+0.j, -0.-2.j],
           [ 0.+2.j,  5.+0.j]])
    >>> w, v = LA.eigh(a)
    >>> w; v
    array([0.17157288, 5.82842712])
    array([[-0.92387953+0.j        , -0.38268343+0.j        ], # may vary
           [ 0.        +0.38268343j,  0.        -0.92387953j]])

    >>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair
    array([5.55111512e-17+0.0000000e+00j, 0.00000000e+00+1.2490009e-16j])
    >>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair
    array([0.+0.j, 0.+0.j])

    >>> A = np.matrix(a) # what happens if input is a matrix object
    >>> A
    matrix([[ 1.+0.j, -0.-2.j],
            [ 0.+2.j,  5.+0.j]])
    >>> w, v = LA.eigh(A)
    >>> w; v
    array([0.17157288, 5.82842712])
    matrix([[-0.92387953+0.j        , -0.38268343+0.j        ], # may vary
            [ 0.        +0.38268343j,  0.        -0.92387953j]])

    >>> # demonstrate the treatment of the imaginary part of the diagonal
    >>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]])
    >>> a
    array([[5.+2.j, 9.-2.j],
           [0.+2.j, 2.-1.j]])
    >>> # with UPLO='L' this is numerically equivalent to using LA.eig() with:
    >>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]])
    >>> b
    array([[5.+0.j, 0.-2.j],
           [0.+2.j, 2.+0.j]])
    >>> wa, va = LA.eigh(a)
    >>> wb, vb = LA.eig(b)
    >>> wa; wb
    array([1., 6.])
    array([6.+0.j, 1.+0.j])
    >>> va; vb
    array([[-0.4472136 +0.j        , -0.89442719+0.j        ], # may vary
           [ 0.        +0.89442719j,  0.        -0.4472136j ]])
    array([[ 0.89442719+0.j        , -0.        +0.4472136j],
           [-0.        +0.4472136j ,  0.89442719+0.j        ]])

    """
    UPLO = UPLO.upper()
    if UPLO not in ('L', 'U'):
        raise ValueError("UPLO argument must be 'L' or 'U'")

    a, wrap = _makearray(a)
    _assert_stacked_2d(a)
    _assert_stacked_square(a)
    t, result_t = _commonType(a)

    extobj = get_linalg_error_extobj(
        _raise_linalgerror_eigenvalues_nonconvergence)
    # Pick the gufunc that reads the requested triangle of `a`.
    if UPLO == 'L':
        gufunc = _umath_linalg.eigh_lo
    else:
        gufunc = _umath_linalg.eigh_up

    # Eigenvalues of Hermitian/symmetric input are always real ('d' out);
    # eigenvectors keep the input's (possibly complex) type.
    signature = 'D->dD' if isComplexType(t) else 'd->dd'
    w, vt = gufunc(a, signature=signature, extobj=extobj)
    w = w.astype(_realType(result_t), copy=False)
    vt = vt.astype(result_t, copy=False)
    return w, wrap(vt)
  1172. # Singular value decomposition
  1173. def _svd_dispatcher(a, full_matrices=None, compute_uv=None, hermitian=None):
  1174. return (a,)
@array_function_dispatch(_svd_dispatcher)
def svd(a, full_matrices=True, compute_uv=True, hermitian=False):
    """
    Singular Value Decomposition.

    When `a` is a 2D array, and ``full_matrices=False``, then it is
    factorized as ``u @ np.diag(s) @ vh = (u * s) @ vh``, where
    `u` and the Hermitian transpose of `vh` are 2D arrays with
    orthonormal columns and `s` is a 1D array of `a`'s singular
    values. When `a` is higher-dimensional, SVD is applied in
    stacked mode as explained below.

    Parameters
    ----------
    a : (..., M, N) array_like
        A real or complex array with ``a.ndim >= 2``.
    full_matrices : bool, optional
        If True (default), `u` and `vh` have the shapes ``(..., M, M)`` and
        ``(..., N, N)``, respectively.  Otherwise, the shapes are
        ``(..., M, K)`` and ``(..., K, N)``, respectively, where
        ``K = min(M, N)``.
    compute_uv : bool, optional
        Whether or not to compute `u` and `vh` in addition to `s`.  True
        by default.
    hermitian : bool, optional
        If True, `a` is assumed to be Hermitian (symmetric if real-valued),
        enabling a more efficient method for finding singular values.
        Defaults to False.

        .. versionadded:: 1.17.0

    Returns
    -------
    u : { (..., M, M), (..., M, K) } array
        Unitary array(s). The first ``a.ndim - 2`` dimensions have the same
        size as those of the input `a`. The size of the last two dimensions
        depends on the value of `full_matrices`. Only returned when
        `compute_uv` is True.
    s : (..., K) array
        Vector(s) with the singular values, within each vector sorted in
        descending order. The first ``a.ndim - 2`` dimensions have the same
        size as those of the input `a`.
    vh : { (..., N, N), (..., K, N) } array
        Unitary array(s). The first ``a.ndim - 2`` dimensions have the same
        size as those of the input `a`. The size of the last two dimensions
        depends on the value of `full_matrices`. Only returned when
        `compute_uv` is True.

    Raises
    ------
    LinAlgError
        If SVD computation does not converge.

    See Also
    --------
    scipy.linalg.svd : Similar function in SciPy.
    scipy.linalg.svdvals : Compute singular values of a matrix.

    Notes
    -----
    .. versionchanged:: 1.8.0
       Broadcasting rules apply, see the `numpy.linalg` documentation for
       details.

    The decomposition is performed using LAPACK routine ``_gesdd``.

    SVD is usually described for the factorization of a 2D matrix :math:`A`.
    The higher-dimensional case will be discussed below. In the 2D case, SVD is
    written as :math:`A = U S V^H`, where :math:`A = a`, :math:`U= u`,
    :math:`S= \\mathtt{np.diag}(s)` and :math:`V^H = vh`. The 1D array `s`
    contains the singular values of `a` and `u` and `vh` are unitary. The rows
    of `vh` are the eigenvectors of :math:`A^H A` and the columns of `u` are
    the eigenvectors of :math:`A A^H`. In both cases the corresponding
    (possibly non-zero) eigenvalues are given by ``s**2``.

    If `a` has more than two dimensions, then broadcasting rules apply, as
    explained in :ref:`routines.linalg-broadcasting`. This means that SVD is
    working in "stacked" mode: it iterates over all indices of the first
    ``a.ndim - 2`` dimensions and for each combination SVD is applied to the
    last two indices. The matrix `a` can be reconstructed from the
    decomposition with either ``(u * s[..., None, :]) @ vh`` or
    ``u @ (s[..., None] * vh)``. (The ``@`` operator can be replaced by the
    function ``np.matmul`` for python versions below 3.5.)

    If `a` is a ``matrix`` object (as opposed to an ``ndarray``), then so are
    all the return values.

    Examples
    --------
    >>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)
    >>> b = np.random.randn(2, 7, 8, 3) + 1j*np.random.randn(2, 7, 8, 3)

    Reconstruction based on full SVD, 2D case:

    >>> u, s, vh = np.linalg.svd(a, full_matrices=True)
    >>> u.shape, s.shape, vh.shape
    ((9, 9), (6,), (6, 6))
    >>> np.allclose(a, np.dot(u[:, :6] * s, vh))
    True
    >>> smat = np.zeros((9, 6), dtype=complex)
    >>> smat[:6, :6] = np.diag(s)
    >>> np.allclose(a, np.dot(u, np.dot(smat, vh)))
    True

    Reconstruction based on reduced SVD, 2D case:

    >>> u, s, vh = np.linalg.svd(a, full_matrices=False)
    >>> u.shape, s.shape, vh.shape
    ((9, 6), (6,), (6, 6))
    >>> np.allclose(a, np.dot(u * s, vh))
    True
    >>> smat = np.diag(s)
    >>> np.allclose(a, np.dot(u, np.dot(smat, vh)))
    True

    Reconstruction based on full SVD, 4D case:

    >>> u, s, vh = np.linalg.svd(b, full_matrices=True)
    >>> u.shape, s.shape, vh.shape
    ((2, 7, 8, 8), (2, 7, 3), (2, 7, 3, 3))
    >>> np.allclose(b, np.matmul(u[..., :3] * s[..., None, :], vh))
    True
    >>> np.allclose(b, np.matmul(u[..., :3], s[..., None] * vh))
    True

    Reconstruction based on reduced SVD, 4D case:

    >>> u, s, vh = np.linalg.svd(b, full_matrices=False)
    >>> u.shape, s.shape, vh.shape
    ((2, 7, 8, 3), (2, 7, 3), (2, 7, 3, 3))
    >>> np.allclose(b, np.matmul(u * s[..., None, :], vh))
    True
    >>> np.allclose(b, np.matmul(u, s[..., None] * vh))
    True

    """
    # `_nx` is used for take_along_axis in the hermitian fast path below.
    import numpy as _nx
    a, wrap = _makearray(a)

    if hermitian:
        # note: lapack svd returns eigenvalues with s ** 2 sorted descending,
        # but eig returns s sorted ascending, so we re-order the eigenvalues
        # and related arrays to have the correct order
        if compute_uv:
            s, u = eigh(a)
            sgn = sign(s)
            s = abs(s)
            sidx = argsort(s)[..., ::-1]
            sgn = _nx.take_along_axis(sgn, sidx, axis=-1)
            s = _nx.take_along_axis(s, sidx, axis=-1)
            u = _nx.take_along_axis(u, sidx[..., None, :], axis=-1)
            # singular values are unsigned, move the sign into v
            vt = transpose(u * sgn[..., None, :]).conjugate()
            return wrap(u), s, wrap(vt)
        else:
            s = eigvalsh(a)
            s = abs(s)
            return sort(s)[..., ::-1]

    _assert_stacked_2d(a)
    t, result_t = _commonType(a)

    extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence)

    m, n = a.shape[-2:]
    if compute_uv:
        # Four gufunc variants cover full/reduced matrices crossed with
        # wide (m < n) vs tall inputs.
        if full_matrices:
            if m < n:
                gufunc = _umath_linalg.svd_m_f
            else:
                gufunc = _umath_linalg.svd_n_f
        else:
            if m < n:
                gufunc = _umath_linalg.svd_m_s
            else:
                gufunc = _umath_linalg.svd_n_s

        # Singular values are always real even for complex input.
        signature = 'D->DdD' if isComplexType(t) else 'd->ddd'
        u, s, vh = gufunc(a, signature=signature, extobj=extobj)
        u = u.astype(result_t, copy=False)
        s = s.astype(_realType(result_t), copy=False)
        vh = vh.astype(result_t, copy=False)
        return wrap(u), s, wrap(vh)
    else:
        # Singular values only.
        if m < n:
            gufunc = _umath_linalg.svd_m
        else:
            gufunc = _umath_linalg.svd_n

        signature = 'D->d' if isComplexType(t) else 'd->d'
        s = gufunc(a, signature=signature, extobj=extobj)
        s = s.astype(_realType(result_t), copy=False)
        return s
  1341. def _cond_dispatcher(x, p=None):
  1342. return (x,)
  1343. @array_function_dispatch(_cond_dispatcher)
  1344. def cond(x, p=None):
  1345. """
  1346. Compute the condition number of a matrix.
  1347. This function is capable of returning the condition number using
  1348. one of seven different norms, depending on the value of `p` (see
  1349. Parameters below).
  1350. Parameters
  1351. ----------
  1352. x : (..., M, N) array_like
  1353. The matrix whose condition number is sought.
  1354. p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional
  1355. Order of the norm used in the condition number computation:
  1356. ===== ============================
  1357. p norm for matrices
  1358. ===== ============================
  1359. None 2-norm, computed directly using the ``SVD``
  1360. 'fro' Frobenius norm
  1361. inf max(sum(abs(x), axis=1))
  1362. -inf min(sum(abs(x), axis=1))
  1363. 1 max(sum(abs(x), axis=0))
  1364. -1 min(sum(abs(x), axis=0))
  1365. 2 2-norm (largest sing. value)
  1366. -2 smallest singular value
  1367. ===== ============================
  1368. inf means the `numpy.inf` object, and the Frobenius norm is
  1369. the root-of-sum-of-squares norm.
  1370. Returns
  1371. -------
  1372. c : {float, inf}
  1373. The condition number of the matrix. May be infinite.
  1374. See Also
  1375. --------
  1376. numpy.linalg.norm
  1377. Notes
  1378. -----
  1379. The condition number of `x` is defined as the norm of `x` times the
  1380. norm of the inverse of `x` [1]_; the norm can be the usual L2-norm
  1381. (root-of-sum-of-squares) or one of a number of other matrix norms.
  1382. References
  1383. ----------
  1384. .. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL,
  1385. Academic Press, Inc., 1980, pg. 285.
  1386. Examples
  1387. --------
  1388. >>> from numpy import linalg as LA
  1389. >>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]])
  1390. >>> a
  1391. array([[ 1, 0, -1],
  1392. [ 0, 1, 0],
  1393. [ 1, 0, 1]])
  1394. >>> LA.cond(a)
  1395. 1.4142135623730951
  1396. >>> LA.cond(a, 'fro')
  1397. 3.1622776601683795
  1398. >>> LA.cond(a, np.inf)
  1399. 2.0
  1400. >>> LA.cond(a, -np.inf)
  1401. 1.0
  1402. >>> LA.cond(a, 1)
  1403. 2.0
  1404. >>> LA.cond(a, -1)
  1405. 1.0
  1406. >>> LA.cond(a, 2)
  1407. 1.4142135623730951
  1408. >>> LA.cond(a, -2)
  1409. 0.70710678118654746 # may vary
  1410. >>> min(LA.svd(a, compute_uv=False))*min(LA.svd(LA.inv(a), compute_uv=False))
  1411. 0.70710678118654746 # may vary
  1412. """
  1413. x = asarray(x) # in case we have a matrix
  1414. if _is_empty_2d(x):
  1415. raise LinAlgError("cond is not defined on empty arrays")
  1416. if p is None or p == 2 or p == -2:
  1417. s = svd(x, compute_uv=False)
  1418. with errstate(all='ignore'):
  1419. if p == -2:
  1420. r = s[..., -1] / s[..., 0]
  1421. else:
  1422. r = s[..., 0] / s[..., -1]
  1423. else:
  1424. # Call inv(x) ignoring errors. The result array will
  1425. # contain nans in the entries where inversion failed.
  1426. _assert_stacked_2d(x)
  1427. _assert_stacked_square(x)
  1428. t, result_t = _commonType(x)
  1429. signature = 'D->D' if isComplexType(t) else 'd->d'
  1430. with errstate(all='ignore'):
  1431. invx = _umath_linalg.inv(x, signature=signature)
  1432. r = norm(x, p, axis=(-2, -1)) * norm(invx, p, axis=(-2, -1))
  1433. r = r.astype(result_t, copy=False)
  1434. # Convert nans to infs unless the original array had nan entries
  1435. r = asarray(r)
  1436. nan_mask = isnan(r)
  1437. if nan_mask.any():
  1438. nan_mask &= ~isnan(x).any(axis=(-2, -1))
  1439. if r.ndim > 0:
  1440. r[nan_mask] = Inf
  1441. elif nan_mask:
  1442. r[()] = Inf
  1443. # Convention is to return scalars instead of 0d arrays
  1444. if r.ndim == 0:
  1445. r = r[()]
  1446. return r
  1447. def _matrix_rank_dispatcher(A, tol=None, hermitian=None):
  1448. return (A,)
  1449. @array_function_dispatch(_matrix_rank_dispatcher)
  1450. def matrix_rank(A, tol=None, hermitian=False):
  1451. """
  1452. Return matrix rank of array using SVD method
  1453. Rank of the array is the number of singular values of the array that are
  1454. greater than `tol`.
  1455. .. versionchanged:: 1.14
  1456. Can now operate on stacks of matrices
  1457. Parameters
  1458. ----------
  1459. A : {(M,), (..., M, N)} array_like
  1460. Input vector or stack of matrices.
  1461. tol : (...) array_like, float, optional
  1462. Threshold below which SVD values are considered zero. If `tol` is
  1463. None, and ``S`` is an array with singular values for `M`, and
  1464. ``eps`` is the epsilon value for datatype of ``S``, then `tol` is
  1465. set to ``S.max() * max(M, N) * eps``.
  1466. .. versionchanged:: 1.14
  1467. Broadcasted against the stack of matrices
  1468. hermitian : bool, optional
  1469. If True, `A` is assumed to be Hermitian (symmetric if real-valued),
  1470. enabling a more efficient method for finding singular values.
  1471. Defaults to False.
  1472. .. versionadded:: 1.14
  1473. Returns
  1474. -------
  1475. rank : (...) array_like
  1476. Rank of A.
  1477. Notes
  1478. -----
  1479. The default threshold to detect rank deficiency is a test on the magnitude
  1480. of the singular values of `A`. By default, we identify singular values less
  1481. than ``S.max() * max(M, N) * eps`` as indicating rank deficiency (with
  1482. the symbols defined above). This is the algorithm MATLAB uses [1]. It also
  1483. appears in *Numerical recipes* in the discussion of SVD solutions for linear
  1484. least squares [2].
  1485. This default threshold is designed to detect rank deficiency accounting for
  1486. the numerical errors of the SVD computation. Imagine that there is a column
  1487. in `A` that is an exact (in floating point) linear combination of other
  1488. columns in `A`. Computing the SVD on `A` will not produce a singular value
  1489. exactly equal to 0 in general: any difference of the smallest SVD value from
  1490. 0 will be caused by numerical imprecision in the calculation of the SVD.
  1491. Our threshold for small SVD values takes this numerical imprecision into
  1492. account, and the default threshold will detect such numerical rank
  1493. deficiency. The threshold may declare a matrix `A` rank deficient even if
  1494. the linear combination of some columns of `A` is not exactly equal to
  1495. another column of `A` but only numerically very close to another column of
  1496. `A`.
  1497. We chose our default threshold because it is in wide use. Other thresholds
  1498. are possible. For example, elsewhere in the 2007 edition of *Numerical
  1499. recipes* there is an alternative threshold of ``S.max() *
  1500. np.finfo(A.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe
  1501. this threshold as being based on "expected roundoff error" (p 71).
  1502. The thresholds above deal with floating point roundoff error in the
  1503. calculation of the SVD. However, you may have more information about the
  1504. sources of error in `A` that would make you consider other tolerance values
  1505. to detect *effective* rank deficiency. The most useful measure of the
  1506. tolerance depends on the operations you intend to use on your matrix. For
  1507. example, if your data come from uncertain measurements with uncertainties
  1508. greater than floating point epsilon, choosing a tolerance near that
  1509. uncertainty may be preferable. The tolerance may be absolute if the
  1510. uncertainties are absolute rather than relative.
  1511. References
  1512. ----------
  1513. .. [1] MATLAB reference documentation, "Rank"
  1514. https://www.mathworks.com/help/techdoc/ref/rank.html
  1515. .. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
  1516. "Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
  1517. page 795.
  1518. Examples
  1519. --------
  1520. >>> from numpy.linalg import matrix_rank
  1521. >>> matrix_rank(np.eye(4)) # Full rank matrix
  1522. 4
  1523. >>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
  1524. >>> matrix_rank(I)
  1525. 3
  1526. >>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
  1527. 1
  1528. >>> matrix_rank(np.zeros((4,)))
  1529. 0
  1530. """
  1531. A = asarray(A)
  1532. if A.ndim < 2:
  1533. return int(not all(A==0))
  1534. S = svd(A, compute_uv=False, hermitian=hermitian)
  1535. if tol is None:
  1536. tol = S.max(axis=-1, keepdims=True) * max(A.shape[-2:]) * finfo(S.dtype).eps
  1537. else:
  1538. tol = asarray(tol)[..., newaxis]
  1539. return count_nonzero(S > tol, axis=-1)
  1540. # Generalized inverse
  1541. def _pinv_dispatcher(a, rcond=None, hermitian=None):
  1542. return (a,)
  1543. @array_function_dispatch(_pinv_dispatcher)
  1544. def pinv(a, rcond=1e-15, hermitian=False):
  1545. """
  1546. Compute the (Moore-Penrose) pseudo-inverse of a matrix.
  1547. Calculate the generalized inverse of a matrix using its
  1548. singular-value decomposition (SVD) and including all
  1549. *large* singular values.
  1550. .. versionchanged:: 1.14
  1551. Can now operate on stacks of matrices
  1552. Parameters
  1553. ----------
  1554. a : (..., M, N) array_like
  1555. Matrix or stack of matrices to be pseudo-inverted.
  1556. rcond : (...) array_like of float
  1557. Cutoff for small singular values.
  1558. Singular values less than or equal to
  1559. ``rcond * largest_singular_value`` are set to zero.
  1560. Broadcasts against the stack of matrices.
  1561. hermitian : bool, optional
  1562. If True, `a` is assumed to be Hermitian (symmetric if real-valued),
  1563. enabling a more efficient method for finding singular values.
  1564. Defaults to False.
  1565. .. versionadded:: 1.17.0
  1566. Returns
  1567. -------
  1568. B : (..., N, M) ndarray
  1569. The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
  1570. is `B`.
  1571. Raises
  1572. ------
  1573. LinAlgError
  1574. If the SVD computation does not converge.
  1575. See Also
  1576. --------
  1577. scipy.linalg.pinv : Similar function in SciPy.
  1578. scipy.linalg.pinvh : Compute the (Moore-Penrose) pseudo-inverse of a
  1579. Hermitian matrix.
  1580. Notes
  1581. -----
  1582. The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
  1583. defined as: "the matrix that 'solves' [the least-squares problem]
  1584. :math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then
  1585. :math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.
  1586. It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular
  1587. value decomposition of A, then
  1588. :math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are
  1589. orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting
  1590. of A's so-called singular values, (followed, typically, by
  1591. zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix
  1592. consisting of the reciprocals of A's singular values
  1593. (again, followed by zeros). [1]_
  1594. References
  1595. ----------
  1596. .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
  1597. FL, Academic Press, Inc., 1980, pp. 139-142.
  1598. Examples
  1599. --------
  1600. The following example checks that ``a * a+ * a == a`` and
  1601. ``a+ * a * a+ == a+``:
  1602. >>> a = np.random.randn(9, 6)
  1603. >>> B = np.linalg.pinv(a)
  1604. >>> np.allclose(a, np.dot(a, np.dot(B, a)))
  1605. True
  1606. >>> np.allclose(B, np.dot(B, np.dot(a, B)))
  1607. True
  1608. """
  1609. a, wrap = _makearray(a)
  1610. rcond = asarray(rcond)
  1611. if _is_empty_2d(a):
  1612. m, n = a.shape[-2:]
  1613. res = empty(a.shape[:-2] + (n, m), dtype=a.dtype)
  1614. return wrap(res)
  1615. a = a.conjugate()
  1616. u, s, vt = svd(a, full_matrices=False, hermitian=hermitian)
  1617. # discard small singular values
  1618. cutoff = rcond[..., newaxis] * amax(s, axis=-1, keepdims=True)
  1619. large = s > cutoff
  1620. s = divide(1, s, where=large, out=s)
  1621. s[~large] = 0
  1622. res = matmul(transpose(vt), multiply(s[..., newaxis], transpose(u)))
  1623. return wrap(res)
  1624. # Determinant
  1625. @array_function_dispatch(_unary_dispatcher)
  1626. def slogdet(a):
  1627. """
  1628. Compute the sign and (natural) logarithm of the determinant of an array.
  1629. If an array has a very small or very large determinant, then a call to
  1630. `det` may overflow or underflow. This routine is more robust against such
  1631. issues, because it computes the logarithm of the determinant rather than
  1632. the determinant itself.
  1633. Parameters
  1634. ----------
  1635. a : (..., M, M) array_like
  1636. Input array, has to be a square 2-D array.
  1637. Returns
  1638. -------
  1639. sign : (...) array_like
  1640. A number representing the sign of the determinant. For a real matrix,
  1641. this is 1, 0, or -1. For a complex matrix, this is a complex number
  1642. with absolute value 1 (i.e., it is on the unit circle), or else 0.
  1643. logdet : (...) array_like
  1644. The natural log of the absolute value of the determinant.
  1645. If the determinant is zero, then `sign` will be 0 and `logdet` will be
  1646. -Inf. In all cases, the determinant is equal to ``sign * np.exp(logdet)``.
  1647. See Also
  1648. --------
  1649. det
  1650. Notes
  1651. -----
  1652. .. versionadded:: 1.8.0
  1653. Broadcasting rules apply, see the `numpy.linalg` documentation for
  1654. details.
  1655. .. versionadded:: 1.6.0
  1656. The determinant is computed via LU factorization using the LAPACK
  1657. routine ``z/dgetrf``.
  1658. Examples
  1659. --------
  1660. The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:
  1661. >>> a = np.array([[1, 2], [3, 4]])
  1662. >>> (sign, logdet) = np.linalg.slogdet(a)
  1663. >>> (sign, logdet)
  1664. (-1, 0.69314718055994529) # may vary
  1665. >>> sign * np.exp(logdet)
  1666. -2.0
  1667. Computing log-determinants for a stack of matrices:
  1668. >>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
  1669. >>> a.shape
  1670. (3, 2, 2)
  1671. >>> sign, logdet = np.linalg.slogdet(a)
  1672. >>> (sign, logdet)
  1673. (array([-1., -1., -1.]), array([ 0.69314718, 1.09861229, 2.07944154]))
  1674. >>> sign * np.exp(logdet)
  1675. array([-2., -3., -8.])
  1676. This routine succeeds where ordinary `det` does not:
  1677. >>> np.linalg.det(np.eye(500) * 0.1)
  1678. 0.0
  1679. >>> np.linalg.slogdet(np.eye(500) * 0.1)
  1680. (1, -1151.2925464970228)
  1681. """
  1682. a = asarray(a)
  1683. _assert_stacked_2d(a)
  1684. _assert_stacked_square(a)
  1685. t, result_t = _commonType(a)
  1686. real_t = _realType(result_t)
  1687. signature = 'D->Dd' if isComplexType(t) else 'd->dd'
  1688. sign, logdet = _umath_linalg.slogdet(a, signature=signature)
  1689. sign = sign.astype(result_t, copy=False)
  1690. logdet = logdet.astype(real_t, copy=False)
  1691. return sign, logdet
  1692. @array_function_dispatch(_unary_dispatcher)
  1693. def det(a):
  1694. """
  1695. Compute the determinant of an array.
  1696. Parameters
  1697. ----------
  1698. a : (..., M, M) array_like
  1699. Input array to compute determinants for.
  1700. Returns
  1701. -------
  1702. det : (...) array_like
  1703. Determinant of `a`.
  1704. See Also
  1705. --------
  1706. slogdet : Another way to represent the determinant, more suitable
  1707. for large matrices where underflow/overflow may occur.
  1708. scipy.linalg.det : Similar function in SciPy.
  1709. Notes
  1710. -----
  1711. .. versionadded:: 1.8.0
  1712. Broadcasting rules apply, see the `numpy.linalg` documentation for
  1713. details.
  1714. The determinant is computed via LU factorization using the LAPACK
  1715. routine ``z/dgetrf``.
  1716. Examples
  1717. --------
  1718. The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:
  1719. >>> a = np.array([[1, 2], [3, 4]])
  1720. >>> np.linalg.det(a)
  1721. -2.0 # may vary
  1722. Computing determinants for a stack of matrices:
  1723. >>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
  1724. >>> a.shape
  1725. (3, 2, 2)
  1726. >>> np.linalg.det(a)
  1727. array([-2., -3., -8.])
  1728. """
  1729. a = asarray(a)
  1730. _assert_stacked_2d(a)
  1731. _assert_stacked_square(a)
  1732. t, result_t = _commonType(a)
  1733. signature = 'D->D' if isComplexType(t) else 'd->d'
  1734. r = _umath_linalg.det(a, signature=signature)
  1735. r = r.astype(result_t, copy=False)
  1736. return r
  1737. # Linear Least Squares
  1738. def _lstsq_dispatcher(a, b, rcond=None):
  1739. return (a, b)
  1740. @array_function_dispatch(_lstsq_dispatcher)
  1741. def lstsq(a, b, rcond="warn"):
  1742. r"""
  1743. Return the least-squares solution to a linear matrix equation.
  1744. Computes the vector `x` that approximately solves the equation
  1745. ``a @ x = b``. The equation may be under-, well-, or over-determined
  1746. (i.e., the number of linearly independent rows of `a` can be less than,
  1747. equal to, or greater than its number of linearly independent columns).
  1748. If `a` is square and of full rank, then `x` (but for round-off error)
  1749. is the "exact" solution of the equation. Else, `x` minimizes the
  1750. Euclidean 2-norm :math:`||b - ax||`. If there are multiple minimizing
  1751. solutions, the one with the smallest 2-norm :math:`||x||` is returned.
  1752. Parameters
  1753. ----------
  1754. a : (M, N) array_like
  1755. "Coefficient" matrix.
  1756. b : {(M,), (M, K)} array_like
  1757. Ordinate or "dependent variable" values. If `b` is two-dimensional,
  1758. the least-squares solution is calculated for each of the `K` columns
  1759. of `b`.
  1760. rcond : float, optional
  1761. Cut-off ratio for small singular values of `a`.
  1762. For the purposes of rank determination, singular values are treated
  1763. as zero if they are smaller than `rcond` times the largest singular
  1764. value of `a`.
  1765. .. versionchanged:: 1.14.0
  1766. If not set, a FutureWarning is given. The previous default
  1767. of ``-1`` will use the machine precision as `rcond` parameter,
  1768. the new default will use the machine precision times `max(M, N)`.
  1769. To silence the warning and use the new default, use ``rcond=None``,
  1770. to keep using the old behavior, use ``rcond=-1``.
  1771. Returns
  1772. -------
  1773. x : {(N,), (N, K)} ndarray
  1774. Least-squares solution. If `b` is two-dimensional,
  1775. the solutions are in the `K` columns of `x`.
  1776. residuals : {(1,), (K,), (0,)} ndarray
  1777. Sums of squared residuals: Squared Euclidean 2-norm for each column in
  1778. ``b - a @ x``.
  1779. If the rank of `a` is < N or M <= N, this is an empty array.
  1780. If `b` is 1-dimensional, this is a (1,) shape array.
  1781. Otherwise the shape is (K,).
  1782. rank : int
  1783. Rank of matrix `a`.
  1784. s : (min(M, N),) ndarray
  1785. Singular values of `a`.
  1786. Raises
  1787. ------
  1788. LinAlgError
  1789. If computation does not converge.
  1790. See Also
  1791. --------
  1792. scipy.linalg.lstsq : Similar function in SciPy.
  1793. Notes
  1794. -----
  1795. If `b` is a matrix, then all array results are returned as matrices.
  1796. Examples
  1797. --------
  1798. Fit a line, ``y = mx + c``, through some noisy data-points:
  1799. >>> x = np.array([0, 1, 2, 3])
  1800. >>> y = np.array([-1, 0.2, 0.9, 2.1])
  1801. By examining the coefficients, we see that the line should have a
  1802. gradient of roughly 1 and cut the y-axis at, more or less, -1.
  1803. We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``
  1804. and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`:
  1805. >>> A = np.vstack([x, np.ones(len(x))]).T
  1806. >>> A
  1807. array([[ 0., 1.],
  1808. [ 1., 1.],
  1809. [ 2., 1.],
  1810. [ 3., 1.]])
  1811. >>> m, c = np.linalg.lstsq(A, y, rcond=None)[0]
  1812. >>> m, c
  1813. (1.0 -0.95) # may vary
  1814. Plot the data along with the fitted line:
  1815. >>> import matplotlib.pyplot as plt
  1816. >>> _ = plt.plot(x, y, 'o', label='Original data', markersize=10)
  1817. >>> _ = plt.plot(x, m*x + c, 'r', label='Fitted line')
  1818. >>> _ = plt.legend()
  1819. >>> plt.show()
  1820. """
  1821. a, _ = _makearray(a)
  1822. b, wrap = _makearray(b)
  1823. is_1d = b.ndim == 1
  1824. if is_1d:
  1825. b = b[:, newaxis]
  1826. _assert_2d(a, b)
  1827. m, n = a.shape[-2:]
  1828. m2, n_rhs = b.shape[-2:]
  1829. if m != m2:
  1830. raise LinAlgError('Incompatible dimensions')
  1831. t, result_t = _commonType(a, b)
  1832. result_real_t = _realType(result_t)
  1833. # Determine default rcond value
  1834. if rcond == "warn":
  1835. # 2017-08-19, 1.14.0
  1836. warnings.warn("`rcond` parameter will change to the default of "
  1837. "machine precision times ``max(M, N)`` where M and N "
  1838. "are the input matrix dimensions.\n"
  1839. "To use the future default and silence this warning "
  1840. "we advise to pass `rcond=None`, to keep using the old, "
  1841. "explicitly pass `rcond=-1`.",
  1842. FutureWarning, stacklevel=3)
  1843. rcond = -1
  1844. if rcond is None:
  1845. rcond = finfo(t).eps * max(n, m)
  1846. if m <= n:
  1847. gufunc = _umath_linalg.lstsq_m
  1848. else:
  1849. gufunc = _umath_linalg.lstsq_n
  1850. signature = 'DDd->Ddid' if isComplexType(t) else 'ddd->ddid'
  1851. extobj = get_linalg_error_extobj(_raise_linalgerror_lstsq)
  1852. if n_rhs == 0:
  1853. # lapack can't handle n_rhs = 0 - so allocate the array one larger in that axis
  1854. b = zeros(b.shape[:-2] + (m, n_rhs + 1), dtype=b.dtype)
  1855. x, resids, rank, s = gufunc(a, b, rcond, signature=signature, extobj=extobj)
  1856. if m == 0:
  1857. x[...] = 0
  1858. if n_rhs == 0:
  1859. # remove the item we added
  1860. x = x[..., :n_rhs]
  1861. resids = resids[..., :n_rhs]
  1862. # remove the axis we added
  1863. if is_1d:
  1864. x = x.squeeze(axis=-1)
  1865. # we probably should squeeze resids too, but we can't
  1866. # without breaking compatibility.
  1867. # as documented
  1868. if rank != n or m <= n:
  1869. resids = array([], result_real_t)
  1870. # coerce output arrays
  1871. s = s.astype(result_real_t, copy=False)
  1872. resids = resids.astype(result_real_t, copy=False)
  1873. x = x.astype(result_t, copy=True) # Copying lets the memory in r_parts be freed
  1874. return wrap(x), wrap(resids), rank, s
  1875. def _multi_svd_norm(x, row_axis, col_axis, op):
  1876. """Compute a function of the singular values of the 2-D matrices in `x`.
  1877. This is a private utility function used by `numpy.linalg.norm()`.
  1878. Parameters
  1879. ----------
  1880. x : ndarray
  1881. row_axis, col_axis : int
  1882. The axes of `x` that hold the 2-D matrices.
  1883. op : callable
  1884. This should be either numpy.amin or `numpy.amax` or `numpy.sum`.
  1885. Returns
  1886. -------
  1887. result : float or ndarray
  1888. If `x` is 2-D, the return values is a float.
  1889. Otherwise, it is an array with ``x.ndim - 2`` dimensions.
  1890. The return values are either the minimum or maximum or sum of the
  1891. singular values of the matrices, depending on whether `op`
  1892. is `numpy.amin` or `numpy.amax` or `numpy.sum`.
  1893. """
  1894. y = moveaxis(x, (row_axis, col_axis), (-2, -1))
  1895. result = op(svd(y, compute_uv=False), axis=-1)
  1896. return result
  1897. def _norm_dispatcher(x, ord=None, axis=None, keepdims=None):
  1898. return (x,)
@array_function_dispatch(_norm_dispatcher)
def norm(x, ord=None, axis=None, keepdims=False):
    """
    Matrix or vector norm.

    This function is able to return one of eight different matrix norms,
    or one of an infinite number of vector norms (described below),
    depending on the value of the ``ord`` parameter.

    Parameters
    ----------
    x : array_like
        Input array. If `axis` is None, `x` must be 1-D or 2-D, unless
        `ord` is None. If both `axis` and `ord` are None, the 2-norm of
        ``x.ravel`` will be returned.
    ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional
        Order of the norm (see table under ``Notes``). inf means
        numpy's `inf` object. The default is None.
    axis : {None, int, 2-tuple of ints}, optional.
        If `axis` is an integer, it specifies the axis of `x` along
        which to compute the vector norms. If `axis` is a 2-tuple, it
        specifies the axes that hold 2-D matrices, and the matrix norms
        of these matrices are computed. If `axis` is None then either a
        vector norm (when `x` is 1-D) or a matrix norm (when `x` is
        2-D) is returned. The default is None.
    keepdims : bool, optional
        If this is set to True, the axes which are normed over are left
        in the result as dimensions with size one. With this option the
        result will broadcast correctly against the original `x`.

    Returns
    -------
    n : float or ndarray
        Norm of the matrix or vector(s).

    See Also
    --------
    scipy.linalg.norm : Similar function in SciPy.

    Notes
    -----
    For values of ``ord < 1``, the result is, strictly speaking, not a
    mathematical 'norm', but it may still be useful for various
    numerical purposes.

    The following norms can be calculated:

    =====  ============================  ==========================
    ord    norm for matrices             norm for vectors
    =====  ============================  ==========================
    None   Frobenius norm                2-norm
    'fro'  Frobenius norm                --
    'nuc'  nuclear norm                  --
    inf    max(sum(abs(x), axis=1))      max(abs(x))
    -inf   min(sum(abs(x), axis=1))      min(abs(x))
    0      --                            sum(x != 0)
    1      max(sum(abs(x), axis=0))      as below
    -1     min(sum(abs(x), axis=0))      as below
    2      2-norm (largest sing. value)  as below
    -2     smallest singular value       as below
    other  --                            sum(abs(x)**ord)**(1./ord)
    =====  ============================  ==========================

    The Frobenius norm is given by [1]_:
    :math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`

    The nuclear norm is the sum of the singular values.

    Both the Frobenius and nuclear norm orders are only defined for
    matrices and raise a ValueError when ``x.ndim != 2``.

    References
    ----------
    .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
           Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15

    Examples
    --------
    >>> from numpy import linalg as LA
    >>> LA.norm(np.arange(9) - 4)
    7.745966692414834
    """
    x = asarray(x)

    # Integer/bool inputs are promoted to float so the reductions below
    # behave like real norms (object arrays are left alone).
    if not issubclass(x.dtype.type, (inexact, object_)):
        x = x.astype(float)

    # Immediately handle some default, simple, fast, and common cases.
    if axis is None:
        ndim = x.ndim
        if ((ord is None) or
                (ord in ('f', 'fro') and ndim == 2) or
                (ord == 2 and ndim == 1)):
            # Euclidean/Frobenius norm of the flattened data: a dot
            # product of the ravelled array with itself.
            x = x.ravel(order='K')
            if isComplexType(x.dtype.type):
                # Avoid complex arithmetic: |z|^2 = re^2 + im^2.
                x_real = x.real
                x_imag = x.imag
                sqnorm = x_real.dot(x_real) + x_imag.dot(x_imag)
            else:
                sqnorm = x.dot(x)
            ret = sqrt(sqnorm)
            if keepdims:
                # All axes were reduced; restore them as length-1 dims.
                ret = ret.reshape(ndim*[1])
            return ret

    # Normalize the `axis` argument to a tuple.
    nd = x.ndim
    if axis is None:
        axis = tuple(range(nd))
    elif not isinstance(axis, tuple):
        try:
            axis = int(axis)
        except Exception as e:
            raise TypeError("'axis' must be None, an integer or a tuple of integers") from e
        axis = (axis,)

    if len(axis) == 1:
        # Vector norms along a single axis.
        if ord == Inf:
            return abs(x).max(axis=axis, keepdims=keepdims)
        elif ord == -Inf:
            return abs(x).min(axis=axis, keepdims=keepdims)
        elif ord == 0:
            # Zero norm: count of nonzero entries.
            return (x != 0).astype(x.real.dtype).sum(axis=axis, keepdims=keepdims)
        elif ord == 1:
            # special case for speedup
            return add.reduce(abs(x), axis=axis, keepdims=keepdims)
        elif ord is None or ord == 2:
            # special case for speedup
            s = (x.conj() * x).real
            return sqrt(add.reduce(s, axis=axis, keepdims=keepdims))
        # None of the str-type keywords for ord ('fro', 'nuc')
        # are valid for vectors
        elif isinstance(ord, str):
            raise ValueError(f"Invalid norm order '{ord}' for vectors")
        else:
            # General p-norm: sum(|x|**ord) ** (1/ord).
            absx = abs(x)
            absx **= ord
            ret = add.reduce(absx, axis=axis, keepdims=keepdims)
            ret **= reciprocal(ord, dtype=ret.dtype)
            return ret
    elif len(axis) == 2:
        # Matrix norms over a pair of axes.
        row_axis, col_axis = axis
        row_axis = normalize_axis_index(row_axis, nd)
        col_axis = normalize_axis_index(col_axis, nd)
        if row_axis == col_axis:
            raise ValueError('Duplicate axes given.')
        if ord == 2:
            ret = _multi_svd_norm(x, row_axis, col_axis, amax)
        elif ord == -2:
            ret = _multi_svd_norm(x, row_axis, col_axis, amin)
        elif ord == 1:
            # Max column sum.  After reducing row_axis, any axis past it
            # shifts down by one, hence the adjustment.
            if col_axis > row_axis:
                col_axis -= 1
            ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis)
        elif ord == Inf:
            # Max row sum; same axis-shift logic as above.
            if row_axis > col_axis:
                row_axis -= 1
            ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis)
        elif ord == -1:
            # Min column sum.
            if col_axis > row_axis:
                col_axis -= 1
            ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis)
        elif ord == -Inf:
            # Min row sum.
            if row_axis > col_axis:
                row_axis -= 1
            ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis)
        elif ord in [None, 'fro', 'f']:
            # Frobenius norm: sqrt of the sum of squared magnitudes.
            ret = sqrt(add.reduce((x.conj() * x).real, axis=axis))
        elif ord == 'nuc':
            # Nuclear norm: sum of singular values.
            ret = _multi_svd_norm(x, row_axis, col_axis, sum)
        else:
            raise ValueError("Invalid norm order for matrices.")
        if keepdims:
            ret_shape = list(x.shape)
            ret_shape[axis[0]] = 1
            ret_shape[axis[1]] = 1
            ret = ret.reshape(ret_shape)
        return ret
    else:
        raise ValueError("Improper number of dimensions to norm.")
  2121. # multi_dot
  2122. def _multidot_dispatcher(arrays, *, out=None):
  2123. yield from arrays
  2124. yield out
  2125. @array_function_dispatch(_multidot_dispatcher)
  2126. def multi_dot(arrays, *, out=None):
  2127. """
  2128. Compute the dot product of two or more arrays in a single function call,
  2129. while automatically selecting the fastest evaluation order.
  2130. `multi_dot` chains `numpy.dot` and uses optimal parenthesization
  2131. of the matrices [1]_ [2]_. Depending on the shapes of the matrices,
  2132. this can speed up the multiplication a lot.
  2133. If the first argument is 1-D it is treated as a row vector.
  2134. If the last argument is 1-D it is treated as a column vector.
  2135. The other arguments must be 2-D.
  2136. Think of `multi_dot` as::
  2137. def multi_dot(arrays): return functools.reduce(np.dot, arrays)
  2138. Parameters
  2139. ----------
  2140. arrays : sequence of array_like
  2141. If the first argument is 1-D it is treated as row vector.
  2142. If the last argument is 1-D it is treated as column vector.
  2143. The other arguments must be 2-D.
  2144. out : ndarray, optional
  2145. Output argument. This must have the exact kind that would be returned
  2146. if it was not used. In particular, it must have the right type, must be
  2147. C-contiguous, and its dtype must be the dtype that would be returned
  2148. for `dot(a, b)`. This is a performance feature. Therefore, if these
  2149. conditions are not met, an exception is raised, instead of attempting
  2150. to be flexible.
  2151. .. versionadded:: 1.19.0
  2152. Returns
  2153. -------
  2154. output : ndarray
  2155. Returns the dot product of the supplied arrays.
  2156. See Also
  2157. --------
  2158. numpy.dot : dot multiplication with two arguments.
  2159. References
  2160. ----------
  2161. .. [1] Cormen, "Introduction to Algorithms", Chapter 15.2, p. 370-378
  2162. .. [2] https://en.wikipedia.org/wiki/Matrix_chain_multiplication
  2163. Examples
  2164. --------
  2165. `multi_dot` allows you to write::
  2166. >>> from numpy.linalg import multi_dot
  2167. >>> # Prepare some data
  2168. >>> A = np.random.random((10000, 100))
  2169. >>> B = np.random.random((100, 1000))
  2170. >>> C = np.random.random((1000, 5))
  2171. >>> D = np.random.random((5, 333))
  2172. >>> # the actual dot multiplication
  2173. >>> _ = multi_dot([A, B, C, D])
  2174. instead of::
  2175. >>> _ = np.dot(np.dot(np.dot(A, B), C), D)
  2176. >>> # or
  2177. >>> _ = A.dot(B).dot(C).dot(D)
  2178. Notes
  2179. -----
  2180. The cost for a matrix multiplication can be calculated with the
  2181. following function::
  2182. def cost(A, B):
  2183. return A.shape[0] * A.shape[1] * B.shape[1]
  2184. Assume we have three matrices
  2185. :math:`A_{10x100}, B_{100x5}, C_{5x50}`.
  2186. The costs for the two different parenthesizations are as follows::
  2187. cost((AB)C) = 10*100*5 + 10*5*50 = 5000 + 2500 = 7500
  2188. cost(A(BC)) = 10*100*50 + 100*5*50 = 50000 + 25000 = 75000
  2189. """
  2190. n = len(arrays)
  2191. # optimization only makes sense for len(arrays) > 2
  2192. if n < 2:
  2193. raise ValueError("Expecting at least two arrays.")
  2194. elif n == 2:
  2195. return dot(arrays[0], arrays[1], out=out)
  2196. arrays = [asanyarray(a) for a in arrays]
  2197. # save original ndim to reshape the result array into the proper form later
  2198. ndim_first, ndim_last = arrays[0].ndim, arrays[-1].ndim
  2199. # Explicitly convert vectors to 2D arrays to keep the logic of the internal
  2200. # _multi_dot_* functions as simple as possible.
  2201. if arrays[0].ndim == 1:
  2202. arrays[0] = atleast_2d(arrays[0])
  2203. if arrays[-1].ndim == 1:
  2204. arrays[-1] = atleast_2d(arrays[-1]).T
  2205. _assert_2d(*arrays)
  2206. # _multi_dot_three is much faster than _multi_dot_matrix_chain_order
  2207. if n == 3:
  2208. result = _multi_dot_three(arrays[0], arrays[1], arrays[2], out=out)
  2209. else:
  2210. order = _multi_dot_matrix_chain_order(arrays)
  2211. result = _multi_dot(arrays, order, 0, n - 1, out=out)
  2212. # return proper shape
  2213. if ndim_first == 1 and ndim_last == 1:
  2214. return result[0, 0] # scalar
  2215. elif ndim_first == 1 or ndim_last == 1:
  2216. return result.ravel() # 1-D
  2217. else:
  2218. return result
  2219. def _multi_dot_three(A, B, C, out=None):
  2220. """
  2221. Find the best order for three arrays and do the multiplication.
  2222. For three arguments `_multi_dot_three` is approximately 15 times faster
  2223. than `_multi_dot_matrix_chain_order`
  2224. """
  2225. a0, a1b0 = A.shape
  2226. b1c0, c1 = C.shape
  2227. # cost1 = cost((AB)C) = a0*a1b0*b1c0 + a0*b1c0*c1
  2228. cost1 = a0 * b1c0 * (a1b0 + c1)
  2229. # cost2 = cost(A(BC)) = a1b0*b1c0*c1 + a0*a1b0*c1
  2230. cost2 = a1b0 * c1 * (a0 + b1c0)
  2231. if cost1 < cost2:
  2232. return dot(dot(A, B), C, out=out)
  2233. else:
  2234. return dot(A, dot(B, C), out=out)
  2235. def _multi_dot_matrix_chain_order(arrays, return_costs=False):
  2236. """
  2237. Return a np.array that encodes the optimal order of mutiplications.
  2238. The optimal order array is then used by `_multi_dot()` to do the
  2239. multiplication.
  2240. Also return the cost matrix if `return_costs` is `True`
  2241. The implementation CLOSELY follows Cormen, "Introduction to Algorithms",
  2242. Chapter 15.2, p. 370-378. Note that Cormen uses 1-based indices.
  2243. cost[i, j] = min([
  2244. cost[prefix] + cost[suffix] + cost_mult(prefix, suffix)
  2245. for k in range(i, j)])
  2246. """
  2247. n = len(arrays)
  2248. # p stores the dimensions of the matrices
  2249. # Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50]
  2250. p = [a.shape[0] for a in arrays] + [arrays[-1].shape[1]]
  2251. # m is a matrix of costs of the subproblems
  2252. # m[i,j]: min number of scalar multiplications needed to compute A_{i..j}
  2253. m = zeros((n, n), dtype=double)
  2254. # s is the actual ordering
  2255. # s[i, j] is the value of k at which we split the product A_i..A_j
  2256. s = empty((n, n), dtype=intp)
  2257. for l in range(1, n):
  2258. for i in range(n - l):
  2259. j = i + l
  2260. m[i, j] = Inf
  2261. for k in range(i, j):
  2262. q = m[i, k] + m[k+1, j] + p[i]*p[k+1]*p[j+1]
  2263. if q < m[i, j]:
  2264. m[i, j] = q
  2265. s[i, j] = k # Note that Cormen uses 1-based index
  2266. return (s, m) if return_costs else s
  2267. def _multi_dot(arrays, order, i, j, out=None):
  2268. """Actually do the multiplication with the given order."""
  2269. if i == j:
  2270. # the initial call with non-None out should never get here
  2271. assert out is None
  2272. return arrays[i]
  2273. else:
  2274. return dot(_multi_dot(arrays, order, i, order[i, j]),
  2275. _multi_dot(arrays, order, order[i, j] + 1, j),
  2276. out=out)