#
# Author: Travis Oliphant, March 2002
#
from itertools import product

import numpy as np
from numpy import (Inf, dot, diag, prod, logical_not, ravel, transpose,
                   conjugate, absolute, amax, sign, isfinite)
from numpy.lib.scimath import sqrt as csqrt

# Local imports
from scipy.linalg import LinAlgError, bandwidth
from ._misc import norm
from ._basic import solve, inv
from ._special_matrices import triu
from ._decomp_svd import svd
from ._decomp_schur import schur, rsf2csf
from ._expm_frechet import expm_frechet, expm_cond
from ._matfuncs_sqrtm import sqrtm
from ._matfuncs_expm import pick_pade_structure, pade_UV_calc

__all__ = ['expm', 'cosm', 'sinm', 'tanm', 'coshm', 'sinhm', 'tanhm', 'logm',
           'funm', 'signm', 'sqrtm', 'fractional_matrix_power', 'expm_frechet',
           'expm_cond', 'khatri_rao']

eps = np.finfo('d').eps
feps = np.finfo('f').eps
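
# Map dtype characters to a precision class used when picking tolerances
# below: 0 selects single-precision (feps-based) tolerances, 1 selects
# double-precision (eps-based) tolerances.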
_array_precision = {'i': 1, 'l': 1, 'f': 0, 'd': 1, 'F': 0, 'D': 1}


###############################################################################
# Utility functions.


def _asarray_square(A):
    """
    Wraps asarray with the extra requirement that the input be a square matrix.

    The motivation is that the matfuncs module has real functions that have
    been lifted to square matrix functions.

    Parameters
    ----------
    A : array_like
        A square matrix.

    Returns
    -------
    out : ndarray
        An ndarray copy or view or other representation of A.

    """
    A = np.asarray(A)
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected square array_like input')
    return A


def _maybe_real(A, B, tol=None):
    """
    Return either B or the real part of B, depending on properties of A and B.

    The motivation is that B has been computed as a complicated function of A,
    and B may be perturbed by negligible imaginary components.
    If A is real and B is complex with small imaginary components,
    then return a real copy of B.  The assumption in that case would be that
    the imaginary components of B are numerical artifacts.

    Parameters
    ----------
    A : ndarray
        Input array whose type is to be checked as real vs. complex.
    B : ndarray
        Array to be returned, possibly without its imaginary part.
    tol : float
        Absolute tolerance.

    Returns
    -------
    out : real or complex array
        Either the input array B or only the real part of the input array B.

    """
    # Note that booleans and integers compare as real.
    if np.isrealobj(A) and np.iscomplexobj(B):
        if tol is None:
            tol = {0: feps*1e3, 1: eps*1e6}[_array_precision[B.dtype.char]]
        if np.allclose(B.imag, 0.0, atol=tol):
            B = B.real
    return B


###############################################################################
# Matrix functions.


def fractional_matrix_power(A, t):
    """
    Compute the fractional power of a matrix.

    Proceeds according to the discussion in section (6) of [1]_.

    Parameters
    ----------
    A : (N, N) array_like
        Matrix whose fractional power to evaluate.
    t : float
        Fractional power.

    Returns
    -------
    X : (N, N) array_like
        The fractional power of the matrix.

    References
    ----------
    .. [1] Nicholas J. Higham and Lijing Lin (2011)
           "A Schur-Pade Algorithm for Fractional Powers of a Matrix."
           SIAM Journal on Matrix Analysis and Applications,
           32 (3). pp. 1056-1078. ISSN 0895-4798

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import fractional_matrix_power
    >>> a = np.array([[1.0, 3.0], [1.0, 4.0]])
    >>> b = fractional_matrix_power(a, 0.5)
    >>> b
    array([[ 0.75592895,  1.13389342],
           [ 0.37796447,  1.88982237]])
    >>> np.dot(b, b)      # Verify square root
    array([[ 1.,  3.],
           [ 1.,  4.]])

    """
    # This fixes some issue with imports;
    # this function calls onenormest which is in scipy.sparse.
    A = _asarray_square(A)
    import scipy.linalg._matfuncs_inv_ssq
    return scipy.linalg._matfuncs_inv_ssq._fractional_matrix_power(A, t)


def logm(A, disp=True):
    """
    Compute matrix logarithm.

    The matrix logarithm is the inverse of
    expm: expm(logm(`A`)) == `A`

    Parameters
    ----------
    A : (N, N) array_like
        Matrix whose logarithm to evaluate
    disp : bool, optional
        Print warning if error in the result is estimated large
        instead of returning estimated error. (Default: True)

    Returns
    -------
    logm : (N, N) ndarray
        Matrix logarithm of `A`
    errest : float
        (if disp == False)
        1-norm of the estimated error, ||err||_1 / ||A||_1

    References
    ----------
    .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2012)
           "Improved Inverse Scaling and Squaring Algorithms
           for the Matrix Logarithm."
           SIAM Journal on Scientific Computing, 34 (4). C152-C169.
           ISSN 1095-7197

    .. [2] Nicholas J. Higham (2008)
           "Functions of Matrices: Theory and Computation"
           ISBN 978-0-898716-46-7

    .. [3] Nicholas J. Higham and Lijing Lin (2011)
           "A Schur-Pade Algorithm for Fractional Powers of a Matrix."
           SIAM Journal on Matrix Analysis and Applications,
           32 (3). pp. 1056-1078. ISSN 0895-4798

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import logm, expm
    >>> a = np.array([[1.0, 3.0], [1.0, 4.0]])
    >>> b = logm(a)
    >>> b
    array([[-1.02571087,  2.05142174],
           [ 0.68380725,  1.02571087]])
    >>> expm(b)         # Verify expm(logm(a)) returns a
    array([[ 1.,  3.],
           [ 1.,  4.]])

    """
    A = _asarray_square(A)
    # Avoid circular import ... this is OK, right?
    import scipy.linalg._matfuncs_inv_ssq
    F = scipy.linalg._matfuncs_inv_ssq._logm(A)
    F = _maybe_real(A, F)
    errtol = 1000*eps
    # TODO use a better error approximation
    errest = norm(expm(F)-A, 1) / norm(A, 1)
    if disp:
        if not isfinite(errest) or errest >= errtol:
            print("logm result may be inaccurate, approximate err =", errest)
        return F
    else:
        return F, errest


def expm(A):
    """Compute the matrix exponential of an array.

    Parameters
    ----------
    A : ndarray
        Input with last two dimensions are square ``(..., n, n)``.

    Returns
    -------
    eA : ndarray
        The resulting matrix exponential with the same shape as ``A``

    Notes
    -----
    Implements the algorithm given in [1], which is essentially a Pade
    approximation with a variable order that is decided based on the array
    data.

    For input with size ``n``, the memory usage is in the worst case in the
    order of ``8*(n**2)``. If the input data is not of single or double
    precision real or complex dtype, it is copied to a new array.

    For cases ``n >= 400``, the cost of an exact 1-norm computation breaks
    even with that of 1-norm estimation, and from that point on the
    estimation scheme given in [2] is used to decide on the approximation
    order.

    References
    ----------
    .. [1] Awad H. Al-Mohy and Nicholas J. Higham, (2009), "A New Scaling
           and Squaring Algorithm for the Matrix Exponential", SIAM J. Matrix
           Anal. Appl. 31(3):970-989, :doi:`10.1137/09074721X`

    .. [2] Nicholas J. Higham and Francoise Tisseur (2000), "A Block Algorithm
           for Matrix 1-Norm Estimation, with an Application to 1-Norm
           Pseudospectra." SIAM J. Matrix Anal. Appl. 21(4):1185-1201,
           :doi:`10.1137/S0895479899356080`

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import expm, sinm, cosm

    Matrix version of the formula exp(0) = 1:

    >>> expm(np.zeros((3, 2, 2)))
    array([[[1., 0.],
            [0., 1.]],
    <BLANKLINE>
           [[1., 0.],
            [0., 1.]],
    <BLANKLINE>
           [[1., 0.],
            [0., 1.]]])

    Euler's identity (exp(i*theta) = cos(theta) + i*sin(theta))
    applied to a matrix:

    >>> a = np.array([[1.0, 2.0], [-1.0, 3.0]])
    >>> expm(1j*a)
    array([[ 0.42645930+1.89217551j, -2.13721484-0.97811252j],
           [ 1.06860742+0.48905626j, -1.71075555+0.91406299j]])
    >>> cosm(a) + 1j*sinm(a)
    array([[ 0.42645930+1.89217551j, -2.13721484-0.97811252j],
           [ 1.06860742+0.48905626j, -1.71075555+0.91406299j]])

    """
    a = np.asarray(A)
    if a.size == 1 and a.ndim < 2:
        return np.array([[np.exp(a.item())]])

    if a.ndim < 2:
        raise LinAlgError('The input array must be at least two-dimensional')
    if a.shape[-1] != a.shape[-2]:
        raise LinAlgError('Last 2 dimensions of the array must be square')
    n = a.shape[-1]

    # Empty array
    if min(*a.shape) == 0:
        return np.empty_like(a)

    # Scalar case
    if a.shape[-2:] == (1, 1):
        return np.exp(a)

    if not np.issubdtype(a.dtype, np.inexact):
        a = a.astype(float)
    elif a.dtype == np.float16:
        a = a.astype(np.float32)

    # Explicit formula for 2x2 case, formula (2.2) in [1]
    # without Kahan's method numerical instabilities can occur.
    if a.shape[-2:] == (2, 2):
        a1, a2, a3, a4 = (a[..., [0], [0]],
                          a[..., [0], [1]],
                          a[..., [1], [0]],
                          a[..., [1], [1]])
        mu = csqrt((a1-a4)**2 + 4*a2*a3)/2.  # csqrt slow but handles neg.vals

        eApD2 = np.exp((a1+a4)/2.)
        AmD2 = (a1 - a4)/2.
        coshMu = np.cosh(mu)
        sinchMu = np.ones_like(coshMu)
        mask = mu != 0
        sinchMu[mask] = np.sinh(mu[mask]) / mu[mask]
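        # sinchMu now holds sinh(mu)/mu; the removable singularity at mu == 0
        # is covered by the ones_like initialization above (sinch(0) = 1).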
        eA = np.empty((a.shape), dtype=mu.dtype)
        eA[..., [0], [0]] = eApD2 * (coshMu + AmD2*sinchMu)
        eA[..., [0], [1]] = eApD2 * a2 * sinchMu
        eA[..., [1], [0]] = eApD2 * a3 * sinchMu
        eA[..., [1], [1]] = eApD2 * (coshMu - AmD2*sinchMu)
        if np.isrealobj(a):
            return eA.real
        return eA

    # larger problem with unspecified stacked dimensions.
    n = a.shape[-1]
    eA = np.empty(a.shape, dtype=a.dtype)
    # working memory to hold intermediate arrays
    Am = np.empty((5, n, n), dtype=a.dtype)

    # Main loop to go through the slices of an ndarray and passing to expm
    for ind in product(*[range(x) for x in a.shape[:-2]]):
        aw = a[ind]

        lu = bandwidth(aw)
        if not any(lu):  # a is diagonal?
            eA[ind] = np.diag(np.exp(np.diag(aw)))
            continue

        # Generic/triangular case; copy the slice into scratch and send.
        # Am will be mutated by pick_pade_structure
        Am[0, :, :] = aw
        m, s = pick_pade_structure(Am)
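        # m is the Pade approximation order chosen for this slice and s the
        # number of scaling-and-squaring steps: the slice is scaled by
        # 2**(-s) below and the result is squared s times afterwards.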
        if s != 0:  # scaling needed
            Am[:4] *= [[[2**(-s)]], [[4**(-s)]], [[16**(-s)]], [[64**(-s)]]]

        pade_UV_calc(Am, n, m)
        eAw = Am[0]

        if s != 0:  # squaring needed

            if (lu[1] == 0) or (lu[0] == 0):  # lower/upper triangular
                # This branch implements Code Fragment 2.1 of [1]

                diag_aw = np.diag(aw)
                # einsum returns a writable view
                np.einsum('ii->i', eAw)[:] = np.exp(diag_aw * 2**(-s))
                # super/sub diagonal
                sd = np.diag(aw, k=-1 if lu[1] == 0 else 1)
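                # In each of the s squarings below, the diagonal and the
                # first super/sub-diagonal of the running result are rewritten
                # from the entrywise exponential of the rescaled triangular
                # input, per Code Fragment 2.1 of [1].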
                for i in range(s-1, -1, -1):
                    eAw = eAw @ eAw

                    # diagonal
                    np.einsum('ii->i', eAw)[:] = np.exp(diag_aw * 2.**(-i))
                    exp_sd = _exp_sinch(diag_aw * (2.**(-i))) * (sd * 2**(-i))
                    if lu[1] == 0:  # lower
                        np.einsum('ii->i', eAw[1:, :-1])[:] = exp_sd
                    else:  # upper
                        np.einsum('ii->i', eAw[:-1, 1:])[:] = exp_sd

            else:  # generic
                for _ in range(s):
                    eAw = eAw @ eAw

        # Zero out the entries from np.empty in case of triangular input
        if (lu[0] == 0) or (lu[1] == 0):
            eA[ind] = np.triu(eAw) if lu[0] == 0 else np.tril(eAw)
        else:
            eA[ind] = eAw

    return eA
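

# _exp_sinch evaluates the first divided differences of exp, i.e.
# (exp(x[i+1]) - exp(x[i])) / (x[i+1] - x[i]), falling back to exp(x[i]) when
# consecutive entries coincide.  expm uses it above for the super/sub-diagonal
# update in the triangular squaring phase.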
def _exp_sinch(x):
    # Higham's formula (10.42), might overflow, see GH-11839
    lexp_diff = np.diff(np.exp(x))
    l_diff = np.diff(x)
    mask_z = l_diff == 0.
    lexp_diff[~mask_z] /= l_diff[~mask_z]
    lexp_diff[mask_z] = np.exp(x[:-1][mask_z])
    return lexp_diff


def cosm(A):
    """
    Compute the matrix cosine.

    This routine uses expm to compute the matrix exponentials.

    Parameters
    ----------
    A : (N, N) array_like
        Input array

    Returns
    -------
    cosm : (N, N) ndarray
        Matrix cosine of A

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import expm, sinm, cosm

    Euler's identity (exp(i*theta) = cos(theta) + i*sin(theta))
    applied to a matrix:

    >>> a = np.array([[1.0, 2.0], [-1.0, 3.0]])
    >>> expm(1j*a)
    array([[ 0.42645930+1.89217551j, -2.13721484-0.97811252j],
           [ 1.06860742+0.48905626j, -1.71075555+0.91406299j]])
    >>> cosm(a) + 1j*sinm(a)
    array([[ 0.42645930+1.89217551j, -2.13721484-0.97811252j],
           [ 1.06860742+0.48905626j, -1.71075555+0.91406299j]])

    """
    A = _asarray_square(A)
    if np.iscomplexobj(A):
        return 0.5*(expm(1j*A) + expm(-1j*A))
    else:
        return expm(1j*A).real


def sinm(A):
    """
    Compute the matrix sine.

    This routine uses expm to compute the matrix exponentials.

    Parameters
    ----------
    A : (N, N) array_like
        Input array.

    Returns
    -------
    sinm : (N, N) ndarray
        Matrix sine of `A`

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import expm, sinm, cosm

    Euler's identity (exp(i*theta) = cos(theta) + i*sin(theta))
    applied to a matrix:

    >>> a = np.array([[1.0, 2.0], [-1.0, 3.0]])
    >>> expm(1j*a)
    array([[ 0.42645930+1.89217551j, -2.13721484-0.97811252j],
           [ 1.06860742+0.48905626j, -1.71075555+0.91406299j]])
    >>> cosm(a) + 1j*sinm(a)
    array([[ 0.42645930+1.89217551j, -2.13721484-0.97811252j],
           [ 1.06860742+0.48905626j, -1.71075555+0.91406299j]])

    """
    A = _asarray_square(A)
    if np.iscomplexobj(A):
        return -0.5j*(expm(1j*A) - expm(-1j*A))
    else:
        return expm(1j*A).imag


def tanm(A):
    """
    Compute the matrix tangent.

    This routine uses expm to compute the matrix exponentials.

    Parameters
    ----------
    A : (N, N) array_like
        Input array.

    Returns
    -------
    tanm : (N, N) ndarray
        Matrix tangent of `A`

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import tanm, sinm, cosm
    >>> a = np.array([[1.0, 3.0], [1.0, 4.0]])
    >>> t = tanm(a)
    >>> t
    array([[ -2.00876993,  -8.41880636],
           [ -2.80626879, -10.42757629]])

    Verify tanm(a) = sinm(a).dot(inv(cosm(a)))

    >>> s = sinm(a)
    >>> c = cosm(a)
    >>> s.dot(np.linalg.inv(c))
    array([[ -2.00876993,  -8.41880636],
           [ -2.80626879, -10.42757629]])

    """
    A = _asarray_square(A)
    return _maybe_real(A, solve(cosm(A), sinm(A)))


def coshm(A):
    """
    Compute the hyperbolic matrix cosine.

    This routine uses expm to compute the matrix exponentials.

    Parameters
    ----------
    A : (N, N) array_like
        Input array.

    Returns
    -------
    coshm : (N, N) ndarray
        Hyperbolic matrix cosine of `A`

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import tanhm, sinhm, coshm
    >>> a = np.array([[1.0, 3.0], [1.0, 4.0]])
    >>> c = coshm(a)
    >>> c
    array([[ 11.24592233,  38.76236492],
           [ 12.92078831,  50.00828725]])

    Verify tanhm(a) = sinhm(a).dot(inv(coshm(a)))

    >>> t = tanhm(a)
    >>> s = sinhm(a)
    >>> t - s.dot(np.linalg.inv(c))
    array([[  2.72004641e-15,   4.55191440e-15],
           [  0.00000000e+00,  -5.55111512e-16]])

    """
    A = _asarray_square(A)
    return _maybe_real(A, 0.5 * (expm(A) + expm(-A)))


def sinhm(A):
    """
    Compute the hyperbolic matrix sine.

    This routine uses expm to compute the matrix exponentials.

    Parameters
    ----------
    A : (N, N) array_like
        Input array.

    Returns
    -------
    sinhm : (N, N) ndarray
        Hyperbolic matrix sine of `A`

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import tanhm, sinhm, coshm
    >>> a = np.array([[1.0, 3.0], [1.0, 4.0]])
    >>> s = sinhm(a)
    >>> s
    array([[ 10.57300653,  39.28826594],
           [ 13.09608865,  49.86127247]])

    Verify tanhm(a) = sinhm(a).dot(inv(coshm(a)))

    >>> t = tanhm(a)
    >>> c = coshm(a)
    >>> t - s.dot(np.linalg.inv(c))
    array([[  2.72004641e-15,   4.55191440e-15],
           [  0.00000000e+00,  -5.55111512e-16]])

    """
    A = _asarray_square(A)
    return _maybe_real(A, 0.5 * (expm(A) - expm(-A)))


def tanhm(A):
    """
    Compute the hyperbolic matrix tangent.

    This routine uses expm to compute the matrix exponentials.

    Parameters
    ----------
    A : (N, N) array_like
        Input array

    Returns
    -------
    tanhm : (N, N) ndarray
        Hyperbolic matrix tangent of `A`

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import tanhm, sinhm, coshm
    >>> a = np.array([[1.0, 3.0], [1.0, 4.0]])
    >>> t = tanhm(a)
    >>> t
    array([[ 0.3428582 ,  0.51987926],
           [ 0.17329309,  0.86273746]])

    Verify tanhm(a) = sinhm(a).dot(inv(coshm(a)))

    >>> s = sinhm(a)
    >>> c = coshm(a)
    >>> t - s.dot(np.linalg.inv(c))
    array([[  2.72004641e-15,   4.55191440e-15],
           [  0.00000000e+00,  -5.55111512e-16]])

    """
    A = _asarray_square(A)
    return _maybe_real(A, solve(coshm(A), sinhm(A)))


def funm(A, func, disp=True):
    """
    Evaluate a matrix function specified by a callable.

    Returns the value of matrix-valued function ``f`` at `A`. The
    function ``f`` is an extension of the scalar-valued function `func`
    to matrices.

    Parameters
    ----------
    A : (N, N) array_like
        Matrix at which to evaluate the function
    func : callable
        Callable object that evaluates a scalar function f.
        Must be vectorized (e.g. using vectorize).
    disp : bool, optional
        Print warning if error in the result is estimated large
        instead of returning estimated error. (Default: True)

    Returns
    -------
    funm : (N, N) ndarray
        Value of the matrix function specified by func evaluated at `A`
    errest : float
        (if disp == False)
        1-norm of the estimated error, ||err||_1 / ||A||_1

    Notes
    -----
    This function implements the general algorithm based on Schur decomposition
    (Algorithm 9.1.1. in [1]_).

    If the input matrix is known to be diagonalizable, then relying on the
    eigendecomposition is likely to be faster. For example, if your matrix is
    Hermitian, you can do

    >>> from scipy.linalg import eigh
    >>> def funm_herm(a, func, check_finite=False):
    ...     w, v = eigh(a, check_finite=check_finite)
    ...     ## if you further know that your matrix is positive semidefinite,
    ...     ## you can optionally guard against precision errors by doing
    ...     # w = np.maximum(w, 0)
    ...     w = func(w)
    ...     return (v * w).dot(v.conj().T)

    References
    ----------
    .. [1] Gene H. Golub, Charles F. van Loan, Matrix Computations 4th ed.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import funm
    >>> a = np.array([[1.0, 3.0], [1.0, 4.0]])
    >>> funm(a, lambda x: x*x)
    array([[  4.,  15.],
           [  5.,  19.]])
    >>> a.dot(a)
    array([[  4.,  15.],
           [  5.,  19.]])

    """
    A = _asarray_square(A)
    # Perform Schur decomposition (lapack ?gees)
    T, Z = schur(A)
    T, Z = rsf2csf(T, Z)
    n, n = T.shape
    F = diag(func(diag(T)))  # apply function to diagonal elements
    F = F.astype(T.dtype.char)  # e.g., when F is real but T is complex

    minden = abs(T[0, 0])

    # implement Algorithm 11.1.1 from Golub and Van Loan
    # "Matrix Computations."
    for p in range(1, n):
        for i in range(1, n-p+1):
            j = i + p
            s = T[i-1, j-1] * (F[j-1, j-1] - F[i-1, i-1])
            ksl = slice(i, j-1)
            val = dot(T[i-1, ksl], F[ksl, j-1]) - dot(F[i-1, ksl], T[ksl, j-1])
            s = s + val
            den = T[j-1, j-1] - T[i-1, i-1]
            if den != 0.0:
                s = s / den
            F[i-1, j-1] = s
            minden = min(minden, abs(den))

    F = dot(dot(Z, F), transpose(conjugate(Z)))
    F = _maybe_real(A, F)

    tol = {0: feps, 1: eps}[_array_precision[F.dtype.char]]
    if minden == 0.0:
        minden = tol
    err = min(1, max(tol, (tol/minden)*norm(triu(T, 1), 1)))
    if prod(ravel(logical_not(isfinite(F))), axis=0):
        err = Inf
    if disp:
        if err > 1000*tol:
            print("funm result may be inaccurate, approximate err =", err)
        return F
    else:
        return F, err


def signm(A, disp=True):
    """
    Matrix sign function.

    Extension of the scalar sign(x) to matrices.

    Parameters
    ----------
    A : (N, N) array_like
        Matrix at which to evaluate the sign function
    disp : bool, optional
        Print warning if error in the result is estimated large
        instead of returning estimated error. (Default: True)

    Returns
    -------
    signm : (N, N) ndarray
        Value of the sign function at `A`
    errest : float
        (if disp == False)
        1-norm of the estimated error, ||err||_1 / ||A||_1

    Examples
    --------
    >>> from scipy.linalg import signm, eigvals
    >>> a = [[1,2,3], [1,2,1], [1,1,1]]
    >>> eigvals(a)
    array([ 4.12488542+0.j, -0.76155718+0.j,  0.63667176+0.j])
    >>> eigvals(signm(a))
    array([-1.+0.j,  1.+0.j,  1.+0.j])

    """
    A = _asarray_square(A)

    def rounded_sign(x):
        rx = np.real(x)
        if rx.dtype.char == 'f':
            c = 1e3*feps*amax(x)
        else:
            c = 1e3*eps*amax(x)
        return sign((absolute(rx) > c) * rx)

    result, errest = funm(A, rounded_sign, disp=0)
    errtol = {0: 1e3*feps, 1: 1e3*eps}[_array_precision[result.dtype.char]]
    if errest < errtol:
        return result

    # Handle signm of defective matrices:

    # See "E.D.Denman and J.Leyva-Ramos, Appl.Math.Comp.,
    # 8:237-250,1981" for how to improve the following (currently a
    # rather naive) iteration process:

    # a = result # sometimes iteration converges faster but where??

    # Shifting to avoid zero eigenvalues. How to ensure that shifting does
    # not change the spectrum too much?
    vals = svd(A, compute_uv=False)
    max_sv = np.amax(vals)
    # min_nonzero_sv = vals[(vals>max_sv*errtol).tolist().count(1)-1]
    # c = 0.5/min_nonzero_sv
    c = 0.5/max_sv
    S0 = A + c*np.identity(A.shape[0])
    prev_errest = errest
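    # Newton iteration for the matrix sign function: S <- (S + S**-1)/2.
    # The error estimate measures how far Pp = (S**2 + S)/2 is from being a
    # projector (Pp @ Pp == Pp holds at the exact sign).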
    for i in range(100):
        iS0 = inv(S0)
        S0 = 0.5*(S0 + iS0)
        Pp = 0.5*(dot(S0, S0) + S0)
        errest = norm(dot(Pp, Pp) - Pp, 1)
        if errest < errtol or prev_errest == errest:
            break
        prev_errest = errest
    if disp:
        if not isfinite(errest) or errest >= errtol:
            print("signm result may be inaccurate, approximate err =", errest)
        return S0
    else:
        return S0, errest


def khatri_rao(a, b):
    r"""
    Khatri-Rao product

    A column-wise Kronecker product of two matrices

    Parameters
    ----------
    a : (n, k) array_like
        Input array
    b : (m, k) array_like
        Input array

    Returns
    -------
    c : (n*m, k) ndarray
        Khatri-Rao product of `a` and `b`.

    See Also
    --------
    kron : Kronecker product

    Notes
    -----
    The mathematical definition of the Khatri-Rao product is:

    .. math::

        (A_{ij}  \bigotimes B_{ij})_{ij}

    which is the Kronecker product of every column of A and B, e.g.::

        c = np.vstack([np.kron(a[:, k], b[:, k]) for k in range(b.shape[1])]).T

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import linalg
    >>> a = np.array([[1, 2, 3], [4, 5, 6]])
    >>> b = np.array([[3, 4, 5], [6, 7, 8], [2, 3, 9]])
    >>> linalg.khatri_rao(a, b)
    array([[ 3,  8, 15],
           [ 6, 14, 24],
           [ 2,  6, 27],
           [12, 20, 30],
           [24, 35, 48],
           [ 8, 15, 54]])

    """
    a = np.asarray(a)
    b = np.asarray(b)

    if not (a.ndim == 2 and b.ndim == 2):
        raise ValueError("Both arrays should be 2-dimensional.")

    if not a.shape[1] == b.shape[1]:
        raise ValueError("The number of columns for both arrays "
                         "should be equal.")

    # c = np.vstack([np.kron(a[:, k], b[:, k]) for k in range(b.shape[1])]).T
    c = a[..., :, np.newaxis, :] * b[..., np.newaxis, :, :]
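    # Broadcasting: a contributes shape (n, 1, k) and b shape (1, m, k), so c
    # has shape (n, m, k); the reshape below collapses the first two axes into
    # n*m rows, giving the column-wise Kronecker product without a Python loop.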
    return c.reshape((-1,) + c.shape[2:])