
# Author: Travis Oliphant
# 1999 -- 2002

import operator
import math
import timeit
from scipy.spatial import cKDTree
from . import _sigtools
from ._ltisys import dlti
from ._upfirdn import upfirdn, _output_len, _upfirdn_modes
from scipy import linalg, fft as sp_fft
from scipy.fft._helper import _init_nd_shape_and_axes
from scipy._lib._util import prod as _prod
import numpy as np
from scipy.special import lambertw
from .windows import get_window
from ._arraytools import (axis_slice, axis_reverse, odd_ext, even_ext,
                          const_ext)
from ._filter_design import cheby1, _validate_sos, zpk2sos
from ._fir_filter_design import firwin
from ._sosfilt import _sosfilt
import warnings

__all__ = ['correlate', 'correlation_lags', 'correlate2d',
           'convolve', 'convolve2d', 'fftconvolve', 'oaconvolve',
           'order_filter', 'medfilt', 'medfilt2d', 'wiener', 'lfilter',
           'lfiltic', 'sosfilt', 'deconvolve', 'hilbert', 'hilbert2',
           'cmplx_sort', 'unique_roots', 'invres', 'invresz', 'residue',
           'residuez', 'resample', 'resample_poly', 'detrend',
           'lfilter_zi', 'sosfilt_zi', 'sosfiltfilt', 'choose_conv_method',
           'filtfilt', 'decimate', 'vectorstrength']


_modedict = {'valid': 0, 'same': 1, 'full': 2}

_boundarydict = {'fill': 0, 'pad': 0, 'wrap': 2, 'circular': 2, 'symm': 1,
                 'symmetric': 1, 'reflect': 4}


def _valfrommode(mode):
    try:
        return _modedict[mode]
    except KeyError as e:
        raise ValueError("Acceptable mode flags are 'valid',"
                         " 'same', or 'full'.") from e


def _bvalfromboundary(boundary):
    try:
        return _boundarydict[boundary] << 2
    except KeyError as e:
        raise ValueError("Acceptable boundary flags are 'fill', 'circular' "
                         "(or 'wrap'), and 'symmetric' (or 'symm').") from e


def _inputs_swap_needed(mode, shape1, shape2, axes=None):
    """Determine if input arrays need to be swapped in `"valid"` mode.

    If in `"valid"` mode, returns whether or not the input arrays need to be
    swapped depending on whether `shape1` is at least as large as `shape2` in
    every calculated dimension.

    This is important for some of the correlation and convolution
    implementations in this module, where the larger array input needs to come
    before the smaller array input when operating in this mode.

    Note that if the mode provided is not 'valid', False is immediately
    returned.

    """
    if mode != 'valid':
        return False

    if not shape1:
        return False

    if axes is None:
        axes = range(len(shape1))

    ok1 = all(shape1[i] >= shape2[i] for i in axes)
    ok2 = all(shape2[i] >= shape1[i] for i in axes)

    if not (ok1 or ok2):
        raise ValueError("For 'valid' mode, one must be at least "
                         "as large as the other in every dimension")

    return not ok1
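

# A brief interactive sketch of how _inputs_swap_needed behaves: a swap is
# only reported in 'valid' mode, and only when the second shape is the
# (strictly) larger one in every checked axis:
#
#     >>> _inputs_swap_needed('valid', (10, 10), (3, 3))
#     False
#     >>> _inputs_swap_needed('valid', (3, 3), (10, 10))
#     True
#     >>> _inputs_swap_needed('full', (3, 3), (10, 10))
#     False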


def correlate(in1, in2, mode='full', method='auto'):
    r"""
    Cross-correlate two N-dimensional arrays.

    Cross-correlate `in1` and `in2`, with the output size determined by the
    `mode` argument.

    Parameters
    ----------
    in1 : array_like
        First input.
    in2 : array_like
        Second input. Should have the same number of dimensions as `in1`.
    mode : str {'full', 'valid', 'same'}, optional
        A string indicating the size of the output:

        ``full``
           The output is the full discrete linear cross-correlation
           of the inputs. (Default)
        ``valid``
           The output consists only of those elements that do not
           rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
           must be at least as large as the other in every dimension.
        ``same``
           The output is the same size as `in1`, centered
           with respect to the 'full' output.
    method : str {'auto', 'direct', 'fft'}, optional
        A string indicating which method to use to calculate the correlation.

        ``direct``
           The correlation is determined directly from sums, the definition of
           correlation.
        ``fft``
           The Fast Fourier Transform is used to perform the correlation more
           quickly (only available for numerical arrays.)
        ``auto``
           Automatically chooses direct or Fourier method based on an estimate
           of which is faster (default).  See `convolve` Notes for more detail.

           .. versionadded:: 0.19.0

    Returns
    -------
    correlate : array
        An N-dimensional array containing a subset of the discrete linear
        cross-correlation of `in1` with `in2`.

    See Also
    --------
    choose_conv_method : contains more documentation on `method`.
    correlation_lags : calculates the lag / displacement indices array for 1D
        cross-correlation.

    Notes
    -----
    The correlation z of two d-dimensional arrays x and y is defined as::

        z[...,k,...] = sum[..., i_l, ...] x[..., i_l,...] * conj(y[..., i_l - k,...])

    This way, if x and y are 1-D arrays and ``z = correlate(x, y, 'full')``
    then

    .. math::

          z[k] = (x * y)(k - N + 1)
               = \sum_{l=0}^{||x||-1}x_l y_{l-k+N-1}^{*}

    for :math:`k = 0, 1, ..., ||x|| + ||y|| - 2`

    where :math:`||x||` is the length of ``x``, :math:`N = \max(||x||,||y||)`,
    and :math:`y_m` is 0 when m is outside the range of y.

    ``method='fft'`` only works for numerical arrays as it relies on
    `fftconvolve`. In certain cases (i.e., arrays of objects or when
    rounding integers can lose precision), ``method='direct'`` is always used.

    When using "same" mode with even-length inputs, the outputs of `correlate`
    and `correlate2d` differ: There is a 1-index offset between them.

    Examples
    --------
    Implement a matched filter using cross-correlation, to recover a signal
    that has passed through a noisy channel.

    >>> import numpy as np
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> rng = np.random.default_rng()

    >>> sig = np.repeat([0., 1., 1., 0., 1., 0., 0., 1.], 128)
    >>> sig_noise = sig + rng.standard_normal(len(sig))
    >>> corr = signal.correlate(sig_noise, np.ones(128), mode='same') / 128

    >>> clock = np.arange(64, len(sig), 128)
    >>> fig, (ax_orig, ax_noise, ax_corr) = plt.subplots(3, 1, sharex=True)
    >>> ax_orig.plot(sig)
    >>> ax_orig.plot(clock, sig[clock], 'ro')
    >>> ax_orig.set_title('Original signal')
    >>> ax_noise.plot(sig_noise)
    >>> ax_noise.set_title('Signal with noise')
    >>> ax_corr.plot(corr)
    >>> ax_corr.plot(clock, corr[clock], 'ro')
    >>> ax_corr.axhline(0.5, ls=':')
    >>> ax_corr.set_title('Cross-correlated with rectangular pulse')
    >>> ax_orig.margins(0, 0.1)
    >>> fig.tight_layout()
    >>> plt.show()

    Compute the cross-correlation of a noisy signal with the original signal.

    >>> x = np.arange(128) / 128
    >>> sig = np.sin(2 * np.pi * x)
    >>> sig_noise = sig + rng.standard_normal(len(sig))
    >>> corr = signal.correlate(sig_noise, sig)
    >>> lags = signal.correlation_lags(len(sig), len(sig_noise))
    >>> corr /= np.max(corr)

    >>> fig, (ax_orig, ax_noise, ax_corr) = plt.subplots(3, 1, figsize=(4.8, 4.8))
    >>> ax_orig.plot(sig)
    >>> ax_orig.set_title('Original signal')
    >>> ax_orig.set_xlabel('Sample Number')
    >>> ax_noise.plot(sig_noise)
    >>> ax_noise.set_title('Signal with noise')
    >>> ax_noise.set_xlabel('Sample Number')
    >>> ax_corr.plot(lags, corr)
    >>> ax_corr.set_title('Cross-correlated signal')
    >>> ax_corr.set_xlabel('Lag')
    >>> ax_orig.margins(0, 0.1)
    >>> ax_noise.margins(0, 0.1)
    >>> ax_corr.margins(0, 0.1)
    >>> fig.tight_layout()
    >>> plt.show()

    """
    in1 = np.asarray(in1)
    in2 = np.asarray(in2)

    if in1.ndim == in2.ndim == 0:
        return in1 * in2.conj()
    elif in1.ndim != in2.ndim:
        raise ValueError("in1 and in2 should have the same dimensionality")

    # Don't use _valfrommode, since correlate should not accept numeric modes
    try:
        val = _modedict[mode]
    except KeyError as e:
        raise ValueError("Acceptable mode flags are 'valid',"
                         " 'same', or 'full'.") from e

    # this either calls fftconvolve or this function with method=='direct'
    if method in ('fft', 'auto'):
        return convolve(in1, _reverse_and_conj(in2), mode, method)

    elif method == 'direct':
        # fastpath to faster numpy.correlate for 1d inputs when possible
        if _np_conv_ok(in1, in2, mode):
            return np.correlate(in1, in2, mode)

        # _correlateND is far slower when in2.size > in1.size, so swap them
        # and then undo the effect afterward if mode == 'full'.  Also, it fails
        # with 'valid' mode if in2 is larger than in1, so swap those, too.
        # Don't swap inputs for 'same' mode, since shape of in1 matters.
        swapped_inputs = ((mode == 'full') and (in2.size > in1.size) or
                          _inputs_swap_needed(mode, in1.shape, in2.shape))

        if swapped_inputs:
            in1, in2 = in2, in1

        if mode == 'valid':
            ps = [i - j + 1 for i, j in zip(in1.shape, in2.shape)]
            out = np.empty(ps, in1.dtype)

            z = _sigtools._correlateND(in1, in2, out, val)

        else:
            ps = [i + j - 1 for i, j in zip(in1.shape, in2.shape)]

            # zero pad input
            in1zpadded = np.zeros(ps, in1.dtype)
            sc = tuple(slice(0, i) for i in in1.shape)
            in1zpadded[sc] = in1.copy()

            if mode == 'full':
                out = np.empty(ps, in1.dtype)
            elif mode == 'same':
                out = np.empty(in1.shape, in1.dtype)

            z = _sigtools._correlateND(in1zpadded, in2, out, val)

        if swapped_inputs:
            # Reverse and conjugate to undo the effect of swapping inputs
            z = _reverse_and_conj(z)

        return z

    else:
        raise ValueError("Acceptable method flags are 'auto',"
                         " 'direct', or 'fft'.")
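

# A small worked check of the z[k] definition above, borrowing the example
# from the NumPy documentation; for 1-D inputs in 'full' mode `correlate`
# agrees with `numpy.correlate`:
#
#     >>> import numpy as np
#     >>> from scipy import signal
#     >>> signal.correlate([1, 2, 3], [0, 1, 0.5], mode='full')
#     array([0.5, 2. , 3.5, 3. , 0. ])
#     >>> np.correlate([1, 2, 3], [0, 1, 0.5], mode='full')
#     array([0.5, 2. , 3.5, 3. , 0. ])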


def correlation_lags(in1_len, in2_len, mode='full'):
    r"""
    Calculates the lag / displacement indices array for 1D cross-correlation.

    Parameters
    ----------
    in1_len : int
        First input size.
    in2_len : int
        Second input size.
    mode : str {'full', 'valid', 'same'}, optional
        A string indicating the size of the output.
        See the documentation of `correlate` for more information.

    Returns
    -------
    lags : array
        Returns an array containing cross-correlation lag/displacement indices.
        Indices can be indexed with the np.argmax of the correlation to return
        the lag/displacement.

    See Also
    --------
    correlate : Compute the N-dimensional cross-correlation.

    Notes
    -----
    Cross-correlation for continuous functions :math:`f` and :math:`g` is
    defined as:

    .. math::

        \left ( f\star g \right )\left ( \tau \right )
        \triangleq \int_{t_0}^{t_0 +T}
        \overline{f\left ( t \right )}g\left ( t+\tau \right )dt

    Where :math:`\tau` is defined as the displacement, also known as the lag.

    Cross-correlation for discrete functions :math:`f` and :math:`g` is
    defined as:

    .. math::

        \left ( f\star g \right )\left [ n \right ]
        \triangleq \sum_{-\infty}^{\infty}
        \overline{f\left [ m \right ]}g\left [ m+n \right ]

    Where :math:`n` is the lag.

    Examples
    --------
    Cross-correlation of a signal with its time-delayed self.

    >>> import numpy as np
    >>> from scipy import signal
    >>> rng = np.random.default_rng()
    >>> x = rng.standard_normal(1000)
    >>> y = np.concatenate([rng.standard_normal(100), x])
    >>> correlation = signal.correlate(x, y, mode="full")
    >>> lags = signal.correlation_lags(x.size, y.size, mode="full")
    >>> lag = lags[np.argmax(correlation)]

    """
    # calculate lag ranges in different modes of operation
    if mode == "full":
        # the output is the full discrete linear convolution
        # of the inputs. (Default)
        lags = np.arange(-in2_len + 1, in1_len)
    elif mode == "same":
        # the output is the same size as `in1`, centered
        # with respect to the 'full' output.
        # calculate the full output
        lags = np.arange(-in2_len + 1, in1_len)
        # determine the midpoint in the full output
        mid = lags.size // 2
        # determine lag_bound to be used with respect
        # to the midpoint
        lag_bound = in1_len // 2
        # calculate lag ranges for even and odd scenarios
        if in1_len % 2 == 0:
            lags = lags[(mid-lag_bound):(mid+lag_bound)]
        else:
            lags = lags[(mid-lag_bound):(mid+lag_bound)+1]
    elif mode == "valid":
        # the output consists only of those elements that do not
        # rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
        # must be at least as large as the other in every dimension.

        # the lag_bound will be either negative or positive
        # this lets us infer how to present the lag range
        lag_bound = in1_len - in2_len
        if lag_bound >= 0:
            lags = np.arange(lag_bound + 1)
        else:
            lags = np.arange(lag_bound, 1)
    return lags
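

# A quick sketch of the lag arrays produced by the branches above for small
# input sizes:
#
#     >>> correlation_lags(4, 4, mode="full")
#     array([-3, -2, -1,  0,  1,  2,  3])
#     >>> correlation_lags(6, 4, mode="valid")
#     array([0, 1, 2])
#     >>> correlation_lags(4, 6, mode="valid")
#     array([-2, -1,  0])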


def _centered(arr, newshape):
    # Return the center newshape portion of the array.
    newshape = np.asarray(newshape)
    currshape = np.array(arr.shape)
    startind = (currshape - newshape) // 2
    endind = startind + newshape
    myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
    return arr[tuple(myslice)]
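

# For instance, trimming a length-7 'full' result down to a length-3 window
# keeps the central block:
#
#     >>> import numpy as np
#     >>> _centered(np.arange(7), (3,))
#     array([2, 3, 4])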


def _init_freq_conv_axes(in1, in2, mode, axes, sorted_axes=False):
    """Handle the axes argument for frequency-domain convolution.

    Returns the inputs and axes in a standard form, eliminating redundant axes,
    swapping the inputs if necessary, and checking for various potential
    errors.

    Parameters
    ----------
    in1 : array
        First input.
    in2 : array
        Second input.
    mode : str {'full', 'valid', 'same'}, optional
        A string indicating the size of the output.
        See the documentation of `fftconvolve` for more information.
    axes : list of ints
        Axes over which to compute the FFTs.
    sorted_axes : bool, optional
        If `True`, sort the axes.
        Default is `False`, do not sort.

    Returns
    -------
    in1 : array
        The first input, possibly swapped with the second input.
    in2 : array
        The second input, possibly swapped with the first input.
    axes : list of ints
        Axes over which to compute the FFTs.

    """
    s1 = in1.shape
    s2 = in2.shape
    noaxes = axes is None

    _, axes = _init_nd_shape_and_axes(in1, shape=None, axes=axes)

    if not noaxes and not len(axes):
        raise ValueError("when provided, axes cannot be empty")

    # Axes of length 1 can rely on broadcasting rules for multiply,
    # no fft needed.
    axes = [a for a in axes if s1[a] != 1 and s2[a] != 1]

    if sorted_axes:
        axes.sort()

    if not all(s1[a] == s2[a] or s1[a] == 1 or s2[a] == 1
               for a in range(in1.ndim) if a not in axes):
        raise ValueError("incompatible shapes for in1 and in2:"
                         " {0} and {1}".format(s1, s2))

    # Check that input sizes are compatible with 'valid' mode.
    if _inputs_swap_needed(mode, s1, s2, axes=axes):
        # Convolution is commutative; order doesn't have any effect on output.
        in1, in2 = in2, in1

    return in1, in2, axes


def _freq_domain_conv(in1, in2, axes, shape, calc_fast_len=False):
    """Convolve two arrays in the frequency domain.

    This function implements only the base FFT-related operations.
    Specifically, it converts the signals to the frequency domain, multiplies
    them, then converts them back to the time domain.  Calculations of axes,
    shapes, convolution mode, etc. are implemented in higher-level functions,
    such as `fftconvolve` and `oaconvolve`.  Those functions should be used
    instead of this one.

    Parameters
    ----------
    in1 : array_like
        First input.
    in2 : array_like
        Second input. Should have the same number of dimensions as `in1`.
    axes : array_like of ints
        Axes over which to compute the FFTs.
    shape : array_like of ints
        The sizes of the FFTs.
    calc_fast_len : bool, optional
        If `True`, set each value of `shape` to the next fast FFT length.
        Default is `False`, use `axes` as-is.

    Returns
    -------
    out : array
        An N-dimensional array containing the discrete linear convolution of
        `in1` with `in2`.

    """
    if not len(axes):
        return in1 * in2

    complex_result = (in1.dtype.kind == 'c' or in2.dtype.kind == 'c')

    if calc_fast_len:
        # Speed up FFT by padding to optimal size.
        fshape = [
            sp_fft.next_fast_len(shape[a], not complex_result) for a in axes]
    else:
        fshape = shape

    if not complex_result:
        fft, ifft = sp_fft.rfftn, sp_fft.irfftn
    else:
        fft, ifft = sp_fft.fftn, sp_fft.ifftn

    sp1 = fft(in1, fshape, axes=axes)
    sp2 = fft(in2, fshape, axes=axes)

    ret = ifft(sp1 * sp2, fshape, axes=axes)

    if calc_fast_len:
        fslice = tuple([slice(sz) for sz in shape])
        ret = ret[fslice]

    return ret
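

# The convolution theorem this helper relies on, shown in one dimension: a
# zero-padded forward transform, pointwise product, and inverse transform
# reproduce np.convolve (a rough standalone sketch using scipy.fft directly,
# not the helper itself):
#
#     >>> import numpy as np
#     >>> from scipy import fft as sp_fft
#     >>> x, h = np.array([1., 2., 3.]), np.array([0., 1., 0.5])
#     >>> n = len(x) + len(h) - 1
#     >>> via_fft = sp_fft.irfft(sp_fft.rfft(x, n) * sp_fft.rfft(h, n), n)
#     >>> bool(np.allclose(via_fft, np.convolve(x, h)))
#     True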


def _apply_conv_mode(ret, s1, s2, mode, axes):
    """Calculate the convolution result shape based on the `mode` argument.

    Returns the result sliced to the correct size for the given mode.

    Parameters
    ----------
    ret : array
        The result array, with the appropriate shape for the 'full' mode.
    s1 : list of int
        The shape of the first input.
    s2 : list of int
        The shape of the second input.
    mode : str {'full', 'valid', 'same'}
        A string indicating the size of the output.
        See the documentation of `fftconvolve` for more information.
    axes : list of ints
        Axes over which to compute the convolution.

    Returns
    -------
    ret : array
        A copy of `ret`, sliced to the correct size for the given `mode`.

    """
    if mode == "full":
        return ret.copy()
    elif mode == "same":
        return _centered(ret, s1).copy()
    elif mode == "valid":
        shape_valid = [ret.shape[a] if a not in axes else s1[a] - s2[a] + 1
                       for a in range(ret.ndim)]
        return _centered(ret, shape_valid).copy()
    else:
        raise ValueError("acceptable mode flags are 'valid',"
                         " 'same', or 'full'")


def fftconvolve(in1, in2, mode="full", axes=None):
    """Convolve two N-dimensional arrays using FFT.

    Convolve `in1` and `in2` using the fast Fourier transform method, with
    the output size determined by the `mode` argument.

    This is generally much faster than `convolve` for large arrays (n > ~500),
    but can be slower when only a few output values are needed, and can only
    output float arrays (int or object array inputs will be cast to float).

    As of v0.19, `convolve` automatically chooses this method or the direct
    method based on an estimation of which is faster.

    Parameters
    ----------
    in1 : array_like
        First input.
    in2 : array_like
        Second input. Should have the same number of dimensions as `in1`.
    mode : str {'full', 'valid', 'same'}, optional
        A string indicating the size of the output:

        ``full``
           The output is the full discrete linear convolution
           of the inputs. (Default)
        ``valid``
           The output consists only of those elements that do not
           rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
           must be at least as large as the other in every dimension.
        ``same``
           The output is the same size as `in1`, centered
           with respect to the 'full' output.
    axes : int or array_like of ints or None, optional
        Axes over which to compute the convolution.
        The default is over all axes.

    Returns
    -------
    out : array
        An N-dimensional array containing a subset of the discrete linear
        convolution of `in1` with `in2`.

    See Also
    --------
    convolve : Uses the direct convolution or FFT convolution algorithm
               depending on which is faster.
    oaconvolve : Uses the overlap-add method to do convolution, which is
                 generally faster when the input arrays are large and
                 significantly different in size.

    Examples
    --------
    Autocorrelation of white noise is an impulse.

    >>> import numpy as np
    >>> from scipy import signal
    >>> rng = np.random.default_rng()
    >>> sig = rng.standard_normal(1000)
    >>> autocorr = signal.fftconvolve(sig, sig[::-1], mode='full')

    >>> import matplotlib.pyplot as plt
    >>> fig, (ax_orig, ax_mag) = plt.subplots(2, 1)
    >>> ax_orig.plot(sig)
    >>> ax_orig.set_title('White noise')
    >>> ax_mag.plot(np.arange(-len(sig)+1,len(sig)), autocorr)
    >>> ax_mag.set_title('Autocorrelation')
    >>> fig.tight_layout()
    >>> fig.show()

    Gaussian blur implemented using FFT convolution.  Notice the dark borders
    around the image, due to the zero-padding beyond its boundaries.
    The `convolve2d` function allows for other types of image boundaries,
    but is far slower.

    >>> from scipy import datasets
    >>> face = datasets.face(gray=True)
    >>> kernel = np.outer(signal.windows.gaussian(70, 8),
    ...                   signal.windows.gaussian(70, 8))
    >>> blurred = signal.fftconvolve(face, kernel, mode='same')

    >>> fig, (ax_orig, ax_kernel, ax_blurred) = plt.subplots(3, 1,
    ...                                                      figsize=(6, 15))
    >>> ax_orig.imshow(face, cmap='gray')
    >>> ax_orig.set_title('Original')
    >>> ax_orig.set_axis_off()
    >>> ax_kernel.imshow(kernel, cmap='gray')
    >>> ax_kernel.set_title('Gaussian kernel')
    >>> ax_kernel.set_axis_off()
    >>> ax_blurred.imshow(blurred, cmap='gray')
    >>> ax_blurred.set_title('Blurred')
    >>> ax_blurred.set_axis_off()
    >>> fig.show()

    """
    in1 = np.asarray(in1)
    in2 = np.asarray(in2)

    if in1.ndim == in2.ndim == 0:  # scalar inputs
        return in1 * in2
    elif in1.ndim != in2.ndim:
        raise ValueError("in1 and in2 should have the same dimensionality")
    elif in1.size == 0 or in2.size == 0:  # empty arrays
        return np.array([])

    in1, in2, axes = _init_freq_conv_axes(in1, in2, mode, axes,
                                          sorted_axes=False)

    s1 = in1.shape
    s2 = in2.shape

    shape = [max((s1[i], s2[i])) if i not in axes else s1[i] + s2[i] - 1
             for i in range(in1.ndim)]

    ret = _freq_domain_conv(in1, in2, axes, shape, calc_fast_len=True)

    return _apply_conv_mode(ret, s1, s2, mode, axes)


def _calc_oa_lens(s1, s2):
    """Calculate the optimal FFT lengths for overlap-add convolution.

    The calculation is done for a single dimension.

    Parameters
    ----------
    s1 : int
        Size of the dimension for the first array.
    s2 : int
        Size of the dimension for the second array.

    Returns
    -------
    block_size : int
        The size of the FFT blocks.
    overlap : int
        The amount of overlap between two blocks.
    in1_step : int
        The size of each step for the first array.
    in2_step : int
        The size of each step for the second array.

    """
    # Set up the arguments for the conventional FFT approach.
    fallback = (s1+s2-1, None, s1, s2)

    # Use conventional FFT convolve if sizes are the same.
    if s1 == s2 or s1 == 1 or s2 == 1:
        return fallback

    if s2 > s1:
        s1, s2 = s2, s1
        swapped = True
    else:
        swapped = False

    # There cannot be a useful block size if s2 is more than half of s1.
    if s2 >= s1/2:
        return fallback

    # Derivation of optimal block length
    # For original formula see:
    # https://en.wikipedia.org/wiki/Overlap-add_method
    #
    # Formula:
    # K = overlap = s2-1
    # N = block_size
    # C = complexity
    # e = exponential, exp(1)
    #
    # C = (N*(log2(N)+1))/(N-K)
    # C = (N*log2(2N))/(N-K)
    # C = N/(N-K) * log2(2N)
    # C1 = N/(N-K)
    # C2 = log2(2N) = ln(2N)/ln(2)
    #
    # dC1/dN = (1*(N-K)-N)/(N-K)^2 = -K/(N-K)^2
    # dC2/dN = 2/(2*N*ln(2)) = 1/(N*ln(2))
    #
    # dC/dN = dC1/dN*C2 + dC2/dN*C1
    # dC/dN = -K*ln(2N)/(ln(2)*(N-K)^2) + N/(N*ln(2)*(N-K))
    # dC/dN = -K*ln(2N)/(ln(2)*(N-K)^2) + 1/(ln(2)*(N-K))
    # dC/dN = -K*ln(2N)/(ln(2)*(N-K)^2) + (N-K)/(ln(2)*(N-K)^2)
    # dC/dN = (-K*ln(2N) + (N-K))/(ln(2)*(N-K)^2)
    # dC/dN = (N - K*ln(2N) - K)/(ln(2)*(N-K)^2)
    #
    # Solve for minimum, where dC/dN = 0
    # 0 = (N - K*ln(2N) - K)/(ln(2)*(N-K)^2)
    # 0 * ln(2)*(N-K)^2 = N - K*ln(2N) - K
    # 0 = N - K*ln(2N) - K
    # 0 = N - K*(ln(2N) + 1)
    # 0 = N - K*ln(2Ne)
    # N = K*ln(2Ne)
    # N/K = ln(2Ne)
    #
    # e^(N/K) = e^ln(2Ne)
    # e^(N/K) = 2Ne
    # 1/e^(N/K) = 1/(2*N*e)
    # e^(N/-K) = 1/(2*N*e)
    # e^(N/-K) = K/N*1/(2*K*e)
    # N/K*e^(N/-K) = 1/(2*e*K)
    # N/-K*e^(N/-K) = -1/(2*e*K)
    #
    # Using the Lambert W function
    # https://en.wikipedia.org/wiki/Lambert_W_function
    # x = W(y) It is the solution to y = x*e^x
    # x = N/-K
    # y = -1/(2*e*K)
    #
    # N/-K = W(-1/(2*e*K))
    #
    # N = -K*W(-1/(2*e*K))
    overlap = s2-1
    opt_size = -overlap*lambertw(-1/(2*math.e*overlap), k=-1).real
    block_size = sp_fft.next_fast_len(math.ceil(opt_size))

    # Use conventional FFT convolve if there is only going to be one block.
    if block_size >= s1:
        return fallback

    if not swapped:
        in1_step = block_size-s2+1
        in2_step = s2
    else:
        in1_step = s2
        in2_step = block_size-s2+1

    return block_size, overlap, in1_step, in2_step
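

# A worked instance of the block-length formula above (a rough check): for
# s1=1000 and s2=20 the overlap is K = 19, the optimum
# N = -19*W_{-1}(-1/(2*e*19)) is roughly 124, and next_fast_len should round
# that up to the 5-smooth size 125, so the call below should return
# approximately:
#
#     >>> _calc_oa_lens(1000, 20)
#     (125, 19, 106, 20)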


def oaconvolve(in1, in2, mode="full", axes=None):
    """Convolve two N-dimensional arrays using the overlap-add method.

    Convolve `in1` and `in2` using the overlap-add method, with
    the output size determined by the `mode` argument.

    This is generally much faster than `convolve` for large arrays (n > ~500),
    and generally much faster than `fftconvolve` when one array is much
    larger than the other, but can be slower when only a few output values are
    needed or when the arrays are very similar in shape, and can only
    output float arrays (int or object array inputs will be cast to float).

    Parameters
    ----------
    in1 : array_like
        First input.
    in2 : array_like
        Second input. Should have the same number of dimensions as `in1`.
    mode : str {'full', 'valid', 'same'}, optional
        A string indicating the size of the output:

        ``full``
           The output is the full discrete linear convolution
           of the inputs. (Default)
        ``valid``
           The output consists only of those elements that do not
           rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
           must be at least as large as the other in every dimension.
        ``same``
           The output is the same size as `in1`, centered
           with respect to the 'full' output.
    axes : int or array_like of ints or None, optional
        Axes over which to compute the convolution.
        The default is over all axes.

    Returns
    -------
    out : array
        An N-dimensional array containing a subset of the discrete linear
        convolution of `in1` with `in2`.

    See Also
    --------
    convolve : Uses the direct convolution or FFT convolution algorithm
               depending on which is faster.
    fftconvolve : An implementation of convolution using FFT.

    Notes
    -----
    .. versionadded:: 1.4.0

    References
    ----------
    .. [1] Wikipedia, "Overlap-add_method".
           https://en.wikipedia.org/wiki/Overlap-add_method
    .. [2] Richard G. Lyons. Understanding Digital Signal Processing,
           Third Edition, 2011. Chapter 13.10.
           ISBN 13: 978-0137-02741-5

    Examples
    --------
    Convolve a 100,000 sample signal with a 512-sample filter.

    >>> import numpy as np
    >>> from scipy import signal
    >>> rng = np.random.default_rng()
    >>> sig = rng.standard_normal(100000)
    >>> filt = signal.firwin(512, 0.01)
    >>> fsig = signal.oaconvolve(sig, filt)

    >>> import matplotlib.pyplot as plt
    >>> fig, (ax_orig, ax_mag) = plt.subplots(2, 1)
    >>> ax_orig.plot(sig)
    >>> ax_orig.set_title('White noise')
    >>> ax_mag.plot(fsig)
    >>> ax_mag.set_title('Filtered noise')
    >>> fig.tight_layout()
    >>> fig.show()

    """
    in1 = np.asarray(in1)
    in2 = np.asarray(in2)

    if in1.ndim == in2.ndim == 0:  # scalar inputs
        return in1 * in2
    elif in1.ndim != in2.ndim:
        raise ValueError("in1 and in2 should have the same dimensionality")
    elif in1.size == 0 or in2.size == 0:  # empty arrays
        return np.array([])
    elif in1.shape == in2.shape:  # Equivalent to fftconvolve
        return fftconvolve(in1, in2, mode=mode, axes=axes)

    in1, in2, axes = _init_freq_conv_axes(in1, in2, mode, axes,
                                          sorted_axes=True)

    s1 = in1.shape
    s2 = in2.shape

    if not axes:
        ret = in1 * in2
        return _apply_conv_mode(ret, s1, s2, mode, axes)

    # Calculate this now since in1 is changed later
    shape_final = [None if i not in axes else
                   s1[i] + s2[i] - 1 for i in range(in1.ndim)]

    # Calculate the block sizes for the output, steps, first and second inputs.
    # It is simpler to calculate them all together than doing them in separate
    # loops due to all the special cases that need to be handled.
    optimal_sizes = ((-1, -1, s1[i], s2[i]) if i not in axes else
                     _calc_oa_lens(s1[i], s2[i]) for i in range(in1.ndim))
    block_size, overlaps, \
        in1_step, in2_step = zip(*optimal_sizes)

    # Fall back to fftconvolve if there is only one block in every dimension.
    if in1_step == s1 and in2_step == s2:
        return fftconvolve(in1, in2, mode=mode, axes=axes)

    # Figure out the number of steps and padding.
    # This would get too complicated in a list comprehension.
    nsteps1 = []
    nsteps2 = []
    pad_size1 = []
    pad_size2 = []
    for i in range(in1.ndim):
        if i not in axes:
            pad_size1 += [(0, 0)]
            pad_size2 += [(0, 0)]
            continue

        if s1[i] > in1_step[i]:
            curnstep1 = math.ceil((s1[i]+1)/in1_step[i])
            if (block_size[i] - overlaps[i])*curnstep1 < shape_final[i]:
                curnstep1 += 1

            curpad1 = curnstep1*in1_step[i] - s1[i]
        else:
            curnstep1 = 1
            curpad1 = 0

        if s2[i] > in2_step[i]:
            curnstep2 = math.ceil((s2[i]+1)/in2_step[i])
            if (block_size[i] - overlaps[i])*curnstep2 < shape_final[i]:
                curnstep2 += 1

            curpad2 = curnstep2*in2_step[i] - s2[i]
        else:
            curnstep2 = 1
            curpad2 = 0

        nsteps1 += [curnstep1]
        nsteps2 += [curnstep2]
        pad_size1 += [(0, curpad1)]
        pad_size2 += [(0, curpad2)]

    # Pad the array to a size that can be reshaped to the desired shape
    # if necessary.
    if not all(curpad == (0, 0) for curpad in pad_size1):
        in1 = np.pad(in1, pad_size1, mode='constant', constant_values=0)

    if not all(curpad == (0, 0) for curpad in pad_size2):
        in2 = np.pad(in2, pad_size2, mode='constant', constant_values=0)

    # Reshape the overlap-add parts to input block sizes.
    split_axes = [iax+i for i, iax in enumerate(axes)]
    fft_axes = [iax+1 for iax in split_axes]

    # We need to put each new dimension before the corresponding dimension
    # being reshaped in order to get the data in the right layout at the end.
    reshape_size1 = list(in1_step)
    reshape_size2 = list(in2_step)
    for i, iax in enumerate(split_axes):
        reshape_size1.insert(iax, nsteps1[i])
        reshape_size2.insert(iax, nsteps2[i])

    in1 = in1.reshape(*reshape_size1)
    in2 = in2.reshape(*reshape_size2)

    # Do the convolution.
    fft_shape = [block_size[i] for i in axes]
    ret = _freq_domain_conv(in1, in2, fft_axes, fft_shape, calc_fast_len=False)

    # Do the overlap-add.
    for ax, ax_fft, ax_split in zip(axes, fft_axes, split_axes):
        overlap = overlaps[ax]
        if overlap is None:
            continue

        ret, overpart = np.split(ret, [-overlap], ax_fft)
        overpart = np.split(overpart, [-1], ax_split)[0]

        ret_overpart = np.split(ret, [overlap], ax_fft)[0]
        ret_overpart = np.split(ret_overpart, [1], ax_split)[1]
        ret_overpart += overpart

    # Reshape back to the correct dimensionality.
    shape_ret = [ret.shape[i] if i not in fft_axes else
                 ret.shape[i]*ret.shape[i-1]
                 for i in range(ret.ndim) if i not in split_axes]
    ret = ret.reshape(*shape_ret)

    # Slice to the correct size.
    slice_final = tuple([slice(islice) for islice in shape_final])
    ret = ret[slice_final]

    return _apply_conv_mode(ret, s1, s2, mode, axes)


def _numeric_arrays(arrays, kinds='buifc'):
    """
    See if a list of arrays are all numeric.

    Parameters
    ----------
    arrays : array or list of arrays
        arrays to check if numeric.
    kinds : string-like
        The dtypes of the arrays to be checked. If the dtype.kind of
        the ndarrays are not in this string the function returns False and
        otherwise returns True.
    """
    if type(arrays) == np.ndarray:
        return arrays.dtype.kind in kinds
    for array_ in arrays:
        if array_.dtype.kind not in kinds:
            return False
    return True


def _conv_ops(x_shape, h_shape, mode):
    """
    Find the number of operations required for direct/fft methods of
    convolution. The direct operations were recorded by making a dummy class to
    record the number of operations by overriding ``__mul__`` and ``__add__``.
    The FFT operations rely on the (well-known) computational complexity of the
    FFT (and the implementation of ``_freq_domain_conv``).

    """
    if mode == "full":
        out_shape = [n + k - 1 for n, k in zip(x_shape, h_shape)]
    elif mode == "valid":
        out_shape = [abs(n - k) + 1 for n, k in zip(x_shape, h_shape)]
    elif mode == "same":
        out_shape = x_shape
    else:
        raise ValueError("Acceptable mode flags are 'valid',"
                         " 'same', or 'full', not mode={}".format(mode))

    s1, s2 = x_shape, h_shape
    if len(x_shape) == 1:
        s1, s2 = s1[0], s2[0]
        if mode == "full":
            direct_ops = s1 * s2
        elif mode == "valid":
            direct_ops = (s2 - s1 + 1) * s1 if s2 >= s1 else (s1 - s2 + 1) * s2
        elif mode == "same":
            direct_ops = (s1 * s2 if s1 < s2 else
                          s1 * s2 - (s2 // 2) * ((s2 + 1) // 2))
    else:
        if mode == "full":
            direct_ops = min(_prod(s1), _prod(s2)) * _prod(out_shape)
        elif mode == "valid":
            direct_ops = min(_prod(s1), _prod(s2)) * _prod(out_shape)
        elif mode == "same":
            direct_ops = _prod(s1) * _prod(s2)

    full_out_shape = [n + k - 1 for n, k in zip(x_shape, h_shape)]
    N = _prod(full_out_shape)
    fft_ops = 3 * N * np.log(N)  # 3 separate FFTs of size full_out_shape
    return fft_ops, direct_ops
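

# Rough operation counts for a length-1000 signal and a length-100 kernel in
# 'full' mode: the direct estimate is 1000*100 = 100000 multiply-adds, while
# the FFT estimate is 3*N*ln(N) with N = 1099, i.e. roughly 2.3e4, so the
# cost model already favours the FFT here (a quick sketch):
#
#     >>> fft_ops, direct_ops = _conv_ops((1000,), (100,), 'full')
#     >>> direct_ops
#     100000
#     >>> bool(fft_ops < direct_ops)
#     True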


def _fftconv_faster(x, h, mode):
    """
    See if using fftconvolve or convolve is faster.

    Parameters
    ----------
    x : np.ndarray
        Signal
    h : np.ndarray
        Kernel
    mode : str
        Mode passed to convolve

    Returns
    -------
    fft_faster : bool

    Notes
    -----
    See docstring of `choose_conv_method` for details on tuning hardware.

    See pull request 11031 for more detail:
    https://github.com/scipy/scipy/pull/11031.

    """
    fft_ops, direct_ops = _conv_ops(x.shape, h.shape, mode)
    offset = -1e-3 if x.ndim == 1 else -1e-4
    constants = {
            "valid": (1.89095737e-9, 2.1364985e-10, offset),
            "full": (1.7649070e-9, 2.1414831e-10, offset),
            "same": (3.2646654e-9, 2.8478277e-10, offset)
            if h.size <= x.size
            else (3.21635404e-9, 1.1773253e-8, -1e-5),
    } if x.ndim == 1 else {
            "valid": (1.85927e-9, 2.11242e-8, offset),
            "full": (1.99817e-9, 1.66174e-8, offset),
            "same": (2.04735e-9, 1.55367e-8, offset),
    }
    O_fft, O_direct, O_offset = constants[mode]
    return O_fft * fft_ops < O_direct * direct_ops + O_offset
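

# With the hard-coded constants this reduces to a deterministic threshold on
# the two operation counts; e.g. a tiny kernel keeps the direct method while
# a long one tips the balance toward the FFT (a rough sketch, values depend
# only on the constants above, not on the array contents):
#
#     >>> import numpy as np
#     >>> bool(_fftconv_faster(np.zeros(100_000), np.zeros(3), 'full'))
#     False
#     >>> bool(_fftconv_faster(np.zeros(100_000), np.zeros(1_000), 'full'))
#     True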


def _reverse_and_conj(x):
    """
    Reverse array `x` in all dimensions and perform the complex conjugate
    """
    reverse = (slice(None, None, -1),) * x.ndim
    return x[reverse].conj()
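

# This helper is what lets `correlate` reuse `convolve`: correlating with y
# is the same as convolving with the reversed, conjugated y (a small check):
#
#     >>> import numpy as np
#     >>> x = np.array([1., 2., 3., 4.])
#     >>> y = np.array([1., 0., 0.5])
#     >>> bool(np.allclose(np.convolve(x, _reverse_and_conj(y)),
#     ...                  np.correlate(x, y, mode='full')))
#     True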


def _np_conv_ok(volume, kernel, mode):
    """
    See if numpy supports convolution of `volume` and `kernel` (i.e. both are
    1D ndarrays and of the appropriate shape).  NumPy's 'same' mode uses the
    size of the larger input, while SciPy's uses the size of the first input.

    Invalid mode strings will return False and be caught by the calling func.
    """
    if volume.ndim == kernel.ndim == 1:
        if mode in ('full', 'valid'):
            return True
        elif mode == 'same':
            return volume.size >= kernel.size
    else:
        return False


def _timeit_fast(stmt="pass", setup="pass", repeat=3):
    """
    Returns the time the statement/function took, in seconds.

    Faster, less precise version of IPython's timeit. `stmt` can be a statement
    written as a string or a callable.

    Will do only 1 loop (like IPython's timeit) with no repetitions
    (unlike IPython) for very slow functions.  For fast functions, only does
    enough loops to take 5 ms, which seems to produce similar results (on
    Windows at least), and avoids doing an extraneous cycle that isn't
    measured.

    """
    timer = timeit.Timer(stmt, setup)

    # determine number of calls per rep so total time for 1 rep >= 5 ms
    x = 0
    for p in range(0, 10):
        number = 10**p
        x = timer.timeit(number)  # seconds
        if x >= 5e-3 / 10:  # 5 ms for final test, 1/10th that for this one
            break
    if x > 1:  # second
        # If it's macroscopic, don't bother with repetitions
        best = x
    else:
        number *= 10
        r = timer.repeat(repeat, number)
        best = min(r)

    sec = best / number
    return sec
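

# Usage mirrors timeit.Timer: pass either a statement string or a callable
# (a quick sketch; the measured value depends on the machine):
#
#     >>> t = _timeit_fast(lambda: sorted(range(1000)))
#     >>> t > 0
#     True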
  944. def choose_conv_method(in1, in2, mode='full', measure=False):
  945. """
  946. Find the fastest convolution/correlation method.
  947. This primarily exists to be called during the ``method='auto'`` option in
  948. `convolve` and `correlate`. It can also be used to determine the value of
  949. ``method`` for many different convolutions of the same dtype/shape.
  950. In addition, it supports timing the convolution to adapt the value of
  951. ``method`` to a particular set of inputs and/or hardware.
  952. Parameters
  953. ----------
  954. in1 : array_like
  955. The first argument passed into the convolution function.
  956. in2 : array_like
  957. The second argument passed into the convolution function.
  958. mode : str {'full', 'valid', 'same'}, optional
  959. A string indicating the size of the output:
  960. ``full``
  961. The output is the full discrete linear convolution
  962. of the inputs. (Default)
  963. ``valid``
  964. The output consists only of those elements that do not
  965. rely on the zero-padding.
  966. ``same``
  967. The output is the same size as `in1`, centered
  968. with respect to the 'full' output.
  969. measure : bool, optional
  970. If True, run and time the convolution of `in1` and `in2` with both
  971. methods and return the fastest. If False (default), predict the fastest
  972. method using precomputed values.
  973. Returns
  974. -------
  975. method : str
  976. A string indicating which convolution method is fastest, either
  977. 'direct' or 'fft'
  978. times : dict, optional
  979. A dictionary containing the times (in seconds) needed for each method.
  980. This value is only returned if ``measure=True``.
  981. See Also
  982. --------
  983. convolve
  984. correlate
  985. Notes
  986. -----
  987. Generally, this method is 99% accurate for 2D signals and 85% accurate
  988. for 1D signals for randomly chosen input sizes. For precision, use
  989. ``measure=True`` to find the fastest method by timing the convolution.
  990. This can be used to avoid the minimal overhead of finding the fastest
  991. ``method`` later, or to adapt the value of ``method`` to a particular set
  992. of inputs.
  993. Experiments were run on an Amazon EC2 r5a.2xlarge machine to test this
  994. function. These experiments measured the ratio between the time required
  995. when using ``method='auto'`` and the time required for the fastest method
  996. (i.e., ``ratio = time_auto / min(time_fft, time_direct)``). In these
  997. experiments, we found:
  998. * There is a 95% chance of this ratio being less than 1.5 for 1D signals
  999. and a 99% chance of being less than 2.5 for 2D signals.
  1000. * The ratio was always less than 2.5/5 for 1D/2D signals respectively.
  1001. * This function is most inaccurate for 1D convolutions that take between 1
  1002. and 10 milliseconds with ``method='direct'``. A good proxy for this
  1003. (at least in our experiments) is ``1e6 <= in1.size * in2.size <= 1e7``.
  1004. The 2D results almost certainly generalize to 3D/4D/etc because the
  1005. implementation is the same (the 1D implementation is different).
  1006. All the numbers above are specific to the EC2 machine. However, we did find
  1007. that this function generalizes fairly decently across hardware. The speed
tests were of similar quality to (and even slightly better than) the same
  1009. tests performed on the machine to tune this function's numbers (a mid-2014
  1010. 15-inch MacBook Pro with 16GB RAM and a 2.5GHz Intel i7 processor).
  1011. There are cases when `fftconvolve` supports the inputs but this function
returns `direct` (e.g., to protect against loss of integer precision in
floating point arithmetic).
  1014. .. versionadded:: 0.19
  1015. Examples
  1016. --------
  1017. Estimate the fastest method for a given input:
  1018. >>> import numpy as np
  1019. >>> from scipy import signal
  1020. >>> rng = np.random.default_rng()
  1021. >>> img = rng.random((32, 32))
  1022. >>> filter = rng.random((8, 8))
  1023. >>> method = signal.choose_conv_method(img, filter, mode='same')
  1024. >>> method
  1025. 'fft'
  1026. This can then be applied to other arrays of the same dtype and shape:
  1027. >>> img2 = rng.random((32, 32))
  1028. >>> filter2 = rng.random((8, 8))
  1029. >>> corr2 = signal.correlate(img2, filter2, mode='same', method=method)
  1030. >>> conv2 = signal.convolve(img2, filter2, mode='same', method=method)
  1031. The output of this function (``method``) works with `correlate` and
  1032. `convolve`.
  1033. """
  1034. volume = np.asarray(in1)
  1035. kernel = np.asarray(in2)
  1036. if measure:
  1037. times = {}
  1038. for method in ['fft', 'direct']:
  1039. times[method] = _timeit_fast(lambda: convolve(volume, kernel,
  1040. mode=mode, method=method))
  1041. chosen_method = 'fft' if times['fft'] < times['direct'] else 'direct'
  1042. return chosen_method, times
  1043. # for integer input,
  1044. # catch when more precision required than float provides (representing an
  1045. # integer as float can lose precision in fftconvolve if larger than 2**52)
  1046. if any([_numeric_arrays([x], kinds='ui') for x in [volume, kernel]]):
  1047. max_value = int(np.abs(volume).max()) * int(np.abs(kernel).max())
  1048. max_value *= int(min(volume.size, kernel.size))
  1049. if max_value > 2**np.finfo('float').nmant - 1:
  1050. return 'direct'
  1051. if _numeric_arrays([volume, kernel], kinds='b'):
  1052. return 'direct'
  1053. if _numeric_arrays([volume, kernel]):
  1054. if _fftconv_faster(volume, kernel, mode):
  1055. return 'fft'
  1056. return 'direct'
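# Illustrative sketch (not part of SciPy; hypothetical helper): with
# ``measure=True`` the function times both methods on the actual inputs and
# also returns the timing dictionary, which can be cached and reused for
# later convolutions of the same dtype and shape.
def _example_choose_conv_method_measure():
    rng = np.random.default_rng(0)
    a = rng.random(10_000)
    b = rng.random(100)
    method, times = choose_conv_method(a, b, mode='full', measure=True)
    return method, times['fft'], times['direct']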
  1057. def convolve(in1, in2, mode='full', method='auto'):
  1058. """
  1059. Convolve two N-dimensional arrays.
  1060. Convolve `in1` and `in2`, with the output size determined by the
  1061. `mode` argument.
  1062. Parameters
  1063. ----------
  1064. in1 : array_like
  1065. First input.
  1066. in2 : array_like
  1067. Second input. Should have the same number of dimensions as `in1`.
  1068. mode : str {'full', 'valid', 'same'}, optional
  1069. A string indicating the size of the output:
  1070. ``full``
  1071. The output is the full discrete linear convolution
  1072. of the inputs. (Default)
  1073. ``valid``
  1074. The output consists only of those elements that do not
  1075. rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
  1076. must be at least as large as the other in every dimension.
  1077. ``same``
  1078. The output is the same size as `in1`, centered
  1079. with respect to the 'full' output.
  1080. method : str {'auto', 'direct', 'fft'}, optional
  1081. A string indicating which method to use to calculate the convolution.
  1082. ``direct``
  1083. The convolution is determined directly from sums, the definition of
  1084. convolution.
  1085. ``fft``
  1086. The Fourier Transform is used to perform the convolution by calling
  1087. `fftconvolve`.
  1088. ``auto``
  1089. Automatically chooses direct or Fourier method based on an estimate
  1090. of which is faster (default). See Notes for more detail.
  1091. .. versionadded:: 0.19.0
  1092. Returns
  1093. -------
  1094. convolve : array
  1095. An N-dimensional array containing a subset of the discrete linear
  1096. convolution of `in1` with `in2`.
  1097. Warns
  1098. -----
  1099. RuntimeWarning
  1100. Use of the FFT convolution on input containing NAN or INF will lead
  1101. to the entire output being NAN or INF. Use method='direct' when your
  1102. input contains NAN or INF values.
  1103. See Also
  1104. --------
  1105. numpy.polymul : performs polynomial multiplication (same operation, but
  1106. also accepts poly1d objects)
  1107. choose_conv_method : chooses the fastest appropriate convolution method
  1108. fftconvolve : Always uses the FFT method.
  1109. oaconvolve : Uses the overlap-add method to do convolution, which is
  1110. generally faster when the input arrays are large and
  1111. significantly different in size.
  1112. Notes
  1113. -----
  1114. By default, `convolve` and `correlate` use ``method='auto'``, which calls
  1115. `choose_conv_method` to choose the fastest method using pre-computed
  1116. values (`choose_conv_method` can also measure real-world timing with a
  1117. keyword argument). Because `fftconvolve` relies on floating point numbers,
  1118. there are certain constraints that may force `method=direct` (more detail
  1119. in `choose_conv_method` docstring).
  1120. Examples
  1121. --------
  1122. Smooth a square pulse using a Hann window:
  1123. >>> import numpy as np
  1124. >>> from scipy import signal
  1125. >>> sig = np.repeat([0., 1., 0.], 100)
  1126. >>> win = signal.windows.hann(50)
  1127. >>> filtered = signal.convolve(sig, win, mode='same') / sum(win)
  1128. >>> import matplotlib.pyplot as plt
  1129. >>> fig, (ax_orig, ax_win, ax_filt) = plt.subplots(3, 1, sharex=True)
  1130. >>> ax_orig.plot(sig)
  1131. >>> ax_orig.set_title('Original pulse')
  1132. >>> ax_orig.margins(0, 0.1)
  1133. >>> ax_win.plot(win)
  1134. >>> ax_win.set_title('Filter impulse response')
  1135. >>> ax_win.margins(0, 0.1)
  1136. >>> ax_filt.plot(filtered)
  1137. >>> ax_filt.set_title('Filtered signal')
  1138. >>> ax_filt.margins(0, 0.1)
  1139. >>> fig.tight_layout()
  1140. >>> fig.show()
  1141. """
  1142. volume = np.asarray(in1)
  1143. kernel = np.asarray(in2)
  1144. if volume.ndim == kernel.ndim == 0:
  1145. return volume * kernel
  1146. elif volume.ndim != kernel.ndim:
  1147. raise ValueError("volume and kernel should have the same "
  1148. "dimensionality")
  1149. if _inputs_swap_needed(mode, volume.shape, kernel.shape):
  1150. # Convolution is commutative; order doesn't have any effect on output
  1151. volume, kernel = kernel, volume
  1152. if method == 'auto':
  1153. method = choose_conv_method(volume, kernel, mode=mode)
  1154. if method == 'fft':
  1155. out = fftconvolve(volume, kernel, mode=mode)
  1156. result_type = np.result_type(volume, kernel)
  1157. if result_type.kind in {'u', 'i'}:
  1158. out = np.around(out)
  1159. if np.isnan(out.flat[0]) or np.isinf(out.flat[0]):
  1160. warnings.warn("Use of fft convolution on input with NAN or inf"
  1161. " results in NAN or inf output. Consider using"
  1162. " method='direct' instead.",
  1163. category=RuntimeWarning, stacklevel=2)
  1164. return out.astype(result_type)
  1165. elif method == 'direct':
  1166. # fastpath to faster numpy.convolve for 1d inputs when possible
  1167. if _np_conv_ok(volume, kernel, mode):
  1168. return np.convolve(volume, kernel, mode)
  1169. return correlate(volume, _reverse_and_conj(kernel), mode, 'direct')
  1170. else:
  1171. raise ValueError("Acceptable method flags are 'auto',"
  1172. " 'direct', or 'fft'.")
  1173. def order_filter(a, domain, rank):
  1174. """
  1175. Perform an order filter on an N-D array.
  1176. Perform an order filter on the array in. The domain argument acts as a
  1177. mask centered over each pixel. The non-zero elements of domain are
  1178. used to select elements surrounding each input pixel which are placed
  1179. in a list. The list is sorted, and the output for that pixel is the
  1180. element corresponding to rank in the sorted list.
  1181. Parameters
  1182. ----------
  1183. a : ndarray
  1184. The N-dimensional input array.
  1185. domain : array_like
  1186. A mask array with the same number of dimensions as `a`.
  1187. Each dimension should have an odd number of elements.
  1188. rank : int
  1189. A non-negative integer which selects the element from the
  1190. sorted list (0 corresponds to the smallest element, 1 is the
  1191. next smallest element, etc.).
  1192. Returns
  1193. -------
  1194. out : ndarray
  1195. The results of the order filter in an array with the same
  1196. shape as `a`.
  1197. Examples
  1198. --------
  1199. >>> import numpy as np
  1200. >>> from scipy import signal
  1201. >>> x = np.arange(25).reshape(5, 5)
  1202. >>> domain = np.identity(3)
  1203. >>> x
  1204. array([[ 0, 1, 2, 3, 4],
  1205. [ 5, 6, 7, 8, 9],
  1206. [10, 11, 12, 13, 14],
  1207. [15, 16, 17, 18, 19],
  1208. [20, 21, 22, 23, 24]])
  1209. >>> signal.order_filter(x, domain, 0)
  1210. array([[ 0., 0., 0., 0., 0.],
  1211. [ 0., 0., 1., 2., 0.],
  1212. [ 0., 5., 6., 7., 0.],
  1213. [ 0., 10., 11., 12., 0.],
  1214. [ 0., 0., 0., 0., 0.]])
  1215. >>> signal.order_filter(x, domain, 2)
  1216. array([[ 6., 7., 8., 9., 4.],
  1217. [ 11., 12., 13., 14., 9.],
  1218. [ 16., 17., 18., 19., 14.],
  1219. [ 21., 22., 23., 24., 19.],
  1220. [ 20., 21., 22., 23., 24.]])
  1221. """
  1222. domain = np.asarray(domain)
  1223. for dimsize in domain.shape:
  1224. if (dimsize % 2) != 1:
  1225. raise ValueError("Each dimension of domain argument "
  1226. "should have an odd number of elements.")
  1227. return _sigtools._order_filterND(a, domain, rank)
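# Illustrative sketch (not part of SciPy; hypothetical helper): with a full
# 3x3 domain and rank 4 (the middle of the 9 sorted values) the order filter
# reduces to a 3x3 median filter, i.e. it matches medfilt below.
def _example_order_filter_is_median():
    rng = np.random.default_rng(0)
    x = rng.random((8, 8))
    return np.allclose(order_filter(x, np.ones((3, 3)), 4), medfilt(x, 3))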
  1228. def medfilt(volume, kernel_size=None):
  1229. """
  1230. Perform a median filter on an N-dimensional array.
  1231. Apply a median filter to the input array using a local window-size
  1232. given by `kernel_size`. The array will automatically be zero-padded.
  1233. Parameters
  1234. ----------
  1235. volume : array_like
  1236. An N-dimensional input array.
  1237. kernel_size : array_like, optional
  1238. A scalar or an N-length list giving the size of the median filter
  1239. window in each dimension. Elements of `kernel_size` should be odd.
  1240. If `kernel_size` is a scalar, then this scalar is used as the size in
  1241. each dimension. Default size is 3 for each dimension.
  1242. Returns
  1243. -------
  1244. out : ndarray
  1245. An array the same size as input containing the median filtered
  1246. result.
  1247. Warns
  1248. -----
  1249. UserWarning
  1250. If array size is smaller than kernel size along any dimension
  1251. See Also
  1252. --------
  1253. scipy.ndimage.median_filter
  1254. scipy.signal.medfilt2d
  1255. Notes
  1256. -----
  1257. The more general function `scipy.ndimage.median_filter` has a more
  1258. efficient implementation of a median filter and therefore runs much faster.
  1259. For 2-dimensional images with ``uint8``, ``float32`` or ``float64`` dtypes,
  1260. the specialised function `scipy.signal.medfilt2d` may be faster.
  1261. """
  1262. volume = np.atleast_1d(volume)
  1263. if kernel_size is None:
  1264. kernel_size = [3] * volume.ndim
  1265. kernel_size = np.asarray(kernel_size)
  1266. if kernel_size.shape == ():
  1267. kernel_size = np.repeat(kernel_size.item(), volume.ndim)
  1268. for k in range(volume.ndim):
  1269. if (kernel_size[k] % 2) != 1:
  1270. raise ValueError("Each element of kernel_size should be odd.")
  1271. if any(k > s for k, s in zip(kernel_size, volume.shape)):
  1272. warnings.warn('kernel_size exceeds volume extent: the volume will be '
  1273. 'zero-padded.')
  1274. domain = np.ones(kernel_size, dtype=volume.dtype)
  1275. numels = np.prod(kernel_size, axis=0)
  1276. order = numels // 2
  1277. return _sigtools._order_filterND(volume, domain, order)
  1278. def wiener(im, mysize=None, noise=None):
  1279. """
  1280. Perform a Wiener filter on an N-dimensional array.
  1281. Apply a Wiener filter to the N-dimensional array `im`.
  1282. Parameters
  1283. ----------
  1284. im : ndarray
  1285. An N-dimensional array.
  1286. mysize : int or array_like, optional
  1287. A scalar or an N-length list giving the size of the Wiener filter
  1288. window in each dimension. Elements of mysize should be odd.
  1289. If mysize is a scalar, then this scalar is used as the size
  1290. in each dimension.
  1291. noise : float, optional
  1292. The noise-power to use. If None, then noise is estimated as the
  1293. average of the local variance of the input.
  1294. Returns
  1295. -------
  1296. out : ndarray
  1297. Wiener filtered result with the same shape as `im`.
  1298. Notes
  1299. -----
  1300. This implementation is similar to wiener2 in Matlab/Octave.
  1301. For more details see [1]_
  1302. References
  1303. ----------
  1304. .. [1] Lim, Jae S., Two-Dimensional Signal and Image Processing,
  1305. Englewood Cliffs, NJ, Prentice Hall, 1990, p. 548.
  1306. Examples
  1307. --------
  1308. >>> from scipy.datasets import face
  1309. >>> from scipy.signal import wiener
  1310. >>> import matplotlib.pyplot as plt
  1311. >>> import numpy as np
  1312. >>> rng = np.random.default_rng()
  1313. >>> img = rng.random((40, 40)) #Create a random image
  1314. >>> filtered_img = wiener(img, (5, 5)) #Filter the image
  1315. >>> f, (plot1, plot2) = plt.subplots(1, 2)
  1316. >>> plot1.imshow(img)
  1317. >>> plot2.imshow(filtered_img)
  1318. >>> plt.show()
  1319. """
  1320. im = np.asarray(im)
  1321. if mysize is None:
  1322. mysize = [3] * im.ndim
  1323. mysize = np.asarray(mysize)
  1324. if mysize.shape == ():
  1325. mysize = np.repeat(mysize.item(), im.ndim)
  1326. # Estimate the local mean
  1327. lMean = correlate(im, np.ones(mysize), 'same') / np.prod(mysize, axis=0)
  1328. # Estimate the local variance
  1329. lVar = (correlate(im ** 2, np.ones(mysize), 'same') /
  1330. np.prod(mysize, axis=0) - lMean ** 2)
  1331. # Estimate the noise power if needed.
  1332. if noise is None:
  1333. noise = np.mean(np.ravel(lVar), axis=0)
  1334. res = (im - lMean)
  1335. res *= (1 - noise / lVar)
  1336. res += lMean
  1337. out = np.where(lVar < noise, lMean, res)
  1338. return out
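# Illustrative sketch (not part of SciPy; hypothetical helper): the code
# above implements out = lMean + (lVar - noise) / lVar * (im - lMean) where
# the local variance exceeds the noise power, and falls back to the local
# mean elsewhere.  With a noise power larger than any local variance the
# whole output is therefore just the local mean.
def _example_wiener_noise_floor():
    rng = np.random.default_rng(0)
    im = rng.random((16, 16))
    out = wiener(im, mysize=3, noise=10.0)   # 10.0 >> any local variance here
    lmean = correlate(im, np.ones((3, 3)), 'same') / 9.0
    return np.allclose(out, lmean)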
  1339. def convolve2d(in1, in2, mode='full', boundary='fill', fillvalue=0):
  1340. """
  1341. Convolve two 2-dimensional arrays.
  1342. Convolve `in1` and `in2` with output size determined by `mode`, and
  1343. boundary conditions determined by `boundary` and `fillvalue`.
  1344. Parameters
  1345. ----------
  1346. in1 : array_like
  1347. First input.
  1348. in2 : array_like
  1349. Second input. Should have the same number of dimensions as `in1`.
  1350. mode : str {'full', 'valid', 'same'}, optional
  1351. A string indicating the size of the output:
  1352. ``full``
  1353. The output is the full discrete linear convolution
  1354. of the inputs. (Default)
  1355. ``valid``
  1356. The output consists only of those elements that do not
  1357. rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
  1358. must be at least as large as the other in every dimension.
  1359. ``same``
  1360. The output is the same size as `in1`, centered
  1361. with respect to the 'full' output.
  1362. boundary : str {'fill', 'wrap', 'symm'}, optional
  1363. A flag indicating how to handle boundaries:
  1364. ``fill``
  1365. pad input arrays with fillvalue. (default)
  1366. ``wrap``
  1367. circular boundary conditions.
  1368. ``symm``
  1369. symmetrical boundary conditions.
  1370. fillvalue : scalar, optional
  1371. Value to fill pad input arrays with. Default is 0.
  1372. Returns
  1373. -------
  1374. out : ndarray
  1375. A 2-dimensional array containing a subset of the discrete linear
  1376. convolution of `in1` with `in2`.
  1377. Examples
  1378. --------
  1379. Compute the gradient of an image by 2D convolution with a complex Scharr
  1380. operator. (Horizontal operator is real, vertical is imaginary.) Use
  1381. symmetric boundary condition to avoid creating edges at the image
  1382. boundaries.
  1383. >>> import numpy as np
  1384. >>> from scipy import signal
  1385. >>> from scipy import datasets
  1386. >>> ascent = datasets.ascent()
  1387. >>> scharr = np.array([[ -3-3j, 0-10j, +3 -3j],
  1388. ... [-10+0j, 0+ 0j, +10 +0j],
  1389. ... [ -3+3j, 0+10j, +3 +3j]]) # Gx + j*Gy
  1390. >>> grad = signal.convolve2d(ascent, scharr, boundary='symm', mode='same')
  1391. >>> import matplotlib.pyplot as plt
  1392. >>> fig, (ax_orig, ax_mag, ax_ang) = plt.subplots(3, 1, figsize=(6, 15))
  1393. >>> ax_orig.imshow(ascent, cmap='gray')
  1394. >>> ax_orig.set_title('Original')
  1395. >>> ax_orig.set_axis_off()
  1396. >>> ax_mag.imshow(np.absolute(grad), cmap='gray')
  1397. >>> ax_mag.set_title('Gradient magnitude')
  1398. >>> ax_mag.set_axis_off()
  1399. >>> ax_ang.imshow(np.angle(grad), cmap='hsv') # hsv is cyclic, like angles
  1400. >>> ax_ang.set_title('Gradient orientation')
  1401. >>> ax_ang.set_axis_off()
  1402. >>> fig.show()
  1403. """
  1404. in1 = np.asarray(in1)
  1405. in2 = np.asarray(in2)
  1406. if not in1.ndim == in2.ndim == 2:
  1407. raise ValueError('convolve2d inputs must both be 2-D arrays')
  1408. if _inputs_swap_needed(mode, in1.shape, in2.shape):
  1409. in1, in2 = in2, in1
  1410. val = _valfrommode(mode)
  1411. bval = _bvalfromboundary(boundary)
  1412. out = _sigtools._convolve2d(in1, in2, 1, val, bval, fillvalue)
  1413. return out
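# Illustrative sketch (not part of SciPy; hypothetical helper): for a kernel
# that is symmetric under a 180-degree flip (here an all-ones box), 2-D
# convolution and 2-D correlation coincide, which is an easy sanity check on
# the boundary/mode plumbing shared by convolve2d and correlate2d.
def _example_convolve2d_vs_correlate2d():
    rng = np.random.default_rng(0)
    img = rng.random((6, 6))
    k = np.ones((3, 3))
    return np.allclose(convolve2d(img, k, mode='same'),
                       correlate2d(img, k, mode='same'))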
  1414. def correlate2d(in1, in2, mode='full', boundary='fill', fillvalue=0):
  1415. """
  1416. Cross-correlate two 2-dimensional arrays.
  1417. Cross correlate `in1` and `in2` with output size determined by `mode`, and
  1418. boundary conditions determined by `boundary` and `fillvalue`.
  1419. Parameters
  1420. ----------
  1421. in1 : array_like
  1422. First input.
  1423. in2 : array_like
  1424. Second input. Should have the same number of dimensions as `in1`.
  1425. mode : str {'full', 'valid', 'same'}, optional
  1426. A string indicating the size of the output:
  1427. ``full``
  1428. The output is the full discrete linear cross-correlation
  1429. of the inputs. (Default)
  1430. ``valid``
  1431. The output consists only of those elements that do not
  1432. rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
  1433. must be at least as large as the other in every dimension.
  1434. ``same``
  1435. The output is the same size as `in1`, centered
  1436. with respect to the 'full' output.
  1437. boundary : str {'fill', 'wrap', 'symm'}, optional
  1438. A flag indicating how to handle boundaries:
  1439. ``fill``
  1440. pad input arrays with fillvalue. (default)
  1441. ``wrap``
  1442. circular boundary conditions.
  1443. ``symm``
  1444. symmetrical boundary conditions.
  1445. fillvalue : scalar, optional
  1446. Value to fill pad input arrays with. Default is 0.
  1447. Returns
  1448. -------
  1449. correlate2d : ndarray
  1450. A 2-dimensional array containing a subset of the discrete linear
  1451. cross-correlation of `in1` with `in2`.
  1452. Notes
  1453. -----
  1454. When using "same" mode with even-length inputs, the outputs of `correlate`
  1455. and `correlate2d` differ: There is a 1-index offset between them.
  1456. Examples
  1457. --------
  1458. Use 2D cross-correlation to find the location of a template in a noisy
  1459. image:
  1460. >>> import numpy as np
  1461. >>> from scipy import signal
  1462. >>> from scipy import datasets
  1463. >>> rng = np.random.default_rng()
  1464. >>> face = datasets.face(gray=True) - datasets.face(gray=True).mean()
  1465. >>> template = np.copy(face[300:365, 670:750]) # right eye
  1466. >>> template -= template.mean()
  1467. >>> face = face + rng.standard_normal(face.shape) * 50 # add noise
  1468. >>> corr = signal.correlate2d(face, template, boundary='symm', mode='same')
  1469. >>> y, x = np.unravel_index(np.argmax(corr), corr.shape) # find the match
  1470. >>> import matplotlib.pyplot as plt
  1471. >>> fig, (ax_orig, ax_template, ax_corr) = plt.subplots(3, 1,
  1472. ... figsize=(6, 15))
  1473. >>> ax_orig.imshow(face, cmap='gray')
  1474. >>> ax_orig.set_title('Original')
  1475. >>> ax_orig.set_axis_off()
  1476. >>> ax_template.imshow(template, cmap='gray')
  1477. >>> ax_template.set_title('Template')
  1478. >>> ax_template.set_axis_off()
  1479. >>> ax_corr.imshow(corr, cmap='gray')
  1480. >>> ax_corr.set_title('Cross-correlation')
  1481. >>> ax_corr.set_axis_off()
  1482. >>> ax_orig.plot(x, y, 'ro')
  1483. >>> fig.show()
  1484. """
  1485. in1 = np.asarray(in1)
  1486. in2 = np.asarray(in2)
  1487. if not in1.ndim == in2.ndim == 2:
  1488. raise ValueError('correlate2d inputs must both be 2-D arrays')
  1489. swapped_inputs = _inputs_swap_needed(mode, in1.shape, in2.shape)
  1490. if swapped_inputs:
  1491. in1, in2 = in2, in1
  1492. val = _valfrommode(mode)
  1493. bval = _bvalfromboundary(boundary)
  1494. out = _sigtools._convolve2d(in1, in2.conj(), 0, val, bval, fillvalue)
  1495. if swapped_inputs:
  1496. out = out[::-1, ::-1]
  1497. return out
  1498. def medfilt2d(input, kernel_size=3):
  1499. """
  1500. Median filter a 2-dimensional array.
  1501. Apply a median filter to the `input` array using a local window-size
  1502. given by `kernel_size` (must be odd). The array is zero-padded
  1503. automatically.
  1504. Parameters
  1505. ----------
  1506. input : array_like
  1507. A 2-dimensional input array.
  1508. kernel_size : array_like, optional
  1509. A scalar or a list of length 2, giving the size of the
  1510. median filter window in each dimension. Elements of
  1511. `kernel_size` should be odd. If `kernel_size` is a scalar,
  1512. then this scalar is used as the size in each dimension.
  1513. Default is a kernel of size (3, 3).
  1514. Returns
  1515. -------
  1516. out : ndarray
  1517. An array the same size as input containing the median filtered
  1518. result.
  1519. See Also
  1520. --------
  1521. scipy.ndimage.median_filter
  1522. Notes
  1523. -----
  1524. This is faster than `medfilt` when the input dtype is ``uint8``,
  1525. ``float32``, or ``float64``; for other types, this falls back to
  1526. `medfilt`. In some situations, `scipy.ndimage.median_filter` may be
  1527. faster than this function.
  1528. Examples
  1529. --------
  1530. >>> import numpy as np
  1531. >>> from scipy import signal
  1532. >>> x = np.arange(25).reshape(5, 5)
  1533. >>> x
  1534. array([[ 0, 1, 2, 3, 4],
  1535. [ 5, 6, 7, 8, 9],
  1536. [10, 11, 12, 13, 14],
  1537. [15, 16, 17, 18, 19],
  1538. [20, 21, 22, 23, 24]])
# Replaces element i,j with the median of a 5*5 window
  1540. >>> signal.medfilt2d(x, kernel_size=5)
  1541. array([[ 0, 0, 2, 0, 0],
  1542. [ 0, 3, 7, 4, 0],
  1543. [ 2, 8, 12, 9, 4],
  1544. [ 0, 8, 12, 9, 0],
  1545. [ 0, 0, 12, 0, 0]])
# Replaces element i,j with the median of the default 3*3 window
  1547. >>> signal.medfilt2d(x)
  1548. array([[ 0, 1, 2, 3, 0],
  1549. [ 1, 6, 7, 8, 4],
  1550. [ 6, 11, 12, 13, 9],
  1551. [11, 16, 17, 18, 14],
  1552. [ 0, 16, 17, 18, 0]])
# Replaces element i,j with the median of a 5*3 window
  1554. >>> signal.medfilt2d(x, kernel_size=[5,3])
  1555. array([[ 0, 1, 2, 3, 0],
  1556. [ 0, 6, 7, 8, 3],
  1557. [ 5, 11, 12, 13, 8],
  1558. [ 5, 11, 12, 13, 8],
  1559. [ 0, 11, 12, 13, 0]])
# Replaces element i,j with the median of a 3*5 window
  1561. >>> signal.medfilt2d(x, kernel_size=[3,5])
  1562. array([[ 0, 0, 2, 1, 0],
  1563. [ 1, 5, 7, 6, 3],
  1564. [ 6, 10, 12, 11, 8],
  1565. [11, 15, 17, 16, 13],
  1566. [ 0, 15, 17, 16, 0]])
# As seen in the examples, kernel sizes must be odd and must not exceed
# the original array dimensions.
  1569. """
  1570. image = np.asarray(input)
  1571. # checking dtype.type, rather than just dtype, is necessary for
  1572. # excluding np.longdouble with MS Visual C.
  1573. if image.dtype.type not in (np.ubyte, np.single, np.double):
  1574. return medfilt(image, kernel_size)
  1575. if kernel_size is None:
  1576. kernel_size = [3] * 2
  1577. kernel_size = np.asarray(kernel_size)
  1578. if kernel_size.shape == ():
  1579. kernel_size = np.repeat(kernel_size.item(), 2)
  1580. for size in kernel_size:
  1581. if (size % 2) != 1:
  1582. raise ValueError("Each element of kernel_size should be odd.")
  1583. return _sigtools._medfilt2d(image, kernel_size)
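# Illustrative sketch (not part of SciPy; hypothetical helper): the dtype
# check above means e.g. int64 input silently takes the slower medfilt route
# while float64 input stays on the fast _medfilt2d path; for the same data
# both are expected to produce the same median values.
def _example_medfilt2d_dtype_fallback():
    rng = np.random.default_rng(0)
    x = rng.integers(0, 255, (10, 10))        # int64: falls back to medfilt
    y = medfilt2d(x.astype(np.float64), 3)    # float64: specialised path
    return np.allclose(medfilt2d(x, 3), y)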
  1584. def lfilter(b, a, x, axis=-1, zi=None):
  1585. """
  1586. Filter data along one-dimension with an IIR or FIR filter.
  1587. Filter a data sequence, `x`, using a digital filter. This works for many
  1588. fundamental data types (including Object type). The filter is a direct
  1589. form II transposed implementation of the standard difference equation
  1590. (see Notes).
  1591. The function `sosfilt` (and filter design using ``output='sos'``) should be
  1592. preferred over `lfilter` for most filtering tasks, as second-order sections
  1593. have fewer numerical problems.
  1594. Parameters
  1595. ----------
  1596. b : array_like
  1597. The numerator coefficient vector in a 1-D sequence.
  1598. a : array_like
  1599. The denominator coefficient vector in a 1-D sequence. If ``a[0]``
  1600. is not 1, then both `a` and `b` are normalized by ``a[0]``.
  1601. x : array_like
  1602. An N-dimensional input array.
  1603. axis : int, optional
  1604. The axis of the input data array along which to apply the
  1605. linear filter. The filter is applied to each subarray along
  1606. this axis. Default is -1.
  1607. zi : array_like, optional
  1608. Initial conditions for the filter delays. It is a vector
  1609. (or array of vectors for an N-dimensional input) of length
  1610. ``max(len(a), len(b)) - 1``. If `zi` is None or is not given then
  1611. initial rest is assumed. See `lfiltic` for more information.
  1612. Returns
  1613. -------
  1614. y : array
  1615. The output of the digital filter.
  1616. zf : array, optional
  1617. If `zi` is None, this is not returned, otherwise, `zf` holds the
  1618. final filter delay values.
  1619. See Also
  1620. --------
  1621. lfiltic : Construct initial conditions for `lfilter`.
  1622. lfilter_zi : Compute initial state (steady state of step response) for
  1623. `lfilter`.
  1624. filtfilt : A forward-backward filter, to obtain a filter with zero phase.
  1625. savgol_filter : A Savitzky-Golay filter.
  1626. sosfilt: Filter data using cascaded second-order sections.
  1627. sosfiltfilt: A forward-backward filter using second-order sections.
  1628. Notes
  1629. -----
  1630. The filter function is implemented as a direct II transposed structure.
  1631. This means that the filter implements::
  1632. a[0]*y[n] = b[0]*x[n] + b[1]*x[n-1] + ... + b[M]*x[n-M]
  1633. - a[1]*y[n-1] - ... - a[N]*y[n-N]
  1634. where `M` is the degree of the numerator, `N` is the degree of the
  1635. denominator, and `n` is the sample number. It is implemented using
  1636. the following difference equations (assuming M = N)::
  1637. a[0]*y[n] = b[0] * x[n] + d[0][n-1]
  1638. d[0][n] = b[1] * x[n] - a[1] * y[n] + d[1][n-1]
  1639. d[1][n] = b[2] * x[n] - a[2] * y[n] + d[2][n-1]
  1640. ...
  1641. d[N-2][n] = b[N-1]*x[n] - a[N-1]*y[n] + d[N-1][n-1]
  1642. d[N-1][n] = b[N] * x[n] - a[N] * y[n]
  1643. where `d` are the state variables.
  1644. The rational transfer function describing this filter in the
  1645. z-transform domain is::
            b[0] + b[1] z**-1 + ... + b[M] z**-M
    Y(z) = --------------------------------------- X(z)
            a[0] + a[1] z**-1 + ... + a[N] z**-N
  1651. Examples
  1652. --------
  1653. Generate a noisy signal to be filtered:
  1654. >>> import numpy as np
  1655. >>> from scipy import signal
  1656. >>> import matplotlib.pyplot as plt
  1657. >>> rng = np.random.default_rng()
  1658. >>> t = np.linspace(-1, 1, 201)
  1659. >>> x = (np.sin(2*np.pi*0.75*t*(1-t) + 2.1) +
  1660. ... 0.1*np.sin(2*np.pi*1.25*t + 1) +
  1661. ... 0.18*np.cos(2*np.pi*3.85*t))
  1662. >>> xn = x + rng.standard_normal(len(t)) * 0.08
  1663. Create an order 3 lowpass butterworth filter:
  1664. >>> b, a = signal.butter(3, 0.05)
  1665. Apply the filter to xn. Use lfilter_zi to choose the initial condition of
  1666. the filter:
  1667. >>> zi = signal.lfilter_zi(b, a)
  1668. >>> z, _ = signal.lfilter(b, a, xn, zi=zi*xn[0])
  1669. Apply the filter again, to have a result filtered at an order the same as
  1670. filtfilt:
  1671. >>> z2, _ = signal.lfilter(b, a, z, zi=zi*z[0])
  1672. Use filtfilt to apply the filter:
  1673. >>> y = signal.filtfilt(b, a, xn)
  1674. Plot the original signal and the various filtered versions:
>>> plt.figure()
  1676. >>> plt.plot(t, xn, 'b', alpha=0.75)
  1677. >>> plt.plot(t, z, 'r--', t, z2, 'r', t, y, 'k')
  1678. >>> plt.legend(('noisy signal', 'lfilter, once', 'lfilter, twice',
  1679. ... 'filtfilt'), loc='best')
  1680. >>> plt.grid(True)
  1681. >>> plt.show()
  1682. """
  1683. a = np.atleast_1d(a)
  1684. if len(a) == 1:
  1685. # This path only supports types fdgFDGO to mirror _linear_filter below.
  1686. # Any of b, a, x, or zi can set the dtype, but there is no default
  1687. # casting of other types; instead a NotImplementedError is raised.
  1688. b = np.asarray(b)
  1689. a = np.asarray(a)
  1690. if b.ndim != 1 and a.ndim != 1:
  1691. raise ValueError('object of too small depth for desired array')
  1692. x = _validate_x(x)
  1693. inputs = [b, a, x]
  1694. if zi is not None:
  1695. # _linear_filter does not broadcast zi, but does do expansion of
  1696. # singleton dims.
  1697. zi = np.asarray(zi)
  1698. if zi.ndim != x.ndim:
  1699. raise ValueError('object of too small depth for desired array')
  1700. expected_shape = list(x.shape)
  1701. expected_shape[axis] = b.shape[0] - 1
  1702. expected_shape = tuple(expected_shape)
  1703. # check the trivial case where zi is the right shape first
  1704. if zi.shape != expected_shape:
  1705. strides = zi.ndim * [None]
  1706. if axis < 0:
  1707. axis += zi.ndim
  1708. for k in range(zi.ndim):
  1709. if k == axis and zi.shape[k] == expected_shape[k]:
  1710. strides[k] = zi.strides[k]
  1711. elif k != axis and zi.shape[k] == expected_shape[k]:
  1712. strides[k] = zi.strides[k]
  1713. elif k != axis and zi.shape[k] == 1:
  1714. strides[k] = 0
  1715. else:
  1716. raise ValueError('Unexpected shape for zi: expected '
  1717. '%s, found %s.' %
  1718. (expected_shape, zi.shape))
  1719. zi = np.lib.stride_tricks.as_strided(zi, expected_shape,
  1720. strides)
  1721. inputs.append(zi)
  1722. dtype = np.result_type(*inputs)
  1723. if dtype.char not in 'fdgFDGO':
  1724. raise NotImplementedError("input type '%s' not supported" % dtype)
  1725. b = np.array(b, dtype=dtype)
  1726. a = np.array(a, dtype=dtype, copy=False)
  1727. b /= a[0]
  1728. x = np.array(x, dtype=dtype, copy=False)
  1729. out_full = np.apply_along_axis(lambda y: np.convolve(b, y), axis, x)
  1730. ind = out_full.ndim * [slice(None)]
  1731. if zi is not None:
  1732. ind[axis] = slice(zi.shape[axis])
  1733. out_full[tuple(ind)] += zi
  1734. ind[axis] = slice(out_full.shape[axis] - len(b) + 1)
  1735. out = out_full[tuple(ind)]
  1736. if zi is None:
  1737. return out
  1738. else:
  1739. ind[axis] = slice(out_full.shape[axis] - len(b) + 1, None)
  1740. zf = out_full[tuple(ind)]
  1741. return out, zf
  1742. else:
  1743. if zi is None:
  1744. return _sigtools._linear_filter(b, a, x, axis)
  1745. else:
  1746. return _sigtools._linear_filter(b, a, x, axis, zi)
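# Illustrative sketch (not part of SciPy; hypothetical helper): a pure-Python
# rendering of the transposed direct form II recurrence from the Notes above,
# for 1-D input.  It only makes the state update explicit; the compiled
# _linear_filter does the real work.  For 1-D float data the result matches
# lfilter(b, a, x) to floating point accuracy.
def _example_df2t_reference(b, a, x):
    a = np.atleast_1d(np.asarray(a, dtype=float))
    b = np.atleast_1d(np.asarray(b, dtype=float)) / a[0]
    a = a / a[0]
    n = max(len(a), len(b))
    b = np.r_[b, np.zeros(n - len(b))]
    a = np.r_[a, np.zeros(n - len(a))]
    d = np.zeros(n - 1)                      # state variables d[0] .. d[N-1]
    y = np.zeros(len(x))
    for i, xi in enumerate(x):
        y[i] = b[0] * xi + (d[0] if n > 1 else 0.0)
        for k in range(n - 2):               # uses the old value of d[k + 1]
            d[k] = b[k + 1] * xi - a[k + 1] * y[i] + d[k + 1]
        if n > 1:
            d[-1] = b[-1] * xi - a[-1] * y[i]
    return y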
  1747. def lfiltic(b, a, y, x=None):
  1748. """
  1749. Construct initial conditions for lfilter given input and output vectors.
  1750. Given a linear filter (b, a) and initial conditions on the output `y`
  1751. and the input `x`, return the initial conditions on the state vector zi
  1752. which is used by `lfilter` to generate the output given the input.
  1753. Parameters
  1754. ----------
  1755. b : array_like
  1756. Linear filter term.
  1757. a : array_like
  1758. Linear filter term.
  1759. y : array_like
  1760. Initial conditions.
  1761. If ``N = len(a) - 1``, then ``y = {y[-1], y[-2], ..., y[-N]}``.
  1762. If `y` is too short, it is padded with zeros.
  1763. x : array_like, optional
  1764. Initial conditions.
  1765. If ``M = len(b) - 1``, then ``x = {x[-1], x[-2], ..., x[-M]}``.
  1766. If `x` is not given, its initial conditions are assumed zero.
  1767. If `x` is too short, it is padded with zeros.
  1768. Returns
  1769. -------
  1770. zi : ndarray
  1771. The state vector ``zi = {z_0[-1], z_1[-1], ..., z_K-1[-1]}``,
  1772. where ``K = max(M, N)``.
  1773. See Also
  1774. --------
  1775. lfilter, lfilter_zi
  1776. """
  1777. N = np.size(a) - 1
  1778. M = np.size(b) - 1
  1779. K = max(M, N)
  1780. y = np.asarray(y)
  1781. if x is None:
  1782. result_type = np.result_type(np.asarray(b), np.asarray(a), y)
  1783. if result_type.kind in 'bui':
  1784. result_type = np.float64
  1785. x = np.zeros(M, dtype=result_type)
  1786. else:
  1787. x = np.asarray(x)
  1788. result_type = np.result_type(np.asarray(b), np.asarray(a), y, x)
  1789. if result_type.kind in 'bui':
  1790. result_type = np.float64
  1791. x = x.astype(result_type)
  1792. L = np.size(x)
  1793. if L < M:
  1794. x = np.r_[x, np.zeros(M - L)]
  1795. y = y.astype(result_type)
  1796. zi = np.zeros(K, result_type)
  1797. L = np.size(y)
  1798. if L < N:
  1799. y = np.r_[y, np.zeros(N - L)]
  1800. for m in range(M):
  1801. zi[m] = np.sum(b[m + 1:] * x[:M - m], axis=0)
  1802. for m in range(N):
  1803. zi[m] -= np.sum(a[m + 1:] * y[:N - m], axis=0)
  1804. return zi
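# Illustrative sketch (not part of SciPy; hypothetical helper): lfiltic
# rebuilds the internal filter state from past inputs and outputs, so a
# signal filtered in two chunks reproduces the one-shot result.  Note the
# reversed slices: lfiltic expects the most recent samples first.
def _example_lfiltic_continuation():
    rng = np.random.default_rng(0)
    b, a = [0.5, 0.5], [1.0, -0.3]
    x = rng.random(32)
    full = lfilter(b, a, x)
    first = lfilter(b, a, x[:16])
    zi = lfiltic(b, a, y=first[::-1], x=x[:16][::-1])
    second, _ = lfilter(b, a, x[16:], zi=zi)
    return np.allclose(np.r_[first, second], full)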
  1805. def deconvolve(signal, divisor):
  1806. """Deconvolves ``divisor`` out of ``signal`` using inverse filtering.
  1807. Returns the quotient and remainder such that
  1808. ``signal = convolve(divisor, quotient) + remainder``
  1809. Parameters
  1810. ----------
  1811. signal : (N,) array_like
  1812. Signal data, typically a recorded signal
  1813. divisor : (N,) array_like
  1814. Divisor data, typically an impulse response or filter that was
  1815. applied to the original signal
  1816. Returns
  1817. -------
  1818. quotient : ndarray
  1819. Quotient, typically the recovered original signal
  1820. remainder : ndarray
  1821. Remainder
  1822. See Also
  1823. --------
  1824. numpy.polydiv : performs polynomial division (same operation, but
  1825. also accepts poly1d objects)
  1826. Examples
  1827. --------
  1828. Deconvolve a signal that's been filtered:
  1829. >>> from scipy import signal
  1830. >>> original = [0, 1, 0, 0, 1, 1, 0, 0]
  1831. >>> impulse_response = [2, 1]
  1832. >>> recorded = signal.convolve(impulse_response, original)
  1833. >>> recorded
  1834. array([0, 2, 1, 0, 2, 3, 1, 0, 0])
  1835. >>> recovered, remainder = signal.deconvolve(recorded, impulse_response)
  1836. >>> recovered
  1837. array([ 0., 1., 0., 0., 1., 1., 0., 0.])
  1838. """
  1839. num = np.atleast_1d(signal)
  1840. den = np.atleast_1d(divisor)
  1841. if num.ndim > 1:
  1842. raise ValueError("signal must be 1-D.")
  1843. if den.ndim > 1:
  1844. raise ValueError("divisor must be 1-D.")
  1845. N = len(num)
  1846. D = len(den)
  1847. if D > N:
  1848. quot = []
  1849. rem = num
  1850. else:
  1851. input = np.zeros(N - D + 1, float)
  1852. input[0] = 1
  1853. quot = lfilter(num, den, input)
  1854. rem = num - convolve(den, quot, mode='full')
  1855. return quot, rem
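# Illustrative sketch (not part of SciPy; hypothetical helper): the defining
# relation ``signal = convolve(divisor, quotient) + remainder`` can be
# checked directly for a well-conditioned divisor.
def _example_deconvolve_roundtrip():
    rng = np.random.default_rng(0)
    divisor = np.array([2.0, 1.0, 0.5])
    original = rng.random(20)
    recorded = convolve(divisor, original)
    quotient, remainder = deconvolve(recorded, divisor)
    return (np.allclose(quotient, original) and
            np.allclose(convolve(divisor, quotient) + remainder, recorded))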
  1856. def hilbert(x, N=None, axis=-1):
  1857. """
  1858. Compute the analytic signal, using the Hilbert transform.
  1859. The transformation is done along the last axis by default.
  1860. Parameters
  1861. ----------
  1862. x : array_like
  1863. Signal data. Must be real.
  1864. N : int, optional
  1865. Number of Fourier components. Default: ``x.shape[axis]``
  1866. axis : int, optional
  1867. Axis along which to do the transformation. Default: -1.
  1868. Returns
  1869. -------
  1870. xa : ndarray
  1871. Analytic signal of `x`, of each 1-D array along `axis`
  1872. Notes
  1873. -----
  1874. The analytic signal ``x_a(t)`` of signal ``x(t)`` is:
  1875. .. math:: x_a = F^{-1}(F(x) 2U) = x + i y
  1876. where `F` is the Fourier transform, `U` the unit step function,
  1877. and `y` the Hilbert transform of `x`. [1]_
  1878. In other words, the negative half of the frequency spectrum is zeroed
  1879. out, turning the real-valued signal into a complex signal. The Hilbert
  1880. transformed signal can be obtained from ``np.imag(hilbert(x))``, and the
  1881. original signal from ``np.real(hilbert(x))``.
  1882. References
  1883. ----------
  1884. .. [1] Wikipedia, "Analytic signal".
  1885. https://en.wikipedia.org/wiki/Analytic_signal
  1886. .. [2] Leon Cohen, "Time-Frequency Analysis", 1995. Chapter 2.
  1887. .. [3] Alan V. Oppenheim, Ronald W. Schafer. Discrete-Time Signal
  1888. Processing, Third Edition, 2009. Chapter 12.
  1889. ISBN 13: 978-1292-02572-8
  1890. Examples
  1891. --------
  1892. In this example we use the Hilbert transform to determine the amplitude
  1893. envelope and instantaneous frequency of an amplitude-modulated signal.
  1894. >>> import numpy as np
  1895. >>> import matplotlib.pyplot as plt
  1896. >>> from scipy.signal import hilbert, chirp
  1897. >>> duration = 1.0
  1898. >>> fs = 400.0
  1899. >>> samples = int(fs*duration)
  1900. >>> t = np.arange(samples) / fs
  1901. We create a chirp of which the frequency increases from 20 Hz to 100 Hz and
  1902. apply an amplitude modulation.
  1903. >>> signal = chirp(t, 20.0, t[-1], 100.0)
  1904. >>> signal *= (1.0 + 0.5 * np.sin(2.0*np.pi*3.0*t) )
  1905. The amplitude envelope is given by magnitude of the analytic signal. The
  1906. instantaneous frequency can be obtained by differentiating the
  1907. instantaneous phase in respect to time. The instantaneous phase corresponds
  1908. to the phase angle of the analytic signal.
  1909. >>> analytic_signal = hilbert(signal)
  1910. >>> amplitude_envelope = np.abs(analytic_signal)
  1911. >>> instantaneous_phase = np.unwrap(np.angle(analytic_signal))
  1912. >>> instantaneous_frequency = (np.diff(instantaneous_phase) /
  1913. ... (2.0*np.pi) * fs)
  1914. >>> fig, (ax0, ax1) = plt.subplots(nrows=2)
  1915. >>> ax0.plot(t, signal, label='signal')
  1916. >>> ax0.plot(t, amplitude_envelope, label='envelope')
  1917. >>> ax0.set_xlabel("time in seconds")
  1918. >>> ax0.legend()
  1919. >>> ax1.plot(t[1:], instantaneous_frequency)
  1920. >>> ax1.set_xlabel("time in seconds")
  1921. >>> ax1.set_ylim(0.0, 120.0)
  1922. >>> fig.tight_layout()
  1923. """
  1924. x = np.asarray(x)
  1925. if np.iscomplexobj(x):
  1926. raise ValueError("x must be real.")
  1927. if N is None:
  1928. N = x.shape[axis]
  1929. if N <= 0:
  1930. raise ValueError("N must be positive.")
  1931. Xf = sp_fft.fft(x, N, axis=axis)
  1932. h = np.zeros(N, dtype=Xf.dtype)
  1933. if N % 2 == 0:
  1934. h[0] = h[N // 2] = 1
  1935. h[1:N // 2] = 2
  1936. else:
  1937. h[0] = 1
  1938. h[1:(N + 1) // 2] = 2
  1939. if x.ndim > 1:
  1940. ind = [np.newaxis] * x.ndim
  1941. ind[axis] = slice(None)
  1942. h = h[tuple(ind)]
  1943. x = sp_fft.ifft(Xf * h, axis=axis)
  1944. return x
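# Illustrative sketch (not part of SciPy; hypothetical helper): for a pure
# tone that fits an integer number of cycles into the window, the analytic
# signal has unit magnitude and its imaginary part is the Hilbert transform
# of the input, i.e. the corresponding sine.
def _example_hilbert_tone():
    t = np.arange(256) / 256.0
    x = np.cos(2 * np.pi * 16 * t)           # exactly 16 cycles, no leakage
    xa = hilbert(x)
    return (np.allclose(np.abs(xa), 1.0) and
            np.allclose(xa.imag, np.sin(2 * np.pi * 16 * t)))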
  1945. def hilbert2(x, N=None):
  1946. """
  1947. Compute the '2-D' analytic signal of `x`
  1948. Parameters
  1949. ----------
  1950. x : array_like
  1951. 2-D signal data.
  1952. N : int or tuple of two ints, optional
  1953. Number of Fourier components. Default is ``x.shape``
  1954. Returns
  1955. -------
  1956. xa : ndarray
  1957. Analytic signal of `x` taken along axes (0,1).
  1958. References
  1959. ----------
  1960. .. [1] Wikipedia, "Analytic signal",
  1961. https://en.wikipedia.org/wiki/Analytic_signal
  1962. """
  1963. x = np.atleast_2d(x)
  1964. if x.ndim > 2:
  1965. raise ValueError("x must be 2-D.")
  1966. if np.iscomplexobj(x):
  1967. raise ValueError("x must be real.")
  1968. if N is None:
  1969. N = x.shape
  1970. elif isinstance(N, int):
  1971. if N <= 0:
  1972. raise ValueError("N must be positive.")
  1973. N = (N, N)
  1974. elif len(N) != 2 or np.any(np.asarray(N) <= 0):
  1975. raise ValueError("When given as a tuple, N must hold exactly "
  1976. "two positive integers")
  1977. Xf = sp_fft.fft2(x, N, axes=(0, 1))
    # Build the 1-D spectral weight vector for each axis (no eval/exec).
    hs = []
    for N1 in N:
        h = np.zeros(N1, dtype=Xf.dtype)
        if N1 % 2 == 0:
            h[0] = h[N1 // 2] = 1
            h[1:N1 // 2] = 2
        else:
            h[0] = 1
            h[1:(N1 + 1) // 2] = 2
        hs.append(h)
    h = hs[0][:, np.newaxis] * hs[1][np.newaxis, :]
  1991. k = x.ndim
  1992. while k > 2:
  1993. h = h[:, np.newaxis]
  1994. k -= 1
  1995. x = sp_fft.ifft2(Xf * h, axes=(0, 1))
  1996. return x
  1997. def cmplx_sort(p):
  1998. """Sort roots based on magnitude.
  1999. Parameters
  2000. ----------
  2001. p : array_like
  2002. The roots to sort, as a 1-D array.
  2003. Returns
  2004. -------
  2005. p_sorted : ndarray
  2006. Sorted roots.
  2007. indx : ndarray
  2008. Array of indices needed to sort the input `p`.
  2009. Examples
  2010. --------
  2011. >>> from scipy import signal
  2012. >>> vals = [1, 4, 1+1.j, 3]
  2013. >>> p_sorted, indx = signal.cmplx_sort(vals)
  2014. >>> p_sorted
  2015. array([1.+0.j, 1.+1.j, 3.+0.j, 4.+0.j])
  2016. >>> indx
  2017. array([0, 2, 3, 1])
  2018. """
  2019. p = np.asarray(p)
  2020. indx = np.argsort(abs(p))
  2021. return np.take(p, indx, 0), indx
  2022. def unique_roots(p, tol=1e-3, rtype='min'):
  2023. """Determine unique roots and their multiplicities from a list of roots.
  2024. Parameters
  2025. ----------
  2026. p : array_like
  2027. The list of roots.
  2028. tol : float, optional
  2029. The tolerance for two roots to be considered equal in terms of
  2030. the distance between them. Default is 1e-3. Refer to Notes about
  2031. the details on roots grouping.
  2032. rtype : {'max', 'maximum', 'min', 'minimum', 'avg', 'mean'}, optional
  2033. How to determine the returned root if multiple roots are within
  2034. `tol` of each other.
  2035. - 'max', 'maximum': pick the maximum of those roots
  2036. - 'min', 'minimum': pick the minimum of those roots
  2037. - 'avg', 'mean': take the average of those roots
  2038. When finding minimum or maximum among complex roots they are compared
  2039. first by the real part and then by the imaginary part.
  2040. Returns
  2041. -------
  2042. unique : ndarray
  2043. The list of unique roots.
  2044. multiplicity : ndarray
  2045. The multiplicity of each root.
  2046. Notes
  2047. -----
  2048. If we have 3 roots ``a``, ``b`` and ``c``, such that ``a`` is close to
  2049. ``b`` and ``b`` is close to ``c`` (distance is less than `tol`), then it
  2050. doesn't necessarily mean that ``a`` is close to ``c``. It means that roots
  2051. grouping is not unique. In this function we use "greedy" grouping going
  2052. through the roots in the order they are given in the input `p`.
  2053. This utility function is not specific to roots but can be used for any
  2054. sequence of values for which uniqueness and multiplicity has to be
  2055. determined. For a more general routine, see `numpy.unique`.
  2056. Examples
  2057. --------
  2058. >>> from scipy import signal
  2059. >>> vals = [0, 1.3, 1.31, 2.8, 1.25, 2.2, 10.3]
  2060. >>> uniq, mult = signal.unique_roots(vals, tol=2e-2, rtype='avg')
  2061. Check which roots have multiplicity larger than 1:
  2062. >>> uniq[mult > 1]
  2063. array([ 1.305])
  2064. """
  2065. if rtype in ['max', 'maximum']:
  2066. reduce = np.max
  2067. elif rtype in ['min', 'minimum']:
  2068. reduce = np.min
  2069. elif rtype in ['avg', 'mean']:
  2070. reduce = np.mean
  2071. else:
  2072. raise ValueError("`rtype` must be one of "
  2073. "{'max', 'maximum', 'min', 'minimum', 'avg', 'mean'}")
  2074. p = np.asarray(p)
  2075. points = np.empty((len(p), 2))
  2076. points[:, 0] = np.real(p)
  2077. points[:, 1] = np.imag(p)
  2078. tree = cKDTree(points)
  2079. p_unique = []
  2080. p_multiplicity = []
  2081. used = np.zeros(len(p), dtype=bool)
  2082. for i in range(len(p)):
  2083. if used[i]:
  2084. continue
  2085. group = tree.query_ball_point(points[i], tol)
  2086. group = [x for x in group if not used[x]]
  2087. p_unique.append(reduce(p[group]))
  2088. p_multiplicity.append(len(group))
  2089. used[group] = True
  2090. return np.asarray(p_unique), np.asarray(p_multiplicity)
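# Illustrative sketch (not part of SciPy; hypothetical helper): the greedy
# grouping described in the Notes is order dependent -- the first unused root
# collects every root within `tol` of it, even if those roots are not all
# within `tol` of each other.
def _example_unique_roots_greedy():
    vals = [1.0, 1.0 + 0.9e-3, 1.0 + 1.8e-3]   # consecutive gaps below tol
    uniq, mult = unique_roots(vals, tol=1e-3, rtype='min')
    # 1.0 and 1.0009 form one group; 1.0018 is farther than tol from 1.0,
    # so it starts a group of its own: mult == [2, 1].
    return uniq, mult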
  2091. def invres(r, p, k, tol=1e-3, rtype='avg'):
  2092. """Compute b(s) and a(s) from partial fraction expansion.
  2093. If `M` is the degree of numerator `b` and `N` the degree of denominator
  2094. `a`::
    H(s) = b(s) / a(s)

         = (b[0] s**(M) + b[1] s**(M-1) + ... + b[M]) /
           (a[0] s**(N) + a[1] s**(N-1) + ... + a[N])

then the partial-fraction expansion H(s) is defined as::

    H(s) = r[0] / (s - p[0]) + r[1] / (s - p[1]) + ... + r[-1] / (s - p[-1])
           + k(s)

If there are any repeated roots (closer together than `tol`), then H(s)
has terms like::

    r[i] / (s - p[i]) + r[i+1] / (s - p[i])**2 + ... + r[i+n-1] / (s - p[i])**n
  2107. This function is used for polynomials in positive powers of s or z,
  2108. such as analog filters or digital filters in controls engineering. For
  2109. negative powers of z (typical for digital filters in DSP), use `invresz`.
  2110. Parameters
  2111. ----------
  2112. r : array_like
  2113. Residues corresponding to the poles. For repeated poles, the residues
must be ordered to correspond to the fractions in ascending order of power.
  2115. p : array_like
  2116. Poles. Equal poles must be adjacent.
  2117. k : array_like
  2118. Coefficients of the direct polynomial term.
  2119. tol : float, optional
  2120. The tolerance for two roots to be considered equal in terms of
  2121. the distance between them. Default is 1e-3. See `unique_roots`
  2122. for further details.
  2123. rtype : {'avg', 'min', 'max'}, optional
  2124. Method for computing a root to represent a group of identical roots.
  2125. Default is 'avg'. See `unique_roots` for further details.
  2126. Returns
  2127. -------
  2128. b : ndarray
  2129. Numerator polynomial coefficients.
  2130. a : ndarray
  2131. Denominator polynomial coefficients.
  2132. See Also
  2133. --------
  2134. residue, invresz, unique_roots
  2135. """
  2136. r = np.atleast_1d(r)
  2137. p = np.atleast_1d(p)
  2138. k = np.trim_zeros(np.atleast_1d(k), 'f')
  2139. unique_poles, multiplicity = _group_poles(p, tol, rtype)
  2140. factors, denominator = _compute_factors(unique_poles, multiplicity,
  2141. include_powers=True)
  2142. if len(k) == 0:
  2143. numerator = 0
  2144. else:
  2145. numerator = np.polymul(k, denominator)
  2146. for residue, factor in zip(r, factors):
  2147. numerator = np.polyadd(numerator, residue * factor)
  2148. return numerator, denominator
  2149. def _compute_factors(roots, multiplicity, include_powers=False):
  2150. """Compute the total polynomial divided by factors for each root."""
  2151. current = np.array([1])
  2152. suffixes = [current]
  2153. for pole, mult in zip(roots[-1:0:-1], multiplicity[-1:0:-1]):
  2154. monomial = np.array([1, -pole])
  2155. for _ in range(mult):
  2156. current = np.polymul(current, monomial)
  2157. suffixes.append(current)
  2158. suffixes = suffixes[::-1]
  2159. factors = []
  2160. current = np.array([1])
  2161. for pole, mult, suffix in zip(roots, multiplicity, suffixes):
  2162. monomial = np.array([1, -pole])
  2163. block = []
  2164. for i in range(mult):
  2165. if i == 0 or include_powers:
  2166. block.append(np.polymul(current, suffix))
  2167. current = np.polymul(current, monomial)
  2168. factors.extend(reversed(block))
  2169. return factors, current
  2170. def _compute_residues(poles, multiplicity, numerator):
  2171. denominator_factors, _ = _compute_factors(poles, multiplicity)
  2172. numerator = numerator.astype(poles.dtype)
  2173. residues = []
  2174. for pole, mult, factor in zip(poles, multiplicity,
  2175. denominator_factors):
  2176. if mult == 1:
  2177. residues.append(np.polyval(numerator, pole) /
  2178. np.polyval(factor, pole))
  2179. else:
  2180. numer = numerator.copy()
  2181. monomial = np.array([1, -pole])
  2182. factor, d = np.polydiv(factor, monomial)
  2183. block = []
  2184. for _ in range(mult):
  2185. numer, n = np.polydiv(numer, monomial)
  2186. r = n[0] / d[0]
  2187. numer = np.polysub(numer, r * factor)
  2188. block.append(r)
  2189. residues.extend(reversed(block))
  2190. return np.asarray(residues)
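# Illustrative sketch (not part of SciPy; hypothetical helper): for
# H(s) = 1 / ((s - 1)(s - 2)) the residues are -1 at s = 1 and +1 at s = 2,
# which the helper above reproduces from the numerator and the distinct poles.
def _example_compute_residues_simple():
    poles = np.array([1.0, 2.0])
    r = _compute_residues(poles, np.array([1, 1]), np.array([1.0]))
    return np.allclose(r, [-1.0, 1.0])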
  2191. def residue(b, a, tol=1e-3, rtype='avg'):
  2192. """Compute partial-fraction expansion of b(s) / a(s).
  2193. If `M` is the degree of numerator `b` and `N` the degree of denominator
  2194. `a`::
    H(s) = b(s) / a(s)

         = (b[0] s**(M) + b[1] s**(M-1) + ... + b[M]) /
           (a[0] s**(N) + a[1] s**(N-1) + ... + a[N])

then the partial-fraction expansion H(s) is defined as::

    H(s) = r[0] / (s - p[0]) + r[1] / (s - p[1]) + ... + r[-1] / (s - p[-1])
           + k(s)

If there are any repeated roots (closer together than `tol`), then H(s)
has terms like::

    r[i] / (s - p[i]) + r[i+1] / (s - p[i])**2 + ... + r[i+n-1] / (s - p[i])**n
  2207. This function is used for polynomials in positive powers of s or z,
  2208. such as analog filters or digital filters in controls engineering. For
  2209. negative powers of z (typical for digital filters in DSP), use `residuez`.
  2210. See Notes for details about the algorithm.
  2211. Parameters
  2212. ----------
  2213. b : array_like
  2214. Numerator polynomial coefficients.
  2215. a : array_like
  2216. Denominator polynomial coefficients.
  2217. tol : float, optional
  2218. The tolerance for two roots to be considered equal in terms of
  2219. the distance between them. Default is 1e-3. See `unique_roots`
  2220. for further details.
  2221. rtype : {'avg', 'min', 'max'}, optional
  2222. Method for computing a root to represent a group of identical roots.
  2223. Default is 'avg'. See `unique_roots` for further details.
  2224. Returns
  2225. -------
  2226. r : ndarray
  2227. Residues corresponding to the poles. For repeated poles, the residues
are ordered to correspond to the fractions in ascending order of power.
  2229. p : ndarray
  2230. Poles ordered by magnitude in ascending order.
  2231. k : ndarray
  2232. Coefficients of the direct polynomial term.
  2233. See Also
  2234. --------
  2235. invres, residuez, numpy.poly, unique_roots
  2236. Notes
  2237. -----
  2238. The "deflation through subtraction" algorithm is used for
  2239. computations --- method 6 in [1]_.
  2240. The form of partial fraction expansion depends on poles multiplicity in
  2241. the exact mathematical sense. However there is no way to exactly
  2242. determine multiplicity of roots of a polynomial in numerical computing.
  2243. Thus you should think of the result of `residue` with given `tol` as
  2244. partial fraction expansion computed for the denominator composed of the
  2245. computed poles with empirically determined multiplicity. The choice of
  2246. `tol` can drastically change the result if there are close poles.
  2247. References
  2248. ----------
  2249. .. [1] J. F. Mahoney, B. D. Sivazlian, "Partial fractions expansion: a
  2250. review of computational methodology and efficiency", Journal of
  2251. Computational and Applied Mathematics, Vol. 9, 1983.
  2252. """
  2253. b = np.asarray(b)
  2254. a = np.asarray(a)
  2255. if (np.issubdtype(b.dtype, np.complexfloating)
  2256. or np.issubdtype(a.dtype, np.complexfloating)):
  2257. b = b.astype(complex)
  2258. a = a.astype(complex)
  2259. else:
  2260. b = b.astype(float)
  2261. a = a.astype(float)
  2262. b = np.trim_zeros(np.atleast_1d(b), 'f')
  2263. a = np.trim_zeros(np.atleast_1d(a), 'f')
  2264. if a.size == 0:
  2265. raise ValueError("Denominator `a` is zero.")
  2266. poles = np.roots(a)
  2267. if b.size == 0:
  2268. return np.zeros(poles.shape), cmplx_sort(poles)[0], np.array([])
  2269. if len(b) < len(a):
  2270. k = np.empty(0)
  2271. else:
  2272. k, b = np.polydiv(b, a)
  2273. unique_poles, multiplicity = unique_roots(poles, tol=tol, rtype=rtype)
  2274. unique_poles, order = cmplx_sort(unique_poles)
  2275. multiplicity = multiplicity[order]
  2276. residues = _compute_residues(unique_poles, multiplicity, b)
  2277. index = 0
  2278. for pole, mult in zip(unique_poles, multiplicity):
  2279. poles[index:index + mult] = pole
  2280. index += mult
  2281. return residues / a[0], poles, k
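# Illustrative sketch (not part of SciPy; hypothetical helper): residue and
# invres are inverses up to floating point noise, so a round trip yields the
# same rational function (compared here by evaluating both at one point).
def _example_residue_roundtrip():
    b = [1.0, 5.0]
    a = [1.0, 3.0, 2.0]                       # poles at -1 and -2
    r, p, k = residue(b, a)
    b2, a2 = invres(r, p, k)
    return np.allclose(np.polyval(b2, 0.5) / np.polyval(a2, 0.5),
                       np.polyval(b, 0.5) / np.polyval(a, 0.5))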
  2282. def residuez(b, a, tol=1e-3, rtype='avg'):
  2283. """Compute partial-fraction expansion of b(z) / a(z).
  2284. If `M` is the degree of numerator `b` and `N` the degree of denominator
  2285. `a`::
    H(z) = b(z) / a(z)

         = (b[0] + b[1] z**(-1) + ... + b[M] z**(-M)) /
           (a[0] + a[1] z**(-1) + ... + a[N] z**(-N))

then the partial-fraction expansion H(z) is defined as::

    H(z) = r[0] / (1 - p[0] z**(-1)) + ... + r[-1] / (1 - p[-1] z**(-1))
           + k[0] + k[1] z**(-1) + ...

If there are any repeated roots (closer than `tol`), then the partial
fraction expansion has terms like::

    r[i] / (1 - p[i] z**(-1)) + r[i+1] / (1 - p[i] z**(-1))**2 + ...
        + r[i+n-1] / (1 - p[i] z**(-1))**n
  2298. This function is used for polynomials in negative powers of z,
  2299. such as digital filters in DSP. For positive powers, use `residue`.
  2300. See Notes of `residue` for details about the algorithm.
  2301. Parameters
  2302. ----------
  2303. b : array_like
  2304. Numerator polynomial coefficients.
  2305. a : array_like
  2306. Denominator polynomial coefficients.
  2307. tol : float, optional
  2308. The tolerance for two roots to be considered equal in terms of
  2309. the distance between them. Default is 1e-3. See `unique_roots`
  2310. for further details.
  2311. rtype : {'avg', 'min', 'max'}, optional
  2312. Method for computing a root to represent a group of identical roots.
  2313. Default is 'avg'. See `unique_roots` for further details.
  2314. Returns
  2315. -------
  2316. r : ndarray
Residues corresponding to the poles. For repeated poles, the residues
are ordered to correspond to the fractions in order of ascending power.
  2319. p : ndarray
  2320. Poles ordered by magnitude in ascending order.
  2321. k : ndarray
  2322. Coefficients of the direct polynomial term.
  2323. See Also
  2324. --------
  2325. invresz, residue, unique_roots
  2326. """
  2327. b = np.asarray(b)
  2328. a = np.asarray(a)
  2329. if (np.issubdtype(b.dtype, np.complexfloating)
  2330. or np.issubdtype(a.dtype, np.complexfloating)):
  2331. b = b.astype(complex)
  2332. a = a.astype(complex)
  2333. else:
  2334. b = b.astype(float)
  2335. a = a.astype(float)
  2336. b = np.trim_zeros(np.atleast_1d(b), 'b')
  2337. a = np.trim_zeros(np.atleast_1d(a), 'b')
  2338. if a.size == 0:
  2339. raise ValueError("Denominator `a` is zero.")
  2340. elif a[0] == 0:
raise ValueError("First coefficient of denominator `a` must be "
                 "non-zero.")
  2343. poles = np.roots(a)
  2344. if b.size == 0:
  2345. return np.zeros(poles.shape), cmplx_sort(poles)[0], np.array([])
  2346. b_rev = b[::-1]
  2347. a_rev = a[::-1]
  2348. if len(b_rev) < len(a_rev):
  2349. k_rev = np.empty(0)
  2350. else:
  2351. k_rev, b_rev = np.polydiv(b_rev, a_rev)
  2352. unique_poles, multiplicity = unique_roots(poles, tol=tol, rtype=rtype)
  2353. unique_poles, order = cmplx_sort(unique_poles)
  2354. multiplicity = multiplicity[order]
  2355. residues = _compute_residues(1 / unique_poles, multiplicity, b_rev)
  2356. index = 0
  2357. powers = np.empty(len(residues), dtype=int)
  2358. for pole, mult in zip(unique_poles, multiplicity):
  2359. poles[index:index + mult] = pole
  2360. powers[index:index + mult] = 1 + np.arange(mult)
  2361. index += mult
  2362. residues *= (-poles) ** powers / a_rev[0]
  2363. return residues, poles, k_rev[::-1]
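
# Illustrative sketch (the `_demo_*` helper below is for illustration only and
# is not part of the scipy.signal API): partial-fraction expansion of a simple
# two-pole digital filter in negative powers of z.  For
# H(z) = 1 / ((1 - 0.5*z**-1) * (1 - 0.25*z**-1)) the expected residues are
# 2 and -1 for the poles 0.5 and 0.25 (up to the magnitude-sorted ordering).
def _demo_residuez():
    from scipy import signal
    a = np.polymul([1.0, -0.5], [1.0, -0.25])  # [1, -0.75, 0.125]
    b = [1.0]
    r, p, k = signal.residuez(b, a)
    return r, p, k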
  2364. def _group_poles(poles, tol, rtype):
  2365. if rtype in ['max', 'maximum']:
  2366. reduce = np.max
  2367. elif rtype in ['min', 'minimum']:
  2368. reduce = np.min
  2369. elif rtype in ['avg', 'mean']:
  2370. reduce = np.mean
  2371. else:
  2372. raise ValueError("`rtype` must be one of "
  2373. "{'max', 'maximum', 'min', 'minimum', 'avg', 'mean'}")
  2374. unique = []
  2375. multiplicity = []
  2376. pole = poles[0]
  2377. block = [pole]
  2378. for i in range(1, len(poles)):
  2379. if abs(poles[i] - pole) <= tol:
block.append(poles[i])
  2381. else:
  2382. unique.append(reduce(block))
  2383. multiplicity.append(len(block))
  2384. pole = poles[i]
  2385. block = [pole]
  2386. unique.append(reduce(block))
  2387. multiplicity.append(len(block))
  2388. return np.asarray(unique), np.asarray(multiplicity)
  2389. def invresz(r, p, k, tol=1e-3, rtype='avg'):
  2390. """Compute b(z) and a(z) from partial fraction expansion.
  2391. If `M` is the degree of numerator `b` and `N` the degree of denominator
  2392. `a`::
        b(z)     b[0] + b[1] z**(-1) + ... + b[M] z**(-M)
H(z) = ------ = ------------------------------------------
        a(z)     a[0] + a[1] z**(-1) + ... + a[N] z**(-N)
  2396. then the partial-fraction expansion H(z) is defined as::
         r[0]                    r[-1]
  = --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ...
    (1-p[0]z**(-1))         (1-p[-1]z**(-1))
  2400. If there are any repeated roots (closer than `tol`), then the partial
  2401. fraction expansion has terms like::
     r[i]               r[i+1]                     r[i+n-1]
--------------- + ------------------ + ... + ------------------
(1-p[i]z**(-1))   (1-p[i]z**(-1))**2          (1-p[i]z**(-1))**n
  2405. This function is used for polynomials in negative powers of z,
  2406. such as digital filters in DSP. For positive powers, use `invres`.
  2407. Parameters
  2408. ----------
  2409. r : array_like
Residues corresponding to the poles. For repeated poles, the residues
must be ordered to correspond to the fractions in order of ascending power.
  2412. p : array_like
  2413. Poles. Equal poles must be adjacent.
  2414. k : array_like
  2415. Coefficients of the direct polynomial term.
  2416. tol : float, optional
  2417. The tolerance for two roots to be considered equal in terms of
  2418. the distance between them. Default is 1e-3. See `unique_roots`
  2419. for further details.
  2420. rtype : {'avg', 'min', 'max'}, optional
  2421. Method for computing a root to represent a group of identical roots.
  2422. Default is 'avg'. See `unique_roots` for further details.
  2423. Returns
  2424. -------
  2425. b : ndarray
  2426. Numerator polynomial coefficients.
  2427. a : ndarray
  2428. Denominator polynomial coefficients.
  2429. See Also
  2430. --------
  2431. residuez, unique_roots, invres
  2432. """
  2433. r = np.atleast_1d(r)
  2434. p = np.atleast_1d(p)
  2435. k = np.trim_zeros(np.atleast_1d(k), 'b')
  2436. unique_poles, multiplicity = _group_poles(p, tol, rtype)
  2437. factors, denominator = _compute_factors(unique_poles, multiplicity,
  2438. include_powers=True)
  2439. if len(k) == 0:
  2440. numerator = 0
  2441. else:
  2442. numerator = np.polymul(k[::-1], denominator[::-1])
  2443. for residue, factor in zip(r, factors):
  2444. numerator = np.polyadd(numerator, residue * factor[::-1])
  2445. return numerator[::-1], denominator
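
# Illustrative sketch (the `_demo_*` helper below is for illustration only and
# is not part of the scipy.signal API): `invresz` is the inverse of
# `residuez`, so a round trip recovers the original numerator and denominator
# up to floating-point error when ``a[0] == 1``.
def _demo_invresz_roundtrip():
    from scipy import signal
    b = [2.0, -1.0, 0.5]
    a = [1.0, -0.9, 0.2]
    r, p, k = signal.residuez(b, a)
    b2, a2 = signal.invresz(r, p, k)
    return np.allclose(b, b2) and np.allclose(a, a2)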
  2446. def resample(x, num, t=None, axis=0, window=None, domain='time'):
  2447. """
  2448. Resample `x` to `num` samples using Fourier method along the given axis.
  2449. The resampled signal starts at the same value as `x` but is sampled
  2450. with a spacing of ``len(x) / num * (spacing of x)``. Because a
  2451. Fourier method is used, the signal is assumed to be periodic.
  2452. Parameters
  2453. ----------
  2454. x : array_like
  2455. The data to be resampled.
  2456. num : int
  2457. The number of samples in the resampled signal.
  2458. t : array_like, optional
  2459. If `t` is given, it is assumed to be the equally spaced sample
  2460. positions associated with the signal data in `x`.
  2461. axis : int, optional
  2462. The axis of `x` that is resampled. Default is 0.
  2463. window : array_like, callable, string, float, or tuple, optional
  2464. Specifies the window applied to the signal in the Fourier
  2465. domain. See below for details.
  2466. domain : string, optional
  2467. A string indicating the domain of the input `x`:
  2468. ``time`` Consider the input `x` as time-domain (Default),
  2469. ``freq`` Consider the input `x` as frequency-domain.
  2470. Returns
  2471. -------
  2472. resampled_x or (resampled_x, resampled_t)
  2473. Either the resampled array, or, if `t` was given, a tuple
  2474. containing the resampled array and the corresponding resampled
  2475. positions.
  2476. See Also
  2477. --------
  2478. decimate : Downsample the signal after applying an FIR or IIR filter.
  2479. resample_poly : Resample using polyphase filtering and an FIR filter.
  2480. Notes
  2481. -----
  2482. The argument `window` controls a Fourier-domain window that tapers
  2483. the Fourier spectrum before zero-padding to alleviate ringing in
  2484. the resampled values for sampled signals you didn't intend to be
  2485. interpreted as band-limited.
  2486. If `window` is a function, then it is called with a vector of inputs
  2487. indicating the frequency bins (i.e. fftfreq(x.shape[axis]) ).
  2488. If `window` is an array of the same length as `x.shape[axis]` it is
  2489. assumed to be the window to be applied directly in the Fourier
  2490. domain (with dc and low-frequency first).
  2491. For any other type of `window`, the function `scipy.signal.get_window`
  2492. is called to generate the window.
  2493. The first sample of the returned vector is the same as the first
  2494. sample of the input vector. The spacing between samples is changed
  2495. from ``dx`` to ``dx * len(x) / num``.
  2496. If `t` is not None, then it is used solely to calculate the resampled
positions `resampled_t`.
  2498. As noted, `resample` uses FFT transformations, which can be very
  2499. slow if the number of input or output samples is large and prime;
  2500. see `scipy.fft.fft`.
  2501. Examples
  2502. --------
  2503. Note that the end of the resampled data rises to meet the first
  2504. sample of the next cycle:
  2505. >>> import numpy as np
  2506. >>> from scipy import signal
  2507. >>> x = np.linspace(0, 10, 20, endpoint=False)
  2508. >>> y = np.cos(-x**2/6.0)
  2509. >>> f = signal.resample(y, 100)
  2510. >>> xnew = np.linspace(0, 10, 100, endpoint=False)
  2511. >>> import matplotlib.pyplot as plt
  2512. >>> plt.plot(x, y, 'go-', xnew, f, '.-', 10, y[0], 'ro')
  2513. >>> plt.legend(['data', 'resampled'], loc='best')
  2514. >>> plt.show()
  2515. """
  2516. if domain not in ('time', 'freq'):
  2517. raise ValueError("Acceptable domain flags are 'time' or"
  2518. " 'freq', not domain={}".format(domain))
  2519. x = np.asarray(x)
  2520. Nx = x.shape[axis]
  2521. # Check if we can use faster real FFT
  2522. real_input = np.isrealobj(x)
  2523. if domain == 'time':
  2524. # Forward transform
  2525. if real_input:
  2526. X = sp_fft.rfft(x, axis=axis)
  2527. else: # Full complex FFT
  2528. X = sp_fft.fft(x, axis=axis)
  2529. else: # domain == 'freq'
  2530. X = x
  2531. # Apply window to spectrum
  2532. if window is not None:
  2533. if callable(window):
  2534. W = window(sp_fft.fftfreq(Nx))
  2535. elif isinstance(window, np.ndarray):
  2536. if window.shape != (Nx,):
  2537. raise ValueError('window must have the same length as data')
  2538. W = window
  2539. else:
  2540. W = sp_fft.ifftshift(get_window(window, Nx))
  2541. newshape_W = [1] * x.ndim
  2542. newshape_W[axis] = X.shape[axis]
  2543. if real_input:
  2544. # Fold the window back on itself to mimic complex behavior
  2545. W_real = W.copy()
  2546. W_real[1:] += W_real[-1:0:-1]
  2547. W_real[1:] *= 0.5
  2548. X *= W_real[:newshape_W[axis]].reshape(newshape_W)
  2549. else:
  2550. X *= W.reshape(newshape_W)
  2551. # Copy each half of the original spectrum to the output spectrum, either
# truncating high frequencies (downsampling) or zero-padding them
  2553. # (upsampling)
  2554. # Placeholder array for output spectrum
  2555. newshape = list(x.shape)
  2556. if real_input:
  2557. newshape[axis] = num // 2 + 1
  2558. else:
  2559. newshape[axis] = num
  2560. Y = np.zeros(newshape, X.dtype)
  2561. # Copy positive frequency components (and Nyquist, if present)
  2562. N = min(num, Nx)
  2563. nyq = N // 2 + 1 # Slice index that includes Nyquist if present
  2564. sl = [slice(None)] * x.ndim
  2565. sl[axis] = slice(0, nyq)
  2566. Y[tuple(sl)] = X[tuple(sl)]
  2567. if not real_input:
  2568. # Copy negative frequency components
  2569. if N > 2: # (slice expression doesn't collapse to empty array)
  2570. sl[axis] = slice(nyq - N, None)
  2571. Y[tuple(sl)] = X[tuple(sl)]
  2572. # Split/join Nyquist component(s) if present
  2573. # So far we have set Y[+N/2]=X[+N/2]
  2574. if N % 2 == 0:
  2575. if num < Nx: # downsampling
  2576. if real_input:
  2577. sl[axis] = slice(N//2, N//2 + 1)
  2578. Y[tuple(sl)] *= 2.
  2579. else:
  2580. # select the component of Y at frequency +N/2,
  2581. # add the component of X at -N/2
  2582. sl[axis] = slice(-N//2, -N//2 + 1)
  2583. Y[tuple(sl)] += X[tuple(sl)]
  2584. elif Nx < num: # upsampling
  2585. # select the component at frequency +N/2 and halve it
  2586. sl[axis] = slice(N//2, N//2 + 1)
  2587. Y[tuple(sl)] *= 0.5
  2588. if not real_input:
  2589. temp = Y[tuple(sl)]
  2590. # set the component at -N/2 equal to the component at +N/2
  2591. sl[axis] = slice(num-N//2, num-N//2 + 1)
  2592. Y[tuple(sl)] = temp
  2593. # Inverse transform
  2594. if real_input:
  2595. y = sp_fft.irfft(Y, num, axis=axis)
  2596. else:
  2597. y = sp_fft.ifft(Y, axis=axis, overwrite_x=True)
  2598. y *= (float(num) / float(Nx))
  2599. if t is None:
  2600. return y
  2601. else:
  2602. new_t = np.arange(0, num) * (t[1] - t[0]) * Nx / float(num) + t[0]
  2603. return y, new_t
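
# Illustrative sketch (the `_demo_*` helper below is for illustration only and
# is not part of the scipy.signal API): passing `window` as a callable to
# `resample`.  The callable receives the FFT sample frequencies (as returned
# by scipy.fft.fftfreq for the input length) and returns the spectral
# weighting applied before padding/truncation.
def _demo_resample_window():
    from scipy import signal
    x = np.sin(2 * np.pi * 5 * np.linspace(0, 1, 100, endpoint=False))

    def taper(f):
        # Raised-cosine taper over the normalized frequency axis [-0.5, 0.5).
        return 0.5 * (1 + np.cos(2 * np.pi * f))

    y_plain = signal.resample(x, 400)
    y_tapered = signal.resample(x, 400, window=taper)
    return y_plain, y_tapered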
  2604. def resample_poly(x, up, down, axis=0, window=('kaiser', 5.0),
  2605. padtype='constant', cval=None):
  2606. """
  2607. Resample `x` along the given axis using polyphase filtering.
  2608. The signal `x` is upsampled by the factor `up`, a zero-phase low-pass
  2609. FIR filter is applied, and then it is downsampled by the factor `down`.
  2610. The resulting sample rate is ``up / down`` times the original sample
  2611. rate. By default, values beyond the boundary of the signal are assumed
  2612. to be zero during the filtering step.
  2613. Parameters
  2614. ----------
  2615. x : array_like
  2616. The data to be resampled.
  2617. up : int
  2618. The upsampling factor.
  2619. down : int
  2620. The downsampling factor.
  2621. axis : int, optional
  2622. The axis of `x` that is resampled. Default is 0.
  2623. window : string, tuple, or array_like, optional
  2624. Desired window to use to design the low-pass filter, or the FIR filter
  2625. coefficients to employ. See below for details.
  2626. padtype : string, optional
  2627. `constant`, `line`, `mean`, `median`, `maximum`, `minimum` or any of
  2628. the other signal extension modes supported by `scipy.signal.upfirdn`.
  2629. Changes assumptions on values beyond the boundary. If `constant`,
  2630. assumed to be `cval` (default zero). If `line` assumed to continue a
  2631. linear trend defined by the first and last points. `mean`, `median`,
  2632. `maximum` and `minimum` work as in `np.pad` and assume that the values
  2633. beyond the boundary are the mean, median, maximum or minimum
  2634. respectively of the array along the axis.
  2635. .. versionadded:: 1.4.0
  2636. cval : float, optional
  2637. Value to use if `padtype='constant'`. Default is zero.
  2638. .. versionadded:: 1.4.0
  2639. Returns
  2640. -------
  2641. resampled_x : array
  2642. The resampled array.
  2643. See Also
  2644. --------
  2645. decimate : Downsample the signal after applying an FIR or IIR filter.
  2646. resample : Resample up or down using the FFT method.
  2647. Notes
  2648. -----
  2649. This polyphase method will likely be faster than the Fourier method
  2650. in `scipy.signal.resample` when the number of samples is large and
  2651. prime, or when the number of samples is large and `up` and `down`
share a large greatest common divisor. The length of the FIR
  2653. filter used will depend on ``max(up, down) // gcd(up, down)``, and
  2654. the number of operations during polyphase filtering will depend on
  2655. the filter length and `down` (see `scipy.signal.upfirdn` for details).
  2656. The argument `window` specifies the FIR low-pass filter design.
  2657. If `window` is an array_like it is assumed to be the FIR filter
  2658. coefficients. Note that the FIR filter is applied after the upsampling
  2659. step, so it should be designed to operate on a signal at a sampling
  2660. frequency higher than the original by a factor of `up//gcd(up, down)`.
  2661. This function's output will be centered with respect to this array, so it
  2662. is best to pass a symmetric filter with an odd number of samples if, as
  2663. is usually the case, a zero-phase filter is desired.
  2664. For any other type of `window`, the functions `scipy.signal.get_window`
  2665. and `scipy.signal.firwin` are called to generate the appropriate filter
  2666. coefficients.
  2667. The first sample of the returned vector is the same as the first
  2668. sample of the input vector. The spacing between samples is changed
  2669. from ``dx`` to ``dx * down / float(up)``.
  2670. Examples
  2671. --------
  2672. By default, the end of the resampled data rises to meet the first
  2673. sample of the next cycle for the FFT method, and gets closer to zero
  2674. for the polyphase method:
  2675. >>> import numpy as np
  2676. >>> from scipy import signal
  2677. >>> import matplotlib.pyplot as plt
  2678. >>> x = np.linspace(0, 10, 20, endpoint=False)
  2679. >>> y = np.cos(-x**2/6.0)
  2680. >>> f_fft = signal.resample(y, 100)
  2681. >>> f_poly = signal.resample_poly(y, 100, 20)
  2682. >>> xnew = np.linspace(0, 10, 100, endpoint=False)
  2683. >>> plt.plot(xnew, f_fft, 'b.-', xnew, f_poly, 'r.-')
  2684. >>> plt.plot(x, y, 'ko-')
  2685. >>> plt.plot(10, y[0], 'bo', 10, 0., 'ro') # boundaries
  2686. >>> plt.legend(['resample', 'resamp_poly', 'data'], loc='best')
  2687. >>> plt.show()
  2688. This default behaviour can be changed by using the padtype option:
  2689. >>> N = 5
  2690. >>> x = np.linspace(0, 1, N, endpoint=False)
  2691. >>> y = 2 + x**2 - 1.7*np.sin(x) + .2*np.cos(11*x)
  2692. >>> y2 = 1 + x**3 + 0.1*np.sin(x) + .1*np.cos(11*x)
  2693. >>> Y = np.stack([y, y2], axis=-1)
  2694. >>> up = 4
  2695. >>> xr = np.linspace(0, 1, N*up, endpoint=False)
  2696. >>> y2 = signal.resample_poly(Y, up, 1, padtype='constant')
  2697. >>> y3 = signal.resample_poly(Y, up, 1, padtype='mean')
  2698. >>> y4 = signal.resample_poly(Y, up, 1, padtype='line')
  2699. >>> for i in [0,1]:
  2700. ... plt.figure()
  2701. ... plt.plot(xr, y4[:,i], 'g.', label='line')
  2702. ... plt.plot(xr, y3[:,i], 'y.', label='mean')
  2703. ... plt.plot(xr, y2[:,i], 'r.', label='constant')
  2704. ... plt.plot(x, Y[:,i], 'k-')
  2705. ... plt.legend()
  2706. >>> plt.show()
  2707. """
  2708. x = np.asarray(x)
  2709. if up != int(up):
  2710. raise ValueError("up must be an integer")
  2711. if down != int(down):
  2712. raise ValueError("down must be an integer")
  2713. up = int(up)
  2714. down = int(down)
  2715. if up < 1 or down < 1:
  2716. raise ValueError('up and down must be >= 1')
  2717. if cval is not None and padtype != 'constant':
raise ValueError('cval has no effect when padtype is {!r}'.format(padtype))
  2719. # Determine our up and down factors
  2720. # Use a rational approximation to save computation time on really long
  2721. # signals
  2722. g_ = math.gcd(up, down)
  2723. up //= g_
  2724. down //= g_
  2725. if up == down == 1:
  2726. return x.copy()
  2727. n_in = x.shape[axis]
  2728. n_out = n_in * up
  2729. n_out = n_out // down + bool(n_out % down)
  2730. if isinstance(window, (list, np.ndarray)):
  2731. window = np.array(window) # use array to force a copy (we modify it)
  2732. if window.ndim > 1:
  2733. raise ValueError('window must be 1-D')
  2734. half_len = (window.size - 1) // 2
  2735. h = window
  2736. else:
  2737. # Design a linear-phase low-pass FIR filter
  2738. max_rate = max(up, down)
  2739. f_c = 1. / max_rate # cutoff of FIR filter (rel. to Nyquist)
  2740. half_len = 10 * max_rate # reasonable cutoff for sinc-like function
  2741. h = firwin(2 * half_len + 1, f_c,
  2742. window=window).astype(x.dtype) # match dtype of x
  2743. h *= up
  2744. # Zero-pad our filter to put the output samples at the center
  2745. n_pre_pad = (down - half_len % down)
  2746. n_post_pad = 0
  2747. n_pre_remove = (half_len + n_pre_pad) // down
  2748. # We should rarely need to do this given our filter lengths...
  2749. while _output_len(len(h) + n_pre_pad + n_post_pad, n_in,
  2750. up, down) < n_out + n_pre_remove:
  2751. n_post_pad += 1
  2752. h = np.concatenate((np.zeros(n_pre_pad, dtype=h.dtype), h,
  2753. np.zeros(n_post_pad, dtype=h.dtype)))
  2754. n_pre_remove_end = n_pre_remove + n_out
  2755. # Remove background depending on the padtype option
  2756. funcs = {'mean': np.mean, 'median': np.median,
  2757. 'minimum': np.amin, 'maximum': np.amax}
  2758. upfirdn_kwargs = {'mode': 'constant', 'cval': 0}
  2759. if padtype in funcs:
  2760. background_values = funcs[padtype](x, axis=axis, keepdims=True)
  2761. elif padtype in _upfirdn_modes:
  2762. upfirdn_kwargs = {'mode': padtype}
  2763. if padtype == 'constant':
  2764. if cval is None:
  2765. cval = 0
  2766. upfirdn_kwargs['cval'] = cval
  2767. else:
  2768. raise ValueError(
  2769. 'padtype must be one of: maximum, mean, median, minimum, ' +
  2770. ', '.join(_upfirdn_modes))
  2771. if padtype in funcs:
  2772. x = x - background_values
  2773. # filter then remove excess
  2774. y = upfirdn(h, x, up, down, axis=axis, **upfirdn_kwargs)
  2775. keep = [slice(None), ]*x.ndim
  2776. keep[axis] = slice(n_pre_remove, n_pre_remove_end)
  2777. y_keep = y[tuple(keep)]
  2778. # Add background back
  2779. if padtype in funcs:
  2780. y_keep += background_values
  2781. return y_keep
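
# Illustrative sketch (the `_demo_*` helper below is for illustration only and
# is not part of the scipy.signal API): `up` and `down` are reduced by their
# greatest common divisor, so (4, 2) and (2, 1) give identical output, and a
# user-designed FIR filter can be passed via `window`.  Such a filter must be
# designed for the upsampled rate, here up // gcd(up, down) == 2 times the
# original rate.
def _demo_resample_poly_window():
    from scipy import signal
    x = np.cos(2 * np.pi * 3 * np.linspace(0, 1, 50, endpoint=False))
    same = np.allclose(signal.resample_poly(x, 4, 2),
                       signal.resample_poly(x, 2, 1))
    # Odd-length (symmetric, zero-phase-friendly) FIR filter; cutoff 0.5
    # relative to the upsampled Nyquist, i.e. at the original Nyquist.
    h = signal.firwin(101, 0.5)
    y_fir = signal.resample_poly(x, 2, 1, window=h)
    return same, y_fir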
  2782. def vectorstrength(events, period):
  2783. '''
  2784. Determine the vector strength of the events corresponding to the given
  2785. period.
  2786. The vector strength is a measure of phase synchrony, how well the
  2787. timing of the events is synchronized to a single period of a periodic
  2788. signal.
  2789. If multiple periods are used, calculate the vector strength of each.
  2790. This is called the "resonating vector strength".
  2791. Parameters
  2792. ----------
  2793. events : 1D array_like
  2794. An array of time points containing the timing of the events.
  2795. period : float or array_like
  2796. The period of the signal that the events should synchronize to.
  2797. The period is in the same units as `events`. It can also be an array
  2798. of periods, in which case the outputs are arrays of the same length.
  2799. Returns
  2800. -------
  2801. strength : float or 1D array
  2802. The strength of the synchronization. 1.0 is perfect synchronization
  2803. and 0.0 is no synchronization. If `period` is an array, this is also
  2804. an array with each element containing the vector strength at the
  2805. corresponding period.
  2806. phase : float or array
  2807. The phase that the events are most strongly synchronized to in radians.
  2808. If `period` is an array, this is also an array with each element
  2809. containing the phase for the corresponding period.
  2810. References
  2811. ----------
  2812. van Hemmen, JL, Longtin, A, and Vollmayr, AN. Testing resonating vector
  2813. strength: Auditory system, electric fish, and noise.
  2814. Chaos 21, 047508 (2011);
  2815. :doi:`10.1063/1.3670512`.
  2816. van Hemmen, JL. Vector strength after Goldberg, Brown, and von Mises:
  2817. biological and mathematical perspectives. Biol Cybern.
  2818. 2013 Aug;107(4):385-96. :doi:`10.1007/s00422-013-0561-7`.
  2819. van Hemmen, JL and Vollmayr, AN. Resonating vector strength: what happens
  2820. when we vary the "probing" frequency while keeping the spike times
  2821. fixed. Biol Cybern. 2013 Aug;107(4):491-94.
  2822. :doi:`10.1007/s00422-013-0560-8`.
  2823. '''
  2824. events = np.asarray(events)
  2825. period = np.asarray(period)
  2826. if events.ndim > 1:
  2827. raise ValueError('events cannot have dimensions more than 1')
  2828. if period.ndim > 1:
  2829. raise ValueError('period cannot have dimensions more than 1')
  2830. # we need to know later if period was originally a scalar
  2831. scalarperiod = not period.ndim
  2832. events = np.atleast_2d(events)
  2833. period = np.atleast_2d(period)
  2834. if (period <= 0).any():
  2835. raise ValueError('periods must be positive')
  2836. # this converts the times to vectors
  2837. vectors = np.exp(np.dot(2j*np.pi/period.T, events))
  2838. # the vector strength is just the magnitude of the mean of the vectors
  2839. # the vector phase is the angle of the mean of the vectors
  2840. vectormean = np.mean(vectors, axis=1)
  2841. strength = abs(vectormean)
  2842. phase = np.angle(vectormean)
  2843. # if the original period was a scalar, return scalars
  2844. if scalarperiod:
  2845. strength = strength[0]
  2846. phase = phase[0]
  2847. return strength, phase
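
# Illustrative sketch (the `_demo_*` helper below is for illustration only and
# is not part of the scipy.signal API): events locked to the same phase of
# every cycle give a vector strength of 1, while events spread uniformly over
# the cycle give a strength near 0.
def _demo_vectorstrength():
    from scipy import signal
    period = 1.0
    locked = np.arange(20) * period + 0.3                # same phase each cycle
    spread = np.linspace(0, period, 20, endpoint=False)  # uniform phases
    s_locked, phase_locked = signal.vectorstrength(locked, period)
    s_spread, _ = signal.vectorstrength(spread, period)
    return s_locked, phase_locked, s_spread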
  2848. def detrend(data, axis=-1, type='linear', bp=0, overwrite_data=False):
  2849. """
  2850. Remove linear trend along axis from data.
  2851. Parameters
  2852. ----------
  2853. data : array_like
  2854. The input data.
  2855. axis : int, optional
  2856. The axis along which to detrend the data. By default this is the
  2857. last axis (-1).
  2858. type : {'linear', 'constant'}, optional
  2859. The type of detrending. If ``type == 'linear'`` (default),
  2860. the result of a linear least-squares fit to `data` is subtracted
  2861. from `data`.
  2862. If ``type == 'constant'``, only the mean of `data` is subtracted.
  2863. bp : array_like of ints, optional
  2864. A sequence of break points. If given, an individual linear fit is
  2865. performed for each part of `data` between two break points.
  2866. Break points are specified as indices into `data`. This parameter
  2867. only has an effect when ``type == 'linear'``.
  2868. overwrite_data : bool, optional
  2869. If True, perform in place detrending and avoid a copy. Default is False
  2870. Returns
  2871. -------
  2872. ret : ndarray
  2873. The detrended input data.
  2874. Examples
  2875. --------
  2876. >>> import numpy as np
  2877. >>> from scipy import signal
  2878. >>> rng = np.random.default_rng()
  2879. >>> npoints = 1000
  2880. >>> noise = rng.standard_normal(npoints)
  2881. >>> x = 3 + 2*np.linspace(0, 1, npoints) + noise
  2882. >>> (signal.detrend(x) - noise).max()
  2883. 0.06 # random
  2884. """
  2885. if type not in ['linear', 'l', 'constant', 'c']:
  2886. raise ValueError("Trend type must be 'linear' or 'constant'.")
  2887. data = np.asarray(data)
  2888. dtype = data.dtype.char
  2889. if dtype not in 'dfDF':
  2890. dtype = 'd'
  2891. if type in ['constant', 'c']:
  2892. ret = data - np.mean(data, axis, keepdims=True)
  2893. return ret
  2894. else:
  2895. dshape = data.shape
  2896. N = dshape[axis]
  2897. bp = np.sort(np.unique(np.r_[0, bp, N]))
  2898. if np.any(bp > N):
  2899. raise ValueError("Breakpoints must be less than length "
  2900. "of data along given axis.")
  2901. Nreg = len(bp) - 1
  2902. # Restructure data so that axis is along first dimension and
  2903. # all other dimensions are collapsed into second dimension
  2904. rnk = len(dshape)
  2905. if axis < 0:
  2906. axis = axis + rnk
  2907. newdims = np.r_[axis, 0:axis, axis + 1:rnk]
  2908. newdata = np.reshape(np.transpose(data, tuple(newdims)),
  2909. (N, _prod(dshape) // N))
  2910. if not overwrite_data:
  2911. newdata = newdata.copy() # make sure we have a copy
  2912. if newdata.dtype.char not in 'dfDF':
  2913. newdata = newdata.astype(dtype)
  2914. # Find leastsq fit and remove it for each piece
  2915. for m in range(Nreg):
  2916. Npts = bp[m + 1] - bp[m]
  2917. A = np.ones((Npts, 2), dtype)
  2918. A[:, 0] = np.cast[dtype](np.arange(1, Npts + 1) * 1.0 / Npts)
  2919. sl = slice(bp[m], bp[m + 1])
  2920. coef, resids, rank, s = linalg.lstsq(A, newdata[sl])
  2921. newdata[sl] = newdata[sl] - np.dot(A, coef)
  2922. # Put data back in original shape.
  2923. tdshape = np.take(dshape, newdims, 0)
  2924. ret = np.reshape(newdata, tuple(tdshape))
  2925. vals = list(range(1, rnk))
  2926. olddims = vals[:axis] + [0] + vals[axis:]
  2927. ret = np.transpose(ret, tuple(olddims))
  2928. return ret
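
# Illustrative sketch (the `_demo_*` helper below is for illustration only and
# is not part of the scipy.signal API): piecewise-linear detrending with the
# `bp` argument.  The test signal is exactly linear on each side of sample
# 500, so the piecewise fit removes the trend almost completely while a single
# global fit leaves a large residual.
def _demo_detrend_breakpoints():
    from scipy import signal
    t = np.arange(1000)
    x = np.where(t < 500, 0.02 * t, 10.0 - 0.01 * (t - 500))
    single = signal.detrend(x)               # one global linear fit
    piecewise = signal.detrend(x, bp=[500])  # fits on [0, 500) and [500, 1000)
    return np.abs(single).max(), np.abs(piecewise).max()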
  2929. def lfilter_zi(b, a):
  2930. """
  2931. Construct initial conditions for lfilter for step response steady-state.
  2932. Compute an initial state `zi` for the `lfilter` function that corresponds
  2933. to the steady state of the step response.
  2934. A typical use of this function is to set the initial state so that the
  2935. output of the filter starts at the same value as the first element of
  2936. the signal to be filtered.
  2937. Parameters
  2938. ----------
  2939. b, a : array_like (1-D)
  2940. The IIR filter coefficients. See `lfilter` for more
  2941. information.
  2942. Returns
  2943. -------
  2944. zi : 1-D ndarray
  2945. The initial state for the filter.
  2946. See Also
  2947. --------
  2948. lfilter, lfiltic, filtfilt
  2949. Notes
  2950. -----
  2951. A linear filter with order m has a state space representation (A, B, C, D),
  2952. for which the output y of the filter can be expressed as::
  2953. z(n+1) = A*z(n) + B*x(n)
  2954. y(n) = C*z(n) + D*x(n)
  2955. where z(n) is a vector of length m, A has shape (m, m), B has shape
  2956. (m, 1), C has shape (1, m) and D has shape (1, 1) (assuming x(n) is
  2957. a scalar). lfilter_zi solves::
  2958. zi = A*zi + B
  2959. In other words, it finds the initial condition for which the response
  2960. to an input of all ones is a constant.
  2961. Given the filter coefficients `a` and `b`, the state space matrices
  2962. for the transposed direct form II implementation of the linear filter,
  2963. which is the implementation used by scipy.signal.lfilter, are::
  2964. A = scipy.linalg.companion(a).T
  2965. B = b[1:] - a[1:]*b[0]
  2966. assuming `a[0]` is 1.0; if `a[0]` is not 1, `a` and `b` are first
  2967. divided by a[0].
  2968. Examples
  2969. --------
  2970. The following code creates a lowpass Butterworth filter. Then it
  2971. applies that filter to an array whose values are all 1.0; the
  2972. output is also all 1.0, as expected for a lowpass filter. If the
  2973. `zi` argument of `lfilter` had not been given, the output would have
  2974. shown the transient signal.
  2975. >>> from numpy import array, ones
  2976. >>> from scipy.signal import lfilter, lfilter_zi, butter
  2977. >>> b, a = butter(5, 0.25)
  2978. >>> zi = lfilter_zi(b, a)
  2979. >>> y, zo = lfilter(b, a, ones(10), zi=zi)
  2980. >>> y
  2981. array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])
  2982. Another example:
  2983. >>> x = array([0.5, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0])
  2984. >>> y, zf = lfilter(b, a, x, zi=zi*x[0])
  2985. >>> y
  2986. array([ 0.5 , 0.5 , 0.5 , 0.49836039, 0.48610528,
  2987. 0.44399389, 0.35505241])
  2988. Note that the `zi` argument to `lfilter` was computed using
  2989. `lfilter_zi` and scaled by `x[0]`. Then the output `y` has no
  2990. transient until the input drops from 0.5 to 0.0.
  2991. """
  2992. # FIXME: Can this function be replaced with an appropriate
  2993. # use of lfiltic? For example, when b,a = butter(N,Wn),
  2994. # lfiltic(b, a, y=numpy.ones_like(a), x=numpy.ones_like(b)).
  2995. #
  2996. # We could use scipy.signal.normalize, but it uses warnings in
  2997. # cases where a ValueError is more appropriate, and it allows
  2998. # b to be 2D.
  2999. b = np.atleast_1d(b)
  3000. if b.ndim != 1:
  3001. raise ValueError("Numerator b must be 1-D.")
  3002. a = np.atleast_1d(a)
  3003. if a.ndim != 1:
  3004. raise ValueError("Denominator a must be 1-D.")
  3005. while len(a) > 1 and a[0] == 0.0:
  3006. a = a[1:]
  3007. if a.size < 1:
  3008. raise ValueError("There must be at least one nonzero `a` coefficient.")
  3009. if a[0] != 1.0:
  3010. # Normalize the coefficients so a[0] == 1.
  3011. b = b / a[0]
  3012. a = a / a[0]
  3013. n = max(len(a), len(b))
  3014. # Pad a or b with zeros so they are the same length.
  3015. if len(a) < n:
  3016. a = np.r_[a, np.zeros(n - len(a), dtype=a.dtype)]
  3017. elif len(b) < n:
  3018. b = np.r_[b, np.zeros(n - len(b), dtype=b.dtype)]
  3019. IminusA = np.eye(n - 1, dtype=np.result_type(a, b)) - linalg.companion(a).T
  3020. B = b[1:] - a[1:] * b[0]
  3021. # Solve zi = A*zi + B
  3022. zi = np.linalg.solve(IminusA, B)
  3023. # For future reference: we could also use the following
  3024. # explicit formulas to solve the linear system:
  3025. #
  3026. # zi = np.zeros(n - 1)
  3027. # zi[0] = B.sum() / IminusA[:,0].sum()
  3028. # asum = 1.0
  3029. # csum = 0.0
  3030. # for k in range(1,n-1):
  3031. # asum += a[k]
  3032. # csum += b[k] - a[k]*b[0]
  3033. # zi[k] = asum*zi[0] - csum
  3034. return zi
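
# Illustrative sketch (the `_demo_*` helper below is for illustration only and
# is not part of the scipy.signal API): numerically check the relation from
# the Notes, zi = A*zi + B, where A and B come from the transposed direct
# form II state-space realization of the filter.
def _demo_lfilter_zi_fixed_point():
    from scipy import signal, linalg
    b, a = signal.butter(4, 0.2)  # butter returns a with a[0] == 1
    zi = signal.lfilter_zi(b, a)
    A = linalg.companion(a).T
    B = b[1:] - a[1:] * b[0]
    return np.allclose(zi, A.dot(zi) + B)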
  3035. def sosfilt_zi(sos):
  3036. """
  3037. Construct initial conditions for sosfilt for step response steady-state.
  3038. Compute an initial state `zi` for the `sosfilt` function that corresponds
  3039. to the steady state of the step response.
  3040. A typical use of this function is to set the initial state so that the
  3041. output of the filter starts at the same value as the first element of
  3042. the signal to be filtered.
  3043. Parameters
  3044. ----------
  3045. sos : array_like
  3046. Array of second-order filter coefficients, must have shape
  3047. ``(n_sections, 6)``. See `sosfilt` for the SOS filter format
  3048. specification.
  3049. Returns
  3050. -------
  3051. zi : ndarray
  3052. Initial conditions suitable for use with ``sosfilt``, shape
  3053. ``(n_sections, 2)``.
  3054. See Also
  3055. --------
  3056. sosfilt, zpk2sos
  3057. Notes
  3058. -----
  3059. .. versionadded:: 0.16.0
  3060. Examples
  3061. --------
  3062. Filter a rectangular pulse that begins at time 0, with and without
  3063. the use of the `zi` argument of `scipy.signal.sosfilt`.
  3064. >>> import numpy as np
  3065. >>> from scipy import signal
  3066. >>> import matplotlib.pyplot as plt
  3067. >>> sos = signal.butter(9, 0.125, output='sos')
  3068. >>> zi = signal.sosfilt_zi(sos)
  3069. >>> x = (np.arange(250) < 100).astype(int)
  3070. >>> f1 = signal.sosfilt(sos, x)
  3071. >>> f2, zo = signal.sosfilt(sos, x, zi=zi)
  3072. >>> plt.plot(x, 'k--', label='x')
  3073. >>> plt.plot(f1, 'b', alpha=0.5, linewidth=2, label='filtered')
  3074. >>> plt.plot(f2, 'g', alpha=0.25, linewidth=4, label='filtered with zi')
  3075. >>> plt.legend(loc='best')
  3076. >>> plt.show()
  3077. """
  3078. sos = np.asarray(sos)
  3079. if sos.ndim != 2 or sos.shape[1] != 6:
  3080. raise ValueError('sos must be shape (n_sections, 6)')
  3081. if sos.dtype.kind in 'bui':
  3082. sos = sos.astype(np.float64)
  3083. n_sections = sos.shape[0]
  3084. zi = np.empty((n_sections, 2), dtype=sos.dtype)
  3085. scale = 1.0
  3086. for section in range(n_sections):
  3087. b = sos[section, :3]
  3088. a = sos[section, 3:]
  3089. zi[section] = scale * lfilter_zi(b, a)
  3090. # If H(z) = B(z)/A(z) is this section's transfer function, then
  3091. # b.sum()/a.sum() is H(1), the gain at omega=0. That's the steady
  3092. # state value of this section's step response.
  3093. scale *= b.sum() / a.sum()
  3094. return zi
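
# Illustrative sketch (the `_demo_*` helper below is for illustration only and
# is not part of the scipy.signal API): with `zi` from `sosfilt_zi`, a
# cascaded-sections filter driven by a constant input starts (and stays) at
# its steady-state value instead of showing a start-up transient.  The
# Butterworth lowpass has unit DC gain, so the steady state for ones is 1.
def _demo_sosfilt_zi():
    from scipy import signal
    sos = signal.butter(6, 0.3, output='sos')
    zi = signal.sosfilt_zi(sos)
    y, _ = signal.sosfilt(sos, np.ones(30), zi=zi)
    return np.allclose(y, 1.0)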
  3095. def _filtfilt_gust(b, a, x, axis=-1, irlen=None):
  3096. """Forward-backward IIR filter that uses Gustafsson's method.
  3097. Apply the IIR filter defined by `(b,a)` to `x` twice, first forward
  3098. then backward, using Gustafsson's initial conditions [1]_.
  3099. Let ``y_fb`` be the result of filtering first forward and then backward,
  3100. and let ``y_bf`` be the result of filtering first backward then forward.
  3101. Gustafsson's method is to compute initial conditions for the forward
  3102. pass and the backward pass such that ``y_fb == y_bf``.
  3103. Parameters
  3104. ----------
  3105. b : scalar or 1-D ndarray
  3106. Numerator coefficients of the filter.
  3107. a : scalar or 1-D ndarray
  3108. Denominator coefficients of the filter.
  3109. x : ndarray
  3110. Data to be filtered.
  3111. axis : int, optional
  3112. Axis of `x` to be filtered. Default is -1.
  3113. irlen : int or None, optional
  3114. The length of the nonnegligible part of the impulse response.
  3115. If `irlen` is None, or if the length of the signal is less than
  3116. ``2 * irlen``, then no part of the impulse response is ignored.
  3117. Returns
  3118. -------
  3119. y : ndarray
  3120. The filtered data.
  3121. x0 : ndarray
  3122. Initial condition for the forward filter.
  3123. x1 : ndarray
  3124. Initial condition for the backward filter.
  3125. Notes
  3126. -----
  3127. Typically the return values `x0` and `x1` are not needed by the
  3128. caller. The intended use of these return values is in unit tests.
  3129. References
  3130. ----------
.. [1] F. Gustafsson. Determining the initial states in forward-backward
filtering. IEEE Transactions on Signal Processing, 46(4):988-992, 1996.
  3133. """
  3134. # In the comments, "Gustafsson's paper" and [1] refer to the
  3135. # paper referenced in the docstring.
  3136. b = np.atleast_1d(b)
  3137. a = np.atleast_1d(a)
  3138. order = max(len(b), len(a)) - 1
  3139. if order == 0:
  3140. # The filter is just scalar multiplication, with no state.
  3141. scale = (b[0] / a[0])**2
  3142. y = scale * x
  3143. return y, np.array([]), np.array([])
if axis != -1 and axis != x.ndim - 1:
  3145. # Move the axis containing the data to the end.
  3146. x = np.swapaxes(x, axis, x.ndim - 1)
  3147. # n is the number of samples in the data to be filtered.
  3148. n = x.shape[-1]
  3149. if irlen is None or n <= 2*irlen:
  3150. m = n
  3151. else:
  3152. m = irlen
  3153. # Create Obs, the observability matrix (called O in the paper).
  3154. # This matrix can be interpreted as the operator that propagates
  3155. # an arbitrary initial state to the output, assuming the input is
  3156. # zero.
  3157. # In Gustafsson's paper, the forward and backward filters are not
  3158. # necessarily the same, so he has both O_f and O_b. We use the same
  3159. # filter in both directions, so we only need O. The same comment
  3160. # applies to S below.
  3161. Obs = np.zeros((m, order))
  3162. zi = np.zeros(order)
  3163. zi[0] = 1
  3164. Obs[:, 0] = lfilter(b, a, np.zeros(m), zi=zi)[0]
  3165. for k in range(1, order):
  3166. Obs[k:, k] = Obs[:-k, 0]
  3167. # Obsr is O^R (Gustafsson's notation for row-reversed O)
  3168. Obsr = Obs[::-1]
  3169. # Create S. S is the matrix that applies the filter to the reversed
  3170. # propagated initial conditions. That is,
  3171. # out = S.dot(zi)
  3172. # is the same as
  3173. # tmp, _ = lfilter(b, a, zeros(), zi=zi) # Propagate ICs.
  3174. # out = lfilter(b, a, tmp[::-1]) # Reverse and filter.
  3175. # Equations (5) & (6) of [1]
  3176. S = lfilter(b, a, Obs[::-1], axis=0)
  3177. # Sr is S^R (row-reversed S)
  3178. Sr = S[::-1]
  3179. # M is [(S^R - O), (O^R - S)]
  3180. if m == n:
  3181. M = np.hstack((Sr - Obs, Obsr - S))
  3182. else:
  3183. # Matrix described in section IV of [1].
  3184. M = np.zeros((2*m, 2*order))
  3185. M[:m, :order] = Sr - Obs
  3186. M[m:, order:] = Obsr - S
  3187. # Naive forward-backward and backward-forward filters.
  3188. # These have large transients because the filters use zero initial
  3189. # conditions.
  3190. y_f = lfilter(b, a, x)
  3191. y_fb = lfilter(b, a, y_f[..., ::-1])[..., ::-1]
  3192. y_b = lfilter(b, a, x[..., ::-1])[..., ::-1]
  3193. y_bf = lfilter(b, a, y_b)
  3194. delta_y_bf_fb = y_bf - y_fb
  3195. if m == n:
  3196. delta = delta_y_bf_fb
  3197. else:
  3198. start_m = delta_y_bf_fb[..., :m]
  3199. end_m = delta_y_bf_fb[..., -m:]
  3200. delta = np.concatenate((start_m, end_m), axis=-1)
  3201. # ic_opt holds the "optimal" initial conditions.
  3202. # The following code computes the result shown in the formula
  3203. # of the paper between equations (6) and (7).
  3204. if delta.ndim == 1:
  3205. ic_opt = linalg.lstsq(M, delta)[0]
  3206. else:
  3207. # Reshape delta so it can be used as an array of multiple
  3208. # right-hand-sides in linalg.lstsq.
  3209. delta2d = delta.reshape(-1, delta.shape[-1]).T
  3210. ic_opt0 = linalg.lstsq(M, delta2d)[0].T
  3211. ic_opt = ic_opt0.reshape(delta.shape[:-1] + (M.shape[-1],))
  3212. # Now compute the filtered signal using equation (7) of [1].
  3213. # First, form [S^R, O^R] and call it W.
  3214. if m == n:
  3215. W = np.hstack((Sr, Obsr))
  3216. else:
  3217. W = np.zeros((2*m, 2*order))
  3218. W[:m, :order] = Sr
  3219. W[m:, order:] = Obsr
  3220. # Equation (7) of [1] says
  3221. # Y_fb^opt = Y_fb^0 + W * [x_0^opt; x_{N-1}^opt]
  3222. # `wic` is (almost) the product on the right.
  3223. # W has shape (m, 2*order), and ic_opt has shape (..., 2*order),
  3224. # so we can't use W.dot(ic_opt). Instead, we dot ic_opt with W.T,
  3225. # so wic has shape (..., m).
  3226. wic = ic_opt.dot(W.T)
  3227. # `wic` is "almost" the product of W and the optimal ICs in equation
  3228. # (7)--if we're using a truncated impulse response (m < n), `wic`
  3229. # contains only the adjustments required for the ends of the signal.
  3230. # Here we form y_opt, taking this into account if necessary.
  3231. y_opt = y_fb
  3232. if m == n:
  3233. y_opt += wic
  3234. else:
  3235. y_opt[..., :m] += wic[..., :m]
  3236. y_opt[..., -m:] += wic[..., -m:]
  3237. x0 = ic_opt[..., :order]
  3238. x1 = ic_opt[..., -order:]
if axis != -1 and axis != x.ndim - 1:
  3240. # Restore the data axis to its original position.
  3241. x0 = np.swapaxes(x0, axis, x.ndim - 1)
  3242. x1 = np.swapaxes(x1, axis, x.ndim - 1)
  3243. y_opt = np.swapaxes(y_opt, axis, x.ndim - 1)
  3244. return y_opt, x0, x1
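
# Illustrative sketch (the `_demo_*` helper below is for illustration only and
# is not part of the scipy.signal API): filtfilt(..., method='gust') wraps
# `_filtfilt_gust`; calling the private helper directly additionally exposes
# the optimal initial conditions x0 and x1 (length ``order`` each), which is
# how the unit tests exercise it.
def _demo_filtfilt_gust_helper():
    from scipy import signal
    rng = np.random.default_rng(1)
    b, a = signal.butter(3, 0.25)
    x = rng.standard_normal(100)
    y, x0, x1 = _filtfilt_gust(b, a, x)
    y_public = signal.filtfilt(b, a, x, method='gust')
    return np.allclose(y, y_public), x0.shape, x1.shape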
  3245. def filtfilt(b, a, x, axis=-1, padtype='odd', padlen=None, method='pad',
  3246. irlen=None):
  3247. """
  3248. Apply a digital filter forward and backward to a signal.
  3249. This function applies a linear digital filter twice, once forward and
  3250. once backwards. The combined filter has zero phase and a filter order
  3251. twice that of the original.
  3252. The function provides options for handling the edges of the signal.
  3253. The function `sosfiltfilt` (and filter design using ``output='sos'``)
  3254. should be preferred over `filtfilt` for most filtering tasks, as
  3255. second-order sections have fewer numerical problems.
  3256. Parameters
  3257. ----------
  3258. b : (N,) array_like
  3259. The numerator coefficient vector of the filter.
  3260. a : (N,) array_like
  3261. The denominator coefficient vector of the filter. If ``a[0]``
  3262. is not 1, then both `a` and `b` are normalized by ``a[0]``.
  3263. x : array_like
  3264. The array of data to be filtered.
  3265. axis : int, optional
  3266. The axis of `x` to which the filter is applied.
  3267. Default is -1.
  3268. padtype : str or None, optional
  3269. Must be 'odd', 'even', 'constant', or None. This determines the
  3270. type of extension to use for the padded signal to which the filter
  3271. is applied. If `padtype` is None, no padding is used. The default
  3272. is 'odd'.
  3273. padlen : int or None, optional
  3274. The number of elements by which to extend `x` at both ends of
  3275. `axis` before applying the filter. This value must be less than
  3276. ``x.shape[axis] - 1``. ``padlen=0`` implies no padding.
  3277. The default value is ``3 * max(len(a), len(b))``.
  3278. method : str, optional
  3279. Determines the method for handling the edges of the signal, either
  3280. "pad" or "gust". When `method` is "pad", the signal is padded; the
  3281. type of padding is determined by `padtype` and `padlen`, and `irlen`
  3282. is ignored. When `method` is "gust", Gustafsson's method is used,
  3283. and `padtype` and `padlen` are ignored.
  3284. irlen : int or None, optional
  3285. When `method` is "gust", `irlen` specifies the length of the
  3286. impulse response of the filter. If `irlen` is None, no part
  3287. of the impulse response is ignored. For a long signal, specifying
  3288. `irlen` can significantly improve the performance of the filter.
  3289. Returns
  3290. -------
  3291. y : ndarray
  3292. The filtered output with the same shape as `x`.
  3293. See Also
  3294. --------
  3295. sosfiltfilt, lfilter_zi, lfilter, lfiltic, savgol_filter, sosfilt
  3296. Notes
  3297. -----
  3298. When `method` is "pad", the function pads the data along the given axis
  3299. in one of three ways: odd, even or constant. The odd and even extensions
  3300. have the corresponding symmetry about the end point of the data. The
  3301. constant extension extends the data with the values at the end points. On
  3302. both the forward and backward passes, the initial condition of the
  3303. filter is found by using `lfilter_zi` and scaling it by the end point of
  3304. the extended data.
  3305. When `method` is "gust", Gustafsson's method [1]_ is used. Initial
  3306. conditions are chosen for the forward and backward passes so that the
  3307. forward-backward filter gives the same result as the backward-forward
  3308. filter.
The option to use Gustafsson's method was added in scipy version 0.16.0.
  3310. References
  3311. ----------
.. [1] F. Gustafsson, "Determining the initial states in forward-backward
filtering", IEEE Transactions on Signal Processing, Vol. 46, pp. 988-992,
  3314. 1996.
  3315. Examples
  3316. --------
  3317. The examples will use several functions from `scipy.signal`.
  3318. >>> import numpy as np
  3319. >>> from scipy import signal
  3320. >>> import matplotlib.pyplot as plt
  3321. First we create a one second signal that is the sum of two pure sine
  3322. waves, with frequencies 5 Hz and 250 Hz, sampled at 2000 Hz.
  3323. >>> t = np.linspace(0, 1.0, 2001)
  3324. >>> xlow = np.sin(2 * np.pi * 5 * t)
  3325. >>> xhigh = np.sin(2 * np.pi * 250 * t)
  3326. >>> x = xlow + xhigh
  3327. Now create a lowpass Butterworth filter with a cutoff of 0.125 times
  3328. the Nyquist frequency, or 125 Hz, and apply it to ``x`` with `filtfilt`.
  3329. The result should be approximately ``xlow``, with no phase shift.
  3330. >>> b, a = signal.butter(8, 0.125)
  3331. >>> y = signal.filtfilt(b, a, x, padlen=150)
  3332. >>> np.abs(y - xlow).max()
  3333. 9.1086182074789912e-06
  3334. We get a fairly clean result for this artificial example because
  3335. the odd extension is exact, and with the moderately long padding,
  3336. the filter's transients have dissipated by the time the actual data
  3337. is reached. In general, transient effects at the edges are
  3338. unavoidable.
  3339. The following example demonstrates the option ``method="gust"``.
  3340. First, create a filter.
  3341. >>> b, a = signal.ellip(4, 0.01, 120, 0.125) # Filter to be applied.
  3342. `sig` is a random input signal to be filtered.
  3343. >>> rng = np.random.default_rng()
  3344. >>> n = 60
  3345. >>> sig = rng.standard_normal(n)**3 + 3*rng.standard_normal(n).cumsum()
  3346. Apply `filtfilt` to `sig`, once using the Gustafsson method, and
  3347. once using padding, and plot the results for comparison.
  3348. >>> fgust = signal.filtfilt(b, a, sig, method="gust")
  3349. >>> fpad = signal.filtfilt(b, a, sig, padlen=50)
  3350. >>> plt.plot(sig, 'k-', label='input')
  3351. >>> plt.plot(fgust, 'b-', linewidth=4, label='gust')
  3352. >>> plt.plot(fpad, 'c-', linewidth=1.5, label='pad')
  3353. >>> plt.legend(loc='best')
  3354. >>> plt.show()
  3355. The `irlen` argument can be used to improve the performance
  3356. of Gustafsson's method.
  3357. Estimate the impulse response length of the filter.
  3358. >>> z, p, k = signal.tf2zpk(b, a)
  3359. >>> eps = 1e-9
  3360. >>> r = np.max(np.abs(p))
  3361. >>> approx_impulse_len = int(np.ceil(np.log(eps) / np.log(r)))
  3362. >>> approx_impulse_len
  3363. 137
  3364. Apply the filter to a longer signal, with and without the `irlen`
  3365. argument. The difference between `y1` and `y2` is small. For long
  3366. signals, using `irlen` gives a significant performance improvement.
  3367. >>> x = rng.standard_normal(5000)
  3368. >>> y1 = signal.filtfilt(b, a, x, method='gust')
  3369. >>> y2 = signal.filtfilt(b, a, x, method='gust', irlen=approx_impulse_len)
  3370. >>> print(np.max(np.abs(y1 - y2)))
  3371. 1.80056858312e-10
  3372. """
  3373. b = np.atleast_1d(b)
  3374. a = np.atleast_1d(a)
  3375. x = np.asarray(x)
  3376. if method not in ["pad", "gust"]:
  3377. raise ValueError("method must be 'pad' or 'gust'.")
  3378. if method == "gust":
  3379. y, z1, z2 = _filtfilt_gust(b, a, x, axis=axis, irlen=irlen)
  3380. return y
  3381. # method == "pad"
  3382. edge, ext = _validate_pad(padtype, padlen, x, axis,
  3383. ntaps=max(len(a), len(b)))
  3384. # Get the steady state of the filter's step response.
  3385. zi = lfilter_zi(b, a)
  3386. # Reshape zi and create x0 so that zi*x0 broadcasts
  3387. # to the correct value for the 'zi' keyword argument
  3388. # to lfilter.
  3389. zi_shape = [1] * x.ndim
  3390. zi_shape[axis] = zi.size
  3391. zi = np.reshape(zi, zi_shape)
  3392. x0 = axis_slice(ext, stop=1, axis=axis)
  3393. # Forward filter.
  3394. (y, zf) = lfilter(b, a, ext, axis=axis, zi=zi * x0)
  3395. # Backward filter.
  3396. # Create y0 so zi*y0 broadcasts appropriately.
  3397. y0 = axis_slice(y, start=-1, axis=axis)
  3398. (y, zf) = lfilter(b, a, axis_reverse(y, axis=axis), axis=axis, zi=zi * y0)
  3399. # Reverse y.
  3400. y = axis_reverse(y, axis=axis)
  3401. if edge > 0:
  3402. # Slice the actual signal from the extended signal.
  3403. y = axis_slice(y, start=edge, stop=-edge, axis=axis)
  3404. return y
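
# Illustrative sketch (the `_demo_*` helper below is for illustration only and
# is not part of the scipy.signal API): the forward-backward pass cancels the
# filter's phase.  A slow sinusoid in the passband comes out of `filtfilt`
# essentially on top of the input, while a single `lfilter` pass shows the
# filter's delay and start-up transient.
def _demo_filtfilt_zero_phase():
    from scipy import signal
    t = np.linspace(0, 1, 500)
    x = np.sin(2 * np.pi * 2 * t)
    b, a = signal.butter(4, 0.2)
    err_filtfilt = np.abs(signal.filtfilt(b, a, x) - x).max()
    err_lfilter = np.abs(signal.lfilter(b, a, x) - x).max()
    return err_filtfilt, err_lfilter  # the first is much smaller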
  3405. def _validate_pad(padtype, padlen, x, axis, ntaps):
  3406. """Helper to validate padding for filtfilt"""
  3407. if padtype not in ['even', 'odd', 'constant', None]:
  3408. raise ValueError(("Unknown value '%s' given to padtype. padtype "
  3409. "must be 'even', 'odd', 'constant', or None.") %
  3410. padtype)
  3411. if padtype is None:
  3412. padlen = 0
  3413. if padlen is None:
  3414. # Original padding; preserved for backwards compatibility.
  3415. edge = ntaps * 3
  3416. else:
  3417. edge = padlen
  3418. # x's 'axis' dimension must be bigger than edge.
  3419. if x.shape[axis] <= edge:
  3420. raise ValueError("The length of the input vector x must be greater "
  3421. "than padlen, which is %d." % edge)
  3422. if padtype is not None and edge > 0:
  3423. # Make an extension of length `edge` at each
  3424. # end of the input array.
  3425. if padtype == 'even':
  3426. ext = even_ext(x, edge, axis=axis)
  3427. elif padtype == 'odd':
  3428. ext = odd_ext(x, edge, axis=axis)
  3429. else:
  3430. ext = const_ext(x, edge, axis=axis)
  3431. else:
  3432. ext = x
  3433. return edge, ext
  3434. def _validate_x(x):
  3435. x = np.asarray(x)
  3436. if x.ndim == 0:
  3437. raise ValueError('x must be at least 1-D')
  3438. return x
  3439. def sosfilt(sos, x, axis=-1, zi=None):
  3440. """
  3441. Filter data along one dimension using cascaded second-order sections.
  3442. Filter a data sequence, `x`, using a digital IIR filter defined by
  3443. `sos`.
  3444. Parameters
  3445. ----------
  3446. sos : array_like
  3447. Array of second-order filter coefficients, must have shape
  3448. ``(n_sections, 6)``. Each row corresponds to a second-order
  3449. section, with the first three columns providing the numerator
  3450. coefficients and the last three providing the denominator
  3451. coefficients.
  3452. x : array_like
  3453. An N-dimensional input array.
  3454. axis : int, optional
  3455. The axis of the input data array along which to apply the
  3456. linear filter. The filter is applied to each subarray along
  3457. this axis. Default is -1.
  3458. zi : array_like, optional
  3459. Initial conditions for the cascaded filter delays. It is a (at
  3460. least 2D) vector of shape ``(n_sections, ..., 2, ...)``, where
  3461. ``..., 2, ...`` denotes the shape of `x`, but with ``x.shape[axis]``
  3462. replaced by 2. If `zi` is None or is not given then initial rest
  3463. (i.e. all zeros) is assumed.
  3464. Note that these initial conditions are *not* the same as the initial
  3465. conditions given by `lfiltic` or `lfilter_zi`.
  3466. Returns
  3467. -------
  3468. y : ndarray
  3469. The output of the digital filter.
  3470. zf : ndarray, optional
  3471. If `zi` is None, this is not returned, otherwise, `zf` holds the
  3472. final filter delay values.
  3473. See Also
  3474. --------
  3475. zpk2sos, sos2zpk, sosfilt_zi, sosfiltfilt, sosfreqz
  3476. Notes
  3477. -----
  3478. The filter function is implemented as a series of second-order filters
  3479. with direct-form II transposed structure. It is designed to minimize
  3480. numerical precision errors for high-order filters.
  3481. .. versionadded:: 0.16.0
  3482. Examples
  3483. --------
  3484. Plot a 13th-order filter's impulse response using both `lfilter` and
  3485. `sosfilt`, showing the instability that results from trying to do a
  3486. 13th-order filter in a single stage (the numerical error pushes some poles
  3487. outside of the unit circle):
  3488. >>> import matplotlib.pyplot as plt
  3489. >>> from scipy import signal
  3490. >>> b, a = signal.ellip(13, 0.009, 80, 0.05, output='ba')
  3491. >>> sos = signal.ellip(13, 0.009, 80, 0.05, output='sos')
  3492. >>> x = signal.unit_impulse(700)
  3493. >>> y_tf = signal.lfilter(b, a, x)
  3494. >>> y_sos = signal.sosfilt(sos, x)
  3495. >>> plt.plot(y_tf, 'r', label='TF')
  3496. >>> plt.plot(y_sos, 'k', label='SOS')
  3497. >>> plt.legend(loc='best')
  3498. >>> plt.show()
  3499. """
  3500. x = _validate_x(x)
  3501. sos, n_sections = _validate_sos(sos)
  3502. x_zi_shape = list(x.shape)
  3503. x_zi_shape[axis] = 2
  3504. x_zi_shape = tuple([n_sections] + x_zi_shape)
  3505. inputs = [sos, x]
  3506. if zi is not None:
  3507. inputs.append(np.asarray(zi))
  3508. dtype = np.result_type(*inputs)
  3509. if dtype.char not in 'fdgFDGO':
  3510. raise NotImplementedError("input type '%s' not supported" % dtype)
  3511. if zi is not None:
  3512. zi = np.array(zi, dtype) # make a copy so that we can operate in place
  3513. if zi.shape != x_zi_shape:
  3514. raise ValueError('Invalid zi shape. With axis=%r, an input with '
  3515. 'shape %r, and an sos array with %d sections, zi '
  3516. 'must have shape %r, got %r.' %
  3517. (axis, x.shape, n_sections, x_zi_shape, zi.shape))
  3518. return_zi = True
  3519. else:
  3520. zi = np.zeros(x_zi_shape, dtype=dtype)
  3521. return_zi = False
  3522. axis = axis % x.ndim # make positive
  3523. x = np.moveaxis(x, axis, -1)
  3524. zi = np.moveaxis(zi, [0, axis + 1], [-2, -1])
  3525. x_shape, zi_shape = x.shape, zi.shape
  3526. x = np.reshape(x, (-1, x.shape[-1]))
  3527. x = np.array(x, dtype, order='C') # make a copy, can modify in place
  3528. zi = np.ascontiguousarray(np.reshape(zi, (-1, n_sections, 2)))
  3529. sos = sos.astype(dtype, copy=False)
  3530. _sosfilt(sos, x, zi)
  3531. x.shape = x_shape
  3532. x = np.moveaxis(x, -1, axis)
  3533. if return_zi:
  3534. zi.shape = zi_shape
  3535. zi = np.moveaxis(zi, [-2, -1], [0, axis + 1])
  3536. out = (x, zi)
  3537. else:
  3538. out = x
  3539. return out
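
# Illustrative sketch (the `_demo_*` helper below is for illustration only and
# is not part of the scipy.signal API): each row of `sos` is one biquad, so
# `sosfilt` is mathematically equivalent to running `lfilter` once per section
# in cascade; the SOS form simply keeps the coefficients better conditioned
# for high filter orders.
def _demo_sosfilt_cascade():
    from scipy import signal
    rng = np.random.default_rng(2)
    sos = signal.butter(6, 0.15, output='sos')
    x = rng.standard_normal(128)
    y_sos = signal.sosfilt(sos, x)
    y_cascade = x
    for section in sos:
        y_cascade = signal.lfilter(section[:3], section[3:], y_cascade)
    return np.allclose(y_sos, y_cascade)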


def sosfiltfilt(sos, x, axis=-1, padtype='odd', padlen=None):
    """
    A forward-backward digital filter using cascaded second-order sections.

    See `filtfilt` for more complete information about this method.

    Parameters
    ----------
    sos : array_like
        Array of second-order filter coefficients, must have shape
        ``(n_sections, 6)``. Each row corresponds to a second-order
        section, with the first three columns providing the numerator
        coefficients and the last three providing the denominator
        coefficients.
    x : array_like
        The array of data to be filtered.
    axis : int, optional
        The axis of `x` to which the filter is applied.
        Default is -1.
    padtype : str or None, optional
        Must be 'odd', 'even', 'constant', or None. This determines the
        type of extension to use for the padded signal to which the filter
        is applied. If `padtype` is None, no padding is used. The default
        is 'odd'.
    padlen : int or None, optional
        The number of elements by which to extend `x` at both ends of
        `axis` before applying the filter. This value must be less than
        ``x.shape[axis] - 1``. ``padlen=0`` implies no padding.
        The default value is::

            3 * (2 * len(sos) + 1 - min((sos[:, 2] == 0).sum(),
                                        (sos[:, 5] == 0).sum()))

        The extra subtraction at the end attempts to compensate for poles
        and zeros at the origin (e.g. for odd-order filters) to yield
        equivalent estimates of `padlen` to those of `filtfilt` for
        second-order section filters built with `scipy.signal` functions.

    Returns
    -------
    y : ndarray
        The filtered output with the same shape as `x`.

    See Also
    --------
    filtfilt, sosfilt, sosfilt_zi, sosfreqz

    Notes
    -----
    .. versionadded:: 0.18.0

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.signal import sosfiltfilt, butter
    >>> import matplotlib.pyplot as plt
    >>> rng = np.random.default_rng()

    Create an interesting signal to filter.

    >>> n = 201
    >>> t = np.linspace(0, 1, n)
    >>> x = 1 + (t < 0.5) - 0.25*t**2 + 0.05*rng.standard_normal(n)

    Create a lowpass Butterworth filter, and use it to filter `x`.

    >>> sos = butter(4, 0.125, output='sos')
    >>> y = sosfiltfilt(sos, x)

    For comparison, apply an 8th order filter using `sosfilt`. The filter
    is initialized using the mean of the first four values of `x`.

    >>> from scipy.signal import sosfilt, sosfilt_zi
    >>> sos8 = butter(8, 0.125, output='sos')
    >>> zi = x[:4].mean() * sosfilt_zi(sos8)
    >>> y2, zo = sosfilt(sos8, x, zi=zi)

    Plot the results. Note that the phase of `y` matches the input, while
    `y2` has a significant phase delay.

    >>> plt.plot(t, x, alpha=0.5, label='x(t)')
    >>> plt.plot(t, y, label='y(t)')
    >>> plt.plot(t, y2, label='y2(t)')
    >>> plt.legend(framealpha=1, shadow=True)
    >>> plt.grid(alpha=0.25)
    >>> plt.xlabel('t')
    >>> plt.show()

    """
    sos, n_sections = _validate_sos(sos)
    x = _validate_x(x)

    # `method` is "pad"...
    ntaps = 2 * n_sections + 1
    ntaps -= min((sos[:, 2] == 0).sum(), (sos[:, 5] == 0).sum())
    edge, ext = _validate_pad(padtype, padlen, x, axis,
                              ntaps=ntaps)

    # These steps follow the same form as filtfilt with modifications
    zi = sosfilt_zi(sos)  # shape (n_sections, 2) --> (n_sections, ..., 2, ...)
    zi_shape = [1] * x.ndim
    zi_shape[axis] = 2
    zi.shape = [n_sections] + zi_shape
    x_0 = axis_slice(ext, stop=1, axis=axis)
    (y, zf) = sosfilt(sos, ext, axis=axis, zi=zi * x_0)
    y_0 = axis_slice(y, start=-1, axis=axis)
    (y, zf) = sosfilt(sos, axis_reverse(y, axis=axis), axis=axis, zi=zi * y_0)
    y = axis_reverse(y, axis=axis)
    if edge > 0:
        y = axis_slice(y, start=edge, stop=-edge, axis=axis)
    return y
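

# A worked example (illustrative comment only, not part of the library code)
# of the default ``padlen`` computed above: for ``butter(5, 0.25,
# output='sos')`` there are 3 sections, and the first-order section has a
# pole and a zero at the origin (its trailing coefficients are 0), so
# ``ntaps = 2*3 + 1 - 1 = 6`` and the default padding is ``3 * 6 = 18``
# samples per end, in line with what `filtfilt` uses for the length-6
# ``(b, a)`` form of the same filter::
#
#     >>> from scipy.signal import butter
#     >>> sos = butter(5, 0.25, output='sos')
#     >>> int(2 * len(sos) + 1
#     ...     - min((sos[:, 2] == 0).sum(), (sos[:, 5] == 0).sum()))
#     6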


def decimate(x, q, n=None, ftype='iir', axis=-1, zero_phase=True):
    """
    Downsample the signal after applying an anti-aliasing filter.

    By default, an order 8 Chebyshev type I filter is used. A FIR filter of
    order ``20*q`` (length ``20*q + 1``) with a Hamming window is used if
    `ftype` is 'fir'.

    Parameters
    ----------
    x : array_like
        The signal to be downsampled, as an N-dimensional array.
    q : int
        The downsampling factor. When using IIR downsampling, it is
        recommended to call `decimate` multiple times for downsampling
        factors higher than 13.
    n : int, optional
        The order of the filter (1 less than the length for 'fir'). Defaults
        to 8 for 'iir' and 20 times the downsampling factor for 'fir'.
    ftype : str {'iir', 'fir'} or ``dlti`` instance, optional
        If 'iir' or 'fir', specifies the type of lowpass filter. If an
        instance of a `dlti` object, uses that object to filter before
        downsampling.
    axis : int, optional
        The axis along which to decimate.
    zero_phase : bool, optional
        Prevent phase shift by filtering with `filtfilt` instead of `lfilter`
        when using an IIR filter, and shifting the outputs back by the
        filter's group delay when using an FIR filter. The default value of
        ``True`` is recommended, since a phase shift is generally not desired.

        .. versionadded:: 0.18.0

    Returns
    -------
    y : ndarray
        The down-sampled signal.

    See Also
    --------
    resample : Resample up or down using the FFT method.
    resample_poly : Resample using polyphase filtering and an FIR filter.

    Notes
    -----
    The ``zero_phase`` keyword was added in 0.18.0.
    The possibility to use instances of ``dlti`` as ``ftype`` was added in
    0.18.0.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt

    Define wave parameters.

    >>> wave_duration = 3
    >>> sample_rate = 100
    >>> freq = 2
    >>> q = 5

    Calculate number of samples.

    >>> samples = wave_duration*sample_rate
    >>> samples_decimated = int(samples/q)

    Create cosine wave.

    >>> x = np.linspace(0, wave_duration, samples, endpoint=False)
    >>> y = np.cos(x*np.pi*freq*2)

    Decimate cosine wave.

    >>> ydem = signal.decimate(y, q)
    >>> xnew = np.linspace(0, wave_duration, samples_decimated, endpoint=False)

    Plot original and decimated waves.

    >>> plt.plot(x, y, '.-', xnew, ydem, 'o-')
    >>> plt.xlabel('Time, Seconds')
    >>> plt.legend(['data', 'decimated'], loc='best')
    >>> plt.show()

    """
    x = np.asarray(x)
    q = operator.index(q)

    if n is not None:
        n = operator.index(n)

    result_type = x.dtype
    if not np.issubdtype(result_type, np.inexact) \
            or result_type.type == np.float16:
        # upcast integers and float16 to float64
        result_type = np.float64

    if ftype == 'fir':
        if n is None:
            half_len = 10 * q  # reasonable cutoff for our sinc-like function
            n = 2 * half_len
        b, a = firwin(n+1, 1. / q, window='hamming'), 1.
        b = np.asarray(b, dtype=result_type)
        a = np.asarray(a, dtype=result_type)
    elif ftype == 'iir':
        if n is None:
            n = 8
        # Chebyshev type I lowpass: 0.05 dB passband ripple, cutoff at 0.8
        # times the post-decimation Nyquist frequency.
        sos = cheby1(n, 0.05, 0.8 / q, output='sos')
        sos = np.asarray(sos, dtype=result_type)
    elif isinstance(ftype, dlti):
        # Convert the user-supplied discrete-time system to second-order
        # sections for filtering.
        system = ftype._as_zpk()
        sos = zpk2sos(system.zeros, system.poles, system.gain)
        sos = np.asarray(sos, dtype=result_type)
    else:
        raise ValueError('invalid ftype')

    sl = [slice(None)] * x.ndim

    if ftype == 'fir':
        b = b / a
        if zero_phase:
            y = resample_poly(x, 1, q, axis=axis, window=b)
        else:
            # upfirdn is generally faster than lfilter by a factor equal to the
            # downsampling factor, since it only calculates the needed outputs
            n_out = x.shape[axis] // q + bool(x.shape[axis] % q)
            y = upfirdn(b, x, up=1, down=q, axis=axis)
            sl[axis] = slice(None, n_out, None)
    else:  # IIR case
        if zero_phase:
            y = sosfiltfilt(sos, x, axis=axis)
        else:
            y = sosfilt(sos, x, axis=axis)
        sl[axis] = slice(None, None, q)

    return y[tuple(sl)]
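

# A minimal sketch (illustrative comment only, not part of the library code)
# of the multi-stage usage recommended in the docstring for large IIR
# downsampling factors: rather than a single call with ``q > 13``, apply
# `decimate` repeatedly with smaller factors so the order-8 Chebyshev
# anti-aliasing filter never needs an extremely small normalized cutoff
# (``0.8 / q``).  For an overall factor of 100::
#
#     >>> import numpy as np
#     >>> from scipy import signal
#     >>> x = np.random.default_rng().standard_normal(10000)
#     >>> y = signal.decimate(signal.decimate(x, 10), 10)   # 10 * 10 = 100
#     >>> y.shape
#     (100,)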