"""
Tests for the stats.mstats module (support for masked arrays)
"""
import warnings
import platform

import numpy as np
from numpy import nan
import numpy.ma as ma
from numpy.ma import masked, nomask

import scipy.stats.mstats as mstats
from scipy import stats
from .common_tests import check_named_results
import pytest
from pytest import raises as assert_raises
from numpy.ma.testutils import (assert_equal, assert_almost_equal,
                                assert_array_almost_equal,
                                assert_array_almost_equal_nulp, assert_,
                                assert_allclose, assert_array_equal)
from numpy.testing import suppress_warnings
from scipy.stats import _mstats_basic

class TestMquantiles:
    def test_mquantiles_limit_keyword(self):
        # Regression test for Trac ticket #867
        data = np.array([[6., 7., 1.],
                         [47., 15., 2.],
                         [49., 36., 3.],
                         [15., 39., 4.],
                         [42., 40., -999.],
                         [41., 41., -999.],
                         [7., -999., -999.],
                         [39., -999., -999.],
                         [43., -999., -999.],
                         [40., -999., -999.],
                         [36., -999., -999.]])
        desired = [[19.2, 14.6, 1.45],
                   [40.0, 37.5, 2.5],
                   [42.8, 40.05, 3.55]]
        quants = mstats.mquantiles(data, axis=0, limit=(0, 50))
        assert_almost_equal(quants, desired)
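
# Illustrative sketch (not part of the original test suite; the underscore
# prefix keeps pytest from collecting it).  My reading is that the
# `limit=(low, high)` keyword makes mquantiles ignore values outside the open
# interval (low, high), which is why the -999 sentinels above do not affect
# the quantiles.  This is a hedged example of that reading on the first
# column, not a statement of the library's exact contract.
def _sketch_mquantiles_limit():
    col = np.array([6., 47., 49., 15., 42., 41., 7., 39., 43., 40., 36., -999.])
    with_limit = mstats.mquantiles(col, axis=0, limit=(0, 50))
    # Manually dropping the out-of-range value should give the same result.
    manual = mstats.mquantiles(col[(col > 0) & (col < 50)], axis=0)
    assert_allclose(with_limit, manual)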

def check_equal_gmean(array_like, desired, axis=None, dtype=None, rtol=1e-7):
    # Note this doesn't test when axis is not specified
    x = mstats.gmean(array_like, axis=axis, dtype=dtype)
    assert_allclose(x, desired, rtol=rtol)
    assert_equal(x.dtype, dtype)


def check_equal_hmean(array_like, desired, axis=None, dtype=None, rtol=1e-7):
    x = stats.hmean(array_like, axis=axis, dtype=dtype)
    assert_allclose(x, desired, rtol=rtol)
    assert_equal(x.dtype, dtype)


class TestGeoMean:
    def test_1d(self):
        a = [1, 2, 3, 4]
        desired = np.power(1*2*3*4, 1./4.)
        check_equal_gmean(a, desired, rtol=1e-14)

    def test_1d_ma(self):
        # Test a 1d masked array
        a = ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100])
        desired = 45.2872868812
        check_equal_gmean(a, desired)

        a = ma.array([1, 2, 3, 4], mask=[0, 0, 0, 1])
        desired = np.power(1*2*3, 1./3.)
        check_equal_gmean(a, desired, rtol=1e-14)

    def test_1d_ma_value(self):
        # Test a 1d masked array with a masked value
        a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100],
                        mask=[0, 0, 0, 0, 0, 0, 0, 0, 0, 1])
        desired = 41.4716627439
        check_equal_gmean(a, desired)

    def test_1d_ma0(self):
        # Test a 1d masked array with zero element
        a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 0])
        desired = 0
        check_equal_gmean(a, desired)

    def test_1d_ma_inf(self):
        # Test a 1d masked array with negative element
        a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, -1])
        desired = np.nan
        with np.errstate(invalid='ignore'):
            check_equal_gmean(a, desired)

    @pytest.mark.skipif(not hasattr(np, 'float96'),
                        reason='cannot find float96 so skipping')
    def test_1d_float96(self):
        a = ma.array([1, 2, 3, 4], mask=[0, 0, 0, 1])
        desired_dt = np.power(1*2*3, 1./3.).astype(np.float96)
        check_equal_gmean(a, desired_dt, dtype=np.float96, rtol=1e-14)

    def test_2d_ma(self):
        a = ma.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]],
                     mask=[[0, 0, 0, 0], [1, 0, 0, 1], [0, 1, 1, 0]])
        desired = np.array([1, 2, 3, 4])
        check_equal_gmean(a, desired, axis=0, rtol=1e-14)

        desired = ma.array([np.power(1*2*3*4, 1./4.),
                            np.power(2*3, 1./2.),
                            np.power(1*4, 1./2.)])
        check_equal_gmean(a, desired, axis=-1, rtol=1e-14)

        # Test a 2d masked array
        a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
        desired = 52.8885199
        check_equal_gmean(np.ma.array(a), desired)


class TestHarMean:
    def test_1d(self):
        a = ma.array([1, 2, 3, 4], mask=[0, 0, 0, 1])
        desired = 3. / (1./1 + 1./2 + 1./3)
        check_equal_hmean(a, desired, rtol=1e-14)

        a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100])
        desired = 34.1417152147
        check_equal_hmean(a, desired)

        a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100],
                        mask=[0, 0, 0, 0, 0, 0, 0, 0, 0, 1])
        desired = 31.8137186141
        check_equal_hmean(a, desired)

    @pytest.mark.skipif(not hasattr(np, 'float96'),
                        reason='cannot find float96 so skipping')
    def test_1d_float96(self):
        a = ma.array([1, 2, 3, 4], mask=[0, 0, 0, 1])
        desired_dt = np.asarray(3. / (1./1 + 1./2 + 1./3), dtype=np.float96)
        check_equal_hmean(a, desired_dt, dtype=np.float96)

    def test_2d(self):
        a = ma.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]],
                     mask=[[0, 0, 0, 0], [1, 0, 0, 1], [0, 1, 1, 0]])
        desired = ma.array([1, 2, 3, 4])
        check_equal_hmean(a, desired, axis=0, rtol=1e-14)

        desired = [4./(1/1.+1/2.+1/3.+1/4.), 2./(1/2.+1/3.), 2./(1/1.+1/4.)]
        check_equal_hmean(a, desired, axis=-1, rtol=1e-14)

        a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
        desired = 38.6696271841
        check_equal_hmean(np.ma.array(a), desired)
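
# Illustrative sketch (not part of the original tests): the masked-array cases
# above rely on gmean/hmean skipping masked entries, i.e. reducing to the
# plain formulas over the unmasked values only.  A minimal, hedged check of
# that reading on throwaway data:
def _sketch_masked_means():
    a = ma.array([1, 2, 3, 4], mask=[0, 0, 0, 1])  # only 1, 2, 3 are used
    assert_allclose(mstats.gmean(a), (1*2*3) ** (1./3.))        # geometric mean
    assert_allclose(stats.hmean(a), 3. / (1./1 + 1./2 + 1./3))  # harmonic mean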

class TestRanking:
    def test_ranking(self):
        x = ma.array([0,1,1,1,2,3,4,5,5,6,])
        assert_almost_equal(mstats.rankdata(x),
                            [1,3,3,3,5,6,7,8.5,8.5,10])
        x[[3,4]] = masked
        assert_almost_equal(mstats.rankdata(x),
                            [1,2.5,2.5,0,0,4,5,6.5,6.5,8])
        assert_almost_equal(mstats.rankdata(x, use_missing=True),
                            [1,2.5,2.5,4.5,4.5,4,5,6.5,6.5,8])
        x = ma.array([0,1,5,1,2,4,3,5,1,6,])
        assert_almost_equal(mstats.rankdata(x),
                            [1,3,8.5,3,5,7,6,8.5,3,10])
        x = ma.array([[0,1,1,1,2], [3,4,5,5,6,]])
        assert_almost_equal(mstats.rankdata(x),
                            [[1,3,3,3,5], [6,7,8.5,8.5,10]])
        assert_almost_equal(mstats.rankdata(x, axis=1),
                            [[1,3,3,3,5], [1,2,3.5,3.5,5]])
        assert_almost_equal(mstats.rankdata(x, axis=0),
                            [[1,1,1,1,1], [2,2,2,2,2,]])


class TestCorr:
    def test_pearsonr(self):
        # Tests some computations of Pearson's r
        x = ma.arange(10)
        with warnings.catch_warnings():
            # The tests in this context are edge cases, with perfect
            # correlation or anticorrelation, or totally masked data.
            # None of these should trigger a RuntimeWarning.
            warnings.simplefilter("error", RuntimeWarning)

            assert_almost_equal(mstats.pearsonr(x, x)[0], 1.0)
            assert_almost_equal(mstats.pearsonr(x, x[::-1])[0], -1.0)

            x = ma.array(x, mask=True)
            pr = mstats.pearsonr(x, x)
            assert_(pr[0] is masked)
            assert_(pr[1] is masked)

        x1 = ma.array([-1.0, 0.0, 1.0])
        y1 = ma.array([0, 0, 3])
        r, p = mstats.pearsonr(x1, y1)
        assert_almost_equal(r, np.sqrt(3)/2)
        assert_almost_equal(p, 1.0/3)

        # (x2, y2) have the same unmasked data as (x1, y1).
        mask = [False, False, False, True]
        x2 = ma.array([-1.0, 0.0, 1.0, 99.0], mask=mask)
        y2 = ma.array([0, 0, 3, -1], mask=mask)
        r, p = mstats.pearsonr(x2, y2)
        assert_almost_equal(r, np.sqrt(3)/2)
        assert_almost_equal(p, 1.0/3)

    def test_pearsonr_misaligned_mask(self):
        mx = np.ma.masked_array([1, 2, 3, 4, 5, 6], mask=[0, 1, 0, 0, 0, 0])
        my = np.ma.masked_array([9, 8, 7, 6, 5, 9], mask=[0, 0, 1, 0, 0, 0])
        x = np.array([1, 4, 5, 6])
        y = np.array([9, 6, 5, 9])
        mr, mp = mstats.pearsonr(mx, my)
        r, p = stats.pearsonr(x, y)
        assert_equal(mr, r)
        assert_equal(mp, p)

    def test_spearmanr(self):
        # Tests some computations of Spearman's rho
        (x, y) = ([5.05,6.75,3.21,2.66], [1.65,2.64,2.64,6.95])
        assert_almost_equal(mstats.spearmanr(x,y)[0], -0.6324555)
        (x, y) = ([5.05,6.75,3.21,2.66,np.nan],
                  [1.65,2.64,2.64,6.95,np.nan])
        (x, y) = (ma.fix_invalid(x), ma.fix_invalid(y))
        assert_almost_equal(mstats.spearmanr(x,y)[0], -0.6324555)

        x = [2.0, 47.4, 42.0, 10.8, 60.1, 1.7, 64.0, 63.1,
             1.0, 1.4, 7.9, 0.3, 3.9, 0.3, 6.7]
        y = [22.6, 8.3, 44.4, 11.9, 24.6, 0.6, 5.7, 41.6,
             0.0, 0.6, 6.7, 3.8, 1.0, 1.2, 1.4]
        assert_almost_equal(mstats.spearmanr(x,y)[0], 0.6887299)
        x = [2.0, 47.4, 42.0, 10.8, 60.1, 1.7, 64.0, 63.1,
             1.0, 1.4, 7.9, 0.3, 3.9, 0.3, 6.7, np.nan]
        y = [22.6, 8.3, 44.4, 11.9, 24.6, 0.6, 5.7, 41.6,
             0.0, 0.6, 6.7, 3.8, 1.0, 1.2, 1.4, np.nan]
        (x, y) = (ma.fix_invalid(x), ma.fix_invalid(y))
        assert_almost_equal(mstats.spearmanr(x,y)[0], 0.6887299)

        # Next test is to make sure calculation uses sufficient precision.
        # The denominator's value is ~n^3 and used to be represented as an
        # int. 2000**3 > 2**32 so these arrays would cause overflow on
        # some machines.
        x = list(range(2000))
        y = list(range(2000))
        y[0], y[9] = y[9], y[0]
        y[10], y[434] = y[434], y[10]
        y[435], y[1509] = y[1509], y[435]
        # rho = 1 - 6 * (2 * (9^2 + 424^2 + 1074^2))/(2000 * (2000^2 - 1))
        #     = 1 - (1 / 500)
        #     = 0.998
        # (this formula is reproduced in the sketch after this class)
        assert_almost_equal(mstats.spearmanr(x,y)[0], 0.998)

        # test for namedtuple attributes
        res = mstats.spearmanr(x, y)
        attributes = ('correlation', 'pvalue')
        check_named_results(res, attributes, ma=True)

    def test_spearmanr_alternative(self):
        # check against R
        # options(digits=16)
        # cor.test(c(2.0, 47.4, 42.0, 10.8, 60.1, 1.7, 64.0, 63.1,
        #            1.0, 1.4, 7.9, 0.3, 3.9, 0.3, 6.7),
        #          c(22.6, 8.3, 44.4, 11.9, 24.6, 0.6, 5.7, 41.6,
        #            0.0, 0.6, 6.7, 3.8, 1.0, 1.2, 1.4),
        #          alternative='two.sided', method='spearman')
        x = [2.0, 47.4, 42.0, 10.8, 60.1, 1.7, 64.0, 63.1,
             1.0, 1.4, 7.9, 0.3, 3.9, 0.3, 6.7]
        y = [22.6, 8.3, 44.4, 11.9, 24.6, 0.6, 5.7, 41.6,
             0.0, 0.6, 6.7, 3.8, 1.0, 1.2, 1.4]

        r_exp = 0.6887298747763864  # from cor.test

        r, p = mstats.spearmanr(x, y)
        assert_allclose(r, r_exp)
        assert_allclose(p, 0.004519192910756)

        r, p = mstats.spearmanr(x, y, alternative='greater')
        assert_allclose(r, r_exp)
        assert_allclose(p, 0.002259596455378)

        r, p = mstats.spearmanr(x, y, alternative='less')
        assert_allclose(r, r_exp)
        assert_allclose(p, 0.9977404035446)

        # intuitive test (with obvious positive correlation)
        n = 100
        x = np.linspace(0, 5, n)
        y = 0.1*x + np.random.rand(n)  # y is positively correlated w/ x
        stat1, p1 = mstats.spearmanr(x, y)
        stat2, p2 = mstats.spearmanr(x, y, alternative="greater")
        assert_allclose(p2, p1 / 2)  # positive correlation -> small p
        stat3, p3 = mstats.spearmanr(x, y, alternative="less")
        assert_allclose(p3, 1 - p1 / 2)  # positive correlation -> large p

        assert stat1 == stat2 == stat3

        with pytest.raises(ValueError, match="alternative must be 'less'..."):
            mstats.spearmanr(x, y, alternative="ekki-ekki")

    @pytest.mark.skipif(platform.machine() == 'ppc64le',
                        reason="fails/crashes on ppc64le")
    def test_kendalltau(self):
        # check case with maximum disorder and p=1
        x = ma.array(np.array([9, 2, 5, 6]))
        y = ma.array(np.array([4, 7, 9, 11]))
        # Cross-check with exact result from R:
        # cor.test(x,y,method="kendall",exact=1)
        expected = [0.0, 1.0]
        assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected)

        # simple case without ties
        x = ma.array(np.arange(10))
        y = ma.array(np.arange(10))
        # Cross-check with exact result from R:
        # cor.test(x,y,method="kendall",exact=1)
        expected = [1.0, 5.511463844797e-07]
        assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected)

        # check exception in case of invalid method keyword
        assert_raises(ValueError, mstats.kendalltau, x, y, method='banana')

        # swap a couple of values
        b = y[1]
        y[1] = y[2]
        y[2] = b
        # Cross-check with exact result from R:
        # cor.test(x,y,method="kendall",exact=1)
        expected = [0.9555555555555556, 5.511463844797e-06]
        assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected)

        # swap a couple more
        b = y[5]
        y[5] = y[6]
        y[6] = b
        # Cross-check with exact result from R:
        # cor.test(x,y,method="kendall",exact=1)
        expected = [0.9111111111111111, 2.976190476190e-05]
        assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected)

        # same in opposite direction
        x = ma.array(np.arange(10))
        y = ma.array(np.arange(10)[::-1])
        # Cross-check with exact result from R:
        # cor.test(x,y,method="kendall",exact=1)
        expected = [-1.0, 5.511463844797e-07]
        assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected)

        # swap a couple of values
        b = y[1]
        y[1] = y[2]
        y[2] = b
        # Cross-check with exact result from R:
        # cor.test(x,y,method="kendall",exact=1)
        expected = [-0.9555555555555556, 5.511463844797e-06]
        assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected)

        # swap a couple more
        b = y[5]
        y[5] = y[6]
        y[6] = b
        # Cross-check with exact result from R:
        # cor.test(x,y,method="kendall",exact=1)
        expected = [-0.9111111111111111, 2.976190476190e-05]
        assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected)

        # Tests some computations of Kendall's tau
        x = ma.fix_invalid([5.05, 6.75, 3.21, 2.66, np.nan])
        y = ma.fix_invalid([1.65, 26.5, -5.93, 7.96, np.nan])
        z = ma.fix_invalid([1.65, 2.64, 2.64, 6.95, np.nan])
        assert_almost_equal(np.asarray(mstats.kendalltau(x, y)),
                            [+0.3333333, 0.75])
        assert_almost_equal(np.asarray(mstats.kendalltau(x, y,
                                                         method='asymptotic')),
                            [+0.3333333, 0.4969059])
        assert_almost_equal(np.asarray(mstats.kendalltau(x, z)),
                            [-0.5477226, 0.2785987])

        x = ma.fix_invalid([0, 0, 0, 0, 20, 20, 0, 60, 0, 20,
                            10, 10, 0, 40, 0, 20, 0, 0, 0, 0, 0, np.nan])
        y = ma.fix_invalid([0, 80, 80, 80, 10, 33, 60, 0, 67, 27,
                            25, 80, 80, 80, 80, 80, 80, 0, 10, 45, np.nan, 0])
        result = mstats.kendalltau(x, y)
        assert_almost_equal(np.asarray(result), [-0.1585188, 0.4128009])

        # test for namedtuple attributes
        attributes = ('correlation', 'pvalue')
        check_named_results(result, attributes, ma=True)

    @pytest.mark.skipif(platform.machine() == 'ppc64le',
                        reason="fails/crashes on ppc64le")
    @pytest.mark.slow
    def test_kendalltau_large(self):
        # make sure internal variables use correct precision with
        # larger arrays
        x = np.arange(2000, dtype=float)
        x = ma.masked_greater(x, 1995)
        y = np.arange(2000, dtype=float)
        y = np.concatenate((y[1000:], y[:1000]))
        assert_(np.isfinite(mstats.kendalltau(x, y)[1]))

    def test_kendalltau_seasonal(self):
        # Tests the seasonal Kendall tau.
        x = [[nan, nan, 4, 2, 16, 26, 5, 1, 5, 1, 2, 3, 1],
             [4, 3, 5, 3, 2, 7, 3, 1, 1, 2, 3, 5, 3],
             [3, 2, 5, 6, 18, 4, 9, 1, 1, nan, 1, 1, nan],
             [nan, 6, 11, 4, 17, nan, 6, 1, 1, 2, 5, 1, 1]]
        x = ma.fix_invalid(x).T
        output = mstats.kendalltau_seasonal(x)
        assert_almost_equal(output['global p-value (indep)'], 0.008, 3)
        assert_almost_equal(output['seasonal p-value'].round(2),
                            [0.18, 0.53, 0.20, 0.04])

    @pytest.mark.parametrize("method", ("exact", "asymptotic"))
    @pytest.mark.parametrize("alternative", ("two-sided", "greater", "less"))
    def test_kendalltau_mstats_vs_stats(self, method, alternative):
        # Test that mstats.kendalltau, and stats.kendalltau with
        # nan_policy='omit', match the behavior of stats.kendalltau on the
        # compressed data.  Accuracy of the alternatives is tested in
        # stats/tests/test_stats.py.
        np.random.seed(0)
        n = 50
        x = np.random.rand(n)
        y = np.random.rand(n)
        mask = np.random.rand(n) > 0.5

        x_masked = ma.array(x, mask=mask)
        y_masked = ma.array(y, mask=mask)
        res_masked = mstats.kendalltau(
            x_masked, y_masked, method=method, alternative=alternative)

        x_compressed = x_masked.compressed()
        y_compressed = y_masked.compressed()
        res_compressed = stats.kendalltau(
            x_compressed, y_compressed, method=method, alternative=alternative)

        x[mask] = np.nan
        y[mask] = np.nan
        res_nan = stats.kendalltau(
            x, y, method=method, nan_policy='omit', alternative=alternative)

        assert_allclose(res_masked, res_compressed)
        assert_allclose(res_nan, res_compressed)

    def test_kendall_p_exact_medium(self):
        # Test for the exact method with medium samples (some n >= 171)
        # expected values generated using SymPy
        expectations = {(100, 2393): 0.62822615287956040664,
                        (101, 2436): 0.60439525773513602669,
                        (170, 0): 2.755801935583541e-307,
                        (171, 0): 0.0,
                        (171, 1): 2.755801935583541e-307,
                        (172, 1): 0.0,
                        (200, 9797): 0.74753983745929675209,
                        (201, 9656): 0.40959218958120363618}

        for nc, expected in expectations.items():
            res = _mstats_basic._kendall_p_exact(nc[0], nc[1])
            assert_almost_equal(res, expected)

    @pytest.mark.xslow
    def test_kendall_p_exact_large(self):
        # Test for the exact method with large samples (n >= 171)
        # expected values generated using SymPy
        expectations = {(400, 38965): 0.48444283672113314099,
                        (401, 39516): 0.66363159823474837662,
                        (800, 156772): 0.42265448483120932055,
                        (801, 157849): 0.53437553412194416236,
                        (1600, 637472): 0.84200727400323538419,
                        (1601, 630304): 0.34465255088058593946}

        for nc, expected in expectations.items():
            res = _mstats_basic._kendall_p_exact(nc[0], nc[1])
            assert_almost_equal(res, expected)

    def test_pointbiserial(self):
        x = [1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0,
             0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, -1]
        y = [14.8, 13.8, 12.4, 10.1, 7.1, 6.1, 5.8, 4.6, 4.3, 3.5, 3.3, 3.2,
             3.0, 2.8, 2.8, 2.5, 2.4, 2.3, 2.1, 1.7, 1.7, 1.5, 1.3, 1.3, 1.2,
             1.2, 1.1, 0.8, 0.7, 0.6, 0.5, 0.2, 0.2, 0.1, np.nan]
        assert_almost_equal(mstats.pointbiserialr(x, y)[0], 0.36149, 5)

        # test for namedtuple attributes
        res = mstats.pointbiserialr(x, y)
        attributes = ('correlation', 'pvalue')
        check_named_results(res, attributes, ma=True)
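
# Illustrative sketch (not part of the original tests): the closed forms used
# as reference values in TestCorr above, reproduced with plain numpy.  The
# helper name is hypothetical; the numbers are the ones quoted in the tests.
def _sketch_corr_formulas():
    # Pearson's r for x1 = [-1, 0, 1], y1 = [0, 0, 3]:
    # r = sum((x - mx)*(y - my)) / sqrt(sum((x - mx)**2) * sum((y - my)**2))
    x = np.array([-1.0, 0.0, 1.0])
    y = np.array([0.0, 0.0, 3.0])
    xc, yc = x - x.mean(), y - y.mean()
    r = (xc * yc).sum() / np.sqrt((xc**2).sum() * (yc**2).sum())
    assert_almost_equal(r, np.sqrt(3)/2)

    # Spearman's rho without ties: rho = 1 - 6*sum(d_i**2) / (n*(n**2 - 1)).
    # For the three swaps made in test_spearmanr the rank differences are
    # d = (9, 9, 424, 424, 1074, 1074), giving the quoted 0.998.
    n = 2000
    d2 = 2 * (9**2 + 424**2 + 1074**2)
    rho = 1 - 6 * d2 / (n * (n**2 - 1))
    assert_almost_equal(rho, 0.998)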

class TestTrimming:
    def test_trim(self):
        a = ma.arange(10)
        assert_equal(mstats.trim(a), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
        a = ma.arange(10)
        assert_equal(mstats.trim(a, (2, 8)),
                     [None, None, 2, 3, 4, 5, 6, 7, 8, None])
        a = ma.arange(10)
        assert_equal(mstats.trim(a, limits=(2, 8), inclusive=(False, False)),
                     [None, None, None, 3, 4, 5, 6, 7, None, None])
        a = ma.arange(10)
        assert_equal(mstats.trim(a, limits=(0.1, 0.2), relative=True),
                     [None, 1, 2, 3, 4, 5, 6, 7, None, None])

        a = ma.arange(12)
        a[[0, -1]] = a[5] = masked
        assert_equal(mstats.trim(a, (2, 8)),
                     [None, None, 2, 3, 4, None, 6, 7, 8, None, None, None])

        x = ma.arange(100).reshape(10, 10)
        expected = [1]*10 + [0]*70 + [1]*20
        trimx = mstats.trim(x, (0.1, 0.2), relative=True, axis=None)
        assert_equal(trimx._mask.ravel(), expected)
        trimx = mstats.trim(x, (0.1, 0.2), relative=True, axis=0)
        assert_equal(trimx._mask.ravel(), expected)
        trimx = mstats.trim(x, (0.1, 0.2), relative=True, axis=-1)
        assert_equal(trimx._mask.T.ravel(), expected)

        # same as above, but with an extra masked row inserted
        x = ma.arange(110).reshape(11, 10)
        x[1] = masked
        expected = [1]*20 + [0]*70 + [1]*20
        trimx = mstats.trim(x, (0.1, 0.2), relative=True, axis=None)
        assert_equal(trimx._mask.ravel(), expected)
        trimx = mstats.trim(x, (0.1, 0.2), relative=True, axis=0)
        assert_equal(trimx._mask.ravel(), expected)
        trimx = mstats.trim(x.T, (0.1, 0.2), relative=True, axis=-1)
        assert_equal(trimx.T._mask.ravel(), expected)

    def test_trim_old(self):
        x = ma.arange(100)
        assert_equal(mstats.trimboth(x).count(), 60)
        assert_equal(mstats.trimtail(x, tail='r').count(), 80)
        x[50:70] = masked
        trimx = mstats.trimboth(x)
        assert_equal(trimx.count(), 48)
        assert_equal(trimx._mask, [1]*16 + [0]*34 + [1]*20 + [0]*14 + [1]*16)
        x._mask = nomask
        x.shape = (10, 10)
        assert_equal(mstats.trimboth(x).count(), 60)
        assert_equal(mstats.trimtail(x).count(), 80)

    def test_trimr(self):
        x = ma.arange(10)
        result = mstats.trimr(x, limits=(0.15, 0.14), inclusive=(False, False))
        expected = ma.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
                            mask=[1, 1, 0, 0, 0, 0, 0, 0, 0, 1])
        assert_equal(result, expected)
        assert_equal(result.mask, expected.mask)

    def test_trimmedmean(self):
        data = ma.array([77, 87, 88, 114, 151, 210, 219, 246, 253, 262,
                         296, 299, 306, 376, 428, 515, 666, 1310, 2611])
        assert_almost_equal(mstats.trimmed_mean(data, 0.1), 343, 0)
        assert_almost_equal(mstats.trimmed_mean(data, (0.1, 0.1)), 343, 0)
        assert_almost_equal(mstats.trimmed_mean(data, (0.2, 0.2)), 283, 0)

    def test_trimmed_stde(self):
        data = ma.array([77, 87, 88, 114, 151, 210, 219, 246, 253, 262,
                         296, 299, 306, 376, 428, 515, 666, 1310, 2611])
        assert_almost_equal(mstats.trimmed_stde(data, (0.2, 0.2)), 56.13193, 5)
        assert_almost_equal(mstats.trimmed_stde(data, 0.2), 56.13193, 5)

    def test_winsorization(self):
        data = ma.array([77, 87, 88, 114, 151, 210, 219, 246, 253, 262,
                         296, 299, 306, 376, 428, 515, 666, 1310, 2611])
        assert_almost_equal(mstats.winsorize(data, (0.2, 0.2)).var(ddof=1),
                            21551.4, 1)
        assert_almost_equal(
            mstats.winsorize(data, (0.2, 0.2), (False, False)).var(ddof=1),
            11887.3, 1)
        data[5] = masked
        winsorized = mstats.winsorize(data)
        assert_equal(winsorized.mask, data.mask)

    def test_winsorization_nan(self):
        data = ma.array([np.nan, np.nan, 0, 1, 2])
        assert_raises(ValueError, mstats.winsorize, data, (0.05, 0.05),
                      nan_policy='raise')
        # Testing propagate (default behavior)
        assert_equal(mstats.winsorize(data, (0.4, 0.4)),
                     ma.array([2, 2, 2, 2, 2]))
        assert_equal(mstats.winsorize(data, (0.8, 0.8)),
                     ma.array([np.nan, np.nan, np.nan, np.nan, np.nan]))
        assert_equal(mstats.winsorize(data, (0.4, 0.4), nan_policy='omit'),
                     ma.array([np.nan, np.nan, 2, 2, 2]))
        assert_equal(mstats.winsorize(data, (0.8, 0.8), nan_policy='omit'),
                     ma.array([np.nan, np.nan, 2, 2, 2]))
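
# Illustrative sketch (not part of the original tests): as I read it,
# winsorizing with limits (0.2, 0.2) replaces the lowest and highest 20% of
# the values by the nearest values that are kept, which is why only the
# variance changes above, not the number of elements.  The expected output
# below is my own reading of that behavior on throwaway data, not a quoted
# reference value.
def _sketch_winsorize():
    data = ma.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
    out = mstats.winsorize(data, (0.2, 0.2))
    assert_equal(out, [3, 3, 3, 4, 5, 6, 7, 8, 8, 8])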

class TestMoments:
    # Comparison numbers are found using R v.1.5.1
    # note that length(testcase) = 4
    # testmathworks comes from documentation for the
    # Statistics Toolbox for Matlab and can be found at both
    # https://www.mathworks.com/help/stats/kurtosis.html
    # https://www.mathworks.com/help/stats/skewness.html
    # Note that both test cases came from here.
    testcase = [1, 2, 3, 4]
    testmathworks = ma.fix_invalid([1.165, 0.6268, 0.0751, 0.3516, -0.6965,
                                    np.nan])
    testcase_2d = ma.array(
        np.array([[0.05245846, 0.50344235, 0.86589117, 0.36936353, 0.46961149],
                  [0.11574073, 0.31299969, 0.45925772, 0.72618805, 0.75194407],
                  [0.67696689, 0.91878127, 0.09769044, 0.04645137, 0.37615733],
                  [0.05903624, 0.29908861, 0.34088298, 0.66216337, 0.83160998],
                  [0.64619526, 0.94894632, 0.27855892, 0.0706151, 0.39962917]]),
        mask=np.array([[True, False, False, True, False],
                       [True, True, True, False, True],
                       [False, False, False, False, False],
                       [True, True, True, True, True],
                       [False, False, True, False, False]], dtype=bool))

    def _assert_equal(self, actual, expect, *, shape=None, dtype=None):
        expect = np.asarray(expect)
        if shape is not None:
            expect = np.broadcast_to(expect, shape)
        assert_array_equal(actual, expect)
        if dtype is None:
            dtype = expect.dtype
        assert actual.dtype == dtype

    def test_moment(self):
        y = mstats.moment(self.testcase, 1)
        assert_almost_equal(y, 0.0, 10)
        y = mstats.moment(self.testcase, 2)
        assert_almost_equal(y, 1.25)
        y = mstats.moment(self.testcase, 3)
        assert_almost_equal(y, 0.0)
        y = mstats.moment(self.testcase, 4)
        assert_almost_equal(y, 2.5625)

        # check array_like input for moment
        y = mstats.moment(self.testcase, [1, 2, 3, 4])
        assert_allclose(y, [0, 1.25, 0, 2.5625])

        # check moment input consists only of integers
        y = mstats.moment(self.testcase, 0.0)
        assert_allclose(y, 1.0)
        assert_raises(ValueError, mstats.moment, self.testcase, 1.2)
        y = mstats.moment(self.testcase, [1.0, 2, 3, 4.0])
        assert_allclose(y, [0, 1.25, 0, 2.5625])

        # test empty input
        y = mstats.moment([])
        self._assert_equal(y, np.nan, dtype=np.float64)
        y = mstats.moment(np.array([], dtype=np.float32))
        self._assert_equal(y, np.nan, dtype=np.float32)
        y = mstats.moment(np.zeros((1, 0)), axis=0)
        self._assert_equal(y, [], shape=(0,), dtype=np.float64)
        y = mstats.moment([[]], axis=1)
        self._assert_equal(y, np.nan, shape=(1,), dtype=np.float64)
        y = mstats.moment([[]], moment=[0, 1], axis=0)
        self._assert_equal(y, [], shape=(2, 0))

        x = np.arange(10.)
        x[9] = np.nan
        assert_equal(mstats.moment(x, 2), ma.masked)  # NaN value is ignored

    def test_variation(self):
        y = mstats.variation(self.testcase)
        assert_almost_equal(y, 0.44721359549996, 10)

    def test_variation_ddof(self):
        # test variation with delta degrees of freedom
        # regression test for gh-13341
        a = np.array([1, 2, 3, 4, 5])
        y = mstats.variation(a, ddof=1)
        assert_almost_equal(y, 0.5270462766947299)

    def test_skewness(self):
        y = mstats.skew(self.testmathworks)
        assert_almost_equal(y, -0.29322304336607, 10)
        y = mstats.skew(self.testmathworks, bias=0)
        assert_almost_equal(y, -0.437111105023940, 10)
        y = mstats.skew(self.testcase)
        assert_almost_equal(y, 0.0, 10)

        # test that skew works on multidimensional masked arrays
        correct_2d = ma.array(
            np.array([0.6882870394455785, 0, 0.2665647526856708,
                      0, -0.05211472114254485]),
            mask=np.array([False, False, False, True, False], dtype=bool)
        )
        assert_allclose(mstats.skew(self.testcase_2d, 1), correct_2d)
        for i, row in enumerate(self.testcase_2d):
            assert_almost_equal(mstats.skew(row), correct_2d[i])

        correct_2d_bias_corrected = ma.array(
            np.array([1.685952043212545, 0.0, 0.3973712716070531, 0,
                      -0.09026534484117164]),
            mask=np.array([False, False, False, True, False], dtype=bool)
        )
        assert_allclose(mstats.skew(self.testcase_2d, 1, bias=False),
                        correct_2d_bias_corrected)
        for i, row in enumerate(self.testcase_2d):
            assert_almost_equal(mstats.skew(row, bias=False),
                                correct_2d_bias_corrected[i])

        # Check consistency between stats and mstats implementations
        assert_allclose(mstats.skew(self.testcase_2d[2, :]),
                        stats.skew(self.testcase_2d[2, :]))

    def test_kurtosis(self):
        # Set flags for axis = 0 and fisher=0 (Pearson's definition of
        # kurtosis, for compatibility with Matlab)
        y = mstats.kurtosis(self.testmathworks, 0, fisher=0, bias=1)
        assert_almost_equal(y, 2.1658856802973, 10)
        # Note that MATLAB has confusing docs for the following case
        # kurtosis(x,0) gives an unbiased estimate of Pearson's kurtosis
        # kurtosis(x) gives a biased estimate of Fisher's kurtosis (Pearson-3)
        # The MATLAB docs imply that both should give Fisher's
        y = mstats.kurtosis(self.testmathworks, fisher=0, bias=0)
        assert_almost_equal(y, 3.663542721189047, 10)
        y = mstats.kurtosis(self.testcase, 0, 0)
        assert_almost_equal(y, 1.64)
        # (the Pearson/Fisher relationship is spelled out in the sketch
        # after this class)

        # test that kurtosis works on multidimensional masked arrays
        correct_2d = ma.array(np.array([-1.5, -3., -1.47247052385, 0.,
                                        -1.26979517952]),
                              mask=np.array([False, False, False, True,
                                             False], dtype=bool))
        assert_array_almost_equal(mstats.kurtosis(self.testcase_2d, 1),
                                  correct_2d)
        for i, row in enumerate(self.testcase_2d):
            assert_almost_equal(mstats.kurtosis(row), correct_2d[i])

        correct_2d_bias_corrected = ma.array(
            np.array([-1.5, -3., -1.88988209538, 0., -0.5234638463918877]),
            mask=np.array([False, False, False, True, False], dtype=bool))
        assert_array_almost_equal(mstats.kurtosis(self.testcase_2d, 1,
                                                  bias=False),
                                  correct_2d_bias_corrected)
        for i, row in enumerate(self.testcase_2d):
            assert_almost_equal(mstats.kurtosis(row, bias=False),
                                correct_2d_bias_corrected[i])

        # Check consistency between stats and mstats implementations
        assert_array_almost_equal_nulp(mstats.kurtosis(self.testcase_2d[2, :]),
                                       stats.kurtosis(self.testcase_2d[2, :]),
                                       nulp=4)
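
# Illustrative sketch (not part of the original tests): the reference numbers
# for testcase = [1, 2, 3, 4] follow directly from the central-moment formula
# m_k = mean((x - mean(x))**k), with Pearson kurtosis m_4/m_2**2 and Fisher
# kurtosis m_4/m_2**2 - 3.  The helper name is hypothetical.
def _sketch_moments_by_hand():
    x = np.array([1., 2., 3., 4.])
    xc = x - x.mean()
    m2 = (xc**2).mean()          # 1.25, matches mstats.moment(testcase, 2)
    m4 = (xc**4).mean()          # 2.5625, matches mstats.moment(testcase, 4)
    assert_almost_equal(m2, 1.25)
    assert_almost_equal(m4, 2.5625)
    assert_almost_equal(m4 / m2**2, 1.64)       # Pearson kurtosis (fisher=0)
    assert_almost_equal(m4 / m2**2 - 3, -1.36)  # Fisher kurtosis (fisher=1)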

class TestMode:
    def test_mode(self):
        a1 = [0,0,0,1,1,1,2,3,3,3,3,4,5,6,7]
        a2 = np.reshape(a1, (3,5))
        a3 = np.array([1,2,3,4,5,6])
        a4 = np.reshape(a3, (3,2))
        ma1 = ma.masked_where(ma.array(a1) > 2, a1)
        ma2 = ma.masked_where(a2 > 2, a2)
        ma3 = ma.masked_where(a3 < 2, a3)
        ma4 = ma.masked_where(ma.array(a4) < 2, a4)
        assert_equal(mstats.mode(a1, axis=None), (3,4))
        assert_equal(mstats.mode(a1, axis=0), (3,4))
        assert_equal(mstats.mode(ma1, axis=None), (0,3))
        assert_equal(mstats.mode(a2, axis=None), (3,4))
        assert_equal(mstats.mode(ma2, axis=None), (0,3))
        assert_equal(mstats.mode(a3, axis=None), (1,1))
        assert_equal(mstats.mode(ma3, axis=None), (2,1))
        assert_equal(mstats.mode(a2, axis=0), ([[0,0,0,1,1]], [[1,1,1,1,1]]))
        assert_equal(mstats.mode(ma2, axis=0), ([[0,0,0,1,1]], [[1,1,1,1,1]]))
        assert_equal(mstats.mode(a2, axis=-1), ([[0],[3],[3]], [[3],[3],[1]]))
        assert_equal(mstats.mode(ma2, axis=-1), ([[0],[1],[0]], [[3],[1],[0]]))
        assert_equal(mstats.mode(ma4, axis=0), ([[3,2]], [[1,1]]))
        assert_equal(mstats.mode(ma4, axis=-1), ([[2],[3],[5]], [[1],[1],[1]]))

        a1_res = mstats.mode(a1, axis=None)

        # test for namedtuple attributes
        attributes = ('mode', 'count')
        check_named_results(a1_res, attributes, ma=True)

    def test_mode_modifies_input(self):
        # regression test for gh-6428: mode(..., axis=None) may not modify
        # the input array
        im = np.zeros((100, 100))
        im[:50, :] += 1
        im[:, :50] += 1
        cp = im.copy()
        mstats.mode(im, None)
        assert_equal(im, cp)


class TestPercentile:
    def setup_method(self):
        self.a1 = [3, 4, 5, 10, -3, -5, 6]
        self.a2 = [3, -6, -2, 8, 7, 4, 2, 1]
        self.a3 = [3., 4, 5, 10, -3, -5, -6, 7.0]

    def test_percentile(self):
        x = np.arange(8) * 0.5
        assert_equal(mstats.scoreatpercentile(x, 0), 0.)
        assert_equal(mstats.scoreatpercentile(x, 100), 3.5)
        assert_equal(mstats.scoreatpercentile(x, 50), 1.75)

    def test_2D(self):
        x = ma.array([[1, 1, 1],
                      [1, 1, 1],
                      [4, 4, 3],
                      [1, 1, 1],
                      [1, 1, 1]])
        assert_equal(mstats.scoreatpercentile(x, 50), [1, 1, 1])

class TestVariability:
    """  Comparison numbers are found using R v.1.5.1
         note that length(testcase) = 4
    """
    testcase = ma.fix_invalid([1, 2, 3, 4, np.nan])

    def test_sem(self):
        # This is not in R, so used: sqrt(var(testcase)*3/4) / sqrt(3)
        # (reproduced with plain numpy in the sketch after this class)
        y = mstats.sem(self.testcase)
        assert_almost_equal(y, 0.6454972244)
        n = self.testcase.count()
        assert_allclose(mstats.sem(self.testcase, ddof=0) * np.sqrt(n/(n-2)),
                        mstats.sem(self.testcase, ddof=2))

    def test_zmap(self):
        # This is not in R, so tested by using:
        #     (testcase[i] - mean(testcase, axis=0)) / sqrt(var(testcase)*3/4)
        y = mstats.zmap(self.testcase, self.testcase)
        desired_unmaskedvals = ([-1.3416407864999, -0.44721359549996,
                                 0.44721359549996, 1.3416407864999])
        assert_array_almost_equal(desired_unmaskedvals,
                                  y.data[y.mask == False], decimal=12)

    def test_zscore(self):
        # This is not in R, so tested by using:
        #     (testcase[i] - mean(testcase, axis=0)) / sqrt(var(testcase)*3/4)
        y = mstats.zscore(self.testcase)
        desired = ma.fix_invalid([-1.3416407864999, -0.44721359549996,
                                  0.44721359549996, 1.3416407864999, np.nan])
        assert_almost_equal(desired, y, decimal=12)
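
# Illustrative sketch (not part of the original tests): the R-style reference
# numbers used in TestVariability come from the population variance of
# testcase = [1, 2, 3, 4]; R's var(testcase)*3/4 (ddof=1 scaled back) is the
# ddof=0 variance, 1.25.  A hedged reproduction with plain numpy:
def _sketch_variability_references():
    x = np.array([1., 2., 3., 4.])
    var0 = x.var(ddof=0)                # 1.25 == R var(x) * 3/4
    assert_almost_equal(np.sqrt(var0) / np.sqrt(3), 0.6454972244)  # sem value
    z = (x - x.mean()) / np.sqrt(var0)  # zscore/zmap reference values
    assert_almost_equal(z, [-1.3416407864999, -0.44721359549996,
                            0.44721359549996, 1.3416407864999])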

class TestMisc:
    def test_obrientransform(self):
        args = [[5]*5+[6]*11+[7]*9+[8]*3+[9]*2+[10]*2,
                [6]+[7]*2+[8]*4+[9]*9+[10]*16]
        result = [5*[3.1828]+11*[0.5591]+9*[0.0344]+3*[1.6086]+2*[5.2817]+2*[11.0538],
                  [10.4352]+2*[4.8599]+4*[1.3836]+9*[0.0061]+16*[0.7277]]
        assert_almost_equal(np.round(mstats.obrientransform(*args).T, 4),
                            result, 4)

    def test_ks_2samp(self):
        x = [[nan, nan, 4, 2, 16, 26, 5, 1, 5, 1, 2, 3, 1],
             [4, 3, 5, 3, 2, 7, 3, 1, 1, 2, 3, 5, 3],
             [3, 2, 5, 6, 18, 4, 9, 1, 1, nan, 1, 1, nan],
             [nan, 6, 11, 4, 17, nan, 6, 1, 1, 2, 5, 1, 1]]
        x = ma.fix_invalid(x).T
        (winter, spring, summer, fall) = x.T

        assert_almost_equal(np.round(mstats.ks_2samp(winter, spring), 4),
                            (0.1818, 0.9628))
        assert_almost_equal(np.round(mstats.ks_2samp(winter, spring, 'g'), 4),
                            (0.1469, 0.6886))
        assert_almost_equal(np.round(mstats.ks_2samp(winter, spring, 'l'), 4),
                            (0.1818, 0.6011))

    def test_friedmanchisq(self):
        # No missing values
        args = ([9.0, 9.5, 5.0, 7.5, 9.5, 7.5, 8.0, 7.0, 8.5, 6.0],
                [7.0, 6.5, 7.0, 7.5, 5.0, 8.0, 6.0, 6.5, 7.0, 7.0],
                [6.0, 8.0, 4.0, 6.0, 7.0, 6.5, 6.0, 4.0, 6.5, 3.0])
        result = mstats.friedmanchisquare(*args)
        assert_almost_equal(result[0], 10.4737, 4)
        assert_almost_equal(result[1], 0.005317, 6)

        # Missing values
        x = [[nan, nan, 4, 2, 16, 26, 5, 1, 5, 1, 2, 3, 1],
             [4, 3, 5, 3, 2, 7, 3, 1, 1, 2, 3, 5, 3],
             [3, 2, 5, 6, 18, 4, 9, 1, 1, nan, 1, 1, nan],
             [nan, 6, 11, 4, 17, nan, 6, 1, 1, 2, 5, 1, 1]]
        x = ma.fix_invalid(x)
        result = mstats.friedmanchisquare(*x)
        assert_almost_equal(result[0], 2.0156, 4)
        assert_almost_equal(result[1], 0.5692, 4)

        # test for namedtuple attributes
        attributes = ('statistic', 'pvalue')
        check_named_results(result, attributes, ma=True)


def test_regress_simple():
    # Regress a line with sinusoidal noise. Test for #1273.
    x = np.linspace(0, 100, 100)
    y = 0.2 * np.linspace(0, 100, 100) + 10
    y += np.sin(np.linspace(0, 20, 100))

    result = mstats.linregress(x, y)

    # Result is of a correct class and with correct fields
    lr = stats._stats_mstats_common.LinregressResult
    assert_(isinstance(result, lr))
    attributes = ('slope', 'intercept', 'rvalue', 'pvalue', 'stderr')
    check_named_results(result, attributes, ma=True)
    assert 'intercept_stderr' in dir(result)

    # Slope and intercept are estimated correctly
    assert_almost_equal(result.slope, 0.19644990055858422)
    assert_almost_equal(result.intercept, 10.211269918932341)
    assert_almost_equal(result.stderr, 0.002395781449783862)
    assert_almost_equal(result.intercept_stderr, 0.13866936078570702)


def test_linregress_identical_x():
    x = np.zeros(10)
    y = np.random.random(10)
    msg = "Cannot calculate a linear regression if all x values are identical"
    with assert_raises(ValueError, match=msg):
        mstats.linregress(x, y)

def test_theilslopes():
    # Test for basic slope and intercept.
    slope, intercept, lower, upper = mstats.theilslopes([0, 1, 1])
    assert_almost_equal(slope, 0.5)
    assert_almost_equal(intercept, 0.5)

    slope, intercept, lower, upper = mstats.theilslopes([0, 1, 1],
                                                        method='joint')
    assert_almost_equal(slope, 0.5)
    assert_almost_equal(intercept, 0.0)

    # Test for correct masking.
    y = np.ma.array([0, 1, 100, 1], mask=[False, False, True, False])
    slope, intercept, lower, upper = mstats.theilslopes(y)
    assert_almost_equal(slope, 1./3)
    assert_almost_equal(intercept, 2./3)

    slope, intercept, lower, upper = mstats.theilslopes(y, method='joint')
    assert_almost_equal(slope, 1./3)
    assert_almost_equal(intercept, 0.0)

    # Test of confidence intervals from example in Sen (1968).
    x = [1, 2, 3, 4, 10, 12, 18]
    y = [9, 15, 19, 20, 45, 55, 78]
    slope, intercept, lower, upper = mstats.theilslopes(y, x, 0.07)
    assert_almost_equal(slope, 4)
    assert_almost_equal(intercept, 4.0)
    assert_almost_equal(upper, 4.38, decimal=2)
    assert_almost_equal(lower, 3.71, decimal=2)

    slope, intercept, lower, upper = mstats.theilslopes(y, x, 0.07,
                                                        method='joint')
    assert_almost_equal(slope, 4)
    assert_almost_equal(intercept, 6.0)
    assert_almost_equal(upper, 4.38, decimal=2)
    assert_almost_equal(lower, 3.71, decimal=2)
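
# Illustrative sketch (not part of the original tests): the Theil-Sen slope is
# the median of all pairwise slopes (y_j - y_i)/(x_j - x_i), which is where
# the reference values above come from.  A hedged reproduction of the first
# case, y = [0, 1, 1] against x = [0, 1, 2]:
def _sketch_theil_slope():
    x = np.array([0., 1., 2.])
    y = np.array([0., 1., 1.])
    pairwise = [(y[j] - y[i]) / (x[j] - x[i])
                for i in range(len(x)) for j in range(i + 1, len(x))]
    # pairwise slopes are [1.0, 0.5, 0.0]; their median is 0.5
    assert_almost_equal(np.median(pairwise), 0.5)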

def test_theilslopes_warnings():
    # Test `theilslopes` with degenerate input; see gh-15943
    with pytest.warns(RuntimeWarning, match="All `x` coordinates are..."):
        res = mstats.theilslopes([0, 1], [0, 0])
        assert np.all(np.isnan(res))
    with suppress_warnings() as sup:
        sup.filter(RuntimeWarning, "invalid value encountered...")
        res = mstats.theilslopes([0, 0, 0], [0, 1, 0])
        assert_allclose(res, (0, 0, np.nan, np.nan))


def test_theilslopes_namedtuple_consistency():
    """
    Simple test to ensure tuple backwards-compatibility of the returned
    TheilslopesResult object
    """
    y = [1, 2, 4]
    x = [4, 6, 8]
    slope, intercept, low_slope, high_slope = mstats.theilslopes(y, x)
    result = mstats.theilslopes(y, x)

    # note all four returned values are distinct here
    assert_equal(slope, result.slope)
    assert_equal(intercept, result.intercept)
    assert_equal(low_slope, result.low_slope)
    assert_equal(high_slope, result.high_slope)

def test_siegelslopes():
    # method should be exact for straight line
    y = 2 * np.arange(10) + 0.5
    assert_equal(mstats.siegelslopes(y), (2.0, 0.5))
    assert_equal(mstats.siegelslopes(y, method='separate'), (2.0, 0.5))

    x = 2 * np.arange(10)
    y = 5 * x - 3.0
    assert_equal(mstats.siegelslopes(y, x), (5.0, -3.0))
    assert_equal(mstats.siegelslopes(y, x, method='separate'), (5.0, -3.0))

    # method is robust to outliers: breakdown point of 50%
    y[:4] = 1000
    assert_equal(mstats.siegelslopes(y, x), (5.0, -3.0))

    # if there are no outliers, results should be comparable to linregress
    x = np.arange(10)
    y = -2.3 + 0.3*x + stats.norm.rvs(size=10, random_state=231)
    slope_ols, intercept_ols, _, _, _ = stats.linregress(x, y)

    slope, intercept = mstats.siegelslopes(y, x)
    assert_allclose(slope, slope_ols, rtol=0.1)
    assert_allclose(intercept, intercept_ols, rtol=0.1)

    slope, intercept = mstats.siegelslopes(y, x, method='separate')
    assert_allclose(slope, slope_ols, rtol=0.1)
    assert_allclose(intercept, intercept_ols, rtol=0.1)
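
# Illustrative sketch (not part of the original tests): Siegel's estimator is
# a repeated median -- for each point take the median of its pairwise slopes
# to the other points, then take the median of those medians.  For an exact
# line every pairwise slope is identical, which is why the tests above expect
# exact equality.  A hedged sketch of that reading (helper name hypothetical):
def _sketch_repeated_median_slope():
    x = np.arange(10.)
    y = 2 * x + 0.5
    per_point = [np.median([(y[j] - y[i]) / (x[j] - x[i])
                            for j in range(len(x)) if j != i])
                 for i in range(len(x))]
    slope = np.median(per_point)          # 2.0 for an exact line
    intercept = np.median(y - slope * x)  # 0.5, the 'separate' intercept form
    assert_equal((slope, intercept), (2.0, 0.5))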

def test_siegelslopes_namedtuple_consistency():
    """
    Simple test to ensure tuple backwards-compatibility of the returned
    SiegelslopesResult object.
    """
    y = [1, 2, 4]
    x = [4, 6, 8]
    slope, intercept = mstats.siegelslopes(y, x)
    result = mstats.siegelslopes(y, x)

    # note both returned values are distinct here
    assert_equal(slope, result.slope)
    assert_equal(intercept, result.intercept)


def test_plotting_positions():
    # Regression test for #1256
    pos = mstats.plotting_positions(np.arange(3), 0, 0)
    assert_array_almost_equal(pos.data, np.array([0.25, 0.5, 0.75]))

class TestNormalitytests():

    def test_vs_nonmasked(self):
        x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
        assert_array_almost_equal(mstats.normaltest(x),
                                  stats.normaltest(x))
        assert_array_almost_equal(mstats.skewtest(x),
                                  stats.skewtest(x))
        assert_array_almost_equal(mstats.kurtosistest(x),
                                  stats.kurtosistest(x))

        funcs = [stats.normaltest, stats.skewtest, stats.kurtosistest]
        mfuncs = [mstats.normaltest, mstats.skewtest, mstats.kurtosistest]
        x = [1, 2, 3, 4]
        for func, mfunc in zip(funcs, mfuncs):
            assert_raises(ValueError, func, x)
            assert_raises(ValueError, mfunc, x)

    def test_axis_None(self):
        # Test axis=None (equal to axis=0 for 1-D input)
        x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
        assert_allclose(mstats.normaltest(x, axis=None), mstats.normaltest(x))
        assert_allclose(mstats.skewtest(x, axis=None), mstats.skewtest(x))
        assert_allclose(mstats.kurtosistest(x, axis=None),
                        mstats.kurtosistest(x))

    def test_maskedarray_input(self):
        # Add some masked values, test result doesn't change
        x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
        xm = np.ma.array(np.r_[np.inf, x, 10],
                         mask=np.r_[True, [False] * x.size, True])
        assert_allclose(mstats.normaltest(xm), stats.normaltest(x))
        assert_allclose(mstats.skewtest(xm), stats.skewtest(x))
        assert_allclose(mstats.kurtosistest(xm), stats.kurtosistest(x))

    def test_nd_input(self):
        x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
        x_2d = np.vstack([x] * 2).T
        for func in [mstats.normaltest, mstats.skewtest, mstats.kurtosistest]:
            res_1d = func(x)
            res_2d = func(x_2d)
            assert_allclose(res_2d[0], [res_1d[0]] * 2)
            assert_allclose(res_2d[1], [res_1d[1]] * 2)

    def test_normaltest_result_attributes(self):
        x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
        res = mstats.normaltest(x)
        attributes = ('statistic', 'pvalue')
        check_named_results(res, attributes, ma=True)

    def test_kurtosistest_result_attributes(self):
        x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
        res = mstats.kurtosistest(x)
        attributes = ('statistic', 'pvalue')
        check_named_results(res, attributes, ma=True)

    def test_regression_9033(self):
        # x is clearly non-normal, but the power of a negative denominator
        # needs to be handled correctly to reject normality
        counts = [128, 0, 58, 7, 0, 41, 16, 0, 0, 167]
        x = np.hstack([np.full(c, i) for i, c in enumerate(counts)])
        assert_equal(mstats.kurtosistest(x)[1] < 0.01, True)

    @pytest.mark.parametrize("test", ["skewtest", "kurtosistest"])
    @pytest.mark.parametrize("alternative", ["less", "greater"])
    def test_alternative(self, test, alternative):
        x = stats.norm.rvs(loc=10, scale=2.5, size=30, random_state=123)

        stats_test = getattr(stats, test)
        mstats_test = getattr(mstats, test)

        z_ex, p_ex = stats_test(x, alternative=alternative)
        z, p = mstats_test(x, alternative=alternative)
        assert_allclose(z, z_ex, atol=1e-12)
        assert_allclose(p, p_ex, atol=1e-12)

        # test with masked arrays
        x[1:5] = np.nan
        x = np.ma.masked_array(x, mask=np.isnan(x))
        z_ex, p_ex = stats_test(x.compressed(), alternative=alternative)
        z, p = mstats_test(x, alternative=alternative)
        assert_allclose(z, z_ex, atol=1e-12)
        assert_allclose(p, p_ex, atol=1e-12)

    def test_bad_alternative(self):
        x = stats.norm.rvs(size=20, random_state=123)
        msg = r"alternative must be 'less', 'greater' or 'two-sided'"
        with pytest.raises(ValueError, match=msg):
            mstats.skewtest(x, alternative='error')

        with pytest.raises(ValueError, match=msg):
            mstats.kurtosistest(x, alternative='error')


class TestFOneway():
    def test_result_attributes(self):
        a = np.array([655, 788], dtype=np.uint16)
        b = np.array([789, 772], dtype=np.uint16)
        res = mstats.f_oneway(a, b)
        attributes = ('statistic', 'pvalue')
        check_named_results(res, attributes, ma=True)

class TestMannwhitneyu():
    # data from gh-1428
    x = np.array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2.,
                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                  1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 2., 1., 1., 2.,
                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1.,
                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,
                  1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1., 1., 1., 3., 1., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1.])
    y = np.array([1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1., 1., 1., 1.,
                  2., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2., 1., 1., 3.,
                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1.,
                  1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2.,
                  2., 1., 1., 2., 1., 1., 2., 1., 2., 1., 1., 1., 1., 2.,
                  2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                  1., 2., 1., 1., 1., 1., 1., 2., 2., 2., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                  2., 1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 2., 1., 1.,
                  1., 1., 1., 1.])

    def test_result_attributes(self):
        res = mstats.mannwhitneyu(self.x, self.y)
        attributes = ('statistic', 'pvalue')
        check_named_results(res, attributes, ma=True)

    def test_against_stats(self):
        # gh-4641 reported that stats.mannwhitneyu returned half the p-value
        # of mstats.mannwhitneyu. Default alternative of stats.mannwhitneyu
        # is now two-sided, so they match.
        res1 = mstats.mannwhitneyu(self.x, self.y)
        res2 = stats.mannwhitneyu(self.x, self.y)
        assert res1.statistic == res2.statistic
        assert_allclose(res1.pvalue, res2.pvalue)


class TestKruskal():
    def test_result_attributes(self):
        x = [1, 3, 5, 7, 9]
        y = [2, 4, 6, 8, 10]

        res = mstats.kruskal(x, y)
        attributes = ('statistic', 'pvalue')
        check_named_results(res, attributes, ma=True)


# TODO: for all ttest functions, add tests with masked array inputs
  1007. class TestTtest_rel():
  1008. def test_vs_nonmasked(self):
  1009. np.random.seed(1234567)
  1010. outcome = np.random.randn(20, 4) + [0, 0, 1, 2]
  1011. # 1-D inputs
  1012. res1 = stats.ttest_rel(outcome[:, 0], outcome[:, 1])
  1013. res2 = mstats.ttest_rel(outcome[:, 0], outcome[:, 1])
  1014. assert_allclose(res1, res2)
  1015. # 2-D inputs
  1016. res1 = stats.ttest_rel(outcome[:, 0], outcome[:, 1], axis=None)
  1017. res2 = mstats.ttest_rel(outcome[:, 0], outcome[:, 1], axis=None)
  1018. assert_allclose(res1, res2)
  1019. res1 = stats.ttest_rel(outcome[:, :2], outcome[:, 2:], axis=0)
  1020. res2 = mstats.ttest_rel(outcome[:, :2], outcome[:, 2:], axis=0)
  1021. assert_allclose(res1, res2)
  1022. # Check default is axis=0
  1023. res3 = mstats.ttest_rel(outcome[:, :2], outcome[:, 2:])
  1024. assert_allclose(res2, res3)
  1025. def test_fully_masked(self):
  1026. np.random.seed(1234567)
  1027. outcome = ma.masked_array(np.random.randn(3, 2),
  1028. mask=[[1, 1, 1], [0, 0, 0]])
  1029. with suppress_warnings() as sup:
  1030. sup.filter(RuntimeWarning, "invalid value encountered in absolute")
  1031. for pair in [(outcome[:, 0], outcome[:, 1]), ([np.nan, np.nan], [1.0, 2.0])]:
  1032. t, p = mstats.ttest_rel(*pair)
  1033. assert_array_equal(t, (np.nan, np.nan))
  1034. assert_array_equal(p, (np.nan, np.nan))
  1035. def test_result_attributes(self):
  1036. np.random.seed(1234567)
  1037. outcome = np.random.randn(20, 4) + [0, 0, 1, 2]
  1038. res = mstats.ttest_rel(outcome[:, 0], outcome[:, 1])
  1039. attributes = ('statistic', 'pvalue')
  1040. check_named_results(res, attributes, ma=True)
  1041. def test_invalid_input_size(self):
  1042. assert_raises(ValueError, mstats.ttest_rel,
  1043. np.arange(10), np.arange(11))
  1044. x = np.arange(24)
  1045. assert_raises(ValueError, mstats.ttest_rel,
  1046. x.reshape(2, 3, 4), x.reshape(2, 4, 3), axis=1)
  1047. assert_raises(ValueError, mstats.ttest_rel,
  1048. x.reshape(2, 3, 4), x.reshape(2, 4, 3), axis=2)
  1049. def test_empty(self):
  1050. res1 = mstats.ttest_rel([], [])
  1051. assert_(np.all(np.isnan(res1)))

    def test_zero_division(self):
        t, p = mstats.ttest_rel([0, 0, 0], [1, 1, 1])
        assert_equal((np.abs(t), p), (np.inf, 0))

        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "invalid value encountered in absolute")
            t, p = mstats.ttest_rel([0, 0, 0], [0, 0, 0])
            assert_array_equal(t, np.array([np.nan, np.nan]))
            assert_array_equal(p, np.array([np.nan, np.nan]))

    def test_bad_alternative(self):
        msg = r"alternative must be 'less', 'greater' or 'two-sided'"
        with pytest.raises(ValueError, match=msg):
            mstats.ttest_rel([1, 2, 3], [4, 5, 6], alternative='foo')

    @pytest.mark.parametrize("alternative", ["less", "greater"])
    def test_alternative(self, alternative):
        x = stats.norm.rvs(loc=10, scale=5, size=25, random_state=42)
        y = stats.norm.rvs(loc=8, scale=2, size=25, random_state=42)

        t_ex, p_ex = stats.ttest_rel(x, y, alternative=alternative)
        t, p = mstats.ttest_rel(x, y, alternative=alternative)
        assert_allclose(t, t_ex, rtol=1e-14)
        assert_allclose(p, p_ex, rtol=1e-14)

        # test with masked arrays
        x[1:10] = np.nan
        y[1:10] = np.nan
        x = np.ma.masked_array(x, mask=np.isnan(x))
        y = np.ma.masked_array(y, mask=np.isnan(y))
        t_ex, p_ex = stats.ttest_rel(x.compressed(), y.compressed(),
                                     alternative=alternative)
        t, p = mstats.ttest_rel(x, y, alternative=alternative)
        assert_allclose(t, t_ex, rtol=1e-14)
        assert_allclose(p, p_ex, rtol=1e-14)


class TestTtest_ind():
    def test_vs_nonmasked(self):
        np.random.seed(1234567)
        outcome = np.random.randn(20, 4) + [0, 0, 1, 2]

        # 1-D inputs
        res1 = stats.ttest_ind(outcome[:, 0], outcome[:, 1])
        res2 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1])
        assert_allclose(res1, res2)

        # 2-D inputs
        res1 = stats.ttest_ind(outcome[:, 0], outcome[:, 1], axis=None)
        res2 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1], axis=None)
        assert_allclose(res1, res2)
        res1 = stats.ttest_ind(outcome[:, :2], outcome[:, 2:], axis=0)
        res2 = mstats.ttest_ind(outcome[:, :2], outcome[:, 2:], axis=0)
        assert_allclose(res1, res2)

        # Check default is axis=0
        res3 = mstats.ttest_ind(outcome[:, :2], outcome[:, 2:])
        assert_allclose(res2, res3)

        # Check equal_var
        res4 = stats.ttest_ind(outcome[:, 0], outcome[:, 1], equal_var=True)
        res5 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1], equal_var=True)
        assert_allclose(res4, res5)
        res4 = stats.ttest_ind(outcome[:, 0], outcome[:, 1], equal_var=False)
        res5 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1], equal_var=False)
        assert_allclose(res4, res5)

    def test_fully_masked(self):
        np.random.seed(1234567)
        outcome = ma.masked_array(np.random.randn(3, 2),
                                  mask=[[1, 1, 1], [0, 0, 0]])
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "invalid value encountered in absolute")
            for pair in [(outcome[:, 0], outcome[:, 1]),
                         ([np.nan, np.nan], [1.0, 2.0])]:
                t, p = mstats.ttest_ind(*pair)
                assert_array_equal(t, (np.nan, np.nan))
                assert_array_equal(p, (np.nan, np.nan))

    def test_result_attributes(self):
        np.random.seed(1234567)
        outcome = np.random.randn(20, 4) + [0, 0, 1, 2]

        res = mstats.ttest_ind(outcome[:, 0], outcome[:, 1])
        attributes = ('statistic', 'pvalue')
        check_named_results(res, attributes, ma=True)

    def test_empty(self):
        res1 = mstats.ttest_ind([], [])
        assert_(np.all(np.isnan(res1)))

    def test_zero_division(self):
        t, p = mstats.ttest_ind([0, 0, 0], [1, 1, 1])
        assert_equal((np.abs(t), p), (np.inf, 0))

        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "invalid value encountered in absolute")
            t, p = mstats.ttest_ind([0, 0, 0], [0, 0, 0])
            assert_array_equal(t, (np.nan, np.nan))
            assert_array_equal(p, (np.nan, np.nan))

            t, p = mstats.ttest_ind([0, 0, 0], [1, 1, 1], equal_var=False)
            assert_equal((np.abs(t), p), (np.inf, 0))
            assert_array_equal(mstats.ttest_ind([0, 0, 0], [0, 0, 0],
                                                equal_var=False),
                               (np.nan, np.nan))

    def test_bad_alternative(self):
        msg = r"alternative must be 'less', 'greater' or 'two-sided'"
        with pytest.raises(ValueError, match=msg):
            mstats.ttest_ind([1, 2, 3], [4, 5, 6], alternative='foo')

    @pytest.mark.parametrize("alternative", ["less", "greater"])
    def test_alternative(self, alternative):
        x = stats.norm.rvs(loc=10, scale=2, size=100, random_state=123)
        y = stats.norm.rvs(loc=8, scale=2, size=100, random_state=123)

        t_ex, p_ex = stats.ttest_ind(x, y, alternative=alternative)
        t, p = mstats.ttest_ind(x, y, alternative=alternative)
        assert_allclose(t, t_ex, rtol=1e-14)
        assert_allclose(p, p_ex, rtol=1e-14)

        # test with masked arrays
        x[1:10] = np.nan
        y[80:90] = np.nan
        x = np.ma.masked_array(x, mask=np.isnan(x))
        y = np.ma.masked_array(y, mask=np.isnan(y))
        t_ex, p_ex = stats.ttest_ind(x.compressed(), y.compressed(),
                                     alternative=alternative)
        t, p = mstats.ttest_ind(x, y, alternative=alternative)
        assert_allclose(t, t_ex, rtol=1e-14)
        assert_allclose(p, p_ex, rtol=1e-14)
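

# Illustrative sketch (not a test) of the comparison pattern used by the
# masked-array checks in the ttest classes above: mask some entries via NaN,
# then compare the mstats result on the masked input with the stats result on
# the same data with the masked entries dropped (`compressed()`).
# `_demo_masked_ttest_ind` is a hypothetical helper added only for exposition.
def _demo_masked_ttest_ind():
    x = stats.norm.rvs(loc=10, scale=2, size=50, random_state=0)
    y = stats.norm.rvs(loc=9, scale=2, size=50, random_state=1)
    x[:5] = np.nan                                # spoil a few entries ...
    x = np.ma.masked_array(x, mask=np.isnan(x))   # ... and mask them
    y = np.ma.masked_array(y, mask=np.isnan(y))
    t, p = mstats.ttest_ind(x, y)
    t_ex, p_ex = stats.ttest_ind(x.compressed(), y.compressed())
    # The masked computation is expected to agree closely with the reference,
    # as asserted in TestTtest_ind.test_alternative above.
    assert_allclose(t, t_ex, rtol=1e-14)
    assert_allclose(p, p_ex, rtol=1e-14)
    return t, p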


class TestTtest_1samp():
    def test_vs_nonmasked(self):
        np.random.seed(1234567)
        outcome = np.random.randn(20, 4) + [0, 0, 1, 2]

        # 1-D inputs
        res1 = stats.ttest_1samp(outcome[:, 0], 1)
        res2 = mstats.ttest_1samp(outcome[:, 0], 1)
        assert_allclose(res1, res2)

    def test_fully_masked(self):
        np.random.seed(1234567)
        outcome = ma.masked_array(np.random.randn(3), mask=[1, 1, 1])
        expected = (np.nan, np.nan)
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "invalid value encountered in absolute")
            for pair in [((np.nan, np.nan), 0.0), (outcome, 0.0)]:
                t, p = mstats.ttest_1samp(*pair)
                assert_array_equal(p, expected)
                assert_array_equal(t, expected)

    def test_result_attributes(self):
        np.random.seed(1234567)
        outcome = np.random.randn(20, 4) + [0, 0, 1, 2]

        res = mstats.ttest_1samp(outcome[:, 0], 1)
        attributes = ('statistic', 'pvalue')
        check_named_results(res, attributes, ma=True)

    def test_empty(self):
        res1 = mstats.ttest_1samp([], 1)
        assert_(np.all(np.isnan(res1)))

    def test_zero_division(self):
        t, p = mstats.ttest_1samp([0, 0, 0], 1)
        assert_equal((np.abs(t), p), (np.inf, 0))

        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "invalid value encountered in absolute")
            t, p = mstats.ttest_1samp([0, 0, 0], 0)
            assert_(np.isnan(t))
            assert_array_equal(p, (np.nan, np.nan))

    def test_bad_alternative(self):
        msg = r"alternative must be 'less', 'greater' or 'two-sided'"
        with pytest.raises(ValueError, match=msg):
            mstats.ttest_1samp([1, 2, 3], 4, alternative='foo')

    @pytest.mark.parametrize("alternative", ["less", "greater"])
    def test_alternative(self, alternative):
        x = stats.norm.rvs(loc=10, scale=2, size=100, random_state=123)

        t_ex, p_ex = stats.ttest_1samp(x, 9, alternative=alternative)
        t, p = mstats.ttest_1samp(x, 9, alternative=alternative)
        assert_allclose(t, t_ex, rtol=1e-14)
        assert_allclose(p, p_ex, rtol=1e-14)

        # test with masked arrays
        x[1:10] = np.nan
        x = np.ma.masked_array(x, mask=np.isnan(x))
        t_ex, p_ex = stats.ttest_1samp(x.compressed(), 9,
                                       alternative=alternative)
        t, p = mstats.ttest_1samp(x, 9, alternative=alternative)
        assert_allclose(t, t_ex, rtol=1e-14)
        assert_allclose(p, p_ex, rtol=1e-14)


class TestDescribe:
    """
    Tests for mstats.describe.

    Note that there are also tests for `mstats.describe` in the
    class TestCompareWithStats.
    """
    def test_basic_with_axis(self):
        # This is a basic test that is also a regression test for gh-7303.
        a = np.ma.masked_array([[0, 1, 2, 3, 4, 9],
                                [5, 5, 0, 9, 3, 3]],
                               mask=[[0, 0, 0, 0, 0, 1],
                                     [0, 0, 1, 1, 0, 0]])
        result = mstats.describe(a, axis=1)
        assert_equal(result.nobs, [5, 4])
        amin, amax = result.minmax
        assert_equal(amin, [0, 3])
        assert_equal(amax, [4, 5])
        assert_equal(result.mean, [2.0, 4.0])
        assert_equal(result.variance, [2.0, 1.0])
        assert_equal(result.skewness, [0.0, 0.0])
        assert_allclose(result.kurtosis, [-1.3, -2.0])
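

# Illustrative sketch (not a test): where the expected mean/variance values
# asserted in TestDescribe.test_basic_with_axis come from.  Per row, only the
# unmasked entries enter the statistics, and mstats.describe uses ddof=0 by
# default.  `_demo_describe_expected` is a hypothetical helper added only for
# exposition.
def _demo_describe_expected():
    row0 = np.array([0, 1, 2, 3, 4])   # unmasked entries of the first row
    row1 = np.array([5, 5, 3, 3])      # unmasked entries of the second row
    assert row0.mean() == 2.0 and row1.mean() == 4.0
    assert row0.var() == 2.0 and row1.var() == 1.0   # ddof=0
    return row0, row1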


class TestCompareWithStats:
    """
    Class to compare mstats results with stats results.

    It is assumed in general that scipy.stats is at a more mature stage than
    scipy.stats.mstats.  If a routine in mstats produces results similar to
    those of scipy.stats, that is also taken as a validation of the mstats
    routine.

    Different sample sizes are used for testing, as some problems between
    stats and mstats are dependent on sample size.

    Author: Alexander Loew

    NOTE that some tests fail.  This might be caused by
    a) actual differences or bugs between stats and mstats
    b) numerical inaccuracies
    c) different definitions of routine interfaces

    These failures need to be checked.  The current workaround is to keep
    these tests disabled while issuing reports on scipy-dev.
    """
    def get_n(self):
        """Returns list of sample sizes to be used for comparison."""
        return [1000, 100, 10, 5]

    def generate_xy_sample(self, n):
        # This routine generates numpy arrays and corresponding masked arrays
        # with the same data, but additional masked values
        np.random.seed(1234567)
        x = np.random.randn(n)
        y = x + np.random.randn(n)
        xm = np.full(len(x) + 5, 1e16)
        ym = np.full(len(y) + 5, 1e16)
        xm[0:len(x)] = x
        ym[0:len(y)] = y
        mask = xm > 9e15
        xm = np.ma.array(xm, mask=mask)
        ym = np.ma.array(ym, mask=mask)
        return x, y, xm, ym
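
    # Illustrative sketch (not a test): how the sentinel-value masking used by
    # generate_xy_sample above behaves on a tiny input.  `_demo_sentinel_mask`
    # is a hypothetical helper added only for exposition.
    @staticmethod
    def _demo_sentinel_mask():
        data = np.array([0.5, -1.2, 2.0])
        padded = np.full(len(data) + 2, 1e16)    # pad with sentinel values
        padded[0:len(data)] = data
        masked = np.ma.array(padded, mask=padded > 9e15)  # hide the padding
        # The masked entries are ignored by mstats routines, so the masked
        # array carries exactly the same information as `data`.
        assert np.array_equal(masked.compressed(), data)
        return masked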

    def generate_xy_sample2D(self, n, nx):
        x = np.full((n, nx), np.nan)
        y = np.full((n, nx), np.nan)
        xm = np.full((n+5, nx), np.nan)
        ym = np.full((n+5, nx), np.nan)

        for i in range(nx):
            x[:, i], y[:, i], dx, dy = self.generate_xy_sample(n)

        xm[0:n, :] = x[0:n]
        ym[0:n, :] = y[0:n]

        xm = np.ma.array(xm, mask=np.isnan(xm))
        ym = np.ma.array(ym, mask=np.isnan(ym))
        return x, y, xm, ym

    def test_linregress(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            result1 = stats.linregress(x, y)
            result2 = stats.mstats.linregress(xm, ym)
            assert_allclose(np.asarray(result1), np.asarray(result2))

    def test_pearsonr(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            r, p = stats.pearsonr(x, y)
            rm, pm = stats.mstats.pearsonr(xm, ym)
            assert_almost_equal(r, rm, decimal=14)
            assert_almost_equal(p, pm, decimal=14)

    def test_spearmanr(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            r, p = stats.spearmanr(x, y)
            rm, pm = stats.mstats.spearmanr(xm, ym)
            assert_almost_equal(r, rm, 14)
            assert_almost_equal(p, pm, 14)

    def test_spearmanr_backcompat_useties(self):
        # A regression test to ensure we don't break backwards compat
        # more than we have to (see gh-9204).
        x = np.arange(6)
        assert_raises(ValueError, mstats.spearmanr, x, x, False)

    def test_gmean(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            r = stats.gmean(abs(x))
            rm = stats.mstats.gmean(abs(xm))
            assert_allclose(r, rm, rtol=1e-13)

            r = stats.gmean(abs(y))
            rm = stats.mstats.gmean(abs(ym))
            assert_allclose(r, rm, rtol=1e-13)

    def test_hmean(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            r = stats.hmean(abs(x))
            rm = stats.mstats.hmean(abs(xm))
            assert_almost_equal(r, rm, 10)

            r = stats.hmean(abs(y))
            rm = stats.mstats.hmean(abs(ym))
            assert_almost_equal(r, rm, 10)

    def test_skew(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            r = stats.skew(x)
            rm = stats.mstats.skew(xm)
            assert_almost_equal(r, rm, 10)

            r = stats.skew(y)
            rm = stats.mstats.skew(ym)
            assert_almost_equal(r, rm, 10)

    def test_moment(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            r = stats.moment(x)
            rm = stats.mstats.moment(xm)
            assert_almost_equal(r, rm, 10)

            r = stats.moment(y)
            rm = stats.mstats.moment(ym)
            assert_almost_equal(r, rm, 10)

    def test_zscore(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)

            # reference solution
            zx = (x - x.mean()) / x.std()
            zy = (y - y.mean()) / y.std()

            # validate stats
            assert_allclose(stats.zscore(x), zx, rtol=1e-10)
            assert_allclose(stats.zscore(y), zy, rtol=1e-10)

            # compare stats and mstats
            assert_allclose(stats.zscore(x), stats.mstats.zscore(xm[0:len(x)]),
                            rtol=1e-10)
            assert_allclose(stats.zscore(y), stats.mstats.zscore(ym[0:len(y)]),
                            rtol=1e-10)

    def test_kurtosis(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            r = stats.kurtosis(x)
            rm = stats.mstats.kurtosis(xm)
            assert_almost_equal(r, rm, 10)

            r = stats.kurtosis(y)
            rm = stats.mstats.kurtosis(ym)
            assert_almost_equal(r, rm, 10)

    def test_sem(self):
        # example from stats.sem doc
        a = np.arange(20).reshape(5, 4)
        am = np.ma.array(a)
        r = stats.sem(a, ddof=1)
        rm = stats.mstats.sem(am, ddof=1)

        assert_allclose(r, 2.82842712, atol=1e-5)
        assert_allclose(rm, 2.82842712, atol=1e-5)

        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            assert_almost_equal(stats.mstats.sem(xm, axis=None, ddof=0),
                                stats.sem(x, axis=None, ddof=0), decimal=13)
            assert_almost_equal(stats.mstats.sem(ym, axis=None, ddof=0),
                                stats.sem(y, axis=None, ddof=0), decimal=13)
            assert_almost_equal(stats.mstats.sem(xm, axis=None, ddof=1),
                                stats.sem(x, axis=None, ddof=1), decimal=13)
            assert_almost_equal(stats.mstats.sem(ym, axis=None, ddof=1),
                                stats.sem(y, axis=None, ddof=1), decimal=13)

    def test_describe(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            r = stats.describe(x, ddof=1)
            rm = stats.mstats.describe(xm, ddof=1)
            for ii in range(6):
                assert_almost_equal(np.asarray(r[ii]),
                                    np.asarray(rm[ii]),
                                    decimal=12)

    def test_describe_result_attributes(self):
        actual = mstats.describe(np.arange(5))
        attributes = ('nobs', 'minmax', 'mean', 'variance', 'skewness',
                      'kurtosis')
        check_named_results(actual, attributes, ma=True)

    def test_rankdata(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            r = stats.rankdata(x)
            rm = stats.mstats.rankdata(x)
            assert_allclose(r, rm)

    def test_tmean(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            assert_almost_equal(stats.tmean(x), stats.mstats.tmean(xm), 14)
            assert_almost_equal(stats.tmean(y), stats.mstats.tmean(ym), 14)

    def test_tmax(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            assert_almost_equal(stats.tmax(x, 2.),
                                stats.mstats.tmax(xm, 2.), 10)
            assert_almost_equal(stats.tmax(y, 2.),
                                stats.mstats.tmax(ym, 2.), 10)

            assert_almost_equal(stats.tmax(x, upperlimit=3.),
                                stats.mstats.tmax(xm, upperlimit=3.), 10)
            assert_almost_equal(stats.tmax(y, upperlimit=3.),
                                stats.mstats.tmax(ym, upperlimit=3.), 10)

    def test_tmin(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            assert_equal(stats.tmin(x), stats.mstats.tmin(xm))
            assert_equal(stats.tmin(y), stats.mstats.tmin(ym))

            assert_almost_equal(stats.tmin(x, lowerlimit=-1.),
                                stats.mstats.tmin(xm, lowerlimit=-1.), 10)
            assert_almost_equal(stats.tmin(y, lowerlimit=-1.),
                                stats.mstats.tmin(ym, lowerlimit=-1.), 10)

    def test_zmap(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            z = stats.zmap(x, y)
            zm = stats.mstats.zmap(xm, ym)
            assert_allclose(z, zm[0:len(z)], atol=1e-10)

    def test_variation(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            assert_almost_equal(stats.variation(x), stats.mstats.variation(xm),
                                decimal=12)
            assert_almost_equal(stats.variation(y), stats.mstats.variation(ym),
                                decimal=12)

    def test_tvar(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            assert_almost_equal(stats.tvar(x), stats.mstats.tvar(xm),
                                decimal=12)
            assert_almost_equal(stats.tvar(y), stats.mstats.tvar(ym),
                                decimal=12)

    def test_trimboth(self):
        a = np.arange(20)
        b = stats.trimboth(a, 0.1)
        bm = stats.mstats.trimboth(a, 0.1)
        assert_allclose(np.sort(b), bm.data[~bm.mask])

    def test_tsem(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            assert_almost_equal(stats.tsem(x), stats.mstats.tsem(xm),
                                decimal=14)
            assert_almost_equal(stats.tsem(y), stats.mstats.tsem(ym),
                                decimal=14)
            assert_almost_equal(stats.tsem(x, limits=(-2., 2.)),
                                stats.mstats.tsem(xm, limits=(-2., 2.)),
                                decimal=14)

    def test_skewtest(self):
        # this test is for 1D data
        for n in self.get_n():
            if n > 8:
                x, y, xm, ym = self.generate_xy_sample(n)
                r = stats.skewtest(x)
                rm = stats.mstats.skewtest(xm)
                assert_allclose(r, rm)

    def test_skewtest_result_attributes(self):
        x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
        res = mstats.skewtest(x)
        attributes = ('statistic', 'pvalue')
        check_named_results(res, attributes, ma=True)

    def test_skewtest_2D_notmasked(self):
        # a normal ndarray is passed to the masked function
        x = np.random.random((20, 2)) * 20.
        r = stats.skewtest(x)
        rm = stats.mstats.skewtest(x)
        assert_allclose(np.asarray(r), np.asarray(rm))

    def test_skewtest_2D_WithMask(self):
        nx = 2
        for n in self.get_n():
            if n > 8:
                x, y, xm, ym = self.generate_xy_sample2D(n, nx)
                r = stats.skewtest(x)
                rm = stats.mstats.skewtest(xm)

                assert_allclose(r[0][0], rm[0][0], rtol=1e-14)
                assert_allclose(r[0][1], rm[0][1], rtol=1e-14)

    def test_normaltest(self):
        with np.errstate(over='raise'), suppress_warnings() as sup:
            sup.filter(UserWarning, "kurtosistest only valid for n>=20")
            for n in self.get_n():
                if n > 8:
                    x, y, xm, ym = self.generate_xy_sample(n)
                    r = stats.normaltest(x)
                    rm = stats.mstats.normaltest(xm)
                    assert_allclose(np.asarray(r), np.asarray(rm))

    def test_find_repeats(self):
        x = np.asarray([1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 4]).astype('float')
        tmp = np.asarray([1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 4,
                          5, 5, 5, 5]).astype('float')
        mask = (tmp == 5.)
        xm = np.ma.array(tmp, mask=mask)
        x_orig, xm_orig = x.copy(), xm.copy()

        r = stats.find_repeats(x)
        rm = stats.mstats.find_repeats(xm)

        assert_equal(r, rm)
        assert_equal(x, x_orig)
        assert_equal(xm, xm_orig)

        # This crazy behavior is expected by count_tied_groups, but is not
        # in the docstring...
        _, counts = stats.mstats.find_repeats([])
        assert_equal(counts, np.array(0, dtype=np.intp))
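
    # Illustrative sketch (not a test): the behaviour exercised above in
    # test_find_repeats, and how count_tied_groups summarises repeat counts.
    # `_demo_tied_groups` is a hypothetical helper added only for exposition.
    @staticmethod
    def _demo_tied_groups():
        data = [1, 1, 2, 2, 2, 3]
        values, counts = stats.mstats.find_repeats(data)
        # 1 occurs twice and 2 occurs three times; 3 is not repeated.
        assert_equal(values, [1, 2])
        assert_equal(counts, [2, 3])
        # count_tied_groups reports the same information as
        # {tie size: number of groups of that size}.
        assert stats.mstats.count_tied_groups(data) == {2: 1, 3: 1}
        return values, counts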

    def test_kendalltau(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            r = stats.kendalltau(x, y)
            rm = stats.mstats.kendalltau(xm, ym)
            assert_almost_equal(r[0], rm[0], decimal=10)
            assert_almost_equal(r[1], rm[1], decimal=7)

    def test_obrientransform(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            r = stats.obrientransform(x)
            rm = stats.mstats.obrientransform(xm)
            assert_almost_equal(r.T, rm[0:len(x)])

    def test_ks_1samp(self):
        """Checks that mstats.ks_1samp and stats.ks_1samp agree on masked arrays."""
        for mode in ['auto', 'exact', 'asymp']:
            with suppress_warnings() as sup:
                for alternative in ['less', 'greater', 'two-sided']:
                    for n in self.get_n():
                        x, y, xm, ym = self.generate_xy_sample(n)
                        res1 = stats.ks_1samp(x, stats.norm.cdf,
                                              alternative=alternative,
                                              mode=mode)
                        res2 = stats.mstats.ks_1samp(xm, stats.norm.cdf,
                                                     alternative=alternative,
                                                     mode=mode)
                        assert_equal(np.asarray(res1), np.asarray(res2))
                        res3 = stats.ks_1samp(xm, stats.norm.cdf,
                                              alternative=alternative,
                                              mode=mode)
                        assert_equal(np.asarray(res1), np.asarray(res3))

    def test_kstest_1samp(self):
        """Checks that 1-sample mstats.kstest and stats.kstest agree on masked arrays."""
        for mode in ['auto', 'exact', 'asymp']:
            with suppress_warnings() as sup:
                for alternative in ['less', 'greater', 'two-sided']:
                    for n in self.get_n():
                        x, y, xm, ym = self.generate_xy_sample(n)
                        res1 = stats.kstest(x, 'norm',
                                            alternative=alternative, mode=mode)
                        res2 = stats.mstats.kstest(xm, 'norm',
                                                   alternative=alternative,
                                                   mode=mode)
                        assert_equal(np.asarray(res1), np.asarray(res2))
                        res3 = stats.kstest(xm, 'norm',
                                            alternative=alternative, mode=mode)
                        assert_equal(np.asarray(res1), np.asarray(res3))

    def test_ks_2samp(self):
        """Checks that mstats.ks_2samp and stats.ks_2samp agree on masked arrays.
        gh-8431"""
        for mode in ['auto', 'exact', 'asymp']:
            with suppress_warnings() as sup:
                if mode in ['auto', 'exact']:
                    message = "ks_2samp: Exact calculation unsuccessful."
                    sup.filter(RuntimeWarning, message)
                for alternative in ['less', 'greater', 'two-sided']:
                    for n in self.get_n():
                        x, y, xm, ym = self.generate_xy_sample(n)
                        res1 = stats.ks_2samp(x, y,
                                              alternative=alternative,
                                              mode=mode)
                        res2 = stats.mstats.ks_2samp(xm, ym,
                                                     alternative=alternative,
                                                     mode=mode)
                        assert_equal(np.asarray(res1), np.asarray(res2))
                        res3 = stats.ks_2samp(xm, y,
                                              alternative=alternative,
                                              mode=mode)
                        assert_equal(np.asarray(res1), np.asarray(res3))

    def test_kstest_2samp(self):
        """Checks that 2-sample mstats.kstest and stats.kstest agree on masked arrays."""
        for mode in ['auto', 'exact', 'asymp']:
            with suppress_warnings() as sup:
                if mode in ['auto', 'exact']:
                    message = "ks_2samp: Exact calculation unsuccessful."
                    sup.filter(RuntimeWarning, message)
                for alternative in ['less', 'greater', 'two-sided']:
                    for n in self.get_n():
                        x, y, xm, ym = self.generate_xy_sample(n)
                        res1 = stats.kstest(x, y,
                                            alternative=alternative, mode=mode)
                        res2 = stats.mstats.kstest(xm, ym,
                                                   alternative=alternative,
                                                   mode=mode)
                        assert_equal(np.asarray(res1), np.asarray(res2))
                        res3 = stats.kstest(xm, y,
                                            alternative=alternative, mode=mode)
                        assert_equal(np.asarray(res1), np.asarray(res3))


class TestBrunnerMunzel:
    # Data from (Lumley, 1996)
    X = np.ma.masked_invalid([1, 2, 1, 1, 1, np.nan, 1, 1,
                              1, 1, 1, 2, 4, 1, 1, np.nan])
    Y = np.ma.masked_invalid([3, 3, 4, 3, np.nan, 1, 2, 3, 1, 1, 5, 4])
    significant = 14

    def test_brunnermunzel_one_sided(self):
        # Results are compared with R's lawstat package.
        u1, p1 = mstats.brunnermunzel(self.X, self.Y, alternative='less')
        u2, p2 = mstats.brunnermunzel(self.Y, self.X, alternative='greater')
        u3, p3 = mstats.brunnermunzel(self.X, self.Y, alternative='greater')
        u4, p4 = mstats.brunnermunzel(self.Y, self.X, alternative='less')

        assert_almost_equal(p1, p2, decimal=self.significant)
        assert_almost_equal(p3, p4, decimal=self.significant)
        assert_(p1 != p3)
        assert_almost_equal(u1, 3.1374674823029505,
                            decimal=self.significant)
        assert_almost_equal(u2, -3.1374674823029505,
                            decimal=self.significant)
        assert_almost_equal(u3, 3.1374674823029505,
                            decimal=self.significant)
        assert_almost_equal(u4, -3.1374674823029505,
                            decimal=self.significant)
        assert_almost_equal(p1, 0.0028931043330757342,
                            decimal=self.significant)
        assert_almost_equal(p3, 0.99710689566692423,
                            decimal=self.significant)

    def test_brunnermunzel_two_sided(self):
        # Results are compared with R's lawstat package.
        u1, p1 = mstats.brunnermunzel(self.X, self.Y, alternative='two-sided')
        u2, p2 = mstats.brunnermunzel(self.Y, self.X, alternative='two-sided')

        assert_almost_equal(p1, p2, decimal=self.significant)
        assert_almost_equal(u1, 3.1374674823029505,
                            decimal=self.significant)
        assert_almost_equal(u2, -3.1374674823029505,
                            decimal=self.significant)
        assert_almost_equal(p1, 0.0057862086661515377,
                            decimal=self.significant)

    def test_brunnermunzel_default(self):
        # The default value for alternative is two-sided
        u1, p1 = mstats.brunnermunzel(self.X, self.Y)
        u2, p2 = mstats.brunnermunzel(self.Y, self.X)

        assert_almost_equal(p1, p2, decimal=self.significant)
        assert_almost_equal(u1, 3.1374674823029505,
                            decimal=self.significant)
        assert_almost_equal(u2, -3.1374674823029505,
                            decimal=self.significant)
        assert_almost_equal(p1, 0.0057862086661515377,
                            decimal=self.significant)

    def test_brunnermunzel_alternative_error(self):
        alternative = "error"
        distribution = "t"
        assert_(alternative not in ["two-sided", "greater", "less"])
        assert_raises(ValueError,
                      mstats.brunnermunzel,
                      self.X,
                      self.Y,
                      alternative,
                      distribution)

    def test_brunnermunzel_distribution_norm(self):
        u1, p1 = mstats.brunnermunzel(self.X, self.Y, distribution="normal")
        u2, p2 = mstats.brunnermunzel(self.Y, self.X, distribution="normal")
        assert_almost_equal(p1, p2, decimal=self.significant)
        assert_almost_equal(u1, 3.1374674823029505,
                            decimal=self.significant)
        assert_almost_equal(u2, -3.1374674823029505,
                            decimal=self.significant)
        assert_almost_equal(p1, 0.0017041417600383024,
                            decimal=self.significant)

    def test_brunnermunzel_distribution_error(self):
        alternative = "two-sided"
        distribution = "error"
        assert_(distribution not in ["t", "normal"])
        assert_raises(ValueError,
                      mstats.brunnermunzel,
                      self.X,
                      self.Y,
                      alternative,
                      distribution)

    def test_brunnermunzel_empty_input(self):
        u1, p1 = mstats.brunnermunzel(self.X, [])
        u2, p2 = mstats.brunnermunzel([], self.Y)
        u3, p3 = mstats.brunnermunzel([], [])

        assert_(np.isnan(u1))
        assert_(np.isnan(p1))
        assert_(np.isnan(u2))
        assert_(np.isnan(p2))
        assert_(np.isnan(u3))
        assert_(np.isnan(p3))
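

# Illustrative sketch (not a test): np.ma.masked_invalid, used to build the
# Brunner-Munzel samples above, masks the NaN entries so that only the valid
# observations enter the test.  `_demo_masked_invalid` is a hypothetical
# helper added only for exposition.
def _demo_masked_invalid():
    sample = np.ma.masked_invalid([1.0, np.nan, 2.0])
    assert sample.count() == 2                       # the NaN entry is masked
    assert_array_equal(sample.compressed(), [1.0, 2.0])
    return sample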