import os
import functools
import operator
from scipy._lib import _pep440

import numpy as np
from numpy.testing import assert_
import pytest

import scipy.special as sc

__all__ = ['with_special_errors', 'assert_func_equal', 'FuncData']


#------------------------------------------------------------------------------
# Check if a module is present to be used in tests
#------------------------------------------------------------------------------

class MissingModule:
    def __init__(self, name):
        self.name = name


def check_version(module, min_ver):
    if type(module) == MissingModule:
        return pytest.mark.skip(reason="{} is not installed".format(module.name))
    return pytest.mark.skipif(
        _pep440.parse(module.__version__) < _pep440.Version(min_ver),
        reason="{} version >= {} required".format(module.__name__, min_ver))
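
# Illustrative usage (a sketch, not part of this module): a test module can
# fall back to MissingModule for an optional dependency, and check_version
# then attaches the appropriate skip marker.  The mpmath import and version
# below are only examples.
#
#     try:
#         import mpmath
#     except ImportError:
#         mpmath = MissingModule('mpmath')
#
#     @check_version(mpmath, '0.19')
#     def test_something_with_mpmath():
#         ...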


#------------------------------------------------------------------------------
# Enable convergence and loss of precision warnings -- turn off one by one
#------------------------------------------------------------------------------

def with_special_errors(func):
    """
    Enable special function errors (such as underflow, overflow,
    loss of precision, etc.)
    """
    @functools.wraps(func)
    def wrapper(*a, **kw):
        with sc.errstate(all='raise'):
            res = func(*a, **kw)
        return res
    return wrapper
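
# Example (a sketch; the test below is hypothetical): with the decorator
# applied, error conditions such as overflow raise sc.SpecialFunctionError
# instead of silently producing inf.
#
#     @with_special_errors
#     def test_gamma_overflow_raises():
#         with pytest.raises(sc.SpecialFunctionError):
#             sc.gamma(1e10)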


#------------------------------------------------------------------------------
# Comparing function values at many data points at once, with helpful
# error reports
#------------------------------------------------------------------------------

def assert_func_equal(func, results, points, rtol=None, atol=None,
                      param_filter=None, knownfailure=None,
                      vectorized=True, dtype=None, nan_ok=False,
                      ignore_inf_sign=False, distinguish_nan_and_inf=True):
    if hasattr(points, '__next__'):
        # it's a generator
        points = list(points)

    points = np.asarray(points)
    if points.ndim == 1:
        points = points[:,None]
    nparams = points.shape[1]

    if hasattr(results, '__name__'):
        # function
        data = points
        result_columns = None
        result_func = results
    else:
        # dataset
        data = np.c_[points, results]
        result_columns = list(range(nparams, data.shape[1]))
        result_func = None

    fdata = FuncData(func, data, list(range(nparams)),
                     result_columns=result_columns, result_func=result_func,
                     rtol=rtol, atol=atol, param_filter=param_filter,
                     knownfailure=knownfailure, nan_ok=nan_ok,
                     vectorized=vectorized, ignore_inf_sign=ignore_inf_sign,
                     distinguish_nan_and_inf=distinguish_nan_and_inf)
    fdata.check()
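
# Illustrative usage (a sketch; the helper name below is hypothetical and not
# part of this module): assert_func_equal compares a function against
# tabulated reference values, here gamma at points where the exact value is
# known.
def _example_assert_func_equal():
    points = [0.5, 1.0, 2.0]
    # gamma(0.5) = sqrt(pi), gamma(1) = gamma(2) = 1
    expected = [np.sqrt(np.pi), 1.0, 1.0]
    assert_func_equal(sc.gamma, expected, points, rtol=1e-12)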


class FuncData:
    """
    Data set for checking a special function.

    Parameters
    ----------
    func : function
        Function to test
    data : numpy array
        Columnar data to use for testing
    param_columns : int or tuple of ints
        Column indices in which the parameters to `func` lie.
        Can be imaginary integers to indicate that the parameter
        should be cast to complex.
    result_columns : int or tuple of ints, optional
        Column indices for expected results from `func`.
    result_func : callable, optional
        Function to call to obtain results.
    rtol : float, optional
        Required relative tolerance. Default is 5*eps.
    atol : float, optional
        Required absolute tolerance. Default is 5*tiny.
    param_filter : function, or tuple of functions/Nones, optional
        Filter functions to exclude some parameter ranges.
        If omitted, no filtering is done.
    knownfailure : str, optional
        Known failure error message to raise when the test is run.
        If omitted, no exception is raised.
    nan_ok : bool, optional
        If True, nan is always an accepted result.
    vectorized : bool, optional
        Whether all functions passed in are vectorized.
    ignore_inf_sign : bool, optional
        Whether to ignore signs of infinities.
        (Doesn't matter for complex-valued functions.)
    distinguish_nan_and_inf : bool, optional
        If False, a nan and an inf occurring at the same point are treated
        as equal, and ignore_inf_sign is forced to True. If True (default),
        nan and inf are distinguished from each other.

    """

    def __init__(self, func, data, param_columns, result_columns=None,
                 result_func=None, rtol=None, atol=None, param_filter=None,
                 knownfailure=None, dataname=None, nan_ok=False, vectorized=True,
                 ignore_inf_sign=False, distinguish_nan_and_inf=True):
        self.func = func
        self.data = data
        self.dataname = dataname
        if not hasattr(param_columns, '__len__'):
            param_columns = (param_columns,)
        self.param_columns = tuple(param_columns)
        if result_columns is not None:
            if not hasattr(result_columns, '__len__'):
                result_columns = (result_columns,)
            self.result_columns = tuple(result_columns)
            if result_func is not None:
                raise ValueError("Only result_func or result_columns should be provided")
        elif result_func is not None:
            self.result_columns = None
        else:
            raise ValueError("Either result_func or result_columns should be provided")
        self.result_func = result_func
        self.rtol = rtol
        self.atol = atol
        if not hasattr(param_filter, '__len__'):
            param_filter = (param_filter,)
        self.param_filter = param_filter
        self.knownfailure = knownfailure
        self.nan_ok = nan_ok
        self.vectorized = vectorized
        self.ignore_inf_sign = ignore_inf_sign
        self.distinguish_nan_and_inf = distinguish_nan_and_inf
        if not self.distinguish_nan_and_inf:
            self.ignore_inf_sign = True

    def get_tolerances(self, dtype):
        if not np.issubdtype(dtype, np.inexact):
            dtype = np.dtype(float)
        info = np.finfo(dtype)
        rtol, atol = self.rtol, self.atol
        if rtol is None:
            rtol = 5*info.eps
        if atol is None:
            atol = 5*info.tiny
        return rtol, atol
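
    # For float64 data these defaults evaluate to rtol = 5*eps ~ 1.1e-15 and
    # atol = 5*tiny ~ 1.1e-307.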

    def check(self, data=None, dtype=None, dtypes=None):
        """Check the special function against the data."""
        __tracebackhide__ = operator.methodcaller(
            'errisinstance', AssertionError
        )

        if self.knownfailure:
            pytest.xfail(reason=self.knownfailure)

        if data is None:
            data = self.data

        if dtype is None:
            dtype = data.dtype
        else:
            data = data.astype(dtype)

        rtol, atol = self.get_tolerances(dtype)

        # Apply given filter functions
        if self.param_filter:
            param_mask = np.ones((data.shape[0],), np.bool_)
            for j, filter in zip(self.param_columns, self.param_filter):
                if filter:
                    param_mask &= list(filter(data[:,j]))
            data = data[param_mask]

        # Pick parameters from the correct columns
        params = []
        for idx, j in enumerate(self.param_columns):
            if np.iscomplexobj(j):
                j = int(j.imag)
                params.append(data[:,j].astype(complex))
            elif dtypes and idx < len(dtypes):
                params.append(data[:, j].astype(dtypes[idx]))
            else:
                params.append(data[:,j])

        # Helper for evaluating results
        def eval_func_at_params(func, skip_mask=None):
            if self.vectorized:
                got = func(*params)
            else:
                got = []
                for j in range(len(params[0])):
                    if skip_mask is not None and skip_mask[j]:
                        got.append(np.nan)
                        continue
                    got.append(func(*tuple([params[i][j] for i in range(len(params))])))
                got = np.asarray(got)
            if not isinstance(got, tuple):
                got = (got,)
            return got

        # Evaluate function to be tested
        got = eval_func_at_params(self.func)

        # Grab the correct results
        if self.result_columns is not None:
            # Correct results passed in with the data
            wanted = tuple([data[:,icol] for icol in self.result_columns])
        else:
            # Function producing correct results passed in
            skip_mask = None
            if self.nan_ok and len(got) == 1:
                # Don't spend time evaluating what doesn't need to be evaluated
                skip_mask = np.isnan(got[0])
            wanted = eval_func_at_params(self.result_func, skip_mask=skip_mask)

        # Check the validity of each output returned
        assert_(len(got) == len(wanted))

        for output_num, (x, y) in enumerate(zip(got, wanted)):
            if np.issubdtype(x.dtype, np.complexfloating) or self.ignore_inf_sign:
                pinf_x = np.isinf(x)
                pinf_y = np.isinf(y)
                minf_x = np.isinf(x)
                minf_y = np.isinf(y)
            else:
                pinf_x = np.isposinf(x)
                pinf_y = np.isposinf(y)
                minf_x = np.isneginf(x)
                minf_y = np.isneginf(y)
            nan_x = np.isnan(x)
            nan_y = np.isnan(y)

            with np.errstate(all='ignore'):
                abs_y = np.absolute(y)
                abs_y[~np.isfinite(abs_y)] = 0
                diff = np.absolute(x - y)
                diff[~np.isfinite(diff)] = 0

                rdiff = diff / np.absolute(y)
                rdiff[~np.isfinite(rdiff)] = 0

            tol_mask = (diff <= atol + rtol*abs_y)
            pinf_mask = (pinf_x == pinf_y)
            minf_mask = (minf_x == minf_y)
            nan_mask = (nan_x == nan_y)

            bad_j = ~(tol_mask & pinf_mask & minf_mask & nan_mask)

            point_count = bad_j.size
            if self.nan_ok:
                bad_j &= ~nan_x
                bad_j &= ~nan_y
                point_count -= (nan_x | nan_y).sum()

            if not self.distinguish_nan_and_inf and not self.nan_ok:
                # If nan's are okay we've already covered all these cases
                inf_x = np.isinf(x)
                inf_y = np.isinf(y)
                both_nonfinite = (inf_x & nan_y) | (nan_x & inf_y)
                bad_j &= ~both_nonfinite
                point_count -= both_nonfinite.sum()

            if np.any(bad_j):
                # Some bad results: inform what, where, and how bad
                msg = [""]
                msg.append("Max |adiff|: %g" % diff[bad_j].max())
                msg.append("Max |rdiff|: %g" % rdiff[bad_j].max())
                msg.append("Bad results (%d out of %d) for the following points "
                           "(in output %d):"
                           % (np.sum(bad_j), point_count, output_num,))
                for j in np.nonzero(bad_j)[0]:
                    j = int(j)
                    fmt = lambda x: "%30s" % np.array2string(x[j], precision=18)
                    a = " ".join(map(fmt, params))
                    b = " ".join(map(fmt, got))
                    c = " ".join(map(fmt, wanted))
                    d = fmt(rdiff)
                    msg.append("%s => %s != %s (rdiff %s)" % (a, b, c, d))
                assert_(False, "\n".join(msg))

    def __repr__(self):
        """Pretty-printing, esp. for Nose output"""
        if np.any(list(map(np.iscomplexobj, self.param_columns))):
            is_complex = " (complex)"
        else:
            is_complex = ""
        if self.dataname:
            return "<Data for %s%s: %s>" % (self.func.__name__, is_complex,
                                            os.path.basename(self.dataname))
        else:
            return "<Data for %s%s>" % (self.func.__name__, is_complex)
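

# Illustrative usage (a sketch; the function name below is hypothetical and
# not part of this module): a FuncData instance pairs a function with
# tabulated reference values and compares them point by point on check().
def _example_funcdata_check():
    # Column 0 holds the argument, column 1 the expected value;
    # erf(0) = 0 and erf(1) ~ 0.8427007929497149.
    data = np.array([[0.0, 0.0],
                     [1.0, 0.8427007929497149]])
    FuncData(sc.erf, data, param_columns=0, result_columns=1,
             rtol=1e-10).check()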