# _odds_ratio.py
import numpy as np
from scipy.special import ndtri
from scipy.optimize import brentq
from ._discrete_distns import nchypergeom_fisher
from ._common import ConfidenceInterval


def _sample_odds_ratio(table):
    """
    Given a table [[a, b], [c, d]], compute a*d/(b*c).

    Return nan if the numerator and denominator are 0.
    Return inf if just the denominator is 0.
    """
    # table must be a 2x2 numpy array.
    if table[1, 0] > 0 and table[0, 1] > 0:
        oddsratio = table[0, 0] * table[1, 1] / (table[1, 0] * table[0, 1])
    elif table[0, 0] == 0 or table[1, 1] == 0:
        oddsratio = np.nan
    else:
        oddsratio = np.inf
    return oddsratio
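
# Worked example (illustrative only; the 2x2 table below is the one used in
# the `odds_ratio` docstring, not anything required by this helper): for
# table [[7, 15], [58, 472]], the sample odds ratio is 7*472 / (15*58)
# = 3304/870, approximately 3.7977. The function returns nan when both the
# numerator a*d and the denominator b*c are zero, and inf when only the
# denominator is zero (e.g. [[7, 0], [58, 472]]).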


def _solve(func):
    """
    Solve func(nc) = 0. func must be an increasing function.
    """
    # We could just as well call the variable `x` instead of `nc`, but we
    # always call this function with functions for which nc (the noncentrality
    # parameter) is the variable for which we are solving.
    nc = 1.0
    value = func(nc)
    if value == 0:
        return nc

    # Multiplicative factor by which to increase or decrease nc when
    # searching for a bracketing interval.
    factor = 2.0
    # Find a bracketing interval.
    if value > 0:
        nc /= factor
        while func(nc) > 0:
            nc /= factor
        lo = nc
        hi = factor*nc
    else:
        nc *= factor
        while func(nc) < 0:
            nc *= factor
        lo = nc/factor
        hi = nc

    # lo and hi bracket the solution for nc.
    nc = brentq(func, lo, hi, xtol=1e-13)
    return nc
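
# Sketch of how the bracketing works (hypothetical monotone function, not one
# used by this module): for func = lambda nc: np.log(nc) - 3, func(1) < 0, so
# nc is repeatedly doubled (2, 4, 8, 16, 32) until func(32) > 0; brentq is
# then run on the bracket [16, 32] and converges to the root exp(3) ≈ 20.0855.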


def _nc_hypergeom_mean_inverse(x, M, n, N):
    """
    For the given noncentral hypergeometric parameters x, M, n, and N
    (table[0, 0], total, row 0 sum and column 0 sum, respectively, of a
    2x2 contingency table), find the noncentrality parameter of Fisher's
    noncentral hypergeometric distribution whose mean is x.
    """
    nc = _solve(lambda nc: nchypergeom_fisher.mean(M, n, N, nc) - x)
    return nc


def _hypergeom_params_from_table(table):
    # The notation M, n and N is consistent with stats.hypergeom and
    # stats.nchypergeom_fisher.
    x = table[0, 0]
    M = table.sum()
    n = table[0].sum()
    N = table[:, 0].sum()
    return x, M, n, N
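
# For example (table values taken from the `odds_ratio` docstring), the table
# [[7, 15], [58, 472]] maps to x=7 (top-left cell), M=552 (total), n=22
# (row 0 sum) and N=65 (column 0 sum).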


def _ci_upper(table, alpha):
    """
    Compute the upper end of the confidence interval.
    """
    if _sample_odds_ratio(table) == np.inf:
        return np.inf

    x, M, n, N = _hypergeom_params_from_table(table)

    # nchypergeom_fisher.cdf is a decreasing function of nc, so we negate
    # it in the lambda expression.
    nc = _solve(lambda nc: -nchypergeom_fisher.cdf(x, M, n, N, nc) + alpha)
    return nc


def _ci_lower(table, alpha):
    """
    Compute the lower end of the confidence interval.
    """
    if _sample_odds_ratio(table) == 0:
        return 0

    x, M, n, N = _hypergeom_params_from_table(table)
    nc = _solve(lambda nc: nchypergeom_fisher.sf(x - 1, M, n, N, nc) - alpha)
    return nc
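
# Taken together, _ci_lower and _ci_upper implement the conditional "exact"
# tail conditions: the lower limit is the noncentrality parameter nc at which
# P(X >= x) == alpha, and the upper limit is the nc at which P(X <= x) ==
# alpha, where X follows Fisher's noncentral hypergeometric distribution with
# the table's margins. Both are found with the bracketing solver above.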


def _conditional_oddsratio(table):
    """
    Conditional MLE of the odds ratio for the 2x2 contingency table.
    """
    x, M, n, N = _hypergeom_params_from_table(table)
    # Get the bounds of the support. The support of the noncentral
    # hypergeometric distribution with parameters M, n, and N is the same
    # for all values of the noncentrality parameter, so we can use 1 here.
    lo, hi = nchypergeom_fisher.support(M, n, N, 1)

    # Check if x is at one of the extremes of the support. If so, we know
    # the odds ratio is either 0 or inf.
    if x == lo:
        # x is at the low end of the support.
        return 0
    if x == hi:
        # x is at the high end of the support.
        return np.inf

    nc = _nc_hypergeom_mean_inverse(x, M, n, N)
    return nc
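
# For instance, with the [[7, 15], [58, 472]] table from the `odds_ratio`
# docstring, x=7 lies strictly inside the support, and solving
# nchypergeom_fisher.mean(552, 22, 65, nc) == 7 gives the conditional MLE
# reported there, approximately 3.7837.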


def _conditional_oddsratio_ci(table, confidence_level=0.95,
                              alternative='two-sided'):
    """
    Conditional exact confidence interval for the odds ratio.
    """
    if alternative == 'two-sided':
        alpha = 0.5*(1 - confidence_level)
        lower = _ci_lower(table, alpha)
        upper = _ci_upper(table, alpha)
    elif alternative == 'less':
        lower = 0.0
        upper = _ci_upper(table, 1 - confidence_level)
    else:
        # alternative == 'greater'
        lower = _ci_lower(table, 1 - confidence_level)
        upper = np.inf

    return lower, upper
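
# Note that for a two-sided interval each tail gets half of 1 - confidence
# level; e.g. confidence_level=0.95 puts alpha=0.025 in each tail, while the
# one-sided intervals put the full 0.05 in a single tail.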


def _sample_odds_ratio_ci(table, confidence_level=0.95,
                          alternative='two-sided'):
    oddsratio = _sample_odds_ratio(table)
    log_or = np.log(oddsratio)
    se = np.sqrt((1/table).sum())
    if alternative == 'less':
        z = ndtri(confidence_level)
        loglow = -np.inf
        loghigh = log_or + z*se
    elif alternative == 'greater':
        z = ndtri(confidence_level)
        loglow = log_or - z*se
        loghigh = np.inf
    else:
        # alternative is 'two-sided'
        z = ndtri(0.5*confidence_level + 0.5)
        loglow = log_or - z*se
        loghigh = log_or + z*se

    return np.exp(loglow), np.exp(loghigh)
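
# This is the usual Wald interval on the log odds ratio: with table elements
# a, b, c, d, se = sqrt(1/a + 1/b + 1/c + 1/d) (as noted in the
# confidence_interval docstring below), and the interval is
# exp(log_or +/- z*se) with z the appropriate normal quantile. Rough sketch
# for the docstring table [[7, 15], [58, 472]] at 95% two-sided:
# z = ndtri(0.975) ≈ 1.96 and se = sqrt(1/7 + 1/15 + 1/58 + 1/472) ≈ 0.478.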


class OddsRatioResult:
    """
    Result of `scipy.stats.contingency.odds_ratio`. See the
    docstring for `odds_ratio` for more details.

    Attributes
    ----------
    statistic : float
        The computed odds ratio.

        * If `kind` is ``'sample'``, this is the sample (or unconditional)
          estimate, given by
          ``table[0, 0]*table[1, 1]/(table[0, 1]*table[1, 0])``.
        * If `kind` is ``'conditional'``, this is the conditional
          maximum likelihood estimate for the odds ratio. It is
          the noncentrality parameter of Fisher's noncentral
          hypergeometric distribution with the same hypergeometric
          parameters as `table` and whose mean is ``table[0, 0]``.

    Methods
    -------
    confidence_interval :
        Confidence interval for the odds ratio.
    """

    def __init__(self, _table, _kind, statistic):
        # for now, no need to make _table and _kind public, since this sort of
        # information is returned in very few `scipy.stats` results
        self._table = _table
        self._kind = _kind
        self.statistic = statistic

    def __repr__(self):
        return f"OddsRatioResult(statistic={self.statistic})"

    def confidence_interval(self, confidence_level=0.95,
                            alternative='two-sided'):
        """
        Confidence interval for the odds ratio.

        Parameters
        ----------
        confidence_level : float
            Desired confidence level for the confidence interval.
            The value must be given as a fraction between 0 and 1.
            Default is 0.95 (meaning 95%).
        alternative : {'two-sided', 'less', 'greater'}, optional
            The alternative hypothesis of the hypothesis test to which the
            confidence interval corresponds. That is, suppose the null
            hypothesis is that the true odds ratio equals ``OR`` and the
            confidence interval is ``(low, high)``. Then the following
            options for `alternative` are available (default is
            'two-sided'):

            * 'two-sided': the true odds ratio is not equal to ``OR``. There
              is evidence against the null hypothesis at the chosen
              `confidence_level` if ``high < OR`` or ``low > OR``.
            * 'less': the true odds ratio is less than ``OR``. The ``low`` end
              of the confidence interval is 0, and there is evidence against
              the null hypothesis at the chosen `confidence_level` if
              ``high < OR``.
            * 'greater': the true odds ratio is greater than ``OR``. The
              ``high`` end of the confidence interval is ``np.inf``, and
              there is evidence against the null hypothesis at the chosen
              `confidence_level` if ``low > OR``.

        Returns
        -------
        ci : ``ConfidenceInterval`` instance
            The confidence interval, represented as an object with
            attributes ``low`` and ``high``.

        Notes
        -----
        When `kind` is ``'conditional'``, the limits of the confidence
        interval are the conditional "exact confidence limits" as described
        by Fisher [1]_. The conditional odds ratio and confidence interval
        are also discussed in Section 4.1.2 of the text by Sahai and
        Khurshid [2]_.

        When `kind` is ``'sample'``, the confidence interval is computed
        under the assumption that the logarithm of the odds ratio is normally
        distributed with standard error given by::

            se = sqrt(1/a + 1/b + 1/c + 1/d)

        where ``a``, ``b``, ``c`` and ``d`` are the elements of the
        contingency table. (See, for example, [2]_, section 3.1.3.2,
        or [3]_, section 2.3.3).

        References
        ----------
        .. [1] R. A. Fisher (1935), The logic of inductive inference,
               Journal of the Royal Statistical Society, Vol. 98, No. 1,
               pp. 39-82.
        .. [2] H. Sahai and A. Khurshid (1996), Statistics in Epidemiology:
               Methods, Techniques, and Applications, CRC Press LLC, Boca
               Raton, Florida.
        .. [3] Alan Agresti, An Introduction to Categorical Data Analysis
               (second edition), Wiley, Hoboken, NJ, USA (2007).
        """
        if alternative not in ['two-sided', 'less', 'greater']:
            raise ValueError("`alternative` must be 'two-sided', 'less' or "
                             "'greater'.")
        if confidence_level < 0 or confidence_level > 1:
            raise ValueError('confidence_level must be between 0 and 1')

        if self._kind == 'conditional':
            ci = self._conditional_odds_ratio_ci(confidence_level,
                                                 alternative)
        else:
            ci = self._sample_odds_ratio_ci(confidence_level, alternative)
        return ci

    def _conditional_odds_ratio_ci(self, confidence_level=0.95,
                                   alternative='two-sided'):
        """
        Confidence interval for the conditional odds ratio.
        """
        table = self._table
        if 0 in table.sum(axis=0) or 0 in table.sum(axis=1):
            # If both values in a row or column are zero, the p-value is 1,
            # the odds ratio is NaN and the confidence interval is (0, inf).
            ci = (0, np.inf)
        else:
            ci = _conditional_oddsratio_ci(table,
                                           confidence_level=confidence_level,
                                           alternative=alternative)
        return ConfidenceInterval(low=ci[0], high=ci[1])

    def _sample_odds_ratio_ci(self, confidence_level=0.95,
                              alternative='two-sided'):
        """
        Confidence interval for the sample odds ratio.
        """
        if confidence_level < 0 or confidence_level > 1:
            raise ValueError('confidence_level must be between 0 and 1')

        table = self._table
        if 0 in table.sum(axis=0) or 0 in table.sum(axis=1):
            # If both values in a row or column are zero, the p-value is 1,
            # the odds ratio is NaN and the confidence interval is (0, inf).
            ci = (0, np.inf)
        else:
            ci = _sample_odds_ratio_ci(table,
                                       confidence_level=confidence_level,
                                       alternative=alternative)
        return ConfidenceInterval(low=ci[0], high=ci[1])


def odds_ratio(table, *, kind='conditional'):
    r"""
    Compute the odds ratio for a 2x2 contingency table.

    Parameters
    ----------
    table : array_like of ints
        A 2x2 contingency table. Elements must be non-negative integers.
    kind : str, optional
        Which kind of odds ratio to compute, either the sample
        odds ratio (``kind='sample'``) or the conditional odds ratio
        (``kind='conditional'``). Default is ``'conditional'``.

    Returns
    -------
    result : `~scipy.stats._result_classes.OddsRatioResult` instance
        The returned object has two computed attributes:

        statistic : float
            * If `kind` is ``'sample'``, this is the sample (or unconditional)
              estimate, given by
              ``table[0, 0]*table[1, 1]/(table[0, 1]*table[1, 0])``.
            * If `kind` is ``'conditional'``, this is the conditional
              maximum likelihood estimate for the odds ratio. It is
              the noncentrality parameter of Fisher's noncentral
              hypergeometric distribution with the same hypergeometric
              parameters as `table` and whose mean is ``table[0, 0]``.

        The object has the method `confidence_interval` that computes
        the confidence interval of the odds ratio.

    See Also
    --------
    scipy.stats.fisher_exact
    relative_risk

    Notes
    -----
    The conditional odds ratio was discussed by Fisher (see "Example 1"
    of [1]_). Texts that cover the odds ratio include [2]_ and [3]_.

    .. versionadded:: 1.10.0

    References
    ----------
    .. [1] R. A. Fisher (1935), The logic of inductive inference,
           Journal of the Royal Statistical Society, Vol. 98, No. 1,
           pp. 39-82.
    .. [2] Breslow NE, Day NE (1980). Statistical methods in cancer research.
           Volume I - The analysis of case-control studies. IARC Sci Publ.
           (32):5-338. PMID: 7216345. (See section 4.2.)
    .. [3] H. Sahai and A. Khurshid (1996), Statistics in Epidemiology:
           Methods, Techniques, and Applications, CRC Press LLC, Boca
           Raton, Florida.

    Examples
    --------
    In epidemiology, individuals are classified as "exposed" or
    "unexposed" to some factor or treatment. If the occurrence of some
    illness is under study, those who have the illness are often
    classified as "cases", and those without it are "noncases". The
    counts of the occurrences of these classes give a contingency
    table::

                    exposed    unexposed
        cases          a           b
        noncases       c           d

    The sample odds ratio may be written ``(a/c) / (b/d)``. ``a/c`` can
    be interpreted as the odds of a case occurring in the exposed group,
    and ``b/d`` as the odds of a case occurring in the unexposed group.
    The sample odds ratio is the ratio of these odds. If the odds ratio
    is greater than 1, it suggests that there is a positive association
    between being exposed and being a case.

    Interchanging the rows or columns of the contingency table inverts
    the odds ratio, so it is important to understand the meaning of the
    labels given to the rows and columns of the table when interpreting
    the odds ratio.

    Consider a hypothetical example where it is hypothesized that
    exposure to a certain chemical is associated with increased occurrence
    of a certain disease. Suppose we have the following table for a
    collection of 552 people::

                    exposed    unexposed
        cases          7           15
        noncases      58          472

    The question we ask is "Is exposure to the chemical associated with
    increased risk of the disease?"

    Compute the odds ratio:

    >>> from scipy.stats.contingency import odds_ratio
    >>> res = odds_ratio([[7, 15], [58, 472]])
    >>> res.statistic
    3.7836687705553493

    For this sample, the odds of getting the disease for those who have
    been exposed to the chemical are almost 3.8 times that of those who
    have not been exposed.

    We can compute the 95% confidence interval for the odds ratio:

    >>> res.confidence_interval(confidence_level=0.95)
    ConfidenceInterval(low=1.2514829132266785, high=10.363493716701269)

    The 95% confidence interval for the conditional odds ratio is
    approximately (1.25, 10.4).
    """
    if kind not in ['conditional', 'sample']:
        raise ValueError("`kind` must be 'conditional' or 'sample'.")

    c = np.asarray(table)

    if c.shape != (2, 2):
        raise ValueError(f"Invalid shape {c.shape}. The input `table` must "
                         "be of shape (2, 2).")

    if not np.issubdtype(c.dtype, np.integer):
        raise ValueError("`table` must be an array of integers, but got "
                         f"type {c.dtype}")
    c = c.astype(np.int64)

    if np.any(c < 0):
        raise ValueError("All values in `table` must be nonnegative.")

    if 0 in c.sum(axis=0) or 0 in c.sum(axis=1):
        # If both values in a row or column are zero, the p-value is NaN and
        # the odds ratio is NaN.
        result = OddsRatioResult(_table=c, _kind=kind, statistic=np.nan)
        return result

    if kind == 'sample':
        oddsratio = _sample_odds_ratio(c)
    else:
        # kind is 'conditional'
        oddsratio = _conditional_oddsratio(c)

    result = OddsRatioResult(_table=c, _kind=kind, statistic=oddsratio)
    return result