_qmc.py 89 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369237023712372237323742375237623772378237923802381238223832384238523862387238823892390239123922393239423952396239723982399240024012402240324042405240624072408240924102411241224132414241524162417241824192420242124222423242424252426242724282429243024312432243324342435243624372438243924402441244224432444244524462447244824492450245124522453245424552456245724582459246024612462246324642465246624672468246924702471247224732474247524762477247824792480248124822483248424852486248724882489249024912492249324942495249624972498249925002501250225032504250525062507250825092510251125122513251425152516251725182519252025212522252325242525252625272528252925302531253225332534253525362537253825392540254125422543254425452546254725482549255025512552255325542555255625572558255925602561256225632564256525662567256825692570
  1. """Quasi-Monte Carlo engines and helpers."""
  2. from __future__ import annotations
  3. import copy
  4. import math
  5. import numbers
  6. import os
  7. import warnings
  8. from abc import ABC, abstractmethod
  9. from functools import partial
  10. from typing import (
  11. Callable,
  12. ClassVar,
  13. Dict,
  14. List,
  15. Literal,
  16. Optional,
  17. overload,
  18. Tuple,
  19. TYPE_CHECKING,
  20. )
  21. import numpy as np
  22. if TYPE_CHECKING:
  23. import numpy.typing as npt
  24. from scipy._lib._util import (
  25. DecimalNumber, GeneratorType, IntNumber, SeedType
  26. )
  27. import scipy.stats as stats
  28. from scipy._lib._util import rng_integers
  29. from scipy.spatial import distance, Voronoi
  30. from scipy.special import gammainc
  31. from ._sobol import (
  32. _initialize_v, _cscramble, _fill_p_cumulative, _draw, _fast_forward,
  33. _categorize, _MAXDIM
  34. )
  35. from ._qmc_cy import (
  36. _cy_wrapper_centered_discrepancy,
  37. _cy_wrapper_wrap_around_discrepancy,
  38. _cy_wrapper_mixture_discrepancy,
  39. _cy_wrapper_l2_star_discrepancy,
  40. _cy_wrapper_update_discrepancy,
  41. _cy_van_der_corput_scrambled,
  42. _cy_van_der_corput,
  43. )
  44. __all__ = ['scale', 'discrepancy', 'update_discrepancy',
  45. 'QMCEngine', 'Sobol', 'Halton', 'LatinHypercube', 'PoissonDisk',
  46. 'MultinomialQMC', 'MultivariateNormalQMC']
  47. @overload
  48. def check_random_state(seed: Optional[IntNumber] = ...) -> np.random.Generator:
  49. ...
  50. @overload
  51. def check_random_state(seed: GeneratorType) -> GeneratorType:
  52. ...
  53. # Based on scipy._lib._util.check_random_state
  54. def check_random_state(seed=None):
  55. """Turn `seed` into a `numpy.random.Generator` instance.
  56. Parameters
  57. ----------
  58. seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional # noqa
  59. If `seed` is an int or None, a new `numpy.random.Generator` is
  60. created using ``np.random.default_rng(seed)``.
  61. If `seed` is already a ``Generator`` or ``RandomState`` instance, then
  62. the provided instance is used.
  63. Returns
  64. -------
  65. seed : {`numpy.random.Generator`, `numpy.random.RandomState`}
  66. Random number generator.
  67. """
  68. if seed is None or isinstance(seed, (numbers.Integral, np.integer)):
  69. return np.random.default_rng(seed)
  70. elif isinstance(seed, (np.random.RandomState, np.random.Generator)):
  71. return seed
  72. else:
  73. raise ValueError(f'{seed!r} cannot be used to seed a'
  74. ' numpy.random.Generator instance')
  75. def scale(
  76. sample: npt.ArrayLike,
  77. l_bounds: npt.ArrayLike,
  78. u_bounds: npt.ArrayLike,
  79. *,
  80. reverse: bool = False
  81. ) -> np.ndarray:
  82. r"""Sample scaling from unit hypercube to different bounds.
  83. To convert a sample from :math:`[0, 1)` to :math:`[a, b), b>a`,
  84. with :math:`a` the lower bounds and :math:`b` the upper bounds.
  85. The following transformation is used:
  86. .. math::
  87. (b - a) \cdot \text{sample} + a
  88. Parameters
  89. ----------
  90. sample : array_like (n, d)
  91. Sample to scale.
  92. l_bounds, u_bounds : array_like (d,)
  93. Lower and upper bounds (resp. :math:`a`, :math:`b`) of transformed
  94. data. If `reverse` is True, range of the original data to transform
  95. to the unit hypercube.
  96. reverse : bool, optional
  97. Reverse the transformation from different bounds to the unit hypercube.
  98. Default is False.
  99. Returns
  100. -------
  101. sample : array_like (n, d)
  102. Scaled sample.
  103. Examples
  104. --------
  105. Transform 3 samples in the unit hypercube to bounds:
  106. >>> from scipy.stats import qmc
  107. >>> l_bounds = [-2, 0]
  108. >>> u_bounds = [6, 5]
  109. >>> sample = [[0.5 , 0.75],
  110. ... [0.5 , 0.5],
  111. ... [0.75, 0.25]]
  112. >>> sample_scaled = qmc.scale(sample, l_bounds, u_bounds)
  113. >>> sample_scaled
  114. array([[2. , 3.75],
  115. [2. , 2.5 ],
  116. [4. , 1.25]])
  117. And convert back to the unit hypercube:
  118. >>> sample_ = qmc.scale(sample_scaled, l_bounds, u_bounds, reverse=True)
  119. >>> sample_
  120. array([[0.5 , 0.75],
  121. [0.5 , 0.5 ],
  122. [0.75, 0.25]])
  123. """
  124. sample = np.asarray(sample)
  125. # Checking bounds and sample
  126. if not sample.ndim == 2:
  127. raise ValueError('Sample is not a 2D array')
  128. lower, upper = _validate_bounds(
  129. l_bounds=l_bounds, u_bounds=u_bounds, d=sample.shape[1]
  130. )
  131. if not reverse:
  132. # Checking that sample is within the hypercube
  133. if (sample.max() > 1.) or (sample.min() < 0.):
  134. raise ValueError('Sample is not in unit hypercube')
  135. return sample * (upper - lower) + lower
  136. else:
  137. # Checking that sample is within the bounds
  138. if not (np.all(sample >= lower) and np.all(sample <= upper)):
  139. raise ValueError('Sample is out of bounds')
  140. return (sample - lower) / (upper - lower)
  141. def discrepancy(
  142. sample: npt.ArrayLike,
  143. *,
  144. iterative: bool = False,
  145. method: Literal["CD", "WD", "MD", "L2-star"] = "CD",
  146. workers: IntNumber = 1) -> float:
  147. """Discrepancy of a given sample.
  148. Parameters
  149. ----------
  150. sample : array_like (n, d)
  151. The sample to compute the discrepancy from.
  152. iterative : bool, optional
  153. Must be False if not using it for updating the discrepancy.
  154. Default is False. Refer to the notes for more details.
  155. method : str, optional
  156. Type of discrepancy, can be ``CD``, ``WD``, ``MD`` or ``L2-star``.
  157. Refer to the notes for more details. Default is ``CD``.
  158. workers : int, optional
  159. Number of workers to use for parallel processing. If -1 is given all
  160. CPU threads are used. Default is 1.
  161. Returns
  162. -------
  163. discrepancy : float
  164. Discrepancy.
  165. Notes
  166. -----
  167. The discrepancy is a uniformity criterion used to assess the space filling
  168. of a number of samples in a hypercube. A discrepancy quantifies the
  169. distance between the continuous uniform distribution on a hypercube and the
  170. discrete uniform distribution on :math:`n` distinct sample points.
  171. The lower the value is, the better the coverage of the parameter space is.
  172. For a collection of subsets of the hypercube, the discrepancy is the
  173. difference between the fraction of sample points in one of those
  174. subsets and the volume of that subset. There are different definitions of
  175. discrepancy corresponding to different collections of subsets. Some
  176. versions take a root mean square difference over subsets instead of
  177. a maximum.
  178. A measure of uniformity is reasonable if it satisfies the following
  179. criteria [1]_:
  180. 1. It is invariant under permuting factors and/or runs.
  181. 2. It is invariant under rotation of the coordinates.
  182. 3. It can measure not only uniformity of the sample over the hypercube,
  183. but also the projection uniformity of the sample over non-empty
  184. subset of lower dimension hypercubes.
  185. 4. There is some reasonable geometric meaning.
  186. 5. It is easy to compute.
  187. 6. It satisfies the Koksma-Hlawka-like inequality.
  188. 7. It is consistent with other criteria in experimental design.
  189. Four methods are available:
  190. * ``CD``: Centered Discrepancy - subspace involves a corner of the
  191. hypercube
  192. * ``WD``: Wrap-around Discrepancy - subspace can wrap around bounds
  193. * ``MD``: Mixture Discrepancy - mix between CD/WD covering more criteria
  194. * ``L2-star``: L2-star discrepancy - like CD BUT variant to rotation
  195. See [2]_ for precise definitions of each method.
  196. Lastly, using ``iterative=True``, it is possible to compute the
  197. discrepancy as if we had :math:`n+1` samples. This is useful if we want
  198. to add a point to a sampling and check the candidate which would give the
  199. lowest discrepancy. Then you could just update the discrepancy with
  200. each candidate using `update_discrepancy`. This method is faster than
  201. computing the discrepancy for a large number of candidates.
  202. References
  203. ----------
  204. .. [1] Fang et al. "Design and modeling for computer experiments".
  205. Computer Science and Data Analysis Series, 2006.
  206. .. [2] Zhou Y.-D. et al. "Mixture discrepancy for quasi-random point sets."
  207. Journal of Complexity, 29 (3-4) , pp. 283-301, 2013.
  208. .. [3] T. T. Warnock. "Computational investigations of low discrepancy
  209. point sets." Applications of Number Theory to Numerical
  210. Analysis, Academic Press, pp. 319-343, 1972.
  211. Examples
  212. --------
  213. Calculate the quality of the sample using the discrepancy:
  214. >>> import numpy as np
  215. >>> from scipy.stats import qmc
  216. >>> space = np.array([[1, 3], [2, 6], [3, 2], [4, 5], [5, 1], [6, 4]])
  217. >>> l_bounds = [0.5, 0.5]
  218. >>> u_bounds = [6.5, 6.5]
  219. >>> space = qmc.scale(space, l_bounds, u_bounds, reverse=True)
  220. >>> space
  221. array([[0.08333333, 0.41666667],
  222. [0.25 , 0.91666667],
  223. [0.41666667, 0.25 ],
  224. [0.58333333, 0.75 ],
  225. [0.75 , 0.08333333],
  226. [0.91666667, 0.58333333]])
  227. >>> qmc.discrepancy(space)
  228. 0.008142039609053464
  229. We can also compute iteratively the ``CD`` discrepancy by using
  230. ``iterative=True``.
  231. >>> disc_init = qmc.discrepancy(space[:-1], iterative=True)
  232. >>> disc_init
  233. 0.04769081147119336
  234. >>> qmc.update_discrepancy(space[-1], space[:-1], disc_init)
  235. 0.008142039609053513
  236. """
  237. sample = np.asarray(sample, dtype=np.float64, order="C")
  238. # Checking that sample is within the hypercube and 2D
  239. if not sample.ndim == 2:
  240. raise ValueError("Sample is not a 2D array")
  241. if (sample.max() > 1.) or (sample.min() < 0.):
  242. raise ValueError("Sample is not in unit hypercube")
  243. workers = _validate_workers(workers)
  244. methods = {
  245. "CD": _cy_wrapper_centered_discrepancy,
  246. "WD": _cy_wrapper_wrap_around_discrepancy,
  247. "MD": _cy_wrapper_mixture_discrepancy,
  248. "L2-star": _cy_wrapper_l2_star_discrepancy,
  249. }
  250. if method in methods:
  251. return methods[method](sample, iterative, workers=workers)
  252. else:
  253. raise ValueError(f"{method!r} is not a valid method. It must be one of"
  254. f" {set(methods)!r}")
  255. def update_discrepancy(
  256. x_new: npt.ArrayLike,
  257. sample: npt.ArrayLike,
  258. initial_disc: DecimalNumber) -> float:
  259. """Update the centered discrepancy with a new sample.
  260. Parameters
  261. ----------
  262. x_new : array_like (1, d)
  263. The new sample to add in `sample`.
  264. sample : array_like (n, d)
  265. The initial sample.
  266. initial_disc : float
  267. Centered discrepancy of the `sample`.
  268. Returns
  269. -------
  270. discrepancy : float
  271. Centered discrepancy of the sample composed of `x_new` and `sample`.
  272. Examples
  273. --------
  274. We can also compute iteratively the discrepancy by using
  275. ``iterative=True``.
  276. >>> import numpy as np
  277. >>> from scipy.stats import qmc
  278. >>> space = np.array([[1, 3], [2, 6], [3, 2], [4, 5], [5, 1], [6, 4]])
  279. >>> l_bounds = [0.5, 0.5]
  280. >>> u_bounds = [6.5, 6.5]
  281. >>> space = qmc.scale(space, l_bounds, u_bounds, reverse=True)
  282. >>> disc_init = qmc.discrepancy(space[:-1], iterative=True)
  283. >>> disc_init
  284. 0.04769081147119336
  285. >>> qmc.update_discrepancy(space[-1], space[:-1], disc_init)
  286. 0.008142039609053513
  287. """
  288. sample = np.asarray(sample, dtype=np.float64, order="C")
  289. x_new = np.asarray(x_new, dtype=np.float64, order="C")
  290. # Checking that sample is within the hypercube and 2D
  291. if not sample.ndim == 2:
  292. raise ValueError('Sample is not a 2D array')
  293. if (sample.max() > 1.) or (sample.min() < 0.):
  294. raise ValueError('Sample is not in unit hypercube')
  295. # Checking that x_new is within the hypercube and 1D
  296. if not x_new.ndim == 1:
  297. raise ValueError('x_new is not a 1D array')
  298. if not (np.all(x_new >= 0) and np.all(x_new <= 1)):
  299. raise ValueError('x_new is not in unit hypercube')
  300. if x_new.shape[0] != sample.shape[1]:
  301. raise ValueError("x_new and sample must be broadcastable")
  302. return _cy_wrapper_update_discrepancy(x_new, sample, initial_disc)
  303. def _perturb_discrepancy(sample: np.ndarray, i1: int, i2: int, k: int,
  304. disc: float):
  305. """Centered discrepancy after an elementary perturbation of a LHS.
  306. An elementary perturbation consists of an exchange of coordinates between
  307. two points: ``sample[i1, k] <-> sample[i2, k]``. By construction,
  308. this operation conserves the LHS properties.
  309. Parameters
  310. ----------
  311. sample : array_like (n, d)
  312. The sample (before permutation) to compute the discrepancy from.
  313. i1 : int
  314. The first line of the elementary permutation.
  315. i2 : int
  316. The second line of the elementary permutation.
  317. k : int
  318. The column of the elementary permutation.
  319. disc : float
  320. Centered discrepancy of the design before permutation.
  321. Returns
  322. -------
  323. discrepancy : float
  324. Centered discrepancy of the design after permutation.
  325. References
  326. ----------
  327. .. [1] Jin et al. "An efficient algorithm for constructing optimal design
  328. of computer experiments", Journal of Statistical Planning and
  329. Inference, 2005.
  330. """
  331. n = sample.shape[0]
  332. z_ij = sample - 0.5
  333. # Eq (19)
  334. c_i1j = (1. / n ** 2.
  335. * np.prod(0.5 * (2. + abs(z_ij[i1, :])
  336. + abs(z_ij) - abs(z_ij[i1, :] - z_ij)), axis=1))
  337. c_i2j = (1. / n ** 2.
  338. * np.prod(0.5 * (2. + abs(z_ij[i2, :])
  339. + abs(z_ij) - abs(z_ij[i2, :] - z_ij)), axis=1))
  340. # Eq (20)
  341. c_i1i1 = (1. / n ** 2 * np.prod(1 + abs(z_ij[i1, :]))
  342. - 2. / n * np.prod(1. + 0.5 * abs(z_ij[i1, :])
  343. - 0.5 * z_ij[i1, :] ** 2))
  344. c_i2i2 = (1. / n ** 2 * np.prod(1 + abs(z_ij[i2, :]))
  345. - 2. / n * np.prod(1. + 0.5 * abs(z_ij[i2, :])
  346. - 0.5 * z_ij[i2, :] ** 2))
  347. # Eq (22), typo in the article in the denominator i2 -> i1
  348. num = (2 + abs(z_ij[i2, k]) + abs(z_ij[:, k])
  349. - abs(z_ij[i2, k] - z_ij[:, k]))
  350. denum = (2 + abs(z_ij[i1, k]) + abs(z_ij[:, k])
  351. - abs(z_ij[i1, k] - z_ij[:, k]))
  352. gamma = num / denum
  353. # Eq (23)
  354. c_p_i1j = gamma * c_i1j
  355. # Eq (24)
  356. c_p_i2j = c_i2j / gamma
  357. alpha = (1 + abs(z_ij[i2, k])) / (1 + abs(z_ij[i1, k]))
  358. beta = (2 - abs(z_ij[i2, k])) / (2 - abs(z_ij[i1, k]))
  359. g_i1 = np.prod(1. + abs(z_ij[i1, :]))
  360. g_i2 = np.prod(1. + abs(z_ij[i2, :]))
  361. h_i1 = np.prod(1. + 0.5 * abs(z_ij[i1, :]) - 0.5 * (z_ij[i1, :] ** 2))
  362. h_i2 = np.prod(1. + 0.5 * abs(z_ij[i2, :]) - 0.5 * (z_ij[i2, :] ** 2))
  363. # Eq (25), typo in the article g is missing
  364. c_p_i1i1 = ((g_i1 * alpha) / (n ** 2) - 2. * alpha * beta * h_i1 / n)
  365. # Eq (26), typo in the article n ** 2
  366. c_p_i2i2 = ((g_i2 / ((n ** 2) * alpha)) - (2. * h_i2 / (n * alpha * beta)))
  367. # Eq (26)
  368. sum_ = c_p_i1j - c_i1j + c_p_i2j - c_i2j
  369. mask = np.ones(n, dtype=bool)
  370. mask[[i1, i2]] = False
  371. sum_ = sum(sum_[mask])
  372. disc_ep = (disc + c_p_i1i1 - c_i1i1 + c_p_i2i2 - c_i2i2 + 2 * sum_)
  373. return disc_ep
  374. def primes_from_2_to(n: int) -> np.ndarray:
  375. """Prime numbers from 2 to *n*.
  376. Parameters
  377. ----------
  378. n : int
  379. Sup bound with ``n >= 6``.
  380. Returns
  381. -------
  382. primes : list(int)
  383. Primes in ``2 <= p < n``.
  384. Notes
  385. -----
  386. Taken from [1]_ by P.T. Roy, written consent given on 23.04.2021
  387. by the original author, Bruno Astrolino, for free use in SciPy under
  388. the 3-clause BSD.
  389. References
  390. ----------
  391. .. [1] `StackOverflow <https://stackoverflow.com/questions/2068372>`_.
  392. """
  393. sieve = np.ones(n // 3 + (n % 6 == 2), dtype=bool)
  394. for i in range(1, int(n ** 0.5) // 3 + 1):
  395. k = 3 * i + 1 | 1
  396. sieve[k * k // 3::2 * k] = False
  397. sieve[k * (k - 2 * (i & 1) + 4) // 3::2 * k] = False
  398. return np.r_[2, 3, ((3 * np.nonzero(sieve)[0][1:] + 1) | 1)]
  399. def n_primes(n: IntNumber) -> List[int]:
  400. """List of the n-first prime numbers.
  401. Parameters
  402. ----------
  403. n : int
  404. Number of prime numbers wanted.
  405. Returns
  406. -------
  407. primes : list(int)
  408. List of primes.
  409. """
  410. primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59,
  411. 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127,
  412. 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193,
  413. 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269,
  414. 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349,
  415. 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431,
  416. 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503,
  417. 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599,
  418. 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673,
  419. 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761,
  420. 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857,
  421. 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947,
  422. 953, 967, 971, 977, 983, 991, 997][:n] # type: ignore[misc]
  423. if len(primes) < n:
  424. big_number = 2000
  425. while 'Not enough primes':
  426. primes = primes_from_2_to(big_number)[:n] # type: ignore
  427. if len(primes) == n:
  428. break
  429. big_number += 1000
  430. return primes
  431. def van_der_corput(
  432. n: IntNumber,
  433. base: IntNumber = 2,
  434. *,
  435. start_index: IntNumber = 0,
  436. scramble: bool = False,
  437. seed: SeedType = None,
  438. workers: IntNumber = 1) -> np.ndarray:
  439. """Van der Corput sequence.
  440. Pseudo-random number generator based on a b-adic expansion.
  441. Scrambling uses permutations of the remainders (see [1]_). Multiple
  442. permutations are applied to construct a point. The sequence of
  443. permutations has to be the same for all points of the sequence.
  444. Parameters
  445. ----------
  446. n : int
  447. Number of element of the sequence.
  448. base : int, optional
  449. Base of the sequence. Default is 2.
  450. start_index : int, optional
  451. Index to start the sequence from. Default is 0.
  452. scramble : bool, optional
  453. If True, use Owen scrambling. Otherwise no scrambling is done.
  454. Default is True.
  455. seed : {None, int, `numpy.random.Generator`}, optional
  456. If `seed` is an int or None, a new `numpy.random.Generator` is
  457. created using ``np.random.default_rng(seed)``.
  458. If `seed` is already a ``Generator`` instance, then the provided
  459. instance is used.
  460. workers : int, optional
  461. Number of workers to use for parallel processing. If -1 is
  462. given all CPU threads are used. Default is 1.
  463. Returns
  464. -------
  465. sequence : list (n,)
  466. Sequence of Van der Corput.
  467. References
  468. ----------
  469. .. [1] A. B. Owen. "A randomized Halton algorithm in R",
  470. :arxiv:`1706.02808`, 2017.
  471. """
  472. if base < 2:
  473. raise ValueError("'base' must be at least 2")
  474. if scramble:
  475. rng = check_random_state(seed)
  476. # In Algorithm 1 of Owen 2017, a permutation of `np.arange(base)` is
  477. # created for each positive integer `k` such that `1 - base**-k < 1`
  478. # using floating-point arithmetic. For double precision floats, the
  479. # condition `1 - base**-k < 1` can also be written as `base**-k >
  480. # 2**-54`, which makes it more apparent how many permutations we need
  481. # to create.
  482. count = math.ceil(54 / math.log2(base)) - 1
  483. permutations = np.repeat(np.arange(base)[None], count, axis=0)
  484. for perm in permutations:
  485. rng.shuffle(perm)
  486. return _cy_van_der_corput_scrambled(n, base, start_index,
  487. permutations, workers)
  488. else:
  489. return _cy_van_der_corput(n, base, start_index, workers)
  490. class QMCEngine(ABC):
  491. """A generic Quasi-Monte Carlo sampler class meant for subclassing.
  492. QMCEngine is a base class to construct a specific Quasi-Monte Carlo
  493. sampler. It cannot be used directly as a sampler.
  494. Parameters
  495. ----------
  496. d : int
  497. Dimension of the parameter space.
  498. optimization : {None, "random-cd", "lloyd"}, optional
  499. Whether to use an optimization scheme to improve the quality after
  500. sampling. Note that this is a post-processing step that does not
  501. guarantee that all properties of the sample will be conserved.
  502. Default is None.
  503. * ``random-cd``: random permutations of coordinates to lower the
  504. centered discrepancy. The best sample based on the centered
  505. discrepancy is constantly updated. Centered discrepancy-based
  506. sampling shows better space-filling robustness toward 2D and 3D
  507. subprojections compared to using other discrepancy measures.
  508. * ``lloyd``: Perturb samples using a modified Lloyd-Max algorithm.
  509. The process converges to equally spaced samples.
  510. .. versionadded:: 1.10.0
  511. seed : {None, int, `numpy.random.Generator`}, optional
  512. If `seed` is an int or None, a new `numpy.random.Generator` is
  513. created using ``np.random.default_rng(seed)``.
  514. If `seed` is already a ``Generator`` instance, then the provided
  515. instance is used.
  516. Notes
  517. -----
  518. By convention samples are distributed over the half-open interval
  519. ``[0, 1)``. Instances of the class can access the attributes: ``d`` for
  520. the dimension; and ``rng`` for the random number generator (used for the
  521. ``seed``).
  522. **Subclassing**
  523. When subclassing `QMCEngine` to create a new sampler, ``__init__`` and
  524. ``random`` must be redefined.
  525. * ``__init__(d, seed=None)``: at least fix the dimension. If the sampler
  526. does not take advantage of a ``seed`` (deterministic methods like
  527. Halton), this parameter can be omitted.
  528. * ``_random(n, *, workers=1)``: draw ``n`` from the engine. ``workers``
  529. is used for parallelism. See `Halton` for example.
  530. Optionally, two other methods can be overwritten by subclasses:
  531. * ``reset``: Reset the engine to its original state.
  532. * ``fast_forward``: If the sequence is deterministic (like Halton
  533. sequence), then ``fast_forward(n)`` is skipping the ``n`` first draw.
  534. Examples
  535. --------
  536. To create a random sampler based on ``np.random.random``, we would do the
  537. following:
  538. >>> from scipy.stats import qmc
  539. >>> class RandomEngine(qmc.QMCEngine):
  540. ... def __init__(self, d, seed=None):
  541. ... super().__init__(d=d, seed=seed)
  542. ...
  543. ...
  544. ... def _random(self, n=1, *, workers=1):
  545. ... return self.rng.random((n, self.d))
  546. ...
  547. ...
  548. ... def reset(self):
  549. ... super().__init__(d=self.d, seed=self.rng_seed)
  550. ... return self
  551. ...
  552. ...
  553. ... def fast_forward(self, n):
  554. ... self.random(n)
  555. ... return self
  556. After subclassing `QMCEngine` to define the sampling strategy we want to
  557. use, we can create an instance to sample from.
  558. >>> engine = RandomEngine(2)
  559. >>> engine.random(5)
  560. array([[0.22733602, 0.31675834], # random
  561. [0.79736546, 0.67625467],
  562. [0.39110955, 0.33281393],
  563. [0.59830875, 0.18673419],
  564. [0.67275604, 0.94180287]])
  565. We can also reset the state of the generator and resample again.
  566. >>> _ = engine.reset()
  567. >>> engine.random(5)
  568. array([[0.22733602, 0.31675834], # random
  569. [0.79736546, 0.67625467],
  570. [0.39110955, 0.33281393],
  571. [0.59830875, 0.18673419],
  572. [0.67275604, 0.94180287]])
  573. """
  574. @abstractmethod
  575. def __init__(
  576. self,
  577. d: IntNumber,
  578. *,
  579. optimization: Optional[Literal["random-cd", "lloyd"]] = None,
  580. seed: SeedType = None
  581. ) -> None:
  582. if not np.issubdtype(type(d), np.integer) or d < 0:
  583. raise ValueError('d must be a non-negative integer value')
  584. self.d = d
  585. self.rng = check_random_state(seed)
  586. self.rng_seed = copy.deepcopy(seed)
  587. self.num_generated = 0
  588. config = {
  589. # random-cd
  590. "n_nochange": 100,
  591. "n_iters": 10_000,
  592. "rng": self.rng,
  593. # lloyd
  594. "tol": 1e-5,
  595. "maxiter": 10,
  596. "qhull_options": None,
  597. }
  598. self.optimization_method = _select_optimizer(optimization, config)
@abstractmethod
def _random(
    self, n: IntNumber = 1, *, workers: IntNumber = 1
) -> np.ndarray:
    # Abstract hook implemented by each engine: draw ``n`` points in
    # ``[0, 1)^d``.  ``workers`` is a parallelism hint; engines that
    # cannot parallelize are free to ignore it.  Called by the public
    # `random` wrapper, which handles optimization and bookkeeping.
    ...
  604. def random(
  605. self, n: IntNumber = 1, *, workers: IntNumber = 1
  606. ) -> np.ndarray:
  607. """Draw `n` in the half-open interval ``[0, 1)``.
  608. Parameters
  609. ----------
  610. n : int, optional
  611. Number of samples to generate in the parameter space.
  612. Default is 1.
  613. workers : int, optional
  614. Only supported with `Halton`.
  615. Number of workers to use for parallel processing. If -1 is
  616. given all CPU threads are used. Default is 1. It becomes faster
  617. than one worker for `n` greater than :math:`10^3`.
  618. Returns
  619. -------
  620. sample : array_like (n, d)
  621. QMC sample.
  622. """
  623. sample = self._random(n, workers=workers)
  624. if self.optimization_method is not None:
  625. sample = self.optimization_method(sample)
  626. self.num_generated += n
  627. return sample
  628. def integers(
  629. self,
  630. l_bounds: npt.ArrayLike,
  631. *,
  632. u_bounds: Optional[npt.ArrayLike] = None,
  633. n: IntNumber = 1,
  634. endpoint: bool = False,
  635. workers: IntNumber = 1
  636. ) -> np.ndarray:
  637. r"""
  638. Draw `n` integers from `l_bounds` (inclusive) to `u_bounds`
  639. (exclusive), or if endpoint=True, `l_bounds` (inclusive) to
  640. `u_bounds` (inclusive).
  641. Parameters
  642. ----------
  643. l_bounds : int or array-like of ints
  644. Lowest (signed) integers to be drawn (unless ``u_bounds=None``,
  645. in which case this parameter is 0 and this value is used for
  646. `u_bounds`).
  647. u_bounds : int or array-like of ints, optional
  648. If provided, one above the largest (signed) integer to be drawn
  649. (see above for behavior if ``u_bounds=None``).
  650. If array-like, must contain integer values.
  651. n : int, optional
  652. Number of samples to generate in the parameter space.
  653. Default is 1.
  654. endpoint : bool, optional
  655. If true, sample from the interval ``[l_bounds, u_bounds]`` instead
  656. of the default ``[l_bounds, u_bounds)``. Defaults is False.
  657. workers : int, optional
  658. Number of workers to use for parallel processing. If -1 is
  659. given all CPU threads are used. Only supported when using `Halton`
  660. Default is 1.
  661. Returns
  662. -------
  663. sample : array_like (n, d)
  664. QMC sample.
  665. Notes
  666. -----
  667. It is safe to just use the same ``[0, 1)`` to integer mapping
  668. with QMC that you would use with MC. You still get unbiasedness,
  669. a strong law of large numbers, an asymptotically infinite variance
  670. reduction and a finite sample variance bound.
  671. To convert a sample from :math:`[0, 1)` to :math:`[a, b), b>a`,
  672. with :math:`a` the lower bounds and :math:`b` the upper bounds,
  673. the following transformation is used:
  674. .. math::
  675. \text{floor}((b - a) \cdot \text{sample} + a)
  676. """
  677. if u_bounds is None:
  678. u_bounds = l_bounds
  679. l_bounds = 0
  680. u_bounds = np.atleast_1d(u_bounds)
  681. l_bounds = np.atleast_1d(l_bounds)
  682. if endpoint:
  683. u_bounds = u_bounds + 1
  684. if (not np.issubdtype(l_bounds.dtype, np.integer) or
  685. not np.issubdtype(u_bounds.dtype, np.integer)):
  686. message = ("'u_bounds' and 'l_bounds' must be integers or"
  687. " array-like of integers")
  688. raise ValueError(message)
  689. if isinstance(self, Halton):
  690. sample = self.random(n=n, workers=workers)
  691. else:
  692. sample = self.random(n=n)
  693. sample = scale(sample, l_bounds=l_bounds, u_bounds=u_bounds)
  694. sample = np.floor(sample).astype(np.int64)
  695. return sample
  696. def reset(self) -> QMCEngine:
  697. """Reset the engine to base state.
  698. Returns
  699. -------
  700. engine : QMCEngine
  701. Engine reset to its base state.
  702. """
  703. seed = copy.deepcopy(self.rng_seed)
  704. self.rng = check_random_state(seed)
  705. self.num_generated = 0
  706. return self
  707. def fast_forward(self, n: IntNumber) -> QMCEngine:
  708. """Fast-forward the sequence by `n` positions.
  709. Parameters
  710. ----------
  711. n : int
  712. Number of points to skip in the sequence.
  713. Returns
  714. -------
  715. engine : QMCEngine
  716. Engine reset to its base state.
  717. """
  718. self.random(n=n)
  719. return self
  720. class Halton(QMCEngine):
  721. """Halton sequence.
  722. Pseudo-random number generator that generalize the Van der Corput sequence
  723. for multiple dimensions. The Halton sequence uses the base-two Van der
  724. Corput sequence for the first dimension, base-three for its second and
  725. base-:math:`n` for its n-dimension.
  726. Parameters
  727. ----------
  728. d : int
  729. Dimension of the parameter space.
  730. scramble : bool, optional
  731. If True, use Owen scrambling. Otherwise no scrambling is done.
  732. Default is True.
  733. optimization : {None, "random-cd", "lloyd"}, optional
  734. Whether to use an optimization scheme to improve the quality after
  735. sampling. Note that this is a post-processing step that does not
  736. guarantee that all properties of the sample will be conserved.
  737. Default is None.
  738. * ``random-cd``: random permutations of coordinates to lower the
  739. centered discrepancy. The best sample based on the centered
  740. discrepancy is constantly updated. Centered discrepancy-based
  741. sampling shows better space-filling robustness toward 2D and 3D
  742. subprojections compared to using other discrepancy measures.
  743. * ``lloyd``: Perturb samples using a modified Lloyd-Max algorithm.
  744. The process converges to equally spaced samples.
  745. .. versionadded:: 1.10.0
  746. seed : {None, int, `numpy.random.Generator`}, optional
  747. If `seed` is an int or None, a new `numpy.random.Generator` is
  748. created using ``np.random.default_rng(seed)``.
  749. If `seed` is already a ``Generator`` instance, then the provided
  750. instance is used.
  751. Notes
  752. -----
  753. The Halton sequence has severe striping artifacts for even modestly
  754. large dimensions. These can be ameliorated by scrambling. Scrambling
  755. also supports replication-based error estimates and extends
  756. applicabiltiy to unbounded integrands.
  757. References
  758. ----------
  759. .. [1] Halton, "On the efficiency of certain quasi-random sequences of
  760. points in evaluating multi-dimensional integrals", Numerische
  761. Mathematik, 1960.
  762. .. [2] A. B. Owen. "A randomized Halton algorithm in R",
  763. :arxiv:`1706.02808`, 2017.
  764. Examples
  765. --------
  766. Generate samples from a low discrepancy sequence of Halton.
  767. >>> from scipy.stats import qmc
  768. >>> sampler = qmc.Halton(d=2, scramble=False)
  769. >>> sample = sampler.random(n=5)
  770. >>> sample
  771. array([[0. , 0. ],
  772. [0.5 , 0.33333333],
  773. [0.25 , 0.66666667],
  774. [0.75 , 0.11111111],
  775. [0.125 , 0.44444444]])
  776. Compute the quality of the sample using the discrepancy criterion.
  777. >>> qmc.discrepancy(sample)
  778. 0.088893711419753
  779. If some wants to continue an existing design, extra points can be obtained
  780. by calling again `random`. Alternatively, you can skip some points like:
  781. >>> _ = sampler.fast_forward(5)
  782. >>> sample_continued = sampler.random(n=5)
  783. >>> sample_continued
  784. array([[0.3125 , 0.37037037],
  785. [0.8125 , 0.7037037 ],
  786. [0.1875 , 0.14814815],
  787. [0.6875 , 0.48148148],
  788. [0.4375 , 0.81481481]])
  789. Finally, samples can be scaled to bounds.
  790. >>> l_bounds = [0, 2]
  791. >>> u_bounds = [10, 5]
  792. >>> qmc.scale(sample_continued, l_bounds, u_bounds)
  793. array([[3.125 , 3.11111111],
  794. [8.125 , 4.11111111],
  795. [1.875 , 2.44444444],
  796. [6.875 , 3.44444444],
  797. [4.375 , 4.44444444]])
  798. """
  799. def __init__(
  800. self, d: IntNumber, *, scramble: bool = True,
  801. optimization: Optional[Literal["random-cd", "lloyd"]] = None,
  802. seed: SeedType = None
  803. ) -> None:
  804. # Used in `scipy.integrate.qmc_quad`
  805. self._init_quad = {'d': d, 'scramble': True,
  806. 'optimization': optimization}
  807. super().__init__(d=d, optimization=optimization, seed=seed)
  808. self.seed = seed
  809. self.base = n_primes(d)
  810. self.scramble = scramble
  811. def _random(
  812. self, n: IntNumber = 1, *, workers: IntNumber = 1
  813. ) -> np.ndarray:
  814. """Draw `n` in the half-open interval ``[0, 1)``.
  815. Parameters
  816. ----------
  817. n : int, optional
  818. Number of samples to generate in the parameter space. Default is 1.
  819. workers : int, optional
  820. Number of workers to use for parallel processing. If -1 is
  821. given all CPU threads are used. Default is 1. It becomes faster
  822. than one worker for `n` greater than :math:`10^3`.
  823. Returns
  824. -------
  825. sample : array_like (n, d)
  826. QMC sample.
  827. """
  828. workers = _validate_workers(workers)
  829. # Generate a sample using a Van der Corput sequence per dimension.
  830. # important to have ``type(bdim) == int`` for performance reason
  831. sample = [van_der_corput(n, int(bdim), start_index=self.num_generated,
  832. scramble=self.scramble,
  833. seed=copy.deepcopy(self.seed),
  834. workers=workers)
  835. for bdim in self.base]
  836. return np.array(sample).T.reshape(n, self.d)
  837. class LatinHypercube(QMCEngine):
  838. r"""Latin hypercube sampling (LHS).
  839. A Latin hypercube sample [1]_ generates :math:`n` points in
  840. :math:`[0,1)^{d}`. Each univariate marginal distribution is stratified,
  841. placing exactly one point in :math:`[j/n, (j+1)/n)` for
  842. :math:`j=0,1,...,n-1`. They are still applicable when :math:`n << d`.
  843. Parameters
  844. ----------
  845. d : int
  846. Dimension of the parameter space.
  847. centered : bool, optional
  848. Center samples within cells of a multi-dimensional grid.
  849. Default is False.
  850. .. deprecated:: 1.10.0
  851. `centered` is deprecated as of SciPy 1.10.0 and will be removed in
  852. 1.12.0. Use `scramble` instead. ``centered=True`` corresponds to
  853. ``scramble=False``.
  854. scramble : bool, optional
  855. When False, center samples within cells of a multi-dimensional grid.
  856. Otherwise, samples are randomly placed within cells of the grid.
  857. .. note::
  858. Setting ``scramble=False`` does not ensure deterministic output.
  859. For that, use the `seed` parameter.
  860. Default is True.
  861. .. versionadded:: 1.10.0
  862. optimization : {None, "random-cd", "lloyd"}, optional
  863. Whether to use an optimization scheme to improve the quality after
  864. sampling. Note that this is a post-processing step that does not
  865. guarantee that all properties of the sample will be conserved.
  866. Default is None.
  867. * ``random-cd``: random permutations of coordinates to lower the
  868. centered discrepancy. The best sample based on the centered
  869. discrepancy is constantly updated. Centered discrepancy-based
  870. sampling shows better space-filling robustness toward 2D and 3D
  871. subprojections compared to using other discrepancy measures.
  872. * ``lloyd``: Perturb samples using a modified Lloyd-Max algorithm.
  873. The process converges to equally spaced samples.
  874. .. versionadded:: 1.8.0
  875. .. versionchanged:: 1.10.0
  876. Add ``lloyd``.
  877. strength : {1, 2}, optional
  878. Strength of the LHS. ``strength=1`` produces a plain LHS while
  879. ``strength=2`` produces an orthogonal array based LHS of strength 2
  880. [7]_, [8]_. In that case, only ``n=p**2`` points can be sampled,
  881. with ``p`` a prime number. It also constrains ``d <= p + 1``.
  882. Default is 1.
  883. .. versionadded:: 1.8.0
  884. seed : {None, int, `numpy.random.Generator`}, optional
  885. If `seed` is an int or None, a new `numpy.random.Generator` is
  886. created using ``np.random.default_rng(seed)``.
  887. If `seed` is already a ``Generator`` instance, then the provided
  888. instance is used.
  889. Notes
  890. -----
  891. When LHS is used for integrating a function :math:`f` over :math:`n`,
  892. LHS is extremely effective on integrands that are nearly additive [2]_.
  893. With a LHS of :math:`n` points, the variance of the integral is always
  894. lower than plain MC on :math:`n-1` points [3]_. There is a central limit
  895. theorem for LHS on the mean and variance of the integral [4]_, but not
  896. necessarily for optimized LHS due to the randomization.
  897. :math:`A` is called an orthogonal array of strength :math:`t` if in each
  898. n-row-by-t-column submatrix of :math:`A`: all :math:`p^t` possible
  899. distinct rows occur the same number of times. The elements of :math:`A`
  900. are in the set :math:`\{0, 1, ..., p-1\}`, also called symbols.
  901. The constraint that :math:`p` must be a prime number is to allow modular
  902. arithmetic. Increasing strength adds some symmetry to the sub-projections
  903. of a sample. With strength 2, samples are symmetric along the diagonals of
  904. 2D sub-projections. This may be undesirable, but on the other hand, the
  905. sample dispersion is improved.
  906. Strength 1 (plain LHS) brings an advantage over strength 0 (MC) and
  907. strength 2 is a useful increment over strength 1. Going to strength 3 is
  908. a smaller increment and scrambled QMC like Sobol', Halton are more
  909. performant [7]_.
  910. To create a LHS of strength 2, the orthogonal array :math:`A` is
  911. randomized by applying a random, bijective map of the set of symbols onto
  912. itself. For example, in column 0, all 0s might become 2; in column 1,
  913. all 0s might become 1, etc.
  914. Then, for each column :math:`i` and symbol :math:`j`, we add a plain,
  915. one-dimensional LHS of size :math:`p` to the subarray where
  916. :math:`A^i = j`. The resulting matrix is finally divided by :math:`p`.
  917. References
  918. ----------
  919. .. [1] Mckay et al., "A Comparison of Three Methods for Selecting Values
  920. of Input Variables in the Analysis of Output from a Computer Code."
  921. Technometrics, 1979.
  922. .. [2] M. Stein, "Large sample properties of simulations using Latin
  923. hypercube sampling." Technometrics 29, no. 2: 143-151, 1987.
  924. .. [3] A. B. Owen, "Monte Carlo variance of scrambled net quadrature."
  925. SIAM Journal on Numerical Analysis 34, no. 5: 1884-1910, 1997
  926. .. [4] Loh, W.-L. "On Latin hypercube sampling." The annals of statistics
  927. 24, no. 5: 2058-2080, 1996.
  928. .. [5] Fang et al. "Design and modeling for computer experiments".
  929. Computer Science and Data Analysis Series, 2006.
  930. .. [6] Damblin et al., "Numerical studies of space filling designs:
  931. optimization of Latin Hypercube Samples and subprojection properties."
  932. Journal of Simulation, 2013.
  933. .. [7] A. B. Owen , "Orthogonal arrays for computer experiments,
  934. integration and visualization." Statistica Sinica, 1992.
  935. .. [8] B. Tang, "Orthogonal Array-Based Latin Hypercubes."
  936. Journal of the American Statistical Association, 1993.
  937. Examples
  938. --------
  939. Generate samples from a Latin hypercube generator.
  940. >>> from scipy.stats import qmc
  941. >>> sampler = qmc.LatinHypercube(d=2)
  942. >>> sample = sampler.random(n=5)
  943. >>> sample
  944. array([[0.1545328 , 0.53664833], # random
  945. [0.84052691, 0.06474907],
  946. [0.52177809, 0.93343721],
  947. [0.68033825, 0.36265316],
  948. [0.26544879, 0.61163943]])
  949. Compute the quality of the sample using the discrepancy criterion.
  950. >>> qmc.discrepancy(sample)
  951. 0.0196... # random
  952. Samples can be scaled to bounds.
  953. >>> l_bounds = [0, 2]
  954. >>> u_bounds = [10, 5]
  955. >>> qmc.scale(sample, l_bounds, u_bounds)
  956. array([[1.54532796, 3.609945 ], # random
  957. [8.40526909, 2.1942472 ],
  958. [5.2177809 , 4.80031164],
  959. [6.80338249, 3.08795949],
  960. [2.65448791, 3.83491828]])
  961. Use the `optimization` keyword argument to produce a LHS with
  962. lower discrepancy at higher computational cost.
  963. >>> sampler = qmc.LatinHypercube(d=2, optimization="random-cd")
  964. >>> sample = sampler.random(n=5)
  965. >>> qmc.discrepancy(sample)
  966. 0.0176... # random
  967. Use the `strength` keyword argument to produce an orthogonal array based
  968. LHS of strength 2. In this case, the number of sample points must be the
  969. square of a prime number.
  970. >>> sampler = qmc.LatinHypercube(d=2, strength=2)
  971. >>> sample = sampler.random(n=9)
  972. >>> qmc.discrepancy(sample)
  973. 0.00526... # random
  974. Options could be combined to produce an optimized centered
  975. orthogonal array based LHS. After optimization, the result would not
  976. be guaranteed to be of strength 2.
  977. """
  978. def __init__(
  979. self, d: IntNumber, *, centered: bool = False,
  980. scramble: bool = True,
  981. strength: int = 1,
  982. optimization: Optional[Literal["random-cd", "lloyd"]] = None,
  983. seed: SeedType = None
  984. ) -> None:
  985. if centered:
  986. scramble = False
  987. warnings.warn(
  988. "'centered' is deprecated and will be removed in SciPy 1.12."
  989. " Please use 'scramble' instead. 'centered=True' corresponds"
  990. " to 'scramble=False'.",
  991. stacklevel=2
  992. )
  993. # Used in `scipy.integrate.qmc_quad`
  994. self._init_quad = {'d': d, 'scramble': True, 'strength': strength,
  995. 'optimization': optimization}
  996. super().__init__(d=d, seed=seed, optimization=optimization)
  997. self.scramble = scramble
  998. lhs_method_strength = {
  999. 1: self._random_lhs,
  1000. 2: self._random_oa_lhs
  1001. }
  1002. try:
  1003. self.lhs_method: Callable = lhs_method_strength[strength]
  1004. except KeyError as exc:
  1005. message = (f"{strength!r} is not a valid strength. It must be one"
  1006. f" of {set(lhs_method_strength)!r}")
  1007. raise ValueError(message) from exc
  1008. def _random(
  1009. self, n: IntNumber = 1, *, workers: IntNumber = 1
  1010. ) -> np.ndarray:
  1011. lhs = self.lhs_method(n)
  1012. return lhs
  1013. def _random_lhs(self, n: IntNumber = 1) -> np.ndarray:
  1014. """Base LHS algorithm."""
  1015. if not self.scramble:
  1016. samples: np.ndarray | float = 0.5
  1017. else:
  1018. samples = self.rng.uniform(size=(n, self.d))
  1019. perms = np.tile(np.arange(1, n + 1),
  1020. (self.d, 1)) # type: ignore[arg-type]
  1021. for i in range(self.d):
  1022. self.rng.shuffle(perms[i, :])
  1023. perms = perms.T
  1024. samples = (perms - samples) / n
  1025. return samples
  1026. def _random_oa_lhs(self, n: IntNumber = 4) -> np.ndarray:
  1027. """Orthogonal array based LHS of strength 2."""
  1028. p = np.sqrt(n).astype(int)
  1029. n_row = p**2
  1030. n_col = p + 1
  1031. primes = primes_from_2_to(p + 1)
  1032. if p not in primes or n != n_row:
  1033. raise ValueError(
  1034. "n is not the square of a prime number. Close"
  1035. f" values are {primes[-2:]**2}"
  1036. )
  1037. if self.d > p + 1:
  1038. raise ValueError("n is too small for d. Must be n > (d-1)**2")
  1039. oa_sample = np.zeros(shape=(n_row, n_col), dtype=int)
  1040. # OA of strength 2
  1041. arrays = np.tile(np.arange(p), (2, 1))
  1042. oa_sample[:, :2] = np.stack(np.meshgrid(*arrays),
  1043. axis=-1).reshape(-1, 2)
  1044. for p_ in range(1, p):
  1045. oa_sample[:, 2+p_-1] = np.mod(oa_sample[:, 0]
  1046. + p_*oa_sample[:, 1], p)
  1047. # scramble the OA
  1048. oa_sample_ = np.empty(shape=(n_row, n_col), dtype=int)
  1049. for j in range(n_col):
  1050. perms = self.rng.permutation(p)
  1051. oa_sample_[:, j] = perms[oa_sample[:, j]]
  1052. # following is making a scrambled OA into an OA-LHS
  1053. oa_lhs_sample = np.zeros(shape=(n_row, n_col))
  1054. lhs_engine = LatinHypercube(d=1, scramble=self.scramble, strength=1,
  1055. seed=self.rng) # type: QMCEngine
  1056. for j in range(n_col):
  1057. for k in range(p):
  1058. idx = oa_sample[:, j] == k
  1059. lhs = lhs_engine.random(p).flatten()
  1060. oa_lhs_sample[:, j][idx] = lhs + oa_sample[:, j][idx]
  1061. lhs_engine = lhs_engine.reset()
  1062. oa_lhs_sample /= p
  1063. return oa_lhs_sample[:, :self.d] # type: ignore
  1064. class Sobol(QMCEngine):
  1065. """Engine for generating (scrambled) Sobol' sequences.
  1066. Sobol' sequences are low-discrepancy, quasi-random numbers. Points
  1067. can be drawn using two methods:
  1068. * `random_base2`: safely draw :math:`n=2^m` points. This method
  1069. guarantees the balance properties of the sequence.
  1070. * `random`: draw an arbitrary number of points from the
  1071. sequence. See warning below.
  1072. Parameters
  1073. ----------
  1074. d : int
  1075. Dimensionality of the sequence. Max dimensionality is 21201.
  1076. scramble : bool, optional
  1077. If True, use LMS+shift scrambling. Otherwise, no scrambling is done.
  1078. Default is True.
  1079. bits : int, optional
  1080. Number of bits of the generator. Control the maximum number of points
  1081. that can be generated, which is ``2**bits``. Maximal value is 64.
  1082. It does not correspond to the return type, which is always
  1083. ``np.float64`` to prevent points from repeating themselves.
  1084. Default is None, which for backward compatibility, corresponds to 30.
  1085. .. versionadded:: 1.9.0
  1086. optimization : {None, "random-cd", "lloyd"}, optional
  1087. Whether to use an optimization scheme to improve the quality after
  1088. sampling. Note that this is a post-processing step that does not
  1089. guarantee that all properties of the sample will be conserved.
  1090. Default is None.
  1091. * ``random-cd``: random permutations of coordinates to lower the
  1092. centered discrepancy. The best sample based on the centered
  1093. discrepancy is constantly updated. Centered discrepancy-based
  1094. sampling shows better space-filling robustness toward 2D and 3D
  1095. subprojections compared to using other discrepancy measures.
  1096. * ``lloyd``: Perturb samples using a modified Lloyd-Max algorithm.
  1097. The process converges to equally spaced samples.
  1098. .. versionadded:: 1.10.0
  1099. seed : {None, int, `numpy.random.Generator`}, optional
  1100. If `seed` is an int or None, a new `numpy.random.Generator` is
  1101. created using ``np.random.default_rng(seed)``.
  1102. If `seed` is already a ``Generator`` instance, then the provided
  1103. instance is used.
  1104. Notes
  1105. -----
  1106. Sobol' sequences [1]_ provide :math:`n=2^m` low discrepancy points in
  1107. :math:`[0,1)^{d}`. Scrambling them [3]_ makes them suitable for singular
  1108. integrands, provides a means of error estimation, and can improve their
  1109. rate of convergence. The scrambling strategy which is implemented is a
  1110. (left) linear matrix scramble (LMS) followed by a digital random shift
  1111. (LMS+shift) [2]_.
  1112. There are many versions of Sobol' sequences depending on their
  1113. 'direction numbers'. This code uses direction numbers from [4]_. Hence,
  1114. the maximum number of dimension is 21201. The direction numbers have been
  1115. precomputed with search criterion 6 and can be retrieved at
  1116. https://web.maths.unsw.edu.au/~fkuo/sobol/.
  1117. .. warning::
  1118. Sobol' sequences are a quadrature rule and they lose their balance
  1119. properties if one uses a sample size that is not a power of 2, or skips
  1120. the first point, or thins the sequence [5]_.
  1121. If :math:`n=2^m` points are not enough then one should take :math:`2^M`
  1122. points for :math:`M>m`. When scrambling, the number R of independent
  1123. replicates does not have to be a power of 2.
  1124. Sobol' sequences are generated to some number :math:`B` of bits.
  1125. After :math:`2^B` points have been generated, the sequence would
  1126. repeat. Hence, an error is raised.
  1127. The number of bits can be controlled with the parameter `bits`.
  1128. References
  1129. ----------
  1130. .. [1] I. M. Sobol', "The distribution of points in a cube and the accurate
  1131. evaluation of integrals." Zh. Vychisl. Mat. i Mat. Phys., 7:784-802,
  1132. 1967.
  1133. .. [2] J. Matousek, "On the L2-discrepancy for anchored boxes."
  1134. J. of Complexity 14, 527-556, 1998.
  1135. .. [3] Art B. Owen, "Scrambling Sobol and Niederreiter-Xing points."
  1136. Journal of Complexity, 14(4):466-489, December 1998.
  1137. .. [4] S. Joe and F. Y. Kuo, "Constructing sobol sequences with better
  1138. two-dimensional projections." SIAM Journal on Scientific Computing,
  1139. 30(5):2635-2654, 2008.
  1140. .. [5] Art B. Owen, "On dropping the first Sobol' point."
  1141. :arxiv:`2008.08051`, 2020.
  1142. Examples
  1143. --------
  1144. Generate samples from a low discrepancy sequence of Sobol'.
  1145. >>> from scipy.stats import qmc
  1146. >>> sampler = qmc.Sobol(d=2, scramble=False)
  1147. >>> sample = sampler.random_base2(m=3)
  1148. >>> sample
  1149. array([[0. , 0. ],
  1150. [0.5 , 0.5 ],
  1151. [0.75 , 0.25 ],
  1152. [0.25 , 0.75 ],
  1153. [0.375, 0.375],
  1154. [0.875, 0.875],
  1155. [0.625, 0.125],
  1156. [0.125, 0.625]])
  1157. Compute the quality of the sample using the discrepancy criterion.
  1158. >>> qmc.discrepancy(sample)
  1159. 0.013882107204860938
  1160. To continue an existing design, extra points can be obtained
  1161. by calling again `random_base2`. Alternatively, you can skip some
  1162. points like:
  1163. >>> _ = sampler.reset()
  1164. >>> _ = sampler.fast_forward(4)
  1165. >>> sample_continued = sampler.random_base2(m=2)
  1166. >>> sample_continued
  1167. array([[0.375, 0.375],
  1168. [0.875, 0.875],
  1169. [0.625, 0.125],
  1170. [0.125, 0.625]])
  1171. Finally, samples can be scaled to bounds.
  1172. >>> l_bounds = [0, 2]
  1173. >>> u_bounds = [10, 5]
  1174. >>> qmc.scale(sample_continued, l_bounds, u_bounds)
  1175. array([[3.75 , 3.125],
  1176. [8.75 , 4.625],
  1177. [6.25 , 2.375],
  1178. [1.25 , 3.875]])
  1179. """
  1180. MAXDIM: ClassVar[int] = _MAXDIM
def __init__(
    self, d: IntNumber, *, scramble: bool = True,
    bits: Optional[IntNumber] = None, seed: SeedType = None,
    optimization: Optional[Literal["random-cd", "lloyd"]] = None
) -> None:
    """Initialize the Sobol' engine: load direction numbers, optionally
    scramble them, and precompute the first point of the sequence."""
    # Used in `scipy.integrate.qmc_quad`
    self._init_quad = {'d': d, 'scramble': True, 'bits': bits,
                       'optimization': optimization}

    super().__init__(d=d, optimization=optimization, seed=seed)
    if d > self.MAXDIM:
        raise ValueError(
            f"Maximum supported dimensionality is {self.MAXDIM}."
        )

    self.bits = bits
    self.dtype_i: type

    # Default of 30 bits is kept for backward compatibility.
    if self.bits is None:
        self.bits = 30

    # Pick the smallest unsigned integer dtype that can hold `bits` bits.
    if self.bits <= 32:
        self.dtype_i = np.uint32
    elif 32 < self.bits <= 64:
        self.dtype_i = np.uint64
    else:
        raise ValueError("Maximum supported 'bits' is 64")

    # At most 2**bits distinct points before the sequence would repeat.
    self.maxn = 2**self.bits

    # v is d x maxbit matrix of direction numbers, filled in place.
    self._sv: np.ndarray = np.zeros((d, self.bits), dtype=self.dtype_i)
    _initialize_v(self._sv, dim=d, bits=self.bits)

    if not scramble:
        # No digital shift: the first point is the origin.
        self._shift: np.ndarray = np.zeros(d, dtype=self.dtype_i)
    else:
        # scramble self._shift and self._sv (LMS + digital shift)
        self._scramble()

    # Current integer state of the recurrence, one value per dimension.
    self._quasi = self._shift.copy()

    # normalization constant with the largest possible number;
    # calculate in Python to not overflow int with 2**64
    self._scale = 1.0 / 2 ** self.bits

    self._first_point = (self._quasi * self._scale).reshape(1, -1)
    # explicit casting to float64
    self._first_point = self._first_point.astype(np.float64)
def _scramble(self) -> None:
    """Scramble the sequence using LMS+shift."""
    # Generate shift vector: for each dimension, draw `bits` random bits
    # and assemble them into one integer via powers of two (the digital
    # random shift).
    self._shift = np.dot(
        rng_integers(self.rng, 2, size=(self.d, self.bits),
                     dtype=self.dtype_i),
        2 ** np.arange(self.bits, dtype=self.dtype_i),
    )
    # Generate lower triangular matrices (stacked across dimensions):
    # one random binary lower-triangular matrix per dimension for the
    # (left) linear matrix scramble of the direction numbers.
    ltm = np.tril(rng_integers(self.rng, 2,
                               size=(self.d, self.bits, self.bits),
                               dtype=self.dtype_i))
    # Applies the LMS to self._sv in place (compiled helper).
    _cscramble(
        dim=self.d, bits=self.bits,  # type: ignore[arg-type]
        ltm=ltm, sv=self._sv
    )
def _random(
    self, n: IntNumber = 1, *, workers: IntNumber = 1
) -> np.ndarray:
    """Draw next point(s) in the Sobol' sequence.

    Parameters
    ----------
    n : int, optional
        Number of samples to generate in the parameter space. Default is 1.
    workers : int, optional
        Unused: Sobol' draws are generated sequentially; the parameter
        only exists to satisfy the `QMCEngine._random` interface.

    Returns
    -------
    sample : array_like (n, d)
        Sobol' sample.
    """
    sample: np.ndarray = np.empty((n, self.d), dtype=np.float64)

    if n == 0:
        return sample

    total_n = self.num_generated + n
    if total_n > self.maxn:
        # Past 2**bits points the sequence would repeat itself.
        msg = (
            f"At most 2**{self.bits}={self.maxn} distinct points can be "
            f"generated. {self.num_generated} points have been previously "
            f"generated, then: n={self.num_generated}+{n}={total_n}. "
        )
        if self.bits != 64:
            msg += "Consider increasing `bits`."
        raise ValueError(msg)

    if self.num_generated == 0:
        # verify n is 2**n (power-of-2 check via the n & (n-1) trick)
        if not (n & (n - 1) == 0):
            warnings.warn("The balance properties of Sobol' points require"
                          " n to be a power of 2.", stacklevel=2)

        if n == 1:
            sample = self._first_point
        else:
            # `_draw` advances from the current integer state `_quasi`,
            # so the first point (the shift) is prepended separately.
            _draw(
                n=n - 1, num_gen=self.num_generated, dim=self.d,
                scale=self._scale, sv=self._sv, quasi=self._quasi,
                sample=sample
            )
            sample = np.concatenate(
                [self._first_point, sample]
            )[:n]  # type: ignore[misc]
    else:
        # Continuing a previous draw: num_gen is offset by one because
        # the first emitted point did not advance `_quasi`.
        _draw(
            n=n, num_gen=self.num_generated - 1, dim=self.d,
            scale=self._scale, sv=self._sv, quasi=self._quasi,
            sample=sample
        )

    return sample
  1285. def random_base2(self, m: IntNumber) -> np.ndarray:
  1286. """Draw point(s) from the Sobol' sequence.
  1287. This function draws :math:`n=2^m` points in the parameter space
  1288. ensuring the balance properties of the sequence.
  1289. Parameters
  1290. ----------
  1291. m : int
  1292. Logarithm in base 2 of the number of samples; i.e., n = 2^m.
  1293. Returns
  1294. -------
  1295. sample : array_like (n, d)
  1296. Sobol' sample.
  1297. """
  1298. n = 2 ** m
  1299. total_n = self.num_generated + n
  1300. if not (total_n & (total_n - 1) == 0):
  1301. raise ValueError("The balance properties of Sobol' points require "
  1302. "n to be a power of 2. {0} points have been "
  1303. "previously generated, then: n={0}+2**{1}={2}. "
  1304. "If you still want to do this, the function "
  1305. "'Sobol.random()' can be used."
  1306. .format(self.num_generated, m, total_n))
  1307. return self.random(n)
  1308. def reset(self) -> Sobol:
  1309. """Reset the engine to base state.
  1310. Returns
  1311. -------
  1312. engine : Sobol
  1313. Engine reset to its base state.
  1314. """
  1315. super().reset()
  1316. self._quasi = self._shift.copy()
  1317. return self
  1318. def fast_forward(self, n: IntNumber) -> Sobol:
  1319. """Fast-forward the sequence by `n` positions.
  1320. Parameters
  1321. ----------
  1322. n : int
  1323. Number of points to skip in the sequence.
  1324. Returns
  1325. -------
  1326. engine : Sobol
  1327. The fast-forwarded engine.
  1328. """
  1329. if self.num_generated == 0:
  1330. _fast_forward(
  1331. n=n - 1, num_gen=self.num_generated, dim=self.d,
  1332. sv=self._sv, quasi=self._quasi
  1333. )
  1334. else:
  1335. _fast_forward(
  1336. n=n, num_gen=self.num_generated - 1, dim=self.d,
  1337. sv=self._sv, quasi=self._quasi
  1338. )
  1339. self.num_generated += n
  1340. return self
class PoissonDisk(QMCEngine):
    """Poisson disk sampling.

    Parameters
    ----------
    d : int
        Dimension of the parameter space.
    radius : float
        Minimal distance to keep between points when sampling new candidates.
    hypersphere : {"volume", "surface"}, optional
        Sampling strategy to generate potential candidates to be added in the
        final sample. Default is "volume".

        * ``volume``: original Bridson algorithm as described in [1]_.
          New candidates are sampled *within* the hypersphere.
        * ``surface``: only sample the surface of the hypersphere.
    ncandidates : int
        Number of candidates to sample per iteration. More candidates result
        in a denser sampling as more candidates can be accepted per iteration.
    optimization : {None, "random-cd", "lloyd"}, optional
        Whether to use an optimization scheme to improve the quality after
        sampling. Note that this is a post-processing step that does not
        guarantee that all properties of the sample will be conserved.
        Default is None.

        * ``random-cd``: random permutations of coordinates to lower the
          centered discrepancy. The best sample based on the centered
          discrepancy is constantly updated. Centered discrepancy-based
          sampling shows better space-filling robustness toward 2D and 3D
          subprojections compared to using other discrepancy measures.
        * ``lloyd``: Perturb samples using a modified Lloyd-Max algorithm.
          The process converges to equally spaced samples.

        .. versionadded:: 1.10.0
    seed : {None, int, `numpy.random.Generator`}, optional
        If `seed` is an int or None, a new `numpy.random.Generator` is
        created using ``np.random.default_rng(seed)``.
        If `seed` is already a ``Generator`` instance, then the provided
        instance is used.

    Notes
    -----
    Poisson disk sampling is an iterative sampling strategy. Starting from
    a seed sample, `ncandidates` are sampled in the hypersphere
    surrounding the seed. Candidates below a certain `radius` or outside the
    domain are rejected. New samples are added in a pool of sample seed. The
    process stops when the pool is empty or when the number of required
    samples is reached.

    The maximum number of points that a sample can contain is directly linked
    to the `radius`. As the dimension of the space increases, a higher radius
    spreads the points further and helps overcome the curse of dimensionality.
    See the :ref:`quasi monte carlo tutorial <quasi-monte-carlo>` for more
    details.

    .. warning::

       The algorithm is more suitable for low dimensions and sampling size
       due to its iterative nature and memory requirements.
       Selecting a small radius with a high dimension would
       mean that the space could contain more samples than using lower
       dimension or a bigger radius.

    Some code taken from [2]_, written consent given on 31.03.2021
    by the original author, Shamis, for free use in SciPy under
    the 3-clause BSD.

    References
    ----------
    .. [1] Robert Bridson, "Fast Poisson Disk Sampling in Arbitrary
       Dimensions." SIGGRAPH, 2007.
    .. [2] `StackOverflow <https://stackoverflow.com/questions/66047540>`__.

    Examples
    --------
    Generate a 2D sample using a `radius` of 0.2.

    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from matplotlib.collections import PatchCollection
    >>> from scipy.stats import qmc
    >>>
    >>> rng = np.random.default_rng()
    >>> radius = 0.2
    >>> engine = qmc.PoissonDisk(d=2, radius=radius, seed=rng)
    >>> sample = engine.random(20)

    Visualizing the 2D sample and showing that no points are closer than
    `radius`. ``radius/2`` is used to visualize non-intersecting circles.
    If two samples are exactly at `radius` from each other, then their circle
    of radius ``radius/2`` will touch.

    >>> fig, ax = plt.subplots()
    >>> _ = ax.scatter(sample[:, 0], sample[:, 1])
    >>> circles = [plt.Circle((xi, yi), radius=radius/2, fill=False)
    ...            for xi, yi in sample]
    >>> collection = PatchCollection(circles, match_original=True)
    >>> ax.add_collection(collection)
    >>> _ = ax.set(aspect='equal', xlabel=r'$x_1$', ylabel=r'$x_2$',
    ...            xlim=[0, 1], ylim=[0, 1])
    >>> plt.show()

    Such visualization can be seen as circle packing: how many circles can
    we put in the space. It is an NP-hard problem. The method `fill_space`
    can be used to add samples until no more samples can be added. This is
    a hard problem and parameters may need to be adjusted manually. Beware of
    the dimension: as the dimensionality increases, the number of samples
    required to fill the space increases exponentially
    (curse-of-dimensionality).

    """

    def __init__(
        self,
        d: IntNumber,
        *,
        radius: DecimalNumber = 0.05,
        hypersphere: Literal["volume", "surface"] = "volume",
        ncandidates: IntNumber = 30,
        optimization: Optional[Literal["random-cd", "lloyd"]] = None,
        seed: SeedType = None
    ) -> None:
        # Used in `scipy.integrate.qmc_quad`
        self._init_quad = {'d': d, 'radius': radius,
                           'hypersphere': hypersphere,
                           'ncandidates': ncandidates,
                           'optimization': optimization}
        super().__init__(d=d, optimization=optimization, seed=seed)

        hypersphere_sample = {
            "volume": self._hypersphere_volume_sample,
            "surface": self._hypersphere_surface_sample
        }

        try:
            self.hypersphere_method = hypersphere_sample[hypersphere]
        except KeyError as exc:
            message = (
                f"{hypersphere!r} is not a valid hypersphere sampling"
                f" method. It must be one of {set(hypersphere_sample)!r}")
            raise ValueError(message) from exc

        # size of the sphere from which the samples are drawn relative to the
        # size of a disk (radius)
        # for the surface sampler, all new points are almost exactly 1 radius
        # away from at least one existing sample +eps to avoid rejection
        self.radius_factor = 2 if hypersphere == "volume" else 1.001
        self.radius = radius
        self.radius_squared = self.radius**2

        # sample to generate per iteration in the hypersphere around center
        self.ncandidates = ncandidates

        # errstate silences the divide-by-zero warning that a zero radius
        # would otherwise trigger (cell_size becomes inf)
        with np.errstate(divide='ignore'):
            self.cell_size = self.radius / np.sqrt(self.d)
            self.grid_size = (
                np.ceil(np.ones(self.d) / self.cell_size)
            ).astype(int)

        self._initialize_grid_pool()

    def _initialize_grid_pool(self) -> None:
        """Sampling pool and sample grid."""
        # pool of accepted samples still eligible as centers for candidates
        self.sample_pool = []
        # Positions of cells
        # n-dim value for each grid cell
        # float32 — presumably to limit memory for large grids; TODO confirm
        self.sample_grid = np.empty(
            np.append(self.grid_size, self.d),
            dtype=np.float32
        )
        # Initialise empty cells with NaNs
        self.sample_grid.fill(np.nan)

    def _random(
        self, n: IntNumber = 1, *, workers: IntNumber = 1
    ) -> np.ndarray:
        """Draw `n` in the interval ``[0, 1]``.

        Note that it can return fewer samples if the space is full.
        See the note section of the class.

        Parameters
        ----------
        n : int, optional
            Number of samples to generate in the parameter space.
            Default is 1.
        workers : int, optional
            Accepted for interface compatibility; not used in this method.

        Returns
        -------
        sample : array_like (n, d)
            QMC sample.
        """
        if n == 0 or self.d == 0:
            return np.empty((n, self.d))

        def in_limits(sample: np.ndarray) -> bool:
            # candidate must stay inside the unit hypercube
            return (sample.max() <= 1.) and (sample.min() >= 0.)

        def in_neighborhood(candidate: np.ndarray, n: int = 2) -> bool:
            """
            Check if there are samples closer than ``radius_squared`` to the
            `candidate` sample.

            NOTE(review): this `n` is the neighborhood half-width in grid
            cells and shadows the outer sample-count `n`.
            """
            indices = (candidate / self.cell_size).astype(int)
            ind_min = np.maximum(indices - n, np.zeros(self.d, dtype=int))
            ind_max = np.minimum(indices + n + 1, self.grid_size)

            # Check if the center cell is empty
            if not np.isnan(self.sample_grid[tuple(indices)][0]):
                return True

            a = [slice(ind_min[i], ind_max[i]) for i in range(self.d)]

            # guards against: invalid value encountered in less as we are
            # comparing with nan and returns False. Which is wanted.
            with np.errstate(invalid='ignore'):
                if np.any(
                    np.sum(
                        np.square(candidate - self.sample_grid[tuple(a)]),
                        axis=self.d
                    ) < self.radius_squared
                ):
                    return True

            return False

        def add_sample(candidate: np.ndarray) -> None:
            # accepted candidate becomes a future center and fills its cell
            self.sample_pool.append(candidate)
            indices = (candidate / self.cell_size).astype(int)
            self.sample_grid[tuple(indices)] = candidate
            curr_sample.append(candidate)

        curr_sample: List[np.ndarray] = []

        if len(self.sample_pool) == 0:
            # the pool is being initialized with a single random sample
            add_sample(self.rng.random(self.d))
            num_drawn = 1
        else:
            num_drawn = 0

        # exhaust sample pool to have up to n sample
        while len(self.sample_pool) and num_drawn < n:
            # select a sample from the available pool
            idx_center = rng_integers(self.rng, len(self.sample_pool))
            center = self.sample_pool[idx_center]
            del self.sample_pool[idx_center]

            # generate candidates around the center sample
            candidates = self.hypersphere_method(
                center, self.radius * self.radius_factor, self.ncandidates
            )

            # keep candidates that satisfy some conditions
            for candidate in candidates:
                if in_limits(candidate) and not in_neighborhood(candidate):
                    add_sample(candidate)

                    num_drawn += 1
                    if num_drawn >= n:
                        break

        self.num_generated += num_drawn
        return np.array(curr_sample)

    def fill_space(self) -> np.ndarray:
        """Draw ``n`` samples in the interval ``[0, 1]``.

        Unlike `random`, this method will try to add points until
        the space is full. Depending on ``candidates`` (and to a lesser
        extent other parameters), some empty areas can still be present in
        the sample.

        .. warning::

           This can be extremely slow in high dimensions or if the
           ``radius`` is very small with respect to the dimensionality.

        Returns
        -------
        sample : array_like (n, d)
            QMC sample.
        """
        return self.random(np.inf)  # type: ignore[arg-type]

    def reset(self) -> PoissonDisk:
        """Reset the engine to base state.

        Returns
        -------
        engine : PoissonDisk
            Engine reset to its base state.
        """
        super().reset()
        self._initialize_grid_pool()
        return self

    def _hypersphere_volume_sample(
        self, center: np.ndarray, radius: DecimalNumber,
        candidates: IntNumber = 1
    ) -> np.ndarray:
        """Uniform sampling within hypersphere."""
        # should remove samples within r/2
        x = self.rng.standard_normal(size=(candidates, self.d))
        ssq = np.sum(x**2, axis=1)
        # radial rescaling of Gaussian directions to be uniform in volume
        fr = radius * gammainc(self.d/2, ssq/2)**(1/self.d) / np.sqrt(ssq)
        fr_tiled = np.tile(
            fr.reshape(-1, 1), (1, self.d)  # type: ignore[arg-type]
        )
        p = center + np.multiply(x, fr_tiled)
        return p

    def _hypersphere_surface_sample(
        self, center: np.ndarray, radius: DecimalNumber,
        candidates: IntNumber = 1
    ) -> np.ndarray:
        """Uniform sampling on the hypersphere's surface."""
        # normalized Gaussian directions are uniform on the sphere
        vec = self.rng.standard_normal(size=(candidates, self.d))
        vec /= np.linalg.norm(vec, axis=1)[:, None]
        p = center + np.multiply(vec, radius)
        return p
  1609. class MultivariateNormalQMC:
  1610. r"""QMC sampling from a multivariate Normal :math:`N(\mu, \Sigma)`.
  1611. Parameters
  1612. ----------
  1613. mean : array_like (d,)
  1614. The mean vector. Where ``d`` is the dimension.
  1615. cov : array_like (d, d), optional
  1616. The covariance matrix. If omitted, use `cov_root` instead.
  1617. If both `cov` and `cov_root` are omitted, use the identity matrix.
  1618. cov_root : array_like (d, d'), optional
  1619. A root decomposition of the covariance matrix, where ``d'`` may be less
  1620. than ``d`` if the covariance is not full rank. If omitted, use `cov`.
  1621. inv_transform : bool, optional
  1622. If True, use inverse transform instead of Box-Muller. Default is True.
  1623. engine : QMCEngine, optional
  1624. Quasi-Monte Carlo engine sampler. If None, `Sobol` is used.
  1625. seed : {None, int, `numpy.random.Generator`}, optional
  1626. Used only if `engine` is None.
  1627. If `seed` is an int or None, a new `numpy.random.Generator` is
  1628. created using ``np.random.default_rng(seed)``.
  1629. If `seed` is already a ``Generator`` instance, then the provided
  1630. instance is used.
  1631. Examples
  1632. --------
  1633. >>> import matplotlib.pyplot as plt
  1634. >>> from scipy.stats import qmc
  1635. >>> dist = qmc.MultivariateNormalQMC(mean=[0, 5], cov=[[1, 0], [0, 1]])
  1636. >>> sample = dist.random(512)
  1637. >>> _ = plt.scatter(sample[:, 0], sample[:, 1])
  1638. >>> plt.show()
  1639. """
  1640. def __init__(
  1641. self, mean: npt.ArrayLike, cov: Optional[npt.ArrayLike] = None, *,
  1642. cov_root: Optional[npt.ArrayLike] = None,
  1643. inv_transform: bool = True,
  1644. engine: Optional[QMCEngine] = None,
  1645. seed: SeedType = None
  1646. ) -> None:
  1647. mean = np.array(mean, copy=False, ndmin=1)
  1648. d = mean.shape[0]
  1649. if cov is not None:
  1650. # covariance matrix provided
  1651. cov = np.array(cov, copy=False, ndmin=2)
  1652. # check for square/symmetric cov matrix and mean vector has the
  1653. # same d
  1654. if not mean.shape[0] == cov.shape[0]:
  1655. raise ValueError("Dimension mismatch between mean and "
  1656. "covariance.")
  1657. if not np.allclose(cov, cov.transpose()):
  1658. raise ValueError("Covariance matrix is not symmetric.")
  1659. # compute Cholesky decomp; if it fails, do the eigen decomposition
  1660. try:
  1661. cov_root = np.linalg.cholesky(cov).transpose()
  1662. except np.linalg.LinAlgError:
  1663. eigval, eigvec = np.linalg.eigh(cov)
  1664. if not np.all(eigval >= -1.0e-8):
  1665. raise ValueError("Covariance matrix not PSD.")
  1666. eigval = np.clip(eigval, 0.0, None)
  1667. cov_root = (eigvec * np.sqrt(eigval)).transpose()
  1668. elif cov_root is not None:
  1669. # root decomposition provided
  1670. cov_root = np.atleast_2d(cov_root)
  1671. if not mean.shape[0] == cov_root.shape[0]:
  1672. raise ValueError("Dimension mismatch between mean and "
  1673. "covariance.")
  1674. else:
  1675. # corresponds to identity covariance matrix
  1676. cov_root = None
  1677. self._inv_transform = inv_transform
  1678. if not inv_transform:
  1679. # to apply Box-Muller, we need an even number of dimensions
  1680. engine_dim = 2 * math.ceil(d / 2)
  1681. else:
  1682. engine_dim = d
  1683. if engine is None:
  1684. self.engine = Sobol(
  1685. d=engine_dim, scramble=True, bits=30, seed=seed
  1686. ) # type: QMCEngine
  1687. elif isinstance(engine, QMCEngine):
  1688. if engine.d != engine_dim:
  1689. raise ValueError("Dimension of `engine` must be consistent"
  1690. " with dimensions of mean and covariance."
  1691. " If `inv_transform` is False, it must be"
  1692. " an even number.")
  1693. self.engine = engine
  1694. else:
  1695. raise ValueError("`engine` must be an instance of "
  1696. "`scipy.stats.qmc.QMCEngine` or `None`.")
  1697. self._mean = mean
  1698. self._corr_matrix = cov_root
  1699. self._d = d
  1700. def random(self, n: IntNumber = 1) -> np.ndarray:
  1701. """Draw `n` QMC samples from the multivariate Normal.
  1702. Parameters
  1703. ----------
  1704. n : int, optional
  1705. Number of samples to generate in the parameter space. Default is 1.
  1706. Returns
  1707. -------
  1708. sample : array_like (n, d)
  1709. Sample.
  1710. """
  1711. base_samples = self._standard_normal_samples(n)
  1712. return self._correlate(base_samples)
  1713. def _correlate(self, base_samples: np.ndarray) -> np.ndarray:
  1714. if self._corr_matrix is not None:
  1715. return base_samples @ self._corr_matrix + self._mean
  1716. else:
  1717. # avoid multiplying with identity here
  1718. return base_samples + self._mean
  1719. def _standard_normal_samples(self, n: IntNumber = 1) -> np.ndarray:
  1720. """Draw `n` QMC samples from the standard Normal :math:`N(0, I_d)`.
  1721. Parameters
  1722. ----------
  1723. n : int, optional
  1724. Number of samples to generate in the parameter space. Default is 1.
  1725. Returns
  1726. -------
  1727. sample : array_like (n, d)
  1728. Sample.
  1729. """
  1730. # get base samples
  1731. samples = self.engine.random(n)
  1732. if self._inv_transform:
  1733. # apply inverse transform
  1734. # (values to close to 0/1 result in inf values)
  1735. return stats.norm.ppf(0.5 + (1 - 1e-10) * (samples - 0.5)) # type: ignore[attr-defined]
  1736. else:
  1737. # apply Box-Muller transform (note: indexes starting from 1)
  1738. even = np.arange(0, samples.shape[-1], 2)
  1739. Rs = np.sqrt(-2 * np.log(samples[:, even]))
  1740. thetas = 2 * math.pi * samples[:, 1 + even]
  1741. cos = np.cos(thetas)
  1742. sin = np.sin(thetas)
  1743. transf_samples = np.stack([Rs * cos, Rs * sin],
  1744. -1).reshape(n, -1)
  1745. # make sure we only return the number of dimension requested
  1746. return transf_samples[:, : self._d]
  1747. class MultinomialQMC:
  1748. r"""QMC sampling from a multinomial distribution.
  1749. Parameters
  1750. ----------
  1751. pvals : array_like (k,)
  1752. Vector of probabilities of size ``k``, where ``k`` is the number
  1753. of categories. Elements must be non-negative and sum to 1.
  1754. n_trials : int
  1755. Number of trials.
  1756. engine : QMCEngine, optional
  1757. Quasi-Monte Carlo engine sampler. If None, `Sobol` is used.
  1758. seed : {None, int, `numpy.random.Generator`}, optional
  1759. Used only if `engine` is None.
  1760. If `seed` is an int or None, a new `numpy.random.Generator` is
  1761. created using ``np.random.default_rng(seed)``.
  1762. If `seed` is already a ``Generator`` instance, then the provided
  1763. instance is used.
  1764. Examples
  1765. --------
  1766. Let's define 3 categories and for a given sample, the sum of the trials
  1767. of each category is 8. The number of trials per category is determined
  1768. by the `pvals` associated to each category.
  1769. Then, we sample this distribution 64 times.
  1770. >>> import matplotlib.pyplot as plt
  1771. >>> from scipy.stats import qmc
  1772. >>> dist = qmc.MultinomialQMC(
  1773. ... pvals=[0.2, 0.4, 0.4], n_trials=10, engine=qmc.Halton(d=1)
  1774. ... )
  1775. >>> sample = dist.random(64)
  1776. We can plot the sample and verify that the median of number of trials
  1777. for each category is following the `pvals`. That would be
  1778. ``pvals * n_trials = [2, 4, 4]``.
  1779. >>> fig, ax = plt.subplots()
  1780. >>> ax.yaxis.get_major_locator().set_params(integer=True)
  1781. >>> _ = ax.boxplot(sample)
  1782. >>> ax.set(xlabel="Categories", ylabel="Trials")
  1783. >>> plt.show()
  1784. """
  1785. def __init__(
  1786. self, pvals: npt.ArrayLike, n_trials: IntNumber,
  1787. *, engine: Optional[QMCEngine] = None,
  1788. seed: SeedType = None
  1789. ) -> None:
  1790. self.pvals = np.array(pvals, copy=False, ndmin=1)
  1791. if np.min(pvals) < 0:
  1792. raise ValueError('Elements of pvals must be non-negative.')
  1793. if not np.isclose(np.sum(pvals), 1):
  1794. raise ValueError('Elements of pvals must sum to 1.')
  1795. self.n_trials = n_trials
  1796. if engine is None:
  1797. self.engine = Sobol(
  1798. d=1, scramble=True, bits=30, seed=seed
  1799. ) # type: QMCEngine
  1800. elif isinstance(engine, QMCEngine):
  1801. if engine.d != 1:
  1802. raise ValueError("Dimension of `engine` must be 1.")
  1803. self.engine = engine
  1804. else:
  1805. raise ValueError("`engine` must be an instance of "
  1806. "`scipy.stats.qmc.QMCEngine` or `None`.")
  1807. def random(self, n: IntNumber = 1) -> np.ndarray:
  1808. """Draw `n` QMC samples from the multinomial distribution.
  1809. Parameters
  1810. ----------
  1811. n : int, optional
  1812. Number of samples to generate in the parameter space. Default is 1.
  1813. Returns
  1814. -------
  1815. samples : array_like (n, pvals)
  1816. Sample.
  1817. """
  1818. sample = np.empty((n, len(self.pvals)))
  1819. for i in range(n):
  1820. base_draws = self.engine.random(self.n_trials).ravel()
  1821. p_cumulative = np.empty_like(self.pvals, dtype=float)
  1822. _fill_p_cumulative(np.array(self.pvals, dtype=float), p_cumulative)
  1823. sample_ = np.zeros_like(self.pvals, dtype=int)
  1824. _categorize(base_draws, p_cumulative, sample_)
  1825. sample[i] = sample_
  1826. return sample
  1827. def _select_optimizer(
  1828. optimization: Optional[Literal["random-cd", "lloyd"]], config: Dict
  1829. ) -> Optional[Callable]:
  1830. """A factory for optimization methods."""
  1831. optimization_method: Dict[str, Callable] = {
  1832. "random-cd": _random_cd,
  1833. "lloyd": _lloyd_centroidal_voronoi_tessellation
  1834. }
  1835. optimizer: Optional[partial]
  1836. if optimization is not None:
  1837. try:
  1838. optimization = optimization.lower() # type: ignore[assignment]
  1839. optimizer_ = optimization_method[optimization]
  1840. except KeyError as exc:
  1841. message = (f"{optimization!r} is not a valid optimization"
  1842. f" method. It must be one of"
  1843. f" {set(optimization_method)!r}")
  1844. raise ValueError(message) from exc
  1845. # config
  1846. optimizer = partial(optimizer_, **config)
  1847. else:
  1848. optimizer = None
  1849. return optimizer
  1850. def _random_cd(
  1851. best_sample: np.ndarray, n_iters: int, n_nochange: int, rng: GeneratorType,
  1852. **kwargs: Dict
  1853. ) -> np.ndarray:
  1854. """Optimal LHS on CD.
  1855. Create a base LHS and do random permutations of coordinates to
  1856. lower the centered discrepancy.
  1857. Because it starts with a normal LHS, it also works with the
  1858. `centered` keyword argument.
  1859. Two stopping criterion are used to stop the algorithm: at most,
  1860. `n_iters` iterations are performed; or if there is no improvement
  1861. for `n_nochange` consecutive iterations.
  1862. """
  1863. del kwargs # only use keywords which are defined, needed by factory
  1864. n, d = best_sample.shape
  1865. if d == 0 or n == 0:
  1866. return np.empty((n, d))
  1867. best_disc = discrepancy(best_sample)
  1868. if n == 1:
  1869. return best_sample
  1870. bounds = ([0, d - 1],
  1871. [0, n - 1],
  1872. [0, n - 1])
  1873. n_nochange_ = 0
  1874. n_iters_ = 0
  1875. while n_nochange_ < n_nochange and n_iters_ < n_iters:
  1876. n_iters_ += 1
  1877. col = rng_integers(rng, *bounds[0], endpoint=True) # type: ignore[misc]
  1878. row_1 = rng_integers(rng, *bounds[1], endpoint=True) # type: ignore[misc]
  1879. row_2 = rng_integers(rng, *bounds[2], endpoint=True) # type: ignore[misc]
  1880. disc = _perturb_discrepancy(best_sample,
  1881. row_1, row_2, col,
  1882. best_disc)
  1883. if disc < best_disc:
  1884. best_sample[row_1, col], best_sample[row_2, col] = (
  1885. best_sample[row_2, col], best_sample[row_1, col])
  1886. best_disc = disc
  1887. n_nochange_ = 0
  1888. else:
  1889. n_nochange_ += 1
  1890. return best_sample
  1891. def _l1_norm(sample: np.ndarray) -> float:
  1892. return distance.pdist(sample, 'cityblock').min()
  1893. def _lloyd_iteration(
  1894. sample: np.ndarray,
  1895. decay: float,
  1896. qhull_options: str
  1897. ) -> np.ndarray:
  1898. """Lloyd-Max algorithm iteration.
  1899. Based on the implementation of Stéfan van der Walt:
  1900. https://github.com/stefanv/lloyd
  1901. which is:
  1902. Copyright (c) 2021-04-21 Stéfan van der Walt
  1903. https://github.com/stefanv/lloyd
  1904. MIT License
  1905. Parameters
  1906. ----------
  1907. sample : array_like (n, d)
  1908. The sample to iterate on.
  1909. decay : float
  1910. Relaxation decay. A positive value would move the samples toward
  1911. their centroid, and negative value would move them away.
  1912. 1 would move the samples to their centroid.
  1913. qhull_options : str
  1914. Additional options to pass to Qhull. See Qhull manual
  1915. for details. (Default: "Qbb Qc Qz Qj Qx" for ndim > 4 and
  1916. "Qbb Qc Qz Qj" otherwise.)
  1917. Returns
  1918. -------
  1919. sample : array_like (n, d)
  1920. The sample after an iteration of Lloyd's algorithm.
  1921. """
  1922. new_sample = np.empty_like(sample)
  1923. voronoi = Voronoi(sample, qhull_options=qhull_options)
  1924. for ii, idx in enumerate(voronoi.point_region):
  1925. # the region is a series of indices into self.voronoi.vertices
  1926. # remove samples at infinity, designated by index -1
  1927. region = [i for i in voronoi.regions[idx] if i != -1]
  1928. # get the vertices for this region
  1929. verts = voronoi.vertices[region]
  1930. # clipping would be wrong, we need to intersect
  1931. # verts = np.clip(verts, 0, 1)
  1932. # move samples towards centroids:
  1933. # Centroid in n-D is the mean for uniformly distributed nodes
  1934. # of a geometry.
  1935. centroid = np.mean(verts, axis=0)
  1936. new_sample[ii] = sample[ii] + (centroid - sample[ii]) * decay
  1937. # only update sample to centroid within the region
  1938. is_valid = np.all(np.logical_and(new_sample >= 0, new_sample <= 1), axis=1)
  1939. sample[is_valid] = new_sample[is_valid]
  1940. return sample
def _lloyd_centroidal_voronoi_tessellation(
    sample: npt.ArrayLike,
    *,
    tol: DecimalNumber = 1e-5,
    maxiter: IntNumber = 10,
    qhull_options: Optional[str] = None,
    **kwargs: Dict
) -> np.ndarray:
    """Approximate Centroidal Voronoi Tessellation.

    Perturb samples in N-dimensions using Lloyd-Max algorithm.

    Parameters
    ----------
    sample : array_like (n, d)
        The sample to iterate on. With ``n`` the number of samples and ``d``
        the dimension. Samples must be in :math:`[0, 1]^d`, with ``d>=2``.
    tol : float, optional
        Tolerance for termination. If the min of the L1-norm over the
        samples changes less than `tol`, it stops the algorithm.
        Default is 1e-5.
    maxiter : int, optional
        Maximum number of iterations. It will stop the algorithm even if
        `tol` is above the threshold.
        Too many iterations tend to cluster the samples as a hypersphere.
        Default is 10.
    qhull_options : str, optional
        Additional options to pass to Qhull. See Qhull manual
        for details. (Default: "Qbb Qc Qz Qj Qx" for ndim > 4 and
        "Qbb Qc Qz Qj" otherwise.)

    Returns
    -------
    sample : array_like (n, d)
        The sample after being processed by Lloyd-Max algorithm.

    Notes
    -----
    Lloyd-Max algorithm is an iterative process with the purpose of
    improving the dispersion of samples. For given sample: (i) compute a
    Voronoi Tessellation; (ii) find the centroid of each Voronoi cell;
    (iii) move the samples toward the centroid of their respective cell.
    See [1]_, [2]_.

    A relaxation factor is used to control how fast samples can move at
    each iteration. This factor is starting at 2 and ending at 1 after
    `maxiter` following an exponential decay.

    The process converges to equally spaced samples. It implies that
    measures like the discrepancy could suffer from too many iterations.
    On the other hand, L1 and L2 distances should improve. This is
    especially true with QMC methods which tend to favor the discrepancy
    over other criteria.

    .. note::

        The current implementation does not intersect the Voronoi
        Tessellation with the boundaries. This implies that for a low
        number of samples, empirically below 20, no Voronoi cell is
        touching the boundaries. Hence, samples cannot be moved close
        to the boundaries.

        Further improvements could consider the samples at infinity so
        that all boundaries are segments of some Voronoi cells. This
        would fix the computation of the centroid position.

    .. warning::

       The Voronoi Tessellation step is expensive and quickly becomes
       intractable with dimensions as low as 10 even for a sample
       of size as low as 1000.

    .. versionadded:: 1.9.0

    References
    ----------
    .. [1] Lloyd. "Least Squares Quantization in PCM".
       IEEE Transactions on Information Theory, 1982.
    .. [2] Max J. "Quantizing for minimum distortion".
       IEEE Transactions on Information Theory, 1960.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.spatial import distance
    >>> rng = np.random.default_rng()
    >>> sample = rng.random((128, 2))

    .. note::

        The samples need to be in :math:`[0, 1]^d`.
        `scipy.stats.qmc.scale` can be used to scale the samples from
        their original bounds to :math:`[0, 1]^d`. And back to their
        original bounds.

    Compute the quality of the sample using the L1 criterion.

    >>> def l1_norm(sample):
    ...    return distance.pdist(sample, 'cityblock').min()

    >>> l1_norm(sample)
    0.00161...  # random

    Now process the sample using Lloyd's algorithm and check the
    improvement on the L1. The value should increase.

    >>> sample = _lloyd_centroidal_voronoi_tessellation(sample)
    >>> l1_norm(sample)
    0.0278...  # random

    """
    del kwargs  # only use keywords which are defined, needed by factory

    sample = np.asarray(sample).copy()

    if not sample.ndim == 2:
        raise ValueError('`sample` is not a 2D array')

    if not sample.shape[1] >= 2:
        raise ValueError('`sample` dimension is not >= 2')

    # Checking that sample is within the hypercube
    if (sample.max() > 1.) or (sample.min() < 0.):
        raise ValueError('`sample` is not in unit hypercube')

    if qhull_options is None:
        qhull_options = 'Qbb Qc Qz QJ'

        if sample.shape[1] >= 5:
            qhull_options += ' Qx'

    # Fit an exponential to be 2 at 0 and 1 at `maxiter`.
    # The decay is used for relaxation.
    # analytical solution for y=exp(-maxiter/x) - 0.1
    root = -maxiter / np.log(0.1)
    decay = [np.exp(-x / root)+0.9 for x in range(maxiter)]

    l1_old = _l1_norm(sample=sample)

    # iterate until the minimum pairwise L1 distance stabilizes (within
    # `tol`) or `maxiter` is reached
    for i in range(maxiter):
        sample = _lloyd_iteration(
                sample=sample, decay=decay[i],
                qhull_options=qhull_options,
        )

        l1_new = _l1_norm(sample=sample)

        if abs(l1_new - l1_old) < tol:
            break
        else:
            l1_old = l1_new

    return sample
  2055. def _validate_workers(workers: IntNumber = 1) -> IntNumber:
  2056. """Validate `workers` based on platform and value.
  2057. Parameters
  2058. ----------
  2059. workers : int, optional
  2060. Number of workers to use for parallel processing. If -1 is
  2061. given all CPU threads are used. Default is 1.
  2062. Returns
  2063. -------
  2064. Workers : int
  2065. Number of CPU used by the algorithm
  2066. """
  2067. workers = int(workers)
  2068. if workers == -1:
  2069. workers = os.cpu_count() # type: ignore[assignment]
  2070. if workers is None:
  2071. raise NotImplementedError(
  2072. "Cannot determine the number of cpus using os.cpu_count(), "
  2073. "cannot use -1 for the number of workers"
  2074. )
  2075. elif workers <= 0:
  2076. raise ValueError(f"Invalid number of workers: {workers}, must be -1 "
  2077. "or > 0")
  2078. return workers
  2079. def _validate_bounds(
  2080. l_bounds: npt.ArrayLike, u_bounds: npt.ArrayLike, d: int
  2081. ) -> Tuple[np.ndarray, ...]:
  2082. """Bounds input validation.
  2083. Parameters
  2084. ----------
  2085. l_bounds, u_bounds : array_like (d,)
  2086. Lower and upper bounds.
  2087. d : int
  2088. Dimension to use for broadcasting.
  2089. Returns
  2090. -------
  2091. l_bounds, u_bounds : array_like (d,)
  2092. Lower and upper bounds.
  2093. """
  2094. try:
  2095. lower = np.broadcast_to(l_bounds, d)
  2096. upper = np.broadcast_to(u_bounds, d)
  2097. except ValueError as exc:
  2098. msg = ("'l_bounds' and 'u_bounds' must be broadcastable and respect"
  2099. " the sample dimension")
  2100. raise ValueError(msg) from exc
  2101. if not np.all(lower < upper):
  2102. raise ValueError("Bounds are not consistent 'l_bounds' < 'u_bounds'")
  2103. return lower, upper