- import sys
- import pytest
- import textwrap
- import subprocess
- import numpy as np
- import numpy.core._multiarray_tests as _multiarray_tests
- from numpy import array, arange, nditer, all
- from numpy.testing import (
- assert_, assert_equal, assert_array_equal, assert_raises,
- IS_WASM, HAS_REFCOUNT, suppress_warnings, break_cycles
- )
- def iter_multi_index(i):
- ret = []
- while not i.finished:
- ret.append(i.multi_index)
- i.iternext()
- return ret
- def iter_indices(i):
- ret = []
- while not i.finished:
- ret.append(i.index)
- i.iternext()
- return ret
- def iter_iterindices(i):
- ret = []
- while not i.finished:
- ret.append(i.iterindex)
- i.iternext()
- return ret
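- # Illustrative usage sketch for the helpers above: they simply drain an
- # iterator and record the tracked index at every step.
- _demo = nditer(arange(6).reshape(2, 3), ['multi_index'], [['readonly']])
- assert_equal(iter_multi_index(_demo),
- [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)])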
- @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
- def test_iter_refcount():
- # Make sure the iterator doesn't leak
- # Basic
- a = arange(6)
- dt = np.dtype('f4').newbyteorder()
- rc_a = sys.getrefcount(a)
- rc_dt = sys.getrefcount(dt)
- with nditer(a, [],
- [['readwrite', 'updateifcopy']],
- casting='unsafe',
- op_dtypes=[dt]) as it:
- assert_(not it.iterationneedsapi)
- assert_(sys.getrefcount(a) > rc_a)
- assert_(sys.getrefcount(dt) > rc_dt)
- # del 'it'
- it = None
- assert_equal(sys.getrefcount(a), rc_a)
- assert_equal(sys.getrefcount(dt), rc_dt)
- # With a copy
- a = arange(6, dtype='f4')
- dt = np.dtype('f4')
- rc_a = sys.getrefcount(a)
- rc_dt = sys.getrefcount(dt)
- it = nditer(a, [],
- [['readwrite']],
- op_dtypes=[dt])
- rc2_a = sys.getrefcount(a)
- rc2_dt = sys.getrefcount(dt)
- it2 = it.copy()
- assert_(sys.getrefcount(a) > rc2_a)
- assert_(sys.getrefcount(dt) > rc2_dt)
- it = None
- assert_equal(sys.getrefcount(a), rc2_a)
- assert_equal(sys.getrefcount(dt), rc2_dt)
- it2 = None
- assert_equal(sys.getrefcount(a), rc_a)
- assert_equal(sys.getrefcount(dt), rc_dt)
- del it2 # avoid pyflakes unused variable warning
- def test_iter_best_order():
- # The iterator should always find the iteration order
- # with increasing memory addresses
- # Test the ordering for 1-D to 5-D shapes
- for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]:
- a = arange(np.prod(shape))
- # Test each combination of positive and negative strides
- for dirs in range(2**len(shape)):
- dirs_index = [slice(None)]*len(shape)
- for bit in range(len(shape)):
- if ((2**bit) & dirs):
- dirs_index[bit] = slice(None, None, -1)
- dirs_index = tuple(dirs_index)
- aview = a.reshape(shape)[dirs_index]
- # C-order
- i = nditer(aview, [], [['readonly']])
- assert_equal([x for x in i], a)
- # Fortran-order
- i = nditer(aview.T, [], [['readonly']])
- assert_equal([x for x in i], a)
- # Other order
- if len(shape) > 2:
- i = nditer(aview.swapaxes(0, 1), [], [['readonly']])
- assert_equal([x for x in i], a)
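- # Illustrative sketch of the property tested above: with the default 'K' order
- # the iterator walks memory addresses upward, so a reversed view is visited
- # back to front and its values come out in ascending order.
- _rev = arange(4)[::-1]
- assert_equal([x[()] for x in nditer(_rev)], [0, 1, 2, 3])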
- def test_iter_c_order():
- # Test forcing C order
- # Test the ordering for 1-D to 5-D shapes
- for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]:
- a = arange(np.prod(shape))
- # Test each combination of positive and negative strides
- for dirs in range(2**len(shape)):
- dirs_index = [slice(None)]*len(shape)
- for bit in range(len(shape)):
- if ((2**bit) & dirs):
- dirs_index[bit] = slice(None, None, -1)
- dirs_index = tuple(dirs_index)
- aview = a.reshape(shape)[dirs_index]
- # C-order
- i = nditer(aview, order='C')
- assert_equal([x for x in i], aview.ravel(order='C'))
- # Fortran-order
- i = nditer(aview.T, order='C')
- assert_equal([x for x in i], aview.T.ravel(order='C'))
- # Other order
- if len(shape) > 2:
- i = nditer(aview.swapaxes(0, 1), order='C')
- assert_equal([x for x in i],
- aview.swapaxes(0, 1).ravel(order='C'))
- def test_iter_f_order():
- # Test forcing F order
- # Test the ordering for 1-D to 5-D shapes
- for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]:
- a = arange(np.prod(shape))
- # Test each combination of positive and negative strides
- for dirs in range(2**len(shape)):
- dirs_index = [slice(None)]*len(shape)
- for bit in range(len(shape)):
- if ((2**bit) & dirs):
- dirs_index[bit] = slice(None, None, -1)
- dirs_index = tuple(dirs_index)
- aview = a.reshape(shape)[dirs_index]
- # C-order
- i = nditer(aview, order='F')
- assert_equal([x for x in i], aview.ravel(order='F'))
- # Fortran-order
- i = nditer(aview.T, order='F')
- assert_equal([x for x in i], aview.T.ravel(order='F'))
- # Other order
- if len(shape) > 2:
- i = nditer(aview.swapaxes(0, 1), order='F')
- assert_equal([x for x in i],
- aview.swapaxes(0, 1).ravel(order='F'))
- def test_iter_c_or_f_order():
- # Test forcing any contiguous (C or F) order
- # Test the ordering for 1-D to 5-D shapes
- for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]:
- a = arange(np.prod(shape))
- # Test each combination of positive and negative strides
- for dirs in range(2**len(shape)):
- dirs_index = [slice(None)]*len(shape)
- for bit in range(len(shape)):
- if ((2**bit) & dirs):
- dirs_index[bit] = slice(None, None, -1)
- dirs_index = tuple(dirs_index)
- aview = a.reshape(shape)[dirs_index]
- # C-order
- i = nditer(aview, order='A')
- assert_equal([x for x in i], aview.ravel(order='A'))
- # Fortran-order
- i = nditer(aview.T, order='A')
- assert_equal([x for x in i], aview.T.ravel(order='A'))
- # Other order
- if len(shape) > 2:
- i = nditer(aview.swapaxes(0, 1), order='A')
- assert_equal([x for x in i],
- aview.swapaxes(0, 1).ravel(order='A'))
- def test_nditer_multi_index_set():
- # Test the multi_index set
- a = np.arange(6).reshape(2, 3)
- it = np.nditer(a, flags=['multi_index'])
- # Setting the multi-index to (0, 2) skips the first two elements of a[0]
- it.multi_index = (0, 2,)
- assert_equal([i for i in it], [2, 3, 4, 5])
-
- @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
- def test_nditer_multi_index_set_refcount():
- # Setting multi_index must leave the reference count of the index variable unchanged
-
- index = 0
- i = np.nditer(np.array([111, 222, 333, 444]), flags=['multi_index'])
- start_count = sys.getrefcount(index)
- i.multi_index = (index,)
- end_count = sys.getrefcount(index)
-
- assert_equal(start_count, end_count)
- def test_iter_best_order_multi_index_1d():
- # The multi-indices should be correct with any reordering
- a = arange(4)
- # 1D order
- i = nditer(a, ['multi_index'], [['readonly']])
- assert_equal(iter_multi_index(i), [(0,), (1,), (2,), (3,)])
- # 1D reversed order
- i = nditer(a[::-1], ['multi_index'], [['readonly']])
- assert_equal(iter_multi_index(i), [(3,), (2,), (1,), (0,)])
- def test_iter_best_order_multi_index_2d():
- # The multi-indices should be correct with any reordering
- a = arange(6)
- # 2D C-order
- i = nditer(a.reshape(2, 3), ['multi_index'], [['readonly']])
- assert_equal(iter_multi_index(i), [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)])
- # 2D Fortran-order
- i = nditer(a.reshape(2, 3).copy(order='F'), ['multi_index'], [['readonly']])
- assert_equal(iter_multi_index(i), [(0, 0), (1, 0), (0, 1), (1, 1), (0, 2), (1, 2)])
- # 2D reversed C-order
- i = nditer(a.reshape(2, 3)[::-1], ['multi_index'], [['readonly']])
- assert_equal(iter_multi_index(i), [(1, 0), (1, 1), (1, 2), (0, 0), (0, 1), (0, 2)])
- i = nditer(a.reshape(2, 3)[:, ::-1], ['multi_index'], [['readonly']])
- assert_equal(iter_multi_index(i), [(0, 2), (0, 1), (0, 0), (1, 2), (1, 1), (1, 0)])
- i = nditer(a.reshape(2, 3)[::-1, ::-1], ['multi_index'], [['readonly']])
- assert_equal(iter_multi_index(i), [(1, 2), (1, 1), (1, 0), (0, 2), (0, 1), (0, 0)])
- # 2D reversed Fortran-order
- i = nditer(a.reshape(2, 3).copy(order='F')[::-1], ['multi_index'], [['readonly']])
- assert_equal(iter_multi_index(i), [(1, 0), (0, 0), (1, 1), (0, 1), (1, 2), (0, 2)])
- i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1],
- ['multi_index'], [['readonly']])
- assert_equal(iter_multi_index(i), [(0, 2), (1, 2), (0, 1), (1, 1), (0, 0), (1, 0)])
- i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1],
- ['multi_index'], [['readonly']])
- assert_equal(iter_multi_index(i), [(1, 2), (0, 2), (1, 1), (0, 1), (1, 0), (0, 0)])
- def test_iter_best_order_multi_index_3d():
- # The multi-indices should be correct with any reordering
- a = arange(12)
- # 3D C-order
- i = nditer(a.reshape(2, 3, 2), ['multi_index'], [['readonly']])
- assert_equal(iter_multi_index(i),
- [(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (0, 2, 0), (0, 2, 1),
- (1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1), (1, 2, 0), (1, 2, 1)])
- # 3D Fortran-order
- i = nditer(a.reshape(2, 3, 2).copy(order='F'), ['multi_index'], [['readonly']])
- assert_equal(iter_multi_index(i),
- [(0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 2, 0), (1, 2, 0),
- (0, 0, 1), (1, 0, 1), (0, 1, 1), (1, 1, 1), (0, 2, 1), (1, 2, 1)])
- # 3D reversed C-order
- i = nditer(a.reshape(2, 3, 2)[::-1], ['multi_index'], [['readonly']])
- assert_equal(iter_multi_index(i),
- [(1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1), (1, 2, 0), (1, 2, 1),
- (0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (0, 2, 0), (0, 2, 1)])
- i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['multi_index'], [['readonly']])
- assert_equal(iter_multi_index(i),
- [(0, 2, 0), (0, 2, 1), (0, 1, 0), (0, 1, 1), (0, 0, 0), (0, 0, 1),
- (1, 2, 0), (1, 2, 1), (1, 1, 0), (1, 1, 1), (1, 0, 0), (1, 0, 1)])
- i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['multi_index'], [['readonly']])
- assert_equal(iter_multi_index(i),
- [(0, 0, 1), (0, 0, 0), (0, 1, 1), (0, 1, 0), (0, 2, 1), (0, 2, 0),
- (1, 0, 1), (1, 0, 0), (1, 1, 1), (1, 1, 0), (1, 2, 1), (1, 2, 0)])
- # 3D reversed Fortran-order
- i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1],
- ['multi_index'], [['readonly']])
- assert_equal(iter_multi_index(i),
- [(1, 0, 0), (0, 0, 0), (1, 1, 0), (0, 1, 0), (1, 2, 0), (0, 2, 0),
- (1, 0, 1), (0, 0, 1), (1, 1, 1), (0, 1, 1), (1, 2, 1), (0, 2, 1)])
- i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1],
- ['multi_index'], [['readonly']])
- assert_equal(iter_multi_index(i),
- [(0, 2, 0), (1, 2, 0), (0, 1, 0), (1, 1, 0), (0, 0, 0), (1, 0, 0),
- (0, 2, 1), (1, 2, 1), (0, 1, 1), (1, 1, 1), (0, 0, 1), (1, 0, 1)])
- i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1],
- ['multi_index'], [['readonly']])
- assert_equal(iter_multi_index(i),
- [(0, 0, 1), (1, 0, 1), (0, 1, 1), (1, 1, 1), (0, 2, 1), (1, 2, 1),
- (0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 2, 0), (1, 2, 0)])
- def test_iter_best_order_c_index_1d():
- # The C index should be correct with any reordering
- a = arange(4)
- # 1D order
- i = nditer(a, ['c_index'], [['readonly']])
- assert_equal(iter_indices(i), [0, 1, 2, 3])
- # 1D reversed order
- i = nditer(a[::-1], ['c_index'], [['readonly']])
- assert_equal(iter_indices(i), [3, 2, 1, 0])
- def test_iter_best_order_c_index_2d():
- # The C index should be correct with any reordering
- a = arange(6)
- # 2D C-order
- i = nditer(a.reshape(2, 3), ['c_index'], [['readonly']])
- assert_equal(iter_indices(i), [0, 1, 2, 3, 4, 5])
- # 2D Fortran-order
- i = nditer(a.reshape(2, 3).copy(order='F'),
- ['c_index'], [['readonly']])
- assert_equal(iter_indices(i), [0, 3, 1, 4, 2, 5])
- # 2D reversed C-order
- i = nditer(a.reshape(2, 3)[::-1], ['c_index'], [['readonly']])
- assert_equal(iter_indices(i), [3, 4, 5, 0, 1, 2])
- i = nditer(a.reshape(2, 3)[:, ::-1], ['c_index'], [['readonly']])
- assert_equal(iter_indices(i), [2, 1, 0, 5, 4, 3])
- i = nditer(a.reshape(2, 3)[::-1, ::-1], ['c_index'], [['readonly']])
- assert_equal(iter_indices(i), [5, 4, 3, 2, 1, 0])
- # 2D reversed Fortran-order
- i = nditer(a.reshape(2, 3).copy(order='F')[::-1],
- ['c_index'], [['readonly']])
- assert_equal(iter_indices(i), [3, 0, 4, 1, 5, 2])
- i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1],
- ['c_index'], [['readonly']])
- assert_equal(iter_indices(i), [2, 5, 1, 4, 0, 3])
- i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1],
- ['c_index'], [['readonly']])
- assert_equal(iter_indices(i), [5, 2, 4, 1, 3, 0])
- def test_iter_best_order_c_index_3d():
- # The C index should be correct with any reordering
- a = arange(12)
- # 3D C-order
- i = nditer(a.reshape(2, 3, 2), ['c_index'], [['readonly']])
- assert_equal(iter_indices(i),
- [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
- # 3D Fortran-order
- i = nditer(a.reshape(2, 3, 2).copy(order='F'),
- ['c_index'], [['readonly']])
- assert_equal(iter_indices(i),
- [0, 6, 2, 8, 4, 10, 1, 7, 3, 9, 5, 11])
- # 3D reversed C-order
- i = nditer(a.reshape(2, 3, 2)[::-1], ['c_index'], [['readonly']])
- assert_equal(iter_indices(i),
- [6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5])
- i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['c_index'], [['readonly']])
- assert_equal(iter_indices(i),
- [4, 5, 2, 3, 0, 1, 10, 11, 8, 9, 6, 7])
- i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['c_index'], [['readonly']])
- assert_equal(iter_indices(i),
- [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10])
- # 3D reversed Fortran-order
- i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1],
- ['c_index'], [['readonly']])
- assert_equal(iter_indices(i),
- [6, 0, 8, 2, 10, 4, 7, 1, 9, 3, 11, 5])
- i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1],
- ['c_index'], [['readonly']])
- assert_equal(iter_indices(i),
- [4, 10, 2, 8, 0, 6, 5, 11, 3, 9, 1, 7])
- i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1],
- ['c_index'], [['readonly']])
- assert_equal(iter_indices(i),
- [1, 7, 3, 9, 5, 11, 0, 6, 2, 8, 4, 10])
- def test_iter_best_order_f_index_1d():
- # The Fortran index should be correct with any reordering
- a = arange(4)
- # 1D order
- i = nditer(a, ['f_index'], [['readonly']])
- assert_equal(iter_indices(i), [0, 1, 2, 3])
- # 1D reversed order
- i = nditer(a[::-1], ['f_index'], [['readonly']])
- assert_equal(iter_indices(i), [3, 2, 1, 0])
- def test_iter_best_order_f_index_2d():
- # The Fortran index should be correct with any reordering
- a = arange(6)
- # 2D C-order
- i = nditer(a.reshape(2, 3), ['f_index'], [['readonly']])
- assert_equal(iter_indices(i), [0, 2, 4, 1, 3, 5])
- # 2D Fortran-order
- i = nditer(a.reshape(2, 3).copy(order='F'),
- ['f_index'], [['readonly']])
- assert_equal(iter_indices(i), [0, 1, 2, 3, 4, 5])
- # 2D reversed C-order
- i = nditer(a.reshape(2, 3)[::-1], ['f_index'], [['readonly']])
- assert_equal(iter_indices(i), [1, 3, 5, 0, 2, 4])
- i = nditer(a.reshape(2, 3)[:, ::-1], ['f_index'], [['readonly']])
- assert_equal(iter_indices(i), [4, 2, 0, 5, 3, 1])
- i = nditer(a.reshape(2, 3)[::-1, ::-1], ['f_index'], [['readonly']])
- assert_equal(iter_indices(i), [5, 3, 1, 4, 2, 0])
- # 2D reversed Fortran-order
- i = nditer(a.reshape(2, 3).copy(order='F')[::-1],
- ['f_index'], [['readonly']])
- assert_equal(iter_indices(i), [1, 0, 3, 2, 5, 4])
- i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1],
- ['f_index'], [['readonly']])
- assert_equal(iter_indices(i), [4, 5, 2, 3, 0, 1])
- i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1],
- ['f_index'], [['readonly']])
- assert_equal(iter_indices(i), [5, 4, 3, 2, 1, 0])
- def test_iter_best_order_f_index_3d():
- # The Fortran index should be correct with any reordering
- a = arange(12)
- # 3D C-order
- i = nditer(a.reshape(2, 3, 2), ['f_index'], [['readonly']])
- assert_equal(iter_indices(i),
- [0, 6, 2, 8, 4, 10, 1, 7, 3, 9, 5, 11])
- # 3D Fortran-order
- i = nditer(a.reshape(2, 3, 2).copy(order='F'),
- ['f_index'], [['readonly']])
- assert_equal(iter_indices(i),
- [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
- # 3D reversed C-order
- i = nditer(a.reshape(2, 3, 2)[::-1], ['f_index'], [['readonly']])
- assert_equal(iter_indices(i),
- [1, 7, 3, 9, 5, 11, 0, 6, 2, 8, 4, 10])
- i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['f_index'], [['readonly']])
- assert_equal(iter_indices(i),
- [4, 10, 2, 8, 0, 6, 5, 11, 3, 9, 1, 7])
- i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['f_index'], [['readonly']])
- assert_equal(iter_indices(i),
- [6, 0, 8, 2, 10, 4, 7, 1, 9, 3, 11, 5])
- # 3D reversed Fortran-order
- i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1],
- ['f_index'], [['readonly']])
- assert_equal(iter_indices(i),
- [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10])
- i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1],
- ['f_index'], [['readonly']])
- assert_equal(iter_indices(i),
- [4, 5, 2, 3, 0, 1, 10, 11, 8, 9, 6, 7])
- i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1],
- ['f_index'], [['readonly']])
- assert_equal(iter_indices(i),
- [6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5])
- def test_iter_no_inner_full_coalesce():
- # Check 'external_loop' iterators which coalesce into a single inner loop
- for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]:
- size = np.prod(shape)
- a = arange(size)
- # Test each combination of forward and backwards indexing
- for dirs in range(2**len(shape)):
- dirs_index = [slice(None)]*len(shape)
- for bit in range(len(shape)):
- if ((2**bit) & dirs):
- dirs_index[bit] = slice(None, None, -1)
- dirs_index = tuple(dirs_index)
- aview = a.reshape(shape)[dirs_index]
- # C-order
- i = nditer(aview, ['external_loop'], [['readonly']])
- assert_equal(i.ndim, 1)
- assert_equal(i[0].shape, (size,))
- # Fortran-order
- i = nditer(aview.T, ['external_loop'], [['readonly']])
- assert_equal(i.ndim, 1)
- assert_equal(i[0].shape, (size,))
- # Other order
- if len(shape) > 2:
- i = nditer(aview.swapaxes(0, 1),
- ['external_loop'], [['readonly']])
- assert_equal(i.ndim, 1)
- assert_equal(i[0].shape, (size,))
- def test_iter_no_inner_dim_coalescing():
- # Check 'external_loop' iterators whose dimensions may not coalesce completely
- # Skipping the last element in a dimension prevents coalescing
- # with the next-bigger dimension
- a = arange(24).reshape(2, 3, 4)[:,:, :-1]
- i = nditer(a, ['external_loop'], [['readonly']])
- assert_equal(i.ndim, 2)
- assert_equal(i[0].shape, (3,))
- a = arange(24).reshape(2, 3, 4)[:, :-1,:]
- i = nditer(a, ['external_loop'], [['readonly']])
- assert_equal(i.ndim, 2)
- assert_equal(i[0].shape, (8,))
- a = arange(24).reshape(2, 3, 4)[:-1,:,:]
- i = nditer(a, ['external_loop'], [['readonly']])
- assert_equal(i.ndim, 1)
- assert_equal(i[0].shape, (12,))
- # Even with lots of 1-sized dimensions, should still coalesce
- a = arange(24).reshape(1, 1, 2, 1, 1, 3, 1, 1, 4, 1, 1)
- i = nditer(a, ['external_loop'], [['readonly']])
- assert_equal(i.ndim, 1)
- assert_equal(i[0].shape, (24,))
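- # Sketch of why the trimmed views above stop coalescing: dropping the last
- # column keeps the row stride at 4 elements while each row now holds only 3,
- # so consecutive rows are no longer adjacent in memory and 'external_loop'
- # can hand out at most one 3-element chunk at a time.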
- def test_iter_dim_coalescing():
- # Check that the correct number of dimensions are coalesced
- # Tracking a multi-index disables coalescing
- a = arange(24).reshape(2, 3, 4)
- i = nditer(a, ['multi_index'], [['readonly']])
- assert_equal(i.ndim, 3)
- # A tracked index can allow coalescing if it's compatible with the array
- a3d = arange(24).reshape(2, 3, 4)
- i = nditer(a3d, ['c_index'], [['readonly']])
- assert_equal(i.ndim, 1)
- i = nditer(a3d.swapaxes(0, 1), ['c_index'], [['readonly']])
- assert_equal(i.ndim, 3)
- i = nditer(a3d.T, ['c_index'], [['readonly']])
- assert_equal(i.ndim, 3)
- i = nditer(a3d.T, ['f_index'], [['readonly']])
- assert_equal(i.ndim, 1)
- i = nditer(a3d.T.swapaxes(0, 1), ['f_index'], [['readonly']])
- assert_equal(i.ndim, 3)
- # When C or F order is forced, coalescing may still occur
- a3d = arange(24).reshape(2, 3, 4)
- i = nditer(a3d, order='C')
- assert_equal(i.ndim, 1)
- i = nditer(a3d.T, order='C')
- assert_equal(i.ndim, 3)
- i = nditer(a3d, order='F')
- assert_equal(i.ndim, 3)
- i = nditer(a3d.T, order='F')
- assert_equal(i.ndim, 1)
- i = nditer(a3d, order='A')
- assert_equal(i.ndim, 1)
- i = nditer(a3d.T, order='A')
- assert_equal(i.ndim, 1)
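- # Illustrative sketch: a plain C-contiguous array lets every dimension be
- # coalesced into one flat loop, while tracking a multi-index forces the
- # iterator to keep the full dimensionality.
- _a3 = arange(24).reshape(2, 3, 4)
- assert_equal(nditer(_a3).ndim, 1)
- assert_equal(nditer(_a3, ['multi_index']).ndim, 3)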
- def test_iter_broadcasting():
- # Standard NumPy broadcasting rules
- # 1D with scalar
- i = nditer([arange(6), np.int32(2)], ['multi_index'], [['readonly']]*2)
- assert_equal(i.itersize, 6)
- assert_equal(i.shape, (6,))
- # 2D with scalar
- i = nditer([arange(6).reshape(2, 3), np.int32(2)],
- ['multi_index'], [['readonly']]*2)
- assert_equal(i.itersize, 6)
- assert_equal(i.shape, (2, 3))
- # 2D with 1D
- i = nditer([arange(6).reshape(2, 3), arange(3)],
- ['multi_index'], [['readonly']]*2)
- assert_equal(i.itersize, 6)
- assert_equal(i.shape, (2, 3))
- i = nditer([arange(2).reshape(2, 1), arange(3)],
- ['multi_index'], [['readonly']]*2)
- assert_equal(i.itersize, 6)
- assert_equal(i.shape, (2, 3))
- # 2D with 2D
- i = nditer([arange(2).reshape(2, 1), arange(3).reshape(1, 3)],
- ['multi_index'], [['readonly']]*2)
- assert_equal(i.itersize, 6)
- assert_equal(i.shape, (2, 3))
- # 3D with scalar
- i = nditer([np.int32(2), arange(24).reshape(4, 2, 3)],
- ['multi_index'], [['readonly']]*2)
- assert_equal(i.itersize, 24)
- assert_equal(i.shape, (4, 2, 3))
- # 3D with 1D
- i = nditer([arange(3), arange(24).reshape(4, 2, 3)],
- ['multi_index'], [['readonly']]*2)
- assert_equal(i.itersize, 24)
- assert_equal(i.shape, (4, 2, 3))
- i = nditer([arange(3), arange(8).reshape(4, 2, 1)],
- ['multi_index'], [['readonly']]*2)
- assert_equal(i.itersize, 24)
- assert_equal(i.shape, (4, 2, 3))
- # 3D with 2D
- i = nditer([arange(6).reshape(2, 3), arange(24).reshape(4, 2, 3)],
- ['multi_index'], [['readonly']]*2)
- assert_equal(i.itersize, 24)
- assert_equal(i.shape, (4, 2, 3))
- i = nditer([arange(2).reshape(2, 1), arange(24).reshape(4, 2, 3)],
- ['multi_index'], [['readonly']]*2)
- assert_equal(i.itersize, 24)
- assert_equal(i.shape, (4, 2, 3))
- i = nditer([arange(3).reshape(1, 3), arange(8).reshape(4, 2, 1)],
- ['multi_index'], [['readonly']]*2)
- assert_equal(i.itersize, 24)
- assert_equal(i.shape, (4, 2, 3))
- # 3D with 3D
- i = nditer([arange(2).reshape(1, 2, 1), arange(3).reshape(1, 1, 3),
- arange(4).reshape(4, 1, 1)],
- ['multi_index'], [['readonly']]*3)
- assert_equal(i.itersize, 24)
- assert_equal(i.shape, (4, 2, 3))
- i = nditer([arange(6).reshape(1, 2, 3), arange(4).reshape(4, 1, 1)],
- ['multi_index'], [['readonly']]*2)
- assert_equal(i.itersize, 24)
- assert_equal(i.shape, (4, 2, 3))
- i = nditer([arange(24).reshape(4, 2, 3), arange(12).reshape(4, 1, 3)],
- ['multi_index'], [['readonly']]*2)
- assert_equal(i.itersize, 24)
- assert_equal(i.shape, (4, 2, 3))
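- # Illustrative sketch: the iterator shape is simply the standard broadcast of
- # the operand shapes (np.broadcast_shapes, available in NumPy >= 1.20, is
- # assumed here purely for illustration).
- assert_equal(np.broadcast_shapes((2, 1), (1, 3)), (2, 3))
- assert_equal(np.broadcast_shapes((3,), (4, 2, 3)), (4, 2, 3))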
- def test_iter_itershape():
- # Check that allocated outputs work with a specified shape
- a = np.arange(6, dtype='i2').reshape(2, 3)
- i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']],
- op_axes=[[0, 1, None], None],
- itershape=(-1, -1, 4))
- assert_equal(i.operands[1].shape, (2, 3, 4))
- assert_equal(i.operands[1].strides, (24, 8, 2))
- i = nditer([a.T, None], [], [['readonly'], ['writeonly', 'allocate']],
- op_axes=[[0, 1, None], None],
- itershape=(-1, -1, 4))
- assert_equal(i.operands[1].shape, (3, 2, 4))
- assert_equal(i.operands[1].strides, (8, 24, 2))
- i = nditer([a.T, None], [], [['readonly'], ['writeonly', 'allocate']],
- order='F',
- op_axes=[[0, 1, None], None],
- itershape=(-1, -1, 4))
- assert_equal(i.operands[1].shape, (3, 2, 4))
- assert_equal(i.operands[1].strides, (2, 6, 12))
- # If we specify 1 in the itershape, it shouldn't allow broadcasting
- # of that dimension to a bigger value
- assert_raises(ValueError, nditer, [a, None], [],
- [['readonly'], ['writeonly', 'allocate']],
- op_axes=[[0, 1, None], None],
- itershape=(-1, 1, 4))
- # Regression test: with itershape but no op_axes, the internal op_axes are NULLed correctly
- i = np.nditer([np.ones(2), None, None], itershape=(2,))
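- # Sketch: an itershape entry of -1 means "take this dimension's size from the
- # operands", so only the explicitly requested trailing axis of length 4 is
- # new, which is why the allocated outputs above get shapes like (2, 3, 4).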
- def test_iter_broadcasting_errors():
- # Check that errors are thrown for bad broadcasting shapes
- # 1D with 1D
- assert_raises(ValueError, nditer, [arange(2), arange(3)],
- [], [['readonly']]*2)
- # 2D with 1D
- assert_raises(ValueError, nditer,
- [arange(6).reshape(2, 3), arange(2)],
- [], [['readonly']]*2)
- # 2D with 2D
- assert_raises(ValueError, nditer,
- [arange(6).reshape(2, 3), arange(9).reshape(3, 3)],
- [], [['readonly']]*2)
- assert_raises(ValueError, nditer,
- [arange(6).reshape(2, 3), arange(4).reshape(2, 2)],
- [], [['readonly']]*2)
- # 3D with 3D
- assert_raises(ValueError, nditer,
- [arange(36).reshape(3, 3, 4), arange(24).reshape(2, 3, 4)],
- [], [['readonly']]*2)
- assert_raises(ValueError, nditer,
- [arange(8).reshape(2, 4, 1), arange(24).reshape(2, 3, 4)],
- [], [['readonly']]*2)
- # Verify that the error message mentions the right shapes
- try:
- nditer([arange(2).reshape(1, 2, 1),
- arange(3).reshape(1, 3),
- arange(6).reshape(2, 3)],
- [],
- [['readonly'], ['readonly'], ['writeonly', 'no_broadcast']])
- raise AssertionError('Should have raised a broadcast error')
- except ValueError as e:
- msg = str(e)
- # The message should contain the shape of the 3rd operand
- assert_(msg.find('(2,3)') >= 0,
- 'Message "%s" doesn\'t contain operand shape (2,3)' % msg)
- # The message should contain the broadcast shape
- assert_(msg.find('(1,2,3)') >= 0,
- 'Message "%s" doesn\'t contain broadcast shape (1,2,3)' % msg)
- try:
- nditer([arange(6).reshape(2, 3), arange(2)],
- [],
- [['readonly'], ['readonly']],
- op_axes=[[0, 1], [0, np.newaxis]],
- itershape=(4, 3))
- raise AssertionError('Should have raised a broadcast error')
- except ValueError as e:
- msg = str(e)
- # The message should contain "shape->remappedshape" for each operand
- assert_(msg.find('(2,3)->(2,3)') >= 0,
- 'Message "%s" doesn\'t contain operand shape (2,3)->(2,3)' % msg)
- assert_(msg.find('(2,)->(2,newaxis)') >= 0,
- ('Message "%s" doesn\'t contain remapped operand shape ' +
- '(2,)->(2,newaxis)') % msg)
- # The message should contain the itershape parameter
- assert_(msg.find('(4,3)') >= 0,
- 'Message "%s" doesn\'t contain itershape parameter (4,3)' % msg)
- try:
- nditer([np.zeros((2, 1, 1)), np.zeros((2,))],
- [],
- [['writeonly', 'no_broadcast'], ['readonly']])
- raise AssertionError('Should have raised a broadcast error')
- except ValueError as e:
- msg = str(e)
- # The message should contain the shape of the bad operand
- assert_(msg.find('(2,1,1)') >= 0,
- 'Message "%s" doesn\'t contain operand shape (2,1,1)' % msg)
- # The message should contain the broadcast shape
- assert_(msg.find('(2,1,2)') >= 0,
- 'Message "%s" doesn\'t contain the broadcast shape (2,1,2)' % msg)
- def test_iter_flags_errors():
- # Check that bad combinations of flags produce errors
- a = arange(6)
- # Not enough operands
- assert_raises(ValueError, nditer, [], [], [])
- # Too many operands
- assert_raises(ValueError, nditer, [a]*100, [], [['readonly']]*100)
- # Bad global flag
- assert_raises(ValueError, nditer, [a], ['bad flag'], [['readonly']])
- # Bad op flag
- assert_raises(ValueError, nditer, [a], [], [['readonly', 'bad flag']])
- # Bad order parameter
- assert_raises(ValueError, nditer, [a], [], [['readonly']], order='G')
- # Bad casting parameter
- assert_raises(ValueError, nditer, [a], [], [['readonly']], casting='noon')
- # op_flags must match ops
- assert_raises(ValueError, nditer, [a]*3, [], [['readonly']]*2)
- # Cannot track both a C and an F index
- assert_raises(ValueError, nditer, a,
- ['c_index', 'f_index'], [['readonly']])
- # Inner iteration and multi-indices/indices are incompatible
- assert_raises(ValueError, nditer, a,
- ['external_loop', 'multi_index'], [['readonly']])
- assert_raises(ValueError, nditer, a,
- ['external_loop', 'c_index'], [['readonly']])
- assert_raises(ValueError, nditer, a,
- ['external_loop', 'f_index'], [['readonly']])
- # Must specify exactly one of readwrite/readonly/writeonly per operand
- assert_raises(ValueError, nditer, a, [], [[]])
- assert_raises(ValueError, nditer, a, [], [['readonly', 'writeonly']])
- assert_raises(ValueError, nditer, a, [], [['readonly', 'readwrite']])
- assert_raises(ValueError, nditer, a, [], [['writeonly', 'readwrite']])
- assert_raises(ValueError, nditer, a,
- [], [['readonly', 'writeonly', 'readwrite']])
- # Python scalars are always readonly
- assert_raises(TypeError, nditer, 1.5, [], [['writeonly']])
- assert_raises(TypeError, nditer, 1.5, [], [['readwrite']])
- # Array scalars are always readonly
- assert_raises(TypeError, nditer, np.int32(1), [], [['writeonly']])
- assert_raises(TypeError, nditer, np.int32(1), [], [['readwrite']])
- # Check readonly array
- a.flags.writeable = False
- assert_raises(ValueError, nditer, a, [], [['writeonly']])
- assert_raises(ValueError, nditer, a, [], [['readwrite']])
- a.flags.writeable = True
- # Multi-indices available only with the multi_index flag
- i = nditer(arange(6), [], [['readonly']])
- assert_raises(ValueError, lambda i:i.multi_index, i)
- # Index available only with an index flag
- assert_raises(ValueError, lambda i:i.index, i)
- # Assigning multi_index/index is incompatible with buffering or external_loop
- def assign_multi_index(i):
- i.multi_index = (0,)
- def assign_index(i):
- i.index = 0
- def assign_iterindex(i):
- i.iterindex = 0
- def assign_iterrange(i):
- i.iterrange = (0, 1)
- i = nditer(arange(6), ['external_loop'])
- assert_raises(ValueError, assign_multi_index, i)
- assert_raises(ValueError, assign_index, i)
- assert_raises(ValueError, assign_iterindex, i)
- assert_raises(ValueError, assign_iterrange, i)
- i = nditer(arange(6), ['buffered'])
- assert_raises(ValueError, assign_multi_index, i)
- assert_raises(ValueError, assign_index, i)
- assert_raises(ValueError, assign_iterrange, i)
- # Can't iterate if size is zero
- assert_raises(ValueError, nditer, np.array([]))
- def test_iter_slice():
- a, b, c = np.arange(3), np.arange(3), np.arange(3.)
- i = nditer([a, b, c], [], ['readwrite'])
- with i:
- i[0:2] = (3, 3)
- assert_equal(a, [3, 1, 2])
- assert_equal(b, [3, 1, 2])
- assert_equal(c, [0, 1, 2])
- i[1] = 12
- assert_equal(i[0:2], [3, 12])
- def test_iter_assign_mapping():
- a = np.arange(24, dtype='f8').reshape(2, 3, 4).T
- it = np.nditer(a, [], [['readwrite', 'updateifcopy']],
- casting='same_kind', op_dtypes=[np.dtype('f4')])
- with it:
- it.operands[0][...] = 3
- it.operands[0][...] = 14
- assert_equal(a, 14)
- it = np.nditer(a, [], [['readwrite', 'updateifcopy']],
- casting='same_kind', op_dtypes=[np.dtype('f4')])
- with it:
- x = it.operands[0][-1:1]
- x[...] = 14
- it.operands[0][...] = -1234
- assert_equal(a, -1234)
- # check for no warnings on dealloc
- x = None
- it = None
- def test_iter_nbo_align_contig():
- # Check that byte order, alignment, and contig changes work
- # Byte order change by requesting a specific dtype
- a = np.arange(6, dtype='f4')
- au = a.byteswap().newbyteorder()
- assert_(a.dtype.byteorder != au.dtype.byteorder)
- i = nditer(au, [], [['readwrite', 'updateifcopy']],
- casting='equiv',
- op_dtypes=[np.dtype('f4')])
- with i:
- # context manager triggers WRITEBACKIFCOPY on i at exit
- assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder)
- assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder)
- assert_equal(i.operands[0], a)
- i.operands[0][:] = 2
- assert_equal(au, [2]*6)
- del i # should not raise a warning
- # Byte order change by requesting NBO
- a = np.arange(6, dtype='f4')
- au = a.byteswap().newbyteorder()
- assert_(a.dtype.byteorder != au.dtype.byteorder)
- with nditer(au, [], [['readwrite', 'updateifcopy', 'nbo']],
- casting='equiv') as i:
- # context manager triggers WRITEBACKIFCOPY on i at exit
- assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder)
- assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder)
- assert_equal(i.operands[0], a)
- i.operands[0][:] = 12345
- i.operands[0][:] = 2
- assert_equal(au, [2]*6)
- # Unaligned input
- a = np.zeros((6*4+1,), dtype='i1')[1:]
- a.dtype = 'f4'
- a[:] = np.arange(6, dtype='f4')
- assert_(not a.flags.aligned)
- # Without 'aligned', shouldn't copy
- i = nditer(a, [], [['readonly']])
- assert_(not i.operands[0].flags.aligned)
- assert_equal(i.operands[0], a)
- # With 'aligned', should make a copy
- with nditer(a, [], [['readwrite', 'updateifcopy', 'aligned']]) as i:
- assert_(i.operands[0].flags.aligned)
- # context manager triggers WRITEBACKIFCOPY on i at exit
- assert_equal(i.operands[0], a)
- i.operands[0][:] = 3
- assert_equal(a, [3]*6)
- # Discontiguous input
- a = arange(12)
- # If it is contiguous, shouldn't copy
- i = nditer(a[:6], [], [['readonly']])
- assert_(i.operands[0].flags.contiguous)
- assert_equal(i.operands[0], a[:6])
- # If it isn't contiguous, should buffer
- i = nditer(a[::2], ['buffered', 'external_loop'],
- [['readonly', 'contig']],
- buffersize=10)
- assert_(i[0].flags.contiguous)
- assert_equal(i[0], a[::2])
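- # Sketch: 'nbo', 'aligned' and 'contig' only state per-operand requirements;
- # the iterator satisfies them by making a write-back copy or by buffering, so
- # the original array is only updated once the iterator (or 'with' block) ends.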
- def test_iter_array_cast():
- # Check that arrays are cast as requested
- # No cast 'f4' -> 'f4'
- a = np.arange(6, dtype='f4').reshape(2, 3)
- i = nditer(a, [], [['readwrite']], op_dtypes=[np.dtype('f4')])
- with i:
- assert_equal(i.operands[0], a)
- assert_equal(i.operands[0].dtype, np.dtype('f4'))
- # Byte-order cast '<f4' -> '>f4'
- a = np.arange(6, dtype='<f4').reshape(2, 3)
- with nditer(a, [], [['readwrite', 'updateifcopy']],
- casting='equiv',
- op_dtypes=[np.dtype('>f4')]) as i:
- assert_equal(i.operands[0], a)
- assert_equal(i.operands[0].dtype, np.dtype('>f4'))
- # Safe case 'f4' -> 'f8'
- a = np.arange(24, dtype='f4').reshape(2, 3, 4).swapaxes(1, 2)
- i = nditer(a, [], [['readonly', 'copy']],
- casting='safe',
- op_dtypes=[np.dtype('f8')])
- assert_equal(i.operands[0], a)
- assert_equal(i.operands[0].dtype, np.dtype('f8'))
- # The memory layout of the temporary should match a (a is (48,4,16))
- # except negative strides get flipped to positive strides.
- assert_equal(i.operands[0].strides, (96, 8, 32))
- a = a[::-1,:, ::-1]
- i = nditer(a, [], [['readonly', 'copy']],
- casting='safe',
- op_dtypes=[np.dtype('f8')])
- assert_equal(i.operands[0], a)
- assert_equal(i.operands[0].dtype, np.dtype('f8'))
- assert_equal(i.operands[0].strides, (96, 8, 32))
- # Same-kind cast 'f8' -> 'f4' -> 'f8'
- a = np.arange(24, dtype='f8').reshape(2, 3, 4).T
- with nditer(a, [],
- [['readwrite', 'updateifcopy']],
- casting='same_kind',
- op_dtypes=[np.dtype('f4')]) as i:
- assert_equal(i.operands[0], a)
- assert_equal(i.operands[0].dtype, np.dtype('f4'))
- assert_equal(i.operands[0].strides, (4, 16, 48))
- # Check that WRITEBACKIFCOPY is activated at exit
- i.operands[0][2, 1, 1] = -12.5
- assert_(a[2, 1, 1] != -12.5)
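- # (the write-back into 'a' happens only when the 'with' block exits, which is
- # why the new value is visible in 'a' in the assert that follows)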
- assert_equal(a[2, 1, 1], -12.5)
- a = np.arange(6, dtype='i4')[::-2]
- with nditer(a, [],
- [['writeonly', 'updateifcopy']],
- casting='unsafe',
- op_dtypes=[np.dtype('f4')]) as i:
- assert_equal(i.operands[0].dtype, np.dtype('f4'))
- # Even though the stride was negative in 'a', it
- # becomes positive in the temporary
- assert_equal(i.operands[0].strides, (4,))
- i.operands[0][:] = [1, 2, 3]
- assert_equal(a, [1, 2, 3])
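- # Sketch: temporaries created for casting keep the source's memory layout
- # ('K'-order copies), except that negative strides are flipped to positive,
- # which is exactly what the stride assertions above verify.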
- def test_iter_array_cast_errors():
- # Check that invalid casts are caught
- # Need to enable copying for casts to occur
- assert_raises(TypeError, nditer, arange(2, dtype='f4'), [],
- [['readonly']], op_dtypes=[np.dtype('f8')])
- # Also need to allow casting for casts to occur
- assert_raises(TypeError, nditer, arange(2, dtype='f4'), [],
- [['readonly', 'copy']], casting='no',
- op_dtypes=[np.dtype('f8')])
- assert_raises(TypeError, nditer, arange(2, dtype='f4'), [],
- [['readonly', 'copy']], casting='equiv',
- op_dtypes=[np.dtype('f8')])
- assert_raises(TypeError, nditer, arange(2, dtype='f8'), [],
- [['writeonly', 'updateifcopy']],
- casting='no',
- op_dtypes=[np.dtype('f4')])
- assert_raises(TypeError, nditer, arange(2, dtype='f8'), [],
- [['writeonly', 'updateifcopy']],
- casting='equiv',
- op_dtypes=[np.dtype('f4')])
- # '<f4' -> '>f4' should not work with casting='no'
- assert_raises(TypeError, nditer, arange(2, dtype='<f4'), [],
- [['readonly', 'copy']], casting='no',
- op_dtypes=[np.dtype('>f4')])
- # 'f4' -> 'f8' is a safe cast, but 'f8' -> 'f4' isn't
- assert_raises(TypeError, nditer, arange(2, dtype='f4'), [],
- [['readwrite', 'updateifcopy']],
- casting='safe',
- op_dtypes=[np.dtype('f8')])
- assert_raises(TypeError, nditer, arange(2, dtype='f8'), [],
- [['readwrite', 'updateifcopy']],
- casting='safe',
- op_dtypes=[np.dtype('f4')])
- # 'f4' -> 'i4' is neither a safe nor a same-kind cast
- assert_raises(TypeError, nditer, arange(2, dtype='f4'), [],
- [['readonly', 'copy']],
- casting='same_kind',
- op_dtypes=[np.dtype('i4')])
- assert_raises(TypeError, nditer, arange(2, dtype='i4'), [],
- [['writeonly', 'updateifcopy']],
- casting='same_kind',
- op_dtypes=[np.dtype('f4')])
- def test_iter_scalar_cast():
- # Check that scalars are cast as requested
- # No cast 'f4' -> 'f4'
- i = nditer(np.float32(2.5), [], [['readonly']],
- op_dtypes=[np.dtype('f4')])
- assert_equal(i.dtypes[0], np.dtype('f4'))
- assert_equal(i.value.dtype, np.dtype('f4'))
- assert_equal(i.value, 2.5)
- # Safe cast 'f4' -> 'f8'
- i = nditer(np.float32(2.5), [],
- [['readonly', 'copy']],
- casting='safe',
- op_dtypes=[np.dtype('f8')])
- assert_equal(i.dtypes[0], np.dtype('f8'))
- assert_equal(i.value.dtype, np.dtype('f8'))
- assert_equal(i.value, 2.5)
- # Same-kind cast 'f8' -> 'f4'
- i = nditer(np.float64(2.5), [],
- [['readonly', 'copy']],
- casting='same_kind',
- op_dtypes=[np.dtype('f4')])
- assert_equal(i.dtypes[0], np.dtype('f4'))
- assert_equal(i.value.dtype, np.dtype('f4'))
- assert_equal(i.value, 2.5)
- # Unsafe cast 'f8' -> 'i4'
- i = nditer(np.float64(3.0), [],
- [['readonly', 'copy']],
- casting='unsafe',
- op_dtypes=[np.dtype('i4')])
- assert_equal(i.dtypes[0], np.dtype('i4'))
- assert_equal(i.value.dtype, np.dtype('i4'))
- assert_equal(i.value, 3)
- # Readonly scalars may be cast even without setting COPY or BUFFERED
- i = nditer(3, [], [['readonly']], op_dtypes=[np.dtype('f8')])
- assert_equal(i[0].dtype, np.dtype('f8'))
- assert_equal(i[0], 3.)
- def test_iter_scalar_cast_errors():
- # Check that invalid casts are caught
- # Need to allow copying/buffering for write casts of scalars to occur
- assert_raises(TypeError, nditer, np.float32(2), [],
- [['readwrite']], op_dtypes=[np.dtype('f8')])
- assert_raises(TypeError, nditer, 2.5, [],
- [['readwrite']], op_dtypes=[np.dtype('f4')])
- # 'f8' -> 'f4' isn't a safe cast if the value would overflow
- assert_raises(TypeError, nditer, np.float64(1e60), [],
- [['readonly']],
- casting='safe',
- op_dtypes=[np.dtype('f4')])
- # 'f4' -> 'i4' is neither a safe nor a same-kind cast
- assert_raises(TypeError, nditer, np.float32(2), [],
- [['readonly']],
- casting='same_kind',
- op_dtypes=[np.dtype('i4')])
- def test_iter_object_arrays_basic():
- # Check that object arrays work
- obj = {'a':3,'b':'d'}
- a = np.array([[1, 2, 3], None, obj, None], dtype='O')
- if HAS_REFCOUNT:
- rc = sys.getrefcount(obj)
- # Need to allow references for object arrays
- assert_raises(TypeError, nditer, a)
- if HAS_REFCOUNT:
- assert_equal(sys.getrefcount(obj), rc)
- i = nditer(a, ['refs_ok'], ['readonly'])
- vals = [x_[()] for x_ in i]
- assert_equal(np.array(vals, dtype='O'), a)
- vals, i, x = [None]*3
- if HAS_REFCOUNT:
- assert_equal(sys.getrefcount(obj), rc)
- i = nditer(a.reshape(2, 2).T, ['refs_ok', 'buffered'],
- ['readonly'], order='C')
- assert_(i.iterationneedsapi)
- vals = [x_[()] for x_ in i]
- assert_equal(np.array(vals, dtype='O'), a.reshape(2, 2).ravel(order='F'))
- vals, i, x = [None]*3
- if HAS_REFCOUNT:
- assert_equal(sys.getrefcount(obj), rc)
- i = nditer(a.reshape(2, 2).T, ['refs_ok', 'buffered'],
- ['readwrite'], order='C')
- with i:
- for x in i:
- x[...] = None
- vals, i, x = [None]*3
- if HAS_REFCOUNT:
- assert_(sys.getrefcount(obj) == rc-1)
- assert_equal(a, np.array([None]*4, dtype='O'))
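- # Sketch: 'refs_ok' must be passed whenever Python object references are
- # involved; iteration then has to call back into the interpreter, which is
- # what the iterationneedsapi assertion above reflects.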
- def test_iter_object_arrays_conversions():
- # Conversions to/from objects
- a = np.arange(6, dtype='O')
- i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'],
- casting='unsafe', op_dtypes='i4')
- with i:
- for x in i:
- x[...] += 1
- assert_equal(a, np.arange(6)+1)
- a = np.arange(6, dtype='i4')
- i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'],
- casting='unsafe', op_dtypes='O')
- with i:
- for x in i:
- x[...] += 1
- assert_equal(a, np.arange(6)+1)
- # Non-contiguous object array
- a = np.zeros((6,), dtype=[('p', 'i1'), ('a', 'O')])
- a = a['a']
- a[:] = np.arange(6)
- i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'],
- casting='unsafe', op_dtypes='i4')
- with i:
- for x in i:
- x[...] += 1
- assert_equal(a, np.arange(6)+1)
- # Non-contiguous value array
- a = np.zeros((6,), dtype=[('p', 'i1'), ('a', 'i4')])
- a = a['a']
- a[:] = np.arange(6) + 98172488
- i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'],
- casting='unsafe', op_dtypes='O')
- with i:
- ob = i[0][()]
- if HAS_REFCOUNT:
- rc = sys.getrefcount(ob)
- for x in i:
- x[...] += 1
- if HAS_REFCOUNT:
- assert_(sys.getrefcount(ob) == rc-1)
- assert_equal(a, np.arange(6)+98172489)
- def test_iter_common_dtype():
- # Check that the iterator finds a common data type correctly
- i = nditer([array([3], dtype='f4'), array([0], dtype='f8')],
- ['common_dtype'],
- [['readonly', 'copy']]*2,
- casting='safe')
- assert_equal(i.dtypes[0], np.dtype('f8'))
- assert_equal(i.dtypes[1], np.dtype('f8'))
- i = nditer([array([3], dtype='i4'), array([0], dtype='f4')],
- ['common_dtype'],
- [['readonly', 'copy']]*2,
- casting='safe')
- assert_equal(i.dtypes[0], np.dtype('f8'))
- assert_equal(i.dtypes[1], np.dtype('f8'))
- i = nditer([array([3], dtype='f4'), array(0, dtype='f8')],
- ['common_dtype'],
- [['readonly', 'copy']]*2,
- casting='same_kind')
- assert_equal(i.dtypes[0], np.dtype('f4'))
- assert_equal(i.dtypes[1], np.dtype('f4'))
- i = nditer([array([3], dtype='u4'), array(0, dtype='i4')],
- ['common_dtype'],
- [['readonly', 'copy']]*2,
- casting='safe')
- assert_equal(i.dtypes[0], np.dtype('u4'))
- assert_equal(i.dtypes[1], np.dtype('u4'))
- i = nditer([array([3], dtype='u4'), array(-12, dtype='i4')],
- ['common_dtype'],
- [['readonly', 'copy']]*2,
- casting='safe')
- assert_equal(i.dtypes[0], np.dtype('i8'))
- assert_equal(i.dtypes[1], np.dtype('i8'))
- i = nditer([array([3], dtype='u4'), array(-12, dtype='i4'),
- array([2j], dtype='c8'), array([9], dtype='f8')],
- ['common_dtype'],
- [['readonly', 'copy']]*4,
- casting='safe')
- assert_equal(i.dtypes[0], np.dtype('c16'))
- assert_equal(i.dtypes[1], np.dtype('c16'))
- assert_equal(i.dtypes[2], np.dtype('c16'))
- assert_equal(i.dtypes[3], np.dtype('c16'))
- assert_equal(i.value, (3, -12, 2j, 9))
- # When allocating outputs, other outputs aren't factored in
- i = nditer([array([3], dtype='i4'), None, array([2j], dtype='c16')], [],
- [['readonly', 'copy'],
- ['writeonly', 'allocate'],
- ['writeonly']],
- casting='safe')
- assert_equal(i.dtypes[0], np.dtype('i4'))
- assert_equal(i.dtypes[1], np.dtype('i4'))
- assert_equal(i.dtypes[2], np.dtype('c16'))
- # But, if common data types are requested, they are
- i = nditer([array([3], dtype='i4'), None, array([2j], dtype='c16')],
- ['common_dtype'],
- [['readonly', 'copy'],
- ['writeonly', 'allocate'],
- ['writeonly']],
- casting='safe')
- assert_equal(i.dtypes[0], np.dtype('c16'))
- assert_equal(i.dtypes[1], np.dtype('c16'))
- assert_equal(i.dtypes[2], np.dtype('c16'))
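- # Sketch: 'common_dtype' resolves the operands with NumPy's usual promotion
- # rules (roughly np.result_type), which is why 'u4' with a negative 'i4'
- # scalar promotes to 'i8' and a complex operand pulls everything up to 'c16'.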
- def test_iter_copy_if_overlap():
- # Ensure the iterator makes copies on read/write overlap, if requested
- # Copy not needed, 1 op
- for flag in ['readonly', 'writeonly', 'readwrite']:
- a = arange(10)
- i = nditer([a], ['copy_if_overlap'], [[flag]])
- with i:
- assert_(i.operands[0] is a)
- # Copy needed, 2 ops, read-write overlap
- x = arange(10)
- a = x[1:]
- b = x[:-1]
- with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['readwrite']]) as i:
- assert_(not np.shares_memory(*i.operands))
- # Copy not needed with elementwise, 2 ops, exactly same arrays
- x = arange(10)
- a = x
- b = x
- i = nditer([a, b], ['copy_if_overlap'], [['readonly', 'overlap_assume_elementwise'],
- ['readwrite', 'overlap_assume_elementwise']])
- with i:
- assert_(i.operands[0] is a and i.operands[1] is b)
- with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['readwrite']]) as i:
- assert_(i.operands[0] is a and not np.shares_memory(i.operands[1], b))
- # Copy not needed, 2 ops, no overlap
- x = arange(10)
- a = x[::2]
- b = x[1::2]
- i = nditer([a, b], ['copy_if_overlap'], [['readonly'], ['writeonly']])
- assert_(i.operands[0] is a and i.operands[1] is b)
- # Copy needed, 2 ops, read-write overlap
- x = arange(4, dtype=np.int8)
- a = x[3:]
- b = x.view(np.int32)[:1]
- with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['writeonly']]) as i:
- assert_(not np.shares_memory(*i.operands))
- # Copy needed, 3 ops, read-write overlap
- for flag in ['writeonly', 'readwrite']:
- x = np.ones([10, 10])
- a = x
- b = x.T
- c = x
- with nditer([a, b, c], ['copy_if_overlap'],
- [['readonly'], ['readonly'], [flag]]) as i:
- a2, b2, c2 = i.operands
- assert_(not np.shares_memory(a2, c2))
- assert_(not np.shares_memory(b2, c2))
- # Copy not needed, 3 ops, read-only overlap
- x = np.ones([10, 10])
- a = x
- b = x.T
- c = x
- i = nditer([a, b, c], ['copy_if_overlap'],
- [['readonly'], ['readonly'], ['readonly']])
- a2, b2, c2 = i.operands
- assert_(a is a2)
- assert_(b is b2)
- assert_(c is c2)
- # Copy not needed, 3 ops, read-only overlap
- x = np.ones([10, 10])
- a = x
- b = np.ones([10, 10])
- c = x.T
- i = nditer([a, b, c], ['copy_if_overlap'],
- [['readonly'], ['writeonly'], ['readonly']])
- a2, b2, c2 = i.operands
- assert_(a is a2)
- assert_(b is b2)
- assert_(c is c2)
- # Copy not needed, 3 ops, write-only overlap
- x = np.arange(7)
- a = x[:3]
- b = x[3:6]
- c = x[4:7]
- i = nditer([a, b, c], ['copy_if_overlap'],
- [['readonly'], ['writeonly'], ['writeonly']])
- a2, b2, c2 = i.operands
- assert_(a is a2)
- assert_(b is b2)
- assert_(c is c2)
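- # A minimal illustrative sketch (not part of the original tests; the helper
- # name is hypothetical): with 'copy_if_overlap' the operands handed back by
- # the iterator no longer overlap, so a shifted in-place update behaves as if
- # a temporary copy had been used.
- def _example_shifted_add(x):
-     # add each element's right-hand neighbour into it, in place
-     it = np.nditer([x[1:], x[:-1]], ['copy_if_overlap'],
-                    [['readonly'], ['readwrite']])
-     with it:
-         for src, dst in it:
-             dst[...] += src
-     return x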
- def test_iter_op_axes():
- # Check that custom axes work
- # Reverse the axes
- a = arange(6).reshape(2, 3)
- i = nditer([a, a.T], [], [['readonly']]*2, op_axes=[[0, 1], [1, 0]])
- assert_(all([x == y for (x, y) in i]))
- a = arange(24).reshape(2, 3, 4)
- i = nditer([a.T, a], [], [['readonly']]*2, op_axes=[[2, 1, 0], None])
- assert_(all([x == y for (x, y) in i]))
- # Broadcast 1D to any dimension
- a = arange(1, 31).reshape(2, 3, 5)
- b = arange(1, 3)
- i = nditer([a, b], [], [['readonly']]*2, op_axes=[None, [0, -1, -1]])
- assert_equal([x*y for (x, y) in i], (a*b.reshape(2, 1, 1)).ravel())
- b = arange(1, 4)
- i = nditer([a, b], [], [['readonly']]*2, op_axes=[None, [-1, 0, -1]])
- assert_equal([x*y for (x, y) in i], (a*b.reshape(1, 3, 1)).ravel())
- b = arange(1, 6)
- i = nditer([a, b], [], [['readonly']]*2,
- op_axes=[None, [np.newaxis, np.newaxis, 0]])
- assert_equal([x*y for (x, y) in i], (a*b.reshape(1, 1, 5)).ravel())
- # Inner product-style broadcasting
- a = arange(24).reshape(2, 3, 4)
- b = arange(40).reshape(5, 2, 4)
- i = nditer([a, b], ['multi_index'], [['readonly']]*2,
- op_axes=[[0, 1, -1, -1], [-1, -1, 0, 1]])
- assert_equal(i.shape, (2, 3, 5, 2))
- # Matrix product-style broadcasting
- a = arange(12).reshape(3, 4)
- b = arange(20).reshape(4, 5)
- i = nditer([a, b], ['multi_index'], [['readonly']]*2,
- op_axes=[[0, -1], [-1, 1]])
- assert_equal(i.shape, (3, 5))
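- # An illustrative sketch (not from the original suite) of what op_axes
- # expresses: each entry maps iterator axes to operand axes, with -1 meaning
- # "broadcast this operand along that iterator axis". The hypothetical helper
- # below computes an outer product of two 1-D arrays this way.
- def _example_outer(x, y):
-     it = np.nditer([x, y, None], [],
-                    [['readonly'], ['readonly'], ['writeonly', 'allocate']],
-                    op_axes=[[0, -1], [-1, 0], None])
-     with it:
-         for a, b, c in it:
-             c[...] = a * b
-         return it.operands[2]
- # _example_outer(np.arange(3), np.arange(1, 5)) matches np.outer of the same inputs.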
- def test_iter_op_axes_errors():
- # Check that custom op_axes raises errors for bad inputs
- # Wrong number of items in op_axes
- a = arange(6).reshape(2, 3)
- assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
- op_axes=[[0], [1], [0]])
- # Out of bounds items in op_axes
- assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
- op_axes=[[2, 1], [0, 1]])
- assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
- op_axes=[[0, 1], [2, -1]])
- # Duplicate items in op_axes
- assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
- op_axes=[[0, 0], [0, 1]])
- assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
- op_axes=[[0, 1], [1, 1]])
- # Different sized arrays in op_axes
- assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
- op_axes=[[0, 1], [0, 1, 0]])
- # Non-broadcastable dimensions in the result
- assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
- op_axes=[[0, 1], [1, 0]])
- def test_iter_copy():
- # Check that copying the iterator works correctly
- a = arange(24).reshape(2, 3, 4)
- # Simple iterator
- i = nditer(a)
- j = i.copy()
- assert_equal([x[()] for x in i], [x[()] for x in j])
- i.iterindex = 3
- j = i.copy()
- assert_equal([x[()] for x in i], [x[()] for x in j])
- # Buffered iterator
- i = nditer(a, ['buffered', 'ranged'], order='F', buffersize=3)
- j = i.copy()
- assert_equal([x[()] for x in i], [x[()] for x in j])
- i.iterindex = 3
- j = i.copy()
- assert_equal([x[()] for x in i], [x[()] for x in j])
- i.iterrange = (3, 9)
- j = i.copy()
- assert_equal([x[()] for x in i], [x[()] for x in j])
- i.iterrange = (2, 18)
- next(i)
- next(i)
- j = i.copy()
- assert_equal([x[()] for x in i], [x[()] for x in j])
- # Casting iterator
- with nditer(a, ['buffered'], order='F', casting='unsafe',
- op_dtypes='f8', buffersize=5) as i:
- j = i.copy()
- assert_equal([x[()] for x in j], a.ravel(order='F'))
- a = arange(24, dtype='<i4').reshape(2, 3, 4)
- with nditer(a, ['buffered'], order='F', casting='unsafe',
- op_dtypes='>f8', buffersize=5) as i:
- j = i.copy()
- assert_equal([x[()] for x in j], a.ravel(order='F'))
- @pytest.mark.parametrize("dtype", np.typecodes["All"])
- @pytest.mark.parametrize("loop_dtype", np.typecodes["All"])
- @pytest.mark.filterwarnings("ignore::numpy.ComplexWarning")
- def test_iter_copy_casts(dtype, loop_dtype):
- # Ensure the dtype is never flexible:
- if loop_dtype.lower() == "m":
- loop_dtype = loop_dtype + "[ms]"
- elif np.dtype(loop_dtype).itemsize == 0:
- loop_dtype = loop_dtype + "50"
- # Make things a bit more interesting by requiring a byte-swap as well:
- arr = np.ones(1000, dtype=np.dtype(dtype).newbyteorder())
- try:
- expected = arr.astype(loop_dtype)
- except Exception:
- # Some casts are not possible, do not worry about them
- return
- it = np.nditer((arr,), ["buffered", "external_loop", "refs_ok"],
- op_dtypes=[loop_dtype], casting="unsafe")
- if np.issubdtype(np.dtype(loop_dtype), np.number):
- # Casting to strings may be strange, so only for simple numeric dtypes
- # do we rely on the cast above being value-preserving:
- assert_array_equal(expected, np.ones(1000, dtype=loop_dtype))
- it_copy = it.copy()
- res = next(it)
- del it
- res_copy = next(it_copy)
- del it_copy
- assert_array_equal(res, expected)
- assert_array_equal(res_copy, expected)
- def test_iter_copy_casts_structured():
- # Test a complicated structured dtype for casting, as it requires
- # both multiple steps and a more complex casting setup.
- # Includes a structured -> unstructured (any to object) cast, and many
- # others, which together exercise all steps of the casting machinery one
- # level down, as well as the iterator copy (which uses NpyAuxData clone).
- in_dtype = np.dtype([("a", np.dtype("i,")),
- ("b", np.dtype(">i,<i,>d,S17,>d,(3)f,O,i1"))])
- out_dtype = np.dtype([("a", np.dtype("O")),
- ("b", np.dtype(">i,>i,S17,>d,>U3,(3)d,i1,O"))])
- arr = np.ones(1000, dtype=in_dtype)
- it = np.nditer((arr,), ["buffered", "external_loop", "refs_ok"],
- op_dtypes=[out_dtype], casting="unsafe")
- it_copy = it.copy()
- res1 = next(it)
- del it
- res2 = next(it_copy)
- del it_copy
- expected = arr["a"].astype(out_dtype["a"])
- assert_array_equal(res1["a"], expected)
- assert_array_equal(res2["a"], expected)
- for field in in_dtype["b"].names:
- # Note that the .base avoids the subarray field
- expected = arr["b"][field].astype(out_dtype["b"][field].base)
- assert_array_equal(res1["b"][field], expected)
- assert_array_equal(res2["b"][field], expected)
- def test_iter_allocate_output_simple():
- # Check that the iterator will properly allocate outputs
- # Simple case
- a = arange(6)
- i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']],
- op_dtypes=[None, np.dtype('f4')])
- assert_equal(i.operands[1].shape, a.shape)
- assert_equal(i.operands[1].dtype, np.dtype('f4'))
- def test_iter_allocate_output_buffered_readwrite():
- # Allocated output with buffering + delay_bufalloc
- a = arange(6)
- i = nditer([a, None], ['buffered', 'delay_bufalloc'],
- [['readonly'], ['allocate', 'readwrite']])
- with i:
- i.operands[1][:] = 1
- i.reset()
- for x in i:
- x[1][...] += x[0][...]
- assert_equal(i.operands[1], a+1)
- def test_iter_allocate_output_itorder():
- # The allocated output should match the iteration order
- # C-order input, best iteration order
- a = arange(6, dtype='i4').reshape(2, 3)
- i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']],
- op_dtypes=[None, np.dtype('f4')])
- assert_equal(i.operands[1].shape, a.shape)
- assert_equal(i.operands[1].strides, a.strides)
- assert_equal(i.operands[1].dtype, np.dtype('f4'))
- # F-order input, best iteration order
- a = arange(24, dtype='i4').reshape(2, 3, 4).T
- i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']],
- op_dtypes=[None, np.dtype('f4')])
- assert_equal(i.operands[1].shape, a.shape)
- assert_equal(i.operands[1].strides, a.strides)
- assert_equal(i.operands[1].dtype, np.dtype('f4'))
- # Non-contiguous input, C iteration order
- a = arange(24, dtype='i4').reshape(2, 3, 4).swapaxes(0, 1)
- i = nditer([a, None], [],
- [['readonly'], ['writeonly', 'allocate']],
- order='C',
- op_dtypes=[None, np.dtype('f4')])
- assert_equal(i.operands[1].shape, a.shape)
- assert_equal(i.operands[1].strides, (32, 16, 4))
- assert_equal(i.operands[1].dtype, np.dtype('f4'))
- def test_iter_allocate_output_opaxes():
- # Specifying op_axes should work
- a = arange(24, dtype='i4').reshape(2, 3, 4)
- i = nditer([None, a], [], [['writeonly', 'allocate'], ['readonly']],
- op_dtypes=[np.dtype('u4'), None],
- op_axes=[[1, 2, 0], None])
- assert_equal(i.operands[0].shape, (4, 2, 3))
- assert_equal(i.operands[0].strides, (4, 48, 16))
- assert_equal(i.operands[0].dtype, np.dtype('u4'))
- def test_iter_allocate_output_types_promotion():
- # Check type promotion of automatic outputs
- i = nditer([array([3], dtype='f4'), array([0], dtype='f8'), None], [],
- [['readonly']]*2+[['writeonly', 'allocate']])
- assert_equal(i.dtypes[2], np.dtype('f8'))
- i = nditer([array([3], dtype='i4'), array([0], dtype='f4'), None], [],
- [['readonly']]*2+[['writeonly', 'allocate']])
- assert_equal(i.dtypes[2], np.dtype('f8'))
- i = nditer([array([3], dtype='f4'), array(0, dtype='f8'), None], [],
- [['readonly']]*2+[['writeonly', 'allocate']])
- assert_equal(i.dtypes[2], np.dtype('f4'))
- i = nditer([array([3], dtype='u4'), array(0, dtype='i4'), None], [],
- [['readonly']]*2+[['writeonly', 'allocate']])
- assert_equal(i.dtypes[2], np.dtype('u4'))
- i = nditer([array([3], dtype='u4'), array(-12, dtype='i4'), None], [],
- [['readonly']]*2+[['writeonly', 'allocate']])
- assert_equal(i.dtypes[2], np.dtype('i8'))
- def test_iter_allocate_output_types_byte_order():
- # Verify the rules for byte order changes
- # When there's just one input, the output type exactly matches
- a = array([3], dtype='u4').newbyteorder()
- i = nditer([a, None], [],
- [['readonly'], ['writeonly', 'allocate']])
- assert_equal(i.dtypes[0], i.dtypes[1])
- # With two or more inputs, the output type is in native byte order
- i = nditer([a, a, None], [],
- [['readonly'], ['readonly'], ['writeonly', 'allocate']])
- assert_(i.dtypes[0] != i.dtypes[2])
- assert_equal(i.dtypes[0].newbyteorder('='), i.dtypes[2])
- def test_iter_allocate_output_types_scalar():
- # If the inputs are all scalars, the output should be a scalar
- i = nditer([None, 1, 2.3, np.float32(12), np.complex128(3)], [],
- [['writeonly', 'allocate']] + [['readonly']]*4)
- assert_equal(i.operands[0].dtype, np.dtype('complex128'))
- assert_equal(i.operands[0].ndim, 0)
- def test_iter_allocate_output_subtype():
- # Make sure that the subtype with priority wins
- class MyNDArray(np.ndarray):
- __array_priority__ = 15
- # subclass vs ndarray
- a = np.array([[1, 2], [3, 4]]).view(MyNDArray)
- b = np.arange(4).reshape(2, 2).T
- i = nditer([a, b, None], [],
- [['readonly'], ['readonly'], ['writeonly', 'allocate']])
- assert_equal(type(a), type(i.operands[2]))
- assert_(type(b) is not type(i.operands[2]))
- assert_equal(i.operands[2].shape, (2, 2))
- # If subtypes are disabled, we should get back an ndarray.
- i = nditer([a, b, None], [],
- [['readonly'], ['readonly'],
- ['writeonly', 'allocate', 'no_subtype']])
- assert_equal(type(b), type(i.operands[2]))
- assert_(type(a) is not type(i.operands[2]))
- assert_equal(i.operands[2].shape, (2, 2))
- def test_iter_allocate_output_errors():
- # Check that the iterator will throw errors for bad output allocations
- # Need an input if no output data type is specified
- a = arange(6)
- assert_raises(TypeError, nditer, [a, None], [],
- [['writeonly'], ['writeonly', 'allocate']])
- # Allocated output should be flagged for writing
- assert_raises(ValueError, nditer, [a, None], [],
- [['readonly'], ['allocate', 'readonly']])
- # Allocated output can't have buffering without delayed bufalloc
- assert_raises(ValueError, nditer, [a, None], ['buffered'],
- ['allocate', 'readwrite'])
- # Must specify dtype if there are no inputs (cannot promote existing ones;
- # maybe this should use the 'f4' here, but it does not historically.)
- assert_raises(TypeError, nditer, [None, None], [],
- [['writeonly', 'allocate'],
- ['writeonly', 'allocate']],
- op_dtypes=[None, np.dtype('f4')])
- # If using op_axes, must specify all the axes
- a = arange(24, dtype='i4').reshape(2, 3, 4)
- assert_raises(ValueError, nditer, [a, None], [],
- [['readonly'], ['writeonly', 'allocate']],
- op_dtypes=[None, np.dtype('f4')],
- op_axes=[None, [0, np.newaxis, 1]])
- # If using op_axes, the axes must be within bounds
- assert_raises(ValueError, nditer, [a, None], [],
- [['readonly'], ['writeonly', 'allocate']],
- op_dtypes=[None, np.dtype('f4')],
- op_axes=[None, [0, 3, 1]])
- # If using op_axes, there can't be duplicates
- assert_raises(ValueError, nditer, [a, None], [],
- [['readonly'], ['writeonly', 'allocate']],
- op_dtypes=[None, np.dtype('f4')],
- op_axes=[None, [0, 2, 1, 0]])
- # Not all axes need to be specified when doing a reduction, but a hole
- # in op_axes is an error.
- a = arange(24, dtype='i4').reshape(2, 3, 4)
- assert_raises(ValueError, nditer, [a, None], ["reduce_ok"],
- [['readonly'], ['readwrite', 'allocate']],
- op_dtypes=[None, np.dtype('f4')],
- op_axes=[None, [0, np.newaxis, 2]])
- def test_all_allocated():
- # When no output and no shape is given, `()` is used as shape.
- i = np.nditer([None], op_dtypes=["int64"])
- assert i.operands[0].shape == ()
- assert i.dtypes == (np.dtype("int64"),)
- i = np.nditer([None], op_dtypes=["int64"], itershape=(2, 3, 4))
- assert i.operands[0].shape == (2, 3, 4)
- def test_iter_remove_axis():
- a = arange(24).reshape(2, 3, 4)
- i = nditer(a, ['multi_index'])
- i.remove_axis(1)
- assert_equal([x for x in i], a[:, 0,:].ravel())
- a = a[::-1,:,:]
- i = nditer(a, ['multi_index'])
- i.remove_axis(0)
- assert_equal([x for x in i], a[0,:,:].ravel())
- def test_iter_remove_multi_index_inner_loop():
- # Check that removing multi-index support works
- a = arange(24).reshape(2, 3, 4)
- i = nditer(a, ['multi_index'])
- assert_equal(i.ndim, 3)
- assert_equal(i.shape, (2, 3, 4))
- assert_equal(i.itviews[0].shape, (2, 3, 4))
- # Removing the multi-index tracking causes all dimensions to coalesce
- before = [x for x in i]
- i.remove_multi_index()
- after = [x for x in i]
- assert_equal(before, after)
- assert_equal(i.ndim, 1)
- assert_raises(ValueError, lambda i:i.shape, i)
- assert_equal(i.itviews[0].shape, (24,))
- # Enabling the external loop (below) coalesces everything into one chunk, so there's just one iteration
- i.reset()
- assert_equal(i.itersize, 24)
- assert_equal(i[0].shape, tuple())
- i.enable_external_loop()
- assert_equal(i.itersize, 24)
- assert_equal(i[0].shape, (24,))
- assert_equal(i.value, arange(24))
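- # Illustrative sketch (not part of the suite; helper name hypothetical):
- # 'external_loop' is what turns the per-element 0-d views above into whole
- # 1-D chunks; on a contiguous array it yields a single chunk of the data.
- def _example_external_loop_chunks(a):
-     return [chunk.copy() for chunk in
-             np.nditer(a, ['external_loop'], [['readonly']], order='C')]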
- def test_iter_iterindex():
- # Make sure iterindex works
- buffersize = 5
- a = arange(24).reshape(4, 3, 2)
- for flags in ([], ['buffered']):
- i = nditer(a, flags, buffersize=buffersize)
- assert_equal(iter_iterindices(i), list(range(24)))
- i.iterindex = 2
- assert_equal(iter_iterindices(i), list(range(2, 24)))
- i = nditer(a, flags, order='F', buffersize=buffersize)
- assert_equal(iter_iterindices(i), list(range(24)))
- i.iterindex = 5
- assert_equal(iter_iterindices(i), list(range(5, 24)))
- i = nditer(a[::-1], flags, order='F', buffersize=buffersize)
- assert_equal(iter_iterindices(i), list(range(24)))
- i.iterindex = 9
- assert_equal(iter_iterindices(i), list(range(9, 24)))
- i = nditer(a[::-1, ::-1], flags, order='C', buffersize=buffersize)
- assert_equal(iter_iterindices(i), list(range(24)))
- i.iterindex = 13
- assert_equal(iter_iterindices(i), list(range(13, 24)))
- i = nditer(a[::1, ::-1], flags, buffersize=buffersize)
- assert_equal(iter_iterindices(i), list(range(24)))
- i.iterindex = 23
- assert_equal(iter_iterindices(i), list(range(23, 24)))
- i.reset()
- i.iterindex = 2
- assert_equal(iter_iterindices(i), list(range(2, 24)))
- def test_iter_iterrange():
- # Make sure getting and resetting the iterrange works
- buffersize = 5
- a = arange(24, dtype='i4').reshape(4, 3, 2)
- a_fort = a.ravel(order='F')
- i = nditer(a, ['ranged'], ['readonly'], order='F',
- buffersize=buffersize)
- assert_equal(i.iterrange, (0, 24))
- assert_equal([x[()] for x in i], a_fort)
- for r in [(0, 24), (1, 2), (3, 24), (5, 5), (0, 20), (23, 24)]:
- i.iterrange = r
- assert_equal(i.iterrange, r)
- assert_equal([x[()] for x in i], a_fort[r[0]:r[1]])
- i = nditer(a, ['ranged', 'buffered'], ['readonly'], order='F',
- op_dtypes='f8', buffersize=buffersize)
- assert_equal(i.iterrange, (0, 24))
- assert_equal([x[()] for x in i], a_fort)
- for r in [(0, 24), (1, 2), (3, 24), (5, 5), (0, 20), (23, 24)]:
- i.iterrange = r
- assert_equal(i.iterrange, r)
- assert_equal([x[()] for x in i], a_fort[r[0]:r[1]])
- def get_array(i):
- val = np.array([], dtype='f8')
- for x in i:
- val = np.concatenate((val, x))
- return val
- i = nditer(a, ['ranged', 'buffered', 'external_loop'],
- ['readonly'], order='F',
- op_dtypes='f8', buffersize=buffersize)
- assert_equal(i.iterrange, (0, 24))
- assert_equal(get_array(i), a_fort)
- for r in [(0, 24), (1, 2), (3, 24), (5, 5), (0, 20), (23, 24)]:
- i.iterrange = r
- assert_equal(i.iterrange, r)
- assert_equal(get_array(i), a_fort[r[0]:r[1]])
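- # Illustrative sketch (not from the original tests; helper name
- # hypothetical): 'ranged' makes it easy to walk the same flat iteration
- # space in separate sub-ranges, e.g. to process an array chunk by chunk.
- def _example_ranged_sum(a, nchunks=2):
-     it = np.nditer(a, ['ranged', 'buffered', 'external_loop'],
-                    [['readonly']], buffersize=8)
-     size = it.itersize
-     total = 0.0
-     for k in range(nchunks):
-         # assigning iterrange repositions the iterator at the range start
-         it.iterrange = (size * k // nchunks, size * (k + 1) // nchunks)
-         for chunk in it:
-             total += chunk.sum()
-     return total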
- def test_iter_buffering():
- # Test buffering with several buffer sizes and types
- arrays = []
- # F-order swapped array
- arrays.append(np.arange(24,
- dtype='c16').reshape(2, 3, 4).T.newbyteorder().byteswap())
- # Contiguous 1-dimensional array
- arrays.append(np.arange(10, dtype='f4'))
- # Unaligned array
- a = np.zeros((4*16+1,), dtype='i1')[1:]
- a.dtype = 'i4'
- a[:] = np.arange(16, dtype='i4')
- arrays.append(a)
- # 4-D F-order array
- arrays.append(np.arange(120, dtype='i4').reshape(5, 3, 2, 4).T)
- for a in arrays:
- for buffersize in (1, 2, 3, 5, 8, 11, 16, 1024):
- vals = []
- i = nditer(a, ['buffered', 'external_loop'],
- [['readonly', 'nbo', 'aligned']],
- order='C',
- casting='equiv',
- buffersize=buffersize)
- while not i.finished:
- assert_(i[0].size <= buffersize)
- vals.append(i[0].copy())
- i.iternext()
- assert_equal(np.concatenate(vals), a.ravel(order='C'))
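- # Illustrative sketch (not part of the suite; helper name hypothetical and
- # it assumes the input casts safely to float64): with 'buffered' +
- # 'external_loop' each chunk handed back is native-byte-order, aligned and
- # contiguous even when the underlying array is swapped or unaligned.
- def _example_buffered_mean(a):
-     it = np.nditer(a, ['buffered', 'external_loop'],
-                    [['readonly']], op_dtypes='f8', buffersize=64)
-     total = 0.0
-     n = 0
-     for chunk in it:
-         total += chunk.sum()
-         n += chunk.size
-     return total / n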
- def test_iter_write_buffering():
- # Test that buffering of writes is working
- # F-order swapped array
- a = np.arange(24).reshape(2, 3, 4).T.newbyteorder().byteswap()
- i = nditer(a, ['buffered'],
- [['readwrite', 'nbo', 'aligned']],
- casting='equiv',
- order='C',
- buffersize=16)
- x = 0
- with i:
- while not i.finished:
- i[0] = x
- x += 1
- i.iternext()
- assert_equal(a.ravel(order='C'), np.arange(24))
- def test_iter_buffering_delayed_alloc():
- # Test that delaying buffer allocation works
- a = np.arange(6)
- b = np.arange(1, dtype='f4')
- i = nditer([a, b], ['buffered', 'delay_bufalloc', 'multi_index', 'reduce_ok'],
- ['readwrite'],
- casting='unsafe',
- op_dtypes='f4')
- assert_(i.has_delayed_bufalloc)
- assert_raises(ValueError, lambda i:i.multi_index, i)
- assert_raises(ValueError, lambda i:i[0], i)
- assert_raises(ValueError, lambda i:i[0:2], i)
- def assign_iter(i):
- i[0] = 0
- assert_raises(ValueError, assign_iter, i)
- i.reset()
- assert_(not i.has_delayed_bufalloc)
- assert_equal(i.multi_index, (0,))
- with i:
- assert_equal(i[0], 0)
- i[1] = 1
- assert_equal(i[0:2], [0, 1])
- assert_equal([[x[0][()], x[1][()]] for x in i], list(zip(range(6), [1]*6)))
- def test_iter_buffered_cast_simple():
- # Test that buffering can handle a simple cast
- a = np.arange(10, dtype='f4')
- i = nditer(a, ['buffered', 'external_loop'],
- [['readwrite', 'nbo', 'aligned']],
- casting='same_kind',
- op_dtypes=[np.dtype('f8')],
- buffersize=3)
- with i:
- for v in i:
- v[...] *= 2
- assert_equal(a, 2*np.arange(10, dtype='f4'))
- def test_iter_buffered_cast_byteswapped():
- # Test that buffering can handle a cast which requires swap->cast->swap
- a = np.arange(10, dtype='f4').newbyteorder().byteswap()
- i = nditer(a, ['buffered', 'external_loop'],
- [['readwrite', 'nbo', 'aligned']],
- casting='same_kind',
- op_dtypes=[np.dtype('f8').newbyteorder()],
- buffersize=3)
- with i:
- for v in i:
- v[...] *= 2
- assert_equal(a, 2*np.arange(10, dtype='f4'))
- with suppress_warnings() as sup:
- sup.filter(np.ComplexWarning)
- a = np.arange(10, dtype='f8').newbyteorder().byteswap()
- i = nditer(a, ['buffered', 'external_loop'],
- [['readwrite', 'nbo', 'aligned']],
- casting='unsafe',
- op_dtypes=[np.dtype('c8').newbyteorder()],
- buffersize=3)
- with i:
- for v in i:
- v[...] *= 2
- assert_equal(a, 2*np.arange(10, dtype='f8'))
- def test_iter_buffered_cast_byteswapped_complex():
- # Test that buffering can handle a cast which requires swap->cast->copy
- a = np.arange(10, dtype='c8').newbyteorder().byteswap()
- a += 2j
- i = nditer(a, ['buffered', 'external_loop'],
- [['readwrite', 'nbo', 'aligned']],
- casting='same_kind',
- op_dtypes=[np.dtype('c16')],
- buffersize=3)
- with i:
- for v in i:
- v[...] *= 2
- assert_equal(a, 2*np.arange(10, dtype='c8') + 4j)
- a = np.arange(10, dtype='c8')
- a += 2j
- i = nditer(a, ['buffered', 'external_loop'],
- [['readwrite', 'nbo', 'aligned']],
- casting='same_kind',
- op_dtypes=[np.dtype('c16').newbyteorder()],
- buffersize=3)
- with i:
- for v in i:
- v[...] *= 2
- assert_equal(a, 2*np.arange(10, dtype='c8') + 4j)
- a = np.arange(10, dtype=np.clongdouble).newbyteorder().byteswap()
- a += 2j
- i = nditer(a, ['buffered', 'external_loop'],
- [['readwrite', 'nbo', 'aligned']],
- casting='same_kind',
- op_dtypes=[np.dtype('c16')],
- buffersize=3)
- with i:
- for v in i:
- v[...] *= 2
- assert_equal(a, 2*np.arange(10, dtype=np.clongdouble) + 4j)
- a = np.arange(10, dtype=np.longdouble).newbyteorder().byteswap()
- i = nditer(a, ['buffered', 'external_loop'],
- [['readwrite', 'nbo', 'aligned']],
- casting='same_kind',
- op_dtypes=[np.dtype('f4')],
- buffersize=7)
- with i:
- for v in i:
- v[...] *= 2
- assert_equal(a, 2*np.arange(10, dtype=np.longdouble))
- def test_iter_buffered_cast_structured_type():
- # Tests buffering of structured types
- # simple -> struct type (duplicates the value)
- sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')]
- a = np.arange(3, dtype='f4') + 0.5
- i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
- casting='unsafe',
- op_dtypes=sdt)
- vals = [np.array(x) for x in i]
- assert_equal(vals[0]['a'], 0.5)
- assert_equal(vals[0]['b'], 0)
- assert_equal(vals[0]['c'], [[(0.5)]*3]*2)
- assert_equal(vals[0]['d'], 0.5)
- assert_equal(vals[1]['a'], 1.5)
- assert_equal(vals[1]['b'], 1)
- assert_equal(vals[1]['c'], [[(1.5)]*3]*2)
- assert_equal(vals[1]['d'], 1.5)
- assert_equal(vals[0].dtype, np.dtype(sdt))
- # object -> struct type
- sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')]
- a = np.zeros((3,), dtype='O')
- a[0] = (0.5, 0.5, [[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]], 0.5)
- a[1] = (1.5, 1.5, [[1.5, 1.5, 1.5], [1.5, 1.5, 1.5]], 1.5)
- a[2] = (2.5, 2.5, [[2.5, 2.5, 2.5], [2.5, 2.5, 2.5]], 2.5)
- if HAS_REFCOUNT:
- rc = sys.getrefcount(a[0])
- i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
- casting='unsafe',
- op_dtypes=sdt)
- vals = [x.copy() for x in i]
- assert_equal(vals[0]['a'], 0.5)
- assert_equal(vals[0]['b'], 0)
- assert_equal(vals[0]['c'], [[(0.5)]*3]*2)
- assert_equal(vals[0]['d'], 0.5)
- assert_equal(vals[1]['a'], 1.5)
- assert_equal(vals[1]['b'], 1)
- assert_equal(vals[1]['c'], [[(1.5)]*3]*2)
- assert_equal(vals[1]['d'], 1.5)
- assert_equal(vals[0].dtype, np.dtype(sdt))
- vals, i, x = [None]*3
- if HAS_REFCOUNT:
- assert_equal(sys.getrefcount(a[0]), rc)
- # single-field struct type -> simple
- sdt = [('a', 'f4')]
- a = np.array([(5.5,), (8,)], dtype=sdt)
- i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
- casting='unsafe',
- op_dtypes='i4')
- assert_equal([x_[()] for x_ in i], [5, 8])
- # make sure multi-field struct type -> simple doesn't work
- sdt = [('a', 'f4'), ('b', 'i8'), ('d', 'O')]
- a = np.array([(5.5, 7, 'test'), (8, 10, 11)], dtype=sdt)
- assert_raises(TypeError, lambda: (
- nditer(a, ['buffered', 'refs_ok'], ['readonly'],
- casting='unsafe',
- op_dtypes='i4')))
- # struct type -> struct type (field-wise copy)
- sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', 'O')]
- sdt2 = [('d', 'u2'), ('a', 'O'), ('b', 'f8')]
- a = np.array([(1, 2, 3), (4, 5, 6)], dtype=sdt1)
- i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
- casting='unsafe',
- op_dtypes=sdt2)
- assert_equal(i[0].dtype, np.dtype(sdt2))
- assert_equal([np.array(x_) for x_ in i],
- [np.array((1, 2, 3), dtype=sdt2),
- np.array((4, 5, 6), dtype=sdt2)])
- def test_iter_buffered_cast_structured_type_failure_with_cleanup():
- # make sure struct type -> struct type with different
- # number of fields fails
- sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', 'O')]
- sdt2 = [('b', 'O'), ('a', 'f8')]
- a = np.array([(1, 2, 3), (4, 5, 6)], dtype=sdt1)
- for intent in ["readwrite", "readonly", "writeonly"]:
- # This test was initially designed to exercise an error at a different
- # place, but it now raises earlier due to the cast not being possible:
- # `assert np.can_cast(a.dtype, sdt2, casting="unsafe")` fails.
- # Without a faulty DType, there is probably no reliable way to trigger
- # the originally tested behaviour.
- simple_arr = np.array([1, 2], dtype="i,i") # requires clean up
- with pytest.raises(TypeError):
- nditer((simple_arr, a), ['buffered', 'refs_ok'], [intent, intent],
- casting='unsafe', op_dtypes=["f,f", sdt2])
- def test_buffered_cast_error_paths():
- with pytest.raises(ValueError):
- # The `S1` input cannot be cast into the `i` (int) buffer
- np.nditer((np.array("a", dtype="S1"),), op_dtypes=["i"],
- casting="unsafe", flags=["buffered"])
- # The `S1` buffer is cast back into the `i` output when it is flushed
- it = np.nditer((np.array(1, dtype="i"),), op_dtypes=["S1"],
- op_flags=["writeonly"], casting="unsafe", flags=["buffered"])
- with pytest.raises(ValueError):
- with it:
- buf = next(it)
- buf[...] = "a" # cannot be converted to int.
- @pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess")
- @pytest.mark.skipif(not HAS_REFCOUNT, reason="PyPy seems to not hit this.")
- def test_buffered_cast_error_paths_unraisable():
- # The following gives an unraisable error. Pytest sometimes captures that
- # (depending on the Python and/or pytest version). With Python >= 3.8 this
- # can probably be cleaned up in the future to check for
- # pytest.PytestUnraisableExceptionWarning:
- code = textwrap.dedent("""
- import numpy as np
-
- it = np.nditer((np.array(1, dtype="i"),), op_dtypes=["S1"],
- op_flags=["writeonly"], casting="unsafe", flags=["buffered"])
- buf = next(it)
- buf[...] = "a"
- del buf, it # Flushing only happens during deallocate right now.
- """)
- res = subprocess.check_output([sys.executable, "-c", code],
- stderr=subprocess.STDOUT, text=True)
- assert "ValueError" in res
- def test_iter_buffered_cast_subarray():
- # Tests buffering of subarrays
- # one element -> many (copies it to all)
- sdt1 = [('a', 'f4')]
- sdt2 = [('a', 'f8', (3, 2, 2))]
- a = np.zeros((6,), dtype=sdt1)
- a['a'] = np.arange(6)
- i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
- casting='unsafe',
- op_dtypes=sdt2)
- assert_equal(i[0].dtype, np.dtype(sdt2))
- for x, count in zip(i, list(range(6))):
- assert_(np.all(x['a'] == count))
- # one element -> many -> back (copies it to all)
- sdt1 = [('a', 'O', (1, 1))]
- sdt2 = [('a', 'O', (3, 2, 2))]
- a = np.zeros((6,), dtype=sdt1)
- a['a'][:, 0, 0] = np.arange(6)
- i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'],
- casting='unsafe',
- op_dtypes=sdt2)
- with i:
- assert_equal(i[0].dtype, np.dtype(sdt2))
- count = 0
- for x in i:
- assert_(np.all(x['a'] == count))
- x['a'][0] += 2
- count += 1
- assert_equal(a['a'], np.arange(6).reshape(6, 1, 1)+2)
- # many -> one element -> back (copies just element 0)
- sdt1 = [('a', 'O', (3, 2, 2))]
- sdt2 = [('a', 'O', (1,))]
- a = np.zeros((6,), dtype=sdt1)
- a['a'][:, 0, 0, 0] = np.arange(6)
- i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'],
- casting='unsafe',
- op_dtypes=sdt2)
- with i:
- assert_equal(i[0].dtype, np.dtype(sdt2))
- count = 0
- for x in i:
- assert_equal(x['a'], count)
- x['a'] += 2
- count += 1
- assert_equal(a['a'], np.arange(6).reshape(6, 1, 1, 1)*np.ones((1, 3, 2, 2))+2)
- # many -> one element -> back (copies just element 0)
- sdt1 = [('a', 'f8', (3, 2, 2))]
- sdt2 = [('a', 'O', (1,))]
- a = np.zeros((6,), dtype=sdt1)
- a['a'][:, 0, 0, 0] = np.arange(6)
- i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
- casting='unsafe',
- op_dtypes=sdt2)
- assert_equal(i[0].dtype, np.dtype(sdt2))
- count = 0
- for x in i:
- assert_equal(x['a'], count)
- count += 1
- # many -> one element (copies just element 0)
- sdt1 = [('a', 'O', (3, 2, 2))]
- sdt2 = [('a', 'f4', (1,))]
- a = np.zeros((6,), dtype=sdt1)
- a['a'][:, 0, 0, 0] = np.arange(6)
- i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
- casting='unsafe',
- op_dtypes=sdt2)
- assert_equal(i[0].dtype, np.dtype(sdt2))
- count = 0
- for x in i:
- assert_equal(x['a'], count)
- count += 1
- # many -> matching shape (straightforward copy)
- sdt1 = [('a', 'O', (3, 2, 2))]
- sdt2 = [('a', 'f4', (3, 2, 2))]
- a = np.zeros((6,), dtype=sdt1)
- a['a'] = np.arange(6*3*2*2).reshape(6, 3, 2, 2)
- i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
- casting='unsafe',
- op_dtypes=sdt2)
- assert_equal(i[0].dtype, np.dtype(sdt2))
- count = 0
- for x in i:
- assert_equal(x['a'], a[count]['a'])
- count += 1
- # vector -> smaller vector (truncates)
- sdt1 = [('a', 'f8', (6,))]
- sdt2 = [('a', 'f4', (2,))]
- a = np.zeros((6,), dtype=sdt1)
- a['a'] = np.arange(6*6).reshape(6, 6)
- i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
- casting='unsafe',
- op_dtypes=sdt2)
- assert_equal(i[0].dtype, np.dtype(sdt2))
- count = 0
- for x in i:
- assert_equal(x['a'], a[count]['a'][:2])
- count += 1
- # vector -> bigger vector (pads with zeros)
- sdt1 = [('a', 'f8', (2,))]
- sdt2 = [('a', 'f4', (6,))]
- a = np.zeros((6,), dtype=sdt1)
- a['a'] = np.arange(6*2).reshape(6, 2)
- i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
- casting='unsafe',
- op_dtypes=sdt2)
- assert_equal(i[0].dtype, np.dtype(sdt2))
- count = 0
- for x in i:
- assert_equal(x['a'][:2], a[count]['a'])
- assert_equal(x['a'][2:], [0, 0, 0, 0])
- count += 1
- # vector -> matrix (broadcasts)
- sdt1 = [('a', 'f8', (2,))]
- sdt2 = [('a', 'f4', (2, 2))]
- a = np.zeros((6,), dtype=sdt1)
- a['a'] = np.arange(6*2).reshape(6, 2)
- i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
- casting='unsafe',
- op_dtypes=sdt2)
- assert_equal(i[0].dtype, np.dtype(sdt2))
- count = 0
- for x in i:
- assert_equal(x['a'][0], a[count]['a'])
- assert_equal(x['a'][1], a[count]['a'])
- count += 1
- # vector -> matrix (broadcasts and zero-pads)
- sdt1 = [('a', 'f8', (2, 1))]
- sdt2 = [('a', 'f4', (3, 2))]
- a = np.zeros((6,), dtype=sdt1)
- a['a'] = np.arange(6*2).reshape(6, 2, 1)
- i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
- casting='unsafe',
- op_dtypes=sdt2)
- assert_equal(i[0].dtype, np.dtype(sdt2))
- count = 0
- for x in i:
- assert_equal(x['a'][:2, 0], a[count]['a'][:, 0])
- assert_equal(x['a'][:2, 1], a[count]['a'][:, 0])
- assert_equal(x['a'][2,:], [0, 0])
- count += 1
- # matrix -> matrix (truncates and zero-pads)
- sdt1 = [('a', 'f8', (2, 3))]
- sdt2 = [('a', 'f4', (3, 2))]
- a = np.zeros((6,), dtype=sdt1)
- a['a'] = np.arange(6*2*3).reshape(6, 2, 3)
- i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
- casting='unsafe',
- op_dtypes=sdt2)
- assert_equal(i[0].dtype, np.dtype(sdt2))
- count = 0
- for x in i:
- assert_equal(x['a'][:2, 0], a[count]['a'][:, 0])
- assert_equal(x['a'][:2, 1], a[count]['a'][:, 1])
- assert_equal(x['a'][2,:], [0, 0])
- count += 1
- def test_iter_buffering_badwriteback():
- # Writing back from a buffer cannot combine elements
- # a needs write buffering, but has a broadcast dimension
- a = np.arange(6).reshape(2, 3, 1)
- b = np.arange(12).reshape(2, 3, 2)
- assert_raises(ValueError, nditer, [a, b],
- ['buffered', 'external_loop'],
- [['readwrite'], ['writeonly']],
- order='C')
- # But if a is readonly, it's fine
- nditer([a, b], ['buffered', 'external_loop'],
- [['readonly'], ['writeonly']],
- order='C')
- # If a has just one element, it's fine too (constant 0 stride, a reduction)
- a = np.arange(1).reshape(1, 1, 1)
- nditer([a, b], ['buffered', 'external_loop', 'reduce_ok'],
- [['readwrite'], ['writeonly']],
- order='C')
- # check that it fails on other dimensions too
- a = np.arange(6).reshape(1, 3, 2)
- assert_raises(ValueError, nditer, [a, b],
- ['buffered', 'external_loop'],
- [['readwrite'], ['writeonly']],
- order='C')
- a = np.arange(4).reshape(2, 1, 2)
- assert_raises(ValueError, nditer, [a, b],
- ['buffered', 'external_loop'],
- [['readwrite'], ['writeonly']],
- order='C')
- def test_iter_buffering_string():
- # Safe casting disallows shrinking strings
- a = np.array(['abc', 'a', 'abcd'], dtype=np.bytes_)
- assert_equal(a.dtype, np.dtype('S4'))
- assert_raises(TypeError, nditer, a, ['buffered'], ['readonly'],
- op_dtypes='S2')
- i = nditer(a, ['buffered'], ['readonly'], op_dtypes='S6')
- assert_equal(i[0], b'abc')
- assert_equal(i[0].dtype, np.dtype('S6'))
- a = np.array(['abc', 'a', 'abcd'], dtype=np.unicode_)
- assert_equal(a.dtype, np.dtype('U4'))
- assert_raises(TypeError, nditer, a, ['buffered'], ['readonly'],
- op_dtypes='U2')
- i = nditer(a, ['buffered'], ['readonly'], op_dtypes='U6')
- assert_equal(i[0], 'abc')
- assert_equal(i[0].dtype, np.dtype('U6'))
- def test_iter_buffering_growinner():
- # Test that the inner loop grows when no buffering is needed
- a = np.arange(30)
- i = nditer(a, ['buffered', 'growinner', 'external_loop'],
- buffersize=5)
- # Should end up with just one inner loop here
- assert_equal(i[0].size, a.size)
- @pytest.mark.slow
- def test_iter_buffered_reduce_reuse():
- # large enough array for all views, including negative strides.
- a = np.arange(2*3**5)[3**5:3**5+1]
- flags = ['buffered', 'delay_bufalloc', 'multi_index', 'reduce_ok', 'refs_ok']
- op_flags = [('readonly',), ('readwrite', 'allocate')]
- op_axes_list = [[(0, 1, 2), (0, 1, -1)], [(0, 1, 2), (0, -1, -1)]]
- # wrong dtype to force buffering
- op_dtypes = [float, a.dtype]
- def get_params():
- for xs in range(-3**2, 3**2 + 1):
- for ys in range(xs, 3**2 + 1):
- for op_axes in op_axes_list:
- # The last stride is the reduced (inner) one, so its exact value is
- # not important for this test.
- strides = (xs * a.itemsize, ys * a.itemsize, a.itemsize)
- arr = np.lib.stride_tricks.as_strided(a, (3, 3, 3), strides)
- for skip in [0, 1]:
- yield arr, op_axes, skip
- for arr, op_axes, skip in get_params():
- nditer2 = np.nditer([arr.copy(), None],
- op_axes=op_axes, flags=flags, op_flags=op_flags,
- op_dtypes=op_dtypes)
- with nditer2:
- nditer2.operands[-1][...] = 0
- nditer2.reset()
- nditer2.iterindex = skip
- for (a2_in, b2_in) in nditer2:
- b2_in += a2_in.astype(np.int_)
- comp_res = nditer2.operands[-1]
- for bufsize in range(0, 3**3):
- nditer1 = np.nditer([arr, None],
- op_axes=op_axes, flags=flags, op_flags=op_flags,
- buffersize=bufsize, op_dtypes=op_dtypes)
- with nditer1:
- nditer1.operands[-1][...] = 0
- nditer1.reset()
- nditer1.iterindex = skip
- for (a1_in, b1_in) in nditer1:
- b1_in += a1_in.astype(np.int_)
- res = nditer1.operands[-1]
- assert_array_equal(res, comp_res)
- def test_iter_no_broadcast():
- # Test that the no_broadcast flag works
- a = np.arange(24).reshape(2, 3, 4)
- b = np.arange(6).reshape(2, 3, 1)
- c = np.arange(12).reshape(3, 4)
- nditer([a, b, c], [],
- [['readonly', 'no_broadcast'],
- ['readonly'], ['readonly']])
- assert_raises(ValueError, nditer, [a, b, c], [],
- [['readonly'], ['readonly', 'no_broadcast'], ['readonly']])
- assert_raises(ValueError, nditer, [a, b, c], [],
- [['readonly'], ['readonly'], ['readonly', 'no_broadcast']])
- class TestIterNested:
- def test_basic(self):
- # Test nested iteration basic usage
- a = arange(12).reshape(2, 3, 2)
- i, j = np.nested_iters(a, [[0], [1, 2]])
- vals = [list(j) for _ in i]
- assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]])
- i, j = np.nested_iters(a, [[0, 1], [2]])
- vals = [list(j) for _ in i]
- assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]])
- i, j = np.nested_iters(a, [[0, 2], [1]])
- vals = [list(j) for _ in i]
- assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]])
- def test_reorder(self):
- # Test nested iteration axis reordering
- a = arange(12).reshape(2, 3, 2)
- # In 'K' order (default), it gets reordered
- i, j = np.nested_iters(a, [[0], [2, 1]])
- vals = [list(j) for _ in i]
- assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]])
- i, j = np.nested_iters(a, [[1, 0], [2]])
- vals = [list(j) for _ in i]
- assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]])
- i, j = np.nested_iters(a, [[2, 0], [1]])
- vals = [list(j) for _ in i]
- assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]])
- # In 'C' order, it doesn't
- i, j = np.nested_iters(a, [[0], [2, 1]], order='C')
- vals = [list(j) for _ in i]
- assert_equal(vals, [[0, 2, 4, 1, 3, 5], [6, 8, 10, 7, 9, 11]])
- i, j = np.nested_iters(a, [[1, 0], [2]], order='C')
- vals = [list(j) for _ in i]
- assert_equal(vals, [[0, 1], [6, 7], [2, 3], [8, 9], [4, 5], [10, 11]])
- i, j = np.nested_iters(a, [[2, 0], [1]], order='C')
- vals = [list(j) for _ in i]
- assert_equal(vals, [[0, 2, 4], [6, 8, 10], [1, 3, 5], [7, 9, 11]])
- def test_flip_axes(self):
- # Test nested iteration with negative axes
- a = arange(12).reshape(2, 3, 2)[::-1, ::-1, ::-1]
- # In 'K' order (default), the axes all get flipped
- i, j = np.nested_iters(a, [[0], [1, 2]])
- vals = [list(j) for _ in i]
- assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]])
- i, j = np.nested_iters(a, [[0, 1], [2]])
- vals = [list(j) for _ in i]
- assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]])
- i, j = np.nested_iters(a, [[0, 2], [1]])
- vals = [list(j) for _ in i]
- assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]])
- # In 'C' order, flipping axes is disabled
- i, j = np.nested_iters(a, [[0], [1, 2]], order='C')
- vals = [list(j) for _ in i]
- assert_equal(vals, [[11, 10, 9, 8, 7, 6], [5, 4, 3, 2, 1, 0]])
- i, j = np.nested_iters(a, [[0, 1], [2]], order='C')
- vals = [list(j) for _ in i]
- assert_equal(vals, [[11, 10], [9, 8], [7, 6], [5, 4], [3, 2], [1, 0]])
- i, j = np.nested_iters(a, [[0, 2], [1]], order='C')
- vals = [list(j) for _ in i]
- assert_equal(vals, [[11, 9, 7], [10, 8, 6], [5, 3, 1], [4, 2, 0]])
- def test_broadcast(self):
- # Test nested iteration with broadcasting
- a = arange(2).reshape(2, 1)
- b = arange(3).reshape(1, 3)
- i, j = np.nested_iters([a, b], [[0], [1]])
- vals = [list(j) for _ in i]
- assert_equal(vals, [[[0, 0], [0, 1], [0, 2]], [[1, 0], [1, 1], [1, 2]]])
- i, j = np.nested_iters([a, b], [[1], [0]])
- vals = [list(j) for _ in i]
- assert_equal(vals, [[[0, 0], [1, 0]], [[0, 1], [1, 1]], [[0, 2], [1, 2]]])
- def test_dtype_copy(self):
- # Test nested iteration with a copy to change dtype
- # copy
- a = arange(6, dtype='i4').reshape(2, 3)
- i, j = np.nested_iters(a, [[0], [1]],
- op_flags=['readonly', 'copy'],
- op_dtypes='f8')
- assert_equal(j[0].dtype, np.dtype('f8'))
- vals = [list(j) for _ in i]
- assert_equal(vals, [[0, 1, 2], [3, 4, 5]])
- vals = None
- # writebackifcopy - using context manager
- a = arange(6, dtype='f4').reshape(2, 3)
- i, j = np.nested_iters(a, [[0], [1]],
- op_flags=['readwrite', 'updateifcopy'],
- casting='same_kind',
- op_dtypes='f8')
- with i, j:
- assert_equal(j[0].dtype, np.dtype('f8'))
- for x in i:
- for y in j:
- y[...] += 1
- assert_equal(a, [[0, 1, 2], [3, 4, 5]])
- assert_equal(a, [[1, 2, 3], [4, 5, 6]])
- # writebackifcopy - using close()
- a = arange(6, dtype='f4').reshape(2, 3)
- i, j = np.nested_iters(a, [[0], [1]],
- op_flags=['readwrite', 'updateifcopy'],
- casting='same_kind',
- op_dtypes='f8')
- assert_equal(j[0].dtype, np.dtype('f8'))
- for x in i:
- for y in j:
- y[...] += 1
- assert_equal(a, [[0, 1, 2], [3, 4, 5]])
- i.close()
- j.close()
- assert_equal(a, [[1, 2, 3], [4, 5, 6]])
- def test_dtype_buffered(self):
- # Test nested iteration with buffering to change dtype
- a = arange(6, dtype='f4').reshape(2, 3)
- i, j = np.nested_iters(a, [[0], [1]],
- flags=['buffered'],
- op_flags=['readwrite'],
- casting='same_kind',
- op_dtypes='f8')
- assert_equal(j[0].dtype, np.dtype('f8'))
- for x in i:
- for y in j:
- y[...] += 1
- assert_equal(a, [[1, 2, 3], [4, 5, 6]])
- def test_0d(self):
- a = np.arange(12).reshape(2, 3, 2)
- i, j = np.nested_iters(a, [[], [1, 0, 2]])
- vals = [list(j) for _ in i]
- assert_equal(vals, [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]])
- i, j = np.nested_iters(a, [[1, 0, 2], []])
- vals = [list(j) for _ in i]
- assert_equal(vals, [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11]])
- i, j, k = np.nested_iters(a, [[2, 0], [], [1]])
- vals = []
- for x in i:
- for y in j:
- vals.append([z for z in k])
- assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]])
- def test_iter_nested_iters_dtype_buffered(self):
- # Test nested iteration with buffering to change dtype, using the context managers
- a = arange(6, dtype='f4').reshape(2, 3)
- i, j = np.nested_iters(a, [[0], [1]],
- flags=['buffered'],
- op_flags=['readwrite'],
- casting='same_kind',
- op_dtypes='f8')
- with i, j:
- assert_equal(j[0].dtype, np.dtype('f8'))
- for x in i:
- for y in j:
- y[...] += 1
- assert_equal(a, [[1, 2, 3], [4, 5, 6]])
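- # Illustrative sketch (not one of the class's tests; helper name
- # hypothetical and a 2-D input is assumed): nested_iters splits the axes
- # between an outer and an inner iterator, so per-row work groups naturally.
- def _example_nested_row_sums(a):
-     i, j = np.nested_iters(a, [[0], [1]])
-     return [sum(x[()] for x in j) for _ in i]
- # For arange(6).reshape(2, 3) this gives [3, 12], matching a.sum(axis=1).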
- def test_iter_reduction_error():
- a = np.arange(6)
- assert_raises(ValueError, nditer, [a, None], [],
- [['readonly'], ['readwrite', 'allocate']],
- op_axes=[[0], [-1]])
- a = np.arange(6).reshape(2, 3)
- assert_raises(ValueError, nditer, [a, None], ['external_loop'],
- [['readonly'], ['readwrite', 'allocate']],
- op_axes=[[0, 1], [-1, -1]])
- def test_iter_reduction():
- # Test doing reductions with the iterator
- a = np.arange(6)
- i = nditer([a, None], ['reduce_ok'],
- [['readonly'], ['readwrite', 'allocate']],
- op_axes=[[0], [-1]])
- # Need to initialize the output operand to the additive identity (0)
- with i:
- i.operands[1][...] = 0
- # Do the reduction
- for x, y in i:
- y[...] += x
- # Since no axes were specified, should have allocated a scalar
- assert_equal(i.operands[1].ndim, 0)
- assert_equal(i.operands[1], np.sum(a))
- a = np.arange(6).reshape(2, 3)
- i = nditer([a, None], ['reduce_ok', 'external_loop'],
- [['readonly'], ['readwrite', 'allocate']],
- op_axes=[[0, 1], [-1, -1]])
- # Need to initialize the output operand to the additive identity (0)
- with i:
- i.operands[1][...] = 0
- # Reduction shape/strides for the output
- assert_equal(i[1].shape, (6,))
- assert_equal(i[1].strides, (0,))
- # Do the reduction
- for x, y in i:
- # Use a for loop instead of ``y[...] += x``
- # (equivalent to ``y[...] = y[...].copy() + x``),
- # because y has the zero strides that implement the reduction
- for j in range(len(y)):
- y[j] += x[j]
- # Since no axes were specified, should have allocated a scalar
- assert_equal(i.operands[1].ndim, 0)
- assert_equal(i.operands[1], np.sum(a))
- # This is a tricky reduction case for the buffering double loop
- # to handle
- a = np.ones((2, 3, 5))
- it1 = nditer([a, None], ['reduce_ok', 'external_loop'],
- [['readonly'], ['readwrite', 'allocate']],
- op_axes=[None, [0, -1, 1]])
- it2 = nditer([a, None], ['reduce_ok', 'external_loop',
- 'buffered', 'delay_bufalloc'],
- [['readonly'], ['readwrite', 'allocate']],
- op_axes=[None, [0, -1, 1]], buffersize=10)
- with it1, it2:
- it1.operands[1].fill(0)
- it2.operands[1].fill(0)
- it2.reset()
- for x in it1:
- x[1][...] += x[0]
- for x in it2:
- x[1][...] += x[0]
- assert_equal(it1.operands[1], it2.operands[1])
- assert_equal(it2.operands[1].sum(), a.size)
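- # Illustrative sketch (not from the original tests; helper name
- # hypothetical and a is assumed to have at least one dimension): the same
- # reduce_ok + op_axes machinery written as a helper mirroring a.sum(axis=-1).
- def _example_sum_last_axis(a):
-     it = nditer([a, None], ['reduce_ok'],
-                 [['readonly'], ['readwrite', 'allocate']],
-                 op_axes=[None, list(range(a.ndim - 1)) + [-1]])
-     with it:
-         it.operands[1][...] = 0
-         for x, y in it:
-             y[...] += x
-         return it.operands[1]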
- def test_iter_buffering_reduction():
- # Test doing buffered reductions with the iterator
- a = np.arange(6)
- b = np.array(0., dtype='f8').byteswap().newbyteorder()
- i = nditer([a, b], ['reduce_ok', 'buffered'],
- [['readonly'], ['readwrite', 'nbo']],
- op_axes=[[0], [-1]])
- with i:
- assert_equal(i[1].dtype, np.dtype('f8'))
- assert_(i[1].dtype != b.dtype)
- # Do the reduction
- for x, y in i:
- y[...] += x
- # Since no axes were specified, should have allocated a scalar
- assert_equal(b, np.sum(a))
- a = np.arange(6).reshape(2, 3)
- b = np.array([0, 0], dtype='f8').byteswap().newbyteorder()
- i = nditer([a, b], ['reduce_ok', 'external_loop', 'buffered'],
- [['readonly'], ['readwrite', 'nbo']],
- op_axes=[[0, 1], [0, -1]])
- # Reduction shape/strides for the output
- with i:
- assert_equal(i[1].shape, (3,))
- assert_equal(i[1].strides, (0,))
- # Do the reduction
- for x, y in i:
- # Use a for loop instead of ``y[...] += x``
- # (equivalent to ``y[...] = y[...].copy() + x``),
- # because y has the zero strides that implement the reduction
- for j in range(len(y)):
- y[j] += x[j]
- assert_equal(b, np.sum(a, axis=1))
- # Iterator inner double loop was wrong on this one
- p = np.arange(2) + 1
- it = np.nditer([p, None],
- ['delay_bufalloc', 'reduce_ok', 'buffered', 'external_loop'],
- [['readonly'], ['readwrite', 'allocate']],
- op_axes=[[-1, 0], [-1, -1]],
- itershape=(2, 2))
- with it:
- it.operands[1].fill(0)
- it.reset()
- assert_equal(it[0], [1, 2, 1, 2])
- # Iterator inner loop should take argument contiguity into account
- x = np.ones((7, 13, 8), np.int8)[4:6,1:11:6,1:5].transpose(1, 2, 0)
- x[...] = np.arange(x.size).reshape(x.shape)
- y_base = np.arange(4*4, dtype=np.int8).reshape(4, 4)
- y_base_copy = y_base.copy()
- y = y_base[::2,:,None]
- it = np.nditer([y, x],
- ['buffered', 'external_loop', 'reduce_ok'],
- [['readwrite'], ['readonly']])
- with it:
- for a, b in it:
- a.fill(2)
- assert_equal(y_base[1::2], y_base_copy[1::2])
- assert_equal(y_base[::2], 2)
- def test_iter_buffering_reduction_reuse_reduce_loops():
- # There was a bug that triggered inappropriate reuse of the reduce loop,
- # which caused processing to happen in unnecessarily small chunks
- # and to overrun the buffer.
- a = np.zeros((2, 7))
- b = np.zeros((1, 7))
- it = np.nditer([a, b], flags=['reduce_ok', 'external_loop', 'buffered'],
- op_flags=[['readonly'], ['readwrite']],
- buffersize=5)
- with it:
- bufsizes = [x.shape[0] for x, y in it]
- assert_equal(bufsizes, [5, 2, 5, 2])
- assert_equal(sum(bufsizes), a.size)
- def test_iter_writemasked_badinput():
- a = np.zeros((2, 3))
- b = np.zeros((3,))
- m = np.array([[True, True, False], [False, True, False]])
- m2 = np.array([True, True, False])
- m3 = np.array([0, 1, 1], dtype='u1')
- mbad1 = np.array([0, 1, 1], dtype='i1')
- mbad2 = np.array([0, 1, 1], dtype='f4')
- # Need an 'arraymask' if any operand is 'writemasked'
- assert_raises(ValueError, nditer, [a, m], [],
- [['readwrite', 'writemasked'], ['readonly']])
- # A 'writemasked' operand must not be readonly
- assert_raises(ValueError, nditer, [a, m], [],
- [['readonly', 'writemasked'], ['readonly', 'arraymask']])
- # 'writemasked' and 'arraymask' may not be used together
- assert_raises(ValueError, nditer, [a, m], [],
- [['readonly'], ['readwrite', 'arraymask', 'writemasked']])
- # 'arraymask' may only be specified once
- assert_raises(ValueError, nditer, [a, m, m2], [],
- [['readwrite', 'writemasked'],
- ['readonly', 'arraymask'],
- ['readonly', 'arraymask']])
- # An 'arraymask' with nothing 'writemasked' also doesn't make sense
- assert_raises(ValueError, nditer, [a, m], [],
- [['readwrite'], ['readonly', 'arraymask']])
- # A reduction on a writemasked operand requires a correspondingly smaller mask
- assert_raises(ValueError, nditer, [a, b, m], ['reduce_ok'],
- [['readonly'],
- ['readwrite', 'writemasked'],
- ['readonly', 'arraymask']])
- # But this should work with a smaller/equal mask to the reduction operand
- np.nditer([a, b, m2], ['reduce_ok'],
- [['readonly'],
- ['readwrite', 'writemasked'],
- ['readonly', 'arraymask']])
- # The arraymask itself cannot be a reduction
- assert_raises(ValueError, nditer, [a, b, m2], ['reduce_ok'],
- [['readonly'],
- ['readwrite', 'writemasked'],
- ['readwrite', 'arraymask']])
- # A uint8 mask is ok too
- np.nditer([a, m3], ['buffered'],
- [['readwrite', 'writemasked'],
- ['readonly', 'arraymask']],
- op_dtypes=['f4', None],
- casting='same_kind')
- # An int8 mask isn't ok
- assert_raises(TypeError, np.nditer, [a, mbad1], ['buffered'],
- [['readwrite', 'writemasked'],
- ['readonly', 'arraymask']],
- op_dtypes=['f4', None],
- casting='same_kind')
- # A float32 mask isn't ok
- assert_raises(TypeError, np.nditer, [a, mbad2], ['buffered'],
- [['readwrite', 'writemasked'],
- ['readonly', 'arraymask']],
- op_dtypes=['f4', None],
- casting='same_kind')
- def _is_buffered(iterator):
- try:
- iterator.itviews
- except ValueError:
- return True
- return False
- @pytest.mark.parametrize("a",
- [np.zeros((3,), dtype='f8'),
- np.zeros((9876, 3*5), dtype='f8')[::2, :],
- np.zeros((4, 312, 124, 3), dtype='f8')[::2, :, ::2, :],
- # Also test with the last dimension strided (so it does not fit if
- # there is repeated access)
- np.zeros((9,), dtype='f8')[::3],
- np.zeros((9876, 3*10), dtype='f8')[::2, ::5],
- np.zeros((4, 312, 124, 3), dtype='f8')[::2, :, ::2, ::-1]])
- def test_iter_writemasked(a):
- # Note, the slicing above is to ensure that nditer cannot combine multiple
- # axes into one. The repetition is just to make things a bit more
- # interesting.
- shape = a.shape
- reps = shape[-1] // 3
- msk = np.empty(shape, dtype=bool)
- msk[...] = [True, True, False] * reps
- # When buffering is unused, 'writemasked' effectively does nothing.
- # It's up to the user of the iterator to obey the requested semantics.
- it = np.nditer([a, msk], [],
- [['readwrite', 'writemasked'],
- ['readonly', 'arraymask']])
- with it:
- for x, m in it:
- x[...] = 1
- # Because we violated the semantics, all the values became 1
- assert_equal(a, np.broadcast_to([1, 1, 1] * reps, shape))
- # Even if buffering is enabled, we still may be accessing the array
- # directly.
- it = np.nditer([a, msk], ['buffered'],
- [['readwrite', 'writemasked'],
- ['readonly', 'arraymask']])
- # @seberg: I honestly don't currently understand why a "buffered" iterator
- # would end up not using a buffer for the small array here, at least when
- # "writemasked" is used; that seems confusing. Check by testing for
- # actual memory overlap!
- is_buffered = True
- with it:
- for x, m in it:
- x[...] = 2.5
- if np.may_share_memory(x, a):
- is_buffered = False
- if not is_buffered:
- # Because we violated the semantics, all the values became 2.5
- assert_equal(a, np.broadcast_to([2.5, 2.5, 2.5] * reps, shape))
- else:
- # For large sizes, the iterator may be buffered:
- assert_equal(a, np.broadcast_to([2.5, 2.5, 1] * reps, shape))
- a[...] = 2.5
- # If buffering is definitely happening, for instance because of
- # a cast, only the items selected by the mask will be copied back from
- # the buffer.
- it = np.nditer([a, msk], ['buffered'],
- [['readwrite', 'writemasked'],
- ['readonly', 'arraymask']],
- op_dtypes=['i8', None],
- casting='unsafe')
- with it:
- for x, m in it:
- x[...] = 3
- # Even though we violated the semantics, only the selected values
- # were copied back
- assert_equal(a, np.broadcast_to([3, 3, 2.5] * reps, shape))
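- # Illustrative sketch (not part of the suite; helper name hypothetical and
- # `a` is assumed to have an integer dtype so the 'f8' op_dtype really forces
- # buffering): when buffering happens, write-back honours the mask, so only
- # the selected elements are overwritten.
- def _example_masked_fill(a, mask, value):
-     it = np.nditer([a, mask], ['buffered'],
-                    [['readwrite', 'writemasked'],
-                     ['readonly', 'arraymask']],
-                    op_dtypes=['f8', None], casting='unsafe')
-     with it:
-         for x, _ in it:
-             x[...] = value
-     return a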
- @pytest.mark.parametrize(["mask", "mask_axes"], [
- # Allocated operand (only broadcasts with -1)
- (None, [-1, 0]),
- # Reduction along the first dimension (with and without op_axes)
- (np.zeros((1, 4), dtype="bool"), [0, 1]),
- (np.zeros((1, 4), dtype="bool"), None),
- # Test 0-D and -1 op_axes
- (np.zeros(4, dtype="bool"), [-1, 0]),
- (np.zeros((), dtype="bool"), [-1, -1]),
- (np.zeros((), dtype="bool"), None)])
- def test_iter_writemasked_broadcast_error(mask, mask_axes):
- # This assumes that a readwrite mask makes sense. This is likely not the
- # case and should simply be deprecated.
- arr = np.zeros((3, 4))
- itflags = ["reduce_ok"]
- mask_flags = ["arraymask", "readwrite", "allocate"]
- a_flags = ["writeonly", "writemasked"]
- if mask_axes is None:
- op_axes = None
- else:
- op_axes = [mask_axes, [0, 1]]
- with assert_raises(ValueError):
- np.nditer((mask, arr), flags=itflags, op_flags=[mask_flags, a_flags],
- op_axes=op_axes)
- def test_iter_writemasked_decref():
- # force casting (to make it interesting) by using a structured dtype.
- arr = np.arange(10000).astype(">i,O")
- original = arr.copy()
- mask = np.random.randint(0, 2, size=10000).astype(bool)
- it = np.nditer([arr, mask], ['buffered', "refs_ok"],
- [['readwrite', 'writemasked'],
- ['readonly', 'arraymask']],
- op_dtypes=["<i,O", "?"])
- singleton = object()
- if HAS_REFCOUNT:
- count = sys.getrefcount(singleton)
- for buf, mask_buf in it:
- buf[...] = (3, singleton)
- del buf, mask_buf, it # delete everything to ensure correct cleanup
- if HAS_REFCOUNT:
- # The buffer would have included additional items, they must be
- # cleared correctly:
- assert sys.getrefcount(singleton) - count == np.count_nonzero(mask)
- assert_array_equal(arr[~mask], original[~mask])
- assert (arr[mask] == np.array((3, singleton), arr.dtype)).all()
- del arr
- if HAS_REFCOUNT:
- assert sys.getrefcount(singleton) == count
- def test_iter_non_writable_attribute_deletion():
- it = np.nditer(np.ones(2))
- attr = ["value", "shape", "operands", "itviews", "has_delayed_bufalloc",
- "iterationneedsapi", "has_multi_index", "has_index", "dtypes",
- "ndim", "nop", "itersize", "finished"]
- for s in attr:
- assert_raises(AttributeError, delattr, it, s)
- def test_iter_writable_attribute_deletion():
- it = np.nditer(np.ones(2))
- attr = [ "multi_index", "index", "iterrange", "iterindex"]
- for s in attr:
- assert_raises(AttributeError, delattr, it, s)
- def test_iter_element_deletion():
- it = np.nditer(np.ones(3))
- try:
- del it[1]
- del it[1:2]
- except TypeError:
- pass
- except Exception:
- raise AssertionError
- def test_iter_allocated_array_dtypes():
- # If the dtype of an allocated output has a shape, the shape gets
- # tacked onto the end of the result.
- it = np.nditer(([1, 3, 20], None), op_dtypes=[None, ('i4', (2,))])
- for a, b in it:
- b[0] = a - 1
- b[1] = a + 1
- assert_equal(it.operands[1], [[0, 2], [2, 4], [19, 21]])
- # Check the same (less sensitive) thing when `op_axes` with -1 is given.
- it = np.nditer(([[1, 3, 20]], None), op_dtypes=[None, ('i4', (2,))],
- flags=["reduce_ok"], op_axes=[None, (-1, 0)])
- for a, b in it:
- b[0] = a - 1
- b[1] = a + 1
- assert_equal(it.operands[1], [[0, 2], [2, 4], [19, 21]])
- # Make sure this works for scalars too
- it = np.nditer((10, 2, None), op_dtypes=[None, None, ('i4', (2, 2))])
- for a, b, c in it:
- c[0, 0] = a - b
- c[0, 1] = a + b
- c[1, 0] = a * b
- c[1, 1] = a / b
- assert_equal(it.operands[2], [[8, 12], [20, 5]])
- def test_0d_iter():
- # Basic test for iteration of 0-d arrays:
- i = nditer([2, 3], ['multi_index'], [['readonly']]*2)
- assert_equal(i.ndim, 0)
- assert_equal(next(i), (2, 3))
- assert_equal(i.multi_index, ())
- assert_equal(i.iterindex, 0)
- assert_raises(StopIteration, next, i)
- # test reset:
- i.reset()
- assert_equal(next(i), (2, 3))
- assert_raises(StopIteration, next, i)
- # test forcing to 0-d
- i = nditer(np.arange(5), ['multi_index'], [['readonly']], op_axes=[()])
- assert_equal(i.ndim, 0)
- assert_equal(len(i), 1)
- i = nditer(np.arange(5), ['multi_index'], [['readonly']],
- op_axes=[()], itershape=())
- assert_equal(i.ndim, 0)
- assert_equal(len(i), 1)
- # passing an itershape alone is not enough, the op_axes are also needed
- with assert_raises(ValueError):
- nditer(np.arange(5), ['multi_index'], [['readonly']], itershape=())
- # Test a more complex buffered casting case (same as another test above)
- sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')]
- a = np.array(0.5, dtype='f4')
- i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
- casting='unsafe', op_dtypes=sdt)
- vals = next(i)
- assert_equal(vals['a'], 0.5)
- assert_equal(vals['b'], 0)
- assert_equal(vals['c'], [[(0.5)]*3]*2)
- assert_equal(vals['d'], 0.5)
- def test_object_iter_cleanup():
- # see gh-18450
- # object arrays can raise a python exception in ufunc inner loops using
- # nditer, which should cause iteration to stop & cleanup. There were bugs
- # in the nditer cleanup when decref'ing object arrays.
- # This test would trigger valgrind "uninitialized read" before the bugfix.
- assert_raises(TypeError, lambda: np.zeros((17000, 2), dtype='f4') * None)
- # this more explicit code also triggers the invalid access
- arr = np.arange(np.BUFSIZE * 10).reshape(10, -1).astype(str)
- oarr = arr.astype(object)
- oarr[:, -1] = None
- assert_raises(TypeError, lambda: np.add(oarr[:, ::-1], arr[:, ::-1]))
- # followup: this tests for a bug introduced in the first pass of gh-18450,
- # caused by an incorrect fallthrough of the TypeError
- class T:
- def __bool__(self):
- raise TypeError("Ambiguous")
- assert_raises(TypeError, np.logical_or.reduce,
- np.array([T(), T()], dtype='O'))
- def test_object_iter_cleanup_reduce():
- # Similar as above, but a complex reduction case that was previously
- # missed (see gh-18810).
- # The following array is special in that it cannot be flattened:
- arr = np.array([[None, 1], [-1, -1], [None, 2], [-1, -1]])[::2]
- with pytest.raises(TypeError):
- np.sum(arr)
- @pytest.mark.parametrize("arr", [
- np.ones((8000, 4, 2), dtype=object)[:, ::2, :],
- np.ones((8000, 4, 2), dtype=object, order="F")[:, ::2, :],
- np.ones((8000, 4, 2), dtype=object)[:, ::2, :].copy("F")])
- def test_object_iter_cleanup_large_reduce(arr):
- # More complicated calls are possible for large arrays:
- out = np.ones(8000, dtype=np.intp)
- # force casting with `dtype=object`
- res = np.sum(arr, axis=(1, 2), dtype=object, out=out)
- assert_array_equal(res, np.full(8000, 4, dtype=object))
- def test_iter_too_large():
- # The total size of the iterator must not exceed the maximum intp due
- # to broadcasting. Dividing by 1024 will keep it small enough to
- # give a legal array.
- size = np.iinfo(np.intp).max // 1024
- arr = np.lib.stride_tricks.as_strided(np.zeros(1), (size,), (0,))
- assert_raises(ValueError, nditer, (arr, arr[:, None]))
- # Test the same with a multi_index. That may get more interesting when
- # removing a 0-dimensional axis is allowed (since the iterator can then grow)
- assert_raises(ValueError, nditer,
- (arr, arr[:, None]), flags=['multi_index'])
- def test_iter_too_large_with_multiindex():
- # When a multi index is being tracked, the error is delayed. This
- # checks the delayed error messages and that the size can be brought
- # back below the limit by removing an axis.
- base_size = 2**10
- num = 1
- while base_size**num < np.iinfo(np.intp).max:
- num += 1
- shape_template = [1, 1] * num
- arrays = []
- for i in range(num):
- shape = shape_template[:]
- shape[i * 2] = 2**10
- arrays.append(np.empty(shape))
- arrays = tuple(arrays)
- # arrays are now too large to be broadcast. The different modes test
- # different nditer functionality with or without the GIL.
- for mode in range(6):
- with assert_raises(ValueError):
- _multiarray_tests.test_nditer_too_large(arrays, -1, mode)
- # but if we do nothing with the nditer, it can be constructed:
- _multiarray_tests.test_nditer_too_large(arrays, -1, 7)
- # When an axis is removed, things should work again (half the time):
- for i in range(num):
- for mode in range(6):
- # an axis with size 1024 is removed:
- _multiarray_tests.test_nditer_too_large(arrays, i*2, mode)
- # an axis with size 1 is removed:
- with assert_raises(ValueError):
- _multiarray_tests.test_nditer_too_large(arrays, i*2 + 1, mode)
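- # Illustrative sketch, not part of the original test suite (hypothetical
- # helper), relying on the documented ``nditer.remove_axis`` behaviour: with
- # the 'multi_index' flag an axis can be removed after construction, which
- # shrinks the iteration size accordingly.
- def _example_remove_axis():
-     a = np.arange(6).reshape(2, 3)
-     it = np.nditer(a, ['multi_index'])
-     assert_equal(it.itersize, 6)
-     it.remove_axis(0)  # drop the first axis, iterate the remaining one
-     assert_equal(it.itersize, 3)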
- def test_writebacks():
- a = np.arange(6, dtype='f4')
- au = a.byteswap().newbyteorder()
- assert_(a.dtype.byteorder != au.dtype.byteorder)
- it = nditer(au, [], [['readwrite', 'updateifcopy']],
- casting='equiv', op_dtypes=[np.dtype('f4')])
- with it:
- it.operands[0][:] = 100
- assert_equal(au, 100)
- # do it again, this time raising an error inside the context manager
- it = nditer(au, [], [['readwrite', 'updateifcopy']],
- casting='equiv', op_dtypes=[np.dtype('f4')])
- try:
- with it:
- assert_equal(au.flags.writeable, False)
- it.operands[0][:] = 0
- raise ValueError('exit context manager on exception')
- except ValueError:
- pass
- assert_equal(au, 0)
- assert_equal(au.flags.writeable, True)
- # the iterator cannot be reused outside the context manager
- assert_raises(ValueError, getattr, it, 'operands')
- it = nditer(au, [], [['readwrite', 'updateifcopy']],
- casting='equiv', op_dtypes=[np.dtype('f4')])
- with it:
- x = it.operands[0]
- x[:] = 6
- assert_(x.flags.writebackifcopy)
- assert_equal(au, 6)
- assert_(not x.flags.writebackifcopy)
- x[:] = 123 # x.data still valid
- assert_equal(au, 6) # but not connected to au
- it = nditer(au, [],
- [['readwrite', 'updateifcopy']],
- casting='equiv', op_dtypes=[np.dtype('f4')])
- # reentering works
- with it:
- with it:
- for x in it:
- x[...] = 123
- it = nditer(au, [],
- [['readwrite', 'updateifcopy']],
- casting='equiv', op_dtypes=[np.dtype('f4')])
- # make sure exiting the inner context manager closes the iterator
- with it:
- with it:
- for x in it:
- x[...] = 123
- assert_raises(ValueError, getattr, it, 'operands')
- # do not crash if original data array is decrefed
- it = nditer(au, [],
- [['readwrite', 'updateifcopy']],
- casting='equiv', op_dtypes=[np.dtype('f4')])
- del au
- with it:
- for x in it:
- x[...] = 123
- # make sure we cannot reenter the closed iterator
- enter = it.__enter__
- assert_raises(RuntimeError, enter)
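- # Illustrative sketch, not part of the original test suite (hypothetical
- # helper): the recommended pattern is to use the iterator as a context
- # manager, so the temporary 'updateifcopy' copy is written back to the
- # original array exactly once, on exit.
- def _example_writeback_context():
-     a = np.arange(3, dtype='f4')
-     au = a.byteswap().newbyteorder()
-     with np.nditer(au, [], [['readwrite', 'updateifcopy']],
-                    casting='equiv', op_dtypes=[np.dtype('f4')]) as it:
-         it.operands[0][:] = -1
-     assert_equal(au, -1)  # writeback happened when the context exited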
- def test_close_equivalent():
- '''Using a context manager and calling nditer.close() are equivalent.
- '''
- def add_close(x, y, out=None):
- addop = np.add
- it = np.nditer([x, y, out], [],
- [['readonly'], ['readonly'], ['writeonly','allocate']])
- for (a, b, c) in it:
- addop(a, b, out=c)
- ret = it.operands[2]
- it.close()
- return ret
- def add_context(x, y, out=None):
- addop = np.add
- it = np.nditer([x, y, out], [],
- [['readonly'], ['readonly'], ['writeonly','allocate']])
- with it:
- for (a, b, c) in it:
- addop(a, b, out=c)
- return it.operands[2]
- z = add_close(range(5), range(5))
- assert_equal(z, range(0, 10, 2))
- z = add_context(range(5), range(5))
- assert_equal(z, range(0, 10, 2))
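- # Illustrative sketch, not part of the original test suite (hypothetical
- # helper): ``close()`` is useful when the iterator's lifetime does not fit
- # a single ``with`` block, e.g. when it must be finalized in a ``finally``
- # clause; references to the operands stay valid afterwards, as above.
- def _example_close_in_finally():
-     it = np.nditer([np.arange(3), None], [],
-                    [['readonly'], ['writeonly', 'allocate']])
-     try:
-         for a, b in it:
-             b[...] = a * 2
-         res = it.operands[1]
-     finally:
-         it.close()
-     assert_equal(res, [0, 2, 4])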
- def test_close_raises():
- it = np.nditer(np.arange(3))
- assert_equal(next(it), 0)
- it.close()
- assert_raises(StopIteration, next, it)
- assert_raises(ValueError, getattr, it, 'operands')
- def test_close_parameters():
- it = np.nditer(np.arange(3))
- assert_raises(TypeError, it.close, 1)
- @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
- def test_warn_noclose():
- a = np.arange(6, dtype='f4')
- au = a.byteswap().newbyteorder()
- with suppress_warnings() as sup:
- sup.record(RuntimeWarning)
- it = np.nditer(au, [], [['readwrite', 'updateifcopy']],
- casting='equiv', op_dtypes=[np.dtype('f4')])
- del it
- assert len(sup.log) == 1
- @pytest.mark.skipif(sys.version_info[:2] == (3, 9) and sys.platform == "win32",
- reason="Errors with Python 3.9 on Windows")
- @pytest.mark.parametrize(["in_dtype", "buf_dtype"],
- [("i", "O"), ("O", "i"), # most simple cases
- ("i,O", "O,O"), # structured partially only copying O
- ("O,i", "i,O"), # structured casting to and from O
- ])
- @pytest.mark.parametrize("steps", [1, 2, 3])
- def test_partial_iteration_cleanup(in_dtype, buf_dtype, steps):
- """
- Checks for reference counting leaks during cleanup. Using explicit
- reference counts led to occasional false positives (at least in parallel
- test setups), so this test avoids them; it should still detect leaks
- correctly when run e.g. with pytest-valgrind or pytest-leaks.
- """
- value = 2**30 + 1 # just a random value that Python won't intern
- arr = np.full(int(np.BUFSIZE * 2.5), value).astype(in_dtype)
- it = np.nditer(arr, op_dtypes=[np.dtype(buf_dtype)],
- flags=["buffered", "external_loop", "refs_ok"], casting="unsafe")
- for step in range(steps):
- # The full iteration takes 3 steps; after the first two it is still partial
- next(it)
- del it # not necessary, but we test the cleanup
- # Repeat the test with `iternext`
- it = np.nditer(arr, op_dtypes=[np.dtype(buf_dtype)],
- flags=["buffered", "external_loop", "refs_ok"], casting="unsafe")
- for step in range(steps):
- it.iternext()
- del it # not necessary, but we test the cleanup
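- # Illustrative sketch, not part of the original test suite (hypothetical
- # helper): with 'buffered' + 'external_loop' each step yields a whole
- # chunk, capped at the buffer size when a cast forces buffering, which is
- # why an array of 2.5 * BUFSIZE elements above is consumed in three steps.
- def _example_buffered_chunks():
-     arr = np.zeros(int(np.BUFSIZE * 2.5))
-     it = np.nditer(arr, flags=["buffered", "external_loop"],
-                    op_dtypes=["f4"], casting="same_kind")
-     chunks = [len(chunk) for chunk in it]
-     assert_equal(sum(chunks), arr.size)  # expected: three chunks in total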
- @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
- @pytest.mark.parametrize(["in_dtype", "buf_dtype"],
- [("O", "i"), # most simple cases
- ("O,i", "i,O"), # structured casting to and from O
- ])
- def test_partial_iteration_error(in_dtype, buf_dtype):
- value = 123  # relies on the Python small-int cache (leak-check will still find it)
- arr = np.full(int(np.BUFSIZE * 2.5), value).astype(in_dtype)
- if in_dtype == "O":
- arr[int(np.BUFSIZE * 1.5)] = None
- else:
- arr[int(np.BUFSIZE * 1.5)]["f0"] = None
- count = sys.getrefcount(value)
- it = np.nditer(arr, op_dtypes=[np.dtype(buf_dtype)],
- flags=["buffered", "external_loop", "refs_ok"], casting="unsafe")
- with pytest.raises(TypeError):
- # pytest.raises seems to have issues with the error originating
- # in the for loop, so manually unravel:
- next(it)
- next(it) # raises TypeError
- # Repeat the test with `iternext` after resetting; the buffers should
- # already be cleared of any references, so resetting is sufficient.
- it.reset()
- with pytest.raises(TypeError):
- it.iternext()
- it.iternext()
- assert count == sys.getrefcount(value)
- def test_debug_print(capfd):
- """
- Matches the expected output of a debug print against the actual output.
- Note that the iterator dump should not be considered stable API; this
- test mainly ensures that the print does not crash. The dump is written
- with C-level `printf`s, so it is captured via the ``capfd`` fixture.
- """
- # the expected output with all addresses and sizes stripped (they vary
- # and/or are platform dependent).
- expected = """
- ------ BEGIN ITERATOR DUMP ------
- | Iterator Address:
- | ItFlags: BUFFER REDUCE REUSE_REDUCE_LOOPS
- | NDim: 2
- | NOp: 2
- | IterSize: 50
- | IterStart: 0
- | IterEnd: 50
- | IterIndex: 0
- | Iterator SizeOf:
- | BufferData SizeOf:
- | AxisData SizeOf:
- |
- | Perm: 0 1
- | DTypes:
- | DTypes: dtype('float64') dtype('int32')
- | InitDataPtrs:
- | BaseOffsets: 0 0
- | Operands:
- | Operand DTypes: dtype('int64') dtype('float64')
- | OpItFlags:
- | Flags[0]: READ CAST ALIGNED
- | Flags[1]: READ WRITE CAST ALIGNED REDUCE
- |
- | BufferData:
- | BufferSize: 50
- | Size: 5
- | BufIterEnd: 5
- | REDUCE Pos: 0
- | REDUCE OuterSize: 10
- | REDUCE OuterDim: 1
- | Strides: 8 4
- | Ptrs:
- | REDUCE Outer Strides: 40 0
- | REDUCE Outer Ptrs:
- | ReadTransferFn:
- | ReadTransferData:
- | WriteTransferFn:
- | WriteTransferData:
- | Buffers:
- |
- | AxisData[0]:
- | Shape: 5
- | Index: 0
- | Strides: 16 8
- | Ptrs:
- | AxisData[1]:
- | Shape: 10
- | Index: 0
- | Strides: 80 0
- | Ptrs:
- ------- END ITERATOR DUMP -------
- """.strip().splitlines()
- arr1 = np.arange(100, dtype=np.int64).reshape(10, 10)[:, ::2]
- arr2 = np.arange(5.)
- it = np.nditer((arr1, arr2), op_dtypes=["d", "i4"], casting="unsafe",
- flags=["reduce_ok", "buffered"],
- op_flags=[["readonly"], ["readwrite"]])
- it.debug_print()
- res = capfd.readouterr().out
- res = res.strip().splitlines()
- assert len(res) == len(expected)
- for res_line, expected_line in zip(res, expected):
- # The actual output may have additional pointers listed that are
- # stripped from the example output:
- assert res_line.startswith(expected_line.strip())
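- # Illustrative sketch, not part of the original test suite (hypothetical
- # helper): ``debug_print`` takes no arguments, returns None, and writes its
- # dump through C-level stdout, which is why the test above captures the
- # output with the ``capfd`` fixture.
- def _example_debug_print():
-     it = np.nditer(np.arange(3))
-     it.debug_print()  # dumps internal iterator state; format is not stable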