# NOTE: Please avoid the use of numpy.testing since NPYV intrinsics
# may be involved in their functionality.
import pytest, math
from numpy.core._simd import targets


class _Test_Utility:
    # submodule of the desired SIMD extension, e.g. targets["AVX512F"]
    npyv = None
    # the current data type suffix, e.g. 's8'
    sfx = None

    def __getattr__(self, attr):
        """
        Delegate to NPYV intrinsics without the 'npyv' attribute,
        auto-suffixing each intrinsic according to the class attribute 'sfx'.
        """
        return getattr(self.npyv, attr + "_" + self.sfx)
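        # e.g. with sfx 's32', self.load(...) resolves to npyv.load_s32(...)
        # and self.add(a, b) to npyv.add_s32(a, b)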

    def _data(self, start=None, count=None, reverse=False):
        """
        Create a list of consecutive numbers according to the number of vector lanes.
        """
        if start is None:
            start = 1
        if count is None:
            count = self.nlanes
        rng = range(start, start + count)
        if reverse:
            rng = reversed(rng)
        if self._is_fp():
            return [x / 1.0 for x in rng]
        return list(rng)
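        # e.g. for a 4-lane integer vector: self._data() -> [1, 2, 3, 4]
        # and self._data(reverse=True) -> [4, 3, 2, 1]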

    def _is_unsigned(self):
        return self.sfx[0] == 'u'

    def _is_signed(self):
        return self.sfx[0] == 's'

    def _is_fp(self):
        return self.sfx[0] == 'f'

    def _scalar_size(self):
        return int(self.sfx[1:])

    def _int_clip(self, seq):
        if self._is_fp():
            return seq
        max_int = self._int_max()
        min_int = self._int_min()
        return [min(max(v, min_int), max_int) for v in seq]

    def _int_max(self):
        if self._is_fp():
            return None
        max_u = self._to_unsigned(self.setall(-1))[0]
        if self._is_signed():
            return max_u // 2
        return max_u

    def _int_min(self):
        if self._is_fp():
            return None
        if self._is_unsigned():
            return 0
        return -(self._int_max() + 1)
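        # setall(-1) sets every bit in every lane, which reads back as the
        # unsigned maximum, e.g. for 'u8' max/min are 255/0, for 's8' 127/-128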

    def _true_mask(self):
        max_unsig = getattr(self.npyv, "setall_u" + self.sfx[1:])(-1)
        return max_unsig[0]

    def _to_unsigned(self, vector):
        if isinstance(vector, (list, tuple)):
            return getattr(self.npyv, "load_u" + self.sfx[1:])(vector)
        else:
            sfx = vector.__name__.replace("npyv_", "")
            if sfx[0] == "b":
                cvt_intrin = "cvt_u{0}_b{0}"
            else:
                cvt_intrin = "reinterpret_u{0}_{1}"
            return getattr(self.npyv, cvt_intrin.format(sfx[1:], sfx))(vector)
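        # boolean vectors (sfx 'b8', 'b16', ...) are converted via cvt_u*_b*;
        # any other vector is bit-cast through reinterpret_u*_*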

    def _pinfinity(self):
        v = self.npyv.setall_u32(0x7f800000)
        return self.npyv.reinterpret_f32_u32(v)[0]

    def _ninfinity(self):
        v = self.npyv.setall_u32(0xff800000)
        return self.npyv.reinterpret_f32_u32(v)[0]

    def _nan(self):
        v = self.npyv.setall_u32(0x7fc00000)
        return self.npyv.reinterpret_f32_u32(v)[0]
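        # IEEE 754 binary32 bit patterns: 0x7f800000 is +inf, 0xff800000 is
        # -inf (sign bit set) and 0x7fc00000 is a quiet NaN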


class _SIMD_INT(_Test_Utility):
    """
    To test all integer vector types at once
    """
    def test_operators_shift(self):
        if self.sfx in ("u8", "s8"):
            return
        data_a = self._data(self._int_max() - self.nlanes)
        data_b = self._data(self._int_min(), reverse=True)
        vdata_a, vdata_b = self.load(data_a), self.load(data_b)

        for count in range(self._scalar_size()):
            # load to cast
            data_shl_a = self.load([a << count for a in data_a])
            # left shift
            shl = self.shl(vdata_a, count)
            assert shl == data_shl_a
            # load to cast
            data_shr_a = self.load([a >> count for a in data_a])
            # right shift
            shr = self.shr(vdata_a, count)
            assert shr == data_shr_a

        # shifting by an immediate constant of zero, or by a count at or
        # beyond the scalar width, is neither applicable nor logical
        for count in range(1, self._scalar_size()):
            # load to cast
            data_shl_a = self.load([a << count for a in data_a])
            # left shift by an immediate constant
            shli = self.shli(vdata_a, count)
            assert shli == data_shl_a
            # load to cast
            data_shr_a = self.load([a >> count for a in data_a])
            # right shift by an immediate constant
            shri = self.shri(vdata_a, count)
            assert shri == data_shr_a
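        # e.g. for 's16', shl(v, 3) shifts every lane left by three bits,
        # i.e. multiplies each lane by 8 within the 16-bit lane width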

    def test_arithmetic_subadd_saturated(self):
        if self.sfx in ("u32", "s32", "u64", "s64"):
            return
        data_a = self._data(self._int_max() - self.nlanes)
        data_b = self._data(self._int_min(), reverse=True)
        vdata_a, vdata_b = self.load(data_a), self.load(data_b)

        data_adds = self._int_clip([a + b for a, b in zip(data_a, data_b)])
        adds = self.adds(vdata_a, vdata_b)
        assert adds == data_adds

        data_subs = self._int_clip([a - b for a, b in zip(data_a, data_b)])
        subs = self.subs(vdata_a, vdata_b)
        assert subs == data_subs
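        # saturated ops clip to the type's limits instead of wrapping around,
        # e.g. for 's8': adds(127, 1) -> 127 and subs(-128, 1) -> -128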


class _SIMD_FP(_Test_Utility):
    """
    To test all float vector types at once
    """
    def test_arithmetic_fused(self):
        vdata_a, vdata_b, vdata_c = [self.load(self._data())]*3
        vdata_cx2 = self.add(vdata_c, vdata_c)
        # multiply and add, a*b + c
        data_fma = self.load([a * b + c for a, b, c in zip(vdata_a, vdata_b, vdata_c)])
        fma = self.muladd(vdata_a, vdata_b, vdata_c)
        assert fma == data_fma
        # multiply and subtract, a*b - c
        fms = self.mulsub(vdata_a, vdata_b, vdata_c)
        data_fms = self.sub(data_fma, vdata_cx2)
        assert fms == data_fms
        # negate multiply and add, -(a*b) + c
        nfma = self.nmuladd(vdata_a, vdata_b, vdata_c)
        data_nfma = self.sub(vdata_cx2, data_fma)
        assert nfma == data_nfma
        # negate multiply and subtract, -(a*b) - c
        nfms = self.nmulsub(vdata_a, vdata_b, vdata_c)
        data_nfms = self.mul(data_fma, self.setall(-1))
        assert nfms == data_nfms
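        # all four results derive from the same product: fms = fma - 2c,
        # nfma = 2c - fma and nfms = -fma, hence the precomputed vdata_cx2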

    def test_abs(self):
        pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan()
        data = self._data()
        vdata = self.load(self._data())

        abs_cases = ((-0, 0), (ninf, pinf), (pinf, pinf), (nan, nan))
        for case, desired in abs_cases:
            data_abs = [desired]*self.nlanes
            vabs = self.abs(self.setall(case))
            assert vabs == pytest.approx(data_abs, nan_ok=True)

        vabs = self.abs(self.mul(vdata, self.setall(-1)))
        assert vabs == data

    def test_sqrt(self):
        pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan()
        data = self._data()
        vdata = self.load(self._data())

        sqrt_cases = ((-0.0, -0.0), (0.0, 0.0), (-1.0, nan), (ninf, nan), (pinf, pinf))
        for case, desired in sqrt_cases:
            data_sqrt = [desired]*self.nlanes
            sqrt = self.sqrt(self.setall(case))
            assert sqrt == pytest.approx(data_sqrt, nan_ok=True)

        data_sqrt = self.load([math.sqrt(x) for x in data])  # load to truncate precision
        sqrt = self.sqrt(vdata)
        assert sqrt == data_sqrt

    def test_square(self):
        pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan()
        data = self._data()
        vdata = self.load(self._data())
        # square
        square_cases = ((nan, nan), (pinf, pinf), (ninf, pinf))
        for case, desired in square_cases:
            data_square = [desired]*self.nlanes
            square = self.square(self.setall(case))
            assert square == pytest.approx(data_square, nan_ok=True)

        data_square = [x*x for x in data]
        square = self.square(vdata)
        assert square == data_square

    def test_reciprocal(self):
        pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan()
        data = self._data()
        vdata = self.load(self._data())

        recip_cases = ((nan, nan), (pinf, 0.0), (ninf, -0.0), (0.0, pinf), (-0.0, ninf))
        for case, desired in recip_cases:
            data_recip = [desired]*self.nlanes
            recip = self.recip(self.setall(case))
            assert recip == pytest.approx(data_recip, nan_ok=True)

        data_recip = self.load([1/x for x in data])  # load to truncate precision
        recip = self.recip(vdata)
        assert recip == data_recip


class _SIMD_ALL(_Test_Utility):
    """
    To test all vector types at once
    """
    def test_memory_load(self):
        data = self._data()
        # unaligned load
        load_data = self.load(data)
        assert load_data == data
        # aligned load
        loada_data = self.loada(data)
        assert loada_data == data
        # stream load
        loads_data = self.loads(data)
        assert loads_data == data
        # load lower part
        loadl = self.loadl(data)
        loadl_half = list(loadl)[:self.nlanes//2]
        data_half = data[:self.nlanes//2]
        assert loadl_half == data_half
        assert loadl != data  # detect overflow

    def test_memory_store(self):
        data = self._data()
        vdata = self.load(data)
        # unaligned store
        store = [0] * self.nlanes
        self.store(store, vdata)
        assert store == data
        # aligned store
        store_a = [0] * self.nlanes
        self.storea(store_a, vdata)
        assert store_a == data
        # stream store
        store_s = [0] * self.nlanes
        self.stores(store_s, vdata)
        assert store_s == data
        # store lower part
        store_l = [0] * self.nlanes
        self.storel(store_l, vdata)
        assert store_l[:self.nlanes//2] == data[:self.nlanes//2]
        assert store_l != vdata  # detect overflow
        # store higher part
        store_h = [0] * self.nlanes
        self.storeh(store_h, vdata)
        assert store_h[:self.nlanes//2] == data[self.nlanes//2:]
        assert store_h != vdata  # detect overflow
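        # e.g. with 4 lanes and vdata = [1, 2, 3, 4]: storel writes [1, 2] and
        # storeh writes [3, 4], each into the first half of its buffer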

    def test_memory_partial_load(self):
        if self.sfx in ("u8", "s8", "u16", "s16"):
            return
        data = self._data()
        lanes = list(range(1, self.nlanes + 1))
        lanes += [self.nlanes**2, self.nlanes**4]  # test out of range
        for n in lanes:
            load_till = self.load_till(data, n, 15)
            data_till = data[:n] + [15] * (self.nlanes-n)
            assert load_till == data_till
            load_tillz = self.load_tillz(data, n)
            data_tillz = data[:n] + [0] * (self.nlanes-n)
            assert load_tillz == data_tillz
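        # e.g. with 4 lanes: load_till([1, 2, 3, 4], 2, 15) -> [1, 2, 15, 15]
        # and load_tillz([1, 2, 3, 4], 2) -> [1, 2, 0, 0]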

    def test_memory_partial_store(self):
        if self.sfx in ("u8", "s8", "u16", "s16"):
            return
        data = self._data()
        data_rev = self._data(reverse=True)
        vdata = self.load(data)
        lanes = list(range(1, self.nlanes + 1))
        lanes += [self.nlanes**2, self.nlanes**4]
        for n in lanes:
            data_till = data_rev.copy()
            data_till[:n] = data[:n]
            store_till = self._data(reverse=True)
            self.store_till(store_till, n, vdata)
            assert store_till == data_till

    def test_memory_noncont_load(self):
        if self.sfx in ("u8", "s8", "u16", "s16"):
            return
        for stride in range(1, 64):
            data = self._data(count=stride*self.nlanes)
            data_stride = data[::stride]
            loadn = self.loadn(data, stride)
            assert loadn == data_stride
        for stride in range(-64, 0):
            data = self._data(stride, -stride*self.nlanes)
            data_stride = self.load(data[::stride])  # cast unsigned
            loadn = self.loadn(data, stride)
            assert loadn == data_stride
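        # e.g. with 4 lanes and stride=2, loadn gathers data[0], data[2],
        # data[4] and data[6]; a negative stride walks the buffer backwards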

    def test_memory_noncont_partial_load(self):
        if self.sfx in ("u8", "s8", "u16", "s16"):
            return
        lanes = list(range(1, self.nlanes + 1))
        lanes += [self.nlanes**2, self.nlanes**4]
        for stride in range(1, 64):
            data = self._data(count=stride*self.nlanes)
            data_stride = data[::stride]
            for n in lanes:
                data_stride_till = data_stride[:n] + [15] * (self.nlanes-n)
                loadn_till = self.loadn_till(data, stride, n, 15)
                assert loadn_till == data_stride_till
                data_stride_tillz = data_stride[:n] + [0] * (self.nlanes-n)
                loadn_tillz = self.loadn_tillz(data, stride, n)
                assert loadn_tillz == data_stride_tillz
        for stride in range(-64, 0):
            data = self._data(stride, -stride*self.nlanes)
            data_stride = list(self.load(data[::stride]))  # cast unsigned
            for n in lanes:
                data_stride_till = data_stride[:n] + [15] * (self.nlanes-n)
                loadn_till = self.loadn_till(data, stride, n, 15)
                assert loadn_till == data_stride_till
                data_stride_tillz = data_stride[:n] + [0] * (self.nlanes-n)
                loadn_tillz = self.loadn_tillz(data, stride, n)
                assert loadn_tillz == data_stride_tillz

    def test_memory_noncont_store(self):
        if self.sfx in ("u8", "s8", "u16", "s16"):
            return
        vdata = self.load(self._data())
        for stride in range(1, 64):
            data = [15] * stride * self.nlanes
            data[::stride] = vdata
            storen = [15] * stride * self.nlanes
            storen += [127]*64
            self.storen(storen, stride, vdata)
            assert storen[:-64] == data
            assert storen[-64:] == [127]*64  # detect overflow
        for stride in range(-64, 0):
            data = [15] * -stride * self.nlanes
            data[::stride] = vdata
            storen = [127]*64
            storen += [15] * -stride * self.nlanes
            self.storen(storen, stride, vdata)
            assert storen[64:] == data
            assert storen[:64] == [127]*64  # detect overflow
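        # the [127]*64 guard regions around the destination verify that
        # storen never writes outside the stride*nlanes elements it owns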

    def test_memory_noncont_partial_store(self):
        if self.sfx in ("u8", "s8", "u16", "s16"):
            return
        data = self._data()
        vdata = self.load(data)
        lanes = list(range(1, self.nlanes + 1))
        lanes += [self.nlanes**2, self.nlanes**4]
        for stride in range(1, 64):
            for n in lanes:
                data_till = [15] * stride * self.nlanes
                data_till[::stride] = data[:n] + [15] * (self.nlanes-n)
                storen_till = [15] * stride * self.nlanes
                storen_till += [127]*64
                self.storen_till(storen_till, stride, n, vdata)
                assert storen_till[:-64] == data_till
                assert storen_till[-64:] == [127]*64  # detect overflow
        for stride in range(-64, 0):
            for n in lanes:
                data_till = [15] * -stride * self.nlanes
                data_till[::stride] = data[:n] + [15] * (self.nlanes-n)
                storen_till = [127]*64
                storen_till += [15] * -stride * self.nlanes
                self.storen_till(storen_till, stride, n, vdata)
                assert storen_till[64:] == data_till
                assert storen_till[:64] == [127]*64  # detect overflow

    def test_misc(self):
        broadcast_zero = self.zero()
        assert broadcast_zero == [0] * self.nlanes
        for i in range(1, 10):
            broadcasti = self.setall(i)
            assert broadcasti == [i] * self.nlanes

        data_a, data_b = self._data(), self._data(reverse=True)
        vdata_a, vdata_b = self.load(data_a), self.load(data_b)

        # the Python level of npyv_set_* doesn't support ignoring extra
        # specified lanes or filling unspecified lanes with zero
        vset = self.set(*data_a)
        assert vset == data_a
        # the Python level of npyv_setf_* doesn't support ignoring extra
        # specified lanes or filling unspecified lanes with the fill scalar
        vsetf = self.setf(10, *data_a)
        assert vsetf == data_a

        # We're testing the sanity of _simd's type-vector here;
        # the reinterpret* intrinsics themselves are tested via the compiler
        # during the build of the _simd module
        sfxes = ["u8", "s8", "u16", "s16", "u32", "s32", "u64", "s64", "f32"]
        if self.npyv.simd_f64:
            sfxes.append("f64")
        for sfx in sfxes:
            vec_name = getattr(self, "reinterpret_" + sfx)(vdata_a).__name__
            assert vec_name == "npyv_" + sfx

        # select & mask operations
        select_a = self.select(self.cmpeq(self.zero(), self.zero()), vdata_a, vdata_b)
        assert select_a == data_a
        select_b = self.select(self.cmpneq(self.zero(), self.zero()), vdata_a, vdata_b)
        assert select_b == data_b

        # the cleanup intrinsic is only used with AVX for zeroing registers
        # to avoid the AVX-SSE transition penalty, so there's nothing to
        # test here beyond calling it
        self.npyv.cleanup()

    def test_reorder(self):
        data_a, data_b = self._data(), self._data(reverse=True)
        vdata_a, vdata_b = self.load(data_a), self.load(data_b)
        # lower half part
        data_a_lo = data_a[:self.nlanes//2]
        data_b_lo = data_b[:self.nlanes//2]
        # higher half part
        data_a_hi = data_a[self.nlanes//2:]
        data_b_hi = data_b[self.nlanes//2:]
        # combine two lower parts
        combinel = self.combinel(vdata_a, vdata_b)
        assert combinel == data_a_lo + data_b_lo
        # combine two higher parts
        combineh = self.combineh(vdata_a, vdata_b)
        assert combineh == data_a_hi + data_b_hi
        # combine x2
        combine = self.combine(vdata_a, vdata_b)
        assert combine == (data_a_lo + data_b_lo, data_a_hi + data_b_hi)
        # zip (interleave)
        data_zipl = [v for p in zip(data_a_lo, data_b_lo) for v in p]
        data_ziph = [v for p in zip(data_a_hi, data_b_hi) for v in p]
        vzip = self.zip(vdata_a, vdata_b)
        assert vzip == (data_zipl, data_ziph)
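        # e.g. with 4 lanes, a = [1, 2, 3, 4] and b = [4, 3, 2, 1]:
        # zip interleaves the halves into ([1, 4, 2, 3], [3, 2, 4, 1])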

    def test_operators_comparison(self):
        if self._is_fp():
            data_a = self._data()
        else:
            data_a = self._data(self._int_max() - self.nlanes)
        data_b = self._data(self._int_min(), reverse=True)
        vdata_a, vdata_b = self.load(data_a), self.load(data_b)

        mask_true = self._true_mask()
        def to_bool(vector):
            return [lane == mask_true for lane in vector]
        # equal
        data_eq = [a == b for a, b in zip(data_a, data_b)]
        cmpeq = to_bool(self.cmpeq(vdata_a, vdata_b))
        assert cmpeq == data_eq
        # not equal
        data_neq = [a != b for a, b in zip(data_a, data_b)]
        cmpneq = to_bool(self.cmpneq(vdata_a, vdata_b))
        assert cmpneq == data_neq
        # greater than
        data_gt = [a > b for a, b in zip(data_a, data_b)]
        cmpgt = to_bool(self.cmpgt(vdata_a, vdata_b))
        assert cmpgt == data_gt
        # greater than or equal
        data_ge = [a >= b for a, b in zip(data_a, data_b)]
        cmpge = to_bool(self.cmpge(vdata_a, vdata_b))
        assert cmpge == data_ge
        # less than
        data_lt = [a < b for a, b in zip(data_a, data_b)]
        cmplt = to_bool(self.cmplt(vdata_a, vdata_b))
        assert cmplt == data_lt
        # less than or equal
        data_le = [a <= b for a, b in zip(data_a, data_b)]
        cmple = to_bool(self.cmple(vdata_a, vdata_b))
        assert cmple == data_le
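        # a true lane is all ones bits, i.e. equal to the unsigned maximum
        # for the lane width, which is exactly what _true_mask() returns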

    def test_operators_logical(self):
        if self._is_fp():
            data_a = self._data()
        else:
            data_a = self._data(self._int_max() - self.nlanes)
        data_b = self._data(self._int_min(), reverse=True)
        vdata_a, vdata_b = self.load(data_a), self.load(data_b)

        if self._is_fp():
            data_cast_a = self._to_unsigned(vdata_a)
            data_cast_b = self._to_unsigned(vdata_b)
            cast, cast_data = self._to_unsigned, self._to_unsigned
        else:
            data_cast_a, data_cast_b = data_a, data_b
            cast, cast_data = lambda a: a, self.load

        data_xor = cast_data([a ^ b for a, b in zip(data_cast_a, data_cast_b)])
        vxor = cast(self.xor(vdata_a, vdata_b))
        assert vxor == data_xor

        data_or = cast_data([a | b for a, b in zip(data_cast_a, data_cast_b)])
        vor = cast(getattr(self, "or")(vdata_a, vdata_b))
        assert vor == data_or

        data_and = cast_data([a & b for a, b in zip(data_cast_a, data_cast_b)])
        vand = cast(getattr(self, "and")(vdata_a, vdata_b))
        assert vand == data_and

        data_not = cast_data([~a for a in data_cast_a])
        vnot = cast(getattr(self, "not")(vdata_a))
        assert vnot == data_not
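        # 'or', 'and' and 'not' are Python keywords, hence getattr() instead
        # of plain attribute access like self.xor above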

    def test_conversion_boolean(self):
        bsfx = "b" + self.sfx[1:]
        to_boolean = getattr(self.npyv, "cvt_%s_%s" % (bsfx, self.sfx))
        from_boolean = getattr(self.npyv, "cvt_%s_%s" % (self.sfx, bsfx))

        false_vb = to_boolean(self.setall(0))
        true_vb = self.cmpeq(self.setall(0), self.setall(0))
        assert false_vb != true_vb

        false_vsfx = from_boolean(false_vb)
        true_vsfx = from_boolean(true_vb)
        assert false_vsfx != true_vsfx

    def test_arithmetic_subadd(self):
        if self._is_fp():
            data_a = self._data()
        else:
            data_a = self._data(self._int_max() - self.nlanes)
        data_b = self._data(self._int_min(), reverse=True)
        vdata_a, vdata_b = self.load(data_a), self.load(data_b)

        # non-saturated
        data_add = self.load([a + b for a, b in zip(data_a, data_b)])  # load to cast
        add = self.add(vdata_a, vdata_b)
        assert add == data_add

        data_sub = self.load([a - b for a, b in zip(data_a, data_b)])
        sub = self.sub(vdata_a, vdata_b)
        assert sub == data_sub

    def test_arithmetic_mul(self):
        if self.sfx in ("u64", "s64"):
            return
        if self._is_fp():
            data_a = self._data()
        else:
            data_a = self._data(self._int_max() - self.nlanes)
        data_b = self._data(self._int_min(), reverse=True)
        vdata_a, vdata_b = self.load(data_a), self.load(data_b)

        data_mul = self.load([a * b for a, b in zip(data_a, data_b)])
        mul = self.mul(vdata_a, vdata_b)
        assert mul == data_mul

    def test_arithmetic_div(self):
        if not self._is_fp():
            return
        data_a, data_b = self._data(), self._data(reverse=True)
        vdata_a, vdata_b = self.load(data_a), self.load(data_b)

        # load to truncate f64 to the precision of f32
        data_div = self.load([a / b for a, b in zip(data_a, data_b)])
        div = self.div(vdata_a, vdata_b)
        assert div == data_div

    def test_arithmetic_reduce_sum(self):
        if not self._is_fp():
            return
        # reduce sum
        data = self._data()
        vdata = self.load(data)

        data_sum = sum(data)
        vsum = self.sum(vdata)
        assert vsum == data_sum


int_sfx = ("u8", "s8", "u16", "s16", "u32", "s32", "u64", "s64")
fp_sfx = ("f32", "f64")
all_sfx = int_sfx + fp_sfx
tests_registry = {
    int_sfx: _SIMD_INT,
    fp_sfx: _SIMD_FP,
    all_sfx: _SIMD_ALL
}
for target_name, npyv in targets.items():
    simd_width = npyv.simd if npyv else ''
    pretty_name = target_name.split('__')  # multi-target separator
    if len(pretty_name) > 1:
        # multi-target
        pretty_name = f"({' '.join(pretty_name)})"
    else:
        pretty_name = pretty_name[0]

    skip = ""
    skip_sfx = dict()
    if not npyv:
        skip = f"target '{pretty_name}' isn't supported by current machine"
    elif not npyv.simd:
        skip = f"target '{pretty_name}' isn't supported by NPYV"
    elif not npyv.simd_f64:
        skip_sfx["f64"] = f"target '{pretty_name}' doesn't support double-precision"

    for sfxes, cls in tests_registry.items():
        for sfx in sfxes:
            skip_m = skip_sfx.get(sfx, skip)
            inhr = (cls,)
            attr = dict(npyv=targets[target_name], sfx=sfx)
            tcls = type(f"Test{cls.__name__}_{simd_width}_{target_name}_{sfx}", inhr, attr)
            if skip_m:
                pytest.mark.skip(reason=skip_m)(tcls)
            globals()[tcls.__name__] = tcls
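
# The generated classes follow the pattern Test<cls>_<width>_<target>_<sfx>,
# e.g. something like Test_SIMD_ALL_256_AVX2_s32 (assuming npyv.simd reports
# the vector width of the enabled target); pytest then collects them from
# the module globals.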