  1. """
  2. A place for internal code
  3. Some things are more easily handled Python.
  4. """
  5. import ast
  6. import re
  7. import sys
  8. import platform
  9. from .multiarray import dtype, array, ndarray
  10. try:
  11. import ctypes
  12. except ImportError:
  13. ctypes = None
  14. IS_PYPY = platform.python_implementation() == 'PyPy'
  15. if sys.byteorder == 'little':
  16. _nbo = '<'
  17. else:
  18. _nbo = '>'

def _makenames_list(adict, align):
    allfields = []

    for fname, obj in adict.items():
        n = len(obj)
        if not isinstance(obj, tuple) or n not in (2, 3):
            raise ValueError("entry not a 2- or 3- tuple")
        if n > 2 and obj[2] == fname:
            continue
        num = int(obj[1])
        if num < 0:
            raise ValueError("invalid offset.")
        format = dtype(obj[0], align=align)
        if n > 2:
            title = obj[2]
        else:
            title = None
        allfields.append((fname, format, num, title))

    # sort by offsets
    allfields.sort(key=lambda x: x[2])
    names = [x[0] for x in allfields]
    formats = [x[1] for x in allfields]
    offsets = [x[2] for x in allfields]
    titles = [x[3] for x in allfields]

    return names, formats, offsets, titles

# Called in PyArray_DescrConverter function when
# a dictionary without "names" and "formats"
# fields is used as a data-type descriptor.
def _usefields(adict, align):
    try:
        names = adict[-1]
    except KeyError:
        names = None
    if names is None:
        names, formats, offsets, titles = _makenames_list(adict, align)
    else:
        formats = []
        offsets = []
        titles = []
        for name in names:
            res = adict[name]
            formats.append(res[0])
            offsets.append(res[1])
            if len(res) > 2:
                titles.append(res[2])
            else:
                titles.append(None)

    return dtype({"names": names,
                  "formats": formats,
                  "offsets": offsets,
                  "titles": titles}, align)

# construct an array_protocol descriptor list
# from the fields attribute of a descriptor
# This calls itself recursively but should eventually hit
# a descriptor that has no fields and then return
# a simple typestring
def _array_descr(descriptor):
    fields = descriptor.fields
    if fields is None:
        subdtype = descriptor.subdtype
        if subdtype is None:
            if descriptor.metadata is None:
                return descriptor.str
            else:
                new = descriptor.metadata.copy()
                if new:
                    return (descriptor.str, new)
                else:
                    return descriptor.str
        else:
            return (_array_descr(subdtype[0]), subdtype[1])

    names = descriptor.names
    ordered_fields = [fields[x] + (x,) for x in names]
    result = []
    offset = 0
    for field in ordered_fields:
        if field[1] > offset:
            num = field[1] - offset
            result.append(('', f'|V{num}'))
            offset += num
        elif field[1] < offset:
            raise ValueError(
                "dtype.descr is not defined for types with overlapping or "
                "out-of-order fields")
        if len(field) > 3:
            name = (field[2], field[3])
        else:
            name = field[2]
        if field[0].subdtype:
            tup = (name, _array_descr(field[0].subdtype[0]),
                   field[0].subdtype[1])
        else:
            tup = (name, _array_descr(field[0]))
        offset += field[0].itemsize
        result.append(tup)

    if descriptor.itemsize > offset:
        num = descriptor.itemsize - offset
        result.append(('', f'|V{num}'))

    return result
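
# Illustrative sketch (not part of the original module): explicit offsets that
# leave gaps show up in the descr as unnamed '|V<n>' padding entries, e.g.
#
#     >>> dt = dtype({'names': ['a'], 'formats': ['u1'],
#     ...             'offsets': [2], 'itemsize': 4})
#     >>> _array_descr(dt)        # same value as dt.descr
#     [('', '|V2'), ('a', '|u1'), ('', '|V1')]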

# Build a new array from the information in a pickle.
# Note that the name numpy.core._internal._reconstruct is embedded in
# pickles of ndarrays made with NumPy before release 1.0
# so don't remove the name here, or you'll
# break backward compatibility.
def _reconstruct(subtype, shape, dtype):
    return ndarray.__new__(subtype, shape, dtype)
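
# Illustrative note (assumption, not from the original module): an ndarray
# pickle reduces to roughly ``(_reconstruct, (subtype, (0,), b'b'), state)``;
# _reconstruct only allocates the placeholder instance, e.g.
#
#     >>> _reconstruct(ndarray, (0,), dtype('b')).shape
#     (0,)
#
# and the subsequent ``__setstate__`` call restores the real shape, dtype and
# data buffer.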

# format_re was originally from numarray by J. Todd Miller
format_re = re.compile(r'(?P<order1>[<>|=]?)'
                       r'(?P<repeats> *[(]?[ ,0-9]*[)]? *)'
                       r'(?P<order2>[<>|=]?)'
                       r'(?P<dtype>[A-Za-z0-9.?]*(?:\[[a-zA-Z0-9,.]+\])?)')
sep_re = re.compile(r'\s*,\s*')
space_re = re.compile(r'\s+$')

# astr is a string (perhaps comma separated)

_convorder = {'=': _nbo}

def _commastring(astr):
    startindex = 0
    result = []
    while startindex < len(astr):
        mo = format_re.match(astr, pos=startindex)
        try:
            (order1, repeats, order2, dtype) = mo.groups()
        except (TypeError, AttributeError):
            raise ValueError(
                f'format number {len(result)+1} of "{astr}" is not recognized'
                ) from None
        startindex = mo.end()
        # Separator or ending padding
        if startindex < len(astr):
            if space_re.match(astr, pos=startindex):
                startindex = len(astr)
            else:
                mo = sep_re.match(astr, pos=startindex)
                if not mo:
                    raise ValueError(
                        'format number %d of "%s" is not recognized' %
                        (len(result)+1, astr))
                startindex = mo.end()

        if order2 == '':
            order = order1
        elif order1 == '':
            order = order2
        else:
            order1 = _convorder.get(order1, order1)
            order2 = _convorder.get(order2, order2)
            if (order1 != order2):
                raise ValueError(
                    'inconsistent byte-order specification %s and %s' %
                    (order1, order2))
            order = order1

        if order in ('|', '=', _nbo):
            order = ''
        dtype = order + dtype
        if (repeats == ''):
            newitem = dtype
        else:
            newitem = (dtype, ast.literal_eval(repeats))
        result.append(newitem)

    return result
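
# Illustrative sketch (not part of the original module): _commastring turns a
# comma-separated typestring into the list form accepted by ``dtype``, e.g.
#
#     >>> _commastring('i4, (2,3)f8, S5')
#     ['i4', ('f8', (2, 3)), 'S5']
#
# Byte-order characters matching the native order (and '=' or '|') are
# stripped from the individual items.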

class dummy_ctype:
    def __init__(self, cls):
        self._cls = cls

    def __mul__(self, other):
        return self

    def __call__(self, *other):
        return self._cls(other)

    def __eq__(self, other):
        return self._cls == other._cls

    def __ne__(self, other):
        return self._cls != other._cls


def _getintp_ctype():
    val = _getintp_ctype.cache
    if val is not None:
        return val
    if ctypes is None:
        import numpy as np
        val = dummy_ctype(np.intp)
    else:
        char = dtype('p').char
        if char == 'i':
            val = ctypes.c_int
        elif char == 'l':
            val = ctypes.c_long
        elif char == 'q':
            val = ctypes.c_longlong
        else:
            val = ctypes.c_long
    _getintp_ctype.cache = val
    return val

_getintp_ctype.cache = None
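
# Illustrative note (assumption, not from the original module): on a typical
# 64-bit Linux or macOS build ``dtype('p').char`` is 'l', so this returns
# ``ctypes.c_long``; on 64-bit Windows it is 'q' and the result is
# ``ctypes.c_longlong``. The lookup is done once and cached on the function.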

# Used for .ctypes attribute of ndarray

class _missing_ctypes:
    def cast(self, num, obj):
        return num.value

    class c_void_p:
        def __init__(self, ptr):
            self.value = ptr


class _ctypes:
    def __init__(self, array, ptr=None):
        self._arr = array

        if ctypes:
            self._ctypes = ctypes
            self._data = self._ctypes.c_void_p(ptr)
        else:
            # fake a pointer-like object that holds onto the reference
            self._ctypes = _missing_ctypes()
            self._data = self._ctypes.c_void_p(ptr)
            self._data._objects = array

        if self._arr.ndim == 0:
            self._zerod = True
        else:
            self._zerod = False

    def data_as(self, obj):
        """
        Return the data pointer cast to a particular c-types object.

        For example, calling ``self._as_parameter_`` is equivalent to
        ``self.data_as(ctypes.c_void_p)``. Perhaps you want to use the data as a
        pointer to a ctypes array of floating-point data:
        ``self.data_as(ctypes.POINTER(ctypes.c_double))``.

        The returned pointer will keep a reference to the array.
        """
        # _ctypes.cast function causes a circular reference of self._data in
        # self._data._objects. Attributes of self._data cannot be released
        # until gc.collect is called. Make a copy of the pointer first then let
        # it hold the array reference. This is a workaround to circumvent the
        # CPython bug https://bugs.python.org/issue12836
        ptr = self._ctypes.cast(self._data, obj)
        ptr._arr = self._arr
        return ptr

    def shape_as(self, obj):
        """
        Return the shape tuple as an array of some other c-types
        type. For example: ``self.shape_as(ctypes.c_short)``.
        """
        if self._zerod:
            return None
        return (obj*self._arr.ndim)(*self._arr.shape)

    def strides_as(self, obj):
        """
        Return the strides tuple as an array of some other
        c-types type. For example: ``self.strides_as(ctypes.c_longlong)``.
        """
        if self._zerod:
            return None
        return (obj*self._arr.ndim)(*self._arr.strides)

    @property
    def data(self):
        """
        A pointer to the memory area of the array as a Python integer.
        This memory area may contain data that is not aligned, or not in correct
        byte-order. The memory area may not even be writeable. The array
        flags and data-type of this array should be respected when passing this
        attribute to arbitrary C-code to avoid trouble that can include Python
        crashing. User Beware! The value of this attribute is exactly the same
        as ``self.__array_interface__['data'][0]``.

        Note that unlike ``data_as``, a reference will not be kept to the array:
        code like ``ctypes.c_void_p((a + b).ctypes.data)`` will result in a
        pointer to a deallocated array, and should be spelt
        ``(a + b).ctypes.data_as(ctypes.c_void_p)``
        """
        return self._data.value

    @property
    def shape(self):
        """
        (c_intp*self.ndim): A ctypes array of length self.ndim where
        the basetype is the C-integer corresponding to ``dtype('p')`` on this
        platform. This base-type could be `ctypes.c_int`, `ctypes.c_long`, or
        `ctypes.c_longlong` depending on the platform.
        The c_intp type is defined accordingly in `numpy.ctypeslib`.
        The ctypes array contains the shape of the underlying array.
        """
        return self.shape_as(_getintp_ctype())

    @property
    def strides(self):
        """
        (c_intp*self.ndim): A ctypes array of length self.ndim where
        the basetype is the same as for the shape attribute. This ctypes array
        contains the strides information from the underlying array. This strides
        information is important for showing how many bytes must be jumped to
        get to the next element in the array.
        """
        return self.strides_as(_getintp_ctype())

    @property
    def _as_parameter_(self):
        """
        Overrides the ctypes semi-magic method

        Enables `c_func(some_array.ctypes)`
        """
        return self.data_as(ctypes.c_void_p)

    # kept for compatibility
    get_data = data.fget
    get_shape = shape.fget
    get_strides = strides.fget
    get_as_parameter = _as_parameter_.fget
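
# Illustrative sketch (not part of the original module): this class backs the
# ``ndarray.ctypes`` attribute, e.g.
#
#     >>> import numpy as np, ctypes
#     >>> a = np.zeros((2, 3))
#     >>> p = a.ctypes.data_as(ctypes.POINTER(ctypes.c_double))  # keeps a alive
#     >>> tuple(a.ctypes.shape)      # ctypes array of c_intp
#     (2, 3)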

def _newnames(datatype, order):
    """
    Given a datatype and an order object, return a new names tuple, with the
    order indicated
    """
    oldnames = datatype.names
    nameslist = list(oldnames)
    if isinstance(order, str):
        order = [order]
    seen = set()
    if isinstance(order, (list, tuple)):
        for name in order:
            try:
                nameslist.remove(name)
            except ValueError:
                if name in seen:
                    raise ValueError(f"duplicate field name: {name}") from None
                else:
                    raise ValueError(f"unknown field name: {name}") from None
            seen.add(name)
        return tuple(list(order) + nameslist)
    raise ValueError(f"unsupported order value: {order}")
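
# Illustrative sketch (not part of the original module): fields named in
# ``order`` are moved to the front, the rest keep their original order, e.g.
#
#     >>> dt = dtype([('a', 'i4'), ('b', 'f8'), ('c', 'u1')])
#     >>> _newnames(dt, 'b')
#     ('b', 'a', 'c')
#     >>> _newnames(dt, ['c', 'a'])
#     ('c', 'a', 'b')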

def _copy_fields(ary):
    """Return copy of structured array with padding between fields removed.

    Parameters
    ----------
    ary : ndarray
       Structured array from which to remove padding bytes

    Returns
    -------
    ary_copy : ndarray
       Copy of ary with padding bytes removed
    """
    dt = ary.dtype
    copy_dtype = {'names': dt.names,
                  'formats': [dt.fields[name][0] for name in dt.names]}
    return array(ary, dtype=copy_dtype, copy=True)
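
# Illustrative sketch (not part of the original module): for an aligned
# structured dtype the packed copy drops the padding bytes, e.g.
#
#     >>> a = ndarray(3, dtype=dtype('u1,f8', align=True))  # itemsize 16
#     >>> _copy_fields(a).dtype.itemsize                    # packed layout
#     9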

def _getfield_is_safe(oldtype, newtype, offset):
    """ Checks safety of getfield for object arrays.

    As in _view_is_safe, we need to check that memory containing objects is not
    reinterpreted as a non-object datatype and vice versa.

    Parameters
    ----------
    oldtype : data-type
        Data type of the original ndarray.
    newtype : data-type
        Data type of the field being accessed by ndarray.getfield
    offset : int
        Offset of the field being accessed by ndarray.getfield

    Raises
    ------
    TypeError
        If the field access is invalid

    """
    if newtype.hasobject or oldtype.hasobject:
        if offset == 0 and newtype == oldtype:
            return
        if oldtype.names is not None:
            for name in oldtype.names:
                if (oldtype.fields[name][1] == offset and
                        oldtype.fields[name][0] == newtype):
                    return
        raise TypeError("Cannot get/set field of an object array")
    return


def _view_is_safe(oldtype, newtype):
    """ Checks safety of a view involving object arrays, for example when
    doing::

        np.zeros(10, dtype=oldtype).view(newtype)

    Parameters
    ----------
    oldtype : data-type
        Data type of original ndarray
    newtype : data-type
        Data type of the view

    Raises
    ------
    TypeError
        If the new type is incompatible with the old type.

    """

    # if the types are equivalent, there is no problem.
    # for example: dtype((np.record, 'i4,i4')) == dtype((np.void, 'i4,i4'))
    if oldtype == newtype:
        return

    if newtype.hasobject or oldtype.hasobject:
        raise TypeError("Cannot change data-type for object array.")
    return
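
# Illustrative sketch (not part of the original module): views that neither
# fabricate nor discard object pointers pass silently, anything else raises:
#
#     >>> _view_is_safe(dtype('i4,i4'), dtype('i8'))   # no objects: OK (None)
#     >>> _view_is_safe(dtype('O'), dtype('O'))        # same type: OK (None)
#     >>> _view_is_safe(dtype('i8'), dtype('O'))
#     Traceback (most recent call last):
#         ...
#     TypeError: Cannot change data-type for object array.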

# Given a string containing a PEP 3118 format specifier,
# construct a NumPy dtype

_pep3118_native_map = {
    '?': '?',
    'c': 'S1',
    'b': 'b',
    'B': 'B',
    'h': 'h',
    'H': 'H',
    'i': 'i',
    'I': 'I',
    'l': 'l',
    'L': 'L',
    'q': 'q',
    'Q': 'Q',
    'e': 'e',
    'f': 'f',
    'd': 'd',
    'g': 'g',
    'Zf': 'F',
    'Zd': 'D',
    'Zg': 'G',
    's': 'S',
    'w': 'U',
    'O': 'O',
    'x': 'V',  # padding
}
_pep3118_native_typechars = ''.join(_pep3118_native_map.keys())

_pep3118_standard_map = {
    '?': '?',
    'c': 'S1',
    'b': 'b',
    'B': 'B',
    'h': 'i2',
    'H': 'u2',
    'i': 'i4',
    'I': 'u4',
    'l': 'i4',
    'L': 'u4',
    'q': 'i8',
    'Q': 'u8',
    'e': 'f2',
    'f': 'f',
    'd': 'd',
    'Zf': 'F',
    'Zd': 'D',
    's': 'S',
    'w': 'U',
    'O': 'O',
    'x': 'V',  # padding
}
_pep3118_standard_typechars = ''.join(_pep3118_standard_map.keys())

_pep3118_unsupported_map = {
    'u': 'UCS-2 strings',
    '&': 'pointers',
    't': 'bitfields',
    'X': 'function pointers',
}

class _Stream:
    def __init__(self, s):
        self.s = s
        self.byteorder = '@'

    def advance(self, n):
        res = self.s[:n]
        self.s = self.s[n:]
        return res

    def consume(self, c):
        if self.s[:len(c)] == c:
            self.advance(len(c))
            return True
        return False

    def consume_until(self, c):
        if callable(c):
            i = 0
            while i < len(self.s) and not c(self.s[i]):
                i = i + 1
            return self.advance(i)
        else:
            i = self.s.index(c)
            res = self.advance(i)
            self.advance(len(c))
            return res

    @property
    def next(self):
        return self.s[0]

    def __bool__(self):
        return bool(self.s)


def _dtype_from_pep3118(spec):
    stream = _Stream(spec)
    dtype, align = __dtype_from_pep3118(stream, is_subdtype=False)
    return dtype
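
# Illustrative sketch (not part of the original module): standard-size
# (non-native) PEP 3118 specs map straight onto fixed-width NumPy types, e.g.
#
#     >>> _dtype_from_pep3118('<d') == dtype('<f8')
#     True
#     >>> _dtype_from_pep3118('<qQ') == dtype([('f0', '<i8'), ('f1', '<u8')])
#     True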

def __dtype_from_pep3118(stream, is_subdtype):
    field_spec = dict(
        names=[],
        formats=[],
        offsets=[],
        itemsize=0
    )
    offset = 0
    common_alignment = 1
    is_padding = False

    # Parse spec
    while stream:
        value = None

        # End of structure, bail out to upper level
        if stream.consume('}'):
            break

        # Sub-arrays (1)
        shape = None
        if stream.consume('('):
            shape = stream.consume_until(')')
            shape = tuple(map(int, shape.split(',')))

        # Byte order
        if stream.next in ('@', '=', '<', '>', '^', '!'):
            byteorder = stream.advance(1)
            if byteorder == '!':
                byteorder = '>'
            stream.byteorder = byteorder

        # Byte order characters also control native vs. standard type sizes
        if stream.byteorder in ('@', '^'):
            type_map = _pep3118_native_map
            type_map_chars = _pep3118_native_typechars
        else:
            type_map = _pep3118_standard_map
            type_map_chars = _pep3118_standard_typechars

        # Item sizes
        itemsize_str = stream.consume_until(lambda c: not c.isdigit())
        if itemsize_str:
            itemsize = int(itemsize_str)
        else:
            itemsize = 1

        # Data types
        is_padding = False

        if stream.consume('T{'):
            value, align = __dtype_from_pep3118(
                stream, is_subdtype=True)
        elif stream.next in type_map_chars:
            if stream.next == 'Z':
                typechar = stream.advance(2)
            else:
                typechar = stream.advance(1)

            is_padding = (typechar == 'x')
            dtypechar = type_map[typechar]
            if dtypechar in 'USV':
                dtypechar += '%d' % itemsize
                itemsize = 1
            numpy_byteorder = {'@': '=', '^': '='}.get(
                stream.byteorder, stream.byteorder)
            value = dtype(numpy_byteorder + dtypechar)
            align = value.alignment
        elif stream.next in _pep3118_unsupported_map:
            desc = _pep3118_unsupported_map[stream.next]
            raise NotImplementedError(
                "Unrepresentable PEP 3118 data type {!r} ({})"
                .format(stream.next, desc))
        else:
            raise ValueError(
                "Unknown PEP 3118 data type specifier %r" % stream.s)

        #
        # Native alignment may require padding
        #
        # Here we assume that the presence of a '@' character implicitly
        # implies that the start of the array is *already* aligned.
        #
        extra_offset = 0
        if stream.byteorder == '@':
            start_padding = (-offset) % align
            intra_padding = (-value.itemsize) % align

            offset += start_padding

            if intra_padding != 0:
                if itemsize > 1 or (shape is not None and _prod(shape) > 1):
                    # Inject internal padding to the end of the sub-item
                    value = _add_trailing_padding(value, intra_padding)
                else:
                    # We can postpone the injection of internal padding,
                    # as the item appears at most once
                    extra_offset += intra_padding

            # Update common alignment
            common_alignment = _lcm(align, common_alignment)

        # Convert itemsize to sub-array
        if itemsize != 1:
            value = dtype((value, (itemsize,)))

        # Sub-arrays (2)
        if shape is not None:
            value = dtype((value, shape))

        # Field name
        if stream.consume(':'):
            name = stream.consume_until(':')
        else:
            name = None

        if not (is_padding and name is None):
            if name is not None and name in field_spec['names']:
                raise RuntimeError(
                    f"Duplicate field name '{name}' in PEP3118 format")
            field_spec['names'].append(name)
            field_spec['formats'].append(value)
            field_spec['offsets'].append(offset)

        offset += value.itemsize
        offset += extra_offset

        field_spec['itemsize'] = offset

    # extra final padding for aligned types
    if stream.byteorder == '@':
        field_spec['itemsize'] += (-offset) % common_alignment

    # Check if this was a simple 1-item type, and unwrap it
    if (field_spec['names'] == [None]
            and field_spec['offsets'][0] == 0
            and field_spec['itemsize'] == field_spec['formats'][0].itemsize
            and not is_subdtype):
        ret = field_spec['formats'][0]
    else:
        _fix_names(field_spec)
        ret = dtype(field_spec)

    # Finished
    return ret, common_alignment

def _fix_names(field_spec):
    """ Replace names which are None with the next unused f%d name """
    names = field_spec['names']
    for i, name in enumerate(names):
        if name is not None:
            continue

        j = 0
        while True:
            name = f'f{j}'
            if name not in names:
                break
            j = j + 1
        names[i] = name

def _add_trailing_padding(value, padding):
    """Inject the specified number of padding bytes at the end of a dtype"""
    if value.fields is None:
        field_spec = dict(
            names=['f0'],
            formats=[value],
            offsets=[0],
            itemsize=value.itemsize
        )
    else:
        fields = value.fields
        names = value.names
        field_spec = dict(
            names=names,
            formats=[fields[name][0] for name in names],
            offsets=[fields[name][1] for name in names],
            itemsize=value.itemsize
        )

    field_spec['itemsize'] += padding
    return dtype(field_spec)
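
# Illustrative sketch (not part of the original module): the padding only
# grows the itemsize; the existing field layout is unchanged, e.g.
#
#     >>> _add_trailing_padding(dtype('u1'), 3).itemsize
#     4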

def _prod(a):
    p = 1
    for x in a:
        p *= x
    return p


def _gcd(a, b):
    """Calculate the greatest common divisor of a and b"""
    while b:
        a, b = b, a % b
    return a


def _lcm(a, b):
    return a // _gcd(a, b) * b

def array_ufunc_errmsg_formatter(dummy, ufunc, method, *inputs, **kwargs):
    """ Format the error message for when __array_ufunc__ gives up. """
    args_string = ', '.join(['{!r}'.format(arg) for arg in inputs] +
                            ['{}={!r}'.format(k, v)
                             for k, v in kwargs.items()])
    args = inputs + kwargs.get('out', ())
    types_string = ', '.join(repr(type(arg).__name__) for arg in args)
    return ('operand type(s) all returned NotImplemented from '
            '__array_ufunc__({!r}, {!r}, {}): {}'
            .format(ufunc, method, args_string, types_string))


def array_function_errmsg_formatter(public_api, types):
    """ Format the error message for when __array_function__ gives up. """
    func_name = '{}.{}'.format(public_api.__module__, public_api.__name__)
    return ("no implementation found for '{}' on types that implement "
            '__array_function__: {}'.format(func_name, list(types)))

def _ufunc_doc_signature_formatter(ufunc):
    """
    Builds a signature string which resembles PEP 457

    This is used to construct the first line of the docstring
    """
    # input arguments are simple
    if ufunc.nin == 1:
        in_args = 'x'
    else:
        in_args = ', '.join(f'x{i+1}' for i in range(ufunc.nin))

    # output arguments are both keyword or positional
    if ufunc.nout == 0:
        out_args = ', /, out=()'
    elif ufunc.nout == 1:
        out_args = ', /, out=None'
    else:
        out_args = '[, {positional}], / [, out={default}]'.format(
            positional=', '.join(
                'out{}'.format(i+1) for i in range(ufunc.nout)),
            default=repr((None,)*ufunc.nout)
        )

    # keyword only args depend on whether this is a gufunc
    kwargs = (
        ", casting='same_kind'"
        ", order='K'"
        ", dtype=None"
        ", subok=True"
        "[, signature"
        ", extobj]"
    )
    if ufunc.signature is None:
        kwargs = ", where=True" + kwargs

    # join all the parts together
    return '{name}({in_args}{out_args}, *{kwargs})'.format(
        name=ufunc.__name__,
        in_args=in_args,
        out_args=out_args,
        kwargs=kwargs
    )
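
# Illustrative sketch (not part of the original module): for a plain binary
# ufunc with one output and no gufunc signature this reproduces the familiar
# first docstring line, e.g.
#
#     >>> import numpy as np
#     >>> _ufunc_doc_signature_formatter(np.add)
#     "add(x1, x2, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj])"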

def npy_ctypes_check(cls):
    # determine if a class comes from ctypes, in order to work around
    # a bug in the buffer protocol for those objects, bpo-10746
    try:
        # ctypes classes are new-style, so have an __mro__. This probably fails
        # for ctypes classes with multiple inheritance.
        if IS_PYPY:
            # (..., _ctypes.basics._CData, Bufferable, object)
            ctype_base = cls.__mro__[-3]
        else:
            # (..., _ctypes._CData, object)
            ctype_base = cls.__mro__[-2]
        # right now, they're part of the _ctypes module
        return '_ctypes' in ctype_base.__module__
    except Exception:
        return False

class recursive:
    '''
    A decorator class for recursive nested functions.
    Naive recursive nested functions hold a reference to themselves:

    def outer(*args):
        def stringify_leaky(arg0, *arg1):
            if len(arg1) > 0:
                return stringify_leaky(*arg1)  # <- HERE
            return str(arg0)
        stringify_leaky(*args)

    This design pattern creates a reference cycle that is difficult for a
    garbage collector to resolve. The decorator class prevents the
    cycle by passing the nested function in as an argument `self`:

    def outer(*args):
        @recursive
        def stringify(self, arg0, *arg1):
            if len(arg1) > 0:
                return self(*arg1)
            return str(arg0)
        stringify(*args)

    '''
    def __init__(self, func):
        self.func = func

    def __call__(self, *args, **kwargs):
        return self.func(self, *args, **kwargs)