|
10 | 10 | from artiq.coredevice.runtime import source_loader
|
11 | 11 |
|
12 | 12 |
|
13 |
# Names exported on ``import *``.  NB: ``int`` deliberately shadows the
# builtin with the width-aware integer class defined later in this module;
# the original builtin stays reachable as ``host_int``.
__all__ = [
    "host_int", "int",
    "kernel", "portable", "syscall",
    "set_time_manager", "set_watchdog_factory",
    "ARTIQException",
]
|
|
24 | 24 | )
|
25 | 25 | __all__.extend(kernel_globals)
|
26 | 26 |
|
# Keep a reference to the builtin ``int`` before it is shadowed by the
# wrap-around integer class below.
host_int = int


class int:
    """
    Arbitrary-precision integers for static compilation.

    The static compiler does not use unlimited-precision integers,
    like Python normally does, because of their unbounded memory requirements.
    Instead, it allows to choose a bit width (usually 32 or 64) at compile-time,
    and all computations follow wrap-around (two's complement) semantics on
    overflow.

    This class implements the same semantics on the host.

    For example:

    >>> a = int(1, width=64)
    >>> b = int(3, width=64) + 2
    >>> isinstance(a, int)
    True
    >>> isinstance(b, int)
    True
    >>> a + b
    int(6, width=64)
    >>> int(10, width=32) + 0x7fffffff
    int(-2147483639, width=32)
    >>> int(0x80000000)
    int(-2147483648, width=32)
    """

    __slots__ = ['_value', '_width']

    def __new__(cls, value, width=32):
        if isinstance(value, int):
            # Already a wrapped integer: returned unchanged.
            # NOTE(review): the requested ``width`` is silently ignored on
            # this path — confirm re-widening is not expected by callers.
            return value
        else:
            # Reduce ``value`` to the signed two's-complement range of
            # ``width`` bits: keep the low width-1 magnitude bits and
            # subtract 2**(width-1) when the sign bit is set.
            sign_bit = 2 ** (width - 1)
            value = host_int(value)
            if value & sign_bit:
                value = (value & (sign_bit - 1)) - sign_bit
            else:
                value &= sign_bit - 1

            self = super().__new__(cls)
            self._value = value
            self._width = width
            return self

    @property
    def width(self):
        """The bit width this integer wraps around at."""
        return self._width

    def __int__(self):
        return self._value

    def __index__(self):
        # Lets instances be used where a host integer index is required
        # (slicing, hex(), range(), ...).
        return self._value

    def __float__(self):
        return float(self._value)

    def __str__(self):
        return str(self._value)

    def __repr__(self):
        return "int({}, width={})".format(self._value, self._width)

    def __hash__(self):
        # BUG FIX: defining __eq__ without __hash__ implicitly made
        # instances unhashable.  Equal values must hash equally — including
        # across host_int, since e.g. int(3) == 3 — so hash the host value.
        return hash(self._value)

    def _unaryop(lower_fn):
        # Build a unary operator: compute on the host value, re-wrap the
        # result at this integer's width.
        def operator(self):
            return int(lower_fn(self._value), self._width)
        return operator

    __neg__ = _unaryop(host_int.__neg__)
    __pos__ = _unaryop(host_int.__pos__)
    __abs__ = _unaryop(host_int.__abs__)
    __invert__ = _unaryop(host_int.__invert__)
    __round__ = _unaryop(host_int.__round__)

    def _binaryop(lower_fn, rlower_fn=None):
        # Build a binary operator.  Mixing with a host_int keeps our width;
        # mixing two wrapped ints promotes to the wider operand's width; any
        # other operand is deferred to its reflected method (or refused when
        # no reflected name is given, as for shifts and bitwise ops).
        def operator(self, other):
            if isinstance(other, host_int):
                return int(lower_fn(self._value, other), self._width)
            elif isinstance(other, int):
                width = self._width if self._width > other._width else other._width
                return int(lower_fn(self._value, other._value), width)
            elif rlower_fn:
                return getattr(other, rlower_fn)(self._value)
            else:
                return NotImplemented
        return operator

    __add__ = __iadd__ = _binaryop(host_int.__add__, "__radd__")
    __sub__ = __isub__ = _binaryop(host_int.__sub__, "__rsub__")
    __mul__ = __imul__ = _binaryop(host_int.__mul__, "__rmul__")
    __floordiv__ = __ifloordiv__ = _binaryop(host_int.__floordiv__, "__rfloordiv__")
    __mod__ = __imod__ = _binaryop(host_int.__mod__, "__rmod__")
    __pow__ = __ipow__ = _binaryop(host_int.__pow__, "__rpow__")

    __radd__ = _binaryop(host_int.__radd__, "__add__")
    __rsub__ = _binaryop(host_int.__rsub__, "__sub__")
    __rmul__ = _binaryop(host_int.__rmul__, "__mul__")
    __rfloordiv__ = _binaryop(host_int.__rfloordiv__, "__floordiv__")
    __rmod__ = _binaryop(host_int.__rmod__, "__mod__")
    __rpow__ = _binaryop(host_int.__rpow__, "__pow__")

    __lshift__ = __ilshift__ = _binaryop(host_int.__lshift__)
    __rshift__ = __irshift__ = _binaryop(host_int.__rshift__)
    __and__ = __iand__ = _binaryop(host_int.__and__)
    __or__ = __ior__ = _binaryop(host_int.__or__)
    __xor__ = __ixor__ = _binaryop(host_int.__xor__)

    __rlshift__ = _binaryop(host_int.__rlshift__)
    __rrshift__ = _binaryop(host_int.__rrshift__)
    __rand__ = _binaryop(host_int.__rand__)
    __ror__ = _binaryop(host_int.__ror__)
    __rxor__ = _binaryop(host_int.__rxor__)

    def _compareop(lower_fn, rlower_fn):
        # Build a comparison.  For operands that are neither host ints nor
        # wrapped ints, defer to the operand's *reflected* comparison
        # (``a < b`` becomes ``b > a``).
        def operator(self, other):
            if isinstance(other, host_int):
                return lower_fn(self._value, other)
            elif isinstance(other, int):
                return lower_fn(self._value, other._value)
            else:
                return getattr(other, rlower_fn)(self._value)
        return operator

    # BUG FIX: the fallbacks previously deferred to the *negated* comparison
    # (__eq__ -> other.__ne__, __lt__ -> other.__ge__, ...), which inverted
    # results against floats and other numeric types (int(3) == 3.0 was
    # False).  The correct fallback is the reflected operator.
    __eq__ = _compareop(host_int.__eq__, "__eq__")
    __ne__ = _compareop(host_int.__ne__, "__ne__")
    __gt__ = _compareop(host_int.__gt__, "__lt__")
    __ge__ = _compareop(host_int.__ge__, "__le__")
    __lt__ = _compareop(host_int.__lt__, "__gt__")
    __le__ = _compareop(host_int.__le__, "__ge__")
85 | 156 |
|
86 | 157 |
|
87 | 158 | _ARTIQEmbeddedInfo = namedtuple("_ARTIQEmbeddedInfo",
|
|
0 commit comments