1 /* Long (arbitrary precision) integer object implementation */
3 /* XXX The functional organization of this file is terrible */
6 #include "longintrepr.h"
14 #define NSMALLPOSINTS 257
17 #define NSMALLNEGINTS 5
20 /* convert a PyLong of size 1, 0 or -1 to an sdigit */
/* NOTE(review): reads ob_digit[0] whenever Py_SIZE(x) != 0; callers must
   guarantee ABS(Py_SIZE(x)) <= 1.  `x` is evaluated multiple times, so it
   must have no side effects. */
21 #define MEDIUM_VALUE(x) (Py_SIZE(x) < 0 ? -(sdigit)(x)->ob_digit[0] : \
22 (Py_SIZE(x) == 0 ? (sdigit)0 : \
23 (sdigit)(x)->ob_digit[0]))
/* Textual absolute value; evaluates `x` twice (no side effects in `x`). */
24 #define ABS(x) ((x) < 0 ? -(x) : (x))
26 #if NSMALLNEGINTS + NSMALLPOSINTS > 0
27 /* Small integers are preallocated in this array so that they
29 The integers that are preallocated are those in the range
30 -NSMALLNEGINTS (inclusive) to NSMALLPOSINTS (not inclusive).
32 static PyLongObject small_ints
[NSMALLNEGINTS
+ NSMALLPOSINTS
];
34 int quick_int_allocs
, quick_neg_int_allocs
;
/* Return a new reference to the preallocated object for `ival`.
   NOTE(review): several body lines are elided in this listing (presumably
   the INCREF and the positive-counter bump) -- confirm against the full
   source before relying on the counters below. */
38 get_small_int(sdigit ival
)
40 PyObject
*v
= (PyObject
*)(small_ints
+ ival
+ NSMALLNEGINTS
);
/* Statistics counter: negative small ints served from the cache. */
46 quick_neg_int_allocs
++;
/* Early-return helper used by the PyLong_From* constructors: if `ival`
   fits the small-int cache, return the shared object immediately. */
50 #define CHECK_SMALL_INT(ival) \
51 do if (-NSMALLNEGINTS <= ival && ival < NSMALLPOSINTS) { \
52 return get_small_int((sdigit)ival); \
/* If `v` is a 0/1-digit long whose value is in the small-int cache,
   swap it for the shared cached object; otherwise return `v` unchanged.
   NOTE(review): the DECREF of the replaced object is on an elided line --
   confirm ownership handling against the full source. */
56 maybe_small_long(PyLongObject
*v
)
58 if (v
&& ABS(Py_SIZE(v
)) <= 1) {
59 sdigit ival
= MEDIUM_VALUE(v
);
60 if (-NSMALLNEGINTS
<= ival
&& ival
< NSMALLPOSINTS
) {
62 return (PyLongObject
*)get_small_int(ival
);
/* No small-int cache configured: both helpers become no-ops. */
68 #define CHECK_SMALL_INT(ival)
69 #define maybe_small_long(val) (val)
/* NOTE(review): the `#define` header for this macro (presumably NEGATE(x))
   is elided from this listing; the body below is its continuation lines.
   A refcount of 1 means the object is unshared and can be negated in
   place; otherwise it must be a cached small int, so a fresh object is
   built via PyLong_FromLong. */
72 /* If a freshly-allocated long is already shared, it must
73 be a small integer, so negating it must go to PyLong_FromLong */
75 do if (Py_REFCNT(x) == 1) Py_SIZE(x) = -Py_SIZE(x); \
76 else { PyObject* tmp=PyLong_FromLong(-MEDIUM_VALUE(x)); \
77 Py_DECREF(x); (x) = (PyLongObject*)tmp; } \
79 /* For long multiplication, use the O(N**2) school algorithm unless
80 * both operands contain more than KARATSUBA_CUTOFF digits (this
81 * being an internal Python long digit, in base BASE).
83 #define KARATSUBA_CUTOFF 70
84 #define KARATSUBA_SQUARE_CUTOFF (2 * KARATSUBA_CUTOFF)
86 /* For exponentiation, use the binary left-to-right algorithm
87 * unless the exponent contains more than FIVEARY_CUTOFF digits.
88 * In that case, do 5 bits at a time. The potential drawback is that
89 * a table of 2**5 intermediate results is computed.
91 #define FIVEARY_CUTOFF 8
/* Both macros evaluate each argument twice -- no side effects allowed. */
95 #define MAX(x, y) ((x) < (y) ? (y) : (x))
96 #define MIN(x, y) ((x) > (y) ? (y) : (x))
/* Periodically run pending-signal checks inside long-running loops;
   `PyTryBlock` is the caller's statement executed when a signal fired. */
98 #define SIGCHECK(PyTryBlock) \
99 if (--_Py_Ticker < 0) { \
100 _Py_Ticker = _Py_CheckInterval; \
101 if (PyErr_CheckSignals()) PyTryBlock \
104 /* forward declaration */
105 static int bits_in_digit(digit d
);
107 /* Normalize (remove leading zeros from) a long int object.
108 Doesn't attempt to free the storage--in most cases, due to the nature
109 of the algorithms used, this could save at most be one word anyway. */
111 static PyLongObject
*
112 long_normalize(register PyLongObject
*v
)
114 Py_ssize_t j
= ABS(Py_SIZE(v
));
/* NOTE(review): the declaration/initialization of `i` (presumably
   `Py_ssize_t i = j;`) is on an elided line -- confirm before editing.
   The loop strips high-order zero digits. */
117 while (i
> 0 && v
->ob_digit
[i
-1] == 0)
/* Reapply the original sign to the trimmed digit count. */
120 Py_SIZE(v
) = (Py_SIZE(v
) < 0) ? -(i
) : i
;
124 /* Allocate a new long int object with size digits.
125 Return NULL and set exception if we run out of memory. */
/* Largest digit count whose allocation size fits in Py_ssize_t. */
127 #define MAX_LONG_DIGITS \
128 ((PY_SSIZE_T_MAX - offsetof(PyLongObject, ob_digit))/sizeof(digit))
131 _PyLong_New(Py_ssize_t size
)
133 PyLongObject
*result
;
134 /* Number of bytes needed is: offsetof(PyLongObject, ob_digit) +
135 sizeof(digit)*size. Previous incarnations of this code used
136 sizeof(PyVarObject) instead of the offsetof, but this risks being
137 incorrect in the presence of padding between the PyVarObject header
/* Refuse over-large requests before the size computation can overflow. */
139 if (size
> (Py_ssize_t
)MAX_LONG_DIGITS
) {
140 PyErr_SetString(PyExc_OverflowError
,
141 "too many digits in integer");
/* NOTE(review): the NULL check after PyObject_MALLOC is on elided
   lines -- confirm the out-of-memory path against the full source. */
144 result
= PyObject_MALLOC(offsetof(PyLongObject
, ob_digit
) +
150 return (PyLongObject
*)PyObject_INIT_VAR(result
, &PyLong_Type
, size
);
/* Return a new PyLong with the same value as `src`; small values are
   served from the shared cache via CHECK_SMALL_INT.
   NOTE(review): the declaration of `i` and the negation of `ival` for
   negative sizes are on elided lines -- confirm against the full source. */
154 _PyLong_Copy(PyLongObject
*src
)
156 PyLongObject
*result
;
164 sdigit ival
= src
->ob_digit
[0];
165 if (Py_SIZE(src
) < 0)
167 CHECK_SMALL_INT(ival
);
169 result
= _PyLong_New(i
);
170 if (result
!= NULL
) {
171 Py_SIZE(result
) = Py_SIZE(src
);
/* Digit-by-digit copy (loop header elided in this listing). */
173 result
->ob_digit
[i
] = src
->ob_digit
[i
];
175 return (PyObject
*)result
;
178 /* Create a new long int object from a C long int */
181 PyLong_FromLong(long ival
)
184 unsigned long abs_ival
;
185 unsigned long t
; /* unsigned so >> doesn't propagate sign bit */
189 CHECK_SMALL_INT(ival
);
192 /* negate: can't write this as abs_ival = -ival since that
193 invokes undefined behaviour when ival is LONG_MIN */
194 abs_ival
= 0U-(unsigned long)ival
;
198 abs_ival
= (unsigned long)ival
;
201 /* Fast path for single-digit ints */
202 if (!(abs_ival
>> PyLong_SHIFT
)) {
/* NOTE(review): the _PyLong_New(1) call and its NULL check are on
   elided lines here. */
206 v
->ob_digit
[0] = Py_SAFE_DOWNCAST(
207 abs_ival
, unsigned long, digit
);
/* Second fast path: value fits in exactly two Python digits. */
214 if (!(abs_ival
>> 2*PyLong_SHIFT
)) {
218 v
->ob_digit
[0] = Py_SAFE_DOWNCAST(
219 abs_ival
& PyLong_MASK
, unsigned long, digit
);
220 v
->ob_digit
[1] = Py_SAFE_DOWNCAST(
221 abs_ival
>> PyLong_SHIFT
, unsigned long, digit
);
227 /* Larger numbers: loop to determine number of digits */
233 v
= _PyLong_New(ndigits
);
235 digit
*p
= v
->ob_digit
;
236 Py_SIZE(v
) = ndigits
*sign
;
/* Emit digits least-significant first, PyLong_SHIFT bits at a time
   (the loop header and `t >>= PyLong_SHIFT` step are elided). */
239 *p
++ = Py_SAFE_DOWNCAST(
240 t
& PyLong_MASK
, unsigned long, digit
);
244 return (PyObject
*)v
;
247 /* Create a new long int object from a C unsigned long int */
250 PyLong_FromUnsignedLong(unsigned long ival
)
/* Single-digit values take the (small-int-aware) signed path; the cast
   is safe because ival < PyLong_BASE fits in a long. */
256 if (ival
< PyLong_BASE
)
257 return PyLong_FromLong(ival
);
258 /* Count the number of Python digits. */
259 t
= (unsigned long)ival
;
264 v
= _PyLong_New(ndigits
);
266 digit
*p
= v
->ob_digit
;
267 Py_SIZE(v
) = ndigits
;
/* Store digits LSD-first, PyLong_SHIFT bits per digit
   (loop header elided in this listing). */
269 *p
++ = (digit
)(ival
& PyLong_MASK
);
270 ival
>>= PyLong_SHIFT
;
273 return (PyObject
*)v
;
276 /* Create a new long int object from a C double */
279 PyLong_FromDouble(double dval
)
283 int i
, ndig
, expo
, neg
;
/* Reject non-finite inputs with the conventional exceptions. */
285 if (Py_IS_INFINITY(dval
)) {
286 PyErr_SetString(PyExc_OverflowError
,
287 "cannot convert float infinity to integer");
290 if (Py_IS_NAN(dval
)) {
291 PyErr_SetString(PyExc_ValueError
,
292 "cannot convert float NaN to integer");
299 frac
= frexp(dval
, &expo
); /* dval = frac*2**expo; 0.0 <= frac < 1.0 */
301 return PyLong_FromLong(0L);
302 ndig
= (expo
-1) / PyLong_SHIFT
+ 1; /* Number of 'digits' in result */
303 v
= _PyLong_New(ndig
);
/* Scale frac so the top digit's worth of bits sits in its integer part,
   then peel off one digit per iteration, most-significant first. */
306 frac
= ldexp(frac
, (expo
-1) % PyLong_SHIFT
+ 1);
307 for (i
= ndig
; --i
>= 0; ) {
308 digit bits
= (digit
)frac
;
309 v
->ob_digit
[i
] = bits
;
310 frac
= frac
- (double)bits
;
311 frac
= ldexp(frac
, PyLong_SHIFT
);
/* NOTE(review): the `if (neg)` guard for this negation is on an elided
   line -- confirm against the full source. */
314 Py_SIZE(v
) = -(Py_SIZE(v
));
315 return (PyObject
*)v
;
318 /* Checking for overflow in PyLong_AsLong is a PITA since C doesn't define
319 * anything about what happens when a signed integer operation overflows,
320 * and some compilers think they're doing you a favor by being "clever"
321 * then. The bit pattern for the largest postive signed long is
322 * (unsigned long)LONG_MAX, and for the smallest negative signed long
323 * it is abs(LONG_MIN), which we could write -(unsigned long)LONG_MIN.
324 * However, some other compilers warn about applying unary minus to an
325 * unsigned operand. Hence the weird "0-".
327 #define PY_ABS_LONG_MIN (0-(unsigned long)LONG_MIN)
328 #define PY_ABS_SSIZE_T_MIN (0-(size_t)PY_SSIZE_T_MIN)
330 /* Get a C long int from a long int object.
331 Returns -1 and sets an error condition if overflow occurs. */
334 PyLong_AsLongAndOverflow(PyObject
*vv
, int *overflow
)
336 /* This version by Tim Peters */
337 register PyLongObject
*v
;
338 unsigned long x
, prev
;
342 int do_decref
= 0; /* if nb_int was called */
346 PyErr_BadInternalCall();
/* Not a PyLong: try to coerce via the type's nb_int slot. */
350 if (!PyLong_Check(vv
)) {
352 if ((nb
= vv
->ob_type
->tp_as_number
) == NULL
||
353 nb
->nb_int
== NULL
) {
354 PyErr_SetString(PyExc_TypeError
, "an integer is required");
357 vv
= (*nb
->nb_int
) (vv
);
/* nb_int must return an int; anything else is a type error. */
361 if (!PyLong_Check(vv
)) {
363 PyErr_SetString(PyExc_TypeError
,
364 "nb_int should return int object");
370 v
= (PyLongObject
*)vv
;
/* Fast cases for 0/1-digit values (switch header elided here). */
375 res
= -(sdigit
)v
->ob_digit
[0];
381 res
= v
->ob_digit
[0];
/* Multi-digit case: accumulate MSD-first; `prev` detects the shift
   losing bits, i.e. overflow of unsigned long. */
392 x
= (x
<< PyLong_SHIFT
) + v
->ob_digit
[i
];
393 if ((x
>> PyLong_SHIFT
) != prev
) {
394 *overflow
= Py_SIZE(v
) > 0 ? 1 : -1;
398 /* Haven't lost any bits, but casting to long requires extra care
399 * (see comment above).
401 if (x
<= (unsigned long)LONG_MAX
) {
402 res
= (long)x
* sign
;
404 else if (sign
< 0 && x
== PY_ABS_LONG_MIN
) {
/* Magnitude fits neither LONG_MAX nor equals |LONG_MIN|: overflow. */
408 *overflow
= Py_SIZE(v
) > 0 ? 1 : -1;
409 /* res is already set to -1 */
/* Convenience wrapper over PyLong_AsLongAndOverflow that converts either
   overflow direction into an OverflowError.  Returns -1 with an exception
   set on failure (the overflow check between these lines is elided). */
420 PyLong_AsLong(PyObject
*obj
)
423 long result
= PyLong_AsLongAndOverflow(obj
, &overflow
);
425 /* XXX: could be cute and give a different
426 message for overflow == -1 */
427 PyErr_SetString(PyExc_OverflowError
,
428 "Python int too large to convert to C long");
433 /* Get a Py_ssize_t from a long int object.
434 Returns -1 and sets an error condition if overflow occurs. */
437 PyLong_AsSsize_t(PyObject
*vv
) {
438 register PyLongObject
*v
;
444 PyErr_BadInternalCall();
/* Unlike PyLong_AsLong, no nb_int coercion here: must already be int. */
447 if (!PyLong_Check(vv
)) {
448 PyErr_SetString(PyExc_TypeError
, "an integer is required");
452 v
= (PyLongObject
*)vv
;
/* Fast cases by digit count (switch header elided in this listing). */
455 case -1: return -(sdigit
)v
->ob_digit
[0];
457 case 1: return v
->ob_digit
[0];
/* Multi-digit accumulation, MSD first; `prev` detects lost bits. */
467 x
= (x
<< PyLong_SHIFT
) + v
->ob_digit
[i
];
468 if ((x
>> PyLong_SHIFT
) != prev
)
471 /* Haven't lost any bits, but casting to a signed type requires
472 * extra care (see comment above).
474 if (x
<= (size_t)PY_SSIZE_T_MAX
) {
475 return (Py_ssize_t
)x
* sign
;
477 else if (sign
< 0 && x
== PY_ABS_SSIZE_T_MIN
) {
478 return PY_SSIZE_T_MIN
;
483 PyErr_SetString(PyExc_OverflowError
,
484 "Python int too large to convert to C ssize_t");
488 /* Get a C unsigned long int from a long int object.
489 Returns -1 and sets an error condition if overflow occurs. */
492 PyLong_AsUnsignedLong(PyObject
*vv
)
494 register PyLongObject
*v
;
495 unsigned long x
, prev
;
499 PyErr_BadInternalCall();
500 return (unsigned long)-1;
502 if (!PyLong_Check(vv
)) {
503 PyErr_SetString(PyExc_TypeError
, "an integer is required");
504 return (unsigned long)-1;
507 v
= (PyLongObject
*)vv
;
/* Negative values cannot be represented; error out (the Py_SIZE check
   preceding this message is on an elided line). */
511 PyErr_SetString(PyExc_OverflowError
,
512 "can't convert negative value to unsigned int");
513 return (unsigned long) -1;
517 case 1: return v
->ob_digit
[0];
/* Accumulate MSD-first; a changed `prev` after shifting means bits were
   lost, i.e. the value exceeds unsigned long. */
521 x
= (x
<< PyLong_SHIFT
) + v
->ob_digit
[i
];
522 if ((x
>> PyLong_SHIFT
) != prev
) {
523 PyErr_SetString(PyExc_OverflowError
,
524 "python int too large to convert to C unsigned long");
525 return (unsigned long) -1;
/* NOTE(review): the doc comment below says "unsigned long" but this
   function converts to size_t -- looks like a copy-paste from
   PyLong_AsUnsignedLong; the error return below also casts to
   (unsigned long) rather than (size_t).  Harmless where the types have
   equal width, but worth confirming/fixing in the full source. */
531 /* Get a C unsigned long int from a long int object.
532 Returns -1 and sets an error condition if overflow occurs. */
535 PyLong_AsSize_t(PyObject
*vv
)
537 register PyLongObject
*v
;
542 PyErr_BadInternalCall();
545 if (!PyLong_Check(vv
)) {
546 PyErr_SetString(PyExc_TypeError
, "an integer is required");
550 v
= (PyLongObject
*)vv
;
/* Negative values are not representable as size_t. */
554 PyErr_SetString(PyExc_OverflowError
,
555 "can't convert negative value to size_t");
560 case 1: return v
->ob_digit
[0];
/* MSD-first accumulation; `prev` mismatch after shifting = overflow. */
564 x
= (x
<< PyLong_SHIFT
) + v
->ob_digit
[i
];
565 if ((x
>> PyLong_SHIFT
) != prev
) {
566 PyErr_SetString(PyExc_OverflowError
,
567 "Python int too large to convert to C size_t");
568 return (unsigned long) -1;
574 /* Get a C unsigned long int from a long int object, ignoring the high bits.
575 Returns -1 and sets an error condition if an error occurs. */
578 _PyLong_AsUnsignedLongMask(PyObject
*vv
)
580 register PyLongObject
*v
;
585 if (vv
== NULL
|| !PyLong_Check(vv
)) {
586 PyErr_BadInternalCall();
587 return (unsigned long) -1;
589 v
= (PyLongObject
*)vv
;
/* Fast single-digit case (switch header elided in this listing). */
593 case 1: return v
->ob_digit
[0];
/* Accumulate modulo 2**(bits of unsigned long): no overflow check by
   design -- high bits are deliberately discarded. */
602 x
= (x
<< PyLong_SHIFT
) + v
->ob_digit
[i
];
608 PyLong_AsUnsignedLongMask(register PyObject
*op
)
614 if (op
&& PyLong_Check(op
))
615 return _PyLong_AsUnsignedLongMask(op
);
617 if (op
== NULL
|| (nb
= op
->ob_type
->tp_as_number
) == NULL
||
618 nb
->nb_int
== NULL
) {
619 PyErr_SetString(PyExc_TypeError
, "an integer is required");
620 return (unsigned long)-1;
623 lo
= (PyLongObject
*) (*nb
->nb_int
) (op
);
625 return (unsigned long)-1;
626 if (PyLong_Check(lo
)) {
627 val
= _PyLong_AsUnsignedLongMask((PyObject
*)lo
);
629 if (PyErr_Occurred())
630 return (unsigned long)-1;
636 PyErr_SetString(PyExc_TypeError
,
637 "nb_int should return int object");
638 return (unsigned long)-1;
/* Return -1, 0 or +1 according to the sign of the int `vv`.
   `vv` must be a PyLong (asserted, not checked at runtime). */
643 _PyLong_Sign(PyObject
*vv
)
645 PyLongObject
*v
= (PyLongObject
*)vv
;
648 assert(PyLong_Check(v
));
/* Py_SIZE encodes both digit count and sign, so its sign is the answer. */
650 return Py_SIZE(v
) == 0 ? 0 : (Py_SIZE(v
) < 0 ? -1 : 1);
654 _PyLong_NumBits(PyObject
*vv
)
656 PyLongObject
*v
= (PyLongObject
*)vv
;
661 assert(PyLong_Check(v
));
662 ndigits
= ABS(Py_SIZE(v
));
663 assert(ndigits
== 0 || v
->ob_digit
[ndigits
- 1] != 0);
665 digit msd
= v
->ob_digit
[ndigits
- 1];
667 result
= (ndigits
- 1) * PyLong_SHIFT
;
668 if (result
/ PyLong_SHIFT
!= (size_t)(ndigits
- 1))
680 PyErr_SetString(PyExc_OverflowError
, "int has too many bits "
681 "to express in a platform size_t");
686 _PyLong_FromByteArray(const unsigned char* bytes
, size_t n
,
687 int little_endian
, int is_signed
)
689 const unsigned char* pstartbyte
;/* LSB of bytes */
690 int incr
; /* direction to move pstartbyte */
691 const unsigned char* pendbyte
; /* MSB of bytes */
692 size_t numsignificantbytes
; /* number of bytes that matter */
693 Py_ssize_t ndigits
; /* number of Python long digits */
694 PyLongObject
* v
; /* result */
695 Py_ssize_t idigit
= 0; /* next free index in v->ob_digit */
698 return PyLong_FromLong(0L);
702 pendbyte
= bytes
+ n
- 1;
706 pstartbyte
= bytes
+ n
- 1;
712 is_signed
= *pendbyte
>= 0x80;
714 /* Compute numsignificantbytes. This consists of finding the most
715 significant byte. Leading 0 bytes are insignficant if the number
716 is positive, and leading 0xff bytes if negative. */
719 const unsigned char* p
= pendbyte
;
720 const int pincr
= -incr
; /* search MSB to LSB */
721 const unsigned char insignficant
= is_signed
? 0xff : 0x00;
723 for (i
= 0; i
< n
; ++i
, p
+= pincr
) {
724 if (*p
!= insignficant
)
727 numsignificantbytes
= n
- i
;
728 /* 2's-comp is a bit tricky here, e.g. 0xff00 == -0x0100, so
729 actually has 2 significant bytes. OTOH, 0xff0001 ==
730 -0x00ffff, so we wouldn't *need* to bump it there; but we
731 do for 0xffff = -0x0001. To be safe without bothering to
732 check every case, bump it regardless. */
733 if (is_signed
&& numsignificantbytes
< n
)
734 ++numsignificantbytes
;
737 /* How many Python long digits do we need? We have
738 8*numsignificantbytes bits, and each Python long digit has
739 PyLong_SHIFT bits, so it's the ceiling of the quotient. */
740 /* catch overflow before it happens */
741 if (numsignificantbytes
> (PY_SSIZE_T_MAX
- PyLong_SHIFT
) / 8) {
742 PyErr_SetString(PyExc_OverflowError
,
743 "byte array too long to convert to int");
746 ndigits
= (numsignificantbytes
* 8 + PyLong_SHIFT
- 1) / PyLong_SHIFT
;
747 v
= _PyLong_New(ndigits
);
751 /* Copy the bits over. The tricky parts are computing 2's-comp on
752 the fly for signed numbers, and dealing with the mismatch between
753 8-bit bytes and (probably) 15-bit Python digits.*/
756 twodigits carry
= 1; /* for 2's-comp calculation */
757 twodigits accum
= 0; /* sliding register */
758 unsigned int accumbits
= 0; /* number of bits in accum */
759 const unsigned char* p
= pstartbyte
;
761 for (i
= 0; i
< numsignificantbytes
; ++i
, p
+= incr
) {
762 twodigits thisbyte
= *p
;
763 /* Compute correction for 2's comp, if needed. */
765 thisbyte
= (0xff ^ thisbyte
) + carry
;
766 carry
= thisbyte
>> 8;
769 /* Because we're going LSB to MSB, thisbyte is
770 more significant than what's already in accum,
771 so needs to be prepended to accum. */
772 accum
|= (twodigits
)thisbyte
<< accumbits
;
774 if (accumbits
>= PyLong_SHIFT
) {
775 /* There's enough to fill a Python digit. */
776 assert(idigit
< ndigits
);
777 v
->ob_digit
[idigit
] = (digit
)(accum
&
780 accum
>>= PyLong_SHIFT
;
781 accumbits
-= PyLong_SHIFT
;
782 assert(accumbits
< PyLong_SHIFT
);
785 assert(accumbits
< PyLong_SHIFT
);
787 assert(idigit
< ndigits
);
788 v
->ob_digit
[idigit
] = (digit
)accum
;
793 Py_SIZE(v
) = is_signed
? -idigit
: idigit
;
794 return (PyObject
*)long_normalize(v
);
798 _PyLong_AsByteArray(PyLongObject
* v
,
799 unsigned char* bytes
, size_t n
,
800 int little_endian
, int is_signed
)
802 Py_ssize_t i
; /* index into v->ob_digit */
803 Py_ssize_t ndigits
; /* |v->ob_size| */
804 twodigits accum
; /* sliding register */
805 unsigned int accumbits
; /* # bits in accum */
806 int do_twos_comp
; /* store 2's-comp? is_signed and v < 0 */
807 digit carry
; /* for computing 2's-comp */
808 size_t j
; /* # bytes filled */
809 unsigned char* p
; /* pointer to next byte in bytes */
810 int pincr
; /* direction to move p */
812 assert(v
!= NULL
&& PyLong_Check(v
));
814 if (Py_SIZE(v
) < 0) {
815 ndigits
= -(Py_SIZE(v
));
817 PyErr_SetString(PyExc_OverflowError
,
818 "can't convert negative int to unsigned");
824 ndigits
= Py_SIZE(v
);
837 /* Copy over all the Python digits.
838 It's crucial that every Python digit except for the MSD contribute
839 exactly PyLong_SHIFT bits to the total, so first assert that the long is
841 assert(ndigits
== 0 || v
->ob_digit
[ndigits
- 1] != 0);
845 carry
= do_twos_comp
? 1 : 0;
846 for (i
= 0; i
< ndigits
; ++i
) {
847 digit thisdigit
= v
->ob_digit
[i
];
849 thisdigit
= (thisdigit
^ PyLong_MASK
) + carry
;
850 carry
= thisdigit
>> PyLong_SHIFT
;
851 thisdigit
&= PyLong_MASK
;
853 /* Because we're going LSB to MSB, thisdigit is more
854 significant than what's already in accum, so needs to be
855 prepended to accum. */
856 accum
|= (twodigits
)thisdigit
<< accumbits
;
858 /* The most-significant digit may be (probably is) at least
860 if (i
== ndigits
- 1) {
861 /* Count # of sign bits -- they needn't be stored,
862 * although for signed conversion we need later to
863 * make sure at least one sign bit gets stored. */
864 digit s
= do_twos_comp
? thisdigit
^ PyLong_MASK
:
872 accumbits
+= PyLong_SHIFT
;
874 /* Store as many bytes as possible. */
875 while (accumbits
>= 8) {
879 *p
= (unsigned char)(accum
& 0xff);
886 /* Store the straggler (if any). */
887 assert(accumbits
< 8);
888 assert(carry
== 0); /* else do_twos_comp and *every* digit was 0 */
894 /* Fill leading bits of the byte with sign bits
895 (appropriately pretending that the long had an
896 infinite supply of sign bits). */
897 accum
|= (~(twodigits
)0) << accumbits
;
899 *p
= (unsigned char)(accum
& 0xff);
902 else if (j
== n
&& n
> 0 && is_signed
) {
903 /* The main loop filled the byte array exactly, so the code
904 just above didn't get to ensure there's a sign bit, and the
905 loop below wouldn't add one either. Make sure a sign bit
907 unsigned char msb
= *(p
- pincr
);
908 int sign_bit_set
= msb
>= 0x80;
909 assert(accumbits
== 0);
910 if (sign_bit_set
== do_twos_comp
)
916 /* Fill remaining bytes with copies of the sign bit. */
918 unsigned char signbyte
= do_twos_comp
? 0xffU
: 0U;
919 for ( ; j
< n
; ++j
, p
+= pincr
)
926 PyErr_SetString(PyExc_OverflowError
, "int too big to convert");
932 _PyLong_AsScaledDouble(PyObject
*vv
, int *exponent
)
934 /* NBITS_WANTED should be > the number of bits in a double's precision,
935 but small enough so that 2**NBITS_WANTED is within the normal double
936 range. nbitsneeded is set to 1 less than that because the most-significant
937 Python digit contains at least 1 significant bit, but we don't want to
938 bother counting them (catering to the worst case cheaply).
940 57 is one more than VAX-D double precision; I (Tim) don't know of a double
941 format with more precision than that; it's 1 larger so that we add in at
942 least one round bit to stand in for the ignored least-significant bits.
944 #define NBITS_WANTED 57
947 const double multiplier
= (double)(1L << PyLong_SHIFT
);
952 if (vv
== NULL
|| !PyLong_Check(vv
)) {
953 PyErr_BadInternalCall();
956 v
= (PyLongObject
*)vv
;
968 x
= (double)v
->ob_digit
[i
];
969 nbitsneeded
= NBITS_WANTED
- 1;
970 /* Invariant: i Python digits remain unaccounted for. */
971 while (i
> 0 && nbitsneeded
> 0) {
973 x
= x
* multiplier
+ (double)v
->ob_digit
[i
];
974 nbitsneeded
-= PyLong_SHIFT
;
976 /* There are i digits we didn't shift in. Pretending they're all
977 zeroes, the true value is x * 2**(i*PyLong_SHIFT). */
984 /* Get a C double from a long int object. Rounds to the nearest double,
985 using the round-half-to-even rule in the case of a tie. */
988 PyLong_AsDouble(PyObject
*vv
)
990 PyLongObject
*v
= (PyLongObject
*)vv
;
991 Py_ssize_t rnd_digit
, rnd_bit
, m
, n
;
996 if (vv
== NULL
|| !PyLong_Check(vv
)) {
997 PyErr_BadInternalCall();
1001 /* Notes on the method: for simplicity, assume v is positive and >=
1002 2**DBL_MANT_DIG. (For negative v we just ignore the sign until the
1003 end; for small v no rounding is necessary.) Write n for the number
1004 of bits in v, so that 2**(n-1) <= v < 2**n, and n > DBL_MANT_DIG.
1006 Some terminology: the *rounding bit* of v is the 1st bit of v that
1007 will be rounded away (bit n - DBL_MANT_DIG - 1); the *parity bit*
1008 is the bit immediately above. The round-half-to-even rule says
1009 that we round up if the rounding bit is set, unless v is exactly
1010 halfway between two floats and the parity bit is zero.
1012 Write d[0] ... d[m] for the digits of v, least to most significant.
1013 Let rnd_bit be the index of the rounding bit, and rnd_digit the
1014 index of the PyLong digit containing the rounding bit. Then the
1015 bits of the digit d[rnd_digit] look something like:
1020 msb -> sssssrttttttttt <- lsb
1025 where 's' represents a 'significant bit' that will be included in
1026 the mantissa of the result, 'r' is the rounding bit, and 't'
1027 represents a 'trailing bit' following the rounding bit. Note that
1028 if the rounding bit is at the top of d[rnd_digit] then the parity
1029 bit will be the lsb of d[rnd_digit+1]. If we set
1031 lsb = 1 << (rnd_bit % PyLong_SHIFT)
1033 then d[rnd_digit] & (PyLong_BASE - 2*lsb) selects just the
1034 significant bits of d[rnd_digit], d[rnd_digit] & (lsb-1) gets the
1035 trailing bits, and d[rnd_digit] & lsb gives the rounding bit.
1037 We initialize the double x to the integer given by digits
1038 d[rnd_digit:m-1], but with the rounding bit and trailing bits of
1039 d[rnd_digit] masked out. So the value of x comes from the top
1040 DBL_MANT_DIG bits of v, multiplied by 2*lsb. Note that in the loop
1041 that produces x, all floating-point operations are exact (assuming
1042 that FLT_RADIX==2). Now if we're rounding down, the value we want
1045 x * 2**(PyLong_SHIFT * rnd_digit).
1047 and if we're rounding up, it's
1049 (x + 2*lsb) * 2**(PyLong_SHIFT * rnd_digit).
1051 Under the round-half-to-even rule, we round up if, and only
1052 if, the rounding bit is set *and* at least one of the
1053 following three conditions is satisfied:
1055 (1) the parity bit is set, or
1056 (2) at least one of the trailing bits of d[rnd_digit] is set, or
1057 (3) at least one of the digits d[i], 0 <= i < rnd_digit
1060 Finally, we have to worry about overflow. If v >= 2**DBL_MAX_EXP,
1061 or equivalently n > DBL_MAX_EXP, then overflow occurs. If v <
1062 2**DBL_MAX_EXP then we're usually safe, but there's a corner case
1063 to consider: if v is very close to 2**DBL_MAX_EXP then it's
1064 possible that v is rounded up to exactly 2**DBL_MAX_EXP, and then
1065 again overflow occurs.
1068 if (Py_SIZE(v
) == 0)
1070 m
= ABS(Py_SIZE(v
)) - 1;
1072 assert(d
[m
]); /* v should be normalized */
1074 /* fast path for case where 0 < abs(v) < 2**DBL_MANT_DIG */
1075 if (m
< DBL_MANT_DIG
/ PyLong_SHIFT
||
1076 (m
== DBL_MANT_DIG
/ PyLong_SHIFT
&&
1077 d
[m
] < (digit
)1 << DBL_MANT_DIG
%PyLong_SHIFT
)) {
1080 x
= x
*PyLong_BASE
+ d
[m
];
1081 return Py_SIZE(v
) < 0 ? -x
: x
;
1084 /* if m is huge then overflow immediately; otherwise, compute the
1085 number of bits n in v. The condition below implies n (= #bits) >=
1086 m * PyLong_SHIFT + 1 > DBL_MAX_EXP, hence v >= 2**DBL_MAX_EXP. */
1087 if (m
> (DBL_MAX_EXP
-1)/PyLong_SHIFT
)
1089 n
= m
* PyLong_SHIFT
+ bits_in_digit(d
[m
]);
1090 if (n
> DBL_MAX_EXP
)
1093 /* find location of rounding bit */
1094 assert(n
> DBL_MANT_DIG
); /* dealt with |v| < 2**DBL_MANT_DIG above */
1095 rnd_bit
= n
- DBL_MANT_DIG
- 1;
1096 rnd_digit
= rnd_bit
/PyLong_SHIFT
;
1097 lsb
= (digit
)1 << (rnd_bit
%PyLong_SHIFT
);
1099 /* Get top DBL_MANT_DIG bits of v. Assumes PyLong_SHIFT <
1100 DBL_MANT_DIG, so we'll need bits from at least 2 digits of v. */
1102 assert(m
> rnd_digit
);
1103 while (--m
> rnd_digit
)
1104 x
= x
*PyLong_BASE
+ d
[m
];
1105 x
= x
*PyLong_BASE
+ (d
[m
] & (PyLong_BASE
-2*lsb
));
1107 /* decide whether to round up, using round-half-to-even */
1108 assert(m
== rnd_digit
);
1109 if (d
[m
] & lsb
) { /* if (rounding bit is set) */
1111 if (lsb
== PyLong_BASE
/2)
1112 parity_bit
= d
[m
+1] & 1;
1114 parity_bit
= d
[m
] & 2*lsb
;
1117 else if (d
[m
] & (lsb
-1))
1129 /* and round up if necessary */
1132 if (n
== DBL_MAX_EXP
&&
1133 x
== ldexp((double)(2*lsb
), DBL_MANT_DIG
)) {
1134 /* overflow corner case */
1139 /* shift, adjust for sign, and return */
1140 x
= ldexp(x
, rnd_digit
*PyLong_SHIFT
);
1141 return Py_SIZE(v
) < 0 ? -x
: x
;
1144 PyErr_SetString(PyExc_OverflowError
,
1145 "Python int too large to convert to C double");
1149 /* Create a new long (or int) object from a C pointer */
1152 PyLong_FromVoidPtr(void *p
)
1154 #ifndef HAVE_LONG_LONG
1155 # error "PyLong_FromVoidPtr: sizeof(void*) > sizeof(long), but no long long"
1157 #if SIZEOF_LONG_LONG < SIZEOF_VOID_P
1158 # error "PyLong_FromVoidPtr: sizeof(PY_LONG_LONG) < sizeof(void*)"
1160 /* special-case null pointer */
1162 return PyLong_FromLong(0);
1163 return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG
)(Py_uintptr_t
)p
);
1167 /* Get a C pointer from a long object (or an int object in some cases) */
1170 PyLong_AsVoidPtr(PyObject
*vv
)
1172 /* This function will allow int or long objects. If vv is neither,
1173 then the PyLong_AsLong*() functions will raise the exception:
1174 PyExc_SystemError, "bad argument to internal function"
1176 #if SIZEOF_VOID_P <= SIZEOF_LONG
1179 if (PyLong_Check(vv
) && _PyLong_Sign(vv
) < 0)
1180 x
= PyLong_AsLong(vv
);
1182 x
= PyLong_AsUnsignedLong(vv
);
1185 #ifndef HAVE_LONG_LONG
1186 # error "PyLong_AsVoidPtr: sizeof(void*) > sizeof(long), but no long long"
1188 #if SIZEOF_LONG_LONG < SIZEOF_VOID_P
1189 # error "PyLong_AsVoidPtr: sizeof(PY_LONG_LONG) < sizeof(void*)"
1193 if (PyLong_Check(vv
) && _PyLong_Sign(vv
) < 0)
1194 x
= PyLong_AsLongLong(vv
);
1196 x
= PyLong_AsUnsignedLongLong(vv
);
1198 #endif /* SIZEOF_VOID_P <= SIZEOF_LONG */
1200 if (x
== -1 && PyErr_Occurred())
1205 #ifdef HAVE_LONG_LONG
1207 /* Initial PY_LONG_LONG support by Chris Herborth (chrish@qnx.com), later
1208 * rewritten to use the newer PyLong_{As,From}ByteArray API.
1211 #define IS_LITTLE_ENDIAN (int)*(unsigned char*)&one
1213 /* Create a new long int object from a C PY_LONG_LONG int. */
1216 PyLong_FromLongLong(PY_LONG_LONG ival
)
1219 unsigned PY_LONG_LONG abs_ival
;
1220 unsigned PY_LONG_LONG t
; /* unsigned so >> doesn't propagate sign bit */
1224 CHECK_SMALL_INT(ival
);
1226 /* avoid signed overflow on negation; see comments
1227 in PyLong_FromLong above. */
1228 abs_ival
= (unsigned PY_LONG_LONG
)(-1-ival
) + 1;
1232 abs_ival
= (unsigned PY_LONG_LONG
)ival
;
1235 /* Count the number of Python digits.
1236 We used to pick 5 ("big enough for anything"), but that's a
1237 waste of time and space given that 5*15 = 75 bits are rarely
1244 v
= _PyLong_New(ndigits
);
1246 digit
*p
= v
->ob_digit
;
1247 Py_SIZE(v
) = negative
? -ndigits
: ndigits
;
1250 *p
++ = (digit
)(t
& PyLong_MASK
);
1254 return (PyObject
*)v
;
1257 /* Create a new long int object from a C unsigned PY_LONG_LONG int. */
1260 PyLong_FromUnsignedLongLong(unsigned PY_LONG_LONG ival
)
1263 unsigned PY_LONG_LONG t
;
1266 if (ival
< PyLong_BASE
)
1267 return PyLong_FromLong((long)ival
);
1268 /* Count the number of Python digits. */
1269 t
= (unsigned PY_LONG_LONG
)ival
;
1274 v
= _PyLong_New(ndigits
);
1276 digit
*p
= v
->ob_digit
;
1277 Py_SIZE(v
) = ndigits
;
1279 *p
++ = (digit
)(ival
& PyLong_MASK
);
1280 ival
>>= PyLong_SHIFT
;
1283 return (PyObject
*)v
;
1286 /* Create a new long int object from a C Py_ssize_t. */
1289 PyLong_FromSsize_t(Py_ssize_t ival
)
1293 size_t t
; /* unsigned so >> doesn't propagate sign bit */
1297 CHECK_SMALL_INT(ival
);
1299 /* avoid signed overflow when ival = SIZE_T_MIN */
1300 abs_ival
= (size_t)(-1-ival
)+1;
1304 abs_ival
= (size_t)ival
;
1307 /* Count the number of Python digits. */
1313 v
= _PyLong_New(ndigits
);
1315 digit
*p
= v
->ob_digit
;
1316 Py_SIZE(v
) = negative
? -ndigits
: ndigits
;
1319 *p
++ = (digit
)(t
& PyLong_MASK
);
1323 return (PyObject
*)v
;
1326 /* Create a new long int object from a C size_t. */
1329 PyLong_FromSize_t(size_t ival
)
1335 if (ival
< PyLong_BASE
)
1336 return PyLong_FromLong((long)ival
);
1337 /* Count the number of Python digits. */
1343 v
= _PyLong_New(ndigits
);
1345 digit
*p
= v
->ob_digit
;
1346 Py_SIZE(v
) = ndigits
;
1348 *p
++ = (digit
)(ival
& PyLong_MASK
);
1349 ival
>>= PyLong_SHIFT
;
1352 return (PyObject
*)v
;
1355 /* Get a C PY_LONG_LONG int from a long int object.
1356 Return -1 and set an error if overflow occurs. */
1359 PyLong_AsLongLong(PyObject
*vv
)
1367 PyErr_BadInternalCall();
1370 if (!PyLong_Check(vv
)) {
1371 PyNumberMethods
*nb
;
1373 if ((nb
= vv
->ob_type
->tp_as_number
) == NULL
||
1374 nb
->nb_int
== NULL
) {
1375 PyErr_SetString(PyExc_TypeError
, "an integer is required");
1378 io
= (*nb
->nb_int
) (vv
);
1381 if (PyLong_Check(io
)) {
1382 bytes
= PyLong_AsLongLong(io
);
1387 PyErr_SetString(PyExc_TypeError
, "integer conversion failed");
1391 v
= (PyLongObject
*)vv
;
1392 switch(Py_SIZE(v
)) {
1393 case -1: return -(sdigit
)v
->ob_digit
[0];
1395 case 1: return v
->ob_digit
[0];
1397 res
= _PyLong_AsByteArray(
1398 (PyLongObject
*)vv
, (unsigned char *)&bytes
,
1399 SIZEOF_LONG_LONG
, IS_LITTLE_ENDIAN
, 1);
1401 /* Plan 9 can't handle PY_LONG_LONG in ? : expressions */
1403 return (PY_LONG_LONG
)-1;
1408 /* Get a C unsigned PY_LONG_LONG int from a long int object.
1409 Return -1 and set an error if overflow occurs. */
1411 unsigned PY_LONG_LONG
1412 PyLong_AsUnsignedLongLong(PyObject
*vv
)
1415 unsigned PY_LONG_LONG bytes
;
1419 if (vv
== NULL
|| !PyLong_Check(vv
)) {
1420 PyErr_BadInternalCall();
1421 return (unsigned PY_LONG_LONG
)-1;
1424 v
= (PyLongObject
*)vv
;
1425 switch(Py_SIZE(v
)) {
1427 case 1: return v
->ob_digit
[0];
1430 res
= _PyLong_AsByteArray(
1431 (PyLongObject
*)vv
, (unsigned char *)&bytes
,
1432 SIZEOF_LONG_LONG
, IS_LITTLE_ENDIAN
, 0);
1434 /* Plan 9 can't handle PY_LONG_LONG in ? : expressions */
1436 return (unsigned PY_LONG_LONG
)res
;
1441 /* Get a C unsigned long int from a long int object, ignoring the high bits.
1442 Returns -1 and sets an error condition if an error occurs. */
1444 static unsigned PY_LONG_LONG
1445 _PyLong_AsUnsignedLongLongMask(PyObject
*vv
)
1447 register PyLongObject
*v
;
1448 unsigned PY_LONG_LONG x
;
1452 if (vv
== NULL
|| !PyLong_Check(vv
)) {
1453 PyErr_BadInternalCall();
1454 return (unsigned long) -1;
1456 v
= (PyLongObject
*)vv
;
1457 switch(Py_SIZE(v
)) {
1459 case 1: return v
->ob_digit
[0];
1469 x
= (x
<< PyLong_SHIFT
) + v
->ob_digit
[i
];
1474 unsigned PY_LONG_LONG
1475 PyLong_AsUnsignedLongLongMask(register PyObject
*op
)
1477 PyNumberMethods
*nb
;
1479 unsigned PY_LONG_LONG val
;
1481 if (op
&& PyLong_Check(op
))
1482 return _PyLong_AsUnsignedLongLongMask(op
);
1484 if (op
== NULL
|| (nb
= op
->ob_type
->tp_as_number
) == NULL
||
1485 nb
->nb_int
== NULL
) {
1486 PyErr_SetString(PyExc_TypeError
, "an integer is required");
1487 return (unsigned PY_LONG_LONG
)-1;
1490 lo
= (PyLongObject
*) (*nb
->nb_int
) (op
);
1492 return (unsigned PY_LONG_LONG
)-1;
1493 if (PyLong_Check(lo
)) {
1494 val
= _PyLong_AsUnsignedLongLongMask((PyObject
*)lo
);
1496 if (PyErr_Occurred())
1497 return (unsigned PY_LONG_LONG
)-1;
1503 PyErr_SetString(PyExc_TypeError
,
1504 "nb_int should return int object");
1505 return (unsigned PY_LONG_LONG
)-1;
1508 #undef IS_LITTLE_ENDIAN
1510 #endif /* HAVE_LONG_LONG */
/* Binary-operator guard: defer to the other operand's type when either
   argument is not an int (standard NotImplemented protocol). */
1512 #define CHECK_BINOP(v,w) \
1513 if (!PyLong_Check(v) || !PyLong_Check(w)) { \
1514 Py_INCREF(Py_NotImplemented); \
1515 return Py_NotImplemented; \
1518 /* bits_in_digit(d) returns the unique integer k such that 2**(k-1) <= d <
1519 2**k if d is nonzero, else 0. */
/* Bit-length lookup for 5-bit values; indexed by the low 5 bits after
   the elided loop has shifted `d` down in chunks. */
1521 static const unsigned char BitLengthTable
[32] = {
1522 0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
1523 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
/* NOTE(review): the shift loop accumulating into d_bits is elided here. */
1527 bits_in_digit(digit d
)
1534 d_bits
+= (int)BitLengthTable
[d
];
1538 /* x[0:m] and y[0:n] are digit vectors, LSD first, m >= n required. x[0:n]
1539 * is modified in place, by adding y to it. Carries are propagated as far as
1540 * x[m-1], and the remaining carry (0 or 1) is returned.
1543 v_iadd(digit
*x
, Py_ssize_t m
, digit
*y
, Py_ssize_t n
)
1549 for (i
= 0; i
< n
; ++i
) {
1550 carry
+= x
[i
] + y
[i
];
1551 x
[i
] = carry
& PyLong_MASK
;
1552 carry
>>= PyLong_SHIFT
;
1553 assert((carry
& 1) == carry
);
1555 for (; carry
&& i
< m
; ++i
) {
1557 x
[i
] = carry
& PyLong_MASK
;
1558 carry
>>= PyLong_SHIFT
;
1559 assert((carry
& 1) == carry
);
1564 /* x[0:m] and y[0:n] are digit vectors, LSD first, m >= n required. x[0:n]
1565 * is modified in place, by subtracting y from it. Borrows are propagated as
1566 * far as x[m-1], and the remaining borrow (0 or 1) is returned.
1569 v_isub(digit
*x
, Py_ssize_t m
, digit
*y
, Py_ssize_t n
)
1575 for (i
= 0; i
< n
; ++i
) {
1576 borrow
= x
[i
] - y
[i
] - borrow
;
1577 x
[i
] = borrow
& PyLong_MASK
;
1578 borrow
>>= PyLong_SHIFT
;
1579 borrow
&= 1; /* keep only 1 sign bit */
1581 for (; borrow
&& i
< m
; ++i
) {
1582 borrow
= x
[i
] - borrow
;
1583 x
[i
] = borrow
& PyLong_MASK
;
1584 borrow
>>= PyLong_SHIFT
;
1590 /* Shift digit vector a[0:m] d bits left, with 0 <= d < PyLong_SHIFT. Put
1591 * result in z[0:m], and return the d bits shifted out of the top.
1594 v_lshift(digit
*z
, digit
*a
, Py_ssize_t m
, int d
)
1599 assert(0 <= d
&& d
< PyLong_SHIFT
);
1600 for (i
=0; i
< m
; i
++) {
1601 twodigits acc
= (twodigits
)a
[i
] << d
| carry
;
1602 z
[i
] = (digit
)acc
& PyLong_MASK
;
1603 carry
= (digit
)(acc
>> PyLong_SHIFT
);
1608 /* Shift digit vector a[0:m] d bits right, with 0 <= d < PyLong_SHIFT. Put
1609 * result in z[0:m], and return the d bits shifted out of the bottom.
1612 v_rshift(digit
*z
, digit
*a
, Py_ssize_t m
, int d
)
1616 digit mask
= ((digit
)1 << d
) - 1U;
1618 assert(0 <= d
&& d
< PyLong_SHIFT
);
1619 for (i
=m
; i
-- > 0;) {
1620 twodigits acc
= (twodigits
)carry
<< PyLong_SHIFT
| a
[i
];
1621 carry
= (digit
)acc
& mask
;
1622 z
[i
] = (digit
)(acc
>> d
);
1627 /* Divide long pin, w/ size digits, by non-zero digit n, storing quotient
1628 in pout, and returning the remainder. pin and pout point at the LSD.
1629 It's OK for pin == pout on entry, which saves oodles of mallocs/frees in
1630 _PyLong_Format, but that should be done with great care since longs are
1634 inplace_divrem1(digit
*pout
, digit
*pin
, Py_ssize_t size
, digit n
)
1638 assert(n
> 0 && n
<= PyLong_MASK
);
1641 while (--size
>= 0) {
1643 rem
= (rem
<< PyLong_SHIFT
) + *--pin
;
1644 *--pout
= hi
= (digit
)(rem
/ n
);
1645 rem
-= (twodigits
)hi
* n
;
1650 /* Divide a long integer by a digit, returning both the quotient
1651 (as function result) and the remainder (through *prem).
1652 The sign of a is ignored; n should not be zero. */
1654 static PyLongObject
*
1655 divrem1(PyLongObject
*a
, digit n
, digit
*prem
)
1657 const Py_ssize_t size
= ABS(Py_SIZE(a
));
1660 assert(n
> 0 && n
<= PyLong_MASK
);
1661 z
= _PyLong_New(size
);
1664 *prem
= inplace_divrem1(z
->ob_digit
, a
->ob_digit
, size
, n
);
1665 return long_normalize(z
);
1668 /* Convert a long int object to a string, using a given conversion base.
1669 Return a string object.
1670 If base is 2, 8 or 16, add the proper prefix '0b', '0o' or '0x'. */
1673 _PyLong_Format(PyObject
*aa
, int base
)
1675 register PyLongObject
*a
= (PyLongObject
*)aa
;
1683 if (a
== NULL
|| !PyLong_Check(a
)) {
1684 PyErr_BadInternalCall();
1687 assert(base
>= 2 && base
<= 36);
1688 size_a
= ABS(Py_SIZE(a
));
1690 /* Compute a rough upper bound for the length of the string */
1698 /* ensure we don't get signed overflow in sz calculation */
1699 if (size_a
> (PY_SSIZE_T_MAX
- i
) / PyLong_SHIFT
) {
1700 PyErr_SetString(PyExc_OverflowError
,
1701 "int is too large to format");
1704 sz
= i
+ 1 + (size_a
* PyLong_SHIFT
- 1) / bits
;
1706 str
= PyUnicode_FromUnicode(NULL
, sz
);
1709 p
= PyUnicode_AS_UNICODE(str
) + sz
;
1714 if (Py_SIZE(a
) == 0) {
1717 else if ((base
& (base
- 1)) == 0) {
1718 /* JRH: special case for power-of-2 bases */
1719 twodigits accum
= 0;
1720 int accumbits
= 0; /* # of bits in accum */
1721 int basebits
= 1; /* # of bits in base-1 */
1723 while ((i
>>= 1) > 1)
1726 for (i
= 0; i
< size_a
; ++i
) {
1727 accum
|= (twodigits
)a
->ob_digit
[i
] << accumbits
;
1728 accumbits
+= PyLong_SHIFT
;
1729 assert(accumbits
>= basebits
);
1731 char cdigit
= (char)(accum
& (base
- 1));
1732 cdigit
+= (cdigit
< 10) ? '0' : 'a'-10;
1733 assert(p
> PyUnicode_AS_UNICODE(str
));
1735 accumbits
-= basebits
;
1737 } while (i
< size_a
-1 ? accumbits
>= basebits
:
1742 /* Not 0, and base not a power of 2. Divide repeatedly by
1743 base, but for speed use the highest power of base that
1745 Py_ssize_t size
= size_a
;
1746 digit
*pin
= a
->ob_digit
;
1747 PyLongObject
*scratch
;
1748 /* powbasw <- largest power of base that fits in a digit. */
1749 digit powbase
= base
; /* powbase == base ** power */
1752 twodigits newpow
= powbase
* (twodigits
)base
;
1753 if (newpow
>> PyLong_SHIFT
)
1754 /* doesn't fit in a digit */
1756 powbase
= (digit
)newpow
;
1760 /* Get a scratch area for repeated division. */
1761 scratch
= _PyLong_New(size
);
1762 if (scratch
== NULL
) {
1767 /* Repeatedly divide by powbase. */
1769 int ntostore
= power
;
1770 digit rem
= inplace_divrem1(scratch
->ob_digit
,
1771 pin
, size
, powbase
);
1772 pin
= scratch
->ob_digit
; /* no need to use a again */
1773 if (pin
[size
- 1] == 0)
1781 /* Break rem into digits. */
1782 assert(ntostore
> 0);
1784 digit nextrem
= (digit
)(rem
/ base
);
1785 char c
= (char)(rem
- nextrem
* base
);
1786 assert(p
> PyUnicode_AS_UNICODE(str
));
1787 c
+= (c
< 10) ? '0' : 'a'-10;
1791 /* Termination is a bit delicate: must not
1792 store leading zeroes, so must get out if
1793 remaining quotient and rem are both 0. */
1794 } while (ntostore
&& (size
|| rem
));
1795 } while (size
!= 0);
1803 else if (base
== 8) {
1807 else if (base
== 2) {
1811 else if (base
!= 10) {
1813 *--p
= '0' + base
%10;
1815 *--p
= '0' + base
/10;
1819 if (p
!= PyUnicode_AS_UNICODE(str
)) {
1820 Py_UNICODE
*q
= PyUnicode_AS_UNICODE(str
);
1823 } while ((*q
++ = *p
++) != '\0');
1825 if (PyUnicode_Resize(&str
,(Py_ssize_t
) (q
-
1826 PyUnicode_AS_UNICODE(str
)))) {
1831 return (PyObject
*)str
;
/* Table of digit values for 8-bit string -> integer conversion.
 * '0' maps to 0, ..., '9' maps to 9.
 * 'a' and 'A' map to 10, ..., 'z' and 'Z' map to 35.
 * All other indices map to 37.
 * Note that when converting a base B string, a char c is a legitimate
 * base B digit iff _PyLong_DigitValue[Py_CHARMASK(c)] < B.
 */
unsigned char _PyLong_DigitValue[256] = {
    37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
    37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
    37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
    0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  37, 37, 37, 37, 37, 37,
    37, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
    25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 37, 37, 37, 37, 37,
    37, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
    25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 37, 37, 37, 37, 37,
    37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
    37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
    37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
    37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
    37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
    37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
    37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
    37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
};
1860 /* *str points to the first digit in a string of base `base` digits. base
1861 * is a power of 2 (2, 4, 8, 16, or 32). *str is set to point to the first
1862 * non-digit (which may be *str!). A normalized long is returned.
1863 * The point to this routine is that it takes time linear in the number of
1864 * string characters.
1866 static PyLongObject
*
1867 long_from_binary_base(char **str
, int base
)
1878 assert(base
>= 2 && base
<= 32 && (base
& (base
- 1)) == 0);
1880 for (bits_per_char
= -1; n
; ++bits_per_char
)
1882 /* n <- total # of bits needed, while setting p to end-of-string */
1883 while (_PyLong_DigitValue
[Py_CHARMASK(*p
)] < base
)
1886 /* n <- # of Python digits needed, = ceiling(n/PyLong_SHIFT). */
1887 n
= (p
- start
) * bits_per_char
+ PyLong_SHIFT
- 1;
1888 if (n
/ bits_per_char
< p
- start
) {
1889 PyErr_SetString(PyExc_ValueError
,
1890 "int string too large to convert");
1893 n
= n
/ PyLong_SHIFT
;
1897 /* Read string from right, and fill in long from left; i.e.,
1898 * from least to most significant in both.
1902 pdigit
= z
->ob_digit
;
1903 while (--p
>= start
) {
1904 int k
= (int)_PyLong_DigitValue
[Py_CHARMASK(*p
)];
1905 assert(k
>= 0 && k
< base
);
1906 accum
|= (twodigits
)k
<< bits_in_accum
;
1907 bits_in_accum
+= bits_per_char
;
1908 if (bits_in_accum
>= PyLong_SHIFT
) {
1909 *pdigit
++ = (digit
)(accum
& PyLong_MASK
);
1910 assert(pdigit
- z
->ob_digit
<= n
);
1911 accum
>>= PyLong_SHIFT
;
1912 bits_in_accum
-= PyLong_SHIFT
;
1913 assert(bits_in_accum
< PyLong_SHIFT
);
1916 if (bits_in_accum
) {
1917 assert(bits_in_accum
<= PyLong_SHIFT
);
1918 *pdigit
++ = (digit
)accum
;
1919 assert(pdigit
- z
->ob_digit
<= n
);
1921 while (pdigit
- z
->ob_digit
< n
)
1923 return long_normalize(z
);
1927 PyLong_FromString(char *str
, char **pend
, int base
)
1929 int sign
= 1, error_if_nonzero
= 0;
1930 char *start
, *orig_str
= str
;
1931 PyLongObject
*z
= NULL
;
1935 if ((base
!= 0 && base
< 2) || base
> 36) {
1936 PyErr_SetString(PyExc_ValueError
,
1937 "int() arg 2 must be >= 2 and <= 36");
1940 while (*str
!= '\0' && isspace(Py_CHARMASK(*str
)))
1944 else if (*str
== '-') {
1951 else if (str
[1] == 'x' || str
[1] == 'X')
1953 else if (str
[1] == 'o' || str
[1] == 'O')
1955 else if (str
[1] == 'b' || str
[1] == 'B')
1958 /* "old" (C-style) octal literal, now invalid.
1959 it might still be zero though */
1960 error_if_nonzero
= 1;
1964 if (str
[0] == '0' &&
1965 ((base
== 16 && (str
[1] == 'x' || str
[1] == 'X')) ||
1966 (base
== 8 && (str
[1] == 'o' || str
[1] == 'O')) ||
1967 (base
== 2 && (str
[1] == 'b' || str
[1] == 'B'))))
1971 if ((base
& (base
- 1)) == 0)
1972 z
= long_from_binary_base(&str
, base
);
1975 Binary bases can be converted in time linear in the number of digits, because
1976 Python's representation base is binary. Other bases (including decimal!) use
1977 the simple quadratic-time algorithm below, complicated by some speed tricks.
1979 First some math: the largest integer that can be expressed in N base-B digits
1980 is B**N-1. Consequently, if we have an N-digit input in base B, the worst-
1981 case number of Python digits needed to hold it is the smallest integer n s.t.
1983 BASE**n-1 >= B**N-1 [or, adding 1 to both sides]
1984 BASE**n >= B**N [taking logs to base BASE]
1985 n >= log(B**N)/log(BASE) = N * log(B)/log(BASE)
1987 The static array log_base_BASE[base] == log(base)/log(BASE) so we can compute
1988 this quickly. A Python long with that much space is reserved near the start,
1989 and the result is computed into it.
1991 The input string is actually treated as being in base base**i (i.e., i digits
1992 are processed at a time), where two more static arrays hold:
1994 convwidth_base[base] = the largest integer i such that base**i <= BASE
1995 convmultmax_base[base] = base ** convwidth_base[base]
1997 The first of these is the largest i such that i consecutive input digits
1998 must fit in a single Python digit. The second is effectively the input
1999 base we're really using.
2001 Viewing the input as a sequence <c0, c1, ..., c_n-1> of digits in base
2002 convmultmax_base[base], the result is "simply"
2004 (((c0*B + c1)*B + c2)*B + c3)*B + ... ))) + c_n-1
2006 where B = convmultmax_base[base].
2008 Error analysis: as above, the number of Python digits `n` needed is worst-
2011 n >= N * log(B)/log(BASE)
2013 where `N` is the number of input digits in base `B`. This is computed via
2015 size_z = (Py_ssize_t)((scan - str) * log_base_BASE[base]) + 1;
2017 below. Two numeric concerns are how much space this can waste, and whether
2018 the computed result can be too small. To be concrete, assume BASE = 2**15,
2019 which is the default (and it's unlikely anyone changes that).
2021 Waste isn't a problem: provided the first input digit isn't 0, the difference
2022 between the worst-case input with N digits and the smallest input with N
2023 digits is about a factor of B, but B is small compared to BASE so at most
2024 one allocated Python digit can remain unused on that count. If
2025 N*log(B)/log(BASE) is mathematically an exact integer, then truncating that
2026 and adding 1 returns a result 1 larger than necessary. However, that can't
2027 happen: whenever B is a power of 2, long_from_binary_base() is called
2028 instead, and it's impossible for B**i to be an integer power of 2**15 when
2029 B is not a power of 2 (i.e., it's impossible for N*log(B)/log(BASE) to be
2030 an exact integer when B is not a power of 2, since B**i has a prime factor
2031 other than 2 in that case, but (2**15)**j's only prime factor is 2).
2033 The computed result can be too small if the true value of N*log(B)/log(BASE)
2034 is a little bit larger than an exact integer, but due to roundoff errors (in
2035 computing log(B), log(BASE), their quotient, and/or multiplying that by N)
2036 yields a numeric result a little less than that integer. Unfortunately, "how
2037 close can a transcendental function get to an integer over some range?"
2038 questions are generally theoretically intractable. Computer analysis via
2039 continued fractions is practical: expand log(B)/log(BASE) via continued
2040 fractions, giving a sequence i/j of "the best" rational approximations. Then
2041 j*log(B)/log(BASE) is approximately equal to (the integer) i. This shows that
2042 we can get very close to being in trouble, but very rarely. For example,
2043 76573 is a denominator in one of the continued-fraction approximations to
2044 log(10)/log(2**15), and indeed:
2046 >>> log(10)/log(2**15)*76573
2049 is very close to an integer. If we were working with IEEE single-precision,
2050 rounding errors could kill us. Finding worst cases in IEEE double-precision
2051 requires better-than-double-precision log() functions, and Tim didn't bother.
2052 Instead the code checks to see whether the allocated space is enough as each
2053 new Python digit is added, and copies the whole thing to a larger long if not.
2054 This should happen extremely rarely, and in fact I don't have a test case
2055 that triggers it(!). Instead the code was tested by artificially allocating
2056 just 1 digit at the start, so that the copying code was exercised for every
2057 digit beyond the first.
2059 register twodigits c
; /* current input character */
2063 twodigits convmultmax
, convmult
;
2067 static double log_base_BASE
[37] = {0.0e0
,};
2068 static int convwidth_base
[37] = {0,};
2069 static twodigits convmultmax_base
[37] = {0,};
2071 if (log_base_BASE
[base
] == 0.0) {
2072 twodigits convmax
= base
;
2075 log_base_BASE
[base
] = log((double)base
) /
2076 log((double)PyLong_BASE
);
2078 twodigits next
= convmax
* base
;
2079 if (next
> PyLong_BASE
)
2084 convmultmax_base
[base
] = convmax
;
2086 convwidth_base
[base
] = i
;
2089 /* Find length of the string of numeric characters. */
2091 while (_PyLong_DigitValue
[Py_CHARMASK(*scan
)] < base
)
2094 /* Create a long object that can contain the largest possible
2095 * integer with this base and length. Note that there's no
2096 * need to initialize z->ob_digit -- no slot is read up before
2097 * being stored into.
2099 size_z
= (Py_ssize_t
)((scan
- str
) * log_base_BASE
[base
]) + 1;
2100 /* Uncomment next line to test exceedingly rare copy code */
2103 z
= _PyLong_New(size_z
);
2108 /* `convwidth` consecutive input digits are treated as a single
2109 * digit in base `convmultmax`.
2111 convwidth
= convwidth_base
[base
];
2112 convmultmax
= convmultmax_base
[base
];
2115 while (str
< scan
) {
2116 /* grab up to convwidth digits from the input string */
2117 c
= (digit
)_PyLong_DigitValue
[Py_CHARMASK(*str
++)];
2118 for (i
= 1; i
< convwidth
&& str
!= scan
; ++i
, ++str
) {
2119 c
= (twodigits
)(c
* base
+
2120 (int)_PyLong_DigitValue
[Py_CHARMASK(*str
)]);
2121 assert(c
< PyLong_BASE
);
2124 convmult
= convmultmax
;
2125 /* Calculate the shift only if we couldn't get
2128 if (i
!= convwidth
) {
2134 /* Multiply z by convmult, and add c. */
2136 pzstop
= pz
+ Py_SIZE(z
);
2137 for (; pz
< pzstop
; ++pz
) {
2138 c
+= (twodigits
)*pz
* convmult
;
2139 *pz
= (digit
)(c
& PyLong_MASK
);
2142 /* carry off the current end? */
2144 assert(c
< PyLong_BASE
);
2145 if (Py_SIZE(z
) < size_z
) {
2151 /* Extremely rare. Get more space. */
2152 assert(Py_SIZE(z
) == size_z
);
2153 tmp
= _PyLong_New(size_z
+ 1);
2158 memcpy(tmp
->ob_digit
,
2160 sizeof(digit
) * size_z
);
2163 z
->ob_digit
[size_z
] = (digit
)c
;
2171 if (error_if_nonzero
) {
2172 /* reset the base to 0, else the exception message
2173 doesn't make too much sense */
2175 if (Py_SIZE(z
) != 0)
2177 /* there might still be other problems, therefore base
2178 remains zero here for the same reason */
2183 Py_SIZE(z
) = -(Py_SIZE(z
));
2184 while (*str
&& isspace(Py_CHARMASK(*str
)))
2191 return (PyObject
*) maybe_small_long(z
);
2195 slen
= strlen(orig_str
) < 200 ? strlen(orig_str
) : 200;
2196 strobj
= PyUnicode_FromStringAndSize(orig_str
, slen
);
2199 PyErr_Format(PyExc_ValueError
,
2200 "invalid literal for int() with base %d: %R",
2207 PyLong_FromUnicode(Py_UNICODE
*u
, Py_ssize_t length
, int base
)
2210 char *buffer
= (char *)PyMem_MALLOC(length
+1);
2215 if (PyUnicode_EncodeDecimal(u
, length
, buffer
, NULL
)) {
2219 result
= PyLong_FromString(buffer
, NULL
, base
);
2225 static PyLongObject
*x_divrem
2226 (PyLongObject
*, PyLongObject
*, PyLongObject
**);
2227 static PyObject
*long_long(PyObject
*v
);
2229 /* Long division with remainder, top-level routine */
2232 long_divrem(PyLongObject
*a
, PyLongObject
*b
,
2233 PyLongObject
**pdiv
, PyLongObject
**prem
)
2235 Py_ssize_t size_a
= ABS(Py_SIZE(a
)), size_b
= ABS(Py_SIZE(b
));
2239 PyErr_SetString(PyExc_ZeroDivisionError
,
2240 "integer division or modulo by zero");
2243 if (size_a
< size_b
||
2244 (size_a
== size_b
&&
2245 a
->ob_digit
[size_a
-1] < b
->ob_digit
[size_b
-1])) {
2247 *pdiv
= (PyLongObject
*)PyLong_FromLong(0);
2251 *prem
= (PyLongObject
*) a
;
2256 z
= divrem1(a
, b
->ob_digit
[0], &rem
);
2259 *prem
= (PyLongObject
*) PyLong_FromLong((long)rem
);
2260 if (*prem
== NULL
) {
2266 z
= x_divrem(a
, b
, prem
);
2271 The quotient z has the sign of a*b;
2272 the remainder r has the sign of a,
2274 if ((Py_SIZE(a
) < 0) != (Py_SIZE(b
) < 0))
2276 if (Py_SIZE(a
) < 0 && Py_SIZE(*prem
) != 0)
2278 *pdiv
= maybe_small_long(z
);
2282 /* Unsigned long division with remainder -- the algorithm. The arguments v1
2283 and w1 should satisfy 2 <= ABS(Py_SIZE(w1)) <= ABS(Py_SIZE(v1)). */
2285 static PyLongObject
*
2286 x_divrem(PyLongObject
*v1
, PyLongObject
*w1
, PyLongObject
**prem
)
2288 PyLongObject
*v
, *w
, *a
;
2289 Py_ssize_t i
, k
, size_v
, size_w
;
2291 digit wm1
, wm2
, carry
, q
, r
, vtop
, *v0
, *vk
, *w0
, *ak
;
2296 /* We follow Knuth [The Art of Computer Programming, Vol. 2 (3rd
2297 edn.), section 4.3.1, Algorithm D], except that we don't explicitly
2298 handle the special case when the initial estimate q for a quotient
2299 digit is >= PyLong_BASE: the max value for q is PyLong_BASE+1, and
2300 that won't overflow a digit. */
2302 /* allocate space; w will also be used to hold the final remainder */
2303 size_v
= ABS(Py_SIZE(v1
));
2304 size_w
= ABS(Py_SIZE(w1
));
2305 assert(size_v
>= size_w
&& size_w
>= 2); /* Assert checks by div() */
2306 v
= _PyLong_New(size_v
+1);
2311 w
= _PyLong_New(size_w
);
2318 /* normalize: shift w1 left so that its top digit is >= PyLong_BASE/2.
2319 shift v1 left by the same amount. Results go into w and v. */
2320 d
= PyLong_SHIFT
- bits_in_digit(w1
->ob_digit
[size_w
-1]);
2321 carry
= v_lshift(w
->ob_digit
, w1
->ob_digit
, size_w
, d
);
2323 carry
= v_lshift(v
->ob_digit
, v1
->ob_digit
, size_v
, d
);
2324 if (carry
!= 0 || v
->ob_digit
[size_v
-1] >= w
->ob_digit
[size_w
-1]) {
2325 v
->ob_digit
[size_v
] = carry
;
2329 /* Now v->ob_digit[size_v-1] < w->ob_digit[size_w-1], so quotient has
2330 at most (and usually exactly) k = size_v - size_w digits. */
2331 k
= size_v
- size_w
;
2344 for (vk
= v0
+k
, ak
= a
->ob_digit
+ k
; vk
-- > v0
;) {
2345 /* inner loop: divide vk[0:size_w+1] by w0[0:size_w], giving
2346 single-digit quotient q, remainder in vk[0:size_w]. */
2356 /* estimate quotient digit q; may overestimate by 1 (rare) */
2358 assert(vtop
<= wm1
);
2359 vv
= ((twodigits
)vtop
<< PyLong_SHIFT
) | vk
[size_w
-1];
2360 q
= (digit
)(vv
/ wm1
);
2361 r
= (digit
)(vv
- (twodigits
)wm1
* q
); /* r = vv % wm1 */
2362 while ((twodigits
)wm2
* q
> (((twodigits
)r
<< PyLong_SHIFT
)
2366 if (r
>= PyLong_BASE
)
2369 assert(q
<= PyLong_BASE
);
2371 /* subtract q*w0[0:size_w] from vk[0:size_w+1] */
2373 for (i
= 0; i
< size_w
; ++i
) {
2374 /* invariants: -PyLong_BASE <= -q <= zhi <= 0;
2375 -PyLong_BASE * q <= z < PyLong_BASE */
2376 z
= (sdigit
)vk
[i
] + zhi
-
2377 (stwodigits
)q
* (stwodigits
)w0
[i
];
2378 vk
[i
] = (digit
)z
& PyLong_MASK
;
2379 zhi
= (sdigit
)Py_ARITHMETIC_RIGHT_SHIFT(stwodigits
,
2383 /* add w back if q was too large (this branch taken rarely) */
2384 assert((sdigit
)vtop
+ zhi
== -1 || (sdigit
)vtop
+ zhi
== 0);
2385 if ((sdigit
)vtop
+ zhi
< 0) {
2387 for (i
= 0; i
< size_w
; ++i
) {
2388 carry
+= vk
[i
] + w0
[i
];
2389 vk
[i
] = carry
& PyLong_MASK
;
2390 carry
>>= PyLong_SHIFT
;
2395 /* store quotient digit */
2396 assert(q
< PyLong_BASE
);
2400 /* unshift remainder; we reuse w to store the result */
2401 carry
= v_rshift(w0
, v0
, size_w
, d
);
2405 *prem
= long_normalize(w
);
2406 return long_normalize(a
);
2412 long_dealloc(PyObject
*v
)
2414 Py_TYPE(v
)->tp_free(v
);
2418 long_repr(PyObject
*v
)
2420 return _PyLong_Format(v
, 10);
2424 long_compare(PyLongObject
*a
, PyLongObject
*b
)
2428 if (Py_SIZE(a
) != Py_SIZE(b
)) {
2429 if (ABS(Py_SIZE(a
)) == 0 && ABS(Py_SIZE(b
)) == 0)
2432 sign
= Py_SIZE(a
) - Py_SIZE(b
);
2435 Py_ssize_t i
= ABS(Py_SIZE(a
));
2436 while (--i
>= 0 && a
->ob_digit
[i
] == b
->ob_digit
[i
])
2441 sign
= (sdigit
)a
->ob_digit
[i
] - (sdigit
)b
->ob_digit
[i
];
2446 return sign
< 0 ? -1 : sign
> 0 ? 1 : 0;
2449 #define TEST_COND(cond) \
2450 ((cond) ? Py_True : Py_False)
2453 long_richcompare(PyObject
*self
, PyObject
*other
, int op
)
2457 CHECK_BINOP(self
, other
);
2461 result
= long_compare((PyLongObject
*)self
, (PyLongObject
*)other
);
2462 /* Convert the return value to a Boolean */
2465 v
= TEST_COND(result
== 0);
2468 v
= TEST_COND(result
!= 0);
2471 v
= TEST_COND(result
<= 0);
2474 v
= TEST_COND(result
>= 0);
2477 v
= TEST_COND(result
== -1);
2480 v
= TEST_COND(result
== 1);
2483 PyErr_BadArgument();
2491 long_hash(PyLongObject
*v
)
2497 /* This is designed so that Python ints and longs with the
2498 same value hash to the same value, otherwise comparisons
2499 of mapping keys will turn out weird */
2502 case -1: return v
->ob_digit
[0]==1 ? -2 : -(sdigit
)v
->ob_digit
[0];
2504 case 1: return v
->ob_digit
[0];
2512 /* The following loop produces a C unsigned long x such that x is
2513 congruent to the absolute value of v modulo ULONG_MAX. The
2514 resulting x is nonzero if and only if v is. */
2516 /* Force a native long #-bits (32 or 64) circular shift */
2517 x
= (x
>> (8*SIZEOF_LONG
-PyLong_SHIFT
)) | (x
<< PyLong_SHIFT
);
2518 x
+= v
->ob_digit
[i
];
2519 /* If the addition above overflowed we compensate by
2520 incrementing. This preserves the value modulo
2522 if (x
< v
->ob_digit
[i
])
2526 if (x
== (unsigned long)-1)
2527 x
= (unsigned long)-2;
2532 /* Add the absolute values of two long integers. */
2534 static PyLongObject
*
2535 x_add(PyLongObject
*a
, PyLongObject
*b
)
2537 Py_ssize_t size_a
= ABS(Py_SIZE(a
)), size_b
= ABS(Py_SIZE(b
));
2542 /* Ensure a is the larger of the two: */
2543 if (size_a
< size_b
) {
2544 { PyLongObject
*temp
= a
; a
= b
; b
= temp
; }
2545 { Py_ssize_t size_temp
= size_a
;
2547 size_b
= size_temp
; }
2549 z
= _PyLong_New(size_a
+1);
2552 for (i
= 0; i
< size_b
; ++i
) {
2553 carry
+= a
->ob_digit
[i
] + b
->ob_digit
[i
];
2554 z
->ob_digit
[i
] = carry
& PyLong_MASK
;
2555 carry
>>= PyLong_SHIFT
;
2557 for (; i
< size_a
; ++i
) {
2558 carry
+= a
->ob_digit
[i
];
2559 z
->ob_digit
[i
] = carry
& PyLong_MASK
;
2560 carry
>>= PyLong_SHIFT
;
2562 z
->ob_digit
[i
] = carry
;
2563 return long_normalize(z
);
2566 /* Subtract the absolute values of two integers. */
2568 static PyLongObject
*
2569 x_sub(PyLongObject
*a
, PyLongObject
*b
)
2571 Py_ssize_t size_a
= ABS(Py_SIZE(a
)), size_b
= ABS(Py_SIZE(b
));
2577 /* Ensure a is the larger of the two: */
2578 if (size_a
< size_b
) {
2580 { PyLongObject
*temp
= a
; a
= b
; b
= temp
; }
2581 { Py_ssize_t size_temp
= size_a
;
2583 size_b
= size_temp
; }
2585 else if (size_a
== size_b
) {
2586 /* Find highest digit where a and b differ: */
2588 while (--i
>= 0 && a
->ob_digit
[i
] == b
->ob_digit
[i
])
2591 return (PyLongObject
*)PyLong_FromLong(0);
2592 if (a
->ob_digit
[i
] < b
->ob_digit
[i
]) {
2594 { PyLongObject
*temp
= a
; a
= b
; b
= temp
; }
2596 size_a
= size_b
= i
+1;
2598 z
= _PyLong_New(size_a
);
2601 for (i
= 0; i
< size_b
; ++i
) {
2602 /* The following assumes unsigned arithmetic
2603 works module 2**N for some N>PyLong_SHIFT. */
2604 borrow
= a
->ob_digit
[i
] - b
->ob_digit
[i
] - borrow
;
2605 z
->ob_digit
[i
] = borrow
& PyLong_MASK
;
2606 borrow
>>= PyLong_SHIFT
;
2607 borrow
&= 1; /* Keep only one sign bit */
2609 for (; i
< size_a
; ++i
) {
2610 borrow
= a
->ob_digit
[i
] - borrow
;
2611 z
->ob_digit
[i
] = borrow
& PyLong_MASK
;
2612 borrow
>>= PyLong_SHIFT
;
2613 borrow
&= 1; /* Keep only one sign bit */
2615 assert(borrow
== 0);
2618 return long_normalize(z
);
2622 long_add(PyLongObject
*a
, PyLongObject
*b
)
2628 if (ABS(Py_SIZE(a
)) <= 1 && ABS(Py_SIZE(b
)) <= 1) {
2629 PyObject
*result
= PyLong_FromLong(MEDIUM_VALUE(a
) +
2633 if (Py_SIZE(a
) < 0) {
2634 if (Py_SIZE(b
) < 0) {
2636 if (z
!= NULL
&& Py_SIZE(z
) != 0)
2637 Py_SIZE(z
) = -(Py_SIZE(z
));
2648 return (PyObject
*)z
;
2652 long_sub(PyLongObject
*a
, PyLongObject
*b
)
2658 if (ABS(Py_SIZE(a
)) <= 1 && ABS(Py_SIZE(b
)) <= 1) {
2660 r
= PyLong_FromLong(MEDIUM_VALUE(a
)-MEDIUM_VALUE(b
));
2663 if (Py_SIZE(a
) < 0) {
2668 if (z
!= NULL
&& Py_SIZE(z
) != 0)
2669 Py_SIZE(z
) = -(Py_SIZE(z
));
2677 return (PyObject
*)z
;
2680 /* Grade school multiplication, ignoring the signs.
2681 * Returns the absolute value of the product, or NULL if error.
2683 static PyLongObject
*
2684 x_mul(PyLongObject
*a
, PyLongObject
*b
)
2687 Py_ssize_t size_a
= ABS(Py_SIZE(a
));
2688 Py_ssize_t size_b
= ABS(Py_SIZE(b
));
2691 z
= _PyLong_New(size_a
+ size_b
);
2695 memset(z
->ob_digit
, 0, Py_SIZE(z
) * sizeof(digit
));
2697 /* Efficient squaring per HAC, Algorithm 14.16:
2698 * http://www.cacr.math.uwaterloo.ca/hac/about/chap14.pdf
2699 * Gives slightly less than a 2x speedup when a == b,
2700 * via exploiting that each entry in the multiplication
2701 * pyramid appears twice (except for the size_a squares).
2703 for (i
= 0; i
< size_a
; ++i
) {
2705 twodigits f
= a
->ob_digit
[i
];
2706 digit
*pz
= z
->ob_digit
+ (i
<< 1);
2707 digit
*pa
= a
->ob_digit
+ i
+ 1;
2708 digit
*paend
= a
->ob_digit
+ size_a
;
2715 carry
= *pz
+ f
* f
;
2716 *pz
++ = (digit
)(carry
& PyLong_MASK
);
2717 carry
>>= PyLong_SHIFT
;
2718 assert(carry
<= PyLong_MASK
);
2720 /* Now f is added in twice in each column of the
2721 * pyramid it appears. Same as adding f<<1 once.
2724 while (pa
< paend
) {
2725 carry
+= *pz
+ *pa
++ * f
;
2726 *pz
++ = (digit
)(carry
& PyLong_MASK
);
2727 carry
>>= PyLong_SHIFT
;
2728 assert(carry
<= (PyLong_MASK
<< 1));
2732 *pz
++ = (digit
)(carry
& PyLong_MASK
);
2733 carry
>>= PyLong_SHIFT
;
2736 *pz
+= (digit
)(carry
& PyLong_MASK
);
2737 assert((carry
>> PyLong_SHIFT
) == 0);
2740 else { /* a is not the same as b -- gradeschool long mult */
2741 for (i
= 0; i
< size_a
; ++i
) {
2742 twodigits carry
= 0;
2743 twodigits f
= a
->ob_digit
[i
];
2744 digit
*pz
= z
->ob_digit
+ i
;
2745 digit
*pb
= b
->ob_digit
;
2746 digit
*pbend
= b
->ob_digit
+ size_b
;
2753 while (pb
< pbend
) {
2754 carry
+= *pz
+ *pb
++ * f
;
2755 *pz
++ = (digit
)(carry
& PyLong_MASK
);
2756 carry
>>= PyLong_SHIFT
;
2757 assert(carry
<= PyLong_MASK
);
2760 *pz
+= (digit
)(carry
& PyLong_MASK
);
2761 assert((carry
>> PyLong_SHIFT
) == 0);
2764 return long_normalize(z
);
2767 /* A helper for Karatsuba multiplication (k_mul).
2768 Takes a long "n" and an integer "size" representing the place to
2769 split, and sets low and high such that abs(n) == (high << size) + low,
2770 viewing the shift as being by digits. The sign bit is ignored, and
2771 the return values are >= 0.
2772 Returns 0 on success, -1 on failure.
2775 kmul_split(PyLongObject
*n
, Py_ssize_t size
, PyLongObject
**high
, PyLongObject
**low
)
2777 PyLongObject
*hi
, *lo
;
2778 Py_ssize_t size_lo
, size_hi
;
2779 const Py_ssize_t size_n
= ABS(Py_SIZE(n
));
2781 size_lo
= MIN(size_n
, size
);
2782 size_hi
= size_n
- size_lo
;
2784 if ((hi
= _PyLong_New(size_hi
)) == NULL
)
2786 if ((lo
= _PyLong_New(size_lo
)) == NULL
) {
2791 memcpy(lo
->ob_digit
, n
->ob_digit
, size_lo
* sizeof(digit
));
2792 memcpy(hi
->ob_digit
, n
->ob_digit
+ size_lo
, size_hi
* sizeof(digit
));
2794 *high
= long_normalize(hi
);
2795 *low
= long_normalize(lo
);
2799 static PyLongObject
*k_lopsided_mul(PyLongObject
*a
, PyLongObject
*b
);
2801 /* Karatsuba multiplication. Ignores the input signs, and returns the
2802 * absolute value of the product (or NULL if error).
2803 * See Knuth Vol. 2 Chapter 4.3.3 (Pp. 294-295).
2805 static PyLongObject
*
2806 k_mul(PyLongObject
*a
, PyLongObject
*b
)
2808 Py_ssize_t asize
= ABS(Py_SIZE(a
));
2809 Py_ssize_t bsize
= ABS(Py_SIZE(b
));
2810 PyLongObject
*ah
= NULL
;
2811 PyLongObject
*al
= NULL
;
2812 PyLongObject
*bh
= NULL
;
2813 PyLongObject
*bl
= NULL
;
2814 PyLongObject
*ret
= NULL
;
2815 PyLongObject
*t1
, *t2
, *t3
;
2816 Py_ssize_t shift
; /* the number of digits we split off */
2819 /* (ah*X+al)(bh*X+bl) = ah*bh*X*X + (ah*bl + al*bh)*X + al*bl
2820 * Let k = (ah+al)*(bh+bl) = ah*bl + al*bh + ah*bh + al*bl
2821 * Then the original product is
2822 * ah*bh*X*X + (k - ah*bh - al*bl)*X + al*bl
2823 * By picking X to be a power of 2, "*X" is just shifting, and it's
2824 * been reduced to 3 multiplies on numbers half the size.
2827 /* We want to split based on the larger number; fiddle so that b
2830 if (asize
> bsize
) {
2840 /* Use gradeschool math when either number is too small. */
2841 i
= a
== b
? KARATSUBA_SQUARE_CUTOFF
: KARATSUBA_CUTOFF
;
2844 return (PyLongObject
*)PyLong_FromLong(0);
2849 /* If a is small compared to b, splitting on b gives a degenerate
2850 * case with ah==0, and Karatsuba may be (even much) less efficient
2851 * than "grade school" then. However, we can still win, by viewing
2852 * b as a string of "big digits", each of width a->ob_size. That
2853 * leads to a sequence of balanced calls to k_mul.
2855 if (2 * asize
<= bsize
)
2856 return k_lopsided_mul(a
, b
);
2858 /* Split a & b into hi & lo pieces. */
2860 if (kmul_split(a
, shift
, &ah
, &al
) < 0) goto fail
;
2861 assert(Py_SIZE(ah
) > 0); /* the split isn't degenerate */
2869 else if (kmul_split(b
, shift
, &bh
, &bl
) < 0) goto fail
;
2872 * 1. Allocate result space (asize + bsize digits: that's always
2874 * 2. Compute ah*bh, and copy into result at 2*shift.
2875 * 3. Compute al*bl, and copy into result at 0. Note that this
2876 * can't overlap with #2.
2877 * 4. Subtract al*bl from the result, starting at shift. This may
2878 * underflow (borrow out of the high digit), but we don't care:
2879 * we're effectively doing unsigned arithmetic mod
2880 * BASE**(sizea + sizeb), and so long as the *final* result fits,
2881 * borrows and carries out of the high digit can be ignored.
2882 * 5. Subtract ah*bh from the result, starting at shift.
2883 * 6. Compute (ah+al)*(bh+bl), and add it into the result starting
2887 /* 1. Allocate result space. */
2888 ret
= _PyLong_New(asize
+ bsize
);
2889 if (ret
== NULL
) goto fail
;
2891 /* Fill with trash, to catch reference to uninitialized digits. */
2892 memset(ret
->ob_digit
, 0xDF, Py_SIZE(ret
) * sizeof(digit
));
2895 /* 2. t1 <- ah*bh, and copy into high digits of result. */
2896 if ((t1
= k_mul(ah
, bh
)) == NULL
) goto fail
;
2897 assert(Py_SIZE(t1
) >= 0);
2898 assert(2*shift
+ Py_SIZE(t1
) <= Py_SIZE(ret
));
2899 memcpy(ret
->ob_digit
+ 2*shift
, t1
->ob_digit
,
2900 Py_SIZE(t1
) * sizeof(digit
));
2902 /* Zero-out the digits higher than the ah*bh copy. */
2903 i
= Py_SIZE(ret
) - 2*shift
- Py_SIZE(t1
);
2905 memset(ret
->ob_digit
+ 2*shift
+ Py_SIZE(t1
), 0,
2908 /* 3. t2 <- al*bl, and copy into the low digits. */
2909 if ((t2
= k_mul(al
, bl
)) == NULL
) {
2913 assert(Py_SIZE(t2
) >= 0);
2914 assert(Py_SIZE(t2
) <= 2*shift
); /* no overlap with high digits */
2915 memcpy(ret
->ob_digit
, t2
->ob_digit
, Py_SIZE(t2
) * sizeof(digit
));
2917 /* Zero out remaining digits. */
2918 i
= 2*shift
- Py_SIZE(t2
); /* number of uninitialized digits */
2920 memset(ret
->ob_digit
+ Py_SIZE(t2
), 0, i
* sizeof(digit
));
2922 /* 4 & 5. Subtract ah*bh (t1) and al*bl (t2). We do al*bl first
2923 * because it's fresher in cache.
2925 i
= Py_SIZE(ret
) - shift
; /* # digits after shift */
2926 (void)v_isub(ret
->ob_digit
+ shift
, i
, t2
->ob_digit
, Py_SIZE(t2
));
2929 (void)v_isub(ret
->ob_digit
+ shift
, i
, t1
->ob_digit
, Py_SIZE(t1
));
2932 /* 6. t3 <- (ah+al)(bh+bl), and add into result. */
2933 if ((t1
= x_add(ah
, al
)) == NULL
) goto fail
;
2942 else if ((t2
= x_add(bh
, bl
)) == NULL
) {
2953 if (t3
== NULL
) goto fail
;
2954 assert(Py_SIZE(t3
) >= 0);
2956 /* Add t3. It's not obvious why we can't run out of room here.
2957 * See the (*) comment after this function.
2959 (void)v_iadd(ret
->ob_digit
+ shift
, i
, t3
->ob_digit
, Py_SIZE(t3
));
2962 return long_normalize(ret
);
2973 /* (*) Why adding t3 can't "run out of room" above.
2975 Let f(x) mean the floor of x and c(x) mean the ceiling of x. Some facts
2978 1. For any integer i, i = c(i/2) + f(i/2). In particular,
2979 bsize = c(bsize/2) + f(bsize/2).
2980 2. shift = f(bsize/2)
2982 4. Since we call k_lopsided_mul if asize*2 <= bsize, asize*2 > bsize in this
2983 routine, so asize > bsize/2 >= f(bsize/2) in this routine.
2985 We allocated asize + bsize result digits, and add t3 into them at an offset
2986 of shift. This leaves asize+bsize-shift allocated digit positions for t3
2987 to fit into, = (by #1 and #2) asize + f(bsize/2) + c(bsize/2) - f(bsize/2) =
2988 asize + c(bsize/2) available digit positions.
2990 bh has c(bsize/2) digits, and bl at most f(size/2) digits. So bh+hl has
2991 at most c(bsize/2) digits + 1 bit.
2993 If asize == bsize, ah has c(bsize/2) digits, else ah has at most f(bsize/2)
2994 digits, and al has at most f(bsize/2) digits in any case. So ah+al has at
2995 most (asize == bsize ? c(bsize/2) : f(bsize/2)) digits + 1 bit.
2997 The product (ah+al)*(bh+bl) therefore has at most
2999 c(bsize/2) + (asize == bsize ? c(bsize/2) : f(bsize/2)) digits + 2 bits
3001 and we have asize + c(bsize/2) available digit positions. We need to show
3002 this is always enough. An instance of c(bsize/2) cancels out in both, so
3003 the question reduces to whether asize digits is enough to hold
3004 (asize == bsize ? c(bsize/2) : f(bsize/2)) digits + 2 bits. If asize < bsize,
3005 then we're asking whether asize digits >= f(bsize/2) digits + 2 bits. By #4,
3006 asize is at least f(bsize/2)+1 digits, so this in turn reduces to whether 1
3007 digit is enough to hold 2 bits. This is so since PyLong_SHIFT=15 >= 2. If
3008 asize == bsize, then we're asking whether bsize digits is enough to hold
3009 c(bsize/2) digits + 2 bits, or equivalently (by #1) whether f(bsize/2) digits
3010 is enough to hold 2 bits. This is so if bsize >= 2, which holds because
3011 bsize >= KARATSUBA_CUTOFF >= 2.
3013 Note that since there's always enough room for (ah+al)*(bh+bl), and that's
3014 clearly >= each of ah*bh and al*bl, there's always enough room to subtract
3015 ah*bh and al*bl too.
3018 /* b has at least twice the digits of a, and a is big enough that Karatsuba
3019 * would pay off *if* the inputs had balanced sizes. View b as a sequence
3020 * of slices, each with a->ob_size digits, and multiply the slices by a,
3021 * one at a time. This gives k_mul balanced inputs to work with, and is
3022 * also cache-friendly (we compute one double-width slice of the result
3023 * at a time, then move on, never bactracking except for the helpful
3024 * single-width slice overlap between successive partial sums).
3026 static PyLongObject
*
3027 k_lopsided_mul(PyLongObject
*a
, PyLongObject
*b
)
3029 const Py_ssize_t asize
= ABS(Py_SIZE(a
));
3030 Py_ssize_t bsize
= ABS(Py_SIZE(b
));
3031 Py_ssize_t nbdone
; /* # of b digits already multiplied */
3033 PyLongObject
*bslice
= NULL
;
3035 assert(asize
> KARATSUBA_CUTOFF
);
3036 assert(2 * asize
<= bsize
);
3038 /* Allocate result space, and zero it out. */
3039 ret
= _PyLong_New(asize
+ bsize
);
3042 memset(ret
->ob_digit
, 0, Py_SIZE(ret
) * sizeof(digit
));
3044 /* Successive slices of b are copied into bslice. */
3045 bslice
= _PyLong_New(asize
);
3051 PyLongObject
*product
;
3052 const Py_ssize_t nbtouse
= MIN(bsize
, asize
);
3054 /* Multiply the next slice of b by a. */
3055 memcpy(bslice
->ob_digit
, b
->ob_digit
+ nbdone
,
3056 nbtouse
* sizeof(digit
));
3057 Py_SIZE(bslice
) = nbtouse
;
3058 product
= k_mul(a
, bslice
);
3059 if (product
== NULL
)
3062 /* Add into result. */
3063 (void)v_iadd(ret
->ob_digit
+ nbdone
, Py_SIZE(ret
) - nbdone
,
3064 product
->ob_digit
, Py_SIZE(product
));
3072 return long_normalize(ret
);
3081 long_mul(PyLongObject
*a
, PyLongObject
*b
)
3087 /* fast path for single-digit multiplication */
3088 if (ABS(Py_SIZE(a
)) <= 1 && ABS(Py_SIZE(b
)) <= 1) {
3089 stwodigits v
= (stwodigits
)(MEDIUM_VALUE(a
)) * MEDIUM_VALUE(b
);
3090 #ifdef HAVE_LONG_LONG
3091 return PyLong_FromLongLong((PY_LONG_LONG
)v
);
3093 /* if we don't have long long then we're almost certainly
3094 using 15-bit digits, so v will fit in a long. In the
3095 unlikely event that we're using 30-bit digits on a platform
3096 without long long, a large v will just cause us to fall
3097 through to the general multiplication code below. */
3098 if (v
>= LONG_MIN
&& v
<= LONG_MAX
)
3099 return PyLong_FromLong((long)v
);
3104 /* Negate if exactly one of the inputs is negative. */
3105 if (((Py_SIZE(a
) ^ Py_SIZE(b
)) < 0) && z
)
3107 return (PyObject
*)z
;
3110 /* The / and % operators are now defined in terms of divmod().
3111 The expression a mod b has the value a - b*floor(a/b).
3112 The long_divrem function gives the remainder after division of
3113 |a| by |b|, with the sign of a. This is also expressed
3114 as a - b*trunc(a/b), if trunc truncates towards zero.
3121 So, to get from rem to mod, we have to add b if a and b
3122 have different signs. We then subtract one from the 'div'
3123 part of the outcome to keep the invariant intact. */
3126 * *pdiv, *pmod = divmod(v, w)
3127 * NULL can be passed for pdiv or pmod, in which case that part of
3128 * the result is simply thrown away. The caller owns a reference to
3129 * each of these it requests (does not pass NULL for).
3132 l_divmod(PyLongObject
*v
, PyLongObject
*w
,
3133 PyLongObject
**pdiv
, PyLongObject
**pmod
)
3135 PyLongObject
*div
, *mod
;
3137 if (long_divrem(v
, w
, &div
, &mod
) < 0)
3139 if ((Py_SIZE(mod
) < 0 && Py_SIZE(w
) > 0) ||
3140 (Py_SIZE(mod
) > 0 && Py_SIZE(w
) < 0)) {
3143 temp
= (PyLongObject
*) long_add(mod
, w
);
3150 one
= (PyLongObject
*) PyLong_FromLong(1L);
3152 (temp
= (PyLongObject
*) long_sub(div
, one
)) == NULL
) {
3176 long_div(PyObject
*a
, PyObject
*b
)
3181 if (l_divmod((PyLongObject
*)a
, (PyLongObject
*)b
, &div
, NULL
) < 0)
3183 return (PyObject
*)div
;
3187 long_true_divide(PyObject
*a
, PyObject
*b
)
3190 int failed
, aexp
= -1, bexp
= -1;
3193 ad
= _PyLong_AsScaledDouble((PyObject
*)a
, &aexp
);
3194 bd
= _PyLong_AsScaledDouble((PyObject
*)b
, &bexp
);
3195 failed
= (ad
== -1.0 || bd
== -1.0) && PyErr_Occurred();
3198 /* 'aexp' and 'bexp' were initialized to -1 to silence gcc-4.0.x,
3199 but should really be set correctly after sucessful calls to
3200 _PyLong_AsScaledDouble() */
3201 assert(aexp
>= 0 && bexp
>= 0);
3204 PyErr_SetString(PyExc_ZeroDivisionError
,
3205 "int division or modulo by zero");
3209 /* True value is very close to ad/bd * 2**(PyLong_SHIFT*(aexp-bexp)) */
3210 ad
/= bd
; /* overflow/underflow impossible here */
3212 if (aexp
> INT_MAX
/ PyLong_SHIFT
)
3214 else if (aexp
< -(INT_MAX
/ PyLong_SHIFT
))
3215 return PyFloat_FromDouble(0.0); /* underflow to 0 */
3217 ad
= ldexp(ad
, aexp
* PyLong_SHIFT
);
3218 if (Py_OVERFLOWED(ad
)) /* ignore underflow to 0.0 */
3220 return PyFloat_FromDouble(ad
);
3223 PyErr_SetString(PyExc_OverflowError
,
3224 "int/int too large for a float");
3230 long_mod(PyObject
*a
, PyObject
*b
)
3236 if (l_divmod((PyLongObject
*)a
, (PyLongObject
*)b
, NULL
, &mod
) < 0)
3238 return (PyObject
*)mod
;
3242 long_divmod(PyObject
*a
, PyObject
*b
)
3244 PyLongObject
*div
, *mod
;
3249 if (l_divmod((PyLongObject
*)a
, (PyLongObject
*)b
, &div
, &mod
) < 0) {
3254 PyTuple_SetItem(z
, 0, (PyObject
*) div
);
3255 PyTuple_SetItem(z
, 1, (PyObject
*) mod
);
3266 long_pow(PyObject
*v
, PyObject
*w
, PyObject
*x
)
3268 PyLongObject
*a
, *b
, *c
; /* a,b,c = v,w,x */
3269 int negativeOutput
= 0; /* if x<0 return negative output */
3271 PyLongObject
*z
= NULL
; /* accumulated result */
3272 Py_ssize_t i
, j
, k
; /* counters */
3273 PyLongObject
*temp
= NULL
;
3275 /* 5-ary values. If the exponent is large enough, table is
3276 * precomputed so that table[i] == a**i % c for i in range(32).
3278 PyLongObject
*table
[32] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3279 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
3281 /* a, b, c = v, w, x */
3283 a
= (PyLongObject
*)v
; Py_INCREF(a
);
3284 b
= (PyLongObject
*)w
; Py_INCREF(b
);
3285 if (PyLong_Check(x
)) {
3286 c
= (PyLongObject
*)x
;
3289 else if (x
== Py_None
)
3294 Py_INCREF(Py_NotImplemented
);
3295 return Py_NotImplemented
;
3298 if (Py_SIZE(b
) < 0) { /* if exponent is negative */
3300 PyErr_SetString(PyExc_TypeError
, "pow() 2nd argument "
3301 "cannot be negative when 3rd argument specified");
3305 /* else return a float. This works because we know
3306 that this calls float_pow() which converts its
3307 arguments to double. */
3310 return PyFloat_Type
.tp_as_number
->nb_power(v
, w
, x
);
3316 raise ValueError() */
3317 if (Py_SIZE(c
) == 0) {
3318 PyErr_SetString(PyExc_ValueError
,
3319 "pow() 3rd argument cannot be 0");
3324 negativeOutput = True
3325 modulus = -modulus */
3326 if (Py_SIZE(c
) < 0) {
3328 temp
= (PyLongObject
*)_PyLong_Copy(c
);
3339 if ((Py_SIZE(c
) == 1) && (c
->ob_digit
[0] == 1)) {
3340 z
= (PyLongObject
*)PyLong_FromLong(0L);
3345 base = base % modulus
3346 Having the base positive just makes things easier. */
3347 if (Py_SIZE(a
) < 0) {
3348 if (l_divmod(a
, c
, NULL
, &temp
) < 0)
3356 /* At this point a, b, and c are guaranteed non-negative UNLESS
3357 c is NULL, in which case a may be negative. */
3359 z
= (PyLongObject
*)PyLong_FromLong(1L);
3363 /* Perform a modular reduction, X = X % c, but leave X alone if c
3368 if (l_divmod(X, c, NULL, &temp) < 0) \
3375 /* Multiply two values, then reduce the result:
3376 result = X*Y % c. If c is NULL, skip the mod. */
3377 #define MULT(X, Y, result) \
3379 temp = (PyLongObject *)long_mul(X, Y); \
3382 Py_XDECREF(result); \
3388 if (Py_SIZE(b
) <= FIVEARY_CUTOFF
) {
3389 /* Left-to-right binary exponentiation (HAC Algorithm 14.79) */
3390 /* http://www.cacr.math.uwaterloo.ca/hac/about/chap14.pdf */
3391 for (i
= Py_SIZE(b
) - 1; i
>= 0; --i
) {
3392 digit bi
= b
->ob_digit
[i
];
3394 for (j
= (digit
)1 << (PyLong_SHIFT
-1); j
!= 0; j
>>= 1) {
3402 /* Left-to-right 5-ary exponentiation (HAC Algorithm 14.82) */
3403 Py_INCREF(z
); /* still holds 1L */
3405 for (i
= 1; i
< 32; ++i
)
3406 MULT(table
[i
-1], a
, table
[i
])
3408 for (i
= Py_SIZE(b
) - 1; i
>= 0; --i
) {
3409 const digit bi
= b
->ob_digit
[i
];
3411 for (j
= PyLong_SHIFT
- 5; j
>= 0; j
-= 5) {
3412 const int index
= (bi
>> j
) & 0x1f;
3413 for (k
= 0; k
< 5; ++k
)
3416 MULT(z
, table
[index
], z
)
3421 if (negativeOutput
&& (Py_SIZE(z
) != 0)) {
3422 temp
= (PyLongObject
*)long_sub(z
, c
);
3438 if (Py_SIZE(b
) > FIVEARY_CUTOFF
) {
3439 for (i
= 0; i
< 32; ++i
)
3440 Py_XDECREF(table
[i
]);
3446 return (PyObject
*)z
;
3450 long_invert(PyLongObject
*v
)
3452 /* Implement ~x as -(x+1) */
3455 if (ABS(Py_SIZE(v
)) <=1)
3456 return PyLong_FromLong(-(MEDIUM_VALUE(v
)+1));
3457 w
= (PyLongObject
*)PyLong_FromLong(1L);
3460 x
= (PyLongObject
*) long_add(v
, w
);
3464 Py_SIZE(x
) = -(Py_SIZE(x
));
3465 return (PyObject
*)maybe_small_long(x
);
3469 long_neg(PyLongObject
*v
)
3472 if (ABS(Py_SIZE(v
)) <= 1)
3473 return PyLong_FromLong(-MEDIUM_VALUE(v
));
3474 z
= (PyLongObject
*)_PyLong_Copy(v
);
3476 Py_SIZE(z
) = -(Py_SIZE(v
));
3477 return (PyObject
*)z
;
3481 long_abs(PyLongObject
*v
)
3486 return long_long((PyObject
*)v
);
3490 long_bool(PyLongObject
*v
)
3492 return ABS(Py_SIZE(v
)) != 0;
3496 long_rshift(PyLongObject
*a
, PyLongObject
*b
)
3498 PyLongObject
*z
= NULL
;
3500 Py_ssize_t newsize
, wordshift
, loshift
, hishift
, i
, j
;
3501 digit lomask
, himask
;
3505 if (Py_SIZE(a
) < 0) {
3506 /* Right shifting negative numbers is harder */
3507 PyLongObject
*a1
, *a2
;
3508 a1
= (PyLongObject
*) long_invert(a
);
3511 a2
= (PyLongObject
*) long_rshift(a1
, b
);
3515 z
= (PyLongObject
*) long_invert(a2
);
3520 shiftby
= PyLong_AsLong((PyObject
*)b
);
3521 if (shiftby
== -1L && PyErr_Occurred())
3524 PyErr_SetString(PyExc_ValueError
,
3525 "negative shift count");
3528 wordshift
= shiftby
/ PyLong_SHIFT
;
3529 newsize
= ABS(Py_SIZE(a
)) - wordshift
;
3531 return PyLong_FromLong(0);
3532 loshift
= shiftby
% PyLong_SHIFT
;
3533 hishift
= PyLong_SHIFT
- loshift
;
3534 lomask
= ((digit
)1 << hishift
) - 1;
3535 himask
= PyLong_MASK
^ lomask
;
3536 z
= _PyLong_New(newsize
);
3540 Py_SIZE(z
) = -(Py_SIZE(z
));
3541 for (i
= 0, j
= wordshift
; i
< newsize
; i
++, j
++) {
3542 z
->ob_digit
[i
] = (a
->ob_digit
[j
] >> loshift
) & lomask
;
3545 (a
->ob_digit
[j
+1] << hishift
) & himask
;
3547 z
= long_normalize(z
);
3550 return (PyObject
*) maybe_small_long(z
);
3555 long_lshift(PyObject
*v
, PyObject
*w
)
3557 /* This version due to Tim Peters */
3558 PyLongObject
*a
= (PyLongObject
*)v
;
3559 PyLongObject
*b
= (PyLongObject
*)w
;
3560 PyLongObject
*z
= NULL
;
3562 Py_ssize_t oldsize
, newsize
, wordshift
, remshift
, i
, j
;
3567 shiftby
= PyLong_AsLong((PyObject
*)b
);
3568 if (shiftby
== -1L && PyErr_Occurred())
3571 PyErr_SetString(PyExc_ValueError
, "negative shift count");
3574 if ((long)(int)shiftby
!= shiftby
) {
3575 PyErr_SetString(PyExc_ValueError
,
3576 "outrageous left shift count");
3579 /* wordshift, remshift = divmod(shiftby, PyLong_SHIFT) */
3580 wordshift
= (int)shiftby
/ PyLong_SHIFT
;
3581 remshift
= (int)shiftby
- wordshift
* PyLong_SHIFT
;
3583 oldsize
= ABS(Py_SIZE(a
));
3584 newsize
= oldsize
+ wordshift
;
3587 z
= _PyLong_New(newsize
);
3592 for (i
= 0; i
< wordshift
; i
++)
3595 for (i
= wordshift
, j
= 0; j
< oldsize
; i
++, j
++) {
3596 accum
|= (twodigits
)a
->ob_digit
[j
] << remshift
;
3597 z
->ob_digit
[i
] = (digit
)(accum
& PyLong_MASK
);
3598 accum
>>= PyLong_SHIFT
;
3601 z
->ob_digit
[newsize
-1] = (digit
)accum
;
3604 z
= long_normalize(z
);
3606 return (PyObject
*) maybe_small_long(z
);
3610 /* Bitwise and/xor/or operations */
3613 long_bitwise(PyLongObject
*a
,
3614 int op
, /* '&', '|', '^' */
3617 digit maska
, maskb
; /* 0 or PyLong_MASK */
3619 Py_ssize_t size_a
, size_b
, size_z
, i
;
3624 if (Py_SIZE(a
) < 0) {
3625 a
= (PyLongObject
*) long_invert(a
);
3628 maska
= PyLong_MASK
;
3634 if (Py_SIZE(b
) < 0) {
3635 b
= (PyLongObject
*) long_invert(b
);
3640 maskb
= PyLong_MASK
;
3650 if (maska
!= maskb
) {
3651 maska
^= PyLong_MASK
;
3656 if (maska
&& maskb
) {
3658 maska
^= PyLong_MASK
;
3659 maskb
^= PyLong_MASK
;
3664 if (maska
|| maskb
) {
3666 maska
^= PyLong_MASK
;
3667 maskb
^= PyLong_MASK
;
3673 /* JRH: The original logic here was to allocate the result value (z)
3674 as the longer of the two operands. However, there are some cases
3675 where the result is guaranteed to be shorter than that: AND of two
3676 positives, OR of two negatives: use the shorter number. AND with
3677 mixed signs: use the positive number. OR with mixed signs: use the
3678 negative number. After the transformations above, op will be '&'
3679 iff one of these cases applies, and mask will be non-0 for operands
3680 whose length should be ignored.
3683 size_a
= Py_SIZE(a
);
3684 size_b
= Py_SIZE(b
);
3688 : (maskb
? size_a
: MIN(size_a
, size_b
)))
3689 : MAX(size_a
, size_b
);
3690 z
= _PyLong_New(size_z
);
3697 for (i
= 0; i
< size_z
; ++i
) {
3698 diga
= (i
< size_a
? a
->ob_digit
[i
] : 0) ^ maska
;
3699 digb
= (i
< size_b
? b
->ob_digit
[i
] : 0) ^ maskb
;
3701 case '&': z
->ob_digit
[i
] = diga
& digb
; break;
3702 case '|': z
->ob_digit
[i
] = diga
| digb
; break;
3703 case '^': z
->ob_digit
[i
] = diga
^ digb
; break;
3709 z
= long_normalize(z
);
3711 return (PyObject
*) maybe_small_long(z
);
3718 long_and(PyObject
*a
, PyObject
*b
)
3722 c
= long_bitwise((PyLongObject
*)a
, '&', (PyLongObject
*)b
);
3727 long_xor(PyObject
*a
, PyObject
*b
)
3731 c
= long_bitwise((PyLongObject
*)a
, '^', (PyLongObject
*)b
);
3736 long_or(PyObject
*a
, PyObject
*b
)
3740 c
= long_bitwise((PyLongObject
*)a
, '|', (PyLongObject
*)b
);
3745 long_long(PyObject
*v
)
3747 if (PyLong_CheckExact(v
))
3750 v
= _PyLong_Copy((PyLongObject
*)v
);
3755 long_float(PyObject
*v
)
3758 result
= PyLong_AsDouble(v
);
3759 if (result
== -1.0 && PyErr_Occurred())
3761 return PyFloat_FromDouble(result
);
3765 long_subtype_new(PyTypeObject
*type
, PyObject
*args
, PyObject
*kwds
);
3768 long_new(PyTypeObject
*type
, PyObject
*args
, PyObject
*kwds
)
3771 int base
= -909; /* unlikely! */
3772 static char *kwlist
[] = {"x", "base", 0};
3774 if (type
!= &PyLong_Type
)
3775 return long_subtype_new(type
, args
, kwds
); /* Wimp out */
3776 if (!PyArg_ParseTupleAndKeywords(args
, kwds
, "|Oi:int", kwlist
,
3780 return PyLong_FromLong(0L);
3782 return PyNumber_Long(x
);
3783 else if (PyUnicode_Check(x
))
3784 return PyLong_FromUnicode(PyUnicode_AS_UNICODE(x
),
3785 PyUnicode_GET_SIZE(x
),
3787 else if (PyByteArray_Check(x
) || PyBytes_Check(x
)) {
3788 /* Since PyLong_FromString doesn't have a length parameter,
3789 * check here for possible NULs in the string. */
3791 Py_ssize_t size
= Py_SIZE(x
);
3792 if (PyByteArray_Check(x
))
3793 string
= PyByteArray_AS_STRING(x
);
3795 string
= PyBytes_AS_STRING(x
);
3796 if (strlen(string
) != (size_t)size
) {
3797 /* We only see this if there's a null byte in x,
3798 x is a bytes or buffer, *and* a base is given. */
3799 PyErr_Format(PyExc_ValueError
,
3800 "invalid literal for int() with base %d: %R",
3804 return PyLong_FromString(string
, NULL
, base
);
3807 PyErr_SetString(PyExc_TypeError
,
3808 "int() can't convert non-string with explicit base");
3813 /* Wimpy, slow approach to tp_new calls for subtypes of long:
3814 first create a regular long from whatever arguments we got,
3815 then allocate a subtype instance and initialize it from
3816 the regular long. The regular long is then thrown away.
3819 long_subtype_new(PyTypeObject
*type
, PyObject
*args
, PyObject
*kwds
)
3821 PyLongObject
*tmp
, *newobj
;
3824 assert(PyType_IsSubtype(type
, &PyLong_Type
));
3825 tmp
= (PyLongObject
*)long_new(&PyLong_Type
, args
, kwds
);
3828 assert(PyLong_CheckExact(tmp
));
3832 newobj
= (PyLongObject
*)type
->tp_alloc(type
, n
);
3833 if (newobj
== NULL
) {
3837 assert(PyLong_Check(newobj
));
3838 Py_SIZE(newobj
) = Py_SIZE(tmp
);
3839 for (i
= 0; i
< n
; i
++)
3840 newobj
->ob_digit
[i
] = tmp
->ob_digit
[i
];
3842 return (PyObject
*)newobj
;
3846 long_getnewargs(PyLongObject
*v
)
3848 return Py_BuildValue("(N)", _PyLong_Copy(v
));
3852 long_get0(PyLongObject
*v
, void *context
) {
3853 return PyLong_FromLong(0L);
3857 long_get1(PyLongObject
*v
, void *context
) {
3858 return PyLong_FromLong(1L);
3862 long__format__(PyObject
*self
, PyObject
*args
)
3864 PyObject
*format_spec
;
3866 if (!PyArg_ParseTuple(args
, "U:__format__", &format_spec
))
3868 return _PyLong_FormatAdvanced(self
,
3869 PyUnicode_AS_UNICODE(format_spec
),
3870 PyUnicode_GET_SIZE(format_spec
));
3874 long_round(PyObject
*self
, PyObject
*args
)
3876 PyObject
*o_ndigits
=NULL
, *temp
;
3877 PyLongObject
*pow
=NULL
, *q
=NULL
, *r
=NULL
, *ndigits
=NULL
, *one
;
3881 /* Notes on the algorithm: to round to the nearest 10**n (n positive),
3882 the straightforward method is:
3885 (2) round to nearest integer (round to even in case of tie)
3886 (3) multiply result by 10**n.
3888 But the rounding step involves examining the fractional part of the
3889 quotient to see whether it's greater than 0.5 or not. Since we
3890 want to do the whole calculation in integer arithmetic, it's
3893 (1) divide by (10**n)/2
3894 (2) round to nearest multiple of 2 (multiple of 4 in case of tie)
3895 (3) multiply result by (10**n)/2.
3897 Then all we need to know about the fractional part of the quotient
3898 arising in step (2) is whether it's zero or not.
3900 Doing both a multiplication and division is wasteful, and is easily
3901 avoided if we just figure out how much to adjust the original input
3902 by to do the rounding.
3904 Here's the whole algorithm expressed in Python.
3906 def round(self, ndigits = None):
3907 """round(int, int) -> int"""
3908 if ndigits is None or ndigits >= 0:
3910 pow = 10**-ndigits >> 1
3911 q, r = divmod(self, pow)
3914 if (q & 2 == r == 0):
3921 if (!PyArg_ParseTuple(args
, "|O", &o_ndigits
))
3923 if (o_ndigits
== NULL
)
3924 return long_long(self
);
3926 ndigits
= (PyLongObject
*)PyNumber_Index(o_ndigits
);
3927 if (ndigits
== NULL
)
3930 if (Py_SIZE(ndigits
) >= 0) {
3932 return long_long(self
);
3935 Py_INCREF(self
); /* to keep refcounting simple */
3936 /* we now own references to self, ndigits */
3938 /* pow = 10 ** -ndigits >> 1 */
3939 pow
= (PyLongObject
*)PyLong_FromLong(10L);
3942 temp
= long_neg(ndigits
);
3944 ndigits
= (PyLongObject
*)temp
;
3945 if (ndigits
== NULL
)
3947 temp
= long_pow((PyObject
*)pow
, (PyObject
*)ndigits
, Py_None
);
3949 pow
= (PyLongObject
*)temp
;
3952 assert(PyLong_Check(pow
)); /* check long_pow returned a long */
3953 one
= (PyLongObject
*)PyLong_FromLong(1L);
3956 temp
= long_rshift(pow
, one
);
3959 pow
= (PyLongObject
*)temp
;
3963 /* q, r = divmod(self, pow) */
3964 errcode
= l_divmod((PyLongObject
*)self
, pow
, &q
, &r
);
3969 temp
= long_sub((PyLongObject
*)self
, r
);
3975 /* get value of quotient modulo 4 */
3976 if (Py_SIZE(q
) == 0)
3978 else if (Py_SIZE(q
) > 0)
3979 q_mod_4
= q
->ob_digit
[0] & 3;
3981 q_mod_4
= (PyLong_BASE
-q
->ob_digit
[0]) & 3;
3983 if ((q_mod_4
& 1) == 1) {
3984 /* q is odd; round self up or down by adding or subtracting pow */
3985 if (q_mod_4
== 1 && Py_SIZE(r
) == 0)
3986 temp
= (PyObject
*)long_sub((PyLongObject
*)self
, pow
);
3988 temp
= (PyObject
*)long_add((PyLongObject
*)self
, pow
);
4005 Py_XDECREF(ndigits
);
4010 long_sizeof(PyLongObject
*v
)
4014 res
= offsetof(PyLongObject
, ob_digit
) + ABS(Py_SIZE(v
))*sizeof(digit
);
4015 return PyLong_FromSsize_t(res
);
4019 long_bit_length(PyLongObject
*v
)
4021 PyLongObject
*result
, *x
, *y
;
4022 Py_ssize_t ndigits
, msd_bits
= 0;
4026 assert(PyLong_Check(v
));
4028 ndigits
= ABS(Py_SIZE(v
));
4030 return PyLong_FromLong(0);
4032 msd
= v
->ob_digit
[ndigits
-1];
4037 msd_bits
+= (long)(BitLengthTable
[msd
]);
4039 if (ndigits
<= PY_SSIZE_T_MAX
/PyLong_SHIFT
)
4040 return PyLong_FromSsize_t((ndigits
-1)*PyLong_SHIFT
+ msd_bits
);
4042 /* expression above may overflow; use Python integers instead */
4043 result
= (PyLongObject
*)PyLong_FromSsize_t(ndigits
- 1);
4046 x
= (PyLongObject
*)PyLong_FromLong(PyLong_SHIFT
);
4049 y
= (PyLongObject
*)long_mul(result
, x
);
4056 x
= (PyLongObject
*)PyLong_FromLong((long)msd_bits
);
4059 y
= (PyLongObject
*)long_add(result
, x
);
4066 return (PyObject
*)result
;
4073 PyDoc_STRVAR(long_bit_length_doc
,
4074 "int.bit_length() -> int\n\
4076 Number of bits necessary to represent self in binary.\n\
4079 >>> (37).bit_length()\n\
4084 long_is_finite(PyObject
*v
)
4090 static PyMethodDef long_methods
[] = {
4091 {"conjugate", (PyCFunction
)long_long
, METH_NOARGS
,
4092 "Returns self, the complex conjugate of any int."},
4093 {"bit_length", (PyCFunction
)long_bit_length
, METH_NOARGS
,
4094 long_bit_length_doc
},
4096 {"is_finite", (PyCFunction
)long_is_finite
, METH_NOARGS
,
4097 "Returns always True."},
4099 {"__trunc__", (PyCFunction
)long_long
, METH_NOARGS
,
4100 "Truncating an Integral returns itself."},
4101 {"__floor__", (PyCFunction
)long_long
, METH_NOARGS
,
4102 "Flooring an Integral returns itself."},
4103 {"__ceil__", (PyCFunction
)long_long
, METH_NOARGS
,
4104 "Ceiling of an Integral returns itself."},
4105 {"__round__", (PyCFunction
)long_round
, METH_VARARGS
,
4106 "Rounding an Integral returns itself.\n"
4107 "Rounding with an ndigits argument also returns an integer."},
4108 {"__getnewargs__", (PyCFunction
)long_getnewargs
, METH_NOARGS
},
4109 {"__format__", (PyCFunction
)long__format__
, METH_VARARGS
},
4110 {"__sizeof__", (PyCFunction
)long_sizeof
, METH_NOARGS
,
4111 "Returns size in memory, in bytes"},
4112 {NULL
, NULL
} /* sentinel */
4115 static PyGetSetDef long_getset
[] = {
4117 (getter
)long_long
, (setter
)NULL
,
4118 "the real part of a complex number",
4121 (getter
)long_get0
, (setter
)NULL
,
4122 "the imaginary part of a complex number",
4125 (getter
)long_long
, (setter
)NULL
,
4126 "the numerator of a rational number in lowest terms",
4129 (getter
)long_get1
, (setter
)NULL
,
4130 "the denominator of a rational number in lowest terms",
4132 {NULL
} /* Sentinel */
4135 PyDoc_STRVAR(long_doc
,
4136 "int(x[, base]) -> integer\n\
4138 Convert a string or number to an integer, if possible. A floating\n\
4139 point argument will be truncated towards zero (this does not include a\n\
4140 string representation of a floating point number!) When converting a\n\
4141 string, use the optional base. It is an error to supply a base when\n\
4142 converting a non-string.");
4144 static PyNumberMethods long_as_number
= {
4145 (binaryfunc
) long_add
, /*nb_add*/
4146 (binaryfunc
) long_sub
, /*nb_subtract*/
4147 (binaryfunc
) long_mul
, /*nb_multiply*/
4148 long_mod
, /*nb_remainder*/
4149 long_divmod
, /*nb_divmod*/
4150 long_pow
, /*nb_power*/
4151 (unaryfunc
) long_neg
, /*nb_negative*/
4152 (unaryfunc
) long_long
, /*tp_positive*/
4153 (unaryfunc
) long_abs
, /*tp_absolute*/
4154 (inquiry
) long_bool
, /*tp_bool*/
4155 (unaryfunc
) long_invert
, /*nb_invert*/
4156 long_lshift
, /*nb_lshift*/
4157 (binaryfunc
) long_rshift
, /*nb_rshift*/
4158 long_and
, /*nb_and*/
4159 long_xor
, /*nb_xor*/
4161 long_long
, /*nb_int*/
4163 long_float
, /*nb_float*/
4164 0, /* nb_inplace_add */
4165 0, /* nb_inplace_subtract */
4166 0, /* nb_inplace_multiply */
4167 0, /* nb_inplace_remainder */
4168 0, /* nb_inplace_power */
4169 0, /* nb_inplace_lshift */
4170 0, /* nb_inplace_rshift */
4171 0, /* nb_inplace_and */
4172 0, /* nb_inplace_xor */
4173 0, /* nb_inplace_or */
4174 long_div
, /* nb_floor_divide */
4175 long_true_divide
, /* nb_true_divide */
4176 0, /* nb_inplace_floor_divide */
4177 0, /* nb_inplace_true_divide */
4178 long_long
, /* nb_index */
4181 PyTypeObject PyLong_Type
= {
4182 PyVarObject_HEAD_INIT(&PyType_Type
, 0)
4183 "int", /* tp_name */
4184 offsetof(PyLongObject
, ob_digit
), /* tp_basicsize */
4185 sizeof(digit
), /* tp_itemsize */
4186 long_dealloc
, /* tp_dealloc */
4190 0, /* tp_reserved */
4191 long_repr
, /* tp_repr */
4192 &long_as_number
, /* tp_as_number */
4193 0, /* tp_as_sequence */
4194 0, /* tp_as_mapping */
4195 (hashfunc
)long_hash
, /* tp_hash */
4197 long_repr
, /* tp_str */
4198 PyObject_GenericGetAttr
, /* tp_getattro */
4199 0, /* tp_setattro */
4200 0, /* tp_as_buffer */
4201 Py_TPFLAGS_DEFAULT
| Py_TPFLAGS_BASETYPE
|
4202 Py_TPFLAGS_LONG_SUBCLASS
, /* tp_flags */
4203 long_doc
, /* tp_doc */
4204 0, /* tp_traverse */
4206 long_richcompare
, /* tp_richcompare */
4207 0, /* tp_weaklistoffset */
4209 0, /* tp_iternext */
4210 long_methods
, /* tp_methods */
4212 long_getset
, /* tp_getset */
4215 0, /* tp_descr_get */
4216 0, /* tp_descr_set */
4217 0, /* tp_dictoffset */
4220 long_new
, /* tp_new */
4221 PyObject_Del
, /* tp_free */
4224 static PyTypeObject Int_InfoType
;
4226 PyDoc_STRVAR(int_info__doc__
,
4229 A struct sequence that holds information about Python's\n\
4230 internal representation of integers. The attributes are read only.");
4232 static PyStructSequence_Field int_info_fields
[] = {
4233 {"bits_per_digit", "size of a digit in bits"},
4234 {"sizeof_digit", "size in bytes of the C type used to "
4235 "represent a digit"},
4239 static PyStructSequence_Desc int_info_desc
= {
4240 "sys.int_info", /* name */
4241 int_info__doc__
, /* doc */
4242 int_info_fields
, /* fields */
4243 2 /* number of fields */
4247 PyLong_GetInfo(void)
4251 int_info
= PyStructSequence_New(&Int_InfoType
);
4252 if (int_info
== NULL
)
4254 PyStructSequence_SET_ITEM(int_info
, field
++,
4255 PyLong_FromLong(PyLong_SHIFT
));
4256 PyStructSequence_SET_ITEM(int_info
, field
++,
4257 PyLong_FromLong(sizeof(digit
)));
4258 if (PyErr_Occurred()) {
4268 #if NSMALLNEGINTS + NSMALLPOSINTS > 0
4270 PyLongObject
*v
= small_ints
;
4272 for (ival
= -NSMALLNEGINTS
; ival
< NSMALLPOSINTS
; ival
++, v
++) {
4273 size
= (ival
< 0) ? -1 : ((ival
== 0) ? 0 : 1);
4274 if (Py_TYPE(v
) == &PyLong_Type
) {
4275 /* The element is already initialized, most likely
4276 * the Python interpreter was initialized before.
4279 PyObject
* op
= (PyObject
*)v
;
4281 refcnt
= Py_REFCNT(op
) < 0 ? 0 : Py_REFCNT(op
);
4282 _Py_NewReference(op
);
4283 /* _Py_NewReference sets the ref count to 1 but
4284 * the ref count might be larger. Set the refcnt
4285 * to the original refcnt + 1 */
4286 Py_REFCNT(op
) = refcnt
+ 1;
4287 assert(Py_SIZE(op
) == size
);
4288 assert(v
->ob_digit
[0] == abs(ival
));
4291 PyObject_INIT(v
, &PyLong_Type
);
4294 v
->ob_digit
[0] = abs(ival
);
4297 /* initialize int_info */
4298 if (Int_InfoType
.tp_name
== 0)
4299 PyStructSequence_InitType(&Int_InfoType
, &int_info_desc
);
4307 /* Integers are currently statically allocated. Py_DECREF is not
4308 needed, but Python must forget about the reference or multiple
4309 reinitializations will fail. */
4310 #if NSMALLNEGINTS + NSMALLPOSINTS > 0
4312 PyLongObject
*v
= small_ints
;
4313 for (i
= 0; i
< NSMALLNEGINTS
+ NSMALLPOSINTS
; i
++, v
++) {
4315 _Py_ForgetReference((PyObject
*)v
);