3 /* Long (arbitrary precision) integer object implementation */
5 /* XXX The functional organization of this file is terrible */
8 #include "longintrepr.h"
/* For long multiplication, use the O(N**2) school algorithm unless
 * both operands contain more than KARATSUBA_CUTOFF digits (this
 * being an internal Python long digit, in base PyLong_BASE).
 */
#define KARATSUBA_CUTOFF 70
#define KARATSUBA_SQUARE_CUTOFF (2 * KARATSUBA_CUTOFF)

/* For exponentiation, use the binary left-to-right algorithm
 * unless the exponent contains more than FIVEARY_CUTOFF digits.
 * In that case, do 5 bits at a time.  The potential drawback is that
 * a table of 2**5 intermediate results is computed.
 */
#define FIVEARY_CUTOFF 8

#define ABS(x) ((x) < 0 ? -(x) : (x))

#undef MIN
#undef MAX
#define MAX(x, y) ((x) < (y) ? (y) : (x))
#define MIN(x, y) ((x) > (y) ? (y) : (x))

/* Periodically check for pending signals inside long-running loops;
   PyTryBlock is executed when a signal handler raised an exception. */
#define SIGCHECK(PyTryBlock)                            \
    if (--_Py_Ticker < 0) {                             \
        _Py_Ticker = _Py_CheckInterval;                 \
        if (PyErr_CheckSignals()) PyTryBlock            \
    }
42 /* Normalize (remove leading zeros from) a long int object.
43 Doesn't attempt to free the storage--in most cases, due to the nature
44 of the algorithms used, this could save at most be one word anyway. */
47 long_normalize(register PyLongObject
*v
)
49 Py_ssize_t j
= ABS(Py_SIZE(v
));
52 while (i
> 0 && v
->ob_digit
[i
-1] == 0)
55 Py_SIZE(v
) = (Py_SIZE(v
) < 0) ? -(i
) : i
;
59 /* Allocate a new long int object with size digits.
60 Return NULL and set exception if we run out of memory. */
62 #define MAX_LONG_DIGITS \
63 ((PY_SSIZE_T_MAX - offsetof(PyLongObject, ob_digit))/sizeof(digit))
66 _PyLong_New(Py_ssize_t size
)
68 if (size
> (Py_ssize_t
)MAX_LONG_DIGITS
) {
69 PyErr_SetString(PyExc_OverflowError
,
70 "too many digits in integer");
73 /* coverity[ampersand_in_size] */
74 /* XXX(nnorwitz): PyObject_NEW_VAR / _PyObject_VAR_SIZE need to detect
76 return PyObject_NEW_VAR(PyLongObject
, &PyLong_Type
, size
);
80 _PyLong_Copy(PyLongObject
*src
)
89 result
= _PyLong_New(i
);
91 result
->ob_size
= src
->ob_size
;
93 result
->ob_digit
[i
] = src
->ob_digit
[i
];
95 return (PyObject
*)result
;
98 /* Create a new long int object from a C long int */
101 PyLong_FromLong(long ival
)
104 unsigned long abs_ival
;
105 unsigned long t
; /* unsigned so >> doesn't propagate sign bit */
110 /* if LONG_MIN == -LONG_MAX-1 (true on most platforms) then
111 ANSI C says that the result of -ival is undefined when ival
112 == LONG_MIN. Hence the following workaround. */
113 abs_ival
= (unsigned long)(-1-ival
) + 1;
117 abs_ival
= (unsigned long)ival
;
120 /* Count the number of Python digits.
121 We used to pick 5 ("big enough for anything"), but that's a
122 waste of time and space given that 5*15 = 75 bits are rarely
129 v
= _PyLong_New(ndigits
);
131 digit
*p
= v
->ob_digit
;
132 v
->ob_size
= negative
? -ndigits
: ndigits
;
135 *p
++ = (digit
)(t
& PyLong_MASK
);
139 return (PyObject
*)v
;
142 /* Create a new long int object from a C unsigned long int */
145 PyLong_FromUnsignedLong(unsigned long ival
)
151 /* Count the number of Python digits. */
152 t
= (unsigned long)ival
;
157 v
= _PyLong_New(ndigits
);
159 digit
*p
= v
->ob_digit
;
160 Py_SIZE(v
) = ndigits
;
162 *p
++ = (digit
)(ival
& PyLong_MASK
);
163 ival
>>= PyLong_SHIFT
;
166 return (PyObject
*)v
;
169 /* Create a new long int object from a C double */
172 PyLong_FromDouble(double dval
)
176 int i
, ndig
, expo
, neg
;
178 if (Py_IS_INFINITY(dval
)) {
179 PyErr_SetString(PyExc_OverflowError
,
180 "cannot convert float infinity to integer");
183 if (Py_IS_NAN(dval
)) {
184 PyErr_SetString(PyExc_ValueError
,
185 "cannot convert float NaN to integer");
192 frac
= frexp(dval
, &expo
); /* dval = frac*2**expo; 0.0 <= frac < 1.0 */
194 return PyLong_FromLong(0L);
195 ndig
= (expo
-1) / PyLong_SHIFT
+ 1; /* Number of 'digits' in result */
196 v
= _PyLong_New(ndig
);
199 frac
= ldexp(frac
, (expo
-1) % PyLong_SHIFT
+ 1);
200 for (i
= ndig
; --i
>= 0; ) {
201 digit bits
= (digit
)frac
;
202 v
->ob_digit
[i
] = bits
;
203 frac
= frac
- (double)bits
;
204 frac
= ldexp(frac
, PyLong_SHIFT
);
207 Py_SIZE(v
) = -(Py_SIZE(v
));
208 return (PyObject
*)v
;
/* Checking for overflow in PyLong_AsLong is a PITA since C doesn't define
 * anything about what happens when a signed integer operation overflows,
 * and some compilers think they're doing you a favor by being "clever"
 * then.  The bit pattern for the largest postive signed long is
 * (unsigned long)LONG_MAX, and for the smallest negative signed long
 * it is abs(LONG_MIN), which we could write -(unsigned long)LONG_MIN.
 * However, some other compilers warn about applying unary minus to an
 * unsigned operand.  Hence the weird "0-".
 */
#define PY_ABS_LONG_MIN         (0-(unsigned long)LONG_MIN)
#define PY_ABS_SSIZE_T_MIN      (0-(size_t)PY_SSIZE_T_MIN)
223 /* Get a C long int from a Python long or Python int object.
224 On overflow, returns -1 and sets *overflow to 1 or -1 depending
225 on the sign of the result. Otherwise *overflow is 0.
227 For other errors (e.g., type error), returns -1 and sets an error
232 PyLong_AsLongAndOverflow(PyObject
*vv
, int *overflow
)
234 /* This version by Tim Peters */
235 register PyLongObject
*v
;
236 unsigned long x
, prev
;
240 int do_decref
= 0; /* if nb_int was called */
244 PyErr_BadInternalCall();
249 return PyInt_AsLong(vv
);
251 if (!PyLong_Check(vv
)) {
253 nb
= vv
->ob_type
->tp_as_number
;
254 if (nb
== NULL
|| nb
->nb_int
== NULL
) {
255 PyErr_SetString(PyExc_TypeError
,
256 "an integer is required");
259 vv
= (*nb
->nb_int
) (vv
);
263 if(PyInt_Check(vv
)) {
264 res
= PyInt_AsLong(vv
);
267 if (!PyLong_Check(vv
)) {
269 PyErr_SetString(PyExc_TypeError
,
270 "nb_int should return int object");
276 v
= (PyLongObject
*)vv
;
281 res
= -(sdigit
)v
->ob_digit
[0];
287 res
= v
->ob_digit
[0];
298 x
= (x
<< PyLong_SHIFT
) + v
->ob_digit
[i
];
299 if ((x
>> PyLong_SHIFT
) != prev
) {
304 /* Haven't lost any bits, but casting to long requires extra
305 * care (see comment above).
307 if (x
<= (unsigned long)LONG_MAX
) {
308 res
= (long)x
* sign
;
310 else if (sign
< 0 && x
== PY_ABS_LONG_MIN
) {
315 /* res is already set to -1 */
325 /* Get a C long int from a long int object.
326 Returns -1 and sets an error condition if overflow occurs. */
329 PyLong_AsLong(PyObject
*obj
)
332 long result
= PyLong_AsLongAndOverflow(obj
, &overflow
);
334 /* XXX: could be cute and give a different
335 message for overflow == -1 */
336 PyErr_SetString(PyExc_OverflowError
,
337 "Python int too large to convert to C long");
342 /* Get a Py_ssize_t from a long int object.
343 Returns -1 and sets an error condition if overflow occurs. */
346 PyLong_AsSsize_t(PyObject
*vv
) {
347 register PyLongObject
*v
;
352 if (vv
== NULL
|| !PyLong_Check(vv
)) {
353 PyErr_BadInternalCall();
356 v
= (PyLongObject
*)vv
;
366 x
= (x
<< PyLong_SHIFT
) | v
->ob_digit
[i
];
367 if ((x
>> PyLong_SHIFT
) != prev
)
370 /* Haven't lost any bits, but casting to a signed type requires
371 * extra care (see comment above).
373 if (x
<= (size_t)PY_SSIZE_T_MAX
) {
374 return (Py_ssize_t
)x
* sign
;
376 else if (sign
< 0 && x
== PY_ABS_SSIZE_T_MIN
) {
377 return PY_SSIZE_T_MIN
;
382 PyErr_SetString(PyExc_OverflowError
,
383 "long int too large to convert to int");
387 /* Get a C unsigned long int from a long int object.
388 Returns -1 and sets an error condition if overflow occurs. */
391 PyLong_AsUnsignedLong(PyObject
*vv
)
393 register PyLongObject
*v
;
394 unsigned long x
, prev
;
397 if (vv
== NULL
|| !PyLong_Check(vv
)) {
398 if (vv
!= NULL
&& PyInt_Check(vv
)) {
399 long val
= PyInt_AsLong(vv
);
401 PyErr_SetString(PyExc_OverflowError
,
402 "can't convert negative value to unsigned long");
403 return (unsigned long) -1;
407 PyErr_BadInternalCall();
408 return (unsigned long) -1;
410 v
= (PyLongObject
*)vv
;
414 PyErr_SetString(PyExc_OverflowError
,
415 "can't convert negative value to unsigned long");
416 return (unsigned long) -1;
420 x
= (x
<< PyLong_SHIFT
) | v
->ob_digit
[i
];
421 if ((x
>> PyLong_SHIFT
) != prev
) {
422 PyErr_SetString(PyExc_OverflowError
,
423 "long int too large to convert");
424 return (unsigned long) -1;
430 /* Get a C unsigned long int from a long int object, ignoring the high bits.
431 Returns -1 and sets an error condition if an error occurs. */
434 PyLong_AsUnsignedLongMask(PyObject
*vv
)
436 register PyLongObject
*v
;
441 if (vv
== NULL
|| !PyLong_Check(vv
)) {
442 if (vv
!= NULL
&& PyInt_Check(vv
))
443 return PyInt_AsUnsignedLongMask(vv
);
444 PyErr_BadInternalCall();
445 return (unsigned long) -1;
447 v
= (PyLongObject
*)vv
;
456 x
= (x
<< PyLong_SHIFT
) | v
->ob_digit
[i
];
462 _PyLong_Sign(PyObject
*vv
)
464 PyLongObject
*v
= (PyLongObject
*)vv
;
467 assert(PyLong_Check(v
));
469 return Py_SIZE(v
) == 0 ? 0 : (Py_SIZE(v
) < 0 ? -1 : 1);
473 _PyLong_NumBits(PyObject
*vv
)
475 PyLongObject
*v
= (PyLongObject
*)vv
;
480 assert(PyLong_Check(v
));
481 ndigits
= ABS(Py_SIZE(v
));
482 assert(ndigits
== 0 || v
->ob_digit
[ndigits
- 1] != 0);
484 digit msd
= v
->ob_digit
[ndigits
- 1];
486 result
= (ndigits
- 1) * PyLong_SHIFT
;
487 if (result
/ PyLong_SHIFT
!= (size_t)(ndigits
- 1))
499 PyErr_SetString(PyExc_OverflowError
, "long has too many bits "
500 "to express in a platform size_t");
505 _PyLong_FromByteArray(const unsigned char* bytes
, size_t n
,
506 int little_endian
, int is_signed
)
508 const unsigned char* pstartbyte
;/* LSB of bytes */
509 int incr
; /* direction to move pstartbyte */
510 const unsigned char* pendbyte
; /* MSB of bytes */
511 size_t numsignificantbytes
; /* number of bytes that matter */
512 Py_ssize_t ndigits
; /* number of Python long digits */
513 PyLongObject
* v
; /* result */
514 Py_ssize_t idigit
= 0; /* next free index in v->ob_digit */
517 return PyLong_FromLong(0L);
521 pendbyte
= bytes
+ n
- 1;
525 pstartbyte
= bytes
+ n
- 1;
531 is_signed
= *pendbyte
>= 0x80;
533 /* Compute numsignificantbytes. This consists of finding the most
534 significant byte. Leading 0 bytes are insignficant if the number
535 is positive, and leading 0xff bytes if negative. */
538 const unsigned char* p
= pendbyte
;
539 const int pincr
= -incr
; /* search MSB to LSB */
540 const unsigned char insignficant
= is_signed
? 0xff : 0x00;
542 for (i
= 0; i
< n
; ++i
, p
+= pincr
) {
543 if (*p
!= insignficant
)
546 numsignificantbytes
= n
- i
;
547 /* 2's-comp is a bit tricky here, e.g. 0xff00 == -0x0100, so
548 actually has 2 significant bytes. OTOH, 0xff0001 ==
549 -0x00ffff, so we wouldn't *need* to bump it there; but we
550 do for 0xffff = -0x0001. To be safe without bothering to
551 check every case, bump it regardless. */
552 if (is_signed
&& numsignificantbytes
< n
)
553 ++numsignificantbytes
;
556 /* How many Python long digits do we need? We have
557 8*numsignificantbytes bits, and each Python long digit has
558 PyLong_SHIFT bits, so it's the ceiling of the quotient. */
559 /* catch overflow before it happens */
560 if (numsignificantbytes
> (PY_SSIZE_T_MAX
- PyLong_SHIFT
) / 8) {
561 PyErr_SetString(PyExc_OverflowError
,
562 "byte array too long to convert to int");
565 ndigits
= (numsignificantbytes
* 8 + PyLong_SHIFT
- 1) / PyLong_SHIFT
;
566 v
= _PyLong_New(ndigits
);
570 /* Copy the bits over. The tricky parts are computing 2's-comp on
571 the fly for signed numbers, and dealing with the mismatch between
572 8-bit bytes and (probably) 15-bit Python digits.*/
575 twodigits carry
= 1; /* for 2's-comp calculation */
576 twodigits accum
= 0; /* sliding register */
577 unsigned int accumbits
= 0; /* number of bits in accum */
578 const unsigned char* p
= pstartbyte
;
580 for (i
= 0; i
< numsignificantbytes
; ++i
, p
+= incr
) {
581 twodigits thisbyte
= *p
;
582 /* Compute correction for 2's comp, if needed. */
584 thisbyte
= (0xff ^ thisbyte
) + carry
;
585 carry
= thisbyte
>> 8;
588 /* Because we're going LSB to MSB, thisbyte is
589 more significant than what's already in accum,
590 so needs to be prepended to accum. */
591 accum
|= (twodigits
)thisbyte
<< accumbits
;
593 if (accumbits
>= PyLong_SHIFT
) {
594 /* There's enough to fill a Python digit. */
595 assert(idigit
< ndigits
);
596 v
->ob_digit
[idigit
] = (digit
)(accum
&
599 accum
>>= PyLong_SHIFT
;
600 accumbits
-= PyLong_SHIFT
;
601 assert(accumbits
< PyLong_SHIFT
);
604 assert(accumbits
< PyLong_SHIFT
);
606 assert(idigit
< ndigits
);
607 v
->ob_digit
[idigit
] = (digit
)accum
;
612 Py_SIZE(v
) = is_signed
? -idigit
: idigit
;
613 return (PyObject
*)long_normalize(v
);
617 _PyLong_AsByteArray(PyLongObject
* v
,
618 unsigned char* bytes
, size_t n
,
619 int little_endian
, int is_signed
)
621 Py_ssize_t i
; /* index into v->ob_digit */
622 Py_ssize_t ndigits
; /* |v->ob_size| */
623 twodigits accum
; /* sliding register */
624 unsigned int accumbits
; /* # bits in accum */
625 int do_twos_comp
; /* store 2's-comp? is_signed and v < 0 */
626 digit carry
; /* for computing 2's-comp */
627 size_t j
; /* # bytes filled */
628 unsigned char* p
; /* pointer to next byte in bytes */
629 int pincr
; /* direction to move p */
631 assert(v
!= NULL
&& PyLong_Check(v
));
633 if (Py_SIZE(v
) < 0) {
634 ndigits
= -(Py_SIZE(v
));
636 PyErr_SetString(PyExc_OverflowError
,
637 "can't convert negative long to unsigned");
643 ndigits
= Py_SIZE(v
);
656 /* Copy over all the Python digits.
657 It's crucial that every Python digit except for the MSD contribute
658 exactly PyLong_SHIFT bits to the total, so first assert that the long is
660 assert(ndigits
== 0 || v
->ob_digit
[ndigits
- 1] != 0);
664 carry
= do_twos_comp
? 1 : 0;
665 for (i
= 0; i
< ndigits
; ++i
) {
666 digit thisdigit
= v
->ob_digit
[i
];
668 thisdigit
= (thisdigit
^ PyLong_MASK
) + carry
;
669 carry
= thisdigit
>> PyLong_SHIFT
;
670 thisdigit
&= PyLong_MASK
;
672 /* Because we're going LSB to MSB, thisdigit is more
673 significant than what's already in accum, so needs to be
674 prepended to accum. */
675 accum
|= (twodigits
)thisdigit
<< accumbits
;
677 /* The most-significant digit may be (probably is) at least
679 if (i
== ndigits
- 1) {
680 /* Count # of sign bits -- they needn't be stored,
681 * although for signed conversion we need later to
682 * make sure at least one sign bit gets stored. */
683 digit s
= do_twos_comp
? thisdigit
^ PyLong_MASK
:
691 accumbits
+= PyLong_SHIFT
;
693 /* Store as many bytes as possible. */
694 while (accumbits
>= 8) {
698 *p
= (unsigned char)(accum
& 0xff);
705 /* Store the straggler (if any). */
706 assert(accumbits
< 8);
707 assert(carry
== 0); /* else do_twos_comp and *every* digit was 0 */
713 /* Fill leading bits of the byte with sign bits
714 (appropriately pretending that the long had an
715 infinite supply of sign bits). */
716 accum
|= (~(twodigits
)0) << accumbits
;
718 *p
= (unsigned char)(accum
& 0xff);
721 else if (j
== n
&& n
> 0 && is_signed
) {
722 /* The main loop filled the byte array exactly, so the code
723 just above didn't get to ensure there's a sign bit, and the
724 loop below wouldn't add one either. Make sure a sign bit
726 unsigned char msb
= *(p
- pincr
);
727 int sign_bit_set
= msb
>= 0x80;
728 assert(accumbits
== 0);
729 if (sign_bit_set
== do_twos_comp
)
735 /* Fill remaining bytes with copies of the sign bit. */
737 unsigned char signbyte
= do_twos_comp
? 0xffU
: 0U;
738 for ( ; j
< n
; ++j
, p
+= pincr
)
745 PyErr_SetString(PyExc_OverflowError
, "long too big to convert");
750 /* Create a new long (or int) object from a C pointer */
753 PyLong_FromVoidPtr(void *p
)
755 #if SIZEOF_VOID_P <= SIZEOF_LONG
757 return PyLong_FromUnsignedLong((unsigned long)p
);
758 return PyInt_FromLong((long)p
);
761 #ifndef HAVE_LONG_LONG
762 # error "PyLong_FromVoidPtr: sizeof(void*) > sizeof(long), but no long long"
764 #if SIZEOF_LONG_LONG < SIZEOF_VOID_P
765 # error "PyLong_FromVoidPtr: sizeof(PY_LONG_LONG) < sizeof(void*)"
767 /* optimize null pointers */
769 return PyInt_FromLong(0);
770 return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG
)p
);
772 #endif /* SIZEOF_VOID_P <= SIZEOF_LONG */
775 /* Get a C pointer from a long object (or an int object in some cases) */
778 PyLong_AsVoidPtr(PyObject
*vv
)
780 /* This function will allow int or long objects. If vv is neither,
781 then the PyLong_AsLong*() functions will raise the exception:
782 PyExc_SystemError, "bad argument to internal function"
784 #if SIZEOF_VOID_P <= SIZEOF_LONG
788 x
= PyInt_AS_LONG(vv
);
789 else if (PyLong_Check(vv
) && _PyLong_Sign(vv
) < 0)
790 x
= PyLong_AsLong(vv
);
792 x
= PyLong_AsUnsignedLong(vv
);
795 #ifndef HAVE_LONG_LONG
796 # error "PyLong_AsVoidPtr: sizeof(void*) > sizeof(long), but no long long"
798 #if SIZEOF_LONG_LONG < SIZEOF_VOID_P
799 # error "PyLong_AsVoidPtr: sizeof(PY_LONG_LONG) < sizeof(void*)"
804 x
= PyInt_AS_LONG(vv
);
805 else if (PyLong_Check(vv
) && _PyLong_Sign(vv
) < 0)
806 x
= PyLong_AsLongLong(vv
);
808 x
= PyLong_AsUnsignedLongLong(vv
);
810 #endif /* SIZEOF_VOID_P <= SIZEOF_LONG */
812 if (x
== -1 && PyErr_Occurred())
817 #ifdef HAVE_LONG_LONG
819 /* Initial PY_LONG_LONG support by Chris Herborth (chrish@qnx.com), later
820 * rewritten to use the newer PyLong_{As,From}ByteArray API.
823 #define IS_LITTLE_ENDIAN (int)*(unsigned char*)&one
825 /* Create a new long int object from a C PY_LONG_LONG int. */
828 PyLong_FromLongLong(PY_LONG_LONG ival
)
831 unsigned PY_LONG_LONG abs_ival
;
832 unsigned PY_LONG_LONG t
; /* unsigned so >> doesn't propagate sign bit */
837 /* avoid signed overflow on negation; see comments
838 in PyLong_FromLong above. */
839 abs_ival
= (unsigned PY_LONG_LONG
)(-1-ival
) + 1;
843 abs_ival
= (unsigned PY_LONG_LONG
)ival
;
846 /* Count the number of Python digits.
847 We used to pick 5 ("big enough for anything"), but that's a
848 waste of time and space given that 5*15 = 75 bits are rarely
855 v
= _PyLong_New(ndigits
);
857 digit
*p
= v
->ob_digit
;
858 Py_SIZE(v
) = negative
? -ndigits
: ndigits
;
861 *p
++ = (digit
)(t
& PyLong_MASK
);
865 return (PyObject
*)v
;
868 /* Create a new long int object from a C unsigned PY_LONG_LONG int. */
871 PyLong_FromUnsignedLongLong(unsigned PY_LONG_LONG ival
)
874 unsigned PY_LONG_LONG t
;
877 /* Count the number of Python digits. */
878 t
= (unsigned PY_LONG_LONG
)ival
;
883 v
= _PyLong_New(ndigits
);
885 digit
*p
= v
->ob_digit
;
886 Py_SIZE(v
) = ndigits
;
888 *p
++ = (digit
)(ival
& PyLong_MASK
);
889 ival
>>= PyLong_SHIFT
;
892 return (PyObject
*)v
;
895 /* Create a new long int object from a C Py_ssize_t. */
898 PyLong_FromSsize_t(Py_ssize_t ival
)
900 Py_ssize_t bytes
= ival
;
902 return _PyLong_FromByteArray(
903 (unsigned char *)&bytes
,
904 SIZEOF_SIZE_T
, IS_LITTLE_ENDIAN
, 1);
907 /* Create a new long int object from a C size_t. */
910 PyLong_FromSize_t(size_t ival
)
914 return _PyLong_FromByteArray(
915 (unsigned char *)&bytes
,
916 SIZEOF_SIZE_T
, IS_LITTLE_ENDIAN
, 0);
919 /* Get a C PY_LONG_LONG int from a long int object.
920 Return -1 and set an error if overflow occurs. */
923 PyLong_AsLongLong(PyObject
*vv
)
930 PyErr_BadInternalCall();
933 if (!PyLong_Check(vv
)) {
937 return (PY_LONG_LONG
)PyInt_AsLong(vv
);
938 if ((nb
= vv
->ob_type
->tp_as_number
) == NULL
||
939 nb
->nb_int
== NULL
) {
940 PyErr_SetString(PyExc_TypeError
, "an integer is required");
943 io
= (*nb
->nb_int
) (vv
);
946 if (PyInt_Check(io
)) {
947 bytes
= PyInt_AsLong(io
);
951 if (PyLong_Check(io
)) {
952 bytes
= PyLong_AsLongLong(io
);
957 PyErr_SetString(PyExc_TypeError
, "integer conversion failed");
961 res
= _PyLong_AsByteArray(
962 (PyLongObject
*)vv
, (unsigned char *)&bytes
,
963 SIZEOF_LONG_LONG
, IS_LITTLE_ENDIAN
, 1);
965 /* Plan 9 can't handle PY_LONG_LONG in ? : expressions */
967 return (PY_LONG_LONG
)-1;
972 /* Get a C unsigned PY_LONG_LONG int from a long int object.
973 Return -1 and set an error if overflow occurs. */
975 unsigned PY_LONG_LONG
976 PyLong_AsUnsignedLongLong(PyObject
*vv
)
978 unsigned PY_LONG_LONG bytes
;
982 if (vv
== NULL
|| !PyLong_Check(vv
)) {
983 PyErr_BadInternalCall();
984 return (unsigned PY_LONG_LONG
)-1;
987 res
= _PyLong_AsByteArray(
988 (PyLongObject
*)vv
, (unsigned char *)&bytes
,
989 SIZEOF_LONG_LONG
, IS_LITTLE_ENDIAN
, 0);
991 /* Plan 9 can't handle PY_LONG_LONG in ? : expressions */
993 return (unsigned PY_LONG_LONG
)res
;
998 /* Get a C unsigned long int from a long int object, ignoring the high bits.
999 Returns -1 and sets an error condition if an error occurs. */
1001 unsigned PY_LONG_LONG
1002 PyLong_AsUnsignedLongLongMask(PyObject
*vv
)
1004 register PyLongObject
*v
;
1005 unsigned PY_LONG_LONG x
;
1009 if (vv
== NULL
|| !PyLong_Check(vv
)) {
1010 PyErr_BadInternalCall();
1011 return (unsigned long) -1;
1013 v
= (PyLongObject
*)vv
;
1022 x
= (x
<< PyLong_SHIFT
) | v
->ob_digit
[i
];
1026 #undef IS_LITTLE_ENDIAN
1028 #endif /* HAVE_LONG_LONG */
1032 convert_binop(PyObject
*v
, PyObject
*w
, PyLongObject
**a
, PyLongObject
**b
) {
1033 if (PyLong_Check(v
)) {
1034 *a
= (PyLongObject
*) v
;
1037 else if (PyInt_Check(v
)) {
1038 *a
= (PyLongObject
*) PyLong_FromLong(PyInt_AS_LONG(v
));
1043 if (PyLong_Check(w
)) {
1044 *b
= (PyLongObject
*) w
;
1047 else if (PyInt_Check(w
)) {
1048 *b
= (PyLongObject
*) PyLong_FromLong(PyInt_AS_LONG(w
));
1057 #define CONVERT_BINOP(v, w, a, b) \
1058 if (!convert_binop(v, w, a, b)) { \
1059 Py_INCREF(Py_NotImplemented); \
1060 return Py_NotImplemented; \
1063 /* bits_in_digit(d) returns the unique integer k such that 2**(k-1) <= d <
1064 2**k if d is nonzero, else 0. */
1066 static const unsigned char BitLengthTable
[32] = {
1067 0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
1068 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
1072 bits_in_digit(digit d
)
1079 d_bits
+= (int)BitLengthTable
[d
];
1083 /* x[0:m] and y[0:n] are digit vectors, LSD first, m >= n required. x[0:n]
1084 * is modified in place, by adding y to it. Carries are propagated as far as
1085 * x[m-1], and the remaining carry (0 or 1) is returned.
1088 v_iadd(digit
*x
, Py_ssize_t m
, digit
*y
, Py_ssize_t n
)
1094 for (i
= 0; i
< n
; ++i
) {
1095 carry
+= x
[i
] + y
[i
];
1096 x
[i
] = carry
& PyLong_MASK
;
1097 carry
>>= PyLong_SHIFT
;
1098 assert((carry
& 1) == carry
);
1100 for (; carry
&& i
< m
; ++i
) {
1102 x
[i
] = carry
& PyLong_MASK
;
1103 carry
>>= PyLong_SHIFT
;
1104 assert((carry
& 1) == carry
);
1109 /* x[0:m] and y[0:n] are digit vectors, LSD first, m >= n required. x[0:n]
1110 * is modified in place, by subtracting y from it. Borrows are propagated as
1111 * far as x[m-1], and the remaining borrow (0 or 1) is returned.
1114 v_isub(digit
*x
, Py_ssize_t m
, digit
*y
, Py_ssize_t n
)
1120 for (i
= 0; i
< n
; ++i
) {
1121 borrow
= x
[i
] - y
[i
] - borrow
;
1122 x
[i
] = borrow
& PyLong_MASK
;
1123 borrow
>>= PyLong_SHIFT
;
1124 borrow
&= 1; /* keep only 1 sign bit */
1126 for (; borrow
&& i
< m
; ++i
) {
1127 borrow
= x
[i
] - borrow
;
1128 x
[i
] = borrow
& PyLong_MASK
;
1129 borrow
>>= PyLong_SHIFT
;
1135 /* Shift digit vector a[0:m] d bits left, with 0 <= d < PyLong_SHIFT. Put
1136 * result in z[0:m], and return the d bits shifted out of the top.
1139 v_lshift(digit
*z
, digit
*a
, Py_ssize_t m
, int d
)
1144 assert(0 <= d
&& d
< PyLong_SHIFT
);
1145 for (i
=0; i
< m
; i
++) {
1146 twodigits acc
= (twodigits
)a
[i
] << d
| carry
;
1147 z
[i
] = (digit
)acc
& PyLong_MASK
;
1148 carry
= (digit
)(acc
>> PyLong_SHIFT
);
1153 /* Shift digit vector a[0:m] d bits right, with 0 <= d < PyLong_SHIFT. Put
1154 * result in z[0:m], and return the d bits shifted out of the bottom.
1157 v_rshift(digit
*z
, digit
*a
, Py_ssize_t m
, int d
)
1161 digit mask
= ((digit
)1 << d
) - 1U;
1163 assert(0 <= d
&& d
< PyLong_SHIFT
);
1164 for (i
=m
; i
-- > 0;) {
1165 twodigits acc
= (twodigits
)carry
<< PyLong_SHIFT
| a
[i
];
1166 carry
= (digit
)acc
& mask
;
1167 z
[i
] = (digit
)(acc
>> d
);
1172 /* Divide long pin, w/ size digits, by non-zero digit n, storing quotient
1173 in pout, and returning the remainder. pin and pout point at the LSD.
1174 It's OK for pin == pout on entry, which saves oodles of mallocs/frees in
1175 _PyLong_Format, but that should be done with great care since longs are
1179 inplace_divrem1(digit
*pout
, digit
*pin
, Py_ssize_t size
, digit n
)
1183 assert(n
> 0 && n
<= PyLong_MASK
);
1186 while (--size
>= 0) {
1188 rem
= (rem
<< PyLong_SHIFT
) | *--pin
;
1189 *--pout
= hi
= (digit
)(rem
/ n
);
1190 rem
-= (twodigits
)hi
* n
;
1195 /* Divide a long integer by a digit, returning both the quotient
1196 (as function result) and the remainder (through *prem).
1197 The sign of a is ignored; n should not be zero. */
1199 static PyLongObject
*
1200 divrem1(PyLongObject
*a
, digit n
, digit
*prem
)
1202 const Py_ssize_t size
= ABS(Py_SIZE(a
));
1205 assert(n
> 0 && n
<= PyLong_MASK
);
1206 z
= _PyLong_New(size
);
1209 *prem
= inplace_divrem1(z
->ob_digit
, a
->ob_digit
, size
, n
);
1210 return long_normalize(z
);
1213 /* Convert a long integer to a base 10 string. Returns a new non-shared
1214 string. (Return value is non-shared so that callers can modify the
1215 returned value if necessary.) */
1218 long_to_decimal_string(PyObject
*aa
, int addL
)
1220 PyLongObject
*scratch
, *a
;
1222 Py_ssize_t size
, strlen
, size_a
, i
, j
;
1223 digit
*pout
, *pin
, rem
, tenpow
;
1227 a
= (PyLongObject
*)aa
;
1228 if (a
== NULL
|| !PyLong_Check(a
)) {
1229 PyErr_BadInternalCall();
1232 size_a
= ABS(Py_SIZE(a
));
1233 negative
= Py_SIZE(a
) < 0;
1235 /* quick and dirty upper bound for the number of digits
1236 required to express a in base _PyLong_DECIMAL_BASE:
1238 #digits = 1 + floor(log2(a) / log2(_PyLong_DECIMAL_BASE))
1240 But log2(a) < size_a * PyLong_SHIFT, and
1241 log2(_PyLong_DECIMAL_BASE) = log2(10) * _PyLong_DECIMAL_SHIFT
1242 > 3 * _PyLong_DECIMAL_SHIFT
1244 if (size_a
> PY_SSIZE_T_MAX
/ PyLong_SHIFT
) {
1245 PyErr_SetString(PyExc_OverflowError
,
1246 "long is too large to format");
1249 /* the expression size_a * PyLong_SHIFT is now safe from overflow */
1250 size
= 1 + size_a
* PyLong_SHIFT
/ (3 * _PyLong_DECIMAL_SHIFT
);
1251 scratch
= _PyLong_New(size
);
1252 if (scratch
== NULL
)
1255 /* convert array of base _PyLong_BASE digits in pin to an array of
1256 base _PyLong_DECIMAL_BASE digits in pout, following Knuth (TAOCP,
1257 Volume 2 (3rd edn), section 4.4, Method 1b). */
1259 pout
= scratch
->ob_digit
;
1261 for (i
= size_a
; --i
>= 0; ) {
1263 for (j
= 0; j
< size
; j
++) {
1264 twodigits z
= (twodigits
)pout
[j
] << PyLong_SHIFT
| hi
;
1265 hi
= (digit
)(z
/ _PyLong_DECIMAL_BASE
);
1266 pout
[j
] = (digit
)(z
- (twodigits
)hi
*
1267 _PyLong_DECIMAL_BASE
);
1270 pout
[size
++] = hi
% _PyLong_DECIMAL_BASE
;
1271 hi
/= _PyLong_DECIMAL_BASE
;
1273 /* check for keyboard interrupt */
1279 /* pout should have at least one digit, so that the case when a = 0
1284 /* calculate exact length of output string, and allocate */
1285 strlen
= (addL
!= 0) + negative
+
1286 1 + (size
- 1) * _PyLong_DECIMAL_SHIFT
;
1289 while (rem
>= tenpow
) {
1293 str
= PyString_FromStringAndSize(NULL
, strlen
);
1299 /* fill the string right-to-left */
1300 p
= PyString_AS_STRING(str
) + strlen
;
1304 /* pout[0] through pout[size-2] contribute exactly
1305 _PyLong_DECIMAL_SHIFT digits each */
1306 for (i
=0; i
< size
- 1; i
++) {
1308 for (j
= 0; j
< _PyLong_DECIMAL_SHIFT
; j
++) {
1309 *--p
= '0' + rem
% 10;
1313 /* pout[size-1]: always produce at least one decimal digit */
1316 *--p
= '0' + rem
% 10;
1324 /* check we've counted correctly */
1325 assert(p
== PyString_AS_STRING(str
));
1327 return (PyObject
*)str
;
1330 /* Convert the long to a string object with given base,
1331 appending a base prefix of 0[box] if base is 2, 8 or 16.
1332 Add a trailing "L" if addL is non-zero.
1333 If newstyle is zero, then use the pre-2.6 behavior of octal having
1334 a leading "0", instead of the prefix "0o" */
1335 PyAPI_FUNC(PyObject
*)
1336 _PyLong_Format(PyObject
*aa
, int base
, int addL
, int newstyle
)
1338 register PyLongObject
*a
= (PyLongObject
*)aa
;
1339 PyStringObject
*str
;
1347 return long_to_decimal_string((PyObject
*)a
, addL
);
1349 if (a
== NULL
|| !PyLong_Check(a
)) {
1350 PyErr_BadInternalCall();
1353 assert(base
>= 2 && base
<= 36);
1354 size_a
= ABS(Py_SIZE(a
));
1356 /* Compute a rough upper bound for the length of the string */
1363 i
= 5 + (addL
? 1 : 0);
1364 /* ensure we don't get signed overflow in sz calculation */
1365 if (size_a
> (PY_SSIZE_T_MAX
- i
) / PyLong_SHIFT
) {
1366 PyErr_SetString(PyExc_OverflowError
,
1367 "long is too large to format");
1370 sz
= i
+ 1 + (size_a
* PyLong_SHIFT
- 1) / bits
;
1372 str
= (PyStringObject
*) PyString_FromStringAndSize((char *)0, sz
);
1375 p
= PyString_AS_STRING(str
) + sz
;
1382 if (a
->ob_size
== 0) {
1385 else if ((base
& (base
- 1)) == 0) {
1386 /* JRH: special case for power-of-2 bases */
1387 twodigits accum
= 0;
1388 int accumbits
= 0; /* # of bits in accum */
1389 int basebits
= 1; /* # of bits in base-1 */
1391 while ((i
>>= 1) > 1)
1394 for (i
= 0; i
< size_a
; ++i
) {
1395 accum
|= (twodigits
)a
->ob_digit
[i
] << accumbits
;
1396 accumbits
+= PyLong_SHIFT
;
1397 assert(accumbits
>= basebits
);
1399 char cdigit
= (char)(accum
& (base
- 1));
1400 cdigit
+= (cdigit
< 10) ? '0' : 'a'-10;
1401 assert(p
> PyString_AS_STRING(str
));
1403 accumbits
-= basebits
;
1405 } while (i
< size_a
-1 ? accumbits
>= basebits
:
1410 /* Not 0, and base not a power of 2. Divide repeatedly by
1411 base, but for speed use the highest power of base that
1413 Py_ssize_t size
= size_a
;
1414 digit
*pin
= a
->ob_digit
;
1415 PyLongObject
*scratch
;
1416 /* powbasw <- largest power of base that fits in a digit. */
1417 digit powbase
= base
; /* powbase == base ** power */
1420 twodigits newpow
= powbase
* (twodigits
)base
;
1421 if (newpow
>> PyLong_SHIFT
)
1422 /* doesn't fit in a digit */
1424 powbase
= (digit
)newpow
;
1428 /* Get a scratch area for repeated division. */
1429 scratch
= _PyLong_New(size
);
1430 if (scratch
== NULL
) {
1435 /* Repeatedly divide by powbase. */
1437 int ntostore
= power
;
1438 digit rem
= inplace_divrem1(scratch
->ob_digit
,
1439 pin
, size
, powbase
);
1440 pin
= scratch
->ob_digit
; /* no need to use a again */
1441 if (pin
[size
- 1] == 0)
1449 /* Break rem into digits. */
1450 assert(ntostore
> 0);
1452 digit nextrem
= (digit
)(rem
/ base
);
1453 char c
= (char)(rem
- nextrem
* base
);
1454 assert(p
> PyString_AS_STRING(str
));
1455 c
+= (c
< 10) ? '0' : 'a'-10;
1459 /* Termination is a bit delicate: must not
1460 store leading zeroes, so must get out if
1461 remaining quotient and rem are both 0. */
1462 } while (ntostore
&& (size
|| rem
));
1463 } while (size
!= 0);
1471 else if (base
== 8) {
1480 else if (base
== 16) {
1484 else if (base
!= 10) {
1486 *--p
= '0' + base
%10;
1488 *--p
= '0' + base
/10;
1492 if (p
!= PyString_AS_STRING(str
)) {
1493 char *q
= PyString_AS_STRING(str
);
1496 } while ((*q
++ = *p
++) != '\0');
1498 _PyString_Resize((PyObject
**)&str
,
1499 (Py_ssize_t
) (q
- PyString_AS_STRING(str
)));
1501 return (PyObject
*)str
;
/* Table of digit values for 8-bit string -> integer conversion.
 * '0' maps to 0, ..., '9' maps to 9.
 * 'a' and 'A' map to 10, ..., 'z' and 'Z' map to 35.
 * All other indices map to 37.
 * Note that when converting a base B string, a char c is a legitimate
 * base B digit iff _PyLong_DigitValue[Py_CHARMASK(c)] < B.
 */
int _PyLong_DigitValue[256] = {
    37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
    37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
    37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
    0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  37, 37, 37, 37, 37, 37,
    37, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
    25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 37, 37, 37, 37, 37,
    37, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
    25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 37, 37, 37, 37, 37,
    37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
    37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
    37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
    37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
    37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
    37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
    37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
    37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
};
1530 /* *str points to the first digit in a string of base `base` digits. base
1531 * is a power of 2 (2, 4, 8, 16, or 32). *str is set to point to the first
1532 * non-digit (which may be *str!). A normalized long is returned.
1533 * The point to this routine is that it takes time linear in the number of
1534 * string characters.
1536 static PyLongObject
*
1537 long_from_binary_base(char **str
, int base
)
1548 assert(base
>= 2 && base
<= 32 && (base
& (base
- 1)) == 0);
1550 for (bits_per_char
= -1; n
; ++bits_per_char
)
1552 /* n <- total # of bits needed, while setting p to end-of-string */
1553 while (_PyLong_DigitValue
[Py_CHARMASK(*p
)] < base
)
1556 /* n <- # of Python digits needed, = ceiling(n/PyLong_SHIFT). */
1557 n
= (p
- start
) * bits_per_char
+ PyLong_SHIFT
- 1;
1558 if (n
/ bits_per_char
< p
- start
) {
1559 PyErr_SetString(PyExc_ValueError
,
1560 "long string too large to convert");
1563 n
= n
/ PyLong_SHIFT
;
1567 /* Read string from right, and fill in long from left; i.e.,
1568 * from least to most significant in both.
1572 pdigit
= z
->ob_digit
;
1573 while (--p
>= start
) {
1574 int k
= _PyLong_DigitValue
[Py_CHARMASK(*p
)];
1575 assert(k
>= 0 && k
< base
);
1576 accum
|= (twodigits
)k
<< bits_in_accum
;
1577 bits_in_accum
+= bits_per_char
;
1578 if (bits_in_accum
>= PyLong_SHIFT
) {
1579 *pdigit
++ = (digit
)(accum
& PyLong_MASK
);
1580 assert(pdigit
- z
->ob_digit
<= n
);
1581 accum
>>= PyLong_SHIFT
;
1582 bits_in_accum
-= PyLong_SHIFT
;
1583 assert(bits_in_accum
< PyLong_SHIFT
);
1586 if (bits_in_accum
) {
1587 assert(bits_in_accum
<= PyLong_SHIFT
);
1588 *pdigit
++ = (digit
)accum
;
1589 assert(pdigit
- z
->ob_digit
<= n
);
1591 while (pdigit
- z
->ob_digit
< n
)
1593 return long_normalize(z
);
1597 PyLong_FromString(char *str
, char **pend
, int base
)
1600 char *start
, *orig_str
= str
;
1602 PyObject
*strobj
, *strrepr
;
1605 if ((base
!= 0 && base
< 2) || base
> 36) {
1606 PyErr_SetString(PyExc_ValueError
,
1607 "long() arg 2 must be >= 2 and <= 36");
1610 while (*str
!= '\0' && isspace(Py_CHARMASK(*str
)))
1614 else if (*str
== '-') {
1618 while (*str
!= '\0' && isspace(Py_CHARMASK(*str
)))
1621 /* No base given. Deduce the base from the contents
1625 else if (str
[1] == 'x' || str
[1] == 'X')
1627 else if (str
[1] == 'o' || str
[1] == 'O')
1629 else if (str
[1] == 'b' || str
[1] == 'B')
1632 /* "old" (C-style) octal literal, still valid in
1633 2.x, although illegal in 3.x */
1636 /* Whether or not we were deducing the base, skip leading chars
1638 if (str
[0] == '0' &&
1639 ((base
== 16 && (str
[1] == 'x' || str
[1] == 'X')) ||
1640 (base
== 8 && (str
[1] == 'o' || str
[1] == 'O')) ||
1641 (base
== 2 && (str
[1] == 'b' || str
[1] == 'B'))))
1645 if ((base
& (base
- 1)) == 0)
1646 z
= long_from_binary_base(&str
, base
);
1649 Binary bases can be converted in time linear in the number of digits, because
1650 Python's representation base is binary. Other bases (including decimal!) use
1651 the simple quadratic-time algorithm below, complicated by some speed tricks.
1653 First some math: the largest integer that can be expressed in N base-B digits
1654 is B**N-1. Consequently, if we have an N-digit input in base B, the worst-
1655 case number of Python digits needed to hold it is the smallest integer n s.t.
1657 PyLong_BASE**n-1 >= B**N-1 [or, adding 1 to both sides]
1658 PyLong_BASE**n >= B**N [taking logs to base PyLong_BASE]
1659 n >= log(B**N)/log(PyLong_BASE) = N * log(B)/log(PyLong_BASE)
1661 The static array log_base_PyLong_BASE[base] == log(base)/log(PyLong_BASE) so we can compute
1662 this quickly. A Python long with that much space is reserved near the start,
1663 and the result is computed into it.
1665 The input string is actually treated as being in base base**i (i.e., i digits
1666 are processed at a time), where two more static arrays hold:
1668 convwidth_base[base] = the largest integer i such that base**i <= PyLong_BASE
1669 convmultmax_base[base] = base ** convwidth_base[base]
1671 The first of these is the largest i such that i consecutive input digits
1672 must fit in a single Python digit. The second is effectively the input
1673 base we're really using.
1675 Viewing the input as a sequence <c0, c1, ..., c_n-1> of digits in base
1676 convmultmax_base[base], the result is "simply"
1678 (((c0*B + c1)*B + c2)*B + c3)*B + ... ))) + c_n-1
1680 where B = convmultmax_base[base].
1682 Error analysis: as above, the number of Python digits `n` needed is worst-
1685 n >= N * log(B)/log(PyLong_BASE)
1687 where `N` is the number of input digits in base `B`. This is computed via
1689 size_z = (Py_ssize_t)((scan - str) * log_base_PyLong_BASE[base]) + 1;
1691 below. Two numeric concerns are how much space this can waste, and whether
1692 the computed result can be too small. To be concrete, assume PyLong_BASE = 2**15,
1693 which is the default (and it's unlikely anyone changes that).
1695 Waste isn't a problem: provided the first input digit isn't 0, the difference
1696 between the worst-case input with N digits and the smallest input with N
1697 digits is about a factor of B, but B is small compared to PyLong_BASE so at most
1698 one allocated Python digit can remain unused on that count. If
1699 N*log(B)/log(PyLong_BASE) is mathematically an exact integer, then truncating that
1700 and adding 1 returns a result 1 larger than necessary. However, that can't
1701 happen: whenever B is a power of 2, long_from_binary_base() is called
1702 instead, and it's impossible for B**i to be an integer power of 2**15 when
1703 B is not a power of 2 (i.e., it's impossible for N*log(B)/log(PyLong_BASE) to be
1704 an exact integer when B is not a power of 2, since B**i has a prime factor
1705 other than 2 in that case, but (2**15)**j's only prime factor is 2).
1707 The computed result can be too small if the true value of N*log(B)/log(PyLong_BASE)
1708 is a little bit larger than an exact integer, but due to roundoff errors (in
1709 computing log(B), log(PyLong_BASE), their quotient, and/or multiplying that by N)
1710 yields a numeric result a little less than that integer. Unfortunately, "how
1711 close can a transcendental function get to an integer over some range?"
1712 questions are generally theoretically intractable. Computer analysis via
1713 continued fractions is practical: expand log(B)/log(PyLong_BASE) via continued
1714 fractions, giving a sequence i/j of "the best" rational approximations. Then
1715 j*log(B)/log(PyLong_BASE) is approximately equal to (the integer) i. This shows that
1716 we can get very close to being in trouble, but very rarely. For example,
1717 76573 is a denominator in one of the continued-fraction approximations to
1718 log(10)/log(2**15), and indeed:
1720 >>> log(10)/log(2**15)*76573
1723 is very close to an integer. If we were working with IEEE single-precision,
1724 rounding errors could kill us. Finding worst cases in IEEE double-precision
1725 requires better-than-double-precision log() functions, and Tim didn't bother.
1726 Instead the code checks to see whether the allocated space is enough as each
1727 new Python digit is added, and copies the whole thing to a larger long if not.
1728 This should happen extremely rarely, and in fact I don't have a test case
1729 that triggers it(!). Instead the code was tested by artificially allocating
1730 just 1 digit at the start, so that the copying code was exercised for every
1731 digit beyond the first.
1733 register twodigits c
; /* current input character */
1737 twodigits convmultmax
, convmult
;
1741 static double log_base_PyLong_BASE
[37] = {0.0e0
,};
1742 static int convwidth_base
[37] = {0,};
1743 static twodigits convmultmax_base
[37] = {0,};
1745 if (log_base_PyLong_BASE
[base
] == 0.0) {
1746 twodigits convmax
= base
;
1749 log_base_PyLong_BASE
[base
] = log((double)base
) /
1750 log((double)PyLong_BASE
);
1752 twodigits next
= convmax
* base
;
1753 if (next
> PyLong_BASE
)
1758 convmultmax_base
[base
] = convmax
;
1760 convwidth_base
[base
] = i
;
1763 /* Find length of the string of numeric characters. */
1765 while (_PyLong_DigitValue
[Py_CHARMASK(*scan
)] < base
)
1768 /* Create a long object that can contain the largest possible
1769 * integer with this base and length. Note that there's no
1770 * need to initialize z->ob_digit -- no slot is read up before
1771 * being stored into.
1773 size_z
= (Py_ssize_t
)((scan
- str
) * log_base_PyLong_BASE
[base
]) + 1;
1774 /* Uncomment next line to test exceedingly rare copy code */
1777 z
= _PyLong_New(size_z
);
1782 /* `convwidth` consecutive input digits are treated as a single
1783 * digit in base `convmultmax`.
1785 convwidth
= convwidth_base
[base
];
1786 convmultmax
= convmultmax_base
[base
];
1789 while (str
< scan
) {
1790 /* grab up to convwidth digits from the input string */
1791 c
= (digit
)_PyLong_DigitValue
[Py_CHARMASK(*str
++)];
1792 for (i
= 1; i
< convwidth
&& str
!= scan
; ++i
, ++str
) {
1793 c
= (twodigits
)(c
* base
+
1794 _PyLong_DigitValue
[Py_CHARMASK(*str
)]);
1795 assert(c
< PyLong_BASE
);
1798 convmult
= convmultmax
;
1799 /* Calculate the shift only if we couldn't get
1802 if (i
!= convwidth
) {
1808 /* Multiply z by convmult, and add c. */
1810 pzstop
= pz
+ Py_SIZE(z
);
1811 for (; pz
< pzstop
; ++pz
) {
1812 c
+= (twodigits
)*pz
* convmult
;
1813 *pz
= (digit
)(c
& PyLong_MASK
);
1816 /* carry off the current end? */
1818 assert(c
< PyLong_BASE
);
1819 if (Py_SIZE(z
) < size_z
) {
1825 /* Extremely rare. Get more space. */
1826 assert(Py_SIZE(z
) == size_z
);
1827 tmp
= _PyLong_New(size_z
+ 1);
1832 memcpy(tmp
->ob_digit
,
1834 sizeof(digit
) * size_z
);
1837 z
->ob_digit
[size_z
] = (digit
)c
;
1848 Py_SIZE(z
) = -(Py_SIZE(z
));
1849 if (*str
== 'L' || *str
== 'l')
1851 while (*str
&& isspace(Py_CHARMASK(*str
)))
1857 return (PyObject
*) z
;
1861 slen
= strlen(orig_str
) < 200 ? strlen(orig_str
) : 200;
1862 strobj
= PyString_FromStringAndSize(orig_str
, slen
);
1865 strrepr
= PyObject_Repr(strobj
);
1867 if (strrepr
== NULL
)
1869 PyErr_Format(PyExc_ValueError
,
1870 "invalid literal for long() with base %d: %s",
1871 base
, PyString_AS_STRING(strrepr
));
#ifdef Py_USING_UNICODE
/* Convert a Unicode digit string to a long: encode `u` into a scratch
   byte buffer via PyUnicode_EncodeDecimal, then defer to
   PyLong_FromString.  Returns a new reference, or NULL with an
   exception set. */
PyObject *
PyLong_FromUnicode(Py_UNICODE *u, Py_ssize_t length, int base)
{
    PyObject *result;
    char *buffer = (char *)PyMem_MALLOC(length+1);

    if (buffer == NULL)
        return PyErr_NoMemory();

    if (PyUnicode_EncodeDecimal(u, length, buffer, NULL)) {
        PyMem_FREE(buffer);
        return NULL;
    }
    result = PyLong_FromString(buffer, NULL, base);
    PyMem_FREE(buffer);
    return result;
}
#endif
1897 static PyLongObject
*x_divrem
1898 (PyLongObject
*, PyLongObject
*, PyLongObject
**);
1899 static PyObject
*long_long(PyObject
*v
);
1901 /* Long division with remainder, top-level routine */
1904 long_divrem(PyLongObject
*a
, PyLongObject
*b
,
1905 PyLongObject
**pdiv
, PyLongObject
**prem
)
1907 Py_ssize_t size_a
= ABS(Py_SIZE(a
)), size_b
= ABS(Py_SIZE(b
));
1911 PyErr_SetString(PyExc_ZeroDivisionError
,
1912 "long division or modulo by zero");
1915 if (size_a
< size_b
||
1916 (size_a
== size_b
&&
1917 a
->ob_digit
[size_a
-1] < b
->ob_digit
[size_b
-1])) {
1919 *pdiv
= _PyLong_New(0);
1923 *prem
= (PyLongObject
*) a
;
1928 z
= divrem1(a
, b
->ob_digit
[0], &rem
);
1931 *prem
= (PyLongObject
*) PyLong_FromLong((long)rem
);
1932 if (*prem
== NULL
) {
1938 z
= x_divrem(a
, b
, prem
);
1943 The quotient z has the sign of a*b;
1944 the remainder r has the sign of a,
1946 if ((a
->ob_size
< 0) != (b
->ob_size
< 0))
1947 z
->ob_size
= -(z
->ob_size
);
1948 if (a
->ob_size
< 0 && (*prem
)->ob_size
!= 0)
1949 (*prem
)->ob_size
= -((*prem
)->ob_size
);
1954 /* Unsigned long division with remainder -- the algorithm. The arguments v1
1955 and w1 should satisfy 2 <= ABS(Py_SIZE(w1)) <= ABS(Py_SIZE(v1)). */
1957 static PyLongObject
*
1958 x_divrem(PyLongObject
*v1
, PyLongObject
*w1
, PyLongObject
**prem
)
1960 PyLongObject
*v
, *w
, *a
;
1961 Py_ssize_t i
, k
, size_v
, size_w
;
1963 digit wm1
, wm2
, carry
, q
, r
, vtop
, *v0
, *vk
, *w0
, *ak
;
1968 /* We follow Knuth [The Art of Computer Programming, Vol. 2 (3rd
1969 edn.), section 4.3.1, Algorithm D], except that we don't explicitly
1970 handle the special case when the initial estimate q for a quotient
1971 digit is >= PyLong_BASE: the max value for q is PyLong_BASE+1, and
1972 that won't overflow a digit. */
1974 /* allocate space; w will also be used to hold the final remainder */
1975 size_v
= ABS(Py_SIZE(v1
));
1976 size_w
= ABS(Py_SIZE(w1
));
1977 assert(size_v
>= size_w
&& size_w
>= 2); /* Assert checks by div() */
1978 v
= _PyLong_New(size_v
+1);
1983 w
= _PyLong_New(size_w
);
1990 /* normalize: shift w1 left so that its top digit is >= PyLong_BASE/2.
1991 shift v1 left by the same amount. Results go into w and v. */
1992 d
= PyLong_SHIFT
- bits_in_digit(w1
->ob_digit
[size_w
-1]);
1993 carry
= v_lshift(w
->ob_digit
, w1
->ob_digit
, size_w
, d
);
1995 carry
= v_lshift(v
->ob_digit
, v1
->ob_digit
, size_v
, d
);
1996 if (carry
!= 0 || v
->ob_digit
[size_v
-1] >= w
->ob_digit
[size_w
-1]) {
1997 v
->ob_digit
[size_v
] = carry
;
2001 /* Now v->ob_digit[size_v-1] < w->ob_digit[size_w-1], so quotient has
2002 at most (and usually exactly) k = size_v - size_w digits. */
2003 k
= size_v
- size_w
;
2016 for (vk
= v0
+k
, ak
= a
->ob_digit
+ k
; vk
-- > v0
;) {
2017 /* inner loop: divide vk[0:size_w+1] by w0[0:size_w], giving
2018 single-digit quotient q, remainder in vk[0:size_w]. */
2028 /* estimate quotient digit q; may overestimate by 1 (rare) */
2030 assert(vtop
<= wm1
);
2031 vv
= ((twodigits
)vtop
<< PyLong_SHIFT
) | vk
[size_w
-1];
2032 q
= (digit
)(vv
/ wm1
);
2033 r
= (digit
)(vv
- (twodigits
)wm1
* q
); /* r = vv % wm1 */
2034 while ((twodigits
)wm2
* q
> (((twodigits
)r
<< PyLong_SHIFT
)
2038 if (r
>= PyLong_BASE
)
2041 assert(q
<= PyLong_BASE
);
2043 /* subtract q*w0[0:size_w] from vk[0:size_w+1] */
2045 for (i
= 0; i
< size_w
; ++i
) {
2046 /* invariants: -PyLong_BASE <= -q <= zhi <= 0;
2047 -PyLong_BASE * q <= z < PyLong_BASE */
2048 z
= (sdigit
)vk
[i
] + zhi
-
2049 (stwodigits
)q
* (stwodigits
)w0
[i
];
2050 vk
[i
] = (digit
)z
& PyLong_MASK
;
2051 zhi
= (sdigit
)Py_ARITHMETIC_RIGHT_SHIFT(stwodigits
,
2055 /* add w back if q was too large (this branch taken rarely) */
2056 assert((sdigit
)vtop
+ zhi
== -1 || (sdigit
)vtop
+ zhi
== 0);
2057 if ((sdigit
)vtop
+ zhi
< 0) {
2059 for (i
= 0; i
< size_w
; ++i
) {
2060 carry
+= vk
[i
] + w0
[i
];
2061 vk
[i
] = carry
& PyLong_MASK
;
2062 carry
>>= PyLong_SHIFT
;
2067 /* store quotient digit */
2068 assert(q
< PyLong_BASE
);
2072 /* unshift remainder; we reuse w to store the result */
2073 carry
= v_rshift(w0
, v0
, size_w
, d
);
2077 *prem
= long_normalize(w
);
2078 return long_normalize(a
);
2081 /* For a nonzero PyLong a, express a in the form x * 2**e, with 0.5 <=
2082 abs(x) < 1.0 and e >= 0; return x and put e in *e. Here x is
2083 rounded to DBL_MANT_DIG significant bits using round-half-to-even.
2084 If a == 0, return 0.0 and set *e = 0. If the resulting exponent
2085 e is larger than PY_SSIZE_T_MAX, raise OverflowError and return
2088 /* attempt to define 2.0**DBL_MANT_DIG as a compile-time constant */
2089 #if DBL_MANT_DIG == 53
2090 #define EXP2_DBL_MANT_DIG 9007199254740992.0
2092 #define EXP2_DBL_MANT_DIG (ldexp(1.0, DBL_MANT_DIG))
2096 _PyLong_Frexp(PyLongObject
*a
, Py_ssize_t
*e
)
2098 Py_ssize_t a_size
, a_bits
, shift_digits
, shift_bits
, x_size
;
2099 /* See below for why x_digits is always large enough. */
2100 digit rem
, x_digits
[2 + (DBL_MANT_DIG
+ 1) / PyLong_SHIFT
];
2102 /* Correction term for round-half-to-even rounding. For a digit x,
2103 "x + half_even_correction[x & 7]" gives x rounded to the nearest
2104 multiple of 4, rounding ties to a multiple of 8. */
2105 static const int half_even_correction
[8] = {0, -1, -2, 1, 0, -1, 2, 1};
2107 a_size
= ABS(Py_SIZE(a
));
2109 /* Special case for 0: significand 0.0, exponent 0. */
2113 a_bits
= bits_in_digit(a
->ob_digit
[a_size
-1]);
2114 /* The following is an overflow-free version of the check
2115 "if ((a_size - 1) * PyLong_SHIFT + a_bits > PY_SSIZE_T_MAX) ..." */
2116 if (a_size
>= (PY_SSIZE_T_MAX
- 1) / PyLong_SHIFT
+ 1 &&
2117 (a_size
> (PY_SSIZE_T_MAX
- 1) / PyLong_SHIFT
+ 1 ||
2118 a_bits
> (PY_SSIZE_T_MAX
- 1) % PyLong_SHIFT
+ 1))
2120 a_bits
= (a_size
- 1) * PyLong_SHIFT
+ a_bits
;
2122 /* Shift the first DBL_MANT_DIG + 2 bits of a into x_digits[0:x_size]
2123 (shifting left if a_bits <= DBL_MANT_DIG + 2).
2125 Number of digits needed for result: write // for floor division.
2126 Then if shifting left, we end up using
2128 1 + a_size + (DBL_MANT_DIG + 2 - a_bits) // PyLong_SHIFT
2130 digits. If shifting right, we use
2132 a_size - (a_bits - DBL_MANT_DIG - 2) // PyLong_SHIFT
2134 digits. Using a_size = 1 + (a_bits - 1) // PyLong_SHIFT along with
2137 m // PyLong_SHIFT + n // PyLong_SHIFT <= (m + n) // PyLong_SHIFT
2138 m // PyLong_SHIFT - n // PyLong_SHIFT <=
2139 1 + (m - n - 1) // PyLong_SHIFT,
2141 valid for any integers m and n, we find that x_size satisfies
2143 x_size <= 2 + (DBL_MANT_DIG + 1) // PyLong_SHIFT
2147 if (a_bits
<= DBL_MANT_DIG
+ 2) {
2148 shift_digits
= (DBL_MANT_DIG
+ 2 - a_bits
) / PyLong_SHIFT
;
2149 shift_bits
= (DBL_MANT_DIG
+ 2 - a_bits
) % PyLong_SHIFT
;
2151 while (x_size
< shift_digits
)
2152 x_digits
[x_size
++] = 0;
2153 rem
= v_lshift(x_digits
+ x_size
, a
->ob_digit
, a_size
,
2156 x_digits
[x_size
++] = rem
;
2159 shift_digits
= (a_bits
- DBL_MANT_DIG
- 2) / PyLong_SHIFT
;
2160 shift_bits
= (a_bits
- DBL_MANT_DIG
- 2) % PyLong_SHIFT
;
2161 rem
= v_rshift(x_digits
, a
->ob_digit
+ shift_digits
,
2162 a_size
- shift_digits
, shift_bits
);
2163 x_size
= a_size
- shift_digits
;
2164 /* For correct rounding below, we need the least significant
2165 bit of x to be 'sticky' for this shift: if any of the bits
2166 shifted out was nonzero, we set the least significant bit
2171 while (shift_digits
> 0)
2172 if (a
->ob_digit
[--shift_digits
]) {
2177 assert(1 <= x_size
&& x_size
<= sizeof(x_digits
)/sizeof(digit
));
2179 /* Round, and convert to double. */
2180 x_digits
[0] += half_even_correction
[x_digits
[0] & 7];
2181 dx
= x_digits
[--x_size
];
2183 dx
= dx
* PyLong_BASE
+ x_digits
[--x_size
];
2185 /* Rescale; make correction if result is 1.0. */
2186 dx
/= 4.0 * EXP2_DBL_MANT_DIG
;
2188 if (a_bits
== PY_SSIZE_T_MAX
)
2195 return Py_SIZE(a
) < 0 ? -dx
: dx
;
2198 /* exponent > PY_SSIZE_T_MAX */
2199 PyErr_SetString(PyExc_OverflowError
,
2200 "huge integer: number of bits overflows a Py_ssize_t");
2205 /* Get a C double from a long int object. Rounds to the nearest double,
2206 using the round-half-to-even rule in the case of a tie. */
2209 PyLong_AsDouble(PyObject
*v
)
2211 Py_ssize_t exponent
;
2214 if (v
== NULL
|| !PyLong_Check(v
)) {
2215 PyErr_BadInternalCall();
2218 x
= _PyLong_Frexp((PyLongObject
*)v
, &exponent
);
2219 if ((x
== -1.0 && PyErr_Occurred()) || exponent
> DBL_MAX_EXP
) {
2220 PyErr_SetString(PyExc_OverflowError
,
2221 "long int too large to convert to float");
2224 return ldexp(x
, exponent
);
2230 long_dealloc(PyObject
*v
)
2232 Py_TYPE(v
)->tp_free(v
);
2236 long_repr(PyObject
*v
)
2238 return _PyLong_Format(v
, 10, 1, 0);
2242 long_str(PyObject
*v
)
2244 return _PyLong_Format(v
, 10, 0, 0);
2248 long_compare(PyLongObject
*a
, PyLongObject
*b
)
2252 if (Py_SIZE(a
) != Py_SIZE(b
)) {
2253 if (ABS(Py_SIZE(a
)) == 0 && ABS(Py_SIZE(b
)) == 0)
2256 sign
= Py_SIZE(a
) - Py_SIZE(b
);
2259 Py_ssize_t i
= ABS(Py_SIZE(a
));
2260 while (--i
>= 0 && a
->ob_digit
[i
] == b
->ob_digit
[i
])
2265 sign
= (sdigit
)a
->ob_digit
[i
] - (sdigit
)b
->ob_digit
[i
];
2270 return sign
< 0 ? -1 : sign
> 0 ? 1 : 0;
2274 long_hash(PyLongObject
*v
)
2280 /* This is designed so that Python ints and longs with the
2281 same value hash to the same value, otherwise comparisons
2282 of mapping keys will turn out weird */
2290 /* The following loop produces a C unsigned long x such that x is
2291 congruent to the absolute value of v modulo ULONG_MAX. The
2292 resulting x is nonzero if and only if v is. */
2294 /* Force a native long #-bits (32 or 64) circular shift */
2295 x
= (x
>> (8*SIZEOF_LONG
-PyLong_SHIFT
)) | (x
<< PyLong_SHIFT
);
2296 x
+= v
->ob_digit
[i
];
2297 /* If the addition above overflowed we compensate by
2298 incrementing. This preserves the value modulo
2300 if (x
< v
->ob_digit
[i
])
2304 if (x
== (unsigned long)-1)
2305 x
= (unsigned long)-2;
2310 /* Add the absolute values of two long integers. */
2312 static PyLongObject
*
2313 x_add(PyLongObject
*a
, PyLongObject
*b
)
2315 Py_ssize_t size_a
= ABS(Py_SIZE(a
)), size_b
= ABS(Py_SIZE(b
));
2320 /* Ensure a is the larger of the two: */
2321 if (size_a
< size_b
) {
2322 { PyLongObject
*temp
= a
; a
= b
; b
= temp
; }
2323 { Py_ssize_t size_temp
= size_a
;
2325 size_b
= size_temp
; }
2327 z
= _PyLong_New(size_a
+1);
2330 for (i
= 0; i
< size_b
; ++i
) {
2331 carry
+= a
->ob_digit
[i
] + b
->ob_digit
[i
];
2332 z
->ob_digit
[i
] = carry
& PyLong_MASK
;
2333 carry
>>= PyLong_SHIFT
;
2335 for (; i
< size_a
; ++i
) {
2336 carry
+= a
->ob_digit
[i
];
2337 z
->ob_digit
[i
] = carry
& PyLong_MASK
;
2338 carry
>>= PyLong_SHIFT
;
2340 z
->ob_digit
[i
] = carry
;
2341 return long_normalize(z
);
2344 /* Subtract the absolute values of two integers. */
2346 static PyLongObject
*
2347 x_sub(PyLongObject
*a
, PyLongObject
*b
)
2349 Py_ssize_t size_a
= ABS(Py_SIZE(a
)), size_b
= ABS(Py_SIZE(b
));
2355 /* Ensure a is the larger of the two: */
2356 if (size_a
< size_b
) {
2358 { PyLongObject
*temp
= a
; a
= b
; b
= temp
; }
2359 { Py_ssize_t size_temp
= size_a
;
2361 size_b
= size_temp
; }
2363 else if (size_a
== size_b
) {
2364 /* Find highest digit where a and b differ: */
2366 while (--i
>= 0 && a
->ob_digit
[i
] == b
->ob_digit
[i
])
2369 return _PyLong_New(0);
2370 if (a
->ob_digit
[i
] < b
->ob_digit
[i
]) {
2372 { PyLongObject
*temp
= a
; a
= b
; b
= temp
; }
2374 size_a
= size_b
= i
+1;
2376 z
= _PyLong_New(size_a
);
2379 for (i
= 0; i
< size_b
; ++i
) {
2380 /* The following assumes unsigned arithmetic
2381 works module 2**N for some N>PyLong_SHIFT. */
2382 borrow
= a
->ob_digit
[i
] - b
->ob_digit
[i
] - borrow
;
2383 z
->ob_digit
[i
] = borrow
& PyLong_MASK
;
2384 borrow
>>= PyLong_SHIFT
;
2385 borrow
&= 1; /* Keep only one sign bit */
2387 for (; i
< size_a
; ++i
) {
2388 borrow
= a
->ob_digit
[i
] - borrow
;
2389 z
->ob_digit
[i
] = borrow
& PyLong_MASK
;
2390 borrow
>>= PyLong_SHIFT
;
2391 borrow
&= 1; /* Keep only one sign bit */
2393 assert(borrow
== 0);
2395 z
->ob_size
= -(z
->ob_size
);
2396 return long_normalize(z
);
2400 long_add(PyLongObject
*v
, PyLongObject
*w
)
2402 PyLongObject
*a
, *b
, *z
;
2404 CONVERT_BINOP((PyObject
*)v
, (PyObject
*)w
, &a
, &b
);
2406 if (a
->ob_size
< 0) {
2407 if (b
->ob_size
< 0) {
2409 if (z
!= NULL
&& z
->ob_size
!= 0)
2410 z
->ob_size
= -(z
->ob_size
);
2423 return (PyObject
*)z
;
2427 long_sub(PyLongObject
*v
, PyLongObject
*w
)
2429 PyLongObject
*a
, *b
, *z
;
2431 CONVERT_BINOP((PyObject
*)v
, (PyObject
*)w
, &a
, &b
);
2433 if (a
->ob_size
< 0) {
2438 if (z
!= NULL
&& z
->ob_size
!= 0)
2439 z
->ob_size
= -(z
->ob_size
);
2449 return (PyObject
*)z
;
2452 /* Grade school multiplication, ignoring the signs.
2453 * Returns the absolute value of the product, or NULL if error.
2455 static PyLongObject
*
2456 x_mul(PyLongObject
*a
, PyLongObject
*b
)
2459 Py_ssize_t size_a
= ABS(Py_SIZE(a
));
2460 Py_ssize_t size_b
= ABS(Py_SIZE(b
));
2463 z
= _PyLong_New(size_a
+ size_b
);
2467 memset(z
->ob_digit
, 0, Py_SIZE(z
) * sizeof(digit
));
2469 /* Efficient squaring per HAC, Algorithm 14.16:
2470 * http://www.cacr.math.uwaterloo.ca/hac/about/chap14.pdf
2471 * Gives slightly less than a 2x speedup when a == b,
2472 * via exploiting that each entry in the multiplication
2473 * pyramid appears twice (except for the size_a squares).
2475 for (i
= 0; i
< size_a
; ++i
) {
2477 twodigits f
= a
->ob_digit
[i
];
2478 digit
*pz
= z
->ob_digit
+ (i
<< 1);
2479 digit
*pa
= a
->ob_digit
+ i
+ 1;
2480 digit
*paend
= a
->ob_digit
+ size_a
;
2487 carry
= *pz
+ f
* f
;
2488 *pz
++ = (digit
)(carry
& PyLong_MASK
);
2489 carry
>>= PyLong_SHIFT
;
2490 assert(carry
<= PyLong_MASK
);
2492 /* Now f is added in twice in each column of the
2493 * pyramid it appears. Same as adding f<<1 once.
2496 while (pa
< paend
) {
2497 carry
+= *pz
+ *pa
++ * f
;
2498 *pz
++ = (digit
)(carry
& PyLong_MASK
);
2499 carry
>>= PyLong_SHIFT
;
2500 assert(carry
<= (PyLong_MASK
<< 1));
2504 *pz
++ = (digit
)(carry
& PyLong_MASK
);
2505 carry
>>= PyLong_SHIFT
;
2508 *pz
+= (digit
)(carry
& PyLong_MASK
);
2509 assert((carry
>> PyLong_SHIFT
) == 0);
2512 else { /* a is not the same as b -- gradeschool long mult */
2513 for (i
= 0; i
< size_a
; ++i
) {
2514 twodigits carry
= 0;
2515 twodigits f
= a
->ob_digit
[i
];
2516 digit
*pz
= z
->ob_digit
+ i
;
2517 digit
*pb
= b
->ob_digit
;
2518 digit
*pbend
= b
->ob_digit
+ size_b
;
2525 while (pb
< pbend
) {
2526 carry
+= *pz
+ *pb
++ * f
;
2527 *pz
++ = (digit
)(carry
& PyLong_MASK
);
2528 carry
>>= PyLong_SHIFT
;
2529 assert(carry
<= PyLong_MASK
);
2532 *pz
+= (digit
)(carry
& PyLong_MASK
);
2533 assert((carry
>> PyLong_SHIFT
) == 0);
2536 return long_normalize(z
);
2539 /* A helper for Karatsuba multiplication (k_mul).
2540 Takes a long "n" and an integer "size" representing the place to
2541 split, and sets low and high such that abs(n) == (high << size) + low,
2542 viewing the shift as being by digits. The sign bit is ignored, and
2543 the return values are >= 0.
2544 Returns 0 on success, -1 on failure.
2547 kmul_split(PyLongObject
*n
, Py_ssize_t size
, PyLongObject
**high
, PyLongObject
**low
)
2549 PyLongObject
*hi
, *lo
;
2550 Py_ssize_t size_lo
, size_hi
;
2551 const Py_ssize_t size_n
= ABS(Py_SIZE(n
));
2553 size_lo
= MIN(size_n
, size
);
2554 size_hi
= size_n
- size_lo
;
2556 if ((hi
= _PyLong_New(size_hi
)) == NULL
)
2558 if ((lo
= _PyLong_New(size_lo
)) == NULL
) {
2563 memcpy(lo
->ob_digit
, n
->ob_digit
, size_lo
* sizeof(digit
));
2564 memcpy(hi
->ob_digit
, n
->ob_digit
+ size_lo
, size_hi
* sizeof(digit
));
2566 *high
= long_normalize(hi
);
2567 *low
= long_normalize(lo
);
2571 static PyLongObject
*k_lopsided_mul(PyLongObject
*a
, PyLongObject
*b
);
2573 /* Karatsuba multiplication. Ignores the input signs, and returns the
2574 * absolute value of the product (or NULL if error).
2575 * See Knuth Vol. 2 Chapter 4.3.3 (Pp. 294-295).
2577 static PyLongObject
*
2578 k_mul(PyLongObject
*a
, PyLongObject
*b
)
2580 Py_ssize_t asize
= ABS(Py_SIZE(a
));
2581 Py_ssize_t bsize
= ABS(Py_SIZE(b
));
2582 PyLongObject
*ah
= NULL
;
2583 PyLongObject
*al
= NULL
;
2584 PyLongObject
*bh
= NULL
;
2585 PyLongObject
*bl
= NULL
;
2586 PyLongObject
*ret
= NULL
;
2587 PyLongObject
*t1
, *t2
, *t3
;
2588 Py_ssize_t shift
; /* the number of digits we split off */
2591 /* (ah*X+al)(bh*X+bl) = ah*bh*X*X + (ah*bl + al*bh)*X + al*bl
2592 * Let k = (ah+al)*(bh+bl) = ah*bl + al*bh + ah*bh + al*bl
2593 * Then the original product is
2594 * ah*bh*X*X + (k - ah*bh - al*bl)*X + al*bl
2595 * By picking X to be a power of 2, "*X" is just shifting, and it's
2596 * been reduced to 3 multiplies on numbers half the size.
2599 /* We want to split based on the larger number; fiddle so that b
2602 if (asize
> bsize
) {
2612 /* Use gradeschool math when either number is too small. */
2613 i
= a
== b
? KARATSUBA_SQUARE_CUTOFF
: KARATSUBA_CUTOFF
;
2616 return _PyLong_New(0);
2621 /* If a is small compared to b, splitting on b gives a degenerate
2622 * case with ah==0, and Karatsuba may be (even much) less efficient
2623 * than "grade school" then. However, we can still win, by viewing
2624 * b as a string of "big digits", each of width a->ob_size. That
2625 * leads to a sequence of balanced calls to k_mul.
2627 if (2 * asize
<= bsize
)
2628 return k_lopsided_mul(a
, b
);
2630 /* Split a & b into hi & lo pieces. */
2632 if (kmul_split(a
, shift
, &ah
, &al
) < 0) goto fail
;
2633 assert(Py_SIZE(ah
) > 0); /* the split isn't degenerate */
2641 else if (kmul_split(b
, shift
, &bh
, &bl
) < 0) goto fail
;
2644 * 1. Allocate result space (asize + bsize digits: that's always
2646 * 2. Compute ah*bh, and copy into result at 2*shift.
2647 * 3. Compute al*bl, and copy into result at 0. Note that this
2648 * can't overlap with #2.
2649 * 4. Subtract al*bl from the result, starting at shift. This may
2650 * underflow (borrow out of the high digit), but we don't care:
2651 * we're effectively doing unsigned arithmetic mod
2652 * PyLong_BASE**(sizea + sizeb), and so long as the *final* result fits,
2653 * borrows and carries out of the high digit can be ignored.
2654 * 5. Subtract ah*bh from the result, starting at shift.
2655 * 6. Compute (ah+al)*(bh+bl), and add it into the result starting
2659 /* 1. Allocate result space. */
2660 ret
= _PyLong_New(asize
+ bsize
);
2661 if (ret
== NULL
) goto fail
;
2663 /* Fill with trash, to catch reference to uninitialized digits. */
2664 memset(ret
->ob_digit
, 0xDF, Py_SIZE(ret
) * sizeof(digit
));
2667 /* 2. t1 <- ah*bh, and copy into high digits of result. */
2668 if ((t1
= k_mul(ah
, bh
)) == NULL
) goto fail
;
2669 assert(Py_SIZE(t1
) >= 0);
2670 assert(2*shift
+ Py_SIZE(t1
) <= Py_SIZE(ret
));
2671 memcpy(ret
->ob_digit
+ 2*shift
, t1
->ob_digit
,
2672 Py_SIZE(t1
) * sizeof(digit
));
2674 /* Zero-out the digits higher than the ah*bh copy. */
2675 i
= Py_SIZE(ret
) - 2*shift
- Py_SIZE(t1
);
2677 memset(ret
->ob_digit
+ 2*shift
+ Py_SIZE(t1
), 0,
2680 /* 3. t2 <- al*bl, and copy into the low digits. */
2681 if ((t2
= k_mul(al
, bl
)) == NULL
) {
2685 assert(Py_SIZE(t2
) >= 0);
2686 assert(Py_SIZE(t2
) <= 2*shift
); /* no overlap with high digits */
2687 memcpy(ret
->ob_digit
, t2
->ob_digit
, Py_SIZE(t2
) * sizeof(digit
));
2689 /* Zero out remaining digits. */
2690 i
= 2*shift
- Py_SIZE(t2
); /* number of uninitialized digits */
2692 memset(ret
->ob_digit
+ Py_SIZE(t2
), 0, i
* sizeof(digit
));
2694 /* 4 & 5. Subtract ah*bh (t1) and al*bl (t2). We do al*bl first
2695 * because it's fresher in cache.
2697 i
= Py_SIZE(ret
) - shift
; /* # digits after shift */
2698 (void)v_isub(ret
->ob_digit
+ shift
, i
, t2
->ob_digit
, Py_SIZE(t2
));
2701 (void)v_isub(ret
->ob_digit
+ shift
, i
, t1
->ob_digit
, Py_SIZE(t1
));
2704 /* 6. t3 <- (ah+al)(bh+bl), and add into result. */
2705 if ((t1
= x_add(ah
, al
)) == NULL
) goto fail
;
2714 else if ((t2
= x_add(bh
, bl
)) == NULL
) {
2725 if (t3
== NULL
) goto fail
;
2726 assert(Py_SIZE(t3
) >= 0);
2728 /* Add t3. It's not obvious why we can't run out of room here.
2729 * See the (*) comment after this function.
2731 (void)v_iadd(ret
->ob_digit
+ shift
, i
, t3
->ob_digit
, Py_SIZE(t3
));
2734 return long_normalize(ret
);
2745 /* (*) Why adding t3 can't "run out of room" above.
2747 Let f(x) mean the floor of x and c(x) mean the ceiling of x. Some facts
2750 1. For any integer i, i = c(i/2) + f(i/2). In particular,
2751 bsize = c(bsize/2) + f(bsize/2).
2752 2. shift = f(bsize/2)
2754 4. Since we call k_lopsided_mul if asize*2 <= bsize, asize*2 > bsize in this
2755 routine, so asize > bsize/2 >= f(bsize/2) in this routine.
2757 We allocated asize + bsize result digits, and add t3 into them at an offset
2758 of shift. This leaves asize+bsize-shift allocated digit positions for t3
2759 to fit into, = (by #1 and #2) asize + f(bsize/2) + c(bsize/2) - f(bsize/2) =
2760 asize + c(bsize/2) available digit positions.
2762 bh has c(bsize/2) digits, and bl at most f(bsize/2) digits.  So bh+bl has
2763 at most c(bsize/2) digits + 1 bit.
2765 If asize == bsize, ah has c(bsize/2) digits, else ah has at most f(bsize/2)
2766 digits, and al has at most f(bsize/2) digits in any case. So ah+al has at
2767 most (asize == bsize ? c(bsize/2) : f(bsize/2)) digits + 1 bit.
2769 The product (ah+al)*(bh+bl) therefore has at most
2771 c(bsize/2) + (asize == bsize ? c(bsize/2) : f(bsize/2)) digits + 2 bits
2773 and we have asize + c(bsize/2) available digit positions. We need to show
2774 this is always enough. An instance of c(bsize/2) cancels out in both, so
2775 the question reduces to whether asize digits is enough to hold
2776 (asize == bsize ? c(bsize/2) : f(bsize/2)) digits + 2 bits. If asize < bsize,
2777 then we're asking whether asize digits >= f(bsize/2) digits + 2 bits. By #4,
2778 asize is at least f(bsize/2)+1 digits, so this in turn reduces to whether 1
2779 digit is enough to hold 2 bits. This is so since PyLong_SHIFT=15 >= 2. If
2780 asize == bsize, then we're asking whether bsize digits is enough to hold
2781 c(bsize/2) digits + 2 bits, or equivalently (by #1) whether f(bsize/2) digits
2782 is enough to hold 2 bits. This is so if bsize >= 2, which holds because
2783 bsize >= KARATSUBA_CUTOFF >= 2.
2785 Note that since there's always enough room for (ah+al)*(bh+bl), and that's
2786 clearly >= each of ah*bh and al*bl, there's always enough room to subtract
2787 ah*bh and al*bl too.
2790 /* b has at least twice the digits of a, and a is big enough that Karatsuba
2791 * would pay off *if* the inputs had balanced sizes. View b as a sequence
2792 * of slices, each with a->ob_size digits, and multiply the slices by a,
2793 * one at a time. This gives k_mul balanced inputs to work with, and is
2794 * also cache-friendly (we compute one double-width slice of the result
2795 * at a time, then move on, never bactracking except for the helpful
2796 * single-width slice overlap between successive partial sums).
2798 static PyLongObject
*
2799 k_lopsided_mul(PyLongObject
*a
, PyLongObject
*b
)
2801 const Py_ssize_t asize
= ABS(Py_SIZE(a
));
2802 Py_ssize_t bsize
= ABS(Py_SIZE(b
));
2803 Py_ssize_t nbdone
; /* # of b digits already multiplied */
2805 PyLongObject
*bslice
= NULL
;
2807 assert(asize
> KARATSUBA_CUTOFF
);
2808 assert(2 * asize
<= bsize
);
2810 /* Allocate result space, and zero it out. */
2811 ret
= _PyLong_New(asize
+ bsize
);
2814 memset(ret
->ob_digit
, 0, Py_SIZE(ret
) * sizeof(digit
));
2816 /* Successive slices of b are copied into bslice. */
2817 bslice
= _PyLong_New(asize
);
2823 PyLongObject
*product
;
2824 const Py_ssize_t nbtouse
= MIN(bsize
, asize
);
2826 /* Multiply the next slice of b by a. */
2827 memcpy(bslice
->ob_digit
, b
->ob_digit
+ nbdone
,
2828 nbtouse
* sizeof(digit
));
2829 Py_SIZE(bslice
) = nbtouse
;
2830 product
= k_mul(a
, bslice
);
2831 if (product
== NULL
)
2834 /* Add into result. */
2835 (void)v_iadd(ret
->ob_digit
+ nbdone
, Py_SIZE(ret
) - nbdone
,
2836 product
->ob_digit
, Py_SIZE(product
));
2844 return long_normalize(ret
);
2853 long_mul(PyLongObject
*v
, PyLongObject
*w
)
2855 PyLongObject
*a
, *b
, *z
;
2857 if (!convert_binop((PyObject
*)v
, (PyObject
*)w
, &a
, &b
)) {
2858 Py_INCREF(Py_NotImplemented
);
2859 return Py_NotImplemented
;
2863 /* Negate if exactly one of the inputs is negative. */
2864 if (((a
->ob_size
^ b
->ob_size
) < 0) && z
)
2865 z
->ob_size
= -(z
->ob_size
);
2868 return (PyObject
*)z
;
2871 /* The / and % operators are now defined in terms of divmod().
2872 The expression a mod b has the value a - b*floor(a/b).
2873 The long_divrem function gives the remainder after division of
2874 |a| by |b|, with the sign of a. This is also expressed
2875 as a - b*trunc(a/b), if trunc truncates towards zero.
2882 So, to get from rem to mod, we have to add b if a and b
2883 have different signs. We then subtract one from the 'div'
2884 part of the outcome to keep the invariant intact. */
2887 * *pdiv, *pmod = divmod(v, w)
2888 * NULL can be passed for pdiv or pmod, in which case that part of
2889 * the result is simply thrown away. The caller owns a reference to
2890 * each of these it requests (does not pass NULL for).
2893 l_divmod(PyLongObject
*v
, PyLongObject
*w
,
2894 PyLongObject
**pdiv
, PyLongObject
**pmod
)
2896 PyLongObject
*div
, *mod
;
2898 if (long_divrem(v
, w
, &div
, &mod
) < 0)
2900 if ((Py_SIZE(mod
) < 0 && Py_SIZE(w
) > 0) ||
2901 (Py_SIZE(mod
) > 0 && Py_SIZE(w
) < 0)) {
2904 temp
= (PyLongObject
*) long_add(mod
, w
);
2911 one
= (PyLongObject
*) PyLong_FromLong(1L);
2913 (temp
= (PyLongObject
*) long_sub(div
, one
)) == NULL
) {
2937 long_div(PyObject
*v
, PyObject
*w
)
2939 PyLongObject
*a
, *b
, *div
;
2941 CONVERT_BINOP(v
, w
, &a
, &b
);
2942 if (l_divmod(a
, b
, &div
, NULL
) < 0)
2946 return (PyObject
*)div
;
2950 long_classic_div(PyObject
*v
, PyObject
*w
)
2952 PyLongObject
*a
, *b
, *div
;
2954 CONVERT_BINOP(v
, w
, &a
, &b
);
2955 if (Py_DivisionWarningFlag
&&
2956 PyErr_Warn(PyExc_DeprecationWarning
, "classic long division") < 0)
2958 else if (l_divmod(a
, b
, &div
, NULL
) < 0)
2962 return (PyObject
*)div
;
2965 /* PyLong/PyLong -> float, with correctly rounded result. */
2967 #define MANT_DIG_DIGITS (DBL_MANT_DIG / PyLong_SHIFT)
2968 #define MANT_DIG_BITS (DBL_MANT_DIG % PyLong_SHIFT)
2971 long_true_divide(PyObject
*v
, PyObject
*w
)
2973 PyLongObject
*a
, *b
, *x
;
2974 Py_ssize_t a_size
, b_size
, shift
, extra_bits
, diff
, x_size
, x_bits
;
2976 int inexact
, negate
, a_is_small
, b_is_small
;
2979 CONVERT_BINOP(v
, w
, &a
, &b
);
2982 Method in a nutshell:
2984 0. reduce to case a, b > 0; filter out obvious underflow/overflow
2985 1. choose a suitable integer 'shift'
2986 2. use integer arithmetic to compute x = floor(2**-shift*a/b)
2987 3. adjust x for correct rounding
2988 4. convert x to a double dx with the same value
2989 5. return ldexp(dx, shift).
2993 0. For any a, a/0 raises ZeroDivisionError; for nonzero b, 0/b
2994 returns either 0.0 or -0.0, depending on the sign of b. For a and
2995 b both nonzero, ignore signs of a and b, and add the sign back in
2996 at the end. Now write a_bits and b_bits for the bit lengths of a
2997 and b respectively (that is, a_bits = 1 + floor(log_2(a)); likewise
3000 2**(a_bits - b_bits - 1) < a/b < 2**(a_bits - b_bits + 1).
3002 So if a_bits - b_bits > DBL_MAX_EXP then a/b > 2**DBL_MAX_EXP and
3003 so overflows. Similarly, if a_bits - b_bits < DBL_MIN_EXP -
3004 DBL_MANT_DIG - 1 then a/b underflows to 0. With these cases out of
3005 the way, we can assume that
3007 DBL_MIN_EXP - DBL_MANT_DIG - 1 <= a_bits - b_bits <= DBL_MAX_EXP.
3009 1. The integer 'shift' is chosen so that x has the right number of
3010 bits for a double, plus two or three extra bits that will be used
3011 in the rounding decisions. Writing a_bits and b_bits for the
3012 number of significant bits in a and b respectively, a
3013 straightforward formula for shift is:
3015 shift = a_bits - b_bits - DBL_MANT_DIG - 2
3017 This is fine in the usual case, but if a/b is smaller than the
3018 smallest normal float then it can lead to double rounding on an
3019 IEEE 754 platform, giving incorrectly rounded results. So we
3020 adjust the formula slightly. The actual formula used is:
3022 shift = MAX(a_bits - b_bits, DBL_MIN_EXP) - DBL_MANT_DIG - 2
3024 2. The quantity x is computed by first shifting a (left -shift bits
3025 if shift <= 0, right shift bits if shift > 0) and then dividing by
3026 b. For both the shift and the division, we keep track of whether
3027 the result is inexact, in a flag 'inexact'; this information is
3028 needed at the rounding stage.
3030 With the choice of shift above, together with our assumption that
3031 a_bits - b_bits >= DBL_MIN_EXP - DBL_MANT_DIG - 1, it follows
3034 3. Now x * 2**shift <= a/b < (x+1) * 2**shift. We want to replace
3035 this with an exactly representable float of the form
3037 round(x/2**extra_bits) * 2**(extra_bits+shift).
3039 For float representability, we need x/2**extra_bits <
3040 2**DBL_MANT_DIG and extra_bits + shift >= DBL_MIN_EXP -
3041 DBL_MANT_DIG. This translates to the condition:
3043 extra_bits >= MAX(x_bits, DBL_MIN_EXP - shift) - DBL_MANT_DIG
3045 To round, we just modify the bottom digit of x in-place; this can
3046 end up giving a digit with value > PyLONG_MASK, but that's not a
3047 problem since digits can hold values up to 2*PyLONG_MASK+1.
3049 With the original choices for shift above, extra_bits will always
3050 be 2 or 3. Then rounding under the round-half-to-even rule, we
3051 round up iff the most significant of the extra bits is 1, and
3052 either: (a) the computation of x in step 2 had an inexact result,
3053 or (b) at least one other of the extra bits is 1, or (c) the least
3054 significant bit of x (above those to be rounded) is 1.
3056 4. Conversion to a double is straightforward; all floating-point
3057 operations involved in the conversion are exact, so there's no
3058 danger of rounding errors.
3060 5. Use ldexp(x, shift) to compute x*2**shift, the final result.
3061 The result will always be exactly representable as a double, except
3062 in the case that it overflows. To avoid dependence on the exact
3063 behaviour of ldexp on overflow, we check for overflow before
3064 applying ldexp. The result of ldexp is adjusted for sign before
3068 /* Reduce to case where a and b are both positive. */
3069 a_size
= ABS(Py_SIZE(a
));
3070 b_size
= ABS(Py_SIZE(b
));
3071 negate
= (Py_SIZE(a
) < 0) ^ (Py_SIZE(b
) < 0);
3073 PyErr_SetString(PyExc_ZeroDivisionError
,
3074 "division by zero");
3078 goto underflow_or_zero
;
3080 /* Fast path for a and b small (exactly representable in a double).
3081 Relies on floating-point division being correctly rounded; results
3082 may be subject to double rounding on x86 machines that operate with
3083 the x87 FPU set to 64-bit precision. */
3084 a_is_small
= a_size
<= MANT_DIG_DIGITS
||
3085 (a_size
== MANT_DIG_DIGITS
+1 &&
3086 a
->ob_digit
[MANT_DIG_DIGITS
] >> MANT_DIG_BITS
== 0);
3087 b_is_small
= b_size
<= MANT_DIG_DIGITS
||
3088 (b_size
== MANT_DIG_DIGITS
+1 &&
3089 b
->ob_digit
[MANT_DIG_DIGITS
] >> MANT_DIG_BITS
== 0);
3090 if (a_is_small
&& b_is_small
) {
3092 da
= a
->ob_digit
[--a_size
];
3094 da
= da
* PyLong_BASE
+ a
->ob_digit
[--a_size
];
3095 db
= b
->ob_digit
[--b_size
];
3097 db
= db
* PyLong_BASE
+ b
->ob_digit
[--b_size
];
3102 /* Catch obvious cases of underflow and overflow */
3103 diff
= a_size
- b_size
;
3104 if (diff
> PY_SSIZE_T_MAX
/PyLong_SHIFT
- 1)
3105 /* Extreme overflow */
3107 else if (diff
< 1 - PY_SSIZE_T_MAX
/PyLong_SHIFT
)
3108 /* Extreme underflow */
3109 goto underflow_or_zero
;
3110 /* Next line is now safe from overflowing a Py_ssize_t */
3111 diff
= diff
* PyLong_SHIFT
+ bits_in_digit(a
->ob_digit
[a_size
- 1]) -
3112 bits_in_digit(b
->ob_digit
[b_size
- 1]);
3113 /* Now diff = a_bits - b_bits. */
3114 if (diff
> DBL_MAX_EXP
)
3116 else if (diff
< DBL_MIN_EXP
- DBL_MANT_DIG
- 1)
3117 goto underflow_or_zero
;
3119 /* Choose value for shift; see comments for step 1 above. */
3120 shift
= MAX(diff
, DBL_MIN_EXP
) - DBL_MANT_DIG
- 2;
3124 /* x = abs(a * 2**-shift) */
3126 Py_ssize_t i
, shift_digits
= -shift
/ PyLong_SHIFT
;
3128 /* x = a << -shift */
3129 if (a_size
>= PY_SSIZE_T_MAX
- 1 - shift_digits
) {
3130 /* In practice, it's probably impossible to end up
3131 here. Both a and b would have to be enormous,
3132 using close to SIZE_T_MAX bytes of memory each. */
3133 PyErr_SetString(PyExc_OverflowError
,
3134 "intermediate overflow during division");
3137 x
= _PyLong_New(a_size
+ shift_digits
+ 1);
3140 for (i
= 0; i
< shift_digits
; i
++)
3142 rem
= v_lshift(x
->ob_digit
+ shift_digits
, a
->ob_digit
,
3143 a_size
, -shift
% PyLong_SHIFT
);
3144 x
->ob_digit
[a_size
+ shift_digits
] = rem
;
3147 Py_ssize_t shift_digits
= shift
/ PyLong_SHIFT
;
3149 /* x = a >> shift */
3150 assert(a_size
>= shift_digits
);
3151 x
= _PyLong_New(a_size
- shift_digits
);
3154 rem
= v_rshift(x
->ob_digit
, a
->ob_digit
+ shift_digits
,
3155 a_size
- shift_digits
, shift
% PyLong_SHIFT
);
3156 /* set inexact if any of the bits shifted out is nonzero */
3159 while (!inexact
&& shift_digits
> 0)
3160 if (a
->ob_digit
[--shift_digits
])
3164 x_size
= Py_SIZE(x
);
3166 /* x //= b. If the remainder is nonzero, set inexact. We own the only
3167 reference to x, so it's safe to modify it in-place. */
3169 digit rem
= inplace_divrem1(x
->ob_digit
, x
->ob_digit
, x_size
,
3176 PyLongObject
*div
, *rem
;
3177 div
= x_divrem(x
, b
, &rem
);
3186 x_size
= ABS(Py_SIZE(x
));
3187 assert(x_size
> 0); /* result of division is never zero */
3188 x_bits
= (x_size
-1)*PyLong_SHIFT
+bits_in_digit(x
->ob_digit
[x_size
-1]);
3190 /* The number of extra bits that have to be rounded away. */
3191 extra_bits
= MAX(x_bits
, DBL_MIN_EXP
- shift
) - DBL_MANT_DIG
;
3192 assert(extra_bits
== 2 || extra_bits
== 3);
3194 /* Round by directly modifying the low digit of x. */
3195 mask
= (digit
)1 << (extra_bits
- 1);
3196 low
= x
->ob_digit
[0] | inexact
;
3197 if (low
& mask
&& low
& (3*mask
-1))
3199 x
->ob_digit
[0] = low
& ~(mask
-1U);
3201 /* Convert x to a double dx; the conversion is exact. */
3202 dx
= x
->ob_digit
[--x_size
];
3204 dx
= dx
* PyLong_BASE
+ x
->ob_digit
[--x_size
];
3207 /* Check whether ldexp result will overflow a double. */
3208 if (shift
+ x_bits
>= DBL_MAX_EXP
&&
3209 (shift
+ x_bits
> DBL_MAX_EXP
|| dx
== ldexp(1.0, x_bits
)))
3211 result
= ldexp(dx
, shift
);
3216 return PyFloat_FromDouble(negate
? -result
: result
);
3221 return PyFloat_FromDouble(negate
? -0.0 : 0.0);
3224 PyErr_SetString(PyExc_OverflowError
,
3225 "integer division result too large for a float");
3233 long_mod(PyObject
*v
, PyObject
*w
)
3235 PyLongObject
*a
, *b
, *mod
;
3237 CONVERT_BINOP(v
, w
, &a
, &b
);
3239 if (l_divmod(a
, b
, NULL
, &mod
) < 0)
3243 return (PyObject
*)mod
;
3247 long_divmod(PyObject
*v
, PyObject
*w
)
3249 PyLongObject
*a
, *b
, *div
, *mod
;
3252 CONVERT_BINOP(v
, w
, &a
, &b
);
3254 if (l_divmod(a
, b
, &div
, &mod
) < 0) {
3261 PyTuple_SetItem(z
, 0, (PyObject
*) div
);
3262 PyTuple_SetItem(z
, 1, (PyObject
*) mod
);
3275 long_pow(PyObject
*v
, PyObject
*w
, PyObject
*x
)
3277 PyLongObject
*a
, *b
, *c
; /* a,b,c = v,w,x */
3278 int negativeOutput
= 0; /* if x<0 return negative output */
3280 PyLongObject
*z
= NULL
; /* accumulated result */
3281 Py_ssize_t i
, j
, k
; /* counters */
3282 PyLongObject
*temp
= NULL
;
3284 /* 5-ary values. If the exponent is large enough, table is
3285 * precomputed so that table[i] == a**i % c for i in range(32).
3287 PyLongObject
*table
[32] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3288 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
3290 /* a, b, c = v, w, x */
3291 CONVERT_BINOP(v
, w
, &a
, &b
);
3292 if (PyLong_Check(x
)) {
3293 c
= (PyLongObject
*)x
;
3296 else if (PyInt_Check(x
)) {
3297 c
= (PyLongObject
*)PyLong_FromLong(PyInt_AS_LONG(x
));
3301 else if (x
== Py_None
)
3306 Py_INCREF(Py_NotImplemented
);
3307 return Py_NotImplemented
;
3310 if (Py_SIZE(b
) < 0) { /* if exponent is negative */
3312 PyErr_SetString(PyExc_TypeError
, "pow() 2nd argument "
3313 "cannot be negative when 3rd argument specified");
3317 /* else return a float. This works because we know
3318 that this calls float_pow() which converts its
3319 arguments to double. */
3322 return PyFloat_Type
.tp_as_number
->nb_power(v
, w
, x
);
3328 raise ValueError() */
3329 if (Py_SIZE(c
) == 0) {
3330 PyErr_SetString(PyExc_ValueError
,
3331 "pow() 3rd argument cannot be 0");
3336 negativeOutput = True
3337 modulus = -modulus */
3338 if (Py_SIZE(c
) < 0) {
3340 temp
= (PyLongObject
*)_PyLong_Copy(c
);
3346 c
->ob_size
= - c
->ob_size
;
3351 if ((Py_SIZE(c
) == 1) && (c
->ob_digit
[0] == 1)) {
3352 z
= (PyLongObject
*)PyLong_FromLong(0L);
3357 base = base % modulus
3358 Having the base positive just makes things easier. */
3359 if (Py_SIZE(a
) < 0) {
3360 if (l_divmod(a
, c
, NULL
, &temp
) < 0)
3368 /* At this point a, b, and c are guaranteed non-negative UNLESS
3369 c is NULL, in which case a may be negative. */
3371 z
= (PyLongObject
*)PyLong_FromLong(1L);
3375 /* Perform a modular reduction, X = X % c, but leave X alone if c
3380 if (l_divmod(X, c, NULL, &temp) < 0) \
3387 /* Multiply two values, then reduce the result:
3388 result = X*Y % c. If c is NULL, skip the mod. */
3389 #define MULT(X, Y, result) \
3391 temp = (PyLongObject *)long_mul(X, Y); \
3394 Py_XDECREF(result); \
3400 if (Py_SIZE(b
) <= FIVEARY_CUTOFF
) {
3401 /* Left-to-right binary exponentiation (HAC Algorithm 14.79) */
3402 /* http://www.cacr.math.uwaterloo.ca/hac/about/chap14.pdf */
3403 for (i
= Py_SIZE(b
) - 1; i
>= 0; --i
) {
3404 digit bi
= b
->ob_digit
[i
];
3406 for (j
= (digit
)1 << (PyLong_SHIFT
-1); j
!= 0; j
>>= 1) {
3414 /* Left-to-right 5-ary exponentiation (HAC Algorithm 14.82) */
3415 Py_INCREF(z
); /* still holds 1L */
3417 for (i
= 1; i
< 32; ++i
)
3418 MULT(table
[i
-1], a
, table
[i
])
3420 for (i
= Py_SIZE(b
) - 1; i
>= 0; --i
) {
3421 const digit bi
= b
->ob_digit
[i
];
3423 for (j
= PyLong_SHIFT
- 5; j
>= 0; j
-= 5) {
3424 const int index
= (bi
>> j
) & 0x1f;
3425 for (k
= 0; k
< 5; ++k
)
3428 MULT(z
, table
[index
], z
)
3433 if (negativeOutput
&& (Py_SIZE(z
) != 0)) {
3434 temp
= (PyLongObject
*)long_sub(z
, c
);
3450 if (Py_SIZE(b
) > FIVEARY_CUTOFF
) {
3451 for (i
= 0; i
< 32; ++i
)
3452 Py_XDECREF(table
[i
]);
3458 return (PyObject
*)z
;
3462 long_invert(PyLongObject
*v
)
3464 /* Implement ~x as -(x+1) */
3467 w
= (PyLongObject
*)PyLong_FromLong(1L);
3470 x
= (PyLongObject
*) long_add(v
, w
);
3474 Py_SIZE(x
) = -(Py_SIZE(x
));
3475 return (PyObject
*)x
;
3479 long_neg(PyLongObject
*v
)
3482 if (v
->ob_size
== 0 && PyLong_CheckExact(v
)) {
3485 return (PyObject
*) v
;
3487 z
= (PyLongObject
*)_PyLong_Copy(v
);
3489 z
->ob_size
= -(v
->ob_size
);
3490 return (PyObject
*)z
;
3494 long_abs(PyLongObject
*v
)
3499 return long_long((PyObject
*)v
);
3503 long_nonzero(PyLongObject
*v
)
3505 return ABS(Py_SIZE(v
)) != 0;
3509 long_rshift(PyLongObject
*v
, PyLongObject
*w
)
3511 PyLongObject
*a
, *b
;
3512 PyLongObject
*z
= NULL
;
3514 Py_ssize_t newsize
, wordshift
, loshift
, hishift
, i
, j
;
3515 digit lomask
, himask
;
3517 CONVERT_BINOP((PyObject
*)v
, (PyObject
*)w
, &a
, &b
);
3519 if (Py_SIZE(a
) < 0) {
3520 /* Right shifting negative numbers is harder */
3521 PyLongObject
*a1
, *a2
;
3522 a1
= (PyLongObject
*) long_invert(a
);
3525 a2
= (PyLongObject
*) long_rshift(a1
, b
);
3529 z
= (PyLongObject
*) long_invert(a2
);
3534 shiftby
= PyLong_AsLong((PyObject
*)b
);
3535 if (shiftby
== -1L && PyErr_Occurred())
3538 PyErr_SetString(PyExc_ValueError
,
3539 "negative shift count");
3542 wordshift
= shiftby
/ PyLong_SHIFT
;
3543 newsize
= ABS(Py_SIZE(a
)) - wordshift
;
3548 return (PyObject
*)z
;
3550 loshift
= shiftby
% PyLong_SHIFT
;
3551 hishift
= PyLong_SHIFT
- loshift
;
3552 lomask
= ((digit
)1 << hishift
) - 1;
3553 himask
= PyLong_MASK
^ lomask
;
3554 z
= _PyLong_New(newsize
);
3558 Py_SIZE(z
) = -(Py_SIZE(z
));
3559 for (i
= 0, j
= wordshift
; i
< newsize
; i
++, j
++) {
3560 z
->ob_digit
[i
] = (a
->ob_digit
[j
] >> loshift
) & lomask
;
3563 (a
->ob_digit
[j
+1] << hishift
) & himask
;
3565 z
= long_normalize(z
);
3570 return (PyObject
*) z
;
3575 long_lshift(PyObject
*v
, PyObject
*w
)
3577 /* This version due to Tim Peters */
3578 PyLongObject
*a
, *b
;
3579 PyLongObject
*z
= NULL
;
3581 Py_ssize_t oldsize
, newsize
, wordshift
, remshift
, i
, j
;
3584 CONVERT_BINOP(v
, w
, &a
, &b
);
3586 shiftby
= PyLong_AsLong((PyObject
*)b
);
3587 if (shiftby
== -1L && PyErr_Occurred())
3590 PyErr_SetString(PyExc_ValueError
, "negative shift count");
3593 if ((long)(int)shiftby
!= shiftby
) {
3594 PyErr_SetString(PyExc_ValueError
,
3595 "outrageous left shift count");
3598 /* wordshift, remshift = divmod(shiftby, PyLong_SHIFT) */
3599 wordshift
= (int)shiftby
/ PyLong_SHIFT
;
3600 remshift
= (int)shiftby
- wordshift
* PyLong_SHIFT
;
3602 oldsize
= ABS(a
->ob_size
);
3603 newsize
= oldsize
+ wordshift
;
3606 z
= _PyLong_New(newsize
);
3610 z
->ob_size
= -(z
->ob_size
);
3611 for (i
= 0; i
< wordshift
; i
++)
3614 for (i
= wordshift
, j
= 0; j
< oldsize
; i
++, j
++) {
3615 accum
|= (twodigits
)a
->ob_digit
[j
] << remshift
;
3616 z
->ob_digit
[i
] = (digit
)(accum
& PyLong_MASK
);
3617 accum
>>= PyLong_SHIFT
;
3620 z
->ob_digit
[newsize
-1] = (digit
)accum
;
3623 z
= long_normalize(z
);
3627 return (PyObject
*) z
;
3630 /* Compute two's complement of digit vector a[0:m], writing result to
3631 z[0:m]. The digit vector a need not be normalized, but should not
3632 be entirely zero. a and z may point to the same digit vector. */
3635 v_complement(digit
*z
, digit
*a
, Py_ssize_t m
)
3639 for (i
= 0; i
< m
; ++i
) {
3640 carry
+= a
[i
] ^ PyLong_MASK
;
3641 z
[i
] = carry
& PyLong_MASK
;
3642 carry
>>= PyLong_SHIFT
;
3647 /* Bitwise and/xor/or operations */
3650 long_bitwise(PyLongObject
*a
,
3651 int op
, /* '&', '|', '^' */
3654 int nega
, negb
, negz
;
3655 Py_ssize_t size_a
, size_b
, size_z
, i
;
3658 /* Bitwise operations for negative numbers operate as though
3659 on a two's complement representation. So convert arguments
3660 from sign-magnitude to two's complement, and convert the
3661 result back to sign-magnitude at the end. */
3663 /* If a is negative, replace it by its two's complement. */
3664 size_a
= ABS(Py_SIZE(a
));
3665 nega
= Py_SIZE(a
) < 0;
3667 z
= _PyLong_New(size_a
);
3670 v_complement(z
->ob_digit
, a
->ob_digit
, size_a
);
3674 /* Keep reference count consistent. */
3678 size_b
= ABS(Py_SIZE(b
));
3679 negb
= Py_SIZE(b
) < 0;
3681 z
= _PyLong_New(size_b
);
3686 v_complement(z
->ob_digit
, b
->ob_digit
, size_b
);
3692 /* Swap a and b if necessary to ensure size_a >= size_b. */
3693 if (size_a
< size_b
) {
3694 z
= a
; a
= b
; b
= z
;
3695 size_z
= size_a
; size_a
= size_b
; size_b
= size_z
;
3696 negz
= nega
; nega
= negb
; negb
= negz
;
3699 /* JRH: The original logic here was to allocate the result value (z)
3700 as the longer of the two operands. However, there are some cases
3701 where the result is guaranteed to be shorter than that: AND of two
3702 positives, OR of two negatives: use the shorter number. AND with
3703 mixed signs: use the positive number. OR with mixed signs: use the
3713 size_z
= negb
? size_a
: size_b
;
3717 size_z
= negb
? size_b
: size_a
;
3720 PyErr_BadArgument();
3724 /* We allow an extra digit if z is negative, to make sure that
3725 the final two's complement of z doesn't overflow. */
3726 z
= _PyLong_New(size_z
+ negz
);
3733 /* Compute digits for overlap of a and b. */
3736 for (i
= 0; i
< size_b
; ++i
)
3737 z
->ob_digit
[i
] = a
->ob_digit
[i
] & b
->ob_digit
[i
];
3740 for (i
= 0; i
< size_b
; ++i
)
3741 z
->ob_digit
[i
] = a
->ob_digit
[i
] | b
->ob_digit
[i
];
3744 for (i
= 0; i
< size_b
; ++i
)
3745 z
->ob_digit
[i
] = a
->ob_digit
[i
] ^ b
->ob_digit
[i
];
3748 PyErr_BadArgument();
3752 /* Copy any remaining digits of a, inverting if necessary. */
3753 if (op
== '^' && negb
)
3754 for (; i
< size_z
; ++i
)
3755 z
->ob_digit
[i
] = a
->ob_digit
[i
] ^ PyLong_MASK
;
3756 else if (i
< size_z
)
3757 memcpy(&z
->ob_digit
[i
], &a
->ob_digit
[i
],
3758 (size_z
-i
)*sizeof(digit
));
3760 /* Complement result if negative. */
3762 Py_SIZE(z
) = -(Py_SIZE(z
));
3763 z
->ob_digit
[size_z
] = PyLong_MASK
;
3764 v_complement(z
->ob_digit
, z
->ob_digit
, size_z
+1);
3769 return (PyObject
*)long_normalize(z
);
3773 long_and(PyObject
*v
, PyObject
*w
)
3775 PyLongObject
*a
, *b
;
3777 CONVERT_BINOP(v
, w
, &a
, &b
);
3778 c
= long_bitwise(a
, '&', b
);
3785 long_xor(PyObject
*v
, PyObject
*w
)
3787 PyLongObject
*a
, *b
;
3789 CONVERT_BINOP(v
, w
, &a
, &b
);
3790 c
= long_bitwise(a
, '^', b
);
3797 long_or(PyObject
*v
, PyObject
*w
)
3799 PyLongObject
*a
, *b
;
3801 CONVERT_BINOP(v
, w
, &a
, &b
);
3802 c
= long_bitwise(a
, '|', b
);
3809 long_coerce(PyObject
**pv
, PyObject
**pw
)
3811 if (PyInt_Check(*pw
)) {
3812 *pw
= PyLong_FromLong(PyInt_AS_LONG(*pw
));
3818 else if (PyLong_Check(*pw
)) {
3823 return 1; /* Can't do it */
3827 long_long(PyObject
*v
)
3829 if (PyLong_CheckExact(v
))
3832 v
= _PyLong_Copy((PyLongObject
*)v
);
3837 long_int(PyObject
*v
)
3840 x
= PyLong_AsLong(v
);
3841 if (PyErr_Occurred()) {
3842 if (PyErr_ExceptionMatches(PyExc_OverflowError
)) {
3844 if (PyLong_CheckExact(v
)) {
3849 return _PyLong_Copy((PyLongObject
*)v
);
3854 return PyInt_FromLong(x
);
3858 long_float(PyObject
*v
)
3861 result
= PyLong_AsDouble(v
);
3862 if (result
== -1.0 && PyErr_Occurred())
3864 return PyFloat_FromDouble(result
);
3868 long_oct(PyObject
*v
)
3870 return _PyLong_Format(v
, 8, 1, 0);
3874 long_hex(PyObject
*v
)
3876 return _PyLong_Format(v
, 16, 1, 0);
3880 long_subtype_new(PyTypeObject
*type
, PyObject
*args
, PyObject
*kwds
);
3883 long_new(PyTypeObject
*type
, PyObject
*args
, PyObject
*kwds
)
3886 int base
= -909; /* unlikely! */
3887 static char *kwlist
[] = {"x", "base", 0};
3889 if (type
!= &PyLong_Type
)
3890 return long_subtype_new(type
, args
, kwds
); /* Wimp out */
3891 if (!PyArg_ParseTupleAndKeywords(args
, kwds
, "|Oi:long", kwlist
,
3895 return PyLong_FromLong(0L);
3897 return PyNumber_Long(x
);
3898 else if (PyString_Check(x
)) {
3899 /* Since PyLong_FromString doesn't have a length parameter,
3900 * check here for possible NULs in the string. */
3901 char *string
= PyString_AS_STRING(x
);
3902 if (strlen(string
) != (size_t)PyString_Size(x
)) {
3903 /* create a repr() of the input string,
3904 * just like PyLong_FromString does. */
3906 srepr
= PyObject_Repr(x
);
3909 PyErr_Format(PyExc_ValueError
,
3910 "invalid literal for long() with base %d: %s",
3911 base
, PyString_AS_STRING(srepr
));
3915 return PyLong_FromString(PyString_AS_STRING(x
), NULL
, base
);
3917 #ifdef Py_USING_UNICODE
3918 else if (PyUnicode_Check(x
))
3919 return PyLong_FromUnicode(PyUnicode_AS_UNICODE(x
),
3920 PyUnicode_GET_SIZE(x
),
3924 PyErr_SetString(PyExc_TypeError
,
3925 "long() can't convert non-string with explicit base");
3930 /* Wimpy, slow approach to tp_new calls for subtypes of long:
3931 first create a regular long from whatever arguments we got,
3932 then allocate a subtype instance and initialize it from
3933 the regular long. The regular long is then thrown away.
3936 long_subtype_new(PyTypeObject
*type
, PyObject
*args
, PyObject
*kwds
)
3938 PyLongObject
*tmp
, *newobj
;
3941 assert(PyType_IsSubtype(type
, &PyLong_Type
));
3942 tmp
= (PyLongObject
*)long_new(&PyLong_Type
, args
, kwds
);
3945 assert(PyLong_CheckExact(tmp
));
3949 newobj
= (PyLongObject
*)type
->tp_alloc(type
, n
);
3950 if (newobj
== NULL
) {
3954 assert(PyLong_Check(newobj
));
3955 Py_SIZE(newobj
) = Py_SIZE(tmp
);
3956 for (i
= 0; i
< n
; i
++)
3957 newobj
->ob_digit
[i
] = tmp
->ob_digit
[i
];
3959 return (PyObject
*)newobj
;
3963 long_getnewargs(PyLongObject
*v
)
3965 return Py_BuildValue("(N)", _PyLong_Copy(v
));
3969 long_get0(PyLongObject
*v
, void *context
) {
3970 return PyLong_FromLong(0L);
3974 long_get1(PyLongObject
*v
, void *context
) {
3975 return PyLong_FromLong(1L);
3979 long__format__(PyObject
*self
, PyObject
*args
)
3981 PyObject
*format_spec
;
3983 if (!PyArg_ParseTuple(args
, "O:__format__", &format_spec
))
3985 if (PyBytes_Check(format_spec
))
3986 return _PyLong_FormatAdvanced(self
,
3987 PyBytes_AS_STRING(format_spec
),
3988 PyBytes_GET_SIZE(format_spec
));
3989 if (PyUnicode_Check(format_spec
)) {
3990 /* Convert format_spec to a str */
3992 PyObject
*str_spec
= PyObject_Str(format_spec
);
3994 if (str_spec
== NULL
)
3997 result
= _PyLong_FormatAdvanced(self
,
3998 PyBytes_AS_STRING(str_spec
),
3999 PyBytes_GET_SIZE(str_spec
));
4001 Py_DECREF(str_spec
);
4004 PyErr_SetString(PyExc_TypeError
, "__format__ requires str or unicode");
4009 long_sizeof(PyLongObject
*v
)
4013 res
= v
->ob_type
->tp_basicsize
+ ABS(Py_SIZE(v
))*sizeof(digit
);
4014 return PyInt_FromSsize_t(res
);
4018 long_bit_length(PyLongObject
*v
)
4020 PyLongObject
*result
, *x
, *y
;
4021 Py_ssize_t ndigits
, msd_bits
= 0;
4025 assert(PyLong_Check(v
));
4027 ndigits
= ABS(Py_SIZE(v
));
4029 return PyInt_FromLong(0);
4031 msd
= v
->ob_digit
[ndigits
-1];
4036 msd_bits
+= (long)(BitLengthTable
[msd
]);
4038 if (ndigits
<= PY_SSIZE_T_MAX
/PyLong_SHIFT
)
4039 return PyInt_FromSsize_t((ndigits
-1)*PyLong_SHIFT
+ msd_bits
);
4041 /* expression above may overflow; use Python integers instead */
4042 result
= (PyLongObject
*)PyLong_FromSsize_t(ndigits
- 1);
4045 x
= (PyLongObject
*)PyLong_FromLong(PyLong_SHIFT
);
4048 y
= (PyLongObject
*)long_mul(result
, x
);
4055 x
= (PyLongObject
*)PyLong_FromLong(msd_bits
);
4058 y
= (PyLongObject
*)long_add(result
, x
);
4065 return (PyObject
*)result
;
4072 PyDoc_STRVAR(long_bit_length_doc
,
4073 "long.bit_length() -> int or long\n\
4075 Number of bits necessary to represent self in binary.\n\
4078 >>> (37L).bit_length()\n\
4083 long_is_finite(PyObject
*v
)
4089 static PyMethodDef long_methods
[] = {
4090 {"conjugate", (PyCFunction
)long_long
, METH_NOARGS
,
4091 "Returns self, the complex conjugate of any long."},
4092 {"bit_length", (PyCFunction
)long_bit_length
, METH_NOARGS
,
4093 long_bit_length_doc
},
4095 {"is_finite", (PyCFunction
)long_is_finite
, METH_NOARGS
,
4096 "Returns always True."},
4098 {"__trunc__", (PyCFunction
)long_long
, METH_NOARGS
,
4099 "Truncating an Integral returns itself."},
4100 {"__getnewargs__", (PyCFunction
)long_getnewargs
, METH_NOARGS
},
4101 {"__format__", (PyCFunction
)long__format__
, METH_VARARGS
},
4102 {"__sizeof__", (PyCFunction
)long_sizeof
, METH_NOARGS
,
4103 "Returns size in memory, in bytes"},
4104 {NULL
, NULL
} /* sentinel */
4107 static PyGetSetDef long_getset
[] = {
4109 (getter
)long_long
, (setter
)NULL
,
4110 "the real part of a complex number",
4113 (getter
)long_get0
, (setter
)NULL
,
4114 "the imaginary part of a complex number",
4117 (getter
)long_long
, (setter
)NULL
,
4118 "the numerator of a rational number in lowest terms",
4121 (getter
)long_get1
, (setter
)NULL
,
4122 "the denominator of a rational number in lowest terms",
4124 {NULL
} /* Sentinel */
4127 PyDoc_STRVAR(long_doc
,
4128 "long(x[, base]) -> integer\n\
4130 Convert a string or number to a long integer, if possible. A floating\n\
4131 point argument will be truncated towards zero (this does not include a\n\
4132 string representation of a floating point number!) When converting a\n\
4133 string, use the optional base. It is an error to supply a base when\n\
4134 converting a non-string.");
4136 static PyNumberMethods long_as_number
= {
4137 (binaryfunc
) long_add
, /*nb_add*/
4138 (binaryfunc
) long_sub
, /*nb_subtract*/
4139 (binaryfunc
) long_mul
, /*nb_multiply*/
4140 long_classic_div
, /*nb_divide*/
4141 long_mod
, /*nb_remainder*/
4142 long_divmod
, /*nb_divmod*/
4143 long_pow
, /*nb_power*/
4144 (unaryfunc
) long_neg
, /*nb_negative*/
4145 (unaryfunc
) long_long
, /*tp_positive*/
4146 (unaryfunc
) long_abs
, /*tp_absolute*/
4147 (inquiry
) long_nonzero
, /*tp_nonzero*/
4148 (unaryfunc
) long_invert
, /*nb_invert*/
4149 long_lshift
, /*nb_lshift*/
4150 (binaryfunc
) long_rshift
, /*nb_rshift*/
4151 long_and
, /*nb_and*/
4152 long_xor
, /*nb_xor*/
4154 long_coerce
, /*nb_coerce*/
4155 long_int
, /*nb_int*/
4156 long_long
, /*nb_long*/
4157 long_float
, /*nb_float*/
4158 long_oct
, /*nb_oct*/
4159 long_hex
, /*nb_hex*/
4160 0, /* nb_inplace_add */
4161 0, /* nb_inplace_subtract */
4162 0, /* nb_inplace_multiply */
4163 0, /* nb_inplace_divide */
4164 0, /* nb_inplace_remainder */
4165 0, /* nb_inplace_power */
4166 0, /* nb_inplace_lshift */
4167 0, /* nb_inplace_rshift */
4168 0, /* nb_inplace_and */
4169 0, /* nb_inplace_xor */
4170 0, /* nb_inplace_or */
4171 long_div
, /* nb_floor_divide */
4172 long_true_divide
, /* nb_true_divide */
4173 0, /* nb_inplace_floor_divide */
4174 0, /* nb_inplace_true_divide */
4175 long_long
, /* nb_index */
4178 PyTypeObject PyLong_Type
= {
4179 PyObject_HEAD_INIT(&PyType_Type
)
4181 "long", /* tp_name */
4182 offsetof(PyLongObject
, ob_digit
), /* tp_basicsize */
4183 sizeof(digit
), /* tp_itemsize */
4184 long_dealloc
, /* tp_dealloc */
4188 (cmpfunc
)long_compare
, /* tp_compare */
4189 long_repr
, /* tp_repr */
4190 &long_as_number
, /* tp_as_number */
4191 0, /* tp_as_sequence */
4192 0, /* tp_as_mapping */
4193 (hashfunc
)long_hash
, /* tp_hash */
4195 long_str
, /* tp_str */
4196 PyObject_GenericGetAttr
, /* tp_getattro */
4197 0, /* tp_setattro */
4198 0, /* tp_as_buffer */
4199 Py_TPFLAGS_DEFAULT
| Py_TPFLAGS_CHECKTYPES
|
4200 Py_TPFLAGS_BASETYPE
| Py_TPFLAGS_LONG_SUBCLASS
, /* tp_flags */
4201 long_doc
, /* tp_doc */
4202 0, /* tp_traverse */
4204 0, /* tp_richcompare */
4205 0, /* tp_weaklistoffset */
4207 0, /* tp_iternext */
4208 long_methods
, /* tp_methods */
4210 long_getset
, /* tp_getset */
4213 0, /* tp_descr_get */
4214 0, /* tp_descr_set */
4215 0, /* tp_dictoffset */
4218 long_new
, /* tp_new */
4219 PyObject_Del
, /* tp_free */
4222 static PyTypeObject Long_InfoType
;
4224 PyDoc_STRVAR(long_info__doc__
,
4227 A struct sequence that holds information about Python's\n\
4228 internal representation of integers. The attributes are read only.");
4230 static PyStructSequence_Field long_info_fields
[] = {
4231 {"bits_per_digit", "size of a digit in bits"},
4232 {"sizeof_digit", "size in bytes of the C type used to "
4233 "represent a digit"},
4237 static PyStructSequence_Desc long_info_desc
= {
4238 "sys.long_info", /* name */
4239 long_info__doc__
, /* doc */
4240 long_info_fields
, /* fields */
4241 2 /* number of fields */
4245 PyLong_GetInfo(void)
4247 PyObject
* long_info
;
4249 long_info
= PyStructSequence_New(&Long_InfoType
);
4250 if (long_info
== NULL
)
4252 PyStructSequence_SET_ITEM(long_info
, field
++,
4253 PyInt_FromLong(PyLong_SHIFT
));
4254 PyStructSequence_SET_ITEM(long_info
, field
++,
4255 PyInt_FromLong(sizeof(digit
)));
4256 if (PyErr_Occurred()) {
4257 Py_CLEAR(long_info
);
4266 /* initialize long_info */
4267 if (Long_InfoType
.tp_name
== 0)
4268 PyStructSequence_InitType(&Long_InfoType
, &long_info_desc
);