#ifndef CRYPTOPP_MISC_H
#define CRYPTOPP_MISC_H
#include <string.h>		// for memcpy and memmove
// VC2005 workaround: disable declarations that conflict with winnt.h
#define _interlockedbittestandset CRYPTOPP_DISABLED_INTRINSIC_1
#define _interlockedbittestandreset CRYPTOPP_DISABLED_INTRINSIC_2
#define _interlockedbittestandset64 CRYPTOPP_DISABLED_INTRINSIC_3
#define _interlockedbittestandreset64 CRYPTOPP_DISABLED_INTRINSIC_4
#undef _interlockedbittestandset
#undef _interlockedbittestandreset
#undef _interlockedbittestandset64
#undef _interlockedbittestandreset64
#define CRYPTOPP_FAST_ROTATE(x) 1
#elif _MSC_VER >= 1300
#define CRYPTOPP_FAST_ROTATE(x) ((x) == 32 || (x) == 64)
#define CRYPTOPP_FAST_ROTATE(x) ((x) == 32)
#elif (defined(__MWERKS__) && TARGET_CPU_PPC) || \
    (defined(__GNUC__) && (defined(_ARCH_PWR2) || defined(_ARCH_PWR) || defined(_ARCH_PPC) || defined(_ARCH_PPC64) || defined(_ARCH_COM)))
#define CRYPTOPP_FAST_ROTATE(x) ((x) == 32)
#elif defined(__GNUC__) && (CRYPTOPP_BOOL_X64 || CRYPTOPP_BOOL_X86)	// depend on GCC's peephole optimization to generate rotate instructions
#define CRYPTOPP_FAST_ROTATE(x) 1
#define CRYPTOPP_FAST_ROTATE(x) 0
#if defined(__GNUC__) && defined(__linux__)
#define CRYPTOPP_BYTESWAP_AVAILABLE
NAMESPACE_BEGIN(CryptoPP)
// ************** compile-time assertion ***************
static char dummy[2*b-1];
#define CRYPTOPP_COMPILE_ASSERT(assertion) CRYPTOPP_COMPILE_ASSERT_INSTANCE(assertion, __LINE__)
#if defined(CRYPTOPP_EXPORTS) || defined(CRYPTOPP_IMPORTS)
#define CRYPTOPP_COMPILE_ASSERT_INSTANCE(assertion, instance)
#define CRYPTOPP_COMPILE_ASSERT_INSTANCE(assertion, instance) static CompileAssert<(assertion)> CRYPTOPP_ASSERT_JOIN(cryptopp_assert_, instance)
#define CRYPTOPP_ASSERT_JOIN(X, Y) CRYPTOPP_DO_ASSERT_JOIN(X, Y)
#define CRYPTOPP_DO_ASSERT_JOIN(X, Y) X##Y
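// Illustrative usage sketch (not part of the original header): the macro instantiates a
// dummy CompileAssert<bool> whose char array has negative size when the condition is
// false, so a failed check is reported at compile time:
//   CRYPTOPP_COMPILE_ASSERT(sizeof(word32) == 4);   // compiles
//   CRYPTOPP_COMPILE_ASSERT(sizeof(word32) == 5);   // fails to compile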
// ************** misc classes ***************
class CRYPTOPP_DLL Empty
template <class BASE1, class BASE2>
class CRYPTOPP_NO_VTABLE TwoBases : public BASE1, public BASE2
template <class BASE1, class BASE2, class BASE3>
class CRYPTOPP_NO_VTABLE ThreeBases : public BASE1, public BASE2, public BASE3
    NotCopyable(const NotCopyable &);
    void operator=(const NotCopyable &);
    T* operator()() const {return new T;}
/*! This function safely initializes a static object in a multithreaded environment without using locks.
    It may leak memory when two threads try to initialize the static object at the same time,
    but this should be acceptable since each static object is only initialized once per session.
template <class T, class F = NewObject<T>, int instance=0>
    Singleton(F objectFactory = F()) : m_objectFactory(objectFactory) {}
    // prevent this function from being inlined
    CRYPTOPP_NOINLINE const T& Ref(CRYPTOPP_NOINLINE_DOTDOTDOT) const;
template <class T, class F, int instance>
const T& Singleton<T, F, instance>::Ref(CRYPTOPP_NOINLINE_DOTDOTDOT) const
    static simple_ptr<T> s_pObject;
    static char s_objectState = 0;
    switch (s_objectState)
            s_pObject.m_p = m_objectFactory();
    return *s_pObject.m_p;
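// Illustrative usage sketch (not part of the original header; MyTables is a hypothetical type):
//   const MyTables &tables = Singleton<MyTables>().Ref();
// Concurrent first calls may each construct an object and all but one may leak, as the
// comment above explains; subsequent calls return the already-initialized instance.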
// ************** misc functions ***************
#if (!__STDC_WANT_SECURE_LIB__)
inline void memcpy_s(void *dest, size_t sizeInBytes, const void *src, size_t count)
    if (count > sizeInBytes)
        throw InvalidArgument("memcpy_s: buffer overflow");
    memcpy(dest, src, count);
inline void memmove_s(void *dest, size_t sizeInBytes, const void *src, size_t count)
    if (count > sizeInBytes)
        throw InvalidArgument("memmove_s: buffer overflow");
    memmove(dest, src, count);
inline void * memset_z(void *ptr, int value, size_t num)
    // avoid extraneous warning on GCC 4.3.2 Ubuntu 8.10
#if CRYPTOPP_GCC_VERSION >= 30001
    if (__builtin_constant_p(num) && num==0)
    return memset(ptr, value, num);
// can't use std::min or std::max in MSVC60 or Cygwin 1.1.0
template <class T> inline const T& STDMIN(const T& a, const T& b)
    return b < a ? b : a;
template <class T1, class T2> inline const T1 UnsignedMin(const T1& a, const T2& b)
    CRYPTOPP_COMPILE_ASSERT((sizeof(T1)<=sizeof(T2) && T2(-1)>0) || (sizeof(T1)>sizeof(T2) && T1(-1)>0));
    assert(a==0 || a>0);	// GCC workaround: get rid of the warning "comparison is always true due to limited range of data type"
    if (sizeof(T1)<=sizeof(T2))
        return b < (T2)a ? (T1)b : a;
        return (T1)b < a ? (T1)b : a;
template <class T> inline const T& STDMAX(const T& a, const T& b)
    return a < b ? b : a;
#define RETURN_IF_NONZERO(x) size_t returnedValue = x; if (returnedValue) return returnedValue
// this version of the macro is fastest on Pentium 3 and Pentium 4 with MSVC 6 SP5 w/ Processor Pack
#define GETBYTE(x, y) (unsigned int)byte((x)>>(8*(y)))
// these may be faster on other CPUs/compilers
// #define GETBYTE(x, y) (unsigned int)(((x)>>(8*(y)))&255)
// #define GETBYTE(x, y) (((byte *)&(x))[y])
#define CRYPTOPP_GET_BYTE_AS_BYTE(x, y) byte((x)>>(8*(y)))
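// Illustrative sketch (not part of the original header): GETBYTE(x, y) extracts byte y of
// x, counting from the least significant end:
//   word32 w = 0x12345678;
//   unsigned int b0 = GETBYTE(w, 0);   // 0x78
//   unsigned int b3 = GETBYTE(w, 3);   // 0x12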
unsigned int Parity(T value)
    for (unsigned int i=8*sizeof(value)/2; i>0; i/=2)
    return (unsigned int)value&1;
unsigned int BytePrecision(const T &value)
    unsigned int l=0, h=8*sizeof(value);
        unsigned int t = (l+h)/2;
unsigned int BitPrecision(const T &value)
    unsigned int l=0, h=8*sizeof(value);
        unsigned int t = (l+h)/2;
inline T Crop(T value, size_t size)
    if (size < 8*sizeof(value))
        return T(value & ((T(1) << size) - 1));
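// Illustrative sketch (not part of the original header): Crop keeps only the low "size"
// bits of a value, while BytePrecision/BitPrecision return the number of significant
// bytes/bits via a binary search:
//   word32 x = Crop(word32(0xABCD), 8);   // x == 0xCD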
template <class T1, class T2>
inline bool SafeConvert(T1 from, T2 &to)
    if (from != to || (from > 0) != (to > 0))
inline size_t BitsToBytes(size_t bitCount)
    return ((bitCount+7)/(8));
inline size_t BytesToWords(size_t byteCount)
    return ((byteCount+WORD_SIZE-1)/WORD_SIZE);
inline size_t BitsToWords(size_t bitCount)
    return ((bitCount+WORD_BITS-1)/(WORD_BITS));
inline size_t BitsToDwords(size_t bitCount)
    return ((bitCount+2*WORD_BITS-1)/(2*WORD_BITS));
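// Illustrative sketch (not part of the original header): these helpers round up, e.g.
//   BitsToBytes(1) == 1;  BitsToBytes(9) == 2;
//   BitsToWords(1) == 1;  // one word of WORD_BITS bits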
CRYPTOPP_DLL void CRYPTOPP_API xorbuf(byte *buf, const byte *mask, size_t count);
CRYPTOPP_DLL void CRYPTOPP_API xorbuf(byte *output, const byte *input, const byte *mask, size_t count);
CRYPTOPP_DLL bool CRYPTOPP_API VerifyBufsEqual(const byte *buf1, const byte *buf2, size_t count);
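// Illustrative sketch (not part of the original header; the buffers are hypothetical):
//   byte a[16], b[16];                        // filled in elsewhere
//   xorbuf(a, b, 16);                         // a[i] ^= b[i] for i in [0, 16)
//   bool same = VerifyBufsEqual(a, b, 16);    // true when the buffers match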
inline bool IsPowerOf2(const T &n)
    return n > 0 && (n & (n-1)) == 0;
template <class T1, class T2>
inline T2 ModPowerOf2(const T1 &a, const T2 &b)
    assert(IsPowerOf2(b));
    return T2(a) & (b-1);
template <class T1, class T2>
inline T1 RoundDownToMultipleOf(const T1 &n, const T2 &m)
        return n - ModPowerOf2(n, m);
template <class T1, class T2>
inline T1 RoundUpToMultipleOf(const T1 &n, const T2 &m)
        throw InvalidArgument("RoundUpToMultipleOf: integer overflow");
    return RoundDownToMultipleOf(n+m-1, m);
inline unsigned int GetAlignmentOf(T *dummy=NULL)	// VC60 workaround
#ifdef CRYPTOPP_ALLOW_UNALIGNED_DATA_ACCESS
#if (_MSC_VER >= 1300)
#elif defined(__GNUC__)
    return __alignof__(T);
#elif CRYPTOPP_BOOL_SLOW_WORD64
    return UnsignedMin(4U, sizeof(T));
inline bool IsAlignedOn(const void *p, unsigned int alignment)
    return alignment==1 || (IsPowerOf2(alignment) ? ModPowerOf2((size_t)p, alignment) == 0 : (size_t)p % alignment == 0);
inline bool IsAligned(const void *p, T *dummy=NULL)	// VC60 workaround
    return IsAlignedOn(p, GetAlignmentOf<T>());
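// Illustrative sketch (not part of the original header; buffer is hypothetical):
//   byte buffer[64];
//   bool ok = IsAligned<word32>(buffer);         // true when buffer is word32-aligned
//   unsigned int a = GetAlignmentOf<word64>();   // result is platform/compiler dependent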
#ifdef IS_LITTLE_ENDIAN
typedef LittleEndian NativeByteOrder;
typedef BigEndian NativeByteOrder;
inline ByteOrder GetNativeByteOrder()
    return NativeByteOrder::ToEnum();
inline bool NativeByteOrderIs(ByteOrder order)
    return order == GetNativeByteOrder();
std::string IntToString(T a, unsigned int base = 10)
        a = 0-a;	// VC .NET does not like -a
        result = char((digit < 10 ? '0' : ('a' - 10)) + digit) + result;
        result = "-" + result;
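// Illustrative sketch (not part of the original header):
//   std::string s = IntToString(255);        // "255"
//   std::string h = IntToString(255, 16);    // "ff" (digits >= 10 use lowercase letters)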
template <class T1, class T2>
inline T1 SaturatingSubtract(const T1 &a, const T2 &b)
    return T1((a > b) ? (a - b) : 0);
inline CipherDir GetCipherDir(const T &obj)
    return obj.IsForwardTransformation() ? ENCRYPTION : DECRYPTION;
CRYPTOPP_DLL void CRYPTOPP_API CallNewHandler();
inline void IncrementCounterByOne(byte *inout, unsigned int s)
    for (int i=s-1, carry=1; i>=0 && carry; i--)
inline void IncrementCounterByOne(byte *output, const byte *input, unsigned int s)
    for (i=s-1, carry=1; i>=0 && carry; i--)
        carry = ((output[i] = input[i]+1) == 0);
    memcpy_s(output, s, input, i+1);
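// Illustrative sketch (not part of the original header): the counter is treated as a
// big-endian integer, so the last byte is incremented first and carries propagate left:
//   byte ctr[4] = {0x00, 0x00, 0x00, 0xFF};
//   IncrementCounterByOne(ctr, 4);   // ctr becomes 00 00 01 00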
// ************** rotate functions ***************
template <class T> inline T rotlFixed(T x, unsigned int y)
    assert(y < sizeof(T)*8);
    return T((x<<y) | (x>>(sizeof(T)*8-y)));
template <class T> inline T rotrFixed(T x, unsigned int y)
    assert(y < sizeof(T)*8);
    return T((x>>y) | (x<<(sizeof(T)*8-y)));
template <class T> inline T rotlVariable(T x, unsigned int y)
    assert(y < sizeof(T)*8);
    return T((x<<y) | (x>>(sizeof(T)*8-y)));
template <class T> inline T rotrVariable(T x, unsigned int y)
    assert(y < sizeof(T)*8);
    return T((x>>y) | (x<<(sizeof(T)*8-y)));
template <class T> inline T rotlMod(T x, unsigned int y)
    return T((x<<y) | (x>>(sizeof(T)*8-y)));
template <class T> inline T rotrMod(T x, unsigned int y)
    return T((x>>y) | (x<<(sizeof(T)*8-y)));
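// Illustrative sketch (not part of the original header): the *Fixed variants are intended
// for compile-time constant rotation amounts, *Variable for run-time amounts known to be
// in range, and *Mod reduces the amount modulo the word size first:
//   word32 r = rotlFixed(word32(0x80000001), 1);   // 0x00000003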
template<> inline word32 rotlFixed<word32>(word32 x, unsigned int y)
    assert(y < 8*sizeof(x));
    return y ? _lrotl(x, y) : x;
template<> inline word32 rotrFixed<word32>(word32 x, unsigned int y)
    assert(y < 8*sizeof(x));
    return y ? _lrotr(x, y) : x;
template<> inline word32 rotlVariable<word32>(word32 x, unsigned int y)
    assert(y < 8*sizeof(x));
template<> inline word32 rotrVariable<word32>(word32 x, unsigned int y)
    assert(y < 8*sizeof(x));
template<> inline word32 rotlMod<word32>(word32 x, unsigned int y)
template<> inline word32 rotrMod<word32>(word32 x, unsigned int y)
#endif // #ifdef _MSC_VER
#if _MSC_VER >= 1300 && !defined(__INTEL_COMPILER)
// Intel C++ Compiler 10.0 calls a function instead of using the rotate instruction for these intrinsics
template<> inline word64 rotlFixed<word64>(word64 x, unsigned int y)
    assert(y < 8*sizeof(x));
    return y ? _rotl64(x, y) : x;
template<> inline word64 rotrFixed<word64>(word64 x, unsigned int y)
    assert(y < 8*sizeof(x));
    return y ? _rotr64(x, y) : x;
template<> inline word64 rotlVariable<word64>(word64 x, unsigned int y)
    assert(y < 8*sizeof(x));
    return _rotl64(x, y);
template<> inline word64 rotrVariable<word64>(word64 x, unsigned int y)
    assert(y < 8*sizeof(x));
    return _rotr64(x, y);
template<> inline word64 rotlMod<word64>(word64 x, unsigned int y)
    return _rotl64(x, y);
template<> inline word64 rotrMod<word64>(word64 x, unsigned int y)
    return _rotr64(x, y);
#endif // #if _MSC_VER >= 1300 && !defined(__INTEL_COMPILER)
#if _MSC_VER >= 1400 && !defined(__INTEL_COMPILER)
// Intel C++ Compiler 10.0 gives undefined externals with these
template<> inline word16 rotlFixed<word16>(word16 x, unsigned int y)
    assert(y < 8*sizeof(x));
    return y ? _rotl16(x, y) : x;
template<> inline word16 rotrFixed<word16>(word16 x, unsigned int y)
    assert(y < 8*sizeof(x));
    return y ? _rotr16(x, y) : x;
template<> inline word16 rotlVariable<word16>(word16 x, unsigned int y)
    assert(y < 8*sizeof(x));
    return _rotl16(x, y);
template<> inline word16 rotrVariable<word16>(word16 x, unsigned int y)
    assert(y < 8*sizeof(x));
    return _rotr16(x, y);
template<> inline word16 rotlMod<word16>(word16 x, unsigned int y)
    return _rotl16(x, y);
template<> inline word16 rotrMod<word16>(word16 x, unsigned int y)
    return _rotr16(x, y);
template<> inline byte rotlFixed<byte>(byte x, unsigned int y)
    assert(y < 8*sizeof(x));
    return y ? _rotl8(x, y) : x;
template<> inline byte rotrFixed<byte>(byte x, unsigned int y)
    assert(y < 8*sizeof(x));
    return y ? _rotr8(x, y) : x;
template<> inline byte rotlVariable<byte>(byte x, unsigned int y)
    assert(y < 8*sizeof(x));
template<> inline byte rotrVariable<byte>(byte x, unsigned int y)
    assert(y < 8*sizeof(x));
template<> inline byte rotlMod<byte>(byte x, unsigned int y)
template<> inline byte rotrMod<byte>(byte x, unsigned int y)
#endif // #if _MSC_VER >= 1400
#if (defined(__MWERKS__) && TARGET_CPU_PPC)
template<> inline word32 rotlFixed<word32>(word32 x, unsigned int y)
    return y ? __rlwinm(x,y,0,31) : x;
template<> inline word32 rotrFixed<word32>(word32 x, unsigned int y)
    return y ? __rlwinm(x,32-y,0,31) : x;
template<> inline word32 rotlVariable<word32>(word32 x, unsigned int y)
    return (__rlwnm(x,y,0,31));
template<> inline word32 rotrVariable<word32>(word32 x, unsigned int y)
    return (__rlwnm(x,32-y,0,31));
template<> inline word32 rotlMod<word32>(word32 x, unsigned int y)
    return (__rlwnm(x,y,0,31));
template<> inline word32 rotrMod<word32>(word32 x, unsigned int y)
    return (__rlwnm(x,32-y,0,31));
#endif // #if (defined(__MWERKS__) && TARGET_CPU_PPC)
// ************** endian reversal ***************
inline unsigned int GetByte(ByteOrder order, T value, unsigned int index)
    if (order == LITTLE_ENDIAN_ORDER)
        return GETBYTE(value, index);
        return GETBYTE(value, sizeof(T)-index-1);
inline byte ByteReverse(byte value)
inline word16 ByteReverse(word16 value)
#ifdef CRYPTOPP_BYTESWAP_AVAILABLE
    return bswap_16(value);
#elif defined(_MSC_VER) && _MSC_VER >= 1300
    return _byteswap_ushort(value);
    return rotlFixed(value, 8U);
inline word32 ByteReverse(word32 value)
#if defined(__GNUC__) && defined(CRYPTOPP_X86_ASM_AVAILABLE)
    __asm__ ("bswap %0" : "=r" (value) : "0" (value));
#elif defined(CRYPTOPP_BYTESWAP_AVAILABLE)
    return bswap_32(value);
#elif defined(__MWERKS__) && TARGET_CPU_PPC
    return (word32)__lwbrx(&value,0);
#elif _MSC_VER >= 1400 || (_MSC_VER >= 1300 && !defined(_DLL))
    return _byteswap_ulong(value);
#elif CRYPTOPP_FAST_ROTATE(32)
    // 5 instructions with rotate instruction, 9 without
    return (rotrFixed(value, 8U) & 0xff00ff00) | (rotlFixed(value, 8U) & 0x00ff00ff);
    // 6 instructions with rotate instruction, 8 without
    value = ((value & 0xFF00FF00) >> 8) | ((value & 0x00FF00FF) << 8);
    return rotlFixed(value, 16U);
inline word64 ByteReverse(word64 value)
#if defined(__GNUC__) && defined(CRYPTOPP_X86_ASM_AVAILABLE) && defined(__x86_64__)
    __asm__ ("bswap %0" : "=r" (value) : "0" (value));
#elif defined(CRYPTOPP_BYTESWAP_AVAILABLE)
    return bswap_64(value);
#elif defined(_MSC_VER) && _MSC_VER >= 1300
    return _byteswap_uint64(value);
#elif CRYPTOPP_BOOL_SLOW_WORD64
    return (word64(ByteReverse(word32(value))) << 32) | ByteReverse(word32(value>>32));
    value = ((value & W64LIT(0xFF00FF00FF00FF00)) >> 8) | ((value & W64LIT(0x00FF00FF00FF00FF)) << 8);
    value = ((value & W64LIT(0xFFFF0000FFFF0000)) >> 16) | ((value & W64LIT(0x0000FFFF0000FFFF)) << 16);
    return rotlFixed(value, 32U);
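// Illustrative sketch (not part of the original header): ByteReverse swaps byte order
// unconditionally, regardless of host endianness:
//   word32 v = ByteReverse(word32(0x01020304));   // 0x04030201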
inline byte BitReverse(byte value)
    value = ((value & 0xAA) >> 1) | ((value & 0x55) << 1);
    value = ((value & 0xCC) >> 2) | ((value & 0x33) << 2);
    return rotlFixed(value, 4U);
inline word16 BitReverse(word16 value)
    value = ((value & 0xAAAA) >> 1) | ((value & 0x5555) << 1);
    value = ((value & 0xCCCC) >> 2) | ((value & 0x3333) << 2);
    value = ((value & 0xF0F0) >> 4) | ((value & 0x0F0F) << 4);
    return ByteReverse(value);
inline word32 BitReverse(word32 value)
    value = ((value & 0xAAAAAAAA) >> 1) | ((value & 0x55555555) << 1);
    value = ((value & 0xCCCCCCCC) >> 2) | ((value & 0x33333333) << 2);
    value = ((value & 0xF0F0F0F0) >> 4) | ((value & 0x0F0F0F0F) << 4);
    return ByteReverse(value);
inline word64 BitReverse(word64 value)
#if CRYPTOPP_BOOL_SLOW_WORD64
    return (word64(BitReverse(word32(value))) << 32) | BitReverse(word32(value>>32));
    value = ((value & W64LIT(0xAAAAAAAAAAAAAAAA)) >> 1) | ((value & W64LIT(0x5555555555555555)) << 1);
    value = ((value & W64LIT(0xCCCCCCCCCCCCCCCC)) >> 2) | ((value & W64LIT(0x3333333333333333)) << 2);
    value = ((value & W64LIT(0xF0F0F0F0F0F0F0F0)) >> 4) | ((value & W64LIT(0x0F0F0F0F0F0F0F0F)) << 4);
    return ByteReverse(value);
inline T BitReverse(T value)
        return (T)BitReverse((byte)value);
    else if (sizeof(T) == 2)
        return (T)BitReverse((word16)value);
    else if (sizeof(T) == 4)
        return (T)BitReverse((word32)value);
        assert(sizeof(T) == 8);
        return (T)BitReverse((word64)value);
inline T ConditionalByteReverse(ByteOrder order, T value)
    return NativeByteOrderIs(order) ? value : ByteReverse(value);
void ByteReverse(T *out, const T *in, size_t byteCount)
    assert(byteCount % sizeof(T) == 0);
    size_t count = byteCount/sizeof(T);
    for (size_t i=0; i<count; i++)
        out[i] = ByteReverse(in[i]);
inline void ConditionalByteReverse(ByteOrder order, T *out, const T *in, size_t byteCount)
    if (!NativeByteOrderIs(order))
        ByteReverse(out, in, byteCount);
        memcpy_s(out, byteCount, in, byteCount);
inline void GetUserKey(ByteOrder order, T *out, size_t outlen, const byte *in, size_t inlen)
    const size_t U = sizeof(T);
    assert(inlen <= outlen*U);
    memcpy_s(out, outlen*U, in, inlen);
    memset_z((byte *)out+inlen, 0, outlen*U-inlen);
    ConditionalByteReverse(order, out, out, RoundUpToMultipleOf(inlen, U));
#ifndef CRYPTOPP_ALLOW_UNALIGNED_DATA_ACCESS
inline byte UnalignedGetWordNonTemplate(ByteOrder order, const byte *block, const byte *)
inline word16 UnalignedGetWordNonTemplate(ByteOrder order, const byte *block, const word16 *)
    return (order == BIG_ENDIAN_ORDER)
        ? block[1] | (block[0] << 8)
        : block[0] | (block[1] << 8);
inline word32 UnalignedGetWordNonTemplate(ByteOrder order, const byte *block, const word32 *)
    return (order == BIG_ENDIAN_ORDER)
        ? word32(block[3]) | (word32(block[2]) << 8) | (word32(block[1]) << 16) | (word32(block[0]) << 24)
        : word32(block[0]) | (word32(block[1]) << 8) | (word32(block[2]) << 16) | (word32(block[3]) << 24);
inline word64 UnalignedGetWordNonTemplate(ByteOrder order, const byte *block, const word64 *)
    return (order == BIG_ENDIAN_ORDER)
        (word64(block[6]) << 8) |
        (word64(block[5]) << 16) |
        (word64(block[4]) << 24) |
        (word64(block[3]) << 32) |
        (word64(block[2]) << 40) |
        (word64(block[1]) << 48) |
        (word64(block[0]) << 56))
        (word64(block[1]) << 8) |
        (word64(block[2]) << 16) |
        (word64(block[3]) << 24) |
        (word64(block[4]) << 32) |
        (word64(block[5]) << 40) |
        (word64(block[6]) << 48) |
        (word64(block[7]) << 56));
inline void UnalignedPutWordNonTemplate(ByteOrder order, byte *block, byte value, const byte *xorBlock)
    block[0] = xorBlock ? (value ^ xorBlock[0]) : value;
inline void UnalignedPutWordNonTemplate(ByteOrder order, byte *block, word16 value, const byte *xorBlock)
    if (order == BIG_ENDIAN_ORDER)
            block[0] = xorBlock[0] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
            block[1] = xorBlock[1] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
            block[0] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
            block[1] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
            block[0] = xorBlock[0] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
            block[1] = xorBlock[1] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
            block[0] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
            block[1] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
inline void UnalignedPutWordNonTemplate(ByteOrder order, byte *block, word32 value, const byte *xorBlock)
    if (order == BIG_ENDIAN_ORDER)
            block[0] = xorBlock[0] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 3);
            block[1] = xorBlock[1] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 2);
            block[2] = xorBlock[2] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
            block[3] = xorBlock[3] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
            block[0] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 3);
            block[1] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 2);
            block[2] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
            block[3] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
            block[0] = xorBlock[0] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
            block[1] = xorBlock[1] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
            block[2] = xorBlock[2] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 2);
            block[3] = xorBlock[3] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 3);
            block[0] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
            block[1] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
            block[2] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 2);
            block[3] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 3);
inline void UnalignedPutWordNonTemplate(ByteOrder order, byte *block, word64 value, const byte *xorBlock)
    if (order == BIG_ENDIAN_ORDER)
            block[0] = xorBlock[0] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 7);
            block[1] = xorBlock[1] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 6);
            block[2] = xorBlock[2] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 5);
            block[3] = xorBlock[3] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 4);
            block[4] = xorBlock[4] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 3);
            block[5] = xorBlock[5] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 2);
            block[6] = xorBlock[6] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
            block[7] = xorBlock[7] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
            block[0] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 7);
            block[1] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 6);
            block[2] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 5);
            block[3] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 4);
            block[4] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 3);
            block[5] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 2);
            block[6] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
            block[7] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
            block[0] = xorBlock[0] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
            block[1] = xorBlock[1] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
            block[2] = xorBlock[2] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 2);
            block[3] = xorBlock[3] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 3);
            block[4] = xorBlock[4] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 4);
            block[5] = xorBlock[5] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 5);
            block[6] = xorBlock[6] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 6);
            block[7] = xorBlock[7] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 7);
            block[0] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
            block[1] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
            block[2] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 2);
            block[3] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 3);
            block[4] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 4);
            block[5] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 5);
            block[6] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 6);
            block[7] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 7);
#endif // #ifndef CRYPTOPP_ALLOW_UNALIGNED_DATA_ACCESS
inline T GetWord(bool assumeAligned, ByteOrder order, const byte *block)
#ifndef CRYPTOPP_ALLOW_UNALIGNED_DATA_ACCESS
        return UnalignedGetWordNonTemplate(order, block, (T*)NULL);
    assert(IsAligned<T>(block));
    return ConditionalByteReverse(order, *reinterpret_cast<const T *>(block));
inline void GetWord(bool assumeAligned, ByteOrder order, T &result, const byte *block)
    result = GetWord<T>(assumeAligned, order, block);
inline void PutWord(bool assumeAligned, ByteOrder order, byte *block, T value, const byte *xorBlock = NULL)
#ifndef CRYPTOPP_ALLOW_UNALIGNED_DATA_ACCESS
        return UnalignedPutWordNonTemplate(order, block, value, xorBlock);
    assert(IsAligned<T>(block));
    assert(IsAligned<T>(xorBlock));
    *reinterpret_cast<T *>(block) = ConditionalByteReverse(order, value) ^ (xorBlock ? *reinterpret_cast<const T *>(xorBlock) : 0);
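// Illustrative sketch (not part of the original header; buf is hypothetical):
//   byte buf[4] = {0x01, 0x02, 0x03, 0x04};
//   word32 w = GetWord<word32>(false, BIG_ENDIAN_ORDER, buf);   // 0x01020304
//   PutWord(false, LITTLE_ENDIAN_ORDER, buf, w);                 // buf becomes 04 03 02 01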
template <class T, class B, bool A=false>
    GetBlock(const void *block)
        : m_block((const byte *)block) {}
    inline GetBlock<T, B, A> & operator()(U &x)
        CRYPTOPP_COMPILE_ASSERT(sizeof(U) >= sizeof(T));
        x = GetWord<T>(A, B::ToEnum(), m_block);
        m_block += sizeof(T);
    const byte *m_block;
template <class T, class B, bool A=false>
    PutBlock(const void *xorBlock, void *block)
        : m_xorBlock((const byte *)xorBlock), m_block((byte *)block) {}
    inline PutBlock<T, B, A> & operator()(U x)
        PutWord(A, B::ToEnum(), m_block, (T)x, m_xorBlock);
        m_block += sizeof(T);
            m_xorBlock += sizeof(T);
    const byte *m_xorBlock;
template <class T, class B, bool GA=false, bool PA=false>
struct BlockGetAndPut
    // function needed because of C++ grammatical ambiguity between expression-statements and declarations
    static inline GetBlock<T, B, GA> Get(const void *block) {return GetBlock<T, B, GA>(block);}
    typedef PutBlock<T, B, PA> Put;
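// Illustrative sketch (not part of the original header; inBlock/outBlock/xorBlock are
// hypothetical buffers from a cipher's ProcessAndXorBlock-style routine):
//   typedef BlockGetAndPut<word32, BigEndian> Block;
//   word32 a, b;
//   Block::Get(inBlock)(a)(b);               // read two big-endian words
//   Block::Put(xorBlock, outBlock)(a)(b);    // write them back, XORing when xorBlock != NULL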
std::string WordToString(T value, ByteOrder order = BIG_ENDIAN_ORDER)
    if (!NativeByteOrderIs(order))
        value = ByteReverse(value);
    return std::string((char *)&value, sizeof(value));
T StringToWord(const std::string &str, ByteOrder order = BIG_ENDIAN_ORDER)
    memcpy_s(&value, sizeof(value), str.data(), UnsignedMin(str.size(), sizeof(value)));
    return NativeByteOrderIs(order) ? value : ByteReverse(value);
// ************** help remove warning on g++ ***************
template <bool overflow> struct SafeShifter;
template<> struct SafeShifter<true>
    static inline T RightShift(T value, unsigned int bits)
    static inline T LeftShift(T value, unsigned int bits)
template<> struct SafeShifter<false>
    static inline T RightShift(T value, unsigned int bits)
        return value >> bits;
    static inline T LeftShift(T value, unsigned int bits)
        return value << bits;
template <unsigned int bits, class T>
inline T SafeRightShift(T value)
    return SafeShifter<(bits>=(8*sizeof(T)))>::RightShift(value, bits);
template <unsigned int bits, class T>
inline T SafeLeftShift(T value)
    return SafeShifter<(bits>=(8*sizeof(T)))>::LeftShift(value, bits);
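// Illustrative sketch (not part of the original header): the shift amount is compared
// against the type width at compile time, so an out-of-range shift selects the overflow
// specialization instead of invoking undefined behavior and triggering a g++ warning:
//   word32 a = SafeRightShift<8>(word32(0x12345678));   // 0x00123456
//   word32 b = SafeRightShift<40>(word32(0x12345678));  // handled by SafeShifter<true>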
// ************** use one buffer for multiple data members ***************
#define CRYPTOPP_BLOCK_1(n, t, s) t* m_##n() {return (t *)(m_aggregate+0);} size_t SS1() {return sizeof(t)*(s);} size_t m_##n##Size() {return (s);}
#define CRYPTOPP_BLOCK_2(n, t, s) t* m_##n() {return (t *)(m_aggregate+SS1());} size_t SS2() {return SS1()+sizeof(t)*(s);} size_t m_##n##Size() {return (s);}
#define CRYPTOPP_BLOCK_3(n, t, s) t* m_##n() {return (t *)(m_aggregate+SS2());} size_t SS3() {return SS2()+sizeof(t)*(s);} size_t m_##n##Size() {return (s);}
#define CRYPTOPP_BLOCK_4(n, t, s) t* m_##n() {return (t *)(m_aggregate+SS3());} size_t SS4() {return SS3()+sizeof(t)*(s);} size_t m_##n##Size() {return (s);}
#define CRYPTOPP_BLOCK_5(n, t, s) t* m_##n() {return (t *)(m_aggregate+SS4());} size_t SS5() {return SS4()+sizeof(t)*(s);} size_t m_##n##Size() {return (s);}
#define CRYPTOPP_BLOCK_6(n, t, s) t* m_##n() {return (t *)(m_aggregate+SS5());} size_t SS6() {return SS5()+sizeof(t)*(s);} size_t m_##n##Size() {return (s);}
#define CRYPTOPP_BLOCK_7(n, t, s) t* m_##n() {return (t *)(m_aggregate+SS6());} size_t SS7() {return SS6()+sizeof(t)*(s);} size_t m_##n##Size() {return (s);}
#define CRYPTOPP_BLOCK_8(n, t, s) t* m_##n() {return (t *)(m_aggregate+SS7());} size_t SS8() {return SS7()+sizeof(t)*(s);} size_t m_##n##Size() {return (s);}
#define CRYPTOPP_BLOCKS_END(i) size_t SST() {return SS##i();} void AllocateBlocks() {m_aggregate.New(SST());} AlignedSecByteBlock m_aggregate;
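// Illustrative sketch (not part of the original header; the class and member names are
// hypothetical): a class lays out several arrays back-to-back in one aligned allocation.
//   class ExampleCipher
//   {
//       CRYPTOPP_BLOCK_1(subKeys, word32, 44);   // word32 *m_subKeys(), 44 elements at offset 0
//       CRYPTOPP_BLOCK_2(buffer, byte, 16);      // byte *m_buffer(), placed after the sub keys
//       CRYPTOPP_BLOCKS_END(2)                   // defines AllocateBlocks() and m_aggregate
//   };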