[ruby.git] / missing / crypt.h
#ifndef CRYPT_H
#define CRYPT_H 1
/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Tom Truscott.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* ===== Configuration ==================== */

#ifdef CHAR_BITS
#if CHAR_BITS != 8
#error C_block structure assumes 8 bit characters
#endif
#endif

#ifndef LONG_LONG
# if SIZEOF_LONG_LONG > 0
#   define LONG_LONG long long
# elif SIZEOF___INT64 > 0
#   define HAVE_LONG_LONG 1
#   define LONG_LONG __int64
#   undef SIZEOF_LONG_LONG
#   define SIZEOF_LONG_LONG SIZEOF___INT64
# endif
#endif

/*
 * define "LONG_IS_32_BITS" only if sizeof(long)==4.
 * This avoids use of bit fields (your compiler may be sloppy with them).
 */
#if SIZEOF_LONG == 4
#define LONG_IS_32_BITS
#endif

/*
 * define "B64" to be the declaration for a 64 bit integer.
 * XXX this feature is currently unused, see "endian" comment below.
 */
#if SIZEOF_LONG == 8
#define B64 long
#elif SIZEOF_LONG_LONG == 8
#define B64 LONG_LONG
#endif

/*
 * define "LARGEDATA" to get faster permutations, by using about 72 kilobytes
 * of lookup tables.  This speeds up des_setkey() and des_cipher(), but has
 * little effect on crypt().
 */
#if defined(notdef)
#define LARGEDATA
#endif

/* compile with "-DSTATIC=int" when profiling */
#ifndef STATIC
#define STATIC static
#endif

/* ==================================== */

/*
 * Cipher-block representation (Bob Baldwin):
 *
 * DES operates on groups of 64 bits, numbered 1..64 (sigh).  One
 * representation is to store one bit per byte in an array of bytes.  Bit N of
 * the NBS spec is stored as the LSB of the Nth byte (index N-1) in the array.
 * Another representation stores the 64 bits in 8 bytes, with bits 1..8 in the
 * first byte, 9..16 in the second, and so on.  The DES spec apparently has
 * bit 1 in the MSB of the first byte, but that is particularly noxious so we
 * bit-reverse each byte so that bit 1 is the LSB of the first byte, bit 8 is
 * the MSB of the first byte.  Specifically, the 64-bit input data and key are
 * converted to LSB format, and the output 64-bit block is converted back into
 * MSB format.
 *
 * DES operates internally on groups of 32 bits which are expanded to 48 bits
 * by permutation E and shrunk back to 32 bits by the S boxes.  To speed up
 * the computation, the expansion is applied only once, the expanded
 * representation is maintained during the encryption, and a compression
 * permutation is applied only at the end.  To speed up the S-box lookups,
 * the 48 bits are maintained as eight 6 bit groups, one per byte, which
 * directly feed the eight S-boxes.  Within each byte, the 6 bits are the
 * most significant ones.  The low two bits of each byte are zero.  (Thus,
 * bit 1 of the 48 bit E expansion is stored as the "4"-valued bit of the
 * first byte in the eight byte representation, bit 2 of the 48 bit value is
 * the "8"-valued bit, and so on.)  In fact, a combined "SPE"-box lookup is
 * used, in which the output is the 64 bit result of an S-box lookup which
 * has been permuted by P and expanded by E, and is ready for use in the next
 * iteration.  Two 32-bit wide tables, SPE[0] and SPE[1], are used for this
 * lookup.  Since each byte in the 48 bit path is a multiple of four, indexed
 * lookup of SPE[0] and SPE[1] is simple and fast.  The key schedule and
 * "salt" are also converted to this 8*(6+2) format.  The SPE table size is
 * 8*64*8 = 4K bytes.
 *
 * To speed up bit-parallel operations (such as XOR), the 8 byte
 * representation is "union"ed with 32 bit values "i0" and "i1", and, on
 * machines which support it, a 64 bit value "b64".  This data structure,
 * "C_block", has two problems.  First, alignment restrictions must be
 * honored.  Second, the byte-order (e.g. little-endian or big-endian) of
 * the architecture becomes visible.
 *
 * The byte-order problem is unfortunate, since on the one hand it is good
 * to have a machine-independent C_block representation (bits 1..8 in the
 * first byte, etc.), and on the other hand it is good for the LSB of the
 * first byte to be the LSB of i0.  We cannot have both these things, so we
 * currently use the "little-endian" representation and avoid any multi-byte
 * operations that depend on byte order.  This largely precludes use of the
 * 64-bit datatype since the relative order of i0 and i1 is unknown.  It
 * also inhibits grouping the SPE table to look up 12 bits at a time.  (The
 * 12 bits can be stored in a 16-bit field with 3 low-order zeroes and 1
 * high-order zero, providing fast indexing into a 64-bit wide SPE.)  On the
 * other hand, 64-bit datatypes are currently rare, and a 12-bit SPE lookup
 * requires a 128 kilobyte table, so perhaps this is not a big loss.
 *
 * Permutation representation (Jim Gillogly):
 *
 * A transformation is defined by its effect on each of the 8 bytes of the
 * 64-bit input.  For each byte we give a 64-bit output that has the bits in
 * the input distributed appropriately.  The transformation is then the OR
 * of the 8 sets of 64-bits.  This uses 8*256*8 = 16K bytes of storage for
 * each transformation.  Unless LARGEDATA is defined, however, a more compact
 * table is used which looks up 16 4-bit "chunks" rather than 8 8-bit chunks.
 * The smaller table uses 16*16*8 = 2K bytes for each transformation.  This
 * is slower but tolerable, particularly for password encryption in which
 * the SPE transformation is iterated many times.  The small tables total 9K
 * bytes, the large tables total 72K bytes.
 *
 * The transformations used are:
 *    IE3264: MSB->LSB conversion, initial permutation, and expansion.
 *        This is done by collecting the 32 even-numbered bits and applying
 *        a 32->64 bit transformation, and then collecting the 32 odd-numbered
 *        bits and applying the same transformation.  Since there are only
 *        32 input bits, the IE3264 transformation table is half the size of
 *        the usual table.
 *    CF6464: Compression, final permutation, and LSB->MSB conversion.
 *        This is done by two trivial 48->32 bit compressions to obtain
 *        a 64-bit block (the bit numbering is given in the "CIFP" table)
 *        followed by a 64->64 bit "cleanup" transformation.  (It would
 *        be possible to group the bits in the 64-bit block so that 2
 *        identical 32->32 bit transformations could be used instead,
 *        saving a factor of 4 in space and possibly 2 in time, but
 *        byte-ordering and other complications rear their ugly heads.
 *        Similar opportunities/problems arise in the key schedule
 *        transforms.)
 *    PC1ROT: MSB->LSB, PC1 permutation, rotate, and PC2 permutation.
 *        This admittedly baroque 64->64 bit transformation is used to
 *        produce the first code (in 8*(6+2) format) of the key schedule.
 *    PC2ROT[0]: Inverse PC2 permutation, rotate, and PC2 permutation.
 *        It would be possible to define 15 more transformations, each
 *        with a different rotation, to generate the entire key schedule.
 *        To save space, however, we instead permute each code into the
 *        next by using a transformation that "undoes" the PC2 permutation,
 *        rotates the code, and then applies PC2.  Unfortunately, PC2
 *        transforms 56 bits into 48 bits, dropping 8 bits, so PC2 is not
 *        invertible.  We get around that problem by using a modified PC2
 *        which retains the 8 otherwise-lost bits in the unused low-order
 *        bits of each byte.  The low-order bits are cleared when the
 *        codes are stored into the key schedule.
 *    PC2ROT[1]: Same as PC2ROT[0], but with two rotations.
 *        This is faster than applying PC2ROT[0] twice.
 *
 * The Bell Labs "salt" (Bob Baldwin):
 *
 * The salting is a simple permutation applied to the 48-bit result of E.
 * Specifically, if bit i (1 <= i <= 24) of the salt is set then bits i and
 * i+24 of the result are swapped.  The salt is thus a 24 bit number, with
 * 16777216 possible values.  (The original salt was 12 bits and could not
 * swap bits 13..24 with 36..48.)
 *
 * It is possible, but ugly, to warp the SPE table to account for the salt
 * permutation.  Fortunately, the conditional bit swapping requires only
 * about four machine instructions and can be done on-the-fly with about an
 * 8% performance penalty.
 */
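
/*
 * Illustrative sketch (added for exposition, not part of the original
 * header): the conditional bit swap described above can be done with the
 * classic XOR-swap-under-mask idiom.  The helper name and the split of the
 * E result into two 24-bit halves are assumptions made for this example.
 */
#if 0	/* example only, never compiled */
static void
salt_swap(unsigned long *e_lo, unsigned long *e_hi, unsigned long salt)
{
	/* bit positions where the two halves differ AND the salt bit is set */
	unsigned long t = (*e_lo ^ *e_hi) & salt;

	*e_lo ^= t;	/* flipping both halves at those positions swaps them */
	*e_hi ^= t;
}
#endif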

typedef union {
	unsigned char b[8];
	struct {
#if defined(LONG_IS_32_BITS)
		/* long is often faster than a 32-bit bit field */
		long i0;
		long i1;
#else
		long i0: 32;
		long i1: 32;
#endif
	} b32;
#if defined(B64)
	B64 b64;
#endif
} C_block;
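
/*
 * Illustrative sketch (added for exposition, not part of the original
 * header): the union lets the same 64 bits be viewed byte-wise (b[]) or
 * word-wise (b32), so a whole-block XOR can be done as two 32-bit XORs.
 * The helper name is an assumption made for this example.
 */
#if 0	/* example only, never compiled */
static void
xor_block(C_block *dst, const C_block *x, const C_block *y)
{
	/* two 32-bit operations instead of eight byte operations */
	dst->b32.i0 = x->b32.i0 ^ y->b32.i0;
	dst->b32.i1 = x->b32.i1 ^ y->b32.i1;
}
#endif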

#if defined(LARGEDATA)
/* Waste memory like crazy.  Also, do permutations in line */
#define LGCHUNKBITS 3
#define CHUNKBITS (1<<LGCHUNKBITS)
#else
/* "small data" */
#define LGCHUNKBITS 2
#define CHUNKBITS (1<<LGCHUNKBITS)
#endif
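
/*
 * Worked numbers (added for exposition): with the "small data" setting,
 * CHUNKBITS is 1<<2 = 4, so a 64-bit transformation is looked up in
 * 64/4 = 16 four-bit chunks, giving 16*16*8 = 2K bytes per table; with
 * LARGEDATA, CHUNKBITS is 8, giving 8 chunks of 8 bits and 8*256*8 = 16K
 * bytes per table, matching the sizes quoted in the comment above.
 */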

struct crypt_data {
	/* The Key Schedule, filled in by des_setkey() or setkey(). */
#define KS_SIZE 16
	C_block KS[KS_SIZE];

	/* ==================================== */

	char cryptresult[1+4+4+11+1];	/* encrypted result */
};

char *crypt(const char *key, const char *setting);
void setkey(const char *key);
void encrypt(char *block, int flag);

char *crypt_r(const char *key, const char *setting, struct crypt_data *data);
void setkey_r(const char *key, struct crypt_data *data);
void encrypt_r(char *block, int flag, struct crypt_data *data);
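
/*
 * Illustrative sketch (added for exposition, not part of the original
 * header): typical use of the reentrant interface.  Each caller supplies
 * its own struct crypt_data, so no static state is shared; the result is
 * assumed to be stored in data.cryptresult (see the struct above).  The
 * salt string "ab" and the function name are assumptions made for this
 * example.
 */
#if 0	/* example only, never compiled */
#include <stdio.h>

static void
example(void)
{
	struct crypt_data data;
	char *hash = crypt_r("password", "ab", &data);

	if (hash != NULL)
		puts(hash);	/* result begins with the two-character salt */
}
#endif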

#endif /* CRYPT_H */