/*-
 * Copyright (c) 1990, 1993, 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Margo Seltzer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#if defined(LIBC_SCCS) && !defined(lint)
static char sccsid[] = "@(#)hash.c 8.9 (Berkeley) 6/16/94";
#endif /* LIBC_SCCS and not lint */
#include <sys/param.h>
static int   alloc_segs __P((HTAB *, int));
static int   flush_meta __P((HTAB *));
static int   hash_access __P((HTAB *, ACTION, DBT *, DBT *));
static int   hash_close __P((DB *));
static int   hash_delete __P((const DB *, const DBT *, u_int32_t));
static int   hash_fd __P((const DB *));
static int   hash_get __P((const DB *, const DBT *, DBT *, u_int32_t));
static int   hash_put __P((const DB *, DBT *, const DBT *, u_int32_t));
static void *hash_realloc __P((SEGMENT **, int, int));
static int   hash_seq __P((const DB *, DBT *, DBT *, u_int32_t));
static int   hash_sync __P((const DB *, u_int32_t));
static int   hdestroy __P((HTAB *));
static HTAB *init_hash __P((HTAB *, const char *, HASHINFO *));
static int   init_htab __P((HTAB *, int));
#if BYTE_ORDER == LITTLE_ENDIAN
static void  swap_header __P((HTAB *));
static void  swap_header_copy __P((HASHHDR *, HASHHDR *));
#endif
/* Fast arithmetic, relying on powers of 2. */
#define MOD(x, y)		((x) & ((y) - 1))
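/*
 * MOD(x, y) matches x % y only when y is a power of two; for example,
 * MOD(13, 8) == (13 & 7) == 5.  Bucket counts, segment sizes and page sizes
 * in this package are kept as powers of two so the cheaper bitwise form can
 * be used throughout.
 */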
#define RETURN_ERROR(ERR, LOC)	{ save_errno = ERR; goto LOC; }
#ifdef HASH_STATISTICS
int hash_accesses, hash_collisions, hash_expansions, hash_overflows;
#endif
/************************** INTERFACE ROUTINES ***************************/
extern DB *
__hash_open(file, flags, mode, info, dflags)
	const char *file;
	int flags, mode, dflags;
	const HASHINFO *info;	/* Special directives for create */
{
	HTAB *hashp;
	struct stat statbuf;
	DB *dbp;
	int bpages, hdrsize, new_table, nsegs, save_errno;

	if ((flags & O_ACCMODE) == O_WRONLY) {
		errno = EINVAL;
		return (NULL);
	}

	if (!(hashp = (HTAB *)calloc(1, sizeof(HTAB))))
		return (NULL);

	/*
	 * Even if user wants write only, we need to be able to read
	 * the actual file, so we need to open it read/write. But, the
	 * field in the hashp structure needs to be accurate so that
	 * we can check accesses.
	 */
	hashp->flags = flags;
	new_table = 0;
	if (!file || (flags & O_TRUNC) ||
	    (stat(file, &statbuf) && (errno == ENOENT))) {
		if (errno == ENOENT)
			errno = 0; /* Just in case someone looks at errno */
		new_table = 1;
	}
	if (file) {
		if ((hashp->fp = open(file, flags, mode)) == -1)
			RETURN_ERROR(errno, error0);
		(void)fcntl(hashp->fp, F_SETFD, 1);
	}
	if (new_table) {
		if (!(hashp = init_hash(hashp, file, (HASHINFO *)info)))
			RETURN_ERROR(errno, error1);
	} else {
		/* Table already exists */
		if (info && info->hash)
			hashp->hash = info->hash;
		else
			hashp->hash = __default_hash;

		hdrsize = read(hashp->fp, &hashp->hdr, sizeof(HASHHDR));
#if BYTE_ORDER == LITTLE_ENDIAN
		swap_header(hashp);
#endif
		if (hdrsize == -1)
			RETURN_ERROR(errno, error1);
		if (hdrsize != sizeof(HASHHDR))
			RETURN_ERROR(EFTYPE, error1);
		/* Verify file type, versions and hash function */
		if (hashp->MAGIC != HASHMAGIC)
			RETURN_ERROR(EFTYPE, error1);
#define	OLDHASHVERSION	1
		if (hashp->VERSION != HASHVERSION &&
		    hashp->VERSION != OLDHASHVERSION)
			RETURN_ERROR(EFTYPE, error1);
		if (hashp->hash(CHARKEY, sizeof(CHARKEY))
		    != (u_int32_t)hashp->H_CHARKEY)
			RETURN_ERROR(EFTYPE, error1);
		/*
		 * Figure out how many segments we need.  Max_Bucket is the
		 * maximum bucket number, so the number of buckets is
		 * max_bucket + 1.
		 */
		nsegs = (hashp->MAX_BUCKET + 1 + hashp->SGSIZE - 1) /
		    hashp->SGSIZE;
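		/*
		 * The expression above is a ceiling division (illustrative
		 * numbers): with a segment size of 256 and MAX_BUCKET of 300
		 * there are 301 buckets, so (301 + 256 - 1) / 256 = 2
		 * segments are needed.
		 */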
		if (alloc_segs(hashp, nsegs))
			/*
			 * If alloc_segs fails, table will have been destroyed
			 * and errno will have been set.
			 */
			return (NULL);
		/* Read in bitmaps */
		bpages = (hashp->SPARES[hashp->OVFL_POINT] +
		    (hashp->BSIZE << BYTE_SHIFT) - 1) >>
		    (hashp->BSHIFT + BYTE_SHIFT);
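		/*
		 * Descriptive note: BSIZE << BYTE_SHIFT is the number of bits
		 * held by one bitmap page (BYTE_SHIFT being the log2 of the
		 * bits in a byte), and shifting right by BSHIFT + BYTE_SHIFT
		 * divides by that amount, so bpages is SPARES[OVFL_POINT]
		 * overflow-map bits rounded up to whole pages.
		 */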
		hashp->nmaps = bpages;
		(void)memset(&hashp->mapp[0], 0, bpages * sizeof(u_int32_t *));
	}
	/* Initialize Buffer Manager */
	if (info && info->cachesize)
		__buf_init(hashp, info->cachesize);
	else
		__buf_init(hashp, DEF_BUFSIZE);

	hashp->new_file = new_table;
	hashp->save_file = file && (hashp->flags & O_ACCMODE) != O_RDONLY;
	hashp->cbucket = -1;
	if (!(dbp = (DB *)malloc(sizeof(DB)))) {
		save_errno = errno;
		hdestroy(hashp);
		errno = save_errno;
		return (NULL);
	}
	dbp->internal = hashp;
	dbp->close = hash_close;
	dbp->del = hash_delete;
	dbp->fd = hash_fd;
	dbp->get = hash_get;
	dbp->put = hash_put;
	dbp->seq = hash_seq;
	dbp->sync = hash_sync;
	dbp->type = DB_HASH;
#ifdef DEBUG
	(void)fprintf(stderr,
"%s\n%s%x\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%x\n%s%x\n%s%d\n%s%d\n",
	    "init_htab:",
	    "TABLE POINTER   ", hashp,
	    "BUCKET SIZE     ", hashp->BSIZE,
	    "BUCKET SHIFT    ", hashp->BSHIFT,
	    "DIRECTORY SIZE  ", hashp->DSIZE,
	    "SEGMENT SIZE    ", hashp->SGSIZE,
	    "SEGMENT SHIFT   ", hashp->SSHIFT,
	    "FILL FACTOR     ", hashp->FFACTOR,
	    "MAX BUCKET      ", hashp->MAX_BUCKET,
	    "OVFL POINT      ", hashp->OVFL_POINT,
	    "LAST FREED      ", hashp->LAST_FREED,
	    "HIGH MASK       ", hashp->HIGH_MASK,
	    "LOW  MASK       ", hashp->LOW_MASK,
	    "NSEGS           ", hashp->nsegs,
	    "NKEYS           ", hashp->NKEYS);
#endif
#ifdef HASH_STATISTICS
	hash_overflows = hash_accesses = hash_collisions = hash_expansions = 0;
#endif
	return (dbp);
error1:
	(void)close(hashp->fp);

error0:
	free(hashp);
	errno = save_errno;
	return (NULL);
}
static int
hash_close(dbp)
	DB *dbp;
{
	HTAB *hashp;
	int retval;

	if (!dbp)
		return (ERROR);

	hashp = (HTAB *)dbp->internal;
	retval = hdestroy(hashp);
	free(dbp);
	return (retval);
}
static int
hash_fd(dbp)
	const DB *dbp;
{
	HTAB *hashp;

	if (!dbp)
		return (ERROR);

	hashp = (HTAB *)dbp->internal;
	if (hashp->fp == -1) {
		errno = ENOENT;
		return (-1);
	}
	return (hashp->fp);
}
/************************** LOCAL CREATION ROUTINES **********************/
static HTAB *
init_hash(hashp, file, info)
	HTAB *hashp;
	const char *file;
	HASHINFO *info;
{
#ifdef _STATBUF_ST_BLKSIZE
	struct stat statbuf;
#endif
	int nelem;

	nelem = 1;
	hashp->NKEYS = 0;
	hashp->LORDER = BYTE_ORDER;
	hashp->BSIZE = DEF_BUCKET_SIZE;
	hashp->BSHIFT = DEF_BUCKET_SHIFT;
	hashp->SGSIZE = DEF_SEGSIZE;
	hashp->SSHIFT = DEF_SEGSIZE_SHIFT;
	hashp->DSIZE = DEF_DIRSIZE;
	hashp->FFACTOR = DEF_FFACTOR;
	hashp->hash = __default_hash;
	memset(hashp->SPARES, 0, sizeof(hashp->SPARES));
	memset(hashp->BITMAPS, 0, sizeof(hashp->BITMAPS));
	/* Fix bucket size to be optimal for file system */
#ifdef _STATBUF_ST_BLKSIZE
	if (file != NULL) {
		if (stat(file, &statbuf))
			return (NULL);
		hashp->BSIZE = statbuf.st_blksize;
		hashp->BSHIFT = __hash_log2(hashp->BSIZE);
	}
#endif
	if (info) {
		if (info->bsize) {
			/* Round pagesize up to power of 2 */
			hashp->BSHIFT = __hash_log2(info->bsize);
			hashp->BSIZE = 1 << hashp->BSHIFT;
			if (hashp->BSIZE > MAX_BSIZE) {
				errno = EINVAL;
				return (NULL);
			}
		}
		if (info->ffactor)
			hashp->FFACTOR = info->ffactor;
		if (info->hash)
			hashp->hash = info->hash;
		if (info->nelem)
			nelem = info->nelem;
		if (info->lorder) {
			if (info->lorder != BIG_ENDIAN &&
			    info->lorder != LITTLE_ENDIAN) {
				errno = EINVAL;
				return (NULL);
			}
			hashp->LORDER = info->lorder;
		}
	}
	/* init_htab should destroy the table and set errno if it fails */
	if (init_htab(hashp, nelem))
		return (NULL);
	else
		return (hashp);
}
/*
 * This calls alloc_segs which may run out of memory.  Alloc_segs will destroy
 * the table and set errno, so we just pass the error information along.
 *
 * Returns 0 on No Error
 */
static int
init_htab(hashp, nelem)
	HTAB *hashp;
	int nelem;
{
	register int nbuckets, nsegs;
	int l2;
	/*
	 * Divide number of elements by the fill factor and determine a
	 * desired number of buckets.  Allocate space for the next greater
	 * power of two number of buckets.
	 */
	nelem = (nelem - 1) / hashp->FFACTOR + 1;

	l2 = __hash_log2(MAX(nelem, 2));
	nbuckets = 1 << l2;
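	/*
	 * Worked example (illustrative, assuming a fill factor of 8): for
	 * nelem = 1000, nelem becomes (1000 - 1) / 8 + 1 = 125, l2 =
	 * __hash_log2(125) = 7, and the table starts with 1 << 7 = 128
	 * buckets, the next power of two at or above the desired count.
	 */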
	hashp->SPARES[l2] = l2 + 1;
	hashp->SPARES[l2 + 1] = l2 + 1;
	hashp->OVFL_POINT = l2;
	hashp->LAST_FREED = 2;

	/* First bitmap page is at: splitpoint l2 page offset 1 */
	if (__ibitmap(hashp, OADDR_OF(l2, 1), l2 + 1, 0))
		return (-1);
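	/*
	 * OADDR_OF (a sketch of the encoding, assuming the SPLITSHIFT layout
	 * from hash.h) packs a split point into the high bits of an overflow
	 * address and a page offset into the low bits, so OADDR_OF(l2, 1)
	 * names the first overflow page allocated at split point l2.
	 */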
	hashp->MAX_BUCKET = hashp->LOW_MASK = nbuckets - 1;
	hashp->HIGH_MASK = (nbuckets << 1) - 1;
	hashp->HDRPAGES = ((MAX(sizeof(HASHHDR), MINHDRSIZE) - 1) >>
	    hashp->BSHIFT) + 1;

	nsegs = (nbuckets - 1) / hashp->SGSIZE + 1;
	nsegs = 1 << __hash_log2(nsegs);

	if (nsegs > hashp->DSIZE)
		hashp->DSIZE = nsegs;
	return (alloc_segs(hashp, nsegs));
}
/********************** DESTROY/CLOSE ROUTINES ************************/
/*
 * Flushes any changes to the file if necessary and destroys the hashp
 * structure, freeing all allocated space.
 */
static int
hdestroy(hashp)
	HTAB *hashp;
{
	int i, save_errno;

	save_errno = 0;

#ifdef HASH_STATISTICS
	(void)fprintf(stderr, "hdestroy: accesses %d collisions %d\n",
	    hash_accesses, hash_collisions);
	(void)fprintf(stderr, "hdestroy: expansions %d\n",
	    hash_expansions);
	(void)fprintf(stderr, "hdestroy: overflows %d\n",
	    hash_overflows);
	(void)fprintf(stderr, "keys %u maxp %u segmentcount %d\n",
	    hashp->NKEYS, hashp->MAX_BUCKET, hashp->nsegs);

	for (i = 0; i < NCACHED; i++)
		(void)fprintf(stderr,
		    "spares[%d] = %d\n", i, hashp->SPARES[i]);
#endif
	/*
	 * Call on buffer manager to free buffers, and if required,
	 * write them to disk.
	 */
	if (__buf_free(hashp, 1, hashp->save_file))
		save_errno = errno;
	if (hashp->dir) {
		free(*hashp->dir);	/* Free initial segments */
		/* Free extra segments */
		while (hashp->exsegs--)
			free(hashp->dir[--hashp->nsegs]);
		free(hashp->dir);
	}
	if (flush_meta(hashp) && !save_errno)
		save_errno = errno;
	/* Free Bigmaps */
	for (i = 0; i < hashp->nmaps; i++)
		if (hashp->mapp[i])
			free(hashp->mapp[i]);

	if (hashp->fp != -1)
		(void)close(hashp->fp);

	free(hashp);

	if (save_errno) {
		errno = save_errno;
		return (ERROR);
	}
	return (SUCCESS);
}
/*
 * Write modified pages to disk
 */
static int
hash_sync(dbp, flags)
	const DB *dbp;
	u_int32_t flags;
{
	HTAB *hashp;

	hashp = (HTAB *)dbp->internal;
	if (!hashp->save_file)
		return (0);
	if (__buf_free(hashp, 0, 1) || flush_meta(hashp))
		return (ERROR);
	hashp->new_file = 0;
	return (0);
}
/*
 * Returns:
 *	 0 == OK
 *	-1 indicates that errno should be set
 */
static int
flush_meta(hashp)
	HTAB *hashp;
{
	HASHHDR *whdrp;
#if BYTE_ORDER == LITTLE_ENDIAN
	HASHHDR whdr;
#endif
	int fp, i, wsize;
	if (!hashp->save_file)
		return (0);
	hashp->MAGIC = HASHMAGIC;
	hashp->VERSION = HASHVERSION;
	hashp->H_CHARKEY = hashp->hash(CHARKEY, sizeof(CHARKEY));

	fp = hashp->fp;
	whdrp = &hashp->hdr;
#if BYTE_ORDER == LITTLE_ENDIAN
	whdrp = &whdr;
	swap_header_copy(&hashp->hdr, whdrp);
#endif
	if ((lseek(fp, (off_t)0, SEEK_SET) == -1) ||
	    ((wsize = write(fp, whdrp, sizeof(HASHHDR))) == -1))
		return (-1);
	else
		if (wsize != sizeof(HASHHDR)) {
			errno = EFTYPE;
			hashp->errnum = errno;
			return (-1);
		}
	for (i = 0; i < NCACHED; i++)
		if (hashp->mapp[i])
			if (__put_page(hashp, (char *)hashp->mapp[i],
			    hashp->BITMAPS[i], 0, 1))
				return (-1);
	return (0);
}
/******************************* SEARCH ROUTINES *****************************/
/*
 * All the access routines return
 *
 * Returns:
 *	 0 on SUCCESS
 *	 1 to indicate an external ERROR (i.e. key not found, etc)
 *	-1 to indicate an internal ERROR (i.e. out of memory, etc)
 */
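/*
 * Callers going through the DB interface (dbp->get, dbp->put, dbp->del and
 * dbp->seq, set up in __hash_open above) therefore treat 0 as success, 1 as
 * "no such key" (or "key already exists" for an R_NOOVERWRITE put), and -1
 * as an internal error with errno set.
 */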
static int
hash_get(dbp, key, data, flag)
	const DB *dbp;
	const DBT *key;
	DBT *data;
	u_int32_t flag;
{
	HTAB *hashp;

	hashp = (HTAB *)dbp->internal;
	if (flag) {
		hashp->errnum = errno = EINVAL;
		return (ERROR);
	}
	return (hash_access(hashp, HASH_GET, (DBT *)key, data));
}
static int
hash_put(dbp, key, data, flag)
	const DB *dbp;
	DBT *key;
	const DBT *data;
	u_int32_t flag;
{
	HTAB *hashp;

	hashp = (HTAB *)dbp->internal;
	if (flag && flag != R_NOOVERWRITE) {
		hashp->errnum = errno = EINVAL;
		return (ERROR);
	}
	if ((hashp->flags & O_ACCMODE) == O_RDONLY) {
		hashp->errnum = errno = EPERM;
		return (ERROR);
	}
	return (hash_access(hashp, flag == R_NOOVERWRITE ?
	    HASH_PUTNEW : HASH_PUT, (DBT *)key, (DBT *)data));
}
static int
hash_delete(dbp, key, flag)
	const DB *dbp;
	const DBT *key;
	u_int32_t flag;		/* Ignored */
{
	HTAB *hashp;

	hashp = (HTAB *)dbp->internal;
	if (flag && flag != R_CURSOR) {
		hashp->errnum = errno = EINVAL;
		return (ERROR);
	}
	if ((hashp->flags & O_ACCMODE) == O_RDONLY) {
		hashp->errnum = errno = EPERM;
		return (ERROR);
	}
	return (hash_access(hashp, HASH_DELETE, (DBT *)key, NULL));
}
/*
 * Assume that hashp has been set in wrapper routine.
 */
static int
hash_access(hashp, action, key, val)
	HTAB *hashp;
	ACTION action;
	DBT *key, *val;
{
	register BUFHEAD *rbufp;
	BUFHEAD *bufp, *save_bufp;
	register u_int16_t *bp;
	register int n, ndx, off, size;
	register char *kp;
	u_int16_t pageno;

#ifdef HASH_STATISTICS
	hash_accesses++;
#endif

	off = hashp->BSIZE;
	size = key->size;
	kp = (char *)key->data;
	rbufp = __get_buf(hashp, __call_hash(hashp, kp, size), NULL, 0);
	if (!rbufp)
		return (ERROR);
	save_bufp = rbufp;
	/* Pin the bucket chain */
	rbufp->flags |= BUF_PIN;
	for (bp = (u_int16_t *)rbufp->page, n = *bp++, ndx = 1; ndx < n;)
		if (bp[1] >= REAL_KEY) {
			/* Real key/data pair */
			if (size == off - *bp &&
			    memcmp(kp, rbufp->page + *bp, size) == 0)
				goto found;
			off = bp[1];
#ifdef HASH_STATISTICS
			hash_collisions++;
#endif
			bp += 2;
			ndx += 2;
		} else if (bp[1] == OVFLPAGE) {
			rbufp = __get_buf(hashp, *bp, rbufp, 0);
			if (!rbufp) {
				save_bufp->flags &= ~BUF_PIN;
				return (ERROR);
			}
			/* FOR LOOP INIT */
			bp = (u_int16_t *)rbufp->page;
			n = *bp++;
			ndx = 1;
			off = hashp->BSIZE;
		} else if (bp[1] < REAL_KEY) {
			if ((ndx =
			    __find_bigpair(hashp, rbufp, ndx, kp, size)) > 0)
				goto found;
			if (ndx == -2) {
				bufp = rbufp;
				if (!(pageno =
				    __find_last_page(hashp, &bufp))) {
					ndx = 0;
					rbufp = bufp;
					break;	/* FOR */
				}
				rbufp = __get_buf(hashp, pageno, bufp, 0);
				if (!rbufp) {
					save_bufp->flags &= ~BUF_PIN;
					return (ERROR);
				}
				/* FOR LOOP INIT */
				bp = (u_int16_t *)rbufp->page;
				n = *bp++;
				ndx = 1;
				off = hashp->BSIZE;
			} else {
				save_bufp->flags &= ~BUF_PIN;
				return (ERROR);
			}
		}

	/* Not found */
	switch (action) {
	case HASH_PUT:
	case HASH_PUTNEW:
		if (__addel(hashp, rbufp, key, val)) {
			save_bufp->flags &= ~BUF_PIN;
			return (ERROR);
		} else {
			save_bufp->flags &= ~BUF_PIN;
			return (SUCCESS);
		}
	case HASH_GET:
	case HASH_DELETE:
	default:
		save_bufp->flags &= ~BUF_PIN;
		return (ABNORMAL);
	}

found:
	switch (action) {
	case HASH_PUTNEW:
		save_bufp->flags &= ~BUF_PIN;
		return (ABNORMAL);
	case HASH_GET:
		bp = (u_int16_t *)rbufp->page;
		if (bp[ndx + 1] < REAL_KEY) {
			if (__big_return(hashp, rbufp, ndx, val, 0))
				return (ERROR);
		} else {
			val->data = (u_char *)rbufp->page + (int)bp[ndx + 1];
			val->size = bp[ndx] - bp[ndx + 1];
		}
		break;
	case HASH_PUT:
		if ((__delpair(hashp, rbufp, ndx)) ||
		    (__addel(hashp, rbufp, key, val))) {
			save_bufp->flags &= ~BUF_PIN;
			return (ERROR);
		}
		break;
	case HASH_DELETE:
		if (__delpair(hashp, rbufp, ndx))
			return (ERROR);
		break;
	default:
		abort();
	}
	save_bufp->flags &= ~BUF_PIN;
	return (SUCCESS);
}
static int
hash_seq(dbp, key, data, flag)
	const DB *dbp;
	DBT *key, *data;
	u_int32_t flag;
{
	register u_int32_t bucket;
	register BUFHEAD *bufp;
	HTAB *hashp;
	u_int16_t *bp, ndx;

	hashp = (HTAB *)dbp->internal;
	if (flag && flag != R_FIRST && flag != R_NEXT) {
		hashp->errnum = errno = EINVAL;
		return (ERROR);
	}
#ifdef HASH_STATISTICS
	hash_accesses++;
#endif
	if ((hashp->cbucket < 0) || (flag == R_FIRST)) {
		hashp->cbucket = 0;
		hashp->cndx = 1;
		hashp->cpage = NULL;
	}
	for (bp = NULL; !bp || !bp[0]; ) {
		if (!(bufp = hashp->cpage)) {
			for (bucket = hashp->cbucket;
			    bucket <= (u_int32_t)hashp->MAX_BUCKET;
			    bucket++, hashp->cndx = 1) {
				bufp = __get_buf(hashp, bucket, NULL, 0);
				if (!bufp)
					return (ERROR);
				hashp->cpage = bufp;
				bp = (u_int16_t *)bufp->page;
				if (bp[0])
					break;
			}
			hashp->cbucket = bucket;
			if (hashp->cbucket > hashp->MAX_BUCKET) {
				hashp->cbucket = -1;
				return (ABNORMAL);
			}
		} else
			bp = (u_int16_t *)hashp->cpage->page;
		while (bp[hashp->cndx + 1] == OVFLPAGE) {
			bufp = hashp->cpage =
			    __get_buf(hashp, bp[hashp->cndx], bufp, 0);
			if (!bufp)
				return (ERROR);
			bp = (u_int16_t *)(bufp->page);
			hashp->cndx = 1;
		}
		if (!bp[0]) {
			hashp->cpage = NULL;
			++hashp->cbucket;
		}
	}
	ndx = hashp->cndx;
	if (bp[ndx + 1] < REAL_KEY) {
		if (__big_keydata(hashp, bufp, key, data, 1))
			return (ERROR);
	} else {
		key->data = (u_char *)hashp->cpage->page + bp[ndx];
		key->size = (ndx > 1 ? bp[ndx - 1] : hashp->BSIZE) - bp[ndx];
		data->data = (u_char *)hashp->cpage->page + bp[ndx + 1];
		data->size = bp[ndx] - bp[ndx + 1];
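		/*
		 * Page layout implied by the offsets used here: bp addresses
		 * an array of 16-bit values at the front of the page, bp[0]
		 * holds the number of index entries in use, and the key/data
		 * bytes themselves are stored from the end of the page
		 * growing downward.  bp[ndx] is the key's offset and
		 * bp[ndx + 1] its data's offset, which is why a key's length
		 * is the previous offset (or BSIZE for the first entry) minus
		 * its own, and the data length is bp[ndx] - bp[ndx + 1].
		 */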
/********************************* UTILITIES ************************/
extern int
__expand_table(hashp)
	HTAB *hashp;
{
	u_int32_t old_bucket, new_bucket;
	int dirsize, new_segnum, spare_ndx;
#ifdef HASH_STATISTICS
	hash_expansions++;
#endif
	new_bucket = ++hashp->MAX_BUCKET;
	old_bucket = (hashp->MAX_BUCKET & hashp->LOW_MASK);

	new_segnum = new_bucket >> hashp->SSHIFT;

	/* Check if we need a new segment */
	if (new_segnum >= hashp->nsegs) {
		/* Check if we need to expand directory */
		if (new_segnum >= hashp->DSIZE) {
			/* Reallocate directory */
			dirsize = hashp->DSIZE * sizeof(SEGMENT *);
			if (!hash_realloc(&hashp->dir, dirsize, dirsize << 1))
				return (-1);
			hashp->DSIZE = dirsize << 1;
		}
		if ((hashp->dir[new_segnum] =
		    (SEGMENT)calloc(hashp->SGSIZE, sizeof(SEGMENT))) == NULL)
			return (-1);
		hashp->exsegs++;
		hashp->nsegs++;
	}
	/*
	 * If the split point is increasing (MAX_BUCKET's log base 2
	 * increases), we need to copy the current contents of the spare
	 * split bucket to the next bucket.
	 */
	spare_ndx = __hash_log2(hashp->MAX_BUCKET + 1);
	if (spare_ndx > hashp->OVFL_POINT) {
		hashp->SPARES[spare_ndx] = hashp->SPARES[hashp->OVFL_POINT];
		hashp->OVFL_POINT = spare_ndx;
	}
	if (new_bucket > (u_int32_t)hashp->HIGH_MASK) {
		/* Starting a new doubling */
		hashp->LOW_MASK = hashp->HIGH_MASK;
		hashp->HIGH_MASK = new_bucket | hashp->LOW_MASK;
	}
	/* Relocate records to the new bucket */
	return (__split_page(hashp, old_bucket, new_bucket));
}
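/*
 * Linear-hashing expansion in brief: each call splits exactly one bucket,
 * old_bucket = MAX_BUCKET & LOW_MASK, between itself and the new bucket
 * MAX_BUCKET.  For example, growing from 8 to 9 buckets splits bucket
 * 8 & 7 = 0 into buckets 0 and 8; when a new bucket number first exceeds
 * HIGH_MASK (e.g. bucket 16 with HIGH_MASK 15), LOW_MASK becomes 15,
 * HIGH_MASK becomes 31, and the next round of splits begins.
 */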
/*
 * If realloc guarantees that the pointer is not destroyed if the realloc
 * fails, then this routine can go away.
 */
static void *
hash_realloc(p_ptr, oldsize, newsize)
	SEGMENT **p_ptr;
	int oldsize, newsize;
{
	register void *p;

	if ((p = malloc(newsize)) != NULL) {
		memmove(p, *p_ptr, oldsize);
		memset((char *)p + oldsize, 0, newsize - oldsize);
		free(*p_ptr);
		*p_ptr = p;
	}
	return (p);
}
extern u_int32_t
__call_hash(hashp, k, len)
	HTAB *hashp;
	char *k;
	int len;
{
	int n, bucket;

	n = hashp->hash(k, len);
	bucket = n & hashp->HIGH_MASK;
	if (bucket > hashp->MAX_BUCKET)
		bucket = bucket & hashp->LOW_MASK;
	return (bucket);
}
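/*
 * Example of the address calculation above: if the table has grown to
 * MAX_BUCKET = 9, then LOW_MASK = 7 and HIGH_MASK = 15.  A hash value of 29
 * gives 29 & 15 = 13, which is beyond MAX_BUCKET, so the key falls back to
 * bucket 29 & 7 = 5, the bucket that has not yet been split in the current
 * doubling.
 */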
/*
 * Allocate segment table.  On error, destroy the table and set errno.
 *
 * Returns 0 on success
 */
static int
alloc_segs(hashp, nsegs)
	HTAB *hashp;
	int nsegs;
{
	register int i;
	register SEGMENT store;

	int save_errno;

	if ((hashp->dir =
	    (SEGMENT *)calloc(hashp->DSIZE, sizeof(SEGMENT *))) == NULL) {
		save_errno = errno;
		(void)hdestroy(hashp);
		errno = save_errno;
		return (-1);
	}
	/* Allocate segments */
	if ((store =
	    (SEGMENT)calloc(nsegs << hashp->SSHIFT, sizeof(SEGMENT))) == NULL) {
		save_errno = errno;
		(void)hdestroy(hashp);
		errno = save_errno;
		return (-1);
	}
	for (i = 0; i < nsegs; i++, hashp->nsegs++)
		hashp->dir[i] = &store[i << hashp->SSHIFT];
	return (0);
}
#if BYTE_ORDER == LITTLE_ENDIAN
/*
 * Hashp->hdr needs to be byteswapped.
 */
static void
swap_header_copy(srcp, destp)
	HASHHDR *srcp, *destp;
{
	int i;
	P_32_COPY(srcp->magic, destp->magic);
	P_32_COPY(srcp->version, destp->version);
	P_32_COPY(srcp->lorder, destp->lorder);
	P_32_COPY(srcp->bsize, destp->bsize);
	P_32_COPY(srcp->bshift, destp->bshift);
	P_32_COPY(srcp->dsize, destp->dsize);
	P_32_COPY(srcp->ssize, destp->ssize);
	P_32_COPY(srcp->sshift, destp->sshift);
	P_32_COPY(srcp->ovfl_point, destp->ovfl_point);
	P_32_COPY(srcp->last_freed, destp->last_freed);
	P_32_COPY(srcp->max_bucket, destp->max_bucket);
	P_32_COPY(srcp->high_mask, destp->high_mask);
	P_32_COPY(srcp->low_mask, destp->low_mask);
	P_32_COPY(srcp->ffactor, destp->ffactor);
	P_32_COPY(srcp->nkeys, destp->nkeys);
	P_32_COPY(srcp->hdrpages, destp->hdrpages);
	P_32_COPY(srcp->h_charkey, destp->h_charkey);
	for (i = 0; i < NCACHED; i++) {
		P_32_COPY(srcp->spares[i], destp->spares[i]);
		P_16_COPY(srcp->bitmaps[i], destp->bitmaps[i]);
	}
}
static void
swap_header(hashp)
	HTAB *hashp;
{
	HASHHDR *hdrp;
	int i;

	hdrp = &hashp->hdr;

	M_32_SWAP(hdrp->magic);
	M_32_SWAP(hdrp->version);
	M_32_SWAP(hdrp->lorder);
	M_32_SWAP(hdrp->bsize);
	M_32_SWAP(hdrp->bshift);
	M_32_SWAP(hdrp->dsize);
	M_32_SWAP(hdrp->ssize);
	M_32_SWAP(hdrp->sshift);
	M_32_SWAP(hdrp->ovfl_point);
	M_32_SWAP(hdrp->last_freed);
	M_32_SWAP(hdrp->max_bucket);
	M_32_SWAP(hdrp->high_mask);
	M_32_SWAP(hdrp->low_mask);
	M_32_SWAP(hdrp->ffactor);
	M_32_SWAP(hdrp->nkeys);
	M_32_SWAP(hdrp->hdrpages);
	M_32_SWAP(hdrp->h_charkey);
	for (i = 0; i < NCACHED; i++) {
		M_32_SWAP(hdrp->spares[i]);
		M_16_SWAP(hdrp->bitmaps[i]);
	}
}
#endif