clean up the hash entry code a bit. Got rid of lp_shmem_hash_size()
[Samba.git] / source / locking / shmem_sysv.c
blob e3f40418d97e4b014299e06bbe388fb66e1a1bf3
/*
   Unix SMB/Netbios implementation.
   Version 1.9.
   Shared memory functions - SYSV IPC implementation
   Copyright (C) Erik Devriendt 1996-1997

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include "includes.h"

#ifdef USE_SYSV_IPC
extern int DEBUGLEVEL;

#define SHMEM_KEY       ((key_t)0x280267)
#define SEMAPHORE_KEY   (SHMEM_KEY+2)

#define SHM_MAGIC       0x53484100
#define SHM_VERSION     2

#ifdef SHM_R
#define IPC_PERMS ((SHM_R | SHM_W) | (SHM_R>>3) | (SHM_R>>6))
#else
#define IPC_PERMS 0644
#endif

#ifdef SEMMSL
#define SHMEM_HASH_SIZE (SEMMSL-1)
#else
#define SHMEM_HASH_SIZE 63
#endif

#define MIN_SHM_SIZE 10240

static int shm_id;
static int sem_id;
static int shm_size;
static int hash_size;
static int global_lock_count;
struct ShmHeader {
        int shm_magic;
        int shm_version;
        int total_size;         /* in bytes */
        BOOL consistent;
        int first_free_off;
        int userdef_off;        /* a userdefined offset. can be used to store
                                   root of tree or list */
        struct {                /* a cell is a range of bytes of sizeof(struct
                                   ShmBlockDesc) size */
                int cells_free;
                int cells_used;
                int cells_system; /* number of cells used as allocated
                                     block descriptors */
        } statistics;
};
#define SHM_NOT_FREE_OFF (-1)

struct ShmBlockDesc
{
        int next;       /* offset of next block in the free list or
                           SHM_NOT_FREE_OFF when block in use */
        int size;       /* user size in BlockDescSize units */
};

#define EOList_Addr     (struct ShmBlockDesc *)( 0 )
#define EOList_Off      (NULL_OFFSET)

#define CellSize        sizeof(struct ShmBlockDesc)
/* HeaderSize aligned on 8 byte boundary */
#define AlignedHeaderSize       ((sizeof(struct ShmHeader)+7) & ~7)

static struct ShmHeader *shm_header_p = (struct ShmHeader *)0;

static BOOL shm_initialize_called = False;

static int read_only;
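
/*******************************************************************
  lock one semaphore in the semaphore set (no-op when read only)
  ******************************************************************/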
static BOOL sem_lock(int i)
{
        struct sembuf sb;
        if (read_only) return True;

        sb.sem_num = i;
        sb.sem_op = -1;
        sb.sem_flg = SEM_UNDO;

        if (semop(sem_id, &sb, 1) != 0) {
                DEBUG(0,("ERROR: IPC lock failed on semaphore %d\n", i));
                return False;
        }

        return True;
}
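
/*******************************************************************
  unlock one semaphore in the semaphore set (no-op when read only)
  ******************************************************************/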
static BOOL sem_unlock(int i)
{
        struct sembuf sb;
        if (read_only) return True;

        sb.sem_num = i;
        sb.sem_op = 1;
        sb.sem_flg = SEM_UNDO;

        if (semop(sem_id, &sb, 1) != 0) {
                DEBUG(0,("ERROR: IPC unlock failed on semaphore %d\n", i));
                return False;
        }

        return True;
}
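
/*******************************************************************
  take the global lock (semaphore 0), counting nested calls
  ******************************************************************/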
static BOOL global_lock(void)
{
        global_lock_count++;
        if (global_lock_count == 1)
                return sem_lock(0);
        return True;
}
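
/*******************************************************************
  release the global lock once the nesting count drops to zero
  ******************************************************************/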
static BOOL global_unlock(void)
{
        global_lock_count--;
        if (global_lock_count == 0)
                return sem_unlock(0);
        return True;
}
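
/*******************************************************************
  convert an offset within the shared memory area to an address
  ******************************************************************/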
static void *shm_offset2addr(int offset)
{
        if (offset == NULL_OFFSET)
                return (void *)(0);

        if (!shm_header_p)
                return (void *)(0);

        return (void *)((char *)shm_header_p + offset);
}
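
/*******************************************************************
  convert an address to an offset within the shared memory area
  ******************************************************************/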
static int shm_addr2offset(void *addr)
{
        if (!addr)
                return NULL_OFFSET;

        if (!shm_header_p)
                return NULL_OFFSET;

        return (int)((char *)addr - (char *)shm_header_p);
}
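
/*******************************************************************
  allocate a block from the shared memory area and return its offset
  (first-fit scan of the free list)
  ******************************************************************/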
static int shm_alloc(int size)
{
        unsigned num_cells;
        struct ShmBlockDesc *scanner_p;
        struct ShmBlockDesc *prev_p;
        struct ShmBlockDesc *new_p;
        int result_offset;

        if (!shm_header_p) {
                /* not mapped yet */
                DEBUG(0,("ERROR shm_alloc : shmem not mapped\n"));
                return NULL_OFFSET;
        }

        global_lock();

        if (!shm_header_p->consistent) {
                DEBUG(0,("ERROR shm_alloc : shmem not consistent\n"));
                global_unlock();
                return NULL_OFFSET;
        }

        /* calculate the number of cells */
        num_cells = (size + CellSize - 1) / CellSize;

        /* set start of scan */
        prev_p = (struct ShmBlockDesc *)shm_offset2addr(shm_header_p->first_free_off);
        scanner_p = prev_p;

        /* scan the free list to find a matching free space */
        while ((scanner_p != EOList_Addr) && (scanner_p->size < num_cells)) {
                prev_p = scanner_p;
                scanner_p = (struct ShmBlockDesc *)shm_offset2addr(scanner_p->next);
        }

        /* at this point scanner points to a block header or to the end of
           the list */
        if (scanner_p == EOList_Addr) {
                DEBUG(0,("ERROR shm_alloc : alloc of %d bytes failed, no free space found\n",size));
                global_unlock();
                return (NULL_OFFSET);
        }

        /* going to modify shared mem */
        shm_header_p->consistent = False;

        /* if we found a good one : scanner == the good one */
        if (scanner_p->size <= num_cells + 2) {
                /* there is no use in making a new one, it will be too small
                 * anyway; we will link out scanner */
                if (prev_p == scanner_p) {
                        shm_header_p->first_free_off = scanner_p->next;
                } else {
                        prev_p->next = scanner_p->next;
                }
                shm_header_p->statistics.cells_free -= scanner_p->size;
                shm_header_p->statistics.cells_used += scanner_p->size;
        } else {
                /* make a new one */
                new_p = scanner_p + 1 + num_cells;
                new_p->size = scanner_p->size - num_cells - 1;
                new_p->next = scanner_p->next;
                scanner_p->size = num_cells;
                scanner_p->next = shm_addr2offset(new_p);

                if (prev_p != scanner_p) {
                        prev_p->next = shm_addr2offset(new_p);
                } else {
                        shm_header_p->first_free_off = shm_addr2offset(new_p);
                }
                shm_header_p->statistics.cells_free -= num_cells+1;
                shm_header_p->statistics.cells_used += num_cells;
                shm_header_p->statistics.cells_system += 1;
        }

        result_offset = shm_addr2offset(&(scanner_p[1]));
        scanner_p->next = SHM_NOT_FREE_OFF;

        /* end modification of shared mem */
        shm_header_p->consistent = True;

        DEBUG(6,("shm_alloc : request for %d bytes, allocated %d bytes at offset %d\n",size,scanner_p->size*CellSize,result_offset));

        global_unlock();
        return result_offset;
}
/*
 * Function to create the hash table for the share mode entries. Called
 * when smb shared memory is global locked.
 */
static BOOL shm_create_hash_table(unsigned int size)
{
        size *= sizeof(int);

        global_lock();
        shm_header_p->userdef_off = shm_alloc(size);

        if (shm_header_p->userdef_off == NULL_OFFSET) {
                DEBUG(0,("shm_create_hash_table: Failed to create hash table of size %d\n",size));
                global_unlock();
                return False;
        }

        /* Clear hash buckets. */
        memset(shm_offset2addr(shm_header_p->userdef_off), '\0', size);
        global_unlock();
        return True;
}
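
/*******************************************************************
  check that an existing shared memory header has the expected
  magic, version, size and consistency flag
  ******************************************************************/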
static BOOL shm_validate_header(int size)
{
        if (!shm_header_p) {
                /* not mapped yet */
                DEBUG(0,("ERROR shm_validate_header : shmem not mapped\n"));
                return False;
        }

        if (shm_header_p->shm_magic != SHM_MAGIC) {
                DEBUG(0,("ERROR shm_validate_header : bad magic\n"));
                return False;
        }

        if (shm_header_p->shm_version != SHM_VERSION) {
                DEBUG(0,("ERROR shm_validate_header : bad version %X\n",shm_header_p->shm_version));
                return False;
        }

        if (shm_header_p->total_size != size) {
                DEBUG(0,("ERROR shm_validate_header : shmem size mismatch (old = %d, new = %d)\n",shm_header_p->total_size,size));
                return False;
        }

        if (!shm_header_p->consistent) {
                DEBUG(0,("ERROR shm_validate_header : shmem not consistent\n"));
                return False;
        }
        return True;
}
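
/*******************************************************************
  initialise a freshly created shared memory area: write the header
  and set up a single free block covering the rest of the area
  ******************************************************************/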
static BOOL shm_initialize(int size)
{
        struct ShmBlockDesc *first_free_block_p;

        DEBUG(5,("shm_initialize : initializing shmem file of size %d\n",size));

        if (!shm_header_p) {
                /* not mapped yet */
                DEBUG(0,("ERROR shm_initialize : shmem not mapped\n"));
                return False;
        }

        shm_header_p->shm_magic = SHM_MAGIC;
        shm_header_p->shm_version = SHM_VERSION;
        shm_header_p->total_size = size;
        shm_header_p->first_free_off = AlignedHeaderSize;
        shm_header_p->userdef_off = NULL_OFFSET;

        first_free_block_p = (struct ShmBlockDesc *)shm_offset2addr(shm_header_p->first_free_off);
        first_free_block_p->next = EOList_Off;
        first_free_block_p->size = (size - AlignedHeaderSize - CellSize) / CellSize;

        shm_header_p->statistics.cells_free = first_free_block_p->size;
        shm_header_p->statistics.cells_used = 0;
        shm_header_p->statistics.cells_system = 1;

        shm_header_p->consistent = True;

        shm_initialize_called = True;

        return True;
}
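
/*******************************************************************
  merge a free block with the following block if they are adjacent
  ******************************************************************/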
static void shm_solve_neighbors(struct ShmBlockDesc *head_p)
{
        struct ShmBlockDesc *next_p;

        /* Check if head_p and head_p->next are neighbors and if so
           join them */
        if (head_p == EOList_Addr) return;
        if (head_p->next == EOList_Off) return;

        next_p = (struct ShmBlockDesc *)shm_offset2addr(head_p->next);
        if ((head_p + head_p->size + 1) == next_p) {
                head_p->size += next_p->size + 1;       /* adapt size */
                head_p->next = next_p->next;            /* link out */

                shm_header_p->statistics.cells_free += 1;
                shm_header_p->statistics.cells_system -= 1;
        }
}
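
/*******************************************************************
  close the shared memory (nothing to do for SYSV IPC)
  ******************************************************************/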
static BOOL shm_close(void)
{
        return True;
}
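
/*******************************************************************
  return a previously allocated block to the free list, coalescing
  with neighbouring free blocks where possible
  ******************************************************************/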
static BOOL shm_free(int offset)
{
        struct ShmBlockDesc *header_p;  /* pointer to header of block to free */
        struct ShmBlockDesc *scanner_p; /* used to scan the list */
        struct ShmBlockDesc *prev_p;    /* holds previous in the list */

        if (!shm_header_p) {
                /* not mapped yet */
                DEBUG(0,("ERROR shm_free : shmem not mapped\n"));
                return False;
        }

        global_lock();

        if (!shm_header_p->consistent) {
                DEBUG(0,("ERROR shm_free : shmem not consistent\n"));
                global_unlock();
                return False;
        }

        /* make pointer to header of block */
        header_p = ((struct ShmBlockDesc *)shm_offset2addr(offset) - 1);

        if (header_p->next != SHM_NOT_FREE_OFF) {
                DEBUG(0,("ERROR shm_free : bad offset (%d)\n",offset));
                global_unlock();
                return False;
        }

        /* find a place in the free_list to put the header in */

        /* set scanner and previous pointer to start of list */
        prev_p = (struct ShmBlockDesc *)shm_offset2addr(shm_header_p->first_free_off);
        scanner_p = prev_p;

        while ((scanner_p != EOList_Addr) &&
               (scanner_p < header_p)) {
                /* while we didn't scan past its position */
                prev_p = scanner_p;
                scanner_p = (struct ShmBlockDesc *)shm_offset2addr(scanner_p->next);
        }

        shm_header_p->consistent = False;

        DEBUG(6,("shm_free : freeing %d bytes at offset %d\n",
                 header_p->size*CellSize,offset));

        if (scanner_p == prev_p) {
                shm_header_p->statistics.cells_free += header_p->size;
                shm_header_p->statistics.cells_used -= header_p->size;

                /* we must free it at the beginning of the list */
                shm_header_p->first_free_off = shm_addr2offset(header_p); /* set the free_list_pointer to this block_header */

                /* scanner is the one that was first in the list */
                header_p->next = shm_addr2offset(scanner_p);
                shm_solve_neighbors(header_p); /* if neighbors then link them */

                shm_header_p->consistent = True;
        } else {
                shm_header_p->statistics.cells_free += header_p->size;
                shm_header_p->statistics.cells_used -= header_p->size;

                prev_p->next = shm_addr2offset(header_p);
                header_p->next = shm_addr2offset(scanner_p);
                shm_solve_neighbors(header_p);
                shm_solve_neighbors(prev_p);

                shm_header_p->consistent = True;
        }

        global_unlock();
        return True;
}
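
/*******************************************************************
  return the user defined offset stored in the header (used for the
  share mode hash table)
  ******************************************************************/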
static int shm_get_userdef_off(void)
{
        if (!shm_header_p)
                return NULL_OFFSET;
        else
                return shm_header_p->userdef_off;
}
/*******************************************************************
  Lock a particular hash bucket entry.
  ******************************************************************/
static BOOL shm_lock_hash_entry(unsigned int entry)
{
        DEBUG(0,("hash lock %d\n", entry));
        return sem_lock(entry+1);
}
/*******************************************************************
  Unlock a particular hash bucket entry.
  ******************************************************************/
static BOOL shm_unlock_hash_entry(unsigned int entry)
{
        DEBUG(0,("hash unlock %d\n", entry));
        return sem_unlock(entry+1);
}
/*******************************************************************
  Gather statistics on shared memory usage.
  ******************************************************************/
static BOOL shm_get_usage(int *bytes_free,
                          int *bytes_used,
                          int *bytes_overhead)
{
        if (!shm_header_p) {
                /* not mapped yet */
                DEBUG(0,("ERROR shm_get_usage : shmem not mapped\n"));
                return False;
        }

        *bytes_free = shm_header_p->statistics.cells_free * CellSize;
        *bytes_used = shm_header_p->statistics.cells_used * CellSize;
        *bytes_overhead = shm_header_p->statistics.cells_system * CellSize + AlignedHeaderSize;

        return True;
}
/*******************************************************************
  Return the size of the share mode hash table.
  ******************************************************************/
static unsigned shm_hash_size(void)
{
        return hash_size;
}

static struct shmem_ops shmops = {
        shm_close,
        shm_alloc,
        shm_free,
        shm_get_userdef_off,
        shm_offset2addr,
        shm_addr2offset,
        shm_lock_hash_entry,
        shm_unlock_hash_entry,
        shm_get_usage,
        shm_hash_size,
};
/*******************************************************************
  open the shared memory
  ******************************************************************/
struct shmem_ops *sysv_shm_open(int ronly)
{
        BOOL created_new = False;
        BOOL other_processes;
        struct shmid_ds shm_ds;
        struct semid_ds sem_ds;
        union semun su;
        int i;

        read_only = ronly;

        shm_size = lp_shmem_size();

        DEBUG(4,("Trying sysv shmem open of size %d\n", shm_size));

        /* first the semaphore */
        sem_id = semget(SEMAPHORE_KEY, 0, 0);
        if (sem_id == -1) {
                if (read_only) return NULL;

                hash_size = SHMEM_HASH_SIZE;

                while (hash_size > 1) {
                        sem_id = semget(SEMAPHORE_KEY, hash_size+1,
                                        IPC_CREAT | IPC_EXCL | IPC_PERMS);
                        if (sem_id != -1 || errno != EINVAL) break;
                        hash_size--;
                }

                if (sem_id == -1) {
                        DEBUG(0,("Can't create or use semaphore %s\n",
                                 strerror(errno)));
                }

                if (sem_id != -1) {
                        su.val = 1;
                        for (i=0;i<hash_size+1;i++) {
                                if (semctl(sem_id, i, SETVAL, su) != 0) {
                                        DEBUG(1,("Failed to init semaphore %d\n", i));
                                }
                        }
                }
        }
        if (shm_id == -1) {
                sem_id = semget(SEMAPHORE_KEY, 0, 0);
        }
        if (sem_id == -1) {
                DEBUG(0,("Can't create or use semaphore %s\n",
                         strerror(errno)));
                return NULL;
        }

        su.buf = &sem_ds;
        if (semctl(sem_id, 0, IPC_STAT, su) != 0) {
                DEBUG(0,("ERROR shm_open : can't IPC_STAT\n"));
        }
        hash_size = sem_ds.sem_nsems-1;

        if (!read_only) {
                if (sem_ds.sem_perm.cuid != 0 || sem_ds.sem_perm.cgid != 0) {
                        DEBUG(0,("ERROR: root did not create the semaphore\n"));
                        return NULL;
                }
        }

        if (!global_lock())
                return NULL;

        /* try to use an existing key */
        shm_id = shmget(SHMEM_KEY, shm_size, 0);

        /* if that failed then create one */
        if (shm_id == -1) {
                if (read_only) return NULL;
                while (shm_size > MIN_SHM_SIZE) {
                        shm_id = shmget(SHMEM_KEY, shm_size,
                                        IPC_CREAT | IPC_EXCL | IPC_PERMS);
                        if (shm_id != -1 || errno != EINVAL) break;
                        shm_size *= 0.9;
                }
                created_new = (shm_id != -1);
        }

        if (shm_id == -1) {
                DEBUG(0,("Can't create or use IPC area\n"));
                global_unlock();
                return NULL;
        }

        shm_header_p = (struct ShmHeader *)shmat(shm_id, 0,
                                                 read_only?SHM_RDONLY:0);
        if ((int)shm_header_p == -1) {
                DEBUG(0,("Can't attach to IPC area\n"));
                global_unlock();
                return NULL;
        }

        /* to find out if some other process is already mapping the file,
           we use a registration file containing the processids of the file
           mapping processes */
        if (shmctl(shm_id, IPC_STAT, &shm_ds) != 0) {
                DEBUG(0,("ERROR shm_open : can't IPC_STAT\n"));
        }

        if (!read_only) {
                if (shm_ds.shm_perm.cuid != 0 || shm_ds.shm_perm.cgid != 0) {
                        DEBUG(0,("ERROR: root did not create the shmem\n"));
                        global_unlock();
                        return NULL;
                }
        }

        shm_size = shm_ds.shm_segsz;

        other_processes = (shm_ds.shm_nattch > 1);

        if (!read_only && !other_processes) {
                memset((char *)shm_header_p, 0, shm_size);
                shm_initialize(shm_size);
                shm_create_hash_table(hash_size);
                DEBUG(3,("Initialised IPC area of size %d\n", shm_size));
        } else if (!shm_validate_header(shm_size)) {
                /* existing file is corrupt, samba admin should remove
                   it by hand */
                DEBUG(0,("ERROR shm_open : corrupt IPC area - remove it!\n"));
                global_unlock();
                return NULL;
        }

        global_unlock();
        return &shmops;
}
#else
int ipc_dummy_procedure(void)
{return 0;}
#endif