don't use SEM_UNDO because of ridiculously small undo limits on some systems
[Samba.git] / source / locking / shmem_sysv.c
blob826de5694cbf1cc0aad729b8e893b46eddd7a14d
1 /*
2 Unix SMB/Netbios implementation.
3 Version 1.9.
4 Shared memory functions - SYSV IPC implementation
5 Copyright (C) Andrew Tridgell 1997
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 #include "includes.h"
26 #ifdef USE_SYSV_IPC
28 extern int DEBUGLEVEL;
30 #define SHMEM_KEY ((key_t)0x280267)
31 #define SEMAPHORE_KEY (SHMEM_KEY+2)
33 #define SHM_MAGIC 0x53484100
34 #define SHM_VERSION 2
36 #ifdef SHM_R
37 #define IPC_PERMS ((SHM_R | SHM_W) | (SHM_R>>3) | (SHM_R>>6))
38 #else
39 #define IPC_PERMS 0644
40 #endif
42 #ifdef SECURE_SEMAPHORES
43 /* secure semaphores are slow because we have to do a become_root()
44 on every call! */
45 #define SEMAPHORE_PERMS IPC_PERMS
46 #else
47 #define SEMAPHORE_PERMS 0666
48 #endif
50 #ifdef SEMMSL
51 #define SHMEM_HASH_SIZE (SEMMSL-1)
52 #else
53 #define SHMEM_HASH_SIZE 63
54 #endif
56 #define MIN_SHM_SIZE 0x1000
/* SysV IPC handles and allocator bookkeeping, file-scope state. */
static int shm_id;              /* shmget() segment id */
static int sem_id;              /* semget() semaphore-set id */
static int shm_size;            /* size in bytes of the mapped segment */
static int hash_size;           /* number of hash-bucket semaphores (set 0 is the global lock) */
static int global_lock_count;   /* recursion depth of global_lock()/global_unlock() */
64 struct ShmHeader {
65 int shm_magic;
66 int shm_version;
67 int total_size; /* in bytes */
68 BOOL consistent;
69 int first_free_off;
70 int userdef_off; /* a userdefined offset. can be used to store
71 root of tree or list */
72 struct { /* a cell is a range of bytes of sizeof(struct
73 ShmBlockDesc) size */
74 int cells_free;
75 int cells_used;
76 int cells_system; /* number of cells used as allocated
77 block descriptors */
78 } statistics;
81 #define SHM_NOT_FREE_OFF (-1)
/* Descriptor preceding every block in the shared area.  Free blocks
   are chained through 'next'; in-use blocks carry SHM_NOT_FREE_OFF. */
struct ShmBlockDesc
{
	int next;   /* offset of next block in the free list or
		       SHM_NOT_FREE_OFF when block in use */
	int size;   /* user size in BlockDescSize units */
};
89 #define EOList_Addr NULL
90 #define EOList_Off (0)
92 #define CellSize sizeof(struct ShmBlockDesc)
94 /* HeaderSize aligned on a 8 byte boundary */
95 #define AlignedHeaderSize ((sizeof(struct ShmHeader)+7) & ~7)
static struct ShmHeader *shm_header_p = NULL;   /* address the segment is attached at */
static BOOL shm_initialize_called = False;      /* True once shm_initialize() has run */
static int read_only;                           /* nonzero => attached with SHM_RDONLY, semaphore ops are no-ops */
102 static BOOL sem_change(int i, int op)
104 #ifdef SECURE_SEMAPHORES
105 extern struct current_user current_user;
106 int became_root=0;
107 #endif
108 struct sembuf sb;
109 int ret;
111 if (read_only) return True;
113 #ifdef SECURE_SEMAPHORES
114 if (current_user.uid != 0) {
115 become_root(0);
116 became_root = 1;
118 #endif
120 sb.sem_num = i;
121 sb.sem_op = op;
122 sb.sem_flg = 0;
124 ret = semop(sem_id, &sb, 1);
126 if (ret != 0) {
127 DEBUG(0,("ERROR: sem_change(%d,%d) failed (%s)\n",
128 i, op, strerror(errno)));
131 #ifdef SECURE_SEMAPHORES
132 if (became_root) {
133 unbecome_root(0);
135 #endif
137 return ret == 0;
140 static BOOL global_lock(void)
142 global_lock_count++;
143 if (global_lock_count == 1)
144 return sem_change(0, -1);
145 return True;
148 static BOOL global_unlock(void)
150 global_lock_count--;
151 if (global_lock_count == 0)
152 return sem_change(0, 1);
153 return True;
156 static void *shm_offset2addr(int offset)
158 if (offset == 0 )
159 return (void *)(0);
161 if (!shm_header_p)
162 return (void *)(0);
164 return (void *)((char *)shm_header_p + offset);
167 static int shm_addr2offset(void *addr)
169 if (!addr)
170 return 0;
172 if (!shm_header_p)
173 return 0;
175 return (int)((char *)addr - (char *)shm_header_p);
179 static int shm_alloc(int size)
181 unsigned num_cells ;
182 struct ShmBlockDesc *scanner_p;
183 struct ShmBlockDesc *prev_p;
184 struct ShmBlockDesc *new_p;
185 int result_offset;
188 if (!shm_header_p) {
189 /* not mapped yet */
190 DEBUG(0,("ERROR shm_alloc : shmem not mapped\n"));
191 return 0;
194 global_lock();
196 if (!shm_header_p->consistent) {
197 DEBUG(0,("ERROR shm_alloc : shmem not consistent\n"));
198 global_unlock();
199 return 0;
202 /* calculate the number of cells */
203 num_cells = (size + (CellSize-1)) / CellSize;
205 /* set start of scan */
206 prev_p = (struct ShmBlockDesc *)shm_offset2addr(shm_header_p->first_free_off);
207 scanner_p = prev_p ;
209 /* scan the free list to find a matching free space */
210 while ((scanner_p != EOList_Addr) && (scanner_p->size < num_cells)) {
211 prev_p = scanner_p;
212 scanner_p = (struct ShmBlockDesc *)shm_offset2addr(scanner_p->next);
215 /* at this point scanner point to a block header or to the end of
216 the list */
217 if (scanner_p == EOList_Addr) {
218 DEBUG(0,("ERROR shm_alloc : alloc of %d bytes failed\n",size));
219 global_unlock();
220 return (0);
223 /* going to modify shared mem */
224 shm_header_p->consistent = False;
226 /* if we found a good one : scanner == the good one */
227 if (scanner_p->size > num_cells + 2) {
228 /* Make a new one */
229 new_p = scanner_p + 1 + num_cells;
230 new_p->size = scanner_p->size - (num_cells + 1);
231 new_p->next = scanner_p->next;
232 scanner_p->size = num_cells;
233 scanner_p->next = shm_addr2offset(new_p);
235 shm_header_p->statistics.cells_free -= 1;
236 shm_header_p->statistics.cells_system += 1;
239 /* take it from the free list */
240 if (prev_p == scanner_p) {
241 shm_header_p->first_free_off = scanner_p->next;
242 } else {
243 prev_p->next = scanner_p->next;
245 shm_header_p->statistics.cells_free -= scanner_p->size;
246 shm_header_p->statistics.cells_used += scanner_p->size;
248 result_offset = shm_addr2offset(&(scanner_p[1]));
249 scanner_p->next = SHM_NOT_FREE_OFF;
251 /* end modification of shared mem */
252 shm_header_p->consistent = True;
254 global_unlock();
256 DEBUG(6,("shm_alloc : allocated %d bytes at offset %d\n",
257 size,result_offset));
259 return result_offset;
262 static void shm_solve_neighbors(struct ShmBlockDesc *head_p )
264 struct ShmBlockDesc *next_p;
266 /* Check if head_p and head_p->next are neighbors and if so
267 join them */
268 if ( head_p == EOList_Addr ) return ;
269 if ( head_p->next == EOList_Off ) return ;
271 next_p = (struct ShmBlockDesc *)shm_offset2addr(head_p->next);
272 if ((head_p + head_p->size + 1) == next_p) {
273 head_p->size += next_p->size + 1; /* adapt size */
274 head_p->next = next_p->next; /* link out */
276 shm_header_p->statistics.cells_free += 1;
277 shm_header_p->statistics.cells_system -= 1;
282 static BOOL shm_free(int offset)
284 struct ShmBlockDesc *header_p; /* pointer to header of
285 block to free */
286 struct ShmBlockDesc *scanner_p; /* used to scan the list */
287 struct ShmBlockDesc *prev_p; /* holds previous in the
288 list */
290 if (!shm_header_p) {
291 /* not mapped yet */
292 DEBUG(0,("ERROR shm_free : shmem not mapped\n"));
293 return False;
296 global_lock();
298 if (!shm_header_p->consistent) {
299 DEBUG(0,("ERROR shm_free : shmem not consistent\n"));
300 global_unlock();
301 return False;
304 /* make pointer to header of block */
305 header_p = ((struct ShmBlockDesc *)shm_offset2addr(offset) - 1);
307 if (header_p->next != SHM_NOT_FREE_OFF) {
308 DEBUG(0,("ERROR shm_free : bad offset (%d)\n",offset));
309 global_unlock();
310 return False;
313 /* find a place in the free_list to put the header in */
315 /* set scanner and previous pointer to start of list */
316 prev_p = (struct ShmBlockDesc *)
317 shm_offset2addr(shm_header_p->first_free_off);
318 scanner_p = prev_p ;
320 while ((scanner_p != EOList_Addr) &&
321 (scanner_p < header_p)) {
322 /* while we didn't scan past its position */
323 prev_p = scanner_p ;
324 scanner_p = (struct ShmBlockDesc *)
325 shm_offset2addr(scanner_p->next);
328 shm_header_p->consistent = False;
330 DEBUG(6,("shm_free : freeing %d bytes at offset %d\n",
331 header_p->size*CellSize,offset));
333 if (scanner_p == prev_p) {
334 shm_header_p->statistics.cells_free += header_p->size;
335 shm_header_p->statistics.cells_used -= header_p->size;
337 /* we must free it at the beginning of the list */
338 shm_header_p->first_free_off = shm_addr2offset(header_p);
339 /* set the free_list_pointer to this block_header */
341 /* scanner is the one that was first in the list */
342 header_p->next = shm_addr2offset(scanner_p);
343 shm_solve_neighbors(header_p);
345 shm_header_p->consistent = True;
346 } else {
347 shm_header_p->statistics.cells_free += header_p->size;
348 shm_header_p->statistics.cells_used -= header_p->size;
350 prev_p->next = shm_addr2offset(header_p);
351 header_p->next = shm_addr2offset(scanner_p);
352 shm_solve_neighbors(header_p) ;
353 shm_solve_neighbors(prev_p) ;
355 shm_header_p->consistent = True;
358 global_unlock();
359 return True;
364 * Function to create the hash table for the share mode entries. Called
365 * when smb shared memory is global locked.
367 static BOOL shm_create_hash_table(unsigned int hash_entries)
369 int size = hash_entries * sizeof(int);
371 global_lock();
372 shm_header_p->userdef_off = shm_alloc(size);
374 if(shm_header_p->userdef_off == 0) {
375 DEBUG(0,("shm_create_hash_table: Failed to create hash table of size %d\n",
376 size));
377 global_unlock();
378 return False;
381 /* Clear hash buckets. */
382 memset(shm_offset2addr(shm_header_p->userdef_off), '\0', size);
383 global_unlock();
384 return True;
388 static BOOL shm_validate_header(int size)
390 if(!shm_header_p) {
391 /* not mapped yet */
392 DEBUG(0,("ERROR shm_validate_header : shmem not mapped\n"));
393 return False;
396 if(shm_header_p->shm_magic != SHM_MAGIC) {
397 DEBUG(0,("ERROR shm_validate_header : bad magic\n"));
398 return False;
401 if(shm_header_p->shm_version != SHM_VERSION) {
402 DEBUG(0,("ERROR shm_validate_header : bad version %X\n",
403 shm_header_p->shm_version));
404 return False;
407 if(shm_header_p->total_size != size) {
408 DEBUG(0,("ERROR shmem size mismatch (old = %d, new = %d)\n",
409 shm_header_p->total_size,size));
410 return False;
413 if(!shm_header_p->consistent) {
414 DEBUG(0,("ERROR shmem not consistent\n"));
415 return False;
417 return True;
421 static BOOL shm_initialize(int size)
423 struct ShmBlockDesc * first_free_block_p;
425 DEBUG(5,("shm_initialize : initializing shmem size %d\n",size));
427 if( !shm_header_p ) {
428 /* not mapped yet */
429 DEBUG(0,("ERROR shm_initialize : shmem not mapped\n"));
430 return False;
433 shm_header_p->shm_magic = SHM_MAGIC;
434 shm_header_p->shm_version = SHM_VERSION;
435 shm_header_p->total_size = size;
436 shm_header_p->first_free_off = AlignedHeaderSize;
437 shm_header_p->userdef_off = 0;
439 first_free_block_p = (struct ShmBlockDesc *)
440 shm_offset2addr(shm_header_p->first_free_off);
441 first_free_block_p->next = EOList_Off;
442 first_free_block_p->size =
443 (size - (AlignedHeaderSize+CellSize))/CellSize;
444 shm_header_p->statistics.cells_free = first_free_block_p->size;
445 shm_header_p->statistics.cells_used = 0;
446 shm_header_p->statistics.cells_system = 1;
448 shm_header_p->consistent = True;
450 shm_initialize_called = True;
452 return True;
455 static BOOL shm_close( void )
457 return True;
461 static int shm_get_userdef_off(void)
463 if (!shm_header_p)
464 return 0;
465 else
466 return shm_header_p->userdef_off;
470 /*******************************************************************
471 Lock a particular hash bucket entry.
472 ******************************************************************/
473 static BOOL shm_lock_hash_entry(unsigned int entry)
475 return sem_change(entry+1, -1);
478 /*******************************************************************
479 Unlock a particular hash bucket entry.
480 ******************************************************************/
481 static BOOL shm_unlock_hash_entry(unsigned int entry)
483 return sem_change(entry+1, 1);
487 /*******************************************************************
488 Gather statistics on shared memory usage.
489 ******************************************************************/
490 static BOOL shm_get_usage(int *bytes_free,
491 int *bytes_used,
492 int *bytes_overhead)
494 if(!shm_header_p) {
495 /* not mapped yet */
496 DEBUG(0,("ERROR shm_free : shmem not mapped\n"));
497 return False;
500 *bytes_free = shm_header_p->statistics.cells_free * CellSize;
501 *bytes_used = shm_header_p->statistics.cells_used * CellSize;
502 *bytes_overhead = shm_header_p->statistics.cells_system * CellSize +
503 AlignedHeaderSize;
505 return True;
509 /*******************************************************************
510 hash a number into a hash_entry
511 ******************************************************************/
512 static unsigned shm_hash_size(void)
514 return hash_size;
518 static struct shmem_ops shmops = {
519 shm_close,
520 shm_alloc,
521 shm_free,
522 shm_get_userdef_off,
523 shm_offset2addr,
524 shm_addr2offset,
525 shm_lock_hash_entry,
526 shm_unlock_hash_entry,
527 shm_get_usage,
528 shm_hash_size,
531 /*******************************************************************
532 open the shared memory
533 ******************************************************************/
534 struct shmem_ops *sysv_shm_open(int ronly)
536 BOOL created_new = False;
537 BOOL other_processes;
538 struct shmid_ds shm_ds;
539 struct semid_ds sem_ds;
540 union semun su;
541 int i;
542 int pid;
544 read_only = ronly;
546 shm_size = lp_shmem_size();
548 DEBUG(4,("Trying sysv shmem open of size %d\n", shm_size));
550 /* first the semaphore */
551 sem_id = semget(SEMAPHORE_KEY, 0, 0);
552 if (sem_id == -1) {
553 if (read_only) return NULL;
555 hash_size = SHMEM_HASH_SIZE;
557 while (hash_size > 1) {
558 sem_id = semget(SEMAPHORE_KEY, hash_size+1,
559 IPC_CREAT|IPC_EXCL| SEMAPHORE_PERMS);
560 if (sem_id != -1 || errno != EINVAL) break;
561 hash_size--;
564 if (sem_id == -1) {
565 DEBUG(0,("Can't create or use semaphore %s\n",
566 strerror(errno)));
569 if (sem_id != -1) {
570 su.val = 1;
571 for (i=0;i<hash_size+1;i++) {
572 if (semctl(sem_id, i, SETVAL, su) != 0) {
573 DEBUG(1,("Failed to init semaphore %d\n", i));
578 if (shm_id == -1) {
579 sem_id = semget(SEMAPHORE_KEY, 0, 0);
581 if (sem_id == -1) {
582 DEBUG(0,("Can't create or use semaphore %s\n",
583 strerror(errno)));
584 return NULL;
587 su.buf = &sem_ds;
588 if (semctl(sem_id, 0, IPC_STAT, su) != 0) {
589 DEBUG(0,("ERROR shm_open : can't IPC_STAT\n"));
591 hash_size = sem_ds.sem_nsems-1;
593 if (!read_only) {
594 if (sem_ds.sem_perm.cuid != 0 || sem_ds.sem_perm.cgid != 0) {
595 DEBUG(0,("ERROR: root did not create the semaphore\n"));
596 return NULL;
599 if (semctl(sem_id, 0, GETVAL, su) == 0 &&
600 !process_exists((pid=semctl(sem_id, 0, GETPID, su)))) {
601 DEBUG(0,("WARNING: clearing global IPC lock set by dead process %d\n",
602 pid));
603 su.val = 1;
604 if (semctl(sem_id, 0, SETVAL, su) != 0) {
605 DEBUG(0,("ERROR: Failed to clear global lock\n"));
609 sem_ds.sem_perm.mode = SEMAPHORE_PERMS;
610 if (semctl(sem_id, 0, IPC_SET, su) != 0) {
611 DEBUG(0,("ERROR shm_open : can't IPC_SET\n"));
617 if (!global_lock())
618 return NULL;
621 for (i=1;i<hash_size+1;i++) {
622 if (semctl(sem_id, i, GETVAL, su) == 0 &&
623 !process_exists((pid=semctl(sem_id, i, GETPID, su)))) {
624 DEBUG(1,("WARNING: clearing IPC lock %d set by dead process %d\n",
625 i, pid));
626 su.val = 1;
627 if (semctl(sem_id, i, SETVAL, su) != 0) {
628 DEBUG(0,("ERROR: Failed to clear IPC lock %d\n", i));
633 /* try to use an existing key */
634 shm_id = shmget(SHMEM_KEY, shm_size, 0);
636 /* if that failed then create one */
637 if (shm_id == -1) {
638 if (read_only) return NULL;
639 while (shm_size > MIN_SHM_SIZE) {
640 shm_id = shmget(SHMEM_KEY, shm_size,
641 IPC_CREAT | IPC_EXCL | IPC_PERMS);
642 if (shm_id != -1 || errno != EINVAL) break;
643 shm_size *= 0.9;
645 created_new = (shm_id != -1);
648 if (shm_id == -1) {
649 DEBUG(0,("Can't create or use IPC area\n"));
650 global_unlock();
651 return NULL;
655 shm_header_p = (struct ShmHeader *)shmat(shm_id, 0,
656 read_only?SHM_RDONLY:0);
657 if ((int)shm_header_p == -1) {
658 DEBUG(0,("Can't attach to IPC area\n"));
659 global_unlock();
660 return NULL;
663 /* to find out if some other process is already mapping the file,
664 we use a registration file containing the processids of the file
665 mapping processes */
666 if (shmctl(shm_id, IPC_STAT, &shm_ds) != 0) {
667 DEBUG(0,("ERROR shm_open : can't IPC_STAT\n"));
670 if (!read_only) {
671 if (shm_ds.shm_perm.cuid != 0 || shm_ds.shm_perm.cgid != 0) {
672 DEBUG(0,("ERROR: root did not create the shmem\n"));
673 global_unlock();
674 return NULL;
678 shm_size = shm_ds.shm_segsz;
680 other_processes = (shm_ds.shm_nattch > 1);
682 if (!read_only && !other_processes) {
683 memset((char *)shm_header_p, 0, shm_size);
684 shm_initialize(shm_size);
685 shm_create_hash_table(hash_size);
686 DEBUG(3,("Initialised IPC area of size %d\n", shm_size));
687 } else if (!shm_validate_header(shm_size)) {
688 /* existing file is corrupt, samba admin should remove
689 it by hand */
690 DEBUG(0,("ERROR shm_open : corrupt IPC area - remove it!\n"));
691 global_unlock();
692 return NULL;
695 global_unlock();
696 return &shmops;
701 #else
/* Keep the translation unit non-empty when USE_SYSV_IPC is disabled. */
int ipc_dummy_procedure(void)
{
	return 0;
}
704 #endif