2 * Copyright (c) 1994 Adam Glass and Charles Hannum. All rights reserved.
3 * Copyright (c) 2013 Larisa Grigore <larisagrigore@gmail.com>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Adam Glass and Charles
17 * 4. The names of the authors may not be used to endorse or promote products
18 * derived from this software without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 #include <sys/types.h>
34 #include <sys/param.h>
35 #include <sys/vmmeter.h>
37 #include <sys/queue.h>
54 #include "sysvipc_hash.h"
55 #include "sysvipc_sockets.h"
/*
 * File-scope state for the daemon's SysV shared-memory bookkeeping.
 * NOTE(review): this chunk is a garbled paste — original lines are split
 * across fragments and some lines are missing (gaps in the embedded
 * numbering, e.g. the shminfo initializer body). Code left byte-identical.
 */
/* Tunable limits for shm (initializer body not visible in this view). */
57 static struct shminfo shminfo
= {
/* shm_last_free: index hint for the next free shmsegs[] slot (-1 = scan);
 * shm_committed: pages accounted to shm; shmalloced: size of shmsegs[]. */
66 static int shm_last_free
, shm_committed
, shmalloced
;
/* Table of all shared-memory segment descriptors, sized by shmalloced. */
68 static struct shmid_ds
*shmsegs
;
/* Message-queue tunables, defined elsewhere. */
71 extern struct msginfo msginfo
;
/* pid -> client lookup table, defined elsewhere. */
73 extern struct hashtable
*clientshash
;
/*
 * create_sysv_file() — create and initialize the file that backs a SysV
 * resource (shm, sem, msg, or undo segment, judging by the four filename
 * variants below), then mmap() it to initialize the in-file structures.
 * Returns the open fd (callers store it in handle->fd; -1 on error,
 * judging by the check in shmget_allocate_segment).
 * NOTE(review): many original lines are missing from this view (the
 * embedded numbering jumps, e.g. 78 -> 82, 108 -> 116), so the visible
 * fragments do not show the switch/if structure selecting between the
 * four resource types — confirm against the full file.
 */
76 create_sysv_file(struct shmget_msg
*msg
, size_t size
,
77 struct shmid_ds
*shmseg
) {
78 char filename
[FILENAME_MAX
];
82 struct semid_pool
*sems
;
83 struct msqid_pool
*msgq
;
/* Build the backing-file path; one sprintf per resource type.
 * NOTE(review): `key` is not declared in the visible fragments —
 * presumably derived from msg; verify in the full source. */
91 sprintf(filename
, "%s/%s_%ld", DIRPATH
, SHM_NAME
, key
);
94 sprintf(filename
, "%s/%s_%ld", DIRPATH
, SEM_NAME
, key
);
97 sprintf(filename
, "%s/%s_%ld", DIRPATH
, MSG_NAME
, key
);
100 sprintf(filename
, "%s/%s_%ld", DIRPATH
, UNDO_NAME
, key
);
/* Create/open the backing file world read-write (0666). */
106 fd
= open(filename
, O_RDWR
| O_CREAT
, 0666);
108 sysvd_print_err("create sysv file: open\n");
116 /* Map the semaphore to initialize it. */
117 addr
= mmap(NULL
, size
, PROT_READ
| PROT_WRITE
, MAP_SHARED
, fd
, 0);
118 //TODO modify 0 for more sems on a page
120 sysvd_print_err("create sysv file: mmap");
124 /* There is no need for any lock because all clients
125 * that try to access this segment are blocked until
126 * it becames ~SHMSEG_REMOVED. */
127 sems
= (struct semid_pool
*)addr
;
/* Number of semaphores that fit after the pool header.
 * NOTE(review): the divisor line is missing from this view. */
128 nsems
= (msg
->size
- sizeof(struct semid_pool
)) /
130 sysvd_print("allocate %d sems\n", nsems
);
/* Initialize the pool-wide reader/writer lock and mutex. */
134 sysv_rwlock_init(&sems
->rwlock
);
136 sysv_mutex_init(&sems
->mutex
);
138 /* Credentials are kept in shmid_ds structure. */
139 sems
->ds
.sem_perm
.seq
= shmseg
->shm_perm
.seq
;
140 sems
->ds
.sem_nsems
= nsems
;
141 sems
->ds
.sem_otime
= 0;
142 //sems->ds.sem_ctime = time(NULL);
146 /* Initialize each sem. */
/* NOTE(review): `nsems + sizeof(struct sem)` as a memset length looks
 * suspicious (a product such as nsems * sizeof(struct sem) would be
 * expected) — verify against the full file before changing. */
147 memset(sems
->ds
.sem_base
, 0, nsems
+ sizeof(struct sem
));
151 for (l
=0; l
< nsems
; l
++)
152 sysv_mutex_init(&sems
->ds
.sem_base
[l
].sem_mutex
);
159 /* Map the message queue to initialize it. */
160 addr
= mmap(NULL
, size
, PROT_READ
| PROT_WRITE
, MAP_SHARED
, fd
, 0);
162 sysvd_print_err("create sysv file: mmap");
166 /* There is no need for any lock because all clients
167 * that try to access this segment are blocked until
168 * it becames ~SHMSEG_REMOVED. */
169 msgq
= (struct msqid_pool
*)addr
; //TODO
170 /*sysvd_print("Attention!!! : %ld %ld %ld %ld\n",
171 sizeof(struct msqid_pool),
172 sizeof(msgq->msghdrs),
173 sizeof(msgq->msgmaps),
174 sizeof(msgq->msgpool));*/
178 sysv_rwlock_init(&msgq
->rwlock
);
180 sysv_mutex_init(&msgq
->mutex
);
182 /* In kernel implementation, this was done globally. */
/* Chain the msgmap entries into a free list indexed by `next`. */
183 for (i
= 0; i
< msginfo
.msgseg
; i
++) {
185 msgq
->msgmaps
[i
-1].next
= i
;
186 msgq
->msgmaps
[i
].next
= -1; /* implies entry is available */
188 msgq
->free_msgmaps
= 0;
189 msgq
->nfree_msgmaps
= msginfo
.msgseg
;
/* Same free-list chaining for the message headers. */
191 for (i
= 0; i
< msginfo
.msgtql
; i
++) {
192 msgq
->msghdrs
[i
].msg_type
= 0;
194 msgq
->msghdrs
[i
-1].msg_next
= i
;
195 msgq
->msghdrs
[i
].msg_next
= -1;
197 msgq
->free_msghdrs
= 0;
199 /* Credentials are kept in shmid_ds structure. */
200 msgq
->ds
.msg_perm
.seq
= shmseg
->shm_perm
.seq
;
/* Empty queue: no first/last message, zero bytes and messages. */
201 msgq
->ds
.first
.msg_first_index
= -1;
202 msgq
->ds
.last
.msg_last_index
= -1;
203 msgq
->ds
.msg_cbytes
= 0;
204 msgq
->ds
.msg_qnum
= 0;
205 msgq
->ds
.msg_qbytes
= msginfo
.msgmnb
;
206 msgq
->ds
.msg_lspid
= 0;
207 msgq
->ds
.msg_lrpid
= 0;
208 msgq
->ds
.msg_stime
= 0;
209 msgq
->ds
.msg_rtime
= 0;
226 /* Install for the client the file corresponding to fd. */
/*
 * install_fd_client() — pass the fd backing a segment to the client
 * identified by pid, over the client's unix socket (send_fd()).
 * Looks the client up in clientshash; logs when the pid is unknown or
 * the fd transfer fails. NOTE(review): the return statements are among
 * the lines missing from this view.
 */
228 install_fd_client(pid_t pid
, int fd
) {
230 struct client
*cl
= _hash_lookup(clientshash
, pid
);
232 sysvd_print_err("no client entry for pid = %d\n", pid
);
236 ret
= send_fd(cl
->sock
, fd
);
238 sysvd_print_err("can not send fd to client %d\n", pid
);
/*
 * shm_find_segment_by_key() — linear scan of shmsegs[] for an ALLOCATED
 * segment whose key matches. NOTE(review): the body that returns the
 * index / not-found value is missing from this view.
 */
246 shm_find_segment_by_key(key_t key
)
250 for (i
= 0; i
< shmalloced
; i
++) {
251 if ((shmsegs
[i
].shm_perm
.mode
& SHMSEG_ALLOCATED
) &&
252 shmsegs
[i
].shm_perm
.key
== key
)
/*
 * shm_find_segment_by_shmid() — resolve a shmid to its shmid_ds entry.
 * Validates the index (IPCID_TO_IX) against shmalloced, then checks the
 * segment is ALLOCATED, not REMOVED, and that the sequence number still
 * matches (stale-id detection). NOTE(review): the NULL/success return
 * lines are missing from this view.
 */
258 static struct shmid_ds
*
259 shm_find_segment_by_shmid(int shmid
)
262 struct shmid_ds
*shmseg
;
264 segnum
= IPCID_TO_IX(shmid
);
265 if (segnum
< 0 || segnum
>= shmalloced
) {
266 sysvd_print_err("segnum out of range\n");
270 shmseg
= &shmsegs
[segnum
];
/* Stale or removed id: mode and seq must both still match. */
271 if ((shmseg
->shm_perm
.mode
& (SHMSEG_ALLOCATED
| SHMSEG_REMOVED
))
272 != SHMSEG_ALLOCATED
||
273 shmseg
->shm_perm
.seq
!= IPCID_TO_SEQ(shmid
)) {
274 sysvd_print("segment most probably removed\n");
280 /* Remove a shared memory segment. */
/*
 * shm_deallocate_segment() — free all daemon-side resources of segment
 * shmsegs[segnum]: the shm_handle, the committed-page accounting, and
 * the slot itself (mode = SHMSEG_FREE). For SEMGET segments it also
 * recomputes nsems for logging. NOTE(review): the close() of the backing
 * fd implied by the "Close the corresponding file" comment is among the
 * lines missing from this view.
 */
282 shm_deallocate_segment(int segnum
)
285 struct shmid_ds
*shmseg
= &shmsegs
[segnum
];
286 struct shm_handle
*internal
=
287 (struct shm_handle
*)shmseg
->shm_internal
;
290 sysvd_print("deallocate segment %d\n", segnum
);
292 size
= round_page(shmseg
->shm_segsz
);
295 if (internal
->type
== SEMGET
) {
/* NOTE(review): divisor line missing, as in create_sysv_file. */
296 nsems
= (shmseg
->shm_segsz
- sizeof(struct semid_pool
)) /
299 sysvd_print("freed %d sems\n", nsems
);
303 /* Close the corresponding file. */
306 /* Free other resources. */
307 free(shmseg
->shm_internal
);
308 shmseg
->shm_internal
= NULL
;
/* Return the pages to the commit accounting. */
309 shm_committed
-= btoc(size
);
312 shmseg
->shm_perm
.mode
= SHMSEG_FREE
;
/* Forward declarations: daemon-side mmap/munmap of a segment by shmid. */
315 static void *map_seg(int);
316 static int munmap_seg(int, void *);
318 /* In sem and msg case notify the other processes that use it. */
/*
 * mark_segment_removed() — map a sem or msg segment, take its pool
 * lock(s), (presumably) flag it as removed, release, and unmap.
 * NOTE(review): the `type` dispatch (SEMGET vs MSGGET branches) and the
 * statements that actually set the removed marker are missing from this
 * view — only the lock/unlock/unmap scaffolding is visible.
 */
320 mark_segment_removed(int shmid
, int type
) {
321 struct semid_pool
*semaptr
;
322 struct msqid_pool
*msgq
;
/* Semaphore-pool branch. */
326 semaptr
= (struct semid_pool
*)map_seg(shmid
);
328 sysv_rwlock_wrlock(&semaptr
->rwlock
);
330 sysv_mutex_lock(&semaptr
->mutex
);
334 /* It is not necessary to wake waiting threads because
335 * if the group of semaphores is acquired by a thread,
336 * the smaptr lock is held, so it is impossible to
340 sysv_rwlock_unlock(&semaptr
->rwlock
);
342 sysv_mutex_unlock(&semaptr
->mutex
);
344 munmap_seg(shmid
, semaptr
);
/* Message-queue branch. */
347 msgq
= (struct msqid_pool
*)map_seg(shmid
);
349 sysv_rwlock_wrlock(&msgq
->rwlock
);
351 sysv_mutex_lock(&msgq
->mutex
);
356 sysv_rwlock_unlock(&msgq
->rwlock
);
358 sysv_mutex_unlock(&msgq
->mutex
);
360 munmap_seg(shmid
, msgq
);
367 /* Get the id of an existing shared memory segment. */
/*
 * shmget_existing() — handle shmget() for a key that matched an existing
 * slot: reject IPC_CREAT|IPC_EXCL, check permissions via ipcperm(), check
 * the requested size fits, and return IXSEQ_TO_IPCID(segnum, perm).
 * NOTE(review): the error-return statements for the rejection paths are
 * missing from this view.
 */
369 shmget_existing(struct shmget_msg
*shmget_msg
, int mode
,
370 int segnum
, struct cmsgcred
*cred
)
372 struct shmid_ds
*shmseg
;
375 shmseg
= &shmsegs
[segnum
];
376 if (shmseg
->shm_perm
.mode
& SHMSEG_REMOVED
) {
378 * This segment is in the process of being allocated. Wait
379 * until it's done, and look the key up again (in case the
380 * allocation failed or it was freed).
382 //TODO Maybe it will be necessary if the daemon is multithreading
383 /*shmseg->shm_perm.mode |= SHMSEG_WANTED;
384 error = tsleep((caddr_t)shmseg, PCATCH, "shmget", 0);
/* Exclusive creation of an existing key must fail. */
389 if ((shmget_msg
->shmflg
& (IPC_CREAT
| IPC_EXCL
)) == (IPC_CREAT
| IPC_EXCL
))
391 error
= ipcperm(cred
, &shmseg
->shm_perm
, mode
);
/* A nonzero requested size larger than the segment is an error. */
394 if (shmget_msg
->size
&& (shmget_msg
->size
> shmseg
->shm_segsz
))
396 return (IXSEQ_TO_IPCID(segnum
, shmseg
->shm_perm
));
399 /* Create a shared memory segment and return the id. */
/*
 * shmget_allocate_segment() — allocate a fresh shmsegs[] slot, create the
 * backing file via create_sysv_file(), fill in the shmid_ds metadata from
 * the request and credentials, and return the new shmid.
 * NOTE(review): several control-flow lines (returns for the limit checks,
 * loop break, closing braces) are missing from this view; the structure
 * below is the visible skeleton only.
 */
401 shmget_allocate_segment(pid_t pid
, struct shmget_msg
*shmget_msg
,
402 int mode
, struct cmsgcred
*cred
)
404 int i
, segnum
, shmid
;
406 struct shmid_ds
*shmseg
;
407 struct shm_handle
*handle
;
409 /* It is possible after a process calls exec().
410 * We don't create another segment but return the old one
411 * with all information.
412 * This segment is destroyed only when process dies.
/* UNDO segments: reuse the client's existing undo segment if present. */
414 if (shmget_msg
->type
== UNDOGET
) {
415 struct client
*cl
= _hash_lookup(clientshash
, pid
);
416 if (cl
->undoid
!= -1)
/* Minimum-size limit check (max is left to the OS, per comment). */
420 if ((long)shmget_msg
->size
< shminfo
.shmmin
)
421 //|| (long)shmget_msg->size > shminfo.shmmax)
422 /* There is no need to check the max limit,
423 * the operating system do this for us.
426 if (shm_nused
>= shminfo
.shmmni
) /* any shmids left? */
429 /* Compute the size of the segment. */
430 size
= round_page(shmget_msg
->size
);
432 /* Find a free entry in the shmsegs vector. */
433 if (shm_last_free
< 0) {
434 // shmrealloc(); /* maybe expand the shmsegs[] array */
435 for (i
= 0; i
< shmalloced
; i
++) {
436 if (shmsegs
[i
].shm_perm
.mode
& SHMSEG_FREE
)
439 if (i
== shmalloced
) {
440 sysvd_print("i == shmalloced\n");
/* Fast path: reuse the cached free slot. */
445 segnum
= shm_last_free
;
448 shmseg
= &shmsegs
[segnum
];
450 * In case we sleep in malloc(), mark the segment present but deleted
451 * so that noone else tries to create the same key.
453 shmseg
->shm_perm
.mode
= SHMSEG_ALLOCATED
| SHMSEG_REMOVED
;
454 shmseg
->shm_perm
.key
= shmget_msg
->key
;
/* Bump the 15-bit sequence number so stale shmids are detectable. */
455 shmseg
->shm_perm
.seq
= (shmseg
->shm_perm
.seq
+ 1) & 0x7fff;
457 /* Create the file for the shared memory segment. */
/* NOTE(review): malloc() result is used unchecked here — visible as-is
 * in the source; a NULL check may exist in the missing lines. */
458 handle
= shmseg
->shm_internal
= malloc(sizeof(struct shm_handle
));
459 handle
->type
= shmget_msg
->type
;
460 handle
->fd
= create_sysv_file(shmget_msg
, size
, shmseg
);
/* Backing-file creation failed: roll the slot back to FREE. */
461 if (handle
->fd
== -1) {
464 shmseg
->shm_perm
.mode
= SHMSEG_FREE
;
465 shm_last_free
= segnum
;
470 LIST_INIT(&handle
->attached_list
);
/* Second failure path (fd < 0): free the handle and release the slot. */
472 if (handle
->fd
< 0) {
473 free(shmseg
->shm_internal
);
474 shmseg
->shm_internal
= NULL
;
475 shm_last_free
= segnum
;
476 shmseg
->shm_perm
.mode
= SHMSEG_FREE
;
481 shmid
= IXSEQ_TO_IPCID(segnum
, shmseg
->shm_perm
);
/* Owner/creator credentials come from the SCM_CREDS message. */
483 shmseg
->shm_perm
.cuid
= shmseg
->shm_perm
.uid
= cred
->cmcred_euid
;
484 shmseg
->shm_perm
.cgid
= shmseg
->shm_perm
.gid
= cred
->cmcred_gid
;
485 shmseg
->shm_perm
.mode
= (shmseg
->shm_perm
.mode
& SHMSEG_WANTED
) |
486 (mode
& ACCESSPERMS
) | SHMSEG_ALLOCATED
;
488 shmseg
->shm_cpid
= pid
;
489 shmseg
->shm_lpid
= shmseg
->shm_nattch
= 0;
490 shmseg
->shm_atime
= shmseg
->shm_dtime
= 0;
491 shmseg
->shm_ctime
= time(NULL
);
493 shmseg
->shm_segsz
= shmget_msg
->size
;
494 shm_committed
+= btoc(size
);
497 if (shmseg
->shm_perm
.mode
& SHMSEG_WANTED
) {
499 * Somebody else wanted this key while we were asleep. Wake
502 shmseg
->shm_perm
.mode
&= ~SHMSEG_WANTED
;
503 //TODO multithreading
504 //wakeup((caddr_t)shmseg);
/* Segment fully initialized: clear the construction-in-progress flag. */
506 shmseg
->shm_perm
.mode
&= ~SHMSEG_REMOVED
;
508 if (shmget_msg
->type
== UNDOGET
) {
509 /* The file is used by daemon when clients terminates
510 * and sem_undo resources must be cleaned.
512 struct client
*cl
= _hash_lookup(clientshash
, pid
);
519 /* Handle a shmget() request. */
/*
 * handle_shmget() — top-level shmget dispatcher: namespace the key by
 * resource type, then either find an existing segment or allocate a new
 * one, and finally push the backing fd to the requesting client.
 * NOTE(review): returns/else branches are missing from this view.
 */
521 handle_shmget(pid_t pid
, struct shmget_msg
*shmget_msg
,
522 struct cmsgcred
*cred
) {
523 int segnum
, mode
, error
;
524 struct shmid_ds
*shmseg
;
525 struct shm_handle
*handle
;
527 //if (!jail_sysvipc_allowed && td->td_cmsgcred->cr_prison != NULL)
529 mode
= shmget_msg
->shmflg
& ACCESSPERMS
;
531 sysvd_print("ask for key = %ld\n", shmget_msg
->key
);
/* Partition the key space by resource type: low 14 bits of the user key,
 * type tag in the top bits — keeps shm/sem/msg/undo keys disjoint. */
532 shmget_msg
->key
= (shmget_msg
->key
& 0x3FFF) |
533 (shmget_msg
->type
<< 30);
534 sysvd_print("ask for key = %ld\n", shmget_msg
->key
);
536 if (shmget_msg
->key
!= IPC_PRIVATE
) {
538 segnum
= shm_find_segment_by_key(shmget_msg
->key
);
540 error
= shmget_existing(shmget_msg
, mode
, segnum
, cred
);
541 //TODO if daemon is multithreading
542 //if (error == EAGAIN)
/* Key not found and caller did not ask to create: error path. */
546 if ((shmget_msg
->shmflg
& IPC_CREAT
) == 0) {
551 error
= shmget_allocate_segment(pid
, shmget_msg
, mode
, cred
);
552 sysvd_print("allocate segment = %d\n", error
);
555 * Install to th client the file corresponding to the
556 * shared memory segment.
557 * client_fd is the file descriptor added in the client
/* `error` holds the shmid on success — resolve it back to the segment. */
560 shmseg
= shm_find_segment_by_shmid(error
);
561 if (shmseg
== NULL
) {
562 sysvd_print_err("can not find segment by shmid\n");
566 handle
= (struct shm_handle
*)shmseg
->shm_internal
;
567 if (install_fd_client(pid
, handle
->fd
) != 0)
574 /* Handle a shmat() request. */
/*
 * handle_shmat() — attach bookkeeping for a client shmat(): permission
 * check, optional grow of the backing file (UNDOGET only), attach-time
 * accounting, and registration of the (pid, shmid) pair in both the
 * segment's attached-pid list and the client's attached-id list.
 * NOTE(review): returns and some else/closing-brace lines are missing
 * from this view; `cl` and `fd` declarations are not visible.
 */
576 handle_shmat(pid_t pid
, struct shmat_msg
*shmat_msg
,
577 struct cmsgcred
*cred
) {
580 struct shmid_ds
*shmseg
;
581 struct pid_attached
*pidatt
;
582 struct shm_handle
*handle
;
583 size_t new_size
= shmat_msg
->size
;
585 struct id_attached
*idatt
;
587 /*if (!jail_sysvipc_allowed && td->td_cmsgcred->cr_prison != NULL)
591 shmseg
= shm_find_segment_by_shmid(shmat_msg
->shmid
);
592 if (shmseg
== NULL
) {
593 sysvd_print_err("shmat error: segment was not found\n");
/* Read-only attach needs IPC_R; read-write needs IPC_R|IPC_W. */
597 error
= ipcperm(cred
, &shmseg
->shm_perm
,
598 (shmat_msg
->shmflg
& SHM_RDONLY
) ? IPC_R
: IPC_R
|IPC_W
);
602 handle
= shmseg
->shm_internal
;
/* Growing is only honored for UNDOGET segments (per the branch below);
 * the file is extended with ftruncate to the page-rounded new size. */
604 if (shmat_msg
->size
> shmseg
->shm_segsz
) {
605 if (handle
->type
!= UNDOGET
) {
610 fd
= ((struct shm_handle
*)shmseg
->shm_internal
)->fd
;
611 ftruncate(fd
, round_page(new_size
));
612 shmseg
->shm_segsz
= new_size
;
615 shmseg
->shm_lpid
= pid
;
616 shmseg
->shm_atime
= time(NULL
);
/* UNDO segments keep nattch pinned at 1 (see comment below). */
618 if (handle
->type
!= UNDOGET
)
619 shmseg
->shm_nattch
++;
621 shmseg
->shm_nattch
= 1; /* Only a process calls shmat and
622 only once. If it does it for more than once that is because
623 it called exec() and reinitialized the undo segment. */
625 /* Insert the pid in the segment list of attaced pids.
626 * The list is checked in handle_shmdt so that only
627 * attached pids can dettached from this segment.
629 sysvd_print("nattch = %d pid = %d\n",
630 shmseg
->shm_nattch
, pid
);
/* NOTE(review): malloc() results below are used unchecked in the
 * visible lines. */
632 pidatt
= malloc(sizeof(*pidatt
));
634 LIST_INSERT_HEAD(&handle
->attached_list
, pidatt
, link
);
636 /* Add the segment at the list of attached segments of the client.
637 * It is used when the process finishes its execution. The daemon
638 * walks through the list to dettach the segments.
640 idatt
= malloc(sizeof(*idatt
));
641 idatt
->shmid
= shmat_msg
->shmid
;
642 cl
= _hash_lookup(clientshash
, pid
);
643 LIST_INSERT_HEAD(&cl
->ids_attached
, idatt
, link
);
650 /* Handle a shmdt() request. */
/*
 * handle_shmdt() — detach bookkeeping: verify pid is actually attached
 * (both on the segment's pid list and the client's id list), unlink the
 * entries, stamp shm_dtime, and deallocate the segment when the last
 * attacher leaves a REMOVED segment. NOTE(review): the loop-exit gotos /
 * error returns and the free() of the removed list nodes are among the
 * lines missing from this view.
 */
652 handle_shmdt(pid_t pid
, int shmid
) {
653 struct shmid_ds
*shmseg
;
655 struct shm_handle
*handle
;
656 struct pid_attached
*pidatt
;
657 struct id_attached
*idatt
;
660 sysvd_print("shmdt pid %d shmid %d\n", pid
, shmid
);
661 /*if (!jail_sysvipc_allowed && td->td_cmsgcred->cr_prison != NULL)
665 segnum
= IPCID_TO_IX(shmid
);
666 shmseg
= &shmsegs
[segnum
];
667 handle
= shmseg
->shm_internal
;
669 /* Check if pid is attached. */
670 LIST_FOREACH(pidatt
, &handle
->attached_list
, link
)
671 if (pidatt
->pid
== pid
)
674 sysvd_print_err("process %d is not attached to %d (1)\n",
678 LIST_REMOVE(pidatt
, link
);
680 /* Remove the segment from the list of attached segments of the pid.*/
681 cl
= _hash_lookup(clientshash
, pid
);
682 LIST_FOREACH(idatt
, &cl
->ids_attached
, link
)
683 if (idatt
->shmid
== shmid
)
686 sysvd_print_err("process %d is not attached to %d (2)\n",
690 LIST_REMOVE(idatt
, link
);
692 shmseg
->shm_dtime
= time(NULL
);
694 /* If no other process attaced remove the segment. */
695 if ((--shmseg
->shm_nattch
<= 0) &&
696 (shmseg
->shm_perm
.mode
& SHMSEG_REMOVED
)) {
697 shm_deallocate_segment(segnum
);
698 shm_last_free
= segnum
;
704 /* Handle a shmctl() request. */
/*
 * handle_shmctl() — dispatch IPC_STAT / IPC_SET / IPC_RMID on a segment.
 * NOTE(review): the switch case labels, break statements and error
 * returns are missing from this view; the branch boundaries below are
 * inferred from the log strings and must be confirmed against the full
 * file.
 */
706 handle_shmctl(struct shmctl_msg
*shmctl_msg
,
707 struct cmsgcred
*cred
) {
709 struct shmid_ds
*shmseg
, *inbuf
;
711 /* if (!jail_sysvipc_allowed && td->td_cmsgcred->cr_prison != NULL)
714 shmseg
= shm_find_segment_by_shmid(shmctl_msg
->shmid
);
716 if (shmseg
== NULL
) {
721 switch (shmctl_msg
->cmd
) {
/* IPC_STAT: read permission required; copy the descriptor out. */
723 sysvd_print("IPC STAT\n");
724 error
= ipcperm(cred
, &shmseg
->shm_perm
, IPC_R
);
726 sysvd_print("IPC_STAT not allowed\n");
729 shmctl_msg
->buf
= *shmseg
;
/* IPC_SET: modify permission required; update uid/gid/mode bits only. */
732 sysvd_print("IPC SET\n");
733 error
= ipcperm(cred
, &shmseg
->shm_perm
, IPC_M
);
735 sysvd_print("IPC_SET not allowed\n");
738 inbuf
= &shmctl_msg
->buf
;
740 shmseg
->shm_perm
.uid
= inbuf
->shm_perm
.uid
;
741 shmseg
->shm_perm
.gid
= inbuf
->shm_perm
.gid
;
742 shmseg
->shm_perm
.mode
=
743 (shmseg
->shm_perm
.mode
& ~ACCESSPERMS
) |
744 (inbuf
->shm_perm
.mode
& ACCESSPERMS
);
745 shmseg
->shm_ctime
= time(NULL
);
/* IPC_RMID: modify permission required; mark removed, deallocate now if
 * nobody is attached, otherwise notify users via mark_segment_removed. */
748 sysvd_print("IPC RMID shmid = %d\n",
750 error
= ipcperm(cred
, &shmseg
->shm_perm
, IPC_M
);
752 sysvd_print("IPC_RMID not allowed\n");
755 shmseg
->shm_perm
.key
= IPC_PRIVATE
;
756 shmseg
->shm_perm
.mode
|= SHMSEG_REMOVED
;
757 if (shmseg
->shm_nattch
<= 0) {
758 shm_deallocate_segment(IPCID_TO_IX(shmctl_msg
->shmid
));
759 shm_last_free
= IPCID_TO_IX(shmctl_msg
->shmid
);
762 /* In sem and msg cases, other process must be
763 * noticed about the removal. */
764 struct shm_handle
*internal
=
765 (struct shm_handle
*)shmseg
->shm_internal
;
766 mark_segment_removed(shmctl_msg
->shmid
,
783 /* Function used by daemon to map a sysv resource. */
/*
 * map_seg() body fragment — resolves shmid to its segment, validates the
 * shm_handle, and mmap()s the backing fd for the daemon's own use.
 * NOTE(review): the function header line (declared above as
 * `static void *map_seg(int)`) and the return statements are missing
 * from this view; `fd`, `size`, `addr` declarations are not visible.
 */
786 struct shmid_ds
*shmseg
;
787 struct shm_handle
*internal
;
793 shmseg
= shm_find_segment_by_shmid(shmid
);
795 sysvd_print_err("map_seg error:"
796 "semid %d not found\n", shmid
);
800 internal
= (struct shm_handle
*)shmseg
->shm_internal
;
802 sysvd_print_err("map_seg error: internal for"
803 "semid %d not found\n", shmid
);
809 size
= round_page(shmseg
->shm_segsz
);
811 addr
= mmap(NULL
, size
, PROT_READ
| PROT_WRITE
, MAP_SHARED
, fd
, 0);
813 sysvd_print_err("map_seg: error mmap semid = %d\n", shmid
);
820 /* Function used by daemon to munmap a sysv resource. */
/*
 * munmap_seg() — counterpart of map_seg(): validates shmid and its
 * handle, computes the page-rounded size, then (presumably, in lines
 * missing from this view) munmap()s `addr` and returns a status.
 */
822 munmap_seg(int shmid
, void *addr
) {
823 struct shmid_ds
*shmseg
;
824 struct shm_handle
*internal
;
828 shmseg
= shm_find_segment_by_shmid(shmid
);
830 sysvd_print_err("munmap_seg error:"
831 "semid %d not found\n", shmid
);
835 internal
= (struct shm_handle
*)shmseg
->shm_internal
;
837 sysvd_print_err("munmap_seg error: internal for"
838 "semid %d not found\n", shmid
);
842 size
= round_page(shmseg
->shm_segsz
);
/*
 * Initialization fragment (enclosing function header not visible in this
 * view — presumably the shm init routine): sizes shmsegs[] from
 * shminfo.shmmni and marks every slot free with seq 0.
 * NOTE(review): malloc() result is used unchecked in the visible lines.
 */
852 shmalloced
= shminfo
.shmmni
;
853 shmsegs
= malloc(shmalloced
* sizeof(shmsegs
[0]));
854 for (i
= 0; i
< shmalloced
; i
++) {
855 shmsegs
[i
].shm_perm
.mode
= SHMSEG_FREE
;
856 shmsegs
[i
].shm_perm
.seq
= 0;
/*
 * Message-queue init fragment (enclosing function header not visible in
 * this view): verifies msginfo.msgssz is a small power of two by doubling
 * a counter (the `i <<= 1`-style update is among the missing lines), then
 * derives msginfo.msgmax from segment count and size.
 */
863 * msginfo.msgssz should be a power of two for efficiency reasons.
864 * It is also pretty silly if msginfo.msgssz is less than 8
865 * or greater than about 256 so ...
868 while (i
< 1024 && i
!= msginfo
.msgssz
)
870 if (i
!= msginfo
.msgssz
) {
871 sysvd_print_err("msginfo.msgssz=%d (0x%x)\n", msginfo
.msgssz
,
873 sysvd_print_err("msginfo.msgssz not a small power of 2");
876 msginfo
.msgmax
= msginfo
.msgseg
* msginfo
.msgssz
;
/*
 * semexit() — apply a dead client's semaphore-undo records: map the undo
 * segment, then for each entry (processed last-to-first) map the target
 * semaphore pool, re-validate it, and apply un_adjval to the semaphore
 * value, waking any waiters via umtx_wakeup().
 * NOTE(review): error returns, the un_cnt decrement, and several closing
 * braces are missing from this view.
 */
885 semexit(int undoid
) {
886 struct sem_undo
*suptr
;
888 struct shmid_ds
*undoseg
;
894 undoseg
= shm_find_segment_by_shmid(undoid
);
895 /* The UNDO segment must be mapped by only one segment. */
896 if (undoseg
->shm_nattch
!= 1) {
897 sysvd_print_err("undo segment mapped by more"
898 "than one process\n");
902 suptr
= (struct sem_undo
*)map_seg(undoid
);
904 sysvd_print_err("no %d undo segment found\n", undoid
);
908 /* No locking mechanism is required because only the
909 * client and the daemon can access the UNDO segment.
910 * At this moment the client is disconnected so only
911 * the daemon can modify this segment.
913 while (suptr
->un_cnt
) {
914 struct semid_pool
*semaptr
;
/* Pop the last undo record: target semid, sem index, adjustment. */
920 ix
= suptr
->un_cnt
- 1;
921 semid
= suptr
->un_ent
[ix
].un_id
;
922 semnum
= suptr
->un_ent
[ix
].un_num
;
923 adjval
= suptr
->un_ent
[ix
].un_adjval
;
925 semaptr
= (struct semid_pool
*)map_seg(semid
);
930 /* Was it removed? */
931 if (semaptr
->gen
== -1 ||
932 semaptr
->ds
.sem_perm
.seq
!= IPCID_TO_SEQ(semid
) ||
933 (semaptr
->ds
.sem_perm
.mode
& SHMSEG_ALLOCATED
) == 0) {
935 sysvd_print_err("semexit - semid not allocated\n");
938 if (semnum
>= semaptr
->ds
.sem_nsems
) {
940 sysvd_print_err("semexit - semnum out of range\n");
/* Both rdlock and wrlock acquisitions appear below — presumably on
 * alternative compile-time paths; confirm against the full file. */
946 sysv_rwlock_rdlock(&semaptr
->rwlock
);
948 sysv_rwlock_wrlock(&semaptr
->rwlock
);
951 sysv_mutex_lock(&semaptr
->mutex
);
952 /* Nobody can remove the semaphore beteen the check and the
953 * lock acquisition because it must first send a IPC_RMID
954 * to me and I will process that after finishing this function.
957 semptr
= &semaptr
->ds
.sem_base
[semnum
];
959 sysv_mutex_lock(&semptr
->sem_mutex
);
/* Re-check the record is still the one we sampled before locking. */
961 if (ix
== suptr
->un_cnt
- 1 &&
962 semid
== suptr
->un_ent
[ix
].un_id
&&
963 semnum
== suptr
->un_ent
[ix
].un_num
&&
964 adjval
== suptr
->un_ent
[ix
].un_adjval
) {
/* Clamp: never drive the semaphore value negative. */
968 if (semptr
->semval
< -adjval
)
971 semptr
->semval
+= adjval
;
973 semptr
->semval
+= adjval
;
975 /* TODO multithreaded daemon:
976 * Check again if the semaphore was removed and do
977 * not wake anyone if it was.*/
978 umtx_wakeup((int *)&semptr
->semval
, 0);
981 sysv_mutex_unlock(&semptr
->sem_mutex
);
985 sysv_rwlock_unlock(&semaptr
->rwlock
);
987 sysv_mutex_unlock(&semaptr
->mutex
);
989 munmap_seg(semid
, semaptr
);
992 munmap_seg(undoid
, suptr
);
997 shmexit(struct client
*cl
) {
998 struct id_attached
*idatt
;
1000 while (!LIST_EMPTY(&cl
->ids_attached
)) {
1001 idatt
= LIST_FIRST(&cl
->ids_attached
);
1002 handle_shmdt(cl
->pid
, idatt
->shmid
);