/*
 * Pid namespaces
 *
 * Authors:
 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 *     Many thanks to Oleg Nesterov for comments and help
 */

#include <linux/pid.h>
#include <linux/pid_namespace.h>
#include <linux/syscalls.h>
#include <linux/err.h>
#include <linux/acct.h>

#define BITS_PER_PAGE		(PAGE_SIZE*8)
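
/*
 * A pid allocated in a namespace nested N levels deep carries N + 1
 * numerical ids, one per namespace it is visible in, so struct pid has a
 * different size at each depth. create_pid_cachep() below keeps one kmem
 * cache per depth on pid_caches_lh and reuses it for every namespace at
 * that level.
 */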
struct pid_cache {
	int nr_ids;
	char name[16];
	struct kmem_cache *cachep;
	struct list_head list;
};

static LIST_HEAD(pid_caches_lh);
static DEFINE_MUTEX(pid_caches_mutex);
static struct kmem_cache *pid_ns_cachep;

/*
 * creates the kmem cache to allocate pids from.
 * @nr_ids: the number of numerical ids this pid will have to carry
 */
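
/*
 * struct pid already embeds one struct upid, so a pid that has to carry
 * nr_ids numbers only needs (nr_ids - 1) extra upids appended to it:
 * nr_ids == 3 (a task two namespace levels down), for example, gives
 * cache objects of sizeof(struct pid) + 2 * sizeof(struct upid).
 */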
static struct kmem_cache *create_pid_cachep(int nr_ids)
{
	struct pid_cache *pcache;
	struct kmem_cache *cachep;

	mutex_lock(&pid_caches_mutex);
	list_for_each_entry(pcache, &pid_caches_lh, list)
		if (pcache->nr_ids == nr_ids)
			goto out;

	pcache = kmalloc(sizeof(struct pid_cache), GFP_KERNEL);
	if (pcache == NULL)
		goto err_alloc;

	snprintf(pcache->name, sizeof(pcache->name), "pid_%d", nr_ids);
	cachep = kmem_cache_create(pcache->name,
			sizeof(struct pid) + (nr_ids - 1) * sizeof(struct upid),
			0, SLAB_HWCACHE_ALIGN, NULL);
	if (cachep == NULL)
		goto err_cachep;

	pcache->nr_ids = nr_ids;
	pcache->cachep = cachep;
	list_add(&pcache->list, &pid_caches_lh);
out:
	mutex_unlock(&pid_caches_mutex);
	return pcache->cachep;

err_cachep:
	kfree(pcache);
err_alloc:
	mutex_unlock(&pid_caches_mutex);
	return NULL;
}
static struct pid_namespace *create_pid_namespace(unsigned int level)
{
	struct pid_namespace *ns;
	int i;

	ns = kmem_cache_zalloc(pid_ns_cachep, GFP_KERNEL);
	if (ns == NULL)
		goto out;

	ns->pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!ns->pidmap[0].page)
		goto out_free;

	ns->pid_cachep = create_pid_cachep(level + 1);
	if (ns->pid_cachep == NULL)
		goto out_free_map;

	kref_init(&ns->kref);
	ns->level = level;

	set_bit(0, ns->pidmap[0].page);
	atomic_set(&ns->pidmap[0].nr_free, BITS_PER_PAGE - 1);

	for (i = 1; i < PIDMAP_ENTRIES; i++)
		atomic_set(&ns->pidmap[i].nr_free, BITS_PER_PAGE);

	return ns;

out_free_map:
	kfree(ns->pidmap[0].page);
out_free:
	kmem_cache_free(pid_ns_cachep, ns);
out:
	return ERR_PTR(-ENOMEM);
}

static void destroy_pid_namespace(struct pid_namespace *ns)
{
	int i;

	for (i = 0; i < PIDMAP_ENTRIES; i++)
		kfree(ns->pidmap[i].page);
	kmem_cache_free(pid_ns_cachep, ns);
}
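
/*
 * Called from copy_namespaces() when a task's namespaces are copied:
 * without CLONE_NEWPID the parent's namespace is reused with its refcount
 * raised, and CLONE_NEWPID together with CLONE_THREAD is rejected because
 * every thread of a group has to live in the same pid namespace.
 */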
struct pid_namespace *copy_pid_ns(unsigned long flags, struct pid_namespace *old_ns)
{
	struct pid_namespace *new_ns;

	BUG_ON(!old_ns);
	new_ns = get_pid_ns(old_ns);
	if (!(flags & CLONE_NEWPID))
		goto out;

	new_ns = ERR_PTR(-EINVAL);
	if (flags & CLONE_THREAD)
		goto out_put;

	new_ns = create_pid_namespace(old_ns->level + 1);
	if (!IS_ERR(new_ns))
		new_ns->parent = get_pid_ns(old_ns);

out_put:
	put_pid_ns(old_ns);
out:
	return new_ns;
}
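
/*
 * kref release callback, reached via put_pid_ns() once the last reference
 * to a namespace is dropped. Tearing down a child also drops the reference
 * it holds on its parent, so destruction can cascade up the hierarchy.
 */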
void free_pid_ns(struct kref *kref)
{
	struct pid_namespace *ns, *parent;

	ns = container_of(kref, struct pid_namespace, kref);

	parent = ns->parent;
	destroy_pid_namespace(ns);

	if (parent != NULL)
		put_pid_ns(parent);
}
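
/*
 * Invoked when the init task of a pid namespace exits: every task still
 * running in the namespace is killed and reaped here so the namespace can
 * be torn down afterwards.
 */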
void zap_pid_ns_processes(struct pid_namespace *pid_ns)
{
	int nr;
	int rc;

	/*
	 * The last thread in the cgroup-init thread group is terminating.
	 * Find remaining pids in the namespace, signal and wait for them
	 * to exit.
	 *
	 * Note:  This signals each thread in the namespace - even those that
	 *	  belong to the same thread group. To avoid this, we would have
	 *	  to walk the entire tasklist looking for processes in this
	 *	  namespace, but that could be unnecessarily expensive if the
	 *	  pid namespace has just a few processes. Or we need to
	 *	  maintain a tasklist for each pid namespace.
	 */
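	/*
	 * next_pidmap() returns the next pid number above the one passed in
	 * that is currently allocated in this namespace, or a negative value
	 * once the bitmap is exhausted, so starting the scan at 1 signals
	 * everything except the caller - the namespace's own init.
	 */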
	read_lock(&tasklist_lock);
	nr = next_pidmap(pid_ns, 1);
	while (nr > 0) {
		kill_proc_info(SIGKILL, SEND_SIG_PRIV, nr);
		nr = next_pidmap(pid_ns, nr);
	}
	read_unlock(&tasklist_lock);
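
	/*
	 * This task is the namespace's init and thus the reaper for every
	 * task killed above: keep calling wait4() until it reports -ECHILD.
	 * TIF_SIGPENDING is cleared each round so the SIGKILL queued to our
	 * own thread group does not make sys_wait4() return early.
	 */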
	do {
		clear_thread_flag(TIF_SIGPENDING);
		rc = sys_wait4(-1, NULL, __WALL, NULL);
	} while (rc != -ECHILD);

	acct_exit_ns(pid_ns);
	return;
}

static __init int pid_namespaces_init(void)
{
	pid_ns_cachep = KMEM_CACHE(pid_namespace, SLAB_PANIC);
	return 0;
}

__initcall(pid_namespaces_init);