1 #include <linux/workqueue.h>
2 #include <linux/rtnetlink.h>
3 #include <linux/cache.h>
4 #include <linux/slab.h>
5 #include <linux/list.h>
6 #include <linux/delay.h>
7 #include <linux/sched.h>
9 #include <linux/rculist.h>
10 #include <linux/nsproxy.h>
11 #include <linux/netdevice.h>
12 #include <net/net_namespace.h>
13 #include <net/netns/generic.h>
14 #include <net/rtnetlink.h>
/*
 * Our network namespace constructor/destructor lists
 */
20 static LIST_HEAD(pernet_list
);
21 static struct list_head
*first_device
= &pernet_list
;
22 static DEFINE_MUTEX(net_mutex
);
24 LIST_HEAD(net_namespace_list
);
25 EXPORT_SYMBOL_GPL(net_namespace_list
);
28 EXPORT_SYMBOL(init_net
);
30 #define INITIAL_NET_GEN_PTRS 13 /* +1 for len +2 for rcu_head */
32 static void unregister_netdevices(struct net
*net
, struct list_head
*list
)
34 struct net_device
*dev
;
35 /* At exit all network devices most be removed from a network
36 * namespace. Do this in the reverse order of registeration.
38 for_each_netdev_reverse(net
, dev
) {
39 if (dev
->rtnl_link_ops
)
40 dev
->rtnl_link_ops
->dellink(dev
, list
);
42 unregister_netdevice_queue(dev
, list
);
46 static int ops_init(const struct pernet_operations
*ops
, struct net
*net
)
49 if (ops
->id
&& ops
->size
) {
50 void *data
= kzalloc(ops
->size
, GFP_KERNEL
);
54 err
= net_assign_generic(net
, *ops
->id
, data
);
61 return ops
->init(net
);
65 static void ops_free(const struct pernet_operations
*ops
, struct net
*net
)
67 if (ops
->id
&& ops
->size
) {
69 kfree(net_generic(net
, id
));
/*
 * setup_net runs the initializers for the network namespace object.
 */
76 static __net_init
int setup_net(struct net
*net
)
78 /* Must be called with net_mutex held */
79 const struct pernet_operations
*ops
, *saved_ops
;
82 atomic_set(&net
->count
, 1);
84 #ifdef NETNS_REFCNT_DEBUG
85 atomic_set(&net
->use_count
, 0);
88 list_for_each_entry(ops
, &pernet_list
, list
) {
89 error
= ops_init(ops
, net
);
97 /* Walk through the list backwards calling the exit functions
98 * for the pernet modules whose init functions did not fail.
101 list_for_each_entry_continue_reverse(ops
, &pernet_list
, list
) {
104 if (&ops
->list
== first_device
) {
105 LIST_HEAD(dev_kill_list
);
107 unregister_netdevices(net
, &dev_kill_list
);
108 unregister_netdevice_many(&dev_kill_list
);
113 list_for_each_entry_continue_reverse(ops
, &pernet_list
, list
)
120 static struct net_generic
*net_alloc_generic(void)
122 struct net_generic
*ng
;
123 size_t generic_size
= sizeof(struct net_generic
) +
124 INITIAL_NET_GEN_PTRS
* sizeof(void *);
126 ng
= kzalloc(generic_size
, GFP_KERNEL
);
128 ng
->len
= INITIAL_NET_GEN_PTRS
;
134 static struct kmem_cache
*net_cachep
;
135 static struct workqueue_struct
*netns_wq
;
137 static struct net
*net_alloc(void)
139 struct net
*net
= NULL
;
140 struct net_generic
*ng
;
142 ng
= net_alloc_generic();
146 net
= kmem_cache_zalloc(net_cachep
, GFP_KERNEL
);
150 rcu_assign_pointer(net
->gen
, ng
);
159 static void net_free(struct net
*net
)
161 #ifdef NETNS_REFCNT_DEBUG
162 if (unlikely(atomic_read(&net
->use_count
) != 0)) {
163 printk(KERN_EMERG
"network namespace not free! Usage: %d\n",
164 atomic_read(&net
->use_count
));
169 kmem_cache_free(net_cachep
, net
);
172 static struct net
*net_create(void)
179 return ERR_PTR(-ENOMEM
);
180 mutex_lock(&net_mutex
);
184 list_add_tail_rcu(&net
->list
, &net_namespace_list
);
187 mutex_unlock(&net_mutex
);
195 struct net
*copy_net_ns(unsigned long flags
, struct net
*old_net
)
197 if (!(flags
& CLONE_NEWNET
))
198 return get_net(old_net
);
202 static DEFINE_SPINLOCK(cleanup_list_lock
);
203 static LIST_HEAD(cleanup_list
); /* Must hold cleanup_list_lock to touch */
205 static void cleanup_net(struct work_struct
*work
)
207 const struct pernet_operations
*ops
;
208 struct net
*net
, *tmp
;
209 LIST_HEAD(net_kill_list
);
211 /* Atomically snapshot the list of namespaces to cleanup */
212 spin_lock_irq(&cleanup_list_lock
);
213 list_replace_init(&cleanup_list
, &net_kill_list
);
214 spin_unlock_irq(&cleanup_list_lock
);
216 mutex_lock(&net_mutex
);
218 /* Don't let anyone else find us. */
220 list_for_each_entry(net
, &net_kill_list
, cleanup_list
)
221 list_del_rcu(&net
->list
);
225 * Another CPU might be rcu-iterating the list, wait for it.
226 * This needs to be before calling the exit() notifiers, so
227 * the rcu_barrier() below isn't sufficient alone.
231 /* Run all of the network namespace exit methods */
232 list_for_each_entry_reverse(ops
, &pernet_list
, list
) {
234 list_for_each_entry(net
, &net_kill_list
, cleanup_list
)
237 if (&ops
->list
== first_device
) {
238 LIST_HEAD(dev_kill_list
);
240 list_for_each_entry(net
, &net_kill_list
, cleanup_list
)
241 unregister_netdevices(net
, &dev_kill_list
);
242 unregister_netdevice_many(&dev_kill_list
);
246 /* Free the net generic variables */
247 list_for_each_entry_reverse(ops
, &pernet_list
, list
) {
248 if (ops
->size
&& ops
->id
) {
249 list_for_each_entry(net
, &net_kill_list
, cleanup_list
)
254 mutex_unlock(&net_mutex
);
256 /* Ensure there are no outstanding rcu callbacks using this
261 /* Finally it is safe to free my network namespace structure */
262 list_for_each_entry_safe(net
, tmp
, &net_kill_list
, cleanup_list
) {
263 list_del_init(&net
->cleanup_list
);
267 static DECLARE_WORK(net_cleanup_work
, cleanup_net
);
269 void __put_net(struct net
*net
)
271 /* Cleanup the network namespace in process context */
274 spin_lock_irqsave(&cleanup_list_lock
, flags
);
275 list_add(&net
->cleanup_list
, &cleanup_list
);
276 spin_unlock_irqrestore(&cleanup_list_lock
, flags
);
278 queue_work(netns_wq
, &net_cleanup_work
);
280 EXPORT_SYMBOL_GPL(__put_net
);
283 struct net
*copy_net_ns(unsigned long flags
, struct net
*old_net
)
285 if (flags
& CLONE_NEWNET
)
286 return ERR_PTR(-EINVAL
);
291 struct net
*get_net_ns_by_pid(pid_t pid
)
293 struct task_struct
*tsk
;
296 /* Lookup the network namespace */
297 net
= ERR_PTR(-ESRCH
);
299 tsk
= find_task_by_vpid(pid
);
301 struct nsproxy
*nsproxy
;
302 nsproxy
= task_nsproxy(tsk
);
304 net
= get_net(nsproxy
->net_ns
);
309 EXPORT_SYMBOL_GPL(get_net_ns_by_pid
);
311 static int __init
net_ns_init(void)
313 struct net_generic
*ng
;
316 net_cachep
= kmem_cache_create("net_namespace", sizeof(struct net
),
320 /* Create workqueue for cleanup */
321 netns_wq
= create_singlethread_workqueue("netns");
323 panic("Could not create netns workq");
326 ng
= net_alloc_generic();
328 panic("Could not allocate generic netns");
330 rcu_assign_pointer(init_net
.gen
, ng
);
332 mutex_lock(&net_mutex
);
333 if (setup_net(&init_net
))
334 panic("Could not setup the initial network namespace");
337 list_add_tail_rcu(&init_net
.list
, &net_namespace_list
);
340 mutex_unlock(&net_mutex
);
345 pure_initcall(net_ns_init
);
348 static int __register_pernet_operations(struct list_head
*list
,
349 struct pernet_operations
*ops
)
351 struct net
*net
, *undo_net
;
354 list_add_tail(&ops
->list
, list
);
355 if (ops
->init
|| (ops
->id
&& ops
->size
)) {
357 error
= ops_init(ops
, net
);
365 /* If I have an error cleanup all namespaces I initialized */
366 list_del(&ops
->list
);
368 for_each_net(undo_net
) {
369 if (net_eq(undo_net
, net
))
375 if (ops
->size
&& ops
->id
) {
376 for_each_net(undo_net
) {
377 if (net_eq(undo_net
, net
))
379 ops_free(ops
, undo_net
);
386 static void __unregister_pernet_operations(struct pernet_operations
*ops
)
390 list_del(&ops
->list
);
394 if (ops
->id
&& ops
->size
)
401 static int __register_pernet_operations(struct list_head
*list
,
402 struct pernet_operations
*ops
)
405 err
= ops_init(ops
, &init_net
);
407 ops_free(ops
, &init_net
);
412 static void __unregister_pernet_operations(struct pernet_operations
*ops
)
415 ops
->exit(&init_net
);
416 ops_free(ops
, &init_net
);
419 #endif /* CONFIG_NET_NS */
421 static DEFINE_IDA(net_generic_ids
);
423 static int register_pernet_operations(struct list_head
*list
,
424 struct pernet_operations
*ops
)
430 error
= ida_get_new_above(&net_generic_ids
, 1, ops
->id
);
432 if (error
== -EAGAIN
) {
433 ida_pre_get(&net_generic_ids
, GFP_KERNEL
);
439 error
= __register_pernet_operations(list
, ops
);
440 if (error
&& ops
->id
)
441 ida_remove(&net_generic_ids
, *ops
->id
);
446 static void unregister_pernet_operations(struct pernet_operations
*ops
)
449 __unregister_pernet_operations(ops
);
451 ida_remove(&net_generic_ids
, *ops
->id
);
/**
 * register_pernet_subsys - register a network namespace subsystem
 * @ops: pernet operations structure for the subsystem
 *
 * Register a subsystem which has init and exit functions
 * that are called when network namespaces are created and
 * destroyed respectively.
 *
 * When registered all network namespace init functions are
 * called for every existing network namespace. Allowing kernel
 * modules to have a race free view of the set of network namespaces.
 *
 * When a new network namespace is created all of the init
 * methods are called in the order in which they were registered.
 *
 * When a network namespace is destroyed all of the exit methods
 * are called in the reverse of the order with which they were
 * registered.
 */
473 int register_pernet_subsys(struct pernet_operations
*ops
)
476 mutex_lock(&net_mutex
);
477 error
= register_pernet_operations(first_device
, ops
);
478 mutex_unlock(&net_mutex
);
481 EXPORT_SYMBOL_GPL(register_pernet_subsys
);
/**
 * unregister_pernet_subsys - unregister a network namespace subsystem
 * @ops: pernet operations structure to manipulate
 *
 * Remove the pernet operations structure from the list to be
 * used when network namespaces are created or destroyed. In
 * addition run the exit method for all existing network
 * namespaces.
 */
492 void unregister_pernet_subsys(struct pernet_operations
*module
)
494 mutex_lock(&net_mutex
);
495 unregister_pernet_operations(module
);
496 mutex_unlock(&net_mutex
);
498 EXPORT_SYMBOL_GPL(unregister_pernet_subsys
);
/**
 * register_pernet_device - register a network namespace device
 * @ops: pernet operations structure for the subsystem
 *
 * Register a device which has init and exit functions
 * that are called when network namespaces are created and
 * destroyed respectively.
 *
 * When registered all network namespace init functions are
 * called for every existing network namespace. Allowing kernel
 * modules to have a race free view of the set of network namespaces.
 *
 * When a new network namespace is created all of the init
 * methods are called in the order in which they were registered.
 *
 * When a network namespace is destroyed all of the exit methods
 * are called in the reverse of the order with which they were
 * registered.
 */
519 int register_pernet_device(struct pernet_operations
*ops
)
522 mutex_lock(&net_mutex
);
523 error
= register_pernet_operations(&pernet_list
, ops
);
524 if (!error
&& (first_device
== &pernet_list
))
525 first_device
= &ops
->list
;
526 mutex_unlock(&net_mutex
);
529 EXPORT_SYMBOL_GPL(register_pernet_device
);
/**
 * unregister_pernet_device - unregister a network namespace netdevice
 * @ops: pernet operations structure to manipulate
 *
 * Remove the pernet operations structure from the list to be
 * used when network namespaces are created or destroyed. In
 * addition run the exit method for all existing network
 * namespaces.
 */
540 void unregister_pernet_device(struct pernet_operations
*ops
)
542 mutex_lock(&net_mutex
);
543 if (&ops
->list
== first_device
)
544 first_device
= first_device
->next
;
545 unregister_pernet_operations(ops
);
546 mutex_unlock(&net_mutex
);
548 EXPORT_SYMBOL_GPL(unregister_pernet_device
);
550 static void net_generic_release(struct rcu_head
*rcu
)
552 struct net_generic
*ng
;
554 ng
= container_of(rcu
, struct net_generic
, rcu
);
558 int net_assign_generic(struct net
*net
, int id
, void *data
)
560 struct net_generic
*ng
, *old_ng
;
562 BUG_ON(!mutex_is_locked(&net_mutex
));
565 ng
= old_ng
= net
->gen
;
566 if (old_ng
->len
>= id
)
569 ng
= kzalloc(sizeof(struct net_generic
) +
570 id
* sizeof(void *), GFP_KERNEL
);
575 * Some synchronisation notes:
577 * The net_generic explores the net->gen array inside rcu
578 * read section. Besides once set the net->gen->ptr[x]
579 * pointer never changes (see rules in netns/generic.h).
581 * That said, we simply duplicate this array and schedule
582 * the old copy for kfree after a grace period.
586 memcpy(&ng
->ptr
, &old_ng
->ptr
, old_ng
->len
* sizeof(void*));
588 rcu_assign_pointer(net
->gen
, ng
);
589 call_rcu(&old_ng
->rcu
, net_generic_release
);
591 ng
->ptr
[id
- 1] = data
;
594 EXPORT_SYMBOL_GPL(net_assign_generic
);