security/selinux/netif.c
/*
 * Network interface table.
 *
 * Network interfaces (devices) do not have a security field, so we
 * maintain a table associating each interface with a SID.
 *
 * Author: James Morris <jmorris@redhat.com>
 *
 * Copyright (C) 2003 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2,
 * as published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>

#include "security.h"
#include "objsec.h"
#include "netif.h"
#define SEL_NETIF_HASH_SIZE	64
#define SEL_NETIF_HASH_MAX	1024

#undef DEBUG

#ifdef DEBUG
#define DEBUGP printk
#else
#define DEBUGP(format, args...)
#endif
struct sel_netif
{
	struct list_head list;
	struct netif_security_struct nsec;
	struct rcu_head rcu_head;
};
static u32 sel_netif_total;
static LIST_HEAD(sel_netif_list);
static DEFINE_SPINLOCK(sel_netif_lock);
static struct list_head sel_netif_hash[SEL_NETIF_HASH_SIZE];
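/*
 * Concurrency scheme: lookups traverse the hash chains locklessly under
 * rcu_read_lock(), while all updates are serialized by sel_netif_lock;
 * freed entries are reclaimed via call_rcu() so readers never see a
 * dangling sel_netif.
 */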
static inline u32 sel_netif_hasfn(struct net_device *dev)
{
	return (dev->ifindex & (SEL_NETIF_HASH_SIZE - 1));
}
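/*
 * SEL_NETIF_HASH_SIZE is a power of two, so the mask above is a cheap
 * "ifindex % 64"; e.g. ifindex 3 and ifindex 67 both land in bucket 3.
 */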
/*
 * All of the devices should normally fit in the hash, so we optimize
 * for that case.
 */
static inline struct sel_netif *sel_netif_find(struct net_device *dev)
{
	struct list_head *pos;
	int idx = sel_netif_hasfn(dev);

	__list_for_each_rcu(pos, &sel_netif_hash[idx]) {
		struct sel_netif *netif = list_entry(pos,
						     struct sel_netif, list);
		if (likely(netif->nsec.dev == dev))
			return netif;
	}
	return NULL;
}
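/*
 * Note: sel_netif_find() must be called with rcu_read_lock() held, or
 * with sel_netif_lock held so the chain cannot change underneath it.
 */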
static int sel_netif_insert(struct sel_netif *netif)
{
	int idx, ret = 0;

	if (sel_netif_total >= SEL_NETIF_HASH_MAX) {
		ret = -ENOSPC;
		goto out;
	}

	idx = sel_netif_hasfn(netif->nsec.dev);
	list_add_rcu(&netif->list, &sel_netif_hash[idx]);
	sel_netif_total++;
out:
	return ret;
}
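/*
 * Note: the caller must hold sel_netif_lock.  The SEL_NETIF_HASH_MAX
 * cap bounds the cache so a flood of devices cannot consume unbounded
 * memory; once over the limit, lookups fall back to the slow path.
 */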
static void sel_netif_free(struct rcu_head *p)
{
	struct sel_netif *netif = container_of(p, struct sel_netif, rcu_head);

	DEBUGP("%s: %s\n", __FUNCTION__, netif->nsec.dev->name);
	kfree(netif);
}
static void sel_netif_destroy(struct sel_netif *netif)
{
	DEBUGP("%s: %s\n", __FUNCTION__, netif->nsec.dev->name);

	list_del_rcu(&netif->list);
	sel_netif_total--;
	call_rcu(&netif->rcu_head, sel_netif_free);
}
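/*
 * The entry is unlinked immediately, but the kfree() in sel_netif_free()
 * is deferred by call_rcu() until every reader that might still be
 * traversing the old chain has left its RCU read-side critical section.
 */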
static struct sel_netif *sel_netif_lookup(struct net_device *dev)
{
	int ret;
	struct sel_netif *netif, *new;
	struct netif_security_struct *nsec;

	netif = sel_netif_find(dev);
	if (likely(netif != NULL))
		goto out;

	new = kzalloc(sizeof(*new), GFP_ATOMIC);
	if (!new) {
		netif = ERR_PTR(-ENOMEM);
		goto out;
	}

	nsec = &new->nsec;

	ret = security_netif_sid(dev->name, &nsec->if_sid, &nsec->msg_sid);
	if (ret < 0) {
		kfree(new);
		netif = ERR_PTR(ret);
		goto out;
	}

	nsec->dev = dev;

	spin_lock_bh(&sel_netif_lock);

	netif = sel_netif_find(dev);
	if (netif) {
		spin_unlock_bh(&sel_netif_lock);
		kfree(new);
		goto out;
	}

	ret = sel_netif_insert(new);
	spin_unlock_bh(&sel_netif_lock);

	if (ret) {
		kfree(new);
		netif = ERR_PTR(ret);
		goto out;
	}

	netif = new;

	DEBUGP("new: ifindex=%u name=%s if_sid=%u msg_sid=%u\n",
	       dev->ifindex, dev->name, nsec->if_sid, nsec->msg_sid);
out:
	return netif;
}
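/*
 * The alloc-then-recheck pattern above handles two CPUs racing to cache
 * the same device: the entry is built outside the lock (with GFP_ATOMIC,
 * since this can run in softirq context), sel_netif_find() is repeated
 * under sel_netif_lock, and the loser simply frees its duplicate.
 */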
static void sel_netif_assign_sids(u32 if_sid_in, u32 msg_sid_in,
				  u32 *if_sid_out, u32 *msg_sid_out)
{
	if (if_sid_out)
		*if_sid_out = if_sid_in;
	if (msg_sid_out)
		*msg_sid_out = msg_sid_in;
}
static int sel_netif_sids_slow(struct net_device *dev, u32 *if_sid,
			       u32 *msg_sid)
{
	int ret = 0;
	u32 tmp_if_sid, tmp_msg_sid;

	ret = security_netif_sid(dev->name, &tmp_if_sid, &tmp_msg_sid);
	if (!ret)
		sel_netif_assign_sids(tmp_if_sid, tmp_msg_sid, if_sid, msg_sid);
	return ret;
}
int sel_netif_sids(struct net_device *dev, u32 *if_sid, u32 *msg_sid)
{
	int ret = 0;
	struct sel_netif *netif;

	rcu_read_lock();
	netif = sel_netif_lookup(dev);
	if (IS_ERR(netif)) {
		rcu_read_unlock();
		ret = sel_netif_sids_slow(dev, if_sid, msg_sid);
		goto out;
	}
	sel_netif_assign_sids(netif->nsec.if_sid, netif->nsec.msg_sid,
			      if_sid, msg_sid);
	rcu_read_unlock();
out:
	return ret;
}
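/*
 * Example (hypothetical caller, for illustration only): a hook that
 * needs just the interface SID may pass NULL for the message SID:
 *
 *	u32 if_sid;
 *	int err = sel_netif_sids(skb->dev, &if_sid, NULL);
 *	if (err)
 *		return err;
 *
 * If the cached lookup fails (e.g. -ENOMEM), sel_netif_sids_slow()
 * answers the query directly from the policy, so callers still get
 * valid SIDs even when nothing could be cached.
 */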
static void sel_netif_kill(struct net_device *dev)
{
	struct sel_netif *netif;

	spin_lock_bh(&sel_netif_lock);
	netif = sel_netif_find(dev);
	if (netif)
		sel_netif_destroy(netif);
	spin_unlock_bh(&sel_netif_lock);
}
static void sel_netif_flush(void)
{
	int idx;

	spin_lock_bh(&sel_netif_lock);
	for (idx = 0; idx < SEL_NETIF_HASH_SIZE; idx++) {
		struct sel_netif *netif;

		list_for_each_entry(netif, &sel_netif_hash[idx], list)
			sel_netif_destroy(netif);
	}
	spin_unlock_bh(&sel_netif_lock);
}
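/*
 * Note: the plain list_for_each_entry() above is safe even though each
 * iteration destroys the current entry, because list_del_rcu() leaves
 * the entry's ->next pointer intact for concurrent readers and the
 * actual kfree() is deferred through RCU.
 */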
static int sel_netif_avc_callback(u32 event, u32 ssid, u32 tsid,
				  u16 class, u32 perms, u32 *retained)
{
	if (event == AVC_CALLBACK_RESET) {
		sel_netif_flush();
		synchronize_net();
	}
	return 0;
}
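/*
 * AVC_CALLBACK_RESET fires when the policy is (re)loaded: cached SIDs
 * may map to different contexts under the new policy, so the table is
 * flushed and synchronize_net() lets in-flight packet processing drain
 * before the callback returns.
 */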
static int sel_netif_netdev_notifier_handler(struct notifier_block *this,
					     unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (event == NETDEV_DOWN)
		sel_netif_kill(dev);

	return NOTIFY_DONE;
}
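/*
 * Evicting the entry on NETDEV_DOWN keeps the table from holding stale
 * entries (and stale SIDs) for devices that have gone away; the entry
 * is simply rebuilt on the next lookup if the device comes back up.
 */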
static struct notifier_block sel_netif_netdev_notifier = {
	.notifier_call = sel_netif_netdev_notifier_handler,
};
static __init int sel_netif_init(void)
{
	int i, err = 0;

	if (!selinux_enabled)
		goto out;

	for (i = 0; i < SEL_NETIF_HASH_SIZE; i++)
		INIT_LIST_HEAD(&sel_netif_hash[i]);

	register_netdevice_notifier(&sel_netif_netdev_notifier);

	err = avc_add_callback(sel_netif_avc_callback, AVC_CALLBACK_RESET,
			       SECSID_NULL, SECSID_NULL, SECCLASS_NULL, 0);
	if (err)
		panic("avc_add_callback() failed, error %d\n", err);
out:
	return err;
}
__initcall(sel_netif_init);