/*
 * net-sysfs.c - network device class and attributes
 *
 * Copyright (c) 2003 Stephen Hemminger <shemminger@osdl.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/nsproxy.h>
#include <net/net_namespace.h>
#include <linux/rtnetlink.h>
#include <linux/wireless.h>
#include <linux/vmalloc.h>
#include <net/wext.h>

#include "net-sysfs.h"

#ifdef CONFIG_SYSFS
static const char fmt_hex[] = "%#x\n";
static const char fmt_long_hex[] = "%#lx\n";
static const char fmt_dec[] = "%d\n";
static const char fmt_ulong[] = "%lu\n";

static inline int dev_isalive(const struct net_device *dev)
{
	return dev->reg_state <= NETREG_REGISTERED;
}

/* use same locking rules as GIF* ioctl's */
static ssize_t netdev_show(const struct device *dev,
			   struct device_attribute *attr, char *buf,
			   ssize_t (*format)(const struct net_device *, char *))
{
	struct net_device *net = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	read_lock(&dev_base_lock);
	if (dev_isalive(net))
		ret = (*format)(net, buf);
	read_unlock(&dev_base_lock);

	return ret;
}

/* generate a show function for simple field */
#define NETDEVICE_SHOW(field, format_string)				\
static ssize_t format_##field(const struct net_device *net, char *buf)	\
{									\
	return sprintf(buf, format_string, net->field);			\
}									\
static ssize_t show_##field(struct device *dev,				\
			    struct device_attribute *attr, char *buf)	\
{									\
	return netdev_show(dev, attr, buf, format_##field);		\
}
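
/*
 * Illustrative only (this expansion is not part of the file): for a field
 * such as mtu, NETDEVICE_SHOW(mtu, fmt_dec) generates roughly
 *
 *	static ssize_t format_mtu(const struct net_device *net, char *buf)
 *	{
 *		return sprintf(buf, fmt_dec, net->mtu);
 *	}
 *	static ssize_t show_mtu(struct device *dev,
 *				struct device_attribute *attr, char *buf)
 *	{
 *		return netdev_show(dev, attr, buf, format_mtu);
 *	}
 *
 * so every field gets a sysfs show routine that goes through netdev_show()
 * and therefore takes dev_base_lock and checks dev_isalive().
 */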

/* use same locking and permission rules as SIF* ioctl's */
static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t len,
			    int (*set)(struct net_device *, unsigned long))
{
	struct net_device *net = to_net_dev(dev);
	char *endp;
	unsigned long new;
	int ret = -EINVAL;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	new = simple_strtoul(buf, &endp, 0);
	if (endp == buf)
		goto err;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(net)) {
		if ((ret = (*set)(net, new)) == 0)
			ret = len;
	}
	rtnl_unlock();
 err:
	return ret;
}

NETDEVICE_SHOW(dev_id, fmt_hex);
NETDEVICE_SHOW(addr_len, fmt_dec);
NETDEVICE_SHOW(iflink, fmt_dec);
NETDEVICE_SHOW(ifindex, fmt_dec);
NETDEVICE_SHOW(features, fmt_long_hex);
NETDEVICE_SHOW(type, fmt_dec);
NETDEVICE_SHOW(link_mode, fmt_dec);

/* use same locking rules as GIFHWADDR ioctl's */
static ssize_t show_address(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct net_device *net = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	read_lock(&dev_base_lock);
	if (dev_isalive(net))
		ret = sysfs_format_mac(buf, net->dev_addr, net->addr_len);
	read_unlock(&dev_base_lock);
	return ret;
}

static ssize_t show_broadcast(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *net = to_net_dev(dev);
	if (dev_isalive(net))
		return sysfs_format_mac(buf, net->broadcast, net->addr_len);
	return -EINVAL;
}

static ssize_t show_carrier(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	if (netif_running(netdev)) {
		return sprintf(buf, fmt_dec, !!netif_carrier_ok(netdev));
	}
	return -EINVAL;
}

static ssize_t show_speed(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (netif_running(netdev) &&
	    netdev->ethtool_ops &&
	    netdev->ethtool_ops->get_settings) {
		struct ethtool_cmd cmd = { ETHTOOL_GSET };

		if (!netdev->ethtool_ops->get_settings(netdev, &cmd))
			ret = sprintf(buf, fmt_dec, ethtool_cmd_speed(&cmd));
	}
	rtnl_unlock();
	return ret;
}

static ssize_t show_duplex(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (netif_running(netdev) &&
	    netdev->ethtool_ops &&
	    netdev->ethtool_ops->get_settings) {
		struct ethtool_cmd cmd = { ETHTOOL_GSET };

		if (!netdev->ethtool_ops->get_settings(netdev, &cmd))
			ret = sprintf(buf, "%s\n", cmd.duplex ? "full" : "half");
	}
	rtnl_unlock();
	return ret;
}

static ssize_t show_dormant(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev))
		return sprintf(buf, fmt_dec, !!netif_dormant(netdev));

	return -EINVAL;
}

static const char *const operstates[] = {
	"unknown",
	"notpresent", /* currently unused */
	"down",
	"lowerlayerdown",
	"testing", /* currently unused */
	"dormant",
	"up"
};

static ssize_t show_operstate(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	unsigned char operstate;

	read_lock(&dev_base_lock);
	operstate = netdev->operstate;
	if (!netif_running(netdev))
		operstate = IF_OPER_DOWN;
	read_unlock(&dev_base_lock);

	if (operstate >= ARRAY_SIZE(operstates))
		return -EINVAL; /* should not happen */

	return sprintf(buf, "%s\n", operstates[operstate]);
}

/* read-write attributes */
NETDEVICE_SHOW(mtu, fmt_dec);

static int change_mtu(struct net_device *net, unsigned long new_mtu)
{
	return dev_set_mtu(net, (int) new_mtu);
}

static ssize_t store_mtu(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_mtu);
}

NETDEVICE_SHOW(flags, fmt_hex);

static int change_flags(struct net_device *net, unsigned long new_flags)
{
	return dev_change_flags(net, (unsigned) new_flags);
}

static ssize_t store_flags(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_flags);
}

NETDEVICE_SHOW(tx_queue_len, fmt_ulong);

static int change_tx_queue_len(struct net_device *net, unsigned long new_len)
{
	net->tx_queue_len = new_len;
	return 0;
}

static ssize_t store_tx_queue_len(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_tx_queue_len);
}

static ssize_t store_ifalias(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);
	size_t count = len;
	ssize_t ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	/* ignore trailing newline */
	if (len >  0 && buf[len - 1] == '\n')
		--count;

	if (!rtnl_trylock())
		return restart_syscall();
	ret = dev_set_alias(netdev, buf, count);
	rtnl_unlock();

	return ret < 0 ? ret : len;
}

static ssize_t show_ifalias(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = 0;

	if (!rtnl_trylock())
		return restart_syscall();
	if (netdev->ifalias)
		ret = sprintf(buf, "%s\n", netdev->ifalias);
	rtnl_unlock();
	return ret;
}

static struct device_attribute net_class_attributes[] = {
	__ATTR(addr_len, S_IRUGO, show_addr_len, NULL),
	__ATTR(dev_id, S_IRUGO, show_dev_id, NULL),
	__ATTR(ifalias, S_IRUGO | S_IWUSR, show_ifalias, store_ifalias),
	__ATTR(iflink, S_IRUGO, show_iflink, NULL),
	__ATTR(ifindex, S_IRUGO, show_ifindex, NULL),
	__ATTR(features, S_IRUGO, show_features, NULL),
	__ATTR(type, S_IRUGO, show_type, NULL),
	__ATTR(link_mode, S_IRUGO, show_link_mode, NULL),
	__ATTR(address, S_IRUGO, show_address, NULL),
	__ATTR(broadcast, S_IRUGO, show_broadcast, NULL),
	__ATTR(carrier, S_IRUGO, show_carrier, NULL),
	__ATTR(speed, S_IRUGO, show_speed, NULL),
	__ATTR(duplex, S_IRUGO, show_duplex, NULL),
	__ATTR(dormant, S_IRUGO, show_dormant, NULL),
	__ATTR(operstate, S_IRUGO, show_operstate, NULL),
	__ATTR(mtu, S_IRUGO | S_IWUSR, show_mtu, store_mtu),
	__ATTR(flags, S_IRUGO | S_IWUSR, show_flags, store_flags),
	__ATTR(tx_queue_len, S_IRUGO | S_IWUSR, show_tx_queue_len,
	       store_tx_queue_len),
	{}
};
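
/*
 * These attributes are registered through net_class below, so each entry
 * becomes a file directly in the per-device directory of the "net" class
 * (typically /sys/class/net/<interface>/).  Only ifalias, mtu, flags and
 * tx_queue_len carry S_IWUSR and therefore accept writes.
 */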

/* Show a given attribute in the statistics group */
static ssize_t netstat_show(const struct device *d,
			    struct device_attribute *attr, char *buf,
			    unsigned long offset)
{
	struct net_device *dev = to_net_dev(d);
	ssize_t ret = -EINVAL;

	WARN_ON(offset > sizeof(struct net_device_stats) ||
		offset % sizeof(unsigned long) != 0);

	read_lock(&dev_base_lock);
	if (dev_isalive(dev)) {
		const struct net_device_stats *stats = dev_get_stats(dev);
		ret = sprintf(buf, fmt_ulong,
			      *(unsigned long *)(((u8 *) stats) + offset));
	}
	read_unlock(&dev_base_lock);
	return ret;
}

/* generate a read-only statistics attribute */
#define NETSTAT_ENTRY(name)						\
static ssize_t show_##name(struct device *d,				\
			   struct device_attribute *attr, char *buf)	\
{									\
	return netstat_show(d, attr, buf,				\
			    offsetof(struct net_device_stats, name));	\
}									\
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
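
/*
 * Illustrative only: NETSTAT_ENTRY(rx_packets) expands to a show_rx_packets()
 * wrapper that calls netstat_show() with
 * offsetof(struct net_device_stats, rx_packets), plus a read-only
 * DEVICE_ATTR(rx_packets, ...) declaring dev_attr_rx_packets for the
 * attribute array below.
 */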

NETSTAT_ENTRY(rx_packets);
NETSTAT_ENTRY(tx_packets);
NETSTAT_ENTRY(rx_bytes);
NETSTAT_ENTRY(tx_bytes);
NETSTAT_ENTRY(rx_errors);
NETSTAT_ENTRY(tx_errors);
NETSTAT_ENTRY(rx_dropped);
NETSTAT_ENTRY(tx_dropped);
NETSTAT_ENTRY(multicast);
NETSTAT_ENTRY(collisions);
NETSTAT_ENTRY(rx_length_errors);
NETSTAT_ENTRY(rx_over_errors);
NETSTAT_ENTRY(rx_crc_errors);
NETSTAT_ENTRY(rx_frame_errors);
NETSTAT_ENTRY(rx_fifo_errors);
NETSTAT_ENTRY(rx_missed_errors);
NETSTAT_ENTRY(tx_aborted_errors);
NETSTAT_ENTRY(tx_carrier_errors);
NETSTAT_ENTRY(tx_fifo_errors);
NETSTAT_ENTRY(tx_heartbeat_errors);
NETSTAT_ENTRY(tx_window_errors);
NETSTAT_ENTRY(rx_compressed);
NETSTAT_ENTRY(tx_compressed);

static struct attribute *netstat_attrs[] = {
	&dev_attr_rx_packets.attr,
	&dev_attr_tx_packets.attr,
	&dev_attr_rx_bytes.attr,
	&dev_attr_tx_bytes.attr,
	&dev_attr_rx_errors.attr,
	&dev_attr_tx_errors.attr,
	&dev_attr_rx_dropped.attr,
	&dev_attr_tx_dropped.attr,
	&dev_attr_multicast.attr,
	&dev_attr_collisions.attr,
	&dev_attr_rx_length_errors.attr,
	&dev_attr_rx_over_errors.attr,
	&dev_attr_rx_crc_errors.attr,
	&dev_attr_rx_frame_errors.attr,
	&dev_attr_rx_fifo_errors.attr,
	&dev_attr_rx_missed_errors.attr,
	&dev_attr_tx_aborted_errors.attr,
	&dev_attr_tx_carrier_errors.attr,
	&dev_attr_tx_fifo_errors.attr,
	&dev_attr_tx_heartbeat_errors.attr,
	&dev_attr_tx_window_errors.attr,
	&dev_attr_rx_compressed.attr,
	&dev_attr_tx_compressed.attr,
	NULL
};

static struct attribute_group netstat_group = {
	.name = "statistics",
	.attrs = netstat_attrs,
};
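
/*
 * Because the group is named "statistics", the counters above appear as
 * read-only files in a statistics/ subdirectory of each network device's
 * sysfs directory.
 */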

#ifdef CONFIG_WIRELESS_EXT_SYSFS
/* helper function that does all the locking etc for wireless stats */
static ssize_t wireless_show(struct device *d, char *buf,
			     ssize_t (*format)(const struct iw_statistics *,
					       char *))
{
	struct net_device *dev = to_net_dev(d);
	const struct iw_statistics *iw;
	ssize_t ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();
	if (dev_isalive(dev)) {
		iw = get_wireless_stats(dev);
		if (iw)
			ret = (*format)(iw, buf);
	}
	rtnl_unlock();

	return ret;
}

/* show function template for wireless fields */
#define WIRELESS_SHOW(name, field, format_string)			\
static ssize_t format_iw_##name(const struct iw_statistics *iw, char *buf) \
{									\
	return sprintf(buf, format_string, iw->field);			\
}									\
static ssize_t show_iw_##name(struct device *d,				\
			      struct device_attribute *attr, char *buf)	\
{									\
	return wireless_show(d, buf, format_iw_##name);			\
}									\
static DEVICE_ATTR(name, S_IRUGO, show_iw_##name, NULL)
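
/*
 * WIRELESS_SHOW() follows the same pattern as NETDEVICE_SHOW() above, except
 * that the generated show routine formats a member of the device's
 * struct iw_statistics (fetched under the rtnl lock by wireless_show())
 * rather than a struct net_device field.
 */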

WIRELESS_SHOW(status, status, fmt_hex);
WIRELESS_SHOW(link, qual.qual, fmt_dec);
WIRELESS_SHOW(level, qual.level, fmt_dec);
WIRELESS_SHOW(noise, qual.noise, fmt_dec);
WIRELESS_SHOW(nwid, discard.nwid, fmt_dec);
WIRELESS_SHOW(crypt, discard.code, fmt_dec);
WIRELESS_SHOW(fragment, discard.fragment, fmt_dec);
WIRELESS_SHOW(misc, discard.misc, fmt_dec);
WIRELESS_SHOW(retries, discard.retries, fmt_dec);
WIRELESS_SHOW(beacon, miss.beacon, fmt_dec);

static struct attribute *wireless_attrs[] = {
	&dev_attr_status.attr,
	&dev_attr_link.attr,
	&dev_attr_level.attr,
	&dev_attr_noise.attr,
	&dev_attr_nwid.attr,
	&dev_attr_crypt.attr,
	&dev_attr_fragment.attr,
	&dev_attr_retries.attr,
	&dev_attr_misc.attr,
	&dev_attr_beacon.attr,
	NULL
};

static struct attribute_group wireless_group = {
	.name = "wireless",
	.attrs = wireless_attrs,
};
#endif
#endif /* CONFIG_SYSFS */

#ifdef CONFIG_RPS
/*
 * RX queue sysfs structures and functions.
 */
struct rx_queue_attribute {
	struct attribute attr;
	ssize_t (*show)(struct netdev_rx_queue *queue,
	    struct rx_queue_attribute *attr, char *buf);
	ssize_t (*store)(struct netdev_rx_queue *queue,
	    struct rx_queue_attribute *attr, const char *buf, size_t len);
};
#define to_rx_queue_attr(_attr) container_of(_attr,		\
    struct rx_queue_attribute, attr)

#define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj)

static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr,
				  char *buf)
{
	struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(queue, attribute, buf);
}

static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr,
				   const char *buf, size_t count)
{
	struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(queue, attribute, buf, count);
}

static struct sysfs_ops rx_queue_sysfs_ops = {
	.show = rx_queue_attr_show,
	.store = rx_queue_attr_store,
};

static ssize_t show_rps_map(struct netdev_rx_queue *queue,
			    struct rx_queue_attribute *attribute, char *buf)
{
	struct rps_map *map;
	cpumask_var_t mask;
	size_t len = 0;
	int i;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	rcu_read_lock();
	map = rcu_dereference(queue->rps_map);
	if (map)
		for (i = 0; i < map->len; i++)
			cpumask_set_cpu(map->cpus[i], mask);

	len += cpumask_scnprintf(buf + len, PAGE_SIZE, mask);
	if (PAGE_SIZE - len < 3) {
		rcu_read_unlock();
		free_cpumask_var(mask);
		return -EINVAL;
	}
	rcu_read_unlock();

	free_cpumask_var(mask);
	len += sprintf(buf + len, "\n");
	return len;
}

static void rps_map_release(struct rcu_head *rcu)
{
	struct rps_map *map = container_of(rcu, struct rps_map, rcu);

	kfree(map);
}

static ssize_t store_rps_map(struct netdev_rx_queue *queue,
			     struct rx_queue_attribute *attribute,
			     const char *buf, size_t len)
{
	struct rps_map *old_map, *map;
	cpumask_var_t mask;
	int err, cpu, i;
	static DEFINE_SPINLOCK(rps_map_lock);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		return err;
	}

	map = kzalloc(max_t(unsigned,
	    RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
	    GFP_KERNEL);
	if (!map) {
		free_cpumask_var(mask);
		return -ENOMEM;
	}

	i = 0;
	for_each_cpu_and(cpu, mask, cpu_online_mask)
		map->cpus[i++] = cpu;

	if (i)
		map->len = i;
	else {
		kfree(map);
		map = NULL;
	}

	spin_lock(&rps_map_lock);
	old_map = queue->rps_map;
	rcu_assign_pointer(queue->rps_map, map);
	spin_unlock(&rps_map_lock);

	if (old_map)
		call_rcu(&old_map->rcu, rps_map_release);

	free_cpumask_var(mask);
	return len;
}
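
/*
 * The update above follows the usual RCU publish pattern: a new rps_map is
 * built from the parsed CPU mask, published with rcu_assign_pointer() under
 * rps_map_lock, and the old map is only freed after a grace period via
 * call_rcu(&old_map->rcu, rps_map_release).
 */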

static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					   struct rx_queue_attribute *attr,
					   char *buf)
{
	struct rps_dev_flow_table *flow_table;
	unsigned int val = 0;

	rcu_read_lock();
	flow_table = rcu_dereference(queue->rps_flow_table);
	if (flow_table)
		val = flow_table->mask + 1;
	rcu_read_unlock();

	return sprintf(buf, "%u\n", val);
}

static void rps_dev_flow_table_release_work(struct work_struct *work)
{
	struct rps_dev_flow_table *table = container_of(work,
	    struct rps_dev_flow_table, free_work);

	vfree(table);
}

static void rps_dev_flow_table_release(struct rcu_head *rcu)
{
	struct rps_dev_flow_table *table = container_of(rcu,
	    struct rps_dev_flow_table, rcu);

	INIT_WORK(&table->free_work, rps_dev_flow_table_release_work);
	schedule_work(&table->free_work);
}

static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					    struct rx_queue_attribute *attr,
					    const char *buf, size_t len)
{
	unsigned int count;
	char *endp;
	struct rps_dev_flow_table *table, *old_table;
	static DEFINE_SPINLOCK(rps_dev_flow_lock);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	count = simple_strtoul(buf, &endp, 0);
	if (endp == buf)
		return -EINVAL;

	if (count) {
		int i;

		/* Enforce a limit to prevent overflow */
		if (count > 1<<30)
			return -EINVAL;
		count = roundup_pow_of_two(count);
		table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(count));
		if (!table)
			return -ENOMEM;

		table->mask = count - 1;
		for (i = 0; i < count; i++)
			table->flows[i].cpu = RPS_NO_CPU;
	} else
		table = NULL;

	spin_lock(&rps_dev_flow_lock);
	old_table = queue->rps_flow_table;
	rcu_assign_pointer(queue->rps_flow_table, table);
	spin_unlock(&rps_dev_flow_lock);

	if (old_table)
		call_rcu(&old_table->rcu, rps_dev_flow_table_release);

	return len;
}

static struct rx_queue_attribute rps_cpus_attribute =
	__ATTR(rps_cpus, S_IRUGO | S_IWUSR, show_rps_map, store_rps_map);

static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute =
	__ATTR(rps_flow_cnt, S_IRUGO | S_IWUSR,
	    show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt);
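
/*
 * With the "queues" kset and per-queue kobjects created below, these two
 * attributes are what user space sees as
 * /sys/class/net/<device>/queues/rx-<n>/rps_cpus and .../rps_flow_cnt
 * (path shown for illustration; the exact names come from
 * rx_queue_add_kobject() and the net class registration).
 */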

static struct attribute *rx_queue_default_attrs[] = {
	&rps_cpus_attribute.attr,
	&rps_dev_flow_table_cnt_attribute.attr,
	NULL
};

static void rx_queue_release(struct kobject *kobj)
{
	struct netdev_rx_queue *queue = to_rx_queue(kobj);
	struct netdev_rx_queue *first = queue->first;

	if (queue->rps_map)
		call_rcu(&queue->rps_map->rcu, rps_map_release);

	if (queue->rps_flow_table)
		call_rcu(&queue->rps_flow_table->rcu,
		    rps_dev_flow_table_release);

	if (atomic_dec_and_test(&first->count))
		kfree(first);
}

static struct kobj_type rx_queue_ktype = {
	.sysfs_ops = &rx_queue_sysfs_ops,
	.release = rx_queue_release,
	.default_attrs = rx_queue_default_attrs,
};

static int rx_queue_add_kobject(struct net_device *net, int index)
{
	struct netdev_rx_queue *queue = net->_rx + index;
	struct kobject *kobj = &queue->kobj;
	int error = 0;

	kobj->kset = net->queues_kset;
	error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
	    "rx-%u", index);
	if (error) {
		kobject_put(kobj);
		return error;
	}

	kobject_uevent(kobj, KOBJ_ADD);

	return error;
}

static int rx_queue_register_kobjects(struct net_device *net)
{
	int i;
	int error = 0;

	net->queues_kset = kset_create_and_add("queues",
	    NULL, &net->dev.kobj);
	if (!net->queues_kset)
		return -ENOMEM;
	for (i = 0; i < net->num_rx_queues; i++) {
		error = rx_queue_add_kobject(net, i);
		if (error)
			break;
	}

	if (error)
		while (--i >= 0)
			kobject_put(&net->_rx[i].kobj);

	return error;
}

static void rx_queue_remove_kobjects(struct net_device *net)
{
	int i;

	for (i = 0; i < net->num_rx_queues; i++)
		kobject_put(&net->_rx[i].kobj);
	kset_unregister(net->queues_kset);
}
#endif /* CONFIG_RPS */

static const void *net_current_ns(void)
{
	return current->nsproxy->net_ns;
}

static const void *net_initial_ns(void)
{
	return &init_net;
}

static const void *net_netlink_ns(struct sock *sk)
{
	return sock_net(sk);
}

static struct kobj_ns_type_operations net_ns_type_operations = {
	.type = KOBJ_NS_TYPE_NET,
	.current_ns = net_current_ns,
	.netlink_ns = net_netlink_ns,
	.initial_ns = net_initial_ns,
};

static void net_kobj_ns_exit(struct net *net)
{
	kobj_ns_exit(KOBJ_NS_TYPE_NET, net);
}

static struct pernet_operations kobj_net_ops = {
	.exit = net_kobj_ns_exit,
};

#ifdef CONFIG_HOTPLUG
static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
{
	struct net_device *dev = to_net_dev(d);
	int retval;

	/* pass interface to uevent. */
	retval = add_uevent_var(env, "INTERFACE=%s", dev->name);
	if (retval)
		goto exit;

	/* pass ifindex to uevent.
	 * ifindex is useful as it won't change (interface name may change)
	 * and is what RtNetlink uses natively. */
	retval = add_uevent_var(env, "IFINDEX=%d", dev->ifindex);

exit:
	return retval;
}
#endif
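
/*
 * For illustration: a device named "eth0" with ifindex 2 adds
 * INTERFACE=eth0 and IFINDEX=2 to the hotplug environment.
 */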

/*
 *	netdev_release -- destroy and free a dead device.
 *	Called when last reference to device kobject is gone.
 */
static void netdev_release(struct device *d)
{
	struct net_device *dev = to_net_dev(d);

	BUG_ON(dev->reg_state != NETREG_RELEASED);

	kfree(dev->ifalias);
	kfree((char *)dev - dev->padded);
}

static const void *net_namespace(struct device *d)
{
	struct net_device *dev;
	dev = container_of(d, struct net_device, dev);
	return dev_net(dev);
}

static struct class net_class = {
	.name = "net",
	.dev_release = netdev_release,
#ifdef CONFIG_SYSFS
	.dev_attrs = net_class_attributes,
#endif /* CONFIG_SYSFS */
#ifdef CONFIG_HOTPLUG
	.dev_uevent = netdev_uevent,
#endif
	.ns_type = &net_ns_type_operations,
	.namespace = net_namespace,
};

/* Delete sysfs entries but hold kobject reference until after all
 * netdev references are gone.
 */
void netdev_unregister_kobject(struct net_device *net)
{
	struct device *dev = &(net->dev);

	kobject_get(&dev->kobj);

#ifdef CONFIG_RPS
	rx_queue_remove_kobjects(net);
#endif

	device_del(dev);
}

/* Create sysfs entries for network device. */
int netdev_register_kobject(struct net_device *net)
{
	struct device *dev = &(net->dev);
	const struct attribute_group **groups = net->sysfs_groups;
	int error = 0;

	device_initialize(dev);
	dev->class = &net_class;
	dev->platform_data = net;
	dev->groups = groups;

	dev_set_name(dev, "%s", net->name);

#ifdef CONFIG_SYSFS
	/* Allow for a device specific group */
	if (*groups)
		groups++;

	*groups++ = &netstat_group;
#ifdef CONFIG_WIRELESS_EXT_SYSFS
	if (net->ieee80211_ptr)
		*groups++ = &wireless_group;
#ifdef CONFIG_WIRELESS_EXT
	else if (net->wireless_handlers)
		*groups++ = &wireless_group;
#endif
#endif
#endif /* CONFIG_SYSFS */

	error = device_add(dev);
	if (error)
		return error;

#ifdef CONFIG_RPS
	error = rx_queue_register_kobjects(net);
	if (error) {
		device_del(dev);
		return error;
	}
#endif

	return error;
}

int netdev_class_create_file(struct class_attribute *class_attr)
{
	return class_create_file(&net_class, class_attr);
}

void netdev_class_remove_file(struct class_attribute *class_attr)
{
	class_remove_file(&net_class, class_attr);
}

EXPORT_SYMBOL(netdev_class_create_file);
EXPORT_SYMBOL(netdev_class_remove_file);

int netdev_kobject_init(void)
{
	kobj_ns_type_register(&net_ns_type_operations);
	register_pernet_subsys(&kobj_net_ops);
	return class_register(&net_class);
}