#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/wireless.h>
#include <linux/export.h>
static const char fmt_hex[] = "%#x\n";
static const char fmt_long_hex[] = "%#lx\n";
static const char fmt_dec[] = "%d\n";
static const char fmt_udec[] = "%u\n";
static const char fmt_ulong[] = "%lu\n";
static const char fmt_u64[] = "%llu\n";
static inline int dev_isalive(const struct net_device *dev)
{
	return dev->reg_state <= NETREG_REGISTERED;
}
		ret = (*format)(net, buf);
#define NETDEVICE_SHOW(field, format_string)				\
static ssize_t format_##field(const struct net_device *net, char *buf)	\
{									\
	return sprintf(buf, format_string, net->field);			\
}									\
static ssize_t show_##field(struct device *dev,				\
			    struct device_attribute *attr, char *buf)	\
{									\
	return netdev_show(dev, attr, buf, format_##field);		\
}
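/*
 * Illustrative expansion (not from the original file): invoking
 * NETDEVICE_SHOW(mtu, fmt_dec) produces roughly this pair:
 *
 *	static ssize_t format_mtu(const struct net_device *net, char *buf)
 *	{
 *		return sprintf(buf, fmt_dec, net->mtu);
 *	}
 *	static ssize_t show_mtu(struct device *dev,
 *				struct device_attribute *attr, char *buf)
 *	{
 *		return netdev_show(dev, attr, buf, format_mtu);
 *	}
 */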
static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t len,
			    int (*set)(struct net_device *, unsigned long))
	ret = kstrtoul(buf, 0, &new);
	if (!rtnl_trylock())
		return restart_syscall();
	if (dev_isalive(net)) {
		if ((ret = (*set)(net, new)) == 0)
			ret = len;
	}
	rtnl_unlock();
	return ret;
}
NETDEVICE_SHOW(dev_id, fmt_hex);
NETDEVICE_SHOW(iflink, fmt_dec);
NETDEVICE_SHOW(ifindex, fmt_dec);
NETDEVICE_SHOW(type, fmt_dec);
	if (dev_isalive(net))
	if (dev_isalive(net))
	if (netif_running(netdev)) {
		return sprintf(buf, fmt_dec, !!netif_carrier_ok(netdev));
	}
	if (!rtnl_trylock())
		return restart_syscall();
	if (netif_running(netdev)) {
		if (!__ethtool_get_settings(netdev, &cmd))
			ret = sprintf(buf, fmt_udec, ethtool_cmd_speed(&cmd));
	if (!rtnl_trylock())
		return restart_syscall();
	if (netif_running(netdev)) {
			switch (cmd.duplex) {
			ret = sprintf(buf, "%s\n", duplex);
	if (netif_running(netdev))
		return sprintf(buf, fmt_dec, !!netif_dormant(netdev));
static const char *const operstates[] = {
	if (!netif_running(netdev))
	return sprintf(buf, "%s\n", operstates[operstate]);
NETDEVICE_SHOW(mtu, fmt_dec);
static int change_mtu(struct net_device *net, unsigned long new_mtu)
{
	return dev_set_mtu(net, (int) new_mtu);
}
static ssize_t store_mtu(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_mtu);
}
NETDEVICE_SHOW(flags, fmt_hex);
static ssize_t store_flags(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
static int change_tx_queue_len(struct net_device *net, unsigned long new_len)
{
	net->tx_queue_len = new_len;
	return 0;
}
static ssize_t store_tx_queue_len(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_tx_queue_len);
}
static ssize_t store_ifalias(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
	/* ignore trailing newline */
	if (len > 0 && buf[len - 1] == '\n')
		--count;
	if (!rtnl_trylock())
		return restart_syscall();
	return ret < 0 ? ret : len;
	if (!rtnl_trylock())
		return restart_syscall();
NETDEVICE_SHOW(group, fmt_dec);
static int change_group(struct net_device *net, unsigned long new_group)
{
	dev_set_group(net, (int) new_group);
	return 0;
}
static ssize_t store_group(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_group);
}
	WARN_ON(offset > sizeof(struct rtnl_link_stats64) ||
		offset % sizeof(u64) != 0);
	if (dev_isalive(dev)) {
		ret = sprintf(buf, fmt_u64, *(u64 *)(((u8 *) stats) + offset));
#define NETSTAT_ENTRY(name)						\
static ssize_t show_##name(struct device *d,				\
			   struct device_attribute *attr, char *buf)	\
{									\
	return netstat_show(d, attr, buf,				\
			    offsetof(struct rtnl_link_stats64, name));	\
}									\
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
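/*
 * Illustrative expansion (assumed, not in the original file):
 * NETSTAT_ENTRY(rx_packets) emits show_rx_packets(), which hands
 * netstat_show() the byte offset of rx_packets inside
 * struct rtnl_link_stats64, plus a read-only dev_attr_rx_packets
 * surfaced as /sys/class/net/<iface>/statistics/rx_packets.
 */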
static struct attribute *netstat_attrs[] = {
	&dev_attr_rx_packets.attr,
	&dev_attr_tx_packets.attr,
	&dev_attr_rx_bytes.attr,
	&dev_attr_tx_bytes.attr,
	&dev_attr_rx_errors.attr,
	&dev_attr_tx_errors.attr,
	&dev_attr_rx_dropped.attr,
	&dev_attr_tx_dropped.attr,
	&dev_attr_multicast.attr,
	&dev_attr_collisions.attr,
	&dev_attr_rx_length_errors.attr,
	&dev_attr_rx_over_errors.attr,
	&dev_attr_rx_crc_errors.attr,
	&dev_attr_rx_frame_errors.attr,
	&dev_attr_rx_fifo_errors.attr,
	&dev_attr_rx_missed_errors.attr,
	&dev_attr_tx_aborted_errors.attr,
	&dev_attr_tx_carrier_errors.attr,
	&dev_attr_tx_fifo_errors.attr,
	&dev_attr_tx_heartbeat_errors.attr,
	&dev_attr_tx_window_errors.attr,
	&dev_attr_rx_compressed.attr,
	&dev_attr_tx_compressed.attr,
	NULL
};
static struct attribute_group netstat_group = {
	.name  = "statistics",
	.attrs = netstat_attrs,
};
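/*
 * The "statistics" group is not registered here directly; it is placed
 * on the device's sysfs_groups list in netdev_register_kobject() below.
 */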
#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
static struct attribute *wireless_attrs[] = {
	NULL
};

static struct attribute_group wireless_group = {
	.name  = "wireless",
	.attrs = wireless_attrs,
};
#endif
struct rx_queue_attribute {
	struct attribute attr;
	ssize_t (*show)(struct netdev_rx_queue *queue,
	    struct rx_queue_attribute *attr, char *buf);
	ssize_t (*store)(struct netdev_rx_queue *queue,
	    struct rx_queue_attribute *attr, const char *buf, size_t len);
};
#define to_rx_queue_attr(_attr) container_of(_attr,		\
    struct rx_queue_attribute, attr)

#define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj)
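/*
 * container_of() accessors: sysfs hands back the embedded struct
 * attribute or kobject, and these macros recover the enclosing
 * rx_queue_attribute and netdev_rx_queue.
 */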
static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr,
				  char *buf)
{
	struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(queue, attribute, buf);
}
static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr,
				   const char *buf, size_t count)
{
	struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(queue, attribute, buf, count);
}
static const struct sysfs_ops rx_queue_sysfs_ops = {
	.show = rx_queue_attr_show,
	.store = rx_queue_attr_store,
};
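/*
 * These ops are plugged into rx_queue_ktype below, so every read or
 * write of a per-rx-queue sysfs file is routed through the typed
 * show/store handlers above.
 */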
static ssize_t show_rps_map(struct netdev_rx_queue *queue,
			    struct rx_queue_attribute *attribute, char *buf)
		for (i = 0; i < map->len; i++)
			cpumask_set_cpu(map->cpus[i], mask);
	len += cpumask_scnprintf(buf + len, PAGE_SIZE, mask);
		free_cpumask_var(mask);
	free_cpumask_var(mask);
	len += sprintf(buf + len, "\n");
static ssize_t store_rps_map(struct netdev_rx_queue *queue,
			     struct rx_queue_attribute *attribute,
			     const char *buf, size_t len)
	struct rps_map *old_map, *map;
		free_cpumask_var(mask);
	map = kzalloc(max_t(unsigned int,
	    RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
	    GFP_KERNEL);
		free_cpumask_var(mask);
	for_each_cpu_and(cpu, mask, cpu_online_mask)
		map->cpus[i++] = cpu;
	spin_lock(&rps_map_lock);
	old_map = rcu_dereference_protected(queue->rps_map,
					    lockdep_is_held(&rps_map_lock));
	rcu_assign_pointer(queue->rps_map, map);
	spin_unlock(&rps_map_lock);
		static_key_slow_inc(&rps_needed);
		static_key_slow_dec(&rps_needed);
	free_cpumask_var(mask);
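/*
 * The store path above publishes the new map with the usual RCU
 * pattern: the pointer swap happens under rps_map_lock via
 * rcu_assign_pointer(), the old map (freed in code not shown in this
 * excerpt) waits out a grace period, and the rps_needed static key
 * keeps the RPS fast path cheap while no map is installed.
 */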
static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					   struct rx_queue_attribute *attr,
					   char *buf)
	unsigned long val = 0;
		val = (unsigned long)flow_table->mask + 1;
	return sprintf(buf, "%lu\n", val);
	struct rps_dev_flow_table *table = container_of(work,
	    struct rps_dev_flow_table, free_work);
static void rps_dev_flow_table_release(struct rcu_head *rcu)
{
	struct rps_dev_flow_table *table = container_of(rcu,
	    struct rps_dev_flow_table, rcu);
	INIT_WORK(&table->free_work, rps_dev_flow_table_release_work);
static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					    struct rx_queue_attribute *attr,
					    const char *buf, size_t len)
	struct rps_dev_flow_table *table, *old_table;
	rc = kstrtoul(buf, 0, &count);
		while ((mask | (mask >> 1)) != mask)
			mask |= (mask >> 1);
#if BITS_PER_LONG > 32
		if (mask > (unsigned long)(u32)mask)
			return -EINVAL;
#else
		if (mask > (ULONG_MAX - RPS_DEV_FLOW_TABLE_SIZE(1))
				/ sizeof(struct rps_dev_flow)) {
			/* Enforce a limit to prevent overflow */
			return -EINVAL;
		}
#endif
		table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(mask + 1));
		for (count = 0; count <= mask; count++)
			table->flows[count].cpu = RPS_NO_CPU;
	spin_lock(&rps_dev_flow_lock);
	old_table = rcu_dereference_protected(queue->rps_flow_table,
					      lockdep_is_held(&rps_dev_flow_lock));
	rcu_assign_pointer(queue->rps_flow_table, table);
	spin_unlock(&rps_dev_flow_lock);
		call_rcu(&old_table->rcu, rps_dev_flow_table_release);
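/*
 * The mask arithmetic above rounds count up to one less than a power
 * of two without overflowing: repeatedly OR-ing the value with itself
 * shifted right by one fills every bit below the top set bit until the
 * value stops changing. The BITS_PER_LONG branches then reject masks
 * that would not fit the table's u32 mask field (64-bit) or whose
 * allocation size would overflow (32-bit).
 */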
static struct rx_queue_attribute rps_cpus_attribute =
	__ATTR(rps_cpus, S_IRUGO | S_IWUSR, show_rps_map, store_rps_map);
static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute =
	__ATTR(rps_flow_cnt, S_IRUGO | S_IWUSR,
	       show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt);
static struct attribute *rx_queue_default_attrs[] = {
	&rps_cpus_attribute.attr,
	&rps_dev_flow_table_cnt_attribute.attr,
	NULL
};
static void rx_queue_release(struct kobject *kobj)
	struct netdev_rx_queue *queue = to_rx_queue(kobj);
	struct rps_dev_flow_table *flow_table;
		call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
	memset(kobj, 0, sizeof(*kobj));
static struct kobj_type rx_queue_ktype = {
	.sysfs_ops = &rx_queue_sysfs_ops,
	.release = rx_queue_release,
	.default_attrs = rx_queue_default_attrs,
};
static int rx_queue_add_kobject(struct net_device *net, int index)
{
	struct netdev_rx_queue *queue = net->_rx + index;
	struct kobject *kobj = &queue->kobj;
	kobj->kset = net->queues_kset;
	dev_hold(queue->dev);
	for (i = old_num; i < new_num; i++) {
		error = rx_queue_add_kobject(net, i);

	while (--i >= new_num)
		kobject_put(&net->_rx[i].kobj);
struct netdev_queue_attribute {
	struct attribute attr;
	ssize_t (*show)(struct netdev_queue *queue,
	    struct netdev_queue_attribute *attr, char *buf);
	ssize_t (*store)(struct netdev_queue *queue,
	    struct netdev_queue_attribute *attr, const char *buf, size_t len);
};
#define to_netdev_queue_attr(_attr) container_of(_attr,		\
    struct netdev_queue_attribute, attr)

#define to_netdev_queue(obj) container_of(obj, struct netdev_queue, kobj)
static ssize_t netdev_queue_attr_show(struct kobject *kobj,
				      struct attribute *attr, char *buf)
{
	struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
	struct netdev_queue *queue = to_netdev_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(queue, attribute, buf);
}
static ssize_t netdev_queue_attr_store(struct kobject *kobj,
				       struct attribute *attr,
				       const char *buf, size_t count)
{
	struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
	struct netdev_queue *queue = to_netdev_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(queue, attribute, buf, count);
}
static const struct sysfs_ops netdev_queue_sysfs_ops = {
	.show = netdev_queue_attr_show,
	.store = netdev_queue_attr_store,
};
static ssize_t show_trans_timeout(struct netdev_queue *queue,
				  struct netdev_queue_attribute *attribute,
				  char *buf)
{
	unsigned long trans_timeout;

	spin_lock_irq(&queue->_xmit_lock);
	trans_timeout = queue->trans_timeout;
	spin_unlock_irq(&queue->_xmit_lock);

	return sprintf(buf, "%lu", trans_timeout);
}
static struct netdev_queue_attribute queue_trans_timeout =
	__ATTR(tx_timeout, S_IRUGO, show_trans_timeout, NULL);
static ssize_t bql_show(char *buf, unsigned int value)
{
	return sprintf(buf, "%u\n", value);
}
static ssize_t bql_set(const char *buf, const size_t count,
		       unsigned int *pvalue)
		value = DQL_MAX_LIMIT;

		if (value > DQL_MAX_LIMIT)
			return -EINVAL;
static ssize_t bql_show_hold_time(struct netdev_queue *queue,
				  struct netdev_queue_attribute *attr, char *buf)
{
	struct dql *dql = &queue->dql;
static ssize_t bql_set_hold_time(struct netdev_queue *queue,
				 struct netdev_queue_attribute *attribute,
				 const char *buf, size_t len)
{
	struct dql *dql = &queue->dql;
static struct netdev_queue_attribute bql_hold_time_attribute =
	__ATTR(hold_time, S_IRUGO | S_IWUSR, bql_show_hold_time,
	       bql_set_hold_time);
static ssize_t bql_show_inflight(struct netdev_queue *queue,
				 struct netdev_queue_attribute *attr, char *buf)
{
	struct dql *dql = &queue->dql;

	return sprintf(buf, "%u\n", dql->num_queued - dql->num_completed);
}
static struct netdev_queue_attribute bql_inflight_attribute =
	__ATTR(inflight, S_IRUGO, bql_show_inflight, NULL);
#define BQL_ATTR(NAME, FIELD)						\
static ssize_t bql_show_ ## NAME(struct netdev_queue *queue,		\
				 struct netdev_queue_attribute *attr,	\
				 char *buf)				\
{									\
	return bql_show(buf, queue->dql.FIELD);				\
}									\
									\
static ssize_t bql_set_ ## NAME(struct netdev_queue *queue,		\
				struct netdev_queue_attribute *attr,	\
				const char *buf, size_t len)		\
{									\
	return bql_set(buf, len, &queue->dql.FIELD);			\
}									\
									\
static struct netdev_queue_attribute bql_ ## NAME ## _attribute =	\
	__ATTR(NAME, S_IRUGO | S_IWUSR, bql_show_ ## NAME,		\
	       bql_set_ ## NAME)
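/*
 * Illustrative use (assumed): BQL_ATTR(limit, limit) generates
 * bql_show_limit()/bql_set_limit() operating on queue->dql.limit and a
 * bql_limit_attribute surfaced as the "limit" file in the
 * byte_queue_limits group.
 */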
BQL_ATTR(limit, limit)
BQL_ATTR(limit_max, max_limit)
BQL_ATTR(limit_min, min_limit)
static struct attribute *dql_attrs[] = {
	&bql_limit_attribute.attr,
	&bql_limit_max_attribute.attr,
	&bql_limit_min_attribute.attr,
	&bql_hold_time_attribute.attr,
	&bql_inflight_attribute.attr,
	NULL
};
static struct attribute_group dql_group = {
	.name  = "byte_queue_limits",
	.attrs = dql_attrs,
};
static inline unsigned int get_netdev_queue_index(struct netdev_queue *queue)
{
	for (i = 0; i < dev->num_tx_queues; i++)
		if (queue == &dev->_tx[i])
			break;
static ssize_t show_xps_map(struct netdev_queue *queue,
			    struct netdev_queue_attribute *attribute, char *buf)
	struct xps_dev_maps *dev_maps;
	index = get_netdev_queue_index(queue);
			struct xps_map *map =
			    rcu_dereference(dev_maps->cpu_map[i]);
			for (j = 0; j < map->len; j++) {
				if (map->queues[j] == index) {
					cpumask_set_cpu(i, mask);
					break;
				}
			}
	len += cpumask_scnprintf(buf + len, PAGE_SIZE, mask);
		free_cpumask_var(mask);
	free_cpumask_var(mask);
	len += sprintf(buf + len, "\n");
#define xmap_dereference(P)		\
	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
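/*
 * xmap_dereference() encodes the locking rule for the XPS maps: every
 * write-side dereference must hold xps_map_mutex, and lockdep verifies
 * that claim at runtime.
 */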
static void xps_queue_release(struct netdev_queue *queue)
	struct xps_dev_maps *dev_maps;
	int i, pos, nonempty = 0;
	index = get_netdev_queue_index(queue);
	dev_maps = xmap_dereference(dev->xps_maps);
		map = xmap_dereference(dev_maps->cpu_map[i]);
		for (pos = 0; pos < map->len; pos++)
			if (map->queues[pos] == index)
				break;
		if (pos < map->len) {
			if (map->len > 1)
				map->queues[pos] =
					map->queues[--map->len];
static ssize_t store_xps_map(struct netdev_queue *queue,
			     struct netdev_queue_attribute *attribute,
			     const char *buf, size_t len)
	int err, i, cpu, pos, map_len, alloc_len, need_set;
	unsigned long index;
	struct xps_map *map, *new_map;
	struct xps_dev_maps *dev_maps, *new_dev_maps;
	index = get_netdev_queue_index(queue);
		free_cpumask_var(mask);
	new_dev_maps = kzalloc(max_t(unsigned int,
	    XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES), GFP_KERNEL);
	if (!new_dev_maps) {
		free_cpumask_var(mask);
		return -ENOMEM;
	}
	dev_maps = xmap_dereference(dev->xps_maps);
		map = dev_maps ?
			xmap_dereference(dev_maps->cpu_map[cpu]) : NULL;
			for (pos = 0; pos < map->len; pos++)
				if (map->queues[pos] == index)
					break;
			map_len = map->len;
			alloc_len = map->alloc_len;
			pos = map_len = alloc_len = 0;
			if (numa_node_id == -2)
				numa_node_id = cpu_to_node(cpu);
		if (need_set && pos >= map_len) {
			/* Need to add queue to this CPU's map */
			if (map_len >= alloc_len) {
				alloc_len = alloc_len ?
				    2 * alloc_len : XPS_MIN_MAP_ALLOC;
				new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len),
						       GFP_KERNEL,
						       cpu_to_node(cpu));
				if (!new_map)
					goto error;
				new_map->alloc_len = alloc_len;
				for (i = 0; i < map_len; i++)
					new_map->queues[i] = map->queues[i];
				new_map->len = map_len;
			}
			new_map->queues[new_map->len++] = index;
		} else if (!need_set && pos < map_len) {
			new_map->queues[pos] =
				new_map->queues[--new_map->len];
		map = dev_maps ?
			xmap_dereference(dev_maps->cpu_map[cpu]) : NULL;
		if (map && xmap_dereference(new_dev_maps->cpu_map[cpu]) != map)
			kfree_rcu(map, rcu);
		if (new_dev_maps->cpu_map[cpu])
			nonempty = 1;
		kfree(new_dev_maps);
	netdev_queue_numa_node_write(queue, (numa_node_id >= 0) ? numa_node_id :
					    NUMA_NO_NODE);
	free_cpumask_var(mask);
	kfree(new_dev_maps);
	free_cpumask_var(mask);
static struct netdev_queue_attribute xps_cpus_attribute =
    __ATTR(xps_cpus, S_IRUGO | S_IWUSR, show_xps_map, store_xps_map);
static struct attribute *netdev_queue_default_attrs[] = {
	&queue_trans_timeout.attr,
	&xps_cpus_attribute.attr,
	NULL
};
static void netdev_queue_release(struct kobject *kobj)
	xps_queue_release(queue);
	memset(kobj, 0, sizeof(*kobj));
	dev_put(queue->dev);
static struct kobj_type netdev_queue_ktype = {
	.sysfs_ops = &netdev_queue_sysfs_ops,
	.release = netdev_queue_release,
	.default_attrs = netdev_queue_default_attrs,
};
static int netdev_queue_add_kobject(struct net_device *net, int index)
	struct kobject *kobj = &queue->kobj;
	kobj->kset = net->queues_kset;
	dev_hold(queue->dev);
	for (i = old_num; i < new_num; i++) {
		error = netdev_queue_add_kobject(net, i);

	while (--i >= new_num) {
static int register_queue_kobjects(struct net_device *net)
	int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0;
	if (!net->queues_kset)
		return -ENOMEM;
	real_rx = net->real_num_rx_queues;
static void remove_queue_kobjects(struct net_device *net)
	int real_rx = 0, real_tx = 0;
	real_rx = net->real_num_rx_queues;
static void *net_grab_current_ns(void)
{
	struct net *ns = current->nsproxy->net_ns;
#ifdef CONFIG_NET_NS
	if (ns)
		atomic_inc(&ns->passive);
#endif
	return ns;
}
static const void *net_initial_ns(void)
{
	return &init_net;
}
static const void *net_netlink_ns(struct sock *sk)
{
	return sock_net(sk);
}
struct kobj_ns_type_operations net_ns_type_operations = {
	.type = KOBJ_NS_TYPE_NET,
	.grab_current_ns = net_grab_current_ns,
	.netlink_ns = net_netlink_ns,
	.initial_ns = net_initial_ns,
};
1334 #ifdef CONFIG_HOTPLUG
static void netdev_release(struct device *d)
static const void *net_namespace(struct device *d)
{
	struct net_device *dev = to_net_dev(d);

	return dev_net(dev);
}
static struct class net_class = {
	.name = "net",
	.dev_release = netdev_release,
	.dev_attrs = net_class_attributes,
#ifdef CONFIG_HOTPLUG
	.dev_uevent = netdev_uevent,
#endif
	.ns_type = &net_ns_type_operations,
	.namespace = net_namespace,
};
	remove_queue_kobjects(net);
	dev->class = &net_class;
	*groups++ = &netstat_group;
#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
	if (net->ieee80211_ptr)
		*groups++ = &wireless_group;
#if IS_ENABLED(CONFIG_WIRELESS_EXT)
	else if (net->wireless_handlers)
		*groups++ = &wireless_group;
#endif
#endif
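/*
 * A cfg80211 device (one with ieee80211_ptr set) always receives the
 * "wireless" attribute group; when the legacy wireless extensions are
 * built in, a device that only provides wireless_handlers gets the
 * same group.
 */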
	error = register_queue_kobjects(net);