#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <asm/cache.h>
#include <asm/byteorder.h>

#include <linux/ethtool.h>
#define SET_ETHTOOL_OPS(netdev, ops) \
	((netdev)->ethtool_ops = (ops))
/* hardware address assignment types */
#define NET_ADDR_PERM		0	/* address is permanent (default) */
#define NET_ADDR_RANDOM		1	/* address is generated randomly */
#define NET_ADDR_STOLEN		2	/* address is taken from another device */

#define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
/* qdisc ->enqueue() return codes */
#define NET_XMIT_SUCCESS	0x00
#define NET_XMIT_DROP		0x01	/* skb dropped			*/
#define NET_XMIT_CN		0x02	/* congestion notification	*/
#define NET_XMIT_POLICED	0x03	/* skb is shot by police	*/
#define NET_XMIT_MASK		0x0f	/* qdisc flags in net/sch_generic.h */
/* NET_XMIT_CN means the packet itself got through, so callers map it to success */
#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)
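/*
 * Example (illustrative sketch, not part of the original header): a caller
 * submitting a packet can fold congestion notification into "success",
 * since NET_XMIT_CN means the packet itself was accepted. The stats
 * pointer below is hypothetical.
 *
 *	int err = dev_queue_xmit(skb);
 *
 *	err = net_xmit_eval(err);	(NET_XMIT_CN counts as 0)
 *	if (err)
 *		stats->tx_dropped++;
 */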
#define NETDEV_TX_MASK		0xf0
static inline bool dev_xmit_complete(int rc)
{
	/* the skb was consumed by the driver: either sent or dropped */
	if (likely(rc < NET_XMIT_MASK))
		return true;

	return false;
}
#if defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
# if defined(CONFIG_MAC80211_MESH)
#  define LL_MAX_HEADER 128
# else
#  define LL_MAX_HEADER 96
# endif
#elif IS_ENABLED(CONFIG_TR)
# define LL_MAX_HEADER 48
#else
# define LL_MAX_HEADER 32
#endif
#if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
    !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif
#define NETDEV_HW_ADDR_T_LAN		1
#define NETDEV_HW_ADDR_T_SAN		2
#define NETDEV_HW_ADDR_T_SLAVE		3
#define NETDEV_HW_ADDR_T_UNICAST	4
#define NETDEV_HW_ADDR_T_MULTICAST	5
#define netdev_hw_addr_list_count(l) ((l)->count)
#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
#define netdev_hw_addr_list_for_each(ha, l) \
	list_for_each_entry(ha, &(l)->list, list)
#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
#define netdev_for_each_uc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->uc)

#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
#define netdev_for_each_mc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->mc)
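/*
 * Example (illustrative sketch): a driver's ndo_set_rx_mode() walks the
 * device's multicast list with the iterator above; the core holds the
 * address-list lock around this callback. my_hw_add_mc_filter() is a
 * hypothetical device-specific helper.
 *
 *	static void my_set_rx_mode(struct net_device *dev)
 *	{
 *		struct netdev_hw_addr *ha;
 *
 *		netdev_for_each_mc_addr(ha, dev)
 *			my_hw_add_mc_filter(dev, ha->addr);
 *	}
 */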
/* Cached hardware header; keep it HH_DATA_MOD-aligned */
#define HH_DATA_MOD	16
#define HH_DATA_OFF(__len) \
	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
	(((__len) + (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1))
#define LL_RESERVED_SPACE(dev) \
	((((dev)->hard_header_len + (dev)->needed_headroom) & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev, extra) \
	((((dev)->hard_header_len + (dev)->needed_headroom + (extra)) & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
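/*
 * Example (illustrative sketch): output paths reserve link-layer headroom
 * up front so later header pushes do not trigger a reallocation;
 * payload_len is a hypothetical length.
 *
 *	struct sk_buff *skb;
 *
 *	skb = alloc_skb(LL_RESERVED_SPACE(dev) + payload_len, GFP_ATOMIC);
 *	if (skb)
 *		skb_reserve(skb, LL_RESERVED_SPACE(dev));
 */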
struct header_ops {
	int	(*create)(struct sk_buff *skb, struct net_device *dev,
			  unsigned short type, const void *daddr,
			  const void *saddr, unsigned int len);
	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
	/* ... */
	void	(*cache_update)(struct hh_cache *hh,
				const struct net_device *dev,
				const unsigned char *haddr);
};
#define NETDEV_BOOT_SETUP_MAX 8
#ifdef CONFIG_NETPOLL
	spinlock_t		poll_lock;
	int			poll_owner;
#endif
static inline bool napi_disable_pending(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_DISABLE, &n->state);
}
static inline bool napi_schedule_prep(struct napi_struct *n)
{
	return !napi_disable_pending(n) &&
		!test_and_set_bit(NAPI_STATE_SCHED, &n->state);
}
/* Schedule NAPI poll routine to be called if it is not already running */
static inline void napi_schedule(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule(n);
}
/* Try to reschedule poll; called by the poll routine after napi_complete() */
static inline bool napi_reschedule(struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__napi_schedule(napi);
		return true;
	}
	return false;
}
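/*
 * Example (illustrative sketch): the canonical NAPI pattern is to mask
 * device RX interrupts in the hard interrupt handler and defer the real
 * work to the poll routine. All my_* names are hypothetical.
 *
 *	static irqreturn_t my_irq(int irq, void *data)
 *	{
 *		struct my_priv *priv = data;
 *
 *		my_disable_rx_irq(priv);
 *		napi_schedule(&priv->napi);
 *		return IRQ_HANDLED;
 *	}
 */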
static inline void napi_disable(struct napi_struct *n)
static inline void napi_enable(struct napi_struct *n)
# define napi_synchronize(n)	barrier()
#define QUEUE_STATE_ANY_XOFF ((1 << __QUEUE_STATE_DRV_XOFF) | \
			      (1 << __QUEUE_STATE_STACK_XOFF))
#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \
					(1 << __QUEUE_STATE_FROZEN))
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	int			numa_node;
#endif
static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	return q->numa_node;
#else
	return NUMA_NO_NODE;
#endif
}
static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	q->numa_node = node;
#endif
}
#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))
/* The rps_dev_flow structure maps a flow to the CPU that processed it and
 * records that CPU's input queue tail at last enqueue. */
struct rps_dev_flow {
	u16 cpu;
	u16 filter;
	unsigned int last_qtail;
};
#define RPS_NO_FILTER 0xffff
struct rps_dev_flow_table {
	unsigned int mask;
	struct rcu_head rcu;
	/* ... */
	struct rps_dev_flow flows[0];
};
#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
	((_num) * sizeof(struct rps_dev_flow)))
/* The rps_sock_flow_table tracks flows processed in userspace; each entry
 * records the CPU that last touched the matching flow. */
struct rps_sock_flow_table {
	unsigned int mask;
	u16 ents[0];
};
#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \
	((_num) * sizeof(u16)))
#define RPS_NO_CPU 0xffff
static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
					u32 hash)
{
	if (table && hash) {
		unsigned int cpu, index = hash & table->mask;

		/* We only give a hint, preemption can change CPU under us */
		cpu = raw_smp_processor_id();

		if (table->ents[index] != cpu)
			table->ents[index] = cpu;
	}
}
static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
				       u32 hash)
{
	if (table && hash)
		table->ents[hash & table->mask] = RPS_NO_CPU;
}
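/*
 * Example (illustrative sketch): sizing and initializing an RFS sock flow
 * table. The entry count must be a power of two so that "hash & mask"
 * selects a slot; "ents" is a hypothetical count.
 *
 *	struct rps_sock_flow_table *tbl;
 *	unsigned int i;
 *
 *	tbl = vmalloc(RPS_SOCK_FLOW_TABLE_SIZE(ents));
 *	if (tbl) {
 *		tbl->mask = ents - 1;
 *		for (i = 0; i < ents; i++)
 *			tbl->ents[i] = RPS_NO_CPU;
 *	}
 */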
extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
#ifdef CONFIG_RFS_ACCEL
extern bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
				u32 flow_id, u16 filter_id);
#endif
/* This structure contains an instance of an RX queue. */
struct netdev_rx_queue {
	struct rps_map __rcu		*rps_map;
	struct rps_dev_flow_table __rcu	*rps_flow_table;
	struct kobject			kobj;
	struct net_device		*dev;
} ____cacheline_aligned_in_smp;
struct xps_map {
	unsigned int len;
	unsigned int alloc_len;
	struct rcu_head rcu;
	u16 queues[0];
};
#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
#define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map)) \
			   / sizeof(u16))
struct xps_dev_maps {
	struct rcu_head rcu;
	struct xps_map __rcu *cpu_map[0];
};
#define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) + \
			   (nr_cpu_ids * sizeof(struct xps_map *)))
#define TC_MAX_QUEUE	16
#define TC_BITMASK	15
#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
/* FCoE HBA information reported via ndo_fcoe_get_hbainfo() */
struct netdev_fcoe_hbainfo {
	/* ... */
	char optionrom_version[64];
	/* ... */
	char model_description[256];
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	void			(*ndo_poll_controller)(struct net_device *dev);
#endif
	int			(*ndo_set_vf_spoofchk)(struct net_device *dev,
						       int vf, bool setting);
#if IS_ENABLED(CONFIG_FCOE)
	int			(*ndo_fcoe_get_hbainfo)(struct net_device *dev,
							struct netdev_fcoe_hbainfo *hbainfo);
#if IS_ENABLED(CONFIG_LIBFCOE)
#define NETDEV_FCOE_WWNN 0
#define NETDEV_FCOE_WWPN 1
#ifdef CONFIG_RFS_ACCEL
	int			(*ndo_rx_flow_steer)(struct net_device *dev,
						     const struct sk_buff *skb,
						     u16 rxq_index,
						     u32 flow_id);
#endif
	int			(*ndo_fdb_add)(struct ndmsg *ndm,
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 flags);
	int			(*ndo_fdb_del)(struct ndmsg *ndm,
					       struct net_device *dev,
					       const unsigned char *addr);
#ifdef CONFIG_WIRELESS_EXT
	/* List of functions to handle Wireless Extensions; see <net/iw_handler.h> */
	const struct iw_handler_def *wireless_handlers;
#endif
#if IS_ENABLED(CONFIG_VLAN_8021Q)
	struct vlan_info __rcu	*vlan_info;	/* VLAN info */
#endif
#if IS_ENABLED(CONFIG_NET_DSA)
	struct dsa_switch_tree	*dsa_ptr;	/* dsa specific data */
#endif
	struct kset		*queues_kset;

	struct netdev_rx_queue	*_rx;

	/* Number of RX queues allocated at register_netdev() time */
	unsigned int		num_rx_queues;

	/* Number of RX queues currently active in device */
	unsigned int		real_num_rx_queues;

#ifdef CONFIG_RFS_ACCEL
	/* CPU reverse-mapping for RX completion interrupts, indexed
	 * by RX queue number; set by the driver */
	struct cpu_rmap		*rx_cpu_rmap;
#endif
#ifdef CONFIG_XPS
	struct xps_dev_maps __rcu *xps_maps;
#endif
#ifdef CONFIG_NETPOLL
	struct netpoll_info	*npinfo;
#endif

#ifdef CONFIG_NET_NS
	/* Network namespace this network device is inside */
	struct net		*nd_net;
#endif
#define GSO_MAX_SIZE		65536
#define GSO_MAX_SEGS		65535
#if IS_ENABLED(CONFIG_FCOE)
	/* max exchange id for FCoE LRO by ddp */
	unsigned int		fcoe_ddp_xid;
#endif
#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
	struct netprio_map __rcu *priomap;
#endif
#define to_net_dev(d) container_of(d, struct net_device, dev)

#define NETDEV_ALIGN		32
static inline int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
{
	if (num_tc > TC_MAX_QUEUE)
		return -EINVAL;

	dev->num_tc = num_tc;
	return 0;
}

static inline int netdev_get_num_tc(struct net_device *dev)
{
	return dev->num_tc;
}
static inline
struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
					 unsigned int index)
{
	return &dev->_tx[index];
}
static inline void netdev_for_each_tx_queue(struct net_device *dev,
					    void (*f)(struct net_device *,
						      struct netdev_queue *,
						      void *),
					    void *arg)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		f(dev, &dev->_tx[i], arg);
}
static inline void dev_net_set(struct net_device *dev, struct net *net)
{
#ifdef CONFIG_NET_NS
	release_net(dev->nd_net);
	dev->nd_net = hold_net(net);
#endif
}
static inline bool netdev_uses_dsa_tags(struct net_device *dev)
{
#ifdef CONFIG_NET_DSA_TAG_DSA
	if (dev->dsa_ptr != NULL)
		return dsa_uses_dsa_tags(dev->dsa_ptr);
#endif
	return false;
}
static inline bool netdev_uses_trailer_tags(struct net_device *dev)
{
#ifdef CONFIG_NET_DSA_TAG_TRAILER
	if (dev->dsa_ptr != NULL)
		return dsa_uses_trailer_tags(dev->dsa_ptr);
#endif
	return false;
}
/**
 *	netdev_priv - access network device private data
 *	@dev: network device
 *
 * Get network device private data
 */
static inline void *netdev_priv(const struct net_device *dev)
{
	return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
}
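/*
 * Example (illustrative sketch): drivers co-allocate their private state
 * with the net_device and recover it with netdev_priv(); struct my_priv
 * is a hypothetical driver structure.
 *
 *	struct my_priv {
 *		struct napi_struct napi;
 *		spinlock_t lock;
 *	};
 *
 *	struct net_device *dev = alloc_etherdev(sizeof(struct my_priv));
 *	struct my_priv *priv = netdev_priv(dev);
 */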
/* Set the sysfs physical device reference for the network logical device;
 * if set prior to registration it will also be used for the sysfs symlink.
 */
#define SET_NETDEV_DEV(net, pdev)	((net)->dev.parent = (pdev))

/* Set the sysfs device type for the network logical device to allow
 * fine-grained identification of different network device types.
 */
#define SET_NETDEV_DEVTYPE(net, devtype)	((net)->dev.type = (devtype))
#define NAPI_GRO_FREE		1
#define NAPI_GRO_FREE_STOLEN_HEAD 2

#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
/* netdevice notifier chain event types */
#define NETDEV_UP	0x0001	/* For now you can't veto a device up/down */
#define NETDEV_DOWN	0x0002
#define NETDEV_REBOOT	0x0003	/* a network interface detected a hardware
				   crash and restarted - we can use this e.g.
				   to kick TCP sessions once done */
#define NETDEV_CHANGE	0x0004	/* notify device state change */
#define NETDEV_REGISTER	0x0005
#define NETDEV_UNREGISTER	0x0006
#define NETDEV_CHANGEMTU	0x0007
#define NETDEV_CHANGEADDR	0x0008
#define NETDEV_GOING_DOWN	0x0009
#define NETDEV_CHANGENAME	0x000A
#define NETDEV_FEAT_CHANGE	0x000B
#define NETDEV_BONDING_FAILOVER	0x000C
#define NETDEV_PRE_UP		0x000D
#define NETDEV_PRE_TYPE_CHANGE	0x000E
#define NETDEV_POST_TYPE_CHANGE	0x000F
#define NETDEV_POST_INIT	0x0010
#define NETDEV_UNREGISTER_FINAL	0x0011
#define NETDEV_RELEASE		0x0012
#define NETDEV_NOTIFY_PEERS	0x0013
#define NETDEV_JOIN		0x0014
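/*
 * Example (illustrative sketch): subsystems observe these events through a
 * netdevice notifier; my_netdev_event and my_nb are hypothetical.
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		if (event == NETDEV_UP)
 *			pr_info("a network interface came up\n");
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *	register_netdevice_notifier(&my_nb);
 */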
#define for_each_netdev(net, d) \
	list_for_each_entry(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_reverse(net, d) \
	list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_rcu(net, d) \
	list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_safe(net, d, n) \
	list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue(net, d) \
	list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue_rcu(net, d) \
	list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
#define net_device_entry(lh) list_entry(lh, struct net_device, dev_list)
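/*
 * Example (illustrative sketch): walking every device in a namespace under
 * RCU with the iterator above; the pr_info() stands in for real work.
 *
 *	struct net_device *dev;
 *
 *	rcu_read_lock();
 *	for_each_netdev_rcu(&init_net, dev)
 *		pr_info("%s: mtu %u\n", dev->name, dev->mtu);
 *	rcu_read_unlock();
 */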
static inline struct net_device *first_net_device(struct net *net)
{
	return list_empty(&net->dev_base_head) ? NULL :
		net_device_entry(net->dev_base_head.next);
}

static inline struct net_device *first_net_device_rcu(struct net *net)
{
	struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));

	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}
extern struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
					      const char *hwaddr);
extern struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
					       unsigned short mask);
static inline void unregister_netdevice(struct net_device *dev)
{
	unregister_netdevice_queue(dev, NULL);
}
#ifdef CONFIG_NETPOLL_TRAP
extern int netpoll_trap(void);
#endif
static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
{
	return NAPI_GRO_CB(skb)->data_offset;
}

static inline unsigned int skb_gro_len(const struct sk_buff *skb)
{
	return skb->len - NAPI_GRO_CB(skb)->data_offset;
}

static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
{
	NAPI_GRO_CB(skb)->data_offset += len;
}

static inline void *skb_gro_header_fast(struct sk_buff *skb,
					unsigned int offset)
{
	return NAPI_GRO_CB(skb)->frag0 + offset;
}

static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
{
	return NAPI_GRO_CB(skb)->frag0_len < hlen;
}
static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
					unsigned int offset)
{
	if (!pskb_may_pull(skb, hlen))
		return NULL;

	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;
	return skb->data + offset;
}
static inline void *skb_gro_mac_header(struct sk_buff *skb)
{
	return NAPI_GRO_CB(skb)->frag0 ?: skb_mac_header(skb);
}
static inline void *skb_gro_network_header(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
	       skb_network_offset(skb);
}
static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
				  unsigned short type,
				  const void *daddr, const void *saddr,
				  unsigned int len)
{
	if (!dev->header_ops || !dev->header_ops->create)
		return 0;

	return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
}
static inline int dev_parse_header(const struct sk_buff *skb,
				   unsigned char *haddr)
{
	const struct net_device *dev = skb->dev;

	if (!dev->header_ops || !dev->header_ops->parse)
		return 0;
	return dev->header_ops->parse(skb, haddr);
}
static inline int unregister_gifconf(unsigned int family)
{
	return register_gifconf(family, NULL);
}
	unsigned int		input_queue_head;
	unsigned int		input_queue_tail;
static inline void input_queue_head_incr(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	sd->input_queue_head++;
#endif
}
static inline void input_queue_tail_incr_save(struct softnet_data *sd,
					      unsigned int *qtail)
{
#ifdef CONFIG_RPS
	*qtail = ++sd->input_queue_tail;
#endif
}
static inline void netif_schedule_queue(struct netdev_queue *txq)
{
	if (!(txq->state & QUEUE_STATE_ANY_XOFF))
		__netif_schedule(txq->qdisc);
}
static inline void netif_tx_schedule_all(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		netif_schedule_queue(netdev_get_tx_queue(dev, i));
}
static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
{
	clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}
static inline void netif_start_queue(struct net_device *dev)
{
	netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
}
static inline void netif_tx_start_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_start_queue(txq);
	}
}
static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
{
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap()) {
		netif_tx_start_queue(dev_queue);
		return;
	}
#endif
	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state))
		__netif_schedule(dev_queue->qdisc);
}
static inline void netif_wake_queue(struct net_device *dev)
{
	netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
}
static inline void netif_tx_wake_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_wake_queue(txq);
	}
}
static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
{
	if (WARN_ON(!dev_queue)) {
		pr_info("netif_stop_queue() cannot be called before register_netdev()\n");
		return;
	}
	set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}
static inline void netif_stop_queue(struct net_device *dev)
{
	netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
}
static inline void netif_tx_stop_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_stop_queue(txq);
	}
}
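/*
 * Example (illustrative sketch): classic TX flow control. Stop the queue
 * when the ring fills in ndo_start_xmit() and wake it from the completion
 * path once space is available again. All my_* names are hypothetical.
 *
 *	static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		struct my_priv *priv = netdev_priv(dev);
 *
 *		my_post_to_ring(priv, skb);
 *		if (my_ring_free_slots(priv) == 0)
 *			netif_stop_queue(dev);
 *		return NETDEV_TX_OK;
 *	}
 *
 * and in the TX completion handler:
 *
 *	if (netif_queue_stopped(dev) && my_ring_free_slots(priv) > 0)
 *		netif_wake_queue(dev);
 */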
static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
{
	return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}
static inline bool netif_queue_stopped(const struct net_device *dev)
{
	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
}
static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
{
	return dev_queue->state & QUEUE_STATE_ANY_XOFF;
}
static inline bool netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
{
	return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
}
static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
					unsigned int bytes)
{
#ifdef CONFIG_BQL
	dql_queued(&dev_queue->dql, bytes);

	if (likely(dql_avail(&dev_queue->dql) >= 0))
		return;

	set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);

	/* pairs with the barrier in netdev_tx_completed_queue() */
	smp_mb();

	/* check again in case another CPU freed room meanwhile */
	if (unlikely(dql_avail(&dev_queue->dql) >= 0))
		clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
#endif
}
static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
{
	netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
}
static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
					     unsigned int pkts, unsigned int bytes)
{
#ifdef CONFIG_BQL
	if (unlikely(!bytes))
		return;

	dql_completed(&dev_queue->dql, bytes);

	/* pairs with the barrier in netdev_tx_sent_queue() */
	smp_mb();

	if (dql_avail(&dev_queue->dql) < 0)
		return;

	if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
		netif_schedule_queue(dev_queue);
#endif
}
static inline void netdev_completed_queue(struct net_device *dev,
					  unsigned int pkts, unsigned int bytes)
{
	netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
}
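/*
 * Example (illustrative sketch): Byte Queue Limits accounting in a driver.
 * Bytes are reported when posted to the ring and again when reaped, which
 * lets the stack bound the amount of in-flight TX data. done_pkts and
 * done_bytes are hypothetical completion counters.
 *
 * in ndo_start_xmit(), after queueing the skb to the hardware ring:
 *	netdev_sent_queue(dev, skb->len);
 *
 * in the TX completion handler, after reaping descriptors:
 *	netdev_completed_queue(dev, done_pkts, done_bytes);
 */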
static inline void netdev_tx_reset_queue(struct netdev_queue *q)
{
#ifdef CONFIG_BQL
	clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
	dql_reset(&q->dql);
#endif
}
static inline void netdev_reset_queue(struct net_device *dev_queue)
{
	netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
}
static inline bool netif_running(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_START, &dev->state);
}
static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

	netif_tx_start_queue(txq);
}
static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
	netif_tx_stop_queue(txq);
}
static inline bool __netif_subqueue_stopped(const struct net_device *dev,
					    u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

	return netif_tx_queue_stopped(txq);
}
static inline bool netif_subqueue_stopped(const struct net_device *dev,
					  struct sk_buff *skb)
{
	return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
}
static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state))
		__netif_schedule(txq->qdisc);
}
static inline bool netif_is_multiqueue(const struct net_device *dev)
{
	return dev->num_tx_queues > 1;
}
#ifdef CONFIG_RPS
extern int netif_set_real_num_rx_queues(struct net_device *dev,
					unsigned int rxq);
#else
static inline int netif_set_real_num_rx_queues(struct net_device *dev,
					       unsigned int rxq)
{
	return 0;
}
#endif

static inline int netif_copy_real_num_queues(struct net_device *to_dev,
					     const struct net_device *from_dev)
{
	int err;

	err = netif_set_real_num_tx_queues(to_dev, from_dev->real_num_tx_queues);
	if (err)
		return err;
#ifdef CONFIG_RPS
	return netif_set_real_num_rx_queues(to_dev,
					    from_dev->real_num_rx_queues);
#else
	return 0;
#endif
}
#define DEFAULT_MAX_NUM_RSS_QUEUES	(8)
static inline void napi_free_frags(struct napi_struct *napi)
{
	kfree_skb(napi->skb);
	napi->skb = NULL;
}
extern int netdev_rx_handler_register(struct net_device *dev,
				      rx_handler_func_t *rx_handler,
				      void *rx_handler_data);
extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
extern int dev_change_net_namespace(struct net_device *, struct net *,
				    const char *);
static inline void dev_put(struct net_device *dev)
{
	this_cpu_dec(*dev->pcpu_refcnt);
}

static inline void dev_hold(struct net_device *dev)
{
	this_cpu_inc(*dev->pcpu_refcnt);
}
static inline bool netif_carrier_ok(const struct net_device *dev)
{
	return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}
static inline void netif_dormant_on(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

static inline void netif_dormant_off(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

static inline bool netif_dormant(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_DORMANT, &dev->state);
}

static inline bool netif_oper_up(const struct net_device *dev)
{
	return (dev->operstate == IF_OPER_UP ||
		dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
}

static inline bool netif_device_present(struct net_device *dev)
{
	return test_bit(__LINK_STATE_PRESENT, &dev->state);
}
#define netif_msg_drv(p)	((p)->msg_enable & NETIF_MSG_DRV)
#define netif_msg_probe(p)	((p)->msg_enable & NETIF_MSG_PROBE)
#define netif_msg_link(p)	((p)->msg_enable & NETIF_MSG_LINK)
#define netif_msg_timer(p)	((p)->msg_enable & NETIF_MSG_TIMER)
#define netif_msg_ifdown(p)	((p)->msg_enable & NETIF_MSG_IFDOWN)
#define netif_msg_ifup(p)	((p)->msg_enable & NETIF_MSG_IFUP)
#define netif_msg_rx_err(p)	((p)->msg_enable & NETIF_MSG_RX_ERR)
#define netif_msg_tx_err(p)	((p)->msg_enable & NETIF_MSG_TX_ERR)
#define netif_msg_tx_queued(p)	((p)->msg_enable & NETIF_MSG_TX_QUEUED)
#define netif_msg_intr(p)	((p)->msg_enable & NETIF_MSG_INTR)
#define netif_msg_tx_done(p)	((p)->msg_enable & NETIF_MSG_TX_DONE)
#define netif_msg_rx_status(p)	((p)->msg_enable & NETIF_MSG_RX_STATUS)
#define netif_msg_pktdata(p)	((p)->msg_enable & NETIF_MSG_PKTDATA)
#define netif_msg_hw(p)		((p)->msg_enable & NETIF_MSG_HW)
#define netif_msg_wol(p)	((p)->msg_enable & NETIF_MSG_WOL)
static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
{
	/* use default */
	if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
		return default_msg_enable_bits;
	if (debug_value == 0)	/* no output */
		return 0;
	/* set low N bits */
	return (1 << debug_value) - 1;
}
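/*
 * Example (illustrative sketch): the usual pattern is a module parameter
 * fed through netif_msg_init(), with messages gated on the resulting
 * bits; "debug" and the default mask here are hypothetical.
 *
 *	static int debug = -1;		(-1 means use the default bits)
 *	module_param(debug, int, 0);
 *
 *	priv->msg_enable = netif_msg_init(debug, NETIF_MSG_DRV |
 *					  NETIF_MSG_PROBE | NETIF_MSG_LINK);
 *	if (netif_msg_probe(priv))
 *		netdev_info(dev, "probe complete\n");
 */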
static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
	spin_lock(&txq->_xmit_lock);
	txq->xmit_lock_owner = cpu;
}
static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
	spin_lock_bh(&txq->_xmit_lock);
	txq->xmit_lock_owner = smp_processor_id();
}
static inline bool __netif_tx_trylock(struct netdev_queue *txq)
{
	bool ok = spin_trylock(&txq->_xmit_lock);
	if (likely(ok))
		txq->xmit_lock_owner = smp_processor_id();
	return ok;
}
static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock(&txq->_xmit_lock);
}
static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock_bh(&txq->_xmit_lock);
}
static inline void txq_trans_update(struct netdev_queue *txq)
{
	if (txq->xmit_lock_owner != -1)
		txq->trans_start = jiffies;
}
static inline void netif_tx_lock(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	spin_lock(&dev->tx_global_lock);
	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* Freeze each queue: grab _xmit_lock to synchronize
		 * with threads already in ->ndo_start_xmit() that have
		 * checked the frozen bit.
		 */
		__netif_tx_lock(txq, cpu);
		set_bit(__QUEUE_STATE_FROZEN, &txq->state);
		__netif_tx_unlock(txq);
	}
}
static inline void netif_tx_lock_bh(struct net_device *dev)
{
	local_bh_disable();
	netif_tx_lock(dev);
}
static inline void netif_tx_unlock(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* No need to grab the _xmit_lock here. If the queue
		 * is not stopped for another reason, we force a schedule.
		 */
		clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
		netif_schedule_queue(txq);
	}
	spin_unlock(&dev->tx_global_lock);
}
static inline void netif_tx_unlock_bh(struct net_device *dev)
{
	netif_tx_unlock(dev);
	local_bh_enable();
}
#define HARD_TX_LOCK(dev, txq, cpu) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_lock(txq, cpu);		\
	}						\
}

#define HARD_TX_UNLOCK(dev, txq) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_unlock(txq);			\
	}						\
}
static inline void netif_tx_disable(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	local_bh_disable();
	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		__netif_tx_lock(txq, cpu);
		netif_tx_stop_queue(txq);
		__netif_tx_unlock(txq);
	}
	local_bh_enable();
}
static inline void netif_addr_lock(struct net_device *dev)
{
	spin_lock(&dev->addr_list_lock);
}

static inline void netif_addr_lock_nested(struct net_device *dev)
{
	spin_lock_nested(&dev->addr_list_lock, SINGLE_DEPTH_NESTING);
}

static inline void netif_addr_lock_bh(struct net_device *dev)
{
	spin_lock_bh(&dev->addr_list_lock);
}

static inline void netif_addr_unlock(struct net_device *dev)
{
	spin_unlock(&dev->addr_list_lock);
}

static inline void netif_addr_unlock_bh(struct net_device *dev)
{
	spin_unlock_bh(&dev->addr_list_lock);
}
#define for_each_dev_addr(dev, ha) \
	list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
extern struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
					   void (*setup)(struct net_device *),
					   unsigned int txqs, unsigned int rxqs);
#define alloc_netdev(sizeof_priv, name, setup) \
	alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1)

#define alloc_netdev_mq(sizeof_priv, name, setup, count) \
	alloc_netdev_mqs(sizeof_priv, name, setup, count, count)
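/*
 * Example (illustrative sketch): allocating a 4-queue device with private
 * data co-allocated behind it; my_setup() and struct my_priv are
 * hypothetical.
 *
 *	struct net_device *dev;
 *
 *	dev = alloc_netdev_mqs(sizeof(struct my_priv), "my%d", my_setup, 4, 4);
 *	if (!dev)
 *		return -ENOMEM;
 *	err = register_netdev(dev);
 */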
/* General hardware address lists handling functions */
extern int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
				  struct netdev_hw_addr_list *from_list,
				  int addr_len, unsigned char addr_type);
extern int __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
				  struct netdev_hw_addr_list *from_list,
				  int addr_len, unsigned char addr_type);

/* Functions used for device addresses handling */
extern int dev_addr_add(struct net_device *dev, unsigned char *addr,
			unsigned char addr_type);
extern int dev_addr_del(struct net_device *dev, unsigned char *addr,
			unsigned char addr_type);
extern int dev_addr_add_multiple(struct net_device *to_dev,
				 struct net_device *from_dev,
				 unsigned char addr_type);
extern int dev_addr_del_multiple(struct net_device *to_dev,
				 struct net_device *from_dev,
				 unsigned char addr_type);
extern void dev_load(struct net *net, const char *name);
#ifdef CONFIG_BUG
extern void netdev_rx_csum_fault(struct net_device *dev);
#else
static inline void netdev_rx_csum_fault(struct net_device *dev)
{
}
#endif
#ifdef CONFIG_PROC_FS
extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
extern void dev_seq_stop(struct seq_file *seq, void *v);
#endif
static inline bool net_gso_ok(netdev_features_t features, int gso_type)
{
	netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT;
	return (features & feature) == feature;
}
static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
{
	return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
	       (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
}
static inline bool netif_needs_gso(struct sk_buff *skb,
				   netdev_features_t features)
{
	return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
		unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
}
static inline void netif_set_gso_max_size(struct net_device *dev,
					  unsigned int size)
{
	dev->gso_max_size = size;
}
static inline bool netif_is_bond_slave(struct net_device *dev)
{
	return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
}
static inline bool netif_supports_nofcs(struct net_device *dev)
{
	return dev->priv_flags & IFF_SUPP_NOFCS;
}
static inline const char *netdev_name(const struct net_device *dev)
{
	if (dev->reg_state != NETREG_REGISTERED)
		return "(unregistered net_device)";
	return dev->name;
}
extern __printf(2, 3)
int netdev_emerg(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_alert(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_crit(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_err(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_warn(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_notice(const struct net_device *dev, const char *format, ...);
#define MODULE_ALIAS_NETDEV(device) \
	MODULE_ALIAS("netdev-" device)
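/*
 * Example (illustrative sketch): a driver that creates a fixed-name device
 * can be demand-loaded by dev_load(), which requests "netdev-<name>";
 * "mydev0" is a hypothetical device name.
 *
 *	MODULE_ALIAS_NETDEV("mydev0");
 */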
#if defined(CONFIG_DYNAMIC_DEBUG)
#define netdev_dbg(__dev, format, args...)			\
do {								\
	dynamic_netdev_dbg(__dev, format, ##args);		\
} while (0)
#elif defined(DEBUG)
#define netdev_dbg(__dev, format, args...)			\
	netdev_printk(KERN_DEBUG, __dev, format, ##args)
#else
#define netdev_dbg(__dev, format, args...)			\
({								\
	if (0)							\
		netdev_printk(KERN_DEBUG, __dev, format, ##args); \
	0;							\
})
#endif
#if defined(VERBOSE_DEBUG)
#define netdev_vdbg	netdev_dbg
#else
#define netdev_vdbg(dev, format, args...)			\
({								\
	if (0)							\
		netdev_printk(KERN_DEBUG, dev, format, ##args);	\
	0;							\
})
#endif
/*
 * netdev_WARN() acts like netdev_printk(), but uses WARN so the message
 * carries file/line information and a backtrace.
 */
#define netdev_WARN(dev, format, args...)			\
	WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args)
#define netif_printk(priv, type, level, dev, fmt, args...)	\
do {								\
	if (netif_msg_##type(priv))				\
		netdev_printk(level, (dev), fmt, ##args);	\
} while (0)
#define netif_level(level, priv, type, dev, fmt, args...)	\
do {								\
	if (netif_msg_##type(priv))				\
		netdev_##level(dev, fmt, ##args);		\
} while (0)
#define netif_emerg(priv, type, dev, fmt, args...)		\
	netif_level(emerg, priv, type, dev, fmt, ##args)
#define netif_alert(priv, type, dev, fmt, args...)		\
	netif_level(alert, priv, type, dev, fmt, ##args)
#define netif_crit(priv, type, dev, fmt, args...)		\
	netif_level(crit, priv, type, dev, fmt, ##args)
#define netif_err(priv, type, dev, fmt, args...)		\
	netif_level(err, priv, type, dev, fmt, ##args)
#define netif_warn(priv, type, dev, fmt, args...)		\
	netif_level(warn, priv, type, dev, fmt, ##args)
#define netif_notice(priv, type, dev, fmt, args...)		\
	netif_level(notice, priv, type, dev, fmt, ##args)
#define netif_info(priv, type, dev, fmt, args...)		\
	netif_level(info, priv, type, dev, fmt, ##args)
#if defined(CONFIG_DYNAMIC_DEBUG)
#define netif_dbg(priv, type, netdev, format, args...)		\
do {								\
	if (netif_msg_##type(priv))				\
		dynamic_netdev_dbg(netdev, format, ##args);	\
} while (0)
#elif defined(DEBUG)
#define netif_dbg(priv, type, dev, format, args...)		\
	netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
#else
#define netif_dbg(priv, type, dev, format, args...)		\
({								\
	if (0)							\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;							\
})
#endif
#if defined(VERBOSE_DEBUG)
#define netif_vdbg	netif_dbg
#else
#define netif_vdbg(priv, type, dev, format, args...)		\
({								\
	if (0)							\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;							\
})
#endif

#endif	/* _LINUX_NETDEVICE_H */