Linux Kernel 3.7.1
link_watch.c
/*
 * Linux network device link state notification
 *
 * Author:
 *     Stefan Rompf <[email protected]>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/if.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <linux/rtnetlink.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <asm/types.h>

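/*
 * A device that sees a link state change is queued on lweventlist
 * (protected by lweventlist_lock) and handled from the delayed work
 * item linkwatch_work.  Events are normally rate limited to one run
 * per second (linkwatch_nextevent); "urgent" events, flagged through
 * the LW_URGENT bit in linkwatch_flags, bypass that limit so that an
 * interface coming up is reported without delay.
 */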
enum lw_bits {
        LW_URGENT = 0,
};

static unsigned long linkwatch_flags;
static unsigned long linkwatch_nextevent;

static void linkwatch_event(struct work_struct *dummy);
static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event);

static LIST_HEAD(lweventlist);
static DEFINE_SPINLOCK(lweventlist_lock);

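/*
 * Map the current carrier/dormant state of @dev to an RFC 2863
 * IF_OPER_* value: no carrier yields IF_OPER_DOWN, or
 * IF_OPER_LOWERLAYERDOWN for stacked devices (ifindex != iflink);
 * a dormant device yields IF_OPER_DORMANT; otherwise IF_OPER_UP.
 */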
static unsigned char default_operstate(const struct net_device *dev)
{
        if (!netif_carrier_ok(dev))
                return (dev->ifindex != dev->iflink ?
                        IF_OPER_LOWERLAYERDOWN : IF_OPER_DOWN);

        if (netif_dormant(dev))
                return IF_OPER_DORMANT;

        return IF_OPER_UP;
}

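/*
 * Apply the interface's RFC 2863 link mode policy to the operstate
 * computed by default_operstate() and publish the result in
 * dev->operstate under dev_base_lock.  In dormant link mode an
 * otherwise "up" interface is reported as IF_OPER_DORMANT.
 */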
static void rfc2863_policy(struct net_device *dev)
{
        unsigned char operstate = default_operstate(dev);

        if (operstate == dev->operstate)
                return;

        write_lock_bh(&dev_base_lock);

        switch(dev->link_mode) {
        case IF_LINK_MODE_DORMANT:
                if (operstate == IF_OPER_UP)
                        operstate = IF_OPER_DORMANT;
                break;

        case IF_LINK_MODE_DEFAULT:
        default:
                break;
        }

        dev->operstate = operstate;

        write_unlock_bh(&dev_base_lock);
}

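/*
 * Bring dev->operstate in line with link state changes that happened
 * before the device was registered (carrier already off or dormant).
 */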
void linkwatch_init_dev(struct net_device *dev)
{
        /* Handle pre-registration link state changes */
        if (!netif_carrier_ok(dev) || netif_dormant(dev))
                rfc2863_policy(dev);
}

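/*
 * Decide whether an event for @dev may bypass the one-per-second rate
 * limit: nothing is urgent for a device that is not running; events
 * for stacked devices (ifindex != iflink) are always urgent; otherwise
 * an event is urgent while the carrier is up but the qdisc is still
 * being changed over.
 */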
static bool linkwatch_urgent_event(struct net_device *dev)
{
        if (!netif_running(dev))
                return false;

        if (dev->ifindex != dev->iflink)
                return true;

        return netif_carrier_ok(dev) && qdisc_tx_changing(dev);
}

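/*
 * Queue @dev on the global event list, taking a reference that is
 * dropped again in linkwatch_do_dev().  A device already on the list
 * is left where it is.
 */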
static void linkwatch_add_event(struct net_device *dev)
{
        unsigned long flags;

        spin_lock_irqsave(&lweventlist_lock, flags);
        if (list_empty(&dev->link_watch_list)) {
                list_add_tail(&dev->link_watch_list, &lweventlist);
                dev_hold(dev);
        }
        spin_unlock_irqrestore(&lweventlist_lock, flags);
}

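/*
 * (Re)arm the delayed work: urgent requests set LW_URGENT and run the
 * work immediately via mod_delayed_work(); non-urgent requests wait out
 * the remainder of the one second window, with the delay clamped in
 * case jiffies wrapped past linkwatch_nextevent.
 */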
static void linkwatch_schedule_work(int urgent)
{
        unsigned long delay = linkwatch_nextevent - jiffies;

        if (test_bit(LW_URGENT, &linkwatch_flags))
                return;

        /* Minimise down-time: drop delay for up event. */
        if (urgent) {
                if (test_and_set_bit(LW_URGENT, &linkwatch_flags))
                        return;
                delay = 0;
        }

        /* If we wrap around we'll delay it by at most HZ. */
        if (delay > HZ)
                delay = 0;

        /*
         * If urgent, schedule immediate execution; otherwise, don't
         * override the existing timer.
         */
        if (test_bit(LW_URGENT, &linkwatch_flags))
                mod_delayed_work(system_wq, &linkwatch_work, 0);
        else
                schedule_delayed_work(&linkwatch_work, delay);
}

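/*
 * Handle one queued device: clear its pending bit so new events can be
 * recorded, recompute the operational state, activate or deactivate
 * the qdisc according to the carrier state of an interface that is up,
 * notify listeners via netdev_state_change() and drop the reference
 * taken when the device was queued.
 */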
static void linkwatch_do_dev(struct net_device *dev)
{
        /*
         * Make sure the above read is complete since it can be
         * rewritten as soon as we clear the bit below.
         */
        smp_mb__before_clear_bit();

        /* We are about to handle this device,
         * so new events can be accepted
         */
        clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state);

        rfc2863_policy(dev);
        if (dev->flags & IFF_UP) {
                if (netif_carrier_ok(dev))
                        dev_activate(dev);
                else
                        dev_deactivate(dev);

                netdev_state_change(dev);
        }
        dev_put(dev);
}

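/*
 * Drain the event list under lweventlist_lock, handling each device
 * outside the lock.  When called for urgent events only, non-urgent
 * entries are put back on the list and another (delayed) run is
 * scheduled for them.  Runs with the RTNL held.
 */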
static void __linkwatch_run_queue(int urgent_only)
{
        struct net_device *dev;
        LIST_HEAD(wrk);

        /*
         * Limit the number of linkwatch events to one
         * per second so that a runaway driver does not
         * cause a storm of messages on the netlink
         * socket. This limit does not apply to up events
         * while the device qdisc is down.
         */
        if (!urgent_only)
                linkwatch_nextevent = jiffies + HZ;
        /* Limit wrap-around effect on delay. */
        else if (time_after(linkwatch_nextevent, jiffies + HZ))
                linkwatch_nextevent = jiffies;

        clear_bit(LW_URGENT, &linkwatch_flags);

        spin_lock_irq(&lweventlist_lock);
        list_splice_init(&lweventlist, &wrk);

        while (!list_empty(&wrk)) {

                dev = list_first_entry(&wrk, struct net_device, link_watch_list);
                list_del_init(&dev->link_watch_list);

                if (urgent_only && !linkwatch_urgent_event(dev)) {
                        list_add_tail(&dev->link_watch_list, &lweventlist);
                        continue;
                }
                spin_unlock_irq(&lweventlist_lock);
                linkwatch_do_dev(dev);
                spin_lock_irq(&lweventlist_lock);
        }

        if (!list_empty(&lweventlist))
                linkwatch_schedule_work(0);
        spin_unlock_irq(&lweventlist_lock);
}

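/*
 * Remove @dev from the pending event list when it is going away.  If
 * an event was still queued, handle it now so the reference taken in
 * linkwatch_add_event() is dropped.
 */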
void linkwatch_forget_dev(struct net_device *dev)
{
        unsigned long flags;
        int clean = 0;

        spin_lock_irqsave(&lweventlist_lock, flags);
        if (!list_empty(&dev->link_watch_list)) {
                list_del_init(&dev->link_watch_list);
                clean = 1;
        }
        spin_unlock_irqrestore(&lweventlist_lock, flags);
        if (clean)
                linkwatch_do_dev(dev);
}

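/* Flush all pending linkwatch events, ignoring the rate limit. */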
/* Must be called with the rtnl semaphore held */
void linkwatch_run_queue(void)
{
        __linkwatch_run_queue(0);
}

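/*
 * Delayed work handler: run the queue under the RTNL.  If the one
 * second window has not yet expired, only urgent events are handled
 * on this pass.
 */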
static void linkwatch_event(struct work_struct *dummy)
{
        rtnl_lock();
        __linkwatch_run_queue(time_after(linkwatch_nextevent, jiffies));
        rtnl_unlock();
}

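/*
 * Entry point for reporting a link state change on @dev, used by
 * helpers such as netif_carrier_on() and netif_carrier_off().  The
 * device is queued at most once (guarded by the
 * __LINK_STATE_LINKWATCH_PENDING bit) and the worker is scheduled,
 * immediately for urgent events.
 */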
void linkwatch_fire_event(struct net_device *dev)
{
        bool urgent = linkwatch_urgent_event(dev);

        if (!test_and_set_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state)) {
                linkwatch_add_event(dev);
        } else if (!urgent)
                return;

        linkwatch_schedule_work(urgent);
}