Linux Kernel 3.7.1
send.c
/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "main.h"
#include "send.h"
#include "routing.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "vis.h"
#include "gateway_common.h"
#include "originator.h"

static void batadv_send_outstanding_bcast_packet(struct work_struct *work);

/* send out an already prepared packet to the given address via the
 * specified batman interface
 */
int batadv_send_skb_packet(struct sk_buff *skb,
			   struct batadv_hard_iface *hard_iface,
			   const uint8_t *dst_addr)
{
	struct ethhdr *ethhdr;

	if (hard_iface->if_status != BATADV_IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!hard_iface->net_dev))
		goto send_skb_err;

	if (!(hard_iface->net_dev->flags & IFF_UP)) {
		pr_warn("Interface %s is not up - can't send packet via that interface!\n",
			hard_iface->net_dev->name);
		goto send_skb_err;
	}

	/* push to the ethernet header. */
	if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = (struct ethhdr *)skb_mac_header(skb);
	memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
	memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
	ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->priority = TC_PRIO_CONTROL;
	skb->protocol = __constant_htons(ETH_P_BATMAN);

	skb->dev = hard_iface->net_dev;

	/* dev_queue_xmit() returns a negative result on error. However on
	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error.
	 */
	return dev_queue_xmit(skb);
send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}
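
/* Illustrative sketch (not part of the original file): the return value of
 * dev_queue_xmit() is passed through unchanged, so a hypothetical caller
 * that wants to tell hard errors apart from congestion drops could check:
 *
 *	ret = batadv_send_skb_packet(skb, hard_iface, dst_addr);
 *	if (ret < 0)
 *		pr_debug("xmit error %d\n", ret);
 *	else if (ret == NET_XMIT_DROP)
 *		pr_debug("dropped by qdisc or inactive interface\n");
 */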
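
/* hand the interface to the routing algorithm to schedule its next
 * originator message (OGM); a to-be-activated interface is switched to
 * active first so no queued OGM is sent with a stale originator mac
 */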
void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);

	if ((hard_iface->if_status == BATADV_IF_NOT_IN_USE) ||
	    (hard_iface->if_status == BATADV_IF_TO_BE_REMOVED))
		return;

	/* the interface gets activated here to avoid race conditions between
	 * the moment of activating the interface in
	 * hardif_activate_interface() where the originator mac is set and
	 * outdated packets (especially uninitialized mac addresses) in the
	 * packet queue
	 */
	if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED)
		hard_iface->if_status = BATADV_IF_ACTIVE;

	bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface);
}
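
/* free a forwarding packet: drop the queued skb (if any), release the
 * reference held on the incoming hard interface and free the struct
 */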
static void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet)
{
	if (forw_packet->skb)
		kfree_skb(forw_packet->skb);
	if (forw_packet->if_incoming)
		batadv_hardif_free_ref(forw_packet->if_incoming);
	kfree(forw_packet);
}
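
/* enqueue a prepared forwarding packet on the broadcast queue and arm the
 * delayed work that will retransmit it send_time jiffies from now
 */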
static void
_batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				 struct batadv_forw_packet *forw_packet,
				 unsigned long send_time)
{
	INIT_HLIST_NODE(&forw_packet->list);

	/* add new packet to packet list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* start timer for this packet */
	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  batadv_send_outstanding_bcast_packet);
	queue_delayed_work(batadv_event_workqueue, &forw_packet->delayed_work,
			   send_time);
}

/* add a broadcast packet to the queue and setup timers. broadcast packets
 * are sent multiple times to increase the probability of being received.
 *
 * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
 * errors.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed.
 */
int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				    const struct sk_buff *skb,
				    unsigned long delay)
{
	struct batadv_hard_iface *primary_if = NULL;
	struct batadv_forw_packet *forw_packet;
	struct batadv_bcast_packet *bcast_packet;
	struct sk_buff *newskb;

	if (!batadv_atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "bcast packet queue full\n");
		goto out;
	}

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out_and_inc;

	forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);

	if (!forw_packet)
		goto out_and_inc;

	newskb = skb_copy(skb, GFP_ATOMIC);
	if (!newskb)
		goto packet_free;

	/* as we have a copy now, it is safe to decrease the TTL */
	bcast_packet = (struct batadv_bcast_packet *)newskb->data;
	bcast_packet->header.ttl--;

	skb_reset_mac_header(newskb);

	forw_packet->skb = newskb;
	forw_packet->if_incoming = primary_if;

	/* how often did we send the bcast packet? */
	forw_packet->num_packets = 0;

	_batadv_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
	return NETDEV_TX_OK;

packet_free:
	kfree(forw_packet);
out_and_inc:
	atomic_inc(&bat_priv->bcast_queue_left);
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return NETDEV_TX_BUSY;
}
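
/* Illustrative sketch (not part of the original file): the skb handed to
 * batadv_add_bcast_packet_to_list() is copied, never consumed, so a
 * hypothetical caller must still release its own reference afterwards:
 *
 *	ret = batadv_add_bcast_packet_to_list(bat_priv, skb, 1);
 *	consume_skb(skb);
 */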
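
/* delayed-work handler: unlink the broadcast packet from its queue and
 * retransmit it on every hard interface attached to the soft interface,
 * rescheduling itself until three copies have been sent
 */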
static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
{
	struct batadv_hard_iface *hard_iface;
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct sk_buff *skb1;
	struct net_device *soft_iface;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	soft_iface = forw_packet->if_incoming->soft_iface;
	bat_priv = netdev_priv(soft_iface);

	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		/* send a copy of the saved skb */
		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			batadv_send_skb_packet(skb1, hard_iface,
					       batadv_broadcast_addr);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send */
	if (forw_packet->num_packets < 3) {
		_batadv_add_bcast_packet_to_list(bat_priv, forw_packet,
						 msecs_to_jiffies(5));
		return;
	}

out:
	batadv_forw_packet_free(forw_packet);
	atomic_inc(&bat_priv->bcast_queue_left);
}
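
/* delayed-work handler for queued originator messages: unlink the packet,
 * hand it to the routing algorithm for emission and, for own OGMs,
 * schedule the next round
 */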
void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

	bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet);

	/* we have to have at least one packet in the queue to determine the
	 * queue's wake-up time unless we are shutting down
	 */
	if (forw_packet->own)
		batadv_schedule_bat_ogm(forw_packet->if_incoming);

out:
	/* don't count own packet */
	if (!forw_packet->own)
		atomic_inc(&bat_priv->batman_queue_left);

	batadv_forw_packet_free(forw_packet);
}
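
/* cancel all outstanding broadcast and OGM packets, or only those queued
 * for hard_iface when a specific interface is given
 */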
void
batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
				 const struct batadv_hard_iface *hard_iface)
{
	struct batadv_forw_packet *forw_packet;
	struct hlist_node *tmp_node, *safe_tmp_node;
	bool pending;

	if (hard_iface)
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets(): %s\n",
			   hard_iface->net_dev->name);
	else
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets()\n");

	/* free bcast list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &bat_priv->forw_bcast_list, list) {

		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

		/* batadv_send_outstanding_bcast_packet() will lock the list
		 * to delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bcast_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* free batman packet list */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &bat_priv->forw_bat_list, list) {

		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bat_list_lock);

		/* batadv_send_outstanding_bat_ogm_packet() will lock the list
		 * to delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bat_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}
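
/* note on the locking pattern above: the list lock must be dropped before
 * cancel_delayed_work_sync() because that call can wait for a running
 * handler, and both handlers take the same lock to unlink their packet -
 * keeping the lock held could deadlock. A true return value means the
 * handler never ran, so the purger unlinks and frees the packet itself.
 */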