Linux Kernel  3.7.1
vport.c
/*
 * Copyright (c) 2007-2012 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include <linux/etherdevice.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>
#include <linux/compat.h>
#include <net/net_namespace.h>

#include "datapath.h"
#include "vport.h"
#include "vport-internal_dev.h"

/* List of statically compiled vport implementations. Don't forget to also
 * add yours to the list at the bottom of vport.h. */
static const struct vport_ops *vport_ops_list[] = {
	&ovs_netdev_vport_ops,
	&ovs_internal_vport_ops,
};

/* Protected by RCU read lock for reading, RTNL lock for writing. */
static struct hlist_head *dev_table;
#define VPORT_HASH_BUCKETS 1024

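/**
 *	ovs_vport_init - initialize vport subsystem
 *
 * Allocates the hash table used to look up vports by name.  Returns 0 on
 * success or -ENOMEM if the table could not be allocated.
 */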
int ovs_vport_init(void)
{
	dev_table = kzalloc(VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
			    GFP_KERNEL);
	if (!dev_table)
		return -ENOMEM;

	return 0;
}

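/**
 *	ovs_vport_exit - shutdown vport subsystem
 *
 * Frees the vport name hash table allocated by ovs_vport_init().
 */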
void ovs_vport_exit(void)
{
	kfree(dev_table);
}

static struct hlist_head *hash_bucket(struct net *net, const char *name)
{
	unsigned int hash = jhash(name, strlen(name), (unsigned long) net);
	return &dev_table[hash & (VPORT_HASH_BUCKETS - 1)];
}

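/**
 *	ovs_vport_locate - find a port that has already been created
 *
 * @net: network namespace to search
 * @name: name of port to find
 *
 * Walks the hash bucket for @name and returns the matching vport in @net,
 * or NULL if none exists.  Must be called with RTNL or RCU read lock.
 */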
struct vport *ovs_vport_locate(struct net *net, const char *name)
{
	struct hlist_head *bucket = hash_bucket(net, name);
	struct vport *vport;
	struct hlist_node *node;

	hlist_for_each_entry_rcu(vport, node, bucket, hash_node)
		if (!strcmp(name, vport->ops->get_name(vport)) &&
		    net_eq(ovs_dp_get_net(vport->dp), net))
			return vport;

	return NULL;
}

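/**
 *	ovs_vport_alloc - allocate and initialize new vport
 *
 * @priv_size: Size of private data area to allocate.
 * @ops: vport device ops
 * @parms: Information about the new vport.
 *
 * Allocates a new vport defined by @ops with a private data area of
 * @priv_size bytes appended to it, fills in the fields supplied in @parms,
 * and sets up the per-CPU stats.  Returns the vport or an ERR_PTR() value
 * on failure.  Vports that are no longer needed should be released with
 * ovs_vport_free().
 */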
struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,
			      const struct vport_parms *parms)
{
	struct vport *vport;
	size_t alloc_size;

	alloc_size = sizeof(struct vport);
	if (priv_size) {
		alloc_size = ALIGN(alloc_size, VPORT_ALIGN);
		alloc_size += priv_size;
	}

	vport = kzalloc(alloc_size, GFP_KERNEL);
	if (!vport)
		return ERR_PTR(-ENOMEM);

	vport->dp = parms->dp;
	vport->port_no = parms->port_no;
	vport->upcall_portid = parms->upcall_portid;
	vport->ops = ops;
	INIT_HLIST_NODE(&vport->dp_hash_node);

	vport->percpu_stats = alloc_percpu(struct vport_percpu_stats);
	if (!vport->percpu_stats) {
		kfree(vport);
		return ERR_PTR(-ENOMEM);
	}

	spin_lock_init(&vport->stats_lock);

	return vport;
}

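/**
 *	ovs_vport_free - uninitialize and free vport
 *
 * @vport: vport to free
 *
 * Frees a vport allocated with ovs_vport_alloc(), including its per-CPU
 * stats.
 */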
void ovs_vport_free(struct vport *vport)
{
	free_percpu(vport->percpu_stats);
	kfree(vport);
}

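/**
 *	ovs_vport_add - add vport device (for kernel callers)
 *
 * @parms: Information about the new vport.
 *
 * Creates a new vport of the type given by @parms->type using the matching
 * entry in vport_ops_list and links it into the name hash table.  Returns
 * the vport or an ERR_PTR() value (-EAFNOSUPPORT if the type is unknown).
 * RTNL lock must be held.
 */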
struct vport *ovs_vport_add(const struct vport_parms *parms)
{
	struct vport *vport;
	int err = 0;
	int i;

	ASSERT_RTNL();

	for (i = 0; i < ARRAY_SIZE(vport_ops_list); i++) {
		if (vport_ops_list[i]->type == parms->type) {
			struct hlist_head *bucket;

			vport = vport_ops_list[i]->create(parms);
			if (IS_ERR(vport)) {
				err = PTR_ERR(vport);
				goto out;
			}

			bucket = hash_bucket(ovs_dp_get_net(vport->dp),
					     vport->ops->get_name(vport));
			hlist_add_head_rcu(&vport->hash_node, bucket);
			return vport;
		}
	}

	err = -EAFNOSUPPORT;

out:
	return ERR_PTR(err);
}

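/**
 *	ovs_vport_set_options - modify existing vport device (for kernel callers)
 *
 * @vport: vport to modify.
 * @options: New configuration.
 *
 * Passes the netlink-encoded options through to the vport implementation.
 * Returns -EOPNOTSUPP if the vport type does not support options.  RTNL
 * lock must be held.
 */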
int ovs_vport_set_options(struct vport *vport, struct nlattr *options)
{
	ASSERT_RTNL();

	if (!vport->ops->set_options)
		return -EOPNOTSUPP;
	return vport->ops->set_options(vport, options);
}

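/**
 *	ovs_vport_del - delete existing vport device
 *
 * @vport: vport to delete.
 *
 * Removes @vport from the name hash table and asks its implementation to
 * destroy it.  RTNL lock must be held.
 */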
void ovs_vport_del(struct vport *vport)
{
	ASSERT_RTNL();

	hlist_del_rcu(&vport->hash_node);

	vport->ops->destroy(vport);
}

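/**
 *	ovs_vport_get_stats - retrieve device stats
 *
 * @vport: vport from which to retrieve the stats
 * @stats: location to store stats
 *
 * Combines the vport's error counters with its per-CPU packet and byte
 * counters into @stats.
 */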
void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
{
	int i;

	memset(stats, 0, sizeof(*stats));

	/* We potentially have 2 sources of stats that need to be combined:
	 * those we have collected (split into err_stats and percpu_stats) from
	 * set_stats() and device error stats from netdev->get_stats() (for
	 * errors that happen downstream and therefore aren't reported through
	 * our vport_record_error() function).
	 * Stats from first source are reported by ovs (OVS_VPORT_ATTR_STATS).
	 * netdev-stats can be directly read over netlink-ioctl.
	 */

	spin_lock_bh(&vport->stats_lock);

	stats->rx_errors = vport->err_stats.rx_errors;
	stats->tx_errors = vport->err_stats.tx_errors;
	stats->tx_dropped = vport->err_stats.tx_dropped;
	stats->rx_dropped = vport->err_stats.rx_dropped;

	spin_unlock_bh(&vport->stats_lock);

	for_each_possible_cpu(i) {
		const struct vport_percpu_stats *percpu_stats;
		struct vport_percpu_stats local_stats;
		unsigned int start;

		percpu_stats = per_cpu_ptr(vport->percpu_stats, i);

		do {
			start = u64_stats_fetch_begin_bh(&percpu_stats->sync);
			local_stats = *percpu_stats;
		} while (u64_stats_fetch_retry_bh(&percpu_stats->sync, start));

		stats->rx_bytes += local_stats.rx_bytes;
		stats->rx_packets += local_stats.rx_packets;
		stats->tx_bytes += local_stats.tx_bytes;
		stats->tx_packets += local_stats.tx_packets;
	}
}

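/**
 *	ovs_vport_get_options - retrieve device options
 *
 * @vport: vport from which to retrieve the options.
 * @skb: sk_buff where options should be appended.
 *
 * Appends an OVS_VPORT_ATTR_OPTIONS attribute containing the vport's nested,
 * type-specific options to @skb.  Returns 0 on success, -EMSGSIZE if @skb
 * has insufficient room, or another negative error code returned by the
 * vport implementation.
 */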
int ovs_vport_get_options(const struct vport *vport, struct sk_buff *skb)
{
	struct nlattr *nla;

	nla = nla_nest_start(skb, OVS_VPORT_ATTR_OPTIONS);
	if (!nla)
		return -EMSGSIZE;

	if (vport->ops->get_options) {
		int err = vport->ops->get_options(vport, skb);
		if (err) {
			nla_nest_cancel(skb, nla);
			return err;
		}
	}

	nla_nest_end(skb, nla);
	return 0;
}

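/**
 *	ovs_vport_receive - pass up received packet to the datapath for processing
 *
 * @vport: vport that received the packet
 * @skb: skb that was received
 *
 * Updates the vport's per-CPU receive stats and hands the packet to the
 * datapath.
 */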
void ovs_vport_receive(struct vport *vport, struct sk_buff *skb)
{
	struct vport_percpu_stats *stats;

	stats = per_cpu_ptr(vport->percpu_stats, smp_processor_id());

	u64_stats_update_begin(&stats->sync);
	stats->rx_packets++;
	stats->rx_bytes += skb->len;
	u64_stats_update_end(&stats->sync);

	ovs_dp_process_received_packet(vport, skb);
}

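/**
 *	ovs_vport_send - send a packet on a device
 *
 * @vport: vport on which to send the packet
 * @skb: skb to send
 *
 * Sends the packet through the vport implementation, updates the transmit
 * stats on success, and returns the number of bytes sent.
 */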
int ovs_vport_send(struct vport *vport, struct sk_buff *skb)
{
	int sent = vport->ops->send(vport, skb);

	if (likely(sent)) {
		struct vport_percpu_stats *stats;

		stats = per_cpu_ptr(vport->percpu_stats, smp_processor_id());

		u64_stats_update_begin(&stats->sync);
		stats->tx_packets++;
		stats->tx_bytes += sent;
		u64_stats_update_end(&stats->sync);
	}
	return sent;
}

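/**
 *	ovs_vport_record_error - indicate device error to generic stats layer
 *
 * @vport: vport that encountered the error
 * @err_type: one of enum vport_err_type types to indicate the error type
 *
 * Increments the matching counter in the vport's error stats under the
 * stats lock.
 */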
void ovs_vport_record_error(struct vport *vport, enum vport_err_type err_type)
{
	spin_lock(&vport->stats_lock);

	switch (err_type) {
	case VPORT_E_RX_DROPPED:
		vport->err_stats.rx_dropped++;
		break;

	case VPORT_E_RX_ERROR:
		vport->err_stats.rx_errors++;
		break;

	case VPORT_E_TX_DROPPED:
		vport->err_stats.tx_dropped++;
		break;

	case VPORT_E_TX_ERROR:
		vport->err_stats.tx_errors++;
		break;
	}

	spin_unlock(&vport->stats_lock);
}