Linux Kernel 3.7.1
ackvec.c
/*
 *  net/dccp/ackvec.c
 *
 *  An implementation of Ack Vectors for the DCCP protocol
 *  Copyright (c) 2007 University of Aberdeen, Scotland, UK
 *  Copyright (c) 2005 Arnaldo Carvalho de Melo <[email protected]>
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; version 2 of the License;
 */
#include "dccp.h"
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>

static struct kmem_cache *dccp_ackvec_slab;
static struct kmem_cache *dccp_ackvec_record_slab;

struct dccp_ackvec *dccp_ackvec_alloc(const gfp_t priority)
{
        struct dccp_ackvec *av = kmem_cache_zalloc(dccp_ackvec_slab, priority);

        if (av != NULL) {
                av->av_buf_head = av->av_buf_tail = DCCPAV_MAX_ACKVEC_LEN - 1;
                INIT_LIST_HEAD(&av->av_records);
        }
        return av;
}

static void dccp_ackvec_purge_records(struct dccp_ackvec *av)
{
        struct dccp_ackvec_record *cur, *next;

        list_for_each_entry_safe(cur, next, &av->av_records, avr_node)
                kmem_cache_free(dccp_ackvec_record_slab, cur);
        INIT_LIST_HEAD(&av->av_records);
}

void dccp_ackvec_free(struct dccp_ackvec *av)
{
        if (likely(av != NULL)) {
                dccp_ackvec_purge_records(av);
                kmem_cache_free(dccp_ackvec_slab, av);
        }
}

int dccp_ackvec_update_records(struct dccp_ackvec *av, u64 seqno, u8 nonce_sum)
{
        struct dccp_ackvec_record *avr;

        avr = kmem_cache_alloc(dccp_ackvec_record_slab, GFP_ATOMIC);
        if (avr == NULL)
                return -ENOBUFS;

        avr->avr_ack_seqno  = seqno;
        avr->avr_ack_ptr    = av->av_buf_head;
        avr->avr_ack_ackno  = av->av_buf_ackno;
        avr->avr_ack_nonce  = nonce_sum;
        avr->avr_ack_runlen = dccp_ackvec_runlen(av->av_buf + av->av_buf_head);
        /*
         * When the buffer overflows, we keep no more than one record. This is
         * the simplest way of disambiguating sender-Acks dating from before
         * the overflow from sender-Acks which refer to after the overflow;
         * a simple solution is preferable here since we are handling an exception.
         */
        if (av->av_overflow)
                dccp_ackvec_purge_records(av);
        /*
         * Since GSS is incremented for each packet, the list is automatically
         * arranged in descending order of @ack_seqno.
         */
        list_add(&avr->avr_node, &av->av_records);

        dccp_pr_debug("Added Vector, ack_seqno=%llu, ack_ackno=%llu (rl=%u)\n",
                      (unsigned long long)avr->avr_ack_seqno,
                      (unsigned long long)avr->avr_ack_ackno,
                      avr->avr_ack_runlen);
        return 0;
}

static struct dccp_ackvec_record *dccp_ackvec_lookup(struct list_head *av_list,
                                                     const u64 ackno)
{
        struct dccp_ackvec_record *avr;
        /*
         * Exploit that records are inserted in descending order of sequence
         * number, start with the oldest record first. If @ackno is `before'
         * the earliest ack_ackno, the packet is too old to be considered.
         */
        list_for_each_entry_reverse(avr, av_list, avr_node) {
                if (avr->avr_ack_seqno == ackno)
                        return avr;
                if (before48(ackno, avr->avr_ack_seqno))
                        break;
        }
        return NULL;
}

/*
 * Buffer index and length computation using modulo-buffersize arithmetic.
 * Note that, as pointers move from right to left, head is `before' tail.
 */
static inline u16 __ackvec_idx_add(const u16 a, const u16 b)
{
        return (a + b) % DCCPAV_MAX_ACKVEC_LEN;
}

static inline u16 __ackvec_idx_sub(const u16 a, const u16 b)
{
        return __ackvec_idx_add(a, DCCPAV_MAX_ACKVEC_LEN - b);
}
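
/*
 * Worked example of the circular index arithmetic above (illustrative only,
 * using a hypothetical buffer size of 8 rather than the real value of
 * DCCPAV_MAX_ACKVEC_LEN):
 *
 *   __ackvec_idx_add(6, 3) == (6 + 3) % 8 == 1           (wraps past the end)
 *   __ackvec_idx_sub(1, 3) == __ackvec_idx_add(1, 8 - 3)
 *                          == (1 + 5) % 8 == 6           (wraps backwards)
 *
 * Subtraction is thus expressed as addition of the modular complement, which
 * keeps every index within [0, DCCPAV_MAX_ACKVEC_LEN).
 */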

u16 dccp_ackvec_buflen(const struct dccp_ackvec *av)
{
        if (unlikely(av->av_overflow))
                return DCCPAV_MAX_ACKVEC_LEN;
        return __ackvec_idx_sub(av->av_buf_tail, av->av_buf_head);
}

static void dccp_ackvec_update_old(struct dccp_ackvec *av, s64 distance,
                                   u64 seqno, enum dccp_ackvec_states state)
{
        u16 ptr = av->av_buf_head;

        BUG_ON(distance > 0);
        if (unlikely(dccp_ackvec_is_empty(av)))
                return;

        do {
                u8 runlen = dccp_ackvec_runlen(av->av_buf + ptr);

                if (distance + runlen >= 0) {
                        /*
                         * Only update the state if packet has not been received
                         * yet. This is OK as per the second table in RFC 4340,
                         * 11.4.1; i.e. here we are using the following table:
                         *                     RECEIVED
                         *                      0   1   3
                         *              S     +---+---+---+
                         *              T   0 | 0 | 0 | 0 |
                         *              O     +---+---+---+
                         *              R   1 | 1 | 1 | 1 |
                         *              E     +---+---+---+
                         *              D   3 | 0 | 1 | 3 |
                         *                    +---+---+---+
                         * The "Not Received" state was set by reserve_seats().
                         */
                        if (av->av_buf[ptr] == DCCPAV_NOT_RECEIVED)
                                av->av_buf[ptr] = state;
                        else
                                dccp_pr_debug("Not changing %llu state to %u\n",
                                              (unsigned long long)seqno, state);
                        break;
                }

                distance += runlen + 1;
                ptr       = __ackvec_idx_add(ptr, 1);

        } while (ptr != av->av_buf_tail);
}

/* Mark @num entries after buf_head as "Not yet received". */
static void dccp_ackvec_reserve_seats(struct dccp_ackvec *av, u16 num)
{
        u16 start = __ackvec_idx_add(av->av_buf_head, 1),
            len   = DCCPAV_MAX_ACKVEC_LEN - start;

        /* check for buffer wrap-around */
        if (num > len) {
                memset(av->av_buf + start, DCCPAV_NOT_RECEIVED, len);
                start = 0;
                num  -= len;
        }
        if (num)
                memset(av->av_buf + start, DCCPAV_NOT_RECEIVED, num);
}
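
/*
 * Illustration of the wrap-around handling above (hypothetical numbers, again
 * assuming a buffer size of 8 in place of DCCPAV_MAX_ACKVEC_LEN): with
 * av_buf_head == 6 and num == 4, start == 7 and len == 1. Since num > len,
 * the first memset marks cell 7, start is reset to 0, and the remaining
 * num == 3 cells (indices 0..2) are marked by the second memset.
 */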

static void dccp_ackvec_add_new(struct dccp_ackvec *av, u32 num_packets,
                                u64 seqno, enum dccp_ackvec_states state)
{
        u32 num_cells = num_packets;

        if (num_packets > DCCPAV_BURST_THRESH) {
                u32 lost_packets = num_packets - 1;

                DCCP_WARN("Warning: large burst loss (%u)\n", lost_packets);
                /*
                 * We received 1 packet and have a loss of size "num_packets-1"
                 * which we squeeze into num_cells-1 rather than reserving an
                 * entire byte for each lost packet.
                 * The reason is that the vector grows in O(burst_length); when
                 * it grows too large there will be no room left for the payload.
                 * This is a trade-off: if a few packets out of the burst show
                 * up later, their state will not be changed; it is simply too
                 * costly to reshuffle/reallocate/copy the buffer each time.
                 * Should such problems persist, we will need to switch to a
                 * different underlying data structure.
                 */
                for (num_packets = num_cells = 1; lost_packets; ++num_cells) {
                        u8 len = min(lost_packets, (u32)DCCPAV_MAX_RUNLEN);

                        av->av_buf_head = __ackvec_idx_sub(av->av_buf_head, 1);
                        av->av_buf[av->av_buf_head] = DCCPAV_NOT_RECEIVED | len;

                        lost_packets -= len;
                }
        }

        if (num_cells + dccp_ackvec_buflen(av) >= DCCPAV_MAX_ACKVEC_LEN) {
                DCCP_CRIT("Ack Vector buffer overflow: dropping old entries\n");
                av->av_overflow = true;
        }

        av->av_buf_head = __ackvec_idx_sub(av->av_buf_head, num_packets);
        if (av->av_overflow)
                av->av_buf_tail = av->av_buf_head;

        av->av_buf[av->av_buf_head] = state;
        av->av_buf_ackno            = seqno;

        if (num_packets > 1)
                dccp_ackvec_reserve_seats(av, num_packets - 1);
}

void dccp_ackvec_input(struct dccp_ackvec *av, struct sk_buff *skb)
{
        u64 seqno = DCCP_SKB_CB(skb)->dccpd_seq;
        enum dccp_ackvec_states state = DCCPAV_RECEIVED;

        if (dccp_ackvec_is_empty(av)) {
                dccp_ackvec_add_new(av, 1, seqno, state);
                av->av_tail_ackno = seqno;

        } else {
                s64 num_packets = dccp_delta_seqno(av->av_buf_ackno, seqno);
                u8 *current_head = av->av_buf + av->av_buf_head;

                if (num_packets == 1 &&
                    dccp_ackvec_state(current_head) == state &&
                    dccp_ackvec_runlen(current_head) < DCCPAV_MAX_RUNLEN) {

                        *current_head += 1;
                        av->av_buf_ackno = seqno;

                } else if (num_packets > 0) {
                        dccp_ackvec_add_new(av, num_packets, seqno, state);
                } else {
                        dccp_ackvec_update_old(av, num_packets, seqno, state);
                }
        }
}
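
/*
 * Sketch of how the three branches above behave for a hypothetical sequence
 * of arrivals (sequence numbers chosen purely for illustration):
 *
 *  - buffer holds up to seqno 10, packet 11 arrives: num_packets == 1 and the
 *    head cell is in the same state, so its run length is simply incremented;
 *  - packet 15 arrives next: num_packets == 4, so dccp_ackvec_add_new()
 *    stores a new cell for 15 and reserves three "Not Received" cells for the
 *    missing packets 12-14;
 *  - packet 13 then arrives late: num_packets <= 0, so
 *    dccp_ackvec_update_old() flips its reserved cell from "Not Received" to
 *    the received state.
 */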

void dccp_ackvec_clear_state(struct dccp_ackvec *av, const u64 ackno)
{
        struct dccp_ackvec_record *avr, *next;
        u8 runlen_now, eff_runlen;
        s64 delta;

        avr = dccp_ackvec_lookup(&av->av_records, ackno);
        if (avr == NULL)
                return;
        /*
         * Deal with outdated acknowledgments: this arises when e.g. there are
         * several old records and the acks from the peer come in slowly. In
         * that case we may still have records that pre-date tail_ackno.
         */
        delta = dccp_delta_seqno(av->av_tail_ackno, avr->avr_ack_ackno);
        if (delta < 0)
                goto free_records;
        /*
         * Deal with overlapping Ack Vectors: don't subtract more than the
         * number of packets between tail_ackno and ack_ackno.
         */
        eff_runlen = delta < avr->avr_ack_runlen ? delta : avr->avr_ack_runlen;

        runlen_now = dccp_ackvec_runlen(av->av_buf + avr->avr_ack_ptr);
        /*
         * The run length of Ack Vector cells does not decrease over time. If
         * the run length is the same as at the time the Ack Vector was sent,
         * we free the ack_ptr cell. That cell can however not be freed if the
         * run length has increased: in this case we need to move the tail
         * pointer backwards (towards higher indices), to its next-oldest
         * neighbour.
         */
        if (runlen_now > eff_runlen) {

                av->av_buf[avr->avr_ack_ptr] -= eff_runlen + 1;
                av->av_buf_tail = __ackvec_idx_add(avr->avr_ack_ptr, 1);

                /* This move may not have cleared the overflow flag. */
                if (av->av_overflow)
                        av->av_overflow = (av->av_buf_head == av->av_buf_tail);
        } else {
                av->av_buf_tail = avr->avr_ack_ptr;
                /*
                 * We have made sure that avr points to a valid cell within the
                 * buffer. This cell is either older than head, or equals head
                 * (empty buffer): in both cases we no longer have any overflow.
                 */
                av->av_overflow = 0;
        }

        /*
         * The peer has acknowledged up to and including ack_ackno. Hence the
         * first packet in group (2) of 11.4.2 is the successor of ack_ackno.
         */
        av->av_tail_ackno = ADD48(avr->avr_ack_ackno, 1);

free_records:
        list_for_each_entry_safe_from(avr, next, &av->av_records, avr_node) {
                list_del(&avr->avr_node);
                kmem_cache_free(dccp_ackvec_record_slab, avr);
        }
}

/*
 * Routines to keep track of Ack Vectors received in an skb
 */
int dccp_ackvec_parsed_add(struct list_head *head, u8 *vec, u8 len, u8 nonce)
{
        struct dccp_ackvec_parsed *new = kmalloc(sizeof(*new), GFP_ATOMIC);

        if (new == NULL)
                return -ENOBUFS;
        new->vec   = vec;
        new->len   = len;
        new->nonce = nonce;

        list_add_tail(&new->node, head);
        return 0;
}
EXPORT_SYMBOL_GPL(dccp_ackvec_parsed_add);

void dccp_ackvec_parsed_cleanup(struct list_head *parsed_chunks)
{
        struct dccp_ackvec_parsed *cur, *next;

        list_for_each_entry_safe(cur, next, parsed_chunks, node)
                kfree(cur);
        INIT_LIST_HEAD(parsed_chunks);
}
EXPORT_SYMBOL_GPL(dccp_ackvec_parsed_cleanup);

int __init dccp_ackvec_init(void)
{
        dccp_ackvec_slab = kmem_cache_create("dccp_ackvec",
                                             sizeof(struct dccp_ackvec), 0,
                                             SLAB_HWCACHE_ALIGN, NULL);
        if (dccp_ackvec_slab == NULL)
                goto out_err;

        dccp_ackvec_record_slab = kmem_cache_create("dccp_ackvec_record",
                                             sizeof(struct dccp_ackvec_record),
                                             0, SLAB_HWCACHE_ALIGN, NULL);
        if (dccp_ackvec_record_slab == NULL)
                goto out_destroy_slab;

        return 0;

out_destroy_slab:
        kmem_cache_destroy(dccp_ackvec_slab);
        dccp_ackvec_slab = NULL;
out_err:
        DCCP_CRIT("Unable to create Ack Vector slab cache");
        return -ENOBUFS;
}

void dccp_ackvec_exit(void)
{
        if (dccp_ackvec_slab != NULL) {
                kmem_cache_destroy(dccp_ackvec_slab);
                dccp_ackvec_slab = NULL;
        }
        if (dccp_ackvec_record_slab != NULL) {
                kmem_cache_destroy(dccp_ackvec_record_slab);
                dccp_ackvec_record_slab = NULL;
        }
}