Linux Kernel 3.7.1
ar-connection.c

/* RxRPC virtual connection handler
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/crypto.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

static void rxrpc_connection_reaper(struct work_struct *work);

LIST_HEAD(rxrpc_connections);
DEFINE_RWLOCK(rxrpc_connection_lock);
static unsigned long rxrpc_connection_timeout = 10 * 60;
static DECLARE_DELAYED_WORK(rxrpc_connection_reap, rxrpc_connection_reaper);

/*
 * allocate a new client connection bundle
 */
static struct rxrpc_conn_bundle *rxrpc_alloc_bundle(gfp_t gfp)
{
        struct rxrpc_conn_bundle *bundle;

        _enter("");

        bundle = kzalloc(sizeof(struct rxrpc_conn_bundle), gfp);
        if (bundle) {
                INIT_LIST_HEAD(&bundle->unused_conns);
                INIT_LIST_HEAD(&bundle->avail_conns);
                INIT_LIST_HEAD(&bundle->busy_conns);
                init_waitqueue_head(&bundle->chanwait);
                atomic_set(&bundle->usage, 1);
        }

        _leave(" = %p", bundle);
        return bundle;
}

/*
 * compare bundle parameters with what we're looking for
 * - return -ve, 0 or +ve
 */
static inline
int rxrpc_cmp_bundle(const struct rxrpc_conn_bundle *bundle,
                     struct key *key, __be16 service_id)
{
        return (bundle->service_id - service_id) ?:
                ((unsigned long) bundle->key - (unsigned long) key);
}
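
The comparator above leans on the GNU C `a ?: b` extension: each subtraction yields a three-way result for one key field, and the first non-zero value wins, which is exactly the -ve/0/+ve ordering the rb-tree walks below expect. A minimal, self-contained sketch of the same idiom follows; the struct and field names are illustrative, not taken from the kernel, and it needs gcc or clang for the `?:` extension.

/* Illustrative sketch only: chained three-way comparison with the GNU "?:"
 * extension, as used by rxrpc_cmp_bundle() above.  Compile with gcc/clang. */
#include <stdio.h>

struct demo_key {
        int service_id;
        unsigned long key;
};

/* Return <0, 0 or >0; the first differing field decides the order. */
static long demo_cmp(const struct demo_key *a, const struct demo_key *b)
{
        return (a->service_id - b->service_id) ?:
                (long)(a->key - b->key);
}

int main(void)
{
        struct demo_key x = { .service_id = 7, .key = 100 };
        struct demo_key y = { .service_id = 7, .key = 200 };

        printf("cmp = %ld\n", demo_cmp(&x, &y));   /* negative: x sorts first */
        return 0;
}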

/*
 * get bundle of client connections that a client socket can make use of
 */
struct rxrpc_conn_bundle *rxrpc_get_bundle(struct rxrpc_sock *rx,
                                           struct rxrpc_transport *trans,
                                           struct key *key,
                                           __be16 service_id,
                                           gfp_t gfp)
{
        struct rxrpc_conn_bundle *bundle, *candidate;
        struct rb_node *p, *parent, **pp;

        _enter("%p{%x},%x,%hx,",
               rx, key_serial(key), trans->debug_id, ntohs(service_id));

        if (rx->trans == trans && rx->bundle) {
                atomic_inc(&rx->bundle->usage);
                return rx->bundle;
        }

        /* search the extant bundles first for one that matches the specified
         * user ID */
        spin_lock(&trans->client_lock);

        p = trans->bundles.rb_node;
        while (p) {
                bundle = rb_entry(p, struct rxrpc_conn_bundle, node);

                if (rxrpc_cmp_bundle(bundle, key, service_id) < 0)
                        p = p->rb_left;
                else if (rxrpc_cmp_bundle(bundle, key, service_id) > 0)
                        p = p->rb_right;
                else
                        goto found_extant_bundle;
        }

        spin_unlock(&trans->client_lock);

        /* not yet present - create a candidate for a new record and then
         * redo the search */
        candidate = rxrpc_alloc_bundle(gfp);
        if (!candidate) {
                _leave(" = -ENOMEM");
                return ERR_PTR(-ENOMEM);
        }

        candidate->key = key_get(key);
        candidate->service_id = service_id;

        spin_lock(&trans->client_lock);

        pp = &trans->bundles.rb_node;
        parent = NULL;
        while (*pp) {
                parent = *pp;
                bundle = rb_entry(parent, struct rxrpc_conn_bundle, node);

                if (rxrpc_cmp_bundle(bundle, key, service_id) < 0)
                        pp = &(*pp)->rb_left;
                else if (rxrpc_cmp_bundle(bundle, key, service_id) > 0)
                        pp = &(*pp)->rb_right;
                else
                        goto found_extant_second;
        }

        /* second search also failed; add the new bundle */
        bundle = candidate;
        candidate = NULL;

        rb_link_node(&bundle->node, parent, pp);
        rb_insert_color(&bundle->node, &trans->bundles);
        spin_unlock(&trans->client_lock);
        _net("BUNDLE new on trans %d", trans->debug_id);
        if (!rx->bundle && rx->sk.sk_state == RXRPC_CLIENT_CONNECTED) {
                atomic_inc(&bundle->usage);
                rx->bundle = bundle;
        }
        _leave(" = %p [new]", bundle);
        return bundle;

        /* we found the bundle in the list immediately */
found_extant_bundle:
        atomic_inc(&bundle->usage);
        spin_unlock(&trans->client_lock);
        _net("BUNDLE old on trans %d", trans->debug_id);
        if (!rx->bundle && rx->sk.sk_state == RXRPC_CLIENT_CONNECTED) {
                atomic_inc(&bundle->usage);
                rx->bundle = bundle;
        }
        _leave(" = %p [extant %d]", bundle, atomic_read(&bundle->usage));
        return bundle;

        /* we found the bundle on the second time through the list */
found_extant_second:
        atomic_inc(&bundle->usage);
        spin_unlock(&trans->client_lock);
        kfree(candidate);
        _net("BUNDLE old2 on trans %d", trans->debug_id);
        if (!rx->bundle && rx->sk.sk_state == RXRPC_CLIENT_CONNECTED) {
                atomic_inc(&bundle->usage);
                rx->bundle = bundle;
        }
        _leave(" = %p [second %d]", bundle, atomic_read(&bundle->usage));
        return bundle;
}
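
rxrpc_get_bundle() is an instance of a common lock-friendly lookup-or-create pattern: search under the lock; if nothing matches, drop the lock, allocate a candidate (allocation may sleep), retake the lock and search again, then either insert the candidate or throw it away because another thread won the race. Below is a self-contained userspace sketch of that pattern; the pthread mutex and the toy linked list are illustrative stand-ins, not kernel code.

/* Illustrative sketch: "allocate outside the lock, re-check under the lock",
 * the race-handling pattern used by rxrpc_get_bundle() above.  Standalone
 * userspace code; the list and its key are stand-ins, not kernel structures. */
#include <pthread.h>
#include <stdlib.h>

struct node {
        int key;
        struct node *next;
};

static struct node *list_head;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static struct node *find_locked(int key)
{
        struct node *n;

        for (n = list_head; n; n = n->next)
                if (n->key == key)
                        return n;
        return NULL;
}

/* Look up a node by key, creating it if it does not exist yet. */
static struct node *get_node(int key)
{
        struct node *n, *candidate;

        pthread_mutex_lock(&list_lock);
        n = find_locked(key);
        pthread_mutex_unlock(&list_lock);
        if (n)
                return n;

        /* allocate outside the lock - this may be slow or may sleep */
        candidate = calloc(1, sizeof(*candidate));
        if (!candidate)
                return NULL;
        candidate->key = key;

        pthread_mutex_lock(&list_lock);
        n = find_locked(key);           /* redo the search: we may have raced */
        if (!n) {
                candidate->next = list_head;
                list_head = candidate;
                n = candidate;
                candidate = NULL;       /* inserted - do not free it below */
        }
        pthread_mutex_unlock(&list_lock);

        free(candidate);                /* no-op if the candidate was inserted */
        return n;
}

int main(void)
{
        /* the second call finds the node the first call inserted */
        return (get_node(42) && get_node(42)) ? 0 : 1;
}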

/*
 * release a bundle
 */
void rxrpc_put_bundle(struct rxrpc_transport *trans,
                      struct rxrpc_conn_bundle *bundle)
{
        _enter("%p,%p{%d}",trans, bundle, atomic_read(&bundle->usage));

        if (atomic_dec_and_lock(&bundle->usage, &trans->client_lock)) {
                _debug("Destroy bundle");
                rb_erase(&bundle->node, &trans->bundles);
                spin_unlock(&trans->client_lock);
                ASSERT(list_empty(&bundle->unused_conns));
                ASSERT(list_empty(&bundle->avail_conns));
                ASSERT(list_empty(&bundle->busy_conns));
                ASSERTCMP(bundle->num_conns, ==, 0);
                key_put(bundle->key);
                kfree(bundle);
        }

        _leave("");
}

/*
 * allocate a new connection
 */
static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
{
        struct rxrpc_connection *conn;

        _enter("");

        conn = kzalloc(sizeof(struct rxrpc_connection), gfp);
        if (conn) {
                INIT_WORK(&conn->processor, &rxrpc_process_connection);
                INIT_LIST_HEAD(&conn->bundle_link);
                conn->calls = RB_ROOT;
                skb_queue_head_init(&conn->rx_queue);
                rwlock_init(&conn->lock);
                spin_lock_init(&conn->state_lock);
                atomic_set(&conn->usage, 1);
                conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
                conn->avail_calls = RXRPC_MAXCALLS;
                conn->size_align = 4;
                conn->header_size = sizeof(struct rxrpc_header);
        }

        _leave(" = %p{%d}", conn, conn ? conn->debug_id : 0);
        return conn;
}

/*
 * assign a connection ID to a connection and add it to the transport's
 * connection lookup tree
 * - called with transport client lock held
 */
static void rxrpc_assign_connection_id(struct rxrpc_connection *conn)
{
        struct rxrpc_connection *xconn;
        struct rb_node *parent, **p;
        __be32 epoch;
        u32 real_conn_id;

        _enter("");

        epoch = conn->epoch;

        write_lock_bh(&conn->trans->conn_lock);

        conn->trans->conn_idcounter += RXRPC_CID_INC;
        if (conn->trans->conn_idcounter < RXRPC_CID_INC)
                conn->trans->conn_idcounter = RXRPC_CID_INC;
        real_conn_id = conn->trans->conn_idcounter;

attempt_insertion:
        parent = NULL;
        p = &conn->trans->client_conns.rb_node;

        while (*p) {
                parent = *p;
                xconn = rb_entry(parent, struct rxrpc_connection, node);

                if (epoch < xconn->epoch)
                        p = &(*p)->rb_left;
                else if (epoch > xconn->epoch)
                        p = &(*p)->rb_right;
                else if (real_conn_id < xconn->real_conn_id)
                        p = &(*p)->rb_left;
                else if (real_conn_id > xconn->real_conn_id)
                        p = &(*p)->rb_right;
                else
                        goto id_exists;
        }

        /* we've found a suitable hole - arrange for this connection to occupy
         * it */
        rb_link_node(&conn->node, parent, p);
        rb_insert_color(&conn->node, &conn->trans->client_conns);

        conn->real_conn_id = real_conn_id;
        conn->cid = htonl(real_conn_id);
        write_unlock_bh(&conn->trans->conn_lock);
        _leave(" [CONNID %x CID %x]", real_conn_id, ntohl(conn->cid));
        return;

        /* we found a connection with the proposed ID - walk the tree from that
         * point looking for the next unused ID */
id_exists:
        for (;;) {
                real_conn_id += RXRPC_CID_INC;
                if (real_conn_id < RXRPC_CID_INC) {
                        real_conn_id = RXRPC_CID_INC;
                        conn->trans->conn_idcounter = real_conn_id;
                        goto attempt_insertion;
                }

                parent = rb_next(parent);
                if (!parent)
                        goto attempt_insertion;

                xconn = rb_entry(parent, struct rxrpc_connection, node);
                if (epoch < xconn->epoch ||
                    real_conn_id < xconn->real_conn_id)
                        goto attempt_insertion;
        }
}
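
The ID assignment above steps conn_idcounter by RXRPC_CID_INC and treats `counter < RXRPC_CID_INC` as the unsigned-overflow test, restarting just above the reserved low values. Assuming RXRPC_CID_INC is the per-connection channel count (4), this also keeps the low bits of every connection ID clear for channel numbers. A small self-contained sketch of that counter logic, with the increment value as a stated assumption:

/* Illustrative sketch: stepping an ID counter by a power-of-two increment so
 * the low bits stay clear for channel numbers, and wrapping past overflow.
 * CID_INC mirrors what I assume RXRPC_CID_INC to be (4, one per channel). */
#include <stdio.h>
#include <stdint.h>

#define CID_INC 4u                      /* assumed channel count per conn */

static uint32_t next_conn_id(uint32_t *counter)
{
        *counter += CID_INC;
        if (*counter < CID_INC)         /* unsigned wrap-around detected */
                *counter = CID_INC;     /* restart above the reserved IDs */
        return *counter;
}

int main(void)
{
        uint32_t counter = 0xfffffffcu; /* about to overflow */

        printf("%x\n", next_conn_id(&counter));   /* wraps back to 4 */
        printf("%x\n", next_conn_id(&counter));   /* then 8 */
        return 0;
}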

/*
 * add a call to a connection's call-by-ID tree
 */
static void rxrpc_add_call_ID_to_conn(struct rxrpc_connection *conn,
                                      struct rxrpc_call *call)
{
        struct rxrpc_call *xcall;
        struct rb_node *parent, **p;
        __be32 call_id;

        write_lock_bh(&conn->lock);

        call_id = call->call_id;
        p = &conn->calls.rb_node;
        parent = NULL;
        while (*p) {
                parent = *p;
                xcall = rb_entry(parent, struct rxrpc_call, conn_node);

                if (call_id < xcall->call_id)
                        p = &(*p)->rb_left;
                else if (call_id > xcall->call_id)
                        p = &(*p)->rb_right;
                else
                        BUG();
        }

        rb_link_node(&call->conn_node, parent, p);
        rb_insert_color(&call->conn_node, &conn->calls);

        write_unlock_bh(&conn->lock);
}

/*
 * connect a call on an exclusive connection
 */
static int rxrpc_connect_exclusive(struct rxrpc_sock *rx,
                                   struct rxrpc_transport *trans,
                                   __be16 service_id,
                                   struct rxrpc_call *call,
                                   gfp_t gfp)
{
        struct rxrpc_connection *conn;
        int chan, ret;

        _enter("");

        conn = rx->conn;
        if (!conn) {
                /* not yet present - create a candidate for a new connection
                 * and then redo the check */
                conn = rxrpc_alloc_connection(gfp);
                if (!conn) {
                        _leave(" = -ENOMEM");
                        return -ENOMEM;
                }

                conn->trans = trans;
                conn->bundle = NULL;
                conn->service_id = service_id;
                conn->epoch = rxrpc_epoch;
                conn->in_clientflag = 0;
                conn->out_clientflag = RXRPC_CLIENT_INITIATED;
                conn->cid = 0;
                conn->state = RXRPC_CONN_CLIENT;
                conn->avail_calls = RXRPC_MAXCALLS - 1;
                conn->security_level = rx->min_sec_level;
                conn->key = key_get(rx->key);

                ret = rxrpc_init_client_conn_security(conn);
                if (ret < 0) {
                        key_put(conn->key);
                        kfree(conn);
                        _leave(" = %d [key]", ret);
                        return ret;
                }

                write_lock_bh(&rxrpc_connection_lock);
                list_add_tail(&conn->link, &rxrpc_connections);
                write_unlock_bh(&rxrpc_connection_lock);

                spin_lock(&trans->client_lock);
                atomic_inc(&trans->usage);

                _net("CONNECT EXCL new %d on TRANS %d",
                     conn->debug_id, conn->trans->debug_id);

                rxrpc_assign_connection_id(conn);
                rx->conn = conn;
        }

        /* we've got a connection with a free channel and we can now attach the
         * call to it
         * - we're holding the transport's client lock
         * - we're holding a reference on the connection
         */
        for (chan = 0; chan < RXRPC_MAXCALLS; chan++)
                if (!conn->channels[chan])
                        goto found_channel;
        goto no_free_channels;

found_channel:
        atomic_inc(&conn->usage);
        conn->channels[chan] = call;
        call->conn = conn;
        call->channel = chan;
        call->cid = conn->cid | htonl(chan);
        call->call_id = htonl(++conn->call_counter);

        _net("CONNECT client on conn %d chan %d as call %x",
             conn->debug_id, chan, ntohl(call->call_id));

        spin_unlock(&trans->client_lock);

        rxrpc_add_call_ID_to_conn(conn, call);
        _leave(" = 0");
        return 0;

no_free_channels:
        spin_unlock(&trans->client_lock);
        _leave(" = -ENOSR");
        return -ENOSR;
}
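
When a call is attached, its wire identifier is built as `call->cid = conn->cid | htonl(chan)`: the connection ID occupies the high bits of a 32-bit big-endian word and the channel index the low bits. The sketch below composes and then decomposes such an identifier; the 4-channel layout and the mask values are my assumptions about the usual rxrpc wire format rather than something defined in this file.

/* Illustrative sketch: packing a channel number into the low bits of a
 * big-endian connection ID, as call->cid = conn->cid | htonl(chan) does
 * above.  The 4-channel layout (low two bits = channel) is an assumption
 * about the rxrpc wire format, not taken from this file. */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define MAXCALLS     4u
#define CHANNELMASK  (MAXCALLS - 1)     /* 0x3 */
#define CIDMASK      (~CHANNELMASK)     /* 0xfffffffc */

int main(void)
{
        uint32_t conn_cid = htonl(0x12345678 & CIDMASK); /* network order */
        unsigned int chan = 2;

        uint32_t call_cid = conn_cid | htonl(chan);

        /* receiver side: convert back to host order and split the fields */
        uint32_t host = ntohl(call_cid);
        printf("conn id %#x, channel %u\n", host & CIDMASK, host & CHANNELMASK);
        return 0;
}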

/*
 * find a connection for a call
 * - called in process context with IRQs enabled
 */
int rxrpc_connect_call(struct rxrpc_sock *rx,
                       struct rxrpc_transport *trans,
                       struct rxrpc_conn_bundle *bundle,
                       struct rxrpc_call *call,
                       gfp_t gfp)
{
        struct rxrpc_connection *conn, *candidate;
        int chan, ret;

        DECLARE_WAITQUEUE(myself, current);

        _enter("%p,%lx,", rx, call->user_call_ID);

        if (test_bit(RXRPC_SOCK_EXCLUSIVE_CONN, &rx->flags))
                return rxrpc_connect_exclusive(rx, trans, bundle->service_id,
                                               call, gfp);

        spin_lock(&trans->client_lock);
        for (;;) {
                /* see if the bundle has a call slot available */
                if (!list_empty(&bundle->avail_conns)) {
                        _debug("avail");
                        conn = list_entry(bundle->avail_conns.next,
                                          struct rxrpc_connection,
                                          bundle_link);
                        if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED) {
                                list_del_init(&conn->bundle_link);
                                bundle->num_conns--;
                                continue;
                        }
                        if (--conn->avail_calls == 0)
                                list_move(&conn->bundle_link,
                                          &bundle->busy_conns);
                        ASSERTCMP(conn->avail_calls, <, RXRPC_MAXCALLS);
                        ASSERT(conn->channels[0] == NULL ||
                               conn->channels[1] == NULL ||
                               conn->channels[2] == NULL ||
                               conn->channels[3] == NULL);
                        atomic_inc(&conn->usage);
                        break;
                }

                if (!list_empty(&bundle->unused_conns)) {
                        _debug("unused");
                        conn = list_entry(bundle->unused_conns.next,
                                          struct rxrpc_connection,
                                          bundle_link);
                        if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED) {
                                list_del_init(&conn->bundle_link);
                                bundle->num_conns--;
                                continue;
                        }
                        ASSERTCMP(conn->avail_calls, ==, RXRPC_MAXCALLS);
                        conn->avail_calls = RXRPC_MAXCALLS - 1;
                        ASSERT(conn->channels[0] == NULL &&
                               conn->channels[1] == NULL &&
                               conn->channels[2] == NULL &&
                               conn->channels[3] == NULL);
                        atomic_inc(&conn->usage);
                        list_move(&conn->bundle_link, &bundle->avail_conns);
                        break;
                }

                /* need to allocate a new connection */
                _debug("get new conn [%d]", bundle->num_conns);

                spin_unlock(&trans->client_lock);

                if (signal_pending(current))
                        goto interrupted;

                if (bundle->num_conns >= 20) {
                        _debug("too many conns");

                        if (!(gfp & __GFP_WAIT)) {
                                _leave(" = -EAGAIN");
                                return -EAGAIN;
                        }

                        add_wait_queue(&bundle->chanwait, &myself);
                        for (;;) {
                                set_current_state(TASK_INTERRUPTIBLE);
                                if (bundle->num_conns < 20 ||
                                    !list_empty(&bundle->unused_conns) ||
                                    !list_empty(&bundle->avail_conns))
                                        break;
                                if (signal_pending(current))
                                        goto interrupted_dequeue;
                                schedule();
                        }
                        remove_wait_queue(&bundle->chanwait, &myself);
                        __set_current_state(TASK_RUNNING);
                        spin_lock(&trans->client_lock);
                        continue;
                }

                /* not yet present - create a candidate for a new connection and then
                 * redo the check */
                candidate = rxrpc_alloc_connection(gfp);
                if (!candidate) {
                        _leave(" = -ENOMEM");
                        return -ENOMEM;
                }

                candidate->trans = trans;
                candidate->bundle = bundle;
                candidate->service_id = bundle->service_id;
                candidate->epoch = rxrpc_epoch;
                candidate->in_clientflag = 0;
                candidate->out_clientflag = RXRPC_CLIENT_INITIATED;
                candidate->cid = 0;
                candidate->state = RXRPC_CONN_CLIENT;
                candidate->avail_calls = RXRPC_MAXCALLS;
                candidate->security_level = rx->min_sec_level;
                candidate->key = key_get(bundle->key);

                ret = rxrpc_init_client_conn_security(candidate);
                if (ret < 0) {
                        key_put(candidate->key);
                        kfree(candidate);
                        _leave(" = %d [key]", ret);
                        return ret;
                }

                write_lock_bh(&rxrpc_connection_lock);
                list_add_tail(&candidate->link, &rxrpc_connections);
                write_unlock_bh(&rxrpc_connection_lock);

                spin_lock(&trans->client_lock);

                list_add(&candidate->bundle_link, &bundle->unused_conns);
                bundle->num_conns++;
                atomic_inc(&bundle->usage);
                atomic_inc(&trans->usage);

                _net("CONNECT new %d on TRANS %d",
                     candidate->debug_id, candidate->trans->debug_id);

                rxrpc_assign_connection_id(candidate);
                if (candidate->security)
                        candidate->security->prime_packet_security(candidate);

                /* leave the candidate lurking in zombie mode attached to the
                 * bundle until we're ready for it */
                rxrpc_put_connection(candidate);
                candidate = NULL;
        }

        /* we've got a connection with a free channel and we can now attach the
         * call to it
         * - we're holding the transport's client lock
         * - we're holding a reference on the connection
         * - we're holding a reference on the bundle
         */
        for (chan = 0; chan < RXRPC_MAXCALLS; chan++)
                if (!conn->channels[chan])
                        goto found_channel;
        ASSERT(conn->channels[0] == NULL ||
               conn->channels[1] == NULL ||
               conn->channels[2] == NULL ||
               conn->channels[3] == NULL);
        BUG();

found_channel:
        conn->channels[chan] = call;
        call->conn = conn;
        call->channel = chan;
        call->cid = conn->cid | htonl(chan);
        call->call_id = htonl(++conn->call_counter);

        _net("CONNECT client on conn %d chan %d as call %x",
             conn->debug_id, chan, ntohl(call->call_id));

        ASSERTCMP(conn->avail_calls, <, RXRPC_MAXCALLS);
        spin_unlock(&trans->client_lock);

        rxrpc_add_call_ID_to_conn(conn, call);

        _leave(" = 0");
        return 0;

interrupted_dequeue:
        remove_wait_queue(&bundle->chanwait, &myself);
        __set_current_state(TASK_RUNNING);
interrupted:
        _leave(" = -ERESTARTSYS");
        return -ERESTARTSYS;
}
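
The "too many conns" branch above uses the classic wait-queue sleep loop: add yourself to the queue, mark yourself TASK_INTERRUPTIBLE, re-check the condition, and only then schedule(), repeating until the condition holds or a signal arrives. A self-contained userspace analogue of the same "sleep until the condition holds, re-checking after every wakeup" pattern is sketched below with a pthread condition variable; unlike the kernel code it does not model signal interruption, and the names are illustrative.

/* Illustrative sketch: wait for a condition in a loop that re-checks it after
 * every wakeup, the userspace analogue of the wait-queue loop above. */
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t chanwait = PTHREAD_COND_INITIALIZER;
static int num_conns;
static const int conn_limit = 20;

/* Block until a connection slot is available, then claim it. */
static void claim_conn_slot(void)
{
        pthread_mutex_lock(&lock);
        while (num_conns >= conn_limit)         /* re-check after every wakeup */
                pthread_cond_wait(&chanwait, &lock);
        num_conns++;
        pthread_mutex_unlock(&lock);
}

/* Release a slot and wake one waiter, like the chanwait wakeup elsewhere. */
static void release_conn_slot(void)
{
        pthread_mutex_lock(&lock);
        num_conns--;
        pthread_cond_signal(&chanwait);
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        claim_conn_slot();
        release_conn_slot();
        return 0;
}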

/*
 * get a record of an incoming connection
 */
struct rxrpc_connection *
rxrpc_incoming_connection(struct rxrpc_transport *trans,
                          struct rxrpc_header *hdr,
                          gfp_t gfp)
{
        struct rxrpc_connection *conn, *candidate = NULL;
        struct rb_node *p, **pp;
        const char *new = "old";
        __be32 epoch;
        u32 conn_id;

        _enter("");

        ASSERT(!irqs_disabled());

        epoch = hdr->epoch;
        conn_id = ntohl(hdr->cid) & RXRPC_CIDMASK;

        /* search the connection list first */
        read_lock_bh(&trans->conn_lock);

        p = trans->server_conns.rb_node;
        while (p) {
                conn = rb_entry(p, struct rxrpc_connection, node);

                _debug("maybe %x", conn->real_conn_id);

                if (epoch < conn->epoch)
                        p = p->rb_left;
                else if (epoch > conn->epoch)
                        p = p->rb_right;
                else if (conn_id < conn->real_conn_id)
                        p = p->rb_left;
                else if (conn_id > conn->real_conn_id)
                        p = p->rb_right;
                else
                        goto found_extant_connection;
        }
        read_unlock_bh(&trans->conn_lock);

        /* not yet present - create a candidate for a new record and then
         * redo the search */
        candidate = rxrpc_alloc_connection(gfp);
        if (!candidate) {
                _leave(" = -ENOMEM");
                return ERR_PTR(-ENOMEM);
        }

        candidate->trans = trans;
        candidate->epoch = hdr->epoch;
        candidate->cid = hdr->cid & cpu_to_be32(RXRPC_CIDMASK);
        candidate->service_id = hdr->serviceId;
        candidate->security_ix = hdr->securityIndex;
        candidate->in_clientflag = RXRPC_CLIENT_INITIATED;
        candidate->out_clientflag = 0;
        candidate->real_conn_id = conn_id;
        candidate->state = RXRPC_CONN_SERVER;
        if (candidate->service_id)
                candidate->state = RXRPC_CONN_SERVER_UNSECURED;

        write_lock_bh(&trans->conn_lock);

        pp = &trans->server_conns.rb_node;
        p = NULL;
        while (*pp) {
                p = *pp;
                conn = rb_entry(p, struct rxrpc_connection, node);

                if (epoch < conn->epoch)
                        pp = &(*pp)->rb_left;
                else if (epoch > conn->epoch)
                        pp = &(*pp)->rb_right;
                else if (conn_id < conn->real_conn_id)
                        pp = &(*pp)->rb_left;
                else if (conn_id > conn->real_conn_id)
                        pp = &(*pp)->rb_right;
                else
                        goto found_extant_second;
        }

        /* we can now add the new candidate to the list */
        conn = candidate;
        candidate = NULL;
        rb_link_node(&conn->node, p, pp);
        rb_insert_color(&conn->node, &trans->server_conns);
        atomic_inc(&conn->trans->usage);

        write_unlock_bh(&trans->conn_lock);

        write_lock_bh(&rxrpc_connection_lock);
        list_add_tail(&conn->link, &rxrpc_connections);
        write_unlock_bh(&rxrpc_connection_lock);

        new = "new";

success:
        _net("CONNECTION %s %d {%x}", new, conn->debug_id, conn->real_conn_id);

        _leave(" = %p {u=%d}", conn, atomic_read(&conn->usage));
        return conn;

        /* we found the connection in the list immediately */
found_extant_connection:
        if (hdr->securityIndex != conn->security_ix) {
                read_unlock_bh(&trans->conn_lock);
                goto security_mismatch;
        }
        atomic_inc(&conn->usage);
        read_unlock_bh(&trans->conn_lock);
        goto success;

        /* we found the connection on the second time through the list */
found_extant_second:
        if (hdr->securityIndex != conn->security_ix) {
                write_unlock_bh(&trans->conn_lock);
                goto security_mismatch;
        }
        atomic_inc(&conn->usage);
        write_unlock_bh(&trans->conn_lock);
        kfree(candidate);
        goto success;

security_mismatch:
        kfree(candidate);
        _leave(" = -EKEYREJECTED");
        return ERR_PTR(-EKEYREJECTED);
}

/*
 * find a connection based on transport and RxRPC connection ID for an incoming
 * packet
 */
struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_transport *trans,
                                               struct rxrpc_header *hdr)
{
        struct rxrpc_connection *conn;
        struct rb_node *p;
        __be32 epoch;
        u32 conn_id;

        _enter(",{%x,%x}", ntohl(hdr->cid), hdr->flags);

        read_lock_bh(&trans->conn_lock);

        conn_id = ntohl(hdr->cid) & RXRPC_CIDMASK;
        epoch = hdr->epoch;

        if (hdr->flags & RXRPC_CLIENT_INITIATED)
                p = trans->server_conns.rb_node;
        else
                p = trans->client_conns.rb_node;

        while (p) {
                conn = rb_entry(p, struct rxrpc_connection, node);

                _debug("maybe %x", conn->real_conn_id);

                if (epoch < conn->epoch)
                        p = p->rb_left;
                else if (epoch > conn->epoch)
                        p = p->rb_right;
                else if (conn_id < conn->real_conn_id)
                        p = p->rb_left;
                else if (conn_id > conn->real_conn_id)
                        p = p->rb_right;
                else
                        goto found;
        }

        read_unlock_bh(&trans->conn_lock);
        _leave(" = NULL");
        return NULL;

found:
        atomic_inc(&conn->usage);
        read_unlock_bh(&trans->conn_lock);
        _leave(" = %p", conn);
        return conn;
}

/*
 * release a virtual connection
 */
void rxrpc_put_connection(struct rxrpc_connection *conn)
{
        _enter("%p{u=%d,d=%d}",
               conn, atomic_read(&conn->usage), conn->debug_id);

        ASSERTCMP(atomic_read(&conn->usage), >, 0);

        conn->put_time = get_seconds();
        if (atomic_dec_and_test(&conn->usage)) {
                _debug("zombie");
                rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
        }

        _leave("");
}

/*
 * destroy a virtual connection
 */
static void rxrpc_destroy_connection(struct rxrpc_connection *conn)
{
        _enter("%p{%d}", conn, atomic_read(&conn->usage));

        ASSERTCMP(atomic_read(&conn->usage), ==, 0);

        _net("DESTROY CONN %d", conn->debug_id);

        if (conn->bundle)
                rxrpc_put_bundle(conn->trans, conn->bundle);

        ASSERT(RB_EMPTY_ROOT(&conn->calls));
        rxrpc_purge_queue(&conn->rx_queue);

        rxrpc_clear_conn_security(conn);
        rxrpc_put_transport(conn->trans);
        kfree(conn);
        _leave("");
}

/*
 * reap dead connections
 */
static void rxrpc_connection_reaper(struct work_struct *work)
{
        struct rxrpc_connection *conn, *_p;
        unsigned long now, earliest, reap_time;

        LIST_HEAD(graveyard);

        _enter("");

        now = get_seconds();
        earliest = ULONG_MAX;

        write_lock_bh(&rxrpc_connection_lock);
        list_for_each_entry_safe(conn, _p, &rxrpc_connections, link) {
                _debug("reap CONN %d { u=%d,t=%ld }",
                       conn->debug_id, atomic_read(&conn->usage),
                       (long) now - (long) conn->put_time);

                if (likely(atomic_read(&conn->usage) > 0))
                        continue;

                spin_lock(&conn->trans->client_lock);
                write_lock(&conn->trans->conn_lock);
                reap_time = conn->put_time + rxrpc_connection_timeout;

                if (atomic_read(&conn->usage) > 0) {
                        ;
                } else if (reap_time <= now) {
                        list_move_tail(&conn->link, &graveyard);
                        if (conn->out_clientflag)
                                rb_erase(&conn->node,
                                         &conn->trans->client_conns);
                        else
                                rb_erase(&conn->node,
                                         &conn->trans->server_conns);
                        if (conn->bundle) {
                                list_del_init(&conn->bundle_link);
                                conn->bundle->num_conns--;
                        }

                } else if (reap_time < earliest) {
                        earliest = reap_time;
                }

                write_unlock(&conn->trans->conn_lock);
                spin_unlock(&conn->trans->client_lock);
        }
        write_unlock_bh(&rxrpc_connection_lock);

        if (earliest != ULONG_MAX) {
                _debug("reschedule reaper %ld", (long) earliest - now);
                ASSERTCMP(earliest, >, now);
                rxrpc_queue_delayed_work(&rxrpc_connection_reap,
                                         (earliest - now) * HZ);
        }

        /* then destroy all those pulled out */
        while (!list_empty(&graveyard)) {
                conn = list_entry(graveyard.next, struct rxrpc_connection,
                                  link);
                list_del_init(&conn->link);

                ASSERTCMP(atomic_read(&conn->usage), ==, 0);
                rxrpc_destroy_connection(conn);
        }

        _leave("");
}
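
Taken together, rxrpc_put_connection() and the reaper implement lazy destruction: dropping the last reference only timestamps the connection and kicks the reaper, which frees connections whose refcount is still zero after rxrpc_connection_timeout seconds and otherwise reschedules itself for the earliest pending expiry. A self-contained sketch of that bookkeeping, with illustrative types and the 10-minute timeout carried over as an assumption:

/* Illustrative sketch: lazy reaping of unreferenced objects, mirroring the
 * put_time/earliest logic above.  Standalone userspace code, not kernel API. */
#include <limits.h>
#include <stdio.h>
#include <time.h>

#define REAP_TIMEOUT (10 * 60)          /* seconds; mirrors the timeout above */

struct zombie {
        int usage;                      /* refcount */
        unsigned long put_time;         /* when the last ref was dropped */
};

/* Return ULONG_MAX if nothing is left to do, else when to run the reaper next. */
static unsigned long reap(struct zombie *v, int n, unsigned long now)
{
        unsigned long earliest = ULONG_MAX;
        int i;

        for (i = 0; i < n; i++) {
                unsigned long reap_time = v[i].put_time + REAP_TIMEOUT;

                if (v[i].usage > 0)
                        continue;       /* still referenced; leave it alone */
                if (reap_time <= now)
                        printf("destroy object %d\n", i);
                else if (reap_time < earliest)
                        earliest = reap_time;   /* expires soonest */
        }
        return earliest;
}

int main(void)
{
        unsigned long now = (unsigned long) time(NULL);
        struct zombie v[] = {
                { .usage = 1, .put_time = now },                    /* in use */
                { .usage = 0, .put_time = now - 2 * REAP_TIMEOUT }, /* stale */
                { .usage = 0, .put_time = now },                    /* recent */
        };
        unsigned long next = reap(v, 3, now);

        if (next != ULONG_MAX)
                printf("re-run reaper in %lu seconds\n", next - now);
        return 0;
}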

/*
 * preemptively destroy all the connection records rather than waiting for them
 * to time out
 */
void __exit rxrpc_destroy_all_connections(void)
{
        _enter("");

        rxrpc_connection_timeout = 0;
        cancel_delayed_work(&rxrpc_connection_reap);
        rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);

        _leave("");
}
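
rxrpc_destroy_all_connections() forces an immediate reap by zeroing the timeout and re-queuing the reaper, so it only makes sense on the module exit path once no new calls can be created. The sketch below shows where such a call might sit in an exit routine; the companion helpers named here and their ordering are assumptions about the rest of the af_rxrpc module, not something this file defines.

/* Illustrative sketch of a module-exit path that would call the teardown
 * helper above.  The companion functions and their order are assumptions
 * about the rest of the rxrpc module, not verified from this file. */
static void __exit af_rxrpc_exit_sketch(void)
{
        rxrpc_destroy_all_calls();        /* assumed helper: drop live calls */
        rxrpc_destroy_all_connections();  /* defined above: reap immediately */
        rxrpc_destroy_all_transports();   /* assumed helper: free transports */
        rxrpc_destroy_all_peers();        /* assumed helper: free peer records */
        rxrpc_destroy_all_locals();       /* assumed helper: close local endpoints */
}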