Linux Kernel  3.7.1
send.c
1 /*
2  * Copyright (c) 2006 Oracle. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses. You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  * Redistribution and use in source and binary forms, with or
11  * without modification, are permitted provided that the following
12  * conditions are met:
13  *
14  * - Redistributions of source code must retain the above
15  * copyright notice, this list of conditions and the following
16  * disclaimer.
17  *
18  * - Redistributions in binary form must reproduce the above
19  * copyright notice, this list of conditions and the following
20  * disclaimer in the documentation and/or other materials
21  * provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  *
32  */
33 #include <linux/kernel.h>
34 #include <linux/moduleparam.h>
35 #include <linux/gfp.h>
36 #include <net/sock.h>
37 #include <linux/in.h>
38 #include <linux/list.h>
39 #include <linux/ratelimit.h>
40 #include <linux/export.h>
41 
42 #include "rds.h"
43 
44 /* When transmitting messages in rds_send_xmit, we need to emerge from
45  * time to time and briefly release the CPU. Otherwise the softlockup watchdog
46  * will kick our shin.
47  * Also, it seems fairer to not let one busy connection stall all the
48  * others.
49  *
50  * send_batch_count is the number of times we'll loop in send_xmit. Setting
51  * it to 0 will restore the old behavior (where we looped until we had
52  * drained the queue).
53  */
54 static int send_batch_count = 64;
55 module_param(send_batch_count, int, 0444);
56 MODULE_PARM_DESC(send_batch_count, " batch factor when working the send queue");
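/* With permissions 0444 the parameter is read-only at runtime: assuming RDS
 * is built as the usual rds module, it can be inspected under
 * /sys/module/rds/parameters/send_batch_count and only changed at load time,
 * e.g. "modprobe rds send_batch_count=128". */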
57 
58 static void rds_send_remove_from_sock(struct list_head *messages, int status);
59 
60 /*
61  * Reset the send state. Callers must ensure that this doesn't race with
62  * rds_send_xmit().
63  */
64 void rds_send_reset(struct rds_connection *conn)
65 {
66  struct rds_message *rm, *tmp;
67  unsigned long flags;
68 
69  if (conn->c_xmit_rm) {
70  rm = conn->c_xmit_rm;
71  conn->c_xmit_rm = NULL;
72  /* Tell the user the RDMA op is no longer mapped by the
73  * transport. This isn't entirely true (it's flushed out
74  * independently) but as the connection is down, there's
75  * no ongoing RDMA to/from that memory */
76  rds_message_unmapped(rm);
77  rds_message_put(rm);
78  }
79 
80  conn->c_xmit_sg = 0;
81  conn->c_xmit_hdr_off = 0;
82  conn->c_xmit_data_off = 0;
83  conn->c_xmit_atomic_sent = 0;
84  conn->c_xmit_rdma_sent = 0;
85  conn->c_xmit_data_sent = 0;
86 
87  conn->c_map_queued = 0;
88 
89  conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
90  conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
91 
92  /* Mark messages as retransmissions, and move them to the send q */
93  spin_lock_irqsave(&conn->c_lock, flags);
94  list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
95  set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
96  set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags);
97  }
98  list_splice_init(&conn->c_retrans, &conn->c_send_queue);
99  spin_unlock_irqrestore(&conn->c_lock, flags);
100 }
101 
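/* acquire_in_xmit()/release_in_xmit() below act as a try-lock around the
 * RDS_IN_XMIT flag: only the caller that flips the bit from 0 to 1 may enter
 * rds_send_xmit(), and release clears the bit and wakes any sleeper on
 * c_waitq (e.g. a shutdown path waiting for the transmit path to drain). */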
102 static int acquire_in_xmit(struct rds_connection *conn)
103 {
104  return test_and_set_bit(RDS_IN_XMIT, &conn->c_flags) == 0;
105 }
106 
107 static void release_in_xmit(struct rds_connection *conn)
108 {
109  clear_bit(RDS_IN_XMIT, &conn->c_flags);
110  smp_mb__after_clear_bit();
111  /*
112  * We don't use wait_on_bit()/wake_up_bit() because our waking is in a
113  * hot path and finding waiters is very rare. We don't want to walk
114  * the system-wide hashed waitqueue buckets in the fast path only to
115  * almost never find waiters.
116  */
117  if (waitqueue_active(&conn->c_waitq))
118  wake_up_all(&conn->c_waitq);
119 }
120 
121 /*
122  * We're making the conscious trade-off here to only send one message
123  * down the connection at a time.
124  * Pro:
125  * - tx queueing is a simple fifo list
126  * - reassembly is optional and easily done by transports per conn
127  * - no per flow rx lookup at all, straight to the socket
128  * - less per-frag memory and wire overhead
129  * Con:
130  * - queued acks can be delayed behind large messages
131  * Depends:
132  * - small message latency is higher behind queued large messages
133  * - large message latency isn't starved by intervening small sends
134  */
135 int rds_send_xmit(struct rds_connection *conn)
136 {
137  struct rds_message *rm;
138  unsigned long flags;
139  unsigned int tmp;
140  struct scatterlist *sg;
141  int ret = 0;
142  LIST_HEAD(to_be_dropped);
143 
144 restart:
145 
146  /*
147  * sendmsg calls here after having queued its message on the send
148  * queue. We only have one task feeding the connection at a time. If
149  * another thread is already feeding the queue then we back off. This
150  * avoids blocking the caller and trading per-connection data between
151  * caches per message.
152  */
153  if (!acquire_in_xmit(conn)) {
154  rds_stats_inc(s_send_lock_contention);
155  ret = -ENOMEM;
156  goto out;
157  }
158 
159  /*
160  * rds_conn_shutdown() sets the conn state and then tests RDS_IN_XMIT,
161  * we do the opposite to avoid races.
162  */
163  if (!rds_conn_up(conn)) {
164  release_in_xmit(conn);
165  ret = 0;
166  goto out;
167  }
168 
169  if (conn->c_trans->xmit_prepare)
170  conn->c_trans->xmit_prepare(conn);
171 
172  /*
173  * spin trying to push headers and data down the connection until
174  * the connection doesn't make forward progress.
175  */
176  while (1) {
177 
178  rm = conn->c_xmit_rm;
179 
180  /*
181  * If between sending messages, we can send a pending congestion
182  * map update.
183  */
184  if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) {
185  rm = rds_cong_update_alloc(conn);
186  if (IS_ERR(rm)) {
187  ret = PTR_ERR(rm);
188  break;
189  }
190  rm->data.op_active = 1;
191 
192  conn->c_xmit_rm = rm;
193  }
194 
195  /*
196  * If not already working on one, grab the next message.
197  *
198  * c_xmit_rm holds a ref while we're sending this message down
199  * the connection. We can use this ref while holding the
200  * send_sem; rds_send_reset() is serialized with it.
201  */
202  if (!rm) {
203  unsigned int len;
204 
205  spin_lock_irqsave(&conn->c_lock, flags);
206 
207  if (!list_empty(&conn->c_send_queue)) {
208  rm = list_entry(conn->c_send_queue.next,
209  struct rds_message,
210  m_conn_item);
211  rds_message_addref(rm);
212 
213  /*
214  * Move the message from the send queue to the retransmit
215  * list right away.
216  */
217  list_move_tail(&rm->m_conn_item, &conn->c_retrans);
218  }
219 
220  spin_unlock_irqrestore(&conn->c_lock, flags);
221 
222  if (!rm)
223  break;
224 
225  /* Unfortunately, the way Infiniband deals with
226  * RDMA to a bad MR key is by moving the entire
227  * queue pair to error state. We could possibly
228  * recover from that, but right now we drop the
229  * connection.
230  * Therefore, we never retransmit messages with RDMA ops.
231  */
232  if (rm->rdma.op_active &&
233  test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
234  spin_lock_irqsave(&conn->c_lock, flags);
235  if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
236  list_move(&rm->m_conn_item, &to_be_dropped);
237  spin_unlock_irqrestore(&conn->c_lock, flags);
238  continue;
239  }
240 
241  /* Require an ACK every once in a while */
242  len = ntohl(rm->m_inc.i_hdr.h_len);
243  if (conn->c_unacked_packets == 0 ||
244  conn->c_unacked_bytes < len) {
245  __set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
246 
247  conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
248  conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
249  rds_stats_inc(s_send_ack_required);
250  } else {
251  conn->c_unacked_bytes -= len;
252  conn->c_unacked_packets--;
253  }
254 
255  conn->c_xmit_rm = rm;
256  }
257 
258  /* The transport either sends the whole rdma or none of it */
259  if (rm->rdma.op_active && !conn->c_xmit_rdma_sent) {
260  rm->m_final_op = &rm->rdma;
261  ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
262  if (ret)
263  break;
264  conn->c_xmit_rdma_sent = 1;
265 
266  /* The transport owns the mapped memory for now.
267  * You can't unmap it while it's on the send queue */
268  set_bit(RDS_MSG_MAPPED, &rm->m_flags);
269  }
270 
271  if (rm->atomic.op_active && !conn->c_xmit_atomic_sent) {
272  rm->m_final_op = &rm->atomic;
273  ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
274  if (ret)
275  break;
276  conn->c_xmit_atomic_sent = 1;
277 
278  /* The transport owns the mapped memory for now.
279  * You can't unmap it while it's on the send queue */
280  set_bit(RDS_MSG_MAPPED, &rm->m_flags);
281  }
282 
283  /*
284  * A number of cases require an RDS header to be sent
285  * even if there is no data.
286  * We permit 0-byte sends; rds-ping depends on this.
287  * However, if there are exclusively attached silent ops,
288  * we skip the hdr/data send, to enable silent operation.
289  */
290  if (rm->data.op_nents == 0) {
291  int ops_present;
292  int all_ops_are_silent = 1;
293 
294  ops_present = (rm->atomic.op_active || rm->rdma.op_active);
295  if (rm->atomic.op_active && !rm->atomic.op_silent)
296  all_ops_are_silent = 0;
297  if (rm->rdma.op_active && !rm->rdma.op_silent)
298  all_ops_are_silent = 0;
299 
300  if (ops_present && all_ops_are_silent
301  && !rm->m_rdma_cookie)
302  rm->data.op_active = 0;
303  }
304 
305  if (rm->data.op_active && !conn->c_xmit_data_sent) {
306  rm->m_final_op = &rm->data;
307  ret = conn->c_trans->xmit(conn, rm,
308  conn->c_xmit_hdr_off,
309  conn->c_xmit_sg,
310  conn->c_xmit_data_off);
311  if (ret <= 0)
312  break;
313 
314  if (conn->c_xmit_hdr_off < sizeof(struct rds_header)) {
315  tmp = min_t(int, ret,
316  sizeof(struct rds_header) -
317  conn->c_xmit_hdr_off);
318  conn->c_xmit_hdr_off += tmp;
319  ret -= tmp;
320  }
321 
322  sg = &rm->data.op_sg[conn->c_xmit_sg];
323  while (ret) {
324  tmp = min_t(int, ret, sg->length -
325  conn->c_xmit_data_off);
326  conn->c_xmit_data_off += tmp;
327  ret -= tmp;
328  if (conn->c_xmit_data_off == sg->length) {
329  conn->c_xmit_data_off = 0;
330  sg++;
331  conn->c_xmit_sg++;
332  BUG_ON(ret != 0 &&
333  conn->c_xmit_sg == rm->data.op_nents);
334  }
335  }
336 
337  if (conn->c_xmit_hdr_off == sizeof(struct rds_header) &&
338  (conn->c_xmit_sg == rm->data.op_nents))
339  conn->c_xmit_data_sent = 1;
340  }
341 
342  /*
343  * A rm will only take multiple times through this loop
344  * if there is a data op. Thus, if the data is sent (or there was
345  * none), then we're done with the rm.
346  */
347  if (!rm->data.op_active || conn->c_xmit_data_sent) {
348  conn->c_xmit_rm = NULL;
349  conn->c_xmit_sg = 0;
350  conn->c_xmit_hdr_off = 0;
351  conn->c_xmit_data_off = 0;
352  conn->c_xmit_rdma_sent = 0;
353  conn->c_xmit_atomic_sent = 0;
354  conn->c_xmit_data_sent = 0;
355 
356  rds_message_put(rm);
357  }
358  }
359 
360  if (conn->c_trans->xmit_complete)
361  conn->c_trans->xmit_complete(conn);
362 
363  release_in_xmit(conn);
364 
365  /* Nuke any messages we decided not to retransmit. */
366  if (!list_empty(&to_be_dropped)) {
367  /* irqs on here, so we can put(), unlike above */
368  list_for_each_entry(rm, &to_be_dropped, m_conn_item)
369  rds_message_put(rm);
370  rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);
371  }
372 
373  /*
374  * Other senders can queue a message after we last test the send queue
375  * but before we clear RDS_IN_XMIT. In that case they'd back off and
376  * not try and send their newly queued message. We need to check the
377  * send queue after having cleared RDS_IN_XMIT so that their message
378  * doesn't get stuck on the send queue.
379  *
380  * If the transport cannot continue (i.e. ret != 0), then it must
381  * call us when more room is available, such as from the tx
382  * completion handler.
383  */
384  if (ret == 0) {
385  smp_mb();
386  if (!list_empty(&conn->c_send_queue)) {
387  rds_stats_inc(s_send_lock_queue_raced);
388  goto restart;
389  }
390  }
391 out:
392  return ret;
393 }
394 
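/* rds_send_sndbuf_remove() gives back the send-buffer space that
 * rds_send_queue_rm() (further down in this file) charged to rm; both
 * callers in this file hold rs->rs_lock around the call. */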
395 static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
396 {
397  u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);
398 
399  assert_spin_locked(&rs->rs_lock);
400 
401  BUG_ON(rs->rs_snd_bytes < len);
402  rs->rs_snd_bytes -= len;
403 
404  if (rs->rs_snd_bytes == 0)
405  rds_stats_inc(s_send_queue_empty);
406 }
407 
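/* A transport may supply its own is_acked callback (the TCP transport does,
 * per the comment above rds_send_drop_acked() below); without one, a message
 * counts as acked once its header sequence number is <= the acked sequence. */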
408 static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
409  is_acked_func is_acked)
410 {
411  if (is_acked)
412  return is_acked(rm, ack);
413  return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack;
414 }
415 
416 /*
417  * This is pretty similar to what happens below in the ACK
418  * handling code - except that we call here as soon as we get
419  * the IB send completion on the RDMA op and the accompanying
420  * message.
421  */
422 void rds_rdma_send_complete(struct rds_message *rm, int status)
423 {
424  struct rds_sock *rs = NULL;
425  struct rm_rdma_op *ro;
426  struct rds_notifier *notifier;
427  unsigned long flags;
428 
429  spin_lock_irqsave(&rm->m_rs_lock, flags);
430 
431  ro = &rm->rdma;
432  if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
433  ro->op_active && ro->op_notify && ro->op_notifier) {
434  notifier = ro->op_notifier;
435  rs = rm->m_rs;
436  sock_hold(rds_rs_to_sk(rs));
437 
438  notifier->n_status = status;
439  spin_lock(&rs->rs_lock);
440  list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
441  spin_unlock(&rs->rs_lock);
442 
443  ro->op_notifier = NULL;
444  }
445 
446  spin_unlock_irqrestore(&rm->m_rs_lock, flags);
447 
448  if (rs) {
449  rds_wake_sk_sleep(rs);
450  sock_put(rds_rs_to_sk(rs));
451  }
452 }
453 EXPORT_SYMBOL_GPL(rds_rdma_send_complete);
454 
455 /*
456  * Just like above, except looks at atomic op
457  */
458 void rds_atomic_send_complete(struct rds_message *rm, int status)
459 {
460  struct rds_sock *rs = NULL;
461  struct rm_atomic_op *ao;
462  struct rds_notifier *notifier;
463  unsigned long flags;
464 
465  spin_lock_irqsave(&rm->m_rs_lock, flags);
466 
467  ao = &rm->atomic;
468  if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags)
469  && ao->op_active && ao->op_notify && ao->op_notifier) {
470  notifier = ao->op_notifier;
471  rs = rm->m_rs;
472  sock_hold(rds_rs_to_sk(rs));
473 
474  notifier->n_status = status;
475  spin_lock(&rs->rs_lock);
476  list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
477  spin_unlock(&rs->rs_lock);
478 
479  ao->op_notifier = NULL;
480  }
481 
482  spin_unlock_irqrestore(&rm->m_rs_lock, flags);
483 
484  if (rs) {
485  rds_wake_sk_sleep(rs);
486  sock_put(rds_rs_to_sk(rs));
487  }
488 }
489 EXPORT_SYMBOL_GPL(rds_atomic_send_complete);
490 
491 /*
492  * This is the same as rds_rdma_send_complete except we
493  * don't do any locking - we have all the ingredients (message,
494  * socket, socket lock) and can just move the notifier.
495  */
496 static inline void
497 __rds_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
498 {
499  struct rm_rdma_op *ro;
500  struct rm_atomic_op *ao;
501 
502  ro = &rm->rdma;
503  if (ro->op_active && ro->op_notify && ro->op_notifier) {
504  ro->op_notifier->n_status = status;
505  list_add_tail(&ro->op_notifier->n_list, &rs->rs_notify_queue);
506  ro->op_notifier = NULL;
507  }
508 
509  ao = &rm->atomic;
510  if (ao->op_active && ao->op_notify && ao->op_notifier) {
511  ao->op_notifier->n_status = status;
512  list_add_tail(&ao->op_notifier->n_list, &rs->rs_notify_queue);
513  ao->op_notifier = NULL;
514  }
515 
516  /* No need to wake the app - caller does this */
517 }
518 
519 /*
520  * This is called from the IB send completion when we detect
521  * a RDMA operation that failed with remote access error.
522  * So speed is not an issue here.
523  */
524 struct rds_message *rds_send_get_message(struct rds_connection *conn,
525  struct rm_rdma_op *op)
526 {
527  struct rds_message *rm, *tmp, *found = NULL;
528  unsigned long flags;
529 
530  spin_lock_irqsave(&conn->c_lock, flags);
531 
532  list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
533  if (&rm->rdma == op) {
534  atomic_inc(&rm->m_refcount);
535  found = rm;
536  goto out;
537  }
538  }
539 
540  list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
541  if (&rm->rdma == op) {
542  atomic_inc(&rm->m_refcount);
543  found = rm;
544  break;
545  }
546  }
547 
548 out:
549  spin_unlock_irqrestore(&conn->c_lock, flags);
550 
551  return found;
552 }
553 EXPORT_SYMBOL_GPL(rds_send_get_message);
554 
555 /*
556  * This removes messages from the socket's list if they're on it. The list
557  * argument must be private to the caller, we must be able to modify it
558  * without locks. The messages must have a reference held for their
559  * position on the list. This function will drop that reference after
560  * removing the messages from the 'messages' list regardless of if it found
561  * the messages on the socket list or not.
562  */
563 static void rds_send_remove_from_sock(struct list_head *messages, int status)
564 {
565  unsigned long flags;
566  struct rds_sock *rs = NULL;
567  struct rds_message *rm;
568 
569  while (!list_empty(messages)) {
570  int was_on_sock = 0;
571 
572  rm = list_entry(messages->next, struct rds_message,
573  m_conn_item);
574  list_del_init(&rm->m_conn_item);
575 
576  /*
577  * If we see this flag cleared then we're *sure* that someone
578  * else beat us to removing it from the sock. If we race
579  * with their flag update we'll get the lock and then really
580  * see that the flag has been cleared.
581  *
582  * The message spinlock makes sure nobody clears rm->m_rs
583  * while we're messing with it. It does not prevent the
584  * message from being removed from the socket, though.
585  */
586  spin_lock_irqsave(&rm->m_rs_lock, flags);
587  if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
588  goto unlock_and_drop;
589 
590  if (rs != rm->m_rs) {
591  if (rs) {
592  rds_wake_sk_sleep(rs);
593  sock_put(rds_rs_to_sk(rs));
594  }
595  rs = rm->m_rs;
596  sock_hold(rds_rs_to_sk(rs));
597  }
598  spin_lock(&rs->rs_lock);
599 
600  if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
601  struct rm_rdma_op *ro = &rm->rdma;
602  struct rds_notifier *notifier;
603 
604  list_del_init(&rm->m_sock_item);
605  rds_send_sndbuf_remove(rs, rm);
606 
607  if (ro->op_active && ro->op_notifier &&
608  (ro->op_notify || (ro->op_recverr && status))) {
609  notifier = ro->op_notifier;
610  list_add_tail(&notifier->n_list,
611  &rs->rs_notify_queue);
612  if (!notifier->n_status)
613  notifier->n_status = status;
614  rm->rdma.op_notifier = NULL;
615  }
616  was_on_sock = 1;
617  rm->m_rs = NULL;
618  }
619  spin_unlock(&rs->rs_lock);
620 
621 unlock_and_drop:
622  spin_unlock_irqrestore(&rm->m_rs_lock, flags);
623  rds_message_put(rm);
624  if (was_on_sock)
625  rds_message_put(rm);
626  }
627 
628  if (rs) {
629  rds_wake_sk_sleep(rs);
630  sock_put(rds_rs_to_sk(rs));
631  }
632 }
633 
634 /*
635  * Transports call here when they've determined that the receiver queued
636  * messages up to, and including, the given sequence number. Messages are
637  * moved to the retrans queue when rds_send_xmit picks them off the send
638  * queue. This means that in the TCP case, the message may not have been
639  * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
640  * checks the RDS_MSG_HAS_ACK_SEQ bit.
641  *
642  * XXX It's not clear to me how this is safely serialized with socket
643  * destruction. Maybe it should bail if it sees SOCK_DEAD.
644  */
645 void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
646  is_acked_func is_acked)
647 {
648  struct rds_message *rm, *tmp;
649  unsigned long flags;
650  LIST_HEAD(list);
651 
652  spin_lock_irqsave(&conn->c_lock, flags);
653 
654  list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
655  if (!rds_send_is_acked(rm, ack, is_acked))
656  break;
657 
658  list_move(&rm->m_conn_item, &list);
659  clear_bit(RDS_MSG_ON_CONN, &rm->m_flags);
660  }
661 
662  /* order flag updates with spin locks */
663  if (!list_empty(&list))
664  smp_mb__after_clear_bit();
665 
666  spin_unlock_irqrestore(&conn->c_lock, flags);
667 
668  /* now remove the messages from the sock list as needed */
669  rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS);
670 }
671 EXPORT_SYMBOL_GPL(rds_send_drop_acked);
672 
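/* Drop messages queued on a socket, optionally limited to one destination
 * address/port; a NULL dest drops everything pending on the socket. Used,
 * for example, for the RDS_CANCEL_SENT_TO socket option referred to in the
 * comment before rds_send_queue_rm() below. */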
673 void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
674 {
675  struct rds_message *rm, *tmp;
676  struct rds_connection *conn;
677  unsigned long flags;
678  LIST_HEAD(list);
679 
680  /* get all the messages we're dropping under the rs lock */
681  spin_lock_irqsave(&rs->rs_lock, flags);
682 
683  list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) {
684  if (dest && (dest->sin_addr.s_addr != rm->m_daddr ||
685  dest->sin_port != rm->m_inc.i_hdr.h_dport))
686  continue;
687 
688  list_move(&rm->m_sock_item, &list);
689  rds_send_sndbuf_remove(rs, rm);
690  clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
691  }
692 
693  /* order flag updates with the rs lock */
694  smp_mb__after_clear_bit();
695 
696  spin_unlock_irqrestore(&rs->rs_lock, flags);
697 
698  if (list_empty(&list))
699  return;
700 
701  /* Remove the messages from the conn */
702  list_for_each_entry(rm, &list, m_sock_item) {
703 
704  conn = rm->m_inc.i_conn;
705 
706  spin_lock_irqsave(&conn->c_lock, flags);
707  /*
708  * Maybe someone else beat us to removing rm from the conn.
709  * If we race with their flag update we'll get the lock and
710  * then really see that the flag has been cleared.
711  */
712  if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
713  spin_unlock_irqrestore(&conn->c_lock, flags);
714  continue;
715  }
716  list_del_init(&rm->m_conn_item);
717  spin_unlock_irqrestore(&conn->c_lock, flags);
718 
719  /*
720  * Couldn't grab m_rs_lock in top loop (lock ordering),
721  * but we can now.
722  */
723  spin_lock_irqsave(&rm->m_rs_lock, flags);
724 
725  spin_lock(&rs->rs_lock);
726  __rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
727  spin_unlock(&rs->rs_lock);
728 
729  rm->m_rs = NULL;
730  spin_unlock_irqrestore(&rm->m_rs_lock, flags);
731 
732  rds_message_put(rm);
733  }
734 
735  rds_wake_sk_sleep(rs);
736 
737  while (!list_empty(&list)) {
738  rm = list_entry(list.next, struct rds_message, m_sock_item);
739  list_del_init(&rm->m_sock_item);
740 
741  rds_message_wait(rm);
742  rds_message_put(rm);
743  }
744 }
745 
746 /*
747  * we only want this to fire once so we use the caller's 'queued'. It's
748  * possible that another thread can race with us and remove the
749  * message from the flow with RDS_CANCEL_SENT_TO.
750  */
751 static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
752  struct rds_message *rm, __be16 sport,
753  __be16 dport, int *queued)
754 {
755  unsigned long flags;
756  u32 len;
757 
758  if (*queued)
759  goto out;
760 
761  len = be32_to_cpu(rm->m_inc.i_hdr.h_len);
762 
763  /* this is the only place which holds both the socket's rs_lock
764  * and the connection's c_lock */
765  spin_lock_irqsave(&rs->rs_lock, flags);
766 
767  /*
768  * If there is a little space in sndbuf, we don't queue anything,
769  * and userspace gets -EAGAIN. But poll() indicates there's send
770  * room. This can lead to bad behavior (spinning) if snd_bytes isn't
771  * freed up by incoming acks. So we check the *old* value of
772  * rs_snd_bytes here to allow the last msg to exceed the buffer,
773  * and poll() now knows no more data can be sent.
774  */
775  if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) {
776  rs->rs_snd_bytes += len;
777 
778  /* let recv side know we are close to send space exhaustion.
779  * This is probably not the optimal way to do it, as this
780  * means we set the flag on *all* messages as soon as our
781  * throughput hits a certain threshold.
782  */
783  if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2)
784  set_bit(SOCK_NOSPACE, &rds_rs_to_sk(rs)->sk_socket->flags);
785 
786  list_add_tail(&rm->m_sock_item, &rs->rs_send_queue);
787  set_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
788  rds_message_addref(rm);
789  rm->m_rs = rs;
790 
791  /* The code ordering is a little weird, but we're
792  trying to minimize the time we hold c_lock */
793  rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0);
794  rm->m_inc.i_conn = conn;
795  rds_message_addref(rm);
796 
797  spin_lock(&conn->c_lock);
798  rm->m_inc.i_hdr.h_sequence = cpu_to_be64(conn->c_next_tx_seq++);
799  list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
800  set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
801  spin_unlock(&conn->c_lock);
802 
803  rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
804  rm, len, rs, rs->rs_snd_bytes,
805  (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence));
806 
807  *queued = 1;
808  }
809 
810  spin_unlock_irqrestore(&rs->rs_lock, flags);
811 out:
812  return *queued;
813 }
814 
815 /*
816  * rds_message is getting to be quite complicated, and we'd like to allocate
817  * it all in one go. This figures out how big it needs to be up front.
818  */
819 static int rds_rm_size(struct msghdr *msg, int data_len)
820 {
821  struct cmsghdr *cmsg;
822  int size = 0;
823  int cmsg_groups = 0;
824  int retval;
825 
826  for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
827  if (!CMSG_OK(msg, cmsg))
828  return -EINVAL;
829 
830  if (cmsg->cmsg_level != SOL_RDS)
831  continue;
832 
833  switch (cmsg->cmsg_type) {
834  case RDS_CMSG_RDMA_ARGS:
835  cmsg_groups |= 1;
836  retval = rds_rdma_extra_size(CMSG_DATA(cmsg));
837  if (retval < 0)
838  return retval;
839  size += retval;
840 
841  break;
842 
843  case RDS_CMSG_RDMA_DEST:
844  case RDS_CMSG_RDMA_MAP:
845  cmsg_groups |= 2;
846  /* these are valid but do not add any size */
847  break;
848 
849  case RDS_CMSG_ATOMIC_CSWP:
850  case RDS_CMSG_ATOMIC_FADD:
851  case RDS_CMSG_MASKED_ATOMIC_CSWP:
852  case RDS_CMSG_MASKED_ATOMIC_FADD:
853  cmsg_groups |= 1;
854  size += sizeof(struct scatterlist);
855  break;
856 
857  default:
858  return -EINVAL;
859  }
860 
861  }
862 
863  size += ceil(data_len, PAGE_SIZE) * sizeof(struct scatterlist);
864 
865  /* Ensure (DEST, MAP) are never used with (ARGS, ATOMIC) */
866  if (cmsg_groups == 3)
867  return -EINVAL;
868 
869  return size;
870 }
871 
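/* Walk the SOL_RDS control messages attached to a sendmsg() call and apply
 * each to rm: RDMA transfer args, RDMA cookie destinations, MR mappings and
 * atomic ops. Control messages at other levels are skipped; a malformed cmsg
 * or an unknown SOL_RDS type fails the whole send with -EINVAL. */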
872 static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
873  struct msghdr *msg, int *allocated_mr)
874 {
875  struct cmsghdr *cmsg;
876  int ret = 0;
877 
878  for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
879  if (!CMSG_OK(msg, cmsg))
880  return -EINVAL;
881 
882  if (cmsg->cmsg_level != SOL_RDS)
883  continue;
884 
885  /* As a side effect, RDMA_DEST and RDMA_MAP will set
886  * rm->rdma.m_rdma_cookie and rm->rdma.m_rdma_mr.
887  */
888  switch (cmsg->cmsg_type) {
889  case RDS_CMSG_RDMA_ARGS:
890  ret = rds_cmsg_rdma_args(rs, rm, cmsg);
891  break;
892 
893  case RDS_CMSG_RDMA_DEST:
894  ret = rds_cmsg_rdma_dest(rs, rm, cmsg);
895  break;
896 
897  case RDS_CMSG_RDMA_MAP:
898  ret = rds_cmsg_rdma_map(rs, rm, cmsg);
899  if (!ret)
900  *allocated_mr = 1;
901  break;
902  case RDS_CMSG_ATOMIC_CSWP:
903  case RDS_CMSG_ATOMIC_FADD:
904  case RDS_CMSG_MASKED_ATOMIC_CSWP:
905  case RDS_CMSG_MASKED_ATOMIC_FADD:
906  ret = rds_cmsg_atomic(rs, rm, cmsg);
907  break;
908 
909  default:
910  return -EINVAL;
911  }
912 
913  if (ret)
914  break;
915  }
916 
917  return ret;
918 }
919 
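/* Socket-level sendmsg() entry point: validate flags and destination, size
 * and allocate the rds_message via rds_rm_size(), copy in the payload, parse
 * any control messages, queue the message on the connection and, unless the
 * transport has marked the connection full (RDS_LL_SEND_FULL), kick
 * rds_send_xmit() directly. */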
920 int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
921  size_t payload_len)
922 {
923  struct sock *sk = sock->sk;
924  struct rds_sock *rs = rds_sk_to_rs(sk);
925  struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name;
926  __be32 daddr;
927  __be16 dport;
928  struct rds_message *rm = NULL;
929  struct rds_connection *conn;
930  int ret = 0;
931  int queued = 0, allocated_mr = 0;
932  int nonblock = msg->msg_flags & MSG_DONTWAIT;
933  long timeo = sock_sndtimeo(sk, nonblock);
934 
935  /* Mirror how Linux UDP mirrors BSD's error message compatibility */
936  /* XXX: Perhaps MSG_MORE someday */
937  if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT)) {
938  ret = -EOPNOTSUPP;
939  goto out;
940  }
941 
942  if (msg->msg_namelen) {
943  /* XXX fail non-unicast destination IPs? */
944  if (msg->msg_namelen < sizeof(*usin) || usin->sin_family != AF_INET) {
945  ret = -EINVAL;
946  goto out;
947  }
948  daddr = usin->sin_addr.s_addr;
949  dport = usin->sin_port;
950  } else {
951  /* We only care about consistency with ->connect() */
952  lock_sock(sk);
953  daddr = rs->rs_conn_addr;
954  dport = rs->rs_conn_port;
955  release_sock(sk);
956  }
957 
958  /* racing with another thread binding seems ok here */
959  if (daddr == 0 || rs->rs_bound_addr == 0) {
960  ret = -ENOTCONN; /* XXX not a great errno */
961  goto out;
962  }
963 
964  /* size of rm including all sgs */
965  ret = rds_rm_size(msg, payload_len);
966  if (ret < 0)
967  goto out;
968 
969  rm = rds_message_alloc(ret, GFP_KERNEL);
970  if (!rm) {
971  ret = -ENOMEM;
972  goto out;
973  }
974 
975  /* Attach data to the rm */
976  if (payload_len) {
977  rm->data.op_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE));
978  if (!rm->data.op_sg) {
979  ret = -ENOMEM;
980  goto out;
981  }
982  ret = rds_message_copy_from_user(rm, msg->msg_iov, payload_len);
983  if (ret)
984  goto out;
985  }
986  rm->data.op_active = 1;
987 
988  rm->m_daddr = daddr;
989 
990  /* rds_conn_create has a spinlock that runs with IRQ off.
991  * Caching the conn in the socket helps a lot. */
992  if (rs->rs_conn && rs->rs_conn->c_faddr == daddr)
993  conn = rs->rs_conn;
994  else {
995  conn = rds_conn_create_outgoing(rs->rs_bound_addr, daddr,
996  rs->rs_transport,
997  sock->sk->sk_allocation);
998  if (IS_ERR(conn)) {
999  ret = PTR_ERR(conn);
1000  goto out;
1001  }
1002  rs->rs_conn = conn;
1003  }
1004 
1005  /* Parse any control messages the user may have included. */
1006  ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
1007  if (ret)
1008  goto out;
1009 
1010  if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
1011  printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
1012  &rm->rdma, conn->c_trans->xmit_rdma);
1013  ret = -EOPNOTSUPP;
1014  goto out;
1015  }
1016 
1017  if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) {
1018  printk_ratelimited(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n",
1019  &rm->atomic, conn->c_trans->xmit_atomic);
1020  ret = -EOPNOTSUPP;
1021  goto out;
1022  }
1023 
1024  rds_conn_connect_if_down(conn);
1025 
1026  ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
1027  if (ret) {
1028  rs->rs_seen_congestion = 1;
1029  goto out;
1030  }
1031 
1032  while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port,
1033  dport, &queued)) {
1034  rds_stats_inc(s_send_queue_full);
1035  /* XXX make sure this is reasonable */
1036  if (payload_len > rds_sk_sndbuf(rs)) {
1037  ret = -EMSGSIZE;
1038  goto out;
1039  }
1040  if (nonblock) {
1041  ret = -EAGAIN;
1042  goto out;
1043  }
1044 
1045  timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
1046  rds_send_queue_rm(rs, conn, rm,
1047  rs->rs_bound_port,
1048  dport,
1049  &queued),
1050  timeo);
1051  rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo);
1052  if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
1053  continue;
1054 
1055  ret = timeo;
1056  if (ret == 0)
1057  ret = -ETIMEDOUT;
1058  goto out;
1059  }
1060 
1061  /*
1062  * By now we've committed to the send. We reuse rds_send_worker()
1063  * to retry sends in the rds thread if the transport asks us to.
1064  */
1065  rds_stats_inc(s_send_queued);
1066 
1067  if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
1068  rds_send_xmit(conn);
1069 
1070  rds_message_put(rm);
1071  return payload_len;
1072 
1073 out:
1074  /* If the user included an RDMA_MAP cmsg, we allocated an MR on the fly.
1075  * If the sendmsg goes through, we keep the MR. If it fails with EAGAIN
1076  * or in any other way, we need to destroy the MR again */
1077  if (allocated_mr)
1078  rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1);
1079 
1080  if (rm)
1081  rds_message_put(rm);
1082  return ret;
1083 }
1084 
1085 /*
1086  * Reply to a ping packet.
1087  */
1088 int
1089 rds_send_pong(struct rds_connection *conn, __be16 dport)
1090 {
1091  struct rds_message *rm;
1092  unsigned long flags;
1093  int ret = 0;
1094 
1095  rm = rds_message_alloc(0, GFP_ATOMIC);
1096  if (!rm) {
1097  ret = -ENOMEM;
1098  goto out;
1099  }
1100 
1101  rm->m_daddr = conn->c_faddr;
1102  rm->data.op_active = 1;
1103 
1104  rds_conn_connect_if_down(conn);
1105 
1106  ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL);
1107  if (ret)
1108  goto out;
1109 
1110  spin_lock_irqsave(&conn->c_lock, flags);
1111  list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
1112  set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
1113  rds_message_addref(rm);
1114  rm->m_inc.i_conn = conn;
1115 
1116  rds_message_populate_header(&rm->m_inc.i_hdr, 0, dport,
1117  conn->c_next_tx_seq);
1118  conn->c_next_tx_seq++;
1119  spin_unlock_irqrestore(&conn->c_lock, flags);
1120 
1121  rds_stats_inc(s_send_queued);
1122  rds_stats_inc(s_send_pong);
1123 
1124  if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
1125  queue_delayed_work(rds_wq, &conn->c_send_w, 0);
1126 
1127  rds_message_put(rm);
1128  return 0;
1129 
1130 out:
1131  if (rm)
1132  rds_message_put(rm);
1133  return ret;
1134 }