Linux Kernel 3.7.1
message.c
/*
 * Copyright (c) 2006 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>

#include "rds.h"

static unsigned int	rds_exthdr_size[__RDS_EXTHDR_MAX] = {
[RDS_EXTHDR_NONE]	= 0,
[RDS_EXTHDR_VERSION]	= sizeof(struct rds_ext_header_version),
[RDS_EXTHDR_RDMA]	= sizeof(struct rds_ext_header_rdma),
[RDS_EXTHDR_RDMA_DEST]	= sizeof(struct rds_ext_header_rdma_dest),
};


void rds_message_addref(struct rds_message *rm)
{
	rdsdebug("addref rm %p ref %d\n", rm, atomic_read(&rm->m_refcount));
	atomic_inc(&rm->m_refcount);
}
EXPORT_SYMBOL_GPL(rds_message_addref);

/*
 * This relies on dma_map_sg() not touching sg[].page during merging.
 */
static void rds_message_purge(struct rds_message *rm)
{
	unsigned long i;

	if (unlikely(test_bit(RDS_MSG_PAGEVEC, &rm->m_flags)))
		return;

	for (i = 0; i < rm->data.op_nents; i++) {
		rdsdebug("putting data page %p\n", (void *)sg_page(&rm->data.op_sg[i]));
		/* XXX will have to put_page for page refs */
		__free_page(sg_page(&rm->data.op_sg[i]));
	}
	rm->data.op_nents = 0;

	if (rm->rdma.op_active)
		rds_rdma_free_op(&rm->rdma);
	if (rm->rdma.op_rdma_mr)
		rds_mr_put(rm->rdma.op_rdma_mr);

	if (rm->atomic.op_active)
		rds_atomic_free_op(&rm->atomic);
	if (rm->atomic.op_rdma_mr)
		rds_mr_put(rm->atomic.op_rdma_mr);
}

void rds_message_put(struct rds_message *rm)
{
	rdsdebug("put rm %p ref %d\n", rm, atomic_read(&rm->m_refcount));
	if (atomic_read(&rm->m_refcount) == 0) {
		printk(KERN_CRIT "danger refcount zero on %p\n", rm);
		WARN_ON(1);
	}
	if (atomic_dec_and_test(&rm->m_refcount)) {
		BUG_ON(!list_empty(&rm->m_sock_item));
		BUG_ON(!list_empty(&rm->m_conn_item));
		rds_message_purge(rm);

		kfree(rm);
	}
}
EXPORT_SYMBOL_GPL(rds_message_put);

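/*
 * Usage sketch (not part of the original file): every reference taken
 * with rds_message_addref() must be balanced by a rds_message_put(),
 * and rds_message_alloc() below hands back the message with one
 * reference already held.  A caller parking the message on a second
 * list (some_list here is a stand-in for whatever list the caller
 * owns) would do:
 *
 *	rds_message_addref(rm);
 *	list_add_tail(&rm->m_conn_item, &some_list);
 *	...
 *	list_del_init(&rm->m_conn_item);
 *	rds_message_put(rm);
 *
 * Once the final reference drops, rds_message_purge() releases the
 * data pages and any rdma/atomic state before the kfree() above.
 */
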
void rds_message_populate_header(struct rds_header *hdr, __be16 sport,
				 __be16 dport, u64 seq)
{
	hdr->h_flags = 0;
	hdr->h_sport = sport;
	hdr->h_dport = dport;
	hdr->h_sequence = cpu_to_be64(seq);
	hdr->h_exthdr[0] = RDS_EXTHDR_NONE;
}
EXPORT_SYMBOL_GPL(rds_message_populate_header);

int rds_message_add_extension(struct rds_header *hdr, unsigned int type,
			      const void *data, unsigned int len)
{
	unsigned int ext_len = sizeof(u8) + len;
	unsigned char *dst;

	/* For now, refuse to add more than one extension header */
	if (hdr->h_exthdr[0] != RDS_EXTHDR_NONE)
		return 0;

	if (type >= __RDS_EXTHDR_MAX || len != rds_exthdr_size[type])
		return 0;

	if (ext_len >= RDS_HEADER_EXT_SPACE)
		return 0;
	dst = hdr->h_exthdr;

	*dst++ = type;
	memcpy(dst, data, len);

	dst[len] = RDS_EXTHDR_NONE;
	return 1;
}
EXPORT_SYMBOL_GPL(rds_message_add_extension);

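/*
 * Example (illustrative, not part of the original file): the length
 * passed in must match rds_exthdr_size[type] exactly, and the return
 * value is 1 on success, 0 if the header already carries an extension
 * or the type/length pair is invalid.  Advertising a protocol version
 * would look like:
 *
 *	struct rds_ext_header_version ext;
 *
 *	ext.h_version = cpu_to_be32(RDS_PROTOCOL_3_1);
 *	if (!rds_message_add_extension(&rm->m_inc.i_hdr,
 *				       RDS_EXTHDR_VERSION,
 *				       &ext, sizeof(ext)))
 *		... bail out, the extension space is taken ...
 */
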
/*
 * If a message has extension headers, retrieve them here.
 * Call like this:
 *
 * unsigned int pos = 0;
 *
 * while (1) {
 *	buflen = sizeof(buffer);
 *	type = rds_message_next_extension(hdr, &pos, buffer, &buflen);
 *	if (type == RDS_EXTHDR_NONE)
 *		break;
 *	...
 * }
 */
int rds_message_next_extension(struct rds_header *hdr,
		unsigned int *pos, void *buf, unsigned int *buflen)
{
	unsigned int offset, ext_type, ext_len;
	u8 *src = hdr->h_exthdr;

	offset = *pos;
	if (offset >= RDS_HEADER_EXT_SPACE)
		goto none;

	/* Get the extension type and length. For now, the
	 * length is implied by the extension type. */
	ext_type = src[offset++];

	if (ext_type == RDS_EXTHDR_NONE || ext_type >= __RDS_EXTHDR_MAX)
		goto none;
	ext_len = rds_exthdr_size[ext_type];
	if (offset + ext_len > RDS_HEADER_EXT_SPACE)
		goto none;

	*pos = offset + ext_len;
	if (ext_len < *buflen)
		*buflen = ext_len;
	memcpy(buf, src + offset, *buflen);
	return ext_type;

none:
	*pos = RDS_HEADER_EXT_SPACE;
	*buflen = 0;
	return RDS_EXTHDR_NONE;
}
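/*
 * A fuller version of the loop sketched in the comment above
 * (illustrative, not part of the original file), with the
 * declarations spelled out; a union sized for the largest extension
 * keeps the buffer bookkeeping simple:
 *
 *	union {
 *		struct rds_ext_header_version version;
 *		struct rds_ext_header_rdma rdma;
 *		struct rds_ext_header_rdma_dest rdma_dest;
 *	} buffer;
 *	unsigned int pos = 0, type, buflen;
 *
 *	while (1) {
 *		buflen = sizeof(buffer);
 *		type = rds_message_next_extension(hdr, &pos,
 *						  &buffer, &buflen);
 *		if (type == RDS_EXTHDR_NONE)
 *			break;
 *		switch (type) { ... }
 *	}
 */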

int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 offset)
{
	struct rds_ext_header_rdma_dest ext_hdr;

	ext_hdr.h_rdma_rkey = cpu_to_be32(r_key);
	ext_hdr.h_rdma_offset = cpu_to_be32(offset);
	return rds_message_add_extension(hdr, RDS_EXTHDR_RDMA_DEST, &ext_hdr, sizeof(ext_hdr));
}
EXPORT_SYMBOL_GPL(rds_message_add_rdma_dest_extension);

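/*
 * Example (illustrative, not part of the original file): a sender
 * advertising an RDMA destination stamps the header and then attaches
 * the rkey/offset pair; rds_rdma_cookie_key() and
 * rds_rdma_cookie_offset() are the cookie accessors from rds.h:
 *
 *	rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, seq);
 *	rds_message_add_rdma_dest_extension(&rm->m_inc.i_hdr,
 *			rds_rdma_cookie_key(cookie),
 *			rds_rdma_cookie_offset(cookie));
 */
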
/*
 * Each rds_message is allocated with extra space for the scatterlist entries
 * rds ops will need. This is to minimize memory allocation count. Then, each rds op
 * can grab SGs when initializing its part of the rds_message.
 */
struct rds_message *rds_message_alloc(unsigned int extra_len, gfp_t gfp)
{
	struct rds_message *rm;

	rm = kzalloc(sizeof(struct rds_message) + extra_len, gfp);
	if (!rm)
		goto out;

	rm->m_used_sgs = 0;
	rm->m_total_sgs = extra_len / sizeof(struct scatterlist);

	atomic_set(&rm->m_refcount, 1);
	INIT_LIST_HEAD(&rm->m_sock_item);
	INIT_LIST_HEAD(&rm->m_conn_item);
	spin_lock_init(&rm->m_rs_lock);
	init_waitqueue_head(&rm->m_flush_wait);

out:
	return rm;
}

/*
 * RDS ops use this to grab SG entries from the rm's sg pool.
 */
struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents)
{
	struct scatterlist *sg_first = (struct scatterlist *) &rm[1];
	struct scatterlist *sg_ret;

	WARN_ON(rm->m_used_sgs + nents > rm->m_total_sgs);
	WARN_ON(!nents);

	if (rm->m_used_sgs + nents > rm->m_total_sgs)
		return NULL;

	sg_ret = &sg_first[rm->m_used_sgs];
	sg_init_table(sg_ret, nents);
	rm->m_used_sgs += nents;

	return sg_ret;
}

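/*
 * Example (illustrative, not part of the original file): the intended
 * pairing of the two allocators above.  The caller sizes the trailing
 * scatterlist pool when allocating the message, then each op carves
 * its entries out of that pool:
 *
 *	int nents = ceil(payload_len, PAGE_SIZE);
 *	struct rds_message *rm;
 *
 *	rm = rds_message_alloc(nents * sizeof(struct scatterlist),
 *			       GFP_KERNEL);
 *	if (!rm)
 *		return -ENOMEM;
 *	rm->data.op_sg = rds_message_alloc_sgs(rm, nents);
 *	if (!rm->data.op_sg) {
 *		rds_message_put(rm);
 *		return -ENOMEM;
 *	}
 *
 * rds_message_map_pages() below follows exactly this sequence.
 */
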
struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len)
{
	struct rds_message *rm;
	unsigned int i;
	int num_sgs = ceil(total_len, PAGE_SIZE);
	int extra_bytes = num_sgs * sizeof(struct scatterlist);

	rm = rds_message_alloc(extra_bytes, GFP_NOWAIT);
	if (!rm)
		return ERR_PTR(-ENOMEM);

	set_bit(RDS_MSG_PAGEVEC, &rm->m_flags);
	rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len);
	rm->data.op_nents = ceil(total_len, PAGE_SIZE);
	rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
	if (!rm->data.op_sg) {
		rds_message_put(rm);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < rm->data.op_nents; ++i) {
		sg_set_page(&rm->data.op_sg[i],
			    virt_to_page(page_addrs[i]),
			    PAGE_SIZE, 0);
	}

	return rm;
}

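/*
 * Note (not part of the original file): because rds_message_map_pages()
 * sets RDS_MSG_PAGEVEC, rds_message_purge() returns early for these
 * messages and never __free_page()'s the wrapped pages; their lifetime
 * remains the caller's responsibility.
 */
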
int rds_message_copy_from_user(struct rds_message *rm, struct iovec *first_iov,
			       size_t total_len)
{
	unsigned long to_copy;
	unsigned long iov_off;
	unsigned long sg_off;
	struct iovec *iov;
	struct scatterlist *sg;
	int ret = 0;

	rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len);

	/*
	 * now allocate and copy in the data payload.
	 */
	sg = rm->data.op_sg;
	iov = first_iov;
	iov_off = 0;
	sg_off = 0; /* Dear gcc, sg->page will be null from kzalloc. */

	while (total_len) {
		if (!sg_page(sg)) {
			ret = rds_page_remainder_alloc(sg, total_len,
						       GFP_HIGHUSER);
			if (ret)
				goto out;
			rm->data.op_nents++;
			sg_off = 0;
		}

		while (iov_off == iov->iov_len) {
			iov_off = 0;
			iov++;
		}

		to_copy = min(iov->iov_len - iov_off, sg->length - sg_off);
		to_copy = min_t(size_t, to_copy, total_len);

		rdsdebug("copying %lu bytes from user iov [%p, %zu] + %lu to "
			 "sg [%p, %u, %u] + %lu\n",
			 to_copy, iov->iov_base, iov->iov_len, iov_off,
			 (void *)sg_page(sg), sg->offset, sg->length, sg_off);

		ret = rds_page_copy_from_user(sg_page(sg), sg->offset + sg_off,
					      iov->iov_base + iov_off,
					      to_copy);
		if (ret)
			goto out;

		iov_off += to_copy;
		total_len -= to_copy;
		sg_off += to_copy;

		if (sg_off == sg->length)
			sg++;
	}

out:
	return ret;
}

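/*
 * Example (illustrative, not part of the original file): the sendmsg
 * path uses the routine above roughly like this, after reserving the
 * scatterlist pool for the payload:
 *
 *	rm->data.op_sg = rds_message_alloc_sgs(rm,
 *				ceil(payload_len, PAGE_SIZE));
 *	ret = rds_message_copy_from_user(rm, msg->msg_iov, payload_len);
 *
 * Pages are allocated lazily inside the copy loop via
 * rds_page_remainder_alloc(), so a zero-length payload never touches
 * the page allocator.
 */
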
int rds_message_inc_copy_to_user(struct rds_incoming *inc,
				 struct iovec *first_iov, size_t size)
{
	struct rds_message *rm;
	struct iovec *iov;
	struct scatterlist *sg;
	unsigned long to_copy;
	unsigned long iov_off;
	unsigned long vec_off;
	int copied;
	int ret;
	u32 len;

	rm = container_of(inc, struct rds_message, m_inc);
	len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	iov = first_iov;
	iov_off = 0;
	sg = rm->data.op_sg;
	vec_off = 0;
	copied = 0;

	while (copied < size && copied < len) {
		while (iov_off == iov->iov_len) {
			iov_off = 0;
			iov++;
		}

		to_copy = min(iov->iov_len - iov_off, sg->length - vec_off);
		to_copy = min_t(size_t, to_copy, size - copied);
		to_copy = min_t(unsigned long, to_copy, len - copied);

		rdsdebug("copying %lu bytes to user iov [%p, %zu] + %lu to "
			 "sg [%p, %u, %u] + %lu\n",
			 to_copy, iov->iov_base, iov->iov_len, iov_off,
			 sg_page(sg), sg->offset, sg->length, vec_off);

		ret = rds_page_copy_to_user(sg_page(sg), sg->offset + vec_off,
					    iov->iov_base + iov_off,
					    to_copy);
		if (ret) {
			copied = ret;
			break;
		}

		iov_off += to_copy;
		vec_off += to_copy;
		copied += to_copy;

		if (vec_off == sg->length) {
			vec_off = 0;
			sg++;
		}
	}

	return copied;
}

/*
 * If the message is still on the send queue, wait until the transport
 * is done with it. This is particularly important for RDMA operations.
 */
void rds_message_wait(struct rds_message *rm)
{
	wait_event_interruptible(rm->m_flush_wait,
			!test_bit(RDS_MSG_MAPPED, &rm->m_flags));
}

void rds_message_unmapped(struct rds_message *rm)
{
	clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
	wake_up_interruptible(&rm->m_flush_wait);
}
EXPORT_SYMBOL_GPL(rds_message_unmapped);

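/*
 * Usage sketch (not part of the original file): a transport that maps
 * the message for DMA sets RDS_MSG_MAPPED before posting the send and
 * calls rds_message_unmapped() from its completion handler.  A
 * teardown path that must not free the message while DMA is still in
 * flight parks on the same flag:
 *
 *	if (test_bit(RDS_MSG_MAPPED, &rm->m_flags))
 *		rds_message_wait(rm);
 *	rds_message_put(rm);
 */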