32 #include <linux/module.h>
33 #include <linux/kernel.h>
34 #include <linux/slab.h>
/*
 * 0x20000 = 128 KiB.  NOTE(review): presumably the largest copy-bounce
 * buffer size this driver will try to obtain via kmalloc() before falling
 * back to another allocation scheme — confirm against the allocation site
 * (not visible in this chunk).
 */
#define ISER_KMALLOC_THRESHOLD 0x20000
46 static int iser_start_rdma_unaligned_sg(
struct iscsi_iser_task *iser_task,
53 unsigned long cmd_data_len = data->
data_len;
62 iser_err(
"Failed to allocate mem size %d %d for copying sglist\n",
63 data->
size,(
int)cmd_data_len);
92 dev = iser_task->
iser_conn->ib_conn->device->ib_device;
93 dma_nents = ib_dma_map_sg(dev,
100 iser_task->
data_copy[cmd_dir].dma_nents = dma_nents;
112 unsigned long cmd_data_len;
114 dev = iser_task->
iser_conn->ib_conn->device->ib_device;
115 mem_copy = &iser_task->
data_copy[cmd_dir];
117 ib_dma_unmap_sg(dev, &mem_copy->
sg_single, 1,
124 unsigned char *
p, *to;
125 unsigned int sg_size;
145 cmd_data_len = iser_task->
data[cmd_dir].data_len;
/*
 * True when @addr sits on a 4 KiB boundary: masking with ~MASK_4K keeps
 * only the low (page-offset) bits, which must all be zero.
 * @addr is parenthesized before the cast so expression arguments such as
 * IS_4K_ALIGNED(base + off) are cast as a whole, not just their first
 * operand (CERT PRE01-C).
 */
#define IS_4K_ALIGNED(addr) ((((unsigned long)(addr)) & ~MASK_4K) == 0)
177 unsigned long total_sz = 0;
178 unsigned int dma_len;
179 int i, new_chunk, cur_page, last_ent = data->
dma_nents - 1;
187 start_addr = ib_sg_dma_address(ibdev, sg);
190 dma_len = ib_sg_dma_len(ibdev, sg);
191 end_addr = start_addr + dma_len;
208 }
while (
page < end_addr);
228 int i, ret_len, start_check = 0;
234 start_addr = ib_sg_dma_address(ibdev, sgl);
244 end_addr = start_addr + ib_sg_dma_len(ibdev, sg);
245 start_addr = ib_sg_dma_address(ibdev, next_sg);
247 if (end_addr == start_addr) {
256 ret_len = (next_sg) ? i : i+1;
257 iser_dbg(
"Found %d aligned entries out of %d in sg:0x%p\n",
274 "off:0
x%
x sz:0
x%
x dma_len:0
x%
x\
n",
275 i, (
unsigned long)ib_sg_dma_address(ibdev, sg),
277 sg->
length, ib_sg_dma_len(ibdev, sg));
284 iser_err(
"page vec length %d data size %d\n",
285 page_vec->length, page_vec->data_size);
286 for (i = 0; i < page_vec->length; i++)
287 iser_err(
"%d %lx\n",i,(
unsigned long)page_vec->pages[i]);
294 int page_vec_len = 0;
300 page_vec_len = iser_sg_to_page_vec(data, page_vec, ibdev);
303 page_vec->
length = page_vec_len;
305 if (page_vec_len * SIZE_4K < page_vec->
data_size) {
306 iser_err(
"page_vec too short to hold this SG\n");
307 iser_data_buf_dump(data, ibdev);
308 iser_dump_page_vec(page_vec);
320 iser_task->
dir[iser_dir] = 1;
321 dev = iser_task->
iser_conn->ib_conn->device->ib_device;
336 dev = iser_task->
iser_conn->ib_conn->device->ib_device;
369 regd_buf = &iser_task->
rdma_regd[cmd_dir];
371 aligned_len = iser_data_buf_aligned_len(mem, ibdev);
374 iser_warn(
"rdma alignment violation %d/%d aligned\n",
375 aligned_len, mem->
size);
376 iser_data_buf_dump(mem, ibdev);
383 if (iser_start_rdma_unaligned_sg(iser_task, cmd_dir) != 0)
392 regd_buf->
reg.lkey = device->
mr->lkey;
393 regd_buf->
reg.rkey = device->
mr->rkey;
394 regd_buf->
reg.len = ib_sg_dma_len(ibdev, &sg[0]);
395 regd_buf->
reg.va = ib_sg_dma_address(ibdev, &sg[0]);
396 regd_buf->
reg.is_fmr = 0;
398 iser_dbg(
"PHYSICAL Mem.register: lkey: 0x%08X rkey: 0x%08X "
399 "va: 0x%08lX sz: %ld]\n",
400 (
unsigned int)regd_buf->
reg.lkey,
401 (
unsigned int)regd_buf->
reg.rkey,
402 (
unsigned long)regd_buf->
reg.va,
403 (
unsigned long)regd_buf->
reg.len);
405 iser_page_vec_build(mem, ib_conn->
page_vec, ibdev);
408 iser_data_buf_dump(mem, ibdev);
409 iser_err(
"mem->dma_nents = %d (dlength = 0x%x)\n",
411 ntoh24(iser_task->
desc.iscsi_header.dlength));
412 iser_err(
"page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
415 for (i=0 ; i<ib_conn->
page_vec->length ; i++)
416 iser_err(
"page_vec[%d] = 0x%llx\n", i,
417 (
unsigned long long) ib_conn->
page_vec->pages[i]);