#include <linux/errno.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "icm.h"
#define MLX4_MPT_FLAG_SW_OWNS		(0xfUL << 28)
#define MLX4_MPT_FLAG_FREE		(0x3UL << 28)
#define MLX4_MPT_FLAG_MIO		(1 << 17)
#define MLX4_MPT_FLAG_BIND_ENABLE	(1 << 15)
#define MLX4_MPT_FLAG_PHYSICAL		(1 <<  9)
#define MLX4_MPT_FLAG_REGION		(1 <<  8)

#define MLX4_MPT_PD_FLAG_FAST_REG	(1 << 27)
#define MLX4_MPT_PD_FLAG_RAE		(1 << 28)
#define MLX4_MPT_PD_FLAG_EN_INV		(3 << 24)
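
/* The first byte of an MPT entry is an ownership flag: software may touch
 * the entry only while it reads MLX4_MPT_STATUS_SW, and hands it back to
 * the device by writing MLX4_MPT_STATUS_HW (used for FMR remapping below). */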
#define MLX4_MPT_STATUS_SW		0xF0
#define MLX4_MPT_STATUS_HW		0x00
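
/* MTT segments come from a power-of-two buddy allocator: bits[o] is a bitmap
 * of free blocks of order o, protected by buddy->lock. Allocation splits the
 * smallest sufficient free block; freeing merges a block with its buddy
 * (seg ^ 1) for as long as the buddy is also free. */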
	spin_lock(&buddy->lock);

	for (o = order; o <= buddy->max_order; ++o)

	spin_unlock(&buddy->lock);

	spin_unlock(&buddy->lock);

	spin_lock(&buddy->lock);

	spin_unlock(&buddy->lock);
static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
		if (!buddy->bits[i]) {
			buddy->bits[i] = vzalloc(s * sizeof(long));
			if (!buddy->bits[i])
				goto err_out_free;
		}
		if (buddy->bits[i] && is_vmalloc_addr(buddy->bits[i]))
			vfree(buddy->bits[i]);
		else
			kfree(buddy->bits[i]);
static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy)
		if (is_vmalloc_addr(buddy->bits[i]))
			vfree(buddy->bits[i]);
		else
			kfree(buddy->bits[i]);
	seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, seg_order);
	if (seg == -1)
		return -1;
	offset = seg * (1 << log_mtts_per_seg);

	if (mlx4_table_get_range(dev, &mr_table->mtt_table, offset,
				 offset + (1 << order) - 1)) {
		mlx4_buddy_free(&mr_table->mtt_buddy, seg, seg_order);
		return -1;
	}
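	/* Under SR-IOV (mlx4_is_mfunc), a slave cannot touch the buddy
	 * directly: the request is forwarded to the master through the
	 * command interface, and the offset comes back in out_param. */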
	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, order);
		err = mlx4_cmd_imm(dev, in_param, &out_param, RES_MTT,
				   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (err)
			return -1;
		return get_param_l(&out_param);
	}
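	/* MTT ranges are always a power of two: round npages up to compute
	 * the allocation order. */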
	for (mtt->order = 0, i = 1; i < npages; i <<= 1)
		++mtt->order;

	mtt->offset = mlx4_alloc_mtt_range(dev, mtt->order);
	if (mtt->offset == -1)
		return -ENOMEM;
	mlx4_buddy_free(&mr_table->mtt_buddy, first_seg, seg_order);
	mlx4_table_put_range(dev, &mr_table->mtt_table, offset,
			     offset + (1 << order) - 1);
	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, offset);
		set_param_h(&in_param, order);
		err = mlx4_cmd(dev, in_param, RES_MTT, RES_OP_RESERVE_AND_MAP,
			       MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
			       MLX4_CMD_WRAPPED);
		if (err)
			mlx4_warn(dev, "Failed to free mtt range at:%d order:%d\n",
				  offset, order);
		return;
	}
	return (ind >> 24) | (ind << 8);

	return (key << 24) | (key >> 8);
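
/* HW2SW_MPT may be called with a NULL mailbox; the !mailbox opcode modifier
 * then tells the firmware not to write back the MPT entry contents. */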
	return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index,
			    !mailbox, MLX4_CMD_HW2SW_MPT, MLX4_CMD_TIME_CLASS_B,
			    MLX4_CMD_WRAPPED);
static int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd,
				  u64 iova, u64 size, u32 access, int npages,
				  int page_shift, struct mlx4_mr *mr)
	mr->key = hw_index_to_key(mridx);
static int mlx4_WRITE_MTT(struct mlx4_dev *dev,
			  struct mlx4_cmd_mailbox *mailbox,
			  int num_entries)
static int mlx4_mr_reserve(struct mlx4_dev *dev)
	if (mlx4_is_mfunc(dev)) {
		if (mlx4_cmd_imm(dev, 0, &out_param, RES_MPT, RES_OP_RESERVE,
				 MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A,
				 MLX4_CMD_WRAPPED))
			return -1;
		return get_param_l(&out_param);
	}
	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, index);
		if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_RESERVE,
			     MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
			     MLX4_CMD_WRAPPED))
			mlx4_warn(dev, "Failed to release mr index:%d\n", index);
		return;
	}
static int mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index)
	if (mlx4_is_mfunc(dev)) {
		set_param_l(&param, index);
static void mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index)
	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, index);
			mlx4_warn(dev, "Failed to free icm of mr index:%d\n",
				  index);
int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
		  int npages, int page_shift, struct mlx4_mr *mr)
	index = mlx4_mr_reserve(dev);
	if (index == -1)
		return -ENOMEM;

	err = mlx4_mr_alloc_reserved(dev, index, pd, iova, size,
				     access, npages, page_shift, mr);
	if (err)
		mlx4_mr_release(dev, index);
static void mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
	err = mlx4_HW2SW_MPT(dev, NULL,
			     key_to_hw_index(mr->key) &
			     (dev->caps.num_mpts - 1));
	if (err)
		mlx4_warn(dev, "HW2SW_MPT failed (%d)\n", err);
	mlx4_mr_free_reserved(dev, mr);
	if (mr->enabled)
		mlx4_mr_free_icm(dev, key_to_hw_index(mr->key));
	mlx4_mr_release(dev, key_to_hw_index(mr->key));
	err = mlx4_mr_alloc_icm(dev, key_to_hw_index(mr->key));
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
	mpt_entry = mailbox->buf;
	memset(mpt_entry, 0, sizeof *mpt_entry);
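
	/* A negative MTT order means no translation table: the region is a
	 * direct physical mapping (MLX4_MPT_FLAG_PHYSICAL). */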
	if (mr->mtt.order < 0) {
	if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
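		/* fast register MR in free state */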
	err = mlx4_SW2HW_MPT(dev, mailbox,
			     key_to_hw_index(mr->key) & (dev->caps.num_mpts - 1));
	if (err)
		mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
	mlx4_mr_free_icm(dev, key_to_hw_index(mr->key));
static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
				int start_index, int npages, u64 *page_list)

	mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->offset +
			       start_index, &dma_handle);
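	/* Each MTT entry is the page's DMA address with the present bit set,
	 * stored big-endian for the HCA. */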
	for (i = 0; i < npages; ++i)
		mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		     int start_index, int npages, u64 *page_list)
	int max_mtts_first_page;
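	/* A chunk written through mlx4_write_mtt_chunk() must not cross an
	 * ICM page boundary, so the first chunk is capped at the end of the
	 * page containing mtt->offset + start_index. */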
	mtts_per_page = PAGE_SIZE / sizeof(u64);
	max_mtts_first_page = mtts_per_page - (mtt->offset + start_index)
			      % mtts_per_page;
	chunk = min_t(int, max_mtts_first_page, npages);
	err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list);
	start_index += chunk;
	chunk = min_t(int, mtts_per_page, npages);
int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		   int start_index, int npages, u64 *page_list)
	if (mlx4_is_mfunc(dev)) {
		mailbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(mailbox))
			return PTR_ERR(mailbox);
		inbox = mailbox->buf;
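		/* WRITE_MTT mailbox layout: the first two 64-bit words hold
		 * the starting MTT offset; the entries follow from inbox[2]. */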
			for (i = 0; i < chunk; ++i)
				inbox[i + 2] = cpu_to_be64(page_list[i] |
					       MLX4_MTT_FLAG_PRESENT);
			err = mlx4_WRITE_MTT(dev, mailbox, chunk);
			start_index += chunk;
	for (i = 0; i < buf->npages; ++i)
		if (buf->nbufs == 1)
			page_list[i] = buf->direct.map + (i << buf->page_shift);
		else
			page_list[i] = buf->page_list[i].map;
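
	/* Slaves keep no MR bookkeeping of their own; everything is forwarded
	 * to the master, so only the master initializes these tables. */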
	if (mlx4_is_slave(dev))
		return 0;
	err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
			       ~0, dev->caps.reserved_mrws, 0);
	err = mlx4_buddy_init(&mr_table->mtt_buddy,
			      ilog2(dev->caps.num_mtts /
				    (1 << log_mtts_per_seg)));
	if (dev->caps.reserved_mtts) {
		priv->reserved_mtts =
			mlx4_alloc_mtt_range(dev,
					     fls(dev->caps.reserved_mtts - 1));
		if (priv->reserved_mtts < 0) {
			mlx4_warn(dev, "MTT table of order %u is too small.\n",
				  mr_table->mtt_buddy.max_order);
			err = -ENOMEM;
			goto err_reserve_mtts;
		}
	mlx4_buddy_cleanup(&mr_table->mtt_buddy);
	if (mlx4_is_slave(dev))
		return;
	if (priv->reserved_mtts >= 0)
		mlx4_free_mtt_range(dev, priv->reserved_mtts,
				    fls(dev->caps.reserved_mtts - 1));
	mlx4_buddy_cleanup(&mr_table->mtt_buddy);
static inline int mlx4_check_fmr(struct mlx4_fmr *fmr, u64 *page_list,
				 int npages, u64 iova)
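	/* We get page lists, so the iova must be page aligned */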
	if (iova & page_mask)
		return -EINVAL;
	for (i = 0; i < npages; ++i) {
		if (page_list[i] & ~page_mask)
			return -EINVAL;
	}
	err = mlx4_check_fmr(fmr, page_list, npages, iova);
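	/* Advance the key's tag byte: adding num_mpts to the hardware index
	 * leaves the (num_mpts - 1)-masked MPT index unchanged, so the same
	 * MPT entry is reused under a new lkey/rkey. */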
	key = key_to_hw_index(fmr->mr.key);
	key += dev->caps.num_mpts;
	*lkey = *rkey = fmr->mr.key = hw_index_to_key(key);
	for (i = 0; i < npages; ++i)
		fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
		   int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
	if (max_maps > dev->caps.max_fmr_maps)
		return -EINVAL;
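
	/* ffs(page_size_cap) - 1 is the smallest page shift the device
	 * supports; accept anything from there up to, but not including, 32. */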
	if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
		return -EINVAL;
	err = mlx4_mr_alloc(dev, pd, 0, 0, access, max_pages,
			    page_shift, &fmr->mr);
	fmr->mpt = mlx4_table_find(&priv->mr_table.dmpt_table,
				   key_to_hw_index(fmr->mr.key), NULL);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		printk(KERN_WARNING "mlx4_ib: mlx4_alloc_cmd_mailbox failed (%d)\n",
		       err);
	err = mlx4_HW2SW_MPT(dev, NULL,
			     key_to_hw_index(fmr->mr.key) &
			     (dev->caps.num_mpts - 1));