Linux Kernel  3.7.1
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
device.h
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses. You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  * Redistribution and use in source and binary forms, with or
11  * without modification, are permitted provided that the following
12  * conditions are met:
13  *
14  * - Redistributions of source code must retain the above
15  * copyright notice, this list of conditions and the following
16  * disclaimer.
17  *
18  * - Redistributions in binary form must reproduce the above
19  * copyright notice, this list of conditions and the following
20  * disclaimer in the documentation and/or other materials
21  * provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #ifndef MLX4_DEVICE_H
34 #define MLX4_DEVICE_H
35 
36 #include <linux/pci.h>
37 #include <linux/completion.h>
38 #include <linux/radix-tree.h>
39 #include <linux/cpu_rmap.h>
40 
41 #include <linux/atomic.h>
42 
43 #define MAX_MSIX_P_PORT 17
44 #define MAX_MSIX 64
45 #define MSIX_LEGACY_SZ 4
46 #define MIN_MSIX_P_PORT 5
47 
48 enum {
49  MLX4_FLAG_MSI_X = 1 << 0,
51  MLX4_FLAG_MASTER = 1 << 2,
52  MLX4_FLAG_SLAVE = 1 << 3,
53  MLX4_FLAG_SRIOV = 1 << 4,
54 };
55 
56 enum {
59 };
60 
61 enum {
64 };
65 
66 /* base qkey for use in sriov tunnel-qp/proxy-qp communication.
67  * These qkeys must not be allowed for general use. This is a 64k range,
68  * and to test for violation, we use the mask (protect against future chg).
69  */
70 #define MLX4_RESERVED_QKEY_BASE (0xFFFF0000)
71 #define MLX4_RESERVED_QKEY_MASK (0xFFFF0000)
72 
73 enum {
75 };
76 
77 enum {
85 };
86 
87 /* Driver supports 3 different device methods to manage traffic steering:
88  * -device managed - High level API for ib and eth flow steering. FW is
89  * managing flow steering tables.
90  * - B0 steering mode - Common low level API for ib and (if supported) eth.
91  * - A0 steering mode - Limited low level API for eth. In case of IB,
92  * B0 mode is in use.
93  */
94 enum {
98 };
99 
/*
 * Return a human-readable name for a traffic-steering mode value,
 * matching the three modes described in the comment above.
 *
 * NOTE(review): the switch's case labels are missing from this excerpt
 * (source lines elided by the extraction); confirm against the full
 * header before relying on which label maps to which string.  The
 * "Unrecognize" typo in the default string is user-visible text and is
 * left untouched here.
 */
100 static inline const char *mlx4_steering_mode_str(int steering_mode)
101 {
102  switch (steering_mode) {
104  return "A0 steering";
105 
107  return "B0 steering";
108 
110  return "Device managed flow steering";
111 
112  default:
113  return "Unrecognize steering mode";
114  }
115 }
116 
117 enum {
145 };
146 
147 enum {
152 };
153 
154 #define MLX4_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90)
155 
156 enum {
162 };
163 
189 };
190 
191 enum {
194 };
195 
196 enum {
198 };
199 
204 };
205 
210 };
211 
217 };
218 
219 enum {
225 };
226 
227 enum {
244 
249 
252 };
253 
254 enum {
256 };
257 
263 };
264 
265 enum {
267 };
268 
275 };
276 
282 };
283 
288 };
289 
294 };
295 
296 enum {
297  MLX4_NUM_FEXCH = 64 * 1024,
298 };
299 
300 enum {
302 };
303 
304 enum {
308 };
309 
310 /* Port mgmt change event handling */
311 enum {
317 };
318 
319 #define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \
320  MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK)
321 
322 static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor)
323 {
324  return (major << 32) | (minor << 16) | subminor;
325 }
326 
334 };
335 
336 struct mlx4_caps {
338  u32 function;
352  int num_uars;
358  int num_qps;
359  int max_wqes;
368  int num_srqs;
372  int num_cqs;
373  int max_cqes;
375  int num_eqs;
379  int num_mpts;
381  int num_mtts;
386  int num_mgms;
392  int num_pds;
422 };
423 
425  void *buf;
427 };
428 
429 struct mlx4_buf {
432  int nbufs;
433  int npages;
435 };
436 
437 struct mlx4_mtt {
439  int order;
441 };
442 
443 enum {
445 };
446 
448  struct list_head list;
450  DECLARE_BITMAP(order1, MLX4_DB_PER_PAGE / 2);
451  unsigned long *bits[2];
454 };
455 
456 struct mlx4_ib_user_db_page;
457 
458 struct mlx4_db {
460  union {
463  } u;
465  int index;
466  int order;
467 };
468 
470  struct mlx4_db db;
471  struct mlx4_mtt mtt;
472  struct mlx4_buf buf;
473 };
474 
475 struct mlx4_mr {
476  struct mlx4_mtt mtt;
482  int enabled;
483 };
484 
485 struct mlx4_fmr {
486  struct mlx4_mr mr;
491  int max_maps;
492  int maps;
494 };
495 
496 struct mlx4_uar {
497  unsigned long pfn;
498  int index;
500  unsigned free_bf_bmap;
501  void __iomem *map;
503 };
504 
/*
 * Per-user doorbell ("BF") mapping, allocated with mlx4_bf_alloc() and
 * released with mlx4_bf_free().
 * NOTE(review): exact semantics of 'offset' (relative to the UAR
 * page?) are not visible in this excerpt -- confirm in mlx4_bf_alloc().
 */
505 struct mlx4_bf {
506  unsigned long offset;
507  int buf_size;
508  struct mlx4_uar *uar; /* UAR this region belongs to */
509  void __iomem *reg; /* kernel mapping of the doorbell register */
510 };
511 
512 struct mlx4_cq {
513  void (*comp) (struct mlx4_cq *);
514  void (*event) (struct mlx4_cq *, enum mlx4_event);
515 
516  struct mlx4_uar *uar;
517 
519 
522  int arm_sn;
523 
524  int cqn;
525  unsigned vector;
526 
528  struct completion free;
529 };
530 
531 struct mlx4_qp {
532  void (*event) (struct mlx4_qp *, enum mlx4_event);
533 
534  int qpn;
535 
537  struct completion free;
538 };
539 
540 struct mlx4_srq {
541  void (*event) (struct mlx4_srq *, enum mlx4_event);
542 
543  int srqn;
544  int max;
545  int max_gs;
547 
549  struct completion free;
550 };
551 
552 struct mlx4_av {
562  u8 dgid[16];
563 };
564 
565 struct mlx4_eth_av {
575  u8 dgid[16];
578  u8 mac[6];
579 };
580 
/*
 * Address vector that can describe either an InfiniBand or an
 * Ethernet destination; both members overlay the same storage.
 */
581 union mlx4_ext_av {
582  struct mlx4_av ib;
583  struct mlx4_eth_av eth;
584 };
585 
586 struct mlx4_counter {
595 };
596 
597 struct mlx4_dev {
598  struct pci_dev *pdev;
599  unsigned long flags;
600  unsigned long num_slaves;
601  struct mlx4_caps caps;
606  int num_vfs;
609 };
610 
611 struct mlx4_eqe {
616  union {
617  u32 raw[6];
618  struct {
620  } __packed comp;
621  struct {
628  } __packed cmd;
629  struct {
631  } __packed qp;
632  struct {
634  } __packed srq;
635  struct {
636  __be32 cqn;
638  u8 reserved2[3];
640  } __packed cq_err;
641  struct {
642  u32 reserved1[2];
645  struct {
646  #define COMM_CHANNEL_BIT_ARRAY_SIZE 4
650  struct {
655  struct {
658  struct {
661  } __packed warming;
662  struct {
663  u8 reserved[3];
664  u8 port;
665  union {
666  struct {
670  u8 reserved[3];
674  struct {
678  } params;
680  } event;
682  u8 reserved3[2];
684 } __packed;
685 
698 };
699 
/* Iterate 'port' over 1..num_ports, visiting only ports whose
 * port_mask entry equals 'type'.
 */
700 #define mlx4_foreach_port(port, dev, type) \
701  for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \
702  if ((type) == (dev)->caps.port_mask[(port)])
703 
/* Iterate over every port that is NOT using the IB transport. */
704 #define mlx4_foreach_non_ib_transport_port(port, dev) \
705  for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \
706  if (((dev)->caps.port_mask[port] != MLX4_PORT_TYPE_IB))
707 
/* Iterate over every port usable for IB transport: native IB ports,
 * or any port when the device advertises IBoE (RoCE) capability.
 */
708 #define mlx4_foreach_ib_transport_port(port, dev) \
709  for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \
710  if (((dev)->caps.port_mask[port] == MLX4_PORT_TYPE_IB) || \
711  ((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
712 
713 #define MLX4_INVALID_SLAVE_ID 0xFF
714 
716 
717 static inline int mlx4_master_func_num(struct mlx4_dev *dev)
718 {
719  return dev->caps.function;
720 }
721 
722 static inline int mlx4_is_master(struct mlx4_dev *dev)
723 {
724  return dev->flags & MLX4_FLAG_MASTER;
725 }
726 
727 static inline int mlx4_is_qp_reserved(struct mlx4_dev *dev, u32 qpn)
728 {
729  return (qpn < dev->phys_caps.base_sqpn + 8 +
730  16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev));
731 }
732 
733 static inline int mlx4_is_guest_proxy(struct mlx4_dev *dev, int slave, u32 qpn)
734 {
735  int guest_proxy_base = dev->phys_caps.base_proxy_sqpn + slave * 8;
736 
737  if (qpn >= guest_proxy_base && qpn < guest_proxy_base + 8)
738  return 1;
739 
740  return 0;
741 }
742 
743 static inline int mlx4_is_mfunc(struct mlx4_dev *dev)
744 {
745  return dev->flags & (MLX4_FLAG_SLAVE | MLX4_FLAG_MASTER);
746 }
747 
748 static inline int mlx4_is_slave(struct mlx4_dev *dev)
749 {
750  return dev->flags & MLX4_FLAG_SLAVE;
751 }
752 
753 int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
754  struct mlx4_buf *buf);
755 void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf);
756 static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset)
757 {
758  if (BITS_PER_LONG == 64 || buf->nbufs == 1)
759  return buf->direct.buf + offset;
760  else
761  return buf->page_list[offset >> PAGE_SHIFT].buf +
762  (offset & (PAGE_SIZE - 1));
763 }
764 
765 int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn);
766 void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn);
767 int mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn);
768 void mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn);
769 
770 int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar);
771 void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar);
772 int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf);
773 void mlx4_bf_free(struct mlx4_dev *dev, struct mlx4_bf *bf);
774 
775 int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
776  struct mlx4_mtt *mtt);
777 void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt);
778 u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt);
779 
780 int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
781  int npages, int page_shift, struct mlx4_mr *mr);
782 void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr);
783 int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr);
784 int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
785  int start_index, int npages, u64 *page_list);
786 int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
787  struct mlx4_buf *buf);
788 
789 int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order);
790 void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db);
791 
792 int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
793  int size, int max_direct);
794 void mlx4_free_hwq_res(struct mlx4_dev *mdev, struct mlx4_hwq_resources *wqres,
795  int size);
796 
797 int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
798  struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
799  unsigned vector, int collapsed);
800 void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq);
801 
802 int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base);
803 void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);
804 
805 int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp);
806 void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp);
807 
808 int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcdn,
809  struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq);
810 void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq);
811 int mlx4_srq_arm(struct mlx4_dev *dev, struct mlx4_srq *srq, int limit_watermark);
812 int mlx4_srq_query(struct mlx4_dev *dev, struct mlx4_srq *srq, int *limit_watermark);
813 
814 int mlx4_INIT_PORT(struct mlx4_dev *dev, int port);
815 int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port);
816 
817 int mlx4_unicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
818  int block_mcast_loopback, enum mlx4_protocol prot);
819 int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
820  enum mlx4_protocol prot);
821 int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
822  u8 port, int block_mcast_loopback,
823  enum mlx4_protocol protocol, u64 *reg_id);
824 int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
825  enum mlx4_protocol protocol, u64 reg_id);
826 
827 enum {
830  MLX4_DOMAIN_RFS = 0x3000,
831  MLX4_DOMAIN_NIC = 0x5000,
832 };
833 
841  MLX4_NET_TRANS_RULE_NUM, /* should be last */
842 };
843 
844 extern const u16 __sw_id_hw[];
845 
846 static inline int map_hw_to_sw_id(u16 header_id)
847 {
848 
849  int i;
850  for (i = 0; i < MLX4_NET_TRANS_RULE_NUM; i++) {
851  if (header_id == __sw_id_hw[i])
852  return i;
853  }
854  return -EINVAL;
855 }
856 
860  /* For future use. Not implemented yet */
863 };
864 
874 };
875 
881 };
882 
888 };
889 
890 struct mlx4_spec_ib {
893  u8 dst_gid[16];
895 };
896 
898  struct list_head list;
900  union {
902  struct mlx4_spec_ib ib;
905  };
906 };
907 
911 };
912 
914  struct list_head list;
916  bool exclusive;
922 };
923 
924 int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port, u32 qpn,
928 int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port);
929 int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port);
930 int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port);
931 int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port);
932 int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode);
933 
934 int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac);
935 void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac);
936 int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
937 int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn);
938 void mlx4_put_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int qpn);
939 void mlx4_set_stats_bitmap(struct mlx4_dev *dev, u64 *stats_bitmap);
940 int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
941  u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx);
942 int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
943  u8 promisc);
944 int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc);
945 int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
946  u8 *pg, u16 *ratelimit);
947 int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
948 int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
949 void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index);
950 
951 int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
952  int npages, u64 iova, u32 *lkey, u32 *rkey);
953 int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
954  int max_maps, u8 page_shift, struct mlx4_fmr *fmr);
955 int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
956 void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
957  u32 *lkey, u32 *rkey);
958 int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
959 int mlx4_SYNC_TPT(struct mlx4_dev *dev);
960 int mlx4_test_interrupts(struct mlx4_dev *dev);
961 int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
962  int *vector);
963 void mlx4_release_eq(struct mlx4_dev *dev, int vec);
964 
965 int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port);
966 int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port);
967 
968 int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx);
969 void mlx4_counter_free(struct mlx4_dev *dev, u32 idx);
970 
971 int mlx4_flow_attach(struct mlx4_dev *dev,
972  struct mlx4_net_trans_rule *rule, u64 *reg_id);
973 int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id);
974 
975 void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port,
976  int i, int val);
977 
978 int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey);
979 
980 int mlx4_is_slave_active(struct mlx4_dev *dev, int slave);
981 int mlx4_gen_pkey_eqe(struct mlx4_dev *dev, int slave, u8 port);
982 int mlx4_gen_guid_change_eqe(struct mlx4_dev *dev, int slave, u8 port);
983 int mlx4_gen_slaves_port_mgt_ev(struct mlx4_dev *dev, u8 port, int attr);
984 int mlx4_gen_port_state_change_eqe(struct mlx4_dev *dev, int slave, u8 port, u8 port_subtype_change);
985 enum slave_port_state mlx4_get_slave_port_state(struct mlx4_dev *dev, int slave, u8 port);
986 int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave, u8 port, int event, enum slave_port_gen_event *gen_event);
987 
988 void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid);
989 __be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave);
990 
991 #endif /* MLX4_DEVICE_H */