/*
 * SMP MAD status codes returned to the requester in the MAD status
 * field.  Values are stored big-endian (network order) up front so the
 * handlers can OR them into the wire-format header directly.
 */
#define IB_SMP_UNSUP_VERSION	cpu_to_be16(0x0004)
#define IB_SMP_UNSUP_METHOD	cpu_to_be16(0x0008)
#define IB_SMP_UNSUP_METH_ATTR	cpu_to_be16(0x000C)
#define IB_SMP_INVALID_FIELD	cpu_to_be16(0x001C)
58 static int recv_subn_get_nodedescription(
struct ib_smp *
smp,
84 static int recv_subn_get_nodeinfo(
struct ib_smp *smp,
105 nip->
sys_guid = to_idev(ibdev)->sys_image_guid;
110 majrev =
dd->ipath_majrev;
111 minrev =
dd->ipath_minrev;
114 vendor =
dd->ipath_vendorid;
122 static int recv_subn_get_guidinfo(
struct ib_smp *smp,
137 __be64 g = to_idev(ibdev)->dd->ipath_guid;
174 static int set_overrunthreshold(
struct ipath_devdata *dd,
unsigned n)
206 static int set_phyerrthreshold(
struct ipath_devdata *dd,
unsigned n)
230 static int get_linkdowndefaultstate(
struct ipath_devdata *dd)
235 static int recv_subn_get_portinfo(
struct ib_smp *smp,
252 dev = to_idev(ibdev);
280 (get_linkdowndefaultstate(dd) ? 1 : 2);
327 (get_phyerrthreshold(dd) << 4) |
328 get_overrunthreshold(dd);
360 static int recv_subn_get_pkeytable(
struct ib_smp *smp,
374 get_pkeys(dev->
dd, p);
376 for (i = 0; i <
n; i++)
384 static int recv_subn_set_guidinfo(
struct ib_smp *smp,
388 return recv_subn_get_guidinfo(smp, ibdev);
398 static int set_linkdowndefaultstate(
struct ipath_devdata *dd,
int sleep)
417 static int recv_subn_set_portinfo(
struct ib_smp *smp,
424 char clientrereg = 0;
436 dev = to_idev(ibdev);
438 event.device = ibdev;
439 event.element.port_num =
port;
457 if (smlid != dev->
sm_lid) {
473 set_link_width_enabled(dd, lwe);
483 set_link_speed_enabled(dd, lse);
491 if (set_linkdowndefaultstate(dd, 1))
495 if (set_linkdowndefaultstate(dd, 0))
549 if (set_phyerrthreshold(dd, (ore >> 4) & 0xF))
552 if (set_overrunthreshold(dd, (ore & 0xF)))
586 else if (lstate == 1)
588 else if (lstate == 2)
590 else if (lstate == 3)
613 ret = recv_subn_get_portinfo(smp, ibdev, port);
622 ret = recv_subn_get_portinfo(smp, ibdev, port);
673 if (lkey == 0x7FFF) {
749 changed |= rm_pkey(dd, okey);
751 int ret = add_pkey(dd, key);
768 (
unsigned long long) pkey);
769 ipath_write_kreg(dd, dd->
ipath_kregs->kr_partitionkey,
775 static int recv_subn_set_pkeytable(
struct ib_smp *smp,
784 for (i = 0; i <
n; i++)
787 if (startpx != 0 || set_pkeys(dev->
dd,
q) != 0)
790 return recv_subn_get_pkeytable(smp, ibdev);
793 static int recv_pma_get_classportinfo(
struct ib_pma_mad *pmp)
800 if (pmp->
mad_hdr.attr_mod != 0)
813 return reply((
struct ib_smp *) pmp);
/*
 * Place value q in the 3-bit field for counter index n (fields are laid
 * out high-to-low: index 0 occupies bits 29:27, index 9 bits 2:0).
 * Parameters are fully parenthesized so expression arguments expand
 * correctly (e.g. COUNTER_MASK(v, i + 1)); existing literal-constant
 * callers are unaffected.
 */
#define COUNTER_MASK(q, n) ((q) << ((9 - (n)) * 3))
822 #define COUNTER_MASK0_9 cpu_to_be32(COUNTER_MASK(1, 0) | \
823 COUNTER_MASK(1, 1) | \
824 COUNTER_MASK(1, 2) | \
825 COUNTER_MASK(1, 3) | \
828 static int recv_pma_get_portsamplescontrol(
struct ib_pma_mad *pmp,
841 if (pmp->
mad_hdr.attr_mod != 0 ||
842 (port_select != port && port_select != 0xFF))
854 p->
tick = dev->
dd->ipath_link_speed_active - 1;
874 return reply((
struct ib_smp *) pmp);
877 static int recv_pma_set_portsamplescontrol(
struct ib_pma_mad *pmp,
888 if (pmp->
mad_hdr.attr_mod != 0 ||
891 ret = reply((
struct ib_smp *) pmp);
897 status = ipath_read_creg32(dev->
dd, crp->
cr_psstat);
919 ret = recv_pma_get_portsamplescontrol(pmp, ibdev, port);
964 static int recv_pma_get_portsamplesresult(
struct ib_pma_mad *pmp,
977 status = ipath_read_creg32(dev->
dd, crp->
cr_psstat);
986 return reply((
struct ib_smp *) pmp);
989 static int recv_pma_get_portsamplesresult_ext(
struct ib_pma_mad *pmp,
1002 status = ipath_read_creg32(dev->
dd, crp->
cr_psstat);
1013 return reply((
struct ib_smp *) pmp);
1016 static int recv_pma_get_portcounters(
struct ib_pma_mad *pmp,
1029 cntrs.link_error_recovery_counter -=
1040 cntrs.local_link_integrity_errors -=
1042 cntrs.excessive_buffer_overrun_errors -=
1050 if (pmp->
mad_hdr.attr_mod != 0 ||
1051 (port_select != port && port_select != 0xFF))
1054 if (cntrs.symbol_error_counter > 0xFFFFUL)
1059 if (cntrs.link_error_recovery_counter > 0xFFUL)
1063 (
u8)cntrs.link_error_recovery_counter;
1064 if (cntrs.link_downed_counter > 0xFFUL)
1068 if (cntrs.port_rcv_errors > 0xFFFFUL)
1073 if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
1078 if (cntrs.port_xmit_discards > 0xFFFFUL)
1083 if (cntrs.local_link_integrity_errors > 0xFUL)
1084 cntrs.local_link_integrity_errors = 0xF
UL;
1085 if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
1086 cntrs.excessive_buffer_overrun_errors = 0xF
UL;
1088 cntrs.excessive_buffer_overrun_errors;
1089 if (cntrs.vl15_dropped > 0xFFFFUL)
1093 if (cntrs.port_xmit_data > 0xFFFFFFFFUL)
1097 if (cntrs.port_rcv_data > 0xFFFFFFFFUL)
1101 if (cntrs.port_xmit_packets > 0xFFFFFFFFUL)
1106 if (cntrs.port_rcv_packets > 0xFFFFFFFFUL)
1112 return reply((
struct ib_smp *) pmp);
1115 static int recv_pma_get_portcounters_ext(
struct ib_pma_mad *pmp,
1121 u64 swords, rwords, spkts, rpkts, xwait;
1136 if (pmp->
mad_hdr.attr_mod != 0 ||
1137 (port_select != port && port_select != 0xFF))
1149 return reply((
struct ib_smp *) pmp);
1152 static int recv_pma_set_portcounters(
struct ib_pma_mad *pmp,
1171 cntrs.link_error_recovery_counter;
1182 cntrs.port_rcv_remphys_errors;
1189 cntrs.local_link_integrity_errors;
1193 cntrs.excessive_buffer_overrun_errors;
1212 return recv_pma_get_portcounters(pmp, ibdev, port);
1215 static int recv_pma_set_portcounters_ext(
struct ib_pma_mad *pmp,
1221 u64 swords, rwords, spkts, rpkts, xwait;
1250 return recv_pma_get_portcounters_ext(pmp, ibdev, port);
1253 static int process_subn(
struct ib_device *ibdev,
int mad_flags,
1305 ret = recv_subn_get_nodedescription(smp, ibdev);
1308 ret = recv_subn_get_nodeinfo(smp, ibdev, port_num);
1311 ret = recv_subn_get_guidinfo(smp, ibdev);
1314 ret = recv_subn_get_portinfo(smp, ibdev, port_num);
1317 ret = recv_subn_get_pkeytable(smp, ibdev);
1339 ret = recv_subn_set_guidinfo(smp, ibdev);
1342 ret = recv_subn_set_portinfo(smp, ibdev, port_num);
1345 ret = recv_subn_set_pkeytable(smp, ibdev);
1385 static int process_perf(
struct ib_device *ibdev,
u8 port_num,
1393 if (pmp->
mad_hdr.class_version != 1) {
1395 ret = reply((
struct ib_smp *) pmp);
1399 switch (pmp->
mad_hdr.method) {
1401 switch (pmp->
mad_hdr.attr_id) {
1403 ret = recv_pma_get_classportinfo(pmp);
1406 ret = recv_pma_get_portsamplescontrol(pmp, ibdev,
1410 ret = recv_pma_get_portsamplesresult(pmp, ibdev);
1413 ret = recv_pma_get_portsamplesresult_ext(pmp,
1417 ret = recv_pma_get_portcounters(pmp, ibdev,
1421 ret = recv_pma_get_portcounters_ext(pmp, ibdev,
1426 ret = reply((
struct ib_smp *) pmp);
1431 switch (pmp->
mad_hdr.attr_id) {
1433 ret = recv_pma_set_portsamplescontrol(pmp, ibdev,
1437 ret = recv_pma_set_portcounters(pmp, ibdev,
1441 ret = recv_pma_set_portcounters_ext(pmp, ibdev,
1446 ret = reply((
struct ib_smp *) pmp);
1460 ret = reply((
struct ib_smp *) pmp);
1492 switch (in_mad->
mad_hdr.mgmt_class) {
1495 ret = process_subn(ibdev, mad_flags, port_num,
1499 ret = process_perf(ibdev, port_num, in_mad, out_mad);