52 static int reply_failure(struct ib_smp *smp)
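/*
 * Annotation (not in the sampled lines): in this file's pattern,
 * reply() turns the SMP into a GET_RESP and returns
 * IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; reply_failure() is
 * presumed to do the same but return IB_MAD_RESULT_FAILURE |
 * IB_MAD_RESULT_REPLY so the MAD layer still sends the response.
 */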
64 static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len)
111 ibp->sm_ah = to_iah(ah);
117 send_buf->ah = &ibp->sm_ah->ibah;
120 spin_unlock_irqrestore(&ibp->lock, flags);
162 qib_send_trap(ibp, &data, sizeof data);
174 data.prod_type_msb = 0;
178 data.toggle_count = 0;
179 memset(&data.details, 0, sizeof data.details);
180 data.details.ntc_256.lid = data.issuer_lid;
181 data.details.ntc_256.method = smp->method;
182 data.details.ntc_256.attr_id = smp->attr_id;
183 data.details.ntc_256.attr_mod = smp->attr_mod;
184 data.details.ntc_256.mkey = smp->mkey;
188 data.details.ntc_256.dr_slid = smp->dr_slid;
191 if (hop_cnt > ARRAY_SIZE(data.details.ntc_256.dr_rtn_path)) {
192 data.details.ntc_256.dr_trunc_hop |= IB_NOTICE_TRAP_DR_TRUNC;
194 hop_cnt = ARRAY_SIZE(data.details.ntc_256.dr_rtn_path);
196 data.details.ntc_256.dr_trunc_hop |= hop_cnt;
201 qib_send_trap(ibp, &data, sizeof data);
221 qib_send_trap(ibp, &data, sizeof data);
241 qib_send_trap(ibp, &data, sizeof data);
259 data.details.ntc_144.local_changes = 1;
262 qib_send_trap(ibp, &data, sizeof data);
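/*
 * The fragments above belong to the trap helpers: qib_send_trap()
 * posts a notice MAD to the SM through the cached ibp->sm_ah address
 * handle (built under ibp->lock), the ntc_256 block carries Bad-M_Key
 * trap details, and ntc_144.local_changes flags a capability-mask
 * change trap. The subnet-management attribute handlers follow.
 */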
265 static int subn_get_nodedescription(struct ib_smp *smp, struct ib_device *ibdev)
276 static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev, u8 port)
282 unsigned pidx = port - 1;
285 if (smp->attr_mod || pidx >= dd->num_pports ||
286 dd->pport[pidx].guid == 0)
304 vendor = dd->vendorid;
312 static int subn_get_guidinfo(struct ib_smp *smp, struct ib_device *ibdev, u8 port)
318 unsigned pidx = port - 1;
337 p[i] = ibp->guids[i - 1];
367 static int set_overrunthreshold(struct qib_pportdata *ppd, unsigned n)
386 static int set_phyerrthreshold(struct qib_pportdata *ppd, unsigned n)
399 static int get_linkdowndefaultstate(struct qib_pportdata *ppd)
405 static int check_mkey(struct qib_ibport *ibp, struct ib_smp *smp, int mad_flags)
443 qib_bad_mkey(ibp, smp);
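/*
 * On an M_Key violation, qib_bad_mkey() is expected to queue the
 * Bad-M_Key trap (notice 256) whose details are assembled in the
 * ntc_256 fragments above.
 */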
451 static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev, u8 port)
473 ret = check_mkey(ibp, smp, 0);
481 dd = dd_from_ibdev(ibdev);
509 (get_linkdowndefaultstate(ppd) ? 1 : 2);
513 switch (ppd->ibmtu) {
554 (get_phyerrthreshold(ppd) << 4) |
555 get_overrunthreshold(ppd);
593 static int subn_get_pkeytable(struct ib_smp *smp, struct ib_device *ibdev, u8 port)
607 get_pkeys(dd, port, p);
609 for (i = 0; i < n; i++)
617 static int subn_set_guidinfo(struct ib_smp *smp, struct ib_device *ibdev, u8 port)
623 unsigned pidx = port - 1;
627 if (startgx == 0 && pidx < dd->num_pports) {
634 ibp->guids[i - 1] = p[i];
639 return subn_get_guidinfo(smp, ibdev, port);
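/*
 * Note the Set pattern used throughout this file: after applying the
 * new values, each Set handler replies by re-running the matching Get
 * so the response carries the attribute's current state.
 */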
650 static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, u8 port)
676 if (port_num != port)
680 dd = dd_from_ibdev(ibdev);
682 ppd = dd->pport + (port_num - 1);
684 event.device = ibdev;
685 event.element.port_num = port;
710 else if (smlid != ibp->sm_lid || msl != ibp->sm_sl) {
714 ibp->sm_ah->attr.dlid = smlid;
715 if (msl != ibp->sm_sl)
716 ibp->sm_ah->attr.sl = msl;
718 spin_unlock_irqrestore(&ibp->lock, flags);
721 if (msl != ibp->sm_sl)
735 set_link_width_enabled(ppd, lwe);
746 set_link_speed_enabled(ppd, ppd->link_speed_supported);
751 set_link_speed_enabled(ppd, lse);
800 if (set_phyerrthreshold(ppd, (ore >> 4) & 0xF))
803 if (set_overrunthreshold(ppd, (ore & 0xF)))
831 else if (lstate == 1)
833 else if (lstate == 2)
835 else if (lstate == 3)
870 ret = subn_get_portinfo(smp, ibdev, port);
880 ret = subn_get_portinfo(smp, ibdev, port);
899 if (ppd->pkeys[i] != key)
930 if (lkey == 0x7FFF) {
937 if (!ppd->pkeys[i]) {
942 if (ppd->pkeys[i] == key) {
956 if ((ppd->pkeys[i] & 0x7FFF) == lkey) {
966 if (!ppd->pkeys[i] &&
999 ppd = dd->pport + (port - 1);
1013 changed |= rm_pkey(ppd, okey);
1015 int ret = add_pkey(ppd, key);
1031 event.element.port_num = 1;
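/*
 * set_pkeys() rewrites the shadow P_Key table through the
 * rm_pkey()/add_pkey() refcounting helpers above; when anything
 * changed, an IB_EVENT_PKEY_CHANGE event is presumably dispatched
 * using the event structure filled in here.
 */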
1037 static int subn_set_pkeytable(struct ib_smp *smp, struct ib_device *ibdev, u8 port)
1046 for (i = 0; i < n; i++)
1049 if (startpx != 0 || set_pkeys(dd, port, q) != 0)
1052 return subn_get_pkeytable(smp, ibdev, port);
1055 static int subn_get_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev, u8 port)
1058 struct qib_ibport *ibp = to_iport(ibdev, port);
1073 static int subn_set_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev, u8 port)
1076 struct qib_ibport *ibp = to_iport(ibdev, port);
1092 return subn_get_sl_to_vl(smp, ibdev, port);
1095 static int subn_get_vl_arb(struct ib_smp *smp, struct ib_device *ibdev, u8 port)
1099 struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));
1117 static int subn_set_vl_arb(struct ib_smp *smp, struct ib_device *ibdev, u8 port)
1121 struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));
1134 return subn_get_vl_arb(smp, ibdev, port);
1137 static int subn_trap_repress(struct ib_smp *smp, struct ib_device *ibdev, u8 port)
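/* Performance-management (PMA) class handlers begin here. */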
1149 static int pma_get_classportinfo(struct ib_pma_mad *pmp, struct ib_device *ibdev)
1158 if (pmp->mad_hdr.attr_mod != 0)
1175 return reply((struct ib_smp *) pmp);
1178 static int pma_get_portsamplescontrol(struct ib_pma_mad *pmp, struct ib_device *ibdev, u8 port)
1185 struct qib_ibport *ibp = to_iport(ibdev, port);
1187 unsigned long flags;
1193 if (pmp->mad_hdr.attr_mod != 0 || port_select != port) {
1210 spin_unlock_irqrestore(&ibp->lock, flags);
1213 return reply((struct ib_smp *) pmp);
1216 static int pma_set_portsamplescontrol(struct ib_pma_mad *pmp, struct ib_device *ibdev, u8 port)
1223 struct qib_ibport *ibp = to_iport(ibdev, port);
1225 unsigned long flags;
1231 ret = reply((struct ib_smp *) pmp);
1255 spin_unlock_irqrestore(&ibp->lock, flags);
1257 ret = pma_get_portsamplescontrol(pmp, ibdev, port);
1301 static void cache_hw_sample_counters(struct qib_pportdata *ppd)
1324 ret = ppd->cong_stats.counter_cache.psxmitdata;
1327 ret = ppd->cong_stats.counter_cache.psrcvdata;
1330 ret = ppd->cong_stats.counter_cache.psxmitpkts;
1333 ret = ppd->cong_stats.counter_cache.psrcvpkts;
1336 ret = ppd->cong_stats.counter_cache.psxmitwait;
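/*
 * cache_hw_sample_counters() apparently snapshots the hardware sample
 * counters (psxmitdata, psrcvdata, ...) into cong_stats.counter_cache
 * so the sample-result handlers below can report stable values after
 * the sampling sweep completes.
 */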
1345 static int pma_get_portsamplesresult(struct ib_pma_mad *pmp, struct ib_device *ibdev, u8 port)
1352 struct qib_ibport *ibp = to_iport(ibdev, port);
1354 unsigned long flags;
1367 cache_hw_sample_counters(ppd);
1369 xmit_wait_get_value_delta(ppd);
1377 get_cache_hw_sample_counters(
1379 spin_unlock_irqrestore(&ibp->lock, flags);
1381 return reply((struct ib_smp *) pmp);
1384 static int pma_get_portsamplesresult_ext(struct ib_pma_mad *pmp, struct ib_device *ibdev, u8 port)
1391 struct qib_ibport *ibp = to_iport(ibdev, port);
1393 unsigned long flags;
1409 cache_hw_sample_counters(ppd);
1411 xmit_wait_get_value_delta(ppd);
1419 get_cache_hw_sample_counters(
1421 spin_unlock_irqrestore(&ibp->lock, flags);
1423 return reply((struct ib_smp *) pmp);
1426 static int pma_get_portcounters(struct ib_pma_mad *pmp, struct ib_device *ibdev, u8 port)
1431 struct qib_ibport *ibp = to_iport(ibdev, port);
1440 cntrs.link_error_recovery_counter -= ibp->z_link_error_recovery_counter;
1450 cntrs.local_link_integrity_errors -= ibp->z_local_link_integrity_errors;
1452 cntrs.excessive_buffer_overrun_errors -= ibp->z_excessive_buffer_overrun_errors;
1460 if (pmp->mad_hdr.attr_mod != 0 || port_select != port)
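/* Saturate each counter to the width of its PortCounters field. */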
1463 if (cntrs.symbol_error_counter > 0xFFFFUL)
1468 if (cntrs.link_error_recovery_counter > 0xFFUL)
1472 (u8)cntrs.link_error_recovery_counter;
1473 if (cntrs.link_downed_counter > 0xFFUL)
1477 if (cntrs.port_rcv_errors > 0xFFFFUL)
1482 if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
1487 if (cntrs.port_xmit_discards > 0xFFFFUL)
1492 if (cntrs.local_link_integrity_errors > 0xFUL)
1493 cntrs.local_link_integrity_errors = 0xFUL;
1494 if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
1495 cntrs.excessive_buffer_overrun_errors = 0xFUL;
1497 cntrs.excessive_buffer_overrun_errors;
1498 if (cntrs.vl15_dropped > 0xFFFFUL)
1502 if (cntrs.port_xmit_data > 0xFFFFFFFFUL)
1506 if (cntrs.port_rcv_data > 0xFFFFFFFFUL)
1510 if (cntrs.port_xmit_packets > 0xFFFFFFFFUL)
1515 if (cntrs.port_rcv_packets > 0xFFFFFFFFUL)
1521 return reply((struct ib_smp *) pmp);
1524 static int pma_get_portcounters_cong(struct ib_pma_mad *pmp, struct ib_device *ibdev, u8 port)
1531 struct qib_ibport *ibp = to_iport(ibdev, port);
1535 u64 xmit_wait_counter;
1536 unsigned long flags;
1544 if (port_select != port)
1549 xmit_wait_counter = xmit_wait_get_value_delta(ppd);
1550 spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);
1554 cntrs.link_error_recovery_counter -= ibp->z_link_error_recovery_counter;
1558 cntrs.port_rcv_remphys_errors -= ibp->z_port_rcv_remphys_errors;
1561 cntrs.local_link_integrity_errors -= ibp->z_local_link_integrity_errors;
1563 cntrs.excessive_buffer_overrun_errors -= ibp->z_excessive_buffer_overrun_errors;
1591 if (cntrs.symbol_error_counter > 0xFFFFUL)
1596 (u16)cntrs.symbol_error_counter);
1597 if (cntrs.link_error_recovery_counter > 0xFFUL)
1601 (u8)cntrs.link_error_recovery_counter;
1602 if (cntrs.link_downed_counter > 0xFFUL)
1606 (u8)cntrs.link_downed_counter;
1607 if (cntrs.port_rcv_errors > 0xFFFFUL)
1612 if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
1617 (u16)cntrs.port_rcv_remphys_errors);
1618 if (cntrs.port_xmit_discards > 0xFFFFUL)
1623 if (cntrs.local_link_integrity_errors > 0xFUL)
1624 cntrs.local_link_integrity_errors = 0xFUL;
1625 if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
1626 cntrs.excessive_buffer_overrun_errors = 0xFUL;
1628 cntrs.excessive_buffer_overrun_errors;
1629 if (cntrs.vl15_dropped > 0xFFFFUL)
1634 return reply((struct ib_smp *)pmp);
1637 static int pma_get_portcounters_ext(struct ib_pma_mad *pmp, struct ib_device *ibdev, u8 port)
1642 struct qib_ibport *ibp = to_iport(ibdev, port);
1644 u64 swords, rwords, spkts, rpkts, xwait;
1650 if (pmp->mad_hdr.attr_mod != 0 || port_select != port) {
1673 return reply((struct ib_smp *) pmp);
1676 static int pma_set_portcounters(struct ib_pma_mad *pmp, struct ib_device *ibdev, u8 port)
1681 struct qib_ibport *ibp = to_iport(ibdev, port);
1696 cntrs.link_error_recovery_counter;
1706 cntrs.port_rcv_remphys_errors;
1713 cntrs.local_link_integrity_errors;
1717 cntrs.excessive_buffer_overrun_errors;
1736 return pma_get_portcounters(pmp, ibdev, port);
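/*
 * The bare cntrs.* fragments above are the tails of assignments that
 * save current hardware values into the ibp->z_* "zero" baselines:
 * PortCounters writes are emulated by resetting the baselines rather
 * than clearing hardware counters, then replying via the Get handler.
 */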
1739 static int pma_set_portcounters_cong(struct ib_pma_mad *pmp, struct ib_device *ibdev, u8 port)
1742 struct qib_ibport *ibp = to_iport(ibdev, port);
1748 unsigned long flags;
1752 ret = pma_get_portcounters_cong(pmp, ibdev, port);
1759 spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);
1769 cntrs.symbol_error_counter;
1771 cntrs.link_error_recovery_counter;
1773 cntrs.link_downed_counter;
1776 cntrs.port_rcv_remphys_errors;
1778 cntrs.port_xmit_discards;
1780 cntrs.local_link_integrity_errors;
1782 cntrs.excessive_buffer_overrun_errors;
1790 static int pma_set_portcounters_ext(struct ib_pma_mad *pmp, struct ib_device *ibdev, u8 port)
1795 struct qib_ibport *ibp = to_iport(ibdev, port);
1797 u64 swords, rwords, spkts, rpkts, xwait;
1825 return pma_get_portcounters_ext(pmp, ibdev, port);
1828 static int process_subn(struct ib_device *ibdev, int mad_flags, u8 port, struct ib_mad *in_mad, struct ib_mad *out_mad)
1833 struct qib_ibport *ibp = to_iport(ibdev, port);
1844 ret = check_mkey(ibp, smp, mad_flags);
1858 port_num && port_num <= ibdev->phys_port_cnt &&
1860 (void) check_mkey(to_iport(ibdev, port_num), smp, 0);
1869 ret = subn_get_nodedescription(smp, ibdev);
1872 ret = subn_get_nodeinfo(smp, ibdev, port);
1875 ret = subn_get_guidinfo(smp, ibdev, port);
1878 ret = subn_get_portinfo(smp, ibdev, port);
1881 ret = subn_get_pkeytable(smp, ibdev, port);
1884 ret = subn_get_sl_to_vl(smp, ibdev, port);
1887 ret = subn_get_vl_arb(smp, ibdev, port);
1909 ret = subn_set_guidinfo(smp, ibdev, port);
1912 ret = subn_set_portinfo(smp, ibdev, port);
1915 ret = subn_set_pkeytable(smp, ibdev, port);
1918 ret = subn_set_sl_to_vl(smp, ibdev, port);
1921 ret = subn_set_vl_arb(smp, ibdev, port);
1942 ret = subn_trap_repress(smp, ibdev, port);
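/*
 * Methods/attributes not matched in the dispatch above are expected
 * to fall through to a default arm that sets IB_SMP_UNSUP_METH_ATTR
 * (or the unsupported-method status) in smp->status and reply()s.
 */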
1962 if (ib_get_smp_direction(smp) &&
1980 static int process_perf(struct ib_device *ibdev, u8 port, struct ib_mad *in_mad, struct ib_mad *out_mad)
1988 if (pmp->mad_hdr.class_version != 1) {
1990 ret = reply((struct ib_smp *) pmp);
1994 switch (pmp->mad_hdr.method) {
1996 switch (pmp->mad_hdr.attr_id) {
1998 ret = pma_get_classportinfo(pmp, ibdev);
2001 ret = pma_get_portsamplescontrol(pmp, ibdev, port);
2004 ret = pma_get_portsamplesresult(pmp, ibdev, port);
2007 ret = pma_get_portsamplesresult_ext(pmp, ibdev, port);
2010 ret = pma_get_portcounters(pmp, ibdev, port);
2013 ret = pma_get_portcounters_ext(pmp, ibdev, port);
2016 ret = pma_get_portcounters_cong(pmp, ibdev, port);
2020 ret = reply((struct ib_smp *) pmp);
2025 switch (pmp->mad_hdr.attr_id) {
2027 ret = pma_set_portsamplescontrol(pmp, ibdev, port);
2030 ret = pma_set_portcounters(pmp, ibdev, port);
2033 ret = pma_set_portcounters_ext(pmp, ibdev, port);
2036 ret = pma_set_portcounters_cong(pmp, ibdev, port);
2040 ret = reply((struct ib_smp *) pmp);
2056 ret = reply((struct ib_smp *) pmp);
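/* Congestion-control (CC) class handlers begin here. */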
2063 static int cc_get_classportinfo(struct ib_cc_mad *ccp, struct ib_device *ibdev)
2080 return reply((struct ib_smp *) ccp);
2083 static int cc_get_congestion_info(struct ib_cc_mad *ccp, struct ib_device *ibdev, u8 port)
2088 struct qib_ibport *ibp = to_iport(ibdev, port);
2096 return reply((struct ib_smp *) ccp);
2099 static int cc_get_congestion_setting(struct ib_cc_mad *ccp, struct ib_device *ibdev, u8 port)
2105 struct qib_ibport *ibp = to_iport(ibdev, port);
2111 spin_lock(&ppd->cc_shadow_lock);
2125 spin_unlock(&ppd->cc_shadow_lock);
2127 return reply((struct ib_smp *) ccp);
2130 static int cc_get_congestion_control_table(struct ib_cc_mad *ccp, struct ib_device *ibdev, u8 port)
2135 struct qib_ibport *ibp = to_iport(ibdev, port);
2149 spin_lock(&ppd->cc_shadow_lock);
2153 max_cct_block = max_cct_block ? max_cct_block - 1 : 0;
2155 if (cct_block_index > max_cct_block) {
2156 spin_unlock(&ppd->cc_shadow_lock);
2172 for (i = 0; i <= cct_entry; i++)
2175 spin_unlock(&ppd->cc_shadow_lock);
2177 return reply((struct ib_smp *) ccp);
2180 return reply_failure((struct ib_smp *) ccp);
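/*
 * cc_get_congestion_control_table() validates the requested CCT block
 * index against the shadow table under cc_shadow_lock and answers an
 * out-of-range request with reply_failure() instead of reply().
 */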
2183 static int cc_set_congestion_setting(struct ib_cc_mad *ccp, struct ib_device *ibdev, u8 port)
2188 struct qib_ibport *ibp = to_iport(ibdev, port);
2208 return reply((struct ib_smp *) ccp);
2211 static int cc_set_congestion_control_table(struct ib_cc_mad *ccp, struct ib_device *ibdev, u8 port)
2216 struct qib_ibport *ibp = to_iport(ibdev, port);
2245 for (i = 0; i <= cct_entry; i++)
2248 spin_lock(&ppd->cc_shadow_lock);
2259 spin_unlock(&ppd->cc_shadow_lock);
2261 return reply((struct ib_smp *) ccp);
2264 return reply_failure((struct ib_smp *) ccp);
2267 static int check_cc_key(struct qib_ibport *ibp, struct ib_cc_mad *ccp, int mad_flags)
2273 static int process_cc(struct ib_device *ibdev, int mad_flags, u8 port, struct ib_mad *in_mad, struct ib_mad *out_mad)
2278 struct qib_ibport *ibp = to_iport(ibdev, port);
2285 ret = reply((struct ib_smp *)ccp);
2289 ret = check_cc_key(ibp, ccp, mad_flags);
2297 ret = cc_get_classportinfo(ccp, ibdev);
2301 ret = cc_get_congestion_info(ccp, ibdev, port);
2305 ret = cc_get_congestion_setting(ccp, ibdev, port);
2309 ret = cc_get_congestion_control_table(ccp, ibdev, port);
2315 ret = reply((struct ib_smp *) ccp);
2322 ret = cc_set_congestion_setting(ccp, ibdev, port);
2326 ret = cc_set_congestion_control_table(ccp, ibdev, port);
2332 ret = reply((struct ib_smp *) ccp);
2348 ret = reply((struct ib_smp *) ccp);
2379 struct qib_ibport *ibp = to_iport(ibdev, port);
2382 switch (in_mad->mad_hdr.mgmt_class) {
2385 ret = process_subn(ibdev, mad_flags, port, in_mad, out_mad);
2389 ret = process_perf(ibdev, port, in_mad, out_mad);
2398 ret = process_cc(ibdev, mad_flags, port, in_mad, out_mad);
2415 static void xmit_wait_timer_func(unsigned long opaque)
2419 unsigned long flags;
2427 cache_hw_sample_counters(ppd);
2432 ppd->cong_stats.counter = xmit_wait_get_value_delta(ppd);
2435 spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);
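/*
 * xmit_wait_timer_func() runs periodically to cache the hardware
 * sample counters and fold the transmit-wait delta into
 * cong_stats.counter, all under ibport_data.lock.
 */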
2448 ibp = &dd->pport[p].ibport_data;
2450 NULL, 0, send_handler,
2452 if (IS_ERR(agent)) {
2453 ret = PTR_ERR(agent);
2458 dd->pport[p].cong_stats.counter = 0;
2460 dd->pport[p].cong_stats.timer.function = xmit_wait_timer_func;
2461 dd->pport[p].cong_stats.timer.data = (unsigned long)(&dd->pport[p]);
2463 dd->pport[p].cong_stats.timer.expires = 0;
2473 ibp = &dd->pport[p].ibport_data;
2492 ibp = &dd->pport[p].ibport_data;
2502 if (dd->pport[p].cong_stats.timer.data)
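/*
 * Teardown apparently uses cong_stats.timer.data as an "armed" flag:
 * the xmit-wait timer is only deleted for ports where agent init set
 * .data above.
 */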