/*
 * NOTE(review): the leading numbers ("51", "52", ...) embedded in each
 * line of this chunk are stale line numbers left behind by a text
 * extraction, not C tokens; the whole chunk needs re-extraction from
 * the original cvmx-pow.h before it can compile.
 */
/* Include guard for the POW (Packet Order / Work unit) header. */
51 #ifndef __CVMX_POW_H__
52 #define __CVMX_POW_H__
/* Runtime sanity checks default to on unless the builder overrides. */
60 #ifndef CVMX_ENABLE_POW_CHECKS
61 #define CVMX_ENABLE_POW_CHECKS 1
/*
 * Fragment of a POW status-load helper (the enclosing function's
 * signature and local declarations are not visible in this chunk):
 * targets the current core, requests the current tag ("get_cur"),
 * and issues the CSR read that fetches the status response.
 * NOTE(review): statements are split mid-expression by extraction.
 */
1022 load_addr.
sstatus.coreid = cvmx_get_core_num();
1023 load_addr.
sstatus.get_cur = 1;
1024 load_resp.
u64 = cvmx_read_csr(load_addr.
u64);
/*
 * Returns the WQE pointer the POW currently associates with this core:
 * issues a status load with both "get_cur" and "get_wqp" set, then
 * translates the physical address in the response (s_sstatus4.wqp)
 * back to a usable pointer.
 * NOTE(review): the local declarations and the start of the load_addr
 * initialization (original lines 1040-1047) are missing from this
 * chunk; restore from the upstream header.
 */
1039 static inline cvmx_wqe_t *cvmx_pow_get_current_wqp(
void)
1048 load_addr.
sstatus.coreid = cvmx_get_core_num();
1049 load_addr.
sstatus.get_cur = 1;
1050 load_addr.
sstatus.get_wqp = 1;
1051 load_resp.
u64 = cvmx_read_csr(load_addr.
u64);
1052 return (cvmx_wqe_t *) cvmx_phys_to_ptr(load_resp.
s_sstatus4.wqp);
/*
 * Fallback definition: read hardware register 30 via RDHWR into dest.
 * NOTE(review): presumably this is the core's tag-switch-completion
 * flag (it feeds "switch_complete" below) — confirm against the
 * OCTEON hardware manual.  The matching #endif is not visible here.
 */
1055 #ifndef CVMX_MF_CHORD
1056 #define CVMX_MF_CHORD(dest) CVMX_RDHWR(dest, 30)
/*
 * Debug helper: warns when the named caller was entered while a tag
 * switch is still in flight.  "switch_complete" is presumably loaded
 * from CVMX_MF_CHORD in the lines (1065-1067) missing from this chunk.
 *
 * @function: name of the calling function, for the warning text.
 */
1064 static inline void __cvmx_pow_warn_if_pending_switch(
const char *
function)
1068 if (!switch_complete)
1069 pr_warning(
"%s called with tag switch in progress\n",
function);
/*
 * Busy-waits for any pending tag switch on this core to complete.
 * After roughly 2^31 cycles without completion it warns about a
 * possible deadlock; start_cycle is then pushed far into the past
 * (-MAX_CYCLES - 1) so the warning does not immediately re-fire.
 * NOTE(review): the polling loop itself (original lines 1082-1085)
 * and the closing braces are missing from this chunk.
 */
1077 static inline void cvmx_pow_tag_sw_wait(
void)
1079 const uint64_t MAX_CYCLES = 1ull << 31;
1081 uint64_t start_cycle = cvmx_get_cycle();
1086 if (
unlikely(cvmx_get_cycle() > start_cycle + MAX_CYCLES)) {
1087 pr_warning(
"Tag switch is taking a long time, "
1088 "possible deadlock\n");
1089 start_cycle = -MAX_CYCLES - 1;
/*
 * Synchronous work request that does NOT wait for a pending tag switch
 * first: builds an IO-space work-request address, reads the POW
 * response CSR, and converts the returned physical WQE address to a
 * pointer.  The "no_work" branch body (original lines 1123-1124) is
 * missing here — presumably it returns NULL; confirm upstream.
 */
1105 static inline cvmx_wqe_t *cvmx_pow_work_request_sync_nocheck(
cvmx_pow_wait_t
1112 __cvmx_pow_warn_if_pending_switch(__func__);
1116 ptr.
swork.is_io = 1;
1120 result.
u64 = cvmx_read_csr(ptr.
u64);
1122 if (result.
s_work.no_work)
1125 return (cvmx_wqe_t *) cvmx_phys_to_ptr(result.
s_work.addr);
/*
 * Checked synchronous work request: warns if a tag switch is pending,
 * waits for it to finish, then delegates to the _nocheck variant.
 *
 * @wait: whether the POW should wait for work to become available.
 */
1139 static inline cvmx_wqe_t *cvmx_pow_work_request_sync(
cvmx_pow_wait_t wait)
1142 __cvmx_pow_warn_if_pending_switch(__func__);
1145 cvmx_pow_tag_sw_wait();
1146 return cvmx_pow_work_request_sync_nocheck(wait);
/*
 * Fragment of another request helper (signature not visible in this
 * chunk): same warn / wait-for-switch / CSR-read sequence as
 * cvmx_pow_work_request_sync above.
 */
1163 __cvmx_pow_warn_if_pending_switch(__func__);
1166 cvmx_pow_tag_sw_wait();
1173 result.
u64 = cvmx_read_csr(ptr.
u64);
/*
 * Asynchronous work request without the pending-switch wait: the POW
 * will deposit its response into core scratchpad memory at @scr_addr.
 * The scratch address is stored as a 64-bit-word index (hence >> 3),
 * and the request is posted with cvmx_send_single().
 */
1192 static inline void cvmx_pow_work_request_async_nocheck(
int scr_addr,
1198 __cvmx_pow_warn_if_pending_switch(__func__);
1201 data.
s.scraddr = scr_addr >> 3;
1205 cvmx_send_single(data.
u64);
/*
 * Checked asynchronous work request: warns on a pending tag switch,
 * waits for it to complete, then delegates to the _nocheck variant.
 */
1221 static inline void cvmx_pow_work_request_async(
int scr_addr,
1225 __cvmx_pow_warn_if_pending_switch(__func__);
1228 cvmx_pow_tag_sw_wait();
1229 cvmx_pow_work_request_async_nocheck(scr_addr, wait);
/*
 * Retrieves the response of an earlier async work request from
 * scratchpad memory at @scr_addr and converts the physical WQE
 * address to a pointer.  The "no_work" branch body (original lines
 * 1250-1251) is missing here — presumably returns NULL; confirm.
 */
1242 static inline cvmx_wqe_t *cvmx_pow_work_response_async(
int scr_addr)
1247 result.
u64 = cvmx_scratch_read64(scr_addr);
1249 if (result.
s_work.no_work)
1252 return (cvmx_wqe_t *) cvmx_phys_to_ptr(result.
s_work.addr);
1265 static inline uint64_t cvmx_pow_work_invalid(cvmx_wqe_t *wqe_ptr)
1267 return wqe_ptr ==
NULL;
/*
 * Starts a tag switch to (@tag, tag_type) without waiting for any
 * prior switch to finish.  Debug-build warnings cover: being called
 * with the NULL tag type, switching to the identical (type, tag) pair,
 * and switching to NULL (callers should use cvmx_pow_tag_sw_null()).
 * The request is built in tag_req and posted with cvmx_write_io().
 * NOTE(review): the condition guarding the "NULL tag" warning and the
 * rest of the tag_req/ptr setup are missing from this chunk.
 */
1288 static inline void cvmx_pow_tag_sw_nocheck(
uint32_t tag,
1296 __cvmx_pow_warn_if_pending_switch(__func__);
1297 current_tag = cvmx_pow_get_current_tag();
1302 pr_warning(
"%s called with NULL tag\n", __func__);
1303 if ((current_tag.
s.type == tag_type)
1304 && (current_tag.
s.tag == tag))
1305 pr_warning(
"%s called to perform a tag switch to the "
1309 pr_warning(
"%s called to perform a tag switch to "
1310 "NULL. Use cvmx_pow_tag_sw_null() instead\n",
1324 tag_req.
s.tag =
tag;
1334 cvmx_write_io(ptr.
u64, tag_req.
u64);
/*
 * Checked tag switch: warns if a switch is already pending, waits for
 * it to complete, then starts the new switch via the _nocheck variant.
 */
1355 static inline void cvmx_pow_tag_sw(
uint32_t tag,
1359 __cvmx_pow_warn_if_pending_switch(__func__);
1374 cvmx_pow_tag_sw_wait();
1375 cvmx_pow_tag_sw_nocheck(tag, tag_type);
/*
 * Full tag switch (also updates the WQE pointer and group) without
 * waiting for a prior switch.  Debug warnings cover: switching to the
 * identical (type, tag), switching to NULL, and a @wqp that does not
 * match the WQE address the POW has on record.  The 0x80 comparison
 * presumably is a sentinel meaning "skip the WQP check" — confirm
 * against the upstream header.  Request posted via cvmx_write_io().
 */
1398 static inline void cvmx_pow_tag_sw_full_nocheck(cvmx_wqe_t *wqp,
uint32_t tag,
1407 __cvmx_pow_warn_if_pending_switch(__func__);
1408 current_tag = cvmx_pow_get_current_tag();
1412 if ((current_tag.
s.type == tag_type)
1413 && (current_tag.
s.tag == tag))
1414 pr_warning(
"%s called to perform a tag switch to "
1418 pr_warning(
"%s called to perform a tag switch to "
1419 "NULL. Use cvmx_pow_tag_sw_null() instead\n",
1421 if (wqp != cvmx_phys_to_ptr(0x80))
1422 if (wqp != cvmx_pow_get_current_wqp())
1423 pr_warning(
"%s passed WQE(%p) doesn't match "
1424 "the address in the POW(%p)\n",
1426 cvmx_pow_get_current_wqp());
1439 tag_req.
s.tag =
tag;
1453 cvmx_write_io(ptr.
u64, tag_req.
u64);
/*
 * Checked full tag switch: warns on a pending switch, waits for it,
 * then delegates to cvmx_pow_tag_sw_full_nocheck().
 */
1476 static inline void cvmx_pow_tag_sw_full(cvmx_wqe_t *wqp,
uint32_t tag,
1481 __cvmx_pow_warn_if_pending_switch(__func__);
1488 cvmx_pow_tag_sw_wait();
1489 cvmx_pow_tag_sw_full_nocheck(wqp, tag, tag_type, group);
/*
 * Switches this core to the NULL tag without waiting for a prior
 * switch; warns if the current tag is presumably already NULL (the
 * guarding condition is in lines missing from this chunk).  Posted
 * via cvmx_write_io().
 */
1500 static inline void cvmx_pow_tag_sw_null_nocheck(
void)
1507 __cvmx_pow_warn_if_pending_switch(__func__);
1508 current_tag = cvmx_pow_get_current_tag();
1513 pr_warning(
"%s called when we already have a "
1527 cvmx_write_io(ptr.
u64, tag_req.
u64);
/*
 * Checked switch to the NULL tag: warns on a pending switch, waits
 * for it, then delegates to the _nocheck variant.
 */
1540 static inline void cvmx_pow_tag_sw_null(
void)
1543 __cvmx_pow_warn_if_pending_switch(__func__);
1550 cvmx_pow_tag_sw_wait();
1551 cvmx_pow_tag_sw_null_nocheck();
/*
 * Submits a work queue entry to the POW: fills the add-work request
 * with the tag, QoS level and group, points the IO address at the
 * physical address of @wqp, and posts it with cvmx_write_io().
 * NOTE(review): tag_req/ptr initialization lines are missing from
 * this chunk.
 */
1569 static inline void cvmx_pow_work_submit(cvmx_wqe_t *wqp,
uint32_t tag,
1584 tag_req.
s.tag =
tag;
1585 tag_req.
s.qos = qos;
1586 tag_req.
s.grp = grp;
1592 ptr.
sio.offset = cvmx_ptr_to_phys(wqp);
1599 cvmx_write_io(ptr.
u64, tag_req.
u64);
/*
 * Fragment of a group-mask setter (enclosing function not visible in
 * this chunk): writes the caller's group bitmask into the register
 * image before it is presumably written back to hardware.
 */
1618 grp_msk.s.grp_msk =
mask;
/*
 * Programs the eight per-QoS static priorities for @core_num.  A
 * priority of 0xF marks a QoS level as not statically prioritized.
 * The loop builds a bitmask of the priority levels in use; the
 * popcount identity (prio_mask == (1 << popcount) - 1 only for masks
 * contiguous from bit 0) detects gaps, which the hardware presumably
 * does not support — hence the pr_err().
 * NOTE(review): grp_msk declaration and the final register write-back
 * are in lines missing from this chunk.
 */
1635 static inline void cvmx_pow_set_priority(
uint64_t core_num,
1643 grp_msk.s.qos0_pri = priority[0];
1644 grp_msk.s.qos1_pri = priority[1];
1645 grp_msk.s.qos2_pri = priority[2];
1646 grp_msk.s.qos3_pri = priority[3];
1647 grp_msk.s.qos4_pri = priority[4];
1648 grp_msk.s.qos5_pri = priority[5];
1649 grp_msk.s.qos6_pri = priority[6];
1650 grp_msk.s.qos7_pri = priority[7];
1657 for (i = 0; i < 8; i++)
1658 if (priority[i] != 0xF)
1659 prio_mask |= 1 << priority[
i];
1661 if (prio_mask ^ ((1 << cvmx_pop(prio_mask)) - 1)) {
1662 pr_err(
"POW static priorities should be "
1663 "contiguous (0x%llx)\n",
1664 (
unsigned long long)prio_mask);
/*
 * Performs a tag switch combined with a deschedule, without waiting
 * for a prior switch.  Debug warnings: descheduling from the NULL
 * state is not allowed, and either the current or the target tag must
 * be ATOMIC (the guarding conditions are in lines missing from this
 * chunk).  @no_sched prevents the POW from rescheduling the work.
 * Request posted with cvmx_write_io().
 */
1714 static inline void cvmx_pow_tag_sw_desched_nocheck(
1725 __cvmx_pow_warn_if_pending_switch(__func__);
1726 current_tag = cvmx_pow_get_current_tag();
1731 pr_warning(
"%s called with NULL tag. Deschedule not "
1732 "allowed from NULL state\n",
1736 pr_warning(
"%s called where neither the before or "
1737 "after tag is ATOMIC\n",
1743 tag_req.
s.tag =
tag;
1746 tag_req.
s.no_sched = no_sched;
1756 cvmx_write_io(ptr.
u64, tag_req.
u64);
/*
 * Checked tag-switch-with-deschedule: warns on a pending switch,
 * waits for it, then delegates to the _nocheck variant.
 */
1800 static inline void cvmx_pow_tag_sw_desched(
uint32_t tag,
1805 __cvmx_pow_warn_if_pending_switch(__func__);
1814 cvmx_pow_tag_sw_wait();
1815 cvmx_pow_tag_sw_desched_nocheck(tag, tag_type, group, no_sched);
/*
 * Deschedules the current work from this core (keeping its tag for
 * later rescheduling).  Debug warning: a deschedule is not expected
 * while holding the NULL tag (guard condition in lines missing from
 * this chunk).  @no_sched prevents the POW from rescheduling the
 * work.  Request posted with cvmx_write_io().
 */
1825 static inline void cvmx_pow_desched(
uint64_t no_sched)
1832 __cvmx_pow_warn_if_pending_switch(__func__);
1833 current_tag = cvmx_pow_get_current_tag();
1838 pr_warning(
"%s called with NULL tag. Deschedule not "
1839 "expected from NULL state\n",
1848 tag_req.
s.no_sched = no_sched;
1858 cvmx_write_io(ptr.
u64, tag_req.
u64);
/*
 * Software tag layout: the top CVMX_TAG_SW_BITS (8) bits of the
 * 32-bit tag are reserved for software use (shift = 32 - 8 = 24).
 */
1872 #define CVMX_TAG_SW_BITS (8)
1873 #define CVMX_TAG_SW_SHIFT (32 - CVMX_TAG_SW_BITS)
/* Software-tag-space identifier used for internal purposes. */
1880 #define CVMX_TAG_SW_BITS_INTERNAL 0x1
/* The low 16 bits of the tag carry a subgroup; 0x1 marks PKO use. */
1893 #define CVMX_TAG_SUBGROUP_MASK 0xFFFF
1894 #define CVMX_TAG_SUBGROUP_SHIFT 16
1895 #define CVMX_TAG_SUBGROUP_PKO 0x1