#include <linux/kernel.h>
#include <linux/string.h>
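
/* Address-building helpers for 32 bit pointers (KSEG0). */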
#define CVMX_MIPS32_SPACE_KSEG0 1l
#define CVMX_ADD_SEG32(segment, add) \
	(((int32_t)segment << 31) | (int32_t)(add))

#define CVMX_IO_SEG CVMX_MIPS_SPACE_XKPHYS

/* These macros simplify the process of creating common I/O addresses. */
#define CVMX_ADD_SEG(segment, add) \
	((((uint64_t)segment) << 62) | (add))
#ifndef CVMX_ADD_IO_SEG
#define CVMX_ADD_IO_SEG(add) CVMX_ADD_SEG(CVMX_IO_SEG, (add))
#endif
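
/*
 * Example (illustrative): CVMX_ADD_IO_SEG(register_offset) sets address
 * bits 63:62 to CVMX_IO_SEG (CVMX_MIPS_SPACE_XKPHYS), turning a device
 * register offset into a full 64 bit address the hardware accepts.
 */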

#ifndef CVMX_ENABLE_DEBUG_PRINTS
#define CVMX_ENABLE_DEBUG_PRINTS 1
#endif

#if CVMX_ENABLE_DEBUG_PRINTS
#define cvmx_dprintf printk
#else
#define cvmx_dprintf(...) {}
#endif

#define CVMX_MAX_CORES (16)
#define CVMX_CACHE_LINE_SIZE (128)
#define CVMX_CACHE_LINE_MASK (CVMX_CACHE_LINE_SIZE - 1)
#define CVMX_CACHE_LINE_ALIGNED __attribute__ ((aligned(CVMX_CACHE_LINE_SIZE)))
#define CAST64(v) ((long long)(long)(v))
#define CASTPTR(type, v) ((type *)(long)(v))

/* Return the processor ID by reading COP0 register 15 (PRId). */
static inline uint32_t cvmx_get_proc_id(void)
{
	uint32_t id;
	asm("mfc0 %0, $15,0" : "=r"(id));
	return id;
}

#define CVMX_TMP_STR(x) CVMX_TMP_STR2(x)
#define CVMX_TMP_STR2(x) #x

/* Build a bit mask with the low "bits" bits set. */
static inline uint64_t cvmx_build_mask(uint64_t bits)
{
	return ~((~0x0ull) << bits);
}
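
/* For example, cvmx_build_mask(8) yields 0xff and cvmx_build_mask(40)
   yields the 40 bit physical-address mask used below. */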

/* Build an I/O base address from the 5 bit major and 3 bit sub device IDs. */
static inline uint64_t cvmx_build_io_address(uint64_t major_did, uint64_t sub_did)
{
	return (0x1ull << 48) | (major_did << 43) | (sub_did << 40);
}
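
/* Bit 48 marks the address as I/O space; major_did occupies bits 47:43 and
   sub_did bits 42:40. */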

/* Mask "value" to (high_bit - low_bit + 1) bits and shift it into place. */
static inline uint64_t cvmx_build_bits(uint64_t high_bit,
				       uint64_t low_bit, uint64_t value)
{
	return (value & cvmx_build_mask(high_bit - low_bit + 1)) << low_bit;
}
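
/* For example, cvmx_build_bits(39, 24, v) places v into bits 39:24 of the
   result; any bits of v above bit 15 are discarded. */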

/* Convert a C pointer into the physical address the Octeon hardware uses. */
static inline uint64_t cvmx_ptr_to_phys(void *ptr)
{
	if (sizeof(void *) == 8) {
		/*
		 * 64 bit mode: sign-extended kernel segment addresses
		 * keep only 30 bits; everything else keeps the 40 bit
		 * hardware limit.
		 */
		if ((CAST64(ptr) >> 62) == 3)
			return CAST64(ptr) & cvmx_build_mask(30);
		else
			return CAST64(ptr) & cvmx_build_mask(40);
	} else {
		/* 32 bit mode: strip the KSEG0/KSEG1 segment bits. */
		return (long)(ptr) & 0x1fffffff;
	}
}

/* Convert a hardware physical address back into a C pointer. */
static inline void *cvmx_phys_to_ptr(uint64_t physical_address)
{
	if (sizeof(void *) == 8) {
		/* Just set the top bits, avoiding any TLB ugliness. */
		return CASTPTR(void, CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
						  physical_address));
	} else {
		return CASTPTR(void, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0,
						    physical_address));
	}
}
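
/*
 * With a 64 bit ABI, a read or write of a full 64 bit address is a plain
 * volatile pointer access; the ST/LT parameters name the store/load
 * instruction a 32 bit ABI would need instead.
 */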
#define CVMX_BUILD_WRITE64(TYPE, ST)					\
static inline void cvmx_write64_##TYPE(uint64_t addr, TYPE##_t val)	\
{									\
	*CASTPTR(volatile TYPE##_t, addr) = val;			\
}

#define CVMX_BUILD_READ64(TYPE, LT)					\
static inline TYPE##_t cvmx_read64_##TYPE(uint64_t addr)		\
{									\
	return *CASTPTR(volatile TYPE##_t, addr);			\
}

CVMX_BUILD_WRITE64(uint64, "sd");
#define cvmx_write64 cvmx_write64_uint64

CVMX_BUILD_READ64(uint64, "ld");
#define cvmx_read64 cvmx_read64_uint64
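
/* cvmx_write64()/cvmx_read64() are the plain 64 bit accessors used by the
   CSR helpers below. */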

static inline void cvmx_write_csr(uint64_t csr_addr, uint64_t val)
{
	cvmx_write64(csr_addr, val);
	/* A read back after an RSL register write forces it to complete. */
	if (((csr_addr >> 40) & 0x7ffff) == (0x118))
		cvmx_read64(CVMX_MIO_BOOT_BIST_STAT);
}

static inline void cvmx_send_single(uint64_t data)
{
	const uint64_t CVMX_IOBDMA_SENDSINGLE = 0xffffffffffffa200ull;
	cvmx_write64(CVMX_IOBDMA_SENDSINGLE, data);
}

static inline void cvmx_read_csr_async(uint64_t scraddr, uint64_t csr_addr)
{
	union {
		uint64_t u64;
		struct {
			uint64_t scraddr:8;
			uint64_t len:8;
			uint64_t addr:48;
		} s;
	} addr;

	addr.u64 = csr_addr;
	addr.s.scraddr = scraddr >> 3;
	addr.s.len = 1;
	cvmx_send_single(addr.u64);
}

/* Return true if the chip is an Octeon CN38XX pass 1 part. */
static inline int cvmx_octeon_is_pass1(void)
{
#if OCTEON_IS_COMMON_BINARY()
	return 0;	/* Pass 1 isn't supported for common binaries */
#else
#if OCTEON_IS_MODEL(OCTEON_CN38XX)
	return cvmx_get_proc_id() == OCTEON_CN38XX_PASS1;
#else
	return 0;	/* Built for a non CN38XX chip */
#endif
#endif
}

/* Return the hardware core number of the running program. */
static inline unsigned int cvmx_get_core_num(void)
{
	unsigned int core_num;
	CVMX_RDHWRNV(core_num, 0);
	return core_num;
}

/* Return the 64 bit CPU cycle counter of the local core. */
static inline uint64_t cvmx_get_cycle(void)
{
	uint64_t cycle;
	CVMX_RDHWR(cycle, 31);
	return cycle;
}

/* Busy-wait for the specified number of cycles. */
static inline void cvmx_wait(uint64_t cycles)
{
	uint64_t done = cvmx_get_cycle() + cycles;

	while (cvmx_get_cycle() < done)
		; /* Spin */
}

/* Read the chip-global cycle counter; it does not exist on pass 1 silicon. */
static inline uint64_t cvmx_get_cycle_global(void)
{
	if (cvmx_octeon_is_pass1())
		return 0;
	else
		return cvmx_read64(CVMX_IPD_CLK_COUNT);
}
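
/*
 * Spin on a CSR field until it compares true against "value" with "op",
 * or until roughly timeout_usec microseconds have passed. Evaluates to 0
 * on success and -1 on timeout.
 */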
#define CVMX_WAIT_FOR_FIELD64(address, type, field, op, value, timeout_usec)\
({									\
	int result;							\
	uint64_t done = cvmx_get_cycle() + (uint64_t)timeout_usec *	\
		cvmx_sysinfo_get()->cpu_clock_hz / 1000000;		\
	type c;								\
	while (1) {							\
		c.u64 = cvmx_read_csr(address);				\
		if ((c.s.field) op(value)) {				\
			result = 0;					\
			break;						\
		} else if (cvmx_get_cycle() > done) {			\
			result = -1;					\
			break;						\
		} else							\
			cvmx_wait(100);					\
	}								\
	result;								\
})
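
/*
 * Illustrative use (the address, union and field names are placeholders,
 * not taken from this header):
 *
 *	if (CVMX_WAIT_FOR_FIELD64(example_csr_address,
 *				   union cvmx_example_status, busy, ==, 0,
 *				   10000))
 *		cvmx_dprintf("timed out waiting for the block to go idle\n");
 */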

/* Soft reset the whole chip through the CIU. */
static inline void cvmx_reset_octeon(void)
{
	union cvmx_ciu_soft_rst ciu_soft_rst;
	ciu_soft_rst.u64 = 0;
	ciu_soft_rst.s.soft_rst = 1;
	cvmx_write_csr(CVMX_CIU_SOFT_RST, ciu_soft_rst.u64);
}

/* Return the number of cores available in the chip. */
static inline uint32_t cvmx_octeon_num_cores(void)
{
	uint32_t ciu_fuse = (uint32_t)cvmx_read_csr(CVMX_CIU_FUSE) & 0xffff;
	return cvmx_pop(ciu_fuse);
}

/* Read one byte of fuse data. */
static uint8_t cvmx_fuse_read_byte(int byte_addr)
{
	union cvmx_mio_fus_rcmd read_cmd;

	read_cmd.u64 = 0;
	read_cmd.s.addr = byte_addr;
	read_cmd.s.pend = 1;
	cvmx_write_csr(CVMX_MIO_FUS_RCMD, read_cmd.u64);
	/* Poll until the hardware clears the pending bit. */
	while ((read_cmd.u64 = cvmx_read_csr(CVMX_MIO_FUS_RCMD))
	       && read_cmd.s.pend)
		;
	return read_cmd.s.dat;
}

/* Read a single fuse bit. */
static inline int cvmx_fuse_read(int fuse)
{
	return (cvmx_fuse_read_byte(fuse >> 3) >> (fuse & 0x7)) & 1;
}
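
/* For example, fuse 264 lives in fuse byte 33 (264 >> 3), bit 0 (264 & 0x7);
   it is the fuse tested by cvmx_octeon_model_CN36XX() below. */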

static inline int cvmx_octeon_model_CN36XX(void)
{
	return OCTEON_IS_MODEL(OCTEON_CN38XX)
		&& !cvmx_octeon_is_pass1()
		&& cvmx_fuse_read(264);
}

static inline int cvmx_octeon_zip_present(void)
{
	return octeon_has_feature(OCTEON_FEATURE_ZIP);
}

static inline int cvmx_octeon_dfa_present(void)
{
	if (!OCTEON_IS_MODEL(OCTEON_CN38XX)
	    && !OCTEON_IS_MODEL(OCTEON_CN31XX)
	    && !OCTEON_IS_MODEL(OCTEON_CN58XX))
		return 0;
	else if (OCTEON_IS_MODEL(OCTEON_CN3020))
		return 0;
	else if (cvmx_octeon_is_pass1())
		return 1;
	else
		return !cvmx_fuse_read(120);
}

static inline int cvmx_octeon_crypto_present(void)
{
	return octeon_has_feature(OCTEON_FEATURE_CRYPTO);
}