cvmx-l2c.c
/***********************license start***************
 * Author: Cavium Networks
 *
 * Contact: [email protected]
 * This file is part of the OCTEON SDK
 *
 * Copyright (c) 2003-2010 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this file; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 * or visit http://www.gnu.org/licenses/.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium Networks for more information
 ***********************license end**************************************/

/*
 * Implementation of the Level 2 Cache (L2C) control,
 * measurement, and debugging facilities.
 */

#include <linux/irqflags.h>
#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-l2c.h>
#include <asm/octeon/cvmx-spinlock.h>

/*
 * This spinlock is used internally to ensure that only one core is
 * performing certain L2 operations at a time.
 *
 * NOTE: This only protects calls from within a single application -
 * if multiple applications or operating systems are running, then it
 * is up to the user program to coordinate between them.
 */
cvmx_spinlock_t cvmx_l2c_spinlock;

int cvmx_l2c_get_core_way_partition(uint32_t core)
{
	uint32_t field;

	/* Validate the core number */
	if (core >= cvmx_octeon_num_cores())
		return -1;

	if (OCTEON_IS_MODEL(OCTEON_CN63XX))
		return cvmx_read_csr(CVMX_L2C_WPAR_PPX(core)) & 0xffff;

	/*
	 * Use the lower two bits of the core number to determine the
	 * bit offset of the UMSK[] field in the L2C_SPAR register.
	 */
	field = (core & 0x3) * 8;

	/*
	 * Return the UMSK[] field from the appropriate L2C_SPAR
	 * register based on the core number.
	 */
	switch (core & 0xC) {
	case 0x0:
		return (cvmx_read_csr(CVMX_L2C_SPAR0) & (0xFF << field)) >> field;
	case 0x4:
		return (cvmx_read_csr(CVMX_L2C_SPAR1) & (0xFF << field)) >> field;
	case 0x8:
		return (cvmx_read_csr(CVMX_L2C_SPAR2) & (0xFF << field)) >> field;
	case 0xC:
		return (cvmx_read_csr(CVMX_L2C_SPAR3) & (0xFF << field)) >> field;
	}
	return 0;
}
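
/*
 * Usage sketch (illustrative, not part of the original file): read back the
 * way-partition mask of the calling core. All identifiers used here exist in
 * this file or the cvmx headers:
 *
 *	int mask = cvmx_l2c_get_core_way_partition(cvmx_get_core_num());
 *
 *	if (mask < 0)
 *		cvmx_dprintf("bad core number\n");
 *	else
 *		cvmx_dprintf("UMSK for this core: 0x%x\n", mask);
 */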

int cvmx_l2c_set_core_way_partition(uint32_t core, uint32_t mask)
{
	uint32_t field;
	uint32_t valid_mask;

	valid_mask = (0x1 << cvmx_l2c_get_num_assoc()) - 1;

	mask &= valid_mask;

	/* A UMSK setting which blocks all L2C Ways is an error on some chips */
	if (mask == valid_mask && !OCTEON_IS_MODEL(OCTEON_CN63XX))
		return -1;

	/* Validate the core number */
	if (core >= cvmx_octeon_num_cores())
		return -1;

	if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
		cvmx_write_csr(CVMX_L2C_WPAR_PPX(core), mask);
		return 0;
	}

	/*
	 * Use the lower two bits of core to determine the bit offset of the
	 * UMSK[] field in the L2C_SPAR register.
	 */
	field = (core & 0x3) * 8;

	/*
	 * Assign the new mask setting to the UMSK[] field in the appropriate
	 * L2C_SPAR register based on the core_num.
	 */
	switch (core & 0xC) {
	case 0x0:
		cvmx_write_csr(CVMX_L2C_SPAR0,
			       (cvmx_read_csr(CVMX_L2C_SPAR0) & ~(0xFF << field)) |
			       mask << field);
		break;
	case 0x4:
		cvmx_write_csr(CVMX_L2C_SPAR1,
			       (cvmx_read_csr(CVMX_L2C_SPAR1) & ~(0xFF << field)) |
			       mask << field);
		break;
	case 0x8:
		cvmx_write_csr(CVMX_L2C_SPAR2,
			       (cvmx_read_csr(CVMX_L2C_SPAR2) & ~(0xFF << field)) |
			       mask << field);
		break;
	case 0xC:
		cvmx_write_csr(CVMX_L2C_SPAR3,
			       (cvmx_read_csr(CVMX_L2C_SPAR3) & ~(0xFF << field)) |
			       mask << field);
		break;
	}
	return 0;
}
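
/*
 * Usage sketch (illustrative, not part of the original file). UMSK bits mark
 * ways a core must NOT allocate into, so this blocks the calling core from
 * the lower half of the ways; an all-ones mask would be rejected above on
 * pre-CN63XX parts:
 *
 *	int assoc = cvmx_l2c_get_num_assoc();
 *	uint32_t block_lower = (1u << (assoc / 2)) - 1;
 *
 *	if (cvmx_l2c_set_core_way_partition(cvmx_get_core_num(), block_lower))
 *		cvmx_dprintf("way partition rejected\n");
 */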

int cvmx_l2c_set_hw_way_partition(uint32_t mask)
{
	uint32_t valid_mask;

	valid_mask = (0x1 << cvmx_l2c_get_num_assoc()) - 1;
	mask &= valid_mask;

	/* A UMSK setting which blocks all L2C Ways is an error on some chips */
	if (mask == valid_mask && !OCTEON_IS_MODEL(OCTEON_CN63XX))
		return -1;

	if (OCTEON_IS_MODEL(OCTEON_CN63XX))
		cvmx_write_csr(CVMX_L2C_WPAR_IOBX(0), mask);
	else
		cvmx_write_csr(CVMX_L2C_SPAR4,
			       (cvmx_read_csr(CVMX_L2C_SPAR4) & ~0xFF) | mask);
	return 0;
}

int cvmx_l2c_get_hw_way_partition(void)
{
	if (OCTEON_IS_MODEL(OCTEON_CN63XX))
		return cvmx_read_csr(CVMX_L2C_WPAR_IOBX(0)) & 0xffff;
	else
		return cvmx_read_csr(CVMX_L2C_SPAR4) & (0xFF);
}
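
/*
 * Usage sketch (illustrative, not part of the original file): keep hardware
 * (IOB) allocations out of way 0 and verify the setting by reading it back:
 *
 *	cvmx_l2c_set_hw_way_partition(0x1);
 *	cvmx_dprintf("hw UMSK: 0x%x\n", cvmx_l2c_get_hw_way_partition());
 */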

void cvmx_l2c_config_perf(uint32_t counter, enum cvmx_l2c_event event,
			  uint32_t clear_on_read)
{
	if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
		union cvmx_l2c_pfctl pfctl;

		pfctl.u64 = cvmx_read_csr(CVMX_L2C_PFCTL);

		switch (counter) {
		case 0:
			pfctl.s.cnt0sel = event;
			pfctl.s.cnt0ena = 1;
			pfctl.s.cnt0rdclr = clear_on_read;
			break;
		case 1:
			pfctl.s.cnt1sel = event;
			pfctl.s.cnt1ena = 1;
			pfctl.s.cnt1rdclr = clear_on_read;
			break;
		case 2:
			pfctl.s.cnt2sel = event;
			pfctl.s.cnt2ena = 1;
			pfctl.s.cnt2rdclr = clear_on_read;
			break;
		case 3:
		default:
			pfctl.s.cnt3sel = event;
			pfctl.s.cnt3ena = 1;
			pfctl.s.cnt3rdclr = clear_on_read;
			break;
		}

		cvmx_write_csr(CVMX_L2C_PFCTL, pfctl.u64);
	} else {
		union cvmx_l2c_tadx_prf l2c_tadx_prf;
		int tad;

		cvmx_dprintf("L2C performance counter events are different for this chip, mapping 'event' to cvmx_l2c_tad_event_t\n");
		if (clear_on_read)
			cvmx_dprintf("L2C counters don't support clear on read for this chip\n");

		l2c_tadx_prf.u64 = cvmx_read_csr(CVMX_L2C_TADX_PRF(0));

		switch (counter) {
		case 0:
			l2c_tadx_prf.s.cnt0sel = event;
			break;
		case 1:
			l2c_tadx_prf.s.cnt1sel = event;
			break;
		case 2:
			l2c_tadx_prf.s.cnt2sel = event;
			break;
		default:
		case 3:
			l2c_tadx_prf.s.cnt3sel = event;
			break;
		}
		for (tad = 0; tad < CVMX_L2C_TADS; tad++)
			cvmx_write_csr(CVMX_L2C_TADX_PRF(tad),
				       l2c_tadx_prf.u64);
	}
}

uint64_t cvmx_l2c_read_perf(uint32_t counter)
{
	switch (counter) {
	case 0:
		if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX))
			return cvmx_read_csr(CVMX_L2C_PFC0);
		else {
			uint64_t counter = 0;
			int tad;
			for (tad = 0; tad < CVMX_L2C_TADS; tad++)
				counter += cvmx_read_csr(CVMX_L2C_TADX_PFC0(tad));
			return counter;
		}
	case 1:
		if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX))
			return cvmx_read_csr(CVMX_L2C_PFC1);
		else {
			uint64_t counter = 0;
			int tad;
			for (tad = 0; tad < CVMX_L2C_TADS; tad++)
				counter += cvmx_read_csr(CVMX_L2C_TADX_PFC1(tad));
			return counter;
		}
	case 2:
		if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX))
			return cvmx_read_csr(CVMX_L2C_PFC2);
		else {
			uint64_t counter = 0;
			int tad;
			for (tad = 0; tad < CVMX_L2C_TADS; tad++)
				counter += cvmx_read_csr(CVMX_L2C_TADX_PFC2(tad));
			return counter;
		}
	case 3:
	default:
		if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX))
			return cvmx_read_csr(CVMX_L2C_PFC3);
		else {
			uint64_t counter = 0;
			int tad;
			for (tad = 0; tad < CVMX_L2C_TADS; tad++)
				counter += cvmx_read_csr(CVMX_L2C_TADX_PFC3(tad));
			return counter;
		}
	}
}
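
/*
 * Usage sketch (illustrative, not part of the original file): program counter
 * 0 with an event selector and read a before/after delta around a workload.
 * "event" stands in for a value of the cvmx_l2c_event enum from cvmx-l2c.h,
 * and run_workload() is a placeholder for the code being measured:
 *
 *	uint64_t before, delta;
 *
 *	cvmx_l2c_config_perf(0, event, 0);
 *	before = cvmx_l2c_read_perf(0);
 *	run_workload();
 *	delta = cvmx_l2c_read_perf(0) - before;
 *	cvmx_dprintf("L2C event count: %llu\n", (unsigned long long)delta);
 */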

/**
 * @INTERNAL
 * Helper function used to fault in cache lines for L2 cache locking
 *
 * @addr:   Address of base of memory region to read into L2 cache
 * @len:    Length (in bytes) of region to fault in
 */
static void fault_in(uint64_t addr, int len)
{
	volatile char *ptr;
	volatile char dummy;
	/*
	 * Adjust addr and length so we get all cache lines even for
	 * small ranges spanning two cache lines.
	 */
	len += addr & CVMX_CACHE_LINE_MASK;
	addr &= ~CVMX_CACHE_LINE_MASK;
	ptr = (volatile char *)cvmx_phys_to_ptr(addr);
	/*
	 * Invalidate L1 cache to make sure all loads result in data
	 * being in L2.
	 */
	CVMX_DCACHE_INVALIDATE;
	while (len > 0) {
		dummy += *ptr;
		len -= CVMX_CACHE_LINE_SIZE;
		ptr += CVMX_CACHE_LINE_SIZE;
	}
}
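
/*
 * Worked example (illustrative, not part of the original file), assuming the
 * usual 128-byte OCTEON cache line: for addr = 0x1078 and len = 16 the range
 * crosses a line boundary, so len becomes 16 + 0x78 = 136 and addr rounds
 * down to 0x1000; the loop then touches the lines at 0x1000 and 0x1080.
 */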

int cvmx_l2c_lock_line(uint64_t addr)
{
	if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
		int shift = CVMX_L2C_TAG_ADDR_ALIAS_SHIFT;
		uint64_t assoc = cvmx_l2c_get_num_assoc();
		uint64_t tag = addr >> shift;
		uint64_t index = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, cvmx_l2c_address_to_index(addr) << CVMX_L2C_IDX_ADDR_SHIFT);
		uint64_t way;
		union cvmx_l2c_tadx_tag l2c_tadx_tag;

		CVMX_CACHE_LCKL2(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, addr), 0);

		/* Make sure we were able to lock the line */
		for (way = 0; way < assoc; way++) {
			CVMX_CACHE_LTGL2I(index | (way << shift), 0);
			/* make sure CVMX_L2C_TADX_TAG is updated */
			CVMX_SYNC;
			l2c_tadx_tag.u64 = cvmx_read_csr(CVMX_L2C_TADX_TAG(0));
			if (l2c_tadx_tag.s.valid && l2c_tadx_tag.s.tag == tag)
				break;
		}

		/* Check if a valid line is found */
		if (way >= assoc) {
			/* cvmx_dprintf("ERROR: cvmx_l2c_lock_line: line not found for locking at 0x%llx address\n", (unsigned long long)addr); */
			return -1;
		}

		/* Check if lock bit is not set */
		if (!l2c_tadx_tag.s.lock) {
			/* cvmx_dprintf("ERROR: cvmx_l2c_lock_line: Not able to lock at 0x%llx address\n", (unsigned long long)addr); */
			return -1;
		}
		return way;
	} else {
		int retval = 0;
		union cvmx_l2c_dbg l2cdbg;
		union cvmx_l2c_lckbase lckbase;
		union cvmx_l2c_lckoff lckoff;
		union cvmx_l2t_err l2t_err;

		cvmx_spinlock_lock(&cvmx_l2c_spinlock);

		l2cdbg.u64 = 0;
		lckbase.u64 = 0;
		lckoff.u64 = 0;

		/* Clear l2t error bits if set */
		l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
		l2t_err.s.lckerr = 1;
		l2t_err.s.lckerr2 = 1;
		cvmx_write_csr(CVMX_L2T_ERR, l2t_err.u64);

		addr &= ~CVMX_CACHE_LINE_MASK;

		/* Set this core as debug core */
		l2cdbg.s.ppnum = cvmx_get_core_num();
		CVMX_SYNC;
		cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
		cvmx_read_csr(CVMX_L2C_DBG);

		lckoff.s.lck_offset = 0; /* Only lock 1 line at a time */
		cvmx_write_csr(CVMX_L2C_LCKOFF, lckoff.u64);
		cvmx_read_csr(CVMX_L2C_LCKOFF);

		if (((union cvmx_l2c_cfg)(cvmx_read_csr(CVMX_L2C_CFG))).s.idxalias) {
			int alias_shift = CVMX_L2C_IDX_ADDR_SHIFT + 2 * CVMX_L2_SET_BITS - 1;
			uint64_t addr_tmp = addr ^ (addr & ((1 << alias_shift) - 1)) >> CVMX_L2_SET_BITS;
			lckbase.s.lck_base = addr_tmp >> 7;
		} else {
			lckbase.s.lck_base = addr >> 7;
		}

		lckbase.s.lck_ena = 1;
		cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64);
		/* Make sure it gets there */
		cvmx_read_csr(CVMX_L2C_LCKBASE);

		fault_in(addr, CVMX_CACHE_LINE_SIZE);

		lckbase.s.lck_ena = 0;
		cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64);
		/* Make sure it gets there */
		cvmx_read_csr(CVMX_L2C_LCKBASE);

		/* Stop being debug core */
		cvmx_write_csr(CVMX_L2C_DBG, 0);
		cvmx_read_csr(CVMX_L2C_DBG);

		l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
		if (l2t_err.s.lckerr || l2t_err.s.lckerr2)
			retval = 1; /* We were unable to lock the line */

		cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
		return retval;
	}
}

int cvmx_l2c_lock_mem_region(uint64_t start, uint64_t len)
{
	int retval = 0;

	/* Round start/end to cache line boundaries */
	len += start & CVMX_CACHE_LINE_MASK;
	start &= ~CVMX_CACHE_LINE_MASK;
	len = (len + CVMX_CACHE_LINE_MASK) & ~CVMX_CACHE_LINE_MASK;

	while (len) {
		retval += cvmx_l2c_lock_line(start);
		start += CVMX_CACHE_LINE_SIZE;
		len -= CVMX_CACHE_LINE_SIZE;
	}
	return retval;
}
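
/*
 * Usage sketch (illustrative, not part of the original file): pin a physical
 * region in L2 ahead of a latency-critical phase. phys and size are
 * placeholder values; the meaning of the summed return value is model
 * specific (see cvmx_l2c_lock_line above):
 *
 *	uint64_t phys = 0x400000;
 *	uint64_t size = 4096;
 *
 *	cvmx_l2c_lock_mem_region(phys, size);
 */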

void cvmx_l2c_flush(void)
{
	uint64_t assoc, set;
	uint64_t n_assoc, n_set;

	n_set = cvmx_l2c_get_num_sets();
	n_assoc = cvmx_l2c_get_num_assoc();

	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
		uint64_t address;
		/* These may look like constants, but they aren't... */
		int assoc_shift = CVMX_L2C_TAG_ADDR_ALIAS_SHIFT;
		int set_shift = CVMX_L2C_IDX_ADDR_SHIFT;
		for (set = 0; set < n_set; set++) {
			for (assoc = 0; assoc < n_assoc; assoc++) {
				address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
						       (assoc << assoc_shift) | (set << set_shift));
				CVMX_CACHE_WBIL2I(address, 0);
			}
		}
	} else {
		for (set = 0; set < n_set; set++)
			for (assoc = 0; assoc < n_assoc; assoc++)
				cvmx_l2c_flush_line(assoc, set);
	}
}
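
/*
 * Usage sketch (illustrative, not part of the original file): a full
 * write-back-and-invalidate walks every (set, way) pair, so it is expensive
 * and typically reserved for bring-up or reconfiguration paths:
 *
 *	cvmx_l2c_flush();
 */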

int cvmx_l2c_unlock_line(uint64_t address)
{

	if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
		int assoc;
		union cvmx_l2c_tag tag;
		uint32_t tag_addr;
		uint32_t index = cvmx_l2c_address_to_index(address);

		tag_addr = ((address >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) & ((1 << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) - 1));

		/*
		 * For 63XX, we can flush a line by using the physical
		 * address directly, so finding the cache line used by
		 * the address is only required to provide the proper
		 * return value for the function.
		 */
		for (assoc = 0; assoc < CVMX_L2_ASSOC; assoc++) {
			tag = cvmx_l2c_get_tag(assoc, index);

			if (tag.s.V && (tag.s.addr == tag_addr)) {
				CVMX_CACHE_WBIL2(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, address), 0);
				return tag.s.L;
			}
		}
	} else {
		int assoc;
		union cvmx_l2c_tag tag;
		uint32_t tag_addr;
		uint32_t index = cvmx_l2c_address_to_index(address);

		/* Compute portion of address that is stored in tag */
		tag_addr = ((address >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) & ((1 << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) - 1));
		for (assoc = 0; assoc < CVMX_L2_ASSOC; assoc++) {
			tag = cvmx_l2c_get_tag(assoc, index);

			if (tag.s.V && (tag.s.addr == tag_addr)) {
				cvmx_l2c_flush_line(assoc, index);
				return tag.s.L;
			}
		}
	}
	return 0;
}

int cvmx_l2c_unlock_mem_region(uint64_t start, uint64_t len)
{
	int num_unlocked = 0;
	/* Round start/end to cache line boundaries */
	len += start & CVMX_CACHE_LINE_MASK;
	start &= ~CVMX_CACHE_LINE_MASK;
	len = (len + CVMX_CACHE_LINE_MASK) & ~CVMX_CACHE_LINE_MASK;
	while (len > 0) {
		num_unlocked += cvmx_l2c_unlock_line(start);
		start += CVMX_CACHE_LINE_SIZE;
		len -= CVMX_CACHE_LINE_SIZE;
	}

	return num_unlocked;
}
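
/*
 * Usage sketch (illustrative, not part of the original file): release a
 * region pinned by cvmx_l2c_lock_mem_region (phys and size as in the locking
 * sketch above); the return value counts how many touched lines actually had
 * their lock bit set:
 *
 *	int was_locked = cvmx_l2c_unlock_mem_region(phys, size);
 *
 *	cvmx_dprintf("%d lines were locked\n", was_locked);
 */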

/*
 * Internal l2c tag types. These are converted to a generic structure
 * that can be used on all chips.
 */
union __cvmx_l2c_tag {
	uint64_t u64;
	struct cvmx_l2c_tag_cn50xx {
		uint64_t reserved:40;
		uint64_t V:1;		/* Line valid */
		uint64_t D:1;		/* Line dirty */
		uint64_t L:1;		/* Line locked */
		uint64_t U:1;		/* Use, LRU eviction */
		uint64_t addr:20;	/* Phys mem addr (33..14) */
	} cn50xx;
	struct cvmx_l2c_tag_cn30xx {
		uint64_t reserved:41;
		uint64_t V:1;		/* Line valid */
		uint64_t D:1;		/* Line dirty */
		uint64_t L:1;		/* Line locked */
		uint64_t U:1;		/* Use, LRU eviction */
		uint64_t addr:19;	/* Phys mem addr (33..15) */
	} cn30xx;
	struct cvmx_l2c_tag_cn31xx {
		uint64_t reserved:42;
		uint64_t V:1;		/* Line valid */
		uint64_t D:1;		/* Line dirty */
		uint64_t L:1;		/* Line locked */
		uint64_t U:1;		/* Use, LRU eviction */
		uint64_t addr:18;	/* Phys mem addr (33..16) */
	} cn31xx;
	struct cvmx_l2c_tag_cn38xx {
		uint64_t reserved:43;
		uint64_t V:1;		/* Line valid */
		uint64_t D:1;		/* Line dirty */
		uint64_t L:1;		/* Line locked */
		uint64_t U:1;		/* Use, LRU eviction */
		uint64_t addr:17;	/* Phys mem addr (33..17) */
	} cn38xx;
	struct cvmx_l2c_tag_cn58xx {
		uint64_t reserved:44;
		uint64_t V:1;		/* Line valid */
		uint64_t D:1;		/* Line dirty */
		uint64_t L:1;		/* Line locked */
		uint64_t U:1;		/* Use, LRU eviction */
		uint64_t addr:16;	/* Phys mem addr (33..18) */
	} cn58xx;
	struct cvmx_l2c_tag_cn58xx cn56xx;	/* 2048 sets */
	struct cvmx_l2c_tag_cn31xx cn52xx;	/* 512 sets */
};

/**
 * @INTERNAL
 * Function to read a L2C tag. This code makes the current core
 * the 'debug core' for the L2. This code must only be executed by
 * 1 core at a time.
 *
 * @assoc:  Association (way) of the tag to dump
 * @index:  Index of the cacheline
 *
 * Returns The Octeon model specific tag structure.  This is
 *	   translated by a wrapper function to a generic form that is
 *	   easier for applications to use.
 */
static union __cvmx_l2c_tag __read_l2_tag(uint64_t assoc, uint64_t index)
{

	uint64_t debug_tag_addr = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, (index << 7) + 96);
	uint64_t core = cvmx_get_core_num();
	union __cvmx_l2c_tag tag_val;
	uint64_t dbg_addr = CVMX_L2C_DBG;
	unsigned long flags;

	union cvmx_l2c_dbg debug_val;
	debug_val.u64 = 0;
	/*
	 * For low core count parts, the core number is always small
	 * enough to stay in the correct field and not set any
	 * reserved bits.
	 */
	debug_val.s.ppnum = core;
	debug_val.s.l2t = 1;
	debug_val.s.set = assoc;

	local_irq_save(flags);
	/*
	 * Make sure core is quiet (no prefetches, etc.) before
	 * entering debug mode.
	 */
	CVMX_SYNC;
	/* Flush L1 to make sure debug load misses L1 */
	CVMX_DCACHE_INVALIDATE;

	/*
	 * The following must be done in assembly as when in debug
	 * mode all data loads from L2 return special debug data, not
	 * normal memory contents. Also, interrupts must be disabled,
	 * since if an interrupt occurs while in debug mode the ISR
	 * will get debug data from all its memory reads instead of
	 * the contents of memory.
	 */

	asm volatile (
		".set push\n\t"
		".set mips64\n\t"
		".set noreorder\n\t"
		"sd    %[dbg_val], 0(%[dbg_addr])\n\t"	/* Enter debug mode, wait for store */
		"ld    $0, 0(%[dbg_addr])\n\t"
		"ld    %[tag_val], 0(%[tag_addr])\n\t"	/* Read L2C tag data */
		"sd    $0, 0(%[dbg_addr])\n\t"		/* Exit debug mode, wait for store */
		"ld    $0, 0(%[dbg_addr])\n\t"
		"cache 9, 0($0)\n\t"			/* Invalidate dcache to discard debug data */
		".set pop"
		: [tag_val] "=r" (tag_val)
		: [dbg_addr] "r" (dbg_addr), [dbg_val] "r" (debug_val), [tag_addr] "r" (debug_tag_addr)
		: "memory");

	local_irq_restore(flags);

	return tag_val;
}

union cvmx_l2c_tag cvmx_l2c_get_tag(uint32_t association, uint32_t index)
{
	union cvmx_l2c_tag tag;
	tag.u64 = 0;

	if ((int)association >= cvmx_l2c_get_num_assoc()) {
		cvmx_dprintf("ERROR: cvmx_l2c_get_tag association out of range\n");
		return tag;
	}
	if ((int)index >= cvmx_l2c_get_num_sets()) {
		cvmx_dprintf("ERROR: cvmx_l2c_get_tag index out of range (arg: %d, max: %d)\n",
			     (int)index, cvmx_l2c_get_num_sets());
		return tag;
	}
	if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
		union cvmx_l2c_tadx_tag l2c_tadx_tag;
		uint64_t address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
						(association << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) |
						(index << CVMX_L2C_IDX_ADDR_SHIFT));
		/*
		 * Use L2 cache Index load tag cache instruction, as
		 * hardware loads the virtual tag for the L2 cache
		 * block with the contents of L2C_TAD0_TAG
		 * register.
		 */
		CVMX_CACHE_LTGL2I(address, 0);
		CVMX_SYNC;	/* make sure CVMX_L2C_TADX_TAG is updated */
		l2c_tadx_tag.u64 = cvmx_read_csr(CVMX_L2C_TADX_TAG(0));

		tag.s.V = l2c_tadx_tag.s.valid;
		tag.s.D = l2c_tadx_tag.s.dirty;
		tag.s.L = l2c_tadx_tag.s.lock;
		tag.s.U = l2c_tadx_tag.s.use;
		tag.s.addr = l2c_tadx_tag.s.tag;
	} else {
		union __cvmx_l2c_tag tmp_tag;
		/* __read_l2_tag is intended for internal use only */
		tmp_tag = __read_l2_tag(association, index);

		/*
		 * Convert all tag structure types to generic version,
		 * as it can represent all models.
		 */
		if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) {
			tag.s.V = tmp_tag.cn58xx.V;
			tag.s.D = tmp_tag.cn58xx.D;
			tag.s.L = tmp_tag.cn58xx.L;
			tag.s.U = tmp_tag.cn58xx.U;
			tag.s.addr = tmp_tag.cn58xx.addr;
		} else if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
			tag.s.V = tmp_tag.cn38xx.V;
			tag.s.D = tmp_tag.cn38xx.D;
			tag.s.L = tmp_tag.cn38xx.L;
			tag.s.U = tmp_tag.cn38xx.U;
			tag.s.addr = tmp_tag.cn38xx.addr;
		} else if (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN52XX)) {
			tag.s.V = tmp_tag.cn31xx.V;
			tag.s.D = tmp_tag.cn31xx.D;
			tag.s.L = tmp_tag.cn31xx.L;
			tag.s.U = tmp_tag.cn31xx.U;
			tag.s.addr = tmp_tag.cn31xx.addr;
		} else if (OCTEON_IS_MODEL(OCTEON_CN30XX)) {
			tag.s.V = tmp_tag.cn30xx.V;
			tag.s.D = tmp_tag.cn30xx.D;
			tag.s.L = tmp_tag.cn30xx.L;
			tag.s.U = tmp_tag.cn30xx.U;
			tag.s.addr = tmp_tag.cn30xx.addr;
		} else if (OCTEON_IS_MODEL(OCTEON_CN50XX)) {
			tag.s.V = tmp_tag.cn50xx.V;
			tag.s.D = tmp_tag.cn50xx.D;
			tag.s.L = tmp_tag.cn50xx.L;
			tag.s.U = tmp_tag.cn50xx.U;
			tag.s.addr = tmp_tag.cn50xx.addr;
		} else {
			cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
		}
	}
	return tag;
}
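
/*
 * Usage sketch (illustrative, not part of the original file): walk one set
 * and report which ways hold valid or locked lines:
 *
 *	int way;
 *	uint32_t index = 0;
 *
 *	for (way = 0; way < cvmx_l2c_get_num_assoc(); way++) {
 *		union cvmx_l2c_tag tag = cvmx_l2c_get_tag(way, index);
 *
 *		if (tag.s.V)
 *			cvmx_dprintf("set %u way %d: %s\n", index, way,
 *				     tag.s.L ? "locked" : "unlocked");
 *	}
 */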

uint32_t cvmx_l2c_address_to_index(uint64_t addr)
{
	uint64_t idx = addr >> CVMX_L2C_IDX_ADDR_SHIFT;
	int indxalias = 0;

	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
		union cvmx_l2c_ctl l2c_ctl;
		l2c_ctl.u64 = cvmx_read_csr(CVMX_L2C_CTL);
		indxalias = !l2c_ctl.s.disidxalias;
	} else {
		union cvmx_l2c_cfg l2c_cfg;
		l2c_cfg.u64 = cvmx_read_csr(CVMX_L2C_CFG);
		indxalias = l2c_cfg.s.idxalias;
	}

	if (indxalias) {
		if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
			uint32_t a_14_12 = (idx / (CVMX_L2C_MEMBANK_SELECT_SIZE/(1<<CVMX_L2C_IDX_ADDR_SHIFT))) & 0x7;
			idx ^= idx / cvmx_l2c_get_num_sets();
			idx ^= a_14_12;
		} else {
			idx ^= ((addr & CVMX_L2C_ALIAS_MASK) >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT);
		}
	}
	idx &= CVMX_L2C_IDX_MASK;
	return idx;
}
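
/*
 * Worked example (illustrative, not part of the original file), assuming
 * 128-byte lines (CVMX_L2C_IDX_ADDR_SHIFT == 7) and index aliasing disabled:
 * addr = 0x10080 gives idx = 0x10080 >> 7 = 0x201, which CVMX_L2C_IDX_MASK
 * then truncates to the set count of the running model.
 */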

int cvmx_l2c_get_cache_size_bytes(void)
{
	return cvmx_l2c_get_num_sets() * cvmx_l2c_get_num_assoc() *
		CVMX_CACHE_LINE_SIZE;
}
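
/*
 * Worked example (illustrative, not part of the original file): with 1024
 * sets, 16 ways, and 128-byte lines this returns 1024 * 16 * 128 = 2 MB,
 * matching the full-cache CN63XX row of the fuse table further down.
 */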

/* Return log base 2 of the number of sets in the L2 cache */
int cvmx_l2c_get_set_bits(void)
{
	int l2_set_bits;
	if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX))
		l2_set_bits = 11;	/* 2048 sets */
	else if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN63XX))
		l2_set_bits = 10;	/* 1024 sets */
	else if (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN52XX))
		l2_set_bits = 9;	/* 512 sets */
	else if (OCTEON_IS_MODEL(OCTEON_CN30XX))
		l2_set_bits = 8;	/* 256 sets */
	else if (OCTEON_IS_MODEL(OCTEON_CN50XX))
		l2_set_bits = 7;	/* 128 sets */
	else {
		cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
		l2_set_bits = 11;	/* 2048 sets */
	}
	return l2_set_bits;
}

/* Return the number of sets in the L2 Cache */
int cvmx_l2c_get_num_sets(void)
{
	return 1 << cvmx_l2c_get_set_bits();
}

/* Return the number of associations in the L2 Cache */
int cvmx_l2c_get_num_assoc(void)
{
	int l2_assoc;
	if (OCTEON_IS_MODEL(OCTEON_CN56XX) ||
	    OCTEON_IS_MODEL(OCTEON_CN52XX) ||
	    OCTEON_IS_MODEL(OCTEON_CN58XX) ||
	    OCTEON_IS_MODEL(OCTEON_CN50XX) ||
	    OCTEON_IS_MODEL(OCTEON_CN38XX))
		l2_assoc = 8;
	else if (OCTEON_IS_MODEL(OCTEON_CN63XX))
		l2_assoc = 16;
	else if (OCTEON_IS_MODEL(OCTEON_CN31XX) ||
		 OCTEON_IS_MODEL(OCTEON_CN30XX))
		l2_assoc = 4;
	else {
		cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
		l2_assoc = 8;
	}

	/* Check to see if part of the cache is disabled */
	if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
		union cvmx_mio_fus_dat3 mio_fus_dat3;

		mio_fus_dat3.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT3);
		/*
		 * cvmx_mio_fus_dat3.s.l2c_crip fuses map as follows
		 * <2> will be not used for 63xx
		 * <1> disables 1/2 ways
		 * <0> disables 1/4 ways
		 * They are cumulative, so for 63xx:
		 * <1> <0>
		 *  0   0   16-way 2MB cache
		 *  0   1   12-way 1.5MB cache
		 *  1   0   8-way 1MB cache
		 *  1   1   4-way 512KB cache
		 */

		if (mio_fus_dat3.s.l2c_crip == 3)
			l2_assoc = 4;
		else if (mio_fus_dat3.s.l2c_crip == 2)
			l2_assoc = 8;
		else if (mio_fus_dat3.s.l2c_crip == 1)
			l2_assoc = 12;
	} else {
		union cvmx_l2d_fus3 val;
		val.u64 = cvmx_read_csr(CVMX_L2D_FUS3);
		/*
		 * Using shifts here, as bit position names are
		 * different for each model but they all mean the
		 * same.
		 */
		if ((val.u64 >> 35) & 0x1)
			l2_assoc = l2_assoc >> 2;
		else if ((val.u64 >> 34) & 0x1)
			l2_assoc = l2_assoc >> 1;
	}
	return l2_assoc;
}

/**
 * Flush a line from the L2 cache
 * This should only be called from one core at a time, as this routine
 * sets the core to the 'debug' core in order to flush the line.
 *
 * @assoc:  Association (or way) to flush
 * @index:  Index to flush
 */
void cvmx_l2c_flush_line(uint32_t assoc, uint32_t index)
{
	/* Check the range of the index. */
	if (index > (uint32_t)cvmx_l2c_get_num_sets()) {
		cvmx_dprintf("ERROR: cvmx_l2c_flush_line index out of range.\n");
		return;
	}

	/* Check the range of association. */
	if (assoc > (uint32_t)cvmx_l2c_get_num_assoc()) {
		cvmx_dprintf("ERROR: cvmx_l2c_flush_line association out of range.\n");
		return;
	}

	if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
		uint64_t address;
		/* Create the address based on index and association.
		 * Bits<20:17> select the way of the cache block involved in
		 *	       the operation
		 * Bits<16:7> of the effective address select the index
		 */
		address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
				(assoc << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) |
				(index << CVMX_L2C_IDX_ADDR_SHIFT));
		CVMX_CACHE_WBIL2I(address, 0);
	} else {
		union cvmx_l2c_dbg l2cdbg;

		l2cdbg.u64 = 0;
		if (!OCTEON_IS_MODEL(OCTEON_CN30XX))
			l2cdbg.s.ppnum = cvmx_get_core_num();
		l2cdbg.s.finv = 1;

		l2cdbg.s.set = assoc;
		cvmx_spinlock_lock(&cvmx_l2c_spinlock);
		/*
		 * Enter debug mode, and make sure all other writes
		 * complete before we enter debug mode
		 */
		CVMX_SYNC;
		cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
		cvmx_read_csr(CVMX_L2C_DBG);

		CVMX_PREPARE_FOR_STORE(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
						    index * CVMX_CACHE_LINE_SIZE),
				       0);
		/* Exit debug mode */
		CVMX_SYNC;
		cvmx_write_csr(CVMX_L2C_DBG, 0);
		cvmx_read_csr(CVMX_L2C_DBG);
		cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
	}
}