Linux Kernel 3.7.1
intel.c
#include <linux/init.h>
#include <linux/kernel.h>

#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/thread_info.h>
#include <linux/module.h>
#include <linux/uaccess.h>

#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/msr.h>
#include <asm/bugs.h>
#include <asm/cpu.h>

#ifdef CONFIG_X86_64
#include <linux/topology.h>
#include <asm/numa_64.h>
#endif

#include "cpu.h"

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#endif
static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
{
        u64 misc_enable;

        /* Unmask CPUID levels if masked: */
        if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
                rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);

                if (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) {
                        misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
                        wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
                        c->cpuid_level = cpuid_eax(0);
                        get_cpu_cap(c);
                }
        }
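
        /*
         * Why the re-probe above is needed (per the SDM's description of
         * the IA32_MISC_ENABLE "limit CPUID maxval" bit): while that bit
         * is set, CPUID leaf 0 under-reports the maximum supported
         * standard leaf, hiding newer leaves from legacy OSes. Once the
         * bit is cleared, cpuid_level must be re-read and the feature
         * bits re-probed via get_cpu_cap().
         */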

        if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
            (c->x86 == 0x6 && c->x86_model >= 0x0e))
                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);

        if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64)) {
                unsigned lower_word;

                wrmsr(MSR_IA32_UCODE_REV, 0, 0);
                /* Required by the SDM */
                sync_core();
                rdmsr(MSR_IA32_UCODE_REV, lower_word, c->microcode);
        }
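
        /*
         * The sequence above follows the SDM protocol for reading the
         * microcode revision from IA32_BIOS_SIGN_ID (MSR 0x8B): write 0,
         * execute CPUID (which sync_core() does), then read the MSR. The
         * revision is reported in the high 32 bits (EDX), which rdmsr()
         * stores into c->microcode here.
         */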

        /*
         * Atom erratum AAE44/AAF40/AAG38/AAH41:
         *
         * A race condition between speculative fetches and invalidating
         * a large page.  This is worked around in microcode, but we
         * need the microcode to have already been loaded... so if it is
         * not, recommend a BIOS update and disable large pages.
         */
        if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2 &&
            c->microcode < 0x20e) {
                printk(KERN_WARNING "Atom PSE erratum detected, BIOS microcode update recommended\n");
                clear_cpu_cap(c, X86_FEATURE_PSE);
        }

#ifdef CONFIG_X86_64
        set_cpu_cap(c, X86_FEATURE_SYSENTER32);
#else
        /* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
        if (c->x86 == 15 && c->x86_cache_alignment == 64)
                c->x86_cache_alignment = 128;
#endif

        /* CPUID workaround for 0F33/0F34 CPU */
        if (c->x86 == 0xF && c->x86_model == 0x3
            && (c->x86_mask == 0x3 || c->x86_mask == 0x4))
                c->x86_phys_bits = 36;

        /*
         * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
         * with P/T states and does not stop in deep C-states.
         *
         * It is also reliable across cores and sockets. (but not across
         * cabinets - we turn it off in that case explicitly.)
         */
        if (c->x86_power & (1 << 8)) {
                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
                set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
                if (!check_tsc_unstable())
                        sched_clock_stable = 1;
        }

        /*
         * There is a known erratum on Pentium III and Core Solo
         * and Core Duo CPUs.
         * " Page with PAT set to WC while associated MTRR is UC
         * may consolidate to UC "
         * Because of this erratum, it is better to stick with
         * setting WC in MTRR rather than using PAT on these CPUs.
         *
         * Enable PAT WC only on P4, Core 2 or later CPUs.
         */
        if (c->x86 == 6 && c->x86_model < 15)
                clear_cpu_cap(c, X86_FEATURE_PAT);

#ifdef CONFIG_KMEMCHECK
        /*
         * P4s have a "fast strings" feature which causes single-
         * stepping REP instructions to only generate a #DB on
         * cache-line boundaries.
         *
         * Ingo Molnar reported a Pentium D (model 6) and a Xeon
         * (model 2) with the same problem.
         */
        if (c->x86 == 15) {
                rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);

                if (misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING) {
                        printk(KERN_INFO "kmemcheck: Disabling fast string operations\n");

                        misc_enable &= ~MSR_IA32_MISC_ENABLE_FAST_STRING;
                        wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
                }
        }
#endif

        /*
         * If fast string is not enabled in IA32_MISC_ENABLE for any reason,
         * clear the fast string and enhanced fast string CPU capabilities.
         */
        if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
                rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
                if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) {
                        printk(KERN_INFO "Disabled fast string operations\n");
                        setup_clear_cpu_cap(X86_FEATURE_REP_GOOD);
                        setup_clear_cpu_cap(X86_FEATURE_ERMS);
                }
        }
}

#ifdef CONFIG_X86_32
/*
 * Early probe support logic for ppro memory erratum #50
 *
 * This is called before we do cpu ident work
 */

int __cpuinit ppro_with_ram_bug(void)
{
        /* Uses data from early_cpu_detect now */
        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
            boot_cpu_data.x86 == 6 &&
            boot_cpu_data.x86_model == 1 &&
            boot_cpu_data.x86_mask < 8) {
                printk(KERN_INFO "Pentium Pro with Errata#50 detected. Taking evasive action.\n");
                return 1;
        }
        return 0;
}

#ifdef CONFIG_X86_F00F_BUG
static void __cpuinit trap_init_f00f_bug(void)
{
        __set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);

        /*
         * Update the IDT descriptor and reload the IDT so that
         * it uses the read-only mapped virtual address.
         */
        idt_descr.address = fix_to_virt(FIX_F00F_IDT);
        load_idt(&idt_descr);
}
#endif
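
/*
 * How the F00F workaround functions: the "F0 0F C7 C8" byte sequence
 * (lock cmpxchg8b with a register operand) hangs affected Pentiums while
 * they try to deliver #UD. Mapping the IDT through a read-only fixmap
 * makes the erroneous locked IDT access take a page fault instead of
 * wedging the CPU, and the kernel's page-fault path can then redirect
 * to the exception handler that should have run.
 */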

static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c)
{
        /* Bail on the boot CPU; this matters only when called from
           identify_secondary_cpu(). */
        if (!c->cpu_index)
                return;

        /*
         * Mask B, Pentium, but not Pentium MMX
         */
        if (c->x86 == 5 &&
            c->x86_mask >= 1 && c->x86_mask <= 4 &&
            c->x86_model <= 3) {
                /*
                 * Remember we have B step Pentia with bugs
                 */
                WARN_ONCE(1, "WARNING: SMP operation may be unreliable "
                             "with B stepping processors.\n");
        }
}

static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
{
        unsigned long lo, hi;

#ifdef CONFIG_X86_F00F_BUG
        /*
         * All current models of Pentium and Pentium with MMX technology CPUs
         * have the F0 0F bug, which lets nonprivileged users lock up the
         * system.
         * Note that the workaround should only be initialized once...
         */
        c->f00f_bug = 0;
        if (!paravirt_enabled() && c->x86 == 5) {
                static int f00f_workaround_enabled;

                c->f00f_bug = 1;
                if (!f00f_workaround_enabled) {
                        trap_init_f00f_bug();
                        printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n");
                        f00f_workaround_enabled = 1;
                }
        }
#endif

        /*
         * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
         * model 3 mask 3
         */
        if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
                clear_cpu_cap(c, X86_FEATURE_SEP);
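
        /*
         * The test above packs family/model/stepping into one value:
         * family in bits 11:8, model in bits 7:4, stepping in bits 3:0,
         * so 0x633 is family 6, model 3, stepping 3. For example, a
         * Pentium Pro (family 6, model 1) packs to 0x61x < 0x633 and
         * loses the erroneous SEP bit, while later Pentium IIs pack to
         * 0x65x or higher and keep it.
         */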

        /*
         * P4 Xeon errata 037 workaround.
         * Hardware prefetcher may cause stale data to be loaded into the cache.
         */
        if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
                rdmsr(MSR_IA32_MISC_ENABLE, lo, hi);
                if ((lo & MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE) == 0) {
                        printk(KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n");
                        printk(KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n");
                        lo |= MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE;
                        wrmsr(MSR_IA32_MISC_ENABLE, lo, hi);
                }
        }

        /*
         * See if we have a good local APIC by checking for buggy Pentia,
         * i.e. all B steppings and the C2 stepping of P54C when using their
         * integrated APIC (see 11AP erratum in "Pentium Processor
         * Specification Update").
         */
        if (cpu_has_apic && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
            (c->x86_mask < 0x6 || c->x86_mask == 0xb))
                set_cpu_cap(c, X86_FEATURE_11AP);
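
        /*
         * Same packing scheme as the SEP check above: 0x520 is family 5,
         * model 2 (the P54C core), so this matches steppings 0-5 (the
         * B steps) plus stepping 0xb (the C2 step) of that model.
         */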

#ifdef CONFIG_X86_INTEL_USERCOPY
        /*
         * Set up the preferred alignment for movsl bulk memory moves
         */
        switch (c->x86) {
        case 4:         /* 486: untested */
                break;
        case 5:         /* Old Pentia: untested */
                break;
        case 6:         /* PII/PIII only like movsl with 8-byte alignment */
                movsl_mask.mask = 7;
                break;
        case 15:        /* P4 is OK down to 8-byte alignment */
                movsl_mask.mask = 7;
                break;
        }
#endif

#ifdef CONFIG_X86_NUMAQ
        numaq_tsc_disable();
#endif

        intel_smp_check(c);
}
#else
static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
{
}
#endif

static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
        unsigned node;
        int cpu = smp_processor_id();

        /* Don't do the funky fallback heuristics the AMD version employs
           for now. */
        node = numa_cpu_node(cpu);
        if (node == NUMA_NO_NODE || !node_online(node)) {
                /* reuse the value from init_cpu_to_node() */
                node = cpu_to_node(cpu);
        }
        numa_set_node(cpu, node);
#endif
}

/*
 * find out the number of processor cores on the die
 */
static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
{
        unsigned int eax, ebx, ecx, edx;

        if (c->cpuid_level < 4)
                return 1;

        /* Intel has a non-standard dependency on %ecx for this CPUID level. */
        cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
        if (eax & 0x1f)
                return (eax >> 26) + 1;
        else
                return 1;
}
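
/*
 * CPUID leaf 4 (deterministic cache parameters) is indexed by %ecx,
 * hence cpuid_count() rather than cpuid(). In subleaf 0, EAX[4:0] is
 * the cache type (0 means "no more caches", so a zero here implies the
 * leaf is not really populated), and EAX[31:26] is the maximum number
 * of addressable core IDs in the package minus one; e.g. EAX >> 26 == 3
 * reports four cores.
 */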

static void __cpuinit detect_vmx_virtcap(struct cpuinfo_x86 *c)
{
        /* Intel VMX MSR indicated features */
#define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW    0x00200000
#define X86_VMX_FEATURE_PROC_CTLS_VNMI          0x00400000
#define X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS      0x80000000
#define X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC    0x00000001
#define X86_VMX_FEATURE_PROC_CTLS2_EPT          0x00000002
#define X86_VMX_FEATURE_PROC_CTLS2_VPID         0x00000020

        u32 vmx_msr_low, vmx_msr_high, msr_ctl, msr_ctl2;

        clear_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
        clear_cpu_cap(c, X86_FEATURE_VNMI);
        clear_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
        clear_cpu_cap(c, X86_FEATURE_EPT);
        clear_cpu_cap(c, X86_FEATURE_VPID);

        rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high);
        msr_ctl = vmx_msr_high | vmx_msr_low;
        if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW)
                set_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
        if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_VNMI)
                set_cpu_cap(c, X86_FEATURE_VNMI);
        if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS) {
                rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
                      vmx_msr_low, vmx_msr_high);
                msr_ctl2 = vmx_msr_high | vmx_msr_low;
                if ((msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC) &&
                    (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW))
                        set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
                if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT)
                        set_cpu_cap(c, X86_FEATURE_EPT);
                if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VPID)
                        set_cpu_cap(c, X86_FEATURE_VPID);
        }
}
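
/*
 * A note on the rdmsr()/OR pattern above: per the SDM, each VMX control
 * capability MSR reports "allowed 0" settings in its low 32 bits and
 * "allowed 1" settings in its high 32 bits. A control is usable iff it
 * may be set to 1, i.e. its bit appears in the high word; OR-ing the two
 * words is a conservative shortcut that works because any bit that must
 * be 1 (set in the low word) is necessarily also allowed to be 1.
 */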

static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
        unsigned int l2 = 0;

        early_init_intel(c);

        intel_workarounds(c);

        /*
         * Detect the extended topology information if available. This
         * will reinitialise the initial_apicid which will be used
         * in init_intel_cacheinfo()
         */
        detect_extended_topology(c);

        l2 = init_intel_cacheinfo(c);
        if (c->cpuid_level > 9) {
                unsigned eax = cpuid_eax(10);
                /* Check for version and the number of counters */
                if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
                        set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
        }
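
        /*
         * CPUID leaf 0xA (architectural performance monitoring) reports
         * the perfmon version in EAX[7:0] and the number of general-
         * purpose counters per logical CPU in EAX[15:8]; the check above
         * requires a nonzero version and more than one counter.
         */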

        if (cpu_has_xmm2)
                set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
        if (cpu_has_ds) {
                unsigned int l1;
                rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
                if (!(l1 & (1<<11)))
                        set_cpu_cap(c, X86_FEATURE_BTS);
                if (!(l1 & (1<<12)))
                        set_cpu_cap(c, X86_FEATURE_PEBS);
        }
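
        /*
         * IA32_MISC_ENABLE bit 11 is "BTS unavailable" and bit 12 is
         * "PEBS unavailable" (both read-only), so a clear bit means the
         * corresponding debug-store feature is actually present.
         */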

        if (c->x86 == 6 && c->x86_model == 29 && cpu_has_clflush)
                set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR);

#ifdef CONFIG_X86_64
        if (c->x86 == 15)
                c->x86_cache_alignment = c->x86_clflush_size * 2;
        if (c->x86 == 6)
                set_cpu_cap(c, X86_FEATURE_REP_GOOD);
#else
        /*
         * Names for the Pentium II/Celeron processors
         * detectable only by also checking the cache size.
         * Dixon is NOT a Celeron.
         */
        if (c->x86 == 6) {
                char *p = NULL;

                switch (c->x86_model) {
                case 5:
                        if (l2 == 0)
                                p = "Celeron (Covington)";
                        else if (l2 == 256)
                                p = "Mobile Pentium II (Dixon)";
                        break;

                case 6:
                        if (l2 == 128)
                                p = "Celeron (Mendocino)";
                        else if (c->x86_mask == 0 || c->x86_mask == 5)
                                p = "Celeron-A";
                        break;

                case 8:
                        if (l2 == 128)
                                p = "Celeron (Coppermine)";
                        break;
                }

                if (p)
                        strcpy(c->x86_model_id, p);
        }

        if (c->x86 == 15)
                set_cpu_cap(c, X86_FEATURE_P4);
        if (c->x86 == 6)
                set_cpu_cap(c, X86_FEATURE_P3);
#endif

        if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
                /*
                 * let's use the legacy cpuid vector 0x1 and 0x4 for topology
                 * detection.
                 */
                c->x86_max_cores = intel_num_cpu_cores(c);
#ifdef CONFIG_X86_32
                detect_ht(c);
#endif
        }

        /* Work around errata */
        srat_detect_node(c);

        if (cpu_has(c, X86_FEATURE_VMX))
                detect_vmx_virtcap(c);

        /*
         * Initialize MSR_IA32_ENERGY_PERF_BIAS if BIOS did not.
         * x86_energy_perf_policy(8) is available to change it at run-time
         */
        if (cpu_has(c, X86_FEATURE_EPB)) {
                u64 epb;

                rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
                if ((epb & 0xF) == ENERGY_PERF_BIAS_PERFORMANCE) {
                        printk_once(KERN_WARNING "ENERGY_PERF_BIAS:"
                                " Set to 'normal', was 'performance'\n"
                                "ENERGY_PERF_BIAS: View and update with"
                                " x86_energy_perf_policy(8)\n");
                        epb = (epb & ~0xF) | ENERGY_PERF_BIAS_NORMAL;
                        wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
                }
        }
}
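
/*
 * The low nibble of MSR_IA32_ENERGY_PERF_BIAS is a 0-15 hint consumed by
 * hardware power management: ENERGY_PERF_BIAS_PERFORMANCE is 0,
 * ENERGY_PERF_BIAS_NORMAL is 6, and 15 is maximum energy saving. A value
 * of 0 left behind by the BIOS silently biases the CPU toward
 * performance, which is why init_intel() rewrites it to 'normal' and
 * logs the change.
 */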

#ifdef CONFIG_X86_32
static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
        /*
         * Intel PIII Tualatin. This comes in two flavours.
         * One has 256kb of cache, the other 512. We have no way
         * to determine which, so we use a boottime override
         * for the 512kb model, and assume 256 otherwise.
         */
        if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
                size = 256;
        return size;
}
#endif

#define TLB_INST_4K     0x01
#define TLB_INST_4M     0x02
#define TLB_INST_2M_4M  0x03

#define TLB_INST_ALL    0x05
#define TLB_INST_1G     0x06

#define TLB_DATA_4K     0x11
#define TLB_DATA_4M     0x12
#define TLB_DATA_2M_4M  0x13
#define TLB_DATA_4K_4M  0x14

#define TLB_DATA_1G     0x16

#define TLB_DATA0_4K    0x21
#define TLB_DATA0_4M    0x22
#define TLB_DATA0_2M_4M 0x23

#define STLB_4K         0x41
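
/*
 * The encoding here appears to use the high nibble as the TLB class
 * (0x0x instruction, 0x1x data, 0x2x data level 0, 0x4x shared
 * second-level) and the low nibble to distinguish which page sizes the
 * entry covers, which is what the switch in intel_tlb_lookup() keys on.
 */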

static const struct _tlb_table intel_tlb_table[] __cpuinitconst = {
        { 0x01, TLB_INST_4K,     32, " TLB_INST 4 KByte pages, 4-way set associative" },
        { 0x02, TLB_INST_4M,      2, " TLB_INST 4 MByte pages, fully associative" },
        { 0x03, TLB_DATA_4K,     64, " TLB_DATA 4 KByte pages, 4-way set associative" },
        { 0x04, TLB_DATA_4M,      8, " TLB_DATA 4 MByte pages, 4-way set associative" },
        { 0x05, TLB_DATA_4M,     32, " TLB_DATA 4 MByte pages, 4-way set associative" },
        { 0x0b, TLB_INST_4M,      4, " TLB_INST 4 MByte pages, 4-way set associative" },
        { 0x4f, TLB_INST_4K,     32, " TLB_INST 4 KByte pages" },
        { 0x50, TLB_INST_ALL,    64, " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
        { 0x51, TLB_INST_ALL,   128, " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
        { 0x52, TLB_INST_ALL,   256, " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
        { 0x55, TLB_INST_2M_4M,   7, " TLB_INST 2-MByte or 4-MByte pages, fully associative" },
        { 0x56, TLB_DATA0_4M,    16, " TLB_DATA0 4 MByte pages, 4-way set associative" },
        { 0x57, TLB_DATA0_4K,    16, " TLB_DATA0 4 KByte pages, 4-way associative" },
        { 0x59, TLB_DATA0_4K,    16, " TLB_DATA0 4 KByte pages, fully associative" },
        { 0x5a, TLB_DATA0_2M_4M, 32, " TLB_DATA0 2-MByte or 4 MByte pages, 4-way set associative" },
        { 0x5b, TLB_DATA_4K_4M,  64, " TLB_DATA 4 KByte and 4 MByte pages" },
        { 0x5c, TLB_DATA_4K_4M, 128, " TLB_DATA 4 KByte and 4 MByte pages" },
        { 0x5d, TLB_DATA_4K_4M, 256, " TLB_DATA 4 KByte and 4 MByte pages" },
        { 0xb0, TLB_INST_4K,    128, " TLB_INST 4 KByte pages, 4-way set associative" },
        { 0xb1, TLB_INST_2M_4M,   4, " TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way, 4 entries" },
        { 0xb2, TLB_INST_4K,     64, " TLB_INST 4 KByte pages, 4-way set associative" },
        { 0xb3, TLB_DATA_4K,    128, " TLB_DATA 4 KByte pages, 4-way set associative" },
        { 0xb4, TLB_DATA_4K,    256, " TLB_DATA 4 KByte pages, 4-way associative" },
        { 0xba, TLB_DATA_4K,     64, " TLB_DATA 4 KByte pages, 4-way associative" },
        { 0xc0, TLB_DATA_4K_4M,   8, " TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" },
        { 0xca, STLB_4K,        512, " STLB 4 KByte pages, 4-way associative" },
        { 0x00, 0, 0 }
};

static void __cpuinit intel_tlb_lookup(const unsigned char desc)
{
        unsigned char k;

        if (desc == 0)
                return;

        /* look up this descriptor in the table */
        for (k = 0; intel_tlb_table[k].descriptor != desc &&
             intel_tlb_table[k].descriptor != 0; k++)
                ;

        if (intel_tlb_table[k].tlb_type == 0)
                return;

        switch (intel_tlb_table[k].tlb_type) {
        case STLB_4K:
                if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_INST_ALL:
                if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_INST_4K:
                if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_INST_4M:
                if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_INST_2M_4M:
                if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_DATA_4K:
        case TLB_DATA0_4K:
                if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_DATA_4M:
        case TLB_DATA0_4M:
                if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_DATA_2M_4M:
        case TLB_DATA0_2M_4M:
                if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_DATA_4K_4M:
                if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
                break;
        }
}

static void __cpuinit intel_tlb_flushall_shift_set(struct cpuinfo_x86 *c)
{
        if (!cpu_has_invlpg) {
                tlb_flushall_shift = -1;
                return;
        }
        switch ((c->x86 << 8) + c->x86_model) {
        case 0x60f: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
        case 0x616: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
        case 0x617: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
        case 0x61d: /* six-core 45 nm xeon "Dunnington" */
                tlb_flushall_shift = -1;
                break;
        case 0x61a: /* 45 nm nehalem, "Bloomfield" */
        case 0x61e: /* 45 nm nehalem, "Lynnfield" */
        case 0x625: /* 32 nm nehalem, "Clarkdale" */
        case 0x62c: /* 32 nm nehalem, "Gulftown" */
        case 0x62e: /* 45 nm nehalem-ex, "Beckton" */
        case 0x62f: /* 32 nm Xeon E7 */
                tlb_flushall_shift = 6;
                break;
        case 0x62a: /* SandyBridge */
        case 0x62d: /* SandyBridge, "Romley-EP" */
                tlb_flushall_shift = 5;
                break;
        case 0x63a: /* Ivybridge */
                tlb_flushall_shift = 1;
                break;
        default:
                tlb_flushall_shift = 6;
        }
}
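
/*
 * How tlb_flushall_shift is consumed (by the flush_tlb_*range() paths
 * in arch/x86/mm/tlb.c): -1 means ranged flushing is disabled and any
 * range flush falls back to a full TLB flush; otherwise the TLB entry
 * count shifted right by this value gives the page-count threshold
 * above which a full flush is considered cheaper than per-page invlpg,
 * so a larger shift biases toward full flushes.
 */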

static void __cpuinit intel_detect_tlb(struct cpuinfo_x86 *c)
{
        int i, j, n;
        unsigned int regs[4];
        unsigned char *desc = (unsigned char *)regs;

        if (c->cpuid_level < 2)
                return;

        /* Number of times to iterate */
        n = cpuid_eax(2) & 0xFF;

        for (i = 0 ; i < n ; i++) {
                cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

                /* If bit 31 is set, this is an unknown format */
                for (j = 0 ; j < 3 ; j++)
                        if (regs[j] & (1 << 31))
                                regs[j] = 0;

                /* Byte 0 is level count, not a descriptor */
                for (j = 1 ; j < 16 ; j++)
                        intel_tlb_lookup(desc[j]);
        }
        intel_tlb_flushall_shift_set(c);
}

static const struct cpu_dev __cpuinitconst intel_cpu_dev = {
        .c_vendor       = "Intel",
        .c_ident        = { "GenuineIntel" },
#ifdef CONFIG_X86_32
        .c_models = {
                { .vendor = X86_VENDOR_INTEL, .family = 4, .model_names =
                  {
                          [0] = "486 DX-25/33",
                          [1] = "486 DX-50",
                          [2] = "486 SX",
                          [3] = "486 DX/2",
                          [4] = "486 SL",
                          [5] = "486 SX/2",
                          [7] = "486 DX/2-WB",
                          [8] = "486 DX/4",
                          [9] = "486 DX/4-WB"
                  }
                },
                { .vendor = X86_VENDOR_INTEL, .family = 5, .model_names =
                  {
                          [0] = "Pentium 60/66 A-step",
                          [1] = "Pentium 60/66",
                          [2] = "Pentium 75 - 200",
                          [3] = "OverDrive PODP5V83",
                          [4] = "Pentium MMX",
                          [7] = "Mobile Pentium 75 - 200",
                          [8] = "Mobile Pentium MMX"
                  }
                },
                { .vendor = X86_VENDOR_INTEL, .family = 6, .model_names =
                  {
                          [0] = "Pentium Pro A-step",
                          [1] = "Pentium Pro",
                          [3] = "Pentium II (Klamath)",
                          [4] = "Pentium II (Deschutes)",
                          [5] = "Pentium II (Deschutes)",
                          [6] = "Mobile Pentium II",
                          [7] = "Pentium III (Katmai)",
                          [8] = "Pentium III (Coppermine)",
                          [10] = "Pentium III (Cascades)",
                          [11] = "Pentium III (Tualatin)",
                  }
                },
                { .vendor = X86_VENDOR_INTEL, .family = 15, .model_names =
                  {
                          [0] = "Pentium 4 (Unknown)",
                          [1] = "Pentium 4 (Willamette)",
                          [2] = "Pentium 4 (Northwood)",
                          [4] = "Pentium 4 (Foster)",
                          [5] = "Pentium 4 (Foster)",
                  }
                },
        },
        .c_size_cache   = intel_size_cache,
#endif
        .c_detect_tlb   = intel_detect_tlb,
        .c_early_init   = early_init_intel,
        .c_init         = init_intel,
        .c_x86_vendor   = X86_VENDOR_INTEL,
};

cpu_dev_register(intel_cpu_dev);