#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/pci.h>

#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/uv/uv.h>
#include <asm/emergency-restart.h>
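/*
 * The BMC sets a bit in this scratch MMR before sending an NMI: bit 63
 * of SCRATCH5 is the pending flag, and writing the same bit to the
 * CLEAR alias (base + 8) acknowledges it.
 */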
#define UVH_NMI_MMR		UVH_SCRATCH5
#define UVH_NMI_MMR_CLEAR	(UVH_NMI_MMR + 8)
#define UV_NMI_PENDING_MASK	(1UL << 63)
DEFINE_PER_CPU(unsigned long, cpu_last_nmi_count);
#define PR_DEVEL(fmt, args...)	pr_devel("%s: " fmt, __func__, args)
static DEFINE_PER_CPU(int, x2apic_extra_bits);

static u64 gru_start_paddr, gru_end_paddr;
static union uvh_apicid uvh_apicid;
static unsigned int uv_apicid_hibits;

static DEFINE_SPINLOCK(uv_nmi_lock);

static struct apic apic_x2apic_uv_x;
static unsigned long __init uv_early_read_mmr(unsigned long addr)
{
	unsigned long val, *mmr;

	mmr = early_ioremap(UV_LOCAL_MMR_BASE | addr, sizeof(*mmr));
	val = *mmr;
	early_iounmap(mmr, sizeof(*mmr));
	return val;
}
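/*
 * The GRU window is a cacheable region of global address space, so PAT
 * consistency checks must skip it (and the legacy ISA hole) or every
 * write-back mapping of it would be flagged as a conflict.
 */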
static inline bool is_GRU_range(u64 start, u64 end)
{
	return start >= gru_start_paddr && end <= gru_end_paddr;
}

static bool uv_is_untracked_pat_range(u64 start, u64 end)
{
	return is_ISA_range(start, end) || is_GRU_range(start, end);
}
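/*
 * Early hub probing: these run from the MADT OEM check, before the
 * permanent MMR mappings exist, so all reads go through
 * uv_early_read_mmr().  The physical node id is encoded in the NODE_ID
 * MMR; e.g. with n_skt == 5, node_id 0x24 decodes to pnode
 * (0x24 >> 1) & 0x1f == 0x12.
 */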
static int __init early_get_pnodeid(void)
{
	union uvh_node_id_u node_id;
	union uvh_rh_gam_config_mmr_u m_n_config;
	int pnode;

	node_id.v = uv_early_read_mmr(UVH_NODE_ID);
	m_n_config.v = uv_early_read_mmr(UVH_RH_GAM_CONFIG_MMR);
	pnode = (node_id.s.node_id >> 1) & ((1 << m_n_config.s.n_skt) - 1);
	return pnode;
}
static void __init early_get_apic_pnode_shift(void)
{
	uvh_apicid.v = uv_early_read_mmr(UVH_APICID);
	if (!uvh_apicid.v)
		/*
		 * Old bios, use default value
		 */
		uvh_apicid.s.pnode_shift = UV_APIC_PNODE_SHIFT;
}
static void __init uv_set_apicid_hibit(void)
{
	union uv1h_lb_target_physical_apic_id_mask_u apicid_mask;

	if (is_uv1_hub()) {
		apicid_mask.v =
			uv_early_read_mmr(UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK);
		uv_apicid_hibits =
			apicid_mask.s1.bit_enables & UV_APICID_HIBIT_MASK;
	}
}
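/*
 * The ACPI OEM id selects the hub generation ("SGI" is UV1, "SGI2" is
 * UV2) and the OEM table id selects the APIC addressing mode: UVL for
 * legacy APIC ids, UVX for x2apic, UVH for non-unique APIC ids that
 * need the pnode folded into the id.
 */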
static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
	int pnodeid, is_uv1, is_uv2;

	is_uv1 = !strcmp(oem_id, "SGI");
	is_uv2 = !strcmp(oem_id, "SGI2");
	if (is_uv1 || is_uv2) {
		uv_hub_info->hub_revision =
			is_uv1 ? UV1_HUB_REVISION_BASE : UV2_HUB_REVISION_BASE;
		pnodeid = early_get_pnodeid();
		early_get_apic_pnode_shift();
		x86_platform.is_untracked_pat_range = uv_is_untracked_pat_range;
		x86_platform.nmi_init = uv_nmi_init;
		if (!strcmp(oem_table_id, "UVL"))
			uv_system_type = UV_LEGACY_APIC;
		else if (!strcmp(oem_table_id, "UVX"))
			uv_system_type = UV_X2APIC;
		else if (!strcmp(oem_table_id, "UVH")) {
			__this_cpu_write(x2apic_extra_bits,
				pnodeid << uvh_apicid.s.pnode_shift);
			uv_system_type = UV_NON_UNIQUE_APIC;
			uv_set_apicid_hibit();
			return 1;
		}
	}
	return 0;
}
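/*
 * Wake a secondary cpu by writing the hub's IPI_INT MMR instead of the
 * local APIC ICR: an INIT followed by a STARTUP message, with the
 * startup vector taken from start_rip >> 12.
 */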
static int __cpuinit uv_wakeup_secondary(int phys_apicid, unsigned long start_rip)
{
	unsigned long val;
	int pnode;

	pnode = uv_apicid_to_pnode(phys_apicid);
	phys_apicid |= uv_apicid_hibits;
	val = (1UL << UVH_IPI_INT_SEND_SHFT) |
	    (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
	    ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
	    APIC_DM_INIT;
	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);

	val = (1UL << UVH_IPI_INT_SEND_SHFT) |
	    (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
	    ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
	    APIC_DM_STARTUP;
	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);

	atomic_set(&init_deasserted, 1);

	return 0;
}
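/*
 * IPIs are delivered by the hub: look up the target's APIC id and
 * pnode, then hand the vector to uv_hub_send_ipi().
 */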
static void uv_send_IPI_one(int cpu, int vector)
{
	unsigned long apicid;
	int pnode;

	apicid = per_cpu(x86_cpu_to_apicid, cpu);
	pnode = uv_apicid_to_pnode(apicid);
	uv_hub_send_ipi(pnode, apicid, vector);
}
static void uv_send_IPI_mask(const struct cpumask *mask, int vector)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		uv_send_IPI_one(cpu, vector);
}

static void uv_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
	unsigned int this_cpu = smp_processor_id();
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (cpu != this_cpu)
			uv_send_IPI_one(cpu, vector);
	}
}
static void uv_send_IPI_allbutself(int vector)
{
	unsigned int this_cpu = smp_processor_id();
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		if (cpu != this_cpu)
			uv_send_IPI_one(cpu, vector);
	}
}
static void uv_send_IPI_all(int vector)
{
	uv_send_IPI_mask(cpu_online_mask, vector);
}
static int uv_apic_id_valid(int apicid)
{
	return 1;
}
static int uv_apic_id_registered(void)
{
	return 1;
}
static void uv_init_apic_ldr(void)
{
}
static int
uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
			  const struct cpumask *andmask,
			  unsigned int *apicid)
{
	int unsigned cpu;

	/*
	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
	 * May as well be the first.
	 */
	for_each_cpu_and(cpu, cpumask, andmask) {
		if (cpumask_test_cpu(cpu, cpu_online_mask))
			break;
	}

	if (likely(cpu < nr_cpu_ids)) {
		*apicid = per_cpu(x86_cpu_to_apicid, cpu) | uv_apicid_hibits;
		return 0;
	}

	return -EINVAL;
}
static unsigned int x2apic_get_apic_id(unsigned long x)
{
	unsigned int id;

	WARN_ON(preemptible() && num_online_cpus() > 1);
	id = x | __get_cpu_var(x2apic_extra_bits);

	return id;
}
static unsigned long set_apic_id(unsigned int id)
{
	unsigned long x;

	/* maskout x2apic_extra_bits ? */
	x = id;
	return x;
}
static unsigned int uv_read_apic_id(void)
{
	return x2apic_get_apic_id(apic_read(APIC_ID));
}
static int uv_phys_pkg_id(int initial_apicid, int index_msb)
{
	return uv_read_apic_id() >> index_msb;
}
static void uv_send_IPI_self(int vector)
{
	apic_write(APIC_SELF_IPI, vector);
}
static int uv_probe(void)
{
	return apic == &apic_x2apic_uv_x;
}
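/*
 * UV apic driver: mostly the generic x2apic MSR accessors, with
 * UV-specific id handling and hub-based IPI delivery.  Callbacks that
 * are never used on UV stay NULL.
 */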
static struct apic __refdata apic_x2apic_uv_x = {

	.name				= "UV large system",
	.probe				= uv_probe,
	.acpi_madt_oem_check		= uv_acpi_madt_oem_check,
	.apic_id_valid			= uv_apic_id_valid,
	.apic_id_registered		= uv_apic_id_registered,

	.target_cpus			= online_target_cpus,
	.check_apicid_used		= NULL,
	.check_apicid_present		= NULL,
	.vector_allocation_domain	= default_vector_allocation_domain,
	.init_apic_ldr			= uv_init_apic_ldr,

	.ioapic_phys_id_map		= NULL,
	.setup_apic_routing		= NULL,
	.multi_timer_check		= NULL,
	.cpu_present_to_apicid		= default_cpu_present_to_apicid,
	.apicid_to_cpu_present		= NULL,
	.setup_portio_remap		= NULL,
	.check_phys_apicid_present	= default_check_phys_apicid_present,
	.enable_apic_mode		= NULL,
	.phys_pkg_id			= uv_phys_pkg_id,
	.mps_oem_check			= NULL,

	.get_apic_id			= x2apic_get_apic_id,
	.set_apic_id			= set_apic_id,
	.apic_id_mask			= 0xFFFFFFFFu,

	.cpu_mask_to_apicid_and		= uv_cpu_mask_to_apicid_and,

	.send_IPI_mask			= uv_send_IPI_mask,
	.send_IPI_mask_allbutself	= uv_send_IPI_mask_allbutself,
	.send_IPI_allbutself		= uv_send_IPI_allbutself,
	.send_IPI_all			= uv_send_IPI_all,
	.send_IPI_self			= uv_send_IPI_self,

	.wakeup_secondary_cpu		= uv_wakeup_secondary,
	.wait_for_init_deassert		= NULL,
	.smp_callin_clear_local_apic	= NULL,
	.inquire_remote_apic		= NULL,

	.read				= native_apic_msr_read,
	.write				= native_apic_msr_write,
	.eoi_write			= native_apic_msr_eoi_write,
	.icr_read			= native_x2apic_icr_read,
	.icr_write			= native_x2apic_icr_write,
	.wait_icr_idle			= native_x2apic_wait_icr_idle,
	.safe_wait_icr_idle		= native_safe_x2apic_wait_icr_idle,
};
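/*
 * On UV_NON_UNIQUE_APIC systems the pnode is folded into the high bits
 * of the APIC id; x2apic_extra_bits holds the per-cpu value that
 * x2apic_get_apic_id() ORs back in.
 */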
static __cpuinit void set_x2apic_extra_bits(int pnode)
{
	__this_cpu_write(x2apic_extra_bits, pnode << uvh_apicid.s.pnode_shift);
}
static __init int boot_pnode_to_blade(int pnode)
{
	int blade;

	for (blade = 0; blade < uv_num_possible_blades(); blade++)
		if (pnode == uv_blade_info[blade].pnode)
			return blade;
	BUG();
}
#define DEST_SHIFT UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT
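/*
 * The ALIAS210 overlay/redirect MMR pairs describe aliases of low
 * memory; an enabled alias with base 0 gives the lowmem redirect base
 * and size used for remapping below.
 */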
static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
{
	union uvh_rh_gam_alias210_overlay_config_2_mmr_u alias;
	union uvh_rh_gam_alias210_redirect_config_2_mmr_u redirect;
	int i;

	for (i = 0; i < ARRAY_SIZE(redir_addrs); i++) {
		alias.v = uv_read_local_mmr(redir_addrs[i].alias);
		if (alias.s.enable && alias.s.base == 0) {
			*size = (1UL << alias.s.m_alias);
			redirect.v = uv_read_local_mmr(redir_addrs[i].redirect);
			*base = (unsigned long)redirect.s.dest_base << DEST_SHIFT;
			return;
		}
	}
	*base = *size = 0;
}
enum map_type {map_wb, map_uc};

static __init void map_high(char *id, unsigned long base, int pshift,
			int bshift, int max_pnode, enum map_type map_type)
{
	unsigned long bytes, paddr;

	paddr = base << pshift;
	bytes = (1UL << bshift) * (max_pnode + 1);
	printk(KERN_INFO "UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr,
						paddr + bytes);
	if (map_type == map_uc)
		init_extra_mapping_uc(paddr, bytes);
	else
		init_extra_mapping_wb(paddr, bytes);
}
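/*
 * GRU space is accessed with ordinary loads and stores, so it is
 * mapped write-back (map_wb); MMR and MMIOH space are device registers
 * and are mapped uncached (map_uc).  Each window covers all pnodes up
 * to max_pnode: bytes = (1UL << bshift) * (max_pnode + 1).
 */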
static __init void map_gru_high(int max_pnode)
{
	union uvh_rh_gam_gru_overlay_config_mmr_u gru;
	int shift = UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT;

	gru.v = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR);
	if (gru.s.enable) {
		map_high("GRU", gru.s.base, shift, shift, max_pnode, map_wb);
		gru_start_paddr = ((u64)gru.s.base << shift);
		gru_end_paddr = gru_start_paddr + (1UL << shift) * (max_pnode + 1);
	}
}
static __init void map_mmr_high(int max_pnode)
{
	union uvh_rh_gam_mmr_overlay_config_mmr_u mmr;
	int shift = UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT;

	mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR);
	if (mmr.s.enable)
		map_high("MMR", mmr.s.base, shift, shift, max_pnode, map_uc);
}
static __init void map_mmioh_high(int max_pnode)
{
	union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh;
	int shift;

	mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR);
	if (is_uv1_hub() && mmioh.s1.enable) {
		shift = UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;
		map_high("MMIOH", mmioh.s1.base, shift, mmioh.s1.m_io,
			max_pnode, map_uc);
	}
	if (is_uv2_hub() && mmioh.s2.enable) {
		shift = UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;
		map_high("MMIOH", mmioh.s2.base, shift, mmioh.s2.m_io,
			max_pnode, map_uc);
	}
}
static __init void map_low_mmrs(void)
{
	init_extra_mapping_uc(UV_GLOBAL_MMR32_BASE, UV_GLOBAL_MMR32_SIZE);
	init_extra_mapping_uc(UV_LOCAL_MMR_BASE, UV_LOCAL_MMR_SIZE);
}
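/*
 * Ask the BIOS for the RTC base frequency; if the call fails or the
 * value is implausible, fall back to a hard-coded guess.
 */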
static __init void uv_rtc_init(void)
{
	long status;
	u64 ticks_per_sec;

	status = uv_bios_freq_base(BIOS_FREQ_BASE_REALTIME_CLOCK,
					&ticks_per_sec);
	if (status != BIOS_STATUS_SUCCESS || ticks_per_sec < 100000) {
		printk(KERN_WARNING
			"unable to determine platform RTC clock frequency, "
			"guessing.\n");
		/* BIOS gives wrong value for clock frequency, so guess */
		sn_rtc_cycles_per_second = 1000000000000UL / 30000UL;
	} else
		sn_rtc_cycles_per_second = ticks_per_sec;
}
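/*
 * SCIR heartbeat: each cpu periodically flips its heartbeat bit and
 * reports idle/active in its System Controller Interface Register, so
 * the system controller can detect hung cpus.
 */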
static void uv_heartbeat(unsigned long ignored)
{
	struct timer_list *timer = &uv_hub_info->scir.timer;
	unsigned char bits = uv_hub_info->scir.state;

	/* flip heartbeat bit */
	bits ^= SCIR_CPU_HEARTBEAT;

	/* is this cpu idle? */
	if (idle_cpu(raw_smp_processor_id()))
		bits &= ~SCIR_CPU_ACTIVITY;
	else
		bits |= SCIR_CPU_ACTIVITY;

	/* update system controller interface reg */
	uv_set_scir_bits(bits);

	/* enable next timer period */
	mod_timer_pinned(timer, jiffies + SCIR_CPU_HB_INTERVAL);
}
static void __cpuinit uv_heartbeat_enable(int cpu)
{
	while (!uv_cpu_hub_info(cpu)->scir.enabled) {
		struct timer_list *timer = &uv_cpu_hub_info(cpu)->scir.timer;

		uv_set_cpu_scir_bits(cpu, SCIR_CPU_HEARTBEAT|SCIR_CPU_ACTIVITY);
		setup_timer(timer, uv_heartbeat, cpu);
		timer->expires = jiffies + SCIR_CPU_HB_INTERVAL;
		add_timer_on(timer, cpu);
		uv_cpu_hub_info(cpu)->scir.enabled = 1;

		/* also ensure that boot cpu is enabled */
		cpu = 0;
	}
}
#ifdef CONFIG_HOTPLUG_CPU
static void __cpuinit uv_heartbeat_disable(int cpu)
{
	if (uv_cpu_hub_info(cpu)->scir.enabled) {
		uv_cpu_hub_info(cpu)->scir.enabled = 0;
		del_timer(&uv_cpu_hub_info(cpu)->scir.timer);
	}
	uv_set_cpu_scir_bits(cpu, 0xff);
}
/*
 * cpu hotplug notifier
 */
static __cpuinit int uv_scir_cpu_notify(struct notifier_block *self,
					unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_ONLINE:
		uv_heartbeat_enable(cpu);
		break;
	case CPU_DOWN_PREPARE:
		uv_heartbeat_disable(cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
static __init void uv_scir_register_cpu_notifier(void)
{
	hotcpu_notifier(uv_scir_cpu_notify, 0);
}

#else /* !CONFIG_HOTPLUG_CPU */

static __init void uv_scir_register_cpu_notifier(void)
{
}

#endif /* !CONFIG_HOTPLUG_CPU */
static __init int uv_init_heartbeat(void)
{
	int cpu;

	if (is_uv_system())
		for_each_online_cpu(cpu)
			uv_heartbeat_enable(cpu);
	return 0;
}

late_initcall(uv_init_heartbeat);
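/*
 * Direct legacy VGA I/O traffic to the designated IOH: forward the
 * decode target to BIOS so the right hub routes the port accesses.
 */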
int uv_set_vga_state(struct pci_dev *pdev, bool decode,
		     unsigned int command_bits, u32 flags)
{
	int domain, bus, rc;

	PR_DEVEL("devfn %x decode %d cmd %x flags %d\n",
		 pdev->devfn, decode, command_bits, flags);

	if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
		return 0;

	if ((command_bits & PCI_COMMAND_IO) == 0)
		return 0;

	domain = pci_domain_nr(pdev->bus);
	bus = pdev->bus->number;

	rc = uv_bios_set_legacy_vga_target(decode, domain, bus);
	PR_DEVEL("vga decode %d %x:%x, rc: %d\n", decode, domain, bus, rc);

	return rc;
}
void __cpuinit uv_cpu_init(void)
{
	/* CPU 0 initialization will be done via uv_system_init. */
	if (!uv_blade_info)
		return;

	uv_blade_info[uv_numa_blade_id()].nr_online_cpus++;

	if (get_uv_system_type() == UV_NON_UNIQUE_APIC)
		set_x2apic_extra_bits(uv_hub_info->pnode);
}
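/*
 * NMI handling: a BMC-raised NMI is visible to every cpu on the blade
 * through the scratch MMR, so the first cpu to see it clears the MMR
 * under the per-blade lock and bumps nmi_count to fan the event out.
 */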
int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
{
	unsigned long real_uv_nmi;
	int bid;

	/*
	 * Each blade has an MMR that indicates when an NMI has been sent
	 * to cpus on the blade. If an NMI is detected, atomically
	 * clear the MMR and update a per-blade NMI count used to
	 * cause each cpu on the blade to notice a new NMI.
	 */
	bid = uv_numa_blade_id();
	real_uv_nmi = (uv_read_local_mmr(UVH_NMI_MMR) & UV_NMI_PENDING_MASK);

	if (unlikely(real_uv_nmi)) {
		spin_lock(&uv_blade_info[bid].nmi_lock);
		real_uv_nmi = (uv_read_local_mmr(UVH_NMI_MMR) &
				UV_NMI_PENDING_MASK);
		if (real_uv_nmi) {
			uv_blade_info[bid].nmi_count++;
			uv_write_local_mmr(UVH_NMI_MMR_CLEAR,
						UV_NMI_PENDING_MASK);
		}
		spin_unlock(&uv_blade_info[bid].nmi_lock);
	}

	if (likely(__get_cpu_var(cpu_last_nmi_count) ==
			uv_blade_info[bid].nmi_count))
		return NMI_DONE;

	__get_cpu_var(cpu_last_nmi_count) = uv_blade_info[bid].nmi_count;

	/* Use a lock so only one cpu prints at a time, avoiding mixed output */
	spin_lock(&uv_nmi_lock);
	pr_info("UV NMI stack dump cpu %u:\n", smp_processor_id());
	dump_stack();
	spin_unlock(&uv_nmi_lock);

	return NMI_HANDLED;
}
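/*
 * uv_system_init() sizes the machine from hub MMRs (M/N config, node
 * present table), allocates the blade tables, fills in per-cpu hub
 * info, and establishes the GRU/MMR/MMIOH mappings.
 */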
void __init uv_system_init(void)
{
	union uvh_rh_gam_config_mmr_u m_n_config;
	union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh;
	union uvh_node_id_u node_id;
	unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size;
	int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val, n_io;
	int gnode_extra, max_pnode = 0;
	unsigned long mmr_base, present, paddr;
	unsigned short pnode_mask, pnode_io_mask;
	m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR);
	m_val = m_n_config.s.m_skt;
	n_val = m_n_config.s.n_skt;
	mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR);
	n_io = is_uv1_hub() ? mmioh.s1.n_io : mmioh.s2.n_io;
	pnode_mask = (1 << n_val) - 1;
	pnode_io_mask = (1 << n_io) - 1;
	node_id.v = uv_read_local_mmr(UVH_NODE_ID);
	gnode_extra = (node_id.s.node_id & ~((1 << n_val) - 1)) >> 1;
	gnode_upper = ((unsigned long)gnode_extra << m_val);
	printk(KERN_INFO "UV: N %d, M %d, N_IO: %d, gnode_upper 0x%lx, gnode_extra 0x%x, pnode_mask 0x%x, pnode_io_mask 0x%x\n",
			n_val, m_val, n_io, gnode_upper, gnode_extra,
			pnode_mask, pnode_io_mask);
	for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++)
		uv_possible_blades +=
			hweight64(uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8));
	/* uv_num_possible_blades() is really the hub count */
	printk(KERN_INFO "UV: Found %d blades, %d hubs\n",
			is_uv1_hub() ? uv_num_possible_blades() :
			(uv_num_possible_blades() + 1) / 2,
			uv_num_possible_blades());
	bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
	uv_blade_info = kzalloc(bytes, GFP_KERNEL);
	BUG_ON(!uv_blade_info);
	for (blade = 0; blade < uv_num_possible_blades(); blade++)
		uv_blade_info[blade].memory_nid = -1;
	get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size);
	bytes = sizeof(uv_node_to_blade[0]) * num_possible_nodes();
	uv_node_to_blade = kmalloc(bytes, GFP_KERNEL);
	BUG_ON(!uv_node_to_blade);
	memset(uv_node_to_blade, 255, bytes);

	bytes = sizeof(uv_cpu_to_blade[0]) * num_possible_cpus();
	uv_cpu_to_blade = kmalloc(bytes, GFP_KERNEL);
	BUG_ON(!uv_cpu_to_blade);
	memset(uv_cpu_to_blade, 255, bytes);
	blade = 0;
	for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++) {
		present = uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8);
		for (j = 0; j < 64; j++) {
			if (!test_bit(j, &present))
				continue;
			pnode = (i * 64 + j) & pnode_mask;
			uv_blade_info[blade].pnode = pnode;
			uv_blade_info[blade].nr_possible_cpus = 0;
			uv_blade_info[blade].nr_online_cpus = 0;
			spin_lock_init(&uv_blade_info[blade].nmi_lock);
			max_pnode = max(pnode, max_pnode);
			blade++;
		}
	}
	for_each_present_cpu(cpu) {
		int apicid = per_cpu(x86_cpu_to_apicid, cpu);

		nid = cpu_to_node(cpu);
		uv_cpu_hub_info(cpu)->n_lshift = is_uv2_1_hub() ?
				(m_val == 40 ? 40 : 39) : m_val;
		pnode = uv_apicid_to_pnode(apicid);
		blade = boot_pnode_to_blade(pnode);
		lcpu = uv_blade_info[blade].nr_possible_cpus;
		uv_blade_info[blade].nr_possible_cpus++;

		/* Any node on the blade, else will contain -1. */
		uv_blade_info[blade].memory_nid = nid;
		uv_cpu_hub_info(cpu)->numa_blade_id = blade;
		uv_cpu_hub_info(cpu)->blade_processor_id = lcpu;
		uv_node_to_blade[nid] = blade;
		uv_cpu_to_blade[cpu] = blade;
	}
	/* Add blade/pnode info for nodes without cpus */
	for_each_online_node(nid) {
		if (uv_node_to_blade[nid] >= 0)
			continue;
		paddr = node_start_pfn(nid) << PAGE_SHIFT;
		pnode = uv_gpa_to_pnode(uv_soc_phys_ram_to_gpa(paddr));
		blade = boot_pnode_to_blade(pnode);
		uv_node_to_blade[nid] = blade;
	}
	map_gru_high(max_pnode);
	map_mmr_high(max_pnode);
	map_mmioh_high(max_pnode & pnode_io_mask);
	uv_scir_register_cpu_notifier();
	/*
	 * For a kdump kernel the reset must be BOOT_ACPI, not BOOT_EFI, as
	 * EFI is not enabled in the kdump kernel.
	 */
	if (is_kdump_kernel())
		reboot_type = BOOT_ACPI;
}