#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/cpu.h>

#include <asm/smp.h>
#include <asm/x2apic.h>

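/*
 * Per-cpu bookkeeping referenced by the per_cpu() accesses below: each cpu's
 * logical APIC id, the set of cpus sharing its x2apic cluster, and a scratch
 * mask used while fanning out IPIs.  (Declarations assumed from those uses.)
 */
static DEFINE_PER_CPU(u32, x86_cpu_to_logical_apicid);
static DEFINE_PER_CPU(cpumask_var_t, cpus_in_cluster);
static DEFINE_PER_CPU(cpumask_var_t, ipi_mask);
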
static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
	return x2apic_enabled();
}

/* The cluster id is the upper 16 bits of the cpu's logical APIC id. */
static inline u32 x2apic_cluster(int cpu)
{
	return per_cpu(x86_cpu_to_logical_apicid, cpu) >> 16;
}

static void
__x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
{
	struct cpumask *cpus_in_cluster_ptr;
	struct cpumask *ipi_mask_ptr;
	unsigned int cpu, this_cpu;
	unsigned long flags;
	u32 dest;

	x2apic_wrmsr_fence();
	local_irq_save(flags);

	this_cpu = smp_processor_id();

	/* Work on a private copy of the mask, manipulated with irqs off. */
	ipi_mask_ptr = per_cpu(ipi_mask, this_cpu);
	cpumask_copy(ipi_mask_ptr, mask);

	/* Send one IPI per cluster, ORing the logical ids of its members. */
	for_each_cpu(cpu, ipi_mask_ptr) {
		unsigned long i;

		cpus_in_cluster_ptr = per_cpu(cpus_in_cluster, cpu);
		dest = 0;

		for_each_cpu_and(i, ipi_mask_ptr, cpus_in_cluster_ptr) {
			if (apic_dest == APIC_DEST_ALLINC || i != this_cpu)
				dest |= per_cpu(x86_cpu_to_logical_apicid, i);
		}

		if (!dest)
			continue;

		__x2apic_send_IPI_dest(dest, vector, apic->dest_logical);
		/* Drop this cluster's cpus so they are not IPIed twice. */
		cpumask_andnot(ipi_mask_ptr, ipi_mask_ptr, cpus_in_cluster_ptr);
	}

	local_irq_restore(flags);
}

static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
{
	__x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLINC);
}

static void
x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
	__x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLBUT);
}

static void x2apic_send_IPI_allbutself(int vector)
{
	__x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLBUT);
}

static void x2apic_send_IPI_all(int vector)
{
	__x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLINC);
}

static int
x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
			      const struct cpumask *andmask,
			      unsigned int *apicid)
{
	u32 dest = 0;
	u16 cluster;
	int i;

	/* Pick the first online cpu in the intersection; its cluster wins. */
	for_each_cpu_and(i, cpumask, andmask) {
		if (!cpumask_test_cpu(i, cpu_online_mask))
			continue;
		dest = per_cpu(x86_cpu_to_logical_apicid, i);
		cluster = x2apic_cluster(i);
		break;
	}

	if (!dest)
		return -EINVAL;

	/* OR in every other online cpu from the same cluster. */
	for_each_cpu_and(i, cpumask, andmask) {
		if (!cpumask_test_cpu(i, cpu_online_mask))
			continue;
		if (cluster != x2apic_cluster(i))
			continue;
		dest |= per_cpu(x86_cpu_to_logical_apicid, i);
	}

	*apicid = dest;

	return 0;
}

static void init_x2apic_ldr(void)
{
	unsigned int this_cpu = smp_processor_id();
	unsigned int cpu;

	per_cpu(x86_cpu_to_logical_apicid, this_cpu) = apic_read(APIC_LDR);

	/* Record this cpu in its own cluster mask and in its siblings'. */
	__cpu_set(this_cpu, per_cpu(cpus_in_cluster, this_cpu));
	for_each_online_cpu(cpu) {
		if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu))
			continue;
		__cpu_set(this_cpu, per_cpu(cpus_in_cluster, cpu));
		__cpu_set(cpu, per_cpu(cpus_in_cluster, this_cpu));
	}
}

static int update_clusterinfo(struct notifier_block *nfb,
			      unsigned long action, void *hcpu)
{
	unsigned int this_cpu = (unsigned long)hcpu;
	unsigned int cpu;
	int err = 0;

	switch (action) {
	case CPU_UP_PREPARE:
		if (!zalloc_cpumask_var(&per_cpu(cpus_in_cluster, this_cpu),
					GFP_KERNEL)) {
			err = -ENOMEM;
		} else if (!zalloc_cpumask_var(&per_cpu(ipi_mask, this_cpu),
					       GFP_KERNEL)) {
			free_cpumask_var(per_cpu(cpus_in_cluster, this_cpu));
			err = -ENOMEM;
		}
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
		/* Drop the departing cpu from its cluster siblings' masks. */
		for_each_online_cpu(cpu) {
			if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu))
				continue;
			__cpu_clear(this_cpu, per_cpu(cpus_in_cluster, cpu));
			__cpu_clear(cpu, per_cpu(cpus_in_cluster, this_cpu));
		}
		free_cpumask_var(per_cpu(cpus_in_cluster, this_cpu));
		free_cpumask_var(per_cpu(ipi_mask, this_cpu));
		break;
	}

	return notifier_from_errno(err);
}

static struct notifier_block x2apic_cpu_notifier = {
	.notifier_call = update_clusterinfo,
};

static int x2apic_init_cpu_notifier(void)
{
	int cpu = smp_processor_id();

	zalloc_cpumask_var(&per_cpu(cpus_in_cluster, cpu), GFP_KERNEL);
	zalloc_cpumask_var(&per_cpu(ipi_mask, cpu), GFP_KERNEL);

	BUG_ON(!per_cpu(cpus_in_cluster, cpu) || !per_cpu(ipi_mask, cpu));

	__cpu_set(cpu, per_cpu(cpus_in_cluster, cpu));
	register_hotcpu_notifier(&x2apic_cpu_notifier);
	return 1;
}

static int x2apic_cluster_probe(void)
{
	if (x2apic_mode)
		return x2apic_init_cpu_notifier();
	else
		return 0;
}

static const struct cpumask *x2apic_cluster_target_cpus(void)
{
	return cpu_all_mask;
}

static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
					     const struct cpumask *mask)
{
	/*
	 * To minimize vector pressure, the default cases (boot, device
	 * bringup, etc.) use a single cpu as the interrupt destination.
	 * Explicit migration requests (e.g. from irqbalance) are routed to
	 * the members of the target cpu's x2apic cluster present in the mask.
	 */
	if (mask == x2apic_cluster_target_cpus())
		cpumask_copy(retmask, cpumask_of(cpu));
	else
		cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
}

static struct apic apic_x2apic_cluster = {

	.name				= "cluster x2apic",
	.probe				= x2apic_cluster_probe,
	.acpi_madt_oem_check		= x2apic_acpi_madt_oem_check,
	.apic_id_valid			= x2apic_apic_id_valid,
	.apic_id_registered		= x2apic_apic_id_registered,

	.target_cpus			= x2apic_cluster_target_cpus,
	.check_apicid_used		= NULL,
	.check_apicid_present		= NULL,

	.vector_allocation_domain	= cluster_vector_allocation_domain,
	.init_apic_ldr			= init_x2apic_ldr,

	.ioapic_phys_id_map		= NULL,
	.setup_apic_routing		= NULL,
	.multi_timer_check		= NULL,
	.cpu_present_to_apicid		= default_cpu_present_to_apicid,
	.apicid_to_cpu_present		= NULL,
	.setup_portio_remap		= NULL,
	.check_phys_apicid_present	= default_check_phys_apicid_present,
	.enable_apic_mode		= NULL,
	.phys_pkg_id			= x2apic_phys_pkg_id,
	.mps_oem_check			= NULL,

	.get_apic_id			= x2apic_get_apic_id,
	.set_apic_id			= x2apic_set_apic_id,
	.apic_id_mask			= 0xFFFFFFFFu,

	.cpu_mask_to_apicid_and		= x2apic_cpu_mask_to_apicid_and,

	.send_IPI_mask			= x2apic_send_IPI_mask,
	.send_IPI_mask_allbutself	= x2apic_send_IPI_mask_allbutself,
	.send_IPI_allbutself		= x2apic_send_IPI_allbutself,
	.send_IPI_all			= x2apic_send_IPI_all,
	.send_IPI_self			= x2apic_send_IPI_self,

	.wait_for_init_deassert		= NULL,
	.smp_callin_clear_local_apic	= NULL,
	.inquire_remote_apic		= NULL,

	.read				= native_apic_msr_read,
	.write				= native_apic_msr_write,
	.eoi_write			= native_apic_msr_eoi_write,
	.icr_read			= native_x2apic_icr_read,
	.icr_write			= native_x2apic_icr_write,
	.wait_icr_idle			= native_x2apic_wait_icr_idle,
	.safe_wait_icr_idle		= native_safe_x2apic_wait_icr_idle,
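};

/*
 * Closing registration, assumed here to terminate the driver table above:
 * x2apic drivers register themselves with the APIC core via apic_driver().
 */
apic_driver(apic_x2apic_cluster);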