Linux Kernel 3.7.1
arch/sparc/kernel/smp_32.c
/* smp.c: Sparc SMP support.
 *
 * Copyright (C) 1996 David S. Miller ([email protected])
 * Copyright (C) 1998 Jakub Jelinek ([email protected])
 * Copyright (C) 2004 Keith M Wesolowski ([email protected])
 */

#include <asm/head.h>

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/delay.h>

#include <asm/ptrace.h>
#include <linux/atomic.h>

#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/cpudata.h>
#include <asm/leon.h>

#include "irq.h"

volatile unsigned long cpu_callin_map[NR_CPUS] __cpuinitdata = {0,};

cpumask_t smp_commenced_mask = CPU_MASK_NONE;

const struct sparc32_ipi_ops *sparc32_ipi_ops;

/* The only guaranteed locking primitive available on all Sparc
 * processors is 'ldstub [%reg + immediate], %dest_reg' which atomically
 * places the current byte at the effective address into dest_reg and
 * places 0xff there afterwards.  Pretty lame locking primitive
 * compared to the Alpha and the Intel no?  Most Sparcs have 'swap'
 * instruction which is much better...
 */
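/* Editor's illustrative sketch, not part of smp_32.c: the test-and-set
 * spinlock that 'ldstub' makes possible, as described in the comment
 * above.  Function names are invented for illustration and the inline-asm
 * constraints assume a GCC-style sparc32 build.
 */
static inline void example_ldstub_lock(volatile unsigned char *lock)
{
	unsigned char was_held;

	do {
		/* Atomically fetch the lock byte and store 0xff there. */
		__asm__ __volatile__("ldstub [%1], %0"
				     : "=&r" (was_held)
				     : "r" (lock)
				     : "memory");
	} while (was_held);	/* 0 => we took it, 0xff => keep spinning */
}

static inline void example_ldstub_unlock(volatile unsigned char *lock)
{
	/* Release by storing zero (%g0) back into the lock byte. */
	__asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
}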
void __cpuinit smp_store_cpu_info(int id)
{
	int cpu_node;
	int mid;

	cpu_data(id).udelay_val = loops_per_jiffy;

	cpu_find_by_mid(id, &cpu_node);
	cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
						"clock-frequency", 0);
	cpu_data(id).prom_node = cpu_node;
	mid = cpu_get_hwmid(cpu_node);

	if (mid < 0) {
		printk(KERN_NOTICE "No MID found for CPU%d at node 0x%08d", id, cpu_node);
		mid = 0;
	}
	cpu_data(id).mid = mid;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	extern void smp4m_smp_done(void);
	extern void smp4d_smp_done(void);
	unsigned long bogosum = 0;
	int cpu, num = 0;

	for_each_online_cpu(cpu) {
		num++;
		bogosum += cpu_data(cpu).udelay_val;
	}

	printk("Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
		num, bogosum/(500000/HZ),
		(bogosum/(5000/HZ))%100);

	switch (sparc_cpu_model) {
	case sun4m:
		smp4m_smp_done();
		break;
	case sun4d:
		smp4d_smp_done();
		break;
	case sparc_leon:
		leon_smp_done();
		break;
	case sun4e:
		printk("SUN4E\n");
		BUG();
		break;
	case sun4u:
		printk("SUN4U\n");
		BUG();
		break;
	default:
		printk("UNKNOWN!\n");
		BUG();
		break;
	}
}
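/* Editor's note, not part of the original file: udelay_val holds
 * loops_per_jiffy, so loops per second = udelay_val * HZ and
 * BogoMIPS = loops_per_second / 500000.  The integer arithmetic in the
 * printk above splits that value into a whole part and a two-digit
 * fractional part:
 *
 *	whole    = bogosum / (500000 / HZ);
 *	fraction = (bogosum / (5000 / HZ)) % 100;
 */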
void cpu_panic(void)
{
	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
	panic("SMP bolixed\n");
}

struct linux_prom_registers smp_penguin_ctable __cpuinitdata = { 0 };

void smp_send_reschedule(int cpu)
{
	/*
	 * CPU model dependent way of implementing IPI generation targeting
	 * a single CPU. The trap handler needs only to do trap entry/return
	 * to call schedule.
	 */
	sparc32_ipi_ops->resched(cpu);
}

void smp_send_stop(void)
{
}

void arch_send_call_function_single_ipi(int cpu)
{
	/* trigger one IPI single call on one CPU */
	sparc32_ipi_ops->single(cpu);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	/* trigger IPI mask call on each CPU */
	for_each_cpu(cpu, mask)
		sparc32_ipi_ops->mask_one(cpu);
}
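/* Editor's illustrative sketch, not part of this file: the shape of the
 * per-CPU-model IPI ops table implied by the calls above.  The real
 * definition lives in the sparc32 asm headers; the struct name here is
 * invented, and field order or any additional members are assumptions.
 */
struct example_sparc32_ipi_ops {
	void (*resched)(int cpu);	/* kick one CPU into the scheduler   */
	void (*single)(int cpu);	/* single-function-call IPI          */
	void (*mask_one)(int cpu);	/* one IPI per CPU in a cpumask walk */
};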
void smp_resched_interrupt(void)
{
	irq_enter();
	scheduler_ipi();
	local_cpu_data().irq_resched_count++;
	irq_exit();
	/* re-schedule routine called by interrupt return code. */
}

void smp_call_function_single_interrupt(void)
{
	irq_enter();
	generic_smp_call_function_single_interrupt();
	local_cpu_data().irq_call_count++;
	irq_exit();
}

void smp_call_function_interrupt(void)
{
	irq_enter();
	generic_smp_call_function_interrupt();
	local_cpu_data().irq_call_count++;
	irq_exit();
}

int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	extern void __init smp4m_boot_cpus(void);
	extern void __init smp4d_boot_cpus(void);
	int i, cpuid, extra;

	printk("Entering SMP Mode...\n");

	extra = 0;
	for (i = 0; !cpu_find_by_instance(i, NULL, &cpuid); i++) {
		if (cpuid >= NR_CPUS)
			extra++;
	}
	/* i = number of cpus */
	if (extra && max_cpus > i - extra)
		printk("Warning: NR_CPUS is too low to start all cpus\n");

	smp_store_cpu_info(boot_cpu_id);

	switch (sparc_cpu_model) {
	case sun4m:
		smp4m_boot_cpus();
		break;
	case sun4d:
		smp4d_boot_cpus();
		break;
	case sparc_leon:
		leon_boot_cpus();
		break;
	case sun4e:
		printk("SUN4E\n");
		BUG();
		break;
	case sun4u:
		printk("SUN4U\n");
		BUG();
		break;
	default:
		printk("UNKNOWN!\n");
		BUG();
		break;
	}
}

/* Set this up early so that things like the scheduler can init
 * properly.  We use the same cpu mask for both the present and
 * possible cpu map.
 */
void __init smp_setup_cpu_possible_map(void)
{
	int instance, mid;

	instance = 0;
	while (!cpu_find_by_instance(instance, NULL, &mid)) {
		if (mid < NR_CPUS) {
			set_cpu_possible(mid, true);
			set_cpu_present(mid, true);
		}
		instance++;
	}
}

void __init smp_prepare_boot_cpu(void)
{
	int cpuid = hard_smp_processor_id();

	if (cpuid >= NR_CPUS) {
		prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
		prom_halt();
	}
	if (cpuid != 0)
		printk("boot cpu id != 0, this could work but is untested\n");

	current_thread_info()->cpu = cpuid;
	set_cpu_online(cpuid, true);
	set_cpu_possible(cpuid, true);
}

int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	extern int __cpuinit smp4m_boot_one_cpu(int, struct task_struct *);
	extern int __cpuinit smp4d_boot_one_cpu(int, struct task_struct *);
	int ret = 0;

	switch (sparc_cpu_model) {
	case sun4m:
		ret = smp4m_boot_one_cpu(cpu, tidle);
		break;
	case sun4d:
		ret = smp4d_boot_one_cpu(cpu, tidle);
		break;
	case sparc_leon:
		ret = leon_boot_one_cpu(cpu, tidle);
		break;
	case sun4e:
		printk("SUN4E\n");
		BUG();
		break;
	case sun4u:
		printk("SUN4U\n");
		BUG();
		break;
	default:
		printk("UNKNOWN!\n");
		BUG();
		break;
	}

	if (!ret) {
		cpumask_set_cpu(cpu, &smp_commenced_mask);
		while (!cpu_online(cpu))
			mb();
	}
	return ret;
}
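/* Editor's note, not part of the original file: __cpu_up() above is the
 * boot CPU's half of the bring-up handshake.  Once the CPU-model-specific
 * boot routine reports success, the new CPU's bit is set in
 * smp_commenced_mask and the boot CPU simply spins (issuing mb()) until
 * the platform/trampoline code running on the new CPU marks itself
 * online, at which point cpu_online(cpu) becomes true and __cpu_up()
 * returns.
 */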
void smp_bogo(struct seq_file *m)
{
	int i;

	for_each_online_cpu(i) {
		seq_printf(m,
			   "Cpu%dBogo\t: %lu.%02lu\n",
			   i,
			   cpu_data(i).udelay_val/(500000/HZ),
			   (cpu_data(i).udelay_val/(5000/HZ))%100);
	}
}

void smp_info(struct seq_file *m)
{
	int i;

	seq_printf(m, "State:\n");
	for_each_online_cpu(i)
		seq_printf(m, "CPU%d\t\t: online\n", i);
}
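/* Editor's note, not part of the original file: smp_bogo() and smp_info()
 * appear to be the seq_file helpers used when assembling the sparc32
 * /proc/cpuinfo output.  Illustrative lines they emit (values made up):
 *
 *	Cpu0Bogo	: 68.91
 *	State:
 *	CPU0		: online
 */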