Linux Kernel 3.7.1
arch/mips/kernel/sync-r4k.c
/*
 * Count register synchronisation.
 *
 * All CPUs will have their count registers synchronised to the CPU0 next time
 * value. This can cause a small timewarp for CPU0. All other CPUs should
 * not have done anything significant (but they may have had interrupts
 * enabled briefly - prom_smp_finish() should not be responsible for enabling
 * interrupts...)
 *
 * FIXME: broken for SMTC
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irqflags.h>
#include <linux/cpumask.h>

#include <asm/r4k-timer.h>
#include <linux/atomic.h>
#include <asm/barrier.h>
#include <asm/mipsregs.h>

static atomic_t __cpuinitdata count_start_flag = ATOMIC_INIT(0);
static atomic_t __cpuinitdata count_count_start = ATOMIC_INIT(0);
static atomic_t __cpuinitdata count_count_stop = ATOMIC_INIT(0);
static atomic_t __cpuinitdata count_reference = ATOMIC_INIT(0);

#define COUNTON 100
#define NR_LOOPS 5
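
/*
 * Roles of the variables above (summary added for this listing; all
 * descriptions are inferred from the code below):
 *
 *   count_reference   - the master's c0_count value, published for the slave
 *   count_start_flag  - CPU number of the slave currently being synchronised;
 *                       0 means no synchronisation is in progress
 *   count_count_start - first rendezvous counter: the slave raises it to 1,
 *                       the master to 2
 *   count_count_stop  - second rendezvous counter, reset each iteration
 *   COUNTON           - cycles until the freshly programmed compare interrupt
 *   NR_LOOPS          - warm-up passes so the final pass runs from a primed
 *                       instruction cache
 */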

void __cpuinit synchronise_count_master(int cpu)
{
	int i;
	unsigned long flags;
	unsigned int initcount;

#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC needs to synchronise per VPE, not per CPU
	 * ignore for now
	 */
	return;
#endif

	printk(KERN_INFO "Synchronize counters for CPU %u: ", cpu);

	local_irq_save(flags);

	/*
	 * Notify the slaves that it's time to start
	 */
	atomic_set(&count_reference, read_c0_count());
	atomic_set(&count_start_flag, cpu);
	smp_wmb();

	/* Count will be initialised to current timer for all CPUs */
	initcount = read_c0_count();

	/*
	 * We loop a few times to get a primed instruction cache,
	 * then the last pass is more or less synchronised and
	 * the master and slaves each set their cycle counters to a known
	 * value all at once. This reduces the chance of having random offsets
	 * between the processors, and guarantees that the maximum
	 * delay between the cycle counters is never bigger than
	 * the latency of information-passing (cachelines) between
	 * two CPUs.
	 */

	for (i = 0; i < NR_LOOPS; i++) {
		/* slaves loop on '!= 2' */
		while (atomic_read(&count_count_start) != 1)
			mb();
		atomic_set(&count_count_stop, 0);
		smp_wmb();

		/* this lets the slaves write their count register */
		atomic_inc(&count_count_start);

		/*
		 * Everyone initialises count in the last loop:
		 */
		if (i == NR_LOOPS-1)
			write_c0_count(initcount);

		/*
		 * Wait for all slaves to leave the synchronization point:
		 */
		while (atomic_read(&count_count_stop) != 1)
			mb();
		atomic_set(&count_count_start, 0);
		smp_wmb();
		atomic_inc(&count_count_stop);
	}
	/* Arrange for an interrupt in a short while */
	write_c0_compare(read_c0_count() + COUNTON);
	atomic_set(&count_start_flag, 0);

	local_irq_restore(flags);

	/*
	 * i386 code reported the skew here, but the
	 * count registers were almost certainly out of sync
	 * so no point in alarming people
	 */
	printk("done.\n");
}
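
/*
 * Note (added for this listing): although the comments speak of "slaves"
 * in the plural, the counters are compared against 1 and 2, so exactly one
 * master and one slave take part in each handshake; count_start_flag
 * carries the CPU number of the slave currently being synchronised.
 */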

void __cpuinit synchronise_count_slave(int cpu)
{
	int i;
	unsigned int initcount;

#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC needs to synchronise per VPE, not per CPU
	 * ignore for now
	 */
	return;
#endif

	/*
	 * Not every cpu is online at the time this gets called,
	 * so we first wait for the master to say everyone is ready
	 */

	while (atomic_read(&count_start_flag) != cpu)
		mb();

	/* Count will be initialised to next expire for all CPUs */
	initcount = atomic_read(&count_reference);

	for (i = 0; i < NR_LOOPS; i++) {
		atomic_inc(&count_count_start);
		while (atomic_read(&count_count_start) != 2)
			mb();

		/*
		 * Everyone initialises count in the last loop:
		 */
		if (i == NR_LOOPS-1)
			write_c0_count(initcount);

		atomic_inc(&count_count_stop);
		while (atomic_read(&count_count_stop) != 2)
			mb();
	}
	/* Arrange for an interrupt in a short while */
	write_c0_compare(read_c0_count() + COUNTON);
}
#undef NR_LOOPS
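
How these functions are reached: the master side runs on the boot CPU while it
brings a secondary online, and the slave side runs on the secondary itself. A
minimal sketch of the call sites, assuming the 3.7-era arch/mips/kernel/smp.c
(simplified and abridged, not the verbatim kernel source):

/* Sketch only; error handling and unrelated bring-up steps are omitted. */

/* Boot CPU: start 'cpu', wait for it to check in, then act as master. */
int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	mp_ops->boot_secondary(cpu, tidle);

	/* wait for the secondary to check in */
	while (!cpu_isset(cpu, cpu_callin_map))
		udelay(100);

	synchronise_count_master(cpu);	/* master half of the handshake */
	return 0;
}

/* Secondary CPU: fragment of start_secondary(), after checking in. */
	cpu_set(cpu, cpu_callin_map);
	synchronise_count_slave(cpu);	/* spins until count_start_flag == cpu */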
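
The two-counter rendezvous in the loops above is a generic pattern that can be
tried outside the kernel. Below is a minimal user-space sketch of the same
handshake using C11 atomics and pthreads; the variable names mirror the kernel
listing, but everything here is illustrative - in particular, monotonic-clock
timestamps stand in for the c0_count register:

/*
 * rendezvous_demo.c - user-space sketch of the two-counter handshake.
 * Build: cc -O2 -pthread rendezvous_demo.c -o rendezvous_demo
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

#define NR_LOOPS 5

static atomic_int count_count_start;	/* first rendezvous counter */
static atomic_int count_count_stop;	/* second rendezvous counter */

static long long master_stamp, slave_stamp;

static long long now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (long long)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

static void *slave(void *unused)
{
	int i;

	for (i = 0; i < NR_LOOPS; i++) {
		/* announce arrival, then wait for the master's release */
		atomic_fetch_add(&count_count_start, 1);
		while (atomic_load(&count_count_start) != 2)
			;

		if (i == NR_LOOPS - 1)
			slave_stamp = now_ns();	/* stands in for write_c0_count() */

		/* second rendezvous, mirroring the kernel's stop counter */
		atomic_fetch_add(&count_count_stop, 1);
		while (atomic_load(&count_count_stop) != 2)
			;
	}
	return NULL;
}

int main(void)
{
	pthread_t t;
	int i;

	pthread_create(&t, NULL, slave, NULL);

	for (i = 0; i < NR_LOOPS; i++) {
		/* wait for the slave to reach the rendezvous point */
		while (atomic_load(&count_count_start) != 1)
			;
		atomic_store(&count_count_stop, 0);

		/* this releases the slave's first spin loop */
		atomic_fetch_add(&count_count_start, 1);

		if (i == NR_LOOPS - 1)
			master_stamp = now_ns();	/* stands in for write_c0_count() */

		/* wait for the slave to pass the second rendezvous point */
		while (atomic_load(&count_count_stop) != 1)
			;
		atomic_store(&count_count_start, 0);
		atomic_fetch_add(&count_count_stop, 1);
	}

	pthread_join(t, NULL);
	printf("final-pass skew: %lld ns\n", slave_stamp - master_stamp);
	return 0;
}

On the final pass both threads take their timestamp back-to-back, which is how
the kernel code bounds the residual skew between count registers by roughly the
cost of passing one cache line between CPUs.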