Linux Kernel 3.7.1
include/linux/smp.h
#ifndef __LINUX_SMP_H
#define __LINUX_SMP_H

/*
 *	Generic SMP support
 *		Alan Cox. <alan@redhat.com>
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/cpumask.h>
#include <linux/init.h>

extern void cpu_idle(void);

typedef void (*smp_call_func_t)(void *info);
struct call_single_data {
	struct list_head list;
	smp_call_func_t func;
	void *info;
	u16 flags;
	u16 priv;
};

/* total number of cpus in this system (may exceed NR_CPUS) */
extern unsigned int total_cpus;

int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
			     int wait);

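/*
 * Usage sketch (editorial addition, not part of the original header):
 * run a short callback on one specific CPU and wait for it to finish.
 * The callback executes in interrupt context on the target CPU, so it
 * must not sleep; the function and variable names here are invented
 * purely for illustration.
 */
#if 0	/* example only */
static void record_cpu_id(void *info)
{
	*(int *)info = smp_processor_id();	/* runs on the target CPU */
}

static int probe_cpu(int cpu)
{
	int id = -1;

	/* wait == 1: do not return until record_cpu_id() ran on 'cpu' */
	smp_call_function_single(cpu, record_cpu_id, &id, 1);
	return id;
}
#endif	/* example only */
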
#ifdef CONFIG_SMP

#include <linux/preempt.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <asm/smp.h>

/*
 * main cross-CPU interfaces, handles INIT, TLB flush, STOP, etc.
 * (defined in asm header):
 */

/*
 * stops all CPUs but the current one:
 */
extern void smp_send_stop(void);

/*
 * sends a 'reschedule' event to another CPU:
 */
extern void smp_send_reschedule(int cpu);


/*
 * Prepare machine for booting other CPUs.
 */
extern void smp_prepare_cpus(unsigned int max_cpus);

/*
 * Bring a CPU up
 */
extern int __cpu_up(unsigned int cpunum, struct task_struct *tidle);

/*
 * Final polishing of CPUs
 */
extern void smp_cpus_done(unsigned int max_cpus);

/*
 * Call a function on all other processors
 */
int smp_call_function(smp_call_func_t func, void *info, int wait);
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait);

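/*
 * Usage sketch (editorial addition, not part of the original header):
 * smp_call_function() runs the callback on every *other* online CPU;
 * pairing it with a direct local call covers all CPUs (on_each_cpu(),
 * declared further down, wraps this pattern and also disables local
 * interrupts around the local call).  The callback name and body are
 * invented for illustration.
 */
#if 0	/* example only */
static void drain_local_state(void *unused)
{
	/* per-CPU work; runs from IPI context, must not sleep */
}

static void drain_all_cpus(void)
{
	smp_call_function(drain_local_state, NULL, 1);	/* every other CPU */
	drain_local_state(NULL);			/* and this one */
}
#endif	/* example only */
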
void __smp_call_function_single(int cpuid, struct call_single_data *data,
				int wait);

int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait);

void kick_all_cpus_sync(void);

/*
 * Generic and arch helpers
 */
#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
void __init call_function_init(void);
void generic_smp_call_function_single_interrupt(void);
void generic_smp_call_function_interrupt(void);
#else
static inline void call_function_init(void) { }
#endif

/*
 * Call a function on all processors
 */
int on_each_cpu(smp_call_func_t func, void *info, int wait);

/*
 * Call a function on processors specified by mask, which might include
 * the local one.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
		void *info, bool wait);

/*
 * Call a function on each processor for which the supplied function
 * cond_func returns a positive value. This may include the local
 * processor.
 */
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
		smp_call_func_t func, void *info, bool wait,
		gfp_t gfp_flags);

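/*
 * Usage sketch (editorial addition, not part of the original header):
 * run a callback only on the CPUs for which a predicate returns true,
 * here every even-numbered CPU.  The predicate and callback names are
 * invented for illustration; GFP_KERNEL is passed so the helper may
 * allocate a temporary cpumask if it needs one.
 */
#if 0	/* example only */
static bool cpu_is_even(int cpu, void *info)
{
	return (cpu % 2) == 0;
}

static void poke_cpu(void *info)
{
	pr_info("callback on CPU %d\n", smp_processor_id());
}

static void poke_even_cpus(void)
{
	/* wait == true: return only after all selected CPUs ran poke_cpu() */
	on_each_cpu_cond(cpu_is_even, poke_cpu, NULL, true, GFP_KERNEL);
}
#endif	/* example only */
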
/*
 * Mark the boot cpu "online" so that it can call console drivers in
 * printk() and can access its per-cpu storage.
 */
void smp_prepare_boot_cpu(void);

extern unsigned int setup_max_cpus;
extern void __init setup_nr_cpu_ids(void);
extern void __init smp_init(void);

#else /* !SMP */

static inline void smp_send_stop(void) { }

/*
 *	These macros fold the SMP functionality into a single CPU system
 */
#define raw_smp_processor_id()			0
static inline int up_smp_call_function(smp_call_func_t func, void *info)
{
	return 0;
}
#define smp_call_function(func, info, wait) \
			(up_smp_call_function(func, info))
#define on_each_cpu(func,info,wait)		\
	({					\
		local_irq_disable();		\
		func(info);			\
		local_irq_enable();		\
		0;				\
	})
/*
 * Note we still need to test the mask even for UP
 * because we actually can get an empty mask from
 * code that on SMP might call us without the local
 * CPU in the mask.
 */
#define on_each_cpu_mask(mask, func, info, wait) \
	do {						\
		if (cpumask_test_cpu(0, (mask))) {	\
			local_irq_disable();		\
			(func)(info);			\
			local_irq_enable();		\
		}					\
	} while (0)
/*
 * Preemption is disabled here to make sure the cond_func is called under the
 * same conditions in UP and SMP.
 */
#define on_each_cpu_cond(cond_func, func, info, wait, gfp_flags)\
	do {						\
		void *__info = (info);			\
		preempt_disable();			\
		if ((cond_func)(0, __info)) {		\
			local_irq_disable();		\
			(func)(__info);			\
			local_irq_enable();		\
		}					\
		preempt_enable();			\
	} while (0)

static inline void smp_send_reschedule(int cpu) { }
#define smp_prepare_boot_cpu()			do {} while (0)
#define smp_call_function_many(mask, func, info, wait) \
			(up_smp_call_function(func, info))
static inline void call_function_init(void) { }

static inline int
smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
		      void *info, int wait)
{
	return smp_call_function_single(0, func, info, wait);
}

static inline void kick_all_cpus_sync(void) { }

#endif /* !SMP */

/*
 * smp_processor_id(): get the current CPU ID.
 *
 * if DEBUG_PREEMPT is enabled then we check whether it is
 * used in a preemption-safe way. (smp_processor_id() is safe
 * if it's used in a preemption-off critical section, or in
 * a thread that is bound to the current CPU.)
 *
 * NOTE: raw_smp_processor_id() is for internal use only
 * (smp_processor_id() is the preferred variant), but in rare
 * instances it might also be used to turn off false positives
 * (i.e. smp_processor_id() use that the debugging code reports but
 * which use for some reason is legal). Don't use this to hack around
 * the warning message, as your code might not work under PREEMPT.
 */
#ifdef CONFIG_DEBUG_PREEMPT
  extern unsigned int debug_smp_processor_id(void);
# define smp_processor_id() debug_smp_processor_id()
#else
# define smp_processor_id() raw_smp_processor_id()
#endif

#define get_cpu()		({ preempt_disable(); smp_processor_id(); })
#define put_cpu()		preempt_enable()

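/*
 * Usage sketch (editorial addition, not part of the original header):
 * smp_processor_id() is only stable while preemption is disabled, so a
 * common pattern is to bracket the access with get_cpu()/put_cpu().
 * The function name is invented for illustration.
 */
#if 0	/* example only */
static void log_current_cpu(void)
{
	int cpu = get_cpu();			/* disables preemption */

	pr_info("running on CPU %d\n", cpu);	/* cannot migrate here */
	put_cpu();				/* re-enables preemption */
}
#endif	/* example only */
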
/*
 * Callback to arch code if there's nosmp or maxcpus=0 on the
 * boot command line:
 */
extern void arch_disable_smp_support(void);

void smp_setup_processor_id(void);

#endif /* __LINUX_SMP_H */