Linux Kernel 3.7.1
kernel/trace/trace_clock.c

/*
 * tracing clocks
 *
 *  Copyright (C) 2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Implements 3 trace clock variants, with differing scalability/precision
 * tradeoffs:
 *
 *  -  local: CPU-local trace clock
 *  - medium: scalable global clock with some jitter
 *  - global: globally monotonic, serialized clock
 *
 * Tracer plugins will choose a default from these clocks.
 */
#include <linux/spinlock.h>
#include <linux/irqflags.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/ktime.h>
#include <linux/trace_clock.h>

#include "trace.h"
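
/*
 * Usage sketch (not part of this file): the clock used for a trace buffer
 * is normally picked at runtime through the "trace_clock" file in the
 * tracing debugfs directory, assuming debugfs is mounted at the usual
 * location:
 *
 *     cat /sys/kernel/debug/tracing/trace_clock    # [..] marks the current clock
 *     echo global > /sys/kernel/debug/tracing/trace_clock
 *
 * The names exposed there ("local", "global", "counter") correspond to the
 * trace_clock_local(), trace_clock_global() and trace_clock_counter()
 * functions defined below.
 */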

/*
 * trace_clock_local(): the simplest and least coherent tracing clock.
 *
 * Useful for tracing that does not cross to other CPUs nor
 * does it go through idle events.
 */
u64 notrace trace_clock_local(void)
{
        u64 clock;

        /*
         * sched_clock() is an architecture implemented, fast, scalable,
         * lockless clock. It is not guaranteed to be coherent across
         * CPUs, nor across CPU idle events.
         */
        preempt_disable_notrace();
        clock = sched_clock();
        preempt_enable_notrace();

        return clock;
}
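
/*
 * Usage sketch (not part of this file): timing a short, CPU-local section
 * with trace_clock_local().  The workload helper do_work() is hypothetical;
 * because this clock is only coherent on a single CPU, the delta is only
 * meaningful if the measured region cannot migrate between CPUs.
 */
#if 0
static void example_time_local_section(void)
{
        u64 t0, t1;

        preempt_disable();              /* stay on one CPU for a coherent delta */
        t0 = trace_clock_local();
        do_work();                      /* hypothetical workload */
        t1 = trace_clock_local();
        preempt_enable();

        pr_info("section took %llu ns\n", (unsigned long long)(t1 - t0));
}
#endif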

/*
 * trace_clock(): 'between' trace clock. Not completely serialized,
 * but not completely incorrect when crossing CPUs either.
 *
 * This is based on cpu_clock(), which will allow at most ~1 jiffy of
 * jitter between CPUs. So it's a pretty scalable clock, but there
 * can be offsets in the trace data.
 */
u64 notrace trace_clock(void)
{
        return local_clock();
}
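
/*
 * Usage sketch (not part of this file): stamping events that may be
 * produced on different CPUs with trace_clock().  The struct and helper
 * names are hypothetical; per the comment above, stamps from different
 * CPUs can disagree by up to roughly one jiffy, so only differences larger
 * than that are meaningful for cross-CPU ordering.
 */
#if 0
struct example_event {
        u64 stamp;
        int cpu;
};

static void example_stamp_event(struct example_event *ev)
{
        ev->stamp = trace_clock();              /* bounded cross-CPU jitter */
        ev->cpu = raw_smp_processor_id();       /* record the producing CPU */
}
#endif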

/*
 * trace_clock_global(): special globally coherent trace clock
 *
 * It has higher overhead than the other trace clocks but is still
 * an order of magnitude faster than GTOD derived hardware clocks.
 *
 * Used by plugins that need globally coherent timestamps.
 */

/* keep prev_time and lock in the same cacheline. */
static struct {
        u64 prev_time;
        arch_spinlock_t lock;
} trace_clock_struct ____cacheline_aligned_in_smp =
        {
                .lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED,
        };

u64 notrace trace_clock_global(void)
{
        unsigned long flags;
        int this_cpu;
        u64 now;

        local_irq_save(flags);

        this_cpu = raw_smp_processor_id();
        now = cpu_clock(this_cpu);
        /*
         * If in an NMI context then don't risk lockups and return the
         * cpu_clock() time:
         */
        if (unlikely(in_nmi()))
                goto out;

        arch_spin_lock(&trace_clock_struct.lock);

        /*
         * TODO: if this happens often then maybe we should reset
         * my_scd->clock to prev_time+1, to make sure
         * we start ticking with the local clock from now on?
         */
        if ((s64)(now - trace_clock_struct.prev_time) < 0)
                now = trace_clock_struct.prev_time + 1;

        trace_clock_struct.prev_time = now;

        arch_spin_unlock(&trace_clock_struct.lock);

 out:
        local_irq_restore(flags);

        return now;
}
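
/*
 * Illustrative sketch (not part of this file): the forward-progress check
 * in trace_clock_global() interprets an unsigned difference as signed, so
 * it stays correct even if the u64 timestamps wrap around.  A minimal
 * standalone version of the same clamp, with hypothetical names and
 * without the locking used above:
 */
#if 0
static u64 example_prev_time;

static u64 example_monotonize(u64 now)
{
        /* (s64)(now - prev) < 0  <=>  "now" is behind "prev", modulo 2^64 */
        if ((s64)(now - example_prev_time) < 0)
                now = example_prev_time + 1;    /* never step backwards */

        example_prev_time = now;                /* real code must serialize, as above */
        return now;
}
#endif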

static atomic64_t trace_counter;

/*
 * trace_clock_counter(): simply an atomic counter.
 * Use the trace_counter "counter" for cases where you do not care
 * about timings, but are interested in strict ordering.
 */
u64 notrace trace_clock_counter(void)
{
        return atomic64_add_return(1, &trace_counter);
}
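
/*
 * Usage sketch (not part of this file): when only the order of events
 * matters, stamping with trace_clock_counter() gives every event a unique,
 * strictly increasing value regardless of which CPU produced it.  The
 * struct name is hypothetical.
 */
#if 0
struct example_ordered_event {
        u64 seq;
};

static void example_stamp_ordered(struct example_ordered_event *ev)
{
        ev->seq = trace_clock_counter();        /* unique and strictly ordered */
}
#endif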