Linux Kernel 3.7.1
arch/x86/kvm/kvm_cache_regs.h
#ifndef ASM_KVM_CACHE_REGS_H
#define ASM_KVM_CACHE_REGS_H

/* CR0/CR4 bits that may be left in the guest's hands (not trapped), so the
 * cached copy must be refreshed from hardware before those bits are read. */
#define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
#define KVM_POSSIBLE_CR4_GUEST_BITS				  \
	(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
	 | X86_CR4_OSXMMEXCPT | X86_CR4_PGE)

/*
 * Read a general-purpose register through the register cache: if the value
 * is not yet available, ask the vendor code (VMX/SVM) to load it first.
 */
static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu,
					      enum kvm_reg reg)
{
	if (!test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail))
		kvm_x86_ops->cache_reg(vcpu, reg);

	return vcpu->arch.regs[reg];
}

/*
 * Write a general-purpose register: update the cache and mark the register
 * both available and dirty so it is flushed back to the guest state.
 */
static inline void kvm_register_write(struct kvm_vcpu *vcpu,
				      enum kvm_reg reg,
				      unsigned long val)
{
	vcpu->arch.regs[reg] = val;
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read(vcpu, VCPU_REGS_RIP);
}

static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write(vcpu, VCPU_REGS_RIP, val);
}

static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
{
	might_sleep();  /* on svm */

	if (!test_bit(VCPU_EXREG_PDPTR,
		      (unsigned long *)&vcpu->arch.regs_avail))
		kvm_x86_ops->cache_reg(vcpu, VCPU_EXREG_PDPTR);

	return vcpu->arch.walk_mmu->pdptrs[index];
}

/* Read the requested CR0 bits, refreshing any guest-owned bits first. */
static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
	if (tmask & vcpu->arch.cr0_guest_owned_bits)
		kvm_x86_ops->decache_cr0_guest_bits(vcpu);
	return vcpu->arch.cr0 & mask;
}

static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, ~0UL);
}

/* Read the requested CR4 bits, refreshing any guest-owned bits first. */
static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;
	if (tmask & vcpu->arch.cr4_guest_owned_bits)
		kvm_x86_ops->decache_cr4_guest_bits(vcpu);
	return vcpu->arch.cr4 & mask;
}

static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
{
	if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
		kvm_x86_ops->decache_cr3(vcpu);
	return vcpu->arch.cr3;
}

static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, ~0UL);
}

/* Combine the low 32 bits of RDX and RAX into a single 64-bit value. */
static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
{
	return (kvm_register_read(vcpu, VCPU_REGS_RAX) & -1u)
		| ((u64)(kvm_register_read(vcpu, VCPU_REGS_RDX) & -1u) << 32);
}

/* Track whether the vCPU is currently running a nested (L2) guest. */
static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags |= HF_GUEST_MASK;
}

static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags &= ~HF_GUEST_MASK;
}

static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hflags & HF_GUEST_MASK;
}

#endif
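
For context, a minimal sketch of how code elsewhere in arch/x86/kvm typically consumes these accessors. The helper name and the caller-supplied instruction length below are hypothetical, chosen only to illustrate the read-modify-write pattern: reads go through the cache so the register is fetched from hardware at most once per exit, and writes mark the register dirty so vendor code flushes it back before the next guest entry.

#include <linux/kvm_host.h>

#include "kvm_cache_regs.h"

/*
 * Hypothetical helper (not part of the kernel): advance the guest's RIP
 * past an instruction whose length has already been decoded.
 */
static void example_skip_emulated_insn(struct kvm_vcpu *vcpu, int insn_len)
{
	unsigned long rip = kvm_rip_read(vcpu);	/* cached read of RIP */

	kvm_rip_write(vcpu, rip + insn_len);	/* marks RIP dirty */
}

The CR0/CR4 readers follow the same idea: a call such as kvm_read_cr0_bits(vcpu, X86_CR0_TS) refreshes only the guest-owned bits it actually needs before applying the mask.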
Generated on Thu Jan 10 2013 13:21:09 for Linux Kernel by Doxygen 1.8.2