@@ -51,13 +51,41 @@ const struct kvm_stats_header kvm_vcpu_stats_header = {
 		       sizeof(kvm_vcpu_stats_desc),
 };
 
-static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
+static void kvm_riscv_vcpu_context_reset(struct kvm_vcpu *vcpu)
 {
 	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
-	struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
 	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
-	struct kvm_cpu_context *reset_cntx = &vcpu->arch.guest_reset_context;
+	struct kvm_vcpu_reset_state *reset_state = &vcpu->arch.reset_state;
 	void *vector_datap = cntx->vector.datap;
+
+	memset(cntx, 0, sizeof(*cntx));
+	memset(csr, 0, sizeof(*csr));
+	memset(&vcpu->arch.smstateen_csr, 0, sizeof(vcpu->arch.smstateen_csr));
+
+	/* Restore datap as it's not a part of the guest context. */
+	cntx->vector.datap = vector_datap;
+
+	/* Load SBI reset values */
+	cntx->a0 = vcpu->vcpu_id;
+
+	spin_lock(&reset_state->lock);
+	cntx->sepc = reset_state->pc;
+	cntx->a1 = reset_state->a1;
+	spin_unlock(&reset_state->lock);
+
+	/* Setup reset state of shadow SSTATUS and HSTATUS CSRs */
+	cntx->sstatus = SR_SPP | SR_SPIE;
+
+	cntx->hstatus |= HSTATUS_VTW;
+	cntx->hstatus |= HSTATUS_SPVP;
+	cntx->hstatus |= HSTATUS_SPV;
+
+	/* By default, make CY, TM, and IR counters accessible in VU mode */
+	csr->scounteren = 0x7;
+}
+
+static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
+{
 	bool loaded;
 
 	/**
@@ -72,18 +100,10 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
 
 	vcpu->arch.last_exit_cpu = -1;
 
-	memcpy(csr, reset_csr, sizeof(*csr));
-
-	spin_lock(&vcpu->arch.reset_cntx_lock);
-	memcpy(cntx, reset_cntx, sizeof(*cntx));
-	spin_unlock(&vcpu->arch.reset_cntx_lock);
-
-	memset(&vcpu->arch.smstateen_csr, 0, sizeof(vcpu->arch.smstateen_csr));
+	kvm_riscv_vcpu_context_reset(vcpu);
 
 	kvm_riscv_vcpu_fp_reset(vcpu);
 
-	/* Restore datap as it's not a part of the guest context. */
-	cntx->vector.datap = vector_datap;
 	kvm_riscv_vcpu_vector_reset(vcpu);
 
 	kvm_riscv_vcpu_timer_reset(vcpu);
@@ -115,8 +135,6 @@ int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 {
 	int rc;
-	struct kvm_cpu_context *cntx;
-	struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
 
 	spin_lock_init(&vcpu->arch.mp_state_lock);
 
@@ -136,24 +154,11 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 	/* Setup VCPU hfence queue */
 	spin_lock_init(&vcpu->arch.hfence_lock);
 
-	/* Setup reset state of shadow SSTATUS and HSTATUS CSRs */
-	spin_lock_init(&vcpu->arch.reset_cntx_lock);
-
-	spin_lock(&vcpu->arch.reset_cntx_lock);
-	cntx = &vcpu->arch.guest_reset_context;
-	cntx->sstatus = SR_SPP | SR_SPIE;
-	cntx->hstatus = 0;
-	cntx->hstatus |= HSTATUS_VTW;
-	cntx->hstatus |= HSTATUS_SPVP;
-	cntx->hstatus |= HSTATUS_SPV;
-	spin_unlock(&vcpu->arch.reset_cntx_lock);
+	spin_lock_init(&vcpu->arch.reset_state.lock);
 
 	if (kvm_riscv_vcpu_alloc_vector_context(vcpu))
 		return -ENOMEM;
 
-	/* By default, make CY, TM, and IR counters accessible in VU mode */
-	reset_csr->scounteren = 0x7;
-
 	/* Setup VCPU timer */
 	kvm_riscv_vcpu_timer_init(vcpu);
 
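For context, the hunks above replace the old per-vCPU shadow copies (guest_reset_csr, guest_reset_context) with a small, lock-protected vcpu->arch.reset_state that records only the reset entry point and its argument. Below is a minimal sketch of how that state might be declared and filled by a caller such as the SBI HSM hart-start path. The field names (lock, pc, a1) are inferred from the reads in kvm_riscv_vcpu_context_reset() above; the struct's real home (presumably asm/kvm_host.h) is not shown in this diff, and the setter kvm_riscv_vcpu_set_reset_state() is a hypothetical helper, not part of the patch.

/*
 * Illustrative only: a plausible declaration for the new reset_state
 * and a writer for it.  Names inferred from the diff above; the
 * setter below is hypothetical.
 */
struct kvm_vcpu_reset_state {
	spinlock_t lock;
	unsigned long pc;	/* entry point, loaded into sepc at reset */
	unsigned long a1;	/* opaque argument handed to the new hart */
};

static void kvm_riscv_vcpu_set_reset_state(struct kvm_vcpu *vcpu,
					   unsigned long pc, unsigned long a1)
{
	struct kvm_vcpu_reset_state *reset_state = &vcpu->arch.reset_state;

	/* Pairs with the locked read in kvm_riscv_vcpu_context_reset(). */
	spin_lock(&reset_state->lock);
	reset_state->pc = pc;
	reset_state->a1 = a1;
	spin_unlock(&reset_state->lock);
}

Deriving the reset context from constants plus this small record, instead of memcpy()ing full shadow structures, drops two sizable per-vCPU copies and narrows the locking to the only mutable inputs, pc and a1, which reset reads under the same lock the writer takes.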