@@ -45,8 +45,8 @@ int hv_init(void)
  * This involves a hypercall.
  */
 int hv_post_message(union hv_connection_id connection_id,
-		  enum hv_message_type message_type,
-		  void *payload, size_t payload_size)
+		    enum hv_message_type message_type,
+		    void *payload, size_t payload_size)
 {
 	struct hv_input_post_message *aligned_msg;
 	unsigned long flags;
@@ -86,7 +86,7 @@ int hv_post_message(union hv_connection_id connection_id,
 		status = HV_STATUS_INVALID_PARAMETER;
 	} else {
 		status = hv_do_hypercall(HVCALL_POST_MESSAGE,
-				aligned_msg, NULL);
+					 aligned_msg, NULL);
 	}
 
 	local_irq_restore(flags);
@@ -111,7 +111,7 @@ int hv_synic_alloc(void)
 
 	hv_context.hv_numa_map = kcalloc(nr_node_ids, sizeof(struct cpumask),
 					 GFP_KERNEL);
-	if (hv_context.hv_numa_map == NULL) {
+	if (!hv_context.hv_numa_map) {
 		pr_err("Unable to allocate NUMA map\n");
 		goto err;
 	}
@@ -120,11 +120,11 @@ int hv_synic_alloc(void)
 		hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);
 
 		tasklet_init(&hv_cpu->msg_dpc,
-			     vmbus_on_msg_dpc, (unsigned long) hv_cpu);
+			     vmbus_on_msg_dpc, (unsigned long)hv_cpu);
 
 		if (ms_hyperv.paravisor_present && hv_isolation_type_tdx()) {
 			hv_cpu->post_msg_page = (void *)get_zeroed_page(GFP_ATOMIC);
-			if (hv_cpu->post_msg_page == NULL) {
+			if (!hv_cpu->post_msg_page) {
 				pr_err("Unable to allocate post msg page\n");
 				goto err;
 			}
@@ -147,14 +147,14 @@ int hv_synic_alloc(void)
 		if (!ms_hyperv.paravisor_present && !hv_root_partition) {
 			hv_cpu->synic_message_page =
 				(void *)get_zeroed_page(GFP_ATOMIC);
-			if (hv_cpu->synic_message_page == NULL) {
+			if (!hv_cpu->synic_message_page) {
 				pr_err("Unable to allocate SYNIC message page\n");
 				goto err;
 			}
 
 			hv_cpu->synic_event_page =
 				(void *)get_zeroed_page(GFP_ATOMIC);
-			if (hv_cpu->synic_event_page == NULL) {
+			if (!hv_cpu->synic_event_page) {
 				pr_err("Unable to allocate SYNIC event page\n");
 
 				free_page((unsigned long)hv_cpu->synic_message_page);
@@ -203,14 +203,13 @@ int hv_synic_alloc(void)
 	return ret;
 }
 
-
 void hv_synic_free(void)
 {
 	int cpu, ret;
 
 	for_each_present_cpu(cpu) {
-		struct hv_per_cpu_context *hv_cpu
-			= per_cpu_ptr(hv_context.cpu_context, cpu);
+		struct hv_per_cpu_context *hv_cpu =
+			per_cpu_ptr(hv_context.cpu_context, cpu);
 
 		/* It's better to leak the page if the encryption fails. */
 		if (ms_hyperv.paravisor_present && hv_isolation_type_tdx()) {
@@ -262,8 +261,8 @@ void hv_synic_free(void)
  */
 void hv_synic_enable_regs(unsigned int cpu)
 {
-	struct hv_per_cpu_context *hv_cpu
-		= per_cpu_ptr(hv_context.cpu_context, cpu);
+	struct hv_per_cpu_context *hv_cpu =
+		per_cpu_ptr(hv_context.cpu_context, cpu);
 	union hv_synic_simp simp;
 	union hv_synic_siefp siefp;
 	union hv_synic_sint shared_sint;
@@ -277,8 +276,8 @@ void hv_synic_enable_regs(unsigned int cpu)
 		/* Mask out vTOM bit. ioremap_cache() maps decrypted */
 		u64 base = (simp.base_simp_gpa << HV_HYP_PAGE_SHIFT) &
 				~ms_hyperv.shared_gpa_boundary;
-		hv_cpu->synic_message_page
-			= (void *)ioremap_cache(base, HV_HYP_PAGE_SIZE);
+		hv_cpu->synic_message_page =
+			(void *)ioremap_cache(base, HV_HYP_PAGE_SIZE);
 		if (!hv_cpu->synic_message_page)
 			pr_err("Fail to map synic message page.\n");
 	} else {
@@ -296,8 +295,8 @@ void hv_synic_enable_regs(unsigned int cpu)
 		/* Mask out vTOM bit. ioremap_cache() maps decrypted */
 		u64 base = (siefp.base_siefp_gpa << HV_HYP_PAGE_SHIFT) &
 				~ms_hyperv.shared_gpa_boundary;
-		hv_cpu->synic_event_page
-			= (void *)ioremap_cache(base, HV_HYP_PAGE_SIZE);
+		hv_cpu->synic_event_page =
+			(void *)ioremap_cache(base, HV_HYP_PAGE_SIZE);
 		if (!hv_cpu->synic_event_page)
 			pr_err("Fail to map synic event page.\n");
 	} else {
@@ -348,8 +347,8 @@ int hv_synic_init(unsigned int cpu)
  */
 void hv_synic_disable_regs(unsigned int cpu)
 {
-	struct hv_per_cpu_context *hv_cpu
-		= per_cpu_ptr(hv_context.cpu_context, cpu);
+	struct hv_per_cpu_context *hv_cpu =
+		per_cpu_ptr(hv_context.cpu_context, cpu);
 	union hv_synic_sint shared_sint;
 	union hv_synic_simp simp;
 	union hv_synic_siefp siefp;
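
The hunks above are style cleanups rather than functional changes: pointer checks move from "ptr == NULL" to "!ptr", split assignments keep the "=" at the end of the first line, and there is no space between a cast and its operand. A minimal userspace sketch of the same conventions follows; demo_alloc_page() is a hypothetical stand-in for get_zeroed_page() used purely for illustration, not a kernel API.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical helper standing in for get_zeroed_page(); illustration only. */
static void *demo_alloc_page(void)
{
	return calloc(1, 4096);
}

static int demo_synic_style(void)
{
	/* Keep the '=' on the first line when an assignment is split. */
	void *message_page =
		demo_alloc_page();

	/* Preferred pointer check: '!ptr' rather than 'ptr == NULL'. */
	if (!message_page) {
		fprintf(stderr, "Unable to allocate message page\n");
		return -1;
	}

	/* No space between a cast and its operand: (unsigned long)ptr. */
	printf("page at %#lx\n", (unsigned long)message_page);

	free(message_page);
	return 0;
}

int main(void)
{
	return demo_synic_style();
}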