@@ -2269,9 +2269,7 @@ static void unhandled_cp_access(struct kvm_vcpu *vcpu,
  */
 static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
 			    const struct sys_reg_desc *global,
-			    size_t nr_global,
-			    const struct sys_reg_desc *target_specific,
-			    size_t nr_specific)
+			    size_t nr_global)
 {
 	struct sys_reg_params params;
 	u32 hsr = kvm_vcpu_get_hsr(vcpu);
@@ -2298,14 +2296,11 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
 	}
 
 	/*
-	 * Try to emulate the coprocessor access using the target
-	 * specific table first, and using the global table afterwards.
-	 * If either of the tables contains a handler, handle the
+	 * If the table contains a handler, handle the
 	 * potential register operation in the case of a read and return
 	 * with success.
 	 */
-	if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
-	    !emulate_cp(vcpu, &params, global, nr_global)) {
+	if (!emulate_cp(vcpu, &params, global, nr_global)) {
 		/* Split up the value between registers for the read side */
 		if (!params.is_write) {
 			vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
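
Note: emulate_cp() itself is not touched by this hunk. From the way both callers test its return value (!emulate_cp(...) meaning the access was handled), it behaves roughly like the sketch below; the exact body in sys_regs.c may differ, but find_reg() and perform_access() are the same helpers that appear elsewhere in this diff.

static int emulate_cp(struct kvm_vcpu *vcpu,
		      struct sys_reg_params *params,
		      const struct sys_reg_desc *table,
		      size_t num)
{
	const struct sys_reg_desc *r;

	if (!table)
		return -1;		/* not handled */

	r = find_reg(params, table, num);
	if (r) {
		/* Found an entry: invoke its accessor for the read/write. */
		perform_access(vcpu, params, r);
		return 0;		/* handled */
	}

	return -1;			/* not handled */
}
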
@@ -2326,9 +2321,7 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
  */
 static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
 			    const struct sys_reg_desc *global,
-			    size_t nr_global,
-			    const struct sys_reg_desc *target_specific,
-			    size_t nr_specific)
+			    size_t nr_global)
 {
 	struct sys_reg_params params;
 	u32 hsr = kvm_vcpu_get_hsr(vcpu);
@@ -2344,8 +2337,7 @@ static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
 	params.Op1 = (hsr >> 14) & 0x7;
 	params.Op2 = (hsr >> 17) & 0x7;
 
-	if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
-	    !emulate_cp(vcpu, &params, global, nr_global)) {
+	if (!emulate_cp(vcpu, &params, global, nr_global)) {
 		if (!params.is_write)
 			vcpu_set_reg(vcpu, Rt, params.regval);
 		return 1;
@@ -2357,38 +2349,22 @@ static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
 
 int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
-	const struct sys_reg_desc *target_specific;
-	size_t num;
-
-	target_specific = get_target_table(vcpu->arch.target, false, &num);
-	return kvm_handle_cp_64(vcpu,
-				cp15_64_regs, ARRAY_SIZE(cp15_64_regs),
-				target_specific, num);
+	return kvm_handle_cp_64(vcpu, cp15_64_regs, ARRAY_SIZE(cp15_64_regs));
 }
 
 int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
-	const struct sys_reg_desc *target_specific;
-	size_t num;
-
-	target_specific = get_target_table(vcpu->arch.target, false, &num);
-	return kvm_handle_cp_32(vcpu,
-				cp15_regs, ARRAY_SIZE(cp15_regs),
-				target_specific, num);
+	return kvm_handle_cp_32(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs));
 }
 
 int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
-	return kvm_handle_cp_64(vcpu,
-				cp14_64_regs, ARRAY_SIZE(cp14_64_regs),
-				NULL, 0);
+	return kvm_handle_cp_64(vcpu, cp14_64_regs, ARRAY_SIZE(cp14_64_regs));
 }
 
 int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
-	return kvm_handle_cp_32(vcpu,
-				cp14_regs, ARRAY_SIZE(cp14_regs),
-				NULL, 0);
+	return kvm_handle_cp_32(vcpu, cp14_regs, ARRAY_SIZE(cp14_regs));
 }
 
 static bool is_imp_def_sys_reg(struct sys_reg_params *params)
@@ -2400,15 +2376,9 @@ static bool is_imp_def_sys_reg(struct sys_reg_params *params)
 static int emulate_sys_reg(struct kvm_vcpu *vcpu,
 			   struct sys_reg_params *params)
 {
-	size_t num;
-	const struct sys_reg_desc *table, *r;
-
-	table = get_target_table(vcpu->arch.target, true, &num);
+	const struct sys_reg_desc *r;
 
-	/* Search target-specific then generic table. */
-	r = find_reg(params, table, num);
-	if (!r)
-		r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
+	r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
 
 	if (likely(r)) {
 		perform_access(vcpu, params, r);
@@ -2512,8 +2482,7 @@ const struct sys_reg_desc *find_reg_by_id(u64 id,
 static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
 							 u64 id)
 {
-	size_t num;
-	const struct sys_reg_desc *table, *r;
+	const struct sys_reg_desc *r;
 	struct sys_reg_params params;
 
 	/* We only do sys_reg for now. */
@@ -2523,10 +2492,7 @@ static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
 	if (!index_to_params(id, &params))
 		return NULL;
 
-	table = get_target_table(vcpu->arch.target, true, &num);
-	r = find_reg(&params, table, num);
-	if (!r)
-		r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
+	r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
 
 	/* Not saved in the sys_reg array and not otherwise accessible? */
 	if (r && !(r->reg || r->get_user))
@@ -2826,38 +2792,17 @@ static int walk_one_sys_reg(const struct kvm_vcpu *vcpu,
 /* Assumed ordered tables, see kvm_sys_reg_table_init. */
 static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
 {
-	const struct sys_reg_desc *i1, *i2, *end1, *end2;
+	const struct sys_reg_desc *i2, *end2;
 	unsigned int total = 0;
-	size_t num;
 	int err;
 
-	/* We check for duplicates here, to allow arch-specific overrides. */
-	i1 = get_target_table(vcpu->arch.target, true, &num);
-	end1 = i1 + num;
 	i2 = sys_reg_descs;
 	end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);
 
-	if (i1 == end1)
-		i1 = NULL;
-
-	BUG_ON(i2 == end2);
-
-	/* Walk carefully, as both tables may refer to the same register. */
-	while (i1 || i2) {
-		int cmp = cmp_sys_reg(i1, i2);
-		/* target-specific overrides generic entry. */
-		if (cmp <= 0)
-			err = walk_one_sys_reg(vcpu, i1, &uind, &total);
-		else
-			err = walk_one_sys_reg(vcpu, i2, &uind, &total);
-
+	while (i2 != end2) {
+		err = walk_one_sys_reg(vcpu, i2++, &uind, &total);
 		if (err)
 			return err;
-
-		if (cmp <= 0 && ++i1 == end1)
-			i1 = NULL;
-		if (cmp >= 0 && ++i2 == end2)
-			i2 = NULL;
 	}
 	return total;
 }
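
Note: the "Assumed ordered tables, see kvm_sys_reg_table_init" comment above walk_sys_regs() means the single sys_reg_descs table must already be sorted by encoding when it is walked. A hypothetical sketch of such an init-time check, reusing the cmp_sys_reg() ordering helper that the removed merge loop relied on (the function name check_table_sorted() is made up here for illustration and is not taken from this diff):

/* Hypothetical sketch: verify a descriptor table is strictly ascending. */
static int check_table_sorted(const struct sys_reg_desc *table, unsigned int n)
{
	unsigned int i;

	for (i = 1; i < n; i++) {
		if (cmp_sys_reg(&table[i - 1], &table[i]) >= 0) {
			kvm_err("sys_reg table %p entry %u out of order\n",
				table, i);
			return 1;
		}
	}
	return 0;
}
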