@@ -190,7 +190,7 @@ static bool page_array_iova_pinned(struct page_array *pa, u64 iova, u64 length)
 }
 
 /* Create the list of IDAL words for a page_array. */
 static inline void page_array_idal_create_words(struct page_array *pa,
-						unsigned long *idaws)
+						dma64_t *idaws)
 {
 	int i;
 
@@ -203,10 +203,10 @@ static inline void page_array_idal_create_words(struct page_array *pa,
 	 */
 
 	for (i = 0; i < pa->pa_nr; i++) {
-		idaws[i] = page_to_phys(pa->pa_page[i]);
+		idaws[i] = virt_to_dma64(page_to_virt(pa->pa_page[i]));
 
 		/* Incorporate any offset from each starting address */
-		idaws[i] += pa->pa_iova[i] & (PAGE_SIZE - 1);
+		idaws[i] = dma64_add(idaws[i], pa->pa_iova[i] & ~PAGE_MASK);
 	}
 }
 
@@ -227,7 +227,7 @@ static void convert_ccw0_to_ccw1(struct ccw1 *source, unsigned long len)
 			pccw1->flags = ccw0.flags;
 			pccw1->count = ccw0.count;
 		}
-		pccw1->cda = ccw0.cda;
+		pccw1->cda = u32_to_dma32(ccw0.cda);
 		pccw1++;
 	}
 }
@@ -299,11 +299,12 @@ static inline int ccw_does_data_transfer(struct ccw1 *ccw)
  *
  * Returns 1 if yes, 0 if no.
  */
-static inline int is_cpa_within_range(u32 cpa, u32 head, int len)
+static inline int is_cpa_within_range(dma32_t cpa, u32 head, int len)
 {
 	u32 tail = head + (len - 1) * sizeof(struct ccw1);
+	u32 gcpa = dma32_to_u32(cpa);
 
-	return (head <= cpa && cpa <= tail);
+	return head <= gcpa && gcpa <= tail;
 }
 
 static inline int is_tic_within_range(struct ccw1 *ccw, u32 head, int len)
@@ -356,7 +357,7 @@ static void ccwchain_cda_free(struct ccwchain *chain, int idx)
 	if (ccw_is_tic(ccw))
 		return;
 
-	kfree(phys_to_virt(ccw->cda));
+	kfree(dma32_to_virt(ccw->cda));
 }
 
 /**
@@ -417,15 +418,17 @@ static int tic_target_chain_exists(struct ccw1 *tic, struct channel_program *cp)
 static int ccwchain_loop_tic(struct ccwchain *chain,
 			     struct channel_program *cp);
 
-static int ccwchain_handle_ccw(u32 cda, struct channel_program *cp)
+static int ccwchain_handle_ccw(dma32_t cda, struct channel_program *cp)
 {
 	struct vfio_device *vdev =
 		&container_of(cp, struct vfio_ccw_private, cp)->vdev;
 	struct ccwchain *chain;
 	int len, ret;
+	u32 gcda;
 
+	gcda = dma32_to_u32(cda);
 	/* Copy 2K (the most we support today) of possible CCWs */
-	ret = vfio_dma_rw(vdev, cda, cp->guest_cp, CCWCHAIN_LEN_MAX * sizeof(struct ccw1), false);
+	ret = vfio_dma_rw(vdev, gcda, cp->guest_cp, CCWCHAIN_LEN_MAX * sizeof(struct ccw1), false);
 	if (ret)
 		return ret;
 
@@ -434,7 +437,7 @@ static int ccwchain_handle_ccw(u32 cda, struct channel_program *cp)
 		convert_ccw0_to_ccw1(cp->guest_cp, CCWCHAIN_LEN_MAX);
 
 	/* Count the CCWs in the current chain */
-	len = ccwchain_calc_length(cda, cp);
+	len = ccwchain_calc_length(gcda, cp);
 	if (len < 0)
 		return len;
 
@@ -444,7 +447,7 @@ static int ccwchain_handle_ccw(u32 cda, struct channel_program *cp)
 		return -ENOMEM;
 
 	chain->ch_len = len;
-	chain->ch_iova = cda;
+	chain->ch_iova = gcda;
 
 	/* Copy the actual CCWs into the new chain */
 	memcpy(chain->ch_ccw, cp->guest_cp, len * sizeof(struct ccw1));
@@ -487,28 +490,26 @@ static int ccwchain_fetch_tic(struct ccw1 *ccw,
 			      struct channel_program *cp)
 {
 	struct ccwchain *iter;
-	u32 ccw_head;
+	u32 cda, ccw_head;
 
 	list_for_each_entry(iter, &cp->ccwchain_list, next) {
 		ccw_head = iter->ch_iova;
 		if (is_cpa_within_range(ccw->cda, ccw_head, iter->ch_len)) {
-			ccw->cda = (__u32) (addr_t) (((char *)iter->ch_ccw) +
-						     (ccw->cda - ccw_head));
+			cda = (u64)iter->ch_ccw + dma32_to_u32(ccw->cda) - ccw_head;
+			ccw->cda = u32_to_dma32(cda);
 			return 0;
 		}
 	}
 
 	return -EFAULT;
 }
 
-static unsigned long *get_guest_idal(struct ccw1 *ccw,
-				     struct channel_program *cp,
-				     int idaw_nr)
+static dma64_t *get_guest_idal(struct ccw1 *ccw, struct channel_program *cp, int idaw_nr)
 {
 	struct vfio_device *vdev =
 		&container_of(cp, struct vfio_ccw_private, cp)->vdev;
-	unsigned long *idaws;
-	unsigned int *idaws_f1;
+	dma64_t *idaws;
+	dma32_t *idaws_f1;
 	int idal_len = idaw_nr * sizeof(*idaws);
 	int idaw_size = idal_is_2k(cp) ? PAGE_SIZE / 2 : PAGE_SIZE;
 	int idaw_mask = ~(idaw_size - 1);
@@ -520,22 +521,26 @@ static unsigned long *get_guest_idal(struct ccw1 *ccw,
 
 	if (ccw_is_idal(ccw)) {
 		/* Copy IDAL from guest */
-		ret = vfio_dma_rw(vdev, ccw->cda, idaws, idal_len, false);
+		ret = vfio_dma_rw(vdev, dma32_to_u32(ccw->cda), idaws, idal_len, false);
 		if (ret) {
 			kfree(idaws);
 			return ERR_PTR(ret);
 		}
 	} else {
 		/* Fabricate an IDAL based off CCW data address */
 		if (cp->orb.cmd.c64) {
-			idaws[0] = ccw->cda;
-			for (i = 1; i < idaw_nr; i++)
-				idaws[i] = (idaws[i - 1] + idaw_size) & idaw_mask;
+			idaws[0] = u64_to_dma64(dma32_to_u32(ccw->cda));
+			for (i = 1; i < idaw_nr; i++) {
+				idaws[i] = dma64_add(idaws[i - 1], idaw_size);
+				idaws[i] = dma64_and(idaws[i], idaw_mask);
+			}
 		} else {
-			idaws_f1 = (unsigned int *)idaws;
+			idaws_f1 = (dma32_t *)idaws;
 			idaws_f1[0] = ccw->cda;
-			for (i = 1; i < idaw_nr; i++)
-				idaws_f1[i] = (idaws_f1[i - 1] + idaw_size) & idaw_mask;
+			for (i = 1; i < idaw_nr; i++) {
+				idaws_f1[i] = dma32_add(idaws_f1[i - 1], idaw_size);
+				idaws_f1[i] = dma32_and(idaws_f1[i], idaw_mask);
+			}
 		}
 	}
@@ -572,7 +577,7 @@ static int ccw_count_idaws(struct ccw1 *ccw,
 	if (ccw_is_idal(ccw)) {
 		/* Read first IDAW to check its starting address. */
 		/* All subsequent IDAWs will be 2K- or 4K-aligned. */
-		ret = vfio_dma_rw(vdev, ccw->cda, &iova, size, false);
+		ret = vfio_dma_rw(vdev, dma32_to_u32(ccw->cda), &iova, size, false);
 		if (ret)
 			return ret;
 
@@ -583,7 +588,7 @@ static int ccw_count_idaws(struct ccw1 *ccw,
 		if (!cp->orb.cmd.c64)
 			iova = iova >> 32;
 	} else {
-		iova = ccw->cda;
+		iova = dma32_to_u32(ccw->cda);
 	}
 
 	/* Format-1 IDAWs operate on 2K each */
@@ -604,8 +609,8 @@ static int ccwchain_fetch_ccw(struct ccw1 *ccw,
 {
 	struct vfio_device *vdev =
 		&container_of(cp, struct vfio_ccw_private, cp)->vdev;
-	unsigned long *idaws;
-	unsigned int *idaws_f1;
+	dma64_t *idaws;
+	dma32_t *idaws_f1;
 	int ret;
 	int idaw_nr;
 	int i;
@@ -636,12 +641,12 @@ static int ccwchain_fetch_ccw(struct ccw1 *ccw,
 	 * Copy guest IDAWs into page_array, in case the memory they
 	 * occupy is not contiguous.
 	 */
-	idaws_f1 = (unsigned int *)idaws;
+	idaws_f1 = (dma32_t *)idaws;
 	for (i = 0; i < idaw_nr; i++) {
 		if (cp->orb.cmd.c64)
-			pa->pa_iova[i] = idaws[i];
+			pa->pa_iova[i] = dma64_to_u64(idaws[i]);
 		else
-			pa->pa_iova[i] = idaws_f1[i];
+			pa->pa_iova[i] = dma32_to_u32(idaws_f1[i]);
 	}
 
 	if (ccw_does_data_transfer(ccw)) {
@@ -652,7 +657,7 @@ static int ccwchain_fetch_ccw(struct ccw1 *ccw,
 		pa->pa_nr = 0;
 	}
 
-	ccw->cda = (__u32) virt_to_phys(idaws);
+	ccw->cda = virt_to_dma32(idaws);
 	ccw->flags |= CCW_FLAG_IDA;
 
 	/* Populate the IDAL with pinned/translated addresses from page */
@@ -874,7 +879,7 @@ union orb *cp_get_orb(struct channel_program *cp, struct subchannel *sch)
 
 	chain = list_first_entry(&cp->ccwchain_list, struct ccwchain, next);
 	cpa = chain->ch_ccw;
-	orb->cmd.cpa = (__u32) virt_to_phys(cpa);
+	orb->cmd.cpa = virt_to_dma32(cpa);
 
 	return orb;
 }
@@ -896,7 +901,7 @@ union orb *cp_get_orb(struct channel_program *cp, struct subchannel *sch)
 void cp_update_scsw(struct channel_program *cp, union scsw *scsw)
 {
 	struct ccwchain *chain;
-	u32 cpa = scsw->cmd.cpa;
+	dma32_t cpa = scsw->cmd.cpa;
 	u32 ccw_head;
 
 	if (!cp->initialized)
@@ -919,9 +924,10 @@ void cp_update_scsw(struct channel_program *cp, union scsw *scsw)
 		 * (cpa - ccw_head) is the offset value of the host
 		 * physical ccw to its chain head.
 		 * Adding this value to the guest physical ccw chain
-		 * head gets us the guest cpa.
+		 * head gets us the guest cpa:
+		 * cpa = chain->ch_iova + (cpa - ccw_head)
 		 */
-		cpa = chain->ch_iova + (cpa - ccw_head);
+		cpa = dma32_add(cpa, chain->ch_iova - ccw_head);
 		break;
 	}
 }
0 commit comments