@@ -46,6 +46,9 @@
 #define ARM_LPAE_PGD_SIZE(d)                                            \
         (sizeof(arm_lpae_iopte) << (d)->pgd_bits)
 
+#define ARM_LPAE_PTES_PER_TABLE(d)                                      \
+        (ARM_LPAE_GRANULE(d) >> ilog2(sizeof(arm_lpae_iopte)))
+
 /*
  * Calculate the index at level l used to map virtual address a using the
  * pagetable in d.
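
Note (not in the original commit): arm_lpae_iopte is a u64 in this file, so the new macro reduces to the familiar entries-per-table count. A worked example, assuming a 4KiB granule:

    /* Illustrative arithmetic only, assuming ARM_LPAE_GRANULE(d) == SZ_4K
     * and sizeof(arm_lpae_iopte) == sizeof(u64) == 8:
     *
     *   ARM_LPAE_PTES_PER_TABLE(d) = 4096 >> ilog2(8) = 4096 >> 3 = 512
     *
     * i.e. 512 PTEs per table, the same as 4096 / 8.
     */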
@@ -239,22 +242,19 @@ static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep, int num_entries,
                                    sizeof(*ptep) * num_entries, DMA_TO_DEVICE);
 }
 
-static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
-                               int num_entries, struct io_pgtable_cfg *cfg)
+static void __arm_lpae_clear_pte(arm_lpae_iopte *ptep, struct io_pgtable_cfg *cfg)
 {
-        int i;
 
-        for (i = 0; i < num_entries; i++)
-                ptep[i] = pte;
+        *ptep = 0;
 
         if (!cfg->coherent_walk)
-                __arm_lpae_sync_pte(ptep, num_entries, cfg);
+                __arm_lpae_sync_pte(ptep, 1, cfg);
 }
 
 static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
                                struct iommu_iotlb_gather *gather,
-                               unsigned long iova, size_t size, int lvl,
-                               arm_lpae_iopte *ptep);
+                               unsigned long iova, size_t size, size_t pgcount,
+                               int lvl, arm_lpae_iopte *ptep);
 
 static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
                                 phys_addr_t paddr, arm_lpae_iopte prot,
@@ -298,7 +298,7 @@ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
                 size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
 
                 tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
-                if (__arm_lpae_unmap(data, NULL, iova + i * sz, sz,
+                if (__arm_lpae_unmap(data, NULL, iova + i * sz, sz, 1,
                                      lvl, tblp) != sz) {
                         WARN_ON(1);
                         return -EINVAL;
@@ -526,14 +526,15 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
                                        struct iommu_iotlb_gather *gather,
                                        unsigned long iova, size_t size,
                                        arm_lpae_iopte blk_pte, int lvl,
-                                       arm_lpae_iopte *ptep)
+                                       arm_lpae_iopte *ptep, size_t pgcount)
 {
         struct io_pgtable_cfg *cfg = &data->iop.cfg;
         arm_lpae_iopte pte, *tablep;
         phys_addr_t blk_paddr;
         size_t tablesz = ARM_LPAE_GRANULE(data);
         size_t split_sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
-        int i, unmap_idx = -1;
+        int ptes_per_table = ARM_LPAE_PTES_PER_TABLE(data);
+        int i, unmap_idx_start = -1, num_entries = 0, max_entries;
 
         if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
                 return 0;
@@ -542,15 +543,18 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
         if (!tablep)
                 return 0; /* Bytes unmapped */
 
-        if (size == split_sz)
-                unmap_idx = ARM_LPAE_LVL_IDX(iova, lvl, data);
+        if (size == split_sz) {
+                unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
+                max_entries = ptes_per_table - unmap_idx_start;
+                num_entries = min_t(int, pgcount, max_entries);
+        }
 
         blk_paddr = iopte_to_paddr(blk_pte, data);
         pte = iopte_prot(blk_pte);
 
-        for (i = 0; i < tablesz / sizeof(pte); i++, blk_paddr += split_sz) {
+        for (i = 0; i < ptes_per_table; i++, blk_paddr += split_sz) {
                 /* Unmap! */
-                if (i == unmap_idx)
+                if (i >= unmap_idx_start && i < (unmap_idx_start + num_entries))
                         continue;
 
                 __arm_lpae_init_pte(data, blk_paddr, pte, lvl, 1, &tablep[i]);
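
Note (not in the original commit): the clamp above keeps a multi-page unmap within the split table; anything past the table boundary is left for the caller's next pass. A minimal sketch of the clamping with hypothetical values:

    /* Sketch: with 512 PTEs per table, a request starting at index 510
     * with pgcount = 8 is clamped to the 2 slots left in this table. */
    int unmap_idx_start = 510, ptes_per_table = 512;
    size_t pgcount = 8;
    int max_entries = ptes_per_table - unmap_idx_start;    /* 2 */
    int num_entries = min_t(int, pgcount, max_entries);    /* 2 */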
@@ -568,84 +572,107 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
                         return 0;
 
                 tablep = iopte_deref(pte, data);
-        } else if (unmap_idx >= 0) {
-                io_pgtable_tlb_add_page(&data->iop, gather, iova, size);
-                return size;
+        } else if (unmap_idx_start >= 0) {
+                for (i = 0; i < num_entries; i++)
+                        io_pgtable_tlb_add_page(&data->iop, gather, iova + i * size, size);
+
+                return num_entries * size;
         }
 
-        return __arm_lpae_unmap(data, gather, iova, size, lvl, tablep);
+        return __arm_lpae_unmap(data, gather, iova, size, pgcount, lvl, tablep);
 }
 
 static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
                                struct iommu_iotlb_gather *gather,
-                               unsigned long iova, size_t size, int lvl,
-                               arm_lpae_iopte *ptep)
+                               unsigned long iova, size_t size, size_t pgcount,
+                               int lvl, arm_lpae_iopte *ptep)
 {
         arm_lpae_iopte pte;
         struct io_pgtable *iop = &data->iop;
+        int i = 0, num_entries, max_entries, unmap_idx_start;
 
         /* Something went horribly wrong and we ran out of page table */
         if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
                 return 0;
 
-        ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
+        unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
+        ptep += unmap_idx_start;
         pte = READ_ONCE(*ptep);
         if (WARN_ON(!pte))
                 return 0;
 
         /* If the size matches this level, we're in the right place */
         if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
-                __arm_lpae_set_pte(ptep, 0, 1, &iop->cfg);
-
-                if (!iopte_leaf(pte, lvl, iop->fmt)) {
-                        /* Also flush any partial walks */
-                        io_pgtable_tlb_flush_walk(iop, iova, size,
-                                                  ARM_LPAE_GRANULE(data));
-                        ptep = iopte_deref(pte, data);
-                        __arm_lpae_free_pgtable(data, lvl + 1, ptep);
-                } else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) {
-                        /*
-                         * Order the PTE update against queueing the IOVA, to
-                         * guarantee that a flush callback from a different CPU
-                         * has observed it before the TLBIALL can be issued.
-                         */
-                        smp_wmb();
-                } else {
-                        io_pgtable_tlb_add_page(iop, gather, iova, size);
+                max_entries = ARM_LPAE_PTES_PER_TABLE(data) - unmap_idx_start;
+                num_entries = min_t(int, pgcount, max_entries);
+
+                while (i < num_entries) {
+                        pte = READ_ONCE(*ptep);
+                        if (WARN_ON(!pte))
+                                break;
+
+                        __arm_lpae_clear_pte(ptep, &iop->cfg);
+
+                        if (!iopte_leaf(pte, lvl, iop->fmt)) {
+                                /* Also flush any partial walks */
+                                io_pgtable_tlb_flush_walk(iop, iova + i * size, size,
+                                                          ARM_LPAE_GRANULE(data));
+                                __arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
+                        } else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) {
+                                /*
+                                 * Order the PTE update against queueing the IOVA, to
+                                 * guarantee that a flush callback from a different CPU
+                                 * has observed it before the TLBIALL can be issued.
+                                 */
+                                smp_wmb();
+                        } else {
+                                io_pgtable_tlb_add_page(iop, gather, iova + i * size, size);
+                        }
+
+                        ptep++;
+                        i++;
                 }
 
-                return size;
+                return i * size;
         } else if (iopte_leaf(pte, lvl, iop->fmt)) {
                 /*
                  * Insert a table at the next level to map the old region,
                  * minus the part we want to unmap
                  */
                 return arm_lpae_split_blk_unmap(data, gather, iova, size, pte,
-                                                lvl + 1, ptep);
+                                                lvl + 1, ptep, pgcount);
         }
 
         /* Keep on walkin' */
         ptep = iopte_deref(pte, data);
-        return __arm_lpae_unmap(data, gather, iova, size, lvl + 1, ptep);
+        return __arm_lpae_unmap(data, gather, iova, size, pgcount, lvl + 1, ptep);
 }
 
-static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
-                             size_t size, struct iommu_iotlb_gather *gather)
+static size_t arm_lpae_unmap_pages(struct io_pgtable_ops *ops, unsigned long iova,
+                                   size_t pgsize, size_t pgcount,
+                                   struct iommu_iotlb_gather *gather)
 {
         struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
         struct io_pgtable_cfg *cfg = &data->iop.cfg;
         arm_lpae_iopte *ptep = data->pgd;
         long iaext = (s64)iova >> cfg->ias;
 
-        if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size))
+        if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize || !pgcount))
                 return 0;
 
         if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
                 iaext = ~iaext;
         if (WARN_ON(iaext))
                 return 0;
 
-        return __arm_lpae_unmap(data, gather, iova, size, data->start_level, ptep);
+        return __arm_lpae_unmap(data, gather, iova, pgsize, pgcount,
+                                data->start_level, ptep);
+}
+
+static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
+                             size_t size, struct iommu_iotlb_gather *gather)
+{
+        return arm_lpae_unmap_pages(ops, iova, size, 1, gather);
 }
 
 static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
@@ -761,6 +788,7 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
         data->iop.ops = (struct io_pgtable_ops) {
                 .map            = arm_lpae_map,
                 .unmap          = arm_lpae_unmap,
+                .unmap_pages    = arm_lpae_unmap_pages,
                 .iova_to_phys   = arm_lpae_iova_to_phys,
         };
 
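
For context, a hedged sketch (not from this commit) of how a caller might exercise the new callback: one unmap_pages call covers pgcount pages of pgsize and returns the number of bytes actually unmapped, so partial progress stays visible to the caller.

    /* Hypothetical caller: unmap 16 contiguous 4KiB pages in one walk
     * instead of 16 trips through the single-page unmap path. */
    size_t unmapped = ops->unmap_pages(ops, iova, SZ_4K, 16, &gather);
    if (unmapped != 16 * SZ_4K) {
            /* partially unmapped; the caller can retry the remainder */
    }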