@@ -335,10 +335,14 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
 	return 0;
 }
 
+static struct arm_smmu_cmdq *arm_smmu_get_cmdq(struct arm_smmu_device *smmu)
+{
+	return &smmu->cmdq;
+}
+
 static void arm_smmu_cmdq_build_sync_cmd(u64 *cmd, struct arm_smmu_device *smmu,
-					 u32 prod)
+					 struct arm_smmu_queue *q, u32 prod)
 {
-	struct arm_smmu_queue *q = &smmu->cmdq.q;
 	struct arm_smmu_cmdq_ent ent = {
 		.opcode = CMDQ_OP_CMD_SYNC,
 	};
@@ -355,7 +359,8 @@ static void arm_smmu_cmdq_build_sync_cmd(u64 *cmd, struct arm_smmu_device *smmu,
 	arm_smmu_cmdq_build_cmd(cmd, &ent);
 }
 
-static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
+static void __arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu,
+				     struct arm_smmu_queue *q)
 {
 	static const char * const cerror_str[] = {
 		[CMDQ_ERR_CERROR_NONE_IDX]	= "No error",
@@ -366,7 +371,6 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
 
 	int i;
 	u64 cmd[CMDQ_ENT_DWORDS];
-	struct arm_smmu_queue *q = &smmu->cmdq.q;
 	u32 cons = readl_relaxed(q->cons_reg);
 	u32 idx = FIELD_GET(CMDQ_CONS_ERR, cons);
 	struct arm_smmu_cmdq_ent cmd_sync = {
@@ -413,6 +417,11 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
 	queue_write(Q_ENT(q, cons), cmd, q->ent_dwords);
 }
 
+static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
+{
+	__arm_smmu_cmdq_skip_err(smmu, &smmu->cmdq.q);
+}
+
 /*
  * Command queue locking.
  * This is a form of bastardised rwlock with the following major changes:
@@ -579,7 +588,7 @@ static int arm_smmu_cmdq_poll_until_not_full(struct arm_smmu_device *smmu,
 {
 	unsigned long flags;
 	struct arm_smmu_queue_poll qp;
-	struct arm_smmu_cmdq *cmdq = &smmu->cmdq;
+	struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu);
 	int ret = 0;
 
 	/*
@@ -595,7 +604,7 @@ static int arm_smmu_cmdq_poll_until_not_full(struct arm_smmu_device *smmu,
 
 	queue_poll_init(smmu, &qp);
 	do {
-		llq->val = READ_ONCE(smmu->cmdq.q.llq.val);
+		llq->val = READ_ONCE(cmdq->q.llq.val);
 		if (!queue_full(llq))
 			break;
 
@@ -614,7 +623,7 @@ static int __arm_smmu_cmdq_poll_until_msi(struct arm_smmu_device *smmu,
 {
 	int ret = 0;
 	struct arm_smmu_queue_poll qp;
-	struct arm_smmu_cmdq *cmdq = &smmu->cmdq;
+	struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu);
 	u32 *cmd = (u32 *)(Q_ENT(&cmdq->q, llq->prod));
 
 	queue_poll_init(smmu, &qp);
@@ -637,12 +646,12 @@ static int __arm_smmu_cmdq_poll_until_consumed(struct arm_smmu_device *smmu,
 					       struct arm_smmu_ll_queue *llq)
 {
 	struct arm_smmu_queue_poll qp;
-	struct arm_smmu_cmdq *cmdq = &smmu->cmdq;
+	struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu);
 	u32 prod = llq->prod;
 	int ret = 0;
 
 	queue_poll_init(smmu, &qp);
-	llq->val = READ_ONCE(smmu->cmdq.q.llq.val);
+	llq->val = READ_ONCE(cmdq->q.llq.val);
 	do {
 		if (queue_consumed(llq, prod))
 			break;
@@ -732,12 +741,12 @@ static int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu,
 	u32 prod;
 	unsigned long flags;
 	bool owner;
-	struct arm_smmu_cmdq *cmdq = &smmu->cmdq;
-	struct arm_smmu_ll_queue llq = {
-		.max_n_shift = cmdq->q.llq.max_n_shift,
-	}, head = llq;
+	struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu);
+	struct arm_smmu_ll_queue llq, head;
 	int ret = 0;
 
+	llq.max_n_shift = cmdq->q.llq.max_n_shift;
+
 	/* 1. Allocate some space in the queue */
 	local_irq_save(flags);
 	llq.val = READ_ONCE(cmdq->q.llq.val);
@@ -772,7 +781,7 @@ static int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu,
 	arm_smmu_cmdq_write_entries(cmdq, cmds, llq.prod, n);
 	if (sync) {
 		prod = queue_inc_prod_n(&llq, n);
-		arm_smmu_cmdq_build_sync_cmd(cmd_sync, smmu, prod);
+		arm_smmu_cmdq_build_sync_cmd(cmd_sync, smmu, &cmdq->q, prod);
 		queue_write(Q_ENT(&cmdq->q, prod), cmd_sync, CMDQ_ENT_DWORDS);
 
 		/*
@@ -845,8 +854,9 @@ static int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu,
 	return ret;
 }
 
-static int arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
-				   struct arm_smmu_cmdq_ent *ent)
+static int __arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
+				     struct arm_smmu_cmdq_ent *ent,
+				     bool sync)
 {
 	u64 cmd[CMDQ_ENT_DWORDS];
@@ -856,12 +866,19 @@ static int arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
 		return -EINVAL;
 	}
 
-	return arm_smmu_cmdq_issue_cmdlist(smmu, cmd, 1, false);
+	return arm_smmu_cmdq_issue_cmdlist(smmu, cmd, 1, sync);
+}
+
+static int arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
+				   struct arm_smmu_cmdq_ent *ent)
+{
+	return __arm_smmu_cmdq_issue_cmd(smmu, ent, false);
 }
 
-static int arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu)
+static int arm_smmu_cmdq_issue_cmd_with_sync(struct arm_smmu_device *smmu,
+					     struct arm_smmu_cmdq_ent *ent)
 {
-	return arm_smmu_cmdq_issue_cmdlist(smmu, NULL, 0, true);
+	return __arm_smmu_cmdq_issue_cmd(smmu, ent, true);
 }
 
 static void arm_smmu_cmdq_batch_add(struct arm_smmu_device *smmu,
@@ -929,8 +946,7 @@ void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid)
 		.tlbi.asid = asid,
 	};
 
-	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
-	arm_smmu_cmdq_issue_sync(smmu);
+	arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
 }
 
 static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain,
@@ -939,7 +955,7 @@ static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain,
 	size_t i;
 	unsigned long flags;
 	struct arm_smmu_master *master;
-	struct arm_smmu_cmdq_batch cmds = {};
+	struct arm_smmu_cmdq_batch cmds;
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
 	struct arm_smmu_cmdq_ent cmd = {
 		.opcode	= CMDQ_OP_CFGI_CD,
@@ -949,6 +965,8 @@ static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain,
 		},
 	};
 
+	cmds.num = 0;
+
 	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
 	list_for_each_entry(master, &smmu_domain->devices, domain_head) {
 		for (i = 0; i < master->num_streams; i++) {
@@ -1211,8 +1229,7 @@ static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid)
 		},
 	};
 
-	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
-	arm_smmu_cmdq_issue_sync(smmu);
+	arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
 }
 
 static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
@@ -1747,15 +1764,16 @@ static int arm_smmu_atc_inv_master(struct arm_smmu_master *master)
 {
 	int i;
 	struct arm_smmu_cmdq_ent cmd;
+	struct arm_smmu_cmdq_batch cmds = {};
 
 	arm_smmu_atc_inv_to_cmd(0, 0, 0, &cmd);
 
 	for (i = 0; i < master->num_streams; i++) {
 		cmd.atc.sid = master->streams[i].id;
-		arm_smmu_cmdq_issue_cmd(master->smmu, &cmd);
+		arm_smmu_cmdq_batch_add(master->smmu, &cmds, &cmd);
 	}
 
-	return arm_smmu_cmdq_issue_sync(master->smmu);
+	return arm_smmu_cmdq_batch_submit(master->smmu, &cmds);
 }
 
 int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid,
@@ -1765,7 +1783,7 @@ int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid,
 	unsigned long flags;
 	struct arm_smmu_cmdq_ent cmd;
 	struct arm_smmu_master *master;
-	struct arm_smmu_cmdq_batch cmds = {};
+	struct arm_smmu_cmdq_batch cmds;
 
 	if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_ATS))
 		return 0;
@@ -1789,6 +1807,8 @@ int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid,
 
 	arm_smmu_atc_inv_to_cmd(ssid, iova, size, &cmd);
 
+	cmds.num = 0;
+
 	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
 	list_for_each_entry(master, &smmu_domain->devices, domain_head) {
 		if (!master->ats_enabled)
@@ -1823,8 +1843,7 @@ static void arm_smmu_tlb_inv_context(void *cookie)
 	} else {
 		cmd.opcode	= CMDQ_OP_TLBI_S12_VMALL;
 		cmd.tlbi.vmid	= smmu_domain->s2_cfg.vmid;
-		arm_smmu_cmdq_issue_cmd(smmu, &cmd);
-		arm_smmu_cmdq_issue_sync(smmu);
+		arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
 	}
 	arm_smmu_atc_inv_domain(smmu_domain, 0, 0, 0);
 }
@@ -1837,7 +1856,7 @@ static void __arm_smmu_tlb_inv_range(struct arm_smmu_cmdq_ent *cmd,
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
 	unsigned long end = iova + size, num_pages = 0, tg = 0;
 	size_t inv_range = granule;
-	struct arm_smmu_cmdq_batch cmds = {};
+	struct arm_smmu_cmdq_batch cmds;
 
 	if (!size)
 		return;
@@ -1855,6 +1874,8 @@ static void __arm_smmu_tlb_inv_range(struct arm_smmu_cmdq_ent *cmd,
 		num_pages = size >> tg;
 	}
 
+	cmds.num = 0;
+
 	while (iova < end) {
 		if (smmu->features & ARM_SMMU_FEAT_RANGE_INV) {
 			/*
@@ -3338,18 +3359,16 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
 
 	/* Invalidate any cached configuration */
 	cmd.opcode = CMDQ_OP_CFGI_ALL;
-	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
-	arm_smmu_cmdq_issue_sync(smmu);
+	arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
 
 	/* Invalidate any stale TLB entries */
 	if (smmu->features & ARM_SMMU_FEAT_HYP) {
 		cmd.opcode = CMDQ_OP_TLBI_EL2_ALL;
-		arm_smmu_cmdq_issue_cmd(smmu, &cmd);
+		arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
 	}
 
 	cmd.opcode = CMDQ_OP_TLBI_NSNH_ALL;
-	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
-	arm_smmu_cmdq_issue_sync(smmu);
+	arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
 
 	/* Event queue */
 	writeq_relaxed(smmu->evtq.q.q_base, smmu->base + ARM_SMMU_EVTQ_BASE);
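
For context, the calling conventions after this patch, as a minimal sketch distilled from the hunks above. The rationale given for open-coding cmds.num is an assumption here: struct arm_smmu_cmdq_batch embeds a sizeable command array, so a {} initialiser would zero far more than the single field the batch helpers actually require.

	/* Before: each invalidation cost two queue submissions. */
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	arm_smmu_cmdq_issue_sync(smmu);

	/* After: one submission carries the command and its CMD_SYNC. */
	arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);

	/* Batch users initialise only the count, then reuse the existing
	 * arm_smmu_cmdq_batch_add()/arm_smmu_cmdq_batch_submit() helpers
	 * that the hunks above convert callers to. */
	struct arm_smmu_cmdq_batch cmds;

	cmds.num = 0;
	arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd);
	ret = arm_smmu_cmdq_batch_submit(smmu, &cmds);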