@@ -624,6 +624,7 @@ static void qaic_free_object(struct drm_gem_object *obj)
 		qaic_free_sgt(bo->sgt);
 	}
 
+	mutex_destroy(&bo->lock);
 	drm_gem_object_release(obj);
 	kfree(bo);
 }
@@ -641,6 +642,7 @@ static void qaic_init_bo(struct qaic_bo *bo, bool reinit)
 		bo->sliced = false;
 		reinit_completion(&bo->xfer_done);
 	} else {
+		mutex_init(&bo->lock);
 		init_completion(&bo->xfer_done);
 	}
 	complete_all(&bo->xfer_done);
@@ -1002,10 +1004,13 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
 	}
 
 	bo = to_qaic_bo(obj);
+	ret = mutex_lock_interruptible(&bo->lock);
+	if (ret)
+		goto put_bo;
 
 	if (bo->sliced) {
 		ret = -EINVAL;
-		goto put_bo;
+		goto unlock_bo;
 	}
 
 	dbc = &qdev->dbc[args->hdr.dbc_id];
@@ -1029,7 +1034,7 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
 	bo->sliced = true;
 	list_add_tail(&bo->bo_list, &bo->dbc->bo_lists);
 	srcu_read_unlock(&dbc->ch_lock, rcu_id);
-	drm_gem_object_put(obj);
+	mutex_unlock(&bo->lock);
 	srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
 	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
 
@@ -1039,6 +1044,8 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
 	qaic_unprepare_bo(qdev, bo);
 unlock_ch_srcu:
 	srcu_read_unlock(&dbc->ch_lock, rcu_id);
+unlock_bo:
+	mutex_unlock(&bo->lock);
 put_bo:
 	drm_gem_object_put(obj);
 free_slice_ent:
@@ -1193,15 +1200,18 @@ static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *fil
 		}
 
 		bo = to_qaic_bo(obj);
+		ret = mutex_lock_interruptible(&bo->lock);
+		if (ret)
+			goto failed_to_send_bo;
 
 		if (!bo->sliced) {
 			ret = -EINVAL;
-			goto failed_to_send_bo;
+			goto unlock_bo;
 		}
 
 		if (is_partial && pexec[i].resize > bo->base.size) {
 			ret = -EINVAL;
-			goto failed_to_send_bo;
+			goto unlock_bo;
 		}
 
 		spin_lock_irqsave(&dbc->xfer_lock, flags);
@@ -1210,7 +1220,7 @@ static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *fil
 		if (queued) {
 			spin_unlock_irqrestore(&dbc->xfer_lock, flags);
 			ret = -EINVAL;
-			goto failed_to_send_bo;
+			goto unlock_bo;
 		}
 
 		bo->req_id = dbc->next_req_id++;
@@ -1241,17 +1251,20 @@ static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *fil
 			if (ret) {
 				bo->queued = false;
 				spin_unlock_irqrestore(&dbc->xfer_lock, flags);
-				goto failed_to_send_bo;
+				goto unlock_bo;
 			}
 		}
 		reinit_completion(&bo->xfer_done);
 		list_add_tail(&bo->xfer_list, &dbc->xfer_list);
 		spin_unlock_irqrestore(&dbc->xfer_lock, flags);
 		dma_sync_sgtable_for_device(&qdev->pdev->dev, bo->sgt, bo->dir);
+		mutex_unlock(&bo->lock);
 	}
 
 	return 0;
 
+unlock_bo:
+	mutex_unlock(&bo->lock);
 failed_to_send_bo:
 	if (likely(obj))
 		drm_gem_object_put(obj);
@@ -1807,6 +1820,91 @@ int qaic_perf_stats_bo_ioctl(struct drm_device *dev, void *data, struct drm_file
 	return ret;
 }
 
+static void detach_slice_bo(struct qaic_device *qdev, struct qaic_bo *bo)
+{
+	qaic_free_slices_bo(bo);
+	qaic_unprepare_bo(qdev, bo);
+	qaic_init_bo(bo, true);
+	list_del(&bo->bo_list);
+	drm_gem_object_put(&bo->base);
+}
+
+int qaic_detach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	struct qaic_detach_slice *args = data;
+	int rcu_id, usr_rcu_id, qdev_rcu_id;
+	struct dma_bridge_chan *dbc;
+	struct drm_gem_object *obj;
+	struct qaic_device *qdev;
+	struct qaic_user *usr;
+	unsigned long flags;
+	struct qaic_bo *bo;
+	int ret;
+
+	if (args->pad != 0)
+		return -EINVAL;
+
+	usr = file_priv->driver_priv;
+	usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
+	if (!usr->qddev) {
+		ret = -ENODEV;
+		goto unlock_usr_srcu;
+	}
+
+	qdev = usr->qddev->qdev;
+	qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
+	if (qdev->in_reset) {
+		ret = -ENODEV;
+		goto unlock_dev_srcu;
+	}
+
+	obj = drm_gem_object_lookup(file_priv, args->handle);
+	if (!obj) {
+		ret = -ENOENT;
+		goto unlock_dev_srcu;
+	}
+
+	bo = to_qaic_bo(obj);
+	ret = mutex_lock_interruptible(&bo->lock);
+	if (ret)
+		goto put_bo;
+
+	if (!bo->sliced) {
+		ret = -EINVAL;
+		goto unlock_bo;
+	}
+
+	dbc = bo->dbc;
+	rcu_id = srcu_read_lock(&dbc->ch_lock);
+	if (dbc->usr != usr) {
+		ret = -EINVAL;
+		goto unlock_ch_srcu;
+	}
+
+	/* Check if BO is committed to H/W for DMA */
+	spin_lock_irqsave(&dbc->xfer_lock, flags);
+	if (bo->queued) {
+		spin_unlock_irqrestore(&dbc->xfer_lock, flags);
+		ret = -EBUSY;
+		goto unlock_ch_srcu;
+	}
+	spin_unlock_irqrestore(&dbc->xfer_lock, flags);
+
+	detach_slice_bo(qdev, bo);
+
+unlock_ch_srcu:
+	srcu_read_unlock(&dbc->ch_lock, rcu_id);
+unlock_bo:
+	mutex_unlock(&bo->lock);
+put_bo:
+	drm_gem_object_put(obj);
+unlock_dev_srcu:
+	srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
+unlock_usr_srcu:
+	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
+	return ret;
+}
+
 static void empty_xfer_list(struct qaic_device *qdev, struct dma_bridge_chan *dbc)
 {
 	unsigned long flags;
@@ -1888,10 +1986,11 @@ void release_dbc(struct qaic_device *qdev, u32 dbc_id)
 	dbc->usr = NULL;
 
 	list_for_each_entry_safe(bo, bo_temp, &dbc->bo_lists, bo_list) {
-		qaic_free_slices_bo(bo);
-		qaic_unprepare_bo(qdev, bo);
-		qaic_init_bo(bo, true);
-		list_del(&bo->bo_list);
+		drm_gem_object_get(&bo->base);
+		mutex_lock(&bo->lock);
+		detach_slice_bo(qdev, bo);
+		mutex_unlock(&bo->lock);
+		drm_gem_object_put(&bo->base);
 	}
 
 	dbc->in_use = false;
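
For context, here is a minimal userspace sketch of exercising the new detach path. It assumes the series' matching uapi change exposes struct qaic_detach_slice (a handle plus a pad that must be zero, per the handler above) and a DRM_IOCTL_QAIC_DETACH_SLICE_BO request define; neither appears in this diff, so treat those names as assumptions rather than confirmed API.

/*
 * Hypothetical caller: detach the slicing configuration from a qaic BO
 * so it can later be re-attached with a different slice layout.
 */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/qaic_accel.h>	/* assumed uapi header for this driver */

static int detach_bo_slicing(int drm_fd, unsigned int bo_handle)
{
	struct qaic_detach_slice args;

	memset(&args, 0, sizeof(args));	/* pad must be zero; nonzero gets -EINVAL */
	args.handle = bo_handle;

	/* -EBUSY means the BO is still queued for DMA; retry once the transfer completes */
	if (ioctl(drm_fd, DRM_IOCTL_QAIC_DETACH_SLICE_BO, &args)) {
		perror("QAIC detach slice");
		return -1;
	}
	return 0;
}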