@@ -254,6 +254,7 @@ int efa_query_device(struct ib_device *ibdev,
 	resp.max_rdma_size = dev_attr->max_rdma_size;
 
 	resp.device_caps |= EFA_QUERY_DEVICE_CAPS_CQ_WITH_SGID;
+	resp.device_caps |= EFA_QUERY_DEVICE_CAPS_CQ_WITH_EXT_MEM;
 	if (EFA_DEV_CAP(dev, RDMA_READ))
 		resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RDMA_READ;
 
@@ -1087,8 +1088,11 @@ int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
 		xa_erase(&dev->cqs_xa, cq->cq_idx);
 		synchronize_irq(cq->eq->irq.irqn);
 	}
-	efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size,
-			DMA_FROM_DEVICE);
+
+	if (cq->umem)
+		ib_umem_release(cq->umem);
+	else
+		efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size, DMA_FROM_DEVICE);
 	return 0;
 }
 
@@ -1127,8 +1131,8 @@ static int cq_mmap_entries_setup(struct efa_dev *dev, struct efa_cq *cq,
 	return 0;
 }
 
-int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
-		  struct uverbs_attr_bundle *attrs)
+int efa_create_cq_umem(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+		       struct ib_umem *umem, struct uverbs_attr_bundle *attrs)
 {
 	struct ib_udata *udata = &attrs->driver_udata;
 	struct efa_ucontext *ucontext = rdma_udata_to_drv_context(
@@ -1207,11 +1211,30 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 
 	cq->ucontext = ucontext;
 	cq->size = PAGE_ALIGN(cmd.cq_entry_size * entries * cmd.num_sub_cqs);
-	cq->cpu_addr = efa_zalloc_mapped(dev, &cq->dma_addr, cq->size,
-					 DMA_FROM_DEVICE);
-	if (!cq->cpu_addr) {
-		err = -ENOMEM;
-		goto err_out;
+
+	if (umem) {
+		if (umem->length < cq->size) {
+			ibdev_dbg(&dev->ibdev, "External memory too small\n");
+			err = -EINVAL;
+			goto err_free_mem;
+		}
+
+		if (!ib_umem_is_contiguous(umem)) {
+			ibdev_dbg(&dev->ibdev, "Non contiguous CQ unsupported\n");
+			err = -EINVAL;
+			goto err_free_mem;
+		}
+
+		cq->cpu_addr = NULL;
+		cq->dma_addr = ib_umem_start_dma_addr(umem);
+		cq->umem = umem;
+	} else {
+		cq->cpu_addr = efa_zalloc_mapped(dev, &cq->dma_addr, cq->size,
+						 DMA_FROM_DEVICE);
+		if (!cq->cpu_addr) {
+			err = -ENOMEM;
+			goto err_out;
+		}
 	}
 
 	params.uarn = cq->ucontext->uarn;
@@ -1228,15 +1251,17 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 
 	err = efa_com_create_cq(&dev->edev, &params, &result);
 	if (err)
-		goto err_free_mapped;
+		goto err_free_mem;
 
 	resp.db_off = result.db_off;
 	resp.cq_idx = result.cq_idx;
 	cq->cq_idx = result.cq_idx;
 	cq->ibcq.cqe = result.actual_depth;
 	WARN_ON_ONCE(entries != result.actual_depth);
 
-	err = cq_mmap_entries_setup(dev, cq, &resp, result.db_valid);
+	if (!umem)
+		err = cq_mmap_entries_setup(dev, cq, &resp, result.db_valid);
+
 	if (err) {
 		ibdev_dbg(ibdev, "Could not setup cq[%u] mmap entries\n",
 			  cq->cq_idx);
@@ -1274,15 +1299,23 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 	efa_cq_user_mmap_entries_remove(cq);
 err_destroy_cq:
 	efa_destroy_cq_idx(dev, cq->cq_idx);
-err_free_mapped:
-	efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size,
-			DMA_FROM_DEVICE);
+err_free_mem:
+	if (umem)
+		ib_umem_release(umem);
+	else
+		efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size, DMA_FROM_DEVICE);
 
 err_out:
 	atomic64_inc(&dev->stats.create_cq_err);
 	return err;
 }
 
+int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+		  struct uverbs_attr_bundle *attrs)
+{
+	return efa_create_cq_umem(ibcq, attr, NULL, attrs);
+}
+
 static int umem_to_page_list(struct efa_dev *dev,
 			     struct ib_umem *umem,
 			     u64 *page_list,