Skip to content

Commit b7a0e75

Browse files
committed
accel/ivpu: Disable buffer sharing among VPU contexts
This was not supported properly. A buffer was imported to another VPU context as a separate buffer object with a duplicated sgt. Both the exported and the imported buffers could be DMA mapped, causing a double mapping on the same device.

Buffers imported from another VPU context will now just increase the reference count, leaving only a single sgt, fixing the problem above. Buffers still can't be shared among VPU contexts because each has its own MMU mapping and ivpu_bo only supports a single MMU mapping. The solution would be to use a mapping list as in the panfrost or etnaviv drivers, and it will be implemented in the future if required.

Signed-off-by: Jacek Lawrynowicz <[email protected]>
Reviewed-by: Andrzej Kacprowski <[email protected]>
Link: https://patchwork.freedesktop.org/patch/msgid/[email protected]
1 parent a8c099d commit b7a0e75

File tree

1 file changed

+6
-38
lines changed

1 file changed

+6
-38
lines changed

drivers/accel/ivpu/ivpu_gem.c

Lines changed: 6 additions & 38 deletions
Original file line numberDiff line numberDiff line change
@@ -222,6 +222,12 @@ static int ivpu_bo_open(struct drm_gem_object *obj, struct drm_file *file)
222222
struct ivpu_bo *bo = to_ivpu_bo(obj);
223223
struct ivpu_addr_range *range;
224224

225+
if (bo->ctx) {
226+
ivpu_warn(vdev, "Can't add BO to ctx %u: already in ctx %u\n",
227+
file_priv->ctx.id, bo->ctx->id);
228+
return -EALREADY;
229+
}
230+
225231
if (bo->flags & DRM_IVPU_BO_SHAVE_MEM)
226232
range = &vdev->hw->ranges.shave;
227233
else if (bo->flags & DRM_IVPU_BO_DMA_MEM)
@@ -252,47 +258,9 @@ static void ivpu_bo_free(struct drm_gem_object *obj)
252258
drm_gem_shmem_free(&bo->base);
253259
}
254260

255-
static const struct dma_buf_ops ivpu_bo_dmabuf_ops = {
256-
.cache_sgt_mapping = true,
257-
.attach = drm_gem_map_attach,
258-
.detach = drm_gem_map_detach,
259-
.map_dma_buf = drm_gem_map_dma_buf,
260-
.unmap_dma_buf = drm_gem_unmap_dma_buf,
261-
.release = drm_gem_dmabuf_release,
262-
.mmap = drm_gem_dmabuf_mmap,
263-
.vmap = drm_gem_dmabuf_vmap,
264-
.vunmap = drm_gem_dmabuf_vunmap,
265-
};
266-
267-
static struct dma_buf *ivpu_bo_export(struct drm_gem_object *obj, int flags)
268-
{
269-
struct drm_device *dev = obj->dev;
270-
struct dma_buf_export_info exp_info = {
271-
.exp_name = KBUILD_MODNAME,
272-
.owner = dev->driver->fops->owner,
273-
.ops = &ivpu_bo_dmabuf_ops,
274-
.size = obj->size,
275-
.flags = flags,
276-
.priv = obj,
277-
.resv = obj->resv,
278-
};
279-
void *sgt;
280-
281-
/*
282-
* Make sure that pages are allocated and dma-mapped before exporting the bo.
283-
* DMA-mapping is required if the bo will be imported to the same device.
284-
*/
285-
sgt = drm_gem_shmem_get_pages_sgt(to_drm_gem_shmem_obj(obj));
286-
if (IS_ERR(sgt))
287-
return sgt;
288-
289-
return drm_gem_dmabuf_export(dev, &exp_info);
290-
}
291-
292261
static const struct drm_gem_object_funcs ivpu_gem_funcs = {
293262
.free = ivpu_bo_free,
294263
.open = ivpu_bo_open,
295-
.export = ivpu_bo_export,
296264
.print_info = drm_gem_shmem_object_print_info,
297265
.pin = drm_gem_shmem_object_pin,
298266
.unpin = drm_gem_shmem_object_unpin,

0 commit comments

Comments (0)