
Commit 3cd23b8

cxdong authored and zhenyw committed
drm/i915/gvt: GVT pin/unpin shadow context
When handling a guest request, GVT needs to populate/update shadow_ctx with the guest context. This requires the shadow_ctx to be pinned. The current implementation relies on the i915 request allocation to pin it, but that cannot guarantee i915 will not unpin the shadow_ctx while GVT is still updating the guest context from shadow_ctx. So GVT should pin/unpin the shadow_ctx by itself.

Signed-off-by: Chuanxiao Dong <[email protected]>
Signed-off-by: Zhenyu Wang <[email protected]>
1 parent 17f1b1a commit 3cd23b8
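
Note: the diff below implements a simple pin/unpin lifecycle. A minimal sketch of that flow, assembled from the callbacks the patch touches (schematic only, locking omitted; not a compilable excerpt):

	/* dispatch_workload(): take GVT's own pin before the request is created */
	ret = engine->context_pin(engine, shadow_ctx);
	if (ret)
		return ret;				/* nothing pinned, just bail out */

	rq = i915_gem_request_alloc(engine, shadow_ctx);
	if (IS_ERR_OR_NULL(rq))
		engine->context_unpin(engine, shadow_ctx);	/* no request: drop the pin here */

	/* complete_current_workload(): the guest context has been updated from
	 * shadow_ctx, so GVT's pin can be released safely */
	engine->context_unpin(engine, shadow_ctx);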

File tree

1 file changed: +27 -0 lines changed


drivers/gpu/drm/i915/gvt/scheduler.c

Lines changed: 27 additions & 0 deletions
@@ -175,6 +175,7 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 	int ring_id = workload->ring_id;
 	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
 	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
 	struct drm_i915_gem_request *rq;
 	struct intel_vgpu *vgpu = workload->vgpu;
 	int ret;
@@ -188,6 +189,21 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 
 	mutex_lock(&dev_priv->drm.struct_mutex);
 
+	/* pin shadow context by gvt even the shadow context will be pinned
+	 * when i915 alloc request. That is because gvt will update the guest
+	 * context from shadow context when workload is completed, and at that
+	 * moment, i915 may already unpined the shadow context to make the
+	 * shadow_ctx pages invalid. So gvt need to pin itself. After update
+	 * the guest context, gvt can unpin the shadow_ctx safely.
+	 */
+	ret = engine->context_pin(engine, shadow_ctx);
+	if (ret) {
+		gvt_vgpu_err("fail to pin shadow context\n");
+		workload->status = ret;
+		mutex_unlock(&dev_priv->drm.struct_mutex);
+		return ret;
+	}
+
 	rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
 	if (IS_ERR(rq)) {
 		gvt_vgpu_err("fail to allocate gem request\n");
@@ -231,6 +247,9 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 
 	if (!IS_ERR_OR_NULL(rq))
 		i915_add_request_no_flush(rq);
+	else
+		engine->context_unpin(engine, shadow_ctx);
+
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 	return ret;
 }
@@ -380,6 +399,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 	 * For the workload w/o request, directly complete the workload.
 	 */
 	if (workload->req) {
+		struct drm_i915_private *dev_priv =
+			workload->vgpu->gvt->dev_priv;
+		struct intel_engine_cs *engine =
+			dev_priv->engine[workload->ring_id];
 		wait_event(workload->shadow_ctx_status_wq,
 			   !atomic_read(&workload->shadow_ctx_active));
 
@@ -392,6 +415,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 				 INTEL_GVT_EVENT_MAX)
 				intel_vgpu_trigger_virtual_event(vgpu, event);
 		}
+		mutex_lock(&dev_priv->drm.struct_mutex);
+		/* unpin shadow ctx as the shadow_ctx update is done */
+		engine->context_unpin(engine, workload->vgpu->shadow_ctx);
+		mutex_unlock(&dev_priv->drm.struct_mutex);
 	}
 
 	gvt_dbg_sched("ring id %d complete workload %p status %d\n",
