Skip to content

Commit f8c08d8

Browse files
drm/i915/cmdparser: Add support for backward jumps
To keep things manageable, the pre-gen9 cmdparser does not attempt to track any form of nested BB_STARTs. This did not prevent usermode from using nested starts, or even chained batches because the cmdparser is not strictly enforced pre gen9. Instead, the existence of a nested BB_START would cause the batch to be emitted in insecure mode, and any privileged capabilities would not be available. For Gen9, the cmdparser becomes mandatory (for BCS at least), and so not providing any form of nested BB_START support becomes overly restrictive. Any such batch will simply not run. We make heavy use of backward jumps in igt, and it is much easier to add support for this restricted subset of nested jumps, than to rewrite the whole of our test suite to avoid them. Add the required logic to support limited backward jumps, to instructions that have already been validated by the parser. Note that it's not sufficient to simply approve any BB_START that jumps backwards in the buffer because this would allow an attacker to embed a rogue instruction sequence within the operand words of a harmless instruction (say LRI) and jump to that. We introduce a bit array to track every instr offset successfully validated, and test the target of BB_START against this. If the target offset hits, it is re-written to the same offset in the shadow buffer and the BB_START cmd is allowed. Note: This patch deliberately ignores checkpatch issues in the cmdtables, in order to match the style of the surrounding code. We'll correct the entire file in one go in a later patch. 
v2: set dispatch secure late (Mika) v3: rebase (Mika) v4: Clear whitelist on each parse Minor review updates (Chris) v5: Correct backward jump batching v6: fix compilation error due to struct eb shuffle (Mika) Cc: Tony Luck <[email protected]> Cc: Dave Airlie <[email protected]> Cc: Takashi Iwai <[email protected]> Cc: Tyler Hicks <[email protected]> Signed-off-by: Jon Bloomfield <[email protected]> Signed-off-by: Mika Kuoppala <[email protected]> Reviewed-by: Chris Wilson <[email protected]>
1 parent 0546a29 commit f8c08d8

File tree

5 files changed

+178
-26
lines changed

5 files changed

+178
-26
lines changed

drivers/gpu/drm/i915/gem/i915_gem_context.c

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -319,6 +319,8 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
319319
free_engines(rcu_access_pointer(ctx->engines));
320320
mutex_destroy(&ctx->engines_mutex);
321321

322+
kfree(ctx->jump_whitelist);
323+
322324
if (ctx->timeline)
323325
intel_timeline_put(ctx->timeline);
324326

@@ -441,6 +443,9 @@ __create_context(struct drm_i915_private *i915)
441443
for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
442444
ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
443445

446+
ctx->jump_whitelist = NULL;
447+
ctx->jump_whitelist_cmds = 0;
448+
444449
return ctx;
445450

446451
err_free:

drivers/gpu/drm/i915/gem/i915_gem_context_types.h

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -192,6 +192,13 @@ struct i915_gem_context {
192192
* per vm, which may be one per context or shared with the global GTT)
193193
*/
194194
struct radix_tree_root handles_vma;
195+
196+
/** jump_whitelist: Bit array for tracking cmds during cmdparsing
197+
* Guarded by struct_mutex
198+
*/
199+
unsigned long *jump_whitelist;
200+
/** jump_whitelist_cmds: No of cmd slots available */
201+
u32 jump_whitelist_cmds;
195202
};
196203

197204
#endif /* __I915_GEM_CONTEXT_TYPES_H__ */

drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c

Lines changed: 24 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1972,7 +1972,6 @@ shadow_batch_pin(struct i915_execbuffer *eb, struct drm_i915_gem_object *obj)
19721972
if (CMDPARSER_USES_GGTT(dev_priv)) {
19731973
flags = PIN_GLOBAL;
19741974
vm = &dev_priv->ggtt.vm;
1975-
eb->batch_flags |= I915_DISPATCH_SECURE;
19761975
} else if (vma->vm->has_read_only) {
19771976
flags = PIN_USER;
19781977
vm = vma->vm;
@@ -1989,18 +1988,35 @@ static struct i915_vma *eb_parse(struct i915_execbuffer *eb)
19891988
{
19901989
struct intel_engine_pool_node *pool;
19911990
struct i915_vma *vma;
1991+
u64 batch_start;
1992+
u64 shadow_batch_start;
19921993
int err;
19931994

19941995
pool = intel_engine_pool_get(&eb->engine->pool, eb->batch_len);
19951996
if (IS_ERR(pool))
19961997
return ERR_CAST(pool);
19971998

1998-
err = intel_engine_cmd_parser(eb->engine,
1999+
vma = shadow_batch_pin(eb, pool->obj);
2000+
if (IS_ERR(vma))
2001+
goto err;
2002+
2003+
batch_start = gen8_canonical_addr(eb->batch->node.start) +
2004+
eb->batch_start_offset;
2005+
2006+
shadow_batch_start = gen8_canonical_addr(vma->node.start);
2007+
2008+
err = intel_engine_cmd_parser(eb->gem_context,
2009+
eb->engine,
19992010
eb->batch->obj,
2000-
pool->obj,
2011+
batch_start,
20012012
eb->batch_start_offset,
2002-
eb->batch_len);
2013+
eb->batch_len,
2014+
pool->obj,
2015+
shadow_batch_start);
2016+
20032017
if (err) {
2018+
i915_vma_unpin(vma);
2019+
20042020
/*
20052021
* Unsafe GGTT-backed buffers can still be submitted safely
20062022
* as non-secure.
@@ -2015,10 +2031,6 @@ static struct i915_vma *eb_parse(struct i915_execbuffer *eb)
20152031
goto err;
20162032
}
20172033

2018-
vma = shadow_batch_pin(eb, pool->obj);
2019-
if (IS_ERR(vma))
2020-
goto err;
2021-
20222034
eb->vma[eb->buffer_count] = i915_vma_get(vma);
20232035
eb->flags[eb->buffer_count] =
20242036
__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_REF;
@@ -2027,6 +2039,10 @@ static struct i915_vma *eb_parse(struct i915_execbuffer *eb)
20272039

20282040
eb->batch_start_offset = 0;
20292041
eb->batch = vma;
2042+
2043+
if (CMDPARSER_USES_GGTT(eb->i915))
2044+
eb->batch_flags |= I915_DISPATCH_SECURE;
2045+
20302046
/* eb->batch_len unchanged */
20312047

20322048
vma->private = pool;

drivers/gpu/drm/i915/i915_cmd_parser.c

Lines changed: 136 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -483,6 +483,19 @@ static const struct drm_i915_cmd_descriptor gen9_blt_cmds[] = {
483483
.reg = { .offset = 1, .mask = 0x007FFFFC } ),
484484
CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, W,
485485
.reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 } ),
486+
487+
/*
488+
* We allow BB_START but apply further checks. We just sanitize the
489+
* basic fields here.
490+
*/
491+
#define MI_BB_START_OPERAND_MASK GENMASK(SMI-1, 0)
492+
#define MI_BB_START_OPERAND_EXPECT (MI_BATCH_PPGTT_HSW | 1)
493+
CMD( MI_BATCH_BUFFER_START_GEN8, SMI, !F, 0xFF, B,
494+
.bits = {{
495+
.offset = 0,
496+
.mask = MI_BB_START_OPERAND_MASK,
497+
.expected = MI_BB_START_OPERAND_EXPECT,
498+
}}, ),
486499
};
487500

488501
static const struct drm_i915_cmd_descriptor noop_desc =
@@ -1293,29 +1306,131 @@ static bool check_cmd(const struct intel_engine_cs *engine,
12931306
return true;
12941307
}
12951308

1309+
static int check_bbstart(const struct i915_gem_context *ctx,
1310+
u32 *cmd, u32 offset, u32 length,
1311+
u32 batch_len,
1312+
u64 batch_start,
1313+
u64 shadow_batch_start)
1314+
{
1315+
u64 jump_offset, jump_target;
1316+
u32 target_cmd_offset, target_cmd_index;
1317+
1318+
/* For igt compatibility on older platforms */
1319+
if (CMDPARSER_USES_GGTT(ctx->i915)) {
1320+
DRM_DEBUG("CMD: Rejecting BB_START for ggtt based submission\n");
1321+
return -EACCES;
1322+
}
1323+
1324+
if (length != 3) {
1325+
DRM_DEBUG("CMD: Recursive BB_START with bad length(%u)\n",
1326+
length);
1327+
return -EINVAL;
1328+
}
1329+
1330+
jump_target = *(u64*)(cmd+1);
1331+
jump_offset = jump_target - batch_start;
1332+
1333+
/*
1334+
* Any underflow of jump_target is guaranteed to be outside the range
1335+
* of a u32, so >= test catches both too large and too small
1336+
*/
1337+
if (jump_offset >= batch_len) {
1338+
DRM_DEBUG("CMD: BB_START to 0x%llx jumps out of BB\n",
1339+
jump_target);
1340+
return -EINVAL;
1341+
}
1342+
1343+
/*
1344+
* This cannot overflow a u32 because we already checked jump_offset
1345+
* is within the BB, and the batch_len is a u32
1346+
*/
1347+
target_cmd_offset = lower_32_bits(jump_offset);
1348+
target_cmd_index = target_cmd_offset / sizeof(u32);
1349+
1350+
*(u64*)(cmd + 1) = shadow_batch_start + target_cmd_offset;
1351+
1352+
if (target_cmd_index == offset)
1353+
return 0;
1354+
1355+
if (ctx->jump_whitelist_cmds <= target_cmd_index) {
1356+
DRM_DEBUG("CMD: Rejecting BB_START - truncated whitelist array\n");
1357+
return -EINVAL;
1358+
} else if (!test_bit(target_cmd_index, ctx->jump_whitelist)) {
1359+
DRM_DEBUG("CMD: BB_START to 0x%llx not a previously executed cmd\n",
1360+
jump_target);
1361+
return -EINVAL;
1362+
}
1363+
1364+
return 0;
1365+
}
1366+
1367+
static void init_whitelist(struct i915_gem_context *ctx, u32 batch_len)
1368+
{
1369+
const u32 batch_cmds = DIV_ROUND_UP(batch_len, sizeof(u32));
1370+
const u32 exact_size = BITS_TO_LONGS(batch_cmds);
1371+
u32 next_size = BITS_TO_LONGS(roundup_pow_of_two(batch_cmds));
1372+
unsigned long *next_whitelist;
1373+
1374+
if (CMDPARSER_USES_GGTT(ctx->i915))
1375+
return;
1376+
1377+
if (batch_cmds <= ctx->jump_whitelist_cmds) {
1378+
memset(ctx->jump_whitelist, 0, exact_size * sizeof(u32));
1379+
return;
1380+
}
1381+
1382+
again:
1383+
next_whitelist = kcalloc(next_size, sizeof(long), GFP_KERNEL);
1384+
if (next_whitelist) {
1385+
kfree(ctx->jump_whitelist);
1386+
ctx->jump_whitelist = next_whitelist;
1387+
ctx->jump_whitelist_cmds =
1388+
next_size * BITS_PER_BYTE * sizeof(long);
1389+
return;
1390+
}
1391+
1392+
if (next_size > exact_size) {
1393+
next_size = exact_size;
1394+
goto again;
1395+
}
1396+
1397+
DRM_DEBUG("CMD: Failed to extend whitelist. BB_START may be disallowed\n");
1398+
memset(ctx->jump_whitelist, 0,
1399+
BITS_TO_LONGS(ctx->jump_whitelist_cmds) * sizeof(u32));
1400+
1401+
return;
1402+
}
1403+
12961404
#define LENGTH_BIAS 2
12971405

12981406
/**
12991407
* i915_parse_cmds() - parse a submitted batch buffer for privilege violations
1408+
* @ctx: the context in which the batch is to execute
13001409
* @engine: the engine on which the batch is to execute
13011410
* @batch_obj: the batch buffer in question
1302-
* @shadow_batch_obj: copy of the batch buffer in question
1411+
* @batch_start: Canonical base address of batch
13031412
* @batch_start_offset: byte offset in the batch at which execution starts
13041413
* @batch_len: length of the commands in batch_obj
1414+
* @shadow_batch_obj: copy of the batch buffer in question
1415+
* @shadow_batch_start: Canonical base address of shadow_batch_obj
13051416
*
13061417
* Parses the specified batch buffer looking for privilege violations as
13071418
* described in the overview.
13081419
*
13091420
* Return: non-zero if the parser finds violations or otherwise fails; -EACCES
13101421
* if the batch appears legal but should use hardware parsing
13111422
*/
1312-
int intel_engine_cmd_parser(struct intel_engine_cs *engine,
1423+
1424+
int intel_engine_cmd_parser(struct i915_gem_context *ctx,
1425+
struct intel_engine_cs *engine,
13131426
struct drm_i915_gem_object *batch_obj,
1314-
struct drm_i915_gem_object *shadow_batch_obj,
1427+
u64 batch_start,
13151428
u32 batch_start_offset,
1316-
u32 batch_len)
1429+
u32 batch_len,
1430+
struct drm_i915_gem_object *shadow_batch_obj,
1431+
u64 shadow_batch_start)
13171432
{
1318-
u32 *cmd, *batch_end;
1433+
u32 *cmd, *batch_end, offset = 0;
13191434
struct drm_i915_cmd_descriptor default_desc = noop_desc;
13201435
const struct drm_i915_cmd_descriptor *desc = &default_desc;
13211436
bool needs_clflush_after = false;
@@ -1329,6 +1444,8 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
13291444
return PTR_ERR(cmd);
13301445
}
13311446

1447+
init_whitelist(ctx, batch_len);
1448+
13321449
/*
13331450
* We use the batch length as size because the shadow object is as
13341451
* large or larger and copy_batch() will write MI_NOPs to the extra
@@ -1349,16 +1466,6 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
13491466
goto err;
13501467
}
13511468

1352-
/*
1353-
* We don't try to handle BATCH_BUFFER_START because it adds
1354-
* non-trivial complexity. Instead we abort the scan and return
1355-
* and error to indicate that the batch is unsafe.
1356-
*/
1357-
if (desc->cmd.value == MI_BATCH_BUFFER_START) {
1358-
ret = -EACCES;
1359-
goto err;
1360-
}
1361-
13621469
if (desc->flags & CMD_DESC_FIXED)
13631470
length = desc->length.fixed;
13641471
else
@@ -1378,7 +1485,21 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
13781485
goto err;
13791486
}
13801487

1488+
if (desc->cmd.value == MI_BATCH_BUFFER_START) {
1489+
ret = check_bbstart(ctx, cmd, offset, length,
1490+
batch_len, batch_start,
1491+
shadow_batch_start);
1492+
1493+
if (ret)
1494+
goto err;
1495+
break;
1496+
}
1497+
1498+
if (ctx->jump_whitelist_cmds > offset)
1499+
set_bit(offset, ctx->jump_whitelist);
1500+
13811501
cmd += length;
1502+
offset += length;
13821503
if (cmd >= batch_end) {
13831504
DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
13841505
ret = -EINVAL;

drivers/gpu/drm/i915/i915_drv.h

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -2408,11 +2408,14 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
24082408
int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
24092409
void intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
24102410
void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
2411-
int intel_engine_cmd_parser(struct intel_engine_cs *engine,
2411+
int intel_engine_cmd_parser(struct i915_gem_context *cxt,
2412+
struct intel_engine_cs *engine,
24122413
struct drm_i915_gem_object *batch_obj,
2413-
struct drm_i915_gem_object *shadow_batch_obj,
2414+
u64 user_batch_start,
24142415
u32 batch_start_offset,
2415-
u32 batch_len);
2416+
u32 batch_len,
2417+
struct drm_i915_gem_object *shadow_batch_obj,
2418+
u64 shadow_batch_start);
24162419

24172420
/* intel_device_info.c */
24182421
static inline struct intel_device_info *

0 commit comments

Comments
 (0)