@@ -298,7 +298,9 @@ static inline u64 gen8_noncanonical_addr(u64 address)
 
 static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
 {
-	return intel_engine_needs_cmd_parser(eb->engine) && eb->batch_len;
+	return intel_engine_requires_cmd_parser(eb->engine) ||
+		(intel_engine_using_cmd_parser(eb->engine) &&
+		 eb->args->batch_len);
 }
 
 static int eb_create(struct i915_execbuffer *eb)
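
The predicate is split in two: on engines where scanning is mandatory for isolation, intel_engine_requires_cmd_parser(), userspace can no longer bypass the parser by passing a zero batch length, while engines that merely use the parser to grant extra privilege keep the length check. The check also reads the raw eb->args->batch_len rather than eb->batch_len, because the effective length is now resolved before this point (see the last hunk) and so is never zero here. A sketch of the two helpers, assuming the I915_ENGINE_USING_CMD_PARSER / I915_ENGINE_REQUIRES_CMD_PARSER engine flags added by a companion patch in this series; the exact definitions live in the engine header:

/* Sketch of the helpers this hunk relies on; defined by a companion
 * patch as simple tests of per-engine flags.
 */
static inline bool
intel_engine_using_cmd_parser(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_USING_CMD_PARSER;
}

static inline bool
intel_engine_requires_cmd_parser(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_REQUIRES_CMD_PARSER;
}
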
@@ -1990,40 +1992,94 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
 	return 0;
 }
 
-static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
+static struct i915_vma *
+shadow_batch_pin(struct i915_execbuffer *eb, struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = eb->i915;
+	struct i915_vma * const vma = *eb->vma;
+	struct i915_address_space *vm;
+	u64 flags;
+
+	/*
+	 * PPGTT backed shadow buffers must be mapped RO, to prevent
+	 * post-scan tampering
+	 */
+	if (CMDPARSER_USES_GGTT(dev_priv)) {
+		flags = PIN_GLOBAL;
+		vm = &dev_priv->ggtt.vm;
+	} else if (vma->vm->has_read_only) {
+		flags = PIN_USER;
+		vm = vma->vm;
+		i915_gem_object_set_readonly(obj);
+	} else {
+		DRM_DEBUG("Cannot prevent post-scan tampering without RO capable vm\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	return i915_gem_object_pin(obj, vm, NULL, 0, 0, flags);
+}
+
+static struct i915_vma *eb_parse(struct i915_execbuffer *eb)
 {
 	struct intel_engine_pool_node *pool;
 	struct i915_vma *vma;
+	u64 batch_start;
+	u64 shadow_batch_start;
 	int err;
 
 	pool = intel_engine_get_pool(eb->engine, eb->batch_len);
 	if (IS_ERR(pool))
 		return ERR_CAST(pool);
 
-	err = intel_engine_cmd_parser(eb->engine,
+	vma = shadow_batch_pin(eb, pool->obj);
+	if (IS_ERR(vma))
+		goto err;
+
+	batch_start = gen8_canonical_addr(eb->batch->node.start) +
+		      eb->batch_start_offset;
+
+	shadow_batch_start = gen8_canonical_addr(vma->node.start);
+
+	err = intel_engine_cmd_parser(eb->gem_context,
+				      eb->engine,
 				      eb->batch->obj,
-				      pool->obj,
+				      batch_start,
 				      eb->batch_start_offset,
 				      eb->batch_len,
-				      is_master);
+				      pool->obj,
+				      shadow_batch_start);
+
 	if (err) {
-		if (err == -EACCES) /* unhandled chained batch */
+		i915_vma_unpin(vma);
+
+		/*
+		 * Unsafe GGTT-backed buffers can still be submitted safely
+		 * as non-secure.
+		 * For PPGTT backing however, we have no choice but to forcibly
+		 * reject unsafe buffers
+		 */
+		if (CMDPARSER_USES_GGTT(eb->i915) && (err == -EACCES))
+			/* Execute original buffer non-secure */
 			vma = NULL;
 		else
 			vma = ERR_PTR(err);
 		goto err;
 	}
 
-	vma = i915_gem_object_ggtt_pin(pool->obj, NULL, 0, 0, 0);
-	if (IS_ERR(vma))
-		goto err;
-
 	eb->vma[eb->buffer_count] = i915_vma_get(vma);
 	eb->flags[eb->buffer_count] =
 		__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_REF;
 	vma->exec_flags = &eb->flags[eb->buffer_count];
 	eb->buffer_count++;
 
+	eb->batch_start_offset = 0;
+	eb->batch = vma;
+
+	if (CMDPARSER_USES_GGTT(eb->i915))
+		eb->batch_flags |= I915_DISPATCH_SECURE;
+
+	/* eb->batch_len unchanged */
+
 	vma->private = pool;
 	return vma;
 
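
shadow_batch_pin() picks the placement for the scanned copy: hardware that executes the parser output from the global GTT pins it with PIN_GLOBAL, while hardware with a read-only-capable PPGTT marks the shadow read-only before pinning, so userspace cannot rewrite it between scan and execution; anything else is rejected. eb_parse() then hands the parser the canonical GPU addresses of both the user batch and the shadow so jump targets can be validated, and on success rewrites eb->batch to the shadow, so only scanned commands ever run. CMDPARSER_USES_GGTT() comes from a companion patch; a sketch of what it selects (the exact spelling may differ):

/* Assumed companion macro (sketch): gen7 lacks a read-only PPGTT, so its
 * shadow batches go into the global GTT and are dispatched as secure
 * batches instead of being protected by RO page-table entries.
 */
#define CMDPARSER_USES_GGTT(dev_priv) IS_GEN(dev_priv, 7)
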
@@ -2430,6 +2486,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 		       struct drm_i915_gem_exec_object2 *exec,
 		       struct drm_syncobj **fences)
 {
+	struct drm_i915_private *i915 = to_i915(dev);
 	struct i915_execbuffer eb;
 	struct dma_fence *in_fence = NULL;
 	struct dma_fence *exec_fence = NULL;
@@ -2441,7 +2498,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 	BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS &
 		     ~__EXEC_OBJECT_UNKNOWN_FLAGS);
 
-	eb.i915 = to_i915(dev);
+	eb.i915 = i915;
 	eb.file = file;
 	eb.args = args;
 	if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC))
@@ -2461,8 +2518,15 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 
 	eb.batch_flags = 0;
 	if (args->flags & I915_EXEC_SECURE) {
+		if (INTEL_GEN(i915) >= 11)
+			return -ENODEV;
+
+		/* Return -EPERM to trigger fallback code on old binaries. */
+		if (!HAS_SECURE_BATCHES(i915))
+			return -EPERM;
+
 		if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
-			return -EPERM;
+		    return -EPERM;
 
 		eb.batch_flags |= I915_DISPATCH_SECURE;
 	}
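
The I915_EXEC_SECURE path is now gated by hardware capability before the privilege check: gen11+ rejects it with -ENODEV, and platforms without secure-batch support (HAS_SECURE_BATCHES(), also from a companion patch) return -EPERM precisely because existing binaries already treat -EPERM from this ioctl as "retry without the secure bit". A hypothetical userspace-side illustration of that fallback using libdrm; submit_batch() and its error handling are illustrative, not taken from any real driver:

#include <errno.h>
#include <xf86drm.h>   /* drmIoctl() */
#include <i915_drm.h>  /* DRM_IOCTL_I915_GEM_EXECBUFFER2, I915_EXEC_SECURE */

/* Hypothetical fallback: attempt a secure submission first, then retry
 * as a normal batch when the kernel answers EPERM, as old binaries do.
 */
static int submit_batch(int fd, struct drm_i915_gem_execbuffer2 *execbuf)
{
	execbuf->flags |= I915_EXEC_SECURE;
	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, execbuf) == 0)
		return 0;
	if (errno != EPERM)
		return -errno;

	/* The kernel refused the secure bit; submit non-secure instead. */
	execbuf->flags &= ~I915_EXEC_SECURE;
	return drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, execbuf) ? -errno : 0;
}
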
@@ -2539,34 +2603,19 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 		goto err_vma;
 	}
 
+	if (eb.batch_len == 0)
+		eb.batch_len = eb.batch->size - eb.batch_start_offset;
+
 	if (eb_use_cmdparser(&eb)) {
 		struct i915_vma *vma;
 
-		vma = eb_parse(&eb, drm_is_current_master(file));
+		vma = eb_parse(&eb);
 		if (IS_ERR(vma)) {
 			err = PTR_ERR(vma);
 			goto err_vma;
 		}
-
-		if (vma) {
-			/*
-			 * Batch parsed and accepted:
-			 *
-			 * Set the DISPATCH_SECURE bit to remove the NON_SECURE
-			 * bit from MI_BATCH_BUFFER_START commands issued in
-			 * the dispatch_execbuffer implementations. We
-			 * specifically don't want that set on batches the
-			 * command parser has accepted.
-			 */
-			eb.batch_flags |= I915_DISPATCH_SECURE;
-			eb.batch_start_offset = 0;
-			eb.batch = vma;
-		}
 	}
 
-	if (eb.batch_len == 0)
-		eb.batch_len = eb.batch->size - eb.batch_start_offset;
-
 	/*
 	 * snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
 	 * batch" bit. Hence we need to pin secure batches into the global gtt.