Skip to content

Commit 30c185d

Browse files
committed
Merge tag 'drm-intel-next-fixes-2019-11-20' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

- Includes gvt-next-fixes-2019-11-12
- Fix Bugzilla #112051: Fix detection for a CMP-V PCH
- Fix Bugzilla #112256: Corrupted page table at address on plymouth splash
- Fix Bugzilla #111594: Avoid losing RC6 when HuC authentication is used
- Fix for OA/perf metric coherency, restore GT coarse power gating workaround
- Avoid atomic context on error capture
- Avoid MST bitmask overflowing to EDP/DPI input select
- Fixes to CI found dmesg splats

Signed-off-by: Dave Airlie <[email protected]>
From: Joonas Lahtinen <[email protected]>
Link: https://patchwork.freedesktop.org/patch/msgid/[email protected]
2 parents c22fe76 + 0122baa commit 30c185d

15 files changed: +89 / -53 lines changed

drivers/gpu/drm/i915/display/intel_fbdev.c

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -234,6 +234,11 @@ static int intelfb_create(struct drm_fb_helper *helper,
234234
info->apertures->ranges[0].base = ggtt->gmadr.start;
235235
info->apertures->ranges[0].size = ggtt->mappable_end;
236236

237+
/* Our framebuffer is the entirety of fbdev's system memory */
238+
info->fix.smem_start =
239+
(unsigned long)(ggtt->gmadr.start + vma->node.start);
240+
info->fix.smem_len = vma->node.size;
241+
237242
vaddr = i915_vma_pin_iomap(vma);
238243
if (IS_ERR(vaddr)) {
239244
DRM_ERROR("Failed to remap framebuffer into virtual memory\n");
@@ -243,10 +248,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
243248
info->screen_base = vaddr;
244249
info->screen_size = vma->node.size;
245250

246-
/* Our framebuffer is the entirety of fbdev's system memory */
247-
info->fix.smem_start = (unsigned long)info->screen_base;
248-
info->fix.smem_len = info->screen_size;
249-
250251
drm_fb_helper_fill_info(info, &ifbdev->helper, sizes);
251252

252253
/* If the object is shmemfs backed, it will have given us zeroed pages.

drivers/gpu/drm/i915/display/intel_sprite.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2885,7 +2885,7 @@ struct intel_plane *
28852885
skl_universal_plane_create(struct drm_i915_private *dev_priv,
28862886
enum pipe pipe, enum plane_id plane_id)
28872887
{
2888-
static const struct drm_plane_funcs *plane_funcs;
2888+
const struct drm_plane_funcs *plane_funcs;
28892889
struct intel_plane *plane;
28902890
enum drm_plane_type plane_type;
28912891
unsigned int supported_rotations;

drivers/gpu/drm/i915/gem/i915_gem_context.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -727,6 +727,7 @@ int i915_gem_init_contexts(struct drm_i915_private *i915)
727727
void i915_gem_driver_release__contexts(struct drm_i915_private *i915)
728728
{
729729
destroy_kernel_context(&i915->kernel_context);
730+
flush_work(&i915->gem.contexts.free_work);
730731
}
731732

732733
static int context_idr_cleanup(int id, void *p, void *data)

drivers/gpu/drm/i915/gt/intel_gt_requests.c

Lines changed: 3 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -33,7 +33,6 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
3333
{
3434
struct intel_gt_timelines *timelines = &gt->timelines;
3535
struct intel_timeline *tl, *tn;
36-
unsigned long active_count = 0;
3736
unsigned long flags;
3837
bool interruptible;
3938
LIST_HEAD(free);
@@ -46,10 +45,8 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
4645

4746
spin_lock_irqsave(&timelines->lock, flags);
4847
list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
49-
if (!mutex_trylock(&tl->mutex)) {
50-
active_count++; /* report busy to caller, try again? */
48+
if (!mutex_trylock(&tl->mutex))
5149
continue;
52-
}
5350

5451
intel_timeline_get(tl);
5552
GEM_BUG_ON(!tl->active_count);
@@ -74,9 +71,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
7471

7572
/* Resume iteration after dropping lock */
7673
list_safe_reset_next(tl, tn, link);
77-
if (--tl->active_count)
78-
active_count += !!rcu_access_pointer(tl->last_request.fence);
79-
else
74+
if (!--tl->active_count)
8075
list_del(&tl->link);
8176

8277
mutex_unlock(&tl->mutex);
@@ -92,7 +87,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
9287
list_for_each_entry_safe(tl, tn, &free, link)
9388
__intel_timeline_free(&tl->kref);
9489

95-
return active_count ? timeout : 0;
90+
return list_empty(&timelines->active_list) ? 0 : timeout;
9691
}
9792

9893
int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout)

drivers/gpu/drm/i915/gt/intel_rc6.c

Lines changed: 7 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -178,8 +178,13 @@ static void gen9_rc6_enable(struct intel_rc6 *rc6)
178178
GEN6_RC_CTL_RC6_ENABLE |
179179
rc6_mode);
180180

181-
set(uncore, GEN9_PG_ENABLE,
182-
GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE);
181+
/*
182+
* WaRsDisableCoarsePowerGating:skl,cnl
183+
* - Render/Media PG need to be disabled with RC6.
184+
*/
185+
if (!NEEDS_WaRsDisableCoarsePowerGating(rc6_to_i915(rc6)))
186+
set(uncore, GEN9_PG_ENABLE,
187+
GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE);
183188
}
184189

185190
static void gen8_rc6_enable(struct intel_rc6 *rc6)

drivers/gpu/drm/i915/gt/uc/intel_guc.c

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -553,6 +553,13 @@ int intel_guc_suspend(struct intel_guc *guc)
553553
GUC_POWER_D1, /* any value greater than GUC_POWER_D0 */
554554
};
555555

556+
/*
557+
* If GuC communication is enabled but submission is not supported,
558+
* we do not need to suspend the GuC.
559+
*/
560+
if (!intel_guc_submission_is_enabled(guc))
561+
return 0;
562+
556563
/*
557564
* The ENTER_S_STATE action queues the save/restore operation in GuC FW
558565
* and then returns, so waiting on the H2G is not enough to guarantee
@@ -610,6 +617,14 @@ int intel_guc_resume(struct intel_guc *guc)
610617
GUC_POWER_D0,
611618
};
612619

620+
/*
621+
* If GuC communication is enabled but submission is not supported,
622+
* we do not need to resume the GuC but we do need to enable the
623+
* GuC communication on resume (above).
624+
*/
625+
if (!intel_guc_submission_is_enabled(guc))
626+
return 0;
627+
613628
return intel_guc_send(guc, action, ARRAY_SIZE(action));
614629
}
615630

drivers/gpu/drm/i915/gvt/handlers.c

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3420,6 +3420,10 @@ int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt,
34203420
}
34213421

34223422
for (i = 0; i < gvt->mmio.num_mmio_block; i++, block++) {
3423+
/* pvinfo data doesn't come from hw mmio */
3424+
if (i915_mmio_reg_offset(block->offset) == VGT_PVINFO_PAGE)
3425+
continue;
3426+
34233427
for (j = 0; j < block->size; j += 4) {
34243428
ret = handler(gvt,
34253429
i915_mmio_reg_offset(block->offset) + j,

drivers/gpu/drm/i915/i915_active.c

Lines changed: 28 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -91,14 +91,15 @@ static void debug_active_init(struct i915_active *ref)
9191

9292
static void debug_active_activate(struct i915_active *ref)
9393
{
94-
lockdep_assert_held(&ref->mutex);
94+
spin_lock_irq(&ref->tree_lock);
9595
if (!atomic_read(&ref->count)) /* before the first inc */
9696
debug_object_activate(ref, &active_debug_desc);
97+
spin_unlock_irq(&ref->tree_lock);
9798
}
9899

99100
static void debug_active_deactivate(struct i915_active *ref)
100101
{
101-
lockdep_assert_held(&ref->mutex);
102+
lockdep_assert_held(&ref->tree_lock);
102103
if (!atomic_read(&ref->count)) /* after the last dec */
103104
debug_object_deactivate(ref, &active_debug_desc);
104105
}
@@ -128,36 +129,34 @@ __active_retire(struct i915_active *ref)
128129
{
129130
struct active_node *it, *n;
130131
struct rb_root root;
131-
bool retire = false;
132+
unsigned long flags;
132133

133-
lockdep_assert_held(&ref->mutex);
134134
GEM_BUG_ON(i915_active_is_idle(ref));
135135

136136
/* return the unused nodes to our slabcache -- flushing the allocator */
137-
if (atomic_dec_and_test(&ref->count)) {
138-
debug_active_deactivate(ref);
139-
root = ref->tree;
140-
ref->tree = RB_ROOT;
141-
ref->cache = NULL;
142-
retire = true;
143-
}
144-
145-
mutex_unlock(&ref->mutex);
146-
if (!retire)
137+
if (!atomic_dec_and_lock_irqsave(&ref->count, &ref->tree_lock, flags))
147138
return;
148139

149140
GEM_BUG_ON(rcu_access_pointer(ref->excl.fence));
150-
rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
151-
GEM_BUG_ON(i915_active_fence_isset(&it->base));
152-
kmem_cache_free(global.slab_cache, it);
153-
}
141+
debug_active_deactivate(ref);
142+
143+
root = ref->tree;
144+
ref->tree = RB_ROOT;
145+
ref->cache = NULL;
146+
147+
spin_unlock_irqrestore(&ref->tree_lock, flags);
154148

155149
/* After the final retire, the entire struct may be freed */
156150
if (ref->retire)
157151
ref->retire(ref);
158152

159153
/* ... except if you wait on it, you must manage your own references! */
160154
wake_up_var(ref);
155+
156+
rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
157+
GEM_BUG_ON(i915_active_fence_isset(&it->base));
158+
kmem_cache_free(global.slab_cache, it);
159+
}
161160
}
162161

163162
static void
@@ -169,7 +168,6 @@ active_work(struct work_struct *wrk)
169168
if (atomic_add_unless(&ref->count, -1, 1))
170169
return;
171170

172-
mutex_lock(&ref->mutex);
173171
__active_retire(ref);
174172
}
175173

@@ -180,9 +178,7 @@ active_retire(struct i915_active *ref)
180178
if (atomic_add_unless(&ref->count, -1, 1))
181179
return;
182180

183-
/* If we are inside interrupt context (fence signaling), defer */
184-
if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS ||
185-
!mutex_trylock(&ref->mutex)) {
181+
if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS) {
186182
queue_work(system_unbound_wq, &ref->work);
187183
return;
188184
}
@@ -227,7 +223,7 @@ active_instance(struct i915_active *ref, struct intel_timeline *tl)
227223
if (!prealloc)
228224
return NULL;
229225

230-
mutex_lock(&ref->mutex);
226+
spin_lock_irq(&ref->tree_lock);
231227
GEM_BUG_ON(i915_active_is_idle(ref));
232228

233229
parent = NULL;
@@ -257,7 +253,7 @@ active_instance(struct i915_active *ref, struct intel_timeline *tl)
257253

258254
out:
259255
ref->cache = node;
260-
mutex_unlock(&ref->mutex);
256+
spin_unlock_irq(&ref->tree_lock);
261257

262258
BUILD_BUG_ON(offsetof(typeof(*node), base));
263259
return &node->base;
@@ -278,8 +274,10 @@ void __i915_active_init(struct i915_active *ref,
278274
if (bits & I915_ACTIVE_MAY_SLEEP)
279275
ref->flags |= I915_ACTIVE_RETIRE_SLEEPS;
280276

277+
spin_lock_init(&ref->tree_lock);
281278
ref->tree = RB_ROOT;
282279
ref->cache = NULL;
280+
283281
init_llist_head(&ref->preallocated_barriers);
284282
atomic_set(&ref->count, 0);
285283
__mutex_init(&ref->mutex, "i915_active", key);
@@ -510,7 +508,7 @@ static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
510508
if (RB_EMPTY_ROOT(&ref->tree))
511509
return NULL;
512510

513-
mutex_lock(&ref->mutex);
511+
spin_lock_irq(&ref->tree_lock);
514512
GEM_BUG_ON(i915_active_is_idle(ref));
515513

516514
/*
@@ -575,15 +573,15 @@ static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
575573
goto match;
576574
}
577575

578-
mutex_unlock(&ref->mutex);
576+
spin_unlock_irq(&ref->tree_lock);
579577

580578
return NULL;
581579

582580
match:
583581
rb_erase(p, &ref->tree); /* Hide from waits and sibling allocations */
584582
if (p == &ref->cache->node)
585583
ref->cache = NULL;
586-
mutex_unlock(&ref->mutex);
584+
spin_unlock_irq(&ref->tree_lock);
587585

588586
return rb_entry(p, struct active_node, node);
589587
}
@@ -664,6 +662,7 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
664662
void i915_active_acquire_barrier(struct i915_active *ref)
665663
{
666664
struct llist_node *pos, *next;
665+
unsigned long flags;
667666

668667
GEM_BUG_ON(i915_active_is_idle(ref));
669668

@@ -673,7 +672,7 @@ void i915_active_acquire_barrier(struct i915_active *ref)
673672
* populated by i915_request_add_active_barriers() to point to the
674673
* request that will eventually release them.
675674
*/
676-
mutex_lock_nested(&ref->mutex, SINGLE_DEPTH_NESTING);
675+
spin_lock_irqsave_nested(&ref->tree_lock, flags, SINGLE_DEPTH_NESTING);
677676
llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
678677
struct active_node *node = barrier_from_ll(pos);
679678
struct intel_engine_cs *engine = barrier_to_engine(node);
@@ -699,7 +698,7 @@ void i915_active_acquire_barrier(struct i915_active *ref)
699698
llist_add(barrier_to_ll(node), &engine->barrier_tasks);
700699
intel_engine_pm_put(engine);
701700
}
702-
mutex_unlock(&ref->mutex);
701+
spin_unlock_irqrestore(&ref->tree_lock, flags);
703702
}
704703

705704
void i915_request_add_active_barriers(struct i915_request *rq)

drivers/gpu/drm/i915/i915_active_types.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -48,6 +48,7 @@ struct i915_active {
4848
atomic_t count;
4949
struct mutex mutex;
5050

51+
spinlock_t tree_lock;
5152
struct active_node *cache;
5253
struct rb_root tree;
5354

drivers/gpu/drm/i915/i915_drv.h

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2045,4 +2045,10 @@ i915_coherent_map_type(struct drm_i915_private *i915)
20452045
return HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
20462046
}
20472047

2048+
static inline bool intel_guc_submission_is_enabled(struct intel_guc *guc)
2049+
{
2050+
return intel_guc_is_submission_supported(guc) &&
2051+
intel_guc_is_running(guc);
2052+
}
2053+
20482054
#endif

0 commit comments

Comments (0)