@@ -2094,17 +2094,6 @@ static void print_request_ring(struct drm_printer *m, struct i915_request *rq)
20942094 }
20952095}
20962096
2097- static unsigned long list_count(struct list_head *list)
2098- {
2099- 	struct list_head *pos;
2100- 	unsigned long count = 0;
2101- 
2102- 	list_for_each(pos, list)
2103- 		count++;
2104- 
2105- 	return count;
2106- }
2107-
21082097static unsigned long read_ul (void * p , size_t x )
21092098{
21102099 return * (unsigned long * )(p + x );
@@ -2196,11 +2185,11 @@ void intel_engine_dump_active_requests(struct list_head *requests,
21962185 }
21972186}
21982187
2199- static void engine_dump_active_requests (struct intel_engine_cs * engine , struct drm_printer * m )
2188+ static void engine_dump_active_requests (struct intel_engine_cs * engine ,
2189+ struct drm_printer * m )
22002190{
2191+ struct intel_context * hung_ce = NULL ;
22012192 struct i915_request * hung_rq = NULL ;
2202- struct intel_context * ce ;
2203- bool guc ;
22042193
22052194 /*
22062195 * No need for an engine->irq_seqno_barrier() before the seqno reads.
@@ -2209,27 +2198,22 @@ static void engine_dump_active_requests(struct intel_engine_cs *engine, struct d
22092198 * But the intention here is just to report an instantaneous snapshot
22102199 * so that's fine.
22112200 */
2212- lockdep_assert_held ( & engine -> sched_engine -> lock );
2201+ intel_engine_get_hung_entity ( engine , & hung_ce , & hung_rq );
22132202
22142203 drm_printf (m , "\tRequests:\n" );
22152204
2216- guc = intel_uc_uses_guc_submission (& engine -> gt -> uc );
2217- if (guc ) {
2218- ce = intel_engine_get_hung_context (engine );
2219- if (ce )
2220- hung_rq = intel_context_find_active_request (ce );
2221- } else {
2222- hung_rq = intel_engine_execlist_find_hung_request (engine );
2223- }
2224-
22252205 if (hung_rq )
22262206 engine_dump_request (hung_rq , m , "\t\thung" );
2207+ else if (hung_ce )
2208+ drm_printf (m , "\t\tGot hung ce but no hung rq!\n" );
22272209
2228- if (guc )
2210+ if (intel_uc_uses_guc_submission ( & engine -> gt -> uc ) )
22292211 intel_guc_dump_active_requests (engine , hung_rq , m );
22302212 else
2231- intel_engine_dump_active_requests (& engine -> sched_engine -> requests ,
2232- hung_rq , m );
2213+ intel_execlists_dump_active_requests (engine , hung_rq , m );
2214+
2215+ if (hung_rq )
2216+ i915_request_put (hung_rq );
22332217}
22342218
22352219void intel_engine_dump (struct intel_engine_cs * engine ,
@@ -2239,7 +2223,6 @@ void intel_engine_dump(struct intel_engine_cs *engine,
22392223 struct i915_gpu_error * const error = & engine -> i915 -> gpu_error ;
22402224 struct i915_request * rq ;
22412225 intel_wakeref_t wakeref ;
2242- unsigned long flags ;
22432226 ktime_t dummy ;
22442227
22452228 if (header ) {
@@ -2276,13 +2259,8 @@ void intel_engine_dump(struct intel_engine_cs *engine,
22762259 i915_reset_count (error ));
22772260 print_properties (engine , m );
22782261
2279- spin_lock_irqsave (& engine -> sched_engine -> lock , flags );
22802262 engine_dump_active_requests (engine , m );
22812263
2282- drm_printf (m , "\tOn hold?: %lu\n" ,
2283- list_count (& engine -> sched_engine -> hold ));
2284- spin_unlock_irqrestore (& engine -> sched_engine -> lock , flags );
2285-
22862264 drm_printf (m , "\tMMIO base: 0x%08x\n" , engine -> mmio_base );
22872265 wakeref = intel_runtime_pm_get_if_in_use (engine -> uncore -> rpm );
22882266 if (wakeref ) {
@@ -2328,8 +2306,7 @@ intel_engine_create_virtual(struct intel_engine_cs **siblings,
23282306 return siblings [0 ]-> cops -> create_virtual (siblings , count , flags );
23292307}
23302308
2331- struct i915_request *
2332- intel_engine_execlist_find_hung_request(struct intel_engine_cs *engine)
2309+ static struct i915_request *engine_execlist_find_hung_request(struct intel_engine_cs *engine)
23332310{
23342311 struct i915_request * request , * active = NULL ;
23352312
@@ -2381,6 +2358,33 @@ intel_engine_execlist_find_hung_request(struct intel_engine_cs *engine)
23812358 return active ;
23822359}
23832360
2361+ void intel_engine_get_hung_entity(struct intel_engine_cs *engine,
2362+ 				   struct intel_context **ce, struct i915_request **rq)
2363+ {
2364+ 	unsigned long flags;
2365+ 
2366+ 	*ce = intel_engine_get_hung_context(engine);
2367+ 	if (*ce) {
2368+ 		intel_engine_clear_hung_context(engine);
2369+ 
2370+ 		*rq = intel_context_get_active_request(*ce);
2371+ 		return;
2372+ 	}
2373+ 
2374+ 	/*
2375+ 	 * Getting here with GuC enabled means it is a forced error capture
2376+ 	 * with no actual hang. So, no need to attempt the execlist search.
2377+ 	 */
2378+ 	if (intel_uc_uses_guc_submission(&engine->gt->uc))
2379+ 		return;
2380+ 
2381+ 	spin_lock_irqsave(&engine->sched_engine->lock, flags);
2382+ 	*rq = engine_execlist_find_hung_request(engine);
2383+ 	if (*rq)
2384+ 		*rq = i915_request_get_rcu(*rq);
2385+ 	spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
2386+ }
2387+
23842388void xehp_enable_ccs_engines (struct intel_engine_cs * engine )
23852389{
23862390 /*
0 commit comments