2 files changed, 22 insertions(+), 22 deletions(-)
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -11,33 +11,13 @@
 
 #include "i915_drv.h"
 
-static void user_forcewake(struct intel_gt *gt, bool suspend)
-{
-	int count = atomic_read(&gt->user_wakeref);
-
-	/* Inside suspend/resume so single threaded, no races to worry about. */
-	if (likely(!count))
-		return;
-
-	intel_gt_pm_get(gt);
-	if (suspend) {
-		GEM_BUG_ON(count > atomic_read(&gt->wakeref.count));
-		atomic_sub(count, &gt->wakeref.count);
-	} else {
-		atomic_add(count, &gt->wakeref.count);
-	}
-	intel_gt_pm_put(gt);
-}
-
 void i915_gem_suspend(struct drm_i915_private *i915)
 {
 	GEM_TRACE("\n");
 
 	intel_wakeref_auto(&i915->ggtt.userfault_wakeref, 0);
 	flush_workqueue(i915->wq);
 
-	user_forcewake(&i915->gt, true);
-
 	/*
 	 * We have to flush all the executing contexts to main memory so
 	 * that they can saved in the hibernation image. To ensure the last
@@ -132,8 +112,6 @@ void i915_gem_resume(struct drm_i915_private *i915)
 	if (intel_gt_resume(&i915->gt))
 		goto err_wedged;
 
-	user_forcewake(&i915->gt, false);
-
 out_unlock:
 	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
 	return;
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -18,6 +18,24 @@
 #include "intel_rps.h"
 #include "intel_wakeref.h"
 
+static void user_forcewake(struct intel_gt *gt, bool suspend)
+{
+	int count = atomic_read(&gt->user_wakeref);
+
+	/* Inside suspend/resume so single threaded, no races to worry about. */
+	if (likely(!count))
+		return;
+
+	intel_gt_pm_get(gt);
+	if (suspend) {
+		GEM_BUG_ON(count > atomic_read(&gt->wakeref.count));
+		atomic_sub(count, &gt->wakeref.count);
+	} else {
+		atomic_add(count, &gt->wakeref.count);
+	}
+	intel_gt_pm_put(gt);
+}
+
 static int __gt_unpark(struct intel_wakeref *wf)
 {
 	struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
@@ -210,6 +228,8 @@ int intel_gt_resume(struct intel_gt *gt)
 
 	intel_uc_resume(&gt->uc);
 
+	user_forcewake(gt, false);
+
 	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
 	intel_gt_pm_put(gt);
 
@@ -233,6 +253,8 @@ void intel_gt_suspend(struct intel_gt *gt)
 {
 	intel_wakeref_t wakeref;
 
+	user_forcewake(gt, true);
+
 	/* We expect to be idle already; but also want to be independent */
 	wait_for_idle(gt);
 
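Net effect of the two files: user_forcewake() moves verbatim out of the GEM suspend/resume path and into the GT layer, where intel_gt_suspend() now applies it before parking and intel_gt_resume() undoes it once the GT is back up. The function temporarily hides the forcewake references that userspace holds (gt->user_wakeref) by subtracting them from the GT's own wakeref count on suspend and adding them back on resume. The snippet below is a minimal userspace C11 sketch of that bookkeeping only; struct fake_gt, gt_pm_get()/gt_pm_put() and the counts used in main() are invented stand-ins for illustration, not driver code.

/*
 * Standalone sketch (userspace C11) of the counter transfer that
 * user_forcewake() performs.  All names here are stand-ins.
 */
#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_gt {
	atomic_int user_wakeref;   /* forcewake references held by userspace */
	atomic_int wakeref_count;  /* stand-in for gt->wakeref.count */
};

/* Simplified stand-ins for intel_gt_pm_get()/intel_gt_pm_put(). */
static void gt_pm_get(struct fake_gt *gt) { atomic_fetch_add(&gt->wakeref_count, 1); }
static void gt_pm_put(struct fake_gt *gt) { atomic_fetch_sub(&gt->wakeref_count, 1); }

static void user_forcewake(struct fake_gt *gt, bool suspend)
{
	int count = atomic_load(&gt->user_wakeref);

	/* Inside suspend/resume so single threaded, no races to worry about. */
	if (!count)
		return;

	gt_pm_get(gt);
	if (suspend) {
		/* The user references must still be accounted in the GT wakeref. */
		assert(count <= atomic_load(&gt->wakeref_count));
		atomic_fetch_sub(&gt->wakeref_count, count);
	} else {
		atomic_fetch_add(&gt->wakeref_count, count);
	}
	gt_pm_put(gt);
}

int main(void)
{
	struct fake_gt gt = { .user_wakeref = 2, .wakeref_count = 2 };

	user_forcewake(&gt, true);    /* suspend: hide user refs so the GT can park */
	printf("after suspend: wakeref_count = %d\n", atomic_load(&gt.wakeref_count));

	user_forcewake(&gt, false);   /* resume: restore the user refs */
	printf("after resume:  wakeref_count = %d\n", atomic_load(&gt.wakeref_count));
	return 0;
}

As the driver comment notes, the suspend/resume path is single threaded, so reading user_wakeref once and adjusting the wakeref count in separate steps is race-free; the sketch's assert() plays the role of the GEM_BUG_ON() sanity check in the patch.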