
Commit ec0bd60

Merge tag 'drm-msm-fixes-2020-02-16' of https://gitlab.freedesktop.org/drm/msm into drm-fixes
+ fix UBWC on GPU and display side for sc7180
+ fix DSI suspend/resume issue encountered on sc7180
+ fix some breakage on so called "linux-android" devices (fallout from
  sc7180/a618 support, not seen earlier due to bootloader/firmware
  differences)
+ couple other misc fixes

Signed-off-by: Dave Airlie <[email protected]>
From: Rob Clark <[email protected]>
Link: https://patchwork.freedesktop.org/patch/msgid/CAF6AEGshz5K3tJd=NsBSHq6HGT-ZRa67qt+iN=U2ZFO2oD8kuw@mail.gmail.com
2 parents 99edb18 + 8fc7036 commit ec0bd60

File tree: 9 files changed, +170 −100 lines


drivers/gpu/drm/msm/adreno/a6xx_gmu.c

Lines changed: 31 additions & 6 deletions
@@ -796,12 +796,41 @@ bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
 	return true;
 }
 
+#define GBIF_CLIENT_HALT_MASK             BIT(0)
+#define GBIF_ARB_HALT_MASK                BIT(1)
+
+static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
+{
+	struct msm_gpu *gpu = &adreno_gpu->base;
+
+	if (!a6xx_has_gbif(adreno_gpu)) {
+		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
+		spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) &
+								0xf) == 0xf);
+		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
+
+		return;
+	}
+
+	/* Halt new client requests on GBIF */
+	gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
+	spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
+			(GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK);
+
+	/* Halt all AXI requests on GBIF */
+	gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK);
+	spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
+			(GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK);
+
+	/* The GBIF halt needs to be explicitly cleared */
+	gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
+}
+
 /* Gracefully try to shut down the GMU and by extension the GPU */
 static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
 {
 	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
 	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
-	struct msm_gpu *gpu = &adreno_gpu->base;
 	u32 val;
 
 	/*

@@ -819,11 +848,7 @@ static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
 		return;
 	}
 
-	/* Clear the VBIF pipe before shutting down */
-	gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
-	spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) & 0xf)
-		== 0xf);
-	gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
+	a6xx_bus_clear_pending_transactions(adreno_gpu);
 
 	/* tell the GMU we want to slumber */
 	a6xx_gmu_notify_slumber(gmu);
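
Note on the hunks above: the new a6xx_bus_clear_pending_transactions() helper follows a request/poll/clear handshake -- write a halt request, busy-wait (via the driver's spin_until() macro) until the hardware reflects it in an ack register, and finally clear the halt again. The sketch below illustrates only that pattern, using a fake two-register MMIO file and invented reg_write()/reg_read() helpers in place of gpu_write()/gpu_read(); it is not the driver code.

/*
 * Illustrative sketch only (not the msm driver): the "request halt,
 * poll for ack, clear" pattern against a fake register file.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FAKE_GBIF_HALT		0
#define FAKE_GBIF_HALT_ACK	1
#define CLIENT_HALT_MASK	(1u << 0)
#define ARB_HALT_MASK		(1u << 1)

static uint32_t regs[2];	/* stand-in for the GPU's MMIO space */

static void reg_write(unsigned int reg, uint32_t val)
{
	regs[reg] = val;
	if (reg == FAKE_GBIF_HALT)
		regs[FAKE_GBIF_HALT_ACK] = val;	/* pretend the bus acks at once */
}

static uint32_t reg_read(unsigned int reg)
{
	return regs[reg];
}

/* Request a halt and poll the ack register until it is reflected back. */
static bool halt_and_wait(uint32_t mask, unsigned int max_tries)
{
	reg_write(FAKE_GBIF_HALT, mask);
	while (max_tries--)
		if ((reg_read(FAKE_GBIF_HALT_ACK) & mask) == mask)
			return true;
	return false;	/* timed out, as spin_until() eventually would */
}

int main(void)
{
	bool ok = halt_and_wait(CLIENT_HALT_MASK, 100) &&	/* stop new clients */
		  halt_and_wait(ARB_HALT_MASK, 100);		/* then arbitrated AXI */

	reg_write(FAKE_GBIF_HALT, 0);	/* the halt must be explicitly cleared */
	printf("halt sequence %s\n", ok ? "acked" : "timed out");
	return ok ? 0 : 1;
}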

drivers/gpu/drm/msm/adreno/a6xx_gpu.c

Lines changed: 6 additions & 59 deletions
@@ -378,18 +378,6 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
 	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
 	int ret;
 
-	/*
-	 * During a previous slumber, GBIF halt is asserted to ensure
-	 * no further transaction can go through GPU before GPU
-	 * headswitch is turned off.
-	 *
-	 * This halt is deasserted once headswitch goes off but
-	 * incase headswitch doesn't goes off clear GBIF halt
-	 * here to ensure GPU wake-up doesn't fail because of
-	 * halted GPU transactions.
-	 */
-	gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
-
 	/* Make sure the GMU keeps the GPU on while we set it up */
 	a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
 

@@ -470,10 +458,12 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
 	/* Select CP0 to always count cycles */
 	gpu_write(gpu, REG_A6XX_CP_PERFCTR_CP_SEL_0, PERF_CP_ALWAYS_COUNT);
 
-	gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL, 2 << 1);
-	gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, 2 << 1);
-	gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, 2 << 1);
-	gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, 2 << 21);
+	if (adreno_is_a630(adreno_gpu)) {
+		gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL, 2 << 1);
+		gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, 2 << 1);
+		gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, 2 << 1);
+		gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, 2 << 21);
+	}
 
 	/* Enable fault detection */
 	gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL,

@@ -748,39 +738,6 @@ static const u32 a6xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
 	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A6XX_CP_RB_CNTL),
 };
 
-#define GBIF_CLIENT_HALT_MASK             BIT(0)
-#define GBIF_ARB_HALT_MASK                BIT(1)
-
-static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
-{
-	struct msm_gpu *gpu = &adreno_gpu->base;
-
-	if(!a6xx_has_gbif(adreno_gpu)){
-		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
-		spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) &
-								0xf) == 0xf);
-		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
-
-		return;
-	}
-
-	/* Halt new client requests on GBIF */
-	gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
-	spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
-			(GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK);
-
-	/* Halt all AXI requests on GBIF */
-	gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK);
-	spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
-			(GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK);
-
-	/*
-	 * GMU needs DDR access in slumber path. Deassert GBIF halt now
-	 * to allow for GMU to access system memory.
-	 */
-	gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
-}
-
 static int a6xx_pm_resume(struct msm_gpu *gpu)
 {
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);

@@ -805,16 +762,6 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
 
 	devfreq_suspend_device(gpu->devfreq.devfreq);
 
-	/*
-	 * Make sure the GMU is idle before continuing (because some transitions
-	 * may use VBIF
-	 */
-	a6xx_gmu_wait_for_idle(&a6xx_gpu->gmu);
-
-	/* Clear the VBIF pipe before shutting down */
-	/* FIXME: This accesses the GPU - do we need to make sure it is on? */
-	a6xx_bus_clear_pending_transactions(adreno_gpu);
-
 	return a6xx_gmu_stop(a6xx_gpu);
 }
 
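
Note: the one functional addition in this file is the adreno_is_a630() gate; the hard-coded NC_MODE_CNTL/UCHE_MODE_CNTL values are a630 settings and are now skipped on other targets, which is part of the sc7180/a618 UBWC fix mentioned in the merge description. Per-target predicates like this are typically thin wrappers over a stored revision number; the snippet below is a guess at that shape only (struct, field and function names are invented, not the adreno_gpu.h definitions).

/* Illustration only: a per-target check keyed off a stored revision number. */
#include <stdbool.h>
#include <stdio.h>

struct gpu_ident {
	unsigned int revn;	/* e.g. 618 or 630 */
};

static bool ident_is_a630(const struct gpu_ident *id)
{
	return id->revn == 630;
}

int main(void)
{
	struct gpu_ident a618 = { .revn = 618 }, a630 = { .revn = 630 };

	printf("a618 gets a630 UBWC defaults: %d\n", ident_is_a630(&a618));	/* 0 */
	printf("a630 gets a630 UBWC defaults: %d\n", ident_is_a630(&a630));	/* 1 */
	return 0;
}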

drivers/gpu/drm/msm/adreno/a6xx_hfi.c

Lines changed: 60 additions & 25 deletions
@@ -7,6 +7,7 @@
 
 #include "a6xx_gmu.h"
 #include "a6xx_gmu.xml.h"
+#include "a6xx_gpu.h"
 
 #define HFI_MSG_ID(val) [val] = #val
 

@@ -216,48 +217,82 @@ static int a6xx_hfi_send_perf_table(struct a6xx_gmu *gmu)
 		NULL, 0);
 }
 
-static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
+static void a618_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
 {
-	struct a6xx_hfi_msg_bw_table msg = { 0 };
+	/* Send a single "off" entry since the 618 GMU doesn't do bus scaling */
+	msg->bw_level_num = 1;
+
+	msg->ddr_cmds_num = 3;
+	msg->ddr_wait_bitmask = 0x01;
+
+	msg->ddr_cmds_addrs[0] = 0x50000;
+	msg->ddr_cmds_addrs[1] = 0x5003c;
+	msg->ddr_cmds_addrs[2] = 0x5000c;
+
+	msg->ddr_cmds_data[0][0] = 0x40000000;
+	msg->ddr_cmds_data[0][1] = 0x40000000;
+	msg->ddr_cmds_data[0][2] = 0x40000000;
 
 	/*
-	 * The sdm845 GMU doesn't do bus frequency scaling on its own but it
-	 * does need at least one entry in the list because it might be accessed
-	 * when the GMU is shutting down. Send a single "off" entry.
+	 * These are the CX (CNOC) votes - these are used by the GMU but the
+	 * votes are known and fixed for the target
 	 */
+	msg->cnoc_cmds_num = 1;
+	msg->cnoc_wait_bitmask = 0x01;
+
+	msg->cnoc_cmds_addrs[0] = 0x5007c;
+	msg->cnoc_cmds_data[0][0] = 0x40000000;
+	msg->cnoc_cmds_data[1][0] = 0x60000001;
+}
 
-	msg.bw_level_num = 1;
+static void a6xx_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
+{
+	/* Send a single "off" entry since the 630 GMU doesn't do bus scaling */
+	msg->bw_level_num = 1;
 
-	msg.ddr_cmds_num = 3;
-	msg.ddr_wait_bitmask = 0x07;
+	msg->ddr_cmds_num = 3;
+	msg->ddr_wait_bitmask = 0x07;
 
-	msg.ddr_cmds_addrs[0] = 0x50000;
-	msg.ddr_cmds_addrs[1] = 0x5005c;
-	msg.ddr_cmds_addrs[2] = 0x5000c;
+	msg->ddr_cmds_addrs[0] = 0x50000;
+	msg->ddr_cmds_addrs[1] = 0x5005c;
+	msg->ddr_cmds_addrs[2] = 0x5000c;
 
-	msg.ddr_cmds_data[0][0] = 0x40000000;
-	msg.ddr_cmds_data[0][1] = 0x40000000;
-	msg.ddr_cmds_data[0][2] = 0x40000000;
+	msg->ddr_cmds_data[0][0] = 0x40000000;
+	msg->ddr_cmds_data[0][1] = 0x40000000;
+	msg->ddr_cmds_data[0][2] = 0x40000000;
 
 	/*
 	 * These are the CX (CNOC) votes. This is used but the values for the
 	 * sdm845 GMU are known and fixed so we can hard code them.
 	 */
 
-	msg.cnoc_cmds_num = 3;
-	msg.cnoc_wait_bitmask = 0x05;
+	msg->cnoc_cmds_num = 3;
+	msg->cnoc_wait_bitmask = 0x05;
 
-	msg.cnoc_cmds_addrs[0] = 0x50034;
-	msg.cnoc_cmds_addrs[1] = 0x5007c;
-	msg.cnoc_cmds_addrs[2] = 0x5004c;
+	msg->cnoc_cmds_addrs[0] = 0x50034;
+	msg->cnoc_cmds_addrs[1] = 0x5007c;
+	msg->cnoc_cmds_addrs[2] = 0x5004c;
 
-	msg.cnoc_cmds_data[0][0] = 0x40000000;
-	msg.cnoc_cmds_data[0][1] = 0x00000000;
-	msg.cnoc_cmds_data[0][2] = 0x40000000;
+	msg->cnoc_cmds_data[0][0] = 0x40000000;
+	msg->cnoc_cmds_data[0][1] = 0x00000000;
+	msg->cnoc_cmds_data[0][2] = 0x40000000;
+
+	msg->cnoc_cmds_data[1][0] = 0x60000001;
+	msg->cnoc_cmds_data[1][1] = 0x20000001;
+	msg->cnoc_cmds_data[1][2] = 0x60000001;
+}
+
+
+static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
+{
+	struct a6xx_hfi_msg_bw_table msg = { 0 };
+	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
 
-	msg.cnoc_cmds_data[1][0] = 0x60000001;
-	msg.cnoc_cmds_data[1][1] = 0x20000001;
-	msg.cnoc_cmds_data[1][2] = 0x60000001;
+	if (adreno_is_a618(adreno_gpu))
+		a618_build_bw_table(&msg);
+	else
+		a6xx_build_bw_table(&msg);
 
 	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_BW_TABLE, &msg, sizeof(msg),
 		NULL, 0);
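
Note: the refactor above separates "build the table" from "send the table" -- one builder per target fills in the a6xx_hfi_msg_bw_table, and a6xx_hfi_send_bw_table() just picks the builder based on the detected GPU. Below is a standalone sketch of that dispatch shape; the struct layout and function names are invented for illustration (only the bw_level_num and ddr_wait_bitmask values are taken from the diff), it is not the real HFI message.

/* Illustration of the per-target builder dispatch used above; not HFI code. */
#include <stdio.h>
#include <string.h>

struct fake_bw_table {
	unsigned int bw_level_num;
	unsigned int ddr_wait_bitmask;
};

static void build_a618(struct fake_bw_table *msg)
{
	msg->bw_level_num = 1;		/* single "off" level */
	msg->ddr_wait_bitmask = 0x01;
}

static void build_default(struct fake_bw_table *msg)
{
	msg->bw_level_num = 1;
	msg->ddr_wait_bitmask = 0x07;
}

static void send_bw_table(unsigned int revn)
{
	struct fake_bw_table msg;

	memset(&msg, 0, sizeof(msg));
	if (revn == 618)
		build_a618(&msg);
	else
		build_default(&msg);

	printf("rev %u: levels=%u wait=0x%02x\n",
	       revn, msg.bw_level_num, msg.ddr_wait_bitmask);
}

int main(void)
{
	send_bw_table(618);
	send_bw_table(630);
	return 0;
}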

drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c

Lines changed: 2 additions & 2 deletions
@@ -255,13 +255,13 @@ static const struct dpu_format dpu_format_map[] = {
 
 	INTERLEAVED_RGB_FMT(RGB565,
 		0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
-		C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+		C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
 		false, 2, 0,
 		DPU_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(BGR565,
 		0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
-		C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
 		false, 2, 0,
 		DPU_FETCH_LINEAR, 1),
 

drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c

Lines changed: 57 additions & 1 deletion
@@ -12,6 +12,7 @@
 
 #define to_dpu_mdss(x) container_of(x, struct dpu_mdss, base)
 
+#define HW_REV				0x0
 #define HW_INTR_STATUS			0x0010
 
 /* Max BW defined in KBps */

@@ -22,6 +23,17 @@ struct dpu_irq_controller {
 	struct irq_domain *domain;
 };
 
+struct dpu_hw_cfg {
+	u32 val;
+	u32 offset;
+};
+
+struct dpu_mdss_hw_init_handler {
+	u32 hw_rev;
+	u32 hw_reg_count;
+	struct dpu_hw_cfg* hw_cfg;
+};
+
 struct dpu_mdss {
 	struct msm_mdss base;
 	void __iomem *mmio;

@@ -32,6 +44,44 @@ struct dpu_mdss {
 	u32 num_paths;
 };
 
+static struct dpu_hw_cfg hw_cfg[] = {
+    {
+	/* UBWC global settings */
+	.val = 0x1E,
+	.offset = 0x144,
+    }
+};
+
+static struct dpu_mdss_hw_init_handler cfg_handler[] = {
+    { .hw_rev = DPU_HW_VER_620,
+      .hw_reg_count = ARRAY_SIZE(hw_cfg),
+      .hw_cfg = hw_cfg
+    },
+};
+
+static void dpu_mdss_hw_init(struct dpu_mdss *dpu_mdss, u32 hw_rev)
+{
+	int i;
+	u32 count = 0;
+	struct dpu_hw_cfg *hw_cfg = NULL;
+
+	for (i = 0; i < ARRAY_SIZE(cfg_handler); i++) {
+		if (cfg_handler[i].hw_rev == hw_rev) {
+			hw_cfg = cfg_handler[i].hw_cfg;
+			count = cfg_handler[i].hw_reg_count;
+			break;
+		}
+	}
+
+	for (i = 0; i < count; i++ ) {
+		writel_relaxed(hw_cfg->val,
+			dpu_mdss->mmio + hw_cfg->offset);
+		hw_cfg++;
+	}
+
+	return;
+}
+
 static int dpu_mdss_parse_data_bus_icc_path(struct drm_device *dev,
 		struct dpu_mdss *dpu_mdss)
 {

@@ -174,12 +224,18 @@ static int dpu_mdss_enable(struct msm_mdss *mdss)
 	struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
 	struct dss_module_power *mp = &dpu_mdss->mp;
 	int ret;
+	u32 mdss_rev;
 
 	dpu_mdss_icc_request_bw(mdss);
 
 	ret = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
-	if (ret)
+	if (ret) {
 		DPU_ERROR("clock enable failed, ret:%d\n", ret);
+		return ret;
+	}
+
+	mdss_rev = readl_relaxed(dpu_mdss->mmio + HW_REV);
+	dpu_mdss_hw_init(dpu_mdss, mdss_rev);
 
 	return ret;
 }
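
Note: dpu_mdss_hw_init() above is a table-driven initializer -- the MDSS revision read from HW_REV selects an array of (offset, value) pairs that get programmed with writel_relaxed(), so supporting another revision only needs a new register table plus one cfg_handler entry. Below is a self-contained sketch of the same lookup-and-program loop; the revisions, offsets and values are invented, and fake_mmio_write() merely stands in for the driver's writel_relaxed() on dpu_mdss->mmio.

/* Standalone sketch of a revision-keyed register init table; not driver code. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define ARRAY_LEN(a)	(sizeof(a) / sizeof((a)[0]))

struct reg_cfg {
	uint32_t offset;
	uint32_t val;
};

struct hw_init_handler {
	uint32_t hw_rev;
	size_t count;
	const struct reg_cfg *cfg;
};

static const struct reg_cfg rev_620_cfg[] = {
	{ .offset = 0x144, .val = 0x1e },	/* e.g. a UBWC global setting */
};

static const struct hw_init_handler handlers[] = {
	{ .hw_rev = 0x620, .count = ARRAY_LEN(rev_620_cfg), .cfg = rev_620_cfg },
};

/* In the driver this would be writel_relaxed(val, mmio + offset). */
static void fake_mmio_write(uint32_t offset, uint32_t val)
{
	printf("write 0x%08x -> +0x%04x\n", (unsigned int)val, (unsigned int)offset);
}

static void hw_init(uint32_t hw_rev)
{
	for (size_t i = 0; i < ARRAY_LEN(handlers); i++) {
		if (handlers[i].hw_rev != hw_rev)
			continue;
		for (size_t j = 0; j < handlers[i].count; j++)
			fake_mmio_write(handlers[i].cfg[j].offset,
					handlers[i].cfg[j].val);
		break;
	}
}

int main(void)
{
	hw_init(0x620);	/* matches the table: programs one register */
	hw_init(0x500);	/* unknown revision: no writes */
	return 0;
}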

drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c

Lines changed: 2 additions & 2 deletions
@@ -1109,8 +1109,8 @@ static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc)
 	ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion,
 						msecs_to_jiffies(50));
 	if (ret == 0)
-		dev_warn(dev->dev, "pp done time out, lm=%d\n",
-			 mdp5_cstate->pipeline.mixer->lm);
+		dev_warn_ratelimited(dev->dev, "pp done time out, lm=%d\n",
+				     mdp5_cstate->pipeline.mixer->lm);
 }
 
 static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc)
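
Note: the only change here swaps dev_warn() for dev_warn_ratelimited() so a stream of "pp done time out" events cannot flood the kernel log. The toy below illustrates the general idea of such rate limiting; it is not the kernel's implementation, and the window length and burst size are invented numbers.

/* Toy rate limiter: allow a small burst per time window, then suppress. */
#include <stdio.h>
#include <time.h>

struct toy_ratelimit {
	time_t window_start;
	int window_secs;
	int burst;
	int printed;
};

static int ratelimit_ok(struct toy_ratelimit *rl)
{
	time_t now = time(NULL);

	if (now - rl->window_start >= rl->window_secs) {
		rl->window_start = now;	/* new window, reset the budget */
		rl->printed = 0;
	}
	if (rl->printed >= rl->burst)
		return 0;		/* over budget: suppress this message */
	rl->printed++;
	return 1;
}

int main(void)
{
	struct toy_ratelimit rl = { .window_secs = 5, .burst = 3 };

	for (int i = 0; i < 10; i++)
		if (ratelimit_ok(&rl))
			printf("pp done time out (message %d)\n", i);
	return 0;
}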
