@@ -16,6 +16,9 @@
 
 #define QCOM_DUMMY_VAL -1
 
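+/* ACTLR bit used to enable PRR (Partially Resident Region) handling */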
+#define GFX_ACTLR_PRR		(1 << 5)
+
 static struct qcom_smmu *to_qcom_smmu(struct arm_smmu_device *smmu)
 {
 	return container_of(smmu, struct qcom_smmu, smmu);
@@ -99,6 +102,49 @@ static void qcom_adreno_smmu_resume_translation(const void *cookie, bool termina
 	arm_smmu_cb_write(smmu, cfg->cbndx, ARM_SMMU_CB_RESUME, reg);
 }
 
+static void qcom_adreno_smmu_set_prr_bit(const void *cookie, bool set)
+{
+	struct arm_smmu_domain *smmu_domain = (void *)cookie;
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+	u32 reg = 0;
+	int ret;
+
+	ret = pm_runtime_resume_and_get(smmu->dev);
+	if (ret < 0) {
+		dev_err(smmu->dev, "failed to get runtime PM: %d\n", ret);
+		return;
+	}
+
+	reg = arm_smmu_cb_read(smmu, cfg->cbndx, ARM_SMMU_CB_ACTLR);
+	reg &= ~GFX_ACTLR_PRR;
+	if (set)
+		reg |= FIELD_PREP(GFX_ACTLR_PRR, 1);
+	arm_smmu_cb_write(smmu, cfg->cbndx, ARM_SMMU_CB_ACTLR, reg);
+	pm_runtime_put_autosuspend(smmu->dev);
+}
+
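+/* Program the 64-bit physical address of the PRR page into the global PRR_CFG registers */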
+static void qcom_adreno_smmu_set_prr_addr(const void *cookie, phys_addr_t page_addr)
+{
+	struct arm_smmu_domain *smmu_domain = (void *)cookie;
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	int ret;
+
+	ret = pm_runtime_resume_and_get(smmu->dev);
+	if (ret < 0) {
+		dev_err(smmu->dev, "failed to get runtime PM: %d\n", ret);
+		return;
+	}
+
+	writel_relaxed(lower_32_bits(page_addr),
+		       smmu->base + ARM_SMMU_GFX_PRR_CFG_LADDR);
+	writel_relaxed(upper_32_bits(page_addr),
+		       smmu->base + ARM_SMMU_GFX_PRR_CFG_UADDR);
+	pm_runtime_put_autosuspend(smmu->dev);
+}
+
 #define QCOM_ADRENO_SMMU_GPU_SID 0
 
 static bool qcom_adreno_smmu_is_gpu_device(struct device *dev)
@@ -210,6 +256,7 @@ static bool qcom_adreno_can_do_ttbr1(struct arm_smmu_device *smmu)
 static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain,
 		struct io_pgtable_cfg *pgtbl_cfg, struct device *dev)
 {
+	const struct device_node *np = smmu_domain->smmu->dev->of_node;
 	struct adreno_smmu_priv *priv;
 
 	smmu_domain->cfg.flush_walk_prefer_tlbiasid = true;
@@ -239,6 +286,15 @@ static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain,
 	priv->get_fault_info = qcom_adreno_smmu_get_fault_info;
 	priv->set_stall = qcom_adreno_smmu_set_stall;
 	priv->resume_translation = qcom_adreno_smmu_resume_translation;
+	priv->set_prr_bit = NULL;
+	priv->set_prr_addr = NULL;
+
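+	/* The PRR hooks are only wired up on the Adreno variant of the SMMU-500 */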
+	if (of_device_is_compatible(np, "qcom,smmu-500") &&
+	    of_device_is_compatible(np, "qcom,adreno-smmu")) {
+		priv->set_prr_bit = qcom_adreno_smmu_set_prr_bit;
+		priv->set_prr_addr = qcom_adreno_smmu_set_prr_addr;
+	}
 
 	return 0;
 }