|
| 1 | +// SPDX-License-Identifier: GPL-2.0 |
| 2 | +/* |
| 3 | + * Qualcomm ICE (Inline Crypto Engine) support. |
| 4 | + * |
| 5 | + * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved. |
| 6 | + * Copyright (c) 2019, Google LLC |
| 7 | + * Copyright (c) 2023, Linaro Limited |
| 8 | + */ |
| 9 | + |
| 10 | +#include <linux/bitfield.h> |
| 11 | +#include <linux/clk.h> |
| 12 | +#include <linux/delay.h> |
| 13 | +#include <linux/iopoll.h> |
| 14 | +#include <linux/of_platform.h> |
| 15 | + |
| 16 | +#include <linux/firmware/qcom/qcom_scm.h> |
| 17 | + |
| 18 | +#include <soc/qcom/ice.h> |
| 19 | + |
| 20 | +#define AES_256_XTS_KEY_SIZE 64 |
| 21 | + |
| 22 | +/* QCOM ICE registers */ |
| 23 | +#define QCOM_ICE_REG_VERSION 0x0008 |
| 24 | +#define QCOM_ICE_REG_FUSE_SETTING 0x0010 |
| 25 | +#define QCOM_ICE_REG_BIST_STATUS 0x0070 |
| 26 | +#define QCOM_ICE_REG_ADVANCED_CONTROL 0x1000 |
| 27 | + |
| 28 | +/* BIST ("built-in self-test") status flags */ |
| 29 | +#define QCOM_ICE_BIST_STATUS_MASK GENMASK(31, 28) |
| 30 | + |
| 31 | +#define QCOM_ICE_FUSE_SETTING_MASK 0x1 |
| 32 | +#define QCOM_ICE_FORCE_HW_KEY0_SETTING_MASK 0x2 |
| 33 | +#define QCOM_ICE_FORCE_HW_KEY1_SETTING_MASK 0x4 |
| 34 | + |
| 35 | +#define qcom_ice_writel(engine, val, reg) \ |
| 36 | + writel((val), (engine)->base + (reg)) |
| 37 | + |
| 38 | +#define qcom_ice_readl(engine, reg) \ |
| 39 | + readl((engine)->base + (reg)) |
| 40 | + |
/*
 * struct qcom_ice - per-instance state for one Inline Crypto Engine
 * @dev: device that owns this instance (the dedicated ICE platform device,
 *       or the consumer device itself for the legacy DT binding)
 * @base: ioremapped base of the ICE register block
 * @link: device link from a consumer to this supplier; only set when the
 *        instance was obtained via a "qcom,ice" phandle in of_qcom_ice_get()
 * @core_clk: ICE core clock (may be NULL when the optional lookup found none)
 */
struct qcom_ice {
	struct device *dev;
	void __iomem *base;
	struct device_link *link;

	struct clk *core_clk;
};
| 48 | + |
| 49 | +static bool qcom_ice_check_supported(struct qcom_ice *ice) |
| 50 | +{ |
| 51 | + u32 regval = qcom_ice_readl(ice, QCOM_ICE_REG_VERSION); |
| 52 | + struct device *dev = ice->dev; |
| 53 | + int major = FIELD_GET(GENMASK(31, 24), regval); |
| 54 | + int minor = FIELD_GET(GENMASK(23, 16), regval); |
| 55 | + int step = FIELD_GET(GENMASK(15, 0), regval); |
| 56 | + |
| 57 | + /* For now this driver only supports ICE version 3 and 4. */ |
| 58 | + if (major != 3 && major != 4) { |
| 59 | + dev_warn(dev, "Unsupported ICE version: v%d.%d.%d\n", |
| 60 | + major, minor, step); |
| 61 | + return false; |
| 62 | + } |
| 63 | + |
| 64 | + dev_info(dev, "Found QC Inline Crypto Engine (ICE) v%d.%d.%d\n", |
| 65 | + major, minor, step); |
| 66 | + |
| 67 | + /* If fuses are blown, ICE might not work in the standard way. */ |
| 68 | + regval = qcom_ice_readl(ice, QCOM_ICE_REG_FUSE_SETTING); |
| 69 | + if (regval & (QCOM_ICE_FUSE_SETTING_MASK | |
| 70 | + QCOM_ICE_FORCE_HW_KEY0_SETTING_MASK | |
| 71 | + QCOM_ICE_FORCE_HW_KEY1_SETTING_MASK)) { |
| 72 | + dev_warn(dev, "Fuses are blown; ICE is unusable!\n"); |
| 73 | + return false; |
| 74 | + } |
| 75 | + |
| 76 | + return true; |
| 77 | +} |
| 78 | + |
| 79 | +static void qcom_ice_low_power_mode_enable(struct qcom_ice *ice) |
| 80 | +{ |
| 81 | + u32 regval; |
| 82 | + |
| 83 | + regval = qcom_ice_readl(ice, QCOM_ICE_REG_ADVANCED_CONTROL); |
| 84 | + |
| 85 | + /* Enable low power mode sequence */ |
| 86 | + regval |= 0x7000; |
| 87 | + qcom_ice_writel(ice, regval, QCOM_ICE_REG_ADVANCED_CONTROL); |
| 88 | +} |
| 89 | + |
/* Apply the hardware optimization settings to ADVANCED_CONTROL. */
static void qcom_ice_optimization_enable(struct qcom_ice *ice)
{
	u32 regval;

	/* ICE Optimizations Enable Sequence */
	regval = qcom_ice_readl(ice, QCOM_ICE_REG_ADVANCED_CONTROL);
	/* 0xd807100: optimization enable bits — presumably from the ICE
	 * Hardware Programming Guide; confirm against the HPG.
	 */
	regval |= 0xd807100;
	/* ICE HPG requires delay before writing */
	udelay(5);
	qcom_ice_writel(ice, regval, QCOM_ICE_REG_ADVANCED_CONTROL);
	udelay(5);
}
| 102 | + |
| 103 | +/* |
| 104 | + * Wait until the ICE BIST (built-in self-test) has completed. |
| 105 | + * |
| 106 | + * This may be necessary before ICE can be used. |
| 107 | + * Note that we don't really care whether the BIST passed or failed; |
| 108 | + * we really just want to make sure that it isn't still running. This is |
| 109 | + * because (a) the BIST is a FIPS compliance thing that never fails in |
| 110 | + * practice, (b) ICE is documented to reject crypto requests if the BIST |
| 111 | + * fails, so we needn't do it in software too, and (c) properly testing |
| 112 | + * storage encryption requires testing the full storage stack anyway, |
| 113 | + * and not relying on hardware-level self-tests. |
| 114 | + */ |
| 115 | +static int qcom_ice_wait_bist_status(struct qcom_ice *ice) |
| 116 | +{ |
| 117 | + u32 regval; |
| 118 | + int err; |
| 119 | + |
| 120 | + err = readl_poll_timeout(ice->base + QCOM_ICE_REG_BIST_STATUS, |
| 121 | + regval, !(regval & QCOM_ICE_BIST_STATUS_MASK), |
| 122 | + 50, 5000); |
| 123 | + if (err) |
| 124 | + dev_err(ice->dev, "Timed out waiting for ICE self-test to complete\n"); |
| 125 | + |
| 126 | + return err; |
| 127 | +} |
| 128 | + |
/**
 * qcom_ice_enable() - initialize the ICE hardware for crypto operation
 * @ice: the ICE instance to enable
 *
 * Enables low power mode, applies the hardware optimization settings and
 * then waits for the built-in self-test to finish.
 *
 * Return: 0 on success, or a negative error code if the BIST poll timed out.
 */
int qcom_ice_enable(struct qcom_ice *ice)
{
	qcom_ice_low_power_mode_enable(ice);
	qcom_ice_optimization_enable(ice);

	return qcom_ice_wait_bist_status(ice);
}
EXPORT_SYMBOL_GPL(qcom_ice_enable);
| 137 | + |
| 138 | +int qcom_ice_resume(struct qcom_ice *ice) |
| 139 | +{ |
| 140 | + struct device *dev = ice->dev; |
| 141 | + int err; |
| 142 | + |
| 143 | + err = clk_prepare_enable(ice->core_clk); |
| 144 | + if (err) { |
| 145 | + dev_err(dev, "failed to enable core clock (%d)\n", |
| 146 | + err); |
| 147 | + return err; |
| 148 | + } |
| 149 | + |
| 150 | + return qcom_ice_wait_bist_status(ice); |
| 151 | +} |
| 152 | +EXPORT_SYMBOL_GPL(qcom_ice_resume); |
| 153 | + |
/**
 * qcom_ice_suspend() - suspend the ICE by gating its core clock
 * @ice: the ICE instance to suspend
 *
 * Return: always 0.
 */
int qcom_ice_suspend(struct qcom_ice *ice)
{
	clk_disable_unprepare(ice->core_clk);

	return 0;
}
EXPORT_SYMBOL_GPL(qcom_ice_suspend);
| 161 | + |
/**
 * qcom_ice_program_key() - program an inline-crypto key into an ICE keyslot
 * @ice: the ICE instance
 * @algorithm_id: cipher selector; only QCOM_ICE_CRYPTO_ALG_AES_XTS is handled
 * @key_size: key size selector; only QCOM_ICE_CRYPTO_KEY_SIZE_256 is handled
 * @crypto_key: raw key material; must hold AES_256_XTS_KEY_SIZE (64) bytes
 * @data_unit_size: crypto data unit size, passed through to the SCM call
 * @slot: keyslot index to program
 *
 * The key is byte-swapped into big-endian words and handed to TrustZone via
 * an SCM call; the local copy is wiped before returning.
 *
 * Return: 0 on success, -EINVAL for an unsupported algorithm/key size, or the
 * error returned by qcom_scm_ice_set_key().
 */
int qcom_ice_program_key(struct qcom_ice *ice,
			 u8 algorithm_id, u8 key_size,
			 const u8 crypto_key[], u8 data_unit_size,
			 int slot)
{
	struct device *dev = ice->dev;
	/* Union lets us swap the key in place as 32-bit words. */
	union {
		u8 bytes[AES_256_XTS_KEY_SIZE];
		u32 words[AES_256_XTS_KEY_SIZE / sizeof(u32)];
	} key;
	int i;
	int err;

	/* Only AES-256-XTS has been tested so far. */
	if (algorithm_id != QCOM_ICE_CRYPTO_ALG_AES_XTS ||
	    key_size != QCOM_ICE_CRYPTO_KEY_SIZE_256) {
		dev_err_ratelimited(dev,
				    "Unhandled crypto capability; algorithm_id=%d, key_size=%d\n",
				    algorithm_id, key_size);
		return -EINVAL;
	}

	memcpy(key.bytes, crypto_key, AES_256_XTS_KEY_SIZE);

	/* The SCM call requires that the key words are encoded in big endian */
	for (i = 0; i < ARRAY_SIZE(key.words); i++)
		__cpu_to_be32s(&key.words[i]);

	err = qcom_scm_ice_set_key(slot, key.bytes, AES_256_XTS_KEY_SIZE,
				   QCOM_SCM_ICE_CIPHER_AES_256_XTS,
				   data_unit_size);

	/* Wipe the stack copy of the key; plain memset could be elided. */
	memzero_explicit(&key, sizeof(key));

	return err;
}
EXPORT_SYMBOL_GPL(qcom_ice_program_key);
| 199 | + |
/**
 * qcom_ice_evict_key() - invalidate the key in an ICE keyslot
 * @ice: the ICE instance
 * @slot: keyslot index to evict
 *
 * Return: the result of the qcom_scm_ice_invalidate_key() SCM call.
 */
int qcom_ice_evict_key(struct qcom_ice *ice, int slot)
{
	return qcom_scm_ice_invalidate_key(slot);
}
EXPORT_SYMBOL_GPL(qcom_ice_evict_key);
| 205 | + |
/*
 * Allocate and initialize a qcom_ice instance for @dev over the already
 * ioremapped register range @base.
 *
 * Returns the new instance, NULL if the SCM ICE interface is absent (ICE
 * unsupported but not an error), or an ERR_PTR() on failure —
 * including -EPROBE_DEFER when SCM itself is not up yet.
 */
static struct qcom_ice *qcom_ice_create(struct device *dev,
					void __iomem *base)
{
	struct qcom_ice *engine;

	if (!qcom_scm_is_available())
		return ERR_PTR(-EPROBE_DEFER);

	if (!qcom_scm_ice_available()) {
		dev_warn(dev, "ICE SCM interface not found\n");
		return NULL;
	}

	engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return ERR_PTR(-ENOMEM);

	engine->dev = dev;
	engine->base = base;

	/*
	 * Legacy DT binding uses different clk names for each consumer,
	 * so lets try those first. If none of those are a match, it means
	 * we only have one clock and it is part of the dedicated DT node.
	 * Also, enable the clock before we check what HW version the driver
	 * supports.
	 */
	engine->core_clk = devm_clk_get_optional_enabled(dev, "ice_core_clk");
	if (!engine->core_clk)
		engine->core_clk = devm_clk_get_optional_enabled(dev, "ice");
	if (!engine->core_clk)
		engine->core_clk = devm_clk_get_enabled(dev, NULL);
	if (IS_ERR(engine->core_clk))
		return ERR_CAST(engine->core_clk);

	if (!qcom_ice_check_supported(engine))
		return ERR_PTR(-EOPNOTSUPP);

	dev_dbg(dev, "Registered Qualcomm Inline Crypto Engine\n");

	return engine;
}
| 248 | + |
| 249 | +/** |
| 250 | + * of_qcom_ice_get() - get an ICE instance from a DT node |
| 251 | + * @dev: device pointer for the consumer device |
| 252 | + * |
| 253 | + * This function will provide an ICE instance either by creating one for the |
| 254 | + * consumer device if its DT node provides the 'ice' reg range and the 'ice' |
| 255 | + * clock (for legacy DT style). On the other hand, if consumer provides a |
| 256 | + * phandle via 'qcom,ice' property to an ICE DT, the ICE instance will already |
| 257 | + * be created and so this function will return that instead. |
| 258 | + * |
| 259 | + * Return: ICE pointer on success, NULL if there is no ICE data provided by the |
| 260 | + * consumer or ERR_PTR() on error. |
| 261 | + */ |
| 262 | +struct qcom_ice *of_qcom_ice_get(struct device *dev) |
| 263 | +{ |
| 264 | + struct platform_device *pdev = to_platform_device(dev); |
| 265 | + struct qcom_ice *ice; |
| 266 | + struct device_node *node; |
| 267 | + struct resource *res; |
| 268 | + void __iomem *base; |
| 269 | + |
| 270 | + if (!dev || !dev->of_node) |
| 271 | + return ERR_PTR(-ENODEV); |
| 272 | + |
| 273 | + /* |
| 274 | + * In order to support legacy style devicetree bindings, we need |
| 275 | + * to create the ICE instance using the consumer device and the reg |
| 276 | + * range called 'ice' it provides. |
| 277 | + */ |
| 278 | + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ice"); |
| 279 | + if (res) { |
| 280 | + base = devm_ioremap_resource(&pdev->dev, res); |
| 281 | + if (IS_ERR(base)) |
| 282 | + return ERR_CAST(base); |
| 283 | + |
| 284 | + /* create ICE instance using consumer dev */ |
| 285 | + return qcom_ice_create(&pdev->dev, base); |
| 286 | + } |
| 287 | + |
| 288 | + /* |
| 289 | + * If the consumer node does not provider an 'ice' reg range |
| 290 | + * (legacy DT binding), then it must at least provide a phandle |
| 291 | + * to the ICE devicetree node, otherwise ICE is not supported. |
| 292 | + */ |
| 293 | + node = of_parse_phandle(dev->of_node, "qcom,ice", 0); |
| 294 | + if (!node) |
| 295 | + return NULL; |
| 296 | + |
| 297 | + pdev = of_find_device_by_node(node); |
| 298 | + if (!pdev) { |
| 299 | + dev_err(dev, "Cannot find device node %s\n", node->name); |
| 300 | + ice = ERR_PTR(-EPROBE_DEFER); |
| 301 | + goto out; |
| 302 | + } |
| 303 | + |
| 304 | + ice = platform_get_drvdata(pdev); |
| 305 | + if (!ice) { |
| 306 | + dev_err(dev, "Cannot get ice instance from %s\n", |
| 307 | + dev_name(&pdev->dev)); |
| 308 | + platform_device_put(pdev); |
| 309 | + ice = ERR_PTR(-EPROBE_DEFER); |
| 310 | + goto out; |
| 311 | + } |
| 312 | + |
| 313 | + ice->link = device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_SUPPLIER); |
| 314 | + if (!ice->link) { |
| 315 | + dev_err(&pdev->dev, |
| 316 | + "Failed to create device link to consumer %s\n", |
| 317 | + dev_name(dev)); |
| 318 | + platform_device_put(pdev); |
| 319 | + ice = ERR_PTR(-EINVAL); |
| 320 | + } |
| 321 | + |
| 322 | +out: |
| 323 | + of_node_put(node); |
| 324 | + |
| 325 | + return ice; |
| 326 | +} |
| 327 | +EXPORT_SYMBOL_GPL(of_qcom_ice_get); |
| 328 | + |
/* Probe for the dedicated ICE platform device (non-legacy DT binding). */
static int qcom_ice_probe(struct platform_device *pdev)
{
	struct qcom_ice *engine;
	void __iomem *base;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base)) {
		dev_warn(&pdev->dev, "ICE registers not found\n");
		return PTR_ERR(base);
	}

	engine = qcom_ice_create(&pdev->dev, base);
	if (IS_ERR(engine))
		return PTR_ERR(engine);

	/* Saved so of_qcom_ice_get() can look up the instance via the pdev. */
	platform_set_drvdata(pdev, engine);

	return 0;
}
| 348 | + |
/* Match the dedicated ICE devicetree node. */
static const struct of_device_id qcom_ice_of_match_table[] = {
	{ .compatible = "qcom,inline-crypto-engine" },
	{ },
};
MODULE_DEVICE_TABLE(of, qcom_ice_of_match_table);

static struct platform_driver qcom_ice_driver = {
	.probe = qcom_ice_probe,
	.driver = {
		.name = "qcom-ice",
		.of_match_table = qcom_ice_of_match_table,
	},
};

module_platform_driver(qcom_ice_driver);

MODULE_DESCRIPTION("Qualcomm Inline Crypto Engine driver");
MODULE_LICENSE("GPL");
0 commit comments