// SPDX-License-Identifier: GPL-2.0-only
/*
 * ARM cacheinfo support
 *
 * Copyright (C) 2023 Linaro Ltd.
 * Copyright (C) 2015 ARM Ltd.
 * All Rights Reserved
 */

#include <linux/bitfield.h>
#include <linux/cacheinfo.h>
#include <linux/of.h>

#include <asm/cachetype.h>
#include <asm/cputype.h>
#include <asm/system_info.h>

/* Ctypen, bits[3(n - 1) + 2 : 3(n - 1)], for n = 1 to 7 */
#define CLIDR_CTYPE_SHIFT(level)	(3 * (level - 1))
#define CLIDR_CTYPE_MASK(level)		(7 << CLIDR_CTYPE_SHIFT(level))
#define CLIDR_CTYPE(clidr, level) \
	(((clidr) & CLIDR_CTYPE_MASK(level)) >> CLIDR_CTYPE_SHIFT(level))

#define MAX_CACHE_LEVEL		7	/* Max 7 levels supported */

#define CTR_FORMAT_MASK		GENMASK(31, 29)
#define CTR_FORMAT_ARMV6	0
#define CTR_FORMAT_ARMV7	4
#define CTR_CWG_MASK		GENMASK(27, 24)
#define CTR_DSIZE_LEN_MASK	GENMASK(13, 12)
#define CTR_ISIZE_LEN_MASK	GENMASK(1, 0)

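/*
 * CTR bits [31:29] give the register format: 0b100 is the ARMv7 layout,
 * where CWG (bits [27:24]) encodes the cache writeback granule as
 * 4 << CWG bytes; 0b000 is the ARMv6 layout, where the Dsize/Isize
 * "len" fields encode the line length as 8 << len bytes.
 */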
/* Also valid for v7m */
static inline int cache_line_size_cp15(void)
{
	u32 ctr = read_cpuid_cachetype();
	u32 format = FIELD_GET(CTR_FORMAT_MASK, ctr);

	if (format == CTR_FORMAT_ARMV7) {
		u32 cwg = FIELD_GET(CTR_CWG_MASK, ctr);

		return cwg ? 4 << cwg : ARCH_DMA_MINALIGN;
	} else if (WARN_ON_ONCE(format != CTR_FORMAT_ARMV6)) {
		return ARCH_DMA_MINALIGN;
	}

	return 8 << max(FIELD_GET(CTR_ISIZE_LEN_MASK, ctr),
			FIELD_GET(CTR_DSIZE_LEN_MASK, ctr));
}

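/*
 * coherency_max_size is the largest cache line size reported to the
 * generic cacheinfo code by firmware (device tree), so prefer it when
 * set and only then fall back to decoding CTR via CP15.
 */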
int cache_line_size(void)
{
	if (coherency_max_size != 0)
		return coherency_max_size;

	/* CP15 is optional / implementation defined before ARMv6 */
	if (cpu_architecture() < CPU_ARCH_ARMv6)
		return ARCH_DMA_MINALIGN;

	return cache_line_size_cp15();
}
EXPORT_SYMBOL_GPL(cache_line_size);

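/*
 * The CLIDR Ctype encoding (0 = no cache, 1 = I only, 2 = D only,
 * 3 = separate I and D, 4 = unified) matches the enum cache_type values,
 * so the field can be returned directly.  For example, a CLIDR of
 * 0x0a200023 decodes to Ctype1 = 0b011 (separate L1 I/D caches) and
 * Ctype2 = 0b100 (unified L2).
 */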
static inline enum cache_type get_cache_type(int level)
{
	u32 clidr;

	if (level > MAX_CACHE_LEVEL)
		return CACHE_TYPE_NOCACHE;

	clidr = read_clidr();

	return CLIDR_CTYPE(clidr, level);
}

static void ci_leaf_init(struct cacheinfo *this_leaf,
			 enum cache_type type, unsigned int level)
{
	this_leaf->level = level;
	this_leaf->type = type;
}

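/*
 * Walk the CLIDR Ctype fields from L1 upwards until the first level
 * reporting no cache, counting one leaf per unified cache and two
 * (instruction + data) per level with separate caches.
 */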
static int detect_cache_level(unsigned int *level_p, unsigned int *leaves_p)
{
	unsigned int ctype, level, leaves;
	u32 ctr, format;

	/* CLIDR is not present before ARMv7/v7m */
	if (cpu_architecture() < CPU_ARCH_ARMv7)
		return -EOPNOTSUPP;

	/* Don't try reading CLIDR if CTR declares old format */
	ctr = read_cpuid_cachetype();
	format = FIELD_GET(CTR_FORMAT_MASK, ctr);
	if (format != CTR_FORMAT_ARMV7)
		return -EOPNOTSUPP;

	for (level = 1, leaves = 0; level <= MAX_CACHE_LEVEL; level++) {
		ctype = get_cache_type(level);
		if (ctype == CACHE_TYPE_NOCACHE) {
			level--;
			break;
		}
		/* Separate instruction and data caches */
		leaves += (ctype == CACHE_TYPE_SEPARATE) ? 2 : 1;
	}

	*level_p = level;
	*leaves_p = leaves;

	return 0;
}

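/*
 * early_cache_level() provides the generic cacheinfo code with an early
 * CLIDR-based estimate used to size the per-CPU cacheinfo allocation;
 * init_cache_level() is called later and also accounts for outer caches
 * that are only described in the device tree.
 */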
int early_cache_level(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);

	return detect_cache_level(&this_cpu_ci->num_levels, &this_cpu_ci->num_leaves);
}

int init_cache_level(unsigned int cpu)
{
	unsigned int level, leaves;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	int fw_level;
	int ret;

	ret = detect_cache_level(&level, &leaves);
	if (ret)
		return ret;

	fw_level = of_find_last_cache_level(cpu);

	if (level < fw_level) {
		/*
		 * Some external caches are not specified in CLIDR;
		 * the information may be available in the device tree.
		 * Only unified external caches are considered here.
		 */
		leaves += (fw_level - level);
		level = fw_level;
	}

	this_cpu_ci->num_levels = level;
	this_cpu_ci->num_leaves = leaves;
	return 0;
}

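/*
 * Only the type and level of each leaf can be derived from CLIDR here;
 * the remaining geometry (size, ways, line size) is filled in by the
 * generic cacheinfo code from the device tree, when it is described
 * there.
 */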
int populate_cache_leaves(unsigned int cpu)
{
	unsigned int level, idx;
	enum cache_type type;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf = this_cpu_ci->info_list;
	unsigned int arch = cpu_architecture();

	/* CLIDR is not present before ARMv7/v7m */
	if (arch < CPU_ARCH_ARMv7)
		return -EOPNOTSUPP;

	for (idx = 0, level = 1; level <= this_cpu_ci->num_levels &&
	     idx < this_cpu_ci->num_leaves; idx++, level++) {
		type = get_cache_type(level);
		if (type == CACHE_TYPE_SEPARATE) {
			ci_leaf_init(this_leaf++, CACHE_TYPE_DATA, level);
			ci_leaf_init(this_leaf++, CACHE_TYPE_INST, level);
		} else {
			ci_leaf_init(this_leaf++, type, level);
		}
	}

	return 0;
}