|
| 1 | +/* |
| 2 | + * Copyright (c) 2023 - 2024 Advanced Micro Devices, Inc. (AMD) |
| 3 | + * Copyright (c) 2023 Alp Sayin <[email protected]> |
| 4 | + * |
| 5 | + * SPDX-License-Identifier: Apache-2.0 |
| 6 | + */ |
| 7 | + |
| 8 | +#include <errno.h> |
| 9 | +#include <zephyr/init.h> |
| 10 | +#include <zephyr/devicetree.h> |
| 11 | +#include <zephyr/drivers/timer/system_timer.h> |
| 12 | +#include <zephyr/sys_clock.h> |
| 13 | +#include <zephyr/arch/cpu.h> |
| 14 | +#include <zephyr/logging/log.h> |
| 15 | + |
LOG_MODULE_REGISTER(amd_timer);

#define DT_DRV_COMPAT amd_xps_timer_1_00_a

/* Register definitions (per-counter register map within the IP) */
#define XTC_TCSR_OFFSET 0 /* Control/Status register */
#define XTC_TLR_OFFSET 4 /* Load register */
#define XTC_TCR_OFFSET 8 /* Timer counter register */

/* Control status register mask */
#define XTC_CSR_CASC_MASK BIT(11)        /* Cascade two counters into one 64-bit timer */
#define XTC_CSR_ENABLE_ALL_MASK BIT(10)  /* Enable both counters at once */
#define XTC_CSR_ENABLE_PWM_MASK BIT(9)   /* PWM mode enable */
#define XTC_CSR_INT_OCCURRED_MASK BIT(8) /* Interrupt pending flag */
#define XTC_CSR_ENABLE_TMR_MASK BIT(7)   /* Counter enable */
#define XTC_CSR_ENABLE_INT_MASK BIT(6)   /* Interrupt enable */
#define XTC_CSR_LOAD_MASK BIT(5)         /* Load TLR into the counter */
#define XTC_CSR_AUTO_RELOAD_MASK BIT(4)  /* Auto-reload on rollover/compare */
#define XTC_CSR_EXT_CAPTURE_MASK BIT(3)  /* External capture trigger enable */
#define XTC_CSR_EXT_GENERATE_MASK BIT(2) /* External generate signal enable */
#define XTC_CSR_DOWN_COUNT_MASK BIT(1)   /* Count down instead of up */
#define XTC_CSR_CAPTURE_MODE_MASK BIT(0) /* Capture mode select */

/* Offset of second timer */
#define TIMER_REG_OFFSET 0x10

/*
 * Cycle count (of the clocksource counter) at the last tick announcement.
 * Single global: only meaningful for the instance selected as system timer.
 */
static uint32_t last_cycles;
| 43 | + |
/* Per-instance constant configuration, filled from devicetree. */
struct xilinx_timer_config {
	uint32_t instance;        /* devicetree instance number (for logging) */
	mem_addr_t base;          /* base address of the timer IP registers */
	uint32_t clock_rate;      /* input clock frequency in Hz */
	uint32_t cycles_per_tick; /* timer cycles per system tick */
	uint32_t irq;             /* interrupt line number */
	bool one_timer;           /* true if the IP has only one counter */
#if DT_ANY_INST_HAS_PROP_STATUS_OKAY(interrupts)
	/* Connects and enables the instance IRQ; only present when at least
	 * one instance has an `interrupts` property.
	 */
	void (*irq_config_func)(const struct device *dev);
#endif
};
| 55 | + |
/* private data */
struct xilinx_timer_data {
	uint32_t clocksource_offset; /* byte offset of the free-running cycle counter */
	uint32_t clockevent_offset;  /* byte offset of the tick-interrupt counter */
};

/* Pointing to timer instance which is system timer */
static const struct device *sys_dev;
| 64 | + |
| 65 | +static inline uint32_t xlnx_tmrctr_read32(const struct device *dev, uint32_t timer_offset, |
| 66 | + uint32_t offset) |
| 67 | +{ |
| 68 | + const struct xilinx_timer_config *config = dev->config; |
| 69 | + uint32_t reg = (uint32_t)(config->base) + timer_offset + offset; |
| 70 | + |
| 71 | + LOG_DBG("%s: 0x%x (base = 0x%lx, timer_offset = 0x%x, offset = 0x%x", |
| 72 | + __func__, reg, config->base, timer_offset, offset); |
| 73 | + |
| 74 | + return sys_read32(reg); |
| 75 | +} |
| 76 | + |
| 77 | +static inline void xlnx_tmrctr_write32(const struct device *dev, uint32_t timer_offset, |
| 78 | + uint32_t value, uint32_t offset) |
| 79 | +{ |
| 80 | + const struct xilinx_timer_config *config = dev->config; |
| 81 | + uint32_t reg = (uint32_t)(config->base) + timer_offset + offset; |
| 82 | + |
| 83 | + LOG_DBG("%s: 0x%x (base = 0x%lx, timer_offset = 0x%x, offset = 0x%x", |
| 84 | + __func__, reg, config->base, timer_offset, offset); |
| 85 | + |
| 86 | + sys_write32(value, reg); |
| 87 | +} |
| 88 | + |
/*
 * Read the current count of the clocksource (free-running) counter.
 *
 * NOTE(review): the `volatile` qualifier on the return type has no effect
 * and compilers commonly warn about it; left unchanged because a matching
 * prototype may exist in a header outside this file — confirm and drop both.
 */
volatile uint32_t xlnx_tmrctr_read_count(const struct device *dev)
{
	struct xilinx_timer_data *data = dev->data;

	return xlnx_tmrctr_read32(dev, data->clocksource_offset, XTC_TCR_OFFSET);
}
| 95 | + |
/*
 * Hardware cycle count of this timer instance (alias of the clocksource
 * counter value).
 *
 * NOTE(review): `volatile` on the return type is meaningless — see
 * xlnx_tmrctr_read_count(); kept only to match the existing declaration.
 */
volatile uint32_t xlnx_tmrctr_read_hw_cycle_count(const struct device *dev)
{
	return xlnx_tmrctr_read_count(dev);
}
| 100 | + |
| 101 | +static void xlnx_tmrctr_clear_interrupt(const struct device *dev) |
| 102 | +{ |
| 103 | + struct xilinx_timer_data *data = dev->data; |
| 104 | + |
| 105 | + uint32_t control_status_register = xlnx_tmrctr_read32(dev, data->clockevent_offset, |
| 106 | + XTC_TCSR_OFFSET); |
| 107 | + |
| 108 | + xlnx_tmrctr_write32(dev, data->clockevent_offset, |
| 109 | + control_status_register | XTC_CSR_INT_OCCURRED_MASK, XTC_TCSR_OFFSET); |
| 110 | +} |
| 111 | + |
| 112 | +static void xlnx_tmrctr_irq_handler(const struct device *dev) |
| 113 | +{ |
| 114 | + uint32_t cycles; |
| 115 | + uint32_t delta_ticks; |
| 116 | + const struct xilinx_timer_config *config = dev->config; |
| 117 | + |
| 118 | + cycles = xlnx_tmrctr_read_count(dev); |
| 119 | + /* Calculate the number of ticks since last announcement */ |
| 120 | + delta_ticks = (cycles - last_cycles) / config->cycles_per_tick; |
| 121 | + /* Update last cycles count without the rounding error */ |
| 122 | + last_cycles += (delta_ticks * config->cycles_per_tick); |
| 123 | + |
| 124 | + if (sys_dev == dev) { |
| 125 | + /* Announce to the kernel */ |
| 126 | + sys_clock_announce(delta_ticks); |
| 127 | + } |
| 128 | + |
| 129 | + xlnx_tmrctr_clear_interrupt(dev); |
| 130 | +} |
| 131 | + |
/*
 * Ticks elapsed since the last sys_clock_announce().
 *
 * Always 0: this driver announces every tick from the timer ISR and does
 * not implement tickless operation.
 */
uint32_t sys_clock_elapsed(void)
{
	return 0;
}
| 136 | + |
/*
 * Kernel hook: current hardware cycle count of the system timer instance.
 *
 * NOTE(review): if no two-counter instance was initialized, sys_dev is NULL
 * and this dereferences it — verify that configuration guarantees a system
 * timer instance exists before this is called.
 */
uint32_t sys_clock_cycle_get_32(void)
{
	return xlnx_tmrctr_read_hw_cycle_count(sys_dev);
}
| 141 | + |
| 142 | +static int xlnx_tmrctr_initialize(const struct device *dev) |
| 143 | +{ |
| 144 | + const struct xilinx_timer_config *config = dev->config; |
| 145 | + uint32_t num_counters = config->one_timer ? 1 : 2; |
| 146 | + |
| 147 | + for (uint8_t counter_number = 0; counter_number < num_counters; counter_number++) { |
| 148 | + uint32_t reg_offset = counter_number * TIMER_REG_OFFSET; |
| 149 | + |
| 150 | + /* Set the compare register to 0. */ |
| 151 | + xlnx_tmrctr_write32(dev, reg_offset, 0, XTC_TLR_OFFSET); |
| 152 | + /* Reset the timer and the interrupt. */ |
| 153 | + xlnx_tmrctr_write32(dev, reg_offset, XTC_CSR_INT_OCCURRED_MASK | XTC_CSR_LOAD_MASK, |
| 154 | + XTC_TCSR_OFFSET); |
| 155 | + /* Release the reset. */ |
| 156 | + xlnx_tmrctr_write32(dev, reg_offset, 0, XTC_TCSR_OFFSET); |
| 157 | + } |
| 158 | + |
| 159 | + return 0; |
| 160 | +} |
| 161 | + |
| 162 | +static inline void xlnx_tmrctr_set_reset_value(const struct device *dev, uint8_t counter_number, |
| 163 | + uint32_t reset_value) |
| 164 | +{ |
| 165 | + xlnx_tmrctr_write32(dev, counter_number, reset_value, XTC_TLR_OFFSET); |
| 166 | +} |
| 167 | + |
| 168 | +static inline void xlnx_tmrctr_set_options(const struct device *dev, uint8_t counter_number, |
| 169 | + uint32_t options) |
| 170 | +{ |
| 171 | + xlnx_tmrctr_write32(dev, counter_number, options, XTC_TCSR_OFFSET); |
| 172 | +} |
| 173 | + |
| 174 | +static int xlnx_tmrctr_start(const struct device *dev) |
| 175 | +{ |
| 176 | + struct xilinx_timer_data *data = dev->data; |
| 177 | + uint32_t control_status_register; |
| 178 | + |
| 179 | + control_status_register = xlnx_tmrctr_read32(dev, data->clockevent_offset, |
| 180 | + XTC_TCSR_OFFSET); |
| 181 | + xlnx_tmrctr_write32(dev, data->clockevent_offset, XTC_CSR_LOAD_MASK, XTC_TCSR_OFFSET); |
| 182 | + xlnx_tmrctr_write32(dev, data->clockevent_offset, |
| 183 | + control_status_register | XTC_CSR_ENABLE_TMR_MASK, XTC_TCSR_OFFSET); |
| 184 | + |
| 185 | + |
| 186 | + control_status_register = xlnx_tmrctr_read32(dev, data->clocksource_offset, |
| 187 | + XTC_TCSR_OFFSET); |
| 188 | + xlnx_tmrctr_write32(dev, data->clocksource_offset, XTC_CSR_LOAD_MASK, XTC_TCSR_OFFSET); |
| 189 | + xlnx_tmrctr_write32(dev, data->clocksource_offset, |
| 190 | + control_status_register | XTC_CSR_ENABLE_TMR_MASK, XTC_TCSR_OFFSET); |
| 191 | + |
| 192 | + return 0; |
| 193 | +} |
| 194 | + |
| 195 | +static int xilinx_timer_init(const struct device *dev) |
| 196 | +{ |
| 197 | + int status; |
| 198 | + const struct xilinx_timer_config *config = dev->config; |
| 199 | + struct xilinx_timer_data *data = dev->data; |
| 200 | + |
| 201 | + LOG_DBG("%s: %d: Timer init at base 0x%lx, IRQ %d, clock %d, one_timer %d", |
| 202 | + __func__, config->instance, config->base, config->irq, config->clock_rate, |
| 203 | + config->one_timer); |
| 204 | + |
| 205 | + /* Initialize both timers - pretty much timer reset */ |
| 206 | + status = xlnx_tmrctr_initialize(dev); |
| 207 | + if (status != 0) { |
| 208 | + return status; |
| 209 | + } |
| 210 | + |
| 211 | + if (!sys_dev && !config->one_timer) { |
| 212 | + /* Doing assignment which timer is clockevent/clocksource by it's offset in IP */ |
| 213 | + data->clockevent_offset = 0; |
| 214 | + data->clocksource_offset = TIMER_REG_OFFSET; |
| 215 | + |
| 216 | + xlnx_tmrctr_set_reset_value(dev, data->clockevent_offset, config->cycles_per_tick); |
| 217 | + xlnx_tmrctr_set_options(dev, data->clockevent_offset, |
| 218 | + XTC_CSR_ENABLE_INT_MASK | |
| 219 | + XTC_CSR_AUTO_RELOAD_MASK | |
| 220 | + XTC_CSR_DOWN_COUNT_MASK); |
| 221 | + |
| 222 | + xlnx_tmrctr_set_options(dev, data->clocksource_offset, XTC_CSR_AUTO_RELOAD_MASK); |
| 223 | + |
| 224 | + status = xlnx_tmrctr_start(dev); |
| 225 | + if (status != 0) { |
| 226 | + return status; |
| 227 | + } |
| 228 | + |
| 229 | + last_cycles = xlnx_tmrctr_read_hw_cycle_count(dev); |
| 230 | + |
| 231 | + /* Assigning this instance as default timer */ |
| 232 | + sys_dev = dev; |
| 233 | + } |
| 234 | + |
| 235 | + if (config->irq_config_func != NULL) { |
| 236 | + config->irq_config_func(dev); |
| 237 | + } |
| 238 | + |
| 239 | + return 0; |
| 240 | +} |
| 241 | + |
/*
 * Per-instance expansion: an IRQ configuration function (only when the
 * instance has an `interrupts` property), the data and config structs, the
 * device definition at PRE_KERNEL_2, and compile-time checks that the timer
 * clock frequency is compatible with the configured system tick rate.
 *
 * NOTE(review): the first BUILD_ASSERT tests `>=` but its message says
 * "greater than" — confirm which is intended before changing either.
 */
#define XILINX_TIMER_INIT(inst) \
	IF_ENABLED(DT_INST_NODE_HAS_PROP(inst, interrupts), \
	(static void xilinx_timer_##inst##_irq_config(const struct device *dev) \
	{ \
		ARG_UNUSED(dev); \
		IRQ_CONNECT(DT_INST_IRQN(inst), DT_INST_IRQ(inst, priority), \
			    xlnx_tmrctr_irq_handler, DEVICE_DT_INST_GET(inst), 0); \
		irq_enable(DT_INST_IRQN(inst)); \
	})) \
	\
	static struct xilinx_timer_data xilinx_timer_##inst##_data; \
	\
	static const struct xilinx_timer_config xilinx_timer_##inst##_cfg = { \
		.instance = inst, \
		.base = DT_INST_REG_ADDR(inst), \
		.clock_rate = DT_INST_PROP_BY_PHANDLE(inst, clocks, clock_frequency), \
		.cycles_per_tick = DT_INST_PROP_BY_PHANDLE(inst, clocks, clock_frequency) / \
				   CONFIG_SYS_CLOCK_TICKS_PER_SEC, \
		.irq = DT_INST_IRQN(inst), \
		.one_timer = DT_INST_PROP_OR(inst, xlnx_one_timer_only, 0), \
		IF_ENABLED(DT_INST_NODE_HAS_PROP(inst, interrupts), \
		(.irq_config_func = xilinx_timer_##inst##_irq_config)) \
	}; \
	DEVICE_DT_INST_DEFINE(inst, &xilinx_timer_init, NULL, &xilinx_timer_##inst##_data, \
			      &xilinx_timer_##inst##_cfg, PRE_KERNEL_2, \
			      CONFIG_SYSTEM_CLOCK_INIT_PRIORITY, NULL); \
	\
	BUILD_ASSERT(DT_INST_PROP_BY_PHANDLE(inst, clocks, clock_frequency) >= \
		     CONFIG_SYS_CLOCK_TICKS_PER_SEC, \
		     "Timer clock frequency must be greater than the system tick frequency"); \
	BUILD_ASSERT((DT_INST_PROP_BY_PHANDLE(inst, clocks, clock_frequency) % \
		      CONFIG_SYS_CLOCK_TICKS_PER_SEC) == 0, \
		     "Timer clock frequency is not divisible by the system tick frequency"); \
	BUILD_ASSERT((CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC % \
		      DT_INST_PROP_BY_PHANDLE(inst, clocks, clock_frequency)) == 0, \
		     "CPU clock frequency is not divisible by the Timer clock frequency");

/* Instantiate the driver for every enabled devicetree node */
DT_INST_FOREACH_STATUS_OKAY(XILINX_TIMER_INIT)