/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * {read,write}{b,w,l,q} based on arch/arm64/include/asm/io.h
 *   which was based on arch/arm/include/io.h
 *
 * Copyright (C) 1996-2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2014 Regents of the University of California
 */

#ifndef _ASM_RISCV_MMIO_H
#define _ASM_RISCV_MMIO_H

#include <linux/types.h>
#include <asm/mmiowb.h>

void __iomem *ioremap(phys_addr_t offset, unsigned long size);

/*
 * The RISC-V ISA doesn't yet specify how to query or modify PMAs, so we can't
 * change the properties of memory regions. This should be fixed by the
 * upcoming platform spec.
 */
#define ioremap_nocache(addr, size) ioremap((addr), (size))
#define ioremap_wc(addr, size) ioremap((addr), (size))
#define ioremap_wt(addr, size) ioremap((addr), (size))

void iounmap(volatile void __iomem *addr);

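/*
 * Editor's usage sketch (not part of the original header; the base address
 * and register offset below are hypothetical): a driver typically brackets
 * its MMIO accesses with ioremap()/iounmap():
 *
 *	void __iomem *regs = ioremap(0x10000000, 0x1000);
 *
 *	if (regs) {
 *		u32 id = readl(regs + 0x04);	// hypothetical ID register
 *		iounmap(regs);
 *	}
 */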
/* Generic IO read/write. These perform native-endian accesses. */
#define __raw_writeb __raw_writeb
static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
{
	asm volatile("sb %0, 0(%1)" : : "r" (val), "r" (addr));
}

#define __raw_writew __raw_writew
static inline void __raw_writew(u16 val, volatile void __iomem *addr)
{
	asm volatile("sh %0, 0(%1)" : : "r" (val), "r" (addr));
}

#define __raw_writel __raw_writel
static inline void __raw_writel(u32 val, volatile void __iomem *addr)
{
	asm volatile("sw %0, 0(%1)" : : "r" (val), "r" (addr));
}

#ifdef CONFIG_64BIT
#define __raw_writeq __raw_writeq
static inline void __raw_writeq(u64 val, volatile void __iomem *addr)
{
	asm volatile("sd %0, 0(%1)" : : "r" (val), "r" (addr));
}
#endif

#define __raw_readb __raw_readb
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
	u8 val;

	asm volatile("lb %0, 0(%1)" : "=r" (val) : "r" (addr));
	return val;
}

#define __raw_readw __raw_readw
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
	u16 val;

	asm volatile("lh %0, 0(%1)" : "=r" (val) : "r" (addr));
	return val;
}

#define __raw_readl __raw_readl
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
	u32 val;

	asm volatile("lw %0, 0(%1)" : "=r" (val) : "r" (addr));
	return val;
}

#ifdef CONFIG_64BIT
#define __raw_readq __raw_readq
static inline u64 __raw_readq(const volatile void __iomem *addr)
{
	u64 val;

	asm volatile("ld %0, 0(%1)" : "=r" (val) : "r" (addr));
	return val;
}
#endif
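/*
 * Editor's note (not part of the original header): each raw accessor emits
 * exactly one load or store instruction, with no fences and no byte
 * swapping, so it makes no ordering or endianness guarantees. On RV64 the
 * hypothetical doorbell write below compiles to a single "sd":
 *
 *	__raw_writeq(1, db_addr);	// db_addr is a hypothetical register
 */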

/*
 * Unordered I/O memory access primitives. These are even more relaxed than
 * the relaxed versions, as they don't even order accesses between successive
 * operations to the I/O regions.
 */
#define readb_cpu(c)		({ u8  __r = __raw_readb(c); __r; })
#define readw_cpu(c)		({ u16 __r = le16_to_cpu((__force __le16)__raw_readw(c)); __r; })
#define readl_cpu(c)		({ u32 __r = le32_to_cpu((__force __le32)__raw_readl(c)); __r; })

#define writeb_cpu(v, c)	((void)__raw_writeb((v), (c)))
#define writew_cpu(v, c)	((void)__raw_writew((__force u16)cpu_to_le16(v), (c)))
#define writel_cpu(v, c)	((void)__raw_writel((__force u32)cpu_to_le32(v), (c)))

#ifdef CONFIG_64BIT
#define readq_cpu(c)		({ u64 __r = le64_to_cpu((__force __le64)__raw_readq(c)); __r; })
#define writeq_cpu(v, c)	((void)__raw_writeq((__force u64)cpu_to_le64(v), (c)))
#endif
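/*
 * Editor's note (not part of the original header): the *_cpu() helpers add
 * only endianness handling on top of the raw accessors. Since RISC-V Linux
 * runs little-endian, le32_to_cpu() is a no-op and the hypothetical read
 * below is still a bare "lw" with no fences:
 *
 *	u32 status = readl_cpu(status_reg);	// status_reg is hypothetical
 */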

/*
 * Relaxed I/O memory access primitives. These follow the Device memory
 * ordering rules but do not guarantee any ordering relative to Normal memory
 * accesses. These are defined to order the indicated access (either a read or
 * write) with all other I/O memory accesses. Since the platform specification
 * defines that all I/O regions are strongly ordered on channel 2, no explicit
 * fences are required to enforce this ordering.
 */
/* FIXME: These are now the same as asm-generic */
#define __io_rbr()		do {} while (0)
#define __io_rar()		do {} while (0)
#define __io_rbw()		do {} while (0)
#define __io_raw()		do {} while (0)

#define readb_relaxed(c)	({ u8  __v; __io_rbr(); __v = readb_cpu(c); __io_rar(); __v; })
#define readw_relaxed(c)	({ u16 __v; __io_rbr(); __v = readw_cpu(c); __io_rar(); __v; })
#define readl_relaxed(c)	({ u32 __v; __io_rbr(); __v = readl_cpu(c); __io_rar(); __v; })

#define writeb_relaxed(v, c)	({ __io_rbw(); writeb_cpu((v), (c)); __io_raw(); })
#define writew_relaxed(v, c)	({ __io_rbw(); writew_cpu((v), (c)); __io_raw(); })
#define writel_relaxed(v, c)	({ __io_rbw(); writel_cpu((v), (c)); __io_raw(); })

#ifdef CONFIG_64BIT
#define readq_relaxed(c)	({ u64 __v; __io_rbr(); __v = readq_cpu(c); __io_rar(); __v; })
#define writeq_relaxed(v, c)	({ __io_rbw(); writeq_cpu((v), (c)); __io_raw(); })
#endif
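/*
 * Editor's usage sketch (not part of the original header; STATUS and
 * STATUS_READY are hypothetical): the _relaxed variants fit cases where only
 * ordering among MMIO accesses matters, such as polling a status register:
 *
 *	while (!(readl_relaxed(regs + STATUS) & STATUS_READY))
 *		cpu_relax();
 */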

/*
 * I/O memory access primitives. Reads are ordered relative to any
 * following Normal memory access. Writes are ordered relative to any prior
 * Normal memory access. The memory barriers here are necessary as RISC-V
 * doesn't define any ordering between the memory space and the I/O space.
 */
#define __io_br()	do {} while (0)
#define __io_ar(v)	__asm__ __volatile__ ("fence i,r" : : : "memory")
#define __io_bw()	__asm__ __volatile__ ("fence w,o" : : : "memory")
#define __io_aw()	mmiowb_set_pending()

#define readb(c)	({ u8  __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; })
#define readw(c)	({ u16 __v; __io_br(); __v = readw_cpu(c); __io_ar(__v); __v; })
#define readl(c)	({ u32 __v; __io_br(); __v = readl_cpu(c); __io_ar(__v); __v; })

#define writeb(v, c)	({ __io_bw(); writeb_cpu((v), (c)); __io_aw(); })
#define writew(v, c)	({ __io_bw(); writew_cpu((v), (c)); __io_aw(); })
#define writel(v, c)	({ __io_bw(); writel_cpu((v), (c)); __io_aw(); })

#ifdef CONFIG_64BIT
#define readq(c)	({ u64 __v; __io_br(); __v = readq_cpu(c); __io_ar(__v); __v; })
#define writeq(v, c)	({ __io_bw(); writeq_cpu((v), (c)); __io_aw(); })
#endif
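/*
 * Editor's ordering sketch (not part of the original header; the descriptor
 * and DOORBELL names are hypothetical): the "fence w,o" in __io_bw() is what
 * guarantees the Normal memory stores below are visible to the device before
 * the doorbell MMIO store reaches it:
 *
 *	desc->addr = dma_addr;		// Normal memory store
 *	desc->len  = len;		// Normal memory store
 *	writel(1, regs + DOORBELL);	// fence w,o; then MMIO store
 */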

#endif /* _ASM_RISCV_MMIO_H */