Skip to content

Commit 0c3ac28

Browse files
riscv: separate MMIO functions into their own header file
Separate the low-level MMIO static inline functions and macros, such as {read,write}{b,w,l,q}(), into their own header file under arch/riscv/include: asm/mmio.h. This is done to break a header dependency chain that arises when both asm/pgtable.h and asm/io.h are included by asm/timex.h.

Since the problem is related to the legacy I/O port support in asm/io.h, this allows files under arch/riscv that encounter those issues to simply include asm/mmio.h instead, and bypass the legacy I/O port functions. Existing users of asm/io.h don't need to change anything, since asm/mmio.h is included by asm/io.h.

While here, clean up some checkpatch.pl-related issues with the original code.

Signed-off-by: Paul Walmsley <[email protected]>
1 parent 86fe639 commit 0c3ac28

File tree

2 files changed

+167
-144
lines changed

2 files changed

+167
-144
lines changed

arch/riscv/include/asm/io.h

Lines changed: 3 additions & 144 deletions
Original file line number | Diff line number | Diff line change
@@ -15,152 +15,11 @@
1515
#include <asm/mmiowb.h>
1616
#include <asm/pgtable.h>
1717

18-
extern void __iomem *ioremap(phys_addr_t offset, unsigned long size);
19-
20-
/*
21-
* The RISC-V ISA doesn't yet specify how to query or modify PMAs, so we can't
22-
* change the properties of memory regions. This should be fixed by the
23-
* upcoming platform spec.
24-
*/
25-
#define ioremap_nocache(addr, size) ioremap((addr), (size))
26-
#define ioremap_wc(addr, size) ioremap((addr), (size))
27-
#define ioremap_wt(addr, size) ioremap((addr), (size))
28-
29-
extern void iounmap(volatile void __iomem *addr);
30-
31-
/* Generic IO read/write. These perform native-endian accesses. */
32-
#define __raw_writeb __raw_writeb
33-
static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
34-
{
35-
asm volatile("sb %0, 0(%1)" : : "r" (val), "r" (addr));
36-
}
37-
38-
#define __raw_writew __raw_writew
39-
static inline void __raw_writew(u16 val, volatile void __iomem *addr)
40-
{
41-
asm volatile("sh %0, 0(%1)" : : "r" (val), "r" (addr));
42-
}
43-
44-
#define __raw_writel __raw_writel
45-
static inline void __raw_writel(u32 val, volatile void __iomem *addr)
46-
{
47-
asm volatile("sw %0, 0(%1)" : : "r" (val), "r" (addr));
48-
}
49-
50-
#ifdef CONFIG_64BIT
51-
#define __raw_writeq __raw_writeq
52-
static inline void __raw_writeq(u64 val, volatile void __iomem *addr)
53-
{
54-
asm volatile("sd %0, 0(%1)" : : "r" (val), "r" (addr));
55-
}
56-
#endif
57-
58-
#define __raw_readb __raw_readb
59-
static inline u8 __raw_readb(const volatile void __iomem *addr)
60-
{
61-
u8 val;
62-
63-
asm volatile("lb %0, 0(%1)" : "=r" (val) : "r" (addr));
64-
return val;
65-
}
66-
67-
#define __raw_readw __raw_readw
68-
static inline u16 __raw_readw(const volatile void __iomem *addr)
69-
{
70-
u16 val;
71-
72-
asm volatile("lh %0, 0(%1)" : "=r" (val) : "r" (addr));
73-
return val;
74-
}
75-
76-
#define __raw_readl __raw_readl
77-
static inline u32 __raw_readl(const volatile void __iomem *addr)
78-
{
79-
u32 val;
80-
81-
asm volatile("lw %0, 0(%1)" : "=r" (val) : "r" (addr));
82-
return val;
83-
}
84-
85-
#ifdef CONFIG_64BIT
86-
#define __raw_readq __raw_readq
87-
static inline u64 __raw_readq(const volatile void __iomem *addr)
88-
{
89-
u64 val;
90-
91-
asm volatile("ld %0, 0(%1)" : "=r" (val) : "r" (addr));
92-
return val;
93-
}
94-
#endif
95-
9618
/*
97-
* Unordered I/O memory access primitives. These are even more relaxed than
98-
* the relaxed versions, as they don't even order accesses between successive
99-
* operations to the I/O regions.
19+
* MMIO access functions are separated out to break dependency cycles
20+
* when using {read,write}* fns in low-level headers
10021
*/
101-
#define readb_cpu(c) ({ u8 __r = __raw_readb(c); __r; })
102-
#define readw_cpu(c) ({ u16 __r = le16_to_cpu((__force __le16)__raw_readw(c)); __r; })
103-
#define readl_cpu(c) ({ u32 __r = le32_to_cpu((__force __le32)__raw_readl(c)); __r; })
104-
105-
#define writeb_cpu(v,c) ((void)__raw_writeb((v),(c)))
106-
#define writew_cpu(v,c) ((void)__raw_writew((__force u16)cpu_to_le16(v),(c)))
107-
#define writel_cpu(v,c) ((void)__raw_writel((__force u32)cpu_to_le32(v),(c)))
108-
109-
#ifdef CONFIG_64BIT
110-
#define readq_cpu(c) ({ u64 __r = le64_to_cpu((__force __le64)__raw_readq(c)); __r; })
111-
#define writeq_cpu(v,c) ((void)__raw_writeq((__force u64)cpu_to_le64(v),(c)))
112-
#endif
113-
114-
/*
115-
* Relaxed I/O memory access primitives. These follow the Device memory
116-
* ordering rules but do not guarantee any ordering relative to Normal memory
117-
* accesses. These are defined to order the indicated access (either a read or
118-
* write) with all other I/O memory accesses. Since the platform specification
119-
* defines that all I/O regions are strongly ordered on channel 2, no explicit
120-
* fences are required to enforce this ordering.
121-
*/
122-
/* FIXME: These are now the same as asm-generic */
123-
#define __io_rbr() do {} while (0)
124-
#define __io_rar() do {} while (0)
125-
#define __io_rbw() do {} while (0)
126-
#define __io_raw() do {} while (0)
127-
128-
#define readb_relaxed(c) ({ u8 __v; __io_rbr(); __v = readb_cpu(c); __io_rar(); __v; })
129-
#define readw_relaxed(c) ({ u16 __v; __io_rbr(); __v = readw_cpu(c); __io_rar(); __v; })
130-
#define readl_relaxed(c) ({ u32 __v; __io_rbr(); __v = readl_cpu(c); __io_rar(); __v; })
131-
132-
#define writeb_relaxed(v,c) ({ __io_rbw(); writeb_cpu((v),(c)); __io_raw(); })
133-
#define writew_relaxed(v,c) ({ __io_rbw(); writew_cpu((v),(c)); __io_raw(); })
134-
#define writel_relaxed(v,c) ({ __io_rbw(); writel_cpu((v),(c)); __io_raw(); })
135-
136-
#ifdef CONFIG_64BIT
137-
#define readq_relaxed(c) ({ u64 __v; __io_rbr(); __v = readq_cpu(c); __io_rar(); __v; })
138-
#define writeq_relaxed(v,c) ({ __io_rbw(); writeq_cpu((v),(c)); __io_raw(); })
139-
#endif
140-
141-
/*
142-
* I/O memory access primitives. Reads are ordered relative to any
143-
* following Normal memory access. Writes are ordered relative to any prior
144-
* Normal memory access. The memory barriers here are necessary as RISC-V
145-
* doesn't define any ordering between the memory space and the I/O space.
146-
*/
147-
#define __io_br() do {} while (0)
148-
#define __io_ar(v) __asm__ __volatile__ ("fence i,r" : : : "memory");
149-
#define __io_bw() __asm__ __volatile__ ("fence w,o" : : : "memory");
150-
#define __io_aw() mmiowb_set_pending()
151-
152-
#define readb(c) ({ u8 __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; })
153-
#define readw(c) ({ u16 __v; __io_br(); __v = readw_cpu(c); __io_ar(__v); __v; })
154-
#define readl(c) ({ u32 __v; __io_br(); __v = readl_cpu(c); __io_ar(__v); __v; })
155-
156-
#define writeb(v,c) ({ __io_bw(); writeb_cpu((v),(c)); __io_aw(); })
157-
#define writew(v,c) ({ __io_bw(); writew_cpu((v),(c)); __io_aw(); })
158-
#define writel(v,c) ({ __io_bw(); writel_cpu((v),(c)); __io_aw(); })
159-
160-
#ifdef CONFIG_64BIT
161-
#define readq(c) ({ u64 __v; __io_br(); __v = readq_cpu(c); __io_ar(__v); __v; })
162-
#define writeq(v,c) ({ __io_bw(); writeq_cpu((v),(c)); __io_aw(); })
163-
#endif
22+
#include <asm/mmio.h>
16423

16524
/*
16625
* I/O port access constants.

arch/riscv/include/asm/mmio.h

Lines changed: 164 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -0,0 +1,164 @@
1+
/* SPDX-License-Identifier: GPL-2.0-only */
2+
/*
3+
* {read,write}{b,w,l,q} based on arch/arm64/include/asm/io.h
4+
* which was based on arch/arm/include/io.h
5+
*
6+
* Copyright (C) 1996-2000 Russell King
7+
* Copyright (C) 2012 ARM Ltd.
8+
* Copyright (C) 2014 Regents of the University of California
9+
*/
10+
11+
#ifndef _ASM_RISCV_MMIO_H
12+
#define _ASM_RISCV_MMIO_H
13+
14+
#include <linux/types.h>
15+
#include <asm/mmiowb.h>
16+
17+
void __iomem *ioremap(phys_addr_t offset, unsigned long size);
18+
19+
/*
20+
* The RISC-V ISA doesn't yet specify how to query or modify PMAs, so we can't
21+
* change the properties of memory regions. This should be fixed by the
22+
* upcoming platform spec.
23+
*/
24+
#define ioremap_nocache(addr, size) ioremap((addr), (size))
25+
#define ioremap_wc(addr, size) ioremap((addr), (size))
26+
#define ioremap_wt(addr, size) ioremap((addr), (size))
27+
28+
void iounmap(volatile void __iomem *addr);
29+
30+
/* Generic IO read/write. These perform native-endian accesses. */
31+
#define __raw_writeb __raw_writeb
32+
static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
33+
{
34+
asm volatile("sb %0, 0(%1)" : : "r" (val), "r" (addr));
35+
}
36+
37+
#define __raw_writew __raw_writew
38+
static inline void __raw_writew(u16 val, volatile void __iomem *addr)
39+
{
40+
asm volatile("sh %0, 0(%1)" : : "r" (val), "r" (addr));
41+
}
42+
43+
#define __raw_writel __raw_writel
44+
static inline void __raw_writel(u32 val, volatile void __iomem *addr)
45+
{
46+
asm volatile("sw %0, 0(%1)" : : "r" (val), "r" (addr));
47+
}
48+
49+
#ifdef CONFIG_64BIT
50+
#define __raw_writeq __raw_writeq
51+
static inline void __raw_writeq(u64 val, volatile void __iomem *addr)
52+
{
53+
asm volatile("sd %0, 0(%1)" : : "r" (val), "r" (addr));
54+
}
55+
#endif
56+
57+
#define __raw_readb __raw_readb
58+
static inline u8 __raw_readb(const volatile void __iomem *addr)
59+
{
60+
u8 val;
61+
62+
asm volatile("lb %0, 0(%1)" : "=r" (val) : "r" (addr));
63+
return val;
64+
}
65+
66+
#define __raw_readw __raw_readw
67+
static inline u16 __raw_readw(const volatile void __iomem *addr)
68+
{
69+
u16 val;
70+
71+
asm volatile("lh %0, 0(%1)" : "=r" (val) : "r" (addr));
72+
return val;
73+
}
74+
75+
#define __raw_readl __raw_readl
76+
static inline u32 __raw_readl(const volatile void __iomem *addr)
77+
{
78+
u32 val;
79+
80+
asm volatile("lw %0, 0(%1)" : "=r" (val) : "r" (addr));
81+
return val;
82+
}
83+
84+
#ifdef CONFIG_64BIT
85+
#define __raw_readq __raw_readq
86+
static inline u64 __raw_readq(const volatile void __iomem *addr)
87+
{
88+
u64 val;
89+
90+
asm volatile("ld %0, 0(%1)" : "=r" (val) : "r" (addr));
91+
return val;
92+
}
93+
#endif
94+
95+
/*
96+
* Unordered I/O memory access primitives. These are even more relaxed than
97+
* the relaxed versions, as they don't even order accesses between successive
98+
* operations to the I/O regions.
99+
*/
100+
#define readb_cpu(c) ({ u8 __r = __raw_readb(c); __r; })
101+
#define readw_cpu(c) ({ u16 __r = le16_to_cpu((__force __le16)__raw_readw(c)); __r; })
102+
#define readl_cpu(c) ({ u32 __r = le32_to_cpu((__force __le32)__raw_readl(c)); __r; })
103+
104+
#define writeb_cpu(v, c) ((void)__raw_writeb((v), (c)))
105+
#define writew_cpu(v, c) ((void)__raw_writew((__force u16)cpu_to_le16(v), (c)))
106+
#define writel_cpu(v, c) ((void)__raw_writel((__force u32)cpu_to_le32(v), (c)))
107+
108+
#ifdef CONFIG_64BIT
109+
#define readq_cpu(c) ({ u64 __r = le64_to_cpu((__force __le64)__raw_readq(c)); __r; })
110+
#define writeq_cpu(v, c) ((void)__raw_writeq((__force u64)cpu_to_le64(v), (c)))
111+
#endif
112+
113+
/*
114+
* Relaxed I/O memory access primitives. These follow the Device memory
115+
* ordering rules but do not guarantee any ordering relative to Normal memory
116+
* accesses. These are defined to order the indicated access (either a read or
117+
* write) with all other I/O memory accesses. Since the platform specification
118+
* defines that all I/O regions are strongly ordered on channel 2, no explicit
119+
* fences are required to enforce this ordering.
120+
*/
121+
/* FIXME: These are now the same as asm-generic */
122+
#define __io_rbr() do {} while (0)
123+
#define __io_rar() do {} while (0)
124+
#define __io_rbw() do {} while (0)
125+
#define __io_raw() do {} while (0)
126+
127+
#define readb_relaxed(c) ({ u8 __v; __io_rbr(); __v = readb_cpu(c); __io_rar(); __v; })
128+
#define readw_relaxed(c) ({ u16 __v; __io_rbr(); __v = readw_cpu(c); __io_rar(); __v; })
129+
#define readl_relaxed(c) ({ u32 __v; __io_rbr(); __v = readl_cpu(c); __io_rar(); __v; })
130+
131+
#define writeb_relaxed(v, c) ({ __io_rbw(); writeb_cpu((v), (c)); __io_raw(); })
132+
#define writew_relaxed(v, c) ({ __io_rbw(); writew_cpu((v), (c)); __io_raw(); })
133+
#define writel_relaxed(v, c) ({ __io_rbw(); writel_cpu((v), (c)); __io_raw(); })
134+
135+
#ifdef CONFIG_64BIT
136+
#define readq_relaxed(c) ({ u64 __v; __io_rbr(); __v = readq_cpu(c); __io_rar(); __v; })
137+
#define writeq_relaxed(v, c) ({ __io_rbw(); writeq_cpu((v), (c)); __io_raw(); })
138+
#endif
139+
140+
/*
141+
* I/O memory access primitives. Reads are ordered relative to any
142+
* following Normal memory access. Writes are ordered relative to any prior
143+
* Normal memory access. The memory barriers here are necessary as RISC-V
144+
* doesn't define any ordering between the memory space and the I/O space.
145+
*/
146+
#define __io_br() do {} while (0)
147+
#define __io_ar(v) __asm__ __volatile__ ("fence i,r" : : : "memory")
148+
#define __io_bw() __asm__ __volatile__ ("fence w,o" : : : "memory")
149+
#define __io_aw() mmiowb_set_pending()
150+
151+
#define readb(c) ({ u8 __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; })
152+
#define readw(c) ({ u16 __v; __io_br(); __v = readw_cpu(c); __io_ar(__v); __v; })
153+
#define readl(c) ({ u32 __v; __io_br(); __v = readl_cpu(c); __io_ar(__v); __v; })
154+
155+
#define writeb(v, c) ({ __io_bw(); writeb_cpu((v), (c)); __io_aw(); })
156+
#define writew(v, c) ({ __io_bw(); writew_cpu((v), (c)); __io_aw(); })
157+
#define writel(v, c) ({ __io_bw(); writel_cpu((v), (c)); __io_aw(); })
158+
159+
#ifdef CONFIG_64BIT
160+
#define readq(c) ({ u64 __v; __io_br(); __v = readq_cpu(c); __io_ar(__v); __v; })
161+
#define writeq(v, c) ({ __io_bw(); writeq_cpu((v), (c)); __io_aw(); })
162+
#endif
163+
164+
#endif /* _ASM_RISCV_MMIO_H */

0 commit comments

Comments (0)