Skip to content

Commit 7153c3c

Browse files
committed
LoongArch: Add misc common routines
Add some misc common routines for LoongArch, including: asm-offsets routines, futex functions, i/o memory access functions, frame-buffer functions, procfs information display, etc. Reviewed-by: WANG Xuerui <[email protected]> Reviewed-by: Jiaxun Yang <[email protected]> Signed-off-by: Huacai Chen <[email protected]>
1 parent fcdfe9d commit 7153c3c

File tree

7 files changed

+734
-0
lines changed

7 files changed

+734
-0
lines changed
Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
/* SPDX-License-Identifier: GPL-2.0 */
2+
/*
3+
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
4+
*/
5+
#include <generated/asm-offsets.h>

arch/loongarch/include/asm/fb.h

Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,23 @@
1+
/* SPDX-License-Identifier: GPL-2.0 */
2+
/*
3+
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
4+
*/
5+
#ifndef _ASM_FB_H_
6+
#define _ASM_FB_H_
7+
8+
#include <linux/fb.h>
9+
#include <linux/fs.h>
10+
#include <asm/page.h>
11+
12+
/*
 * fb_pgprotect() - set the page protection for a framebuffer mmap.
 * Marks the VMA write-combined so CPU writes to the framebuffer can be
 * batched instead of issued as individual uncached accesses.
 * @file and @off are unused on LoongArch.
 */
static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
				unsigned long off)
{
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
}
17+
18+
/*
 * fb_is_primary_device() - report whether @info is the primary display.
 * Always returns 0: no framebuffer is treated as the primary device on
 * LoongArch.
 */
static inline int fb_is_primary_device(struct fb_info *info)
{
	return 0;
}
22+
23+
#endif /* _ASM_FB_H_ */

arch/loongarch/include/asm/futex.h

Lines changed: 107 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,107 @@
1+
/* SPDX-License-Identifier: GPL-2.0 */
2+
/*
3+
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
4+
*/
5+
#ifndef _ASM_FUTEX_H
6+
#define _ASM_FUTEX_H
7+
8+
#include <linux/futex.h>
9+
#include <linux/uaccess.h>
10+
#include <asm/barrier.h>
11+
#include <asm/compiler.h>
12+
#include <asm/errno.h>
13+
14+
/*
 * __futex_atomic_op() - atomically read-modify-write a user futex word.
 *
 * LL/SC loop: ll.w loads the old value into %1 (@oldval), @insn computes
 * the new value into $t0, and sc.w retries from 1: until the conditional
 * store succeeds.  A fault at label 1: or 2: is routed by the __ex_table
 * entries to the fixup at 4:, which loads -EFAULT into %0 (@ret) and
 * branches past the sequence.  @ret starts at 0 (the "0" (0) input ties
 * its register to zero), so it is 0 unless the fixup runs.
 *
 * @insn:   assembly fragment computing the new value into $t0
 * @ret:    0 on success, -EFAULT on a faulting user access
 * @oldval: receives the previous value of the futex word
 * @uaddr:  user-space address of the futex word
 * @oparg:  operand made available to @insn as %z5
 *
 * Clobbers $t0.
 */
#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
{ \
	__asm__ __volatile__( \
	"1: ll.w %1, %4 # __futex_atomic_op\n" \
	" " insn " \n" \
	"2: sc.w $t0, %2 \n" \
	" beq $t0, $zero, 1b \n" \
	"3: \n" \
	" .section .fixup,\"ax\" \n" \
	"4: li.w %0, %6 \n" \
	" b 3b \n" \
	" .previous \n" \
	" .section __ex_table,\"a\" \n" \
	" "__UA_ADDR "\t1b, 4b \n" \
	" "__UA_ADDR "\t2b, 4b \n" \
	" .previous \n" \
	: "=r" (ret), "=&r" (oldval), \
	  "=ZC" (*uaddr) \
	: "0" (0), "ZC" (*uaddr), "Jr" (oparg), \
	  "i" (-EFAULT) \
	: "memory", "t0"); \
}
36+
37+
/*
 * arch_futex_atomic_op_inuser() - run a futex operation on a user word.
 * @op:    FUTEX_OP_* opcode selecting the modification
 * @oparg: operand for the operation (complemented here for FUTEX_OP_ANDN)
 * @oval:  out: previous value of the futex word, written only on success
 * @uaddr: user-space address of the futex word
 *
 * Executes with page faults disabled, so a bad user access resolves to
 * -EFAULT through the fixup in __futex_atomic_op instead of faulting in.
 *
 * Returns 0 on success, -EFAULT on a faulting user access, or -ENOSYS
 * for an unrecognized @op.
 */
static inline int
arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
{
	int oldval = 0, ret = 0;

	pagefault_disable();

	switch (op) {
	case FUTEX_OP_SET:
		/* new = oparg */
		__futex_atomic_op("move $t0, %z5", ret, oldval, uaddr, oparg);
		break;
	case FUTEX_OP_ADD:
		/* new = old + oparg */
		__futex_atomic_op("add.w $t0, %1, %z5", ret, oldval, uaddr, oparg);
		break;
	case FUTEX_OP_OR:
		/* new = old | oparg */
		__futex_atomic_op("or $t0, %1, %z5", ret, oldval, uaddr, oparg);
		break;
	case FUTEX_OP_ANDN:
		/* new = old & ~oparg — the complement is folded into the operand */
		__futex_atomic_op("and $t0, %1, %z5", ret, oldval, uaddr, ~oparg);
		break;
	case FUTEX_OP_XOR:
		/* new = old ^ oparg */
		__futex_atomic_op("xor $t0, %1, %z5", ret, oldval, uaddr, oparg);
		break;
	default:
		ret = -ENOSYS;
	}

	pagefault_enable();

	if (!ret)
		*oval = oldval;

	return ret;
}
71+
72+
/*
 * futex_atomic_cmpxchg_inatomic() - compare-and-exchange a user futex word.
 * @uval:   out: receives the value read from @uaddr (not written if the
 *          initial access_ok() check fails)
 * @uaddr:  user-space address of the futex word
 * @oldval: expected current value; the store happens only on a match
 * @newval: value stored when *@uaddr == @oldval
 *
 * LL/SC loop: ll.w reads the word; a mismatch with @oldval branches out
 * to 3:, otherwise sc.w attempts the store and the sequence retries from
 * 1: when the conditional store fails.  Faults at 1: or 2: are routed by
 * the __ex_table entries to the fixup at 4:, which sets the return value
 * to -EFAULT.
 *
 * Returns 0 on success (including a compare mismatch) or -EFAULT on a
 * bad user access.  Clobbers $t0.
 */
static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newval)
{
	int ret = 0;
	u32 val = 0;

	if (!access_ok(uaddr, sizeof(u32)))
		return -EFAULT;

	__asm__ __volatile__(
	"# futex_atomic_cmpxchg_inatomic \n"
	"1: ll.w %1, %3 \n"
	" bne %1, %z4, 3f \n"
	" or $t0, %z5, $zero \n"
	"2: sc.w $t0, %2 \n"
	" beq $zero, $t0, 1b \n"
	"3: \n"
	" .section .fixup,\"ax\" \n"
	"4: li.d %0, %6 \n"
	" b 3b \n"
	" .previous \n"
	" .section __ex_table,\"a\" \n"
	" "__UA_ADDR "\t1b, 4b \n"
	" "__UA_ADDR "\t2b, 4b \n"
	" .previous \n"
	: "+r" (ret), "=&r" (val), "=" GCC_OFF_SMALL_ASM() (*uaddr)
	: GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
	  "i" (-EFAULT)
	: "memory", "t0");

	*uval = val;

	return ret;
}
106+
107+
#endif /* _ASM_FUTEX_H */

arch/loongarch/include/asm/io.h

Lines changed: 129 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,129 @@
1+
/* SPDX-License-Identifier: GPL-2.0 */
2+
/*
3+
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
4+
*/
5+
#ifndef _ASM_IO_H
6+
#define _ASM_IO_H
7+
8+
#define ARCH_HAS_IOREMAP_WC
9+
10+
#include <linux/compiler.h>
11+
#include <linux/kernel.h>
12+
#include <linux/types.h>
13+
14+
#include <asm/addrspace.h>
15+
#include <asm/bug.h>
16+
#include <asm/byteorder.h>
17+
#include <asm/cpu.h>
18+
#include <asm/page.h>
19+
#include <asm/pgtable-bits.h>
20+
#include <asm/string.h>
21+
22+
/*
23+
 * On LoongArch, the mapping of I/O ports is as follows:
24+
*
25+
* | .... |
26+
* |-----------------------|
27+
* | pci io ports(64K~32M) |
28+
* |-----------------------|
29+
* | isa io ports(0 ~16K) |
30+
* PCI_IOBASE ->|-----------------------|
31+
* | .... |
32+
*/
33+
#define PCI_IOBASE ((void __iomem *)(vm_map_base + (2 * PAGE_SIZE)))
34+
#define PCI_IOSIZE SZ_32M
35+
#define ISA_IOSIZE SZ_16K
36+
#define IO_SPACE_LIMIT (PCI_IOSIZE - 1)
37+
38+
/*
39+
* Change "struct page" to physical address.
40+
*/
41+
#define page_to_phys(page) ((phys_addr_t)page_to_pfn(page) << PAGE_SHIFT)
42+
43+
extern void __init __iomem *early_ioremap(u64 phys_addr, unsigned long size);
44+
extern void __init early_iounmap(void __iomem *addr, unsigned long size);
45+
46+
#define early_memremap early_ioremap
47+
#define early_memunmap early_iounmap
48+
49+
/*
 * ioremap_prot() - translate a physical address into an I/O virtual address.
 * @offset:   physical (bus) address to map
 * @size:     size of the region (unused: the result is a direct window
 *            offset, not a per-region mapping)
 * @prot_val: cache attribute; _CACHE_CC selects the cached window, any
 *            other value the uncached one
 *
 * No page-table setup happens here — the returned address is simply
 * @offset relocated into the cached (CACHE_BASE) or uncached
 * (UNCACHE_BASE) direct-mapped window.
 */
static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
					 unsigned long prot_val)
{
	unsigned long base = (prot_val == _CACHE_CC) ? CACHE_BASE : UNCACHE_BASE;

	return (void __iomem *)(base + (unsigned long)offset);
}
57+
58+
/*
59+
* ioremap - map bus memory into CPU space
60+
* @offset: bus address of the memory
61+
* @size: size of the resource to map
62+
*
63+
* ioremap performs a platform specific sequence of operations to
64+
* make bus memory CPU accessible via the readb/readw/readl/writeb/
65+
* writew/writel functions and the other mmio helpers. The returned
66+
* address is not guaranteed to be usable directly as a virtual
67+
* address.
68+
*/
69+
#define ioremap(offset, size) \
70+
ioremap_prot((offset), (size), _CACHE_SUC)
71+
72+
/*
73+
* ioremap_wc - map bus memory into CPU space
74+
* @offset: bus address of the memory
75+
* @size: size of the resource to map
76+
*
77+
* ioremap_wc performs a platform specific sequence of operations to
78+
* make bus memory CPU accessible via the readb/readw/readl/writeb/
79+
* writew/writel functions and the other mmio helpers. The returned
80+
* address is not guaranteed to be usable directly as a virtual
81+
* address.
82+
*
83+
* This version of ioremap ensures that the memory is marked uncachable
84+
* but accelerated by means of write-combining feature. It is specifically
85+
 * useful for PCIe prefetchable windows, which may vastly improve
 * communication performance.  If it is determined at boot time that the
 * CPU's CCA does not support WUC, this method falls back to the
 * _CACHE_SUC option (see cpu_probe()).
89+
*/
90+
#define ioremap_wc(offset, size) \
91+
ioremap_prot((offset), (size), _CACHE_WUC)
92+
93+
/*
94+
* ioremap_cache - map bus memory into CPU space
95+
* @offset: bus address of the memory
96+
* @size: size of the resource to map
97+
*
98+
* ioremap_cache performs a platform specific sequence of operations to
99+
* make bus memory CPU accessible via the readb/readw/readl/writeb/
100+
* writew/writel functions and the other mmio helpers. The returned
101+
* address is not guaranteed to be usable directly as a virtual
102+
* address.
103+
*
104+
* This version of ioremap ensures that the memory is marked cachable by
105+
* the CPU. Also enables full write-combining. Useful for some
106+
* memory-like regions on I/O busses.
107+
*/
108+
#define ioremap_cache(offset, size) \
109+
ioremap_prot((offset), (size), _CACHE_CC)
110+
111+
/*
 * iounmap() - tear down an ioremap() mapping.
 * Deliberately a no-op: ioremap_prot() allocates nothing and builds no
 * page tables (it only offsets into the fixed cached/uncached windows),
 * so there is nothing to release.
 */
static inline void iounmap(const volatile void __iomem *addr)
{
}
114+
115+
#define mmiowb() asm volatile ("dbar 0" ::: "memory")
116+
117+
/*
118+
* String version of I/O memory access operations.
119+
*/
120+
extern void __memset_io(volatile void __iomem *dst, int c, size_t count);
121+
extern void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count);
122+
extern void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count);
123+
#define memset_io(c, v, l) __memset_io((c), (v), (l))
124+
#define memcpy_fromio(a, c, l) __memcpy_fromio((a), (c), (l))
125+
#define memcpy_toio(c, a, l) __memcpy_toio((c), (a), (l))
126+
127+
#include <asm-generic/io.h>
128+
129+
#endif /* _ASM_IO_H */

0 commit comments

Comments
 (0)