Skip to content

Commit c23f864

Browse files
committed
Merge tag 'loongarch-fixes-6.0-1' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson
Pull LoongArch fixes from Huacai Chen: "Fix a bunch of build errors/warnings, a poweroff error and an unbalanced locking in do_page_fault()" * tag 'loongarch-fixes-6.0-1' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson: LoongArch: mm: Avoid unnecessary page fault retires on shared memory types LoongArch: Add subword xchg/cmpxchg emulation LoongArch: Cleanup headers to avoid circular dependency LoongArch: Cleanup reset routines with new API LoongArch: Fix build warnings in VDSO LoongArch: Select PCI_QUIRKS to avoid build error
2 parents 78effb4 + b83699e commit c23f864

File tree

13 files changed

+164
-98
lines changed

13 files changed

+164
-98
lines changed

arch/loongarch/Kconfig

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -111,6 +111,7 @@ config LOONGARCH
111111
select PCI_ECAM if ACPI
112112
select PCI_LOONGSON
113113
select PCI_MSI_ARCH_FALLBACKS
114+
select PCI_QUIRKS
114115
select PERF_USE_VMALLOC
115116
select RTC_LIB
116117
select SMP

arch/loongarch/include/asm/addrspace.h

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -109,4 +109,20 @@ extern unsigned long vm_map_base;
109109
*/
110110
#define PHYSADDR(a) ((_ACAST64_(a)) & TO_PHYS_MASK)
111111

112+
/*
113+
* On LoongArch, I/O ports mapping is as follows:
114+
*
115+
* | .... |
116+
* |-----------------------|
117+
* | pci io ports(16K~32M) |
118+
* |-----------------------|
119+
* | isa io ports(0 ~16K) |
120+
* PCI_IOBASE ->|-----------------------|
121+
* | .... |
122+
*/
123+
#define PCI_IOBASE ((void __iomem *)(vm_map_base + (2 * PAGE_SIZE)))
124+
#define PCI_IOSIZE SZ_32M
125+
#define ISA_IOSIZE SZ_16K
126+
#define IO_SPACE_LIMIT (PCI_IOSIZE - 1)
127+
112128
#endif /* _ASM_ADDRSPACE_H */

arch/loongarch/include/asm/cmpxchg.h

Lines changed: 97 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,8 +5,9 @@
55
#ifndef __ASM_CMPXCHG_H
66
#define __ASM_CMPXCHG_H
77

8-
#include <asm/barrier.h>
8+
#include <linux/bits.h>
99
#include <linux/build_bug.h>
10+
#include <asm/barrier.h>
1011

1112
#define __xchg_asm(amswap_db, m, val) \
1213
({ \
@@ -21,10 +22,53 @@
2122
__ret; \
2223
})
2324

25+
static inline unsigned int __xchg_small(volatile void *ptr, unsigned int val,
26+
unsigned int size)
27+
{
28+
unsigned int shift;
29+
u32 old32, mask, temp;
30+
volatile u32 *ptr32;
31+
32+
/* Mask value to the correct size. */
33+
mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
34+
val &= mask;
35+
36+
/*
37+
* Calculate a shift & mask that correspond to the value we wish to
38+
* exchange within the naturally aligned 4 byte integer that includes
39+
* it.
40+
*/
41+
shift = (unsigned long)ptr & 0x3;
42+
shift *= BITS_PER_BYTE;
43+
mask <<= shift;
44+
45+
/*
46+
* Calculate a pointer to the naturally aligned 4 byte integer that
47+
* includes our byte of interest, and load its value.
48+
*/
49+
ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);
50+
51+
asm volatile (
52+
"1: ll.w %0, %3 \n"
53+
" andn %1, %0, %z4 \n"
54+
" or %1, %1, %z5 \n"
55+
" sc.w %1, %2 \n"
56+
" beqz %1, 1b \n"
57+
: "=&r" (old32), "=&r" (temp), "=ZC" (*ptr32)
58+
: "ZC" (*ptr32), "Jr" (mask), "Jr" (val << shift)
59+
: "memory");
60+
61+
return (old32 & mask) >> shift;
62+
}
63+
2464
static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
2565
int size)
2666
{
2767
switch (size) {
68+
case 1:
69+
case 2:
70+
return __xchg_small(ptr, x, size);
71+
2872
case 4:
2973
return __xchg_asm("amswap_db.w", (volatile u32 *)ptr, (u32)x);
3074

@@ -67,10 +111,62 @@ static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
67111
__ret; \
68112
})
69113

114+
static inline unsigned int __cmpxchg_small(volatile void *ptr, unsigned int old,
115+
unsigned int new, unsigned int size)
116+
{
117+
unsigned int shift;
118+
u32 old32, mask, temp;
119+
volatile u32 *ptr32;
120+
121+
/* Mask inputs to the correct size. */
122+
mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
123+
old &= mask;
124+
new &= mask;
125+
126+
/*
127+
* Calculate a shift & mask that correspond to the value we wish to
128+
* compare & exchange within the naturally aligned 4 byte integer
129+
* that includes it.
130+
*/
131+
shift = (unsigned long)ptr & 0x3;
132+
shift *= BITS_PER_BYTE;
133+
old <<= shift;
134+
new <<= shift;
135+
mask <<= shift;
136+
137+
/*
138+
* Calculate a pointer to the naturally aligned 4 byte integer that
139+
* includes our byte of interest, and load its value.
140+
*/
141+
ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);
142+
143+
asm volatile (
144+
"1: ll.w %0, %3 \n"
145+
" and %1, %0, %z4 \n"
146+
" bne %1, %z5, 2f \n"
147+
" andn %1, %0, %z4 \n"
148+
" or %1, %1, %z6 \n"
149+
" sc.w %1, %2 \n"
150+
" beqz %1, 1b \n"
151+
" b 3f \n"
152+
"2: \n"
153+
__WEAK_LLSC_MB
154+
"3: \n"
155+
: "=&r" (old32), "=&r" (temp), "=ZC" (*ptr32)
156+
: "ZC" (*ptr32), "Jr" (mask), "Jr" (old), "Jr" (new)
157+
: "memory");
158+
159+
return (old32 & mask) >> shift;
160+
}
161+
70162
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
71163
unsigned long new, unsigned int size)
72164
{
73165
switch (size) {
166+
case 1:
167+
case 2:
168+
return __cmpxchg_small(ptr, old, new, size);
169+
74170
case 4:
75171
return __cmpxchg_asm("ll.w", "sc.w", (volatile u32 *)ptr,
76172
(u32)old, new);

arch/loongarch/include/asm/io.h

Lines changed: 0 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -7,34 +7,15 @@
77

88
#define ARCH_HAS_IOREMAP_WC
99

10-
#include <linux/compiler.h>
1110
#include <linux/kernel.h>
1211
#include <linux/types.h>
1312

1413
#include <asm/addrspace.h>
15-
#include <asm/bug.h>
16-
#include <asm/byteorder.h>
1714
#include <asm/cpu.h>
1815
#include <asm/page.h>
1916
#include <asm/pgtable-bits.h>
2017
#include <asm/string.h>
2118

22-
/*
23-
* On LoongArch, I/O ports mapping is as follows:
24-
*
25-
* | .... |
26-
* |-----------------------|
27-
* | pci io ports(64K~32M) |
28-
* |-----------------------|
29-
* | isa io ports(0 ~16K) |
30-
* PCI_IOBASE ->|-----------------------|
31-
* | .... |
32-
*/
33-
#define PCI_IOBASE ((void __iomem *)(vm_map_base + (2 * PAGE_SIZE)))
34-
#define PCI_IOSIZE SZ_32M
35-
#define ISA_IOSIZE SZ_16K
36-
#define IO_SPACE_LIMIT (PCI_IOSIZE - 1)
37-
3819
/*
3920
* Change "struct page" to physical address.
4021
*/

arch/loongarch/include/asm/page.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -95,7 +95,7 @@ static inline int pfn_valid(unsigned long pfn)
9595

9696
#endif
9797

98-
#define virt_to_pfn(kaddr) PFN_DOWN(virt_to_phys((void *)(kaddr)))
98+
#define virt_to_pfn(kaddr) PFN_DOWN(PHYSADDR(kaddr))
9999
#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
100100

101101
extern int __virt_addr_valid(volatile void *kaddr);

arch/loongarch/include/asm/percpu.h

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -123,6 +123,10 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
123123
int size)
124124
{
125125
switch (size) {
126+
case 1:
127+
case 2:
128+
return __xchg_small((volatile void *)ptr, val, size);
129+
126130
case 4:
127131
return __xchg_asm("amswap.w", (volatile u32 *)ptr, (u32)val);
128132

@@ -204,9 +208,13 @@ do { \
204208
#define this_cpu_write_4(pcp, val) _percpu_write(pcp, val)
205209
#define this_cpu_write_8(pcp, val) _percpu_write(pcp, val)
206210

211+
#define this_cpu_xchg_1(pcp, val) _percpu_xchg(pcp, val)
212+
#define this_cpu_xchg_2(pcp, val) _percpu_xchg(pcp, val)
207213
#define this_cpu_xchg_4(pcp, val) _percpu_xchg(pcp, val)
208214
#define this_cpu_xchg_8(pcp, val) _percpu_xchg(pcp, val)
209215

216+
#define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
217+
#define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
210218
#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
211219
#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
212220

arch/loongarch/include/asm/pgtable.h

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -59,7 +59,6 @@
5959
#include <linux/mm_types.h>
6060
#include <linux/mmzone.h>
6161
#include <asm/fixmap.h>
62-
#include <asm/io.h>
6362

6463
struct mm_struct;
6564
struct vm_area_struct;
@@ -145,7 +144,7 @@ static inline void set_p4d(p4d_t *p4d, p4d_t p4dval)
145144
*p4d = p4dval;
146145
}
147146

148-
#define p4d_phys(p4d) virt_to_phys((void *)p4d_val(p4d))
147+
#define p4d_phys(p4d) PHYSADDR(p4d_val(p4d))
149148
#define p4d_page(p4d) (pfn_to_page(p4d_phys(p4d) >> PAGE_SHIFT))
150149

151150
#endif
@@ -188,7 +187,7 @@ static inline pmd_t *pud_pgtable(pud_t pud)
188187

189188
#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while (0)
190189

191-
#define pud_phys(pud) virt_to_phys((void *)pud_val(pud))
190+
#define pud_phys(pud) PHYSADDR(pud_val(pud))
192191
#define pud_page(pud) (pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))
193192

194193
#endif
@@ -221,7 +220,7 @@ static inline void pmd_clear(pmd_t *pmdp)
221220

222221
#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while (0)
223222

224-
#define pmd_phys(pmd) virt_to_phys((void *)pmd_val(pmd))
223+
#define pmd_phys(pmd) PHYSADDR(pmd_val(pmd))
225224

226225
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
227226
#define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))

arch/loongarch/include/asm/reboot.h

Lines changed: 0 additions & 10 deletions
This file was deleted.

arch/loongarch/kernel/reset.c

Lines changed: 21 additions & 48 deletions
Original file line numberDiff line numberDiff line change
@@ -15,10 +15,16 @@
1515
#include <acpi/reboot.h>
1616
#include <asm/idle.h>
1717
#include <asm/loongarch.h>
18-
#include <asm/reboot.h>
1918

20-
static void default_halt(void)
19+
void (*pm_power_off)(void);
20+
EXPORT_SYMBOL(pm_power_off);
21+
22+
void machine_halt(void)
2123
{
24+
#ifdef CONFIG_SMP
25+
preempt_disable();
26+
smp_send_stop();
27+
#endif
2228
local_irq_disable();
2329
clear_csr_ecfg(ECFG0_IM);
2430

@@ -30,18 +36,29 @@ static void default_halt(void)
3036
}
3137
}
3238

33-
static void default_poweroff(void)
39+
void machine_power_off(void)
3440
{
41+
#ifdef CONFIG_SMP
42+
preempt_disable();
43+
smp_send_stop();
44+
#endif
45+
do_kernel_power_off();
3546
#ifdef CONFIG_EFI
3647
efi.reset_system(EFI_RESET_SHUTDOWN, EFI_SUCCESS, 0, NULL);
3748
#endif
49+
3850
while (true) {
3951
__arch_cpu_idle();
4052
}
4153
}
4254

43-
static void default_restart(void)
55+
void machine_restart(char *command)
4456
{
57+
#ifdef CONFIG_SMP
58+
preempt_disable();
59+
smp_send_stop();
60+
#endif
61+
do_kernel_restart(command);
4562
#ifdef CONFIG_EFI
4663
if (efi_capsule_pending(NULL))
4764
efi_reboot(REBOOT_WARM, NULL);
@@ -55,47 +72,3 @@ static void default_restart(void)
5572
__arch_cpu_idle();
5673
}
5774
}
58-
59-
void (*pm_restart)(void);
60-
EXPORT_SYMBOL(pm_restart);
61-
62-
void (*pm_power_off)(void);
63-
EXPORT_SYMBOL(pm_power_off);
64-
65-
void machine_halt(void)
66-
{
67-
#ifdef CONFIG_SMP
68-
preempt_disable();
69-
smp_send_stop();
70-
#endif
71-
default_halt();
72-
}
73-
74-
void machine_power_off(void)
75-
{
76-
#ifdef CONFIG_SMP
77-
preempt_disable();
78-
smp_send_stop();
79-
#endif
80-
pm_power_off();
81-
}
82-
83-
void machine_restart(char *command)
84-
{
85-
#ifdef CONFIG_SMP
86-
preempt_disable();
87-
smp_send_stop();
88-
#endif
89-
do_kernel_restart(command);
90-
pm_restart();
91-
}
92-
93-
static int __init loongarch_reboot_setup(void)
94-
{
95-
pm_restart = default_restart;
96-
pm_power_off = default_poweroff;
97-
98-
return 0;
99-
}
100-
101-
arch_initcall(loongarch_reboot_setup);

arch/loongarch/mm/fault.c

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -216,6 +216,10 @@ static void __kprobes __do_page_fault(struct pt_regs *regs,
216216
return;
217217
}
218218

219+
/* The fault is fully completed (including releasing mmap lock) */
220+
if (fault & VM_FAULT_COMPLETED)
221+
return;
222+
219223
if (unlikely(fault & VM_FAULT_RETRY)) {
220224
flags |= FAULT_FLAG_TRIED;
221225

0 commit comments

Comments
 (0)