Skip to content

Commit f36edc5

Browse files
committed
Merge tag 'arc-5.13-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc
Pull ARC fixes from Vineet Gupta:

 - PAE fixes

 - syscall num check off-by-one bug

 - misc fixes

* tag 'arc-5.13-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc:
  ARC: mm: Use max_high_pfn as a HIGHMEM zone border
  ARC: mm: PAE: use 40-bit physical page mask
  ARC: entry: fix off-by-one error in syscall number validation
  ARC: kgdb: add 'fallthrough' to prevent a warning
  arc: Fix typos/spellos
2 parents 8f4ae0f + 1d5e464 commit f36edc5

File tree

12 files changed

+41
-25
lines changed

12 files changed

+41
-25
lines changed

arch/arc/Makefile

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -31,7 +31,7 @@ endif
3131

3232

3333
ifdef CONFIG_ARC_CURR_IN_REG
34-
# For a global register defintion, make sure it gets passed to every file
34+
# For a global register definition, make sure it gets passed to every file
3535
# We had a customer reported bug where some code built in kernel was NOT using
3636
# any kernel headers, and missing the r25 global register
3737
# Can't do unconditionally because of recursive include issues

arch/arc/include/asm/cmpxchg.h

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -116,7 +116,7 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
116116
*
117117
* Technically the lock is also needed for UP (boils down to irq save/restore)
118118
* but we can cheat a bit since cmpxchg() atomic_ops_lock() would cause irqs to
119-
* be disabled thus can't possibly be interrpted/preempted/clobbered by xchg()
119+
* be disabled thus can't possibly be interrupted/preempted/clobbered by xchg()
120120
* Other way around, xchg is one instruction anyways, so can't be interrupted
121121
* as such
122122
*/
@@ -143,7 +143,7 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
143143
/*
144144
* "atomic" variant of xchg()
145145
* REQ: It needs to follow the same serialization rules as other atomic_xxx()
146-
* Since xchg() doesn't always do that, it would seem that following defintion
146+
* Since xchg() doesn't always do that, it would seem that following definition
147147
* is incorrect. But here's the rationale:
148148
* SMP : Even xchg() takes the atomic_ops_lock, so OK.
149149
* LLSC: atomic_ops_lock are not relevant at all (even if SMP, since LLSC

arch/arc/include/asm/page.h

Lines changed: 12 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -7,6 +7,18 @@
77

88
#include <uapi/asm/page.h>
99

10+
#ifdef CONFIG_ARC_HAS_PAE40
11+
12+
#define MAX_POSSIBLE_PHYSMEM_BITS 40
13+
#define PAGE_MASK_PHYS (0xff00000000ull | PAGE_MASK)
14+
15+
#else /* CONFIG_ARC_HAS_PAE40 */
16+
17+
#define MAX_POSSIBLE_PHYSMEM_BITS 32
18+
#define PAGE_MASK_PHYS PAGE_MASK
19+
20+
#endif /* CONFIG_ARC_HAS_PAE40 */
21+
1022
#ifndef __ASSEMBLY__
1123

1224
#define clear_page(paddr) memset((paddr), 0, PAGE_SIZE)

arch/arc/include/asm/pgtable.h

Lines changed: 3 additions & 9 deletions
Original file line number | Diff line number | Diff line change
@@ -107,8 +107,8 @@
107107
#define ___DEF (_PAGE_PRESENT | _PAGE_CACHEABLE)
108108

109109
/* Set of bits not changed in pte_modify */
110-
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_SPECIAL)
111-
110+
#define _PAGE_CHG_MASK (PAGE_MASK_PHYS | _PAGE_ACCESSED | _PAGE_DIRTY | \
111+
_PAGE_SPECIAL)
112112
/* More Abbrevaited helpers */
113113
#define PAGE_U_NONE __pgprot(___DEF)
114114
#define PAGE_U_R __pgprot(___DEF | _PAGE_READ)
@@ -132,13 +132,7 @@
132132
#define PTE_BITS_IN_PD0 (_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_HW_SZ)
133133
#define PTE_BITS_RWX (_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)
134134

135-
#ifdef CONFIG_ARC_HAS_PAE40
136-
#define PTE_BITS_NON_RWX_IN_PD1 (0xff00000000 | PAGE_MASK | _PAGE_CACHEABLE)
137-
#define MAX_POSSIBLE_PHYSMEM_BITS 40
138-
#else
139-
#define PTE_BITS_NON_RWX_IN_PD1 (PAGE_MASK | _PAGE_CACHEABLE)
140-
#define MAX_POSSIBLE_PHYSMEM_BITS 32
141-
#endif
135+
#define PTE_BITS_NON_RWX_IN_PD1 (PAGE_MASK_PHYS | _PAGE_CACHEABLE)
142136

143137
/**************************************************************************
144138
* Mapping of vm_flags (Generic VM) to PTE flags (arch specific)

arch/arc/include/uapi/asm/page.h

Lines changed: 0 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -33,5 +33,4 @@
3333

3434
#define PAGE_MASK (~(PAGE_SIZE-1))
3535

36-
3736
#endif /* _UAPI__ASM_ARC_PAGE_H */

arch/arc/kernel/entry.S

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -177,7 +177,7 @@ tracesys:
177177

178178
; Do the Sys Call as we normally would.
179179
; Validate the Sys Call number
180-
cmp r8, NR_syscalls
180+
cmp r8, NR_syscalls - 1
181181
mov.hi r0, -ENOSYS
182182
bhi tracesys_exit
183183

@@ -255,7 +255,7 @@ ENTRY(EV_Trap)
255255
;============ Normal syscall case
256256

257257
; syscall num shd not exceed the total system calls avail
258-
cmp r8, NR_syscalls
258+
cmp r8, NR_syscalls - 1
259259
mov.hi r0, -ENOSYS
260260
bhi .Lret_from_system_call
261261

arch/arc/kernel/kgdb.c

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -140,6 +140,7 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
140140
ptr = &remcomInBuffer[1];
141141
if (kgdb_hex2long(&ptr, &addr))
142142
regs->ret = addr;
143+
fallthrough;
143144

144145
case 'D':
145146
case 'k':

arch/arc/kernel/process.c

Lines changed: 4 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -50,14 +50,14 @@ SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
5050
int ret;
5151

5252
/*
53-
* This is only for old cores lacking LLOCK/SCOND, which by defintion
53+
* This is only for old cores lacking LLOCK/SCOND, which by definition
5454
* can't possibly be SMP. Thus doesn't need to be SMP safe.
5555
* And this also helps reduce the overhead for serializing in
5656
* the UP case
5757
*/
5858
WARN_ON_ONCE(IS_ENABLED(CONFIG_SMP));
5959

60-
/* Z indicates to userspace if operation succeded */
60+
/* Z indicates to userspace if operation succeeded */
6161
regs->status32 &= ~STATUS_Z_MASK;
6262

6363
ret = access_ok(uaddr, sizeof(*uaddr));
@@ -107,7 +107,7 @@ SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
107107

108108
void arch_cpu_idle(void)
109109
{
110-
/* Re-enable interrupts <= default irq priority before commiting SLEEP */
110+
/* Re-enable interrupts <= default irq priority before committing SLEEP */
111111
const unsigned int arg = 0x10 | ARCV2_IRQ_DEF_PRIO;
112112

113113
__asm__ __volatile__(
@@ -120,7 +120,7 @@ void arch_cpu_idle(void)
120120

121121
void arch_cpu_idle(void)
122122
{
123-
/* sleep, but enable both set E1/E2 (levels of interrutps) before committing */
123+
/* sleep, but enable both set E1/E2 (levels of interrupts) before committing */
124124
__asm__ __volatile__("sleep 0x3 \n");
125125
}
126126

arch/arc/kernel/signal.c

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -259,7 +259,7 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
259259
regs->r2 = (unsigned long)&sf->uc;
260260

261261
/*
262-
* small optim to avoid unconditonally calling do_sigaltstack
262+
* small optim to avoid unconditionally calling do_sigaltstack
263263
* in sigreturn path, now that we only have rt_sigreturn
264264
*/
265265
magic = MAGIC_SIGALTSTK;
@@ -391,7 +391,7 @@ void do_signal(struct pt_regs *regs)
391391
void do_notify_resume(struct pt_regs *regs)
392392
{
393393
/*
394-
* ASM glue gaurantees that this is only called when returning to
394+
* ASM glue guarantees that this is only called when returning to
395395
* user mode
396396
*/
397397
if (test_thread_flag(TIF_NOTIFY_RESUME))

arch/arc/mm/init.c

Lines changed: 10 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -157,7 +157,16 @@ void __init setup_arch_memory(void)
157157
min_high_pfn = PFN_DOWN(high_mem_start);
158158
max_high_pfn = PFN_DOWN(high_mem_start + high_mem_sz);
159159

160-
max_zone_pfn[ZONE_HIGHMEM] = min_low_pfn;
160+
/*
161+
* max_high_pfn should be ok here for both HIGHMEM and HIGHMEM+PAE.
162+
* For HIGHMEM without PAE max_high_pfn should be less than
163+
* min_low_pfn to guarantee that these two regions don't overlap.
164+
* For PAE case highmem is greater than lowmem, so it is natural
165+
* to use max_high_pfn.
166+
*
167+
* In both cases, holes should be handled by pfn_valid().
168+
*/
169+
max_zone_pfn[ZONE_HIGHMEM] = max_high_pfn;
161170

162171
high_memory = (void *)(min_high_pfn << PAGE_SHIFT);
163172

0 commit comments

Comments (0)