Commit a418417

alpha: drop pre-EV56 support
All EV4 machines are already gone, and the remaining EV5-based machines all support the slightly more modern EV56 generation as well. Debian only supports EV56 and later.

Drop both of these and build kernels optimized for EV56 and higher when the "generic" option is selected, tuning for an out-of-order EV6 pipeline, the same as Debian userspace.

Since this was the only supported architecture without 8-bit and 16-bit stores, common kernel code no longer has to worry about aligning struct members, and the existing workarounds in the block and tty layers can be removed. The alpha memory management code also no longer needs an abstraction for the differences between EV4 and EV5+.

Link: https://lists.debian.org/debian-alpha/2023/05/msg00009.html
Acked-by: Paul E. McKenney <[email protected]>
Acked-by: Matt Turner <[email protected]>
Signed-off-by: Arnd Bergmann <[email protected]>
1 parent 4bf8590 commit a418417
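A note on the struct-member point in the message above (illustration only; the struct below is hypothetical and not part of the patch): without the EV56 BWX extension, a byte store compiles to a read-modify-write of the whole containing aligned quadword, just like the ldq_u/insbl/mskbl/stq_u fallback removed from uaccess.h further down, so two CPUs writing adjacent byte-sized members could silently lose one of the updates unless common code padded the members apart.

/* Hypothetical layout, for illustration only -- not from the patch. */
struct shared_flags {
	char owned_by_cpu0;	/* written by one context */
	char owned_by_cpu1;	/* written concurrently by another */
};

/*
 * On a pre-BWX Alpha, "s->owned_by_cpu1 = 1;" becomes roughly:
 *
 *	ldq_u	t0, 0(s)	# load the containing quadword
 *	insbl	1, s, t1	# shift the new byte into place
 *	mskbl	t0, s, t0	# clear the old byte
 *	or	t0, t1, t0
 *	stq_u	t0, 0(s)	# store the whole quadword back
 *
 * A concurrent store to owned_by_cpu0 between the ldq_u and the stq_u
 * is lost.  With EV56 as the minimum, the compiler emits a plain stb,
 * the two members are updated independently, and the padding/alignment
 * workarounds in the block and tty layers become unnecessary.
 */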

13 files changed, +19 -317 lines changed


arch/alpha/Kconfig

Lines changed: 3 additions & 18 deletions
@@ -240,29 +240,14 @@ config ISA_DMA_API
 	bool
 	default y
 
-config ALPHA_EV4
-	bool
-
-config ALPHA_EV5
-	bool
-	default y if ALPHA_RX164 || ALPHA_RAWHIDE || ALPHA_MIATA || ALPHA_LX164 || ALPHA_SX164 || ALPHA_RUFFIAN || ALPHA_SABLE || ALPHA_NORITAKE || ALPHA_MIKASA || ALPHA_PC164 || ALPHA_TAKARA || ALPHA_EB164 || ALPHA_ALCOR
-
 config ALPHA_CIA
 	bool
 	depends on ALPHA_MIATA || ALPHA_LX164 || ALPHA_SX164 || ALPHA_RUFFIAN || ALPHA_NORITAKE || ALPHA_MIKASA || ALPHA_PC164 || ALPHA_TAKARA || ALPHA_ALCOR
 	default y
 
 config ALPHA_EV56
-	bool "EV56 CPU (speed >= 366MHz)?" if ALPHA_ALCOR
-	default y if ALPHA_RX164 || ALPHA_MIATA || ALPHA_LX164 || ALPHA_SX164 || ALPHA_RUFFIAN || ALPHA_PC164 || ALPHA_TAKARA
-
-config ALPHA_EV56
-	prompt "EV56 CPU (speed >= 333MHz)?"
-	depends on ALPHA_NORITAKE || ALPHA_MIKASA
-
-config ALPHA_EV56
-	prompt "EV56 CPU (speed >= 400MHz)?"
-	depends on ALPHA_RAWHIDE
+	bool
+	default y if ALPHA_ALCOR || ALPHA_RX164 || ALPHA_MIATA || ALPHA_LX164 || ALPHA_SX164 || ALPHA_RUFFIAN || ALPHA_PC164 || ALPHA_TAKARA || ALPHA_NORITAKE || ALPHA_MIKASA || ALPHA_RAWHIDE || ALPHA_SABLE
 
 config ALPHA_T2
 	bool
@@ -403,7 +388,7 @@ config ARCH_SPARSEMEM_ENABLE
 config ALPHA_WTINT
 	bool "Use WTINT" if ALPHA_SRM || ALPHA_GENERIC
 	default y if ALPHA_QEMU
-	default n if ALPHA_EV5 || ALPHA_EV56
+	default n if ALPHA_EV56
 	default n if !ALPHA_SRM && !ALPHA_GENERIC
 	default y if SMP
 	help

arch/alpha/Makefile

Lines changed: 2 additions & 6 deletions
@@ -15,18 +15,14 @@ CHECKFLAGS += -D__alpha__
 cflags-y := -pipe -mno-fp-regs -ffixed-8
 cflags-y += $(call cc-option, -fno-jump-tables)
 
-cpuflags-$(CONFIG_ALPHA_EV4) := -mcpu=ev4
-cpuflags-$(CONFIG_ALPHA_EV5) := -mcpu=ev5
 cpuflags-$(CONFIG_ALPHA_EV56) := -mcpu=ev56
 cpuflags-$(CONFIG_ALPHA_POLARIS) := -mcpu=pca56
 cpuflags-$(CONFIG_ALPHA_SX164) := -mcpu=pca56
 cpuflags-$(CONFIG_ALPHA_EV6) := -mcpu=ev6
 cpuflags-$(CONFIG_ALPHA_EV67) := -mcpu=ev67
 # If GENERIC, make sure to turn off any instruction set extensions that
-# the host compiler might have on by default.  Given that EV4 and EV5
-# have the same instruction set, prefer EV5 because an EV5 schedule is
-# more likely to keep an EV4 processor busy than vice-versa.
-cpuflags-$(CONFIG_ALPHA_GENERIC) := -mcpu=ev5
+# the host compiler might have on by default.
+cpuflags-$(CONFIG_ALPHA_GENERIC) := -mcpu=ev56 -mtune=ev6
 
 cflags-y += $(cpuflags-y)
 
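On the new GENERIC flags (my reading of the GCC Alpha options, not spelled out in the patch): -mcpu=ev56 selects both the instruction set, including the BWX byte/word memory instructions, and the default scheduling model, while -mtune=ev6 overrides only the scheduling. A GENERIC kernel therefore still runs on EV56-class machines but is ordered for the out-of-order EV6 pipeline. A minimal sketch of the codegen difference, using a hypothetical function:

/* Hypothetical example, not from the patch. */
void store_u16(unsigned short *p, unsigned short v)
{
	/*
	 * With -mcpu=ev56 or newer this is a single stw.  With the old
	 * -mcpu=ev5 it needed a load/insert/mask/store read-modify-write
	 * of the containing word, as in the __put_user_16() fallback
	 * removed from uaccess.h below.
	 */
	*p = v;
}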

arch/alpha/include/asm/elf.h

Lines changed: 1 addition & 3 deletions
@@ -133,9 +133,7 @@ extern int dump_elf_task(elf_greg_t *dest, struct task_struct *task);
 #define ELF_PLATFORM				\
 ({						\
 	enum implver_enum i_ = implver();	\
-	( i_ == IMPLVER_EV4 ? "ev4"		\
-	  : i_ == IMPLVER_EV5			\
-	    ? (amask(AMASK_BWX) ? "ev5" : "ev56") \
+	( i_ == IMPLVER_EV5 ? "ev56"		\
 	  : amask (AMASK_CIX) ? "ev6" : "ev67"); \
 })
 
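ELF_PLATFORM is what the kernel exports to user space through the AT_PLATFORM auxiliary-vector entry, so with this change an EV5-generation CPU is always reported as "ev56". A minimal user-space check (illustration only, not part of the patch):

#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
	/* Expected to print "ev56", "ev6" or "ev67" on a patched kernel. */
	const char *plat = (const char *)getauxval(AT_PLATFORM);

	printf("AT_PLATFORM: %s\n", plat ? plat : "(none)");
	return 0;
}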

arch/alpha/include/asm/machvec.h

Lines changed: 0 additions & 9 deletions
@@ -72,15 +72,6 @@ struct alpha_machine_vector
 	int (*mv_is_ioaddr)(unsigned long);
 	int (*mv_is_mmio)(const volatile void __iomem *);
 
-	void (*mv_switch_mm)(struct mm_struct *, struct mm_struct *,
-			     struct task_struct *);
-	void (*mv_activate_mm)(struct mm_struct *, struct mm_struct *);
-
-	void (*mv_flush_tlb_current)(struct mm_struct *);
-	void (*mv_flush_tlb_current_page)(struct mm_struct * mm,
-					  struct vm_area_struct *vma,
-					  unsigned long addr);
-
 	void (*update_irq_hw)(unsigned long, unsigned long, int);
 	void (*ack_irq)(unsigned long);
 	void (*device_interrupt)(unsigned long vector);

arch/alpha/include/asm/mmu_context.h

Lines changed: 3 additions & 42 deletions
@@ -71,9 +71,7 @@ __reload_thread(struct pcb_struct *pcb)
 #ifdef CONFIG_ALPHA_GENERIC
 # define MAX_ASN	(alpha_mv.max_asn)
 #else
-# ifdef CONFIG_ALPHA_EV4
-#  define MAX_ASN	EV4_MAX_ASN
-# elif defined(CONFIG_ALPHA_EV5)
+# if defined(CONFIG_ALPHA_EV56)
 #  define MAX_ASN	EV5_MAX_ASN
 # else
 #  define MAX_ASN	EV6_MAX_ASN
@@ -162,26 +160,6 @@ ev5_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
 	task_thread_info(next)->pcb.asn = mmc & HARDWARE_ASN_MASK;
 }
 
-__EXTERN_INLINE void
-ev4_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
-	      struct task_struct *next)
-{
-	/* As described, ASN's are broken for TLB usage.  But we can
-	   optimize for switching between threads -- if the mm is
-	   unchanged from current we needn't flush.  */
-	/* ??? May not be needed because EV4 PALcode recognizes that
-	   ASN's are broken and does a tbiap itself on swpctx, under
-	   the "Must set ASN or flush" rule.  At least this is true
-	   for a 1992 SRM, reports Joseph Martin ([email protected]).
-	   I'm going to leave this here anyway, just to Be Sure.  -- r~  */
-	if (prev_mm != next_mm)
-		tbiap();
-
-	/* Do continue to allocate ASNs, because we can still use them
-	   to avoid flushing the icache.  */
-	ev5_switch_mm(prev_mm, next_mm, next);
-}
-
 extern void __load_new_mm_context(struct mm_struct *);
 asmlinkage void do_page_fault(unsigned long address, unsigned long mmcsr,
 			      long cause, struct pt_regs *regs);
@@ -209,25 +187,8 @@ ev5_activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm)
 	__load_new_mm_context(next_mm);
 }
 
-__EXTERN_INLINE void
-ev4_activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm)
-{
-	__load_new_mm_context(next_mm);
-	tbiap();
-}
-
-#ifdef CONFIG_ALPHA_GENERIC
-# define switch_mm(a,b,c)	alpha_mv.mv_switch_mm((a),(b),(c))
-# define activate_mm(x,y)	alpha_mv.mv_activate_mm((x),(y))
-#else
-# ifdef CONFIG_ALPHA_EV4
-#  define switch_mm(a,b,c)	ev4_switch_mm((a),(b),(c))
-#  define activate_mm(x,y)	ev4_activate_mm((x),(y))
-# else
-#  define switch_mm(a,b,c)	ev5_switch_mm((a),(b),(c))
-#  define activate_mm(x,y)	ev5_activate_mm((x),(y))
-# endif
-#endif
+#define switch_mm(a,b,c)	ev5_switch_mm((a),(b),(c))
+#define activate_mm(x,y)	ev5_activate_mm((x),(y))
 
 #define init_new_context init_new_context
 static inline int

arch/alpha/include/asm/special_insns.h

Lines changed: 1 addition & 4 deletions
@@ -15,10 +15,7 @@ enum implver_enum {
 	(enum implver_enum) __implver; })
 #else
 /* Try to eliminate some dead code.  */
-#ifdef CONFIG_ALPHA_EV4
-#define implver() IMPLVER_EV4
-#endif
-#ifdef CONFIG_ALPHA_EV5
+#ifdef CONFIG_ALPHA_EV56
 #define implver() IMPLVER_EV5
 #endif
 #if defined(CONFIG_ALPHA_EV6)

arch/alpha/include/asm/tlbflush.h

Lines changed: 2 additions & 35 deletions
@@ -14,16 +14,6 @@
 extern void __load_new_mm_context(struct mm_struct *);
 
 
-/* Use a few helper functions to hide the ugly broken ASN
-   numbers on early Alphas (ev4 and ev45).  */
-
-__EXTERN_INLINE void
-ev4_flush_tlb_current(struct mm_struct *mm)
-{
-	__load_new_mm_context(mm);
-	tbiap();
-}
-
 __EXTERN_INLINE void
 ev5_flush_tlb_current(struct mm_struct *mm)
 {
@@ -34,19 +24,6 @@ ev5_flush_tlb_current(struct mm_struct *mm)
    careful about the icache here, there is no way to invalidate a
    specific icache page.  */
 
-__EXTERN_INLINE void
-ev4_flush_tlb_current_page(struct mm_struct * mm,
-			   struct vm_area_struct *vma,
-			   unsigned long addr)
-{
-	int tbi_flag = 2;
-	if (vma->vm_flags & VM_EXEC) {
-		__load_new_mm_context(mm);
-		tbi_flag = 3;
-	}
-	tbi(tbi_flag, addr);
-}
-
 __EXTERN_INLINE void
 ev5_flush_tlb_current_page(struct mm_struct * mm,
 			   struct vm_area_struct *vma,
@@ -59,18 +36,8 @@ ev5_flush_tlb_current_page(struct mm_struct * mm,
 }
 
 
-#ifdef CONFIG_ALPHA_GENERIC
-# define flush_tlb_current		alpha_mv.mv_flush_tlb_current
-# define flush_tlb_current_page	alpha_mv.mv_flush_tlb_current_page
-#else
-# ifdef CONFIG_ALPHA_EV4
-#  define flush_tlb_current		ev4_flush_tlb_current
-#  define flush_tlb_current_page	ev4_flush_tlb_current_page
-# else
-#  define flush_tlb_current		ev5_flush_tlb_current
-#  define flush_tlb_current_page	ev5_flush_tlb_current_page
-# endif
-#endif
+#define flush_tlb_current		ev5_flush_tlb_current
+#define flush_tlb_current_page		ev5_flush_tlb_current_page
 
 #ifdef __MMU_EXTERN_INLINE
 #undef __EXTERN_INLINE

arch/alpha/include/asm/uaccess.h

Lines changed: 0 additions & 80 deletions
@@ -96,9 +96,6 @@ struct __large_struct { unsigned long buf[100]; };
 	: "=r"(__gu_val), "=r"(__gu_err)		\
 	: "m"(__m(addr)), "1"(__gu_err))
 
-#ifdef __alpha_bwx__
-/* Those lucky bastards with ev56 and later CPUs can do byte/word moves.  */
-
 #define __get_user_16(addr)				\
 	__asm__("1: ldwu %0,%2\n"			\
 	"2:\n"						\
@@ -112,33 +109,6 @@ struct __large_struct { unsigned long buf[100]; };
 	EXC(1b,2b,%0,%1)				\
 	: "=r"(__gu_val), "=r"(__gu_err)		\
 	: "m"(__m(addr)), "1"(__gu_err))
-#else
-/* Unfortunately, we can't get an unaligned access trap for the sub-word
-   load, so we have to do a general unaligned operation.  */
-
-#define __get_user_16(addr)					\
-{								\
-	long __gu_tmp;						\
-	__asm__("1: ldq_u %0,0(%3)\n"				\
-	"2: ldq_u %1,1(%3)\n"					\
-	"   extwl %0,%3,%0\n"					\
-	"   extwh %1,%3,%1\n"					\
-	"   or %0,%1,%0\n"					\
-	"3:\n"							\
-	EXC(1b,3b,%0,%2)					\
-	EXC(2b,3b,%0,%2)					\
-	: "=&r"(__gu_val), "=&r"(__gu_tmp), "=r"(__gu_err)	\
-	: "r"(addr), "2"(__gu_err));				\
-}
-
-#define __get_user_8(addr)					\
-	__asm__("1: ldq_u %0,0(%2)\n"				\
-	"   extbl %0,%2,%0\n"					\
-	"2:\n"							\
-	EXC(1b,2b,%0,%1)					\
-	: "=&r"(__gu_val), "=r"(__gu_err)			\
-	: "r"(addr), "1"(__gu_err))
-#endif
 
 extern void __put_user_unknown(void);
 
@@ -192,9 +162,6 @@ __asm__ __volatile__("1: stl %r2,%1\n" \
 	: "=r"(__pu_err)				\
 	: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
 
-#ifdef __alpha_bwx__
-/* Those lucky bastards with ev56 and later CPUs can do byte/word moves.  */
-
 #define __put_user_16(x, addr)				\
 __asm__ __volatile__("1: stw %r2,%1\n"			\
 	"2:\n"						\
@@ -208,53 +175,6 @@ __asm__ __volatile__("1: stb %r2,%1\n" \
 	EXC(1b,2b,$31,%0)				\
 	: "=r"(__pu_err)				\
 	: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
-#else
-/* Unfortunately, we can't get an unaligned access trap for the sub-word
-   write, so we have to do a general unaligned operation.  */
-
-#define __put_user_16(x, addr)					\
-{								\
-	long __pu_tmp1, __pu_tmp2, __pu_tmp3, __pu_tmp4;	\
-	__asm__ __volatile__(					\
-	"1: ldq_u %2,1(%5)\n"					\
-	"2: ldq_u %1,0(%5)\n"					\
-	"   inswh %6,%5,%4\n"					\
-	"   inswl %6,%5,%3\n"					\
-	"   mskwh %2,%5,%2\n"					\
-	"   mskwl %1,%5,%1\n"					\
-	"   or %2,%4,%2\n"					\
-	"   or %1,%3,%1\n"					\
-	"3: stq_u %2,1(%5)\n"					\
-	"4: stq_u %1,0(%5)\n"					\
-	"5:\n"							\
-	EXC(1b,5b,$31,%0)					\
-	EXC(2b,5b,$31,%0)					\
-	EXC(3b,5b,$31,%0)					\
-	EXC(4b,5b,$31,%0)					\
-	: "=r"(__pu_err), "=&r"(__pu_tmp1),			\
-	  "=&r"(__pu_tmp2), "=&r"(__pu_tmp3),			\
-	  "=&r"(__pu_tmp4)					\
-	: "r"(addr), "r"((unsigned long)(x)), "0"(__pu_err));	\
-}
-
-#define __put_user_8(x, addr)					\
-{								\
-	long __pu_tmp1, __pu_tmp2;				\
-	__asm__ __volatile__(					\
-	"1: ldq_u %1,0(%4)\n"					\
-	"   insbl %3,%4,%2\n"					\
-	"   mskbl %1,%4,%1\n"					\
-	"   or %1,%2,%1\n"					\
-	"2: stq_u %1,0(%4)\n"					\
-	"3:\n"							\
-	EXC(1b,3b,$31,%0)					\
-	EXC(2b,3b,$31,%0)					\
-	: "=r"(__pu_err),					\
-	  "=&r"(__pu_tmp1), "=&r"(__pu_tmp2)			\
-	: "r"((unsigned long)(x)), "r"(addr), "0"(__pu_err));	\
-}
-#endif
-
 
 /*
  * Complex access routines
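For context, these macros sit behind the ordinary get_user()/put_user() interface: a 16-bit access is dispatched to __get_user_16()/__put_user_16(), which are now always the single ldwu/stw forms kept above. A sketch of a caller (hypothetical function, not from the patch):

#include <linux/errno.h>
#include <linux/uaccess.h>

/* Hypothetical example of a 16-bit user-space access. */
static long example_update_u16(unsigned short __user *uptr)
{
	unsigned short val;

	if (get_user(val, uptr))	/* dispatches to __get_user_16() -> ldwu */
		return -EFAULT;
	val |= 0x1;
	if (put_user(val, uptr))	/* dispatches to __put_user_16() -> stw */
		return -EFAULT;
	return 0;
}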

arch/alpha/include/uapi/asm/compiler.h

Lines changed: 0 additions & 18 deletions
@@ -95,24 +95,6 @@
 #define __kernel_ldwu(mem)	(mem)
 #define __kernel_stb(val,mem)	((mem) = (val))
 #define __kernel_stw(val,mem)	((mem) = (val))
-#else
-#define __kernel_ldbu(mem)				\
-  ({ unsigned char __kir;				\
-     __asm__(".arch ev56;				\
-	      ldbu %0,%1" : "=r"(__kir) : "m"(mem));	\
-     __kir; })
-#define __kernel_ldwu(mem)				\
-  ({ unsigned short __kir;				\
-     __asm__(".arch ev56;				\
-	      ldwu %0,%1" : "=r"(__kir) : "m"(mem));	\
-     __kir; })
-#define __kernel_stb(val,mem)				\
-  __asm__(".arch ev56;					\
-	   stb %1,%0" : "=m"(mem) : "r"(val))
-#define __kernel_stw(val,mem)				\
-  __asm__(".arch ev56;					\
-	   stw %1,%0" : "=m"(mem) : "r"(val))
 #endif
 
-
 #endif /* _UAPI__ALPHA_COMPILER_H */

arch/alpha/kernel/machvec_impl.h

Lines changed: 3 additions & 22 deletions
@@ -44,33 +44,14 @@
 
 #define DO_DEFAULT_RTC	.rtc_port = 0x70
 
-#define DO_EV4_MMU						\
-	.max_asn = EV4_MAX_ASN,					\
-	.mv_switch_mm = ev4_switch_mm,				\
-	.mv_activate_mm = ev4_activate_mm,			\
-	.mv_flush_tlb_current = ev4_flush_tlb_current,		\
-	.mv_flush_tlb_current_page = ev4_flush_tlb_current_page
-
 #define DO_EV5_MMU						\
-	.max_asn = EV5_MAX_ASN,					\
-	.mv_switch_mm = ev5_switch_mm,				\
-	.mv_activate_mm = ev5_activate_mm,			\
-	.mv_flush_tlb_current = ev5_flush_tlb_current,		\
-	.mv_flush_tlb_current_page = ev5_flush_tlb_current_page
+	.max_asn = EV5_MAX_ASN
 
 #define DO_EV6_MMU						\
-	.max_asn = EV6_MAX_ASN,					\
-	.mv_switch_mm = ev5_switch_mm,				\
-	.mv_activate_mm = ev5_activate_mm,			\
-	.mv_flush_tlb_current = ev5_flush_tlb_current,		\
-	.mv_flush_tlb_current_page = ev5_flush_tlb_current_page
+	.max_asn = EV6_MAX_ASN
 
 #define DO_EV7_MMU						\
-	.max_asn = EV6_MAX_ASN,					\
-	.mv_switch_mm = ev5_switch_mm,				\
-	.mv_activate_mm = ev5_activate_mm,			\
-	.mv_flush_tlb_current = ev5_flush_tlb_current,		\
-	.mv_flush_tlb_current_page = ev5_flush_tlb_current_page
+	.max_asn = EV6_MAX_ASN
 
 #define IO_LITE(UP,low)						\
 	.hae_register = (unsigned long *) CAT(UP,_HAE_ADDRESS),	\
