Skip to content

Commit f9db97d

Browse files
committed
Merge branch 'parisc-5.7-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux
Pull parisc updates from Helge Deller: "Some cleanups in arch_rw locking functions, improved interrupt handling in arch spinlocks, conversions to request_irq() and syscall table generation cleanups" * 'parisc-5.7-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux: parisc: remove nargs from __SYSCALL parisc: Refactor alternative code to accept multiple conditions parisc: Rework arch_rw locking functions parisc: Improve interrupt handling in arch_spin_lock_flags() parisc: Replace setup_irq() by request_irq()
2 parents 12782fb + 106c909 commit f9db97d

File tree

7 files changed

+114
-133
lines changed

7 files changed

+114
-133
lines changed

arch/parisc/include/asm/spinlock.h

Lines changed: 73 additions & 87 deletions
Original file line numberDiff line numberDiff line change
@@ -10,25 +10,34 @@
1010
static inline int arch_spin_is_locked(arch_spinlock_t *x)
1111
{
1212
volatile unsigned int *a = __ldcw_align(x);
13+
smp_mb();
1314
return *a == 0;
1415
}
1516

16-
#define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0)
17+
static inline void arch_spin_lock(arch_spinlock_t *x)
18+
{
19+
volatile unsigned int *a;
20+
21+
a = __ldcw_align(x);
22+
while (__ldcw(a) == 0)
23+
while (*a == 0)
24+
cpu_relax();
25+
}
1726

1827
static inline void arch_spin_lock_flags(arch_spinlock_t *x,
1928
unsigned long flags)
2029
{
2130
volatile unsigned int *a;
31+
unsigned long flags_dis;
2232

2333
a = __ldcw_align(x);
24-
while (__ldcw(a) == 0)
34+
while (__ldcw(a) == 0) {
35+
local_save_flags(flags_dis);
36+
local_irq_restore(flags);
2537
while (*a == 0)
26-
if (flags & PSW_SM_I) {
27-
local_irq_enable();
28-
cpu_relax();
29-
local_irq_disable();
30-
} else
31-
cpu_relax();
38+
cpu_relax();
39+
local_irq_restore(flags_dis);
40+
}
3241
}
3342
#define arch_spin_lock_flags arch_spin_lock_flags
3443

@@ -58,116 +67,93 @@ static inline int arch_spin_trylock(arch_spinlock_t *x)
5867

5968
/*
6069
* Read-write spinlocks, allowing multiple readers but only one writer.
61-
* Linux rwlocks are unfair to writers; they can be starved for an indefinite
62-
* time by readers. With care, they can also be taken in interrupt context.
70+
* Unfair locking as Writers could be starved indefinitely by Reader(s)
6371
*
64-
* In the PA-RISC implementation, we have a spinlock and a counter.
65-
* Readers use the lock to serialise their access to the counter (which
66-
* records how many readers currently hold the lock).
67-
* Writers hold the spinlock, preventing any readers or other writers from
68-
* grabbing the rwlock.
72+
* The spinlock itself is contained in @counter and access to it is
73+
* serialized with @lock_mutex.
6974
*/
7075

71-
/* Note that we have to ensure interrupts are disabled in case we're
72-
* interrupted by some other code that wants to grab the same read lock */
73-
static __inline__ void arch_read_lock(arch_rwlock_t *rw)
76+
/* 1 - lock taken successfully */
77+
static inline int arch_read_trylock(arch_rwlock_t *rw)
7478
{
79+
int ret = 0;
7580
unsigned long flags;
76-
local_irq_save(flags);
77-
arch_spin_lock_flags(&rw->lock, flags);
78-
rw->counter++;
79-
arch_spin_unlock(&rw->lock);
80-
local_irq_restore(flags);
81-
}
8281

83-
/* Note that we have to ensure interrupts are disabled in case we're
84-
* interrupted by some other code that wants to grab the same read lock */
85-
static __inline__ void arch_read_unlock(arch_rwlock_t *rw)
86-
{
87-
unsigned long flags;
8882
local_irq_save(flags);
89-
arch_spin_lock_flags(&rw->lock, flags);
90-
rw->counter--;
91-
arch_spin_unlock(&rw->lock);
83+
arch_spin_lock(&(rw->lock_mutex));
84+
85+
/*
86+
* zero means writer holds the lock exclusively, deny Reader.
87+
* Otherwise grant lock to first/subseq reader
88+
*/
89+
if (rw->counter > 0) {
90+
rw->counter--;
91+
ret = 1;
92+
}
93+
94+
arch_spin_unlock(&(rw->lock_mutex));
9295
local_irq_restore(flags);
96+
97+
return ret;
9398
}
9499

95-
/* Note that we have to ensure interrupts are disabled in case we're
96-
* interrupted by some other code that wants to grab the same read lock */
97-
static __inline__ int arch_read_trylock(arch_rwlock_t *rw)
100+
/* 1 - lock taken successfully */
101+
static inline int arch_write_trylock(arch_rwlock_t *rw)
98102
{
103+
int ret = 0;
99104
unsigned long flags;
100-
retry:
105+
101106
local_irq_save(flags);
102-
if (arch_spin_trylock(&rw->lock)) {
103-
rw->counter++;
104-
arch_spin_unlock(&rw->lock);
105-
local_irq_restore(flags);
106-
return 1;
107+
arch_spin_lock(&(rw->lock_mutex));
108+
109+
/*
110+
* If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
111+
* deny writer. Otherwise if unlocked grant to writer
112+
* Hence the claim that Linux rwlocks are unfair to writers.
113+
* (can be starved for an indefinite time by readers).
114+
*/
115+
if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
116+
rw->counter = 0;
117+
ret = 1;
107118
}
108-
119+
arch_spin_unlock(&(rw->lock_mutex));
109120
local_irq_restore(flags);
110-
/* If write-locked, we fail to acquire the lock */
111-
if (rw->counter < 0)
112-
return 0;
113121

114-
/* Wait until we have a realistic chance at the lock */
115-
while (arch_spin_is_locked(&rw->lock) && rw->counter >= 0)
122+
return ret;
123+
}
124+
125+
static inline void arch_read_lock(arch_rwlock_t *rw)
126+
{
127+
while (!arch_read_trylock(rw))
116128
cpu_relax();
129+
}
117130

118-
goto retry;
131+
static inline void arch_write_lock(arch_rwlock_t *rw)
132+
{
133+
while (!arch_write_trylock(rw))
134+
cpu_relax();
119135
}
120136

121-
/* Note that we have to ensure interrupts are disabled in case we're
122-
* interrupted by some other code that wants to read_trylock() this lock */
123-
static __inline__ void arch_write_lock(arch_rwlock_t *rw)
137+
static inline void arch_read_unlock(arch_rwlock_t *rw)
124138
{
125139
unsigned long flags;
126-
retry:
127-
local_irq_save(flags);
128-
arch_spin_lock_flags(&rw->lock, flags);
129140

130-
if (rw->counter != 0) {
131-
arch_spin_unlock(&rw->lock);
132-
local_irq_restore(flags);
133-
134-
while (rw->counter != 0)
135-
cpu_relax();
136-
137-
goto retry;
138-
}
139-
140-
rw->counter = -1; /* mark as write-locked */
141-
mb();
141+
local_irq_save(flags);
142+
arch_spin_lock(&(rw->lock_mutex));
143+
rw->counter++;
144+
arch_spin_unlock(&(rw->lock_mutex));
142145
local_irq_restore(flags);
143146
}
144147

145-
static __inline__ void arch_write_unlock(arch_rwlock_t *rw)
146-
{
147-
rw->counter = 0;
148-
arch_spin_unlock(&rw->lock);
149-
}
150-
151-
/* Note that we have to ensure interrupts are disabled in case we're
152-
* interrupted by some other code that wants to read_trylock() this lock */
153-
static __inline__ int arch_write_trylock(arch_rwlock_t *rw)
148+
static inline void arch_write_unlock(arch_rwlock_t *rw)
154149
{
155150
unsigned long flags;
156-
int result = 0;
157151

158152
local_irq_save(flags);
159-
if (arch_spin_trylock(&rw->lock)) {
160-
if (rw->counter == 0) {
161-
rw->counter = -1;
162-
result = 1;
163-
} else {
164-
/* Read-locked. Oh well. */
165-
arch_spin_unlock(&rw->lock);
166-
}
167-
}
153+
arch_spin_lock(&(rw->lock_mutex));
154+
rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
155+
arch_spin_unlock(&(rw->lock_mutex));
168156
local_irq_restore(flags);
169-
170-
return result;
171157
}
172158

173159
#endif /* __ASM_SPINLOCK_H */

arch/parisc/include/asm/spinlock_types.h

Lines changed: 11 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -12,11 +12,19 @@ typedef struct {
1212
#endif
1313
} arch_spinlock_t;
1414

15+
16+
/* counter:
17+
* Unlocked : 0x0100_0000
18+
* Read lock(s) : 0x00FF_FFFF to 0x01 (Multiple Readers decrement it)
19+
* Write lock : 0x0, but only if prior value is "unlocked" 0x0100_0000
20+
*/
1521
typedef struct {
16-
arch_spinlock_t lock;
17-
volatile int counter;
22+
arch_spinlock_t lock_mutex;
23+
volatile unsigned int counter;
1824
} arch_rwlock_t;
1925

20-
#define __ARCH_RW_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED, 0 }
26+
#define __ARCH_RW_LOCK_UNLOCKED__ 0x01000000
27+
#define __ARCH_RW_LOCK_UNLOCKED { .lock_mutex = __ARCH_SPIN_LOCK_UNLOCKED, \
28+
.counter = __ARCH_RW_LOCK_UNLOCKED__ }
2129

2230
#endif

arch/parisc/kernel/alternative.c

Lines changed: 19 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,22 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
2525
struct alt_instr *entry;
2626
int index = 0, applied = 0;
2727
int num_cpus = num_online_cpus();
28+
u32 cond_check;
29+
30+
cond_check = ALT_COND_ALWAYS |
31+
((num_cpus == 1) ? ALT_COND_NO_SMP : 0) |
32+
((cache_info.dc_size == 0) ? ALT_COND_NO_DCACHE : 0) |
33+
((cache_info.ic_size == 0) ? ALT_COND_NO_ICACHE : 0) |
34+
(running_on_qemu ? ALT_COND_RUN_ON_QEMU : 0) |
35+
((split_tlb == 0) ? ALT_COND_NO_SPLIT_TLB : 0) |
36+
/*
37+
* If the PDC_MODEL capabilities has Non-coherent IO-PDIR bit
38+
* set (bit #61, big endian), we have to flush and sync every
39+
* time IO-PDIR is changed in Ike/Astro.
40+
*/
41+
(((boot_cpu_data.cpu_type > pcxw_) &&
42+
((boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC) == 0))
43+
? ALT_COND_NO_IOC_FDC : 0);
2844

2945
for (entry = start; entry < end; entry++, index++) {
3046

@@ -38,29 +54,14 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
3854

3955
WARN_ON(!cond);
4056

41-
if (cond != ALT_COND_ALWAYS && no_alternatives)
57+
if ((cond & ALT_COND_ALWAYS) == 0 && no_alternatives)
4258
continue;
4359

4460
pr_debug("Check %d: Cond 0x%x, Replace %02d instructions @ 0x%px with 0x%08x\n",
4561
index, cond, len, from, replacement);
4662

47-
if ((cond & ALT_COND_NO_SMP) && (num_cpus != 1))
48-
continue;
49-
if ((cond & ALT_COND_NO_DCACHE) && (cache_info.dc_size != 0))
50-
continue;
51-
if ((cond & ALT_COND_NO_ICACHE) && (cache_info.ic_size != 0))
52-
continue;
53-
if ((cond & ALT_COND_RUN_ON_QEMU) && !running_on_qemu)
54-
continue;
55-
56-
/*
57-
* If the PDC_MODEL capabilities has Non-coherent IO-PDIR bit
58-
* set (bit #61, big endian), we have to flush and sync every
59-
* time IO-PDIR is changed in Ike/Astro.
60-
*/
61-
if ((cond & ALT_COND_NO_IOC_FDC) &&
62-
((boot_cpu_data.cpu_type <= pcxw_) ||
63-
(boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC)))
63+
/* Bounce out if none of the conditions are true. */
64+
if ((cond & cond_check) == 0)
6465
continue;
6566

6667
/* Want to replace pdtlb by a pdtlb,l instruction? */

arch/parisc/kernel/irq.c

Lines changed: 6 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -560,33 +560,23 @@ void do_cpu_irq_mask(struct pt_regs *regs)
560560
goto out;
561561
}
562562

563-
static struct irqaction timer_action = {
564-
.handler = timer_interrupt,
565-
.name = "timer",
566-
.flags = IRQF_TIMER | IRQF_PERCPU | IRQF_IRQPOLL,
567-
};
568-
569-
#ifdef CONFIG_SMP
570-
static struct irqaction ipi_action = {
571-
.handler = ipi_interrupt,
572-
.name = "IPI",
573-
.flags = IRQF_PERCPU,
574-
};
575-
#endif
576-
577563
static void claim_cpu_irqs(void)
578564
{
565+
unsigned long flags = IRQF_TIMER | IRQF_PERCPU | IRQF_IRQPOLL;
579566
int i;
567+
580568
for (i = CPU_IRQ_BASE; i <= CPU_IRQ_MAX; i++) {
581569
irq_set_chip_and_handler(i, &cpu_interrupt_type,
582570
handle_percpu_irq);
583571
}
584572

585573
irq_set_handler(TIMER_IRQ, handle_percpu_irq);
586-
setup_irq(TIMER_IRQ, &timer_action);
574+
if (request_irq(TIMER_IRQ, timer_interrupt, flags, "timer", NULL))
575+
pr_err("Failed to register timer interrupt\n");
587576
#ifdef CONFIG_SMP
588577
irq_set_handler(IPI_IRQ, handle_percpu_irq);
589-
setup_irq(IPI_IRQ, &ipi_action);
578+
if (request_irq(IPI_IRQ, ipi_interrupt, IRQF_PERCPU, "IPI", NULL))
579+
pr_err("Failed to register IPI interrupt\n");
590580
#endif
591581
}
592582

arch/parisc/kernel/syscall.S

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -935,7 +935,7 @@ ENTRY(lws_table)
935935
END(lws_table)
936936
/* End of lws table */
937937

938-
#define __SYSCALL(nr, entry, nargs) ASM_ULONG_INSN entry
938+
#define __SYSCALL(nr, entry) ASM_ULONG_INSN entry
939939
.align 8
940940
ENTRY(sys_call_table)
941941
.export sys_call_table,data

arch/parisc/kernel/syscalls/syscalltbl.sh

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -13,10 +13,10 @@ emit() {
1313
t_entry="$3"
1414

1515
while [ $t_nxt -lt $t_nr ]; do
16-
printf "__SYSCALL(%s, sys_ni_syscall, )\n" "${t_nxt}"
16+
printf "__SYSCALL(%s,sys_ni_syscall)\n" "${t_nxt}"
1717
t_nxt=$((t_nxt+1))
1818
done
19-
printf "__SYSCALL(%s, %s, )\n" "${t_nxt}" "${t_entry}"
19+
printf "__SYSCALL(%s,%s)\n" "${t_nxt}" "${t_entry}"
2020
}
2121

2222
grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (

drivers/parisc/eisa.c

Lines changed: 2 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -243,11 +243,6 @@ static irqreturn_t dummy_irq2_handler(int _, void *dev)
243243
return IRQ_HANDLED;
244244
}
245245

246-
static struct irqaction irq2_action = {
247-
.handler = dummy_irq2_handler,
248-
.name = "cascade",
249-
};
250-
251246
static void init_eisa_pic(void)
252247
{
253248
unsigned long flags;
@@ -335,7 +330,8 @@ static int __init eisa_probe(struct parisc_device *dev)
335330
}
336331

337332
/* Reserve IRQ2 */
338-
setup_irq(2, &irq2_action);
333+
if (request_irq(2, dummy_irq2_handler, 0, "cascade", NULL))
334+
pr_err("Failed to request irq 2 (cascade)\n");
339335
for (i = 0; i < 16; i++) {
340336
irq_set_chip_and_handler(i, &eisa_interrupt_type,
341337
handle_simple_irq);

0 commit comments

Comments
 (0)