Skip to content

Commit 260d165

Browse files
committed
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull more s390 updates from Martin Schwidefsky:
 "The second patch set for the 4.14 merge window:

   - Convert the dasd device driver to the blk-mq interface.

   - Provide three zcrypt interfaces for vfio_ap. These will be required
     for KVM guest access to the crypto cards attached via the AP bus.

   - A couple of memory management bug fixes."

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
  s390/dasd: blk-mq conversion
  s390/mm: use a single lock for the fields in mm_context_t
  s390/mm: fix race on mm->context.flush_mm
  s390/mm: fix local TLB flushing vs. detach of an mm address space
  s390/zcrypt: externalize AP queue interrupt control
  s390/zcrypt: externalize AP config info query
  s390/zcrypt: externalize test AP queue
  s390/mm: use VM_BUG_ON in crst_table_[upgrade|downgrade]
2 parents c971aa3 + e443343 commit 260d165

File tree

13 files changed

+416
-274
lines changed

13 files changed

+416
-274
lines changed

arch/s390/include/asm/ap.h

Lines changed: 126 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,126 @@
/*
 * Adjunct processor (AP) interfaces
 *
 * Copyright IBM Corp. 2017
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * Author(s): Tony Krowiak <[email protected]>
 *	      Martin Schwidefsky <[email protected]>
 *	      Harald Freudenberger <[email protected]>
 */

#ifndef _ASM_S390_AP_H_
#define _ASM_S390_AP_H_

/**
 * The ap_qid_t identifier of an ap queue.
 * If the AP facilities test (APFT) facility is available,
 * card and queue index are 8 bit values, otherwise
 * card index is 6 bit and queue index a 4 bit value.
 */
typedef unsigned int ap_qid_t;

/* Compose/decompose a qid: card index in bits 8..15, queue index in 0..7. */
#define AP_MKQID(_card, _queue) (((_card) & 63) << 8 | ((_queue) & 255))
#define AP_QID_CARD(_qid) (((_qid) >> 8) & 63)
#define AP_QID_QUEUE(_qid) ((_qid) & 255)

/**
 * struct ap_queue_status - Holds the AP queue status.
 * @queue_empty: Shows if queue is empty
 * @replies_waiting: Waiting replies
 * @queue_full: Is 1 if the queue is full
 * @irq_enabled: Shows if interrupts are enabled for the AP
 * @response_code: Holds the 8 bit response code
 *
 * The ap queue status word is returned by all three AP functions
 * (PQAP, NQAP and DQAP). There's a set of flags in the first
 * byte, followed by a 1 byte response code.
 */
struct ap_queue_status {
	unsigned int queue_empty	: 1;
	unsigned int replies_waiting	: 1;
	unsigned int queue_full		: 1;
	unsigned int _pad1		: 4;
	unsigned int irq_enabled	: 1;
	unsigned int response_code	: 8;
	unsigned int _pad2		: 16;
};

/**
 * ap_test_queue(): Test adjunct processor queue.
 * @qid: The AP queue number
 * @tbit: Test facilities bit
 * @info: Pointer to queue descriptor
 *
 * Returns AP queue status structure.
 */
struct ap_queue_status ap_test_queue(ap_qid_t qid,
				     int tbit,
				     unsigned long *info);

/*
 * struct ap_config_info - AP configuration info as returned by
 * PQAP(QCI). Field meanings are given inline; the apm/aqm/adm
 * arrays are 256-bit masks (8 x 32 bit) over the AP id, queue
 * and domain spaces.
 */
struct ap_config_info {
	unsigned int apsc	 : 1;	/* S bit */
	unsigned int apxa	 : 1;	/* N bit */
	unsigned int qact	 : 1;	/* C bit */
	unsigned int rc8a	 : 1;	/* R bit */
	unsigned char _reserved1 : 4;
	unsigned char _reserved2[3];
	unsigned char Na;		/* max # of APs - 1 */
	unsigned char Nd;		/* max # of Domains - 1 */
	unsigned char _reserved3[10];
	unsigned int apm[8];		/* AP ID mask */
	unsigned int aqm[8];		/* AP queue mask */
	unsigned int adm[8];		/* AP domain mask */
	unsigned char _reserved4[16];
} __aligned(8);

/*
 * ap_query_configuration(): Fetch cryptographic config info
 *
 * Returns the ap configuration info fetched via PQAP(QCI).
 * On success 0 is returned, on failure a negative errno
 * is returned, e.g. if the PQAP(QCI) instruction is not
 * available, the return value will be -EOPNOTSUPP.
 */
int ap_query_configuration(struct ap_config_info *info);

/*
 * struct ap_qirq_ctrl - convenient struct for easy invocation
 * of the ap_queue_irq_ctrl() function. This struct is passed
 * as GR1 parameter to the PQAP(AQIC) instruction. For details
 * please see the AR documentation.
 */
struct ap_qirq_ctrl {
	unsigned int _res1 : 8;
	unsigned int zone  : 8;	/* zone info */
	unsigned int ir    : 1;	/* ir flag: enable (1) or disable (0) irq */
	unsigned int _res2 : 4;
	unsigned int gisc  : 3;	/* guest isc field */
	unsigned int _res3 : 6;
	unsigned int gf    : 2;	/* gisa format */
	unsigned int _res4 : 1;
	unsigned int gisa  : 27;	/* gisa origin */
	unsigned int _res5 : 1;
	unsigned int isc   : 3;	/* irq sub class */
};

/**
 * ap_queue_irq_ctrl(): Control interruption on a AP queue.
 * @qid: The AP queue number
 * @qirqctrl: struct ap_qirq_ctrl, see above
 * @ind: The notification indicator byte
 *
 * Returns AP queue status.
 *
 * Control interruption on the given AP queue.
 * Just a simple wrapper function for the low level PQAP(AQIC)
 * instruction available for other kernel modules.
 */
struct ap_queue_status ap_queue_irq_ctrl(ap_qid_t qid,
					 struct ap_qirq_ctrl qirqctrl,
					 void *ind);

#endif /* _ASM_S390_AP_H_ */

arch/s390/include/asm/mmu.h

Lines changed: 2 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -5,12 +5,11 @@
55
#include <linux/errno.h>
66

77
typedef struct {
8+
spinlock_t lock;
89
cpumask_t cpu_attach_mask;
910
atomic_t flush_count;
1011
unsigned int flush_mm;
11-
spinlock_t pgtable_lock;
1212
struct list_head pgtable_list;
13-
spinlock_t gmap_lock;
1413
struct list_head gmap_list;
1514
unsigned long gmap_asce;
1615
unsigned long asce;
@@ -27,10 +26,8 @@ typedef struct {
2726
} mm_context_t;
2827

2928
/*
 * Static initializer for init_mm's s390 mm_context_t: the single
 * context lock (which now guards both the pgtable and gmap state,
 * replacing the former pgtable_lock/gmap_lock pair) plus the two
 * list heads it protects.
 */
#define INIT_MM_CONTEXT(name)						   \
	.context.lock = __SPIN_LOCK_UNLOCKED(name.context.lock),	   \
	.context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list), \
	.context.gmap_list = LIST_HEAD_INIT(name.context.gmap_list),
3532

3633
static inline int tprot(unsigned long addr)

arch/s390/include/asm/mmu_context.h

Lines changed: 4 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -17,9 +17,8 @@
1717
static inline int init_new_context(struct task_struct *tsk,
1818
struct mm_struct *mm)
1919
{
20-
spin_lock_init(&mm->context.pgtable_lock);
20+
spin_lock_init(&mm->context.lock);
2121
INIT_LIST_HEAD(&mm->context.pgtable_list);
22-
spin_lock_init(&mm->context.gmap_lock);
2322
INIT_LIST_HEAD(&mm->context.gmap_list);
2423
cpumask_clear(&mm->context.cpu_attach_mask);
2524
atomic_set(&mm->context.flush_count, 0);
@@ -103,7 +102,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
103102
if (prev == next)
104103
return;
105104
cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
106-
cpumask_set_cpu(cpu, mm_cpumask(next));
107105
/* Clear old ASCE by loading the kernel ASCE. */
108106
__ctl_load(S390_lowcore.kernel_asce, 1, 1);
109107
__ctl_load(S390_lowcore.kernel_asce, 7, 7);
@@ -121,9 +119,8 @@ static inline void finish_arch_post_lock_switch(void)
121119
preempt_disable();
122120
while (atomic_read(&mm->context.flush_count))
123121
cpu_relax();
124-
125-
if (mm->context.flush_mm)
126-
__tlb_flush_mm(mm);
122+
cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
123+
__tlb_flush_mm_lazy(mm);
127124
preempt_enable();
128125
}
129126
set_fs(current->thread.mm_segment);
@@ -136,6 +133,7 @@ static inline void activate_mm(struct mm_struct *prev,
136133
struct mm_struct *next)
137134
{
138135
switch_mm(prev, next, current);
136+
cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
139137
set_user_asce(next);
140138
}
141139

arch/s390/include/asm/tlbflush.h

Lines changed: 8 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -48,23 +48,6 @@ static inline void __tlb_flush_global(void)
4848
* Flush TLB entries for a specific mm on all CPUs (in case gmap is used
4949
* this implicates multiple ASCEs!).
5050
*/
51-
static inline void __tlb_flush_full(struct mm_struct *mm)
52-
{
53-
preempt_disable();
54-
atomic_inc(&mm->context.flush_count);
55-
if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
56-
/* Local TLB flush */
57-
__tlb_flush_local();
58-
} else {
59-
/* Global TLB flush */
60-
__tlb_flush_global();
61-
/* Reset TLB flush mask */
62-
cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
63-
}
64-
atomic_dec(&mm->context.flush_count);
65-
preempt_enable();
66-
}
67-
6851
static inline void __tlb_flush_mm(struct mm_struct *mm)
6952
{
7053
unsigned long gmap_asce;
@@ -76,16 +59,18 @@ static inline void __tlb_flush_mm(struct mm_struct *mm)
7659
*/
7760
preempt_disable();
7861
atomic_inc(&mm->context.flush_count);
62+
/* Reset TLB flush mask */
63+
cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
64+
barrier();
7965
gmap_asce = READ_ONCE(mm->context.gmap_asce);
8066
if (MACHINE_HAS_IDTE && gmap_asce != -1UL) {
8167
if (gmap_asce)
8268
__tlb_flush_idte(gmap_asce);
8369
__tlb_flush_idte(mm->context.asce);
8470
} else {
85-
__tlb_flush_full(mm);
71+
/* Global TLB flush */
72+
__tlb_flush_global();
8673
}
87-
/* Reset TLB flush mask */
88-
cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
8974
atomic_dec(&mm->context.flush_count);
9075
preempt_enable();
9176
}
@@ -99,7 +84,6 @@ static inline void __tlb_flush_kernel(void)
9984
}
10085
#else
10186
#define __tlb_flush_global() __tlb_flush_local()
102-
#define __tlb_flush_full(mm) __tlb_flush_local()
10387

10488
/*
10589
* Flush TLB entries for a specific ASCE on all CPUs.
@@ -117,10 +101,12 @@ static inline void __tlb_flush_kernel(void)
117101

118102
/*
 * Flush the mm's TLB entries only if a flush was requested
 * (mm->context.flush_mm set). Serialized by the single per-context
 * spinlock; flush_mm is cleared *before* the flush so a request
 * raised concurrently is not lost (see "s390/mm: fix race on
 * mm->context.flush_mm" in this merge).
 */
static inline void __tlb_flush_mm_lazy(struct mm_struct * mm)
{
	spin_lock(&mm->context.lock);
	if (mm->context.flush_mm) {
		mm->context.flush_mm = 0;
		__tlb_flush_mm(mm);
	}
	spin_unlock(&mm->context.lock);
}
125111

126112
/*

arch/s390/mm/gmap.c

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -100,14 +100,14 @@ struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit)
100100
if (!gmap)
101101
return NULL;
102102
gmap->mm = mm;
103-
spin_lock(&mm->context.gmap_lock);
103+
spin_lock(&mm->context.lock);
104104
list_add_rcu(&gmap->list, &mm->context.gmap_list);
105105
if (list_is_singular(&mm->context.gmap_list))
106106
gmap_asce = gmap->asce;
107107
else
108108
gmap_asce = -1UL;
109109
WRITE_ONCE(mm->context.gmap_asce, gmap_asce);
110-
spin_unlock(&mm->context.gmap_lock);
110+
spin_unlock(&mm->context.lock);
111111
return gmap;
112112
}
113113
EXPORT_SYMBOL_GPL(gmap_create);
@@ -248,7 +248,7 @@ void gmap_remove(struct gmap *gmap)
248248
spin_unlock(&gmap->shadow_lock);
249249
}
250250
/* Remove gmap from the pre-mm list */
251-
spin_lock(&gmap->mm->context.gmap_lock);
251+
spin_lock(&gmap->mm->context.lock);
252252
list_del_rcu(&gmap->list);
253253
if (list_empty(&gmap->mm->context.gmap_list))
254254
gmap_asce = 0;
@@ -258,7 +258,7 @@ void gmap_remove(struct gmap *gmap)
258258
else
259259
gmap_asce = -1UL;
260260
WRITE_ONCE(gmap->mm->context.gmap_asce, gmap_asce);
261-
spin_unlock(&gmap->mm->context.gmap_lock);
261+
spin_unlock(&gmap->mm->context.lock);
262262
synchronize_rcu();
263263
/* Put reference */
264264
gmap_put(gmap);

arch/s390/mm/pgalloc.c

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -83,7 +83,7 @@ int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
8383
int rc, notify;
8484

8585
/* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
86-
BUG_ON(mm->context.asce_limit < _REGION2_SIZE);
86+
VM_BUG_ON(mm->context.asce_limit < _REGION2_SIZE);
8787
if (end >= TASK_SIZE_MAX)
8888
return -ENOMEM;
8989
rc = 0;
@@ -124,7 +124,7 @@ void crst_table_downgrade(struct mm_struct *mm)
124124
pgd_t *pgd;
125125

126126
/* downgrade should only happen from 3 to 2 levels (compat only) */
127-
BUG_ON(mm->context.asce_limit != _REGION2_SIZE);
127+
VM_BUG_ON(mm->context.asce_limit != _REGION2_SIZE);
128128

129129
if (current->active_mm == mm) {
130130
clear_user_asce();
@@ -188,7 +188,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
188188
/* Try to get a fragment of a 4K page as a 2K page table */
189189
if (!mm_alloc_pgste(mm)) {
190190
table = NULL;
191-
spin_lock_bh(&mm->context.pgtable_lock);
191+
spin_lock_bh(&mm->context.lock);
192192
if (!list_empty(&mm->context.pgtable_list)) {
193193
page = list_first_entry(&mm->context.pgtable_list,
194194
struct page, lru);
@@ -203,7 +203,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
203203
list_del(&page->lru);
204204
}
205205
}
206-
spin_unlock_bh(&mm->context.pgtable_lock);
206+
spin_unlock_bh(&mm->context.lock);
207207
if (table)
208208
return table;
209209
}
@@ -227,9 +227,9 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
227227
/* Return the first 2K fragment of the page */
228228
atomic_set(&page->_mapcount, 1);
229229
clear_table(table, _PAGE_INVALID, PAGE_SIZE);
230-
spin_lock_bh(&mm->context.pgtable_lock);
230+
spin_lock_bh(&mm->context.lock);
231231
list_add(&page->lru, &mm->context.pgtable_list);
232-
spin_unlock_bh(&mm->context.pgtable_lock);
232+
spin_unlock_bh(&mm->context.lock);
233233
}
234234
return table;
235235
}
@@ -243,13 +243,13 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
243243
if (!mm_alloc_pgste(mm)) {
244244
/* Free 2K page table fragment of a 4K page */
245245
bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
246-
spin_lock_bh(&mm->context.pgtable_lock);
246+
spin_lock_bh(&mm->context.lock);
247247
mask = atomic_xor_bits(&page->_mapcount, 1U << bit);
248248
if (mask & 3)
249249
list_add(&page->lru, &mm->context.pgtable_list);
250250
else
251251
list_del(&page->lru);
252-
spin_unlock_bh(&mm->context.pgtable_lock);
252+
spin_unlock_bh(&mm->context.lock);
253253
if (mask != 0)
254254
return;
255255
}
@@ -275,13 +275,13 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
275275
return;
276276
}
277277
bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
278-
spin_lock_bh(&mm->context.pgtable_lock);
278+
spin_lock_bh(&mm->context.lock);
279279
mask = atomic_xor_bits(&page->_mapcount, 0x11U << bit);
280280
if (mask & 3)
281281
list_add_tail(&page->lru, &mm->context.pgtable_list);
282282
else
283283
list_del(&page->lru);
284-
spin_unlock_bh(&mm->context.pgtable_lock);
284+
spin_unlock_bh(&mm->context.lock);
285285
table = (unsigned long *) (__pa(table) | (1U << bit));
286286
tlb_remove_table(tlb, table);
287287
}

0 commit comments

Comments
 (0)