Skip to content

Commit a9815a4

Browse files
committed
Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fixes from Thomas Gleixner: "A set of x86 fixes and functional updates: - Prevent stale huge I/O TLB mappings on 32bit. A long standing bug which got exposed by KPTI support for 32bit - Prevent bogus access_ok() warnings in arch_stack_walk_user() - Add display quirks for Lenovo devices which have height and width swapped - Add the missing CR2 fixup for 32 bit async pagefaults. Fallout of the CR2 bug fix series. - Unbreak handling of force enabled HPET by moving the 'is HPET counting' check back to the original place. - A more accurate check for running on a hypervisor platform in the MDS mitigation code. Not perfect, but more accurate than the previous one. - Update a stale and confusing comment vs. IRQ stacks" * 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: x86/speculation/mds: Apply more accurate check on hypervisor platform x86/hpet: Undo the early counter is counting check x86/entry/32: Pass cr2 to do_async_page_fault() x86/irq/64: Update stale comment x86/sysfb_efi: Add quirks for some devices with swapped width and height x86/stacktrace: Prevent access_ok() warnings in arch_stack_walk_user() mm/vmalloc: Sync unmappings in __purge_vmap_area_lazy() x86/mm: Sync also unmappings in vmalloc_sync_all() x86/mm: Check for pfn instead of page in vmalloc_sync_one()
2 parents e24ce84 + 517c3ba commit a9815a4

File tree

8 files changed

+84
-23
lines changed

8 files changed

+84
-23
lines changed

arch/x86/entry/entry_32.S

Lines changed: 9 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -1443,15 +1443,20 @@ BUILD_INTERRUPT3(hv_stimer0_callback_vector, HYPERV_STIMER0_VECTOR,
14431443

14441444
ENTRY(page_fault)
14451445
ASM_CLAC
1446-
pushl $0; /* %gs's slot on the stack */
1446+
pushl $do_page_fault
1447+
jmp common_exception_read_cr2
1448+
END(page_fault)
14471449

1450+
common_exception_read_cr2:
1451+
/* the function address is in %gs's slot on the stack */
14481452
SAVE_ALL switch_stacks=1 skip_gs=1
14491453

14501454
ENCODE_FRAME_POINTER
14511455
UNWIND_ESPFIX_STACK
14521456

14531457
/* fixup %gs */
14541458
GS_TO_REG %ecx
1459+
movl PT_GS(%esp), %edi
14551460
REG_TO_PTGS %ecx
14561461
SET_KERNEL_GS %ecx
14571462

@@ -1463,9 +1468,9 @@ ENTRY(page_fault)
14631468

14641469
TRACE_IRQS_OFF
14651470
movl %esp, %eax # pt_regs pointer
1466-
call do_page_fault
1471+
CALL_NOSPEC %edi
14671472
jmp ret_from_exception
1468-
END(page_fault)
1473+
END(common_exception_read_cr2)
14691474

14701475
common_exception:
14711476
/* the function address is in %gs's slot on the stack */
@@ -1595,7 +1600,7 @@ END(general_protection)
15951600
ENTRY(async_page_fault)
15961601
ASM_CLAC
15971602
pushl $do_async_page_fault
1598-
jmp common_exception
1603+
jmp common_exception_read_cr2
15991604
END(async_page_fault)
16001605
#endif
16011606

arch/x86/kernel/cpu/bugs.c

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -1226,7 +1226,7 @@ static ssize_t l1tf_show_state(char *buf)
12261226

12271227
static ssize_t mds_show_state(char *buf)
12281228
{
1229-
if (!hypervisor_is_type(X86_HYPER_NATIVE)) {
1229+
if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
12301230
return sprintf(buf, "%s; SMT Host state unknown\n",
12311231
mds_strings[mds_mitigation]);
12321232
}

arch/x86/kernel/head_64.S

Lines changed: 4 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -193,10 +193,10 @@ ENTRY(secondary_startup_64)
193193

194194
/* Set up %gs.
195195
*
196-
* The base of %gs always points to the bottom of the irqstack
197-
* union. If the stack protector canary is enabled, it is
198-
* located at %gs:40. Note that, on SMP, the boot cpu uses
199-
* init data section till per cpu areas are set up.
196+
* The base of %gs always points to fixed_percpu_data. If the
197+
* stack protector canary is enabled, it is located at %gs:40.
198+
* Note that, on SMP, the boot cpu uses init data section until
199+
* the per cpu areas are set up.
200200
*/
201201
movl $MSR_GS_BASE,%ecx
202202
movl initial_gs(%rip),%eax

arch/x86/kernel/hpet.c

Lines changed: 8 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -827,10 +827,6 @@ int __init hpet_enable(void)
827827
if (!hpet_cfg_working())
828828
goto out_nohpet;
829829

830-
/* Validate that the counter is counting */
831-
if (!hpet_counting())
832-
goto out_nohpet;
833-
834830
/*
835831
* Read the period and check for a sane value:
836832
*/
@@ -896,6 +892,14 @@ int __init hpet_enable(void)
896892
}
897893
hpet_print_config();
898894

895+
/*
896+
* Validate that the counter is counting. This needs to be done
897+
* after sanitizing the config registers to properly deal with
898+
* force enabled HPETs.
899+
*/
900+
if (!hpet_counting())
901+
goto out_nohpet;
902+
899903
clocksource_register_hz(&clocksource_hpet, (u32)hpet_freq);
900904

901905
if (id & HPET_ID_LEGSUP) {

arch/x86/kernel/stacktrace.c

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -100,7 +100,7 @@ copy_stack_frame(const void __user *fp, struct stack_frame_user *frame)
100100
{
101101
int ret;
102102

103-
if (!access_ok(fp, sizeof(*frame)))
103+
if (__range_not_ok(fp, sizeof(*frame), TASK_SIZE))
104104
return 0;
105105

106106
ret = 1;

arch/x86/kernel/sysfb_efi.c

Lines changed: 46 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -230,9 +230,55 @@ static const struct dmi_system_id efifb_dmi_system_table[] __initconst = {
230230
{},
231231
};
232232

233+
/*
234+
* Some devices have a portrait LCD but advertise a landscape resolution (and
235+
* pitch). We simply swap width and height for these devices so that we can
236+
* correctly deal with some of them coming with multiple resolutions.
237+
*/
238+
static const struct dmi_system_id efifb_dmi_swap_width_height[] __initconst = {
239+
{
240+
/*
241+
* Lenovo MIIX310-10ICR, only some batches have the troublesome
242+
* 800x1280 portrait screen. Luckily the portrait version has
243+
* its own BIOS version, so we match on that.
244+
*/
245+
.matches = {
246+
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
247+
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "MIIX 310-10ICR"),
248+
DMI_EXACT_MATCH(DMI_BIOS_VERSION, "1HCN44WW"),
249+
},
250+
},
251+
{
252+
/* Lenovo MIIX 320-10ICR with 800x1280 portrait screen */
253+
.matches = {
254+
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
255+
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION,
256+
"Lenovo MIIX 320-10ICR"),
257+
},
258+
},
259+
{
260+
/* Lenovo D330 with 800x1280 or 1200x1920 portrait screen */
261+
.matches = {
262+
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
263+
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION,
264+
"Lenovo ideapad D330-10IGM"),
265+
},
266+
},
267+
{},
268+
};
269+
233270
__init void sysfb_apply_efi_quirks(void)
234271
{
235272
if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI ||
236273
!(screen_info.capabilities & VIDEO_CAPABILITY_SKIP_QUIRKS))
237274
dmi_check_system(efifb_dmi_system_table);
275+
276+
if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI &&
277+
dmi_check_system(efifb_dmi_swap_width_height)) {
278+
u16 temp = screen_info.lfb_width;
279+
280+
screen_info.lfb_width = screen_info.lfb_height;
281+
screen_info.lfb_height = temp;
282+
screen_info.lfb_linelength = 4 * screen_info.lfb_width;
283+
}
238284
}

arch/x86/mm/fault.c

Lines changed: 6 additions & 9 deletions
Original file line number | Diff line number | Diff line change
@@ -177,13 +177,14 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
177177

178178
pmd = pmd_offset(pud, address);
179179
pmd_k = pmd_offset(pud_k, address);
180-
if (!pmd_present(*pmd_k))
181-
return NULL;
182180

183-
if (!pmd_present(*pmd))
181+
if (pmd_present(*pmd) != pmd_present(*pmd_k))
184182
set_pmd(pmd, *pmd_k);
183+
184+
if (!pmd_present(*pmd_k))
185+
return NULL;
185186
else
186-
BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
187+
BUG_ON(pmd_pfn(*pmd) != pmd_pfn(*pmd_k));
187188

188189
return pmd_k;
189190
}
@@ -203,17 +204,13 @@ void vmalloc_sync_all(void)
203204
spin_lock(&pgd_lock);
204205
list_for_each_entry(page, &pgd_list, lru) {
205206
spinlock_t *pgt_lock;
206-
pmd_t *ret;
207207

208208
/* the pgt_lock only for Xen */
209209
pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
210210

211211
spin_lock(pgt_lock);
212-
ret = vmalloc_sync_one(page_address(page), address);
212+
vmalloc_sync_one(page_address(page), address);
213213
spin_unlock(pgt_lock);
214-
215-
if (!ret)
216-
break;
217214
}
218215
spin_unlock(&pgd_lock);
219216
}

mm/vmalloc.c

Lines changed: 9 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -1258,6 +1258,12 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
12581258
if (unlikely(valist == NULL))
12591259
return false;
12601260

1261+
/*
1262+
* First make sure the mappings are removed from all page-tables
1263+
* before they are freed.
1264+
*/
1265+
vmalloc_sync_all();
1266+
12611267
/*
12621268
* TODO: to calculate a flush range without looping.
12631269
* The list can be up to lazy_max_pages() elements.
@@ -3038,6 +3044,9 @@ EXPORT_SYMBOL(remap_vmalloc_range);
30383044
/*
30393045
* Implement a stub for vmalloc_sync_all() if the architecture chose not to
30403046
* have one.
3047+
*
3048+
* The purpose of this function is to make sure the vmalloc area
3049+
* mappings are identical in all page-tables in the system.
30413050
*/
30423051
void __weak vmalloc_sync_all(void)
30433052
{

0 commit comments

Comments (0)