Skip to content

Commit 06e11b2

Browse files
xry111Kernel Patches Daemon
authored and committed
LoongArch: Fix calling smp_processor_id() in preemptible code
Fix the warning: BUG: using smp_processor_id() in preemptible [00000000] code: systemd/1 caller is larch_insn_text_copy+0x40/0xf0 Simply changing it to raw_smp_processor_id() is not enough: if preemption and CPU hotplug happen after raw_smp_processor_id() but before stop_machine(), the CPU where raw_smp_processor_id() ran may be offline by the time stop_machine() executes, and then no CPU will run copy_to_kernel_nofault() in text_copy_cb(). Thus guard the larch_insn_text_copy() calls with cpus_read_lock() and change stop_machine() to stop_machine_cpuslocked() to prevent this. I've considered moving the locks inside larch_insn_text_copy(), but doing so does not seem to be an easy change. In bpf_arch_text_poke() the memcpy() call obviously must be guarded by text_mutex, so we have to leave the acquisition of text_mutex out of larch_insn_text_copy(). But throughout the kernel mutexes are always acquired after cpus_read_lock(), so we cannot put cpus_read_lock() into larch_insn_text_copy() while leaving the text_mutex acquisition out (or we would risk a deadlock due to an inconsistent lock acquisition order). So let's fix the bug first and leave the possible refactor as future work. Fixes: 9fbd18c ("LoongArch: BPF: Add dynamic code modification support") Signed-off-by: Xi Ruoyao <xry111@xry111.site>
1 parent 69a44ca commit 06e11b2

File tree

2 files changed

+14
-2
lines changed

2 files changed

+14
-2
lines changed

arch/loongarch/kernel/inst.c

Lines changed: 8 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -263,14 +263,20 @@ int larch_insn_text_copy(void *dst, void *src, size_t len)
263263
.dst = dst,
264264
.src = src,
265265
.len = len,
266-
.cpu = smp_processor_id(),
266+
.cpu = raw_smp_processor_id(),
267267
};
268268

269+
/*
270+
* Ensure copy.cpu won't be hot removed before stop_machine. If
271+
* it's removed nobody will really update the text.
272+
*/
273+
lockdep_assert_cpus_held();
274+
269275
start = round_down((size_t)dst, PAGE_SIZE);
270276
end = round_up((size_t)dst + len, PAGE_SIZE);
271277

272278
set_memory_rw(start, (end - start) / PAGE_SIZE);
273-
ret = stop_machine(text_copy_cb, &copy, cpu_online_mask);
279+
ret = stop_machine_cpuslocked(text_copy_cb, &copy, cpu_online_mask);
274280
set_memory_rox(start, (end - start) / PAGE_SIZE);
275281

276282
return ret;

arch/loongarch/net/bpf_jit.c

Lines changed: 6 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -1379,9 +1379,11 @@ void *bpf_arch_text_copy(void *dst, void *src, size_t len)
13791379
{
13801380
int ret;
13811381

1382+
cpus_read_lock();
13821383
mutex_lock(&text_mutex);
13831384
ret = larch_insn_text_copy(dst, src, len);
13841385
mutex_unlock(&text_mutex);
1386+
cpus_read_unlock();
13851387

13861388
return ret ? ERR_PTR(-EINVAL) : dst;
13871389
}
@@ -1429,10 +1431,12 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type old_t,
14291431
if (ret)
14301432
return ret;
14311433

1434+
cpus_read_lock();
14321435
mutex_lock(&text_mutex);
14331436
if (memcmp(ip, new_insns, LOONGARCH_LONG_JUMP_NBYTES))
14341437
ret = larch_insn_text_copy(ip, new_insns, LOONGARCH_LONG_JUMP_NBYTES);
14351438
mutex_unlock(&text_mutex);
1439+
cpus_read_unlock();
14361440

14371441
return ret;
14381442
}
@@ -1450,10 +1454,12 @@ int bpf_arch_text_invalidate(void *dst, size_t len)
14501454
for (i = 0; i < (len / sizeof(u32)); i++)
14511455
inst[i] = INSN_BREAK;
14521456

1457+
cpus_read_lock();
14531458
mutex_lock(&text_mutex);
14541459
if (larch_insn_text_copy(dst, inst, len))
14551460
ret = -EINVAL;
14561461
mutex_unlock(&text_mutex);
1462+
cpus_read_unlock();
14571463

14581464
kvfree(inst);
14591465

0 commit comments

Comments
 (0)