Commit 1fb85d0

ahunter6 authored and Peter Zijlstra committed
x86: Share definition of __is_canonical_address()
Reduce code duplication by moving canonical address code to a common header file.

Signed-off-by: Adrian Hunter <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent c243cec commit 1fb85d0
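
For background on the helper being shared: an x86-64 virtual address is canonical when bits 63 down through the top implemented bit are all copies of that bit (bit 47 for 48-bit addressing, bit 56 with LA57). The helper recovers the canonical form with a shift pair: shift the address left so the top implemented bit lands in bit 63, then arithmetic-shift back, sign-extending that bit through the upper bits. A minimal userspace sketch of the same trick (illustrative only, not part of the commit; the sample addresses are made up):

#include <stdio.h>
#include <stdint.h>

/*
 * Same shift pair as __canonical_address(). The left shift is done
 * unsigned to avoid signed overflow; the cast to int64_t makes the
 * right shift arithmetic on the compilers the kernel supports, so
 * bit (vaddr_bits - 1) is replicated through bits 63..vaddr_bits.
 */
static inline uint64_t canonical(uint64_t vaddr, uint8_t vaddr_bits)
{
	return (uint64_t)((int64_t)(vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits));
}

int main(void)
{
	/* 48-bit addressing: bits 63..47 must all equal bit 47. */
	uint64_t kernel = 0xffff888000000000ULL;  /* canonical, upper half */
	uint64_t user   = 0x00007f0000001000ULL;  /* canonical, lower half */
	uint64_t hole   = 0x0000900000000000ULL;  /* bit 47 set, bits 63..48 clear */

	printf("%d %d %d\n",
	       canonical(kernel, 48) == kernel,  /* 1 */
	       canonical(user, 48)   == user,    /* 1 */
	       canonical(hole, 48)   == hole);   /* 0: sign-extends to 0xffff900000000000 */
	return 0;
}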

File tree

6 files changed: +17 -27 lines changed

arch/x86/events/intel/pt.c

Lines changed: 2 additions & 12 deletions
@@ -1350,28 +1350,18 @@ static void pt_addr_filters_fini(struct perf_event *event)
 }
 
 #ifdef CONFIG_X86_64
-static u64 canonical_address(u64 vaddr, u8 vaddr_bits)
-{
-	return ((s64)vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits);
-}
-
-static u64 is_canonical_address(u64 vaddr, u8 vaddr_bits)
-{
-	return canonical_address(vaddr, vaddr_bits) == vaddr;
-}
-
 /* Clamp to a canonical address greater-than-or-equal-to the address given */
 static u64 clamp_to_ge_canonical_addr(u64 vaddr, u8 vaddr_bits)
 {
-	return is_canonical_address(vaddr, vaddr_bits) ?
+	return __is_canonical_address(vaddr, vaddr_bits) ?
 	       vaddr :
 	       -BIT_ULL(vaddr_bits - 1);
 }
 
 /* Clamp to a canonical address less-than-or-equal-to the address given */
 static u64 clamp_to_le_canonical_addr(u64 vaddr, u8 vaddr_bits)
 {
-	return is_canonical_address(vaddr, vaddr_bits) ?
+	return __is_canonical_address(vaddr, vaddr_bits) ?
 	       vaddr :
 	       BIT_ULL(vaddr_bits - 1) - 1;
 }
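
One note on the fallback constants in the clamp helpers above: for a given width, every non-canonical address falls in the hole between the two canonical halves, so -BIT_ULL(vaddr_bits - 1) is the lowest canonical address above the hole and BIT_ULL(vaddr_bits - 1) - 1 the highest below it. A quick standalone check of those values (illustrative, not from the commit):

#include <stdio.h>
#include <stdint.h>

#define BIT_ULL(n) (1ULL << (n))

int main(void)
{
	uint8_t vaddr_bits = 48;

	/* First canonical address above the non-canonical hole. */
	printf("ge fallback: 0x%016llx\n",
	       (unsigned long long)-BIT_ULL(vaddr_bits - 1));       /* 0xffff800000000000 */

	/* Last canonical address below the hole. */
	printf("le fallback: 0x%016llx\n",
	       (unsigned long long)(BIT_ULL(vaddr_bits - 1) - 1));  /* 0x00007fffffffffff */
	return 0;
}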

arch/x86/include/asm/page.h

Lines changed: 10 additions & 0 deletions
@@ -71,6 +71,16 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
 extern bool __virt_addr_valid(unsigned long kaddr);
 #define virt_addr_valid(kaddr)	__virt_addr_valid((unsigned long) (kaddr))
 
+static __always_inline u64 __canonical_address(u64 vaddr, u8 vaddr_bits)
+{
+	return ((s64)vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits);
+}
+
+static __always_inline u64 __is_canonical_address(u64 vaddr, u8 vaddr_bits)
+{
+	return __canonical_address(vaddr, vaddr_bits) == vaddr;
+}
+
 #endif	/* __ASSEMBLY__ */
 
 #include <asm-generic/memory_model.h>

arch/x86/kvm/emulate.c

Lines changed: 2 additions & 2 deletions
@@ -665,7 +665,7 @@ static inline u8 ctxt_virt_addr_bits(struct x86_emulate_ctxt *ctxt)
 static inline bool emul_is_noncanonical_address(u64 la,
 						struct x86_emulate_ctxt *ctxt)
 {
-	return get_canonical(la, ctxt_virt_addr_bits(ctxt)) != la;
+	return !__is_canonical_address(la, ctxt_virt_addr_bits(ctxt));
 }
 
 /*
@@ -715,7 +715,7 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
 	case X86EMUL_MODE_PROT64:
 		*linear = la;
 		va_bits = ctxt_virt_addr_bits(ctxt);
-		if (get_canonical(la, va_bits) != la)
+		if (!__is_canonical_address(la, va_bits))
 			goto bad;
 
 		*max_size = min_t(u64, ~0u, (1ull << va_bits) - la);

arch/x86/kvm/x86.c

Lines changed: 1 addition & 1 deletion
@@ -1735,7 +1735,7 @@ static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data,
 		 * value, and that something deterministic happens if the guest
 		 * invokes 64-bit SYSENTER.
 		 */
-		data = get_canonical(data, vcpu_virt_addr_bits(vcpu));
+		data = __canonical_address(data, vcpu_virt_addr_bits(vcpu));
 		break;
 	case MSR_TSC_AUX:
 		if (!kvm_is_supported_user_return_msr(MSR_TSC_AUX))

arch/x86/kvm/x86.h

Lines changed: 1 addition & 6 deletions
@@ -211,14 +211,9 @@ static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
 	return kvm_read_cr4_bits(vcpu, X86_CR4_LA57) ? 57 : 48;
 }
 
-static inline u64 get_canonical(u64 la, u8 vaddr_bits)
-{
-	return ((int64_t)la << (64 - vaddr_bits)) >> (64 - vaddr_bits);
-}
-
 static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
 {
-	return get_canonical(la, vcpu_virt_addr_bits(vcpu)) != la;
+	return !__is_canonical_address(la, vcpu_virt_addr_bits(vcpu));
 }
 
 static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,

arch/x86/mm/maccess.c

Lines changed: 1 addition & 6 deletions
@@ -4,11 +4,6 @@
 #include <linux/kernel.h>
 
 #ifdef CONFIG_X86_64
-static __always_inline u64 canonical_address(u64 vaddr, u8 vaddr_bits)
-{
-	return ((s64)vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits);
-}
-
 bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
 {
 	unsigned long vaddr = (unsigned long)unsafe_src;
@@ -19,7 +14,7 @@ bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
 	 * we also need to include the userspace guard page.
 	 */
 	return vaddr >= TASK_SIZE_MAX + PAGE_SIZE &&
-	       canonical_address(vaddr, boot_cpu_data.x86_virt_bits) == vaddr;
+	       __is_canonical_address(vaddr, boot_cpu_data.x86_virt_bits);
 }
 #else
 bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
