@@ -35,7 +35,7 @@ config HOTPLUG_SMT
 	bool
 
 config GENERIC_ENTRY
-       bool
+	bool
 
 config KPROBES
 	bool "Kprobes"
@@ -55,26 +55,26 @@ config JUMP_LABEL
 	depends on HAVE_ARCH_JUMP_LABEL
 	select OBJTOOL if HAVE_JUMP_LABEL_HACK
 	help
-        This option enables a transparent branch optimization that
-        makes certain almost-always-true or almost-always-false branch
-        conditions even cheaper to execute within the kernel.
+	  This option enables a transparent branch optimization that
+	  makes certain almost-always-true or almost-always-false branch
+	  conditions even cheaper to execute within the kernel.
 
-        Certain performance-sensitive kernel code, such as trace points,
-        scheduler functionality, networking code and KVM have such
-        branches and include support for this optimization technique.
+	  Certain performance-sensitive kernel code, such as trace points,
+	  scheduler functionality, networking code and KVM have such
+	  branches and include support for this optimization technique.
 
-        If it is detected that the compiler has support for "asm goto",
-        the kernel will compile such branches with just a nop
-        instruction. When the condition flag is toggled to true, the
-        nop will be converted to a jump instruction to execute the
-        conditional block of instructions.
+	  If it is detected that the compiler has support for "asm goto",
+	  the kernel will compile such branches with just a nop
+	  instruction. When the condition flag is toggled to true, the
+	  nop will be converted to a jump instruction to execute the
+	  conditional block of instructions.
 
-        This technique lowers overhead and stress on the branch prediction
-        of the processor and generally makes the kernel faster. The update
-        of the condition is slower, but those are always very rare.
+	  This technique lowers overhead and stress on the branch prediction
+	  of the processor and generally makes the kernel faster. The update
+	  of the condition is slower, but those are always very rare.
 
-        ( On 32-bit x86, the necessary options added to the compiler
-          flags may increase the size of the kernel slightly. )
+	  ( On 32-bit x86, the necessary options added to the compiler
+	    flags may increase the size of the kernel slightly. )
 
 config STATIC_KEYS_SELFTEST
 	bool "Static key selftest"
@@ -98,9 +98,9 @@ config KPROBES_ON_FTRACE
 	depends on KPROBES && HAVE_KPROBES_ON_FTRACE
 	depends on DYNAMIC_FTRACE_WITH_REGS
 	help
-        If function tracer is enabled and the arch supports full
-        passing of pt_regs to function tracing, then kprobes can
-        optimize on top of function tracing.
+	  If function tracer is enabled and the arch supports full
+	  passing of pt_regs to function tracing, then kprobes can
+	  optimize on top of function tracing.
 
 config UPROBES
 	def_bool n
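Note: KPROBES_ON_FTRACE needs nothing special on the probe user's side. A hedged sketch of an ordinary kprobe at a function entry ("do_sys_open" is just an illustrative target); when that entry is an ftrace site, the probe rides the ftrace callback instead of a breakpoint:

#include <linux/kprobes.h>

static int my_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("hit %s\n", p->symbol_name);
	return 0;	/* let execution continue normally */
}

static struct kprobe my_kp = {
	.symbol_name = "do_sys_open",
	.pre_handler = my_pre,
};

/* Module init/exit would call register_kprobe(&my_kp) and
 * unregister_kprobe(&my_kp) respectively. */
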
@@ -154,21 +154,21 @@ config HAVE_EFFICIENT_UNALIGNED_ACCESS
 config ARCH_USE_BUILTIN_BSWAP
 	bool
 	help
-        Modern versions of GCC (since 4.4) have builtin functions
-        for handling byte-swapping. Using these, instead of the old
-        inline assembler that the architecture code provides in the
-        __arch_bswapXX() macros, allows the compiler to see what's
-        happening and offers more opportunity for optimisation. In
-        particular, the compiler will be able to combine the byteswap
-        with a nearby load or store and use load-and-swap or
-        store-and-swap instructions if the architecture has them. It
-        should almost *never* result in code which is worse than the
-        hand-coded assembler in <asm/swab.h>. But just in case it
-        does, the use of the builtins is optional.
+	  Modern versions of GCC (since 4.4) have builtin functions
+	  for handling byte-swapping. Using these, instead of the old
+	  inline assembler that the architecture code provides in the
+	  __arch_bswapXX() macros, allows the compiler to see what's
+	  happening and offers more opportunity for optimisation. In
+	  particular, the compiler will be able to combine the byteswap
+	  with a nearby load or store and use load-and-swap or
+	  store-and-swap instructions if the architecture has them. It
+	  should almost *never* result in code which is worse than the
+	  hand-coded assembler in <asm/swab.h>. But just in case it
+	  does, the use of the builtins is optional.
 
-        Any architecture with load-and-swap or store-and-swap
-        instructions should set this. And it shouldn't hurt to set it
-        on architectures that don't have such instructions.
+	  Any architecture with load-and-swap or store-and-swap
+	  instructions should set this. And it shouldn't hurt to set it
+	  on architectures that don't have such instructions.
 
 config KRETPROBES
 	def_bool y
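Note: the load-and-swap fusion mentioned above is easy to see in plain C (a userspace illustration, not kernel code):

#include <stdint.h>
#include <string.h>

/* Read a big-endian 32-bit field from a buffer. With the bswap builtin
 * visible to the compiler, it may fuse the swap into the load (e.g.
 * movbe on x86, lwbrx on PowerPC) instead of emitting load + bswap. */
static uint32_t read_be32(const void *p)
{
	uint32_t v;
	memcpy(&v, p, sizeof(v));
	return __builtin_bswap32(v);	/* assumes a little-endian host */
}
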
@@ -720,13 +720,13 @@ config LTO_CLANG_FULL
 	depends on !COMPILE_TEST
 	select LTO_CLANG
 	help
-        This option enables Clang's full Link Time Optimization (LTO), which
-        allows the compiler to optimize the kernel globally. If you enable
-        this option, the compiler generates LLVM bitcode instead of ELF
-        object files, and the actual compilation from bitcode happens at
-        the LTO link step, which may take several minutes depending on the
-        kernel configuration. More information can be found from LLVM's
-        documentation:
+	  This option enables Clang's full Link Time Optimization (LTO), which
+	  allows the compiler to optimize the kernel globally. If you enable
+	  this option, the compiler generates LLVM bitcode instead of ELF
+	  object files, and the actual compilation from bitcode happens at
+	  the LTO link step, which may take several minutes depending on the
+	  kernel configuration. More information can be found from LLVM's
+	  documentation:
 
 	  https://llvm.org/docs/LinkTimeOptimization.html
 
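Note: a rough two-file illustration of what full LTO enables (a hypothetical example, not from the tree): because the "objects" stay LLVM bitcode until the final link, the compiler can optimize across translation units at the LTO link step.

/* util.c */
int scale(int x)
{
	return x * 3;
}

/* main.c -- built e.g. with:
 *   clang -O2 -flto -c util.c main.c
 *   clang -O2 -flto util.o main.o
 * At the LTO link step, scale() can be inlined into main() even
 * though it lives in a different translation unit. */
extern int scale(int x);

int main(void)
{
	return scale(14);
}
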
@@ -1330,9 +1330,9 @@ config ARCH_HAS_CC_PLATFORM
 	bool
 
 config HAVE_SPARSE_SYSCALL_NR
-       bool
-       help
-       An architecture should select this if its syscall numbering is sparse
+	bool
+	help
+	  An architecture should select this if its syscall numbering is sparse
 	  to save space. For example, MIPS architecture has a syscall array with
 	  entries at 4000, 5000 and 6000 locations. This option turns on syscall
 	  related optimizations for a given architecture.
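Note: a hedged sketch of the space issue behind this option (the table layout below is illustrative, not the actual MIPS implementation): indexing a dense table by the raw syscall number would waste thousands of slots, so dispatch rebases the number first.

#define SYSCALL_BASE	4000	/* illustrative: first number in the range */
#define NR_SYSCALLS	 450	/* illustrative: entries actually used */

typedef long (*syscall_fn_t)(void);
static syscall_fn_t sys_call_table[NR_SYSCALLS];

static long dispatch(unsigned long nr)
{
	/* Unsigned compare also rejects nr < SYSCALL_BASE. */
	if (nr - SYSCALL_BASE >= NR_SYSCALLS)
		return -38;	/* -ENOSYS */
	return sys_call_table[nr - SYSCALL_BASE]();
}
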
@@ -1356,35 +1356,35 @@ config HAVE_PREEMPT_DYNAMIC_CALL
 	depends on HAVE_STATIC_CALL
 	select HAVE_PREEMPT_DYNAMIC
 	help
-        An architecture should select this if it can handle the preemption
-        model being selected at boot time using static calls.
+	  An architecture should select this if it can handle the preemption
+	  model being selected at boot time using static calls.
 
-        Where an architecture selects HAVE_STATIC_CALL_INLINE, any call to a
-        preemption function will be patched directly.
+	  Where an architecture selects HAVE_STATIC_CALL_INLINE, any call to a
+	  preemption function will be patched directly.
 
-        Where an architecture does not select HAVE_STATIC_CALL_INLINE, any
-        call to a preemption function will go through a trampoline, and the
-        trampoline will be patched.
+	  Where an architecture does not select HAVE_STATIC_CALL_INLINE, any
+	  call to a preemption function will go through a trampoline, and the
+	  trampoline will be patched.
 
-        It is strongly advised to support inline static call to avoid any
-        overhead.
+	  It is strongly advised to support inline static call to avoid any
+	  overhead.
 
 config HAVE_PREEMPT_DYNAMIC_KEY
 	bool
 	depends on HAVE_ARCH_JUMP_LABEL
 	select HAVE_PREEMPT_DYNAMIC
 	help
-        An architecture should select this if it can handle the preemption
-        model being selected at boot time using static keys.
+	  An architecture should select this if it can handle the preemption
+	  model being selected at boot time using static keys.
 
-        Each preemption function will be given an early return based on a
-        static key. This should have slightly lower overhead than non-inline
-        static calls, as this effectively inlines each trampoline into the
-        start of its callee. This may avoid redundant work, and may
-        integrate better with CFI schemes.
+	  Each preemption function will be given an early return based on a
+	  static key. This should have slightly lower overhead than non-inline
+	  static calls, as this effectively inlines each trampoline into the
+	  start of its callee. This may avoid redundant work, and may
+	  integrate better with CFI schemes.
 
-        This will have greater overhead than using inline static calls as
-        the call to the preemption function cannot be entirely elided.
+	  This will have greater overhead than using inline static calls as
+	  the call to the preemption function cannot be entirely elided.
 
 config ARCH_WANT_LD_ORPHAN_WARN
 	bool
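Note: the static-call mechanism these options build on looks roughly like this in use (the names below are illustrative; the API lives in <linux/static_call.h>):

#include <linux/static_call.h>

static void preempt_model_full(void) { /* ... */ }
static void preempt_model_none(void) { /* ... */ }

/* Build-time default target. */
DEFINE_STATIC_CALL(my_preempt_hook, preempt_model_full);

void my_resched_point(void)
{
	/* With HAVE_STATIC_CALL_INLINE this call site is patched
	 * directly; otherwise it goes via a patchable trampoline. */
	static_call(my_preempt_hook)();
}

void my_boot_time_select(void)
{
	/* Boot-time model selection retargets every caller at once. */
	static_call_update(my_preempt_hook, preempt_model_none);
}
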
@@ -1407,8 +1407,8 @@ config ARCH_SUPPORTS_PAGE_TABLE_CHECK
 config ARCH_SPLIT_ARG64
 	bool
 	help
-        If a 32-bit architecture requires 64-bit arguments to be split into
-        pairs of 32-bit arguments, select this option.
+	  If a 32-bit architecture requires 64-bit arguments to be split into
+	  pairs of 32-bit arguments, select this option.
 
 config ARCH_HAS_ELFCORE_COMPAT
 	bool
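Note: what the split looks like in practice, as a hedged sketch (the helper name is made up): a 64-bit argument arrives in two 32-bit register halves, which the wrapper reassembles; lo/hi order is ABI-dependent.

#include <linux/types.h>

/* Illustrative only: merge the two 32-bit halves of a 64-bit
 * argument on a 32-bit architecture. */
static inline u64 my_merge_arg64(u32 lo, u32 hi)
{
	return ((u64)hi << 32) | lo;
}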