Skip to content

Commit 658ac53

Browse files
committed
x86 asm: move rotation and bit instructions in from x86-assembly-cheat
1 parent 89084d2 commit 658ac53

File tree

10 files changed

+313
-0
lines changed

10 files changed

+313
-0
lines changed

README.adoc

Lines changed: 57 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -12378,6 +12378,63 @@ Bibliography:
1237812378
* link:userland/arch/x86_64/or.S[OR]
1237912379
* link:userland/arch/x86_64/xor.S[XOR]
1238012380

12381+
=== x86 shift and rotate instructions
12382+
12383+
<<intel-manual-1>> 5.1.5 "Shift and Rotate Instructions"
12384+
12385+
* link:userland/arch/x86_64/shl.S[SHL and SHR]
12386+
+
12387+
Shift left or right and insert 0 into the vacated bit.
12388+
+
12389+
CF == the bit that got shifted out.
12390+
+
12391+
Application: quick unsigned multiply and divide by powers of 2.
12392+
* link:userland/arch/x86_64/sal.S[SAL and SAR]
12393+
+
12394+
Application: signed multiply and divide by powers of 2.
12395+
+
12396+
Mnemonics: Shift Arithmetic Left and Right
12397+
+
12398+
Keeps the same sign on right shift.
12399+
+
12400+
Not directly exposed in C, for which right-shifting a negative signed value is implementation-defined behavior, but it does exist in Java via the `>>` operator (Java's `>>>` is the logical shift). C compilers can emit it however.
12401+
+
12402+
SHL and SAL are exactly the same and have the same encoding: https://stackoverflow.com/questions/8373415/difference-between-shl-and-sal-in-80x86/56621271#56621271
12403+
* link:userland/arch/x86_64/rol.S[ROL and ROR]
12404+
+
12405+
Rotates the bit that is going out around to the other side.
12406+
* link:userland/arch/x86_64/rcl.S[RCL and RCR]
12407+
+
12408+
Like ROL and ROR, but rotate through the carry bit instead, which effectively generates a rotation of N + 1 bits for an N-bit operand (e.g. 8 + 1 bits for a byte). TODO application.
12409+
12410+
=== x86 bit and byte instructions
12411+
12412+
<<intel-manual-1>> 5.1.6 "Bit and Byte Instructions"
12413+
12414+
* link:userland/arch/x86_64/bt.S[BT]
12415+
+
12416+
Bit test: test if the Nth bit of a register is set and store the result in the CF flag.
12417+
+
12418+
....
12419+
CF = reg[N]
12420+
....
12421+
* link:userland/arch/x86_64/btr.S[BTR]
12422+
+
12423+
Do a BT and then set the bit to 0.
12424+
* link:userland/arch/x86_64/btc.S[BTC]
12425+
+
12426+
Do a BT and then swap the value of the tested bit.
12427+
* link:userland/arch/x86_64/setcc.S[SETcc]
12428+
+
12429+
Set a byte of a register to 0 or 1 depending on the cc condition.
12430+
* link:userland/arch/x86_64/test.S[TEST]
12431+
+
12432+
Like <<x86-binary-arithmetic-instructions,CMP>> but does AND instead of SUB:
12433+
+
12434+
....
12435+
ZF = ((X & Y) == 0) ? 1 : 0
12436+
....
12437+
1238112438
=== x86 control transfer instructions
1238212439

1238312440
<<intel-manual-1>> 5.1.7 "Control Transfer Instructions"

userland/arch/x86_64/bt.S

Lines changed: 42 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,42 @@
/* https://github.com/cirosantilli/linux-kernel-module-cheat#x86-bit-and-byte-instructions
 *
 * BT: bit test. Copy bit N of the operand into CF.
 * The operand itself is not modified.
 */

#include <lkmc.h>

LKMC_PROLOGUE
    /* 0x5A == 0101 1010: bits 1, 3, 4 and 6 are set. */
    mov $0x5A, %r12

    bt $0, %r12w
    LKMC_ASSERT(jnc)

    bt $1, %r12w
    LKMC_ASSERT(jc)

    bt $2, %r12w
    LKMC_ASSERT(jnc)

    bt $3, %r12w
    LKMC_ASSERT(jc)

    bt $4, %r12w
    LKMC_ASSERT(jc)

    bt $5, %r12w
    LKMC_ASSERT(jnc)

    bt $6, %r12w
    LKMC_ASSERT(jc)

    bt $7, %r12w
    LKMC_ASSERT(jnc)

    /* BT only reads its operand: the register is unchanged. */
    LKMC_ASSERT_EQ(%r12, $0x5A)

#if 0
    /* There is no byte encoding for bt:
     * Error: operand size mismatch for `bt'
     */
    bt $0, %r12b
#endif
LKMC_EPILOGUE

userland/arch/x86_64/btc.S

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,16 @@
/* https://github.com/cirosantilli/linux-kernel-module-cheat#x86-bit-and-byte-instructions
 *
 * BTC: bit test and complement. Copy bit N of the operand into CF,
 * then flip that bit in the operand.
 */

#include <lkmc.h>

LKMC_PROLOGUE
    /* 0101 1010 */
    mov $0x5A, %r12

    /* Bit 0 was 0, so CF = 0, and the bit flips to 1. */
    btc $0, %r12
    LKMC_ASSERT(jnc)
    LKMC_ASSERT_EQ(%r12, $0x5B)

    /* Now 0101 1011: bit 0 is 1, so CF = 1, and the bit flips back to 0. */
    btc $0, %r12
    LKMC_ASSERT(jc)
    LKMC_ASSERT_EQ(%r12, $0x5A)
LKMC_EPILOGUE

userland/arch/x86_64/btr.S

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,16 @@
/* https://github.com/cirosantilli/linux-kernel-module-cheat#x86-bit-and-byte-instructions
 *
 * BTR: bit test and reset. Copy bit N of the operand into CF,
 * then clear that bit in the operand.
 */

#include <lkmc.h>

LKMC_PROLOGUE
    /* 0101 1010 */
    mov $0x5A, %r12

    /* Bit 1 was 1: CF = 1 and the bit is cleared. */
    btr $1, %r12
    LKMC_ASSERT(jc)
    LKMC_ASSERT_EQ(%r12, $0x58)

    /* Now 0101 1000: bit 1 is already 0, so CF = 0 and the value is unchanged. */
    btr $1, %r12
    LKMC_ASSERT(jnc)
    LKMC_ASSERT_EQ(%r12, $0x58)
LKMC_EPILOGUE

userland/arch/x86_64/rcl.S

Lines changed: 38 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,38 @@
/* https://github.com/cirosantilli/linux-kernel-module-cheat#x86-shift-and-rotate-instructions
 *
 * RCL / RCR: rotate through the carry flag, which effectively gives
 * a 9-bit rotation for a byte operand: CF + the 8 operand bits.
 */

#include <lkmc.h>

LKMC_PROLOGUE
    mov $0x81, %r12
    clc

    /* 1000 0001 rcl 1 with CF == 0: r12b = 0000 0010, CF = old bit 7 = 1. */
    rcl $1, %r12b
    /* The LKMC_ASSERT* macros make calls that clobber FLAGS, so we must
     * save and restore FLAGS across our asserts!
     * 2x PUSHF to maintain 16-byte stack alignment at the calls:
     * https://github.com/cirosantilli/linux-kernel-module-cheat#x86_64-calling-convention
     */
    pushf
    pushf
    LKMC_ASSERT(jc)
    LKMC_ASSERT_EQ(%r12, $2)

    /* 0000 0010 rcl 1 with CF == 1: r12b = 0000 0101, CF = 0. */
    popf
    rcl $1, %r12b
    pushf
    LKMC_ASSERT(jnc)
    LKMC_ASSERT_EQ(%r12, $5)

    /* 0000 0101 rcr 2 with CF == 0: back to r12b = 1000 0001, CF = 0. */
    popf
    rcr $2, %r12b
    pushf
    LKMC_ASSERT(jnc)
    LKMC_ASSERT_EQ(%r12, $0x81)

    /* 1000 0001 rcr 1 with CF == 0: r12b = 0100 0000, CF = old bit 0 = 1. */
    popf
    rcr $1, %r12b
    pushf
    LKMC_ASSERT(jc)
    LKMC_ASSERT_EQ(%r12, $0x40)

    /* Drop the two FLAGS copies still on the stack
     * (the alignment pushf and the last pushf). */
    add $16, %rsp
LKMC_EPILOGUE

userland/arch/x86_64/rol.S

Lines changed: 27 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,27 @@
/* https://github.com/cirosantilli/linux-kernel-module-cheat#x86-shift-and-rotate-instructions
 *
 * ROL / ROR: rotate. The bit shifted out wraps around to the other
 * side, and the last bit rotated is also copied into CF.
 */

#include <lkmc.h>

LKMC_PROLOGUE
    mov $0x81, %r12

    /* 1000 0001 rol 1 == 0000 0011, CF = old bit 7 = 1. */
    rol $1, %r12b
    LKMC_ASSERT(jc)
    LKMC_ASSERT_EQ(%r12, $3)

    /* 0000 0011 rol 1 == 0000 0110, CF = old bit 7 = 0. */
    rol $1, %r12b
    LKMC_ASSERT(jnc)
    LKMC_ASSERT_EQ(%r12, $6)

    /* 0000 0110 ror 2 == 1000 0001, CF = last bit rotated = 1. */
    ror $2, %r12b
    LKMC_ASSERT(jc)
    LKMC_ASSERT_EQ(%r12, $0x81)

    /* 1000 0001 ror 1 == 1100 0000, CF = old bit 0 = 1. */
    ror $1, %r12b
    LKMC_ASSERT(jc)
    LKMC_ASSERT_EQ(%r12, $0x0C0)
LKMC_EPILOGUE

userland/arch/x86_64/sal.S

Lines changed: 24 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,24 @@
/* https://github.com/cirosantilli/linux-kernel-module-cheat#x86-shift-and-rotate-instructions
 *
 * SAL / SAR: arithmetic shifts. SAL is identical to SHL; SAR keeps
 * the sign bit on right shifts (signed divide by a power of 2).
 */

#include <lkmc.h>

LKMC_PROLOGUE
    /* 0xFF == -1 in 2's complement with 8-bits. */
    mov $0xFF, %r12

    /* One-operand form shifts by 1: -1 * 2 == -2, CF = shifted-out 1. */
    sal %r12b
    LKMC_ASSERT(jc)
    /* 0xFE == -2 in 2's complement with 8-bits. */
    LKMC_ASSERT_EQ(%r12, $0xFE)

    /* SAR keeps the sign bit: -2 / 2 == -1, CF = shifted-out 0. */
    sar %r12b
    LKMC_ASSERT(jnc)
    /* -1 */
    LKMC_ASSERT_EQ(%r12, $0xFF)

    /* SAR rounds towards -infinity: -1 goes to -1 again, CF = shifted-out 1. */
    sar %r12b
    LKMC_ASSERT(jc)
    /* -1 */
    LKMC_ASSERT_EQ(%r12, $0xFF)
LKMC_EPILOGUE

userland/arch/x86_64/setcc.S

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,26 @@
/* https://github.com/cirosantilli/linux-kernel-module-cheat#x86-bit-and-byte-instructions
 *
 * SETcc: set a byte register to 1 if condition cc holds, else to 0.
 */

#include <lkmc.h>

LKMC_PROLOGUE
    mov $0xFF, %r12

    /* Set the carry flag. */
    stc
    /* Check for the carry flag. */
    setc %r12b
    /* The carry flag was set, so r12b was set to 1
     * (and the rest of r12 was already 0 from the 64-bit mov). */
    LKMC_ASSERT_EQ(%r12, $1)

    /* Clear the carry flag: setc now stores 0. */
    clc
    setc %r12b
    LKMC_ASSERT_EQ(%r12, $0)

#if 0
    /* The operand size can only be one byte:
     * Error: operand size mismatch for `setc'
     */
    setc %eax
#endif
LKMC_EPILOGUE

userland/arch/x86_64/shl.S

Lines changed: 43 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,43 @@
/* https://github.com/cirosantilli/linux-kernel-module-cheat#x86-shift-and-rotate-instructions
 *
 * SHL / SHR: logical shifts, inserting 0 into the vacated bit;
 * CF = the bit that was shifted out.
 */

#include <lkmc.h>

LKMC_PROLOGUE
    mov $0x81, %r12

    /* One-operand form: shift left by one. CF = old bit 7 = 1. */
    shl %r12b
    LKMC_ASSERT(jc)
    LKMC_ASSERT_EQ(%r12, $2)

    /* Shift left by one. CF = old bit 7 = 0. */
    shl %r12b
    LKMC_ASSERT(jnc)
    LKMC_ASSERT_EQ(%r12, $4)

    /* Shift right by one. CF = old bit 0 = 0. */
    shr %r12b
    LKMC_ASSERT(jnc)
    LKMC_ASSERT_EQ(%r12, $2)

    /* Shift left by 2 immediate.
     * Different encoding than the shift-by-one form.
     */
    shl $2, %r12b
    LKMC_ASSERT(jnc)
    LKMC_ASSERT_EQ(%r12, $8)

    /* Shift left by a count of 2 held in the cl register. */
    mov $2, %cl
    shl %cl, %r12b
    LKMC_ASSERT(jnc)
    LKMC_ASSERT_EQ(%r12, $0x20)

#if 0
    /* cl is the only possible register choice for the count:
     * Error: operand type mismatch for `shr'
     */
    shr %bl, %ax
#endif

LKMC_EPILOGUE

userland/arch/x86_64/test.S

Lines changed: 24 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,24 @@
/* https://github.com/cirosantilli/linux-kernel-module-cheat#x86-bit-and-byte-instructions
 *
 * TEST: like CMP, but computes AND instead of SUB, discarding the
 * result and setting only the flags.
 */

#include <lkmc.h>

LKMC_PROLOGUE
    /* 0xF0 & 0x00 == 0x00, so ZF is set. */
    mov $0xF0, %r12
    test $0, %r12b
    /* The AND result was 0. */
    LKMC_ASSERT(jz)
    /* TEST does not modify its operands: r12 is unchanged. */
    LKMC_ASSERT_EQ(%r12, $0x0F0)

    /* 0xF0 & 0x18 == 0x10 != 0x00, so ZF is clear. */
    mov $0xF0, %r12
    test $0x18, %r12b
    LKMC_ASSERT(jnz)
    LKMC_ASSERT_EQ(%r12, $0x0F0)

    /* test %rax, %rax vs cmp $0, %rax: test produces a shorter
     * encoding to decide if a register equals zero or not.
     * http://stackoverflow.com/questions/147173/x86-assembly-testl-eax-against-eax
     */
LKMC_EPILOGUE

0 commit comments

Comments
 (0)