@@ -82,12 +82,49 @@ typedef enum {
82
82
SM_WRITE ,
83
83
} Sleep_Mode ;
84
84
85
+ static void * alloc_stack (void )
86
+ {
87
+ #ifdef __APPLE__ // macOS does not support MAP_STACK and MAP_GROWSDOWN
88
+ char * addr = (char * )mmap (NULL , STACK_CAPACITY , PROT_WRITE |PROT_READ , MAP_PRIVATE |MAP_ANONYMOUS , -1 , 0 );
89
+ #else
90
+ char * addr = (char * )mmap (NULL , STACK_CAPACITY , PROT_WRITE |PROT_READ , MAP_PRIVATE |MAP_STACK |MAP_ANONYMOUS |MAP_GROWSDOWN , -1 , 0 );
91
+ #endif
92
+ assert (addr != MAP_FAILED );
93
+ return addr ;
94
+ }
95
+
96
+ static void free_stack (void * addr )
97
+ {
98
+ munmap (addr , STACK_CAPACITY );
99
+ }
100
+
101
// Prepare a freshly allocated stack so that coroutine_restore_context()
// can "return" into f(arg), and so that f's own return lands in
// coroutine_finish.
// @stack points one past the top of the stack (x86_64 stacks grow down).
// Returns the initial stack pointer to store as the coroutine's rsp.
static void *setup_stack(void *stack, void (*f)(void*), void *arg)
{
    void **rsp = (void**)stack;
    // @arch
#if __x86_64__
    // Layout mirrors, in reverse, exactly what coroutine_restore_context()
    // pops before its ret — do not reorder these stores.
    *(--rsp) = coroutine_finish; // return address seen by f when it returns
    *(--rsp) = f;                // ret target of coroutine_restore_context
    *(--rsp) = arg;              // push rdi — becomes f's first argument
    *(--rsp) = 0;                // push rbx
    *(--rsp) = 0;                // push rbp
    *(--rsp) = 0;                // push r12
    *(--rsp) = 0;                // push r13
    *(--rsp) = 0;                // push r14
    *(--rsp) = 0;                // push r15
#else
#error unsupported cpu
#endif
    return rsp;
}
120
+
85
121
// System V AMD64 calling convention (shared by Linux and macOS)
// arguments in %rdi, %rsi, %rdx, %rcx, %r8, and %r9
88
124
// Voluntarily yield the CPU: save %rdi plus the callee-saved registers
// on the current coroutine's stack, then jump into the scheduler with
// the saved stack pointer and sm = SM_NONE.
void __attribute__((naked)) coroutine_yield(void)
{
    // @arch
#if __x86_64__ && __linux__
    asm(
    "    pushq %rdi\n"
    "    pushq %rbp\n"
    "    pushq %rbx\n"
    "    pushq %r12\n"
    "    pushq %r13\n"
    "    pushq %r14\n"
    "    pushq %r15\n"
    "    movq %rsp, %rdi\n"     // rsp
    "    movq $0, %rsi\n"       // sm = SM_NONE
    "    jmp coroutine_switch_context\n");
#elif __x86_64__ && __APPLE__
    asm(
    "    pushq %rdi\n"
    "    pushq %rbp\n"
    "    pushq %rbx\n"
    "    pushq %r12\n"
    "    pushq %r13\n"
    "    pushq %r14\n"
    "    pushq %r15\n"
    "    movq %rsp, %rdi\n"     // rsp
    "    movq $0, %rsi\n"       // sm = SM_NONE
    // Mach-O prefixes C symbols with an underscore
    "    jmp _coroutine_switch_context\n");
#else
#error weird cpu/os combo
#endif
}
103
155
104
156
// Suspend the current coroutine until @fd becomes readable: save
// registers exactly like coroutine_yield(), then enter the scheduler
// with sm = SM_READ and the fd forwarded in %rdx.
void __attribute__((naked)) coroutine_sleep_read(int fd)
{
    // @arch
#if __x86_64__ && __linux__
    asm(
    "    pushq %rdi\n"
    "    pushq %rbp\n"
    "    pushq %rbx\n"
    "    pushq %r12\n"
    "    pushq %r13\n"
    "    pushq %r14\n"
    "    pushq %r15\n"
    "    movq %rdi, %rdx\n"     // fd — copied before %rdi is overwritten below
    "    movq %rsp, %rdi\n"     // rsp
    "    movq $1, %rsi\n"       // sm = SM_READ
    "    jmp coroutine_switch_context\n");
#elif __x86_64__ && __APPLE__
    asm(
    "    pushq %rdi\n"
    "    pushq %rbp\n"
    "    pushq %rbx\n"
    "    pushq %r12\n"
    "    pushq %r13\n"
    "    pushq %r14\n"
    "    pushq %r15\n"
    "    movq %rdi, %rdx\n"     // fd — copied before %rdi is overwritten below
    "    movq %rsp, %rdi\n"     // rsp
    "    movq $1, %rsi\n"       // sm = SM_READ
    // Mach-O prefixes C symbols with an underscore
    "    jmp _coroutine_switch_context\n");
#else
#error weird cpu/os combo
#endif
}
120
189
121
190
// Suspend the current coroutine until @fd becomes writable: save
// registers exactly like coroutine_yield(), then enter the scheduler
// with sm = SM_WRITE and the fd forwarded in %rdx.
void __attribute__((naked)) coroutine_sleep_write(int fd)
{
    // @arch
#if __x86_64__ && __linux__
    asm(
    "    pushq %rdi\n"
    "    pushq %rbp\n"
    "    pushq %rbx\n"
    "    pushq %r12\n"
    "    pushq %r13\n"
    "    pushq %r14\n"
    "    pushq %r15\n"
    "    movq %rdi, %rdx\n"     // fd — copied before %rdi is overwritten below
    "    movq %rsp, %rdi\n"     // rsp
    "    movq $2, %rsi\n"       // sm = SM_WRITE
    "    jmp coroutine_switch_context\n");
#elif __x86_64__ && __APPLE__
    asm(
    "    pushq %rdi\n"
    "    pushq %rbp\n"
    "    pushq %rbx\n"
    "    pushq %r12\n"
    "    pushq %r13\n"
    "    pushq %r14\n"
    "    pushq %r15\n"
    "    movq %rdi, %rdx\n"     // fd — copied before %rdi is overwritten below
    "    movq %rsp, %rdi\n"     // rsp
    "    movq $2, %rsi\n"       // sm = SM_WRITE
    // Mach-O prefixes C symbols with an underscore
    "    jmp _coroutine_switch_context\n");
#else
#error weird cpu/os combo
#endif
}
137
223
138
224
// Resume the context whose saved stack pointer is @rsp: restore the
// callee-saved registers plus %rdi (the pending coroutine argument)
// that coroutine_yield()/setup_stack() pushed, then ret into the
// saved return address.
void __attribute__((naked)) coroutine_restore_context(void *rsp)
{
    // @arch
    // Unlike the yield/sleep functions, this sequence references no C
    // symbols, so no Mach-O underscore prefix is involved — one branch
    // covers both Linux (ELF) and macOS instead of two identical copies.
#if __x86_64__ && (__linux__ || __APPLE__)
    asm(
    "    movq %rdi, %rsp\n"
    "    popq %r15\n"
    "    popq %r14\n"
    "    popq %r13\n"
    "    popq %r12\n"
    "    popq %rbx\n"
    "    popq %rbp\n"
    "    popq %rdi\n"
    "    ret\n");
#else
#error unsupported cpu/os combo
#endif
}
152
253
153
254
void coroutine_switch_context (void * rsp , Sleep_Mode sm , int fd )
@@ -207,7 +308,7 @@ void coroutine_finish(void)
207
308
if (contexts .count == 0 ) return ;
208
309
if (active .items [current ] == 0 ) {
209
310
for (size_t i = 1 ; i < contexts .count ; ++ i ) {
210
- munmap (contexts .items [i ].stack_base , STACK_CAPACITY );
311
+ free_stack (contexts .items [i ].stack_base );
211
312
}
212
313
free (contexts .items );
213
314
free (active .items );
@@ -255,22 +356,11 @@ void coroutine_go(void (*f)(void*), void *arg)
255
356
} else {
256
357
da_append (& contexts , ((Context ){0 }));
257
358
id = contexts .count - 1 ;
258
- contexts .items [id ].stack_base = mmap (NULL , STACK_CAPACITY , PROT_WRITE |PROT_READ , MAP_PRIVATE |MAP_STACK |MAP_ANONYMOUS |MAP_GROWSDOWN , -1 , 0 );
259
- assert (contexts .items [id ].stack_base != MAP_FAILED );
359
+ contexts .items [id ].stack_base = alloc_stack ();
260
360
}
261
361
262
- void * * rsp = (void * * )((char * )contexts .items [id ].stack_base + STACK_CAPACITY );
263
- // @arch
264
- * (-- rsp ) = coroutine_finish ;
265
- * (-- rsp ) = f ;
266
- * (-- rsp ) = arg ; // push rdi
267
- * (-- rsp ) = 0 ; // push rbx
268
- * (-- rsp ) = 0 ; // push rbp
269
- * (-- rsp ) = 0 ; // push r12
270
- * (-- rsp ) = 0 ; // push r13
271
- * (-- rsp ) = 0 ; // push r14
272
- * (-- rsp ) = 0 ; // push r15
273
- contexts .items [id ].rsp = rsp ;
362
+ void * rsp = ((char * )contexts .items [id ].stack_base + STACK_CAPACITY );
363
+ contexts .items [id ].rsp = setup_stack (rsp , f , arg );
274
364
275
365
da_append (& active , id );
276
366
}
0 commit comments