@@ -166,29 +166,30 @@ boolean isSeekable(VirtualFrame frame, PBuffered self,
 
     @ImportStatic(PGuards.class)
     @TypeSystemReference(PythonArithmeticTypes.class)
+    @GenerateInline
+    @GenerateCached(false)
     // PyNumber_AsOff_t
     abstract static class AsOffNumberNode extends PNodeWithContext {
 
-        public abstract long execute(VirtualFrame frame, Object number, PythonBuiltinClassType err);
+        public abstract long execute(VirtualFrame frame, Node inliningTarget, Object number, PythonBuiltinClassType err);
 
         @Specialization
         static long doInt(long number, @SuppressWarnings("unused") PythonBuiltinClassType err) {
             return number;
         }
 
         @Specialization
-        static long toLong(VirtualFrame frame, Object number, PythonBuiltinClassType err,
-                        @Bind("this") Node inliningTarget,
-                        @Cached PRaiseNode raiseNode,
-                        @Cached PyNumberIndexNode indexNode,
-                        @Cached CastToJavaLongExactNode cast,
+        static long toLong(VirtualFrame frame, Node inliningTarget, Object number, PythonBuiltinClassType err,
+                        @Cached PRaiseNode.Lazy raiseNode,
+                        @Cached(inline = false) PyNumberIndexNode indexNode,
+                        @Cached(inline = false) CastToJavaLongExactNode cast,
                         @Cached IsBuiltinObjectProfile errorProfile) {
             Object index = indexNode.execute(frame, number);
             try {
                 return cast.execute(index);
             } catch (PException e) {
                 e.expect(inliningTarget, OverflowError, errorProfile);
-                throw raiseNode.raise(err, CANNOT_FIT_P_IN_OFFSET_SIZE, number);
+                throw raiseNode.get(inliningTarget).raise(err, CANNOT_FIT_P_IN_OFFSET_SIZE, number);
             } catch (CannotCastException e) {
                 throw CompilerDirectives.shouldNotReachHere();
             }
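The hunk above shows the Truffle DSL inlining idiom applied throughout this change: the node gets @GenerateInline and @GenerateCached(false), its execute method takes the caller's Node as the inlining target right after the frame, PRaiseNode is replaced by the lazily obtained PRaiseNode.Lazy, and helpers that should stay regular cached nodes are pinned with @Cached(inline = false). A minimal sketch of the idiom, using hypothetical node names (ExampleInlinedNode, useExample) and assuming the surrounding file's imports:

// Illustrative only: ExampleInlinedNode and useExample are not part of this patch.
@GenerateInline
@GenerateCached(false)
abstract static class ExampleInlinedNode extends PNodeWithContext {

    // the first non-frame parameter is the caller's adopted node (the inlining target)
    public abstract long execute(VirtualFrame frame, Node inliningTarget, Object value);

    @Specialization
    static long doGeneric(VirtualFrame frame, Node inliningTarget, Object value,
                    @Cached(inline = false) PyNumberIndexNode indexNode,     // kept as a regular cached node
                    @Cached(inline = false) CastToJavaLongExactNode cast) {  // not inlined
        return cast.execute(indexNode.execute(frame, value));
    }
}

// A cached caller binds itself once and threads the inlining target through:
@Specialization
static long useExample(VirtualFrame frame, Object value,
                @Bind("this") Node inliningTarget,
                @Cached ExampleInlinedNode example) {
    return example.execute(frame, inliningTarget, value);
}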
@@ -205,11 +206,11 @@ public RawTellNode(boolean ignore) {
 
         public abstract long execute(VirtualFrame frame, PBuffered self);
 
-        private static long tell(VirtualFrame frame, Object raw,
+        private static long tell(VirtualFrame frame, Node inliningTarget, Object raw,
                         PyObjectCallMethodObjArgs callMethod,
                         AsOffNumberNode asOffNumberNode) {
             Object res = callMethod.execute(frame, raw, T_TELL);
-            return asOffNumberNode.execute(frame, res, ValueError);
+            return asOffNumberNode.execute(frame, inliningTarget, res, ValueError);
         }
 
         /**
@@ -220,7 +221,7 @@ long bufferedRawTell(VirtualFrame frame, PBuffered self,
                         @Bind("this") Node inliningTarget,
                         @Shared("callMethod") @Cached PyObjectCallMethodObjArgs callMethod,
                         @Shared("asOffT") @Cached AsOffNumberNode asOffNumberNode) {
-            long n = tell(frame, self.getRaw(), callMethod, asOffNumberNode);
+            long n = tell(frame, inliningTarget, self.getRaw(), callMethod, asOffNumberNode);
             if (n < 0) {
                 throw raise(OSError, IO_STREAM_INVALID_POS, n);
             }
@@ -230,11 +231,12 @@ long bufferedRawTell(VirtualFrame frame, PBuffered self,
230
231
231
232
@ Specialization (guards = "ignore" )
232
233
static long bufferedRawTellIgnoreException (VirtualFrame frame , PBuffered self ,
234
+ @ Bind ("this" ) Node inliningTarget ,
233
235
@ Shared ("callMethod" ) @ Cached PyObjectCallMethodObjArgs callMethod ,
234
236
@ Shared ("asOffT" ) @ Cached AsOffNumberNode asOffNumberNode ) {
235
237
long n ;
236
238
try {
237
- n = tell (frame , self .getRaw (), callMethod , asOffNumberNode );
239
+ n = tell (frame , inliningTarget , self .getRaw (), callMethod , asOffNumberNode );
238
240
} catch (PException e ) {
239
241
n = -1 ;
240
242
// ignore
@@ -254,19 +256,21 @@ public static RawTellNode create() {
     /**
      * implementation of cpython/Modules/_io/bufferedio.c:_buffered_raw_seek
      */
+    @GenerateInline
+    @GenerateCached(false)
     abstract static class RawSeekNode extends PNodeWithContext {
 
-        public abstract long execute(VirtualFrame frame, PBuffered self, long target, int whence);
+        public abstract long execute(VirtualFrame frame, Node inliningTarget, PBuffered self, long target, int whence);
 
         @Specialization
-        static long bufferedRawSeek(VirtualFrame frame, PBuffered self, long target, int whence,
-                        @Cached PRaiseNode raise,
-                        @Cached PyObjectCallMethodObjArgs callMethod,
+        static long bufferedRawSeek(VirtualFrame frame, Node inliningTarget, PBuffered self, long target, int whence,
+                        @Cached PRaiseNode.Lazy raise,
+                        @Cached(inline = false) PyObjectCallMethodObjArgs callMethod,
                         @Cached AsOffNumberNode asOffNumberNode) {
             Object res = callMethod.execute(frame, self.getRaw(), T_SEEK, target, whence);
-            long n = asOffNumberNode.execute(frame, res, ValueError);
+            long n = asOffNumberNode.execute(frame, inliningTarget, res, ValueError);
             if (n < 0) {
-                raise.raise(OSError, IO_STREAM_INVALID_POS, n);
+                raise.get(inliningTarget).raise(OSError, IO_STREAM_INVALID_POS, n);
             }
             self.setAbsPos(n);
             return n;
@@ -282,12 +286,13 @@ abstract static class FlushAndRewindUnlockedNode extends PNodeWithContext {
282
286
283
287
@ Specialization (guards = {"self.isReadable()" , "!self.isWritable()" })
284
288
protected static void readOnly (VirtualFrame frame , PBuffered self ,
289
+ @ Bind ("this" ) Node inliningTarget ,
285
290
@ Cached RawSeekNode rawSeekNode ) {
286
291
/*
287
292
* Rewind the raw stream so that its position corresponds to the current logical
288
293
* position.
289
294
*/
290
- long n = rawSeekNode .execute (frame , self , -rawOffset (self ), 1 );
295
+ long n = rawSeekNode .execute (frame , inliningTarget , self , -rawOffset (self ), 1 );
291
296
self .resetRead (); // _bufferedreader_reset_buf
292
297
assert n != -1 ;
293
298
}
@@ -300,14 +305,15 @@ protected static void writeOnly(VirtualFrame frame, PBuffered self,
300
305
301
306
@ Specialization (guards = {"self.isReadable()" , "self.isWritable()" })
302
307
protected static void readWrite (VirtualFrame frame , PBuffered self ,
308
+ @ Bind ("this" ) Node inliningTarget ,
303
309
@ Cached BufferedWriterNodes .FlushUnlockedNode flushUnlockedNode ,
304
310
@ Cached RawSeekNode rawSeekNode ) {
305
311
flushUnlockedNode .execute (frame , self );
306
312
/*
307
313
* Rewind the raw stream so that its position corresponds to the current logical
308
314
* position.
309
315
*/
310
- long n = rawSeekNode .execute (frame , self , -rawOffset (self ), 1 );
316
+ long n = rawSeekNode .execute (frame , inliningTarget , self , -rawOffset (self ), 1 );
311
317
self .resetRead (); // _bufferedreader_reset_buf
312
318
assert n != -1 ;
313
319
}
@@ -362,7 +368,7 @@ static long seek(VirtualFrame frame, PBuffered self, long off, int whence,
                 }
             }
 
-            lock.enter(self);
+            lock.enter(inliningTarget, self);
             try {
                 /* Fallback: invoke raw seek() method and clear buffer */
                 if (isWriteableProfile.profile(inliningTarget, self.isWritable())) {
@@ -372,7 +378,7 @@ static long seek(VirtualFrame frame, PBuffered self, long off, int whence,
372
378
if (whenceSeekCur ) {
373
379
target -= rawOffset (self );
374
380
}
375
- long n = rawSeekNode .execute (frame , self , target , whence );
381
+ long n = rawSeekNode .execute (frame , inliningTarget , self , target , whence );
376
382
self .setRawPos (-1 );
377
383
if (selfIsReadable ) {
378
384
self .resetRead (); // _bufferedreader_reset_buf
@@ -385,23 +391,24 @@ static long seek(VirtualFrame frame, PBuffered self, long off, int whence,
     }
 
     // TODO: experiment with threads count to avoid locking.
+    @GenerateInline
+    @GenerateCached(false)
     abstract static class EnterBufferedNode extends Node {
 
-        public abstract void execute(PBuffered self);
+        public abstract void execute(Node inliningTarget, PBuffered self);
 
         @Specialization
-        static void doEnter(PBuffered self,
-                        @Bind("this") Node inliningTarget,
+        static void doEnter(Node inliningTarget, PBuffered self,
                         @Cached EnterBufferedBusyNode enterBufferedBusyNode,
                         @Cached InlinedConditionProfile isBusy) {
             if (isBusy.profile(inliningTarget, !self.getLock().acquireNonBlocking())) {
-                enterBufferedBusyNode.execute(self);
+                enterBufferedBusyNode.execute(inliningTarget, self);
             }
             self.setOwner(ThreadModuleBuiltins.GetCurrentThreadIdNode.getId());
         }
 
-        void enter(PBuffered self) {
-            execute(self);
+        void enter(Node inliningTarget, PBuffered self) {
+            execute(inliningTarget, self);
         }
 
         static void leave(PBuffered self) {
@@ -413,37 +420,41 @@ static void leave(PBuffered self) {
     /**
      * implementation of cpython/Modules/_io/bufferedio.c:_enter_buffered_busy
      */
-    abstract static class EnterBufferedBusyNode extends PNodeWithRaise {
+    @GenerateInline
+    @GenerateCached(false)
+    abstract static class EnterBufferedBusyNode extends PNodeWithContext {
 
-        public abstract void execute(PBuffered self);
+        public abstract void execute(Node inliningTarget, PBuffered self);
 
         @Specialization(guards = {"!self.isOwn()", "!getContext().isFinalizing()"})
-        void normal(PBuffered self,
-                        @Cached GilNode gil) {
+        static void normal(Node inliningTarget, PBuffered self,
+                        @Cached(inline = false) GilNode gil) {
             gil.release(true);
             try {
-                self.getLock().acquireBlocking(this);
+                self.getLock().acquireBlocking(inliningTarget);
             } finally {
                 gil.acquire();
             }
         }
 
         @Specialization(guards = {"!self.isOwn()", "getContext().isFinalizing()"})
-        void finalizing(PBuffered self) {
+        static void finalizing(Node inliningTarget, PBuffered self,
+                        @Shared @Cached PRaiseNode.Lazy lazyRaise) {
             /*
              * When finalizing, we don't want a deadlock to happen with daemon threads abruptly shut
              * down while they owned the lock. Therefore, only wait for a grace period (1 s.). Note
             * that non-daemon threads have already exited here, so this shouldn't affect carefully
             * written threaded I/O code.
             */
-            if (!self.getLock().acquireTimeout(this, (long) 1e3)) {
-                throw raise(SystemError, SHUTDOWN_POSSIBLY_DUE_TO_DAEMON_THREADS);
+            if (!self.getLock().acquireTimeout(inliningTarget, (long) 1e3)) {
+                throw lazyRaise.get(inliningTarget).raise(SystemError, SHUTDOWN_POSSIBLY_DUE_TO_DAEMON_THREADS);
             }
         }
 
         @Specialization(guards = "self.isOwn()")
-        void error(PBuffered self) {
-            throw raise(RuntimeError, REENTRANT_CALL_INSIDE_P, self);
+        static void error(Node inliningTarget, PBuffered self,
+                        @Shared @Cached PRaiseNode.Lazy lazyRaise) {
+            throw lazyRaise.get(inliningTarget).raise(RuntimeError, REENTRANT_CALL_INSIDE_P, self);
         }
     }
 }
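With EnterBufferedBusyNode no longer extending PNodeWithRaise, its error paths go through a @Shared PRaiseNode.Lazy, which only looks up the underlying raise node via get(inliningTarget) on the branch that actually raises. A minimal sketch of that pattern in isolation (CheckPosNode is a made-up name; annotations, types, and message constants are reused from this file):

// Illustrative only: CheckPosNode is not part of this patch.
@GenerateInline
@GenerateCached(false)
abstract static class CheckPosNode extends PNodeWithContext {

    public abstract void execute(Node inliningTarget, long pos);

    @Specialization
    static void check(Node inliningTarget, long pos,
                    @Cached PRaiseNode.Lazy lazyRaise) {
        if (pos < 0) {
            // the underlying PRaiseNode is only obtained on this error path
            throw lazyRaise.get(inliningTarget).raise(OSError, IO_STREAM_INVALID_POS, pos);
        }
    }
}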