@@ -449,16 +449,6 @@ def _tokenize(rl_gen, encoding):
     source = b"".join(rl_gen).decode(encoding)
     token = None
     for token in _generate_tokens_from_c_tokenizer(source, extra_tokens=True):
-        # TODO: Marta -> clean this up
-        if 6 < token.type <= 54:
-            token = token._replace(type=OP)
-        if token.type in {ASYNC, AWAIT}:
-            token = token._replace(type=NAME)
-        if token.type == NEWLINE:
-            l_start, c_start = token.start
-            l_end, c_end = token.end
-            token = token._replace(string='\n', start=(l_start, c_start), end=(l_end, c_end + 1))
-
         yield token
     if token is not None:
         last_line, _ = token.start
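For context, the deleted block applied three Python-side fixups to every token the C tokenizer produced; after this change those adjustments presumably happen inside the C tokenizer itself when `extra_tokens=True` is passed. A standalone reconstruction of that normalization, assuming the `ASYNC`/`AWAIT` constants the `token` module still exposed at the time (they were later removed):

```python
from token import OP, NAME, NEWLINE, ASYNC, AWAIT  # ASYNC/AWAIT: pre-removal token module

def _normalize(token):
    """Reconstruction of the deleted per-token fixups, as a standalone helper."""
    # Exact operator token types (the 7..54 range in this version's numbering)
    # were collapsed back into the generic OP type.
    if 6 < token.type <= 54:
        token = token._replace(type=OP)
    # ASYNC and AWAIT tokens were downgraded to plain NAME tokens.
    if token.type in {ASYNC, AWAIT}:
        token = token._replace(type=NAME)
    # NEWLINE tokens got an explicit '\n' string and an end column widened by one.
    if token.type == NEWLINE:
        l_start, c_start = token.start
        l_end, c_end = token.end
        token = token._replace(string='\n', start=(l_start, c_start),
                               end=(l_end, c_end + 1))
    return token
```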
@@ -550,8 +540,7 @@ def _generate_tokens_from_c_tokenizer(source, extra_tokens=False):
550540 """Tokenize a source reading Python code as unicode strings using the internal C tokenizer"""
551541 import _tokenize as c_tokenizer
552542 for info in c_tokenizer .TokenizerIter (source , extra_tokens = extra_tokens ):
-        tok, type, lineno, end_lineno, col_off, end_col_off, line = info
-        yield TokenInfo(type, tok, (lineno, col_off), (end_lineno, end_col_off), line)
+        yield TokenInfo._make(info)
 
 
 if __name__ == "__main__":
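The one-liner works because `TokenInfo` is a `namedtuple('TokenInfo', 'type string start end line')`, and `_make()` constructs an instance from any iterable in field order. The change therefore relies on `c_tokenizer.TokenizerIter` now yielding 5-tuples already ordered as `(type, string, start, end, line)`, rather than the flat 7-tuple the deleted lines unpacked and reordered. A minimal sketch with a hand-built tuple (values illustrative):

```python
from token import NAME
from tokenize import TokenInfo

# Illustrative 5-tuple in TokenInfo field order: (type, string, start, end, line).
info = (NAME, 'pass', (1, 0), (1, 4), 'pass\n')

# namedtuple._make() builds the instance positionally from any iterable, so the
# one-line replacement is equivalent to unpacking and reassembling by hand.
assert TokenInfo._make(info) == TokenInfo(NAME, 'pass', (1, 0), (1, 4), 'pass\n')
```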