 import time
 import tokenize
 import warnings
-
-try:
-    from functools import lru_cache
-except ImportError:
-    def lru_cache(maxsize=128):  # noqa as it's a fake implementation.
-        """Does not really need a real a lru_cache, it's just
-        optimization, so let's just do nothing here. Python 3.2+ will
-        just get better performances, time to upgrade?
-        """
-        return lambda function: function
-
 from fnmatch import fnmatch
+from functools import lru_cache
 from optparse import OptionParser

 try:
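Note: with Python 2 support dropped, functools.lru_cache (in the standard library since Python 3.2) can be imported unconditionally, so the try/except fallback that silently disabled caching on Python 2 becomes dead code. A minimal sketch of the resulting pattern; slow_normalize is a hypothetical helper, not pycodestyle's actual code:

from functools import lru_cache


@lru_cache(maxsize=128)
def slow_normalize(name):
    # Hypothetical example: the result is cached per distinct ``name``.
    # Under the old fallback, Python 2 ran this uncached because the fake
    # decorator returned the function unchanged.
    return name.strip().lower()

Repeated calls such as slow_normalize("  PEP8  ") compute the value once and then hit the cache.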
@@ -301,12 +291,6 @@ def maximum_line_length(physical_line, max_line_length, multiline, |
             (len(chunks) == 2 and chunks[0] == '#')) and \
                 len(line) - len(chunks[-1]) < max_line_length - 7:
             return
-        if hasattr(line, 'decode'):  # Python 2
-            # The line could contain multi-byte characters
-            try:
-                length = len(line.decode('utf-8'))
-            except UnicodeError:
-                pass
         if length > max_line_length:
             return (max_line_length, "E501 line too long "
                     "(%d > %d characters)" % (length, max_line_length))
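The deleted branch only made sense when the line could be a Python 2 byte string; on Python 3 the line is already str, so len(line) counts characters rather than UTF-8 bytes and no re-decoding is needed. A rough illustration (not pycodestyle code):

line = "x = 'déjà vu'"           # Python 3 str, already decoded text
assert len(line) == 13           # character count, as used for E501

encoded = line.encode("utf-8")   # what the old Python 2 branch dealt with
assert len(encoded) == 15        # accented letters take two bytes each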
@@ -1459,12 +1443,6 @@ def comparison_type(logical_line, noqa): |
 
     Okay: if isinstance(obj, int):
     E721: if type(obj) is type(1):
-
-    When checking if an object is a string, keep in mind that it might
-    be a unicode string too! In Python 2.3, str and unicode have a
-    common base class, basestring, so you can do:
-
-    Okay: if isinstance(obj, basestring):
     """
     match = COMPARE_TYPE_REGEX.search(logical_line)
     if match and not noqa:
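With basestring gone in Python 3, the docstring no longer needs the Python 2 aside: there is a single text type, so an instance check is simply isinstance(obj, str). An illustrative sketch of the two styles the check distinguishes (not taken from the test suite):

obj = "hello"

if isinstance(obj, str):       # Okay: instance check, the recommended form
    pass
if type(obj) == type(""):      # the kind of type comparison E721 targets
    pass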
@@ -1787,12 +1765,6 @@ def maximum_doc_length(logical_line, max_doc_length, noqa, tokens): |
             if prev_token is None or prev_token in SKIP_TOKENS:
                 lines = line.splitlines()
                 for line_num, physical_line in enumerate(lines):
-                    if hasattr(physical_line, 'decode'):  # Python 2
-                        # The line could contain multi-byte characters
-                        try:
-                            physical_line = physical_line.decode('utf-8')
-                        except UnicodeError:
-                            pass
                     if start[0] + line_num == 1 and line.startswith('#!'):
                         return
                     length = len(physical_line)
|