9
9
""" Context manager openers for various fileobject types
10
10
"""
11
11
12
- from os .path import splitext
13
- import gzip
14
12
import bz2
13
+ import gzip
14
+ import sys
15
+ from os .path import splitext
16
+ from distutils .version import LooseVersion
17
+
15
18
16
19
# Cap on the memory chunk gzip may use for a single read; applied to
# gzip file objects in _gzip_open when the interpreter supports it.
GZIP_MAX_READ_CHUNK = 100 * 1024 * 1024  # 100 MiB
18
21
19
22
20
23
class BufferedGzipFile(gzip.GzipFile):
    """GzipFile able to readinto buffer >= 2**32 bytes.

    This class only differs from gzip.GzipFile in Python 3.5.0,
    where it works around a known readinto issue.
    See https://bugs.python.org/issue25626
    """

    # Defining the overrides only on 3.5.0 avoids declaring readinto on
    # Python 2.6 (where gzip.GzipFile has no readinto at all) and limits
    # exposure to this workaround code.
    if sys.version_info[:3] == (3, 5, 0):
        def __init__(self, fileish, mode='rb', compresslevel=9,
                     buffer_size=2 ** 32 - 1):
            super(BufferedGzipFile, self).__init__(fileish, mode=mode,
                                                   compresslevel=compresslevel)
            # Largest single read handed to gzip.GzipFile.readinto.
            self.buffer_size = buffer_size

        def readinto(self, buf):
            """Uses self.buffer_size to do a buffered read."""
            total = len(buf)
            # Reads below 2**32 bytes are unaffected by the 3.5.0 bug.
            if total < 2 ** 32:
                return super(BufferedGzipFile, self).readinto(buf)

            # Work around https://bugs.python.org/issue25626 by filling
            # the buffer in chunks of at most self.buffer_size bytes.
            # NOTE(review): loop interior reconstructed from partial
            # context — confirm against upstream before relying on it.
            view = memoryview(buf)
            filled = 0
            while filled < total:
                want = min(total - filled, self.buffer_size)
                got = super(BufferedGzipFile, self).readinto(
                    view[filled:filled + want])
                filled += got
                if got != want:  # short read: EOF (or upstream error)
                    break
            return filled
55
61
56
62
57
63
def _gzip_open(fileish, *args, **kwargs):
    """Open `fileish` with BufferedGzipFile, enabling large read chunks.

    `*args` / `**kwargs` are passed straight through to BufferedGzipFile
    (and hence gzip.GzipFile). Returns the open gzip file object.
    """
    gzip_file = BufferedGzipFile(fileish, *args, **kwargs)

    # Speedup for #209; the attribute is not present in Python 3.5.
    # Open gzip files with faster reads on large files using larger
    # chunks. See https://github.com/nipy/nibabel/pull/210 for
    # discussion.
    # BUG FIX: the guard previously checked 'max_chunk_read' (words
    # transposed), which never exists, so the larger chunk size was
    # never applied; the real gzip.GzipFile attribute is
    # 'max_read_chunk'.
    if hasattr(gzip_file, 'max_read_chunk'):
        gzip_file.max_read_chunk = GZIP_MAX_READ_CHUNK

    return gzip_file
60
73
61
74
0 commit comments