|
35 | 35 | python2 = sys.version_info[0] < 3 |
36 | 36 |
|
37 | 37 | if python2: # pragma: nocover |
38 | | - import backports.csv as csv # NOQA |
39 | 38 | from itertools import izip_longest as zip_longest # NOQA |
40 | 39 | else: # pragma: nocover |
41 | | - import csv # NOQA |
42 | 40 | from itertools import zip_longest # NOQA |
43 | 41 |
|
| 42 | +if python2: # pragma: nocover |
| 43 | + from backports import csv # NOQA |
| 44 | + # Monkey-patch backports.csv until the upstream bug below is fixed: |
| 45 | + # https://github.com/ryanhiebert/backports.csv/issues/30 |
| 46 | + csv.dict = OrderedDict |
| 47 | +else: # pragma: nocover |
| 48 | + import csv # NOQA |
| 49 | + |
44 | 50 |
|
45 | 51 | on_windows = 'win32' in sys.platform |
46 | 52 |
|
@@ -117,48 +123,6 @@ def check_file_names(paths): |
117 | 123 | return errors |
118 | 124 |
|
119 | 125 |
|
120 | | -def check_duplicate_keys_about_file(about_text): |
121 | | - """ |
122 | | - Return a list of duplicated keys given a ABOUT text string. |
123 | | - """ |
124 | | - seen = set() |
125 | | - duplicates = set() |
126 | | - for line in about_text.splitlines(): |
127 | | - """ |
128 | | - Ignore all the continuation string, mapping/list dahs, string block and empty line. |
129 | | - """ |
130 | | - if not line.strip() : |
131 | | - continue |
132 | | - if line.startswith((' ', '\t')): |
133 | | - continue |
134 | | - if line.strip().startswith('-'): |
135 | | - continue |
136 | | - if ':' not in line: |
137 | | - continue |
138 | | - # Get the key name |
139 | | - key, _, _val = line.partition(':') |
140 | | - if key in seen: |
141 | | - duplicates.add(key) |
142 | | - else: |
143 | | - seen.add(key) |
144 | | - return sorted(duplicates) |
145 | | - |
146 | | - |
147 | | -def wrap_boolean_value(context): |
148 | | - bool_fields = ['redistribute', 'attribute', 'track_changes', 'modified'] |
149 | | - input = [] # NOQA |
150 | | - for line in context.splitlines(): |
151 | | - key = line.partition(':')[0] |
152 | | - if key in bool_fields: |
153 | | - value = "'" + line.partition(':')[2].strip() + "'" |
154 | | - updated_line = key + ': ' + value |
155 | | - input.append(updated_line) |
156 | | - else: |
157 | | - input.append(line) |
158 | | - updated_context = '\n'.join(input) |
159 | | - return updated_context |
160 | | - |
161 | | - |
162 | 126 | # TODO: rename to normalize_path |
163 | 127 | def get_absolute(location): |
164 | 128 | """ |
@@ -273,40 +237,6 @@ def resource_name(path): |
273 | 237 | return right.strip() |
274 | 238 |
|
275 | 239 |
|
276 | | -if python2: |
277 | | - class OrderedDictReader(csv.DictReader): |
278 | | - """ |
279 | | - A DictReader that return OrderedDicts |
280 | | - Copied from csv.DictReader itself backported from Python 3 |
281 | | - license: python |
282 | | - """ |
283 | | - def __next__(self): |
284 | | - if self.line_num == 0: |
285 | | - # Used only for its side effect. |
286 | | - self.fieldnames |
287 | | - row = next(self.reader) |
288 | | - self.line_num = self.reader.line_num |
289 | | - |
290 | | - # unlike the basic reader, we prefer not to return blanks, |
291 | | - # because we will typically wind up with a dict full of None |
292 | | - # values |
293 | | - while row == []: |
294 | | - row = next(self.reader) |
295 | | - d = OrderedDict(zip(self.fieldnames, row)) |
296 | | - lf = len(self.fieldnames) |
297 | | - lr = len(row) |
298 | | - if lf < lr: |
299 | | - d[self.restkey] = row[lf:] |
300 | | - elif lf > lr: |
301 | | - for key in self.fieldnames[lr:]: |
302 | | - d[key] = self.restval |
303 | | - return d |
304 | | - |
305 | | - next = __next__ |
306 | | -else: |
307 | | - OrderedDictReader = csv.DictReader |
308 | | - |
309 | | - |
310 | 240 | # FIXME: we should use a proper YAML file for this instead |
311 | 241 | def load_mapping(location, lowercase=True): |
312 | 242 | """ |
@@ -444,7 +374,7 @@ def load_csv(location, mapping_file=None): |
444 | 374 | # FIXME: why ignore encoding errors here? |
445 | 375 | with codecs.open(location, mode='rb', encoding='utf-8', |
446 | 376 | errors='ignore') as csvfile: |
447 | | - for row in OrderedDictReader(csvfile): |
| 377 | + for row in csv.DictReader(csvfile): |
448 | 378 | # convert all the column keys to lower case as the same |
449 | 379 | # behavior as when user use the --mapping |
450 | 380 | updated_row = OrderedDict( |
|
0 commit comments