
Commit 54e5265

Monkey patch csv and remove unused code
* Monkey patch backports.csv to use OrderedDict and remove OrderedDictReader
* Remove unused function check_duplicate_keys_about_file

Signed-off-by: Philippe Ombredanne <[email protected]>
1 parent 45696f1

File tree: 1 file changed (+9, -79 lines)

src/attributecode/util.py

Lines changed: 9 additions & 79 deletions
@@ -35,12 +35,18 @@
 python2 = sys.version_info[0] < 3
 
 if python2:  # pragma: nocover
-    import backports.csv as csv  # NOQA
     from itertools import izip_longest as zip_longest  # NOQA
 else:  # pragma: nocover
-    import csv  # NOQA
     from itertools import zip_longest  # NOQA
 
+if python2:  # pragma: nocover
+    from backports import csv  # NOQA
+    # monkey patch backports.csv until bug is fixed
+    # https://github.com/ryanhiebert/backports.csv/issues/30
+    csv.dict = OrderedDict
+else:  # pragma: nocover
+    import csv  # NOQA
+
 
 on_windows = 'win32' in sys.platform
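The one-line patch works because a bare name like dict inside a module is resolved through that module's globals before the builtins, so binding csv.dict = OrderedDict shadows the builtin dict for backports.csv alone, where DictReader builds each row with a plain dict(...) call. A self-contained sketch of the mechanism, using a hypothetical stand-in module rather than the real backports.csv:

# A self-contained sketch (not code from this repo) showing why the
# one-line monkey patch works: a bare `dict(...)` call inside a module
# is resolved through that module's globals before the builtins, so
# binding an attribute named `dict` on the module shadows the builtin
# for that module only. `demo` is a hypothetical stand-in module.
import types
from collections import OrderedDict

demo = types.ModuleType('demo')
exec('def make_row(pairs): return dict(pairs)', demo.__dict__)

print(type(demo.make_row([('a', 1)])))  # plain dict: the builtin wins
demo.dict = OrderedDict  # same trick as csv.dict = OrderedDict above
print(type(demo.make_row([('a', 1)])))  # now collections.OrderedDict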

@@ -117,48 +123,6 @@ def check_file_names(paths):
     return errors
 
 
-def check_duplicate_keys_about_file(about_text):
-    """
-    Return a list of duplicated keys given a ABOUT text string.
-    """
-    seen = set()
-    duplicates = set()
-    for line in about_text.splitlines():
-        """
-        Ignore all the continuation string, mapping/list dahs, string block and empty line.
-        """
-        if not line.strip():
-            continue
-        if line.startswith((' ', '\t')):
-            continue
-        if line.strip().startswith('-'):
-            continue
-        if ':' not in line:
-            continue
-        # Get the key name
-        key, _, _val = line.partition(':')
-        if key in seen:
-            duplicates.add(key)
-        else:
-            seen.add(key)
-    return sorted(duplicates)
-
-
-def wrap_boolean_value(context):
-    bool_fields = ['redistribute', 'attribute', 'track_changes', 'modified']
-    input = []  # NOQA
-    for line in context.splitlines():
-        key = line.partition(':')[0]
-        if key in bool_fields:
-            value = "'" + line.partition(':')[2].strip() + "'"
-            updated_line = key + ': ' + value
-            input.append(updated_line)
-        else:
-            input.append(line)
-    updated_context = '\n'.join(input)
-    return updated_context
-
-
 # TODO: rename to normalize_path
 def get_absolute(location):
     """
@@ -273,40 +237,6 @@ def resource_name(path):
     return right.strip()
 
 
-if python2:
-    class OrderedDictReader(csv.DictReader):
-        """
-        A DictReader that return OrderedDicts
-        Copied from csv.DictReader itself backported from Python 3
-        license: python
-        """
-        def __next__(self):
-            if self.line_num == 0:
-                # Used only for its side effect.
-                self.fieldnames
-            row = next(self.reader)
-            self.line_num = self.reader.line_num
-
-            # unlike the basic reader, we prefer not to return blanks,
-            # because we will typically wind up with a dict full of None
-            # values
-            while row == []:
-                row = next(self.reader)
-            d = OrderedDict(zip(self.fieldnames, row))
-            lf = len(self.fieldnames)
-            lr = len(row)
-            if lf < lr:
-                d[self.restkey] = row[lf:]
-            elif lf > lr:
-                for key in self.fieldnames[lr:]:
-                    d[key] = self.restval
-            return d
-
-        next = __next__
-else:
-    OrderedDictReader = csv.DictReader
-
-
 # FIXME: we should use a proper YAML file for this instead
 def load_mapping(location, lowercase=True):
     """
@@ -444,7 +374,7 @@ def load_csv(location, mapping_file=None):
     # FIXME: why ignore encoding errors here?
     with codecs.open(location, mode='rb', encoding='utf-8',
                      errors='ignore') as csvfile:
-        for row in OrderedDictReader(csvfile):
+        for row in csv.DictReader(csvfile):
             # convert all the column keys to lower case as the same
             # behavior as when user use the --mapping
             updated_row = OrderedDict(
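The hunk is cut off by the diff context at the OrderedDict( call; for illustration only, the key-lowercasing it leads into would look something like this sketch (the exact expression in util.py may differ):

# Hypothetical sketch of the lowercasing step the hunk trails into;
# not a verbatim copy of the code that follows in util.py.
updated_row = OrderedDict(
    (key.lower(), value) for key, value in row.items()
)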
