Skip to content

Commit 314033d

Browse files
gfyoung authored and jreback committed
MAINT: Remove E501 flake8 errors (#321)
1 parent 90f5257 commit 314033d

File tree

17 files changed

+226
-119
lines changed

17 files changed

+226
-119
lines changed

.travis.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -72,7 +72,7 @@ script:
7272
- export ENIGMA_API_KEY=$ENIGMA_API_KEY
7373
- pytest -s --cov=pandas_datareader --cov-report xml:/tmp/cov-datareader.xml --junitxml=/tmp/datareader.xml
7474
- flake8 --version
75-
- flake8 --ignore E501 pandas_datareader
75+
- flake8 pandas_datareader
7676

7777
after_success:
7878
- coveralls

pandas_datareader/base.py

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -127,7 +127,8 @@ def _read_lines(self, out):
127127
rs = rs[:-1]
128128
# Get rid of unicode characters in index name.
129129
try:
130-
rs.index.name = rs.index.name.decode('unicode_escape').encode('ascii', 'ignore')
130+
rs.index.name = rs.index.name.decode(
131+
'unicode_escape').encode('ascii', 'ignore')
131132
except AttributeError:
132133
# Python 3 string has no decode method.
133134
rs.index.name = rs.index.name.encode('ascii', 'ignore').decode()
@@ -152,7 +153,8 @@ def read(self):
152153
""" read data """
153154
# If a single symbol, (e.g., 'GOOG')
154155
if isinstance(self.symbols, (compat.string_types, int)):
155-
df = self._read_one_data(self.url, params=self._get_params(self.symbols))
156+
df = self._read_one_data(self.url,
157+
params=self._get_params(self.symbols))
156158
# Or multiple symbols, (e.g., ['GOOG', 'AAPL', 'MSFT'])
157159
elif isinstance(self.symbols, DataFrame):
158160
df = self._dl_mult_symbols(self.symbols.index)
@@ -167,7 +169,8 @@ def _dl_mult_symbols(self, symbols):
167169
for sym_group in _in_chunks(symbols, self.chunksize):
168170
for sym in sym_group:
169171
try:
170-
stocks[sym] = self._read_one_data(self.url, self._get_params(sym))
172+
stocks[sym] = self._read_one_data(self.url,
173+
self._get_params(sym))
171174
passed.append(sym)
172175
except IOError:
173176
msg = 'Failed to read symbol: {0!r}, replacing with NaN.'

pandas_datareader/data.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -90,7 +90,8 @@ def DataReader(name, data_source=None, start=None, end=None,
9090
# Data from Yahoo! Finance
9191
gs = DataReader("GS", "yahoo")
9292
93-
# Corporate Actions (Dividend and Split Data) with ex-dates from Yahoo! Finance
93+
# Corporate Actions (Dividend and Split Data)
94+
# with ex-dates from Yahoo! Finance
9495
gs = DataReader("GS", "yahoo-actions")
9596
9697
# Data from Google Finance

pandas_datareader/enigma.py

Lines changed: 16 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -12,20 +12,24 @@
1212

1313
class EnigmaReader(_BaseReader):
1414
"""
15-
Collects Enigma data located at the specified datapath and returns a pandas DataFrame.
15+
Collects Enigma data located at the specified datapath and
16+
returns a pandas DataFrame.
1617
1718
Usage (high-level):
1819
```
1920
import pandas_datareader as pdr
2021
df = pdr.get_data_enigma('enigma.inspections.restaurants.fl')
2122
22-
#in the event that ENIGMA_API_KEY does not exist in your env, it can be supplied as the second arg:
23-
df = prd.get_data_enigma('enigma.inspections.restaurants.fl', 'ARIAMFHKJMISF38UT')
23+
# in the event that ENIGMA_API_KEY does not exist in your env,
24+
# it can be supplied as the second arg:
25+
df = prd.get_data_enigma('enigma.inspections.restaurants.fl',
26+
... 'ARIAMFHKJMISF38UT')
2427
```
2528
2629
Usage:
2730
```
28-
df = EnigmaReader(datapath='enigma.inspections.restaurants.fl', api_key='ARIAMFHKJMISF38UT').read()
31+
df = EnigmaReader(datapath='enigma.inspections.restaurants.fl',
32+
... api_key='ARIAMFHKJMISF38UT').read()
2933
```
3034
"""
3135

@@ -42,16 +46,18 @@ def __init__(self,
4246
if api_key is None:
4347
self._api_key = os.getenv('ENIGMA_API_KEY')
4448
if self._api_key is None:
45-
raise ValueError(
46-
"""Please provide an Enigma API key or set the ENIGMA_API_KEY environment variable\n
47-
If you do not have an API key, you can get one here: https://app.enigma.io/signup""")
49+
raise ValueError("Please provide an Enigma API key or set "
50+
"the ENIGMA_API_KEY environment variable\n"
51+
"If you do not have an API key, you can get "
52+
"one here: https://app.enigma.io/signup")
4853
else:
4954
self._api_key = api_key
5055

5156
self._datapath = datapath
5257
if not isinstance(self._datapath, compat.string_types):
5358
raise ValueError(
54-
"The Enigma datapath must be a string (ex: 'enigma.inspections.restaurants.fl')")
59+
"The Enigma datapath must be a string (ex: "
60+
"'enigma.inspections.restaurants.fl')")
5561

5662
@property
5763
def url(self):
@@ -95,5 +101,6 @@ def extract_export_url(self, delay=10, max_attempts=10):
95101

96102
def read(self):
97103
export_gzipped_req = self._request(self.extract_export_url())
98-
decompressed_data = self._decompress_export(export_gzipped_req.content).decode("utf-8")
104+
decompressed_data = self._decompress_export(
105+
export_gzipped_req.content).decode("utf-8")
99106
return pd.read_csv(StringIO(decompressed_data))

pandas_datareader/eurostat.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,8 @@ def dsd_url(self):
2727
if not isinstance(self.symbols, compat.string_types):
2828
raise ValueError('data name must be string')
2929

30-
return '{0}/datastructure/ESTAT/DSD_{1}'.format(self._URL, self.symbols)
30+
return '{0}/datastructure/ESTAT/DSD_{1}'.format(
31+
self._URL, self.symbols)
3132

3233
def _read_one_data(self, url, params):
3334
resp_dsd = self._get_response(self.dsd_url)

pandas_datareader/famafrench.py

Lines changed: 8 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -115,10 +115,12 @@ def _read_one_data(self, url, params):
115115
shape = '({0} rows x {1} cols)'.format(*df.shape)
116116
table_desc.append('{0} {1}'.format(title, shape).strip())
117117

118-
descr = '{0}\n{1}\n\n'.format(self.symbols.replace('_', ' '), len(self.symbols) * '-')
118+
descr = '{0}\n{1}\n\n'.format(self.symbols.replace('_', ' '),
119+
len(self.symbols) * '-')
119120
if doc_chunks:
120121
descr += ' '.join(doc_chunks).replace(2 * ' ', ' ') + '\n\n'
121-
table_descr = map(lambda x: '{0:3} : {1}'.format(*x), enumerate(table_desc))
122+
table_descr = map(lambda x: '{0:3} : {1}'.format(*x),
123+
enumerate(table_desc))
122124
datasets['DESCR'] = descr + '\n'.join(table_descr)
123125

124126
return datasets
@@ -139,7 +141,9 @@ def get_available_datasets(self):
139141
response = self.session.get(_URL + 'data_library.html')
140142
root = document_fromstring(response.content)
141143

142-
l = filter(lambda x: x.startswith(_URL_PREFIX) and x.endswith(_URL_SUFFIX),
143-
[e.attrib['href'] for e in root.findall('.//a') if 'href' in e.attrib])
144+
l = filter(lambda x: (x.startswith(_URL_PREFIX) and
145+
x.endswith(_URL_SUFFIX)),
146+
[(e.attrib['href'] for e in root.findall('.//a')
147+
if 'href' in e.attrib)])
144148

145149
return lmap(lambda x: x[len(_URL_PREFIX):-len(_URL_SUFFIX)], l)

pandas_datareader/fred.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -37,8 +37,8 @@ def fetch_data(url, name):
3737
return data.truncate(self.start, self.end)
3838
except KeyError: # pragma: no cover
3939
if data.ix[3].name[7:12] == 'Error':
40-
raise IOError("Failed to get the data. Check that {0!r} is "
41-
"a valid FRED series.".format(name))
40+
raise IOError("Failed to get the data. Check that "
41+
"{0!r} is a valid FRED series.".format(name))
4242
raise
4343
df = concat([fetch_data(url, n) for url, n in zip(urls, names)],
4444
axis=1, join='outer')

pandas_datareader/google/options.py

Lines changed: 23 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -60,14 +60,15 @@ def get_options_data(self, month=None, year=None, expiry=None):
6060
6161
Parameters
6262
----------
63-
month : number, int, optional(default=None)
63+
month : number, int, optional (default=None)
6464
The month the options expire. This should be either 1 or 2
6565
digits.
6666
67-
year : number, int, optional(default=None)
67+
year : number, int, optional (default=None)
6868
The year the options expire. This should be a 4 digit int.
6969
70-
expiry : date-like or convertible or list-like object, optional (default=None)
70+
expiry : date-like or convertible or
71+
list-like object, optional (default=None)
7172
The date (or dates) when options expire (defaults to current month)
7273
7374
Returns
@@ -93,14 +94,16 @@ def get_options_data(self, month=None, year=None, expiry=None):
9394
9495
Notes
9596
-----
96-
Note: Format of returned data frame is dependent on Google and may change.
97+
Note: Format of returned data frame is dependent on
98+
Google and may change.
9799
98100
>>> goog = Options('goog', 'google') # Create object
99101
>>> goog.get_options_data(expiry=goog.expiry_dates[0]) # Get data
100102
101103
"""
102104
if month is not None or year is not None:
103-
raise NotImplementedError('month and year parameters cannot be used')
105+
raise NotImplementedError('month and year parameters '
106+
'cannot be used')
104107
if expiry is None:
105108
raise ValueError('expiry has to be set')
106109
d = self._load_data(expiry)
@@ -116,12 +119,14 @@ def expiry_dates(self):
116119
except AttributeError:
117120
# has to be a non-valid date, to trigger returning 'expirations'
118121
d = self._load_data(dt.datetime(2016, 1, 3))
119-
self._expiry_dates = [dt.date(x['y'], x['m'], x['d']) for x in d['expirations']]
122+
self._expiry_dates = [dt.date(x['y'], x['m'], x['d'])
123+
for x in d['expirations']]
120124
return self._expiry_dates
121125

122126
def _load_data(self, expiry):
123-
url = self._OPTIONS_BASE_URL.format(sym=self.symbol, day=expiry.day,
124-
month=expiry.month, year=expiry.year)
127+
url = self._OPTIONS_BASE_URL.format(
128+
sym=self.symbol, day=expiry.day,
129+
month=expiry.month, year=expiry.year)
125130
s = re.sub(r'(\w+):', '"\\1":', self._read_url_as_StringIO(url).read())
126131
return json.loads(s)
127132

@@ -136,7 +141,8 @@ def _process_data(self, jd, expiry):
136141
'Open_Int', 'Root', 'Underlying_Price', 'Quote_Time']
137142
indexes = ['Strike', 'Expiry', 'Type', 'Symbol']
138143
rows_list, index = self._process_rows(jd, now, expiry)
139-
df = DataFrame(rows_list, columns=columns, index=MultiIndex.from_tuples(index, names=indexes))
144+
df = DataFrame(rows_list, columns=columns,
145+
index=MultiIndex.from_tuples(index, names=indexes))
140146

141147
# Make dtype consistent, requires float64 as there can be NaNs
142148
df['Vol'] = df['Vol'].astype('float64')
@@ -150,9 +156,12 @@ def _process_rows(self, jd, now, expiry):
150156
for key, typ in [['calls', 'call'], ['puts', 'put']]:
151157
for row in jd[key]:
152158
d = {}
153-
for dkey, rkey, ntype in [('Last', 'p', float), ('Bid', 'b', float),
154-
('Ask', 'a', float), ('Chg', 'c', float),
155-
('PctChg', 'cp', float), ('Vol', 'vol', int),
159+
for dkey, rkey, ntype in [('Last', 'p', float),
160+
('Bid', 'b', float),
161+
('Ask', 'a', float),
162+
('Chg', 'c', float),
163+
('PctChg', 'cp', float),
164+
('Vol', 'vol', int),
156165
('Open_Int', 'oi', int)]:
157166
try:
158167
d[dkey] = ntype(row[rkey].replace(',', ''))
@@ -162,5 +171,6 @@ def _process_rows(self, jd, now, expiry):
162171
d['Underlying_Price'] = jd['underlying_price']
163172
d['Quote_Time'] = now
164173
rows_list.append(d)
165-
index.append((float(row['strike'].replace(',', '')), expiry, typ, row['s']))
174+
index.append((float(row['strike'].replace(',', '')),
175+
expiry, typ, row['s']))
166176
return rows_list, index

pandas_datareader/google/quotes.py

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -28,6 +28,8 @@ def _read_lines(self, out):
2828
buffer = out.read()
2929
m = re.search('// ', buffer)
3030
result = json.loads(buffer[m.start() + len('// '):])
31-
return pd.DataFrame([[float(x['cp']), float(x['l']), np.datetime64(parse(x['lt']).isoformat())]
32-
for x in result], columns=['change_pct', 'last', 'time'],
31+
return pd.DataFrame([[float(x['cp']), float(x['l']),
32+
np.datetime64(parse(x['lt']).isoformat())]
33+
for x in result], columns=['change_pct',
34+
'last', 'time'],
3335
index=[x['t'] for x in result])

pandas_datareader/io/jsdmx.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@ def read_jsdmx(path_or_buf):
1818
1919
Parameters
2020
----------
21-
filepath_or_buffer : a valid SDMX-JSON string or file-like
21+
path_or_buf : a valid SDMX-JSON string or file-like
2222
http://sdmx.org/wp-content/uploads/2014/07/sdmx-json-data-message.pdf
2323
2424
Returns
@@ -58,7 +58,8 @@ def _get_indexer(index):
5858
if index.nlevels == 1:
5959
return [str(i) for i in compat.range(len(index))]
6060
else:
61-
it = itertools.product(*[compat.range(len(level)) for level in index.levels])
61+
it = itertools.product(*[compat.range(
62+
len(level)) for level in index.levels])
6263
return [':'.join(map(str, i)) for i in it]
6364

6465

0 commit comments

Comments (0)