Commit 7b49520

deprecate parse_psm3

1 parent d4dbe55

File tree: 2 files changed, +48 -73 lines


pvlib/iotools/psm3.py

37 additions & 71 deletions

@@ -7,8 +7,8 @@
 import requests
 import pandas as pd
 from json import JSONDecodeError
-import warnings
-from pvlib._deprecation import pvlibDeprecationWarning
+from pvlib._deprecation import deprecated
+from pvlib import tools
 
 NSRDB_API_BASE = "https://developer.nrel.gov"
 PSM_URL = NSRDB_API_BASE + "/api/nsrdb/v2/solar/psm3-2-2-download.csv"
@@ -127,7 +127,7 @@ def get_psm3(latitude, longitude, api_key, email, names='tmy', interval=60,
         timeseries data from NREL PSM3
     metadata : dict
         metadata from NREL PSM3 about the record, see
-        :func:`pvlib.iotools.parse_psm3` for fields
+        :func:`pvlib.iotools.read_psm3` for fields
 
     Raises
     ------
@@ -152,7 +152,7 @@ def get_psm3(latitude, longitude, api_key, email, names='tmy', interval=60,
 
     See Also
     --------
-    pvlib.iotools.read_psm3, pvlib.iotools.parse_psm3
+    pvlib.iotools.read_psm3
 
     References
     ----------
@@ -216,12 +216,12 @@ def get_psm3(latitude, longitude, api_key, email, names='tmy', interval=60,
     # the CSV is in the response content as a UTF-8 bytestring
     # to use pandas we need to create a file buffer from the response
     fbuf = io.StringIO(response.content.decode('utf-8'))
-    return parse_psm3(fbuf, map_variables)
+    return read_psm3(fbuf, map_variables)
 
 
-def parse_psm3(fbuf, map_variables=True):
+def read_psm3(filename, map_variables=True):
     """
-    Parse an NSRDB PSM3 weather file (formatted as SAM CSV). The NSRDB
+    Read an NSRDB PSM3 weather file (formatted as SAM CSV). The NSRDB
     is described in [1]_ and the SAM CSV format is described in [2]_.
 
     .. versionchanged:: 0.9.0
@@ -231,8 +231,8 @@ def parse_psm3(fbuf, map_variables=True):
 
     Parameters
     ----------
-    fbuf: file-like object
-        File-like object containing data to read.
+    filename: str, path-like, or buffer
+        Filename or in-memory buffer of a file containing data to read.
     map_variables: bool, default True
         When true, renames columns of the Dataframe to pvlib variable names
         where applicable. See variable :const:`VARIABLE_MAP`.
@@ -302,12 +302,15 @@ def parse_psm3(fbuf, map_variables=True):
     Examples
     --------
     >>> # Read a local PSM3 file:
+    >>> df, metadata = iotools.read_psm3("data.csv")  # doctest: +SKIP
+
+    >>> # Read a file object or an in-memory buffer:
     >>> with open(filename, 'r') as f:  # doctest: +SKIP
-    ...     df, metadata = iotools.parse_psm3(f)  # doctest: +SKIP
+    ...     df, metadata = iotools.read_psm3(f)  # doctest: +SKIP
 
     See Also
     --------
-    pvlib.iotools.read_psm3, pvlib.iotools.get_psm3
+    pvlib.iotools.get_psm3
 
     References
     ----------
@@ -316,34 +319,35 @@ def parse_psm3(fbuf, map_variables=True):
     .. [2] `Standard Time Series Data File Format
        <https://web.archive.org/web/20170207203107/https://sam.nrel.gov/sites/default/files/content/documents/pdf/wfcsv.pdf>`_
     """
-    # The first 2 lines of the response are headers with metadata
-    metadata_fields = fbuf.readline().split(',')
-    metadata_fields[-1] = metadata_fields[-1].strip()  # strip trailing newline
-    metadata_values = fbuf.readline().split(',')
-    metadata_values[-1] = metadata_values[-1].strip()  # strip trailing newline
+    with tools._file_context_manager(filename) as fbuf:
+        # The first 2 lines of the response are headers with metadata
+        metadata_fields = fbuf.readline().split(',')
+        metadata_values = fbuf.readline().split(',')
+        # get the column names so we can set the dtypes
+        columns = fbuf.readline().split(',')
+        columns[-1] = columns[-1].strip()  # strip trailing newline
+        # Since the header has so many columns, excel saves blank cols in the
+        # data below the header lines.
+        columns = [col for col in columns if col != '']
+        dtypes = dict.fromkeys(columns, float)  # all floats except datevec
+        dtypes.update({'Year': int, 'Month': int, 'Day': int, 'Hour': int,
+                       'Minute': int, 'Cloud Type': int, 'Fill Flag': int})
+        data = pd.read_csv(
+            fbuf, header=None, names=columns, usecols=columns, dtype=dtypes,
+            delimiter=',', lineterminator='\n')  # skip carriage returns \r
+
+    metadata_fields[-1] = metadata_fields[-1].strip()  # trailing newline
+    metadata_values[-1] = metadata_values[-1].strip()  # trailing newline
     metadata = dict(zip(metadata_fields, metadata_values))
     # the response is all strings, so set some metadata types to numbers
     metadata['Local Time Zone'] = int(metadata['Local Time Zone'])
     metadata['Time Zone'] = int(metadata['Time Zone'])
     metadata['Latitude'] = float(metadata['Latitude'])
     metadata['Longitude'] = float(metadata['Longitude'])
     metadata['Elevation'] = int(metadata['Elevation'])
-    # get the column names so we can set the dtypes
-    columns = fbuf.readline().split(',')
-    columns[-1] = columns[-1].strip()  # strip trailing newline
-    # Since the header has so many columns, excel saves blank cols in the
-    # data below the header lines.
-    columns = [col for col in columns if col != '']
-    dtypes = dict.fromkeys(columns, float)  # all floats except datevec
-    dtypes.update(Year=int, Month=int, Day=int, Hour=int, Minute=int)
-    dtypes['Cloud Type'] = int
-    dtypes['Fill Flag'] = int
-    data = pd.read_csv(
-        fbuf, header=None, names=columns, usecols=columns, dtype=dtypes,
-        delimiter=',', lineterminator='\n')  # skip carriage returns \r
+
     # the response 1st 5 columns are a date vector, convert to datetime
-    dtidx = pd.to_datetime(
-        data[['Year', 'Month', 'Day', 'Hour', 'Minute']])
+    dtidx = pd.to_datetime(data[['Year', 'Month', 'Day', 'Hour', 'Minute']])
     # in USA all timezones are integers
     tz = 'Etc/GMT%+d' % -metadata['Time Zone']
     data.index = pd.DatetimeIndex(dtidx).tz_localize(tz)
@@ -357,43 +361,5 @@ def parse_psm3(fbuf, map_variables=True):
     return data, metadata
 
 
-def read_psm3(filename, map_variables=True):
-    """
-    Read an NSRDB PSM3 weather file (formatted as SAM CSV). The NSRDB
-    is described in [1]_ and the SAM CSV format is described in [2]_.
-
-    .. versionchanged:: 0.9.0
-       The function now returns a tuple where the first element is a dataframe
-       and the second element is a dictionary containing metadata. Previous
-       versions of this function had the return values switched.
-
-    Parameters
-    ----------
-    filename: str
-        Filename of a file containing data to read.
-    map_variables: bool, default True
-        When true, renames columns of the Dataframe to pvlib variable names
-        where applicable. See variable :const:`VARIABLE_MAP`.
-
-    Returns
-    -------
-    data : pandas.DataFrame
-        timeseries data from NREL PSM3
-    metadata : dict
-        metadata from NREL PSM3 about the record, see
-        :func:`pvlib.iotools.parse_psm3` for fields
-
-    See Also
-    --------
-    pvlib.iotools.parse_psm3, pvlib.iotools.get_psm3
-
-    References
-    ----------
-    .. [1] `NREL National Solar Radiation Database (NSRDB)
-       <https://nsrdb.nrel.gov/>`_
-    .. [2] `Standard Time Series Data File Format
-       <https://web.archive.org/web/20170207203107/https://sam.nrel.gov/sites/default/files/content/documents/pdf/wfcsv.pdf>`_
-    """
-    with open(str(filename), 'r') as fbuf:
-        content = parse_psm3(fbuf, map_variables)
-    return content
+parse_psm3 = deprecated(since="0.12.1", name="parse_psm3",
+                        alternative="read_psm3")(read_psm3)
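For orientation, here is a short usage sketch of the public API after this change. The path "psm3_data.csv" is a placeholder (not a file shipped with pvlib); the calls themselves mirror the docstring examples and the deprecation wiring in the diff above.

    from pvlib import iotools

    # read_psm3 accepts a filename or path directly...
    data, metadata = iotools.read_psm3("psm3_data.csv")

    # ...or an already-open file object / in-memory buffer:
    with open("psm3_data.csv") as f:
        data, metadata = iotools.read_psm3(f)

    # parse_psm3 still resolves, but calling it now emits a
    # pvlibDeprecationWarning suggesting read_psm3 instead.
    data, metadata = iotools.parse_psm3("psm3_data.csv")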

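The rewritten function body leans on pvlib.tools._file_context_manager, which is not shown in this commit. As a rough illustration only (a hypothetical sketch, not pvlib's actual helper), a path-or-buffer context manager can be written like this:

    import contextlib
    import os

    def open_path_or_buffer(source, mode="r"):
        """Return a context manager yielding a readable file object.

        Hypothetical sketch: a str or os.PathLike source is opened (and
        closed on exit); an already-open file-like object is passed through
        via contextlib.nullcontext and left open for the caller.
        """
        if isinstance(source, (str, os.PathLike)):
            return open(source, mode)
        return contextlib.nullcontext(source)

Used as "with open_path_or_buffer(filename) as fbuf:", a helper of this shape lets read_psm3 handle both the StringIO buffer created in get_psm3 and a user-supplied filename through the same code path.
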
tests/iotools/test_psm3.py

11 additions & 2 deletions

@@ -16,6 +16,8 @@
 from requests import HTTPError
 from io import StringIO
 
+from pvlib._deprecation import pvlibDeprecationWarning
+
 
 TMY_TEST_DATA = TESTS_DATA_DIR / 'test_psm3_tmy-2017.csv'
 YEAR_TEST_DATA = TESTS_DATA_DIR / 'test_psm3_2017.csv'
@@ -130,7 +132,7 @@ def test_get_psm3_tmy_errors(
 
 @pytest.fixture
 def io_input(request):
-    """file-like object for parse_psm3"""
+    """file-like object for read_psm3"""
     with MANUAL_TEST_DATA.open() as f:
         data = f.read()
     obj = StringIO(data)
@@ -139,7 +141,8 @@ def io_input(request):
 
 def test_parse_psm3(io_input):
     """test parse_psm3"""
-    data, metadata = psm3.parse_psm3(io_input, map_variables=False)
+    with pytest.warns(pvlibDeprecationWarning, match='Use read_psm3 instead'):
+        data, metadata = psm3.parse_psm3(io_input, map_variables=False)
     expected = pd.read_csv(YEAR_TEST_DATA)
     assert_psm3_equal(data, metadata, expected)
 
@@ -151,6 +154,12 @@ def test_read_psm3():
     assert_psm3_equal(data, metadata, expected)
 
 
+def test_read_psm3_buffer(io_input):
+    data, metadata = psm3.read_psm3(io_input, map_variables=False)
+    expected = pd.read_csv(YEAR_TEST_DATA)
+    assert_psm3_equal(data, metadata, expected)
+
+
 def test_read_psm3_map_variables():
     """test read_psm3 map_variables=True"""
     data, metadata = psm3.read_psm3(MANUAL_TEST_DATA, map_variables=True)
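The parse_psm3 = deprecated(...)(read_psm3) assignment and the pytest.warns match='Use read_psm3 instead' assertion above rely on pvlib._deprecation.deprecated, which is outside this diff. As a generic illustration only (pvlib's real helper uses its own pvlibDeprecationWarning class and differs in detail), a decorator factory with the same (since, name, alternative) signature could look roughly like:

    import functools
    import warnings

    def deprecated_sketch(since, name=None, alternative=None):
        """Hypothetical stand-in for pvlib._deprecation.deprecated."""
        def decorator(func):
            label = name or func.__name__
            message = f"{label} is deprecated since {since}."
            if alternative:
                message += f" Use {alternative} instead."

            @functools.wraps(func)
            def wrapper(*args, **kwargs):
                # pvlib raises its own pvlibDeprecationWarning subclass here
                warnings.warn(message, DeprecationWarning, stacklevel=2)
                return func(*args, **kwargs)
            return wrapper
        return decorator

A message built this way would match the test's 'Use read_psm3 instead' pattern when the deprecated alias is called.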
