Skip to content

Commit 9ea4234

Browse files
authored
Merge branch 'main' into edit-on-gh-hotfix
2 parents 387b084 + f33be83 commit 9ea4234

File tree

6 files changed

+122
-142
lines changed

6 files changed

+122
-142
lines changed

docs/sphinx/source/whatsnew/v0.12.1.rst

Lines changed: 18 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -8,9 +8,19 @@ Breaking Changes
88
~~~~~~~~~~~~~~~~
99

1010

11+
Deprecations
12+
~~~~~~~~~~~~
13+
* The following ``parse_`` functions in :py:mod:`pvlib.iotools` are deprecated,
14+
with the corresponding ``read_`` functions taking their place: (:issue:`2444`, :pull:`2458`)
15+
16+
- :py:func:`~pvlib.iotools.parse_psm3`
17+
- :py:func:`~pvlib.iotools.parse_cams`
18+
19+
1120
Bug fixes
1221
~~~~~~~~~
13-
22+
* :py:func:`pvlib.iotools.get_pvgis_tmy` now returns the correct dtypes
23+
when ``outputformat='epw'``. (:issue:`2399`, :pull:`2417`)
1424

1525
Enhancements
1626
~~~~~~~~~~~~
@@ -41,10 +51,13 @@ Documentation
4151
(:issue:`2423`, :pull:`2426`)
4252
* Clarify which SAPM coefficients are required by the ``module`` parameter in
4353
:py:func:`~pvlib.pvsystem.sapm` (:issue:`2392`, :pull:`2435`)
44-
* Update references in :py:func`~pvlib.irradiance.get_extra_radiation`
45-
(:issue:`2333`, :pull:`2347`)
54+
* Update references in :py:func:`~pvlib.irradiance.get_extra_radiation`
55+
(:issue:`2333`, :pull:`2437`)
56+
* Update references in :py:func:`~pvlib.iotools.get_cams` and :py:func:`~pvlib.iotools.read_cams`
57+
(:issue:`2427`, :pull:`2457`)
4658
* Fix ``Edit on GitHub`` links in stable documentation so they point to the tagged repository version matching the build environment (e.g., v0.12.0). (:issue:`2456`, :pull:`2460`)
4759

60+
4861
Requirements
4962
~~~~~~~~~~~~
5063
* ``wheel`` is no longer a build dependency. (:pull:`2439`)
@@ -69,3 +82,5 @@ Contributors
6982
* Muhammad Rebaal (:ghuser:`Muhammad-Rebaal`)
7083
* Echedey Luis (:ghuser:`echedey-ls`)
7184
* omahs (:ghuser:`omahs`)
85+
* Adam R. Jensen (:ghuser:`AdamRJensen`)
86+
* Marion Schroedter-Homscheidt (:ghuser:`mschroedter`)

pvlib/iotools/psm3.py

Lines changed: 37 additions & 71 deletions
Original file line numberDiff line numberDiff line change
@@ -7,8 +7,8 @@
77
import requests
88
import pandas as pd
99
from json import JSONDecodeError
10-
import warnings
11-
from pvlib._deprecation import pvlibDeprecationWarning
10+
from pvlib._deprecation import deprecated
11+
from pvlib import tools
1212

1313
NSRDB_API_BASE = "https://developer.nrel.gov"
1414
PSM_URL = NSRDB_API_BASE + "/api/nsrdb/v2/solar/psm3-2-2-download.csv"
@@ -127,7 +127,7 @@ def get_psm3(latitude, longitude, api_key, email, names='tmy', interval=60,
127127
timeseries data from NREL PSM3
128128
metadata : dict
129129
metadata from NREL PSM3 about the record, see
130-
:func:`pvlib.iotools.parse_psm3` for fields
130+
:func:`pvlib.iotools.read_psm3` for fields
131131
132132
Raises
133133
------
@@ -152,7 +152,7 @@ def get_psm3(latitude, longitude, api_key, email, names='tmy', interval=60,
152152
153153
See Also
154154
--------
155-
pvlib.iotools.read_psm3, pvlib.iotools.parse_psm3
155+
pvlib.iotools.read_psm3
156156
157157
References
158158
----------
@@ -216,12 +216,12 @@ def get_psm3(latitude, longitude, api_key, email, names='tmy', interval=60,
216216
# the CSV is in the response content as a UTF-8 bytestring
217217
# to use pandas we need to create a file buffer from the response
218218
fbuf = io.StringIO(response.content.decode('utf-8'))
219-
return parse_psm3(fbuf, map_variables)
219+
return read_psm3(fbuf, map_variables)
220220

221221

222-
def parse_psm3(fbuf, map_variables=True):
222+
def read_psm3(filename, map_variables=True):
223223
"""
224-
Parse an NSRDB PSM3 weather file (formatted as SAM CSV). The NSRDB
224+
Read an NSRDB PSM3 weather file (formatted as SAM CSV). The NSRDB
225225
is described in [1]_ and the SAM CSV format is described in [2]_.
226226
227227
.. versionchanged:: 0.9.0
@@ -231,8 +231,8 @@ def parse_psm3(fbuf, map_variables=True):
231231
232232
Parameters
233233
----------
234-
fbuf: file-like object
235-
File-like object containing data to read.
234+
filename: str, path-like, or buffer
235+
Filename or in-memory buffer of a file containing data to read.
236236
map_variables: bool, default True
237237
When true, renames columns of the Dataframe to pvlib variable names
238238
where applicable. See variable :const:`VARIABLE_MAP`.
@@ -302,12 +302,15 @@ def parse_psm3(fbuf, map_variables=True):
302302
Examples
303303
--------
304304
>>> # Read a local PSM3 file:
305+
>>> df, metadata = iotools.read_psm3("data.csv") # doctest: +SKIP
306+
307+
>>> # Read a file object or an in-memory buffer:
305308
>>> with open(filename, 'r') as f: # doctest: +SKIP
306-
... df, metadata = iotools.parse_psm3(f) # doctest: +SKIP
309+
... df, metadata = iotools.read_psm3(f) # doctest: +SKIP
307310
308311
See Also
309312
--------
310-
pvlib.iotools.read_psm3, pvlib.iotools.get_psm3
313+
pvlib.iotools.get_psm3
311314
312315
References
313316
----------
@@ -316,34 +319,35 @@ def parse_psm3(fbuf, map_variables=True):
316319
.. [2] `Standard Time Series Data File Format
317320
<https://web.archive.org/web/20170207203107/https://sam.nrel.gov/sites/default/files/content/documents/pdf/wfcsv.pdf>`_
318321
"""
319-
# The first 2 lines of the response are headers with metadata
320-
metadata_fields = fbuf.readline().split(',')
321-
metadata_fields[-1] = metadata_fields[-1].strip() # strip trailing newline
322-
metadata_values = fbuf.readline().split(',')
323-
metadata_values[-1] = metadata_values[-1].strip() # strip trailing newline
322+
with tools._file_context_manager(filename) as fbuf:
323+
# The first 2 lines of the response are headers with metadata
324+
metadata_fields = fbuf.readline().split(',')
325+
metadata_values = fbuf.readline().split(',')
326+
# get the column names so we can set the dtypes
327+
columns = fbuf.readline().split(',')
328+
columns[-1] = columns[-1].strip() # strip trailing newline
329+
# Since the header has so many columns, excel saves blank cols in the
330+
# data below the header lines.
331+
columns = [col for col in columns if col != '']
332+
dtypes = dict.fromkeys(columns, float) # all floats except datevec
333+
dtypes.update({'Year': int, 'Month': int, 'Day': int, 'Hour': int,
334+
'Minute': int, 'Cloud Type': int, 'Fill Flag': int})
335+
data = pd.read_csv(
336+
fbuf, header=None, names=columns, usecols=columns, dtype=dtypes,
337+
delimiter=',', lineterminator='\n') # skip carriage returns \r
338+
339+
metadata_fields[-1] = metadata_fields[-1].strip() # trailing newline
340+
metadata_values[-1] = metadata_values[-1].strip() # trailing newline
324341
metadata = dict(zip(metadata_fields, metadata_values))
325342
# the response is all strings, so set some metadata types to numbers
326343
metadata['Local Time Zone'] = int(metadata['Local Time Zone'])
327344
metadata['Time Zone'] = int(metadata['Time Zone'])
328345
metadata['Latitude'] = float(metadata['Latitude'])
329346
metadata['Longitude'] = float(metadata['Longitude'])
330347
metadata['Elevation'] = int(metadata['Elevation'])
331-
# get the column names so we can set the dtypes
332-
columns = fbuf.readline().split(',')
333-
columns[-1] = columns[-1].strip() # strip trailing newline
334-
# Since the header has so many columns, excel saves blank cols in the
335-
# data below the header lines.
336-
columns = [col for col in columns if col != '']
337-
dtypes = dict.fromkeys(columns, float) # all floats except datevec
338-
dtypes.update(Year=int, Month=int, Day=int, Hour=int, Minute=int)
339-
dtypes['Cloud Type'] = int
340-
dtypes['Fill Flag'] = int
341-
data = pd.read_csv(
342-
fbuf, header=None, names=columns, usecols=columns, dtype=dtypes,
343-
delimiter=',', lineterminator='\n') # skip carriage returns \r
348+
344349
# the response 1st 5 columns are a date vector, convert to datetime
345-
dtidx = pd.to_datetime(
346-
data[['Year', 'Month', 'Day', 'Hour', 'Minute']])
350+
dtidx = pd.to_datetime(data[['Year', 'Month', 'Day', 'Hour', 'Minute']])
347351
# in USA all timezones are integers
348352
tz = 'Etc/GMT%+d' % -metadata['Time Zone']
349353
data.index = pd.DatetimeIndex(dtidx).tz_localize(tz)
@@ -357,43 +361,5 @@ def parse_psm3(fbuf, map_variables=True):
357361
return data, metadata
358362

359363

360-
def read_psm3(filename, map_variables=True):
361-
"""
362-
Read an NSRDB PSM3 weather file (formatted as SAM CSV). The NSRDB
363-
is described in [1]_ and the SAM CSV format is described in [2]_.
364-
365-
.. versionchanged:: 0.9.0
366-
The function now returns a tuple where the first element is a dataframe
367-
and the second element is a dictionary containing metadata. Previous
368-
versions of this function had the return values switched.
369-
370-
Parameters
371-
----------
372-
filename: str
373-
Filename of a file containing data to read.
374-
map_variables: bool, default True
375-
When true, renames columns of the Dataframe to pvlib variable names
376-
where applicable. See variable :const:`VARIABLE_MAP`.
377-
378-
Returns
379-
-------
380-
data : pandas.DataFrame
381-
timeseries data from NREL PSM3
382-
metadata : dict
383-
metadata from NREL PSM3 about the record, see
384-
:func:`pvlib.iotools.parse_psm3` for fields
385-
386-
See Also
387-
--------
388-
pvlib.iotools.parse_psm3, pvlib.iotools.get_psm3
389-
390-
References
391-
----------
392-
.. [1] `NREL National Solar Radiation Database (NSRDB)
393-
<https://nsrdb.nrel.gov/>`_
394-
.. [2] `Standard Time Series Data File Format
395-
<https://web.archive.org/web/20170207203107/https://sam.nrel.gov/sites/default/files/content/documents/pdf/wfcsv.pdf>`_
396-
"""
397-
with open(str(filename), 'r') as fbuf:
398-
content = parse_psm3(fbuf, map_variables)
399-
return content
364+
parse_psm3 = deprecated(since="0.12.1", name="parse_psm3",
365+
alternative="read_psm3")(read_psm3)

pvlib/iotools/pvgis.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -410,6 +410,9 @@ def _coerce_and_roll_tmy(tmy_data, tz, year):
410410
np.roll(tmy_data, tz, axis=0),
411411
columns=tmy_data.columns,
412412
index=new_index)
413+
# GH 2399
414+
new_tmy_data = \
415+
new_tmy_data.astype(dtype=dict(zip(tmy_data.columns, tmy_data.dtypes)))
413416
return new_tmy_data
414417

415418

0 commit comments

Comments
 (0)