Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -7,9 +7,15 @@ __pycache__/

# Local data and outputs
data/
rate_design/ny/hp_rates/data/buildstock_raw/
rate_design/ny/hp_rates/data/buildstock_processed/
rate_design/ny/hp_rates/data/cairo_cases/
outputs/
site/

# Test outputs (generated during tests)
tests/test_outputs/

# Python packaging/build artifacts
*.pyc
*.pyo
Expand Down
1 change: 1 addition & 0 deletions LICENSE
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
Placeholder license. Replace with the appropriate license text for this project.
15 changes: 10 additions & 5 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,10 +4,15 @@ This repository is a clean scaffold for rate design analysis, focused on New Yor

## Layout

- `src/rate_design/` — package skeleton for shared logic, utilities, and New York–specific code.
- `data/ny/` — local cache for BuildStock and CAIRO inputs/outputs (kept out of git).
- `scripts/` — helper scripts (e.g., running a NY heat pump rate scenario).
- `tests/` — placeholder test files to fill in alongside new code.
- `rate_design/` — package root.
- `ny/hp_rates/`
- `data/` — local inputs/outputs; `buildstock_*` and `cairo_cases/` are git-ignored. Configs under `tariff_structure/` and `tariff_mapping/` stay versioned.
- `scenarios/` — YAML configs selecting tariffs/mappings and other simulation parameters.
- `scripts/` — helpers such as customer selection, tariff builders, and case path helpers.
- `Justfile` — NY HP-specific recipes (stub).
- `ny/ev_rates/` — stubbed EV structure (data, scenarios, scripts, Justfile).
- `utils/` — cross-jurisdiction utilities (buildstock IO, S3 sync, conversions).
- `tests/` — placeholder test files to expand alongside code.

## Notes
- Data under `data/` should remain local or synced via S3 tooling you add; keep large artifacts out of git.
- Data under `rate_design/ny/hp_rates/data/` (buildstock raw/processed, cairo cases) should remain local or synced via S3 tooling you add; keep large artifacts out of git.
20 changes: 16 additions & 4 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ description = "Skeleton for the rate design platform."
readme = "README.md"
requires-python = ">=3.11"
authors = [{ name = "Switchbox Data" }]
license = { file = "LICENSE" }
license = "LicenseRef-Proprietary"
dependencies = [
"buildstock-fetch",
"cairo",
Expand All @@ -22,11 +22,23 @@ requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[tool.setuptools]
package-dir = { "" = "src" }
package-dir = { "" = "." }

[tool.setuptools.packages.find]
where = ["src"]
include = ["rate_design*"]
where = ["."]
include = ["rate_design*", "utils*"]

[tool.setuptools.package-data]
"rate_design.ny.hp_rates" = [
"scenarios/*.yaml",
"data/tariff_structure/*.json",
"data/tariff_mapping/*.csv",
]

[tool.uv.sources]
cairo = { git = "https://github.com/NREL/CAIRO.git", rev = "tb/dev_package" }

[tool.pytest.ini_options]
markers = [
"integration: marks tests as integration tests that require API access (deselect with '-m \"not integration\"')",
]
File renamed without changes.
File renamed without changes.
1 change: 1 addition & 0 deletions rate_design/ny/hp_rates/Justfile
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
# NY heat pump tasks (placeholder).
1 change: 1 addition & 0 deletions rate_design/ny/hp_rates/scripts/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
"""NY heat pump helper scripts."""
99 changes: 99 additions & 0 deletions rate_design/ny/hp_rates/scripts/generate_ny_hp_scenarios.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,99 @@
#!/usr/bin/env python
"""Generate NY heat pump adoption scenarios with cumulative adoption."""

from pathlib import Path

from utils.mixed_adoption_trajectory import (
build_adoption_trajectory,
fetch_baseline_sample,
)

# Base data directory for NY HP rates (git-ignored raw/processed, configs versioned)
BASE_DATA_DIR = Path("rate_design/ny/hp_rates/data")

# Configuration for the scenario-generation workflow. Keys mirror the keyword
# arguments passed by main() to fetch_baseline_sample() and
# build_adoption_trajectory(), plus output locations under BASE_DATA_DIR.
CONFIG = {
    # ResStock release parameters
    "release_year": "2024",
    "weather_file": "tmy3",
    "release_version": "2",
    "state": "NY",
    # Heat pump upgrade ID (adjust based on your ResStock release)
    "hp_upgrade_id": "1",
    # Download settings
    "output_dir": BASE_DATA_DIR / "buildstock_raw",  # raw downloads (git-ignored)
    "max_workers": 5,  # parallel download workers
    # Sampling settings
    "sample_size": 1000,  # Number of buildings to sample
    "sample_seed": 123,  # Seed for sampling reproducibility (determines building ordering)
    # Adoption scenario settings
    # Cumulative shares of the sampled buildings that adopt heat pumps.
    "adoption_fractions": [0.1, 0.2, 0.3, 0.5, 0.8, 1.0],
    # Output settings
    "processed_dir": BASE_DATA_DIR / "buildstock_processed",  # processed outputs (git-ignored)
}


def main() -> None:
    """Run the complete workflow to generate adoption scenarios.

    Workflow:
      1. Fetch a seeded baseline sample of NY buildings, which also fixes a
         deterministic building-ID ordering.
      2. Build cumulative adoption scenarios for each fraction in
         ``CONFIG["adoption_fractions"]``; upgrade data is fetched
         incrementally per fraction.

    All inputs come from the module-level ``CONFIG`` dict; outputs are written
    under ``CONFIG["output_dir"]`` and ``CONFIG["processed_dir"]``.
    """
    banner = "=" * 80  # hoisted: the same separator is reused for every section header

    print(banner)
    print("NY Heat Pump Cumulative Adoption Scenario Generator")
    print(banner)
    print("\nConfiguration:")
    for key, value in CONFIG.items():
        print(f" {key}: {value}")
    print("\n")

    # Step 1: Fetch baseline sample and establish building ID ordering
    print("\n" + banner)
    print("STEP 1: Fetching baseline sample")
    print(banner)
    print(f"Fetching {CONFIG['sample_size']} baseline buildings (seed={CONFIG['sample_seed']})")

    baseline_metadata_path, building_ids = fetch_baseline_sample(
        sample_size=CONFIG["sample_size"],
        random_seed=CONFIG["sample_seed"],
        release_year=CONFIG["release_year"],
        weather_file=CONFIG["weather_file"],
        release_version=CONFIG["release_version"],
        state=CONFIG["state"],
        output_dir=CONFIG["output_dir"],
        max_workers=CONFIG["max_workers"],
    )

    print(f"\n✓ Fetched {len(building_ids)} baseline buildings")
    print(f"✓ Baseline metadata: {baseline_metadata_path}")
    # Fix: this message has no placeholders, so the f-string prefix was
    # extraneous (lint F541) and is dropped; output is unchanged.
    print("✓ Building ID ordering established (deterministic from seed)")

    # Step 2: Build adoption trajectory
    print("\n" + banner)
    print("STEP 2: Building adoption trajectory")
    print(banner)
    print(f"Creating scenarios for adoption fractions: {CONFIG['adoption_fractions']}")
    print("Note: Upgrade data will be fetched incrementally for each fraction")

    scenario_paths = build_adoption_trajectory(
        baseline_metadata_path=baseline_metadata_path,
        baseline_building_ids=building_ids,
        adoption_fractions=CONFIG["adoption_fractions"],
        upgrade_id=CONFIG["hp_upgrade_id"],
        release_year=CONFIG["release_year"],
        weather_file=CONFIG["weather_file"],
        release_version=CONFIG["release_version"],
        state=CONFIG["state"],
        output_dir=CONFIG["output_dir"],
        max_workers=CONFIG["max_workers"],
        output_processed_dir=CONFIG["processed_dir"],
    )

    # Summary
    print("\n" + banner)
    print("COMPLETE - Scenario Summary")
    print(banner)
    print(f"\nGenerated {len(scenario_paths)} adoption scenarios:")
    for fraction, path in sorted(scenario_paths.items()):
        # Nearest-integer adopter count for display; assumes the trajectory
        # builder rounds the same way — TODO confirm against
        # utils.mixed_adoption_trajectory.
        n_adopters = int(round(fraction * len(building_ids)))
        print(f" {fraction*100:3.0f}% adoption ({n_adopters:4d} buildings) → {path.name}")


if __name__ == "__main__":
    main()
1 change: 0 additions & 1 deletion src/rate_design/core/__init__.py

This file was deleted.

3 changes: 0 additions & 3 deletions src/rate_design/core/pipeline.py

This file was deleted.

3 changes: 0 additions & 3 deletions src/rate_design/core/postprocess.py

This file was deleted.

3 changes: 0 additions & 3 deletions src/rate_design/core/rates.py

This file was deleted.

3 changes: 0 additions & 3 deletions src/rate_design/core/scenarios.py

This file was deleted.

1 change: 0 additions & 1 deletion src/rate_design/ny/Justfile

This file was deleted.

Empty file.
3 changes: 0 additions & 3 deletions src/rate_design/utils/buildstock_io.py

This file was deleted.

1 change: 1 addition & 0 deletions tests/test_utils/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
"""Tests for utils modules."""
191 changes: 191 additions & 0 deletions tests/test_utils/test_buildstock_io.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,191 @@
"""Tests for utils/buildstock_io.py.

Each test function corresponds to one key function in buildstock_io.py.
Tests that interact with buildstock-fetch save outputs to tests/test_outputs/buildstock_io/ (git-ignored).
Path construction tests are self-contained and don't require fixtures or API calls.
"""

from pathlib import Path

import pytest

from utils.buildstock_io import (
fetch_for_building_ids,
fetch_sample,
get_buildstock_release_dir,
get_load_curve_dir,
get_load_curve_path,
get_metadata_path,
)

# Root for files written by these tests: tests/test_outputs/buildstock_io/
# (git-ignored; see the tests/test_outputs/ entry in .gitignore)
TEST_OUTPUT_DIR = Path(__file__).parent.parent / "test_outputs" / "buildstock_io"


# ==============================================================================
# Unit tests - No fixtures or API calls required
# ==============================================================================


def test_get_buildstock_release_dir():
    """The release dir is the output dir joined with the release slug."""
    base = Path("/data/resstock")
    result = get_buildstock_release_dir(
        output_dir=base,
        release_year="2024",
        weather_file="tmy3",
        release_version="2",
    )
    # Slug format: res_<year>_<weather>_<version>
    expected = base / "res_2024_tmy3_2"
    assert result == expected


def test_get_metadata_path():
    """The metadata path follows the hive-partitioned release layout."""
    base = Path("/data/resstock")
    result = get_metadata_path(
        output_dir=base,
        release_year="2024",
        weather_file="tmy3",
        release_version="2",
        upgrade_id="0",
        state="NY",
    )
    # Upgrade "0" appears zero-padded as "upgrade=00" in the partition name.
    expected = base.joinpath(
        "res_2024_tmy3_2", "metadata", "state=NY", "upgrade=00", "metadata.parquet"
    )
    assert result == expected


def test_get_load_curve_dir():
    """The load-curve dir nests the curve subdir under the release dir."""
    base = Path("/data/resstock")
    result = get_load_curve_dir(
        output_dir=base,
        release_year="2024",
        weather_file="tmy3",
        release_version="2",
        curve_subdir="load_curve_hourly",
    )
    assert result == base.joinpath("res_2024_tmy3_2", "load_curve_hourly")


def test_get_load_curve_path():
    """A building's load-curve file lives under state/upgrade partitions."""
    curve_root = Path("/data/resstock/load_curve_hourly")
    result = get_load_curve_path(
        load_curve_dir=curve_root,
        bldg_id=12345,
        state="NY",
        upgrade_id="0",
    )
    # Filename format: <bldg_id>-<upgrade_id>.parquet (upgrade NOT zero-padded here)
    assert result == curve_root.joinpath("state=NY", "upgrade=00", "12345-0.parquet")


# ==============================================================================
# Integration tests - Require buildstock-fetch and generate test outputs
# ==============================================================================


@pytest.mark.integration
def test_fetch_sample():
    """Test fetch_sample with sample_size=1.

    Outputs saved to: tests/test_outputs/buildstock_io/sample_1/
    """
    sample_dir = TEST_OUTPUT_DIR / "sample_1"
    sample_dir.mkdir(parents=True, exist_ok=True)

    fetched_paths, failures = fetch_sample(
        upgrade_id="0",
        release_year="2024",
        weather_file="tmy3",
        release_version="2",
        state="NY",
        output_dir=sample_dir,
        max_workers=1,
        sample_size=1,
        random_seed=42,
        file_type=("metadata", "load_curve_hourly"),
    )

    # Every requested file should download; at least one path is returned.
    assert len(failures) == 0
    assert len(fetched_paths) > 0

    meta_path = get_metadata_path(
        output_dir=sample_dir,
        release_year="2024",
        weather_file="tmy3",
        release_version="2",
        upgrade_id="0",
        state="NY",
    )
    assert meta_path.exists()

    # Deferred import: polars is only needed for this integration test.
    import polars as pl

    # Exactly one building's metadata row, matching sample_size=1.
    assert len(pl.read_parquet(meta_path)) == 1


@pytest.mark.integration
def test_fetch_for_building_ids():
    """Test fetch_for_building_ids with 1 specific building.

    Outputs saved to: tests/test_outputs/buildstock_io/specific_building/
    """
    # First fetch a 1-building baseline sample just to discover a real bldg_id.
    baseline_dir = TEST_OUTPUT_DIR / "baseline_for_upgrade_test"
    baseline_dir.mkdir(parents=True, exist_ok=True)

    _, _ = fetch_sample(
        upgrade_id="0",
        release_year="2024",
        weather_file="tmy3",
        release_version="2",
        state="NY",
        output_dir=baseline_dir,
        max_workers=1,
        sample_size=1,
        random_seed=42,
        file_type=("metadata",),
    )

    # Deferred import: polars is only needed for this integration test.
    import polars as pl

    baseline_meta_path = get_metadata_path(
        output_dir=baseline_dir,
        release_year="2024",
        weather_file="tmy3",
        release_version="2",
        upgrade_id="0",
        state="NY",
    )
    target_id = pl.read_parquet(baseline_meta_path)["bldg_id"][0]

    # Now fetch upgrade-1 data for that specific building.
    target_dir = TEST_OUTPUT_DIR / "specific_building"
    target_dir.mkdir(parents=True, exist_ok=True)

    fetched_paths, failures = fetch_for_building_ids(
        building_ids=[target_id],
        upgrade_id="1",
        release_year="2024",
        weather_file="tmy3",
        release_version="2",
        state="NY",
        output_dir=target_dir,
        max_workers=1,
        file_type=("metadata", "load_curve_hourly"),
    )

    assert len(failures) == 0
    assert len(fetched_paths) > 0

    upgrade_meta_path = get_metadata_path(
        output_dir=target_dir,
        release_year="2024",
        weather_file="tmy3",
        release_version="2",
        upgrade_id="1",
        state="NY",
    )
    assert upgrade_meta_path.exists()
Loading