|
| 1 | +# SPDX-License-Identifier: BSD-3-Clause |
| 2 | +#!/usr/bin/env python3
| 3 | +# -*- coding: utf-8 -*- |
| 4 | + |
| 5 | +from __future__ import annotations |
| 6 | + |
| 7 | +__coding__ = "utf-8" |
| 8 | +__authors__ = ["Brian R. Pauw"] |
| 9 | +__copyright__ = "Copyright 2025, The MoDaCor team" |
| 10 | +__date__ = "12/12/2025" |
| 11 | +__status__ = "Development" # "Development", "Production" |
| 12 | +# end of header and standard imports |
| 13 | + |
| 14 | +__all__ = ["CSVSource"] |
| 15 | + |
| 16 | +from collections.abc import Callable |
| 17 | +from pathlib import Path |
| 18 | +from typing import Any |
| 19 | + |
| 20 | +import numpy as np |
| 21 | +from attrs import define, field, validators |
| 22 | + |
| 23 | +from modacor.dataclasses.messagehandler import MessageHandler |
| 24 | +from modacor.io.io_source import ArraySlice |
| 25 | + |
| 26 | +from ..io_source import IoSource |
| 27 | + |
| 28 | + |
| 29 | +def _is_callable(_, __, value): |
| 30 | + if not callable(value): |
| 31 | + raise TypeError("method must be callable") |
| 32 | + |
| 33 | + |
@define(kw_only=True)
class CSVSource(IoSource):
    """
    IoSource for loading columnar data from CSV-like text files using NumPy's
    loadtxt or genfromtxt.

    Expected usage
    --------------
    - Data is 1D per column (no multi-dimensional fields).
    - Columns are returned as 1D arrays; each column corresponds to one data_key.
    - for np.loadtxt, column names must be provided via dtype with field names, e.g.:
        dtype=[("q", float), ("I", float), ("I_sigma", float)]
    - for np.genfromtxt, column names come from the first row or are specified
      explicitly via the `names` parameter. Typical patterns:
        * np.genfromtxt(..., names=True, delimiter=..., ...)   # first row as names
        * np.genfromtxt(..., names=["q", "I", "I_sigma"], ...) # explicit names
      so that they can be clearly identified later.

    Configuration
    -------------
    `iosource_method_kwargs` is passed directly to the NumPy function `method`.
    This allows you to use all standard NumPy options, e.g.:

    For np.genfromtxt:
        delimiter=","
        skip_header=3
        max_rows=1000
        usecols=(0, 1, 2)
        names=True or names=["q", "I", "sigma"]
        dtype=None or dtype=float
        encoding="utf-8"
        comments="#"
        ...

    For np.loadtxt:
        delimiter=","
        skiprows=3
        max_rows=1000
        usecols=(0, 1, 2)
        dtype=float
        encoding="utf-8"
        comments="#"
        ...

    Notes
    -----
    - 2D arrays (no field names) are not supported in this implementation.
      If the resulting array does not have `dtype.names`, a ValueError is raised.
    """

    # external API:
    resource_location: Path = field(converter=Path, validator=validators.instance_of(Path))
    method: Callable[..., np.ndarray] = field(
        default=np.genfromtxt, validator=_is_callable
    )  # default to genfromtxt, better for names
    # internal caches; all init=False so they are never exposed as __init__
    # parameters, and populated in __attrs_post_init__ / _preload.
    _data_cache: np.ndarray | None = field(init=False, default=None)
    _data_dict_cache: dict[str, np.ndarray] = field(init=False, factory=dict)
    _file_datasets_dtypes: dict[str, np.dtype] = field(init=False, factory=dict)
    _file_datasets_shapes: dict[str, tuple[int, ...]] = field(init=False, factory=dict)
    logger: MessageHandler = field(init=False)

    def __attrs_post_init__(self) -> None:
        """Set up logging, then eagerly load and index the CSV columns."""
        self.logger = MessageHandler(level=self.logging_level, name="CSVSource")
        if not self.resource_location.is_file():
            # Log only: _load_data() below will raise the actual I/O error
            # when NumPy fails to open the file.
            self.logger.error(f"CSVSource: file {self.resource_location} does not exist.")

        # Load and preprocess data immediately so the IoSource API is usable
        # as soon as construction completes.
        self._load_data()
        self._preload()

    # ------------------------------------------------------------------ #
    # Internal loading / preprocessing                                   #
    # ------------------------------------------------------------------ #

    def _load_data(self) -> None:
        """
        Load the CSV data into a structured NumPy array using the configured
        method (np.genfromtxt or np.loadtxt).

        iosource_method_kwargs are passed directly to that method.

        Raises
        ------
        ValueError
            If no data was loaded, or the result is not a structured array
            with named fields (plain 2D arrays are unsupported).
        """
        self.logger.info(
            f"CSVSource loading data from {self.resource_location} "
            f"using {self.method.__name__} with options: {self.iosource_method_kwargs}"
        )

        try:
            self._data_cache = self.method(self.resource_location, **self.iosource_method_kwargs)
        except Exception as exc:  # noqa: BLE001
            self.logger.error(f"Error while loading CSV data from {self.resource_location}: {exc}")
            raise

        if self._data_cache is None:
            raise ValueError(f"CSVSource: no data loaded from file {self.resource_location}.")
        # Ensure we have a structured array with named fields; columns are
        # addressed by name throughout the rest of this class.
        if self._data_cache.dtype.names is None:
            raise ValueError(
                "CSVSource expected a structured array with named fields, "
                "but dtype.names is None.\n"
                "Hint: use np.genfromtxt with 'names=True' or 'names=[...]', "
                "or provide an appropriate 'dtype' with field names."
            )

    def _preload(self) -> None:
        """
        Populate the per-column cache, shapes, and dtypes from the structured
        array so later lookups never touch the raw array again.
        """
        assert self._data_cache is not None  # for type checkers

        self._data_dict_cache = {}
        self._file_datasets_shapes.clear()
        self._file_datasets_dtypes.clear()

        for name in self._data_cache.dtype.names:
            # Extract each field exactly once and reuse it for all bookkeeping.
            column = self._data_cache[name]
            self._data_dict_cache[name] = column
            self._file_datasets_shapes[name] = column.shape
            self._file_datasets_dtypes[name] = column.dtype

        self.logger.info(f"CSVSource loaded datasets: {list(self._file_datasets_shapes)}")

    # ------------------------------------------------------------------ #
    # IoSource API                                                       #
    # ------------------------------------------------------------------ #

    def get_static_metadata(self, data_key: str) -> None:
        """
        CSVSource does not support static metadata; always returns None.
        """
        self.logger.warning(
            f"You asked for static metadata '{data_key}', but CSVSource does not support static metadata."
        )
        return None

    def get_data(self, data_key: str, load_slice: ArraySlice = ...) -> np.ndarray:
        """
        Return the data column corresponding to `data_key`, cast to float, apply `load_slice`.

        - data_key must match one of the field names in the structured array.
        - `load_slice` is applied to that 1D column (e.g. ellipsis, slice, array of indices).

        Raises
        ------
        RuntimeError
            If no data was loaded (construction failed part-way).
        KeyError
            If `data_key` does not name a column in the file.
        """
        if self._data_cache is None:
            raise RuntimeError("CSVSource data cache is empty; loading may have failed.")

        try:
            column = self._data_dict_cache[data_key]
        except KeyError:
            # from None: the available-keys message is the whole story.
            raise KeyError(
                f"Data key '{data_key}' not found in CSV data. Available keys: {list(self._data_dict_cache.keys())}"
            ) from None

        return np.asarray(column[load_slice]).astype(float)

    def get_data_shape(self, data_key: str) -> tuple[int, ...]:
        """Return the shape of the named column, or () for unknown keys."""
        return self._file_datasets_shapes.get(data_key, ())

    def get_data_dtype(self, data_key: str) -> np.dtype | None:
        """Return the dtype of the named column, or None for unknown keys."""
        return self._file_datasets_dtypes.get(data_key)

    def get_data_attributes(self, data_key: str) -> dict[str, Any]:
        """
        CSV has no per-dataset attributes; return a dict with None.
        """
        self.logger.warning(
            f"You asked for attributes of '{data_key}', but CSVSource does not support data attributes."
        )
        return {data_key: None}
0 commit comments