diff --git a/CHANGELOG.md b/CHANGELOG.md
index 57438ff..80e4bac 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -6,6 +6,14 @@
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## [v0.4.2] - 2025-07-17
+
+### Changed
+
+- Reorganized the `examples/` directory into categorized subdirectories (`data_import/`, `dilatometry/`, `dsc/`, `kinetic_methods/`, `synthetic_data/`) to improve structure and user navigation.
+- Renamed the generic `dsc_example.py` to `dsc/legacy_dsc_analysis_example.py` and simplified its content to serve as a basic demonstration.
+
+
 ## [v0.4.1] - 2025-07-15
 
 ### Fixed
diff --git a/examples/custom_importer_example.py b/examples/data_import/custom_importer_example.py
similarity index 100%
rename from examples/custom_importer_example.py
rename to examples/data_import/custom_importer_example.py
diff --git a/examples/importer_examples.py b/examples/data_import/importer_examples.py
similarity index 100%
rename from examples/importer_examples.py
rename to examples/data_import/importer_examples.py
diff --git a/examples/dilatometry_example.py b/examples/dilatometry/dilatometry_example.py
similarity index 100%
rename from examples/dilatometry_example.py
rename to examples/dilatometry/dilatometry_example.py
diff --git a/examples/dsc/legacy_dsc_analysis_example.py b/examples/dsc/legacy_dsc_analysis_example.py
new file mode 100644
index 0000000..abda78f
--- /dev/null
+++ b/examples/dsc/legacy_dsc_analysis_example.py
@@ -0,0 +1,208 @@
+"""
+Legacy DSC analysis example.
+
+NOTE: This script is a simplified version of the analysis workflow and serves
+as a legacy example. For more detailed and specialized use cases, please
+refer to the other examples in the `examples/dsc/` directory.
+
+This script demonstrates:
+- Loading a DSC dataset.
+- Applying baseline correction.
+- Detecting peaks.
+- Identifying thermal events.
+- Plotting the results.
+"""
+
+import logging
+import os
+from typing import Dict
+
+import matplotlib.pyplot as plt
+import numpy as np
+
+from pkynetics.data_import import dsc_importer
+from pkynetics.technique_analysis.dsc.baseline import BaselineCorrector
+from pkynetics.technique_analysis.dsc.peak_analysis import PeakAnalyzer
+from pkynetics.technique_analysis.dsc.thermal_events import ThermalEventDetector
+from pkynetics.technique_analysis.dsc.types import DSCExperiment
+
+# --- Configuration ---
+logging.basicConfig(
+    level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
+)
+logger = logging.getLogger(__name__)
+
+
+# --- Helper Functions ---
+def analyze_experiment(experiment: DSCExperiment) -> Dict:
+    """
+    Analyzes a full DSC experiment or a segment.
+    """
+    results = {}
+    logger.info(
+        f"Starting analysis of '{experiment.sample_name}' with {len(experiment.temperature)} points."
+    )
+
+    try:
+        # 1. Baseline Correction
+        logger.info("Performing baseline correction...")
+        baseline_corrector = BaselineCorrector()
+        baseline_result = baseline_corrector.correct(
+            experiment.temperature, experiment.heat_flow, method="asymmetric"
+        )
+        results["baseline"] = baseline_result
+        logger.info(f"Baseline correction complete. Method: {baseline_result.method}")
+
+        # 2. Peak Analysis
+        logger.info("Finding peaks...")
+        peak_analyzer = PeakAnalyzer()
+        # Use the corrected data for peak finding
+        peaks = peak_analyzer.find_peaks(
+            experiment.temperature,
+            baseline_result.corrected_data,
+            baseline=baseline_result.baseline,
+        )
+        results["peaks"] = peaks
+        logger.info(f"Found {len(peaks)} peaks.")
+
+        # 3. Thermal Event Detection
+        logger.info("Detecting thermal events...")
+        event_detector = ThermalEventDetector()
+        events = event_detector.detect_events(
+            experiment.temperature, baseline_result.corrected_data, peaks
+        )
+        results["events"] = events
+        logger.info("Thermal event detection complete.")
+
+    except Exception as e:
+        logger.error(f"An error occurred during analysis: {e}", exc_info=True)
+        results["error"] = str(e)
+
+    return results
+
+
+def plot_analysis_results(experiment: DSCExperiment, results: Dict) -> None:
+    """
+    Plots the results of the DSC analysis.
+    """
+    if "error" in results:
+        logger.error(f"Cannot plot results due to analysis error: {results['error']}")
+        return
+
+    fig, (ax1, ax2) = plt.subplots(
+        2, 1, figsize=(12, 10), sharex=True, gridspec_kw={"height_ratios": [2, 1]}
+    )
+    fig.suptitle(f"DSC Analysis: {experiment.sample_name}", fontsize=16)
+
+    # --- Top Plot: Data, Baseline, and Corrected Signal ---
+    ax1.plot(
+        experiment.temperature,
+        experiment.heat_flow,
+        label="Original Signal",
+        color="blue",
+        alpha=0.6,
+    )
+    if "baseline" in results:
+        ax1.plot(
+            experiment.temperature,
+            results["baseline"].baseline,
+            "r--",
+            label=f"'{results['baseline'].method}' Baseline",
+        )
+        ax1.plot(
+            experiment.temperature,
+            results["baseline"].corrected_data,
+            label="Baseline-Corrected Signal",
+            color="green",
+            linewidth=2,
+        )
+    ax1.set_ylabel("Heat Flow (mW)")
+    ax1.legend()
+    ax1.grid(True, linestyle="--", alpha=0.6)
+    ax1.set_title("Baseline Correction and Signal")
+
+    # --- Bottom Plot: Annotations for Events ---
+    ax2.plot(
+        experiment.temperature,
+        results["baseline"].corrected_data if "baseline" in results else experiment.heat_flow,
+        color="green",
+        alpha=0.7,
+    )
+
+    if "events" in results:
+        if "melting" in results["events"]:
+            for event in results["events"]["melting"]:
+                ax2.axvline(
+                    event.peak_temperature, color="red", linestyle="--", label="Melting"
+                )
+        if "crystallization" in results["events"]:
+            for event in results["events"]["crystallization"]:
+                ax2.axvline(
+                    event.peak_temperature,
+                    color="purple",
+                    linestyle="--",
+                    label="Crystallization",
+                )
+        if "glass_transitions" in results["events"]:
+            for event in results["events"]["glass_transitions"]:
+                ax2.axvspan(
+                    event.onset_temperature,
+                    event.endpoint_temperature,
+                    color="orange",
+                    alpha=0.3,
+                    label="Glass Transition (Tg)",
+                )
+
+    # Avoid duplicate labels in legend
+    handles, labels = ax2.get_legend_handles_labels()
+    by_label = dict(zip(labels, handles))
+    ax2.legend(by_label.values(), by_label.keys())
+
+    ax2.set_xlabel("Temperature (K)")
+    ax2.set_ylabel("Corrected Heat Flow (mW)")
+    ax2.grid(True, linestyle="--", alpha=0.6)
+    ax2.set_title("Detected Thermal Events")
+
+    plt.tight_layout(rect=[0, 0.03, 1, 0.95])
+    plt.show()
+
+
+def main():
+    """
+    Main execution function.
+    """
+    # Define the path to the sample data file
+    # This assumes the script is run from the project's root directory.
+    file_path = os.path.join(
+        "src", "pkynetics", "data", "dsc", "sample_dsc_setaram.txt"
+    )
+    if not os.path.exists(file_path):
+        logger.error(f"Data file not found at: {file_path}")
+        logger.error(
+            "Please ensure you are running this script from the project root directory."
+        )
+        return
+
+    try:
+        # Load data using the dsc_importer
+        data = dsc_importer(file_path=file_path, manufacturer="Setaram")
+        experiment = DSCExperiment(
+            temperature=data["temperature"],
+            heat_flow=data["heat_flow"],
+            time=data["time"],
+            mass=10.0,
+            sample_name="Setaram Sample",
+        )
+
+        # Analyze the full experiment
+        analysis_results = analyze_experiment(experiment)
+
+        # Plot the results
+        plot_analysis_results(experiment, analysis_results)
+
+    except Exception as e:
+        logger.critical(f"A critical error occurred: {e}", exc_info=True)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/examples/dsc_example.py b/examples/dsc_example.py
deleted file mode 100644
index ae7a9cb..0000000
--- a/examples/dsc_example.py
+++ /dev/null
@@ -1,356 +0,0 @@
-"""Example of DSC analysis with segment selection and improved error handling."""
-
-import logging
-import os
-from typing import Dict, List, Optional, Tuple
-
-import matplotlib.pyplot as plt
-import numpy as np
-from scipy import signal
-from scipy.signal import find_peaks
-
-from pkynetics.data_import import dsc_importer
-from pkynetics.technique_analysis.dsc.baseline import BaselineCorrector
-from pkynetics.technique_analysis.dsc.peak_analysis import PeakAnalyzer
-from pkynetics.technique_analysis.dsc.thermal_events import ThermalEventDetector
-from pkynetics.technique_analysis.dsc.types import DSCExperiment, DSCPeak
-
-logging.basicConfig(
-    level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
-)
-logger = logging.getLogger(__name__)
-
-PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
-PKG_DATA_DIR = os.path.join(
-    os.path.dirname(__file__), "..", "src", "pkynetics", "data", "dsc"
-)
-
-
-class DSCSegmentAnalyzer:
-    def __init__(
-        self,
-        experiment: DSCExperiment,
-        manual_range: Optional[Tuple[float, float]] = None,
-    ):
-        self.experiment = experiment
-        self.segments = []
-        if manual_range is not None:
-            self._create_manual_segment(manual_range)
-        else:
-            self._detect_segments()
-
-    def _create_manual_segment(self, temp_range: Tuple[float, float]) -> None:
-        """Create segment from manual temperature range."""
-        start_temp, end_temp = temp_range
-        start_idx = np.abs(self.experiment.temperature - start_temp).argmin()
-        end_idx = np.abs(self.experiment.temperature - end_temp).argmin()
-
-        if end_idx - start_idx > 100:
-            self.segments = [(start_idx, end_idx)]
-        else:
-            raise ValueError("Selected range too small (minimum 100 points required)")
-
-    # En DSCSegmentAnalyzer
-    def _detect_segments(self) -> None:
-        """Detect different segments in DSC curve."""
-        logger.info("Starting segment detection")
-
-        # Calculate rate of temperature change
-        time_diff = np.gradient(self.experiment.time)
-        rate = np.gradient(self.experiment.temperature) / time_diff
-
-        # Smooth the rate signal
-        window_size = min(21, len(rate) // 5)
-        window_size = window_size if window_size % 2 == 1 else window_size + 1
-        rate_smooth = signal.savgol_filter(rate, window_size, 3)
-
-        # Find significant changes using prominence-based peak detection
-        logger.info("Looking for rate changes...")
-        rate_changes = signal.find_peaks(
-            np.abs(np.gradient(rate_smooth)),
-            prominence=np.std(rate_smooth),  # Adaptive threshold
-            width=5,
-        )[0]
-
-        if len(rate_changes) == 0:
-            logger.info("No segments detected, using full range")
-            self.segments = [(0, len(self.experiment.temperature) - 1)]
-            return
-
-        # Create segments with overlap
-        start_idx = 0
-        min_segment_size = 100
-        overlap = 50  # Points of overlap
-
-        for end_idx in rate_changes:
-            if end_idx - start_idx > min_segment_size:
-                self.segments.append(
-                    (
-                        max(0, start_idx - overlap),
-                        min(len(self.experiment.temperature), end_idx + overlap),
-                    )
-                )
-                logger.info(f"Added segment: {start_idx} to {end_idx}")
-                start_idx = end_idx
-
-        # Add final segment if needed
-        if len(self.experiment.temperature) - start_idx > min_segment_size:
-            self.segments.append(
-                (max(0, start_idx - overlap), len(self.experiment.temperature) - 1)
-            )
-            logger.info("Added final segment")
-
-    # En PeakAnalyzer
-    def find_peaks(
-        self,
-        temperature: np.ndarray,
-        heat_flow: np.ndarray,
-        baseline: Optional[np.ndarray] = None,
-    ) -> List[DSCPeak]:
-        """Find peaks with adaptive thresholds."""
-        logger.info("Starting peak detection")
-
-        if len(temperature) < self.smoothing_window:
-            logger.warning("Data length too short for peak detection")
-            return []
-
-        # Apply baseline correction if provided
-        signal = heat_flow - baseline if baseline is not None else heat_flow
-
-        # Smooth signal
-        window_size = min(self.smoothing_window, len(signal) // 5)
-        window_size = window_size if window_size % 2 == 1 else window_size + 1
-        smooth_signal = signal.savgol_filter(signal, window_size, 3)
-
-        # Calculate adaptive threshold
-        noise_level = np.std(smooth_signal[:20])
-        prominence = max(self.peak_prominence, 2 * noise_level)
-
-        logger.info(f"Using prominence threshold: {prominence}")
-
-        # Find peaks
-        peaks, properties = signal.find_peaks(
-            smooth_signal,
-            prominence=prominence,
-            width=window_size // 2,
-            height=self.height_threshold * np.max(np.abs(smooth_signal)),
-        )
-
-        # Analyze peaks
-        peak_list = []
-        for i, peak_idx in enumerate(peaks):
-            try:
-                peak_info = self._analyze_peak_region(
-                    temperature, signal, peak_idx, baseline
-                )
-                peak_list.append(peak_info)
-                logger.info(
-                    f"Analyzed peak {i + 1} at temperature {peak_info.peak_temperature:.2f}K"
-                )
-            except Exception as e:
-                logger.warning(f"Failed to analyze peak {i}: {str(e)}")
-
-        return peak_list
-
-    def plot_segments(self) -> None:
-        """Plot identified segments for user selection."""
-        plt.figure(figsize=(12, 6))
-        plt.plot(
-            self.experiment.temperature, self.experiment.heat_flow, "b-", alpha=0.5
-        )
-
-        for i, (start, end) in enumerate(self.segments):
-            plt.plot(
-                self.experiment.temperature[start:end],
-                self.experiment.heat_flow[start:end],
-                label=f"Segment {i + 1}",
-            )
-
-        plt.xlabel("Temperature (K)")
-        plt.ylabel("Heat Flow (mW)")
-        plt.title("DSC Curve Segments")
-        plt.legend()
-        plt.grid(True)
-        plt.show()
-
-    def get_segment(self, segment_idx: int) -> DSCExperiment:
-        """Get a specific segment as a new DSCExperiment object."""
-        if not 0 <= segment_idx < len(self.segments):
-            raise ValueError(
-                f"Invalid segment index. Available segments: 0-{len(self.segments) - 1}"
-            )
-
-        start, end = self.segments[segment_idx]
-
-        return DSCExperiment(
-            temperature=self.experiment.temperature[start:end],
-            heat_flow=self.experiment.heat_flow[start:end],
-            time=self.experiment.time[start:end],
-            mass=self.experiment.mass,
-            heating_rate=self.experiment.heating_rate,
-            sample_name=f"{self.experiment.sample_name}_segment_{segment_idx}",
-        )
-
-
-def analyze_segment(segment: DSCExperiment) -> Dict:
-    """Analyze a single DSC segment."""
-    results = {}
-
-    try:
-        logger.info(
-            f"Starting analysis of segment with {len(segment.temperature)} points"
-        )
-
-        # Validate data
-        if len(segment.temperature) < 100:
-            raise ValueError("Segment too short for analysis")
-
-        logger.info("Checking temperature ordering...")
-        if not np.all(np.diff(segment.temperature) > 0):
-            logger.info("Sorting data by temperature...")
-            sort_idx = np.argsort(segment.temperature)
-            segment.temperature = segment.temperature[sort_idx]
-            segment.heat_flow = segment.heat_flow[sort_idx]
-            segment.time = segment.time[sort_idx]
-
-        # Baseline correction
-        logger.info("Starting baseline correction...")
-        window_size = min(21, len(segment.temperature) // 5)
-        window_size = window_size if window_size % 2 == 1 else window_size + 1
-        logger.info(f"Using smoothing window size: {window_size}")
-
-        baseline_corrector = BaselineCorrector(smoothing_window=window_size)
-        baseline_result = baseline_corrector.correct(
-            segment.temperature,
-            segment.heat_flow,
-            method="linear",  # Cambio a 'linear' por ser más robusto
-        )
-        logger.info("Baseline correction completed")
-        results["baseline"] = baseline_result
-
-        # Peak analysis
-        logger.info("Starting peak analysis...")
-        peak_analyzer = PeakAnalyzer()
-        peaks = peak_analyzer.find_peaks(
-            segment.temperature,
-            baseline_result.corrected_data,
-            baseline_result.baseline,
-        )
-        logger.info(f"Found {len(peaks)} peaks")
-        results["peaks"] = peaks
-
-        # Event detection
-        if len(peaks) > 0:
-            logger.info("Starting event detection...")
-            event_detector = ThermalEventDetector()
-            events = event_detector.detect_events(
-                segment.temperature,
-                baseline_result.corrected_data,
-                peaks,
-                baseline_result.baseline,
-            )
-            logger.info("Event detection completed")
-            results["events"] = events
-
-    except Exception as e:
-        logger.error(f"Error analyzing segment: {str(e)}", exc_info=True)
-        results["error"] = str(e)
-
-    return results
-
-
-def plot_segment_analysis(segment: DSCExperiment, results: Dict) -> None:
-    """Plot analysis results for a segment."""
-    fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(12, 8))
-
-    # Original data and baseline
-    ax1.plot(segment.temperature, segment.heat_flow, "b-", label="Original")
-    if "baseline" in results:
-        ax1.plot(
-            segment.temperature, results["baseline"].baseline, "r--", label="Baseline"
-        )
-        ax1.plot(
-            segment.temperature,
-            results["baseline"].corrected_data,
-            "g-",
-            label="Corrected",
-        )
-    ax1.set_xlabel("Temperature (K)")
-    ax1.set_ylabel("Heat Flow (mW)")
-    ax1.legend()
-    ax1.grid(True)
-
-    # Peaks and events
-    if "peaks" in results:
-        for peak in results["peaks"]:
-            ax2.axvline(peak.peak_temperature, color="r", linestyle="--", alpha=0.5)
-            ax2.axvline(peak.onset_temperature, color="g", linestyle=":", alpha=0.5)
-            ax2.axvline(peak.endset_temperature, color="g", linestyle=":", alpha=0.5)
-    ax2.plot(segment.temperature, segment.heat_flow, "b-")
-    ax2.set_xlabel("Temperature (K)")
-    ax2.set_ylabel("Heat Flow (mW)")
-    ax2.grid(True)
-
-    plt.tight_layout()
-    plt.show()
-
-
-def main():
"""Main execution function.""" - # File path - file_path = os.path.join(PKG_DATA_DIR, "sample_dsc_setaram.txt") - - try: - # Load data - data = dsc_importer(file_path=file_path, manufacturer="Setaram") - experiment = DSCExperiment( - temperature=data["temperature"], - heat_flow=data["heat_flow"], - time=data["time"], - mass=10.0, # Example mass in mg - heating_rate=10.0, # Example heating rate in K/min - ) - - # Initialize segment analyzer - use_manual = input("Use manual temperature range? (y/n): ").lower() == "y" - - if use_manual: - start_temp = float(input("Enter start temperature (K): ")) - end_temp = float(input("Enter end temperature (K): ")) - segment_analyzer = DSCSegmentAnalyzer( - experiment, manual_range=(start_temp, end_temp) - ) - else: - segment_analyzer = DSCSegmentAnalyzer(experiment) - - # Show available segments - print(f"\nFound {len(segment_analyzer.segments)} segments") - segment_analyzer.plot_segments() - - # Analyze each segment - for i in range(len(segment_analyzer.segments)): - print(f"\nAnalyzing segment {i + 1}...") - segment = segment_analyzer.get_segment(i) - results = analyze_segment(segment) - - if "error" not in results: - plot_segment_analysis(segment, results) - - if "peaks" in results: - print(f"\nFound {len(results['peaks'])} peaks in segment {i + 1}:") - for j, peak in enumerate(results["peaks"]): - print(f"\nPeak {j + 1}:") - print(f"Onset Temperature: {peak.onset_temperature:.2f} K") - print(f"Peak Temperature: {peak.peak_temperature:.2f} K") - print(f"Endset Temperature: {peak.endset_temperature:.2f} K") - print(f"Enthalpy: {peak.enthalpy:.2f} J/g") - else: - print(f"Error analyzing segment {i + 1}: {results['error']}") - - except Exception as e: - logger.error(f"Error in DSC analysis: {str(e)}") - raise - - -if __name__ == "__main__": - main() diff --git a/examples/coats_redfern_method_example.py b/examples/kinetic_methods/coats_redfern_method_example.py similarity index 100% rename from examples/coats_redfern_method_example.py rename to examples/kinetic_methods/coats_redfern_method_example.py diff --git a/examples/freeman_carroll_method_example.py b/examples/kinetic_methods/freeman_carroll_method_example.py similarity index 100% rename from examples/freeman_carroll_method_example.py rename to examples/kinetic_methods/freeman_carroll_method_example.py diff --git a/examples/friedman_method_example.py b/examples/kinetic_methods/friedman_method_example.py similarity index 100% rename from examples/friedman_method_example.py rename to examples/kinetic_methods/friedman_method_example.py diff --git a/examples/horowitz_metzger_method_example.py b/examples/kinetic_methods/horowitz_metzger_method_example.py similarity index 100% rename from examples/horowitz_metzger_method_example.py rename to examples/kinetic_methods/horowitz_metzger_method_example.py diff --git a/examples/jmak_method_example.py b/examples/kinetic_methods/jmak_method_example.py similarity index 100% rename from examples/jmak_method_example.py rename to examples/kinetic_methods/jmak_method_example.py diff --git a/examples/kas_method_example.py b/examples/kinetic_methods/kas_method_example.py similarity index 100% rename from examples/kas_method_example.py rename to examples/kinetic_methods/kas_method_example.py diff --git a/examples/kissinger_method_example.py b/examples/kinetic_methods/kissinger_method_example.py similarity index 100% rename from examples/kissinger_method_example.py rename to examples/kinetic_methods/kissinger_method_example.py diff --git a/examples/ofw_method_example.py 
similarity index 100%
rename from examples/ofw_method_example.py
rename to examples/kinetic_methods/ofw_method_example.py
diff --git a/examples/synthetic_data_example.py b/examples/synthetic_data/synthetic_data_example.py
similarity index 100%
rename from examples/synthetic_data_example.py
rename to examples/synthetic_data/synthetic_data_example.py
diff --git a/src/pkynetics/__about__.py b/src/pkynetics/__about__.py
index bae42c3..4f1ebd6 100644
--- a/src/pkynetics/__about__.py
+++ b/src/pkynetics/__about__.py
@@ -1,3 +1,3 @@
 """Version information."""
 
-__version__ = "0.4.1"
+__version__ = "0.4.2"
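
Quick-start note (not part of the patch itself): the sketch below condenses the workflow that the new examples/dsc/legacy_dsc_analysis_example.py demonstrates, namely data import, baseline correction, peak detection, and thermal event detection, with the plotting step omitted. The class names and call signatures are copied from that example; the data path, manufacturer string, and sample mass are the same assumptions the example makes and may need adjusting for a given installation.

# Minimal sketch of the workflow shown in examples/dsc/legacy_dsc_analysis_example.py.
# Assumes it is run from the project root so the bundled Setaram sample file is found.
import os

from pkynetics.data_import import dsc_importer
from pkynetics.technique_analysis.dsc.baseline import BaselineCorrector
from pkynetics.technique_analysis.dsc.peak_analysis import PeakAnalyzer
from pkynetics.technique_analysis.dsc.thermal_events import ThermalEventDetector
from pkynetics.technique_analysis.dsc.types import DSCExperiment

# Path of the sample data file used by the example (an assumption of this sketch).
file_path = os.path.join("src", "pkynetics", "data", "dsc", "sample_dsc_setaram.txt")

# Import the raw data and wrap it in a DSCExperiment, as the example does.
data = dsc_importer(file_path=file_path, manufacturer="Setaram")
experiment = DSCExperiment(
    temperature=data["temperature"],
    heat_flow=data["heat_flow"],
    time=data["time"],
    mass=10.0,  # sample mass in mg, same placeholder value as the example
    sample_name="Setaram Sample",
)

# Baseline correction, peak detection, and thermal event detection,
# using the same calls as the example above.
baseline = BaselineCorrector().correct(
    experiment.temperature, experiment.heat_flow, method="asymmetric"
)
peaks = PeakAnalyzer().find_peaks(
    experiment.temperature, baseline.corrected_data, baseline=baseline.baseline
)
events = ThermalEventDetector().detect_events(
    experiment.temperature, baseline.corrected_data, peaks
)

print(f"Found {len(peaks)} peaks.")
print(f"Detected thermal events: {events}")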