295 changes: 151 additions & 144 deletions pina/callback/adaptive_refinement_callback.py
@@ -1,23 +1,24 @@
"""PINA Callbacks Implementations"""

+import importlib.metadata
import torch
from lightning.pytorch.callbacks import Callback
from ..label_tensor import LabelTensor
from ..utils import check_consistency


class R3Refinement(Callback):
    """
    PINA Implementation of an R3 Refinement Callback.
    """

    def __init__(self, sample_every):
        """
        PINA Implementation of an R3 Refinement Callback.

        This callback implements the R3 (Retain-Resample-Release) routine for
        sampling new points based on adaptive search.
        The algorithm incrementally accumulates collocation points in regions
-        of high PDE residuals, and releases those
-        with low residuals. Points are sampled uniformly in all regions
-        where sampling is needed.
+        of high PDE residuals, and releases those with low residuals.
+        Points are sampled uniformly in all regions where sampling is needed.

        .. seealso::

@@ -33,142 +34,148 @@ def __init__(self, sample_every):
        Example:
            >>> r3_callback = R3Refinement(sample_every=5)
        """
-        super().__init__()
-
-        # sample every
-        check_consistency(sample_every, int)
-        self._sample_every = sample_every
-        self._const_pts = None
-
-    def _compute_residual(self, trainer):
-        """
-        Computes the residuals for a PINN object.
-
-        :return: the total loss, and pointwise loss.
-        :rtype: tuple
-        """
-
-        # extract the solver and device from trainer
-        solver = trainer.solver
-        device = trainer._accelerator_connector._accelerator_flag
-        precision = trainer.precision
-        if precision == "64-true":
-            precision = torch.float64
-        elif precision == "32-true":
-            precision = torch.float32
-        else:
-            raise RuntimeError(
-                "Currently R3Refinement is only implemented "
-                "for precision '32-true' and '64-true', set "
-                "Trainer precision to match one of the "
-                "available precisions."
-            )
-
-        # compute residual
-        res_loss = {}
-        tot_loss = []
-        for location in self._sampling_locations:  # TODO fix for new collector
-            condition = solver.problem.conditions[location]
-            pts = solver.problem.input_pts[location]
-            # send points to correct device
-            pts = pts.to(device=device, dtype=precision)
-            pts = pts.requires_grad_(True)
-            pts.retain_grad()
-            # PINN loss: equation evaluated only for sampling locations
-            target = condition.equation.residual(pts, solver.forward(pts))
-            res_loss[location] = torch.abs(target).as_subclass(torch.Tensor)
-            tot_loss.append(torch.abs(target))
-
-        print(tot_loss)
-
-        return torch.vstack(tot_loss), res_loss
-
-    def _r3_routine(self, trainer):
-        """
-        R3 refinement main routine.
-
-        :param Trainer trainer: PINA Trainer.
-        """
-        # compute residual (all device possible)
-        tot_loss, res_loss = self._compute_residual(trainer)
-        tot_loss = tot_loss.as_subclass(torch.Tensor)
-
-        # !!!!!! From now everything is performed on CPU !!!!!!
-
-        # average loss
-        avg = (tot_loss.mean()).to("cpu")
-        old_pts = {}  # points to be retained
-        for location in self._sampling_locations:
-            pts = trainer._model.problem.input_pts[location]
-            labels = pts.labels
-            pts = pts.cpu().detach().as_subclass(torch.Tensor)
-            residuals = res_loss[location].cpu()
-            mask = (residuals > avg).flatten()
-            if any(mask):  # append residuals greater than average
-                pts = (pts[mask]).as_subclass(LabelTensor)
-                pts.labels = labels
-                old_pts[location] = pts
-                numb_pts = self._const_pts[location] - len(old_pts[location])
-                # sample new points
-                trainer._model.problem.discretise_domain(
-                    numb_pts, "random", locations=[location]
-                )
-
-            else:  # if no res greater than average, samples all uniformly
-                numb_pts = self._const_pts[location]
-                # sample new points
-                trainer._model.problem.discretise_domain(
-                    numb_pts, "random", locations=[location]
-                )
-        # adding previous population points
-        trainer._model.problem.add_points(old_pts)
-
-        # update dataloader
-        trainer._create_or_update_loader()
-
-    def on_train_start(self, trainer, _):
-        """
-        Callback function called at the start of training.
-
-        This method extracts the locations for sampling from the problem
-        conditions and calculates the total population.
-
-        :param trainer: The trainer object managing the training process.
-        :type trainer: pytorch_lightning.Trainer
-        :param _: Placeholder argument (not used).
-
-        :return: None
-        :rtype: None
-        """
-        # extract locations for sampling
-        problem = trainer.solver.problem
-        locations = []
-        for condition_name in problem.conditions:
-            condition = problem.conditions[condition_name]
-            if hasattr(condition, "location"):
-                locations.append(condition_name)
-        self._sampling_locations = locations
-
-        # extract total population
-        const_pts = {}  # for each location, store the # of pts to keep constant
-        for location in self._sampling_locations:
-            pts = trainer._model.problem.input_pts[location]
-            const_pts[location] = len(pts)
-        self._const_pts = const_pts
-
-    def on_train_epoch_end(self, trainer, __):
-        """
-        Callback function called at the end of each training epoch.
-
-        This method triggers the R3 routine for refinement if the current
-        epoch is a multiple of `_sample_every`.
-
-        :param trainer: The trainer object managing the training process.
-        :type trainer: pytorch_lightning.Trainer
-        :param __: Placeholder argument (not used).
-
-        :return: None
-        :rtype: None
-        """
-        if trainer.current_epoch % self._sample_every == 0:
-            self._r3_routine(trainer)
+        raise NotImplementedError(
+            "R3Refinement callback is being refactored in the pina "
+            f"{importlib.metadata.metadata('pina-mathlab')['Version']} "
+            "version. Please use version 0.1 if R3Refinement is required."
+        )
+
+        # super().__init__()
+
+        # # sample every
+        # check_consistency(sample_every, int)
+        # self._sample_every = sample_every
+        # self._const_pts = None
+
+    # def _compute_residual(self, trainer):
+    #     """
+    #     Computes the residuals for a PINN object.
+
+    #     :return: the total loss, and pointwise loss.
+    #     :rtype: tuple
+    #     """
+
+    #     # extract the solver and device from trainer
+    #     solver = trainer.solver
+    #     device = trainer._accelerator_connector._accelerator_flag
+    #     precision = trainer.precision
+    #     if precision == "64-true":
+    #         precision = torch.float64
+    #     elif precision == "32-true":
+    #         precision = torch.float32
+    #     else:
+    #         raise RuntimeError(
+    #             "Currently R3Refinement is only implemented "
+    #             "for precision '32-true' and '64-true', set "
+    #             "Trainer precision to match one of the "
+    #             "available precisions."
+    #         )
+
+    #     # compute residual
+    #     res_loss = {}
+    #     tot_loss = []
+    #     for location in self._sampling_locations:
+    #         condition = solver.problem.conditions[location]
+    #         pts = solver.problem.input_pts[location]
+    #         # send points to correct device
+    #         pts = pts.to(device=device, dtype=precision)
+    #         pts = pts.requires_grad_(True)
+    #         pts.retain_grad()
+    #         # PINN loss: equation evaluated only for sampling locations
+    #         target = condition.equation.residual(pts, solver.forward(pts))
+    #         res_loss[location] = torch.abs(target).as_subclass(torch.Tensor)
+    #         tot_loss.append(torch.abs(target))
+
+    #     print(tot_loss)
+
+    #     return torch.vstack(tot_loss), res_loss
+
+    # def _r3_routine(self, trainer):
+    #     """
+    #     R3 refinement main routine.
+
+    #     :param Trainer trainer: PINA Trainer.
+    #     """
+    #     # compute residual (all device possible)
+    #     tot_loss, res_loss = self._compute_residual(trainer)
+    #     tot_loss = tot_loss.as_subclass(torch.Tensor)
+
+    #     # !!!!!! From now everything is performed on CPU !!!!!!
+
+    #     # average loss
+    #     avg = (tot_loss.mean()).to("cpu")
+    #     old_pts = {}  # points to be retained
+    #     for location in self._sampling_locations:
+    #         pts = trainer._model.problem.input_pts[location]
+    #         labels = pts.labels
+    #         pts = pts.cpu().detach().as_subclass(torch.Tensor)
+    #         residuals = res_loss[location].cpu()
+    #         mask = (residuals > avg).flatten()
+    #         if any(mask):  # append residuals greater than average
+    #             pts = (pts[mask]).as_subclass(LabelTensor)
+    #             pts.labels = labels
+    #             old_pts[location] = pts
+    #             numb_pts = self._const_pts[location] - len(old_pts[location])
+    #             # sample new points
+    #             trainer._model.problem.discretise_domain(
+    #                 numb_pts, "random", locations=[location]
+    #             )
+
+    #         else:  # if no res greater than average, samples all uniformly
+    #             numb_pts = self._const_pts[location]
+    #             # sample new points
+    #             trainer._model.problem.discretise_domain(
+    #                 numb_pts, "random", locations=[location]
+    #             )
+    #     # adding previous population points
+    #     trainer._model.problem.add_points(old_pts)
+
+    #     # update dataloader
+    #     trainer._create_or_update_loader()
+
+    # def on_train_start(self, trainer, _):
+    #     """
+    #     Callback function called at the start of training.
+
+    #     This method extracts the locations for sampling from the problem
+    #     conditions and calculates the total population.
+
+    #     :param trainer: The trainer object managing the training process.
+    #     :type trainer: pytorch_lightning.Trainer
+    #     :param _: Placeholder argument (not used).
+
+    #     :return: None
+    #     :rtype: None
+    #     """
+    #     # extract locations for sampling
+    #     problem = trainer.solver.problem
+    #     locations = []
+    #     for condition_name in problem.conditions:
+    #         condition = problem.conditions[condition_name]
+    #         if hasattr(condition, "location"):
+    #             locations.append(condition_name)
+    #     self._sampling_locations = locations
+
+    #     # extract total population
+    #     const_pts = {}  # for each location, store the pts to keep constant
+    #     for location in self._sampling_locations:
+    #         pts = trainer._model.problem.input_pts[location]
+    #         const_pts[location] = len(pts)
+    #     self._const_pts = const_pts
+
+    # def on_train_epoch_end(self, trainer, __):
+    #     """
+    #     Callback function called at the end of each training epoch.
+
+    #     This method triggers the R3 routine for refinement if the current
+    #     epoch is a multiple of `_sample_every`.
+
+    #     :param trainer: The trainer object managing the training process.
+    #     :type trainer: pytorch_lightning.Trainer
+    #     :param __: Placeholder argument (not used).
+
+    #     :return: None
+    #     :rtype: None
+    #     """
+    #     if trainer.current_epoch % self._sample_every == 0:
+    #         self._r3_routine(trainer)
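
For readers who want the gist of the algorithm while the callback is refactored, here is a minimal, self-contained sketch of one Retain-Resample-Release step as the docstring describes it: retain the collocation points whose residual exceeds the mean, release the rest, and resample uniformly so the population size stays constant. This is not the PINA implementation; `r3_step`, `sample_uniform`, and the toy data are hypothetical stand-ins (for example, `sample_uniform` plays the role that `discretise_domain` plays in the commented code above).

```python
import torch

def r3_step(points, residuals, sample_uniform):
    """One Retain-Resample-Release step (illustrative sketch, not PINA API).

    points:         (N, d) tensor of current collocation points
    residuals:      (N,) tensor of |PDE residual| evaluated at each point
    sample_uniform: callable n -> (n, d) tensor of fresh uniform samples
    """
    avg = residuals.mean()
    retain_mask = residuals > avg                # retain points above the mean residual
    retained = points[retain_mask]               # ... and release all the others
    n_new = points.shape[0] - retained.shape[0]  # resample to keep the population constant
    return torch.cat([retained, sample_uniform(n_new)], dim=0)

# Hypothetical usage on the unit interval [0, 1]:
pts = torch.rand(100, 1)
res = torch.rand(100)  # stand-in for |condition.equation.residual(pts, model(pts))|
new_pts = r3_step(pts, res, lambda n: torch.rand(n, 1))
assert new_pts.shape == pts.shape  # population size is preserved
```

As the `NotImplementedError` above states, the callback itself is disabled in this release; users who need `R3Refinement` are directed to version 0.1 (e.g. `pip install pina-mathlab==0.1`, assuming the PyPI name taken from the error message).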
22 changes: 12 additions & 10 deletions pina/callback/linear_weight_update_callback.py
@@ -37,12 +37,13 @@ def __init__(
        check_consistency(self.initial_value, (float, int), subclass=False)
        check_consistency(self.target_value, (float, int), subclass=False)

-    def on_train_start(self, trainer, solver):
+    def on_train_start(self, trainer, pl_module):
        """
        Initialize the weight of the condition to the specified `initial_value`.

-        :param Trainer trainer: a pina:class:`Trainer` instance.
-        :param SolverInterface solver: a pina:class:`SolverInterface` instance.
+        :param Trainer trainer: A :class:`~pina.trainer.Trainer` instance.
+        :param SolverInterface pl_module: A
+            :class:`~pina.solver.solver.SolverInterface` instance.
        """
        # Check that the target epoch is valid
        if not 0 < self.target_epoch <= trainer.max_epochs:
@@ -52,7 +53,7 @@ def on_train_start(self, trainer, solver):
            )

        # Check that the condition is a problem condition
-        if self.condition_name not in solver.problem.conditions:
+        if self.condition_name not in pl_module.problem.conditions:
            raise ValueError(
                f"`{self.condition_name}` must be a problem condition."
            )
@@ -66,20 +67,21 @@ def on_train_start(self, trainer, solver):
            )

        # Check that the weighting schema is ScalarWeighting
-        if not isinstance(solver.weighting, ScalarWeighting):
+        if not isinstance(pl_module.weighting, ScalarWeighting):
            raise ValueError("The weighting schema must be ScalarWeighting.")

        # Initialize the weight of the condition
-        solver.weighting.weights[self.condition_name] = self.initial_value
+        pl_module.weighting.weights[self.condition_name] = self.initial_value

-    def on_train_epoch_start(self, trainer, solver):
+    def on_train_epoch_start(self, trainer, pl_module):
        """
        Adjust at each epoch the weight of the condition.

-        :param Trainer trainer: a pina:class:`Trainer` instance.
-        :param SolverInterface solver: a pina:class:`SolverInterface` instance.
+        :param Trainer trainer: A :class:`~pina.trainer.Trainer` instance.
+        :param SolverInterface pl_module: A
+            :class:`~pina.solver.solver.SolverInterface` instance.
        """
        if 0 < trainer.current_epoch <= self.target_epoch:
-            solver.weighting.weights[self.condition_name] += (
+            pl_module.weighting.weights[self.condition_name] += (
                self.target_value - self.initial_value
            ) / (self.target_epoch - 1)
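
The increment above implements a linear ramp: at the start of each epoch the weight grows by the constant step `(target_value - initial_value) / (target_epoch - 1)`. A minimal sketch of the arithmetic, with all values hypothetical rather than PINA API:

```python
# Linear weight ramp, mirroring the guard in on_train_epoch_start above.
initial_value, target_value, target_epoch = 0.0, 1.0, 5

step = (target_value - initial_value) / (target_epoch - 1)  # 0.25
weight = initial_value
for epoch in range(target_epoch):  # epochs 0 .. target_epoch - 1
    if 0 < epoch <= target_epoch:  # no update before training starts
        weight += step
    print(f"epoch {epoch}: weight = {weight:.2f}")
# epoch 0: 0.00, epoch 1: 0.25, epoch 2: 0.50, epoch 3: 0.75, epoch 4: 1.00
```

With these numbers the weight ramps from 0.0 to 1.0 in four equal steps of 0.25, reaching `target_value` at the start of epoch `target_epoch - 1`.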