Welcome to the brainspy-tasks wiki!
By default, the custom algorithms included in brains-py allow you to attach a custom logger, so that you can gather and study in more detail what happens during the training process. Below, a simple implementation with a TensorBoard logger is introduced, which logs the cost function. A custom logger is required to implement each of the functions shown in the example. For more information about TensorBoard in PyTorch, see the official documentation (https://pytorch.org/docs/stable/tensorboard.html).
from torch.utils.tensorboard import SummaryWriter

class Logger:
    def __init__(self, log_dir, comment="DEFAULT_LOGGER"):
        # TODO: LOG HYPERPARAMETERS IN THE COMMENT e.g. "LR_0.1_BATCH_16"
        self.log = SummaryWriter(log_dir, comment=comment)
        self.gate = ""

    def log_train_inputs(self, inputs, targets):
        # self.log.add_graph(net, images)
        pass

    def log_train_predictions(self, predictions):
        pass

    def log_ios_train(self, inputs, targets, predictions, epoch):
        pass

    def log_val_predictions(self, inputs, targets):
        pass

    def log_performance(self, train_losses, val_losses, epoch):
        # Log only the training loss when no validation losses are available;
        # otherwise, log training and validation losses on the same plot.
        if not val_losses:
            self.log.add_scalar("Cost/train/" + self.gate, train_losses[-1],
                                epoch)
        else:
            self.log.add_scalars(
                "Cost/" + self.gate,
                {
                    "train": train_losses[-1],
                    "dev": val_losses[-1]
                },
                epoch,
            )

    def log_outputs(self, outputs):
        pass

    def close(self):
        self.log.close()
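Such a logger can be smoke-tested in isolation before wiring it into a task. The snippet below is a minimal sketch; the directory name runs/example, the gate name, and the loss values are arbitrary placeholders:

logger = Logger("runs/example")
logger.gate = "ring"
# Pretend we have two epochs of training and validation losses
logger.log_performance([0.9, 0.5], [0.8, 0.6], epoch=1)
logger.close()

The Logger class itself (not an instance) can then be passed to the ring searcher task through the custom_logger argument: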
from bspytasks.ring.tasks.searcher import search_solution
from brainspy.utils import manager
from brainspy.utils.io import load_configs
from ring_logger import Logger

# 'model' is assumed to be a local module defining the MultipleDNPUCustomModel class
import model

import matplotlib
matplotlib.use('Agg')  # Non-interactive backend, so plots can be saved without a display

# Load configurations
configs = load_configs(
    '/home/unai/Documents/3-Programming/bspy/examples-multiple-devices/configs.yaml'
)

criterion = manager.get_criterion(configs["algorithm"]['criterion'])
algorithm = manager.get_algorithm(configs["algorithm"]['type'])

# Pass the custom Logger class defined above via the custom_logger argument
search_solution(configs,
                model.MultipleDNPUCustomModel,
                criterion,
                algorithm,
                custom_logger=Logger)
where configs.yaml contains the following:
results_dir: "tmp/TEST/output/ring/exp"
runs: 3
start_gap: 0.4
stop_gap: 0.00825
data:
gap: 0.5
  load: false # If load is false, a new dataset is generated. If load is a path to existing data, that data is loaded instead
sample_no: 2000
batch_size: 128
worker_no: 0
pin_memory: True
split_percentages: # The data is divided into training, validation and test datasets respectively
- 0.8 # Percentage of the data used for training
- 0.1 # Percentage of the data used for validation
- 0.1 # Percentage of the data used for test
algorithm:
type: "gradient"
epochs: 250
learning_rate: 0.001
criterion: "fisher"
optimizer: "adam"
constraint_control_voltages: "clip"
processor:
processor_type: "simulation" # Possible values are: simulation, simulation_debug, cdaq_to_cdaq, and cdaq_to_nidaq
model_dir: "training_data.pt"
input_indices: # It specifies the indices of the activation data array that will be considered as inputs
- 1
- 2
electrode_effects:
# amplification: [28.5] # It always has to be a list
# output_clipping: null
voltage_ranges:
[
[-0.7, 0.3],
[-1.2, 0.7],
[-1.2, 0.7],
[-1.2, 0.7],
[-1.2, 0.7],
[-1.2, 0.7],
[-0.7, 0.3],
]
noise:
type: gaussian
variance: 2.07
waveform:
plateau_length: 10
slope_length: 30
accuracy: # Configurations for the perceptron
epochs: 300
learning_rate: 0.02
batch_size: 128
worker_no: 0
pin_memory: False
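Once a run has finished, the logged cost curves can be inspected by pointing TensorBoard at the log directory. A minimal sketch, assuming the logs were written under the results_dir from the configuration above:

tensorboard --logdir tmp/TEST/output/ring/exp

This starts a local web server (by default at http://localhost:6006) where the logged scalar plots can be browsed.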