from abc import ABC, abstractmethod

import numpy as np


class LearningRule(ABC):
    """Abstract base class for learning rules.

    Concrete rules implement ``update_weights`` and may use ``weight_trace``
    to record the synaptic weight after every update.
    """

    def __init__(self, name: str = None, three_factor: bool = False) -> None:
        """
        Constructor for the base learning class.

        Parameters:
            name (str): Name of the learning rule.
            three_factor (bool): True for modulated (three-factor) rules,
                False for simple homosynaptic rules.

        Returns:
            None
        """
        self.name = name
        self.three_factor = three_factor
        self.weight_trace = []  # weight value recorded after each update

    @abstractmethod
    def update_weights(self):
        """Update weights based on the learning rule."""


class STDPupdate(LearningRule):
    """Spike-timing-dependent plasticity (STDP).

    A simple homosynaptic rule that updates the weight from the relative
    timing of pre- and post-synaptic spikes:

        dw = A_p * exp((pre_spike - post_spike) / tau)   if post_spike - pre_spike >= 0
        dw = A_n * exp((post_spike - pre_spike) / tau)   if post_spike - pre_spike < 0
    """

    def __init__(self, name=None, weight=None, A_p: float = 0.01, A_n: float = -0.01, tau: float = 0.001, time_step: int = 1):
        """
        Parameters:
            name (any): optional rule/neuron name. Default None.
            weight (float): initial synaptic weight. Default None.
            A_p (float): potentiation factor. Default 0.01.
            A_n (float): depression factor. Default -0.01.
            tau (float): time constant. Default 0.001.
            time_step (int): current simulation time step. Default 1.
        """
        # TODO: Add validation conditions for the parameters
        # FIX: call the base constructor so weight_trace/three_factor exist;
        # the original skipped it and crashed on self.weight_trace.append().
        super().__init__(name=name, three_factor=False)
        self._w = weight
        self._A_p = A_p
        self._A_n = A_n
        self._tau = tau
        self.time_step = time_step

    def update_weights(self, pre_spike_hist: list, post_spike_hist: list) -> float:
        """Apply one STDP step and return the (possibly updated) weight.

        Parameters:
            pre_spike_hist (sequence of 0/1): pre-synaptic spike history, newest last.
            post_spike_hist (sequence of 0/1): post-synaptic spike history, newest last.

        Returns:
            float: the weight after this step.
        """
        # FIX: pre_status/post_status were only bound when the respective
        # neuron spiked on the current step, raising UnboundLocalError
        # otherwise; they now default to 0.
        if pre_spike_hist[-1] == 1:
            pre_spike, pre_status = self.time_step, 1
        else:
            pre_spike, pre_status = self.calculate_spike_timing(pre_spike_hist), 0

        if post_spike_hist[-1] == 1:
            post_spike, post_status = self.time_step, 1
        else:
            post_spike, post_status = self.calculate_spike_timing(post_spike_hist), 0

        if pre_spike is None or post_spike is None:
            # FIX: a side with no spikes at all previously raised ValueError
            # inside calculate_spike_timing; treat it as "no update".
            dw = 0.0
        elif post_spike - pre_spike < 0 and pre_status == 1:
            # Post fired before the current pre spike: depress.
            dw = self._A_n * np.exp((post_spike - pre_spike) / self._tau)
            self._w += dw
        elif post_spike - pre_spike >= 0 and post_status == 1:
            # Post fired at/after pre: potentiate.
            dw = self._A_p * np.exp((pre_spike - post_spike) / self._tau)
            self._w += dw
        else:
            dw = 0.0

        self.weight_trace.append(self._w)
        return self._w

    @staticmethod
    def calculate_spike_timing(spike_hist: list):
        """Return the number of steps since the most recent spike.

        Parameters:
            spike_hist (sequence of 0/1): spike history, newest last.

        Returns:
            int or None: steps elapsed since the last spike, or None when the
            history contains no spike (the original raised ValueError here).
        """
        spike_indices = [ind for ind, val in enumerate(spike_hist) if val]
        if not spike_indices:
            return None
        return len(spike_hist) - max(spike_indices)


class RSTDPupdate(LearningRule):
    """Reward-modulated STDP (three-factor rule).

    An eligibility trace accumulates STDP updates and is converted into a
    weight change by a scalar reward signal.
    """

    def __init__(self, name=None, weight=None, reward: float = 0.01, A_p: float = 0.01, A_n: float = -0.01, tau: float = 0.001, time_step: int = 1):
        """
        Parameters:
            name (any): optional rule/neuron name. Default None.
            weight (float): initial synaptic weight. Default None.
            reward (float): reward factor scaling the eligibility trace. Default 0.01.
            A_p (float): potentiation factor. Default 0.01.
            A_n (float): depression factor. Default -0.01.
            tau (float): time constant. Default 0.001.
            time_step (int): current simulation time step. Default 1.
        """
        # FIX: call the base constructor (weight_trace was missing before).
        super().__init__(name=name, three_factor=True)
        self._w = weight
        self._A_p = A_p
        self._A_n = A_n
        self._tau = tau
        self._reward = reward
        self.time_step = time_step
        # Inner STDP rule that drives the eligibility trace.
        self.base_update_obj = STDPupdate(weight=self._w, A_p=self._A_p, A_n=self._A_n, tau=self._tau, time_step=self.time_step)
        self.eligibility_trace = 0.0

    def update_weights(self, pre_spike_hist: list, post_spike_hist: list) -> float:
        """Apply one R-STDP step.

            e  = -(e / tau) + STDP(pre, post)
            dw = reward * e

        Returns:
            float: the weight after this step.
        """
        # NOTE(review): STDPupdate.update_weights returns the updated *weight*,
        # not the increment dw; the formula above suggests the increment may
        # have been intended — confirm before changing.
        base_update = self.base_update_obj.update_weights(pre_spike_hist, post_spike_hist)

        # Decay the eligibility trace and fold in the new STDP contribution.
        self.eligibility_trace = -self.eligibility_trace / self._tau + base_update

        # Convert eligibility into an actual weight change via the reward.
        self._w += self._reward * self.eligibility_trace
        self.weight_trace.append(self._w)
        # FIX: return the updated weight (the original returned None),
        # consistent with STDPupdate.update_weights.
        return self._w
class AveragingTraceRule:
    """Trace-gated three-factor weight update.

    Trace(t) = base_confidence_val + trace_scaler * (activ / mean(activ)) * Trace(t-1)

    A neuron whose trace exceeds ``trace_threshold`` has its learning rate
    reduced by ``lr_decay`` for that update.
    """

    def __init__(self, Npre, Npos, beta1, beta2, beta3, base_confidence_val, trace_threshold, trace_scaler, lr, lr_decay):
        # NOTE(review): beta3 is accepted but never used; Npre is stored but unused.
        self.beta1 = beta1
        self.beta2 = beta2
        self.Npre = Npre
        self.Npos = Npos
        self.threshold = trace_threshold
        self.trace_scaler = trace_scaler
        self.base_confidence_val = base_confidence_val
        self.lr_base = lr
        self.lr_decay = lr_decay

    def __call__(self, W, pre, post, mod, trace_prev, curr_activ):
        """Apply one update and return the new weights and trace.

        :param W: [Npos x Npre] weight matrix
        :param pre: [Npre] pre-synaptic activations — assumed shape, TODO confirm
        :param post: [Npos] post-synaptic activations
        :param mod: [Npos] modulatory (third-factor) signal
        :param trace_prev: [Npos] previous trace values
        :param curr_activ: [Npos] current hidden-layer activations
        :return: (W, T) — updated weights and modified trace
        """
        delta = np.zeros_like(W)

        # NOTE(review): divides by zero when the mean activation is 0 —
        # confirm callers guarantee non-zero activity.
        mean_activ = np.mean(curr_activ)
        T = self.base_confidence_val + self.trace_scaler * (curr_activ / mean_activ) * trace_prev

        for j in range(self.Npos):
            # Saturated traces learn more slowly.
            self.lr = self.lr_base - self.lr_decay if T[j] > self.threshold else self.lr_base
            delta[j, :] = self.lr * mod[j] * (pre * (post[j] - self.beta1) - self.beta2 * (post[j] - self.beta1))

        return W + delta, T
class MovingAverageTraceRule:
    """Trace rule driven by a cumulative moving average (CMA) of activations.

    Trace(t) = base_confidence_val * Trace(t-1) + alpha * CMA(activ)

    where alpha is +1 for activations >= 0.75 and -1 otherwise.
    """

    def __init__(self, Npre, Npos, beta1, beta2, beta3, base_confidence_val, trace_threshold, trace_scaler, lr, lr_decay):
        # NOTE(review): beta3 is accepted but never used; Npre/trace_scaler are stored but unused here.
        self.beta1 = beta1
        self.beta2 = beta2
        self.Npre = Npre
        self.Npos = Npos
        self.threshold = trace_threshold
        self.trace_scaler = trace_scaler
        self.base_confidence_val = base_confidence_val
        self.lr_base = lr
        self.lr_decay = lr_decay

    def __call__(self, W, pre, post, mod, trace_prev, curr_activ, CMA_prev, sample_count):
        """Apply one update and return the new weights and trace.

        :param trace_prev: [Npos] previous trace values
        :param curr_activ: [Npos] current hidden-layer activations
        :param CMA_prev: previous cumulative moving average of activations
        :param sample_count: number of samples already folded into CMA_prev
        :return: (W, T) — updated weights and modified trace
        """
        delta = np.zeros_like(W)

        # Incremental cumulative-moving-average update.
        cma = CMA_prev + (curr_activ - CMA_prev) / (sample_count + 1)

        # Sign term: activations below 0.75 push the trace down.
        alpha = np.where(curr_activ < 0.75, -1.0, 1.0)
        T = self.base_confidence_val * trace_prev + alpha * cma

        for j in range(self.Npos):
            # Saturated traces learn more slowly.
            self.lr = self.lr_base - self.lr_decay if T[j] > self.threshold else self.lr_base
            delta[j, :] = self.lr * mod[j] * (pre * (post[j] - self.beta1) - self.beta2 * (post[j] - self.beta1))

        return W + delta, T
class ExponentialMovingAverageTraceRule:
    """Trace rule combining a Gaussian activity factor with a cumulative
    moving average (CMA) of activations.

    Trace(t) = base_conf * Trace(t-1) * (1/std(a)) * exp(-0.5 * (a - mean(a)) / std(a))
               + alpha * CMA(a)

    A rolling window of activation vectors (``activ_arr``) supplies the
    mean/std statistics and is reset every 100 samples.

    sample_count: start at 0; it is advanced on every call.
    """

    def __init__(self, Npre, Npos, beta1, beta2, beta3, base_confidence_val, trace_threshold, trace_scaler, lr, lr_decay, sample_count, activ_arr):
        # NOTE(review): beta3 is accepted but never used; Npre/trace_scaler are stored but unused here.
        self.beta1 = beta1
        self.beta2 = beta2
        self.Npre = Npre
        self.Npos = Npos
        self.threshold = trace_threshold
        self.trace_scaler = trace_scaler
        self.base_confidence_val = base_confidence_val
        self.lr_base = lr
        self.sample_count = sample_count
        self.lr_decay = lr_decay
        self.activ_arr = activ_arr  # rolling history of activation vectors

    def __call__(self, W, pre, post, mod, trace_prev, curr_activ, CMA_prev):
        """Apply one update and return the new weights and trace.

        :param trace_prev: [Npos] previous trace values
        :param curr_activ: [Npos] current hidden-layer activations
        :param CMA_prev: previous cumulative moving average of activations
        :return: (W, T) — updated weights and modified trace
        """
        self.sample_count += 1
        if self.sample_count == 100:
            # Reset the rolling statistics window every 100 samples.
            self.sample_count = 0
            self.activ_arr = []
        # FIX: the original appended curr_activ twice (copy-paste duplicate),
        # double-counting every sample in the mean/std statistics.
        self.activ_arr.append(curr_activ)

        hist = np.array(self.activ_arr)
        # NOTE(review): std is 0 for a constant history — division by zero; confirm inputs vary.
        z = -0.5 * ((curr_activ - np.mean(hist)) / np.std(hist))
        gauss_factor = np.exp(z) / np.std(hist)

        # Incremental cumulative-moving-average update.
        CMA_new = CMA_prev + (curr_activ - CMA_prev) / (self.sample_count + 1)
        # Sign term: activations below 0.75 push the trace down.
        alpha = np.ones_like(curr_activ)
        alpha[curr_activ < 0.75] = -1
        T = self.base_confidence_val * trace_prev * gauss_factor + alpha * CMA_new

        dW = np.zeros_like(W)
        for j in range(self.Npos):
            # Saturated traces learn more slowly.
            self.lr = self.lr_base - self.lr_decay if T[j] > self.threshold else self.lr_base
            dW[j, :] = self.lr * mod[j] * (pre * (post[j] - self.beta1) - self.beta2 * (post[j] - self.beta1))
        return W + dW, T
class InputEncoding:
    """Convert raw input data into an iterable stream for input neurons.

    Supported schemes:
        "Poisson" — a scalar value becomes a random Poisson spike train of
            ``bins`` steps at rate ``frequency * value``; a multi-element
            stream is passed through unchanged.
        "Binary"  — a scalar value becomes a one-hot vector of length
            ``bins``; a multi-element stream is passed through unchanged.
    """

    def __init__(self, encoding_scheme: str = None, in_stream=None, frequency: int = None, bins: int = None):
        """
        Parameters:
            encoding_scheme (str): "Poisson" or "Binary".
            in_stream: input data — a scalar wrapped in a length-1 array/list,
                or an already-encoded stream.
            frequency (int): Poisson rate scale (spikes/s) for scalar inputs.
            bins (int): length of the generated stream for scalar inputs.
        """
        self.encoding = encoding_scheme
        self.in_stream = in_stream
        self.fr = frequency
        self.bins = bins

    def get_iterable(self):
        """Return the encoded iterable for the configured scheme.

        Raises:
            ValueError: if the encoding scheme is unknown
                (FIX: the original silently returned None).
        """
        # Arrays are flattened once up front (shared by both schemes).
        if isinstance(self.in_stream, np.ndarray):
            self.in_stream = self.in_stream.flatten()

        if self.encoding == "Poisson":
            # Already a stream: pass through unchanged.
            if len(self.in_stream) > 1:
                return self.in_stream
            # Scalar: draw a Bernoulli spike per bin at rate fr * value * dt.
            dt = 0.001  # hard-coded simulation step — TODO expose as a parameter
            rate = self.fr * self.in_stream[0]
            spikes = (np.random.rand(1, self.bins) < rate * dt).astype(int)
            return spikes[0]

        if self.encoding == "Binary":
            # Already a stream: pass through unchanged.
            if len(self.in_stream) > 1:
                return self.in_stream
            # Scalar: one-hot vector with the spike at index int(value).
            # NOTE(review): assumes 0 <= value < bins — confirm with callers.
            one_hot = np.zeros(self.bins)
            one_hot[int(self.in_stream[0])] = 1
            return one_hot

        raise ValueError(f"Unknown encoding scheme: {self.encoding!r}")
@dataclass
class LearningParams:
    """Parameters for learning.

    Attributes:
        learning_rule: learning rule to use — currently "STDP", "r-STDP" or None.
        A_p: potentiation constant for the STDP learning rule.
        A_n: depression constant for the STDP learning rule.
        tau: time constant for the STDP learning rule.
        bins: number of time steps in the simulation window.
        dt: time step for the simulation.
        Rm: membrane resistance of the neuron.
        Cm: membrane capacitance of the neuron.
        Tm: membrane time constant; derived as Rm * Cm when not given explicitly.
        X_tar: target value for the trace-based learning rule.
    """

    learning_rule: str = "STDP"
    A_p: float = 0.01
    A_n: float = -0.01
    tau: float = 100
    bins: int = 300
    dt: float = 0.001
    Rm: float = 1e8
    Cm: float = 1e-7
    Tm: float = None  # None means "derive from Rm * Cm" in __post_init__
    X_tar: float = 0.5

    def __post_init__(self):
        # FIX: `Tm: float = Rm * Cm` was evaluated once at class-definition
        # time from the *default* Rm/Cm, so overriding Rm or Cm left Tm stale.
        # Derive it per instance unless the caller supplied an explicit Tm.
        if self.Tm is None:
            self.Tm = self.Rm * self.Cm


if __name__ == "__main__":
    # Smoke test: defaults, overrides, and dict round-trip of the dataclass.
    learn_params = LearningParams()
    updated_params = LearningParams(A_p=0.02, A_n=-0.02, tau=200)
    print(learn_params, "\n", updated_params)
    print(learn_params.A_p, updated_params.A_p)
    data_dict = learn_params.__dict__
    data_keys = list(data_dict)
    print(data_dict)
    print(data_keys, len(data_keys))
NeuralNetwork: @@ -28,7 +30,7 @@ def add_neuron(self, new_neuron=None): if not new_neuron: self._nrn_count += 1 neuron = LIFNeuron(str(self._nrn_count)) - elif isinstance(new_neuron, str): + elif type(new_neuron) == str: self._nrn_count += 1 neuron = LIFNeuron(new_neuron) elif isinstance(new_neuron, Neuron): @@ -56,22 +58,27 @@ def list_neurons(self): print("{},".format(self.nrns[n].name), end=" ") print("\b\b}") - def add_synapse(self, new_synapse=None): + def add_synapse(self, new_synapse=None, learning_flag=False): """ - Add synapse to a network. If a tuple is provided, a new Synapse object is created and added + Add synapse to a network. If a tuple is provided, a new synapse object is created and added """ + # Can simplify this a bit + synapse_obj = LearningSynapse if type(new_synapse) == tuple and any(isinstance(item, str) for item in new_synapse) else Synapse + if not new_synapse: raise TypeError("Needs synapse object with pre and post neurons") - elif isinstance(new_synapse, tuple) and len(new_synapse) >= 2 and len(new_synapse) < 5: - tmpsyn = Synapse(*new_synapse) - elif isinstance(new_synapse, Synapse): + elif type(new_synapse) == tuple and len(new_synapse) >= 2 and len(new_synapse) < 7: + tmpsyn = synapse_obj(*new_synapse) + elif type(new_synapse) == synapse_obj: + tmpsyn = new_synapse + elif isinstance(new_synapse, synapse_obj): tmpsyn = new_synapse else: - raise TypeError("Must provide Synapse Object") + raise TypeError("Must provide valid Synapse Object") if tmpsyn.get_key() not in self.synps: self.synps[tmpsyn.get_key()] = tmpsyn - self.update_network(tmpsyn) + self.update_network(tmpsyn, learning_flag=learning_flag) else: print( "Warning! Not Added! 
" @@ -91,13 +98,15 @@ def add_multiple_synapses(self, synapse_iterable=None): def update_input_neuron(self, neuron_name, input_values): self.nrns[neuron_name].connect_to_input(input_values) - + # Will be called automatically if a synapse is added - def update_network(self, new_synapse): + # TODO: learning_flat is unused. Needs to be implemented or removed. + def update_network(self, new_synapse, learning_flag=False): """ - build the connection map from the Synapses and Neuron information contained in them + build the connection map from the simple_synapses and Neuron information contained in them """ - validate_instance(new_synapse, Synapse) + synapse_obj = LearningSynapse if type(new_synapse) == tuple and any(isinstance(item, str) for item in new_synapse) else Synapse + validate_instance(new_synapse, synapse_obj) new_synapse._post.presyn.add(new_synapse) def step(self): @@ -156,14 +165,94 @@ def run(self, n_steps=1, debug_mode=False, record_potentials=False): if record_potentials: final_potentials = pd.DataFrame({"potential": [], "neuron_number": []}) neuron_number = 0 + to_append = [] for neuron in self.nrns: - final_potentials = final_potentials.append( + to_append.append( { "potential": self.nrns[neuron].voltage, "neuron_number": neuron_number, - }, - ignore_index=True, + } ) neuron_number += 1 + final_potentials = pd.concat([final_potentials, pd.DataFrame(to_append)], ignore_index=True) return df, final_potentials return df + + +if __name__ == "__main__": + import numpy as np + + base_image = np.array( + [ + [0, 0, 1, 0, 0], + [0, 1, 0, 1, 0], + [1, 0, 0, 0, 1], + [0, 1, 0, 1, 0], + [0, 0, 1, 0, 0], + ] + ) + noisy = 0 + np.random.seed(1) + noise = np.random.rand(5, 5) + noise_image = base_image + noise * 0.2 + noise_image[noise_image > 1] = 1 + image = noise_image if noisy else base_image + image = image.flatten() + nn = NeuralNetwork() + neuron_obj_dict = {} + n_bins = 150 + for in_neuron in range(25): + neuron_obj_dict[f"N{in_neuron}"] = InputNeuron( + 
name=f"N{in_neuron}", + threshold=-0.055, + voltage=-0.065, + frequency=100, + bins=n_bins, + record=False, + ) + nn.add_neuron(neuron_obj_dict[f"N{in_neuron}"]) + input_stream = InputEncoding("Poisson", in_stream = np.array([image[in_neuron]]), frequency = 100, bins = n_bins).get_iterable() + nn.update_input_neuron(neuron_name=f"N{in_neuron}", input_values=input_stream) + if in_neuron == 6 or in_neuron == 16: + neuron_obj_dict[f"N{in_neuron}"].show_iterable() + + out = LIFNeuron( + "O1", + threshold=-0.055, + reset_voltage=-0.065, + leakage_constant=0.1, + voltage=-0.065, + ) + nn.add_neuron(out) + for synapse in range(25): + nn.add_synapse( + (nn.nrns[f"N{synapse}"], nn.nrns[f"O1"], "STDP", 1, 0.04), + ) + print(len(nn.synps)) + df = nn.run(n_bins) + i = 0 + weight_arr = [] + for key, value in nn.synps.items(): + i += 1 + weight_arr.append(value.weight) + + print(image.reshape(5, 5)) + weight_arr = np.array(weight_arr) + print(weight_arr.reshape(5, 5)) + import matplotlib.pyplot as plt + plt.imshow(1 - weight_arr.reshape(5, 5)) + plt.title("Trained weights", fontdict={"fontsize": 20, "weight": "bold"}) + plt.savefig("nn_base_ss_noise.png") + + plt.show() + + plt.imshow(1 - image.reshape(5, 5)) + plt.title("Input Image", fontdict={"fontsize": 20, "weight": "bold"}) + plt.savefig("imput_image_noise.png") + + plt.show() + + weights = np.full((5, 5), 0.04) + plt.imshow(weights) + plt.title("Input weights") + plt.show() diff --git a/fugu/simulators/SpikingNeuralNetwork/neuron.py b/fugu/simulators/SpikingNeuralNetwork/neuron.py index a690abac..29d878f3 100644 --- a/fugu/simulators/SpikingNeuralNetwork/neuron.py +++ b/fugu/simulators/SpikingNeuralNetwork/neuron.py @@ -6,14 +6,14 @@ import sys import numpy as np - -from fugu.utils.types import bool_types, float_types, str_types +from fugu.utils.types import bool_types, float_types, int_types, str_types from fugu.utils.validation import int_to_float, validate_type if sys.version_info >= (3, 4): ABC = abc.ABC else: ABC = 
abc.ABCMeta("ABC", (), {"__slots__": ()}) +import sys class Neuron(ABC): @@ -63,7 +63,9 @@ def __init__( leakage_constant=1.0, voltage=0.0, bias=0.0, - p=1.0, + p=1, + scaling_factor=0.1, + scaling=False, record=False, ): """ @@ -71,20 +73,20 @@ def __init__( Parameters: name (any): String, optional. String name of a neuron. The default is None. - threshold : Double, optional. Threshold value above while the neuron spikes. The default is 0.0. - reset_voltage : Double, optional. The voltage to which the neuron resets after spiking. The default is 0.0. - leakage_constant : Double, optional - The rate at which the neuron voltage decays. The leakage with rate - m is calculated as m*v. A rate of m=1 indicates no leak. For - realistic models, 0<= m <=1. The default is 1.0. - voltage : Double, optional. Internal voltage of the neuron. The default is 0.0. - bias : Double, optional. Constant bias voltage value that is added at every timestep. The default is 0.0 - p (double): optional. Probability of spiking if voltage exceeds threshold. - p=1 indicates a deterministic neuron. The default is 1.0. - record (bool): optional. Indicates if a neuron spike state should be sensed with probes. Default is False. - + threshold (Double) : optional. Threshold value above while the neuron spikes. The default is 0.0. + reset_voltage (Double) : optional. The voltage to which the neuron resets after spiking. The default is 0.0. + leakage_constant (Double) : optional. The rate at which the neuron voltage decays. The leakage with rate + m is calculated as m*v. A rate of m=1 indicates no leak. For + realistic models, 0<= m <=1. The default is 1.0. + voltage (Double) : optional. Internal voltage of the neuron. The default is 0.0. + bias (Double) : optional. Constant bias voltage value that is added at every timestep. The default is 0.0 + p (Double) : optional. Probability of spiking if voltage exceeds threshold. p=1 indicates a deterministic neuron. The default is 1.0. 
+ scaling_factor (Double) : optional. The factor by which the weights should be scaled down to. Since we are scaling the + weights, the range should lie between 0 < scaling_factor <=1. The default is 0.1. + scaling (Bool) : optional. Indicates if the weights of the neuron need to undergo synaptic scaling or not. + record (Bool) : optional. Indicates if a neuron spike state should be sensed with probes. Default is False. Returns: - none + None """ threshold = int_to_float(threshold) @@ -93,6 +95,7 @@ def __init__( voltage = int_to_float(voltage) bias = int_to_float(bias) p = int_to_float(p) + scaling_factor = int_to_float(scaling_factor) validate_type(name, str_types) validate_type(threshold, float_types) @@ -101,6 +104,8 @@ def __init__( validate_type(voltage, float_types) validate_type(bias, float_types) validate_type(p, float_types) + validate_type(scaling_factor, float_types) + validate_type(scaling, bool_types) validate_type(record, bool_types) if leakage_constant < 0 or leakage_constant > 1: @@ -109,6 +114,9 @@ def __init__( if p < 0 or p > 1: raise ValueError("Probability p must be in the interval [0, 1].") + if scaling_factor <= 0 or scaling_factor >=1: + raise ValueError("Scaling factor must be in the interval (0,1]") + super(LIFNeuron, self).__init__() self.name = name self._T = threshold @@ -118,7 +126,23 @@ def __init__( self.v = voltage self.presyn = set() self.record = record + self.scaling = scaling self.prob = p + self._S = scaling_factor + + @staticmethod + def scale_weights(weight_arr, scale): + """ + Scales the weights of the synapses by applying synaptic scaling based on the maximum weight value + Parameters: + weight_arr (np.ndarray): array of weights of presynaptic neurons + scale: The factor by which the weights need to be scaled + Returns: + None + + """ + weight_arr /= np.sum(weight_arr) * scale + return weight_arr def update_state(self): """ @@ -134,6 +158,10 @@ def update_state(self): """Update the states for one time step""" input_v = 0.0 
+ if self.scaling: + scaled_weights = self.scale_weights(self.get_presynaptic_weights(), self._S) + self.set_presynaptic_weights(scaled_weights) + if self.presyn: for s in self.presyn: if len(s._hist) > 0: @@ -182,7 +210,7 @@ def show_params(self): def show_presynapses(self): """ - Display the synapses the feed into the neuron. + Display the synapses that feed into the neuron. Returns: none @@ -195,6 +223,92 @@ def show_presynapses(self): else: print("{0} receives input via synapses: {1}".format(self.__repr__(), self.presyn)) + def get_presynapses(self): + """ + Returns the presynaptic neurons that feed into the neuron. + + Returns: + set: set of presynaptic neurons + """ + + return self.presyn + + def get_presynaptic_weights(self): + """ + Returns the weights of the presynaptic neurons that feed into the neuron. + + Returns: + list: list of weights of presynaptic neurons + """ + + return np.array([s.weight for s in self.presyn]) + + def set_presynaptic_weights(self, weight_arr): + """ + Set the weights of the presynaptic neurons that feed into the neuron. + + Parameters: + weight_arr (np.ndarray): array of weights of presynaptic neurons + Returns: + None + """ + + for i, s in enumerate(self.presyn): + s.weight = weight_arr[i] + + @property + def scaling_factor(self): + return self._S + + @scaling_factor.setter + def scaling_factor(self, new_factor): + new_factor = int_to_float(new_factor) + validate_type(new_factor, float_types) + self._S = new_factor + + def get_presynapses(self): + """ + Returns the presynaptic neurons that feed into the neuron. + + Returns: + set: set of presynaptic neurons + """ + + return self.presyn + + def get_presynaptic_weights(self): + """ + Returns the weights of the presynaptic neurons that feed into the neuron. 
+ + Returns: + list: list of weights of presynaptic neurons + """ + + return np.array([s.weight for s in self.presyn]) + + def set_presynaptic_weights(self, weight_arr): + """ + Set the weights of the presynaptic neurons that feed into the neuron. + + Parameters: + weight_arr (np.ndarray): array of weights of presynaptic neurons + Returns: + None + """ + + for i, s in enumerate(self.presyn): + s.weight = weight_arr[i] + + @property + def scaling_factor(self): + return self._S + + @scaling_factor.setter + def scaling_factor(self, new_factor): + new_factor = int_to_float(new_factor) + validate_type(new_factor, float_types) + self._S = new_factor + @property def threshold(self): return self._T @@ -239,21 +353,30 @@ def __repr__(self): class InputNeuron(Neuron): """ Input Neuron. Inherits from class Neuron. - Input Neurons can read inputs and convert them to spike streams + Input Neurons can read streaming inputs """ - def __init__(self, name=None, threshold=0.1, voltage=0.0, record=False): + def __init__( + self, + name=None, + threshold=0.1, + voltage=0.0, + frequency=100, + bins=100, + record=False, + ): """ - Constructor for Input Neuron. + Constructor for the new input neuron class Parameters: - name : String, optional. Input neuron name. The default is None. - threshold (double): optional. Threashold value above which the neuron spikes. The default is 0.1. - voltage (double): optional. Membrane voltage. The default is 0.0. - record (bool): optional. Indicates if a neuron spike state should be sensed with probes. The default is False. - + name: String, optional. Input neuron name. The default is None. + threshold: double, optional. Threshold value above which the neuron spikes. The default is 0.1. + voltage: double, optional. Membrane voltage. The default is 0.0. + frequency: double, optional. Frequency of the Poisson spikes from the input data. The default is 100. + bins: double, optional. Number of bins for the Poisson spikes. The default is 100. 
+ record: bool, optional. Indicates if a neuron spike state should be sensed with probes. The default is False. Returns: - none + None """ threshold = int_to_float(threshold) @@ -262,6 +385,8 @@ def __init__(self, name=None, threshold=0.1, voltage=0.0, record=False): validate_type(name, str_types) validate_type(threshold, float_types) validate_type(voltage, float_types) + validate_type(frequency, int_types) + validate_type(bins, int_types) validate_type(record, bool_types) super(InputNeuron, self).__init__() @@ -270,13 +395,15 @@ def __init__(self, name=None, threshold=0.1, voltage=0.0, record=False): self.v = voltage self._it = None self.record = record + self.fr = frequency + self.bins = bins def connect_to_input(self, in_stream): """ Enables a neuron to read in an input stream of data. Parameters: - in_stream (any): interable data streams of ints or floats. input data (any interable stream such as lists, arrays, etc.). + in_stream: interable data streams of ints or floats. input data (any interable stream such as lists, arrays, etc.). Raises: TypeError: if in_stream is not iterable. Returns: @@ -288,6 +415,34 @@ def connect_to_input(self, in_stream): else: self._it = iter(in_stream) + def show_iterable(self): + """ + Display the iterable input data stream. + + Returns: + None + """ + from itertools import tee + + iter_copy = tee(self._it) + iter_list = list(iter_copy) + print(f"Input Neuron {self.name} has input stream {iter_list}") + print(f"The input stream has {np.count_nonzero(np.array(iter_list))} spikes") + + def show_iterable(self): + """ + Display the iterable input data stream. + + Returns: + None + """ + from itertools import tee + + iter_copy = tee(self._it) + iter_list = list(iter_copy) + print(f"Input Neuron {self.name} has input stream {iter_list}") + print(f"The input stream has {np.count_nonzero(np.array(iter_list))} spikes") + def update_state(self): """ Updates the neuron states. 
The neuron spikes if the input value in @@ -301,12 +456,11 @@ def update_state(self): try: n = next(self._it) + # print (n, "THe iterable value", sum(1 for e in self._it)) if not isinstance(n, numbers.Real): raise TypeError("Inputs must be int or float") - self.v = n - - if self.v > self._T: + if self.v > 0: self.spike = True self.v = 0 else: @@ -315,6 +469,8 @@ def update_state(self): except StopIteration: self.spike = False self.v = 0 + # Need to check if there is any way to store only the timestep instead of the entire spike history + self.spike_hist.append(self.spike) @property def threshold(self): @@ -361,9 +517,10 @@ def __repr__(self): n0.connect_to_input(2) except: print("Raises TypeError because input is not iterable") - ip = [5, 4, 0, 1] + ip = np.array([5, 4, 0, 1]) n0.connect_to_input(ip) print(f"Input Neuron Spikes for 7 time steps with input stream {ip}:") for i, _ in enumerate(range(7)): n0.update_state() print(f"Time {i}: {n0.spike}") + diff --git a/fugu/simulators/SpikingNeuralNetwork/synapse.py b/fugu/simulators/SpikingNeuralNetwork/synapse.py index b5b8b664..13b11a8b 100644 --- a/fugu/simulators/SpikingNeuralNetwork/synapse.py +++ b/fugu/simulators/SpikingNeuralNetwork/synapse.py @@ -1,15 +1,20 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- +import abc from collections import deque - +import sys import numpy as np - -# from ..neuron.neuron import Neuron -from fugu.simulators.SpikingNeuralNetwork import Neuron -from fugu.utils.types import float_types, int_types +from fugu.simulators.SpikingNeuralNetwork.learning_params import LearningParams +from fugu.simulators.SpikingNeuralNetwork.neuron import Neuron +from fugu.utils.types import float_types, int_types, str_types from fugu.utils.validation import int_to_float, validate_type +from fugu.simulators.SpikingNeuralNetwork.learning_params import LearningParams +if sys.version_info >= (3, 4): + ABC = abc.ABC +else: + ABC = abc.ABCMeta("ABC", (), {"__slots__": ()}) class Synapse: """ @@ -19,7 +24,7 
@@ class Synapse: time-steps. """ - def __init__(self, pre_neuron, post_neuron, delay=1, weight=1.0): + def __init__(self, pre_neuron : Neuron, post_neuron : Neuron, delay=1, weight=1.0): """ Parameters: pre_neuron (any): Neuron that provides input to the synapse @@ -196,20 +201,240 @@ def update_state(self): self._hist.popleft() + + +class LearningSynapse(Synapse): + """ + Synapses connect neurons in a neural network. The synapse class in a + spiking objects models a simple synapse type that scales the input by a + weight (double) and relays the information with a delay (non-negative int) of n + time-steps. + + Parameters: + Pre_neuron (Neuron): Pre-synaptic neuron that provides input to the synapse + Post_neuron (Neuron): Post-synaptic neuron that receives the signals from the synapse + learning_rule (str): optional. The learning rule to be applied to the synapse. Currently provides options among (["STDP", "three-factor"]). The default is None. + Delay (int) : non-negative Int, optional. Number of time steps needed to relay the scaled spike. The default is 1. + Weight (double): optional. Scaling value for incoming spike. The default is 1.0. + Mod_neuron (Neuron): optional. 
Modulatory neuron for transmitting either errors or other modulatory signals + """ + + def __init__( + self, + pre_neuron: Neuron, + post_neuron: Neuron, + learning_rule: str = "STDP", + delay: int = 1, + weight: float = 1.0, + mod_neuron: Neuron = None, + learning_params = None + ): + if not isinstance(pre_neuron, Neuron) or not isinstance(post_neuron, Neuron): + raise TypeError("Pre and Post Synaptic neurons must be of type Neuron") + + weight = int_to_float(weight) + validate_type(delay, int_types) + validate_type(weight, float_types) + validate_type(learning_rule, str_types) + + if delay < 1: + raise ValueError("delay must be a strictly positive (>0) int value") + + if learning_rule not in ["STDP", "r-STDP", "three-factor"]: + raise ValueError( + "Learning rule must be one of the following: ['STDP', 'r-STDP', 'three-factor']" + ) + + self._d = delay + self._w = weight + self._pre = pre_neuron + self._post = post_neuron + self._hist = deque(np.zeros(self._d)) + self.name = "s_" + self._pre.name + "_" + self._post.name + self._learning_rule = learning_rule + self._name_learning_rule = self._learning_rule #if learning_rule != "None" else "Simple" + # self._name_learning_rule = ( + # self._learning_rule if learning_rule != "None" else "Simple" + # ) + if learning_rule == "three_factor": + if not isinstance(mod_neuron, Neuron): + raise TypeError("Modulatory neuron must be of type neuron") + self._mod = mod_neuron + self._learning_params = learning_params if learning_params is not None else LearningParams() + self._eligibility_trace = 0.0 + + + def __str__(self): + return "{0}_Synapse {1}({2}, {3})".format( + self._name_learning_rule, self.name, self._d, self._w + ) + + def show_params(self): + """ + Display the information of the synapse (pre-synaptic neuron, post-synaptic neuron, + delay, and weight). 
+ + Returns: + None + """ + + print( + "Synapse {0} -> {1}:\n delay : {2}\n weight : {3}\n learning_rule : {4}".format( + self._pre, + self._post, + self._d, + self._w, + self._name_learning_rule + ) + ) + + def get_learning_params(self): + """ + Get the learning parameters for the synapse + + Returns: + self._learning_params (LearningParams.__dict__): Learning parameters for the synapse as a dictionary + """ + self._learning_params = LearningParams() + return self._learning_params.__dict__ + + + def set_learning_params(self, learning_params: LearningParams): + """ + Set the learning parameters for the synapse + + Parameters: + learning_params (LearningParams): Learning parameters for the synapse + + Returns: + None + """ + if not isinstance(learning_params, LearningParams): + raise TypeError("Learning parameters must be of type LearningParams") + self._learning_params = learning_params + + + def update_state(self): + """ + Updates the time evolution of the states for one time step. The spike information is + sent through a queue of length given by the delay and scaled by the weight value. 
+ + Returns: + None + """ + + self.apply_learning() + + # TODO introduce a trace variable instead of the weight history + # self.w_hist_trace.append(self._w) + # print ("Entered this condition", self._pre.spike_hist, self._pre.spike) + + if self._pre.spike: + self._hist.append(self._w) + else: + self._hist.append(0.0) + + self._hist.popleft() + + @staticmethod + def calculate_spike_timing(spike_hist: list) -> int: + """ + Calculate the timing between the reference and either pre or post synaptic spike + + Parameters: + spike_hist (deque): Spike history of the post or pre synaptic neuron + Returns: + int: time difference between the pre and post synaptic spikes + """ + spike_hist = list(map(int, spike_hist)) + if all(val == 0 for val in spike_hist): + return 0 + else: + spike_ind = max([ind for ind, val in enumerate(spike_hist) if val]) + spike_time = len(spike_hist) - spike_ind + return spike_time + + def apply_learning(self) -> None: + """ + Modify the weight of the synapse with STDP learning rule. 
+ The STDP learning rule is implemented as follows: + + When a post synaptic spike occurs after a pre synaptic spike, the weight is increased according to + w += A_p * exp(-delta_t/tau), where delta_t is the difference in spike time of the post and pre synaptic neuron respectively + + When a post synaptic spike occurs before a pre synaptic spike, the weight is decreased according to + w += A_n * exp(delta_t/tau), where delta_t is the difference in spike time of the post and pre synaptic neuron respectively + + Returns: + None + """ + + pre_spike_hist = self._pre.spike_hist + post_spike_hist = self._post.spike_hist + + if len(pre_spike_hist) <= self._d: + self._w += 0 + return + + delay_shift = len(pre_spike_hist) - self._d - 1 + assert delay_shift >= 0, "Delay is greater than the length of the spike history" + post_spike_time = self.calculate_spike_timing(post_spike_hist[:-1]) if len(post_spike_hist) > 1 else 0 + assert post_spike_time >= 0, "Post spike time is negative" + pre_spike_time = self.calculate_spike_timing(pre_spike_hist[: delay_shift + 1]) + + + if self._learning_rule == "STDP": + if self._post.spike: + # If the pre synaptic neuron spikes at the same time step considering the delay + if pre_spike_hist[delay_shift : delay_shift + 1][0]: + # Increase the weight + self._w += self._learning_params.A_p + + else: + # Increase the weight as post happened after pre + if pre_spike_time != 0: + self._w += self._learning_params.A_p * np.exp( + (-pre_spike_time) / self._learning_params.tau + ) + else: + self._w += 0 + + # If the post synaptic neuron does not spike + else: + # If the pre synaptic spike occurs at the current time step, then pre after post + if pre_spike_hist[delay_shift : delay_shift + 1][0]: + + # Decrease the weight as pre happened after post + if post_spike_time != 0: + self._w += self._learning_params.A_n * np.exp( + (-post_spike_time) / self._learning_params.tau + ) + else: + self._w += 0 + else: + self._w += 0 + + elif self._learning_rule == 
"three-factor": + pass + + else: + pass + + if __name__ == "__main__": from fugu.simulators.SpikingNeuralNetwork.neuron import LIFNeuron n1 = LIFNeuron("n1") n2 = LIFNeuron("n2") - s = Synapse(n1, n2, delay=1, weight=1.0) + s = LearningSynapse(n1, n2, delay=1, weight=1.0) try: - s.delay(0) + s.delay = 0 except: print("Raised Value error Exception since delay is < 1") try: - s.delay(2.5) + s.delay = 2.5 except: print("Raised type error since delay was a float") diff --git a/fugu/utils/whetstone_conversion.py b/fugu/utils/whetstone_conversion.py index d7383a27..25156465 100644 --- a/fugu/utils/whetstone_conversion.py +++ b/fugu/utils/whetstone_conversion.py @@ -10,14 +10,8 @@ from fugu.bricks.keras_pooling_bricks import keras_pooling_2d_4dinput as pooling_2d from fugu.bricks.keras_dense_bricks import keras_dense_2d_4dinput as dense_layer_2d from fugu.scaffold import Scaffold -try: - from tensorflow.keras.layers import Dense, Conv2D, Flatten, MaxPooling2D, BatchNormalization -except ModuleNotFoundError: - import pytest - pytest.skip(reason="Tensorflow package missing.", allow_module_level=True) -except ImportError: - raise SystemExit('\n *** Tensorflow package is not installed. 
*** \n') +from tensorflow.keras.layers import Dense, Conv2D, Flatten, MaxPooling2D, BatchNormalization try: from whetstone.layers import Spiking_BRelu, Softmax_Decode from whetstone.utils.export_utils import copy_remove_batchnorm diff --git a/setup.py b/setup.py index 3a8f7745..672226a5 100644 --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ setup( name="fugu", - version="1.1", + version="0.1", description="A python library for computational neural graphs", install_requires=[ "decorator~=4.4.2", @@ -20,12 +20,9 @@ "furo~=2021.11.16", "pyyaml", "pytest", - "scipy", ], extras_require={ - "whetstone": ["tensorflow<=2.10", "keras<=2.10"], - "dev": ["pre-commit", "isort", "black", "tqdm"], - "examples": ["notebook", "matplotlib", "tqdm"], + "whetstone": ["tensorflow<=2.10", "keras<=2.10", "scipy"], }, packages=package_list, ) diff --git a/tests/integration/simulators/test_spiking_neural_network.py b/tests/integration/simulators/test_spiking_neural_network.py index 6d4f83e8..c6984b83 100644 --- a/tests/integration/simulators/test_spiking_neural_network.py +++ b/tests/integration/simulators/test_spiking_neural_network.py @@ -3,7 +3,7 @@ from fugu.simulators.SpikingNeuralNetwork.neuralnetwork import NeuralNetwork from fugu.simulators.SpikingNeuralNetwork.neuron import InputNeuron, LIFNeuron -from fugu.simulators.SpikingNeuralNetwork.synapse import Synapse +from fugu.simulators.SpikingNeuralNetwork.synapse import LearningSynapse @pytest.fixture @@ -17,18 +17,18 @@ def _inner(record): layer_neuron_3 = LIFNeuron("layer-1-3", record=record) out_neuron = LIFNeuron("out", record=record) - synapse_a_1 = Synapse(in_neuron_a, layer_neuron_1) - synapse_a_2 = Synapse(in_neuron_a, layer_neuron_2) - synapse_a_3 = Synapse(in_neuron_a, layer_neuron_3) - synapse_b_1 = Synapse(in_neuron_b, layer_neuron_1) - synapse_b_2 = Synapse(in_neuron_b, layer_neuron_2) - synapse_b_3 = Synapse(in_neuron_b, layer_neuron_3) - synapse_1_o = Synapse(layer_neuron_1, out_neuron) - synapse_2_o = 
Synapse(layer_neuron_2, out_neuron) - synapse_3_o = Synapse(layer_neuron_3, out_neuron) - synapse_2_1 = Synapse(layer_neuron_2, layer_neuron_1) - synapse_2_2 = Synapse(layer_neuron_2, layer_neuron_2) - synapse_2_3 = Synapse(layer_neuron_2, layer_neuron_3) + synapse_a_1 = LearningSynapse(in_neuron_a, layer_neuron_1) + synapse_a_2 = LearningSynapse(in_neuron_a, layer_neuron_2) + synapse_a_3 = LearningSynapse(in_neuron_a, layer_neuron_3) + synapse_b_1 = LearningSynapse(in_neuron_b, layer_neuron_1) + synapse_b_2 = LearningSynapse(in_neuron_b, layer_neuron_2) + synapse_b_3 = LearningSynapse(in_neuron_b, layer_neuron_3) + synapse_1_o = LearningSynapse(layer_neuron_1, out_neuron) + synapse_2_o = LearningSynapse(layer_neuron_2, out_neuron) + synapse_3_o = LearningSynapse(layer_neuron_3, out_neuron) + synapse_2_1 = LearningSynapse(layer_neuron_2, layer_neuron_1) + synapse_2_2 = LearningSynapse(layer_neuron_2, layer_neuron_2) + synapse_2_3 = LearningSynapse(layer_neuron_2, layer_neuron_3) nn.add_multiple_neurons( [ @@ -68,7 +68,7 @@ def spiking_neural_network(): nn = NeuralNetwork() a = LIFNeuron("a", voltage=0.1, record=True) b = LIFNeuron("b", record=True) - synapse = Synapse(a, b) + synapse = LearningSynapse(a, b) nn.add_multiple_neurons([a, b]) nn.add_synapse(synapse) return nn @@ -79,7 +79,7 @@ def spiking_neural_network_w_reset(): nn = NeuralNetwork() a = LIFNeuron("a", voltage=0.1, reset_voltage=0.1, record=True) b = LIFNeuron("b", record=True) - synapse = Synapse(a, b) + synapse = LearningSynapse(a, b) nn.add_multiple_neurons([a, b]) nn.add_synapse(synapse) return nn diff --git a/tests/unit/bricks/test_convolution_brick.py b/tests/unit/bricks/test_convolution_brick.py index 60f6b14f..c5843aa9 100644 --- a/tests/unit/bricks/test_convolution_brick.py +++ b/tests/unit/bricks/test_convolution_brick.py @@ -25,18 +25,14 @@ def test_full_mode_spikes(self, basep, bits, nSpikes): subt = np.zeros(3) subt[:nSpikes] = 0.1 subt = np.reshape(subt, (3,)) - thresholds = ( - 
np.array([2, 5, 3]) - subt - ) # convolution answer is [2,5,3]. Spikes fire when less than threshold. Thus subtract 0.1 so that spikes fire + thresholds = np.array([2, 5, 3]) - subt # convolution answer is [2,5,3]. Spikes fire when less than threshold. Thus subtract 0.1 so that spikes fire self.basep = basep self.bits = bits result = self.run_convolution_1d(thresholds) # get output positions in result - output_positions = self.output_spike_positions( - self.basep, self.bits, self.pvector, self.filters, thresholds - ) + output_positions = self.output_spike_positions(self.basep, self.bits, self.pvector, self.filters, thresholds) output_mask = self.output_mask(output_positions, result) # Check calculations @@ -56,18 +52,14 @@ def test_same_mode_spikes(self, basep, bits, nSpikes): subt = np.zeros(2) subt[:nSpikes] = 0.1 subt = np.reshape(subt, (2,)) - thresholds = ( - np.array([2, 5]) - subt - ) # convolution answer is [2,5]. Spikes fire when less than threshold. Thus subtract 0.1 so that spikes fire + thresholds = np.array([2, 5]) - subt # convolution answer is [2,5]. Spikes fire when less than threshold. Thus subtract 0.1 so that spikes fire self.basep = basep self.bits = bits result = self.run_convolution_1d(thresholds) # get output positions in result - output_positions = self.output_spike_positions( - self.basep, self.bits, self.pvector, self.filters, thresholds - ) + output_positions = self.output_spike_positions(self.basep, self.bits, self.pvector, self.filters, thresholds) output_mask = self.output_mask(output_positions, result) # Check calculations @@ -87,18 +79,14 @@ def test_valid_mode_spikes(self, basep, bits, nSpikes): subt = np.zeros(1) subt[:nSpikes] = 0.1 subt = np.reshape(subt, (1,)) - thresholds = ( - np.array([5]) - subt - ) # convolution answer is [2,5]. Spikes fire when less than threshold. Thus subtract 0.1 so that spikes fire + thresholds = np.array([5]) - subt # convolution answer is [2,5]. Spikes fire when less than threshold. 
Thus subtract 0.1 so that spikes fire self.basep = basep self.bits = bits result = self.run_convolution_1d(thresholds) # get output positions in result - output_positions = self.output_spike_positions( - self.basep, self.bits, self.pvector, self.filters, thresholds - ) + output_positions = self.output_spike_positions(self.basep, self.bits, self.pvector, self.filters, thresholds) output_mask = self.output_mask(output_positions, result) # Check calculations @@ -114,9 +102,7 @@ def test_valid_mode_spikes(self, basep, bits, nSpikes): @pytest.mark.parametrize("bits", [2]) @pytest.mark.parametrize("thresholds", np.arange(1.9, 5, 1)) def test_scalar_threshold(self, basep, bits, thresholds): - ans_thresholds = np.array( - [2, 5, 3] - ) # 2d convolution answer is [2,5,3]. Spikes fire when less than threshold. Thus subtract 0.1 so that spikes fire + ans_thresholds = np.array([2, 5, 3]) # 2d convolution answer is [2,5,3]. Spikes fire when less than threshold. Thus subtract 0.1 so that spikes fire nSpikes = len(ans_thresholds[ans_thresholds > thresholds]) self.basep = basep @@ -124,9 +110,7 @@ def test_scalar_threshold(self, basep, bits, thresholds): result = self.run_convolution_1d(thresholds) # get output positions in result - output_positions = self.output_spike_positions( - self.basep, self.bits, self.pvector, self.filters, thresholds - ) + output_positions = self.output_spike_positions(self.basep, self.bits, self.pvector, self.filters, thresholds) output_mask = self.output_mask(output_positions, result) # Check calculations @@ -246,9 +230,7 @@ def test_full_mode_spikes(self, basep, bits, nSpikes): result = self.run_convolution_2d(thresholds) # get output positions in result - output_positions = self.output_spike_positions( - self.basep, self.bits, self.pvector, self.filters, thresholds - ) + output_positions = self.output_spike_positions(self.basep, self.bits, self.pvector, self.filters, thresholds) output_mask = self.output_mask(output_positions, result) # Check 
calculations @@ -274,9 +256,7 @@ def test_scalar_threshold(self, basep, bits, thresholds): result = self.run_convolution_2d(thresholds) # get output positions in result - output_positions = self.output_spike_positions( - self.basep, self.bits, self.pvector, self.filters, thresholds - ) + output_positions = self.output_spike_positions(self.basep, self.bits, self.pvector, self.filters, thresholds) output_mask = self.output_mask(output_positions, result) # Check calculations @@ -296,18 +276,14 @@ def test_same_mode_spikes(self, basep, bits, nSpikes): subt = np.zeros(4) subt[:nSpikes] = 0.1 subt = np.reshape(subt, (2, 2)) - thresholds = ( - np.array([[1, 3], [4, 10]]) - subt - ) # 2d convolution answer is [[1,3],[4,10]]. Spikes fire when less than threshold. Thus subtract 0.1 so that spikes fire + thresholds = np.array([[1, 3], [4, 10]]) - subt # 2d convolution answer is [[1,3],[4,10]]. Spikes fire when less than threshold. Thus subtract 0.1 so that spikes fire self.basep = basep self.bits = bits result = self.run_convolution_2d(thresholds) # get output positions in result - output_positions = self.output_spike_positions( - self.basep, self.bits, self.pvector, self.filters, thresholds - ) + output_positions = self.output_spike_positions(self.basep, self.bits, self.pvector, self.filters, thresholds) output_mask = self.output_mask(output_positions, result) # Check calculations @@ -327,18 +303,14 @@ def test_valid_mode_spikes(self, basep, bits, nSpikes): subt = np.zeros(1) subt[:nSpikes] = 0.1 subt = np.reshape(subt, (1, 1)) - thresholds = ( - np.array([[10]]) - subt - ) # 2d convolution answer is [[10]]. Spikes fire when less than threshold. Thus subtract 0.1 so that spikes fire + thresholds = np.array([[10]]) - subt # 2d convolution answer is [[10]]. Spikes fire when less than threshold. 
Thus subtract 0.1 so that spikes fire self.basep = basep self.bits = bits result = self.run_convolution_2d(thresholds) # get output positions in result - output_positions = self.output_spike_positions( - self.basep, self.bits, self.pvector, self.filters, thresholds - ) + output_positions = self.output_spike_positions(self.basep, self.bits, self.pvector, self.filters, thresholds) output_mask = self.output_mask(output_positions, result) # Check calculations @@ -363,16 +335,12 @@ def test_same_mode_2x2(self, nSpikes): subt = np.zeros(4) subt[:nSpikes] = 0.1 subt = np.reshape(subt, (2, 2)) - thresholds = ( - np.array([[1, 4], [6, 20]]) - subt - ) # 2d convolution answer is [[1,4],[6,20]]. Spikes fire when less than threshold. Thus subtract 0.1 so that spikes fire + thresholds = np.array([[1, 4], [6, 20]]) - subt # 2d convolution answer is [[1,4],[6,20]]. Spikes fire when less than threshold. Thus subtract 0.1 so that spikes fire result = self.run_convolution_2d(thresholds) # get output positions in result - output_positions = self.output_spike_positions( - self.basep, self.bits, self.pvector, self.filters, thresholds - ) + output_positions = self.output_spike_positions(self.basep, self.bits, self.pvector, self.filters, thresholds) output_mask = self.output_mask(output_positions, result) # Check calculations diff --git a/tests/unit/bricks/test_dense_layer_brick.py b/tests/unit/bricks/test_dense_layer_brick.py index 6ec80545..3d9d27e5 100644 --- a/tests/unit/bricks/test_dense_layer_brick.py +++ b/tests/unit/bricks/test_dense_layer_brick.py @@ -10,13 +10,14 @@ convolve2d = pytest.importorskip("scipy.signal", reason=f"Scipy package not installed. Skipping test file {__file__} because of module dependency.").convolve2d + class Test_DenseLayer1D: def test_simple_explicit_dense_layer_example_1(self): - ''' - Explicit invocation of the pooling brick unit test. Removes the complexity of the tests above to show the + """ + Explicit invocation of the pooling brick unit test. 
Removes the complexity of the tests above to show the basic structure of general unit tests for the pooling brick. - ''' + """ self.basep = 3 self.bits = 3 self.filters = [2, 3] @@ -30,7 +31,7 @@ def test_simple_explicit_dense_layer_example_1(self): self.pool_method = "max" self.dense_thresholds = [0.9, 0.9, 0.9] - self.dense_weights = [[1.0, 1.0, 1.0],[1.0, 1.0, 1.0],[1.0, 1.0, 1.0]] + self.dense_weights = [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]] # self.dense_thresholds = [2.9, 8.9, 15.0] # self.dense_weights = [[1.0, 2.0, 3.0],[4.0, 5.0, 6.0],[7.0, 8.0, 9.0]] @@ -41,10 +42,10 @@ def test_simple_explicit_dense_layer_example_1(self): self.conv_thresholds = self.conv_ans - self.conv_spike_pos pool_input = self.conv_ans > self.conv_thresholds - + # Check calculations dense_input = self.get_expected_pooling_answer(pool_input) - # expected_spikes = self.get_expected_spikes(expected_ans) + # expected_spikes = self.get_expected_spikes(expected_ans) dense_thresholds = np.array(self.dense_thresholds) dense_weights = np.array(self.dense_weights) @@ -55,17 +56,17 @@ def test_simple_explicit_dense_layer_example_1(self): result = self.run_dense_layer_1d(dense_input.shape) # get output positions in result - output_positions = self.get_output_spike_positions() - output_mask = self.get_output_mask(output_positions, result) + output_positions = self.get_output_spike_positions() + output_mask = self.get_output_mask(output_positions, result) calculated_spikes = list(result[output_mask].to_numpy()[:, 0]) assert expected_spikes == calculated_spikes def test_simple_explicit_dense_layer_example_2(self): - ''' - Explicit invocation of the pooling brick unit test. Removes the complexity of the tests above to show the + """ + Explicit invocation of the pooling brick unit test. Removes the complexity of the tests above to show the basic structure of general unit tests for the pooling brick. 
- ''' + """ self.basep = 3 self.bits = 3 self.filters = [2, 3] @@ -79,7 +80,7 @@ def test_simple_explicit_dense_layer_example_2(self): self.pool_method = "max" self.dense_thresholds = [2.9, 8.9, 15.0] - self.dense_weights = [[1.0, 2.0, 3.0],[4.0, 5.0, 6.0],[7.0, 8.0, 9.0]] + self.dense_weights = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]] self.conv_ans = np.convolve(self.pvector, self.filters, mode=self.mode) self.pixel_size = len(self.conv_ans) @@ -87,7 +88,7 @@ def test_simple_explicit_dense_layer_example_2(self): self.conv_thresholds = self.conv_ans - self.conv_spike_pos pool_input = self.conv_ans > self.conv_thresholds - + # Check calculations dense_input = self.get_expected_pooling_answer(pool_input) @@ -99,24 +100,24 @@ def test_simple_explicit_dense_layer_example_2(self): result = self.run_dense_layer_1d(dense_input.shape) # get output positions in result - output_positions = self.get_output_spike_positions() - output_mask = self.get_output_mask(output_positions, result) + output_positions = self.get_output_spike_positions() + output_mask = self.get_output_mask(output_positions, result) calculated_spikes = list(result[output_mask].to_numpy()[:, 0]) assert expected_spikes == calculated_spikes - #TODO: Unit test that checks dense thresholds shape - #TODO: Unit test that checks dense weights shape - #TODO: Unit test that test dense brick with a scalar threshold value - #TODO: Unit test that test dense brick with a scalar weight value - #TODO: Unit test that uses different input/output neuron shapes + # TODO: Unit test that checks dense thresholds shape + # TODO: Unit test that checks dense weights shape + # TODO: Unit test that test dense brick with a scalar threshold value + # TODO: Unit test that test dense brick with a scalar weight value + # TODO: Unit test that uses different input/output neuron shapes def run_dense_layer_1d(self, output_shape): scaffold = Scaffold() - 
scaffold.add_brick(BaseP_Input(np.array([self.pvector]),p=self.basep,bits=self.bits,collapse_binary=False,name="Input0",time_dimension=False),"input") - scaffold.add_brick(convolution_1d(self.plength,self.filters,self.conv_thresholds,self.basep,self.bits,name="convolution_",mode=self.mode),[(0, 0)],output=True) - scaffold.add_brick(pooling_1d(self.pool_size,self.pool_strides,thresholds=self.pool_thresholds,name="pool_",method=self.pool_method),[(1,0)],output=True) - scaffold.add_brick(dense_layer_1d(output_shape,weights=self.dense_weights,thresholds=self.dense_thresholds,name="dense_"),[(2,0)],output=True) + scaffold.add_brick(BaseP_Input(np.array([self.pvector]), p=self.basep, bits=self.bits, collapse_binary=False, name="Input0", time_dimension=False), "input") + scaffold.add_brick(convolution_1d(self.plength, self.filters, self.conv_thresholds, self.basep, self.bits, name="convolution_", mode=self.mode), [(0, 0)], output=True) + scaffold.add_brick(pooling_1d(self.pool_size, self.pool_strides, thresholds=self.pool_thresholds, name="pool_", method=self.pool_method), [(1, 0)], output=True) + scaffold.add_brick(dense_layer_1d(output_shape, weights=self.dense_weights, thresholds=self.dense_thresholds, name="dense_"), [(2, 0)], output=True) self.graph = scaffold.lay_bricks() scaffold.summary(verbose=1) @@ -129,8 +130,8 @@ def run_dense_layer_1d(self, output_shape): def get_output_neuron_numbers(self): neuron_numbers = [] for key in self.graph.nodes.keys(): - if key.startswith('dense_d'): - neuron_numbers.append(self.graph.nodes[key]['neuron_number']) + if key.startswith("dense_d"): + neuron_numbers.append(self.graph.nodes[key]["neuron_number"]) return np.array(neuron_numbers) @@ -138,57 +139,58 @@ def get_output_spike_positions(self): neuron_numbers = self.get_output_neuron_numbers() output_ini_position = np.amin(neuron_numbers) output_end_position = np.amax(neuron_numbers) - return [output_ini_position,output_end_position] + return [output_ini_position, 
output_end_position] - def get_output_mask(self,output_positions, result): + def get_output_mask(self, output_positions, result): ini = output_positions[0] end = output_positions[1] return (result["neuron_number"] >= ini) & (result["neuron_number"] <= end) def get_stride_positions(self): return np.arange(0, self.pixel_size, self.pool_strides, dtype=int) - + def get_output_size(self): - return int(np.floor(1.0 + (np.float64(self.pixel_size) - self.pool_size)/self.pool_strides)) - + return int(np.floor(1.0 + (np.float64(self.pixel_size) - self.pool_size) / self.pool_strides)) + def get_expected_pooling_answer(self, pool_input): stride_positions = self.get_stride_positions() output_size = self.get_output_size() expected = [] - + if self.pool_method == "max": for pos in stride_positions[:output_size]: - expected.append(np.any(pool_input[pos:pos+self.pool_size])) + expected.append(np.any(pool_input[pos : pos + self.pool_size])) expected = (np.array(expected, dtype=int) > self.pool_thresholds).astype(int) elif self.pool_method == "average": - weights = np.ones((self.pool_size,),dtype=float) / float(self.pool_size) + weights = np.ones((self.pool_size,), dtype=float) / float(self.pool_size) for pos in stride_positions[:output_size]: - expected.append(np.dot(weights,pool_input[pos:pos+self.pool_size].astype(int))) + expected.append(np.dot(weights, pool_input[pos : pos + self.pool_size].astype(int))) else: print(f"'pool_method' class member variable must be either 'max' or 'average'. 
But it is {self.pool_method}.") raise ValueError("Unrecognized 'pool_method' class member variable.") - + return np.array(expected) - - def get_expected_spikes(self, expected_ans, thresholds): + + def get_expected_spikes(self, expected_ans, thresholds): ans = np.array(expected_ans) - spikes = ans[ ans > thresholds].astype(float) + spikes = ans[ans > thresholds].astype(float) return list(3.0 * np.ones(spikes.shape)) - + + class Test_DenseLayer2D: def test_simple_explicit_dense_layer_example_1(self): - ''' - Explicit invocation of the pooling brick unit test. Removes the complexity of the tests above to show the + """ + Explicit invocation of the pooling brick unit test. Removes the complexity of the tests above to show the basic structure of general unit tests for the pooling brick. - ''' + """ self.basep = 3 self.bits = 3 - self.filters = [[2, 3],[4,5]] + self.filters = [[2, 3], [4, 5]] self.mode = "full" - self.pvector = [[1,1,4,6],[6,2,4,2],[2,3,5,4],[6,1,6,3]] + self.pvector = [[1, 1, 4, 6], [6, 2, 4, 2], [2, 3, 5, 4], [6, 1, 6, 3]] self.pshape = np.array(self.pvector).shape self.pool_size = 2 @@ -196,19 +198,19 @@ def test_simple_explicit_dense_layer_example_1(self): self.pool_thresholds = 0.9 self.pool_method = "max" - self.dense_thresholds = np.array([[1.9, 1.9],[1.9, 2.0]]) - self.dense_weights = np.ones((self.dense_thresholds.size,self.dense_thresholds.size), dtype=float) + self.dense_thresholds = np.array([[1.9, 1.9], [1.9, 2.0]]) + self.dense_weights = np.ones((self.dense_thresholds.size, self.dense_thresholds.size), dtype=float) self.conv_ans = convolve2d(self.pvector, self.filters, mode=self.mode) self.pixel_shape = self.conv_ans.shape self.pixel_dim1 = self.pixel_shape[0] self.pixel_dim2 = self.pixel_shape[1] self.conv_spike_pos = np.zeros(self.conv_ans.shape) - self.conv_spike_pos[[0,1,1,2,3,4],[1,0,1,2,3,4]] = 0.1 + self.conv_spike_pos[[0, 1, 1, 2, 3, 4], [1, 0, 1, 2, 3, 4]] = 0.1 self.conv_thresholds = self.conv_ans - self.conv_spike_pos pool_input 
= self.conv_ans > self.conv_thresholds - + # Check calculations dense_input = self.get_expected_pooling_answer(pool_input) @@ -220,22 +222,22 @@ def test_simple_explicit_dense_layer_example_1(self): result = self.run_dense_layer_2d(dense_input.shape) # get output positions in result - output_positions = self.get_output_spike_positions() - output_mask = self.get_output_mask(output_positions, result) + output_positions = self.get_output_spike_positions() + output_mask = self.get_output_mask(output_positions, result) calculated_spikes = list(result[output_mask].to_numpy()[:, 0]) assert expected_spikes == calculated_spikes def test_simple_explicit_dense_layer_example_2(self): - ''' - Explicit invocation of the pooling brick unit test. Removes the complexity of the tests above to show the + """ + Explicit invocation of the pooling brick unit test. Removes the complexity of the tests above to show the basic structure of general unit tests for the pooling brick. - ''' + """ self.basep = 3 self.bits = 3 - self.filters = [[2, 3],[4,5]] + self.filters = [[2, 3], [4, 5]] self.mode = "full" - self.pvector = [[1,1,4,6],[6,2,4,2],[2,3,5,4],[6,1,6,3]] + self.pvector = [[1, 1, 4, 6], [6, 2, 4, 2], [2, 3, 5, 4], [6, 1, 6, 3]] self.pshape = np.array(self.pvector).shape self.pool_size = 2 @@ -243,19 +245,19 @@ def test_simple_explicit_dense_layer_example_2(self): self.pool_thresholds = 0.9 self.pool_method = "max" - self.dense_thresholds = [[6.9, 18.9],[30.9, 43.0]] - self.dense_weights = np.arange(1,17,dtype=float).reshape((4,4)) + self.dense_thresholds = [[6.9, 18.9], [30.9, 43.0]] + self.dense_weights = np.arange(1, 17, dtype=float).reshape((4, 4)) self.conv_ans = convolve2d(self.pvector, self.filters, mode=self.mode) self.pixel_shape = self.conv_ans.shape self.pixel_dim1 = self.pixel_shape[0] self.pixel_dim2 = self.pixel_shape[1] self.conv_spike_pos = np.zeros(self.conv_ans.shape) - self.conv_spike_pos[[0,0,1,1,2,3,4],[1,3,0,1,2,3,4]] = 0.1 + self.conv_spike_pos[[0, 0, 1, 1, 2, 
3, 4], [1, 3, 0, 1, 2, 3, 4]] = 0.1 self.conv_thresholds = self.conv_ans - self.conv_spike_pos pool_input = self.conv_ans > self.conv_thresholds - + # Check calculations dense_input = self.get_expected_pooling_answer(pool_input) @@ -267,8 +269,8 @@ def test_simple_explicit_dense_layer_example_2(self): result = self.run_dense_layer_2d(dense_input.shape) # get output positions in result - output_positions = self.get_output_spike_positions() - output_mask = self.get_output_mask(output_positions, result) + output_positions = self.get_output_spike_positions() + output_mask = self.get_output_mask(output_positions, result) calculated_spikes = list(result[output_mask].to_numpy()[:, 0]) assert expected_spikes == calculated_spikes @@ -276,9 +278,9 @@ def test_simple_explicit_dense_layer_example_2(self): def test_dense_layer_thresholds_shape(self): self.basep = 3 self.bits = 3 - self.filters = [[2, 3],[4,5]] + self.filters = [[2, 3], [4, 5]] self.mode = "full" - self.pvector = [[1,1,4,6],[6,2,4,2],[2,3,5,4],[6,1,6,3]] + self.pvector = [[1, 1, 4, 6], [6, 2, 4, 2], [2, 3, 5, 4], [6, 1, 6, 3]] self.pshape = np.array(self.pvector).shape self.pool_size = 2 @@ -286,19 +288,19 @@ def test_dense_layer_thresholds_shape(self): self.pool_thresholds = 0.9 self.pool_method = "max" - self.dense_thresholds = [[6.9],[30.9]] - self.dense_weights = np.arange(1,17,dtype=float).reshape((4,4)) + self.dense_thresholds = [[6.9], [30.9]] + self.dense_weights = np.arange(1, 17, dtype=float).reshape((4, 4)) self.conv_ans = convolve2d(self.pvector, self.filters, mode=self.mode) self.pixel_shape = self.conv_ans.shape self.pixel_dim1 = self.pixel_shape[0] self.pixel_dim2 = self.pixel_shape[1] self.conv_spike_pos = np.zeros(self.conv_ans.shape) - self.conv_spike_pos[[0,0,1,1,2,3,4],[1,3,0,1,2,3,4]] = 0.1 + self.conv_spike_pos[[0, 0, 1, 1, 2, 3, 4], [1, 3, 0, 1, 2, 3, 4]] = 0.1 self.conv_thresholds = self.conv_ans - self.conv_spike_pos pool_input = self.conv_ans > self.conv_thresholds - + # Check 
calculations dense_input = self.get_expected_pooling_answer(pool_input) @@ -310,13 +312,13 @@ def test_dense_layer_thresholds_shape(self): with pytest.raises(ValueError): self.run_dense_layer_2d(dense_input.shape) - @pytest.mark.parametrize("output_shape", [(2,2), (4,)]) + @pytest.mark.parametrize("output_shape", [(2, 2), (4,)]) def test_dense_layer_scalar_thresholds(self, output_shape): self.basep = 3 self.bits = 3 - self.filters = [[2, 3],[4,5]] + self.filters = [[2, 3], [4, 5]] self.mode = "full" - self.pvector = [[1,1,4,6],[6,2,4,2],[2,3,5,4],[6,1,6,3]] + self.pvector = [[1, 1, 4, 6], [6, 2, 4, 2], [2, 3, 5, 4], [6, 1, 6, 3]] self.pshape = np.array(self.pvector).shape self.pool_size = 2 @@ -325,18 +327,18 @@ def test_dense_layer_scalar_thresholds(self, output_shape): self.pool_method = "max" self.dense_thresholds = 42.9 - self.dense_weights = np.arange(1,17,dtype=float).reshape((4,4)) + self.dense_weights = np.arange(1, 17, dtype=float).reshape((4, 4)) self.conv_ans = convolve2d(self.pvector, self.filters, mode=self.mode) self.pixel_shape = self.conv_ans.shape self.pixel_dim1 = self.pixel_shape[0] self.pixel_dim2 = self.pixel_shape[1] self.conv_spike_pos = np.zeros(self.conv_ans.shape) - self.conv_spike_pos[[0,0,1,1,2,3,4],[1,3,0,1,2,3,4]] = 0.1 + self.conv_spike_pos[[0, 0, 1, 1, 2, 3, 4], [1, 3, 0, 1, 2, 3, 4]] = 0.1 self.conv_thresholds = self.conv_ans - self.conv_spike_pos pool_input = self.conv_ans > self.conv_thresholds - + # Check calculations dense_input = self.get_expected_pooling_answer(pool_input) @@ -346,21 +348,21 @@ def test_dense_layer_scalar_thresholds(self, output_shape): expected_spikes = self.get_expected_spikes(dense_ans, dense_thresholds) result = self.run_dense_layer_2d(dense_input.shape) - + # get output positions in result - output_positions = self.get_output_spike_positions() - output_mask = self.get_output_mask(output_positions, result) + output_positions = self.get_output_spike_positions() + output_mask = 
self.get_output_mask(output_positions, result) calculated_spikes = list(result[output_mask].to_numpy()[:, 0]) - assert expected_spikes == calculated_spikes + assert expected_spikes == calculated_spikes - @pytest.mark.parametrize("nWeights,weight_shape",[(10,(3,3)), (10, (9,)), (5, (2,2))]) + @pytest.mark.parametrize("nWeights,weight_shape", [(10, (3, 3)), (10, (9,)), (5, (2, 2))]) def test_dense_layer_weights_shape(self, nWeights, weight_shape): self.basep = 3 self.bits = 3 - self.filters = [[2, 3],[4,5]] + self.filters = [[2, 3], [4, 5]] self.mode = "full" - self.pvector = [[1,1,4,6],[6,2,4,2],[2,3,5,4],[6,1,6,3]] + self.pvector = [[1, 1, 4, 6], [6, 2, 4, 2], [2, 3, 5, 4], [6, 1, 6, 3]] self.pshape = np.array(self.pvector).shape self.pool_size = 2 @@ -368,19 +370,19 @@ def test_dense_layer_weights_shape(self, nWeights, weight_shape): self.pool_thresholds = 0.9 self.pool_method = "max" - self.dense_thresholds = [[6.9, 18.9],[30.9, 43.0]] - self.dense_weights = np.arange(1,nWeights,dtype=float).reshape(weight_shape) + self.dense_thresholds = [[6.9, 18.9], [30.9, 43.0]] + self.dense_weights = np.arange(1, nWeights, dtype=float).reshape(weight_shape) self.conv_ans = convolve2d(self.pvector, self.filters, mode=self.mode) self.pixel_shape = self.conv_ans.shape self.pixel_dim1 = self.pixel_shape[0] self.pixel_dim2 = self.pixel_shape[1] self.conv_spike_pos = np.zeros(self.conv_ans.shape) - self.conv_spike_pos[[0,0,1,1,2,3,4],[1,3,0,1,2,3,4]] = 0.1 + self.conv_spike_pos[[0, 0, 1, 1, 2, 3, 4], [1, 3, 0, 1, 2, 3, 4]] = 0.1 self.conv_thresholds = self.conv_ans - self.conv_spike_pos pool_input = self.conv_ans > self.conv_thresholds - + # Check calculations dense_input = self.get_expected_pooling_answer(pool_input) @@ -392,13 +394,13 @@ def test_dense_layer_weights_shape(self, nWeights, weight_shape): with pytest.raises(ValueError): self.run_dense_layer_2d(dense_input.shape) - @pytest.mark.parametrize("weight_shape", [(4,4)]) + @pytest.mark.parametrize("weight_shape", [(4, 4)]) 
def test_dense_layer_scalar_weights(self, weight_shape): self.basep = 3 self.bits = 3 - self.filters = [[2, 3],[4,5]] + self.filters = [[2, 3], [4, 5]] self.mode = "full" - self.pvector = [[1,1,4,6],[6,2,4,2],[2,3,5,4],[6,1,6,3]] + self.pvector = [[1, 1, 4, 6], [6, 2, 4, 2], [2, 3, 5, 4], [6, 1, 6, 3]] self.pshape = np.array(self.pvector).shape self.pool_size = 2 @@ -406,7 +408,7 @@ def test_dense_layer_scalar_weights(self, weight_shape): self.pool_thresholds = 0.9 self.pool_method = "max" - self.dense_thresholds = [[6.9, 18.9],[30.9, 43.0]] + self.dense_thresholds = [[6.9, 18.9], [30.9, 43.0]] self.dense_weights = 2.0 self.conv_ans = convolve2d(self.pvector, self.filters, mode=self.mode) @@ -414,11 +416,11 @@ def test_dense_layer_scalar_weights(self, weight_shape): self.pixel_dim1 = self.pixel_shape[0] self.pixel_dim2 = self.pixel_shape[1] self.conv_spike_pos = np.zeros(self.conv_ans.shape) - self.conv_spike_pos[[0,0,1,1,2,3,4],[1,3,0,1,2,3,4]] = 0.1 + self.conv_spike_pos[[0, 0, 1, 1, 2, 3, 4], [1, 3, 0, 1, 2, 3, 4]] = 0.1 self.conv_thresholds = self.conv_ans - self.conv_spike_pos pool_input = self.conv_ans > self.conv_thresholds - + # Check calculations dense_input = self.get_expected_pooling_answer(pool_input) @@ -428,24 +430,24 @@ def test_dense_layer_scalar_weights(self, weight_shape): expected_spikes = self.get_expected_spikes(dense_ans, dense_thresholds) result = self.run_dense_layer_2d(dense_input.shape) - + # get output positions in result - output_positions = self.get_output_spike_positions() - output_mask = self.get_output_mask(output_positions, result) + output_positions = self.get_output_spike_positions() + output_mask = self.get_output_mask(output_positions, result) calculated_spikes = list(result[output_mask].to_numpy()[:, 0]) assert expected_spikes == calculated_spikes @pytest.mark.xfail(reason="Not implemented.") def test_dense_layer_with_different_input_output_shapes(self): - #TODO: Unit test that uses different input/output neuron shapes + # 
TODO: Unit test that uses different input/output neuron shapes assert False def get_output_neuron_numbers(self): neuron_numbers = [] for key in self.graph.nodes.keys(): - if key.startswith('dense_d'): - neuron_numbers.append(self.graph.nodes[key]['neuron_number']) + if key.startswith("dense_d"): + neuron_numbers.append(self.graph.nodes[key]["neuron_number"]) return np.array(neuron_numbers) @@ -457,19 +459,19 @@ def get_output_spike_positions(self): else: output_ini_position = np.amin(neuron_numbers) output_end_position = np.amax(neuron_numbers) - return [output_ini_position,output_end_position] + return [output_ini_position, output_end_position] - def get_output_mask(self,output_positions, result): + def get_output_mask(self, output_positions, result): ini = output_positions[0] end = output_positions[1] return (result["neuron_number"] >= ini) & (result["neuron_number"] <= end) def run_dense_layer_2d(self, output_shape): scaffold = Scaffold() - scaffold.add_brick(BaseP_Input(np.array([self.pvector]),p=self.basep,bits=self.bits,collapse_binary=False,name="Input0",time_dimension=False),"input") - scaffold.add_brick(convolution_2d(self.pshape,self.filters,self.conv_thresholds,self.basep,self.bits,name="convolution_",mode=self.mode),[(0, 0)],output=True) - scaffold.add_brick(pooling_2d(self.pool_size,self.pool_strides,thresholds=self.pool_thresholds,name="pool_",method=self.pool_method),[(1,0)],output=True) - scaffold.add_brick(dense_layer_2d(output_shape,weights=self.dense_weights,thresholds=self.dense_thresholds,name="dense_"),[(2,0)],output=True) + scaffold.add_brick(BaseP_Input(np.array([self.pvector]), p=self.basep, bits=self.bits, collapse_binary=False, name="Input0", time_dimension=False), "input") + scaffold.add_brick(convolution_2d(self.pshape, self.filters, self.conv_thresholds, self.basep, self.bits, name="convolution_", mode=self.mode), [(0, 0)], output=True) + scaffold.add_brick(pooling_2d(self.pool_size, self.pool_strides, thresholds=self.pool_thresholds, 
name="pool_", method=self.pool_method), [(1, 0)], output=True) + scaffold.add_brick(dense_layer_2d(output_shape, weights=self.dense_weights, thresholds=self.dense_thresholds, name="dense_"), [(2, 0)], output=True) self.graph = scaffold.lay_bricks() scaffold.summary(verbose=1) @@ -478,16 +480,16 @@ def run_dense_layer_2d(self, output_shape): backend.compile(scaffold, backend_args) result = backend.run(5) return result - + def get_stride_positions(self, pixel_dim): return np.arange(0, pixel_dim, self.pool_strides, dtype=int) - + def get_output_size(self, pixel_dim): - return int(np.floor(1.0 + (np.float64(pixel_dim) - self.pool_size)/self.pool_strides)) + return int(np.floor(1.0 + (np.float64(pixel_dim) - self.pool_size) / self.pool_strides)) def get_output_shape(self): return (self.get_output_size(self.pixel_dim1), self.get_output_size(self.pixel_dim2)) - + def get_expected_pooling_answer(self, pool_input): row_stride_positions = self.get_stride_positions(self.pixel_dim1) col_stride_positions = self.get_stride_positions(self.pixel_dim2) @@ -495,27 +497,27 @@ def get_expected_pooling_answer(self, pool_input): expected = [] if self.pool_method == "max": - for row in row_stride_positions[:output_shape[0]]: - for col in col_stride_positions[:output_shape[1]]: - expected.append(np.any(pool_input[row:row+self.pool_size,col:col+self.pool_size])) + for row in row_stride_positions[: output_shape[0]]: + for col in col_stride_positions[: output_shape[1]]: + expected.append(np.any(pool_input[row : row + self.pool_size, col : col + self.pool_size])) expected = np.reshape(expected, output_shape).astype(int) expected = (np.array(expected, dtype=int) > self.pool_thresholds).astype(float) - elif self.pool_method == "average": #TODO : fix this code. 
- weights = np.ones((self.pool_size,self.pool_size),dtype=float) / float(self.pool_size*self.pool_size) - for row in row_stride_positions[:output_shape[0]]: - for col in col_stride_positions[:output_shape[1]]: - expected.append(np.dot(weights.flatten(),pool_input[row:row+self.pool_size,col:col+self.pool_size].astype(int).flatten())) + elif self.pool_method == "average": # TODO : fix this code. + weights = np.ones((self.pool_size, self.pool_size), dtype=float) / float(self.pool_size * self.pool_size) + for row in row_stride_positions[: output_shape[0]]: + for col in col_stride_positions[: output_shape[1]]: + expected.append(np.dot(weights.flatten(), pool_input[row : row + self.pool_size, col : col + self.pool_size].astype(int).flatten())) expected = np.reshape(expected, output_shape).astype(float) else: print(f"'pool_method' class member variable must be either 'max' or 'average'. But it is {self.pool_method}.") raise ValueError("Unrecognized 'pool_method' class member variable.") - + return np.array(expected) - - def get_expected_spikes(self,expected_ans, thresholds): + + def get_expected_spikes(self, expected_ans, thresholds): ans = np.array(expected_ans) - spikes = ans[ ans > thresholds].astype(float) + spikes = ans[ans > thresholds].astype(float) return list(3.0 * np.ones(spikes.shape)) diff --git a/tests/unit/bricks/test_keras_convolution_brick.py b/tests/unit/bricks/test_keras_convolution_brick.py index 65f65ce5..e76e7a75 100644 --- a/tests/unit/bricks/test_keras_convolution_brick.py +++ b/tests/unit/bricks/test_keras_convolution_brick.py @@ -623,4 +623,4 @@ def run_convolution_2d(self, thresholds, verbose_scaffold=1): backend_args = {} backend.compile(scaffold, backend_args) result = backend.run(5) - return result \ No newline at end of file + return result diff --git a/tests/unit/bricks/test_keras_dense_brick.py b/tests/unit/bricks/test_keras_dense_brick.py index 648aeaf9..916c567d 100644 --- a/tests/unit/bricks/test_keras_dense_brick.py +++ 
b/tests/unit/bricks/test_keras_dense_brick.py @@ -223,4 +223,3 @@ def get_edge_connections_inverse(graph,prefix1,shape1,prefix2,shape2): node_names2 = get_node_name_list(prefix2,shape2) adict2 = {node_name2: [node_name1 for node_name1 in node_names1 if node_name2 in adict1[node_name1]] for node_name2 in node_names2} return adict2 - diff --git a/tests/unit/bricks/test_pooling_brick.py b/tests/unit/bricks/test_pooling_brick.py index de99d0f5..5ac45b02 100644 --- a/tests/unit/bricks/test_pooling_brick.py +++ b/tests/unit/bricks/test_pooling_brick.py @@ -9,12 +9,13 @@ convolve2d = pytest.importorskip("scipy.signal", reason=f"Scipy package not installed. Skipping test file {__file__} because of module dependency.").convolve2d + class Test_Pooling1D: @pytest.fixture def convolution_mode(self): return "full" - + @pytest.fixture def default_convolution_params(self, convolution_mode): self.basep = 3 @@ -30,20 +31,20 @@ def numpy_convolution_result(self, default_convolution_params, request): self.pixel_size = len(self.conv_ans) self.conv_spike_pos = self.make_convolution_spike_positions_vector(request.param) self.conv_thresholds = self.conv_ans - self.conv_spike_pos - return self.conv_ans > self.conv_thresholds + return self.conv_ans > self.conv_thresholds @pytest.fixture def pooling_size(self): return 2 - + @pytest.fixture def pooling_stride(self): return 2 - + @pytest.fixture def pooling_method(self): return "max" - + @pytest.fixture def default_pooling_params(self, pooling_size, pooling_stride, pooling_method): self.pool_size = pooling_size @@ -51,10 +52,10 @@ def default_pooling_params(self, pooling_size, pooling_stride, pooling_method): self.thresholds = 0.9 self.method = pooling_method - @pytest.mark.parametrize("pooling_size", np.arange(1,8)) - @pytest.mark.parametrize("pooling_stride", np.arange(1,8)) + @pytest.mark.parametrize("pooling_size", np.arange(1, 8)) + @pytest.mark.parametrize("pooling_stride", np.arange(1, 8)) def test_max_pooling(self, 
numpy_convolution_result, default_pooling_params): - + pool_input = numpy_convolution_result # Check calculations @@ -63,21 +64,21 @@ def test_max_pooling(self, numpy_convolution_result, default_pooling_params): # get output positions in result result = self.run_pooling_1d() - output_positions = self.get_output_spike_positions() - output_mask = self.get_output_mask(output_positions, result) + output_positions = self.get_output_spike_positions() + output_mask = self.get_output_mask(output_positions, result) calculated_spikes = list(result[output_mask].to_numpy()[:, 0]) assert expected_spikes == calculated_spikes - @pytest.mark.parametrize("pooling_size", np.arange(1,8)) - @pytest.mark.parametrize("pooling_stride", np.arange(1,8)) + @pytest.mark.parametrize("pooling_size", np.arange(1, 8)) + @pytest.mark.parametrize("pooling_stride", np.arange(1, 8)) def test_max_pooling_vector_thresholds(self, numpy_convolution_result, default_pooling_params): pool_input = numpy_convolution_result # Check calculations expected_ans = self.get_expected_pooling_answer(pool_input) - expected_spikes = self.get_expected_spikes(expected_ans) + expected_spikes = self.get_expected_spikes(expected_ans) # pooling portion output_size = self.get_output_size() @@ -85,35 +86,35 @@ def test_max_pooling_vector_thresholds(self, numpy_convolution_result, default_p result = self.run_pooling_1d() # get output positions in result - output_positions = self.get_output_spike_positions() - output_mask = self.get_output_mask(output_positions, result) + output_positions = self.get_output_spike_positions() + output_mask = self.get_output_mask(output_positions, result) calculated_spikes = list(result[output_mask].to_numpy()[:, 0]) assert expected_spikes == calculated_spikes @pytest.mark.parametrize("pooling_method", ["average"]) - @pytest.mark.parametrize("pooling_size", np.arange(1,8)) - @pytest.mark.parametrize("pooling_stride", np.arange(1,8)) + @pytest.mark.parametrize("pooling_size", np.arange(1, 8)) + 
@pytest.mark.parametrize("pooling_stride", np.arange(1, 8)) def test_average_pooling(self, numpy_convolution_result, default_pooling_params): pool_input = numpy_convolution_result # Check calculations - expected_ans = self.get_expected_pooling_answer(pool_input) + expected_ans = self.get_expected_pooling_answer(pool_input) self.thresholds = self.make_random_thresholds_vector(expected_ans) result = self.run_pooling_1d() # get output positions in result output_positions = self.get_output_spike_positions() - output_mask = self.get_output_mask(output_positions, result) + output_mask = self.get_output_mask(output_positions, result) - expected_spikes = self.get_expected_spikes(expected_ans) + expected_spikes = self.get_expected_spikes(expected_ans) calculated_spikes = list(result[output_mask].to_numpy()[:, 0]) assert expected_spikes == calculated_spikes - @pytest.mark.parametrize("pooling_size", np.arange(1,8)) - @pytest.mark.parametrize("pooling_stride", np.arange(1,8)) + @pytest.mark.parametrize("pooling_size", np.arange(1, 8)) + @pytest.mark.parametrize("pooling_stride", np.arange(1, 8)) @pytest.mark.parametrize("convolution_mode", ["full"]) def test_max_pooling_with_full_convolution_mode(self, numpy_convolution_result, default_pooling_params): @@ -121,19 +122,19 @@ def test_max_pooling_with_full_convolution_mode(self, numpy_convolution_result, # Check calculations expected_ans = self.get_expected_pooling_answer(pool_input) - expected_spikes = self.get_expected_spikes(expected_ans) + expected_spikes = self.get_expected_spikes(expected_ans) result = self.run_pooling_1d() # get output positions in result output_positions = self.get_output_spike_positions() - output_mask = self.get_output_mask(output_positions, result) + output_mask = self.get_output_mask(output_positions, result) calculated_spikes = list(result[output_mask].to_numpy()[:, 0]) assert expected_spikes == calculated_spikes - @pytest.mark.parametrize("pooling_size", np.arange(1,6)) - 
@pytest.mark.parametrize("pooling_stride", np.arange(1,6)) + @pytest.mark.parametrize("pooling_size", np.arange(1, 6)) + @pytest.mark.parametrize("pooling_stride", np.arange(1, 6)) @pytest.mark.parametrize("convolution_mode", ["valid"]) def test_max_pooling_with_valid_convolution_mode(self, numpy_convolution_result, default_pooling_params): @@ -141,19 +142,19 @@ def test_max_pooling_with_valid_convolution_mode(self, numpy_convolution_result, # Check calculations expected_ans = self.get_expected_pooling_answer(pool_input) - expected_spikes = self.get_expected_spikes(expected_ans) + expected_spikes = self.get_expected_spikes(expected_ans) result = self.run_pooling_1d() # get output positions in result output_positions = self.get_output_spike_positions() - output_mask = self.get_output_mask(output_positions, result) + output_mask = self.get_output_mask(output_positions, result) calculated_spikes = list(result[output_mask].to_numpy()[:, 0]) assert expected_spikes == calculated_spikes - @pytest.mark.parametrize("pooling_size", np.arange(1,7)) - @pytest.mark.parametrize("pooling_stride", np.arange(1,7)) + @pytest.mark.parametrize("pooling_size", np.arange(1, 7)) + @pytest.mark.parametrize("pooling_stride", np.arange(1, 7)) @pytest.mark.parametrize("convolution_mode", ["same"]) def test_max_pooling_with_same_convolution_mode(self, numpy_convolution_result, default_pooling_params): @@ -161,111 +162,111 @@ def test_max_pooling_with_same_convolution_mode(self, numpy_convolution_result, # Check calculations expected_ans = self.get_expected_pooling_answer(pool_input) - expected_spikes = self.get_expected_spikes(expected_ans) + expected_spikes = self.get_expected_spikes(expected_ans) result = self.run_pooling_1d() # get output positions in result output_positions = self.get_output_spike_positions() - output_mask = self.get_output_mask(output_positions, result) + output_mask = self.get_output_mask(output_positions, result) calculated_spikes = 
list(result[output_mask].to_numpy()[:, 0]) assert expected_spikes == calculated_spikes @pytest.mark.parametrize("pooling_method", ["average"]) - @pytest.mark.parametrize("pooling_size", np.arange(1,8)) - @pytest.mark.parametrize("pooling_stride", np.arange(1,8)) + @pytest.mark.parametrize("pooling_size", np.arange(1, 8)) + @pytest.mark.parametrize("pooling_stride", np.arange(1, 8)) @pytest.mark.parametrize("convolution_mode", ["full"]) def test_average_pooling_with_full_convolution_mode(self, numpy_convolution_result, default_pooling_params): pool_input = numpy_convolution_result # Check calculations - expected_ans = self.get_expected_pooling_answer(pool_input) + expected_ans = self.get_expected_pooling_answer(pool_input) - # begin pooling brick calculation + # begin pooling brick calculation self.thresholds = self.make_random_thresholds_vector(expected_ans) result = self.run_pooling_1d() # get output positions in result output_positions = self.get_output_spike_positions() - output_mask = self.get_output_mask(output_positions, result) + output_mask = self.get_output_mask(output_positions, result) - expected_spikes = self.get_expected_spikes(expected_ans) + expected_spikes = self.get_expected_spikes(expected_ans) calculated_spikes = list(result[output_mask].to_numpy()[:, 0]) assert expected_spikes == calculated_spikes @pytest.mark.parametrize("pooling_method", ["average"]) - @pytest.mark.parametrize("pooling_size", np.arange(1,6)) - @pytest.mark.parametrize("pooling_stride", np.arange(1,6)) + @pytest.mark.parametrize("pooling_size", np.arange(1, 6)) + @pytest.mark.parametrize("pooling_stride", np.arange(1, 6)) @pytest.mark.parametrize("convolution_mode", ["valid"]) def test_average_pooling_with_valid_convolution_mode(self, numpy_convolution_result, default_pooling_params): pool_input = numpy_convolution_result # Check calculations - expected_ans = self.get_expected_pooling_answer(pool_input) + expected_ans = self.get_expected_pooling_answer(pool_input) - # begin 
pooling brick calculation + # begin pooling brick calculation self.thresholds = self.make_random_thresholds_vector(expected_ans) result = self.run_pooling_1d() # get output positions in result output_positions = self.get_output_spike_positions() - output_mask = self.get_output_mask(output_positions, result) + output_mask = self.get_output_mask(output_positions, result) - expected_spikes = self.get_expected_spikes(expected_ans) + expected_spikes = self.get_expected_spikes(expected_ans) calculated_spikes = list(result[output_mask].to_numpy()[:, 0]) assert expected_spikes == calculated_spikes @pytest.mark.parametrize("pooling_method", ["average"]) - @pytest.mark.parametrize("pooling_size", np.arange(1,7)) - @pytest.mark.parametrize("pooling_stride", np.arange(1,7)) + @pytest.mark.parametrize("pooling_size", np.arange(1, 7)) + @pytest.mark.parametrize("pooling_stride", np.arange(1, 7)) @pytest.mark.parametrize("convolution_mode", ["same"]) def test_average_pooling_with_same_convolution_mode(self, numpy_convolution_result, default_pooling_params): pool_input = numpy_convolution_result # Check calculations - expected_ans = self.get_expected_pooling_answer(pool_input) + expected_ans = self.get_expected_pooling_answer(pool_input) - # begin pooling brick calculation + # begin pooling brick calculation self.thresholds = self.make_random_thresholds_vector(expected_ans) result = self.run_pooling_1d() # get output positions in result output_positions = self.get_output_spike_positions() - output_mask = self.get_output_mask(output_positions, result) + output_mask = self.get_output_mask(output_positions, result) - expected_spikes = self.get_expected_spikes(expected_ans) + expected_spikes = self.get_expected_spikes(expected_ans) calculated_spikes = list(result[output_mask].to_numpy()[:, 0]) assert expected_spikes == calculated_spikes - @pytest.mark.parametrize("length", [1,2,4,5]) + @pytest.mark.parametrize("length", [1, 2, 4, 5]) def test_thresholds_shape(self, 
default_convolution_params, default_pooling_params, length): - ''' + """ Correct pooling threshold shape is (3,) - ''' - self.conv_thresholds = np.array([0.,1.,0.,1.,0.,0.,0.]) + """ + self.conv_thresholds = np.array([0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0]) with pytest.raises(ValueError): - self.thresholds = 0.9*np.ones((length,)) + self.thresholds = 0.9 * np.ones((length,)) self.run_pooling_1d() - @pytest.mark.parametrize("thresholds", [1.1,2.1,np.ones((3,),dtype=float)]) + @pytest.mark.parametrize("thresholds", [1.1, 2.1, np.ones((3,), dtype=float)]) def test_max_pooling_thresholds_changes(self, default_convolution_params, default_pooling_params, thresholds): - ''' + """ Max pooling threshold must be < 0.9 for the "or" operation to be performed correctly. Otherwise, the brick no longer behaves as "max" operation. - ''' - self.conv_thresholds = np.array([0.,1.,0.,1.,0.,0.,0.]) + """ + self.conv_thresholds = np.array([0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0]) with pytest.raises(ValueError): self.thresholds = thresholds self.run_pooling_1d() def test_simple_explicit_max_pooling(self): - ''' - Explicit invocation of the pooling brick unit test. Removes the complexity of the tests above to show the + """ + Explicit invocation of the pooling brick unit test. Removes the complexity of the tests above to show the basic structure of general unit tests for the pooling brick. 
- ''' + """ self.basep = 3 self.bits = 3 self.filters = [2, 3] @@ -284,25 +285,25 @@ def test_simple_explicit_max_pooling(self): self.conv_thresholds = self.conv_ans - self.conv_spike_pos pool_input = self.conv_ans > self.conv_thresholds - + # Check calculations expected_ans = self.get_expected_pooling_answer(pool_input) - expected_spikes = self.get_expected_spikes(expected_ans) + expected_spikes = self.get_expected_spikes(expected_ans) result = self.run_pooling_1d() # get output positions in result - output_positions = self.get_output_spike_positions() - output_mask = self.get_output_mask(output_positions, result) + output_positions = self.get_output_spike_positions() + output_mask = self.get_output_mask(output_positions, result) calculated_spikes = list(result[output_mask].to_numpy()[:, 0]) assert expected_spikes == calculated_spikes def test_simple_explicit_average_pooling(self): - ''' - Explicit invocation of the pooling brick unit test. Removes the complexity of the tests above to show the + """ + Explicit invocation of the pooling brick unit test. Removes the complexity of the tests above to show the basic structure of general unit tests for the pooling brick. 
- ''' + """ self.basep = 3 self.bits = 3 self.filters = [2, 3] @@ -312,7 +313,7 @@ def test_simple_explicit_average_pooling(self): self.pool_size = 2 self.strides = 2 - self.thresholds = np.array([0.4,0.9,0.9]) + self.thresholds = np.array([0.4, 0.9, 0.9]) self.method = "average" self.conv_ans = np.convolve(self.pvector, self.filters, mode=self.mode) @@ -321,15 +322,15 @@ def test_simple_explicit_average_pooling(self): self.conv_thresholds = self.conv_ans - self.conv_spike_pos pool_input = self.conv_ans > self.conv_thresholds - + # Check calculations expected_ans = self.get_expected_pooling_answer(pool_input) - expected_spikes = self.get_expected_spikes(expected_ans) + expected_spikes = self.get_expected_spikes(expected_ans) result = self.run_pooling_1d() # get output positions in result - output_positions = self.get_output_spike_positions() + output_positions = self.get_output_spike_positions() output_mask = self.get_output_mask(output_positions, result) calculated_spikes = list(result[output_mask].to_numpy()[:, 0]) @@ -338,36 +339,36 @@ def test_simple_explicit_average_pooling(self): def make_random_thresholds_vector(self, initial_vector): output_size = self.get_output_size() thresholds = initial_vector.copy() - random_ids = np.random.choice(output_size, np.ceil(0.5*output_size).astype(int), replace=False) + random_ids = np.random.choice(output_size, np.ceil(0.5 * output_size).astype(int), replace=False) thresholds[random_ids] = thresholds[random_ids] - 0.1 thresholds[thresholds < 0.0] = 0.0 return thresholds - + def make_convolution_spike_positions_vector(self, case): - ''' - The returned list is [0.0, 0.1, 0.0, 0.1, 0.0, 0.0, 0.0] for "fixed" case. The "random" case returns a list + """ + The returned list is [0.0, 0.1, 0.0, 0.1, 0.0, 0.0, 0.0] for "fixed" case. The "random" case returns a list with the positions of "0.1" randomized in the list. The 0.1 specifies the convolution brick's output positions that will spike (i.e., output neurons will fire). 
- ''' + """ conv_spike_pos = np.zeros(self.conv_ans.shape, dtype=float) if case.lower() == "fixed": conv_spike_pos[[1, 3]] = 0.1 elif case.lower() == "random": - random_ids = np.random.choice(conv_spike_pos.size, np.ceil(0.5*conv_spike_pos.size).astype(int), replace=False) + random_ids = np.random.choice(conv_spike_pos.size, np.ceil(0.5 * conv_spike_pos.size).astype(int), replace=False) conv_spike_pos[random_ids] = 0.1 else: print(f"Case parameter is either 'fixed' or 'random'. But you provided {case}.") raise ValueError("Unrecognized 'case' parameter value provided.") - - return conv_spike_pos - + + return conv_spike_pos + def get_output_neuron_numbers(self): neuron_numbers = [] for key in self.graph.nodes.keys(): - if key.startswith('pool_p'): - neuron_numbers.append(self.graph.nodes[key]['neuron_number']) + if key.startswith("pool_p"): + neuron_numbers.append(self.graph.nodes[key]["neuron_number"]) return np.array(neuron_numbers) @@ -375,18 +376,18 @@ def get_output_spike_positions(self): neuron_numbers = self.get_output_neuron_numbers() output_ini_position = np.amin(neuron_numbers) output_end_position = np.amax(neuron_numbers) - return [output_ini_position,output_end_position] + return [output_ini_position, output_end_position] - def get_output_mask(self,output_positions, result): + def get_output_mask(self, output_positions, result): ini = output_positions[0] end = output_positions[1] return (result["neuron_number"] >= ini) & (result["neuron_number"] <= end) def run_pooling_1d(self): scaffold = Scaffold() - scaffold.add_brick(BaseP_Input(np.array([self.pvector]),p=self.basep,bits=self.bits,collapse_binary=False,name="Input0",time_dimension=False),"input") - scaffold.add_brick(convolution_1d(self.plength,self.filters,self.conv_thresholds,self.basep,self.bits,name="convolution_",mode=self.mode),[(0, 0)],output=True) - scaffold.add_brick(pooling_1d(self.pool_size,self.strides,thresholds=self.thresholds,name="pool_",method=self.method),[(1,0)],output=True) + 
scaffold.add_brick(BaseP_Input(np.array([self.pvector]), p=self.basep, bits=self.bits, collapse_binary=False, name="Input0", time_dimension=False), "input") + scaffold.add_brick(convolution_1d(self.plength, self.filters, self.conv_thresholds, self.basep, self.bits, name="convolution_", mode=self.mode), [(0, 0)], output=True) + scaffold.add_brick(pooling_1d(self.pool_size, self.strides, thresholds=self.thresholds, name="pool_", method=self.method), [(1, 0)], output=True) self.graph = scaffold.lay_bricks() scaffold.summary(verbose=1) @@ -398,37 +399,38 @@ def run_pooling_1d(self): def get_stride_positions(self): return np.arange(0, self.pixel_size, self.strides, dtype=int) - + def get_output_size(self): - return int(np.floor(1.0 + (np.float64(self.pixel_size) - self.pool_size)/self.strides)) - + return int(np.floor(1.0 + (np.float64(self.pixel_size) - self.pool_size) / self.strides)) + def get_expected_pooling_answer(self, pool_input): stride_positions = self.get_stride_positions() output_size = self.get_output_size() expected = [] - + if self.method == "max": for pos in stride_positions[:output_size]: - expected.append(np.any(pool_input[pos:pos+self.pool_size])) + expected.append(np.any(pool_input[pos : pos + self.pool_size])) expected = (np.array(expected, dtype=int) > self.thresholds).astype(int) elif self.method == "average": - weights = np.ones((self.pool_size,),dtype=float) / float(self.pool_size) + weights = np.ones((self.pool_size,), dtype=float) / float(self.pool_size) for pos in stride_positions[:output_size]: - expected.append(np.dot(weights,pool_input[pos:pos+self.pool_size].astype(int))) + expected.append(np.dot(weights, pool_input[pos : pos + self.pool_size].astype(int))) else: print(f"'method' class member variable must be either 'max' or 'average'. 
But it is {self.method}.") raise ValueError("Unrecognized 'method' class member variable.") - + return np.array(expected) - - def get_expected_spikes(self,expected_ans): + + def get_expected_spikes(self, expected_ans): ans = np.array(expected_ans) - spikes = ans[ ans > self.thresholds].astype(float) + spikes = ans[ans > self.thresholds].astype(float) return list(2.0 * np.ones(spikes.shape)) + class Test_Pooling2D: @pytest.fixture @@ -439,9 +441,9 @@ def convolution_mode(self): def default_convolution_params(self, convolution_mode): self.basep = 3 self.bits = 3 - self.filters = [[2, 3],[4,5]] + self.filters = [[2, 3], [4, 5]] self.mode = convolution_mode - self.pvector = [[1,1,4,6],[6,2,4,2],[2,3,5,4],[6,1,6,3]] + self.pvector = [[1, 1, 4, 6], [6, 2, 4, 2], [2, 3, 5, 4], [6, 1, 6, 3]] self.pshape = np.array(self.pvector).shape @pytest.fixture @@ -452,24 +454,24 @@ def numpy_convolution_result(self, default_convolution_params, spike_positions_v self.pixel_dim2 = self.pixel_shape[1] self.conv_spike_pos = self.make_convolution_spike_positions_vector(spike_positions_vector) self.conv_thresholds = self.conv_ans - self.conv_spike_pos - return self.conv_ans > self.conv_thresholds + return self.conv_ans > self.conv_thresholds @pytest.fixture(params=["fixed", "random"]) def spike_positions_vector(self, request): return request.param - + @pytest.fixture def pooling_size(self): return 2 - + @pytest.fixture def pooling_stride(self): return 2 - + @pytest.fixture def pooling_method(self): return "max" - + @pytest.fixture def default_pooling_params(self, pooling_size, pooling_stride, pooling_method): self.pool_size = pooling_size @@ -477,10 +479,10 @@ def default_pooling_params(self, pooling_size, pooling_stride, pooling_method): self.thresholds = 0.9 self.method = pooling_method - @pytest.mark.parametrize("pooling_size", np.arange(1,6)) - @pytest.mark.parametrize("pooling_stride", np.arange(1,6)) + @pytest.mark.parametrize("pooling_size", np.arange(1, 6)) + 
@pytest.mark.parametrize("pooling_stride", np.arange(1, 6)) def test_max_pooling(self, numpy_convolution_result, default_pooling_params): - + pool_input = numpy_convolution_result # Check calculations @@ -489,61 +491,61 @@ def test_max_pooling(self, numpy_convolution_result, default_pooling_params): # get output positions in result result = self.run_pooling_2d() - output_positions = self.get_output_spike_positions() - output_mask = self.get_output_mask(output_positions, result) + output_positions = self.get_output_spike_positions() + output_mask = self.get_output_mask(output_positions, result) calculated_spikes = list(result[output_mask].to_numpy()[:, 0]) assert expected_spikes == calculated_spikes @pytest.mark.parametrize("pooling_method", ["average"]) - @pytest.mark.parametrize("pooling_size", np.arange(1,6)) - @pytest.mark.parametrize("pooling_stride", np.arange(1,6)) + @pytest.mark.parametrize("pooling_size", np.arange(1, 6)) + @pytest.mark.parametrize("pooling_stride", np.arange(1, 6)) def test_average_pooling(self, numpy_convolution_result, default_pooling_params): pool_input = numpy_convolution_result # Check calculations - expected_ans = self.get_expected_pooling_answer(pool_input) + expected_ans = self.get_expected_pooling_answer(pool_input) self.thresholds = self.make_random_thresholds_vector(expected_ans) result = self.run_pooling_2d() # get output positions in result output_positions = self.get_output_spike_positions() - output_mask = self.get_output_mask(output_positions, result) + output_mask = self.get_output_mask(output_positions, result) - expected_spikes = self.get_expected_spikes(expected_ans) + expected_spikes = self.get_expected_spikes(expected_ans) calculated_spikes = list(result[output_mask].to_numpy()[:, 0]) assert expected_spikes == calculated_spikes - @pytest.mark.parametrize("pooling_size", np.arange(1,6)) - @pytest.mark.parametrize("pooling_stride", np.arange(1,6)) + @pytest.mark.parametrize("pooling_size", np.arange(1, 6)) + 
@pytest.mark.parametrize("pooling_stride", np.arange(1, 6)) def test_max_pooling_vector_thresholds(self, numpy_convolution_result, default_pooling_params): pool_input = numpy_convolution_result # Check calculations expected_ans = self.get_expected_pooling_answer(pool_input) - expected_spikes = self.get_expected_spikes(expected_ans) + expected_spikes = self.get_expected_spikes(expected_ans) # pooling portion self.thresholds = 0.9 * np.ones(self.get_output_shape(), dtype=float) result = self.run_pooling_2d() # get output positions in result - output_positions = self.get_output_spike_positions() - output_mask = self.get_output_mask(output_positions, result) + output_positions = self.get_output_spike_positions() + output_mask = self.get_output_mask(output_positions, result) calculated_spikes = list(result[output_mask].to_numpy()[:, 0]) assert expected_spikes == calculated_spikes @pytest.mark.parametrize("pooling_method", ["max"]) - @pytest.mark.parametrize("pooling_size", np.arange(1,6)) - @pytest.mark.parametrize("pooling_stride", np.arange(1,6)) + @pytest.mark.parametrize("pooling_size", np.arange(1, 6)) + @pytest.mark.parametrize("pooling_stride", np.arange(1, 6)) @pytest.mark.parametrize("convolution_mode", ["full"]) @pytest.mark.parametrize("spike_positions_vector", ["random"]) def test_max_pooling_with_full_convolution_mode(self, numpy_convolution_result, default_pooling_params): - + pool_input = numpy_convolution_result # Check calculations @@ -552,19 +554,19 @@ def test_max_pooling_with_full_convolution_mode(self, numpy_convolution_result, # get output positions in result result = self.run_pooling_2d() - output_positions = self.get_output_spike_positions() - output_mask = self.get_output_mask(output_positions, result) + output_positions = self.get_output_spike_positions() + output_mask = self.get_output_mask(output_positions, result) calculated_spikes = list(result[output_mask].to_numpy()[:, 0]) assert expected_spikes == calculated_spikes 
@pytest.mark.parametrize("pooling_method", ["max"]) - @pytest.mark.parametrize("pooling_size", np.arange(1,4)) - @pytest.mark.parametrize("pooling_stride", np.arange(1,4)) + @pytest.mark.parametrize("pooling_size", np.arange(1, 4)) + @pytest.mark.parametrize("pooling_stride", np.arange(1, 4)) @pytest.mark.parametrize("convolution_mode", ["valid"]) @pytest.mark.parametrize("spike_positions_vector", ["random"]) def test_max_pooling_with_valid_convolution_mode(self, numpy_convolution_result, default_pooling_params): - + pool_input = numpy_convolution_result # Check calculations @@ -573,19 +575,19 @@ def test_max_pooling_with_valid_convolution_mode(self, numpy_convolution_result, # get output positions in result result = self.run_pooling_2d() - output_positions = self.get_output_spike_positions() - output_mask = self.get_output_mask(output_positions, result) + output_positions = self.get_output_spike_positions() + output_mask = self.get_output_mask(output_positions, result) calculated_spikes = list(result[output_mask].to_numpy()[:, 0]) assert expected_spikes == calculated_spikes @pytest.mark.parametrize("pooling_method", ["max"]) - @pytest.mark.parametrize("pooling_size", np.arange(1,5)) - @pytest.mark.parametrize("pooling_stride", np.arange(1,5)) + @pytest.mark.parametrize("pooling_size", np.arange(1, 5)) + @pytest.mark.parametrize("pooling_stride", np.arange(1, 5)) @pytest.mark.parametrize("convolution_mode", ["same"]) @pytest.mark.parametrize("spike_positions_vector", ["random"]) def test_max_pooling_with_same_convolution_mode(self, numpy_convolution_result, default_pooling_params): - + pool_input = numpy_convolution_result # Check calculations @@ -594,15 +596,15 @@ def test_max_pooling_with_same_convolution_mode(self, numpy_convolution_result, # get output positions in result result = self.run_pooling_2d() - output_positions = self.get_output_spike_positions() - output_mask = self.get_output_mask(output_positions, result) + output_positions = 
self.get_output_spike_positions() + output_mask = self.get_output_mask(output_positions, result) calculated_spikes = list(result[output_mask].to_numpy()[:, 0]) assert expected_spikes == calculated_spikes @pytest.mark.parametrize("pooling_method", ["average"]) - @pytest.mark.parametrize("pooling_size", np.arange(1,6)) - @pytest.mark.parametrize("pooling_stride", np.arange(1,6)) + @pytest.mark.parametrize("pooling_size", np.arange(1, 6)) + @pytest.mark.parametrize("pooling_stride", np.arange(1, 6)) @pytest.mark.parametrize("convolution_mode", ["full"]) @pytest.mark.parametrize("spike_positions_vector", ["random"]) def test_average_pooling_with_full_convolution_mode(self, numpy_convolution_result, default_pooling_params): @@ -610,22 +612,22 @@ def test_average_pooling_with_full_convolution_mode(self, numpy_convolution_resu pool_input = numpy_convolution_result # Check calculations - expected_ans = self.get_expected_pooling_answer(pool_input) + expected_ans = self.get_expected_pooling_answer(pool_input) self.thresholds = self.make_random_thresholds_vector(expected_ans) result = self.run_pooling_2d() # get output positions in result output_positions = self.get_output_spike_positions() - output_mask = self.get_output_mask(output_positions, result) + output_mask = self.get_output_mask(output_positions, result) - expected_spikes = self.get_expected_spikes(expected_ans) + expected_spikes = self.get_expected_spikes(expected_ans) calculated_spikes = list(result[output_mask].to_numpy()[:, 0]) assert expected_spikes == calculated_spikes @pytest.mark.parametrize("pooling_method", ["average"]) - @pytest.mark.parametrize("pooling_size", np.arange(1,4)) - @pytest.mark.parametrize("pooling_stride", np.arange(1,4)) + @pytest.mark.parametrize("pooling_size", np.arange(1, 4)) + @pytest.mark.parametrize("pooling_stride", np.arange(1, 4)) @pytest.mark.parametrize("convolution_mode", ["valid"]) @pytest.mark.parametrize("spike_positions_vector", ["random"]) def 
test_average_pooling_with_valid_convolution_mode(self, numpy_convolution_result, default_pooling_params): @@ -633,22 +635,22 @@ def test_average_pooling_with_valid_convolution_mode(self, numpy_convolution_res pool_input = numpy_convolution_result # Check calculations - expected_ans = self.get_expected_pooling_answer(pool_input) + expected_ans = self.get_expected_pooling_answer(pool_input) self.thresholds = self.make_random_thresholds_vector(expected_ans) result = self.run_pooling_2d() # get output positions in result output_positions = self.get_output_spike_positions() - output_mask = self.get_output_mask(output_positions, result) + output_mask = self.get_output_mask(output_positions, result) - expected_spikes = self.get_expected_spikes(expected_ans) + expected_spikes = self.get_expected_spikes(expected_ans) calculated_spikes = list(result[output_mask].to_numpy()[:, 0]) assert expected_spikes == calculated_spikes @pytest.mark.parametrize("pooling_method", ["average"]) - @pytest.mark.parametrize("pooling_size", np.arange(1,5)) - @pytest.mark.parametrize("pooling_stride", np.arange(1,5)) + @pytest.mark.parametrize("pooling_size", np.arange(1, 5)) + @pytest.mark.parametrize("pooling_stride", np.arange(1, 5)) @pytest.mark.parametrize("convolution_mode", ["same"]) @pytest.mark.parametrize("spike_positions_vector", ["random"]) def test_average_pooling_with_same_convolution_mode(self, numpy_convolution_result, default_pooling_params): @@ -656,51 +658,55 @@ def test_average_pooling_with_same_convolution_mode(self, numpy_convolution_resu pool_input = numpy_convolution_result # Check calculations - expected_ans = self.get_expected_pooling_answer(pool_input) + expected_ans = self.get_expected_pooling_answer(pool_input) self.thresholds = self.make_random_thresholds_vector(expected_ans) result = self.run_pooling_2d() # get output positions in result output_positions = self.get_output_spike_positions() - output_mask = self.get_output_mask(output_positions, result) + output_mask 
= self.get_output_mask(output_positions, result) - expected_spikes = self.get_expected_spikes(expected_ans) + expected_spikes = self.get_expected_spikes(expected_ans) calculated_spikes = list(result[output_mask].to_numpy()[:, 0]) assert expected_spikes == calculated_spikes @pytest.mark.parametrize("convolution_mode", ["full"]) - @pytest.mark.parametrize("shape", [(1,), (2,), (3,3), (4,4), (5,5)]) + @pytest.mark.parametrize("shape", [(1,), (2,), (3, 3), (4, 4), (5, 5)]) def test_thresholds_shape(self, default_convolution_params, default_pooling_params, shape): - ''' + """ Correct pooling threshold shape is (2,2) - ''' - self.conv_thresholds = np.array([[0.9,1.,1.,1.,0.9],[1.,1.,0.9,0.9,0.9],[0.9,0.9,0.9,1.,0.9],[1.,0.9,1.,1.,0.9],[0.9,0.9,1.,1.,1.]],dtype=float) + """ + self.conv_thresholds = np.array( + [[0.9, 1.0, 1.0, 1.0, 0.9], [1.0, 1.0, 0.9, 0.9, 0.9], [0.9, 0.9, 0.9, 1.0, 0.9], [1.0, 0.9, 1.0, 1.0, 0.9], [0.9, 0.9, 1.0, 1.0, 1.0]], dtype=float + ) with pytest.raises(ValueError): - self.thresholds = 0.9*np.ones(shape) + self.thresholds = 0.9 * np.ones(shape) self.run_pooling_2d() @pytest.mark.parametrize("convolution_mode", ["full"]) - @pytest.mark.parametrize("thresholds", [1.1,2.1,np.ones((2,2),dtype=float)]) + @pytest.mark.parametrize("thresholds", [1.1, 2.1, np.ones((2, 2), dtype=float)]) def test_max_pooling_thresholds_changes(self, default_convolution_params, default_pooling_params, thresholds): - ''' + """ Max pooling threshold must be < 0.9 for the "or" operation to be performed correctly. Otherwise, the brick no longer behaves as "max" operation. 
- ''' - self.conv_thresholds = np.array([[0.9,1.,1.,1.,0.9],[1.,1.,0.9,0.9,0.9],[0.9,0.9,0.9,1.,0.9],[1.,0.9,1.,1.,0.9],[0.9,0.9,1.,1.,1.]],dtype=float) + """ + self.conv_thresholds = np.array( + [[0.9, 1.0, 1.0, 1.0, 0.9], [1.0, 1.0, 0.9, 0.9, 0.9], [0.9, 0.9, 0.9, 1.0, 0.9], [1.0, 0.9, 1.0, 1.0, 0.9], [0.9, 0.9, 1.0, 1.0, 1.0]], dtype=float + ) with pytest.raises(ValueError): self.thresholds = thresholds self.run_pooling_2d() def test_simple_explicit_max_pooling(self): - ''' - Explicit invocation of the pooling brick unit test. Removes the complexity of the tests above to show the + """ + Explicit invocation of the pooling brick unit test. Removes the complexity of the tests above to show the basic structure of general unit tests for the pooling brick. - ''' + """ self.basep = 3 self.bits = 3 - self.filters = [[2, 3],[4,5]] + self.filters = [[2, 3], [4, 5]] self.mode = "full" - self.pvector = [[1, 1, 4, 6],[6,2,4,2],[2,3,5,4],[6,1,6,3]] + self.pvector = [[1, 1, 4, 6], [6, 2, 4, 2], [2, 3, 5, 4], [6, 1, 6, 3]] self.pshape = np.array(self.pvector).shape self.pool_size = 2 @@ -712,61 +718,61 @@ def test_simple_explicit_max_pooling(self): self.pixel_shape = self.conv_ans.shape self.pixel_dim1 = self.pixel_shape[0] self.pixel_dim2 = self.pixel_shape[1] - self.conv_spike_pos = 0.1*np.array([[1,0,0,0,1],[0,0,1,1,1],[1,1,1,0,1],[0,1,0,0,1],[1,1,0,0,0]],dtype=float) + self.conv_spike_pos = 0.1 * np.array([[1, 0, 0, 0, 1], [0, 0, 1, 1, 1], [1, 1, 1, 0, 1], [0, 1, 0, 0, 1], [1, 1, 0, 0, 0]], dtype=float) self.conv_thresholds = self.conv_ans - self.conv_spike_pos pool_input = self.conv_ans > self.conv_thresholds - + # Check calculations # expected_ans = self.get_expected_pooling_answer(pool_input) - expected_ans = np.array([[1, 1],[1, 1]], dtype=float) + expected_ans = np.array([[1, 1], [1, 1]], dtype=float) expected_spikes = self.get_expected_spikes(expected_ans) result = self.run_pooling_2d() # get output positions in result - output_positions = 
self.get_output_spike_positions() - output_mask = self.get_output_mask(output_positions, result) + output_positions = self.get_output_spike_positions() + output_mask = self.get_output_mask(output_positions, result) calculated_spikes = list(result[output_mask].to_numpy()[:, 0]) assert expected_spikes == calculated_spikes def test_simple_explicit_average_pooling(self): - ''' - Explicit invocation of the pooling brick unit test. Removes the complexity of the tests above to show the + """ + Explicit invocation of the pooling brick unit test. Removes the complexity of the tests above to show the basic structure of general unit tests for the pooling brick. - ''' + """ self.basep = 3 self.bits = 3 - self.filters = [[2, 3],[4,5]] + self.filters = [[2, 3], [4, 5]] self.mode = "full" - self.pvector = [[1, 1, 4, 6],[6,2,4,2],[2,3,5,4],[6,1,6,3]] + self.pvector = [[1, 1, 4, 6], [6, 2, 4, 2], [2, 3, 5, 4], [6, 1, 6, 3]] self.pshape = np.array(self.pvector).shape self.pool_size = 2 self.strides = 2 - self.thresholds = np.array([[0.20,0.9],[0.7,0.30]]) + self.thresholds = np.array([[0.20, 0.9], [0.7, 0.30]]) self.method = "average" self.conv_ans = convolve2d(self.pvector, self.filters, mode=self.mode) self.pixel_shape = self.conv_ans.shape self.pixel_dim1 = self.pixel_shape[0] self.pixel_dim2 = self.pixel_shape[1] - self.conv_spike_pos = 0.1*np.array([[1,0,0,0,1],[0,0,1,1,1],[1,1,1,0,1],[0,1,0,0,1],[1,1,0,0,0]],dtype=float) + self.conv_spike_pos = 0.1 * np.array([[1, 0, 0, 0, 1], [0, 0, 1, 1, 1], [1, 1, 1, 0, 1], [0, 1, 0, 0, 1], [1, 1, 0, 0, 0]], dtype=float) self.conv_thresholds = self.conv_ans - self.conv_spike_pos pool_input = self.conv_ans > self.conv_thresholds - + # Check calculations # expected_ans = self.get_expected_pooling_answer(pool_input) - expected_ans = np.array([[1, 0],[1, 0]], dtype=float) + expected_ans = np.array([[1, 0], [1, 0]], dtype=float) expected_spikes = self.get_expected_spikes(expected_ans) result = self.run_pooling_2d() # get output positions in 
result - output_positions = self.get_output_spike_positions() - output_mask = self.get_output_mask(output_positions, result) + output_positions = self.get_output_spike_positions() + output_mask = self.get_output_mask(output_positions, result) calculated_spikes = list(result[output_mask].to_numpy()[:, 0]) assert expected_spikes == calculated_spikes @@ -774,8 +780,8 @@ def test_simple_explicit_average_pooling(self): def get_output_neuron_numbers(self): neuron_numbers = [] for key in self.graph.nodes.keys(): - if key.startswith('pool_p'): - neuron_numbers.append(self.graph.nodes[key]['neuron_number']) + if key.startswith("pool_p"): + neuron_numbers.append(self.graph.nodes[key]["neuron_number"]) return np.array(neuron_numbers) @@ -787,18 +793,18 @@ def get_output_spike_positions(self): else: output_ini_position = np.amin(neuron_numbers) output_end_position = np.amax(neuron_numbers) - return [output_ini_position,output_end_position] + return [output_ini_position, output_end_position] - def get_output_mask(self,output_positions, result): + def get_output_mask(self, output_positions, result): ini = output_positions[0] end = output_positions[1] return (result["neuron_number"] >= ini) & (result["neuron_number"] <= end) def run_pooling_2d(self): scaffold = Scaffold() - scaffold.add_brick(BaseP_Input(np.array([self.pvector]),p=self.basep,bits=self.bits,collapse_binary=False,name="Input0",time_dimension=False),"input") - scaffold.add_brick(convolution_2d(self.pshape,self.filters,self.conv_thresholds,self.basep,self.bits,name="convolution_",mode=self.mode),[(0, 0)],output=True) - scaffold.add_brick(pooling_2d(self.pool_size,self.strides,thresholds=self.thresholds,name="pool_",method=self.method),[(1,0)],output=True) + scaffold.add_brick(BaseP_Input(np.array([self.pvector]), p=self.basep, bits=self.bits, collapse_binary=False, name="Input0", time_dimension=False), "input") + scaffold.add_brick(convolution_2d(self.pshape, self.filters, self.conv_thresholds, self.basep, self.bits, 
name="convolution_", mode=self.mode), [(0, 0)], output=True) + scaffold.add_brick(pooling_2d(self.pool_size, self.strides, thresholds=self.thresholds, name="pool_", method=self.method), [(1, 0)], output=True) self.graph = scaffold.lay_bricks() scaffold.summary(verbose=1) @@ -810,40 +816,40 @@ def run_pooling_2d(self): def make_random_thresholds_vector(self, initial_vector): thresholds = initial_vector.copy() - random_ids = np.random.randint(2,size=thresholds.shape).astype(bool) + random_ids = np.random.randint(2, size=thresholds.shape).astype(bool) thresholds[random_ids] = thresholds[random_ids] - 0.1 thresholds[~random_ids] = thresholds[~random_ids] + 0.1 thresholds[thresholds < 0.0] = 0.0 return thresholds - + def make_convolution_spike_positions_vector(self, case): - ''' - The returned list is [0.0, 0.1, 0.0, 0.1, 0.0, 0.0, 0.0] for "fixed" case. The "random" case returns a list + """ + The returned list is [0.0, 0.1, 0.0, 0.1, 0.0, 0.0, 0.0] for "fixed" case. The "random" case returns a list with the positions of "0.1" randomized in the list. The 0.1 specifies the convolution brick's output positions that will spike (i.e., output neurons will fire). - ''' + """ conv_spike_pos = np.zeros(self.conv_ans.shape, dtype=float) if case.lower() == "fixed": - conv_spike_pos = 0.1*np.array([[1,0,0,0,1],[0,0,1,1,1],[1,1,1,0,1],[0,1,0,0,1],[1,1,0,0,0]],dtype=float) + conv_spike_pos = 0.1 * np.array([[1, 0, 0, 0, 1], [0, 0, 1, 1, 1], [1, 1, 1, 0, 1], [0, 1, 0, 0, 1], [1, 1, 0, 0, 0]], dtype=float) elif case.lower() == "random": - conv_spike_pos = 0.1 * np.random.randint(2,size=self.conv_ans.shape) + conv_spike_pos = 0.1 * np.random.randint(2, size=self.conv_ans.shape) else: print(f"Case parameter is either 'fixed' or 'random'. 
But you provided {case}.") raise ValueError("Unrecognized 'case' parameter value provided.") - + return conv_spike_pos - + def get_stride_positions(self, pixel_dim): return np.arange(0, pixel_dim, self.strides, dtype=int) - + def get_output_size(self, pixel_dim): - return int(np.floor(1.0 + (np.float64(pixel_dim) - self.pool_size)/self.strides)) + return int(np.floor(1.0 + (np.float64(pixel_dim) - self.pool_size) / self.strides)) def get_output_shape(self): return (self.get_output_size(self.pixel_dim1), self.get_output_size(self.pixel_dim2)) - + def get_expected_pooling_answer(self, pool_input): row_stride_positions = self.get_stride_positions(self.pixel_dim1) col_stride_positions = self.get_stride_positions(self.pixel_dim2) @@ -851,28 +857,27 @@ def get_expected_pooling_answer(self, pool_input): expected = [] if self.method == "max": - for row in row_stride_positions[:output_shape[0]]: - for col in col_stride_positions[:output_shape[1]]: - expected.append(np.any(pool_input[row:row+self.pool_size,col:col+self.pool_size])) + for row in row_stride_positions[: output_shape[0]]: + for col in col_stride_positions[: output_shape[1]]: + expected.append(np.any(pool_input[row : row + self.pool_size, col : col + self.pool_size])) expected = np.reshape(expected, output_shape).astype(int) expected = (np.array(expected, dtype=int) > self.thresholds).astype(float) - elif self.method == "average": #TODO : fix this code. - weights = np.ones((self.pool_size,self.pool_size),dtype=float) / float(self.pool_size*self.pool_size) - for row in row_stride_positions[:output_shape[0]]: - for col in col_stride_positions[:output_shape[1]]: - expected.append(np.dot(weights.flatten(),pool_input[row:row+self.pool_size,col:col+self.pool_size].astype(int).flatten())) + elif self.method == "average": # TODO : fix this code. 
+ weights = np.ones((self.pool_size, self.pool_size), dtype=float) / float(self.pool_size * self.pool_size) + for row in row_stride_positions[: output_shape[0]]: + for col in col_stride_positions[: output_shape[1]]: + expected.append(np.dot(weights.flatten(), pool_input[row : row + self.pool_size, col : col + self.pool_size].astype(int).flatten())) expected = np.reshape(expected, output_shape).astype(float) else: print(f"'method' class member variable must be either 'max' or 'average'. But it is {self.method}.") raise ValueError("Unrecognized 'method' class member variable.") - + return np.array(expected) - - def get_expected_spikes(self,expected_ans): - ans = np.array(expected_ans) - spikes = ans[ ans > self.thresholds].astype(float) - return list(2.0 * np.ones(spikes.shape)) + def get_expected_spikes(self, expected_ans): + ans = np.array(expected_ans) + spikes = ans[ans > self.thresholds].astype(float) + return list(2.0 * np.ones(spikes.shape)) diff --git a/tests/unit/helpers.py b/tests/unit/helpers.py index 830386b1..30e98239 100644 --- a/tests/unit/helpers.py +++ b/tests/unit/helpers.py @@ -4,13 +4,9 @@ import numpy as np from .keras_helpers import keras_convolve2d, keras_convolve2d_4dinput, generate_keras_kernel, generate_mock_image, keras_convolution2d_output_shape, keras_convolution2d_output_shape_4dinput -try: - from tensorflow.keras.models import Sequential - from tensorflow.keras import Model, initializers - from tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D -except ModuleNotFoundError: - import pytest - pytest.skip(reason="Tensorflow package missing.", allow_module_level=True) +from tensorflow.keras.models import Sequential +from tensorflow.keras import Model, initializers +from tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D class ConvolutionParams: diff --git a/tests/unit/simulators/spiking_neural_network/test_input_neuron.py b/tests/unit/simulators/spiking_neural_network/test_input_neuron.py index 422c9963..0f622316 100644 --- 
a/tests/unit/simulators/spiking_neural_network/test_input_neuron.py +++ b/tests/unit/simulators/spiking_neural_network/test_input_neuron.py @@ -2,23 +2,24 @@ from fugu.simulators.SpikingNeuralNetwork.neuron import InputNeuron +base_neuron = InputNeuron @pytest.fixture def default_neuron(): - return InputNeuron() + return base_neuron() @pytest.fixture def custom_voltage_neuron(): def _inner(v): - return InputNeuron(voltage=v) + return base_neuron(voltage=v) return _inner @pytest.fixture def named_neuron(): - return InputNeuron(name="Testing") + return base_neuron(name="Testing") def test_constructor_defaults(default_neuron): @@ -64,13 +65,15 @@ def test_threshold_setter(default_neuron): def test__str__(capsys, default_neuron): print(default_neuron) out, _ = capsys.readouterr() - assert out == "InputNeuron None\n" + val_str = "InputNeuron" + assert out == f"{val_str} None\n" def test__repr__(capsys, default_neuron): print(repr(default_neuron)) out, _ = capsys.readouterr() - assert out == "InputNeuron None\n" + val_str = "InputNeuron" + assert out == f"{val_str} None\n" @pytest.mark.parametrize("in_stream", [[0.1 + 0.2j]]) @@ -84,7 +87,7 @@ def test_update_state_on_default_neuron(default_neuron): with pytest.raises(TypeError): default_neuron.update_state() - default_neuron.connect_to_input([0.01, 0.2]) + default_neuron.connect_to_input([-0.01, 0.2]) default_neuron.update_state() assert default_neuron.spike == False @@ -102,10 +105,12 @@ def test_update_state_on_default_neuron(default_neuron): def test_named__str__(capsys, named_neuron): print(named_neuron) out, _ = capsys.readouterr() - assert out == "InputNeuron Testing\n" + val_str = "InputNeuron" + assert out == f"{val_str} Testing\n" def test_named__repr__(capsys, named_neuron): print(repr(named_neuron)) out, _ = capsys.readouterr() - assert out == "InputNeuron Testing\n" + val_str = "InputNeuron" + assert out == f"{val_str} Testing\n" diff --git 
a/tests/unit/simulators/spiking_neural_network/test_learning_synapse.py b/tests/unit/simulators/spiking_neural_network/test_learning_synapse.py new file mode 100644 index 00000000..43302cca --- /dev/null +++ b/tests/unit/simulators/spiking_neural_network/test_learning_synapse.py @@ -0,0 +1,452 @@ +from collections import deque + +import numpy as np +import pytest + +from fugu.simulators.SpikingNeuralNetwork.neuron import InputNeuron, LIFNeuron +from fugu.simulators.SpikingNeuralNetwork.synapse import LearningSynapse as Synapse + + +@pytest.fixture +def lif_neuron(): + def _inner(name=None): + return LIFNeuron(name=name) + + return _inner + + +@pytest.fixture +def input_neuron(): + def _inner(name=None): + return InputNeuron(name=name) + + return _inner + + +@pytest.fixture +def default_synapse_w_lif_neurons(): + n1 = LIFNeuron("n1") + n2 = LIFNeuron("n2") + return Synapse(n1, n2) + + +@pytest.fixture +def default_synapse_w_input_neurons(): + n1 = InputNeuron("n1") + n2 = InputNeuron("n2") + return Synapse(n1, n2) + + + +@pytest.mark.parametrize( + "pre_neuron, post_neuron", + [ + (None, lif_neuron), + (input_neuron, None), + (None, None), + (lif_neuron, object), + ([], input_neuron), + (lif_neuron, set()), + (1, 2), + (input_neuron, "fail"), + ], +) +def test_constructor_exceptions(pre_neuron, post_neuron): + with pytest.raises(TypeError): + Synapse(pre_neuron, post_neuron) + + +@pytest.mark.parametrize("delay", ["fail", [], set, object,float, tuple]) +def test_constructor_delay_type_check(lif_neuron, delay): + with pytest.raises(TypeError): + Synapse(lif_neuron("n1"), lif_neuron("n2"), delay=delay) + + +@pytest.mark.parametrize("weight", ["fail", [], set, object, tuple]) +def test_constructor_weight_type_check(lif_neuron, weight): + with pytest.raises(TypeError): + Synapse(lif_neuron("n1"), lif_neuron("n2"), weight=weight) + +@pytest.mark.parametrize( + "delay", + [ + 0, + -1, + -10 + ], +) +def test_constructor_delay_value_check(lif_neuron, delay): + with 
pytest.raises(ValueError): + Synapse(lif_neuron("n1"), lif_neuron("n2"), delay=delay) + +@pytest.mark.parametrize( + "learning_rule", + [ + "None", + "fail", + " " + ] +) +def test_constructor_learning_rule_value_check(lif_neuron, learning_rule): + with pytest.raises(ValueError): + Synapse(lif_neuron("n1"), lif_neuron("n2"), learning_rule=learning_rule) + + +@pytest.mark.parametrize( + "learning_rule", + [ + [], + object, + int, + set + ] +) +def test_constructor_learning_rule_type_check(lif_neuron, learning_rule): + with pytest.raises(TypeError): + Synapse(lif_neuron("n1"), lif_neuron("n2"), learning_rule=learning_rule) + + +@pytest.mark.parametrize( + "learning_rule", + [ + [], + object, + int, + set + ] +) +def test_constructor_learning_rule_type_check(lif_neuron, learning_rule): + with pytest.raises(TypeError): + Synapse(lif_neuron("n1"), lif_neuron("n2"), learning_rule=learning_rule) + + +def test_constructor_defaults( + default_synapse_w_lif_neurons, default_synapse_w_input_neurons +): + assert default_synapse_w_lif_neurons.delay == 1 + assert default_synapse_w_lif_neurons._d == 1 + assert default_synapse_w_lif_neurons.weight == 1.0 + assert default_synapse_w_lif_neurons._w == 1.0 + assert default_synapse_w_lif_neurons._hist == deque(np.zeros(1)) + assert default_synapse_w_lif_neurons._eligibility_trace == 0.0 + assert default_synapse_w_lif_neurons.name == "s_n1_n2" + assert default_synapse_w_lif_neurons._learning_rule == "STDP" + + + assert default_synapse_w_input_neurons.delay == 1 + assert default_synapse_w_input_neurons._d == 1 + assert default_synapse_w_input_neurons.weight == 1.0 + assert default_synapse_w_input_neurons._w == 1.0 + assert default_synapse_w_input_neurons._eligibility_trace == 0.0 + assert default_synapse_w_input_neurons._hist == deque(np.zeros(1)) + assert default_synapse_w_input_neurons.name == "s_n1_n2" + assert default_synapse_w_input_neurons._learning_rule == "STDP" + + +def test_neuron_getters(): + pre_neuron = LIFNeuron("pre") + 
post_neuron = LIFNeuron("post") + synapse = Synapse(pre_neuron, post_neuron) + + assert synapse.pre_neuron == pre_neuron + assert synapse.post_neuron == post_neuron + + +def test_synapse_key(): + pre_neuron = LIFNeuron("pre") + post_neuron = LIFNeuron("post") + synapse = Synapse(pre_neuron, post_neuron) + + assert synapse.get_key() == (pre_neuron, post_neuron) + + + +def test_weight_setter(default_synapse_w_lif_neurons, default_synapse_w_input_neurons): + assert default_synapse_w_lif_neurons.weight == 1.0 + default_synapse_w_lif_neurons.weight = 2.0 + assert default_synapse_w_lif_neurons.weight == 2.0 + + assert default_synapse_w_input_neurons.weight == 1.0 + default_synapse_w_input_neurons.weight = 3.0 + assert default_synapse_w_input_neurons.weight == 3.0 + + +@pytest.mark.parametrize("weight", ["fail", [], set, object, tuple]) +def test_weight_setter_type_check(default_synapse_w_lif_neurons, weight): + assert default_synapse_w_lif_neurons.weight == 1.0 + with pytest.raises(TypeError): + default_synapse_w_lif_neurons.weight = weight + + +@pytest.mark.parametrize("delay", ["fail", [], set, object, tuple]) +def test_delay_setter_type_check(default_synapse_w_lif_neurons, delay): + assert default_synapse_w_lif_neurons.delay == 1 + with pytest.raises(TypeError): + default_synapse_w_lif_neurons.delay = delay + + +@pytest.mark.parametrize( + "delay", + [ + 0, + ], +) +def test_delay_setter_value_check(default_synapse_w_lif_neurons, delay): + assert default_synapse_w_lif_neurons.delay == 1 + with pytest.raises(ValueError): + default_synapse_w_lif_neurons.delay = delay + + +def test_delay_setter(default_synapse_w_input_neurons): + assert default_synapse_w_input_neurons.delay == 1 + default_synapse_w_input_neurons.delay = 2 + assert default_synapse_w_input_neurons.delay == 2 + + +@pytest.mark.parametrize("delay", ["fail", [], set, object]) +def test_set_params_type_check(default_synapse_w_lif_neurons, delay): + with pytest.raises(TypeError): + 
default_synapse_w_lif_neurons.set_params(new_delay=delay) + + +@pytest.mark.parametrize("weight", ["fail", [], set, object, tuple]) +def test_set_params_type_check(default_synapse_w_lif_neurons, weight): + with pytest.raises(TypeError): + default_synapse_w_lif_neurons.set_params(new_weight=weight) + + +@pytest.mark.parametrize( + "delay", + [ + 0, + -1, + -10 + ], +) +def test_set_params_value_check(default_synapse_w_lif_neurons, delay): + with pytest.raises(ValueError): + default_synapse_w_lif_neurons.set_params(new_delay=delay) + + +@pytest.mark.parametrize("delay", [2, 3, 4, 100]) +def test_set_params( + default_synapse_w_lif_neurons, default_synapse_w_input_neurons, delay +): + assert default_synapse_w_lif_neurons.delay == 1 + default_synapse_w_lif_neurons.set_params(new_delay=delay) + assert default_synapse_w_lif_neurons.delay == delay + + assert default_synapse_w_input_neurons.delay == 1 + default_synapse_w_input_neurons.set_params(new_delay=delay) + assert default_synapse_w_input_neurons.delay == delay + + +def test_show_params(capsys, default_synapse_w_lif_neurons): + assert default_synapse_w_lif_neurons.show_params() == None + out, _ = capsys.readouterr() + assert ( + out + == "Synapse LIFNeuron n1(0.0, 0.0, 1.0) -> LIFNeuron n2(0.0, 0.0, 1.0):\n delay : 1\n weight : 1.0\n learning_rule : STDP\n" + ) + + assert default_synapse_w_lif_neurons.set_params(new_delay=2, new_weight=2.0) == None + assert default_synapse_w_lif_neurons.show_params() == None + out, _ = capsys.readouterr() + assert ( + out + == "Synapse LIFNeuron n1(0.0, 0.0, 1.0) -> LIFNeuron n2(0.0, 0.0, 1.0):\n delay : 2\n weight : 2.0\n learning_rule : STDP\n" + ) + + +def test_named__str__(capsys, default_synapse_w_lif_neurons): + print(default_synapse_w_lif_neurons) + out, _ = capsys.readouterr() + assert out == "STDP_Synapse s_n1_n2(1, 1.0)\n" + + +def test_named__repr__(capsys, default_synapse_w_lif_neurons): + print(repr(default_synapse_w_lif_neurons)) + out, _ = capsys.readouterr() + assert 
out == "s_n1_n2\n" + + +# @pytest.mark.parametrize( +# "delay", [1,2,3] +# ) +def test_update_state_on_default_lif_synapse(default_synapse_w_lif_neurons): + reference_hist = deque(np.zeros(default_synapse_w_lif_neurons.delay)) + for _ in range(50): + assert default_synapse_w_lif_neurons.update_state() == None + assert default_synapse_w_lif_neurons._learning_rule == "STDP" + assert default_synapse_w_lif_neurons._hist == reference_hist + + +def test_update_state_on_default_lif_synapse_with_pre_spike( + default_synapse_w_lif_neurons, +): + reference_hist = deque(np.zeros(default_synapse_w_lif_neurons.delay)) + assert default_synapse_w_lif_neurons.update_state() == None + + # Updating the state of the pre neuron to spike + default_synapse_w_lif_neurons._pre.spike = True + assert default_synapse_w_lif_neurons.update_state() == None + reference_hist.append(default_synapse_w_lif_neurons._w) + reference_hist.popleft() + assert default_synapse_w_lif_neurons._learning_rule == "STDP" + assert default_synapse_w_lif_neurons._hist == reference_hist + + +def test_update_state_on_default_input_synapse(default_synapse_w_input_neurons): + reference_hist = deque(np.zeros(default_synapse_w_input_neurons.delay)) + for _ in range(50): + assert default_synapse_w_input_neurons.update_state() == None + assert default_synapse_w_input_neurons._learning_rule == "STDP" + assert default_synapse_w_input_neurons._hist == reference_hist + + +def test_update_state_on_default_input_synapse_with_pre_spike( + default_synapse_w_input_neurons, +): + reference_hist = deque(np.zeros(default_synapse_w_input_neurons.delay)) + assert default_synapse_w_input_neurons.update_state() == None + + # Updating the state of the pre neuron to spike + default_synapse_w_input_neurons._pre.spike = True + assert default_synapse_w_input_neurons.update_state() == None + reference_hist.append(default_synapse_w_input_neurons._w) + reference_hist.popleft() + assert default_synapse_w_input_neurons._learning_rule == "STDP" + 
assert default_synapse_w_input_neurons._hist == reference_hist + + +@pytest.mark.parametrize("spike_hist", [[0, 0, 1, 1], [1, 1, 0], [1, 1, 1, 1]]) +def test_update_state_on_default_lif_synapse_with_learning( + default_synapse_w_lif_neurons, spike_hist +): + reference_hist = deque(np.zeros(default_synapse_w_lif_neurons.delay)) + assert default_synapse_w_lif_neurons.update_state() == None + + # Setting the learning rule to STDP + default_synapse_w_lif_neurons._learning_rule = "STDP" + + # Setting the pre spike history to test pre and post spiking at the same time + default_synapse_w_lif_neurons._pre.spike_hist = spike_hist + + # Setting the post spike to True + default_synapse_w_lif_neurons._pre.spike = True + default_synapse_w_lif_neurons._post.spike = True + + # Updating the learning param + default_synapse_w_lif_neurons._learning_params.A_p = 1 + reference_hist.append(default_synapse_w_lif_neurons._w + 1) + reference_hist.popleft() + assert default_synapse_w_lif_neurons.update_state() == None + + assert default_synapse_w_lif_neurons._hist == reference_hist + + +@pytest.mark.parametrize("spike_hist", [[0, 0, 0, 0], [0, 0, 0], [0, 0]]) +def test_update_state_on_default_lif_synapse_learning_pre_spike( + default_synapse_w_lif_neurons, spike_hist +): + reference_hist = deque(np.zeros(default_synapse_w_lif_neurons.delay)) + assert default_synapse_w_lif_neurons.update_state() == None + + # Setting the learning rule to STDP + default_synapse_w_lif_neurons._learning_rule = "STDP" + + # Setting the pre spike history to test pre and post spiking at the same time + default_synapse_w_lif_neurons._pre.spike_hist = spike_hist + + # Setting the post spike to True + default_synapse_w_lif_neurons._pre.spike = True + default_synapse_w_lif_neurons._post.spike = True + + # Updating the learning param + default_synapse_w_lif_neurons._learning_params.A_p = 1 + reference_hist.append(default_synapse_w_lif_neurons._w) + reference_hist.popleft() + assert 
default_synapse_w_lif_neurons.update_state() == None + + assert default_synapse_w_lif_neurons._hist == reference_hist + + +@pytest.mark.parametrize( + "spike_hist", [[0, 0, 1, 0, 1], [0, 1, 1, 0, 0], [1, 1, 1, 0, 1]] +) +def test_update_state_on_default_lif_synapse_learning_pre_spike_potentiation( + default_synapse_w_lif_neurons, spike_hist +): + reference_hist = deque(np.zeros(default_synapse_w_lif_neurons.delay)) + assert default_synapse_w_lif_neurons.update_state() == None + + # Setting the learning rule to STDP + default_synapse_w_lif_neurons._learning_rule = "STDP" + + # Setting the pre spike history to test pre and post spiking at the same time + default_synapse_w_lif_neurons._pre.spike_hist = spike_hist + + # Setting the post spike to True + default_synapse_w_lif_neurons._pre.spike = True + default_synapse_w_lif_neurons._post.spike = True + + # Updating the learning param + default_synapse_w_lif_neurons._learning_params.A_p = 1 + default_synapse_w_lif_neurons._learning_params.tau = 1 + dw = 1 * np.exp(-2) / 1 + reference_hist.append(default_synapse_w_lif_neurons._w + dw) + reference_hist.popleft() + assert default_synapse_w_lif_neurons.update_state() == None + + assert default_synapse_w_lif_neurons._hist == reference_hist + + +@pytest.mark.parametrize('spike_hist', [[0, 0, 1, 0, 0], [0, 1, 1, 0, 0], [1, 1, 1, 0, 0]]) +def test_update_state_on_default_lif_synapse_post_spike(default_synapse_w_lif_neurons, spike_hist): + reference_hist = deque(np.zeros(default_synapse_w_lif_neurons.delay)) + assert default_synapse_w_lif_neurons.update_state() == None + + # Setting the learning rule to STDP + default_synapse_w_lif_neurons._learning_rule = "STDP" + + # Setting the pre spike history to test pre and post spiking at the same time + default_synapse_w_lif_neurons._post.spike_hist = spike_hist + default_synapse_w_lif_neurons._pre.spike_hist = [0, 0, 0, 1, 1] + # Setting the post spike to True + default_synapse_w_lif_neurons._pre.spike = True + 
default_synapse_w_lif_neurons._post.spike = False + + # Updating the learning param + default_synapse_w_lif_neurons._learning_params.A_n = -1 + default_synapse_w_lif_neurons._learning_params.tau = 1 + dw = -1 * np.exp(-2) / 1 + reference_hist.append(default_synapse_w_lif_neurons._w + dw) + reference_hist.popleft() + assert default_synapse_w_lif_neurons.update_state() == None + + assert default_synapse_w_lif_neurons._hist == reference_hist + + +@pytest.mark.parametrize('spike_hist', [[0, 0, 0, 0, 0], [0, 0, 0], [0]]) +def test_update_state_on_default_lif_synapse_post_spike_none(default_synapse_w_lif_neurons, spike_hist): + reference_hist = deque(np.zeros(default_synapse_w_lif_neurons.delay)) + assert default_synapse_w_lif_neurons.update_state() == None + + # Setting the learning rule to STDP + default_synapse_w_lif_neurons._learning_rule = "STDP" + + # Setting the pre spike history to test pre and post spiking at the same time + default_synapse_w_lif_neurons._post.spike_hist = spike_hist + default_synapse_w_lif_neurons._pre.spike_hist = [0, 0, 0, 1, 1] + # Setting the post spike to True + default_synapse_w_lif_neurons._pre.spike = True + default_synapse_w_lif_neurons._post.spike = False + + # Updating the learning param + + reference_hist.append(default_synapse_w_lif_neurons._w + 0) + reference_hist.popleft() + assert default_synapse_w_lif_neurons.update_state() == None + + assert default_synapse_w_lif_neurons._hist == reference_hist + diff --git a/tests/unit/simulators/spiking_neural_network/test_lif_neuron.py b/tests/unit/simulators/spiking_neural_network/test_lif_neuron.py index 765fa0f1..31414739 100644 --- a/tests/unit/simulators/spiking_neural_network/test_lif_neuron.py +++ b/tests/unit/simulators/spiking_neural_network/test_lif_neuron.py @@ -1,6 +1,7 @@ import pytest from fugu.simulators.SpikingNeuralNetwork.neuron import LIFNeuron +from fugu.simulators.SpikingNeuralNetwork.synapse import LearningSynapse @pytest.fixture @@ -8,6 +9,7 @@ def default_neuron(): 
return LIFNeuron() + @pytest.fixture def custom_voltage_neuron(): def _inner(v): @@ -35,6 +37,9 @@ def test_constructor_defaults(default_neuron): assert default_neuron.threshold == 0.0 assert default_neuron.reset_voltage == 0.0 assert default_neuron.leakage_constant == 1.0 + assert default_neuron.scaling == False + assert default_neuron.scaling_factor == 0.1 + # and their _ counterparts assert default_neuron._T == 0.0 assert default_neuron._R == 0.0 @@ -42,6 +47,7 @@ def test_constructor_defaults(default_neuron): assert default_neuron._b == 0.0 assert default_neuron.voltage == 0.0 assert default_neuron.v == 0.0 + assert default_neuron._S == 0.1 assert default_neuron.presyn == set() assert default_neuron.record == False @@ -58,6 +64,8 @@ def test_constructor(): voltage=1, bias=1, p=1, + scaling_factor=0.2, + scaling=True, ) assert neuron.name == "neuron" # from parent abstract class @@ -67,6 +75,8 @@ def test_constructor(): assert neuron.threshold == 1.0 assert neuron.reset_voltage == 1.0 assert neuron.leakage_constant == 1.0 + assert neuron.scaling == True + assert neuron.scaling_factor == 0.2 # and their _ counterparts assert neuron._T == 1.0 assert neuron._R == 1.0 @@ -74,6 +84,7 @@ def test_constructor(): assert neuron._b == 1.0 assert neuron.voltage == 1.0 assert neuron.v == 1.0 + assert neuron._S == 0.2 assert neuron.presyn == set() assert neuron.record == False @@ -101,11 +112,17 @@ def test_constructor_type_errors(param): with pytest.raises(TypeError): LIFNeuron(bias=param) + + with pytest.raises(TypeError): + LIFNeuron(scaling_factor=param) if type(param) is not bool: with pytest.raises(TypeError): LIFNeuron(record=param) + with pytest.raises(TypeError): + LIFNeuron(scaling=param) + @pytest.mark.parametrize("m", [-1, -0.1, 1.1, 10]) def test_warning_on_unrealistic_leakage_constant(m): @@ -129,6 +146,24 @@ def test_invalid_spiking_probablity(p, expected_error): with pytest.raises(expected_error): LIFNeuron(p=p) +@pytest.mark.parametrize( + "scaling_factor, 
expected_error", + [ + (1.1, ValueError), + (0.0, ValueError), + (5.0, ValueError), + (-0.1, ValueError), + (-10.0, ValueError), + (-10, ValueError), + (True, TypeError), + ({}, TypeError), + ], +) +def test_invalid_scaling_factor(scaling_factor, expected_error): + with pytest.raises(expected_error): + LIFNeuron(scaling_factor=scaling_factor) + + def test_update_state_on_default_neuron(default_neuron): reference_spike_hist = [] @@ -202,14 +237,20 @@ def test_show_state_of_custom_voltage_neuron(capsys, custom_voltage_neuron): def test_show_params_on_default_neuron(capsys, default_neuron): assert default_neuron.show_params() == None out, _ = capsys.readouterr() - assert out == "Neuron 'None':\nThreshold\t :0.0 volts,\nReset voltage\t :0.0 volts,\nLeakage Constant :1.0\nBias :0.0\n\n" + assert ( + out + == "Neuron 'None':\nThreshold\t :0.0 volts,\nReset voltage\t :0.0 volts,\nLeakage Constant :1.0\nBias :0.0\n\n" + ) def test_show_params(capsys): neuron = LIFNeuron(threshold=0.7, reset_voltage=0.2, leakage_constant=0.9) assert neuron.show_params() == None out, _ = capsys.readouterr() - assert out == "Neuron 'None':\nThreshold\t :0.7 volts,\nReset voltage\t :0.2 volts,\nLeakage Constant :0.9\nBias :0.0\n\n" + assert ( + out + == "Neuron 'None':\nThreshold\t :0.7 volts,\nReset voltage\t :0.2 volts,\nLeakage Constant :0.9\nBias :0.0\n\n" + ) def test_threshold_setter(default_neuron): @@ -218,6 +259,11 @@ def test_threshold_setter(default_neuron): assert default_neuron.threshold == 0.7 +def test_scaling_factor_setter(default_neuron): + assert default_neuron.scaling_factor == 0.1 + default_neuron.scaling_factor = 0.3 + assert default_neuron.scaling_factor == 0.3 + def test_reset_voltage_setter(default_neuron): assert default_neuron.reset_voltage == 0.0 default_neuron.reset_voltage = 0.2 @@ -254,4 +300,26 @@ def test_named__repr__(capsys, named_neuron): assert out == "LIFNeuron Testing\n" -# TODO add test(s) for show_presynapses method +def test_show_presynapses(capsys, 
default_neuron): + assert default_neuron.show_presynapses() == None + default_neuron.presyn = set() + out, _ = capsys.readouterr() + assert out == "Neuron None receives no external input\n" + + neuron_1 = LIFNeuron("n1") + neuron_2 = LIFNeuron("n2") + syn1 = LearningSynapse(neuron_1, neuron_2) + default_neuron.presyn.add(syn1) + assert default_neuron.show_presynapses() == None + out, _ = capsys.readouterr() + assert (out == "LIFNeuron None receives input via synapse: {s_n1_n2}\n") + + + neuron_3 = LIFNeuron("n3") + syn2 = LearningSynapse(neuron_3, neuron_2) + default_neuron.presyn.add(syn2) + assert default_neuron.show_presynapses() == None + out, _ = capsys.readouterr() + output_exp1 = "LIFNeuron None receives input via synapses: {s_n1_n2, s_n3_n2}\n" + output_exp2 = "LIFNeuron None receives input via synapses: {s_n3_n2, s_n1_n2}\n" + assert (out == output_exp1 or out == output_exp2) diff --git a/tests/unit/simulators/spiking_neural_network/test_neural_network.py b/tests/unit/simulators/spiking_neural_network/test_neural_network.py index 392e958a..816a357c 100644 --- a/tests/unit/simulators/spiking_neural_network/test_neural_network.py +++ b/tests/unit/simulators/spiking_neural_network/test_neural_network.py @@ -2,7 +2,7 @@ from fugu.simulators.SpikingNeuralNetwork.neuralnetwork import NeuralNetwork from fugu.simulators.SpikingNeuralNetwork.neuron import LIFNeuron -from fugu.simulators.SpikingNeuralNetwork.synapse import Synapse +from fugu.simulators.SpikingNeuralNetwork.synapse import Synapse, LearningSynapse @pytest.fixture @@ -37,9 +37,9 @@ def neurons_and_synapses(): neuron_d = LIFNeuron("d") neurons = [neuron_a, neuron_b, neuron_c, neuron_d] - synapse_a_b = Synapse(neuron_a, neuron_b) - synapse_b_c = Synapse(neuron_b, neuron_c) - synapse_c_d = Synapse(neuron_c, neuron_d) + synapse_a_b = LearningSynapse(neuron_a, neuron_b) + synapse_b_c = LearningSynapse(neuron_b, neuron_c) + synapse_c_d = LearningSynapse(neuron_c, neuron_d) synapses = [synapse_a_b, 
synapse_b_c, synapse_c_d] return {"neurons": neurons, "synapses": synapses} @@ -147,7 +147,7 @@ def test_add_synapse_subclass(blank_network): itself. """ - class SubSynapse(Synapse): + class SubSynapse(LearningSynapse): pass synapse = SubSynapse(LIFNeuron("a"), LIFNeuron("b")) @@ -163,7 +163,7 @@ def test_add_synapse_class_type_check(blank_network, neuron_a): "new_synapse", [ (LIFNeuron("0"), LIFNeuron("1")), - (LIFNeuron("0"), LIFNeuron("1"), 1, 1.0), + (LIFNeuron("0"), LIFNeuron("1"), "STDP", 1, 1.0), ], ) def test_add_synapse_tuple(blank_network, new_synapse): @@ -198,8 +198,12 @@ def test_add_synapse(capsys, blank_network, neurons_and_synapses): assert len(blank_network.synps) == 3 assert blank_network.add_synapse(synapses[0]) == None + name = synapses[0]._name_learning_rule out, _ = capsys.readouterr() - assert out == "Warning! Not Added! Synapse s_a_b(1, 1.0) already defined in network. (Use .set_params() to update synapse)\n" + assert ( + out + == f"Warning! Not Added! {name}_Synapse s_a_b(1, 1.0) already defined in network. 
(Use .set_params() to update synapse)\n" + ) @pytest.mark.parametrize( @@ -214,6 +218,19 @@ def test_add_multiple_synapses_check(blank_network, synapses): blank_network.add_multiple_synapses(synapses) +@pytest.mark.parametrize( + "new_synapse", + [ + set, + [], + float, + int + ] +) +def test_update_network_check(blank_network, new_synapse): + with pytest.raises(TypeError): + blank_network.update_network(new_synapse) + def test_add_multiple_synapse(blank_network, neurons_and_synapses): neurons = neurons_and_synapses["neurons"] synapses = neurons_and_synapses["synapses"] @@ -234,6 +251,20 @@ def test_step(blank_network, neurons_and_synapses): # TODO should add_multiple_neurons method accept {} (even though it's an instance of Iterable) + +# @pytest.mark.parametrize( +# "neuron_name, input_values" +# [ +# (None, None), +# (None, object), +# ([], "fail"), +# (object, object), +# ], +# ) +# def test_update_input_neuron_type(blank_network,neuron_name, input_values): +# with pytest.raises(TypeError): +# blank_network.update_input_neuron(neuron_name, input_values) + # TODO list_neurons: why the "\b\b"? # TODO remove NOT SURE IF THIS IS NEEDED section - after legacy test suite is ported over # TODO test update_input_neuron method diff --git a/tests/unit/simulators/spiking_neural_network/test_synapse.py b/tests/unit/simulators/spiking_neural_network/test_synapse.py index b280765e..4e6bfcf5 100644 --- a/tests/unit/simulators/spiking_neural_network/test_synapse.py +++ b/tests/unit/simulators/spiking_neural_network/test_synapse.py @@ -7,6 +7,7 @@ from fugu.simulators.SpikingNeuralNetwork.synapse import Synapse + @pytest.fixture def lif_neuron(): def _inner(name=None): @@ -30,6 +31,7 @@ def default_synapse_w_lif_neurons(): return Synapse(n1, n2) +# TODO: Should we have learning option for the InputNeuron connections?
Need to find a way to explain that @pytest.fixture def default_synapse_w_input_neurons(): n1 = InputNeuron("n1") @@ -37,6 +39,7 @@ def default_synapse_w_input_neurons(): return Synapse(n1, n2) + @pytest.mark.parametrize( "pre_neuron, post_neuron", [ @@ -55,16 +58,23 @@ def test_constructor_exceptions(pre_neuron, post_neuron): Synapse(pre_neuron, post_neuron) -@pytest.mark.parametrize("delay", ["fail", [], set, object]) -def test_contructor_delay_type_check(lif_neuron, delay): +@pytest.mark.parametrize("delay", ["fail", [], set, object,float, tuple]) +def test_constructor_delay_type_check(lif_neuron, delay): with pytest.raises(TypeError): Synapse(lif_neuron("n1"), lif_neuron("n2"), delay=delay) +@pytest.mark.parametrize("weight", ["fail", [], set, object, tuple]) +def test_constructor_weight_type_check(lif_neuron, weight): + with pytest.raises(TypeError): + Synapse(lif_neuron("n1"), lif_neuron("n2"), weight=weight) + @pytest.mark.parametrize( "delay", [ 0, + -1, + -10 ], ) def test_constructor_delay_value_check(lif_neuron, delay): @@ -72,7 +82,9 @@ def test_constructor_delay_value_check(lif_neuron, delay): Synapse(lif_neuron("n1"), lif_neuron("n2"), delay=delay) -def test_constructor_defaults(default_synapse_w_lif_neurons, default_synapse_w_input_neurons): +def test_constructor_defaults( + default_synapse_w_lif_neurons, default_synapse_w_input_neurons +): assert default_synapse_w_lif_neurons.delay == 1 assert default_synapse_w_lif_neurons._d == 1 assert default_synapse_w_lif_neurons.weight == 1.0 @@ -105,6 +117,11 @@ def test_synapse_key(): assert synapse.get_key() == (pre_neuron, post_neuron) + +# # TODO input validation for weight +# def test_weight_setter_type_check(default_synapse_w_lif_neurons, weight) + + def test_weight_setter(default_synapse_w_lif_neurons, default_synapse_w_input_neurons): assert default_synapse_w_lif_neurons.weight == 1.0 default_synapse_w_lif_neurons.weight = 2.0 @@ -115,7 +132,14 @@ def test_weight_setter(default_synapse_w_lif_neurons, 
default_synapse_w_input_ne assert default_synapse_w_input_neurons.weight == 3.0 -@pytest.mark.parametrize("delay", ["fail", [], set, object]) +@pytest.mark.parametrize("weight", ["fail", [], set, object, tuple]) +def test_weight_setter_type_check(default_synapse_w_lif_neurons, weight): + assert default_synapse_w_lif_neurons.weight == 1.0 + with pytest.raises(TypeError): + default_synapse_w_lif_neurons.weight = weight + + +@pytest.mark.parametrize("delay", ["fail", [], set, object, tuple]) def test_delay_setter_type_check(default_synapse_w_lif_neurons, delay): assert default_synapse_w_lif_neurons.delay == 1 with pytest.raises(TypeError): @@ -140,12 +164,18 @@ def test_delay_setter(default_synapse_w_input_neurons): assert default_synapse_w_input_neurons.delay == 2 -@pytest.mark.parametrize("delay", ["fail", [], set, object]) +@pytest.mark.parametrize("delay", ["fail", [], set, object, tuple]) def test_set_params_type_check(default_synapse_w_lif_neurons, delay): with pytest.raises(TypeError): default_synapse_w_lif_neurons.set_params(new_delay=delay) +@pytest.mark.parametrize("weight", ["fail", [], set, object, tuple]) +def test_set_params_weight_type_check(default_synapse_w_lif_neurons, weight): + with pytest.raises(TypeError): + default_synapse_w_lif_neurons.set_params(new_weight=weight) + + @pytest.mark.parametrize( "delay", [ @@ -159,7 +189,9 @@ def test_set_params_value_check(default_synapse_w_lif_neurons, delay): @pytest.mark.parametrize("delay", [2, 3, 4, 100]) -def test_set_params(default_synapse_w_lif_neurons, default_synapse_w_input_neurons, delay): +def test_set_params( + default_synapse_w_lif_neurons, default_synapse_w_input_neurons, delay +): assert default_synapse_w_lif_neurons.delay == 1 default_synapse_w_lif_neurons.set_params(new_delay=delay) assert default_synapse_w_lif_neurons.delay == delay @@ -172,12 +204,19 @@ def test_set_params(default_synapse_w_lif_neurons, default_synapse_w_input_neuro def test_show_params(capsys, default_synapse_w_lif_neurons):
assert default_synapse_w_lif_neurons.show_params() == None out, _ = capsys.readouterr() - assert out == "Synapse LIFNeuron n1(0.0, 0.0, 1.0) -> LIFNeuron n2(0.0, 0.0, 1.0):\n delay : 1\n weight : 1.0\n" + assert ( + out + == "Synapse LIFNeuron n1(0.0, 0.0, 1.0) -> LIFNeuron n2(0.0, 0.0, 1.0):\n delay : 1\n weight : 1.0\n" + ) assert default_synapse_w_lif_neurons.set_params(new_delay=2, new_weight=2.0) == None assert default_synapse_w_lif_neurons.show_params() == None out, _ = capsys.readouterr() - assert out == "Synapse LIFNeuron n1(0.0, 0.0, 1.0) -> LIFNeuron n2(0.0, 0.0, 1.0):\n delay : 2\n weight : 2.0\n" + assert ( + out + == "Synapse LIFNeuron n1(0.0, 0.0, 1.0) -> LIFNeuron n2(0.0, 0.0, 1.0):\n delay : 2\n weight : 2.0\n" + + ) def test_named__str__(capsys, default_synapse_w_lif_neurons): @@ -192,6 +231,3 @@ def test_named__repr__(capsys, default_synapse_w_lif_neurons): assert out == "s_n1_n2\n" -# TODO input validation for weight -# TODO test update_state method -# TODO difference testing with LIFNeuron vs InputNeuron diff --git a/tests/unit/utils/test_keras_model_layers.py b/tests/unit/utils/test_keras_model_layers.py index e7bb691f..8df3513c 100644 --- a/tests/unit/utils/test_keras_model_layers.py +++ b/tests/unit/utils/test_keras_model_layers.py @@ -2,7 +2,6 @@ # fmt: off import numpy as np import pytest -pytest.importorskip("tensorflow") from ..keras_helpers import keras_convolve2d, keras_convolve2d_4dinput, generate_keras_kernel, generate_mock_image from scipy.signal import convolve2d diff --git a/tests/unit/utils/test_whetstone_to_fugu_convolution_layer.py b/tests/unit/utils/test_whetstone_to_fugu_convolution_layer.py index da4ea07d..c4ed4d13 100644 --- a/tests/unit/utils/test_whetstone_to_fugu_convolution_layer.py +++ b/tests/unit/utils/test_whetstone_to_fugu_convolution_layer.py @@ -2,6 +2,7 @@ # fmt: off import numpy as np import pytest +from scipy.signal import convolve2d from fugu.backends import snn_Backend from 
fugu.bricks.keras_convolution_bricks import keras_convolution_2d_4dinput as convolution_2d