44data_path = get_data_repo_path ()+ "Al256_reduced/"
55
66"""
7- ex01_run_singleshot.py: Shows how a neural network can be trained on material data using this framework.
8- It uses preprocessed data, that is read in from *.npy files.
7+ ex01_run_singleshot.py: Shows how a neural network can be trained on material
8+ data using this framework. It uses preprocessed data that is read in
9+ from *.npy files.
910"""
1011
1112printout ("Welcome to FESL." )
@@ -18,48 +19,61 @@ def run_example01(desired_loss_improvement_factor=1):
1819
1920 ####################
2021 # PARAMETERS
21- # All parameters are handled from a central parameters class that contains subclasses.
22+ # All parameters are handled from a central parameters class that
23+ # contains subclasses.
2224 ####################
25+
2326 test_parameters = fesl .Parameters ()
27+ # Currently, the splitting into training, validation and test sets is
28+ # done on a "by snapshot" basis. Specify how this is
29+ # done by providing a list containing entries of the form
30+ # "tr", "va" and "te".
2431 test_parameters .data .data_splitting_type = "by_snapshot"
2532 test_parameters .data .data_splitting_snapshots = ["tr" , "va" , "te" ]
33+
34+ # Specify the data scaling.
2635 test_parameters .data .input_rescaling_type = "feature-wise-standard"
2736 test_parameters .data .output_rescaling_type = "normal"
28- test_parameters . descriptors . twojmax = 11
29- test_parameters . targets . ldos_gridsize = 10
37+
38+ # Specify the used activation function.
3039 test_parameters .network .layer_activations = ["ReLU" ]
40+
41+ # Specify the training parameters.
3142 test_parameters .running .max_number_epochs = 20
3243 test_parameters .running .mini_batch_size = 40
3344 test_parameters .running .learning_rate = 0.00001
3445 test_parameters .running .trainingtype = "Adam"
35- test_parameters .running .use_gpu = False
36- test_parameters .running .use_horovod = False
37- test_parameters .running .use_compression = False
3846
3947 ####################
4048 # DATA
41- # Read data into RAM.
42- # We have to specify the directories we want to read the snapshots from.
43- # The Handlerinterface will also return input and output scaler objects. These are used internally to scale
44- # the data. The objects can be used after successful training for inference or plotting.
49+ # Add and prepare snapshots for training.
4550 ####################
4651
4752 data_handler = fesl .DataHandler (test_parameters )
4853
4954 # Add a snapshot we want to use in to the list.
50- data_handler .add_snapshot ("Al_debug_2k_nr0.in.npy" , data_path , "Al_debug_2k_nr0.out.npy" , data_path , output_units = "1/Ry" )
51- data_handler .add_snapshot ("Al_debug_2k_nr1.in.npy" , data_path , "Al_debug_2k_nr1.out.npy" , data_path , output_units = "1/Ry" )
52- data_handler .add_snapshot ("Al_debug_2k_nr2.in.npy" , data_path , "Al_debug_2k_nr2.out.npy" , data_path , output_units = "1/Ry" )
53-
55+ data_handler .add_snapshot ("Al_debug_2k_nr0.in.npy" , data_path ,
56+ "Al_debug_2k_nr0.out.npy" , data_path ,
57+ output_units = "1/Ry" )
58+ data_handler .add_snapshot ("Al_debug_2k_nr1.in.npy" , data_path ,
59+ "Al_debug_2k_nr1.out.npy" , data_path ,
60+ output_units = "1/Ry" )
61+ data_handler .add_snapshot ("Al_debug_2k_nr2.in.npy" , data_path ,
62+ "Al_debug_2k_nr2.out.npy" , data_path ,
63+ output_units = "1/Ry" )
5464 data_handler .prepare_data ()
5565 printout ("Read data: DONE." )
5666
5767 ####################
5868 # NETWORK SETUP
5969 # Set up the network and trainer we want to use.
70+ # The layer sizes can be specified before reading data,
71+ # but it is safer this way.
6072 ####################
6173
62- test_parameters .network .layer_sizes = [data_handler .get_input_dimension (), 100 , data_handler .get_output_dimension ()]
74+ test_parameters .network .layer_sizes = [data_handler .get_input_dimension (),
75+ 100 ,
76+ data_handler .get_output_dimension ()]
6377
6478 # Setup network and trainer.
6579 test_network = fesl .Network (test_parameters )
@@ -76,10 +90,15 @@ def run_example01(desired_loss_improvement_factor=1):
7690 printout ("Training: DONE." )
7791
7892 ####################
93+ # RESULTS.
94+ # Print the used parameters and check whether the loss decreased enough.
95+ ####################
96+
7997 printout ("Parameters used for this experiment:" )
8098 test_parameters .show ()
8199
82- if desired_loss_improvement_factor * test_trainer .initial_test_loss < test_trainer .final_test_loss :
100+ if desired_loss_improvement_factor * test_trainer .initial_test_loss \
101+ < test_trainer .final_test_loss :
83102 return False
84103 else :
85104 return True
@@ -89,5 +108,7 @@ def run_example01(desired_loss_improvement_factor=1):
89108 if run_example01 ():
90109 printout ("Successfully ran ex01_run_singleshot." )
91110 else :
92- raise Exception ("Ran ex01_run_singleshot but something was off. If you haven't changed any parameters in "
93- "the example, there might be a problem with your installation." )
111+ raise Exception ("Ran ex01_run_singleshot but something was off."
112+ " If you haven't changed any parameters in "
113+ "the example, there might be a problem with your"
114+ " installation." )
0 commit comments