-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathmain.py
More file actions
executable file
·119 lines (96 loc) · 5.62 KB
/
main.py
File metadata and controls
executable file
·119 lines (96 loc) · 5.62 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
import sys
sys.path.append('src')
# Importing pytorch here is forbidden
import matplotlib.pyplot as plt
import numpy as np
import FileInteraction as FI
import InputManager as IM
import NeuralNetwork as NN
import argparse
import os
import warnings
#import numpy as np
def plot_results(obj_vals, cross_vals, res_path):
    """Plot the generator and critic loss curves and save them as a PDF.

    obj_vals / cross_vals are nested sequences (one inner sequence per
    epoch) of torch tensors; they are detached and flattened before
    plotting. The figure is written to ``res_path + '/loss.pdf'``; the
    folder is created if it does not exist yet.
    """
    assert len(obj_vals) == len(
        cross_vals), 'Length mismatch between the curves'

    def _flatten(nested):
        # Detach every tensor so it can be converted to a numpy value.
        return [item.detach().numpy() for epoch in nested for item in epoch]

    gen_losses = _flatten(obj_vals)
    critic_losses = _flatten(cross_vals)

    # Plot saved in results folder
    plt.plot(range(len(gen_losses)), gen_losses,
             label="Generator : ", color="blue")
    plt.plot(range(len(critic_losses)), critic_losses,
             label="Critic : ", color="green")
    plt.legend()

    # Create the destination folder on first use.
    if not os.path.exists(res_path):
        os.makedirs(res_path)
    plt.savefig(res_path+'/loss.pdf')
    plt.close()
def main(prefix):
    """Entry point: parse the CLI, optionally train the network, then run
    the generator over the whole input box and save the predictions.

    Parameters
    ----------
    prefix : str
        Prepended to every default path so the script can be launched from
        another working directory (e.g. ``main('../')``).
    """
    parser = argparse.ArgumentParser(description="""Neural network to paint halos from cosmic density fields of dark matter
    """, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-p', '--param', help='Path to json file containing the parameters for the program. See example at default location.',
                        default=prefix+'parameters/param_used.json')
    parser.add_argument(
        '-r', '--result', help='Path to a folder where the results will be created. Each trials should have its own folder, so that no data get lost !', default=prefix+"result")
    parser.add_argument('-i', '--input', help='Path to the input data -> need to be specified. The path focus on the folder where two files are stored : input.npy and expected.npy. The first one contains the data of the dark matter density field and the second one the data for the halo count density field.', default=prefix+'input')
    parser.add_argument('-t', help='Indicate that we should train our model',
                        action='store_true', dest='isTraining')
    parser.add_argument('-m', '--model', help='Path to a folder containing the model/where the model will be stored. If the flag -t is specified, the model will be trained and save the model when done in this file, even if a previous model was saved here. If the -t flag is not specified, it will just load the data from the model.', default=prefix+"model/model.pt")
    parser.add_argument('-rt', '--resume_training', help="If this parameter is specified, training will be resumed at the latest saved state given by the file from the -m parameter. If the file do not exist, this parameter is ignored. Do not use this flag without the -t flag !", action='store_true', dest='resumeTraining')
    args = parser.parse_args()

    # Resuming only makes sense while training; silently drop the flag otherwise.
    args.resumeTraining = args.resumeTraining and args.isTraining

    # Where the loss curves are saved/retrieved.
    lossPath = args.result+'/'+'loss'

    # Get run parameters.
    param = FI.readFileJson(args.param)

    # Prepare input data.
    inputManager = IM.InputManager(args.input, param['DataStructure'])

    # Create (and possibly load) the network.
    network = NN.NeuralNetwork(param['NN_structure'], param['training'],
                               args.isTraining, args.model, args.resumeTraining, lossPath)

    # Loss is restored when needed, e.g. in deployment mode or when resuming training.
    lossValues = NN.NeuralNetwork.initLossArray()

    if args.isTraining:
        try:
            if not os.path.exists(args.result):
                os.makedirs(args.result)
            lossValues = network.trainNetwork(
                inputManager, param['training'], args.model)
        except BaseException:
            # Persist the model before propagating, so a crash or a
            # KeyboardInterrupt does not lose the training run.
            # (BaseException keeps the original bare-except semantics.)
            network.saveParameters(args.model, lossPath)
            raise

    # The generator footprint trims 8 voxels per tile, so tiles advance by
    # (size - 8) and (N - 8) usable voxels determine how many tiles fit.
    stride = inputManager.size - 8
    nGenBox = int(np.floor((inputManager.N - 8) / stride))

    # Output spans the tiled region; inputCopy mirrors the full input box.
    output = np.empty((nGenBox * stride, nGenBox * stride, nGenBox * stride))
    inputCopy = np.empty((inputManager.N, inputManager.N, inputManager.N))

    # Sweep the box tile by tile, predicting from the test data.
    for i in range(nGenBox):
        xs = i * stride
        for j in range(nGenBox):
            ys = j * stride
            for k in range(nGenBox):
                zs = k * stride
                testData = inputManager.getTestData(xs, ys, zs)
                # BUG FIX: the third (z-axis) slice previously reused
                # ys:ys+size, corrupting the saved input copy along z.
                inputCopy[xs:xs+inputManager.size,
                          ys:ys+inputManager.size,
                          zs:zs+inputManager.size] = testData.x[0, 0, :, :, :].cpu().detach().numpy()
                generatorOutput = network.forward(testData.x)
                output[i*stride:(i+1)*stride,
                       j*stride:(j+1)*stride,
                       k*stride:(k+1)*stride] = generatorOutput[0, 0, :, :, :].cpu().detach().numpy()

    # Saving the output; file names depend on the run mode.
    if args.isTraining:
        np.save(args.result+'/'+'train_inputCopy.npy', inputCopy)
        np.save(args.result+'/'+'train_output.npy', output)
    else:
        np.save(args.result+'/'+'test_inputCopy.npy', inputCopy)
        np.save(args.result+'/'+'test_output.npy', output)
# Script entry point: empty prefix, so all default paths are relative to CWD.
if __name__ == '__main__':
    main('')