1
1
module DoublePendulum
2
2
3
- using NeuralOperators
4
- using Flux
5
- using CUDA
6
- using JLD2
3
+ using DataDeps, CSV, DataFrames, MLUtils
4
+ using NeuralOperators, Flux
5
+ using CUDA, FluxTraining, BSON
7
6
8
- include (" data.jl" )
7
+ function register_double_pendulum_chaotic ()
8
+ register (DataDep (
9
+ " DoublePendulumChaotic" ,
10
+ """
11
+ Dataset was generated on the basis of 21 individual runs of a double pendulum.
12
+ Each of the recorded sequences lasted around 40s and consisted of around 17500 frames.
9
13
10
- __init__ () = register_double_pendulum_chaotic ()
14
+ * `x_red`: Horizontal pixel coordinate of the red point (the central pivot to the first pendulum)
15
+ * `y_red`: Vertical pixel coordinate of the red point (the central pivot to the first pendulum)
16
+ * `x_green`: Horizontal pixel coordinate of the green point (the first pendulum)
17
+ * `y_green`: Vertical pixel coordinate of the green point (the first pendulum)
18
+ * `x_blue`: Horizontal pixel coordinate of the blue point (the second pendulum)
19
+ * `y_blue`: Vertical pixel coordinate of the blue point (the second pendulum)
20
+
21
+ Page: https://developer.ibm.com/exchanges/data/all/double-pendulum-chaotic/
22
+ """ ,
23
+ " https://dax-cdn.cdn.appdomain.cloud/dax-double-pendulum-chaotic/2.0.1/double-pendulum-chaotic.tar.gz" ,
24
+ " 4ca743b4b783094693d313ebedc2e8e53cf29821ee8b20abd99f8fb4c0866f8d" ,
25
+ post_fetch_method= unpack
26
+ ))
27
+ end
28
+
29
+ function get_data (; i= 0 , n= - 1 )
30
+ data_path = joinpath (datadep " DoublePendulumChaotic" , " original" , " dpc_dataset_csv" )
31
+ df = CSV. read (
32
+ joinpath (data_path, " $i .csv" ),
33
+ DataFrame,
34
+ header= [:x_red , :y_red , :x_green , :y_green , :x_blue , :y_blue ]
35
+ )
36
+ data = (n < 0 ) ? collect (Matrix (df)' ) : collect (Matrix (df)' )[:, 1 : n]
37
+
38
+ return Float32 .(data)
39
+ end
40
+
41
+ function preprocess (𝐱; Δt= 1 , nx= 30 , ny= 30 )
42
+ # move red point to (0, 0)
43
+ xs_red, ys_red = 𝐱[1 , :], 𝐱[2 , :]
44
+ 𝐱[3 , :] -= xs_red; 𝐱[5 , :] -= xs_red
45
+ 𝐱[4 , :] -= ys_red; 𝐱[6 , :] -= ys_red
46
+
47
+ # needs only green and blue points
48
+ 𝐱 = reshape (𝐱[3 : 6 , 1 : Δt: end ], 1 , 4 , :)
49
+ # velocity of green and blue points
50
+ ∇𝐱 = 𝐱[:, :, 2 : end ] - 𝐱[:, :, 1 : (end - 1 )]
51
+ # merge info of pos and velocity
52
+ 𝐱 = cat (𝐱[:, :, 1 : (end - 1 )], ∇𝐱, dims= 1 )
53
+
54
+ # with info of first nx steps to inference next ny steps
55
+ n = size (𝐱)[end ] - (nx + ny) + 1
56
+ 𝐱s = Array {Float32} (undef, size (𝐱)[1 : 2 ]. .. , nx, n)
57
+ 𝐲s = Array {Float32} (undef, size (𝐱)[1 : 2 ]. .. , ny, n)
58
+ for i in 1 : n
59
+ 𝐱s[:, :, :, i] .= 𝐱[:, :, i: (i+ nx- 1 )]
60
+ 𝐲s[:, :, :, i] .= 𝐱[:, :, (i+ nx): (i+ nx+ ny- 1 )]
61
+ end
62
+
63
+ return 𝐱s, 𝐲s
64
+ end
11
65
12
- function update_model! (model_file_path, model)
13
- model = cpu (model)
14
- jldsave (model_file_path; model)
15
- @warn " model updated!"
66
+ function get_dataloader (; n_file= 20 , Δt= 1 , nx= 30 , ny= 30 , ratio= 0.9 , batchsize= 100 )
67
+ 𝐱s, 𝐲s = Array {Float32} (undef, 2 , 4 , nx, 0 ), Array {Float32} (undef, 2 , 4 , ny, 0 )
68
+ for i in 0 : (n_file- 1 )
69
+ 𝐱s_i, 𝐲s_i = preprocess (get_data (i= i), Δt= Δt, nx= nx, ny= ny)
70
+ 𝐱s, 𝐲s = cat (𝐱s, 𝐱s_i, dims= 4 ), cat (𝐲s, 𝐲s_i, dims= 4 )
71
+ end
72
+
73
+ data = shuffleobs ((𝐱s, 𝐲s))
74
+ data_train, data_test = splitobs (data, at= ratio)
75
+
76
+ loader_train = Flux. DataLoader (data_train, batchsize= batchsize, shuffle= true )
77
+ loader_test = Flux. DataLoader (data_test, batchsize= batchsize, shuffle= false )
78
+
79
+ return loader_train, loader_test
16
80
end
17
81
18
- function train (; Δt= 1 )
82
+ __init__ () = register_double_pendulum_chaotic ()
83
+
84
+ function train (; Δt= 1 , epochs= 20 )
19
85
if has_cuda ()
20
86
@info " CUDA is on"
21
87
device = gpu
@@ -24,46 +90,27 @@ function train(; Δt=1)
24
90
device = cpu
25
91
end
26
92
27
- m = Chain (
28
- Dense (2 , 64 ),
29
- OperatorKernel (64 => 64 , (4 , 16 ), FourierTransform, gelu),
30
- OperatorKernel (64 => 64 , (4 , 16 ), FourierTransform, gelu),
31
- OperatorKernel (64 => 64 , (4 , 16 ), FourierTransform, gelu),
32
- OperatorKernel (64 => 64 , (4 , 16 ), FourierTransform),
33
- Dense (64 , 128 , gelu),
34
- Dense (128 , 2 ),
35
- ) |> device
93
+ model = FourierNeuralOperator (ch= (2 , 64 , 64 , 64 , 64 , 64 , 128 , 2 ), modes= (4 , 16 ), σ= gelu)
94
+ data = get_dataloader (Δt= Δt)
95
+ optimiser = Flux. Optimiser (WeightDecay (1f-4 ), Flux. ADAM (1f-3 ))
96
+ loss_func = l₂loss
36
97
37
- loss (𝐱, 𝐲) = sum (abs2, 𝐲 .- m (𝐱)) / size (𝐱)[end ]
98
+ learner = Learner (
99
+ model, data, optimiser, loss_func,
100
+ ToDevice (device, device),
101
+ Checkpointer (joinpath (@__DIR__ , " ../model/" ))
102
+ )
38
103
39
- opt = Flux . Optimiser ( WeightDecay ( 1f-4 ), Flux . ADAM ( 1f-3 ) )
104
+ fit! (learner, epochs )
40
105
41
- loader_train, loader_test = get_dataloader (Δt= Δt)
42
-
43
- losses = Float32[]
44
- function validate ()
45
- validation_loss = sum (loss (device (𝐱), device (𝐲)) for (𝐱, 𝐲) in loader_test)/ length (loader_test)
46
- @info " loss: $validation_loss "
47
-
48
- push! (losses, validation_loss)
49
- (losses[end ] == minimum (losses)) && update_model! (joinpath (@__DIR__ , " ../model/model.jld2" ), m)
50
- end
51
- call_back = Flux. throttle (validate, 10 , leading= false , trailing= true )
52
-
53
- data = [(𝐱, 𝐲) for (𝐱, 𝐲) in loader_train] |> device
54
- for e in 1 : 20
55
- @info " Epoch $e \n η: $(opt. os[2 ]. eta) "
56
- @time Flux. train! (loss, params (m), data, opt, cb= call_back)
57
- (e% 3 == 0 ) && (opt. os[2 ]. eta /= 2 )
58
- end
106
+ return learner
59
107
end
60
108
61
109
"""
    get_model()

Load a trained model from the newest checkpoint file in `../model/`
(relative to this source file) and return it.

Takes the last entry of `readdir` (which returns names in sorted order);
this assumes checkpoint file names sort chronologically — TODO confirm
against the checkpoint writer's naming scheme. Throws an informative error
when no checkpoint exists instead of an opaque `BoundsError`.
"""
function get_model()
    model_path = joinpath(@__DIR__, "../model/")
    checkpoints = readdir(model_path)
    isempty(checkpoints) &&
        error("no model checkpoint found in $model_path — run `train()` first")
    model_file = checkpoints[end]

    return BSON.load(joinpath(model_path, model_file), @__MODULE__)[:model]
end
68
115
69
- end
116
+ end # module
0 commit comments