Commit 5ff54a9

edit collision_avoidance
1 parent 52c30d8 commit 5ff54a9

2 files changed: 16 additions (+), 15 deletions (-)


src/mdp/collision_avoidance.jl

Lines changed: 1 addition & 1 deletion
@@ -170,7 +170,7 @@ function (U::CollisionAvoidanceValueFunction)(s)
     return GridInterpolations.interpolate(U.grid, U.U, vec(s))
 end

-function MDP(mdp::CollisionAvoidanceMDP)
+function MDP(mdp::CollisionAvoidanceMDP; γ::Float64=1.0)
     return MDP(
         γ,
         nothing, # no ordered states
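
With this change, the conversion to the generic MDP representation takes the discount factor as a keyword argument (default γ = 1.0), which is then passed through to the generic MDP constructor. A minimal usage sketch, assuming the problem struct can be built with its default fields (an assumption; the constructor call is not part of this diff):

    using DecisionMakingProblems
    const p = DecisionMakingProblems

    m = p.CollisionAvoidanceMDP()          # assumed default constructor
    mdp = p.MDP(m)                         # keyword default γ = 1.0 (undiscounted)
    mdp_discounted = p.MDP(m; γ = 0.95)    # hypothetical discounted variant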

test/runtests.jl

Lines changed: 15 additions & 14 deletions
@@ -23,7 +23,7 @@ const p = DecisionMakingProblems
     @test !p.is_terminal(m, state)
     @test min_state <= p.vec(p.cart_pole_transition(m, state, rand(1:2))) <= max_state
     @test p.reward(m, state, rand(1:2)) in [0.0, 1.0]
-    p.MDP(m)
+    mdp = p.MDP(m)
 end

 @testset "collision_avoidance.jl" begin

@@ -35,7 +35,8 @@ end
     @test length(p.vec(rand(p.transition(m, s, optimal_pol(s))))) == 4
     @test p.is_terminal(m, s) == (p.vec(s)[4] < 0.0)
     @test p.reward(m, rand(p.transition(m, s, optimal_pol(s))), rand(m.𝒜)) <= 0
-    p.CollisionAvoidanceValueFunction(m, simple_pol)
+    policy = p.CollisionAvoidanceValueFunction(m, simple_pol)
+    mdp = p.MDP(m)
 end

 @testset "hexworld.jl" begin

@@ -55,15 +56,15 @@ end
     @test p.generate_sr(m, state, action)[1] in p.ordered_states(m) && p.generate_sr(m, state, action)[2] <= 10
     @test p.generate_start_state(m) in p.ordered_states(m)
     @test p.hex_distance(rand(hexes), rand(hexes)) >= 0
-    mdp = p.DiscreteMDP(m)
+    mdp = p.MDP(m)
 end
 @testset "simple_lqr.jl" begin
     m = p.LqrMDP()
     @test p.discount(m) == 1.0
     state = p.generate_start_state(m)
     @test -10 <= rand(p.transition(m, state, rand())) <= 10
     @test p.reward(m, state, rand()) <= 0
-    p.MDP(m)
+    mdp = p.MDP(m)
 end

 @testset "mountain_car.jl" begin

@@ -76,7 +77,7 @@ end
     @test all(state_min <= start_state <= state_max)
     @test all(state_min <= p.mountain_car_transition(start_state, 1) <= state_max)
     @test p.reward(m, start_state, 1) <= 0
-    p.MDP(m)
+    mdp = p.MDP(m)
 end


@@ -91,7 +92,7 @@ end
     @test 0 <= p.observation(m, rand(1:3), rand(1:2)).p <= 1
     @test p.reward(m, rand(1:2), rand(1:3)) <= 0
     @test p.reward(m, [0.1, 0.9], rand(1:3)) <= 0
-    pomdp = p.DiscretePOMDP(m)
+    pomdp = p.POMDP(m)
 end

 @testset "machine_replacement.jl" begin

@@ -105,7 +106,7 @@ end
     @test rand(p.transition(m, rand(1:3), rand(1:4))) in 1:3
     @test rand(p.observation(m, rand(1:4), rand(1:3))) in 1:2
     @test p.reward(m, rand(1:3), rand(1:4)) <= 1.0
-    p.POMDP(m)
+    pomdp = p.POMDP(m)
 end

 @testset "catch.jl" begin

@@ -119,7 +120,7 @@ end
     @test rand(p.transition(m, rand(1:4), rand(1:10))) in 1:4
     @test rand(p.observation(m, rand(1:10), rand(1:4))) in 1:2
     @test p.reward(m, rand(1:4), rand(1:10)) >= 0
-    p.POMDP(m)
+    pomdp = p.POMDP(m)
 end


@@ -130,7 +131,7 @@ end
     @test p.n_actions(m, rand(1:2)) == 2 && p.n_joint_actions(m) == 4
     @test p.reward(m, rand(1:2), [rand(p.ordered_actions(m, 0)), rand(p.ordered_actions(m, 0))]) <= 0.0
     @test p.joint_reward(m, [rand(p.ordered_actions(m, 0)), rand(p.ordered_actions(m, 0))]) <= [0.0, 0.0]
-    p.SimpleGame(m)
+    simplegame = p.SimpleGame(m)
 end

 @testset "rock_paper_scissors.jl" begin

@@ -140,7 +141,7 @@ end
     @test p.n_actions(m, rand(1:2)) == 3 && p.n_joint_actions(m) == 9
     @test -1.0 <= p.reward(m, rand(1:2), [rand(p.ordered_actions(m, 0)), rand(p.ordered_actions(m, 0))]) <= 1.0
     @test [-1.0, -1.0] <= p.joint_reward(m, [rand(p.ordered_actions(m, 0)), rand(p.ordered_actions(m, 0))]) <= [1.0, 1.0]
-    p.SimpleGame(m)
+    simplegame = p.SimpleGame(m)
 end

 @testset "travelers.jl" begin

@@ -150,7 +151,7 @@ end
     @test p.n_actions(m, rand(1:2)) == 99 && p.n_joint_actions(m) == 99^2
     @test 0.0 <= p.reward(m, rand(1:2), [rand(p.ordered_actions(m, 0)), rand(p.ordered_actions(m, 0))]) <= 102
     @test [0.0, 0.0] <= p.joint_reward(m, [rand(p.ordered_actions(m, 0)), rand(p.ordered_actions(m, 0))]) <= [102, 102]
-    p.SimpleGame(m)
+    simplegame = p.SimpleGame(m)
 end

 @testset "predator_prey.jl" begin

@@ -164,7 +165,7 @@ end
     @test 0.0 <= p.transition(m, rand(p.ordered_states(m)), rand(p.ordered_joint_actions(m)), rand(p.ordered_states(m))) <= 1.0
     @test -1.0 <= p.reward(m, rand(1:2), rand(p.ordered_states(m)), rand(p.ordered_joint_actions(m))) <= 10.0
     @test [-1.0, -1.0] <= p.joint_reward(m, rand(p.ordered_states(m)), rand(p.ordered_joint_actions(m))) <= [10.0, 10.0]
-    p.MG(m)
+    mg = p.MG(m)
 end

 @testset "multicaregiver.jl" begin

@@ -180,7 +181,7 @@ end
     @test 0.0 <= p.joint_observation(m, rand(p.ordered_joint_actions(m)), rand(p.ordered_states(m)), rand(p.ordered_joint_observations(m))) <= 1.0
     @test p.joint_reward(m, rand(p.ordered_states(m)), rand(p.ordered_joint_actions(m))) <= [0.0, 0.0]
     @test p.joint_reward(m, rand(Float64, 2), rand(p.ordered_joint_actions(m))) <= [0.0, 0.0]
-    p.POMG(m)
+    pomg = p.POMG(m)
 end

 @testset "collab_predator_prey.jl" begin

@@ -196,5 +197,5 @@ end
     @test 0.0 <= p.transition(m, rand(p.ordered_states(m)), rand(p.ordered_joint_actions(m)), rand(p.ordered_states(m))) <= 1.0
     @test 0.0 <= p.joint_observation(m, rand(p.ordered_joint_actions(m)), rand(p.ordered_states(m)), rand(p.ordered_joint_observations(m))) <= 1.0
     @test -1.0 <= p.reward(m, rand(p.ordered_states(m)), rand(p.ordered_joint_actions(m))) <= 10.0
-    p.DecPOMDP(m)
+    decpomdp = p.DecPOMDP(m)
 end
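
Across the test suite, the converted representations are now bound to locals (mdp, pomdp, simplegame, mg, pomg, decpomdp) instead of being discarded, and the hexworld and crying-baby conversions use the unified p.MDP and p.POMDP names rather than p.DiscreteMDP and p.DiscretePOMDP. A minimal sketch of the resulting pattern, with m standing in for any problem struct built earlier in a testset (the follow-up isa check is hypothetical, not part of this commit):

    mdp = p.MDP(m)         # keep the converted generic MDP for further checks
    @test mdp isa p.MDP    # hypothetical follow-up assertion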
