Skip to content

Commit 225324a

Browse files
committed
Switch from linear to Gompertz
1 parent 149922a commit 225324a

File tree

7 files changed

+654
-22
lines changed

7 files changed

+654
-22
lines changed

app.py

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -85,6 +85,15 @@
8585
st.plotly_chart(anm)
8686

8787
params = extract_motivation_parameters(data)
88+
mapping = params.get("mapping_block", {})
89+
if mapping:
90+
m_min = float(mapping.get("motivation_min", 0.1))
91+
m_max = 3.6 / float(params["normal_v_0"])
92+
st.caption(
93+
"Motivation mapping: "
94+
f"{mapping.get('mapping_function', 'gompertz')} "
95+
f"(clamp [{m_min:.2f}, {m_max:.2f}])"
96+
)
8897

8998
# get_agents_positions
9099
plot_motivation_model(params)

simulation.py

Lines changed: 49 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,7 @@
2525
from jupedsim.internal.notebook_utils import read_sqlite_file
2626

2727
from src import motivation_model as mm
28+
from src import motivation_mapping as mmap
2829
from src.inifile_parser import (
2930
parse_accessible_areas,
3031
parse_destinations,
@@ -244,6 +245,7 @@ def init_motivation_model(
244245

245246
normal_v_0 = parse_normal_v_0(_data)
246247
normal_time_gap = parse_normal_time_gap(_data)
248+
mapping_block = mmap.ensure_mapping_block(_data["motivation_parameters"])
247249
choose_motivation_strategy = parse_motivation_strategy(_data)
248250
number_agents = parse_number_agents(_data)
249251
competition_max = _data["motivation_parameters"]["competition_max"]
@@ -279,6 +281,7 @@ def init_motivation_model(
279281
value_probability=_data["motivation_parameters"][
280282
"value_probability_sorting"
281283
],
284+
motivation_min=float(mapping_block["motivation_min"]),
282285
)
283286
if choose_motivation_strategy == "EC-V":
284287
motivation_strategy = mm.EVCStrategy(
@@ -299,14 +302,28 @@ def init_motivation_model(
299302
percent=percent,
300303
motivation_door_center=motivation_door_center,
301304
evc=False,
305+
motivation_min=float(mapping_block["motivation_min"]),
302306
)
307+
308+
a_ped, d_ped, a_wall, d_wall, a_ped_min, a_ped_max, d_ped_min, d_ped_max = (
309+
parse_velocity_init_parameters(_data)
310+
)
311+
parameter_mapper = mmap.MotivationParameterMapper(
312+
mapping_block=mapping_block,
313+
normal_v_0=normal_v_0,
314+
strength_default=a_ped,
315+
strength_min=a_ped_min,
316+
strength_max=a_ped_max,
317+
range_default=d_ped,
318+
)
303319
# =================
304320
motivation_model = mm.MotivationModel(
305321
door_point1=(motivation_doors[0][0][0], motivation_doors[0][0][1]),
306322
door_point2=(motivation_doors[0][1][0], motivation_doors[0][1][1]),
307323
normal_v_0=normal_v_0,
308324
normal_time_gap=normal_time_gap,
309325
motivation_strategy=motivation_strategy,
326+
parameter_mapper=parameter_mapper,
310327
)
311328
motivation_model.print_details()
312329
return motivation_model
@@ -553,30 +570,39 @@ def process_agent(
553570
}
554571

555572
motivation_i = motivation_model.motivation_strategy.motivation(params)
573+
if motivation_model.parameter_mapper is not None:
574+
motivation_i = motivation_model.parameter_mapper.clamp_motivation(motivation_i)
556575
agent_value = motivation_model.motivation_strategy.get_value(agent_id=agent.id)
557576
# if motivation_i > 1:
558577
# logging.error(
559578
# f"Motivation too high. Count: {simulation.iteration_count()}. Agent: {agent.id}. Motivation: {motivation_i = }"
560579
# )
561580

562581
v_0, time_gap = motivation_model.calculate_motivation_state(motivation_i, agent.id)
563-
min_motivtion = _data["motivation_parameters"]["min_value_low"]
564-
# Adjust agent parameters based on motivation
565-
# if agent_value > 0.5:
566-
agent.model.strength_neighbor_repulsion = adjust_parameter_linearly(
567-
motivation_i=motivation_i,
568-
min_value=a_ped_min,
569-
default_value=default_strength,
570-
max_value=a_ped_max,
571-
min_motivation=min_motivtion,
572-
)
573-
agent.model.range_neighbor_repulsion = adjust_parameter_linearly(
574-
motivation_i=motivation_i,
575-
min_value=d_ped_min,
576-
default_value=default_range,
577-
max_value=d_ped_max,
578-
min_motivation=min_motivtion,
579-
)
582+
if motivation_model.parameter_mapper is not None:
583+
agent.model.strength_neighbor_repulsion = (
584+
motivation_model.parameter_mapper.strength_neighbor_repulsion(motivation_i)
585+
)
586+
agent.model.range_neighbor_repulsion = (
587+
motivation_model.parameter_mapper.range_neighbor_repulsion(motivation_i)
588+
)
589+
else:
590+
min_motivtion = _data["motivation_parameters"]["min_value_low"]
591+
# Adjust agent parameters based on motivation
592+
agent.model.strength_neighbor_repulsion = adjust_parameter_linearly(
593+
motivation_i=motivation_i,
594+
min_value=a_ped_min,
595+
default_value=default_strength,
596+
max_value=a_ped_max,
597+
min_motivation=min_motivtion,
598+
)
599+
agent.model.range_neighbor_repulsion = adjust_parameter_linearly(
600+
motivation_i=motivation_i,
601+
min_value=d_ped_min,
602+
default_value=default_range,
603+
max_value=d_ped_max,
604+
min_motivation=min_motivtion,
605+
)
580606
# if simulation.elapsed_time() > 25:
581607
# agent.model.strength_neighbor_repulsion = 0.6
582608
# agent.model.range_neighbor_repulsion = 0.2
@@ -602,7 +628,12 @@ def process_agent(
602628
do_adjust_buffer = _data["motivation_parameters"]["do_adjust_buffer"]
603629

604630
if do_adjust_buffer:
605-
agent.model.agent_buffer = adjust_buffer_size_linearly(motivation_i)
631+
if motivation_model.parameter_mapper is not None:
632+
agent.model.agent_buffer = motivation_model.parameter_mapper.buffer(
633+
motivation_i
634+
)
635+
else:
636+
agent.model.agent_buffer = adjust_buffer_size_linearly(motivation_i)
606637
if False and agent.position[1] > 15:
607638
print(
608639
f"{agent.id}: ({agent.position[0]}, {agent.position[1]}), {motivation_i = :.2f}, {agent.model.agent_buffer =}"

src/docs.py

Lines changed: 27 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -188,3 +188,30 @@ def main() -> None:
188188
""",
189189
unsafe_allow_html=True,
190190
)
191+
192+
st.markdown(
193+
r"""
194+
## Runtime Mapping (Gompertz)
195+
196+
During simulation, the operational model parameters are mapped from motivation
197+
with Gompertz curves and clamped motivation:
198+
199+
$$
200+
m_i^{\mathrm{used}} = \mathrm{clip}\left(m_i,\; m_{\min},\; \frac{3.6}{v_0^{\mathrm{normal}}}\right)
201+
$$
202+
203+
Each mapped parameter uses low/normal/high anchors at
204+
$m \in \{0.1,\;1,\;3\}$:
205+
206+
- desired speed $\tilde v_0(m)$
207+
- time gap $\tilde T(m)$
208+
- buffer $\tilde b(m)$
209+
- strength neighbor repulsion $\tilde A(m)$
210+
211+
The interaction range is fixed:
212+
213+
$$
214+
\tilde D(m)=d_{\text{ped}} \quad \text{(constant)}
215+
$$
216+
"""
217+
)

0 commit comments

Comments (0)