3 changes: 3 additions & 0 deletions .vscode/settings.json
@@ -102,4 +102,7 @@
"shared_mutex": "cpp"
},
"python.pythonPath": "${workspaceFolder}/bark_ml/python_wrapper/venv/bin/python",
"python.linting.pylintEnabled": true,
"python.linting.enabled": true,
"python.linting.flake8Enabled": false,
}
5 changes: 5 additions & 0 deletions WORKSPACE
@@ -25,3 +25,8 @@ load("@benchmark_database//load:load.bzl", "benchmark_database_release")
benchmark_database_dependencies()
benchmark_database_release()
# --------------------------------------------------

# ------------------- LTL RuleMonitor --------------
load("@rule_monitor_project//util:deps.bzl", "rule_monitor_dependencies")
rule_monitor_dependencies()
# --------------------------------------------------
2 changes: 1 addition & 1 deletion bark_ml/BUILD
@@ -28,7 +28,7 @@ test_suite(
"//bark_ml/tests:py_library_tfa_tests",
"//bark_ml/tests:py_bark_behavior_model_tests",
"//bark_ml/tests:py_tracer_tests",
"//experiments:py_experiment_tests"
"//bark_ml/experiment:py_experiment_tests"
]
)

4 changes: 3 additions & 1 deletion bark_ml/environments/blueprints/__init__.py
@@ -5,4 +5,6 @@
from bark_ml.environments.blueprints.merging.merging import DiscreteMergingBlueprint # pylint: disable=unused-import
from bark_ml.environments.blueprints.intersection.intersection import ContinuousIntersectionBlueprint # pylint: disable=unused-import
from bark_ml.environments.blueprints.intersection.intersection import DiscreteIntersectionBlueprint # pylint: disable=unused-import
from bark_ml.environments.blueprints.configurable.configurable_scenario import ConfigurableScenarioBlueprint # pylint: disable=unused-import
from bark_ml.environments.blueprints.configurable.configurable_scenario import ConfigurableScenarioBlueprint # pylint: disable=unused-import
from bark_ml.environments.blueprints.single_lane.single_lane import SingleLaneBlueprint # pylint: disable=unused-import
from bark_ml.environments.blueprints.single_lane.single_lane import ContinuousSingleLaneBlueprint # pylint: disable=unused-import
from bark_ml.environments.blueprints.single_lane.single_lane import DiscreteSingleLaneBlueprint # pylint: disable=unused-import
Empty file.
153 changes: 153 additions & 0 deletions bark_ml/environments/blueprints/single_lane/single_lane.py
@@ -0,0 +1,153 @@
# Copyright (c) 2020 fortiss GmbH
#
# Authors: Patrick Hart, Julian Bernhard, Klemens Esterle, and
# Tobias Kessler
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT

import os
import numpy as np
from bark.runtime.viewer.buffered_mp_viewer import BufferedMPViewer
from bark.runtime.scenario.scenario_generation.config_with_ease import \
LaneCorridorConfig, ConfigWithEase
from bark.core.world.goal_definition import GoalDefinitionPolygon
from bark.core.geometry import Polygon2d, Point2d

from bark_ml.environments.blueprints.blueprint import Blueprint
from bark_ml.evaluators.reward_shaping_max_steps import RewardShapingEvaluatorMaxSteps
from bark_ml.behaviors.cont_behavior import BehaviorContinuousML
from bark_ml.behaviors.discrete_behavior import BehaviorDiscreteMacroActionsML

# from bark_ml.observers.nearest_state_observer import NearestAgentsObserver
from bark_ml.core.observers import NearestObserver


class SingleLaneLaneCorridorConfig(LaneCorridorConfig):

"""
Configures the a single lane, e.g., the goal.
"""

def __init__(self,
params=None,
**kwargs):
super(SingleLaneLaneCorridorConfig, self).__init__(params, **kwargs)

def goal(self, world):
goal_polygon = Polygon2d(
[-1000, -1000, -1000],
[Point2d(-1, -1), Point2d(-1, 1), Point2d(1, 1), Point2d(1, -1)])
return GoalDefinitionPolygon(goal_polygon)


class SingleLaneBlueprint(Blueprint):
"""The SingleLane blueprint sets up a merging scenario with initial
conditions.

Two lane SingleLane, with the ego vehicle being placed on the right lane
and the ego vehicle's goal on the left lane.
"""

def __init__(self,
params=None,
num_scenarios=250,
random_seed=0,
ml_behavior=None,
viewer=True,
mode="medium"):
if mode == "dense":
ds_min = 30.
ds_max = 40.
if mode == "medium":
ds_min = 20.
ds_max = 35.
params["World"]["remove_agents_out_of_map"] = False

lane_configs = []
is_controlled = True
s_min = 0
s_max = 250
    if is_controlled:
s_min = 40.
s_max = 80.
local_params = params.clone()
local_params["BehaviorIDMClassic"]["DesiredVelocity"] = np.random.uniform(12, 17)
lane_conf = SingleLaneLaneCorridorConfig(params=local_params,
road_ids=[16],
lane_corridor_id=0,
min_vel=10,
max_vel=15,
ds_min=ds_min,
ds_max=ds_max,
s_min=s_min,
s_max=s_max,
controlled_ids=is_controlled)
lane_configs.append(lane_conf)

scenario_generation = \
ConfigWithEase(
num_scenarios=num_scenarios,
        map_file_name=os.path.join(os.path.dirname(__file__), "../../../environments/blueprints/single_lane/single_lane.xodr"),
random_seed=random_seed,
params=params,
lane_corridor_configs=lane_configs)
if viewer:
# viewer = MPViewer(params=params,
# use_world_bounds=True)
viewer = BufferedMPViewer(
params=params,
x_range=[-155, 155],
y_range=[-155, 155],
follow_agent_id=True)
dt = 0.2
params["ML"]["RewardShapingEvaluator"]["RewardShapingPotentials",
"Reward shaping functions.", {
"VelocityPotential" : {
"desired_vel": 20., "vel_dev_max": 20., "exponent": 0.2, "type": "positive"
}
}]
evaluator = RewardShapingEvaluatorMaxSteps(params)
observer = NearestObserver(params)
ml_behavior = ml_behavior

super().__init__(
scenario_generation=scenario_generation,
viewer=viewer,
dt=dt,
evaluator=evaluator,
observer=observer,
ml_behavior=ml_behavior)


class ContinuousSingleLaneBlueprint(SingleLaneBlueprint):
def __init__(self,
params=None,
num_scenarios=25,
random_seed=0,
viewer=True,
mode="dense"):
ml_behavior = BehaviorContinuousML(params)
SingleLaneBlueprint.__init__(self,
params=params,
num_scenarios=num_scenarios,
random_seed=random_seed,
ml_behavior=ml_behavior,
viewer=True,
mode=mode)


class DiscreteSingleLaneBlueprint(SingleLaneBlueprint):
def __init__(self,
params=None,
num_scenarios=25,
random_seed=0,
mode="dense"):
ml_behavior = BehaviorDiscreteMacroActionsML(params)
SingleLaneBlueprint.__init__(self,
params=params,
num_scenarios=num_scenarios,
random_seed=random_seed,
ml_behavior=ml_behavior,
viewer=True,
mode=mode)
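
For orientation, a minimal usage sketch of the new blueprint outside of the gym registration; import paths and parameter values are assumed from the surrounding bark-ml code and are illustrative, not prescribed by this change:

from bark.runtime.commons.parameters import ParameterServer

from bark_ml.environments.single_agent_runtime import SingleAgentRuntime
from bark_ml.environments.blueprints.single_lane.single_lane import \
  ContinuousSingleLaneBlueprint

# Build the blueprint (medium traffic density) and wrap it in the runtime.
params = ParameterServer()
bp = ContinuousSingleLaneBlueprint(params, num_scenarios=10, mode="medium")
env = SingleAgentRuntime(blueprint=bp, render=False)

# Standard gym-style interaction loop.
obs = env.reset()
obs, reward, done, info = env.step(env.action_space.sample())
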
47 changes: 47 additions & 0 deletions bark_ml/environments/blueprints/single_lane/single_lane.xodr
@@ -0,0 +1,47 @@
<?xml version="1.0" standalone="yes"?>
<OpenDRIVE>
<header revMajor="1" revMinor="4" name="" version="1.00" date="Thu Jun 6 10:01:32 2019" north="0.0000000000000000e+00" south="0.0000000000000000e+00" east="0.0000000000000000e+00" west="0.0000000000000000e+00">
</header>
<road name="" length="200" id="16" junction="-1">
<type s="0.0000000000000000e+00" type="town"/>
<planView>
<geometry s="0.0000000000000000e+00" x="5.1126832348793750e+03" y="5.0549844643744773e+03" hdg="1.5707963267948966e+00" length="200">
<arc curvature="2.0000000000000000e-3"/>
</geometry>
</planView>
<elevationProfile>
<elevation s="0.0000000000000000e+00" a="0.0000000000000000e+00" b="0.0000000000000000e+00" c="0.0000000000000000e+00" d="0.0000000000000000e+00"/>
</elevationProfile>
<lateralProfile>
</lateralProfile>
<lanes>
<laneSection s="0.0000000000000000e+00">
<left>
</left>
<center>
<lane id="0" type="driving" level= "0">
<link>
</link>
<roadMark sOffset="0.0000000000000000e+00" type="solid" weight="standard" color="standard" width="1.4999999999999999e-01" laneChange="none" height="1.9999999552965164e-02">
</roadMark>
</lane>
</center>
<right>
<lane id="-1" type="driving" level= "0">
<link>
</link>
<width sOffset="0.0000000000000000e+00" a="3.99" b="0.0000000000000000e+00" c="0.0000000000000000e+00" d="0.0000000000000000e+00"/>
<roadMark sOffset="0.0000000000000000e+00" type="solid" weight="standard" color="standard" width="1.4999999999999999e-01" laneChange="none" height="1.9999999552965164e-02">
</roadMark>
</lane>
</right>
</laneSection>
</lanes>
<objects>
</objects>
<signals>
</signals>
<surface>
</surface>
</road>
</OpenDRIVE>
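
As a quick sanity check of the map above (derived from the file's values, not part of the change): the single planView arc has curvature 2.0e-3 1/m over a length of 200 m, with one 3.99 m wide driving lane to the right of the reference line.

import math

# Geometry of the single <arc> element in single_lane.xodr.
curvature = 2.0e-3  # [1/m]
length = 200.0      # [m]

radius = 1.0 / curvature                      # 500.0 m turning radius
sweep_deg = math.degrees(length * curvature)  # ~22.9 degrees swept by the arc
print(radius, sweep_deg)
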
42 changes: 41 additions & 1 deletion bark_ml/environments/gym.py
@@ -18,7 +18,8 @@
from bark_ml.environments.blueprints.intersection.intersection import \
ContinuousIntersectionBlueprint, DiscreteIntersectionBlueprint
from bark_ml.environments.single_agent_runtime import SingleAgentRuntime

from bark_ml.environments.blueprints.single_lane.single_lane import \
ContinuousSingleLaneBlueprint, DiscreteSingleLaneBlueprint

# highway
class ContinuousHighwayGym(SingleAgentRuntime, gym.Env):
@@ -135,6 +136,37 @@ def __init__(self, *args, **kwargs):
SingleAgentRuntime.__init__(self, *args, **kwargs)


# single lane
class ContinuousSingleLaneGym(SingleAgentRuntime, gym.Env):
"""Highway scenario with continuous behavior model.

Behavior model takes the steering-rate and acceleration.
"""
def __init__(self):
params = ParameterServer(filename=
os.path.join(os.path.dirname(__file__),
"../environments/blueprints/visualization_params.json"))
    cont_single_lane_bp = ContinuousSingleLaneBlueprint(params)
    SingleAgentRuntime.__init__(
      self, blueprint=cont_single_lane_bp, render=True)

class DiscreteSingleLaneGym(SingleAgentRuntime, gym.Env):
  """Single-lane scenario with discrete behavior model.

  The behavior model takes integer macro actions as specified in the
  discrete behavior model.
  """

  def __init__(self, params=None, render=False):
    # Create the ParameterServer inside __init__ rather than as a default
    # argument, so it is not evaluated at import time and shared.
    params = params or ParameterServer(
      filename=os.path.join(os.path.dirname(__file__),
                            "../environments/blueprints/visualization_params.json"))
    discrete_single_lane_bp = DiscreteSingleLaneBlueprint(params)
    SingleAgentRuntime.__init__(
      self, blueprint=discrete_single_lane_bp, render=render)


# register gym envs
register(
id='highway-v0',
@@ -163,4 +195,12 @@ def __init__(self, *args, **kwargs):
register(
id='intersection-v1',
entry_point='bark_ml.environments.gym:DiscreteIntersectionGym'
)
register(
id='singlelane-v0',
entry_point='bark_ml.environments.gym:ContinuousSingleLaneGym'
)
register(
id='singlelane-v1',
entry_point='bark_ml.environments.gym:DiscreteSingleLaneGym'
)
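
A minimal sketch of how the newly registered ids are consumed through the standard gym API; importing bark_ml.environments.gym runs the register() calls above as a side effect:

import gym

# Registers 'singlelane-v0' / 'singlelane-v1' on import.
import bark_ml.environments.gym  # pylint: disable=unused-import

env = gym.make("singlelane-v0")  # continuous single-lane scenario
obs = env.reset()
obs, reward, done, info = env.step(env.action_space.sample())
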
2 changes: 1 addition & 1 deletion bark_ml/environments/single_agent_runtime.py
@@ -52,7 +52,7 @@ def reset(self, scenario=None):
eval_id = self._scenario._eval_agent_ids[0]
self._world.UpdateAgentRTree()
self._world = self._observer.Reset(self._world)
self._world = self._evaluator.Reset(self._world)
self._world = self._evaluator.Reset(self._world, eval_id)
self._world.agents[eval_id].behavior_model = self._ml_behavior

# render
4 changes: 1 addition & 3 deletions bark_ml/evaluators/BUILD
@@ -1,13 +1,11 @@



py_library(
name = "evaluators",
srcs = ["__init__.py",
"goal_reached.py",
"reward_shaping.py",
"reward_shaping_max_steps.py",
"reward_shaping_intersection.py",
"traffic_rules.py",
"commons.py",
"evaluator.py"],
data = ["@bark_project//bark/python_wrapper:core.so"],
3 changes: 2 additions & 1 deletion bark_ml/evaluators/__init__.py
@@ -1,2 +1,3 @@
from bark_ml.evaluators.goal_reached import GoalReached # pylint: disable=unused-import
from bark_ml.evaluators.reward_shaping import RewardShapingEvaluator # pylint: disable=unused-import
from bark_ml.evaluators.reward_shaping import RewardShapingEvaluator # pylint: disable=unused-import
from bark_ml.evaluators.traffic_rules import EvaluatorTrafficRules # pylint: disable=unused-import
4 changes: 3 additions & 1 deletion bark_ml/evaluators/evaluator.py
@@ -18,6 +18,7 @@ def __init__(self,
params=ParameterServer()):
self._params = params
self._viewer = None
self._eval_id = None

def Evaluate(self, observed_world, action):
"""Evaluates the observed world."""
@@ -27,7 +28,8 @@ def Evaluate(self, observed_world, action):
observed_world, eval_results, action)
return reward, done, eval_results

def Reset(self, world):
def Reset(self, world, eval_id):
self._eval_id = eval_id
world.ClearEvaluators()
evaluators = self._add_evaluators()
for key, evaluator in evaluators.items():
4 changes: 2 additions & 2 deletions bark_ml/evaluators/goal_reached.py
@@ -63,5 +63,5 @@ def _evaluate(self, observed_world, eval_results, action):
success * self._goal_reward
return reward, done, eval_results

def Reset(self, world):
return super(GoalReached, self).Reset(world)
def Reset(self, world, eval_id):
return super(GoalReached, self).Reset(world, eval_id)
4 changes: 2 additions & 2 deletions bark_ml/evaluators/reward_shaping.py
@@ -161,5 +161,5 @@ def _evaluate(self, observed_world, eval_results, action):
success * self._goal_reward + reward_shaping_signal
return reward, done, eval_results

def Reset(self, world):
return super(RewardShapingEvaluator, self).Reset(world)
def Reset(self, world, eval_id):
return super(RewardShapingEvaluator, self).Reset(world, eval_id)
4 changes: 2 additions & 2 deletions bark_ml/evaluators/reward_shaping_intersection.py
@@ -85,5 +85,5 @@ def _evaluate(self, observed_world, eval_results, action):
success * self._goal_reward + reward_shaping_signal
return reward, done, eval_results

def Reset(self, world):
return super(RewardShapingEvaluator, self).Reset(world)
def Reset(self, world, eval_id):
return super(RewardShapingEvaluator, self).Reset(world, eval_id)
4 changes: 2 additions & 2 deletions bark_ml/evaluators/reward_shaping_max_steps.py
@@ -61,5 +61,5 @@ def _evaluate(self, observed_world, eval_results, action):
success * self._goal_reward + reward_shaping_signal
return reward, done, eval_results

def Reset(self, world):
return super(RewardShapingEvaluator, self).Reset(world)
def Reset(self, world, eval_id):
return super(RewardShapingEvaluator, self).Reset(world, eval_id)
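
All Reset overrides above now forward the controlled agent's id so the base evaluator can store it in self._eval_id. Downstream evaluators that override Reset need the same two-argument signature; a hedged sketch, where MyEvaluator is hypothetical and not part of this change:

from bark_ml.evaluators.reward_shaping import RewardShapingEvaluator


class MyEvaluator(RewardShapingEvaluator):
  """Hypothetical evaluator illustrating the updated Reset signature."""

  def Reset(self, world, eval_id):
    # Forward eval_id; the base class stores it in self._eval_id before
    # clearing and re-registering the world evaluators.
    return super(MyEvaluator, self).Reset(world, eval_id)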