diff --git a/pyretic/examples/gardenlib.py b/pyretic/examples/gardenlib.py new file mode 100644 index 00000000..8ddf8ddc --- /dev/null +++ b/pyretic/examples/gardenlib.py @@ -0,0 +1,7 @@ + +from pyretic.kinetic.util.rewriting import * + +def redirectToGardenWall(): + client_ips = [IP('10.0.0.1'), IP('10.0.0.2'),IP('10.0.0.4'),IP('10.0.0.5'),IP('10.0.0.6'),IP('10.0.0.7'),IP('10.0.0.8')] + rewrite_policy = rewriteDstIPAndMAC(client_ips, '10.0.0.3') + return rewrite_policy diff --git a/pyretic/kinetic/README.md b/pyretic/kinetic/README.md new file mode 100644 index 00000000..e69de29b diff --git a/pyretic/kinetic/SETUP b/pyretic/kinetic/SETUP new file mode 100644 index 00000000..b88e6d9d --- /dev/null +++ b/pyretic/kinetic/SETUP @@ -0,0 +1,10 @@ +========================= +Set $KINETICPATH +========================= + +* Write command below in Linux command line: +$ export KINETICPATH=$HOME/pyretic/pyretic/kinetic + +* Put this in "~/.profile" to make it execute every time to log in. +Log off and log in again. + diff --git a/pyretic/kinetic/__init__.py b/pyretic/kinetic/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/pyretic/kinetic/apps/__init__.py b/pyretic/kinetic/apps/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/pyretic/kinetic/apps/auth.py b/pyretic/kinetic/apps/auth.py new file mode 100644 index 00000000..9065f021 --- /dev/null +++ b/pyretic/kinetic/apps/auth.py @@ -0,0 +1,93 @@ + +from pyretic.lib.corelib import * +from pyretic.lib.std import * + +from pyretic.kinetic.fsm_policy import * +from pyretic.kinetic.drivers.json_event import JSONEvent +from pyretic.kinetic.smv.model_checker import * + + +##################################################################################################### +# * App launch +# - pyretic.py pyretic.kinetic.apps.auth +# +# * Mininet Generation (in "~/pyretic/pyretic/kinetic" directory) +# - sudo mn --controller=remote,ip=127.0.0.1 --mac --arp --switch ovsk --link=tc --topo=single,3 +# +# * Start ping from h1 to h2 +# - mininet> h1 ping h2 +# +# * Events to allow traffic "h1 ping h2" (in "~/pyretic/pyretic/kinetic" directory) +# - python json_sender.py -n auth -l True --flow="{srcip=10.0.0.1}" -a 127.0.0.1 -p 50001 +# - python json_sender.py -n auth -l True --flow="{srcip=10.0.0.2}" -a 127.0.0.1 -p 50001 +##################################################################################################### + + +class auth(DynamicPolicy): + def __init__(self): + + ### DEFINE THE LPEC FUNCTION + + def lpec(f): + return match(srcip=f['srcip']) + + ## SET UP TRANSITION FUNCTIONS + + @transition + def authenticated(self): + self.case(occurred(self.event),self.event) + + @transition + def policy(self): + self.case(is_true(V('authenticated')),C(identity)) + self.default(C(drop)) + + ### SET UP THE FSM DESCRIPTION + + self.fsm_def = FSMDef( + authenticated=FSMVar(type=BoolType(), + init=False, + trans=authenticated), + policy=FSMVar(type=Type(Policy,{drop,identity}), + init=drop, + trans=policy)) + + ### SET UP POLICY AND EVENT STREAMS + + fsm_pol = FSMPolicy(lpec,self.fsm_def) + json_event = JSONEvent() + json_event.register_callback(fsm_pol.event_handler) + + super(auth,self).__init__(fsm_pol) + + +def main(): + pol = auth() + + # For NuSMV + smv_str = fsm_def_to_smv_model(pol.fsm_def) + mc = ModelChecker(smv_str,'auth') + + + ## Add specs + mc.add_spec("FAIRNESS\n authenticated;") + + ### If authentication event is true, next policy state is 'allow' + mc.add_spec("SPEC AG (authenticated -> AX 
policy=policy_1)") + + ### If authentication event is false, next policy state is 'drop' + mc.add_spec("SPEC AG (!authenticated -> AX policy=drop)") + + ### It is always possible for the policy state to go to 'allow' + mc.add_spec("SPEC AG (EF policy=policy_1)") + + ### Policy state is 'drop' until authentication is true. + mc.add_spec("SPEC A [ policy=drop U authenticated ]") + + mc.save_as_smv_file() + mc.verify() + + # Ask deployment + ask_deploy() + + return pol >> flood() diff --git a/pyretic/kinetic/apps/auth_8021x.py b/pyretic/kinetic/apps/auth_8021x.py new file mode 100644 index 00000000..51273018 --- /dev/null +++ b/pyretic/kinetic/apps/auth_8021x.py @@ -0,0 +1,94 @@ + +from pyretic.lib.corelib import * +from pyretic.lib.std import * + +from pyretic.kinetic.fsm_policy import * +from pyretic.kinetic.drivers.json_event import JSONEvent +from pyretic.kinetic.smv.model_checker import * + + +##################################################################################################### +# * App launch +# - pyretic.py pyretic.kinetic.apps.auth_8021x +# +# * Mininet Generation (in "~/pyretic/pyretic/kinetic" directory) +# - sudo mn --controller=remote,ip=127.0.0.1 --mac --arp --switch ovsk --link=tc --topo=single,3 +# +# * Start ping from h1 to h2 +# - mininet> h1 ping h2 +# +# * Events to allow traffic "h1 ping h2" (in "~/pyretic/pyretic/kinetic" directory) +# - python json_sender.py -n authenticated_1x -l True --flow="{srcip=10.0.0.1}" -a 127.0.0.1 -p 50001 +# - python json_sender.py -n authenticated_1x -l True --flow="{srcip=10.0.0.2}" -a 127.0.0.1 -p 50001 +##################################################################################################### + + +class auth_8021x(DynamicPolicy): + def __init__(self): + + ### DEFINE THE LPEC FUNCTION + + def lpec(f): + return match(srcip=f['srcip']) + + ## SET UP TRANSITION FUNCTIONS + + @transition + def authenticated_1x(self): + self.case(occurred(self.event),self.event) + + @transition + def policy(self): + self.case(is_true(V('authenticated_1x')),C(identity)) + self.default(C(drop)) + + ### SET UP THE FSM DESCRIPTION + + self.fsm_def = FSMDef( + authenticated_1x=FSMVar(type=BoolType(), + init=False, + trans=authenticated_1x), + policy=FSMVar(type=Type(Policy,{drop,identity}), + init=drop, + trans=policy)) + + ### SET UP POLICY AND EVENT STREAMS + + fsm_pol = FSMPolicy(lpec,self.fsm_def) + json_event = JSONEvent() + json_event.register_callback(fsm_pol.event_handler) + + super(auth_8021x,self).__init__(fsm_pol) + + +def main(): + pol = auth_8021x() + + # For NuSMV + smv_str = fsm_def_to_smv_model(pol.fsm_def) + mc = ModelChecker(smv_str,'auth_8021x') + + + ## Add specs + mc.add_spec("FAIRNESS\n authenticated_1x;") + + ### If authentication event is true, next policy state is 'allow' + mc.add_spec("SPEC AG (authenticated_1x -> AX policy=policy_1)") + + ### If authentication event is false, next policy state is 'drop' + mc.add_spec("SPEC AG (!authenticated_1x -> AX policy=drop)") + + ### It is always possible for the policy state to go to 'allow' + mc.add_spec("SPEC AG (EF policy=policy_1)") + + ### Policy state is 'drop' until authentication is true. 
+ mc.add_spec("SPEC A [ policy=drop U authenticated_1x ]") + + mc.save_as_smv_file() + mc.verify() + + # Ask deployment + ask_deploy() + + + return pol >> flood() diff --git a/pyretic/kinetic/apps/auth_only.py b/pyretic/kinetic/apps/auth_only.py new file mode 100644 index 00000000..75660058 --- /dev/null +++ b/pyretic/kinetic/apps/auth_only.py @@ -0,0 +1,90 @@ + +from pyretic.lib.corelib import * +from pyretic.lib.std import * + +from pyretic.kinetic.fsm_policy import * +from pyretic.kinetic.drivers.json_event import JSONEvent +from pyretic.kinetic.smv.model_checker import * + + +##################################################################################################### +# * App launch +# - pyretic.py pyretic.kinetic.apps.auth +# +# * Mininet Generation (in "~/pyretic/pyretic/kinetic" directory) +# - sudo mn --controller=remote,ip=127.0.0.1 --mac --arp --switch ovsk --link=tc --topo=single,3 +# +# * Start ping from h1 to h2 +# - mininet> h1 ping h2 +# +# * Events to allow traffic "h1 ping h2" (in "~/pyretic/pyretic/kinetic" directory) +# - python json_sender.py -n auth -l True --flow="{srcip=10.0.0.1}" -a 127.0.0.1 -p 50001 +# - python json_sender.py -n auth -l True --flow="{srcip=10.0.0.2}" -a 127.0.0.1 -p 50001 +##################################################################################################### + + +class auth_only(DynamicPolicy): + def __init__(self): + + ### DEFINE THE LPEC FUNCTION + + def lpec(f): + return match(srcip=f['srcip']) + + ## SET UP TRANSITION FUNCTIONS + + @transition + def authenticated(self): + self.case(occurred(self.event),self.event) + + @transition + def policy(self): + self.case(is_true(V('authenticated')),C(identity)) + self.default(C(drop)) + + ### SET UP THE FSM DESCRIPTION + + self.fsm_def = FSMDef( + authenticated=FSMVar(type=BoolType(), + init=False, + trans=authenticated), + policy=FSMVar(type=Type(Policy,{drop,identity}), + init=drop, + trans=policy)) + + ### SET UP POLICY AND EVENT STREAMS + + fsm_pol = FSMPolicy(lpec,self.fsm_def) + json_event = JSONEvent() + json_event.register_callback(fsm_pol.event_handler) + + super(auth_only,self).__init__(fsm_pol) + + +def main(): + pol = auth_only() + + # For NuSMV + smv_str = fsm_def_to_smv_model(pol.fsm_def) + mc = ModelChecker(smv_str,'auth_only') + + + ## Add specs + mc.add_spec("FAIRNESS\n authenticated;") + + ### If authentication event is true, next policy state is 'allow' + mc.add_spec("SPEC AG (authenticated -> AX policy=policy_2)") + + ### If authentication event is false, next policy state is 'drop' + mc.add_spec("SPEC AG (!authenticated -> AX policy=policy_1)") + + ### It is always possible for the policy state to go to 'allow' + mc.add_spec("SPEC AG (EF policy=policy_2)") + + ### Policy state is 'drop' until authentication is true. 
+ mc.add_spec("SPEC A [ policy=policy_1 U authenticated ]") + + mc.save_as_smv_file() + mc.verify() + + return pol diff --git a/pyretic/kinetic/apps/auth_web.py b/pyretic/kinetic/apps/auth_web.py new file mode 100644 index 00000000..4176fbf2 --- /dev/null +++ b/pyretic/kinetic/apps/auth_web.py @@ -0,0 +1,93 @@ + +from pyretic.lib.corelib import * +from pyretic.lib.std import * + +from pyretic.kinetic.fsm_policy import * +from pyretic.kinetic.drivers.json_event import JSONEvent +from pyretic.kinetic.smv.model_checker import * + + +##################################################################################################### +# * App launch +# - pyretic.py pyretic.kinetic.apps.auth_web +# +# * Mininet Generation (in "~/pyretic/pyretic/kinetic" directory) +# - sudo mn --controller=remote,ip=127.0.0.1 --mac --arp --switch ovsk --link=tc --topo=single,3 +# +# * Start ping from h1 to h2 +# - mininet> h1 ping h2 +# +# * Events to allow traffic "h1 ping h2" (in "~/pyretic/pyretic/kinetic" directory) +# - python json_sender.py -n authenticated_web -l True --flow="{srcip=10.0.0.1}" -a 127.0.0.1 -p 50001 +# - python json_sender.py -n authenticated_web -l True --flow="{srcip=10.0.0.2}" -a 127.0.0.1 -p 50001 +##################################################################################################### + + +class auth_web(DynamicPolicy): + def __init__(self): + + ### DEFINE THE LPEC FUNCTION + + def lpec(f): + return match(srcip=f['srcip']) + + ## SET UP TRANSITION FUNCTIONS + + @transition + def authenticated_web(self): + self.case(occurred(self.event),self.event) + + @transition + def policy(self): + self.case(is_true(V('authenticated_web')),C(identity)) + self.default(C(drop)) + + ### SET UP THE FSM DESCRIPTION + + self.fsm_def = FSMDef( + authenticated_web=FSMVar(type=BoolType(), + init=False, + trans=authenticated_web), + policy=FSMVar(type=Type(Policy,{drop,identity}), + init=drop, + trans=policy)) + + ### SET UP POLICY AND EVENT STREAMS + + fsm_pol = FSMPolicy(lpec,self.fsm_def) + json_event = JSONEvent() + json_event.register_callback(fsm_pol.event_handler) + + super(auth_web,self).__init__(fsm_pol) + + +def main(): + pol = auth_web() + + # For NuSMV + smv_str = fsm_def_to_smv_model(pol.fsm_def) + mc = ModelChecker(smv_str,'auth_web') + + + ## Add specs + mc.add_spec("FAIRNESS\n authenticated_web;") + + ### If authentication event is true, next policy state is 'allow' + mc.add_spec("SPEC AG (authenticated_web -> AX policy=policy_1)") + + ### If authentication event is false, next policy state is 'drop' + mc.add_spec("SPEC AG (!authenticated_web -> AX policy=drop)") + + ### It is always possible for the policy state to go to 'allow' + mc.add_spec("SPEC AG (EF policy=policy_1)") + + ### Policy state is 'drop' until authentication is true. 
+ mc.add_spec("SPEC A [ policy=drop U authenticated_web ]") + + mc.save_as_smv_file() + mc.verify() + + # Ask deployment + ask_deploy() + + return pol >> flood() diff --git a/pyretic/kinetic/apps/gardenwall.py b/pyretic/kinetic/apps/gardenwall.py new file mode 100644 index 00000000..b1dd7c72 --- /dev/null +++ b/pyretic/kinetic/apps/gardenwall.py @@ -0,0 +1,123 @@ + +from pyretic.lib.corelib import * +from pyretic.lib.std import * + +from pyretic.kinetic.fsm_policy import * +from pyretic.kinetic.drivers.json_event import JSONEvent +from pyretic.kinetic.smv.model_checker import * +from pyretic.kinetic.util.rewriting import * +from pyretic.kinetic.apps.mac_learner import * + +##################################################################################################### +# * App launch +# - pyretic.py pyretic.kinetic.apps.gardenwall +# +# * Mininet Generation (in "~/pyretic/pyretic/kinetic" directory) +# - sudo mn --controller=remote,ip=127.0.0.1 --mac --arp --switch ovsk --link=tc --topo=single,3 +# +# * Start ping from h1 to h2 +# - mininet> h1 ping h2 +# +# * Send Event to block traffic "h1 ping h2" (in "~/pyretic/pyretic/kinetic" directory) +# - python json_sender.py -n infected -l True --flow="{srcip=10.0.0.1}" -a 127.0.0.1 -p 50001 +# +# * Now, make h1's flow not be affected by IDS infection event(in "~/pyretic/pyretic/kinetic" directory) +# h1's traffic will be forwarded to 10.0.0.3. +# - python json_sender.py -n exempt -l True --flow="{srcip=10.0.0.1}" -a 127.0.0.1 -p 50001 +# +# * Events to now allfow traffic again (in "~/pyretic/pyretic/kinetic" directory) +# - python json_sender.py -n infected -l False --flow="{srcip=10.0.0.1}" -a 127.0.0.1 -p 50001 +##################################################################################################### + +class gardenwall(DynamicPolicy): + def __init__(self): + + # Garden Wall + def redirectToGardenWall(): + client_ips = [IP('10.0.0.1'), IP('10.0.0.2')] + rewrite_policy = rewriteDstIPAndMAC(client_ips, '10.0.0.3') + return rewrite_policy + + ### DEFINE THE LPEC FUNCTION + + def lpec(f): + return match(srcip=f['srcip']) + + ## SET UP TRANSITION FUNCTIONS + + @transition + def exempt(self): + self.case(occurred(self.event),self.event) + + @transition + def infected(self): + self.case(occurred(self.event),self.event) + + @transition + def policy(self): + # If exempt, redirect to gardenwall. 
+ # - rewrite dstip to 10.0.0.3 + self.case(is_true(V('infected')) & is_true(V('exempt')),C(redirectToGardenWall())) + + # If infected, drop + self.case(is_true(V('infected')) ,C(drop)) + + # Else, identity + self.default(C(identity)) + + + ### SET UP THE FSM DESCRIPTION + + self.fsm_def = FSMDef( + infected=FSMVar(type=BoolType(), + init=False, + trans=infected), + exempt=FSMVar(type=BoolType(), + init=False, + trans=exempt), + policy=FSMVar(type=Type(Policy,{drop,identity,redirectToGardenWall()}), + init=identity, + trans=policy)) + + ### SET UP POLICY AND EVENT STREAMS + + fsm_pol = FSMPolicy(lpec,self.fsm_def) + json_event = JSONEvent() + json_event.register_callback(fsm_pol.event_handler) + + super(gardenwall,self).__init__(fsm_pol) + + +def main(): + pol = gardenwall() + + # For NuSMV + smv_str = fsm_def_to_smv_model(pol.fsm_def) + mc = ModelChecker(smv_str,'gardenwall') + + ## Add specs + mc.add_spec("FAIRNESS\n infected;") + mc.add_spec("FAIRNESS\n exempt;") + + # Now, traffic is dropped only when exempt is false and infected is true + mc.add_spec("SPEC AG (infected & !exempt -> AX policy=policy_1)") + + # If exempt is true, next policy state to redirect to gardenwall, even if infected + mc.add_spec("SPEC AG (infected & exempt -> AX policy=policy_3)") + + # If infected is false, next policy state is always 'allow' + mc.add_spec("SPEC AG (!infected -> AX policy=policy_2)") + + ### Policy state is 'allow' until infected is true. + mc.add_spec("SPEC A [ policy=policy_2 U infected ]") + + # Save NuSMV file + mc.save_as_smv_file() + + # Verify + mc.verify() + + # Ask deployment + ask_deploy() + + return pol >> mac_learner() diff --git a/pyretic/kinetic/apps/gardenwall_only.py b/pyretic/kinetic/apps/gardenwall_only.py new file mode 100644 index 00000000..b36743e5 --- /dev/null +++ b/pyretic/kinetic/apps/gardenwall_only.py @@ -0,0 +1,120 @@ + +from pyretic.lib.corelib import * +from pyretic.lib.std import * + +from pyretic.kinetic.fsm_policy import * +from pyretic.kinetic.drivers.json_event import JSONEvent +from pyretic.kinetic.smv.model_checker import * +from pyretic.kinetic.util.rewriting import * +from pyretic.kinetic.apps.mac_learner import * + +##################################################################################################### +# * App launch +# - pyretic.py pyretic.kinetic.apps.gardenwall +# +# * Mininet Generation (in "~/pyretic/pyretic/kinetic" directory) +# - sudo mn --controller=remote,ip=127.0.0.1 --mac --arp --switch ovsk --link=tc --topo=single,3 +# +# * Start ping from h1 to h2 +# - mininet> h1 ping h2 +# +# * Send Event to block traffic "h1 ping h2" (in "~/pyretic/pyretic/kinetic" directory) +# - python json_sender.py -n infected -l True --flow="{srcip=10.0.0.1}" -a 127.0.0.1 -p 50001 +# +# * Now, make h1's flow not be affected by IDS infection event(in "~/pyretic/pyretic/kinetic" directory) +# h1's traffic will be forwarded to 10.0.0.3. 
+# - python json_sender.py -n exempt -l True --flow="{srcip=10.0.0.1}" -a 127.0.0.1 -p 50001 +# +# * Events to now allfow traffic again (in "~/pyretic/pyretic/kinetic" directory) +# - python json_sender.py -n infected -l False --flow="{srcip=10.0.0.1}" -a 127.0.0.1 -p 50001 +##################################################################################################### + +class gardenwall_only(DynamicPolicy): + def __init__(self): + + # Garden Wall + def redirectToGardenWall(): + client_ips = [IP('10.0.0.1'), IP('10.0.0.2')] + rewrite_policy = rewriteDstIPAndMAC(client_ips, '10.0.0.3') + return rewrite_policy + + ### DEFINE THE LPEC FUNCTION + + def lpec(f): + return match(srcip=f['srcip']) + + ## SET UP TRANSITION FUNCTIONS + + @transition + def exempt(self): + self.case(occurred(self.event),self.event) + + @transition + def infected(self): + self.case(occurred(self.event),self.event) + + @transition + def policy(self): + # If exempt, redirect to gardenwall. + # - rewrite dstip to 10.0.0.3 + self.case(is_true(V('infected')) & is_true(V('exempt')),C(redirectToGardenWall())) + + # If infected, drop + self.case(is_true(V('infected')) ,C(drop)) + + # Else, identity + self.default(C(identity)) + + + ### SET UP THE FSM DESCRIPTION + + self.fsm_def = FSMDef( + infected=FSMVar(type=BoolType(), + init=False, + trans=infected), + exempt=FSMVar(type=BoolType(), + init=False, + trans=exempt), + policy=FSMVar(type=Type(Policy,{drop,identity,redirectToGardenWall()}), + init=identity, + trans=policy)) + + ### SET UP POLICY AND EVENT STREAMS + + fsm_pol = FSMPolicy(lpec,self.fsm_def) + json_event = JSONEvent() + json_event.register_callback(fsm_pol.event_handler) + + super(gardenwall_only,self).__init__(fsm_pol) + + +def main(): + pol = gardenwall_only() + + # For NuSMV + smv_str = fsm_def_to_smv_model(pol.fsm_def) + mc = ModelChecker(smv_str,'gardenwall_only') + + ## Add specs + mc.add_spec("FAIRNESS\n infected;") + mc.add_spec("FAIRNESS\n exempt;") + + # Now, traffic is dropped only when exempt is false and infected is true + mc.add_spec("SPEC AG (infected & !exempt -> AX policy=policy_1)") + + # If exempt is true, next policy state to redirect to gardenwall, even if infected + mc.add_spec("SPEC AG (infected & exempt -> AX policy=policy_3)") + + # If infected is false, next policy state is always 'allow' + mc.add_spec("SPEC AG (!infected -> AX policy=policy_2)") + + ### Policy state is 'allow' until infected is true. 
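+    ### (policy_1, policy_2 and policy_3 are the names the NuSMV model gives to the
+    ###  members of the policy value set; per the comments above they correspond to
+    ###  drop, identity and the gardenwall redirect, respectively.)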
+ mc.add_spec("SPEC A [ policy=policy_2 U infected ]") + + # Save NuSMV file + mc.save_as_smv_file() + + # Verify + mc.verify() + + return pol diff --git a/pyretic/kinetic/apps/ids.py b/pyretic/kinetic/apps/ids.py new file mode 100644 index 00000000..16009296 --- /dev/null +++ b/pyretic/kinetic/apps/ids.py @@ -0,0 +1,114 @@ + +from pyretic.lib.corelib import * +from pyretic.lib.std import * + +from pyretic.kinetic.fsm_policy import * +from pyretic.kinetic.drivers.json_event import JSONEvent +from pyretic.kinetic.smv.model_checker import * + + +##################################################################################################### +# * App launch +# - pyretic.py pyretic.kinetic.apps.ids +# +# * Mininet Generation (in "~/pyretic/pyretic/kinetic" directory) +# - sudo mn --controller=remote,ip=127.0.0.1 --mac --arp --switch ovsk --link=tc --topo=single,3 +# +# * Start ping from h1 to h2 +# - mininet> h1 ping h2 +# +# * Events to block traffic "h1 ping h2" (in "~/pyretic/pyretic/kinetic" directory) +# - python json_sender.py -n infected -l True --flow="{srcip=10.0.0.1}" -a 127.0.0.1 -p 50001 +# +# * Events to again allow traffic "h1 ping h2" (in "~/pyretic/pyretic/kinetic" directory) +# - python json_sender.py -n infected -l False --flow="{srcip=10.0.0.1}" -a 127.0.0.1 -p 50001 +##################################################################################################### + +### Define a class for the application, subclassed from DynamicPolicy +class ids(DynamicPolicy): + def __init__(self): + + ### 1. DEFINE THE LPEC FUNCTION + + def lpec(f): + # Packets with same source IP + # will have a same "state" (thus, same policy applied). + return match(srcip=f['srcip']) + + + ### 2. SET UP TRANSITION FUNCTIONS + + @transition + def infected(self): + # Return the variable's own value. + # If True, return True. If False, return False. + self.case(occurred(self.event),self.event) + + @transition + def policy(self): + # If "infected" is True, change policy to "drop" + self.case(is_true(V('infected')),C(drop)) + + # Default policy is "indentity", which is "allow". + self.default(C(identity)) + + + ### 3. SET UP THE FSM DESCRIPTION + + self.fsm_def = FSMDef( + infected=FSMVar(type=BoolType(), + init=False, + trans=infected), + policy=FSMVar(type=Type(Policy,{drop,identity}), + init=identity, + trans=policy)) + + + ### 4. SET UP POLICY AND EVENT STREAMS + + ### This part pretty much remains same for any application + fsm_pol = FSMPolicy(lpec,self.fsm_def) + json_event = JSONEvent() + json_event.register_callback(fsm_pol.event_handler) + ### This part pretty much remains same for any application + + # Specify application class name here. (e.g., "ids") + super(ids,self).__init__(fsm_pol) + + +def main(): + + # DynamicPolicy that is going to be returned + pol = ids() + + # For NuSMV + smv_str = fsm_def_to_smv_model(pol.fsm_def) + mc = ModelChecker(smv_str,'ids') + + ## Add specs + mc.add_spec("FAIRNESS\n infected;") + + ### If infected event is true, next policy state is 'drop' + mc.add_spec("SPEC AG (infected -> AX policy=drop)") + + ### If infected event is false, next policy state is 'allow' + mc.add_spec("SPEC AG (!infected -> AX policy=policy_1)") + + ### Policy state is 'allow' until infected is true. + mc.add_spec("SPEC A [ policy=policy_1 U infected ]") + + ### It is always possible to go back to 'allow' + mc.add_spec("SPEC AG EF policy=policy_1") + + # Save NuSMV file + mc.save_as_smv_file() + + # Verify + mc.verify() + + # Ask deployment + ask_deploy() + + # Return DynamicPolicy. 
+ # flood() will take for of forwarding for this simple example. + return pol >> flood() diff --git a/pyretic/kinetic/apps/ids_only.py b/pyretic/kinetic/apps/ids_only.py new file mode 100644 index 00000000..6ca5ef9c --- /dev/null +++ b/pyretic/kinetic/apps/ids_only.py @@ -0,0 +1,111 @@ + +from pyretic.lib.corelib import * +from pyretic.lib.std import * + +from pyretic.kinetic.fsm_policy import * +from pyretic.kinetic.drivers.json_event import JSONEvent +from pyretic.kinetic.smv.model_checker import * + + +##################################################################################################### +# * App launch +# - pyretic.py pyretic.kinetic.apps.ids +# +# * Mininet Generation (in "~/pyretic/pyretic/kinetic" directory) +# - sudo mn --controller=remote,ip=127.0.0.1 --mac --arp --switch ovsk --link=tc --topo=single,3 +# +# * Start ping from h1 to h2 +# - mininet> h1 ping h2 +# +# * Events to block traffic "h1 ping h2" (in "~/pyretic/pyretic/kinetic" directory) +# - python json_sender.py -n infected -l True --flow="{srcip=10.0.0.1}" -a 127.0.0.1 -p 50001 +# +# * Events to again allow traffic "h1 ping h2" (in "~/pyretic/pyretic/kinetic" directory) +# - python json_sender.py -n infected -l False --flow="{srcip=10.0.0.1}" -a 127.0.0.1 -p 50001 +##################################################################################################### + +### Define a class for the application, subclassed from DynamicPolicy +class ids_only(DynamicPolicy): + def __init__(self): + + ### 1. DEFINE THE LPEC FUNCTION + + def lpec(f): + # Packets with same source IP + # will have a same "state" (thus, same policy applied). + return match(srcip=f['srcip']) + + + ### 2. SET UP TRANSITION FUNCTIONS + + @transition + def infected(self): + # Return the variable's own value. + # If True, return True. If False, return False. + self.case(occurred(self.event),self.event) + + @transition + def policy(self): + # If "infected" is True, change policy to "drop" + self.case(is_true(V('infected')),C(drop)) + + # Default policy is "indentity", which is "allow". + self.default(C(identity)) + + + ### 3. SET UP THE FSM DESCRIPTION + + self.fsm_def = FSMDef( + infected=FSMVar(type=BoolType(), + init=False, + trans=infected), + policy=FSMVar(type=Type(Policy,{drop,identity}), + init=identity, + trans=policy)) + + + ### 4. SET UP POLICY AND EVENT STREAMS + + ### This part pretty much remains same for any application + fsm_pol = FSMPolicy(lpec,self.fsm_def) + json_event = JSONEvent() + json_event.register_callback(fsm_pol.event_handler) + ### This part pretty much remains same for any application + + # Specify application class name here. (e.g., "ids") + super(ids_only,self).__init__(fsm_pol) + + +def main(): + + # DynamicPolicy that is going to be returned + pol = ids_only() + + # For NuSMV + smv_str = fsm_def_to_smv_model(pol.fsm_def) + mc = ModelChecker(smv_str,'ids_only') + + ## Add specs + mc.add_spec("FAIRNESS\n infected;") + + ### If infected event is true, next policy state is 'drop' + mc.add_spec("SPEC AG (infected -> AX policy=policy_1)") + + ### If infected event is false, next policy state is 'allow' + mc.add_spec("SPEC AG (!infected -> AX policy=policy_2)") + + ### Policy state is 'allow' until infected is true. + mc.add_spec("SPEC A [ policy=policy_2 U infected ]") + + ### It is always possible to go back to 'allow' + mc.add_spec("SPEC AG EF policy=policy_2") + + # Save NuSMV file + mc.save_as_smv_file() + + # Verify + mc.verify() + + # Return DynamicPolicy. 
+ # flood() will take for of forwarding for this simple example. + return pol diff --git a/pyretic/kinetic/apps/mac_learner.py b/pyretic/kinetic/apps/mac_learner.py new file mode 100644 index 00000000..1269a02a --- /dev/null +++ b/pyretic/kinetic/apps/mac_learner.py @@ -0,0 +1,117 @@ + +from pyretic.lib.corelib import * +from pyretic.lib.std import * +from pyretic.kinetic.util.resetting_q import * + +from pyretic.kinetic.fsm_policy import * +from pyretic.kinetic.drivers.json_event import JSONEvent +from pyretic.kinetic.smv.model_checker import * + + +##################################################################################################### +# * App launch +# - pyretic.py pyretic.kinetic.apps.mac_learner +# +# * Mininet Generation (in "~/pyretic/pyretic/kinetic" directory) +# - sudo mininet.sh --topo=clique,3,3 +# +# * Start ping from h1 to h2 +# - mininet> h1 ping h2 +# +# * Events are internal +# - Mac Learner application will automatically react to +# topology change (e.g., link down and up) emulated from Mininet, and successfully +# forward traffic until no route exists between two hosts. +##################################################################################################### + + +class mac_learner(DynamicPolicy): + def __init__(self): + max_port = 8 + port_range = range(max_port+1) + def int_to_policy(i): + return flood() if i==0 else fwd(i) + pol_range = map(int_to_policy,port_range) + + ### DEFINE THE LPEC FUNCTION + + def lpec(f): + return match(dstmac=f['dstmac'], + switch=f['switch']) + + ## SET UP TRANSITION FUNCTIONS + + @transition + def topo_change(self): + self.case(occurred(self.event),self.event) + self.default(C(False)) + + @transition + def port(self): + self.case(is_true(V('topo_change')),C(0)) + self.case(occurred(self.event) & (V('port')==C(0)),self.event) +# self.default(C(0)) + + @transition + def policy(self): + for i in port_range: + self.case(V('port')==C(i),C(int_to_policy(i))) + self.default(C(flood())) + + ### SET UP THE FSM DESCRIPTION + + self.fsm_def = FSMDef( + topo_change=FSMVar(type=BoolType(), + init=False, + trans=topo_change), + port=FSMVar(type=Type(int,set(port_range)), + init=0, + trans=port), + policy=FSMVar(type=Type(Policy,set(pol_range)), + init=flood(), + trans=policy)) + + ### DEFINE QUERY CALLBACKS + + def q_callback(pkt): + flow = frozendict(dstmac=pkt['srcmac'],switch=pkt['switch']) + return fsm_pol.event_handler(Event('port',pkt['inport'],flow)) + + ### SET UP POLICY AND EVENT STREAMS + + fsm_pol = FSMPolicy(lpec,self.fsm_def) + rq = resetting_q(query.packets,limit=1,group_by=['srcmac','switch']) + rq.register_callback(q_callback) + + super(mac_learner,self).__init__(fsm_pol + rq) + + +def main(): + pol = mac_learner() + + # For NuSMV + smv_str = fsm_def_to_smv_model(pol.fsm_def) + mc = ModelChecker(smv_str,'mac_learner') + + mc.add_spec("FAIRNESS\n topo_change;") + + for i in range(1): + ## Add specs + mc.add_spec("SPEC AG (topo_change -> AX port=0)") + mc.add_spec("SPEC AG (port=0 -> AG EF port>0)") + mc.add_spec("SPEC ! AG (port=1 -> EX port=2)") + mc.add_spec("SPEC AG (port>0 -> A [ port>0 U topo_change ] )") +# mc.add_spec("SPEC ! AG A [ port>0 U topo_change ]") +# mc.add_spec("SPEC AG (port=1 -> A [ port=1 U topo_change ] )") +# mc.add_spec("SPEC ! 
AG (port=2 -> A [ port=1 U topo_change ] )") +# mc.add_spec("SPEC AG (port=1 -> EF port=2)") +# mc.add_spec("SPEC AG (port=1 -> A [ !(port=2) U port=0 ])") +# mc.add_spec("SPEC AG (port=1 -> A [ !(port=2) U topo_change ])") + + mc.save_as_smv_file() + mc.verify() + + # Ask deployment + ask_deploy() + + return pol diff --git a/pyretic/kinetic/apps/monitor.py b/pyretic/kinetic/apps/monitor.py new file mode 100644 index 00000000..3ffeae89 --- /dev/null +++ b/pyretic/kinetic/apps/monitor.py @@ -0,0 +1,91 @@ +from pyretic.lib.corelib import * +from pyretic.lib.std import * + +from pyretic.kinetic.fsm_policy import * +from pyretic.kinetic.drivers.json_event import JSONEvent +from pyretic.kinetic.smv.model_checker import * +from pyretic.lib.query import * +import shlex, subprocess + + + +BASE_CMD = 'python /home/mininet/pyretic/pyretic/kinetic/json_sender.py -n rate -l' + +BYTES_FOR_RATE2 = 5000 +BYTES_FOR_RATE3 = 10000 + +class monitor(DynamicPolicy): + def __init__(self,port=50001): + + self.q = count_bytes(1,['srcip','dstip']) + self.set_rate_2 = False + self.set_rate_3 = False + + def packet_count_printer(counts): + print '==== Count Bytes====' + print str(counts) + '\n' + + for m in counts: + idx = str(m).find('srcip') + idx2 = str(m).find('dstip') + sip = str(m)[idx:idx2].lstrip("srcip', ").rstrip(") ('") + dip = str(m)[idx2:].lstrip("dstip', ").rstrip(")") + + if counts[m] > BYTES_FOR_RATE2 and BYTES_FOR_RATE3 > counts[m] and self.set_rate_2 is False: + cmd = BASE_CMD + ' 2 --flow="{srcip=' + sip +',dstip='+dip+'}" -a 127.0.0.1 -p ' + str(port) + p1 = subprocess.Popen([cmd], shell=True) + p1.communicate() + self.set_rate_2 = True + elif counts[m] > BYTES_FOR_RATE3 and self.set_rate_3 is False: + cmd = BASE_CMD + ' 3 --flow="{srcip=' + sip +',dstip='+dip+'}" -a 127.0.0.1 -p ' + str(port) + p1 = subprocess.Popen([cmd], shell=True) + p1.communicate() + self.set_rate_3 = True + + def monitoring(): + return self.q + passthrough + + ### DEFINE THE LPEC FUNCTION + + def lpec(f): + return match(srcip=f['srcip']) + + ## SET UP TRANSITION FUNCTIONS + + @transition + def mon(self): + self.case(occurred(self.event),self.event) + + @transition + def policy_trans(self): + self.case(is_true(V('monitor')),C(monitoring())) + self.default(C(identity)) + + ### SET UP THE FSM DESCRIPTION + + self.fsm_def = FSMDef( + monitor=FSMVar(type=BoolType(), + init=False, + trans=mon), + policy=FSMVar(type=Type(Policy,set([identity,monitoring()])), + init=monitoring(), + trans=policy_trans)) + + + ### Set up monitoring + self.q.register_callback(packet_count_printer) + + + ### SET UP POLICY AND EVENT STREAMS + + fsm_pol = FSMPolicy(lpec,self.fsm_def) + json_event = JSONEvent() + json_event.register_callback(fsm_pol.event_handler) + + super(monitor,self).__init__(fsm_pol) + + +def main(): + pol = monitor() + + return pol diff --git a/pyretic/kinetic/apps/rate_limiter.py b/pyretic/kinetic/apps/rate_limiter.py new file mode 100644 index 00000000..5d72f18a --- /dev/null +++ b/pyretic/kinetic/apps/rate_limiter.py @@ -0,0 +1,125 @@ +from random import choice + +from pyretic.lib.corelib import * +from pyretic.lib.std import * +from pyretic.lib.query import * + +from pyretic.kinetic.fsm_policy import * +from pyretic.kinetic.drivers.json_event import JSONEvent +from pyretic.kinetic.smv.model_checker import * +from pyretic.kinetic.apps.mac_learner import * +from pyretic.kinetic.apps.monitor import * + + + +##################################################################################################### +# * App launch +# - 
pyretic.py pyretic.kinetic.apps.rate_limiter +# +# * Mininet Generation (in "~/pyretic/pyretic/kinetic" directory) +# - sudo mn --controller=remote,ip=127.0.0.1 --mac --arp --switch ovsk --link=tc --custom mininet_topos/example_topos.py --topo=ratelimit +# +# * Start ping from h1 to h2 +# - mininet> h1 ping h2 +# +# * Events to rate limit to level '2' (100ms delay bidirectional) (in "~/pyretic/pyretic/kinetic" directory) +# - python json_sender.py -n rate -l 2 --flow="{srcip=10.0.0.1,dstip=10.0.0.2}" -a 127.0.0.1 -p 50001 +# +# * Events to rate limit to level '3' (400ms delay bidirectional) (in "~/pyretic/pyretic/kinetic" directory) +# - python json_sender.py -n rate -l 3 --flow="{srcip=10.0.0.1,dstip=10.0.0.2}" -a 127.0.0.1 -p 50001 +# +# * Events to rate limit back to level '1' (no delay) (in "~/pyretic/pyretic/kinetic" directory) +# - python json_sender.py -n rate -l 1 --flow="{srcip=10.0.0.1,dstip=10.0.0.2}" -a 127.0.0.1 -p 50001 +# +##################################################################################################### + + + +class rate_limiter(DynamicPolicy): + def __init__(self): + + ### DEFINE INTERNAL METHODS + + rates = [1,2,3] + + def interswitch(): + return if_(match(inport=2),fwd(1),fwd(2)) + + def routing(): + match_inter = union([match(switch=2),match(switch=3),match(switch=4)]) + match_inport = union([match(inport=2),match(inport=3),match(inport=4)]) + + r = if_(match_inter,interswitch(), if_(match_inport, fwd(1), drop)) + + return r + + def rate_limit_policy(i): + match_from_edge = (union([match(switch=1),match(switch=5)]) & match(inport=1)) + return if_(match_from_edge, fwd(i), routing()) + + + ### DEFINE THE LPEC FUNCTION + def lpec(f): + h1 = f['srcip'] + h2 = f['dstip'] + return union([match(srcip=h1,dstip=h2),match(srcip=h2,dstip=h1)] ) + + + ### SET UP TRANSITION FUNCTIONS + + @transition + def rate(self): + self.case(occurred(self.event),self.event) + + @transition + def policy(self): + for i in rates: + self.case(V('rate')==C(i), C(rate_limit_policy(i+1))) + # Default policy + self.default(C(rate_limit_policy(2))) + + + + ### SET UP THE FSM DESCRIPTION + + self.fsm_def = FSMDef( + rate=FSMVar(type=Type(int,set(rates)), + init=1, + trans=rate), + policy=FSMVar(type=Type(Policy,set([rate_limit_policy(i+1) for i in rates ])), + init=rate_limit_policy(2), + trans=policy)) + + # Instantiate FSMPolicy, start/register JSON handler. 
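+        # The JSONEvent driver (drivers/json_event.py) listens on a TCP socket and
+        # expects a JSON object with 'name', 'value' and an optional 'flow' dict, e.g.
+        # {"name": "rate", "value": 2, "flow": {"srcip": "10.0.0.1", "dstip": "10.0.0.2"}}
+        # (illustrative payload; the actual message is built by json_sender.py from the
+        # command-line flags shown in the header).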
+ fsm_pol = FSMPolicy(lpec, self.fsm_def) + json_event = JSONEvent() + json_event.register_callback(fsm_pol.event_handler) + + super(rate_limiter,self).__init__(fsm_pol) + + +def main(): + pol = rate_limiter() + + # For NuSMV + smv_str = fsm_def_to_smv_model(pol.fsm_def) + mc = ModelChecker(smv_str,'rate_limiter') + + ## Add specs + mc.add_spec("SPEC AG (rate=1 -> AX policy=policy_1)") + mc.add_spec("SPEC AG (rate=2 -> AX policy=policy_2)") + mc.add_spec("SPEC AG (rate=3 -> AX policy=policy_3)") + mc.add_spec("SPEC AG (EF policy=policy_1)") + mc.add_spec("SPEC policy=policy_1 -> EX policy=policy_1") + mc.add_spec("SPEC AG (policy=policy_1 -> EF policy=policy_2)") + + mc.save_as_smv_file() + mc.verify() + + # Ask deployment + ask_deploy() + +# return pol + return pol >> monitor() + + diff --git a/pyretic/kinetic/apps/server_lb.py b/pyretic/kinetic/apps/server_lb.py new file mode 100644 index 00000000..4b4ecddf --- /dev/null +++ b/pyretic/kinetic/apps/server_lb.py @@ -0,0 +1,126 @@ +from random import choice + +from pyretic.lib.corelib import * +from pyretic.lib.std import * +from pyretic.lib.query import * + +from pyretic.kinetic.fsm_policy import * +from pyretic.kinetic.drivers.json_event import JSONEvent +from pyretic.kinetic.smv.model_checker import * +from pyretic.kinetic.apps.mac_learner import * + + +##################################################################################################### +# * App launch +# - pyretic.py pyretic.kinetic.apps.server_lb +# +# * Mininet Generation (in "~/pyretic/pyretic/kinetic" directory) +# - sudo mn --controller=remote,ip=127.0.0.1 --mac --arp --switch ovsk --link=tc --custom mininet_topos/example_topos.py --topo=server_lb +# +# * Start ping from h1 to h2 +# - mininet> h1 ping h2 +# +# * Events (in "~/pyretic/pyretic/kinetic" directory) +# - python json_sender.py -n lb -l True --flow="{srcip=10.0.0.1,dstip=10.0.0.2}" -a 127.0.0.1 -p 50001 +# +##################################################################################################### + + + +class serverlb(DynamicPolicy): + def __init__(self): + + # Server list. + self.servers = {'10.0.0.3': '00:00:00:00:00:03', + '10.0.0.4': '00:00:00:00:00:04', + '10.0.0.5': '00:00:00:00:00:05'} + + # Randmoly choose a server from the list + def randomly_choose_server(servermap): + return server_i_policy(choice(servermap.keys())) + + # Forward to i-th server + def server_i_policy(i): + ip_list = self.servers.keys() + ip_str = str(i) + mac_str = self.servers[ip_str] + public_ip = IP('10.0.0.100') + client_ips = [IP('10.0.0.1'), IP('10.0.0.2')] + receive_ip = [IP(ip_str)]*len(client_ips) + + rewrite_ip_policy = rewrite(zip(client_ips, receive_ip), public_ip) + rewrite_mac_policy = if_(match(dstip=IP(ip_str),ethtype=2048), + modify(dstmac=MAC(mac_str)),passthrough) + + return rewrite_ip_policy >> rewrite_mac_policy + + + # Rewrite IP address. 
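+        # For each (client, real server) pair, packets from the client to the public IP
+        # have their destination rewritten to the real server, and replies from the real
+        # server have their source rewritten back to the public IP (see subs() below);
+        # the per-client policies are combined with intersection().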
+ def rewrite(d,p): + return intersection([subs(c,r,p) for c,r in d]) + + + # subroutine of rewrite() + def subs(c,r,p): + c_to_p = match(srcip=c,dstip=p) + r_to_c = match(srcip=r,dstip=c) + return ((c_to_p >> modify(dstip=r))+(r_to_c >> modify(srcip=p))+(~r_to_c >> ~c_to_p)) + + + + ### DEFINE THE FLEC FUNCTION + + def lpec(f): + return match(srcip=f['srcip']) + + + ## SET UP TRANSITION FUNCTIONS + + @transition + def server(self): + self.case(occurred(self.event),self.event) + + @transition + def policy(self): + self.servers = {'10.0.0.3': '00:00:00:00:00:03', + '10.0.0.4': '00:00:00:00:00:04', + '10.0.0.5': '00:00:00:00:00:05'} + + self.case(is_true(V('server')),C(randomly_choose_server(self.servers))) + self.default(C(server_i_policy(self.servers.keys()[1]))) + + + ### SET UP THE FSM DESCRIPTION + + self.fsm_def = FSMDef( + server=FSMVar(type=BoolType(), + init=False, + trans=server), + policy=FSMVar(type=Type(Policy,set([server_i_policy(i) for i in self.servers])), + init=server_i_policy(choice(self.servers.keys())), + trans=policy)) + + # Instantiate FSMPolicy, start/register JSON handler. + fsm_pol = FSMPolicy(lpec, self.fsm_def) + json_event = JSONEvent() + json_event.register_callback(fsm_pol.event_handler) + + super(serverlb,self).__init__(fsm_pol) + + +def main(): + pol = serverlb() + + # For NuSMV + smv_str = fsm_def_to_smv_model(pol.fsm_def) + mc = ModelChecker(smv_str,'server_lb') + + ## Add specs + mc.save_as_smv_file() + mc.verify() + + # Ask deployment + ask_deploy() + + + return pol >> mac_learner() diff --git a/pyretic/kinetic/apps/sf.py b/pyretic/kinetic/apps/sf.py new file mode 100644 index 00000000..9f4232c7 --- /dev/null +++ b/pyretic/kinetic/apps/sf.py @@ -0,0 +1,141 @@ + +from pyretic.lib.corelib import * +from pyretic.lib.std import * + +from pyretic.kinetic.fsm_policy import * +from pyretic.kinetic.drivers.json_event import JSONEvent +from pyretic.kinetic.smv.model_checker import * +from pyretic.kinetic.util.resetting_q import * + +##################################################################################################### +# * App launch +# - pyretic.py pyretic.kinetic.apps.sf +# +# * Mininet Generation (in "~/pyretic/pyretic/kinetic" directory) +# - sudo mininet.sh --topo=single,5 +# +# * Internal hosts are: h3 (10.0.0.3) and h4(10.0.0.4). +# +# * Start ping from h1 to h3. Should not go through +# - mininet> h1 ping h2 +# +# * Start ping from h3 to h1. Should go through +# - mininet> h3 ping h1 +# +# * From now on, ping from h1 to h3 also works, until timeout occurs. +# +# * Events are internal +# - Mac Learner application will automatically react to +# topology change (e.g., link down and up) emulated from Mininet, and successfully +# forward traffic until no route exists between two hosts. 
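+#
+# * The 'outgoing' event is raised internally by a packet query on traffic coming from
+#   the internal hosts; the 'timeout' event can be injected through the JSON driver,
+#   e.g. (illustrative command, assuming the default port 50001):
+#   - python json_sender.py -n timeout -l True --flow="{srcip=10.0.0.3,dstip=10.0.0.1}" -a 127.0.0.1 -p 50001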
+##################################################################################################### + + + +class sf(DynamicPolicy): + def __init__(self,internal_hosts,ih_prd): + + ### DEFINE THE LPEC FUNCTION + + def lpec(f): + hosts = list() + internal_h, external_h = None,None + + hosts.append(f['srcip']) + hosts.append(f['dstip']) + + for host in hosts: + if host in internal_hosts: + internal_h = host + else: + external_h = host + + if internal_h is None or external_h is None: + return None + + return (match(srcip=internal_h,dstip=external_h) | match(srcip=external_h,dstip=internal_h) ) + + ## SET UP TRANSITION FUNCTIONS + + @transition + def outgoing(self): + self.case(is_true(V('timeout')),C(False)) + self.case(occurred(self.event),self.event) + + @transition + def timeout(self): + self.case(occurred(self.event),self.event) + self.default(C(False)) + + @transition + def policy(self): + self.case(is_true(V('timeout')),C(ih_prd)) + self.case(is_true(V('outgoing')),C(identity)) + self.default(C(ih_prd)) + + ### SET UP THE FSM DESCRIPTION + self.fsm_def = FSMDef( + outgoing=FSMVar(type=BoolType(), + init=False, + trans=outgoing), + timeout=FSMVar(type=BoolType(), + init=False, + trans=timeout), + policy=FSMVar(type=Type(Policy,[identity,ih_prd]), + init=ih_prd, + trans=policy)) + + ### DEFINE QUERY CALLBACKS + + def q_callback(pkt): + flow = frozendict(srcip=pkt['srcip'],dstip=pkt['dstip']) + return fsm_pol.event_handler(Event('outgoing',True,flow)) + + ### SET UP POLICY AND EVENT STREAMS + + fsm_pol = FSMPolicy(lpec,self.fsm_def) + q = FwdBucket() + q.register_callback(q_callback) + json_event = JSONEvent() + json_event.register_callback(fsm_pol.event_handler) + + super(sf,self).__init__(fsm_pol + (ih_prd >> q)) + + +def main(): + internal_hosts = [IPAddr('10.0.0.3'),IPAddr('10.0.0.4')] + ih_prd = union([match(srcip=h) for h in internal_hosts]) + pol = sf(internal_hosts,ih_prd) + + print fsm_def_to_smv_model(pol.fsm_def) + + # For NuSMV + smv_str = fsm_def_to_smv_model(pol.fsm_def) + mc = ModelChecker(smv_str,'sf') + + ## Add specs + mc.add_spec("FAIRNESS\n outgoing;") + mc.add_spec("FAIRNESS\n timeout;") + + ### If outgoing event is true and times is not up, next policy state is 'identity' + mc.add_spec("SPEC AG (outgoing &!timeout -> AX policy=policy_1)") + + ### If outgoing event is true but also times is up, next policy state is 'match filter' + mc.add_spec("SPEC AG (outgoing & timeout -> AX policy=policy_2)") + + ### If outgoing event is false, next policy state is 'match filter' + mc.add_spec("SPEC AG (!outgoing -> AX policy=policy_2)") + +# ### Policy state is 'match filter' until outgoing is true. 
+# mc.add_spec("SPEC A [ policy=policy_2 U (outgoing & !timeout) ]") + + # Save NuSMV file + mc.save_as_smv_file() + + # Verify + mc.verify() + + # Ask deployment + ask_deploy() + + return pol >> flood() diff --git a/pyretic/kinetic/apps/traffic_lb.py b/pyretic/kinetic/apps/traffic_lb.py new file mode 100644 index 00000000..08b9dbc8 --- /dev/null +++ b/pyretic/kinetic/apps/traffic_lb.py @@ -0,0 +1,111 @@ +from random import choice + +from pyretic.lib.corelib import * +from pyretic.lib.std import * +from pyretic.lib.query import * + +from pyretic.kinetic.fsm_policy import * +from pyretic.kinetic.drivers.json_event import JSONEvent +from pyretic.kinetic.smv.model_checker import * +from pyretic.kinetic.apps.mac_learner import * + + +##################################################################################################### +# * App launch +# - pyretic.py pyretic.kinetic.apps.traffic_lb +# +# * Mininet Generation (in "~/pyretic/pyretic/kinetic" directory) +# - sudo mn --controller=remote,ip=127.0.0.1 --mac --arp --switch ovsk --link=tc --custom mininet_topos/example_topos.py --topo=traffic_lb +# +# * Start ping from h1 to h2 +# - mininet> h1 ping h2 +# +# * Events to make flow load balance +# - python json_sender.py -n lb -l True --flow="{srcip=10.0.0.1,dstip=10.0.0.2}" -a 127.0.0.1 -p 50001 +# +# * Events to make flow just take default path +# - python json_sender.py -n lb -l False --flow="{srcip=10.0.0.1,dstip=10.0.0.2}" -a 127.0.0.1 -p 50001 +# +##################################################################################################### + + + +class traffic_lb(DynamicPolicy): + def __init__(self): + + + ### DEFINE INTERNAL METHODS + + self.links = [1,2,3] + + def interswitch(): + return if_(match(inport=2),fwd(1),fwd(2)) + + def routing(): + match_inter = union([match(switch=2),match(switch=3),match(switch=4)]) + match_inport = union([match(inport=2),match(inport=3),match(inport=4)]) + + r = if_(match_inter,interswitch(), if_(match_inport, fwd(1), drop)) + + return r + + def randomly_choose_link(): + return traffic_lb_policy(choice(self.links)+1) + + def traffic_lb_policy(i): + match_from_edge = (union([match(switch=1),match(switch=5)]) & match(inport=1)) + return if_(match_from_edge, fwd(i), routing()) + + + ### DEFINE THE LPEC FUNCTION + + def lpec(f): + h1 = f['srcip'] + h2 = f['dstip'] + return union([match(srcip=h1,dstip=h2),match(srcip=h2,dstip=h1)] ) + + + ### SET UP TRANSITION FUNCTIONS + @transition + def lb(self): + self.case(occurred(self.event),self.event) + + @transition + def policy(self): + self.case(is_true(V('lb')),C(randomly_choose_link())) + self.default(C(traffic_lb_policy(2))) + + + ### SET UP THE FSM DESCRIPTION + + self.fsm_def = FSMDef( + lb=FSMVar(type=BoolType(), + init=False, + trans=lb), + policy=FSMVar(type=Type(Policy,set([traffic_lb_policy(i+1) for i in self.links ])), + init=traffic_lb_policy(2), + trans=policy)) + + # Instantiate FSMPolicy, start/register JSON handler. 
+ fsm_pol = FSMPolicy(lpec, self.fsm_def) + json_event = JSONEvent() + json_event.register_callback(fsm_pol.event_handler) + + super(traffic_lb,self).__init__(fsm_pol) + + +def main(): + pol = traffic_lb() + + # For NuSMV + smv_str = fsm_def_to_smv_model(pol.fsm_def) + mc = ModelChecker(smv_str,'traffic_lb') + + ## Add specs + mc.save_as_smv_file() + mc.verify() + + # Ask deployment + ask_deploy() + + return pol diff --git a/pyretic/kinetic/apps/ucap.py b/pyretic/kinetic/apps/ucap.py new file mode 100644 index 00000000..d74170c3 --- /dev/null +++ b/pyretic/kinetic/apps/ucap.py @@ -0,0 +1,112 @@ + +from pyretic.lib.corelib import * +from pyretic.lib.std import * + +from pyretic.kinetic.fsm_policy import * +from pyretic.kinetic.drivers.json_event import JSONEvent +from pyretic.kinetic.smv.model_checker import * +from pyretic.kinetic.apps.mac_learner import mac_learner + + +##################################################################################################### +# * App launch +# - pyretic.py pyretic.kinetic.apps.ucap +# +# * Mininet Generation (in "~/pyretic/pyretic/kinetic" directory) +# - sudo mn --controller=remote,ip=127.0.0.1 --mac --arp --switch ovsk --link=tc --topo=single,3 +# +# * Start ping from h1 to h2 +# - mininet> h1 ping h2 +# +# * Events to block traffic "h1 ping h2" (in "~/pyretic/pyretic/kinetic" directory) +# - python json_sender.py -n capped -l True --flow="{srcip=10.0.0.1}" -a 127.0.0.1 -p 50001 +# +# * Events to again allow traffic "h1 ping h2" (in "~/pyretic/pyretic/kinetic" directory) +# - python json_sender.py -n capped -l False --flow="{srcip=10.0.0.1}" -a 127.0.0.1 -p 50001 +##################################################################################################### + +### Define a class for the application, subclassed from DynamicPolicy +class ucap(DynamicPolicy): + def __init__(self): + + ### 1. DEFINE THE LPEC FUNCTION + + def lpec(f): + # Packets with same source IP + # will have a same "state" (thus, same policy applied). + return match(srcip=f['srcmac']) + + + ### 2. SET UP TRANSITION FUNCTIONS + + @transition + def capped(self): + # Return the variable's own value. + # If True, return True. If False, return False. + self.case(occurred(self.event),self.event) + + @transition + def policy(self): + # If "capped" is True, change policy to "drop" + self.case(is_true(V('capped')),C(drop)) + + # Default policy is "indentity", which is "allow". + self.default(C(identity)) + + + ### 3. SET UP THE FSM DESCRIPTION + + self.fsm_def = FSMDef( + capped=FSMVar(type=BoolType(), + init=False, + trans=capped), + policy=FSMVar(type=Type(Policy,{drop,identity}), + init=identity, + trans=policy)) + + + ### 4. SET UP POLICY AND EVENT STREAMS + + ### This part pretty much remains same for any application + fsm_pol = FSMPolicy(lpec,self.fsm_def) + json_event = JSONEvent() + json_event.register_callback(fsm_pol.event_handler) + ### This part pretty much remains same for any application + + # Specify application class name here. 
(e.g., "ids") + super(ucap,self).__init__(fsm_pol) + + +def main(): + + # DynamicPolicy that is going to be returned + pol = ucap() + + # For NuSMV + smv_str = fsm_def_to_smv_model(pol.fsm_def) + mc = ModelChecker(smv_str,'ucap') + + ## Add specs + mc.add_spec("FAIRNESS\n capped;") + + ### If capped event is true, next policy state is 'drop' + mc.add_spec("SPEC AG (capped -> AX policy=drop)") + + ### If capped event is false, next policy state is 'allow' + mc.add_spec("SPEC AG (!capped -> AX policy=identity)") + + ### Policy state is 'allow' until capped is true. + mc.add_spec("SPEC A [ policy=identity U capped ]") + + ### It is always possible to go back to 'allow' + mc.add_spec("SPEC AG EF policy=identity") + + # Save NuSMV file + mc.save_as_smv_file() + + # Verify + mc.verify() + + # Return DynamicPolicy. + # flood() will take for of forwarding for this simple example. + return pol >> mac_learner() diff --git a/pyretic/kinetic/apps/vm_prov.py b/pyretic/kinetic/apps/vm_prov.py new file mode 100644 index 00000000..3b6fd899 --- /dev/null +++ b/pyretic/kinetic/apps/vm_prov.py @@ -0,0 +1,112 @@ + +from pyretic.lib.corelib import * +from pyretic.lib.std import * + +from pyretic.kinetic.fsm_policy import * +from pyretic.kinetic.drivers.json_event import JSONEvent +from pyretic.kinetic.smv.model_checker import * +from pyretic.kinetic.util.rewriting import * +#from pyretic.kinetic.apps.mac_learner import * +from pyretic.modules.mac_learner import * + +##################################################################################################### +# * App launch +# - pyretic.py pyretic.kinetic.apps.vm_prov +# +# * Mininet Generation (in "~/pyretic/pyretic/kinetic" directory) +# - sudo mn --controller=remote,ip=127.0.0.1 --mac --arp --switch ovsk --link=tc --topo=single,3 +# +# * Start ping from h1 to public IP +# - mininet> h1 ping 10.0.0.100 +# +# * Make h1's flow use backup server (in "~/pyretic/pyretic/kinetic" directory) +# - python json_sender.py -n backup -l True --flow="{srcip=10.0.0.1}" -a 127.0.0.1 -p 50001 +# +# * Make h1's flow use primary server again (in "~/pyretic/pyretic/kinetic" directory) +# - python json_sender.py -n backup -l False --flow="{srcip=10.0.0.1}" -a 127.0.0.1 -p 50001 +##################################################################################################### + +class vm_prov(DynamicPolicy): + def __init__(self): + + # List of servers + vm_prov.serverList = ['10.0.0.3','10.0.0.4','10.0.0.5','10.0.0.6','10.0.0.7','10.0.0.8'] + + # Choose randomly + def fwdToThisServer(which_srv): + client_ips = [IP('10.0.0.1'), IP('10.0.0.2')] + if which_srv > -1 and which_srv < len(vm_prov.serverList): + server_ip_str = self.serverList[which_srv] + else: + server_ip_str = '10.0.0.3' # default + rewrite_policy = rewriteDstIPAndMAC_Public(client_ips, '10.0.0.100', server_ip_str) + return rewrite_policy + + + ### DEFINE THE LPEC FUNCTION + + def lpec(f): + return match(srcip=f['srcip'],dstip=f['dstip']) + + ## SET UP TRANSITION FUNCTIONS + + @transition + def load(self): + self.case(occurred(self.event),self.event) + + @transition + def policy(self): + lightLoad = NonDetermPolicy([fwdToThisServer(i) for i in range(1)]) + mediumLoad = NonDetermPolicy([fwdToThisServer(i) for i in range(len(vm_prov.serverList)/2)]) + heavyLoad = NonDetermPolicy([fwdToThisServer(i+1) for i in range(len(vm_prov.serverList)-1)]) + + self.case((V('load')==C(1)) , C(lightLoad)) + self.case((V('load')==C(2)) , C(mediumLoad)) + self.case((V('load')==C(3)) , C(heavyLoad)) + + # Else, to primary + 
self.default(C(fwdToThisServer(0))) + + + ### SET UP THE FSM DESCRIPTION + + self.fsm_def = FSMDef( + load=FSMVar(type=Type(int,{1,2,3}), + init=1, + trans=load), + policy=FSMVar(type=Type(Policy,set([fwdToThisServer(i) for i in range(len(vm_prov.serverList))])), + init=fwdToThisServer(0), + trans=policy)) + + ### SET UP POLICY AND EVENT STREAMS + + fsm_pol = FSMPolicy(lpec,self.fsm_def) + json_event = JSONEvent() + json_event.register_callback(fsm_pol.event_handler) + + super(vm_prov,self).__init__(fsm_pol) + + +def main(): + pol = vm_prov() + + # For NuSMV + smv_str = fsm_def_to_smv_model(pol.fsm_def) + mc = ModelChecker(smv_str,'vm_prov') + + # If load is light, just forward to 1st server. + mc.add_spec("SPEC AG (load=1 -> AX policy=policy_1)") + + # If load is medium, forward to 1st or 2nd server. + mc.add_spec("SPEC AG (load=2 -> AX (policy=policy_1 | policy=policy_2 | policy=policy_3) )") + + # If load is high, it is possible to forward to 5th(last) server + mc.add_spec("SPEC AG (load=3 -> EX (policy=policy_5))") + + # Save NuSMV file + mc.save_as_smv_file() + + # Verify + mc.verify() + + return pol >> mac_learner() diff --git a/pyretic/kinetic/drivers/__init__.py b/pyretic/kinetic/drivers/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/pyretic/kinetic/drivers/json_event.py b/pyretic/kinetic/drivers/json_event.py new file mode 100644 index 00000000..6b683f79 --- /dev/null +++ b/pyretic/kinetic/drivers/json_event.py @@ -0,0 +1,95 @@ +from threading import Thread +import socket +import SocketServer +import json +import datetime as dt +import select + +from pyretic.lib.corelib import * +from pyretic.kinetic.fsm_policy import Event + +class JSONEvent(): + + port = 50001 + + def __init__(self, addr='127.0.0.1'): + self.handler = None + self.addr = addr + self.port = JSONEvent.port + JSONEvent.port += 1 + p1 = Thread(target=self.event_listener) + p1.daemon = True + p1.start() + + def register_callback(self,handler): + self.handler = handler + + def event_listener(self): + + def parse_json(data): + return json.loads(data) + + def unicode_dict_to_ascii(d): + new_d = dict() + for k,v in d.items(): + if isinstance(v,str): + new_d[k.encode('ascii','ignore')] = v.encode('ascii','ignore') + elif isinstance(v,unicode): + new_d[k.encode('ascii','ignore')] = v.encode('ascii','ignore') + elif isinstance(v,dict): + new_d[k.encode('ascii','ignore')] = unicode_dict_to_ascii(v) + else: + new_d[k.encode('ascii','ignore')] = v + + return new_d + + + message = '' + + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + s.bind((self.addr, self.port)) + s.listen(2048) + + while 1: + message = '' + + ready_r, ready_w, in_error = select.select([s], [], [], 60) + + for s in ready_r: + conn, addr = s.accept() +# print 'Received connection from', addr + while 1: + data = conn.recv(1024) + + if not data: + conn.close() + break + + message = message + data + unicode_dict = parse_json(message) + ascii_dict = unicode_dict_to_ascii(unicode_dict) + + def convert(field,value): + if field == 'srcip' or field == 'dstip': + return IPAddr(value) + elif field == 'srcmac' or field == 'dstmac': + return EthAddr(value) + else: + return int(value) + + name = ascii_dict['name'] + value = ascii_dict['value'] + if not 'flow' in ascii_dict: + flow = None + else: + flow = frozendict( + { k : convert(k,v) for + k,v in ascii_dict['flow'].items() + if v } ) + + return_value = -1 + + if self.handler: + return_value = self.handler(Event(name,value,flow)) + 
conn.sendall(str(return_value)) diff --git a/pyretic/kinetic/drivers/sflow_event.py b/pyretic/kinetic/drivers/sflow_event.py new file mode 100644 index 00000000..258a185c --- /dev/null +++ b/pyretic/kinetic/drivers/sflow_event.py @@ -0,0 +1,67 @@ +import threading +from socket import * +from SocketServer import * +import json +import requests +import os +import time + +class SFlowEvent_T(): + + def __init__(self, handler, addr, port): + self.handler = handler + self.addr = addr + self.port = port + self.target_url = 'http://' + addr + ':' + str(port) + self.event_id = -1 + + def set_max_events(self, max_events): + self.max_events = max_events + + def set_timeout(self, timeout): + self.timeout = timeout + + def set_groups(self, groups): + self.groups = groups + + def set_flows(self, flows): + self.flows = flows + + def set_threshold(self, threshold): + self.threshold = threshold + + def set_action(self, message): + self.message = message + + def start(self, queue): + t1 = threading.Thread(target=self.event_listener, args=(queue,)) + t1.daemon = True + t1.start() + + def event_listener(self, queue): + self.event_url = self.target_url + '/events/json?maxEvents=' + str(self.max_events) + '&timeout=' + str(self.timeout) + + r = requests.put(self.target_url + '/group/json',data=json.dumps(self.groups)) + r = requests.put(self.target_url + '/flow/' + self.threshold['metric'] + '/json',data=json.dumps(self.flows)) + r = requests.put(self.target_url + '/threshold/' + self.threshold['metric'] + '/json',data=json.dumps(self.threshold)) + + while 1: + r = requests.get(self.event_url + '&eventID=' + str(self.event_id)) + if r.status_code != 200: + print 'sflow_event.event_listener return invalid status_code' + break + + events = r.json() + if len(events) == 0: continue + + # Pyretic runtime crashes without this ... 
(need to find a solution) + time.sleep(10) + + self.event_id = events[0]["eventID"] + for event in events: + if self.threshold['metric'] == event['metric']: + self.handler(self.message, queue) + + + + diff --git a/pyretic/kinetic/examples/__init__.py b/pyretic/kinetic/examples/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/pyretic/kinetic/examples/allcomposed.py b/pyretic/kinetic/examples/allcomposed.py new file mode 100644 index 00000000..cd825ca9 --- /dev/null +++ b/pyretic/kinetic/examples/allcomposed.py @@ -0,0 +1,67 @@ +from pyretic.lib.corelib import * +from pyretic.lib.std import * + +from pyretic.kinetic.fsm_policy import * +from pyretic.kinetic.drivers.json_event import JSONEvent +from pyretic.kinetic.smv.model_checker import * + + +from pyretic.kinetic.apps.auth_web import * +from pyretic.kinetic.apps.auth_8021x import * +from pyretic.kinetic.apps.ids import * +from pyretic.kinetic.apps.gardenwall import * +from pyretic.kinetic.apps.mac_learner import * +from pyretic.kinetic.apps.rate_limiter import * +from pyretic.kinetic.apps.monitor import * + +##################################################################################################### +# App launch +# - pyretic.py pyretic.kinetic.apps.allcomposed +# +# Mininet Generation +# - sudo mn --controller=remote,ip=127.0.0.1 --mac --arp --switch ovsk --link=tc --custom example_topos.py --topo=ratelimit +# +# Events to allow traffic "h1 ping h2" +# - python json_sender.py -n auth -l True --flow="{srcip=10.0.0.1}" -a 127.0.0.1 -p 50001} +# - python json_sender.py -n auth -l True --flow="{srcip=10.0.0.2}" -a 127.0.0.1 -p 50001} +# +# Events to block traffic "h1 ping h2" +# - python json_sender.py -n infected -l True --flow="{srcip=10.0.0.1}" -a 127.0.0.1 -p 50001} +# +# +# +##################################################################################################### + + +def main(): + + pol1 = auth_web() + pol2 = auth_8021x() + pol3 = ids() + pol4 = rate_limiter() + + # For NuSMV + cfsm_def, smv_str = fsm_def_compose(pol1.fsm_def, pol2.fsm_def,'+') + cfsm_def2, smv_str = fsm_def_compose(cfsm_def, pol3.fsm_def,'>>') + cfsm_def3, smv_str = fsm_def_compose(cfsm_def2, pol4.fsm_def,'>>') + mc = ModelChecker(smv_str,'allcomposed') + + ## Add specs + mc.add_spec("FAIRNESS\n authenticated_web;") + mc.add_spec("FAIRNESS\n authenticated_1x;") + mc.add_spec("FAIRNESS\n infected;") +# mc.add_spec("FAIRNESS\n exempt;") +# mc.add_spec("FAIRNESS\n topo_change;") + + ### If infected, block traffic, regardless of authentication + mc.add_spec("SPEC AG (infected -> AX policy=drop)") + + ### If authentication event is false, next policy state is 'drop' + mc.add_spec("SPEC AG ( (authenticated_web | authenticated_1x) & !infected -> AX policy!=drop )") + + mc.save_as_smv_file() + mc.verify() + + ask_deploy() + + return ( (pol1 + pol2) >> pol3 >> pol4 ) >> monitor(50004) diff --git a/pyretic/kinetic/examples/auth_ids.py b/pyretic/kinetic/examples/auth_ids.py new file mode 100644 index 00000000..f8d36e0d --- /dev/null +++ b/pyretic/kinetic/examples/auth_ids.py @@ -0,0 +1,59 @@ +from pyretic.lib.corelib import * +from pyretic.lib.std import * + +from pyretic.kinetic.fsm_policy import * +from pyretic.kinetic.drivers.json_event import JSONEvent +from pyretic.kinetic.smv.model_checker import * + +from pyretic.kinetic.apps.ids_only import * +from pyretic.kinetic.apps.gardenwall_only import * +from pyretic.kinetic.apps.auth_only import * + 
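# Illustrative sketch (not used by this example): with both sub-policies drawn
# from {identity, drop}, the serial composition auth_only() >> ids_only() admits
# a flow only when it is authenticated and not infected, which is exactly what
# the CTL specs in main() below assert.  The helper name is hypothetical.
def composed_decision(authenticated, infected):
    """Hypothetical helper mirroring the expected per-flow outcome."""
    return 'identity' if (authenticated and not infected) else 'drop'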
+##################################################################################################### +# App launch +# - pyretic.py pyretic.kinetic.apps.auth_rl_ids +# +# Mininet Generation +# - sudo mn --controller=remote,ip=127.0.0.1 --mac --arp --switch ovsk --link=tc --custom example_topos.py --topo=ratelimit +# +# Events to allow traffic "h1 ping h2" +# - python json_sender.py -n auth -l True --flow="{srcip=10.0.0.1}" -a 127.0.0.1 -p 50001} +# - python json_sender.py -n auth -l True --flow="{srcip=10.0.0.2}" -a 127.0.0.1 -p 50001} +# +# Events to block traffic "h1 ping h2" +# - python json_sender.py -n infected -l True --flow="{srcip=10.0.0.1}" -a 127.0.0.1 -p 50001} +# +# +# +##################################################################################################### + + +def main(): + + pol1 = auth_only() + pol2 = ids_only() + + # For NuSMV + smv_str = fsm_def_to_smv_model_compose(pol1.fsm_def, pol2.fsm_def,'>>') + mc = ModelChecker(smv_str,'auth_ids') + + ## Add specs + mc.add_spec("FAIRNESS\n authenticated;") + mc.add_spec("FAIRNESS\n infected;") + + ### If infected, block traffic, regardless of authentication + mc.add_spec("SPEC AG (infected -> AX policy=drop)") + + ### If authentication event is false, next policy state is 'drop' + mc.add_spec("SPEC AG (!authenticated -> AX policy=drop)") + + ### If authentication is true and infected is false, then allow + mc.add_spec("SPEC AG (authenticated & !infected -> AX policy=identity)") + + ### It is always possible for the policy state to go to 'allow' + mc.add_spec("SPEC AG (EF policy=identity)") + + mc.save_as_smv_file() + mc.verify() + + return (pol1 >> pol2) >> flood() diff --git a/pyretic/kinetic/examples/auth_ml_ids.py b/pyretic/kinetic/examples/auth_ml_ids.py new file mode 100644 index 00000000..0922708c --- /dev/null +++ b/pyretic/kinetic/examples/auth_ml_ids.py @@ -0,0 +1,21 @@ +from pyretic.kinetic.apps.ids import ids +from pyretic.kinetic.apps.mac_learner import mac_learner +from pyretic.kinetic.apps.auth import auth + +##################################################################################################### +# App launch +# - pyretic.py pyretic.kinetic.apps.ids_and_mac_learner +# +# Mininet Generation +# - sudo mn --controller=remote,ip=127.0.0.1 --mac --arp --switch ovsk --link=tc --topo=single,3 +# +# Events to block traffic "h1 ping h2" +# - python json_sender.py -n infected -l True --flow="{srcip=10.0.0.1}" -a 127.0.0.1 -p 50001 +# +# Events to again allow traffic "h1 ping h2" +# - python json_sender.py -n infected -l False --flow="{srcip=10.0.0.1}" -a 127.0.0.1 -p 50001 +##################################################################################################### + + +def main(): + return auth() >> mac_learner() >> ids() diff --git a/pyretic/kinetic/examples/auth_rl_ids.py b/pyretic/kinetic/examples/auth_rl_ids.py new file mode 100644 index 00000000..31f353a0 --- /dev/null +++ b/pyretic/kinetic/examples/auth_rl_ids.py @@ -0,0 +1,37 @@ +from pyretic.lib.corelib import * +from pyretic.lib.std import * + +from pyretic.kinetic.fsm_policy import * +from pyretic.kinetic.drivers.json_event import JSONEvent +from pyretic.kinetic.smv.translate import * + +from pyretic.kinetic.apps.ids import * +from pyretic.kinetic.apps.auth import * +from pyretic.kinetic.apps.rate_limiter import * + +##################################################################################################### +# App launch +# - pyretic.py pyretic.kinetic.apps.auth_rl_ids +# +# Mininet Generation +# - sudo mn 
--controller=remote,ip=127.0.0.1 --mac --arp --switch ovsk --link=tc --custom example_topos.py --topo=ratelimit +# +# Events to allow traffic "h1 ping h2" +# - python json_sender.py -n auth -l True --flow="{srcip=10.0.0.1}" -a 127.0.0.1 -p 50001} +# - python json_sender.py -n auth -l True --flow="{srcip=10.0.0.2}" -a 127.0.0.1 -p 50001} +# +# Events to block traffic "h1 ping h2" +# - python json_sender.py -n infected -l True --flow="{srcip=10.0.0.1}" -a 127.0.0.1 -p 50001} +# +# +# +##################################################################################################### + + +def main(): + + pol1 = auth() + pol2 = rate_limiter() + pol3 = ids() + + return pol1 >> pol2 >> pol3 diff --git a/pyretic/kinetic/examples/ids_and_mac_learner.py b/pyretic/kinetic/examples/ids_and_mac_learner.py new file mode 100644 index 00000000..12a6841a --- /dev/null +++ b/pyretic/kinetic/examples/ids_and_mac_learner.py @@ -0,0 +1,20 @@ +from pyretic.kinetic.apps.ids import ids +from pyretic.kinetic.apps.mac_learner import mac_learner + +##################################################################################################### +# App launch +# - pyretic.py pyretic.kinetic.apps.ids_and_mac_learner +# +# Mininet Generation +# - sudo mn --controller=remote,ip=127.0.0.1 --mac --arp --switch ovsk --link=tc --topo=single,3 +# +# Events to block traffic "h1 ping h2" +# - python json_sender.py -n infected -l True --flow="{srcip=10.0.0.1}" -a 127.0.0.1 -p 50001 +# +# Events to again allow traffic "h1 ping h2" +# - python json_sender.py -n infected -l False --flow="{srcip=10.0.0.1}" -a 127.0.0.1 -p 50001 +##################################################################################################### + + +def main(): + return ids() >> mac_learner() diff --git a/pyretic/kinetic/fsm_policy.py b/pyretic/kinetic/fsm_policy.py new file mode 100644 index 00000000..26e4ed95 --- /dev/null +++ b/pyretic/kinetic/fsm_policy.py @@ -0,0 +1,203 @@ +import ast +import copy +from collections import defaultdict +from threading import Lock +import re +import inspect +import textwrap +import datetime as dt +import pickle +import threading + +from pyretic.lib.corelib import * +from pyretic.lib.std import * +from pyretic.kinetic.language import * + +from pyretic.kinetic.resettableTimer import * +import ipaddr + +measures_list = [] + +TIMEOUT = 5.0 + +class Event(object): + def __init__(self,name,value,flow): + self.name = name + self.value = value + self.flow = flow + + +class LpecFSM(DynamicPolicy): + def __init__(self,t,s,n,x,d): + self.type = copy.copy(t) + self.state = copy.copy(s) # really variables, but all the variables together are the state + self.event = dict() + for k in self.state.keys(): + self.event[k] = None + self.trans = n + self.exogenous = x + self.dependencies = d + self._topo_init = False + self.timer_thread = TimerReset(TIMEOUT, self.resetTimer) + self.timer_thread.start() + + super(LpecFSM,self).__init__(self.state['policy']) + + def handle_change(self,var_name,is_event=False): + + # get the next value for this variable + next_val = self.trans[var_name](self.state,self.event) + + # IF THIS IS AN EVENT, WE'VE NOW TAKEN IT INTO ACCOUNT + if is_event: + self.event[var_name] = None + + # if the value has changed + if next_val != self.state[var_name]: + self.state[var_name] = next_val + + # update policy, if appropriate + if var_name == 'policy': + self.policy = self.state['policy'] + print self.policy + + # cascade the changes + for v in self.dependencies[var_name]: + self.handle_change(v) + + # IF 
THIS IS AN EVENT, THE CORRESPONDING STATE MAY NEED UPDATE + # (FOR DEFAULT CASE) + if is_event: + self.state[var_name] = \ + self.trans[var_name](self.state,self.event) + + def handle_event(self,event_name,event_val_rep): + # ignore events to other modules + if not event_name in self.type: + print "WARNING: '%s' events not handled by this module! Check if you are sending the '%s' event to correct TCP_port." % (event_name,event_name) + return + # ensure event_val is typed correctly + event_type = self.type[event_name].py_type + if isinstance(event_val_rep,str): + if event_type == bool: + event_val = ast.literal_eval(event_val_rep) + elif event_type == int: + event_val = int(event_val_rep) + else: + raise RuntimeError('not yet implemented') + else: + event_val = event_val_rep + if not isinstance(event_val,event_type): + raise RuntimeError('event_val type mismatch (%s,%s)' % (type(event_val),event_type) ) + + # update event data structure and handle change + self.event[event_name] = event_val + if not self.exogenous[event_name]: + raise RuntimeError('var %s cannot be affected by external events!' % event_name) + self.handle_change(event_name,is_event=True) + + def set_network(self,network): + if not self._topo_init: + self._topo_init = True + return + + # topo_change IS A RESERVED NAME!!! + if 'topo_change' in self.exogenous: + self.handle_event('topo_change',True) + + def resetTimer(self): + + # timeout IS A RESERVED NAME!!! + if 'timeout' in self.exogenous: + print 'Timeout!!!!' + self.handle_event('timeout',True) + self.timer_thread = threading.Timer(TIMEOUT,self.resetTimer) + + +class FSMPolicy(DynamicPolicy): + def __init__(self,lpec_fn,fsm_description): + self.type = dict() + self.state = dict() + self.trans = dict() + self.exogenous = dict() + self.deps = defaultdict(set) + self.lpec_fn = lpec_fn + + for var_name,var_def in fsm_description.map.items(): + self.type[var_name] = var_def['type'] + self.state[var_name] = var_def['init'] + self.trans[var_name] = var_def['trans'] + for e in events(var_def['trans']): + e.add_type(var_def['type']) + self.exogenous[var_name] = True + for v in variables(var_def['trans']): + self.deps[v].add(var_name) + self.lpec_to_fsm = dict() + self.initial_policy = self.state['policy'] + self.lock = Lock() + super(FSMPolicy,self).__init__(self.initial_policy) + + def event_handler(self,event): + print 'Event',event + global measures_list + print len(measures_list) + + if event.name=='endofworld': + pickle_fd = open('./measure_data.p','wb') + pickle.dump(measures_list,pickle_fd) + pickle_fd.close() + measures_list = [] + if event.name=='endofworld_ex': + pickle_fd = open('./measure_data_ext.p','wb') + pickle.dump(measures_list,pickle_fd) + pickle_fd.close() + measures_list = [] + + n1=dt.datetime.now() + # Events that apply to a single lpec + if event.flow: + try: + lpec = self.lpec_fn(event.flow) + except KeyError: + print 'Error: event flow must contain all fields used in lpec_relation. Ignoring.' 
+ return + + if lpec is None: + return + + # DynamicPolicies can't be hashed + # still need to implement hashing for static policies + # in meantime, use string representation of the cannonical lpec + lpec_k = repr(lpec) + + with self.lock: + # get the lpec objects from the flow + if lpec_k in self.lpec_to_fsm: + lpec_new = False + else: + self.lpec_to_fsm[lpec_k] = LpecFSM(self.type,self.state,self.trans, + self.exogenous,self.deps) + lpec_new = True + + # have the lpec_fsm handle the event + lpec_fsm = self.lpec_to_fsm[lpec_k] + lpec_fsm.handle_event(event.name,event.value) + + # if the lpec is new, update the policy + if lpec_new: + self.policy = if_(lpec,lpec_fsm,self.policy) + + self.policy.compile() + n2=dt.datetime.now() + compile_time = float((n2-n1).microseconds)/1000.0/1000.0 + float((n2-n1).seconds) + measures_list.append(compile_time) +# print '=== Compile takes: ',float((n2-n1).microseconds)/1000.0/1000.0 + float((n2-n1).seconds),'===\n' +# print self.policy + return compile_time + # Events that apply to all lpecs + else: + with self.lock: + for lpec_fsm in self.lpec_to_fsm.values(): + lpec_fsm.handle_event(event.name,event.value) + + return '-1' diff --git a/pyretic/kinetic/json_sender.py b/pyretic/kinetic/json_sender.py new file mode 100755 index 00000000..a38f194a --- /dev/null +++ b/pyretic/kinetic/json_sender.py @@ -0,0 +1,171 @@ +#!/usr/bin/env python + +from optparse import OptionParser +import socket +import sys +import json +import re + +def main(): + + desc = ( 'Send JSON Events' ) + usage = ( '%prog [options]\n' + '(type %prog -h for details)' ) + op = OptionParser( description=desc, usage=usage ) + + # Options + op.add_option( '--flow', action="store", \ + dest="flow_tuple", help = "Flow tuple. Example: --flow='{inport=1, srcmac=00:00:00:00:00:11, dstmac=00:00:00:00:00:01,srcip=10.0.0.1, dstip=10.0.0.2/24, tos=2,srcport=90, dstport=8080, ethtype=1, protocol=2048, vlan_id=43, vlan_pcp=1}'" ) + + op.add_option( '--file', action="store", \ + dest="file", help = 'File containing the flow tuple information. It should follow the format of the flow as above i.e., starts with {..' ) + + op.add_option( '--event-name', '-n', action="store",\ + dest="event_name", help = 'The event name.' ) + + op.add_option( '--event-value', '-l', action="store",\ + dest="event_value", help = 'The event value.' ) + + op.add_option( '--addr', '-a', action="store",\ + dest="addr", help = 'The address of the controller.' ) + + op.add_option( '--port', '-p', action="store",\ + dest="port", help = 'The port value of the controller.' ) + + # Parsing and processing + options, args = op.parse_args() + + flow_str=None + + if options.addr is None and options.port is None: + print 'No IP address or Port information is given. Exiting.' + return + elif options.event_name is None: + print 'No event name provided. Exiting.' + return + elif options.event_value is None: + print 'No event value provided. Exiting.' 
+ return + + # Open file if specified + elif options.file and options.flow_tuple: + print 'Can only specify one of (file,flow_tuple)' + return + + elif options.file: + try: + fd = open(options.file, 'r') + except IOError as err: + print 'Error opening file: ', err + print 'Aborting.\n' + sys.exit(1) + + content = fd.read() + flow_str = content + + elif options.flow_tuple: + flow_str = options.flow_tuple + + if flow_str: + # Parse flow + flow_dict = dict( + switch=None, + inport=None, + srcmac=None, + dstmac=None, + srcip=None, + dstip=None, + tos=None, + srcport=None, + dstport=None, + ethtype=None, + protocol=None, + vlan_id=None, + vlan_pcp=None) + + parse_flow_str(flow_dict, flow_str) + + # Construct JSON message + json_message = dict(name=options.event_name, + value=options.event_value, + flow=flow_dict) + else: + # Construct JSON message + json_message = dict(name=options.event_name, + value=options.event_value) + + # Create socket + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + + # Connect to server + s.connect((options.addr, int(options.port))) + bufsize = len(json_message) + + # Send data + totalsent = 0 + s.sendall(json.dumps(json_message)) + + # Receive return value + recvdata = s.recv(1024) + print 'return: ' + recvdata + + s.close() + +def parse_flow_str(flow_dict, flow_str): + print "\nFlow_Str = " + flow_str + m = re.search("inport=(\d+)\s*",flow_str) + if m: + flow_dict['inport'] = m.group(1) + + m = re.search("srcmac=(\w\w:\w\w:\w\w:\w\w:\w\w:\w\w)\s*",flow_str) + if m: + flow_dict['srcmac'] = m.group(1) + + m = re.search("dstmac=(\w\w:\w\w:\w\w:\w\w:\w\w:\w\w)\s*",flow_str) + if m: + flow_dict['dstmac'] = m.group(1) + + m = re.search("srcip=(\d+\.\d+\.\d+\.\d+[\/\d+]*)\s*",flow_str) + if m: + flow_dict['srcip'] = m.group(1) + + m = re.search("dstip=(\d+\.\d+\.\d+\.\d+[\/\d+]*)\s*",flow_str) + if m: + flow_dict['dstip'] = m.group(1) + + m = re.search("tos=(\d+)\s*",flow_str) + if m: + flow_dict['tos'] = m.group(1) + + m = re.search("srcport=(\d+)\s*",flow_str) + if m: + flow_dict['srcport'] = m.group(1) + + m = re.search("dstport=(\d+)\s*",flow_str) + if m: + flow_dict['dstport'] = m.group(1) + + m = re.search("ethtype=(\d+)\s*",flow_str) + if m: + flow_dict['ethtype'] = m.group(1) + + m = re.search("protocol=(\d+)\s*",flow_str) + if m: + flow_dict['protocol'] = m.group(1) + + m = re.search("vlan_id=(\d+)\s*",flow_str) + if m: + flow_dict['vlan_id'] = m.group(1) + + m = re.search("vlan_pcp=(\d+)\s*",flow_str) + if m: + flow_dict['vlan_pcp'] = m.group(1) + + print "\nData Payload = " + str(flow_dict) + '\n' + +# main ###### +if __name__ == '__main__': + main() + + + diff --git a/pyretic/kinetic/language.py b/pyretic/kinetic/language.py new file mode 100644 index 00000000..db59a60c --- /dev/null +++ b/pyretic/kinetic/language.py @@ -0,0 +1,784 @@ +from pyretic.lib.corelib import * +from random import choice +from threading import Lock +import hashlib +import operator +import copy +import sys + +## Used globally +policy_to_name_map = {} +complexPolicyStr_to_actualPolicy = {} + +def ask_deploy(): + var = raw_input("Deploy? (yes/no): ") + print "You entered: ", var + if var.lower()=='yes' or var.lower()=='y': + return + elif var.lower()=='no' or var.lower()=='n': + sys.exit() + else: + print 'Unknown answer. Please enter "yes" or "no".' 
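        # Unrecognized answer: prompt again.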
+ ask_deploy() + +def as_comment(s): + s = '-- ' + s + s = s.replace('\n', '\n-- ') + return s + +def policy_to_hash(p_named_set,policy_set,p): + if isinstance(p,sequential): + newp = sequential(p.policies) + for c in range(newp.policies.count(identity)): + if newp.policies!=[identity]: + newp.policies.remove(identity) + p = newp + if isinstance(p,flood): + p_named_set.add('flood') + return 'flood' + elif isinstance(p,fwd): + p_named_set.add('_'.join(str(p).split())) + return '_'.join(str(p).split()) + elif isinstance(p, Policy) and p.compile() == drop.compile(): + p_named_set.add('drop') + return 'drop' +# elif isinstance(p, Policy) and p.compile() == identity.compile(): +# p_named_set.add('identity') +# return 'identity' +# elif isinstance(p,union): +# print 'yo' +# return 'union' + s = str(p) + if s=='False': + return 'FALSE' + elif s=='True': + return 'TRUE' + elif s.isdigit(): + return s + else: + complexPolicyStr_to_actualPolicy[s] = p + policy_set.add(s) + +def to_smv(i): + if isinstance(i,sequential): + newp = sequential(i.policies) + for c in range(newp.policies.count(identity)): + if newp.policies!=[identity]: + newp.policies.remove(identity) + i = newp + + if isinstance(i,flood): + return 'flood' + elif isinstance(i,fwd): + return '_'.join(str(i).split()) + elif isinstance(i,NonDetermPolicy): + policy_list = [] + for p in i.getList(): + policy_list.append(to_smv(p)) + return ' union '.join(policy_list) + elif isinstance(i,Policy) and i.compile() == drop.compile(): + return 'drop' +# elif isinstance(i,Policy) and i.compile() == identity.compile(): +# return 'identity' + s = str(i) + if s=='False': + return 'FALSE' + elif s=='True': + return 'TRUE' + elif s.isdigit(): + return s + else: + if policy_to_name_map.has_key(s): + return policy_to_name_map[s] + else: + print "Cannot find this polcy. 
Given policy is:\n",str(i) + print "Keys in policy_to_name_map: " + for k in policy_to_name_map.keys(): + print k + sys.exit() + + +### Types + +class Type(object): + def __init__(self,py_type,dom): + self.py_type=py_type + self.dom=dom + +class BoolType(Type): + def __init__(self): + super(BoolType,self).__init__(bool,{True,False}) + +### Case Expressions + +class CaseExpression(object): + def __call__(self,state,event): + raise NotImplementedError + + def __eq__(self, other): + return test_eq(self,other) + + def __ne__(self,other): + return e_test_ne(self,other) + +### Case Atoms + +class CaseAtom(CaseExpression): + def model(self): + return str(self) + +class V(CaseAtom): + def __init__(self,s): + self.name=s + + def __call__(self,state,event): + return state[self.name] + + def __str__(self): + return 'V(' + self.name + ')' + + def model(self): + return str(self.name) + +class E(CaseAtom): + def __init__(self,s): + self.name=s + self.type=None + + def add_type(self,t): + self.type=t + + def __call__(self,state,event): + return event[self.name] + + def __str__(self): + return 'E(' + self.name + ')' + + def model(self): + if self.type is None: + return '{???}' + else: + return '{' + ','.join(map(to_smv,self.type.dom)) + '}' + +class C(CaseAtom): + def __init__(self,val): + self.val = val + + def __call__(self,state,event): + return self.val + + def __str__(self): + if isinstance(self.val,flood): + return 'C(flood)' + elif isinstance(self.val,fwd): + return 'C(' + '_'.join(str(self.val).split()) + ')' + elif isinstance(self.val,NonDetermPolicy): + return 'C(' + ' union '.join(self.val.getList()) + else: +# policynum = int(hashlib.md5(str(self.val)).hexdigest(), 16) +# return 'C(' + 'policy_' + str(policynum) + ')' + return 'C(' + str(self.val) + ')' + + def model(self): + return to_smv(self.val) + +### Case Tests + +class CaseTest(CaseExpression): + def __and__(self,other): + return test_and(self,other) + + def model(self): + return str(self) + +class test_eq(CaseTest): + def __init__(self,l,r): + self.l = l + self.r = r + + def __call__(self,state,event): + return self.l(state,event)==self.r(state,event) + + def __str__(self): + return '(' + str(self.l) + '=' + str(self.r) + ')' + + def model(self): + return '(' + self.l.model() + '=' + self.r.model() + ')' + +class test_ne(CaseTest): + def __init__(self,l,r): + self.l = l + self.r = r + + def __call__(self,state,event): + return self.l(state,event)!=self.r(state,event) + + def __str__(self): + return '(' + str(self.l) + '!=' + str(self.r) + ')' + + def model(self): + return '(' + self.l.model() + '!=' + self.r.model() + ')' + +class test_and(CaseTest): + def __init__(self,l,r): + self.l = l + self.r = r + + def __call__(self,state,event): + return self.l(state,event) and self.r(state,event) + + def __str__(self): + return str(self.l) + ' & ' + str(self.r) + + def model(self): + l_model = self.l.model() + if l_model == 'TRUE': + return self.r.model() +# return self.r.model() + return self.l.model() + ' & ' + self.r.model() + +class test_and_true(CaseTest): + def __init__(self,l,r): + self.l = l + self.r = r + + def __call__(self,state,event): + return self.l(state,event)==True and self.r(state,event)==True + + def __str__(self): + return str(self.l) + ' & ' + str(self.r) + + def model(self): + l_model = self.l.model() + if l_model == 'TRUE': + return self.r.model() + return self.l.model() + ' & ' + self.r.model() + + +class TrueTest(test_eq): + def __init__(self): + super(TrueTest,self).__init__(C(True),C(True)) + + def __str__(self): + 
return 'TrueTest' + + def model(self): + return 'TRUE' + +class is_true(test_eq): + def __init__(self,atom): + super(is_true,self).__init__(atom,C(True)) + + def __str__(self): + return 'is_true(' + str(self.l) + ')' + + def model(self): + return self.l.model() + +class occurred(test_ne): + def __init__(self,event): + super(occurred,self).__init__(event,C(None)) + + def __str__(self): + return 'occurred(' + str(self.l) + ')' + + def model(self): + return 'TRUE' + +### Case + +class case(object): + def __init__(self,tst,rslt): + self.tst=tst + self.rslt=rslt + + def __str__(self): + return str(self.tst) + '\t: ' + str(self.rslt) + + def model(self): + return self.tst.model() + '\t: ' + self.rslt.model() + +class default(case): + def __init__(self,rslt): + super(default,self).__init__(TrueTest(),rslt) + +### Transition + +class Transition(object): + def __init__(self,var_name): + self.var_name = var_name + self.cases = list() + + def case(self,tst,rslt): + new_case = case(tst,rslt) + self.cases.append(new_case) + + def default(self,rslt): + new_case = default(rslt) + self.cases.append(new_case) + + def __call__(self,state,event): + for c in self.cases: + if c.tst(state,event): + return c.rslt(state,event) + raise RuntimeError + + def __str__(self): + r = 'next(' + self.var_name + ') :=\n' + r += '\tcase\n' + for c in self.cases: + r += '\t\t' + str(c) + ';\n' + r += '\tesac;' + return r + + def model(self): + r = 'next(' + self.var_name + ') :=\n' + r += '\tcase\n' + for c in self.cases: + r += '\t\t' + c.model() + ';\n' + r += '\tesac;' + return r + +def transition(cases_fn): + var_name = cases_fn.__name__ + class DecoratedTransition(Transition): + def __init__(self,var_name): + Transition.__init__(self,var_name) + self.event = E(var_name) + cases_fn(self) + if (len(self.cases)==0 or + not isinstance(self.cases[-1].tst,TrueTest)): + self.default(V(var_name)) + + DecoratedTransition.__name__ = var_name + return DecoratedTransition(var_name) + +### helper methods + +def events(i): + if isinstance(i,E): + return {i} + elif isinstance(i,CaseAtom): + return set() + elif isinstance(i,TrueTest): + return set() + elif isinstance(i,occurred): + return events(i.l) + elif isinstance(i,is_true): + return events(i.l) + elif isinstance(i,CaseTest): + return events(i.l) | events(i.r) + elif isinstance(i,case): + return events(i.tst) | events(i.rslt) + elif isinstance(i,Transition): + ts = map(events,i.cases) + r = set() + for t in ts: + r |= t + return r + else: + raise TypeError + +def variables(i): + if isinstance(i,V): + return {i.name} + elif isinstance(i,CaseAtom): + return set() + elif isinstance(i,TrueTest): + return set() + elif isinstance(i,occurred): + return set() + elif isinstance(i,is_true): + return variables(i.l) + elif isinstance(i,CaseTest): + return variables(i.l) | variables(i.r) + elif isinstance(i,case): + return variables(i.tst) | variables(i.rslt) + elif isinstance(i,Transition): + ts = map(variables,i.cases) + r = set() + for t in ts: + r |= t + return r + else: + raise TypeError + +### FSM Variable + +class FSMVar(dict): + __slots__ = ["_dict"] + def __init__(self,**kwargs): + self._dict = dict(endogenous=False,exogenous=False) + self._dict.update(dict(**kwargs)) + + def get(self, key, default=None): + return self._dict.get(key, default) + + def __getitem__(self, item): + return self._dict[item] + +### FSM Definition + +class FSMDef(object): + def __init__(self,**kwargs): + self.map = dict(**kwargs) + + +### Nondeterminstic Policy +class NonDetermPolicy(DynamicPolicy): + def 
__init__(self, policy_list): + self.policy_list = policy_list + self.initial_policy = policy_list[choice(range(len(policy_list)))] + self.lock = Lock() + super(NonDetermPolicy,self).__init__(self.initial_policy) + + def getList(self): + return list([str(i) for i in self.policy_list]) + +def fsm_def_compose(fsm_def_1,fsm_def_2,comp_op_str): + + # first, define. + composed_fsm_def = FSMDef() + + # Put variables except policy from fsm1 + for k,v in fsm_def_1.map.items(): + t=v['type'] + if t.py_type!=Policy: + composed_fsm_def.map[k] = v + + # Put variables except policy from fsm2 + for k,v in fsm_def_2.map.items(): + t=v['type'] + if t.py_type!=Policy: + composed_fsm_def.map[k] = v + + # Build Policy + policy_fsmvar_map={} + + policy_set = set() + p_named_set = set() + for k1,v1 in fsm_def_1.map.items(): + t1 = v1['type'] + for k2,v2 in fsm_def_2.map.items(): + t2 = v2['type'] + if t1.py_type==Policy and t2.py_type==Policy: + for p1 in t1.dom: + for p2 in t2.dom: + if comp_op_str=='>>': + policy_to_hash(p_named_set,policy_set, p1>>p2) + elif comp_op_str=='+': + policy_to_hash(p_named_set,policy_set, p1+p2) + else: + print 'Unknown composition operator. Abort' + sys.exit() + sorted_list = sorted(list(policy_set)) + for idx,p in enumerate(sorted_list): + policy_to_name_map[p] = 'policy_'+str(idx+1) + + ### Policy's type and possible values + real = set() + pols_list = policy_to_name_map.values() + pols_list.extend(list(p_named_set)) + for i in pols_list: + if i.startswith('policy'): + for j in policy_to_name_map: + if policy_to_name_map[j] == i: + real.add(complexPolicyStr_to_actualPolicy[j]) + else: + real.add(eval(i)) + p_type_defined = Type(Policy, real) + policy_fsmvar_map['type'] = p_type_defined + + ### Policy init + for k,v in fsm_def_1.map.items(): + t = v['type'] + if t.py_type==Policy: + f1_init = v['init'] + for k,v in fsm_def_2.map.items(): + t = v['type'] + if t.py_type==Policy: + f2_init = v['init'] + if comp_op_str=='>>': + policy_fsmvar_map['init'] = f1_init >> f2_init + elif comp_op_str=='+': + policy_fsmvar_map['init'] = f1_init + f2_init + else: + print 'Unknown composition operator. Abort' + sys.exit() + + ### Policy trans + trans_p = Transition('policy') + for k,v in fsm_def_1.map.items(): + t = v['type'] + if t.py_type==Policy: + f1_trans = v['trans'] + for k,v in fsm_def_2.map.items(): + t = v['type'] + if t.py_type==Policy: + f2_trans = v['trans'] + + + #### Combined transition + tests_and_results = {} + for c1 in f1_trans.cases: + for c2 in f2_trans.cases: + if type(c1.tst)==TrueTest and type(c2.tst)==TrueTest: + this_tst = TrueTest() + test_str = 'TRUE' + elif type(c1.tst)==TrueTest: + this_tst = c2.tst + test_str = str(c2.tst.model()) + elif type(c2.tst)==TrueTest: + this_tst = c1.tst + test_str = str(c1.tst.model()) + else: + this_tst = c1.tst & c2.tst + test_str = str(c1.tst.model()) + ' & ' + str(c2.tst.model()) + c1val = None + c2val = None + if isinstance(c1.rslt,V): + print 'Default value not given. Cannot deal with that now. Put it default for each variable.' + sys.exit() + for k,v in fsm_def_1.map.items(): + if k==c1.rslt.name: + c1val = v['init'] + break + if isinstance(c2.rslt,V): + print 'Default value not given. Cannot deal with that now. Put it default for each variable.' 
+ sys.exit() + for k,v in fsm_def_1.map.items(): + if k==c2.rslt.name: + c2val = v['init'] + break + if c1val==None: + c1val = c1.rslt.val + if c2val==None: + c2val = c2.rslt.val + if comp_op_str=='>>': + this_result = C(c1val >> c2val) + elif comp_op_str=='+': + this_result = C(c1val + c2val) + else: + print 'Unknown composition operator. Abort' + sys.exit() + tests_and_results[this_tst] = this_result + + ## sort + keys = tests_and_results.keys() + key_operator_count_map = {} + for k in keys: + key_operator_count_map[k] = str(k).count(' & ') + if isinstance(k,TrueTest): + key_operator_count_map[k] = -1 + for sk in sorted(key_operator_count_map.iteritems(), key=operator.itemgetter(1),reverse=True): + trans_p.case(sk[0],tests_and_results[sk[0]]) + + # Trans value + policy_fsmvar_map['trans'] = trans_p + composed_fsm_def.map['policy'] = policy_fsmvar_map + + +# print '========RETURN======' +# for k,v in composed_fsm_def.map.items(): +# t=v['type'] +# if t.py_type==Policy: +# print k +# print v['trans'] +# print '==============' +# + + + + + return composed_fsm_def, fsm_def_to_smv_model(composed_fsm_def) + +def fsm_def_to_smv_model_compose(fsm_def_1,fsm_def_2,comp_op_str): + # First, see if there are complex policies. + # If so, number them, and save info. + policy_set = set() + p_named_set = set() + for k1,v1 in fsm_def_1.map.items(): + t1 = v1['type'] + for k2,v2 in fsm_def_2.map.items(): + t2 = v2['type'] + if t1.py_type!=bool and t2.py_type!=bool: + for p1 in t1.dom: + for p2 in t2.dom: + if comp_op_str=='>>': + policy_to_hash(p_named_set,policy_set, p1>>p2) + elif comp_op_str=='+': + policy_to_hash(p_named_set,policy_set, p1+p2) + else: + print 'Unknown composition operator. Abort' + sys.exit() + sorted_list = sorted(list(policy_set)) + for idx,p in enumerate(sorted_list): + policy_to_name_map[p] = 'policy_'+str(idx+1) + + # Start + s = 'MODULE main\n' + + ### VAR part + s += ' VAR\n' + for k,v in fsm_def_1.map.items(): + t = v['type'] + if t.py_type==bool: + s += ' %s\t: %s;\n' % (k,'boolean') + elif t.py_type!=Policy: + s += ' %s\t: %s;\n' % (k,'{' + ','.join(map(to_smv,t.dom)) + '}') + + for k,v in fsm_def_2.map.items(): + t = v['type'] + if t.py_type==bool: + s += ' %s\t: %s;\n' % (k,'boolean') + elif t.py_type!=Policy: + s += ' %s\t: %s;\n' % (k,'{' + ','.join(map(to_smv,t.dom)) + '}') + pols_list = policy_to_name_map.values() + pols_list.extend(list(p_named_set)) + s += ' %s\t: %s;\n' % ('policy','{' + ','.join(pols_list) + '}') + + ### ASSIGN part + s+= ' ASSIGN\n' + for k,v in fsm_def_1.map.items(): + t = v['type'] + if t.py_type!=Policy: + s += ' init(%s) := %s;\n' % (k,to_smv(v['init'])) + else: + f1_init = v['init'] + for k,v in fsm_def_2.map.items(): + t = v['type'] + if t.py_type!=Policy: + s += ' init(%s) := %s;\n' % (k,to_smv(v['init'])) + else: + f2_init = v['init'] + if comp_op_str=='>>': + s += ' init(%s) := %s;\n' % ('policy',to_smv(f1_init >> f2_init)) + elif comp_op_str=='+': + s += ' init(%s) := %s;\n' % ('policy',to_smv(f1_init + f2_init)) + else: + print 'Unknown composition operator. 
Abort' + sys.exit() + + ### NEXT part + for k,v in fsm_def_1.map.items(): + t = v['type'] + if t.py_type!=Policy: + s += ' '+v['trans'].model()+'\n' + else: + f1_trans = v['trans'] + for k,v in fsm_def_2.map.items(): + t = v['type'] + if t.py_type!=Policy: + s += ' '+v['trans'].model()+'\n' + else: + f2_trans = v['trans'] + + + #### Combined transition + tests_and_results = {} + for c1 in f1_trans.cases: + for c2 in f2_trans.cases: + if type(c1.tst)==TrueTest and type(c2.tst)==TrueTest: + test_str = 'TRUE' + elif type(c1.tst)==TrueTest: + test_str = str(c2.tst.model()) + elif type(c2.tst)==TrueTest: + test_str = str(c1.tst.model()) + else: + test_str = str(c1.tst.model()) + ' & ' + str(c2.tst.model()) + if comp_op_str=='>>': + result = to_smv( c1.rslt.val >> c2.rslt.val) + elif comp_op_str=='+': + result = to_smv( c1.rslt.val + c2.rslt.val) + else: + print 'Unknown composition operator. Abort' + sys.exit() + tests_and_results[test_str] = result + + ## sort + keys = tests_and_results.keys() + key_operator_count_map = {} + for k in keys: + key_operator_count_map[k] = k.count(' & ') + ## print policy case + s += ' next(policy) := \n case\n' + for sk in sorted(key_operator_count_map.iteritems(), key=operator.itemgetter(1),reverse=True): + s += ' ' + sk[0] + ' : ' + tests_and_results[sk[0]] + ';\n' + s += ' esac;\n' + + # Comment about policies + mapping_str = ' \n\n=====================================================================\n' + mapping_str = mapping_str + 'PolicyName (used in NuSMV) to ActualPolicy (used in Pyretic) Mapping\n' + mapping_str = mapping_str + '=====================================================================\n' + sorted_tuple = sorted(policy_to_name_map.iteritems(), key=operator.itemgetter(1)) + + for p in sorted_tuple: + policy = p[0] + pname = p[1] + mapping_str = mapping_str + '---------------------------------------------\n' + mapping_str = mapping_str + pname + ': (shown below)\n' + mapping_str = mapping_str + '---------------------------------------------\n' + mapping_str = mapping_str + policy + '\n' + mapping_str = mapping_str + '---------------------------------------------\n\n' + + # Make it as comment, and add to NuSMV input file. + mapping_str = as_comment(mapping_str) + s = s + mapping_str + + return s + + +def fsm_def_to_smv_model(fsm_def): + + + # First, see if there are complex policies. + # If so, number them, and save info. 
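    # The finished model assembled below has the usual NuSMV shape; for the
    # simple auth app (one boolean event variable plus a {drop, identity}
    # policy domain) it looks roughly like:
    #
    #   MODULE main
    #    VAR
    #     authenticated : boolean;
    #     policy        : {drop, policy_1};
    #    ASSIGN
    #     init(authenticated) := FALSE;
    #     init(policy)        := drop;
    #     next(authenticated) := case ... esac;
    #     next(policy)        := case ... esac;
    #
    # followed by a commented-out mapping from policy_1, policy_2, ... back to
    # the actual Pyretic policies.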
+ policy_set = set() + p_named_set = set() + for k,v in fsm_def.map.items(): + t = v['type'] + if t.py_type==Policy: + for p in t.dom: + policy_to_hash(p_named_set,policy_set, p) + sorted_list = sorted(list(policy_set)) + for idx,p in enumerate(sorted_list): + policy_to_name_map[p] = 'policy_'+str(idx+1) + + # Start + s = 'MODULE main\n' + s += ' VAR\n' + for k,v in fsm_def.map.items(): + t = v['type'] + if t.py_type==bool: + s += ' %s\t: %s;\n' % (k,'boolean') + else: + s += ' %s\t: %s;\n' % (k,'{' + ','.join(map(to_smv,t.dom)) + '}') + + s+= ' ASSIGN\n' + for k,v in fsm_def.map.items(): + s += ' init(%s) := %s;\n' % (k,to_smv(v['init'])) + + for k,v in fsm_def.map.items(): + s += ' '+v['trans'].model()+'\n' + + ## Add comment about policy_name to actual_policy mapping + mapping_str = ' \n\n=====================================================================\n' + mapping_str = mapping_str + 'PolicyName (used in NuSMV) to ActualPolicy (used in Pyretic) Mapping\n' + mapping_str = mapping_str + '=====================================================================\n' + sorted_tuple = sorted(policy_to_name_map.iteritems(), key=operator.itemgetter(1)) + + for p in sorted_tuple: + policy = p[0] + pname = p[1] + mapping_str = mapping_str + '---------------------------------------------\n' + mapping_str = mapping_str + pname + ': (shown below)\n' + mapping_str = mapping_str + '---------------------------------------------\n' + mapping_str = mapping_str + policy + '\n' + mapping_str = mapping_str + '---------------------------------------------\n\n' + + # Make it as comment, and add to NuSMV input file. + mapping_str = as_comment(mapping_str) + s = s + mapping_str + + return s + diff --git a/pyretic/kinetic/mininet_topos/example_topos.py b/pyretic/kinetic/mininet_topos/example_topos.py new file mode 100644 index 00000000..cfdecc64 --- /dev/null +++ b/pyretic/kinetic/mininet_topos/example_topos.py @@ -0,0 +1,124 @@ +from mininet.topo import Topo +from mininet.net import Mininet +from mininet.node import RemoteController +from mininet.node import CPULimitedHost +from mininet.link import TCLink +from mininet.cli import CLI +from mininet.util import irange,dumpNodeConnections +from mininet.log import setLogLevel + +################################################################################ +# sudo mn --controller=remote,ip=127.0.0.1 --custom example_topos.py --topo linear --link=tc --mac --arp +################################################################################ + + +class Linear( Topo ): + def __init__(self,nswitches,hostfanout): + + # Initialize topology + Topo.__init__( self ) + switches = [] + host_machines = [] + + # Create switches + for s in range(nswitches): + switches.append(self.addSwitch( 's%s'%(s+1) )) + + # Host creation + for h in range(hostfanout): + host_machines.append(self.addHost( 'h%s'%(h+1+s*hostfanout) )) + + # Wiring switches and hosts + for idx,s in enumerate(switches): + + if idx < len(switches)-1: + self.addLink ( s, switches[idx+1]) + + # Wiring hosts to switch + for h in range(hostfanout): + self.addLink( host_machines[h + idx*hostfanout], s ) + +class Server_LB( Topo ): + def __init__(self): + # Initialize topology + Topo.__init__( self ) + + # Add hosts and switches + h1 = self.addHost( 'h1' ) + h2 = self.addHost( 'h2' ) + h3 = self.addHost( 'h3' ) + h4 = self.addHost( 'h4' ) + h5 = self.addHost( 'h5' ) + + s1 = self.addSwitch( 's1' ) + s2 = self.addSwitch( 's2' ) + + # Add links + self.addLink( h1, s1 ) + self.addLink( h2, s1 ) + + self.addLink( h3, s2, 
delay='50ms') + self.addLink( h4, s2, delay='100ms' ) + self.addLink( h5, s2, delay='150ms' ) + + self.addLink( s1, s2 ) + + +class Traffic_LB( Topo ): + def __init__(self): + # Initialize topology + Topo.__init__( self ) + + # Add hosts and switches + h1 = self.addHost( 'h1' ) + h2 = self.addHost( 'h2' ) + + s1 = self.addSwitch( 's1' ) + s2 = self.addSwitch( 's2' ) + s3 = self.addSwitch( 's3' ) + s4 = self.addSwitch( 's4' ) + s5 = self.addSwitch( 's5' ) + + # Add links + self.addLink( h1, s1 ) + self.addLink( h2, s5 ) + self.addLink( s1, s2 ) + self.addLink( s1, s3 ) + self.addLink( s1, s4 ) + self.addLink( s2, s5 ) + self.addLink( s3, s5 ) + self.addLink( s4, s5 ) + + +class Ratelimit( Topo ): + def __init__(self): + # Initialize topology + Topo.__init__( self ) + + # Add hosts and switches + h1 = self.addHost( 'h1' ) + h2 = self.addHost( 'h2' ) + + s1 = self.addSwitch( 's1' ) + s2 = self.addSwitch( 's2' ) + s3 = self.addSwitch( 's3' ) + s4 = self.addSwitch( 's4' ) + s5 = self.addSwitch( 's5' ) + + # Add links + self.addLink( h1, s1 ) + self.addLink( h2, s5 ) + self.addLink( s1, s2 ) + self.addLink( s1, s3, delay='50ms') + self.addLink( s1, s4, delay='200ms') + self.addLink( s2, s5 ) + self.addLink( s3, s5 ) + self.addLink( s4, s5 ) + +##### Topologies ##### +topos = { + 'linear': ( lambda: Linear(3,2) ), \ + 'server_lb' : ( lambda: Server_LB() ), \ + 'ratelimit' : ( lambda: Ratelimit() ), \ + 'traffic_lb' : ( lambda: Traffic_LB() ), \ + } diff --git a/pyretic/kinetic/other_ctrl_apps/Firewall.java b/pyretic/kinetic/other_ctrl_apps/Firewall.java new file mode 100644 index 00000000..b5f197ae --- /dev/null +++ b/pyretic/kinetic/other_ctrl_apps/Firewall.java @@ -0,0 +1,562 @@ +public class Firewall implements IFirewallService, IOFMessageListener, + IFloodlightModule { + + // service modules needed + protected IFloodlightProviderService floodlightProvider; + protected IStorageSourceService storageSource; + protected IRestApiService restApi; + protected static Logger logger; + + protected List rules; // protected by synchronized + protected boolean enabled; + protected int subnet_mask = IPv4.toIPv4Address("255.255.255.0"); + + // constant strings for storage/parsing + public static final String TABLE_NAME = "controller_firewallrules"; + public static final String COLUMN_RULEID = "ruleid"; + public static final String COLUMN_DPID = "dpid"; + public static final String COLUMN_IN_PORT = "in_port"; + public static final String COLUMN_DL_SRC = "dl_src"; + public static final String COLUMN_DL_DST = "dl_dst"; + public static final String COLUMN_DL_TYPE = "dl_type"; + public static final String COLUMN_NW_SRC_PREFIX = "nw_src_prefix"; + public static final String COLUMN_NW_SRC_MASKBITS = "nw_src_maskbits"; + public static final String COLUMN_NW_DST_PREFIX = "nw_dst_prefix"; + public static final String COLUMN_NW_DST_MASKBITS = "nw_dst_maskbits"; + public static final String COLUMN_NW_PROTO = "nw_proto"; + public static final String COLUMN_TP_SRC = "tp_src"; + public static final String COLUMN_TP_DST = "tp_dst"; + public static final String COLUMN_WILDCARD_DPID = "wildcard_dpid"; + public static final String COLUMN_WILDCARD_IN_PORT = "wildcard_in_port"; + public static final String COLUMN_WILDCARD_DL_SRC = "wildcard_dl_src"; + public static final String COLUMN_WILDCARD_DL_DST = "wildcard_dl_dst"; + public static final String COLUMN_WILDCARD_DL_TYPE = "wildcard_dl_type"; + public static final String COLUMN_WILDCARD_NW_SRC = "wildcard_nw_src"; + public static final String COLUMN_WILDCARD_NW_DST = 
"wildcard_nw_dst"; + public static final String COLUMN_WILDCARD_NW_PROTO = "wildcard_nw_proto"; + public static final String COLUMN_WILDCARD_TP_SRC = "wildcard_tp_src"; + public static final String COLUMN_WILDCARD_TP_DST = "wildcard_tp_dst"; + public static final String COLUMN_PRIORITY = "priority"; + public static final String COLUMN_ACTION = "action"; + public static String ColumnNames[] = { COLUMN_RULEID, COLUMN_DPID, + COLUMN_IN_PORT, COLUMN_DL_SRC, COLUMN_DL_DST, COLUMN_DL_TYPE, + COLUMN_NW_SRC_PREFIX, COLUMN_NW_SRC_MASKBITS, COLUMN_NW_DST_PREFIX, + COLUMN_NW_DST_MASKBITS, COLUMN_NW_PROTO, COLUMN_TP_SRC, + COLUMN_TP_DST, COLUMN_WILDCARD_DPID, COLUMN_WILDCARD_IN_PORT, + COLUMN_WILDCARD_DL_SRC, COLUMN_WILDCARD_DL_DST, + COLUMN_WILDCARD_DL_TYPE, COLUMN_WILDCARD_NW_SRC, + COLUMN_WILDCARD_NW_DST, COLUMN_WILDCARD_NW_PROTO, COLUMN_PRIORITY, + COLUMN_ACTION }; + + @Override + public String getName() { + return "firewall"; + } + + @Override + public boolean isCallbackOrderingPrereq(OFType type, String name) { + // no prereq + return false; + } + + @Override + public boolean isCallbackOrderingPostreq(OFType type, String name) { + return (type.equals(OFType.PACKET_IN) && name.equals("forwarding")); + } + + @Override + public Collection> getModuleServices() { + Collection> l = new ArrayList>(); + l.add(IFirewallService.class); + return l; + } + + @Override + public Map, IFloodlightService> getServiceImpls() { + Map, IFloodlightService> m = new HashMap, IFloodlightService>(); + // We are the class that implements the service + m.put(IFirewallService.class, this); + return m; + } + + @Override + public Collection> getModuleDependencies() { + Collection> l = new ArrayList>(); + l.add(IFloodlightProviderService.class); + l.add(IStorageSourceService.class); + l.add(IRestApiService.class); + return l; + } + + + protected ArrayList readRulesFromStorage() { + ArrayList l = new ArrayList(); + + try { + Map row; + + // (..., null, null) for no predicate, no ordering + IResultSet resultSet = storageSource.executeQuery(TABLE_NAME, + ColumnNames, null, null); + + // put retrieved rows into FirewallRules + for (Iterator it = resultSet.iterator(); it.hasNext();) { + row = it.next().getRow(); + // now, parse row + FirewallRule r = new FirewallRule(); + if (!row.containsKey(COLUMN_RULEID) + || !row.containsKey(COLUMN_DPID)) { + logger.error( + "skipping entry with missing required 'ruleid' or 'switchid' entry: {}", + row); + return l; + } + try { + r.ruleid = Integer + .parseInt((String) row.get(COLUMN_RULEID)); + r.dpid = Long.parseLong((String) row.get(COLUMN_DPID)); + + for (String key : row.keySet()) { + if (row.get(key) == null) + continue; + if (key.equals(COLUMN_RULEID) + || key.equals(COLUMN_DPID) + || key.equals("id")) { + continue; // already handled + } + + else if (key.equals(COLUMN_IN_PORT)) { + r.in_port = Short.parseShort((String) row + .get(COLUMN_IN_PORT)); + } + + else if (key.equals(COLUMN_DL_SRC)) { + r.dl_src = Long.parseLong((String) row + .get(COLUMN_DL_SRC)); + } + + else if (key.equals(COLUMN_DL_DST)) { + r.dl_dst = Long.parseLong((String) row + .get(COLUMN_DL_DST)); + } + + else if (key.equals(COLUMN_DL_TYPE)) { + r.dl_type = Short.parseShort((String) row + .get(COLUMN_DL_TYPE)); + } + + else if (key.equals(COLUMN_NW_SRC_PREFIX)) { + r.nw_src_prefix = Integer.parseInt((String) row + .get(COLUMN_NW_SRC_PREFIX)); + } + + else if (key.equals(COLUMN_NW_SRC_MASKBITS)) { + r.nw_src_maskbits = Integer.parseInt((String) row + .get(COLUMN_NW_SRC_MASKBITS)); + } + + else if 
(key.equals(COLUMN_NW_DST_PREFIX)) { + r.nw_dst_prefix = Integer.parseInt((String) row + .get(COLUMN_NW_DST_PREFIX)); + } + + else if (key.equals(COLUMN_NW_DST_MASKBITS)) { + r.nw_dst_maskbits = Integer.parseInt((String) row + .get(COLUMN_NW_DST_MASKBITS)); + } + + else if (key.equals(COLUMN_NW_PROTO)) { + r.nw_proto = Short.parseShort((String) row + .get(COLUMN_NW_PROTO)); + } + + else if (key.equals(COLUMN_TP_SRC)) { + r.tp_src = Short.parseShort((String) row + .get(COLUMN_TP_SRC)); + } + + else if (key.equals(COLUMN_TP_DST)) { + r.tp_dst = Short.parseShort((String) row + .get(COLUMN_TP_DST)); + } + + else if (key.equals(COLUMN_WILDCARD_DPID)) { + r.wildcard_dpid = Boolean.parseBoolean((String) row + .get(COLUMN_WILDCARD_DPID)); + } + + else if (key.equals(COLUMN_WILDCARD_IN_PORT)) { + r.wildcard_in_port = Boolean + .parseBoolean((String) row + .get(COLUMN_WILDCARD_IN_PORT)); + } + + else if (key.equals(COLUMN_WILDCARD_DL_SRC)) { + r.wildcard_dl_src = Boolean + .parseBoolean((String) row + .get(COLUMN_WILDCARD_DL_SRC)); + } + + else if (key.equals(COLUMN_WILDCARD_DL_DST)) { + r.wildcard_dl_dst = Boolean + .parseBoolean((String) row + .get(COLUMN_WILDCARD_DL_DST)); + } + + else if (key.equals(COLUMN_WILDCARD_DL_TYPE)) { + r.wildcard_dl_type = Boolean + .parseBoolean((String) row + .get(COLUMN_WILDCARD_DL_TYPE)); + } + + else if (key.equals(COLUMN_WILDCARD_NW_SRC)) { + r.wildcard_nw_src = Boolean + .parseBoolean((String) row + .get(COLUMN_WILDCARD_NW_SRC)); + } + + else if (key.equals(COLUMN_WILDCARD_NW_DST)) { + r.wildcard_nw_dst = Boolean + .parseBoolean((String) row + .get(COLUMN_WILDCARD_NW_DST)); + } + + else if (key.equals(COLUMN_WILDCARD_NW_PROTO)) { + r.wildcard_nw_proto = Boolean + .parseBoolean((String) row + .get(COLUMN_WILDCARD_NW_PROTO)); + } + + else if (key.equals(COLUMN_PRIORITY)) { + r.priority = Integer.parseInt((String) row + .get(COLUMN_PRIORITY)); + } + + else if (key.equals(COLUMN_ACTION)) { + int tmp = Integer.parseInt((String) row.get(COLUMN_ACTION)); + if (tmp == FirewallRule.FirewallAction.DENY.ordinal()) + r.action = FirewallRule.FirewallAction.DENY; + else if (tmp == FirewallRule.FirewallAction.ALLOW.ordinal()) + r.action = FirewallRule.FirewallAction.ALLOW; + else { + r.action = null; + logger.error("action not recognized"); + } + } + } + } catch (ClassCastException e) { + logger.error( + "skipping rule {} with bad data : " + + e.getMessage(), r.ruleid); + } + if (r.action != null) + l.add(r); + } + } catch (StorageException e) { + logger.error("failed to access storage: {}", e.getMessage()); + // if the table doesn't exist, then wait to populate later via + // setStorageSource() + } + + // now, sort the list based on priorities + Collections.sort(l); + + return l; + } + + @Override + public void init(FloodlightModuleContext context) + throws FloodlightModuleException { + floodlightProvider = context + .getServiceImpl(IFloodlightProviderService.class); + storageSource = context.getServiceImpl(IStorageSourceService.class); + restApi = context.getServiceImpl(IRestApiService.class); + rules = new ArrayList(); + logger = LoggerFactory.getLogger(Firewall.class); + + // start disabled + enabled = false; + } + + @Override + public void startUp(FloodlightModuleContext context) { + // register REST interface + restApi.addRestletRoutable(new FirewallWebRoutable()); + + // always place firewall in pipeline at bootup + floodlightProvider.addOFMessageListener(OFType.PACKET_IN, this); + + // storage, create table and read rules + storageSource.createTable(TABLE_NAME, null); 
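        // COLUMN_RULEID is the table's primary key, so deleteRule() below can
        // remove a stored rule by its rule id.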
+ storageSource.setTablePrimaryKeyName(TABLE_NAME, COLUMN_RULEID); + this.rules = readRulesFromStorage(); + } + + @Override + public Command receive(IOFSwitch sw, OFMessage msg, FloodlightContext cntx) { + if (!this.enabled) + return Command.CONTINUE; + + switch (msg.getType()) { + case PACKET_IN: + IRoutingDecision decision = null; + if (cntx != null) { + decision = IRoutingDecision.rtStore.get(cntx, + IRoutingDecision.CONTEXT_DECISION); + + return this.processPacketInMessage(sw, (OFPacketIn) msg, + decision, cntx); + } + break; + default: + break; + } + + return Command.CONTINUE; + } + + @Override + public void enableFirewall(boolean enabled) { + logger.info("Setting firewall to {}", enabled); + this.enabled = enabled; + } + + @Override + public List getRules() { + return this.rules; + } + + // Only used to serve REST GET + // Similar to readRulesFromStorage(), which actually checks and stores + // record into FirewallRule list + @Override + public List> getStorageRules() { + ArrayList> l = new ArrayList>(); + try { + // null1=no predicate, null2=no ordering + IResultSet resultSet = storageSource.executeQuery(TABLE_NAME, + ColumnNames, null, null); + for (Iterator it = resultSet.iterator(); it.hasNext();) { + l.add(it.next().getRow()); + } + } catch (StorageException e) { + logger.error("failed to access storage: {}", e.getMessage()); + // if the table doesn't exist, then wait to populate later via + // setStorageSource() + } + return l; + } + + @Override + public String getSubnetMask() { + return IPv4.fromIPv4Address(this.subnet_mask); + } + + @Override + public void setSubnetMask(String newMask) { + if (newMask.trim().isEmpty()) + return; + this.subnet_mask = IPv4.toIPv4Address(newMask.trim()); + } + + @Override + public synchronized void addRule(FirewallRule rule) { + + // generate random ruleid for each newly created rule + // may want to return to caller if useful + // may want to check conflict + rule.ruleid = rule.genID(); + + int i = 0; + // locate the position of the new rule in the sorted arraylist + for (i = 0; i < this.rules.size(); i++) { + if (this.rules.get(i).priority >= rule.priority) + break; + } + // now, add rule to the list + if (i <= this.rules.size()) { + this.rules.add(i, rule); + } else { + this.rules.add(rule); + } + // add rule to database + Map entry = new HashMap(); + entry.put(COLUMN_RULEID, Integer.toString(rule.ruleid)); + entry.put(COLUMN_DPID, Long.toString(rule.dpid)); + entry.put(COLUMN_IN_PORT, Short.toString(rule.in_port)); + entry.put(COLUMN_DL_SRC, Long.toString(rule.dl_src)); + entry.put(COLUMN_DL_DST, Long.toString(rule.dl_dst)); + entry.put(COLUMN_DL_TYPE, Short.toString(rule.dl_type)); + entry.put(COLUMN_NW_SRC_PREFIX, Integer.toString(rule.nw_src_prefix)); + entry.put(COLUMN_NW_SRC_MASKBITS, Integer.toString(rule.nw_src_maskbits)); + entry.put(COLUMN_NW_DST_PREFIX, Integer.toString(rule.nw_dst_prefix)); + entry.put(COLUMN_NW_DST_MASKBITS, Integer.toString(rule.nw_dst_maskbits)); + entry.put(COLUMN_NW_PROTO, Short.toString(rule.nw_proto)); + entry.put(COLUMN_TP_SRC, Integer.toString(rule.tp_src)); + entry.put(COLUMN_TP_DST, Integer.toString(rule.tp_dst)); + entry.put(COLUMN_WILDCARD_DPID, + Boolean.toString(rule.wildcard_dpid)); + entry.put(COLUMN_WILDCARD_IN_PORT, + Boolean.toString(rule.wildcard_in_port)); + entry.put(COLUMN_WILDCARD_DL_SRC, + Boolean.toString(rule.wildcard_dl_src)); + entry.put(COLUMN_WILDCARD_DL_DST, + Boolean.toString(rule.wildcard_dl_dst)); + entry.put(COLUMN_WILDCARD_DL_TYPE, + Boolean.toString(rule.wildcard_dl_type)); + 
entry.put(COLUMN_WILDCARD_NW_SRC, + Boolean.toString(rule.wildcard_nw_src)); + entry.put(COLUMN_WILDCARD_NW_DST, + Boolean.toString(rule.wildcard_nw_dst)); + entry.put(COLUMN_WILDCARD_NW_PROTO, + Boolean.toString(rule.wildcard_nw_proto)); + entry.put(COLUMN_WILDCARD_TP_SRC, + Boolean.toString(rule.wildcard_tp_src)); + entry.put(COLUMN_WILDCARD_TP_DST, + Boolean.toString(rule.wildcard_tp_dst)); + entry.put(COLUMN_PRIORITY, Integer.toString(rule.priority)); + entry.put(COLUMN_ACTION, Integer.toString(rule.action.ordinal())); + storageSource.insertRow(TABLE_NAME, entry); + } + + @Override + public synchronized void deleteRule(int ruleid) { + Iterator iter = this.rules.iterator(); + while (iter.hasNext()) { + FirewallRule r = iter.next(); + if (r.ruleid == ruleid) { + // found the rule, now remove it + iter.remove(); + break; + } + } + // delete from database + storageSource.deleteRow(TABLE_NAME, Integer.toString(ruleid)); + } + + protected RuleWildcardsPair matchWithRule(IOFSwitch sw, OFPacketIn pi, + FloodlightContext cntx) { + FirewallRule matched_rule = null; + Ethernet eth = IFloodlightProviderService.bcStore.get(cntx, + IFloodlightProviderService.CONTEXT_PI_PAYLOAD); + WildcardsPair wildcards = new WildcardsPair(); + + synchronized (rules) { + Iterator iter = this.rules.iterator(); + FirewallRule rule = null; + // iterate through list to find a matching firewall rule + while (iter.hasNext()) { + // get next rule from list + rule = iter.next(); + + // check if rule matches + if (rule.matchesFlow(sw.getId(), pi.getInPort(), eth, wildcards) == true) { + matched_rule = rule; + break; + } + } + } + + // make a pair of rule and wildcards, then return it + RuleWildcardsPair ret = new RuleWildcardsPair(); + ret.rule = matched_rule; + if (matched_rule == null || matched_rule.action == FirewallRule.FirewallAction.DENY) { + ret.wildcards = wildcards.drop; + } else { + ret.wildcards = wildcards.allow; + } + return ret; + } + + protected boolean IPIsBroadcast(int IPAddress) { + // inverted subnet mask + int inv_subnet_mask = ~this.subnet_mask; + return ((IPAddress & inv_subnet_mask) == inv_subnet_mask); + } + + public Command processPacketInMessage(IOFSwitch sw, OFPacketIn pi, + IRoutingDecision decision, FloodlightContext cntx) { + Ethernet eth = IFloodlightProviderService.bcStore.get(cntx, + IFloodlightProviderService.CONTEXT_PI_PAYLOAD); + + // Allowing L2 broadcast + ARP broadcast request (also deny malformed + // broadcasts -> L2 broadcast + L3 unicast) + if (eth.isBroadcast() == true) { + boolean allowBroadcast = true; + // the case to determine if we have L2 broadcast + L3 unicast + // don't allow this broadcast packet if such is the case (malformed + // packet) + if ((eth.getPayload() instanceof IPv4) + && this.IPIsBroadcast(((IPv4) eth.getPayload()) + .getDestinationAddress()) == false) { + allowBroadcast = false; + } + if (allowBroadcast == true) { + if (logger.isTraceEnabled()) + logger.trace("Allowing broadcast traffic for PacketIn={}", + pi); + + decision = new RoutingDecision(sw.getId(), pi.getInPort() + , IDeviceService.fcStore. + get(cntx, IDeviceService.CONTEXT_SRC_DEVICE), + IRoutingDecision.RoutingAction.MULTICAST); + decision.addToContext(cntx); + } else { + if (logger.isTraceEnabled()) + logger.trace( + "Blocking malformed broadcast traffic for PacketIn={}", + pi); + + decision = new RoutingDecision(sw.getId(), pi.getInPort() + , IDeviceService.fcStore. 
+ get(cntx, IDeviceService.CONTEXT_SRC_DEVICE), + IRoutingDecision.RoutingAction.DROP); + decision.addToContext(cntx); + } + return Command.CONTINUE; + } + // check if we have a matching rule for this packet/flow + // and no decision is taken yet + if (decision == null) { + RuleWildcardsPair match_ret = this.matchWithRule(sw, pi, cntx); + FirewallRule rule = match_ret.rule; + + if (rule == null || rule.action == FirewallRule.FirewallAction.DENY) { + decision = new RoutingDecision(sw.getId(), pi.getInPort() + , IDeviceService.fcStore. + get(cntx, IDeviceService.CONTEXT_SRC_DEVICE), + IRoutingDecision.RoutingAction.DROP); + decision.setWildcards(match_ret.wildcards); + decision.addToContext(cntx); + if (logger.isTraceEnabled()) { + if (rule == null) + logger.trace( + "No firewall rule found for PacketIn={}, blocking flow", + pi); + else if (rule.action == FirewallRule.FirewallAction.DENY) { + logger.trace("Deny rule={} match for PacketIn={}", + rule, pi); + } + } + } else { + decision = new RoutingDecision(sw.getId(), pi.getInPort() + , IDeviceService.fcStore. + get(cntx, IDeviceService.CONTEXT_SRC_DEVICE), + IRoutingDecision.RoutingAction.FORWARD_OR_FLOOD); + decision.setWildcards(match_ret.wildcards); + decision.addToContext(cntx); + if (logger.isTraceEnabled()) + logger.trace("Allow rule={} match for PacketIn={}", rule, + pi); + } + } + + return Command.CONTINUE; + } + + @Override + public boolean isEnabled() { + return enabled; + } + +} diff --git a/pyretic/kinetic/other_ctrl_apps/LearningSwitch.java b/pyretic/kinetic/other_ctrl_apps/LearningSwitch.java new file mode 100644 index 00000000..afbfe68e --- /dev/null +++ b/pyretic/kinetic/other_ctrl_apps/LearningSwitch.java @@ -0,0 +1,377 @@ +public class LearningSwitch + implements IFloodlightModule, ILearningSwitchService, IOFMessageListener { + protected static Logger log = LoggerFactory.getLogger(LearningSwitch.class); + protected IFloodlightProviderService floodlightProvider; + protected ICounterStoreService counterStore; + protected IRestApiService restApi; + protected Map> macVlanToSwitchPortMap; + public static final int LEARNING_SWITCH_APP_ID = 1; + public static final int APP_ID_BITS = 12; + public static final int APP_ID_SHIFT = (64 - APP_ID_BITS); + public static final long LEARNING_SWITCH_COOKIE = (long) (LEARNING_SWITCH_APP_ID & ((1 << APP_ID_BITS) - 1)) << APP_ID_SHIFT; + protected static short FLOWMOD_DEFAULT_IDLE_TIMEOUT = 5; // in seconds + protected static short FLOWMOD_DEFAULT_HARD_TIMEOUT = 0; // infinite + protected static short FLOWMOD_PRIORITY = 100; + protected static final int MAX_MACS_PER_SWITCH = 1000; + public void setFloodlightProvider(IFloodlightProviderService floodlightProvider) { + this.floodlightProvider = floodlightProvider; + } + @Override + public String getName() { + return "learningswitch"; + } + protected void addToPortMap(IOFSwitch sw, long mac, short vlan, short portVal) { + Map swMap = macVlanToSwitchPortMap.get(sw); + if (vlan == (short) 0xffff) { + vlan = 0; + } + + if (swMap == null) { + swMap = Collections.synchronizedMap(new LRULinkedHashMap(MAX_MACS_PER_SWITCH)); + macVlanToSwitchPortMap.put(sw, swMap); + } + swMap.put(new MacVlanPair(mac, vlan), portVal); + } + + protected void removeFromPortMap(IOFSwitch sw, long mac, short vlan) { + if (vlan == (short) 0xffff) { + vlan = 0; + } + Map swMap = macVlanToSwitchPortMap.get(sw); + if (swMap != null) + swMap.remove(new MacVlanPair(mac, vlan)); + } + + public Short getFromPortMap(IOFSwitch sw, long mac, short vlan) { + if (vlan == (short) 0xffff) { 
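+        // 0xffff is OFP_VLAN_NONE in OpenFlow 1.0 (untagged traffic); it is
+        // normalized to VLAN 0 so that untagged hosts share a single table key.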
+ vlan = 0; + } + Map swMap = macVlanToSwitchPortMap.get(sw); + if (swMap != null) + return swMap.get(new MacVlanPair(mac, vlan)); + + return null; + } + + public void clearLearnedTable() { + macVlanToSwitchPortMap.clear(); + } + + public void clearLearnedTable(IOFSwitch sw) { + Map swMap = macVlanToSwitchPortMap.get(sw); + if (swMap != null) + swMap.clear(); + } + + @Override + public synchronized Map> getTable() { + return macVlanToSwitchPortMap; + } + + private void writeFlowMod(IOFSwitch sw, short command, int bufferId, + OFMatch match, short outPort) { + OFFlowMod flowMod = (OFFlowMod) floodlightProvider.getOFMessageFactory().getMessage(OFType.FLOW_MOD); + flowMod.setMatch(match); + flowMod.setCookie(LearningSwitch.LEARNING_SWITCH_COOKIE); + flowMod.setCommand(command); + flowMod.setIdleTimeout(LearningSwitch.FLOWMOD_DEFAULT_IDLE_TIMEOUT); + flowMod.setHardTimeout(LearningSwitch.FLOWMOD_DEFAULT_HARD_TIMEOUT); + flowMod.setPriority(LearningSwitch.FLOWMOD_PRIORITY); + flowMod.setBufferId(bufferId); + flowMod.setOutPort((command == OFFlowMod.OFPFC_DELETE) ? outPort : OFPort.OFPP_NONE.getValue()); + flowMod.setFlags((command == OFFlowMod.OFPFC_DELETE) ? 0 : (short) (1 << 0)); // OFPFF_SEND_FLOW_REM + + flowMod.setActions(Arrays.asList((OFAction) new OFActionOutput(outPort, (short) 0xffff))); + flowMod.setLength((short) (OFFlowMod.MINIMUM_LENGTH + OFActionOutput.MINIMUM_LENGTH)); + + if (log.isTraceEnabled()) { + log.trace("{} {} flow mod {}", + new Object[]{ sw, (command == OFFlowMod.OFPFC_DELETE) ? "deleting" : "adding", flowMod }); + } + + counterStore.updatePktOutFMCounterStoreLocal(sw, flowMod); + + try { + sw.write(flowMod, null); + } catch (IOException e) { + log.error("Failed to write {} to switch {}", new Object[]{ flowMod, sw }, e); + } + } + + private void pushPacket(IOFSwitch sw, OFMatch match, OFPacketIn pi, short outport) { + if (pi == null) { + return; + } + + if (pi.getInPort() == outport) { + if (log.isDebugEnabled()) { + log.debug("Attempting to do packet-out to the same " + + "interface as packet-in. Dropping packet. 
" + + " SrcSwitch={}, match = {}, pi={}", + new Object[]{sw, match, pi}); + return; + } + } + + if (log.isTraceEnabled()) { + log.trace("PacketOut srcSwitch={} match={} pi={}", + new Object[] {sw, match, pi}); + } + + OFPacketOut po = + (OFPacketOut) floodlightProvider.getOFMessageFactory() + .getMessage(OFType.PACKET_OUT); + + List actions = new ArrayList(); + actions.add(new OFActionOutput(outport, (short) 0xffff)); + + po.setActions(actions) + .setActionsLength((short) OFActionOutput.MINIMUM_LENGTH); + short poLength = + (short) (po.getActionsLength() + OFPacketOut.MINIMUM_LENGTH); + + if (sw.getBuffers() == 0) { + pi.setBufferId(OFPacketOut.BUFFER_ID_NONE); + po.setBufferId(OFPacketOut.BUFFER_ID_NONE); + } else { + po.setBufferId(pi.getBufferId()); + } + + po.setInPort(pi.getInPort()); + + if (pi.getBufferId() == OFPacketOut.BUFFER_ID_NONE) { + byte[] packetData = pi.getPacketData(); + poLength += packetData.length; + po.setPacketData(packetData); + } + + po.setLength(poLength); + + try { + counterStore.updatePktOutFMCounterStoreLocal(sw, po); + sw.write(po, null); + } catch (IOException e) { + log.error("Failure writing packet out", e); + } + } + + + private void writePacketOutForPacketIn(IOFSwitch sw, + OFPacketIn packetInMessage, + short egressPort) { + OFPacketOut packetOutMessage = (OFPacketOut) floodlightProvider.getOFMessageFactory().getMessage(OFType.PACKET_OUT); + short packetOutLength = (short)OFPacketOut.MINIMUM_LENGTH; // starting length + + packetOutMessage.setBufferId(packetInMessage.getBufferId()); + packetOutMessage.setInPort(packetInMessage.getInPort()); + packetOutMessage.setActionsLength((short)OFActionOutput.MINIMUM_LENGTH); + packetOutLength += OFActionOutput.MINIMUM_LENGTH; + + List actions = new ArrayList(1); + actions.add(new OFActionOutput(egressPort, (short) 0)); + packetOutMessage.setActions(actions); + + if (packetInMessage.getBufferId() == OFPacketOut.BUFFER_ID_NONE) { + byte[] packetData = packetInMessage.getPacketData(); + packetOutMessage.setPacketData(packetData); + packetOutLength += (short)packetData.length; + } + + packetOutMessage.setLength(packetOutLength); + + try { + counterStore.updatePktOutFMCounterStoreLocal(sw, packetOutMessage); + sw.write(packetOutMessage, null); + } catch (IOException e) { + log.error("Failed to write {} to switch {}: {}", new Object[]{ packetOutMessage, sw, e }); + } + } + private Command processPacketInMessage(IOFSwitch sw, OFPacketIn pi, FloodlightContext cntx) { + OFMatch match = new OFMatch(); + match.loadFromPacket(pi.getPacketData(), pi.getInPort()); + Long sourceMac = Ethernet.toLong(match.getDataLayerSource()); + Long destMac = Ethernet.toLong(match.getDataLayerDestination()); + Short vlan = match.getDataLayerVirtualLan(); + if ((destMac & 0xfffffffffff0L) == 0x0180c2000000L) { + if (log.isTraceEnabled()) { + log.trace("ignoring packet addressed to 802.1D/Q reserved addr: switch {} vlan {} dest MAC {}", + new Object[]{ sw, vlan, HexString.toHexString(destMac) }); + } + return Command.STOP; + } + if ((sourceMac & 0x010000000000L) == 0) { + this.addToPortMap(sw, sourceMac, vlan, pi.getInPort()); + } + + Short outPort = getFromPortMap(sw, destMac, vlan); + if (outPort == null) { + this.writePacketOutForPacketIn(sw, pi, OFPort.OFPP_FLOOD.getValue()); + } else if (outPort == match.getInputPort()) { + log.trace("ignoring packet that arrived on same port as learned destination:" + + " switch {} vlan {} dest MAC {} port {}", + new Object[]{ sw, vlan, HexString.toHexString(destMac), outPort }); + } else { + 
match.setWildcards(((Integer)sw.getAttribute(IOFSwitch.PROP_FASTWILDCARDS)).intValue() + & ~OFMatch.OFPFW_IN_PORT + & ~OFMatch.OFPFW_DL_VLAN & ~OFMatch.OFPFW_DL_SRC & ~OFMatch.OFPFW_DL_DST + & ~OFMatch.OFPFW_NW_SRC_MASK & ~OFMatch.OFPFW_NW_DST_MASK); + this.pushPacket(sw, match, pi, outPort); + this.writeFlowMod(sw, OFFlowMod.OFPFC_ADD, OFPacketOut.BUFFER_ID_NONE, match, outPort); + if (LEARNING_SWITCH_REVERSE_FLOW) { + this.writeFlowMod(sw, OFFlowMod.OFPFC_ADD, -1, match.clone() + .setDataLayerSource(match.getDataLayerDestination()) + .setDataLayerDestination(match.getDataLayerSource()) + .setNetworkSource(match.getNetworkDestination()) + .setNetworkDestination(match.getNetworkSource()) + .setTransportSource(match.getTransportDestination()) + .setTransportDestination(match.getTransportSource()) + .setInputPort(outPort), + match.getInputPort()); + } + } + return Command.CONTINUE; + } + + private Command processFlowRemovedMessage(IOFSwitch sw, OFFlowRemoved flowRemovedMessage) { + if (flowRemovedMessage.getCookie() != LearningSwitch.LEARNING_SWITCH_COOKIE) { + return Command.CONTINUE; + } + if (log.isTraceEnabled()) { + log.trace("{} flow entry removed {}", sw, flowRemovedMessage); + } + OFMatch match = flowRemovedMessage.getMatch(); + this.removeFromPortMap(sw, Ethernet.toLong(match.getDataLayerSource()), + match.getDataLayerVirtualLan()); + + this.writeFlowMod(sw, OFFlowMod.OFPFC_DELETE, -1, match.clone() + .setWildcards(((Integer)sw.getAttribute(IOFSwitch.PROP_FASTWILDCARDS)).intValue() + & ~OFMatch.OFPFW_DL_VLAN & ~OFMatch.OFPFW_DL_SRC & ~OFMatch.OFPFW_DL_DST + & ~OFMatch.OFPFW_NW_SRC_MASK & ~OFMatch.OFPFW_NW_DST_MASK) + .setDataLayerSource(match.getDataLayerDestination()) + .setDataLayerDestination(match.getDataLayerSource()) + .setNetworkSource(match.getNetworkDestination()) + .setNetworkDestination(match.getNetworkSource()) + .setTransportSource(match.getTransportDestination()) + .setTransportDestination(match.getTransportSource()), + match.getInputPort()); + return Command.CONTINUE; + } + + + @Override + public Command receive(IOFSwitch sw, OFMessage msg, FloodlightContext cntx) { + switch (msg.getType()) { + case PACKET_IN: + return this.processPacketInMessage(sw, (OFPacketIn) msg, cntx); + case FLOW_REMOVED: + return this.processFlowRemovedMessage(sw, (OFFlowRemoved) msg); + case ERROR: + log.info("received an error {} from switch {}", msg, sw); + return Command.CONTINUE; + default: + break; + } + log.error("received an unexpected message {} from switch {}", msg, sw); + return Command.CONTINUE; + } + + @Override + public boolean isCallbackOrderingPrereq(OFType type, String name) { + return false; + } + + @Override + public boolean isCallbackOrderingPostreq(OFType type, String name) { + return false; + } + + + @Override + public Collection> getModuleServices() { + Collection> l = + new ArrayList>(); + l.add(ILearningSwitchService.class); + return l; + } + + @Override + public Map, IFloodlightService> + getServiceImpls() { + Map, + IFloodlightService> m = + new HashMap, + IFloodlightService>(); + m.put(ILearningSwitchService.class, this); + return m; + } + + @Override + public Collection> + getModuleDependencies() { + Collection> l = + new ArrayList>(); + l.add(IFloodlightProviderService.class); + l.add(ICounterStoreService.class); + l.add(IRestApiService.class); + return l; + } + + @Override + public void init(FloodlightModuleContext context) + throws FloodlightModuleException { + macVlanToSwitchPortMap = + new ConcurrentHashMap>(); + floodlightProvider = + 
context.getServiceImpl(IFloodlightProviderService.class); + counterStore = + context.getServiceImpl(ICounterStoreService.class); + restApi = + context.getServiceImpl(IRestApiService.class); + } + + @Override + public void startUp(FloodlightModuleContext context) { + floodlightProvider.addOFMessageListener(OFType.PACKET_IN, this); + floodlightProvider.addOFMessageListener(OFType.FLOW_REMOVED, this); + floodlightProvider.addOFMessageListener(OFType.ERROR, this); + restApi.addRestletRoutable(new LearningSwitchWebRoutable()); + + Map configOptions = context.getConfigParams(this); + try { + String idleTimeout = configOptions.get("idletimeout"); + if (idleTimeout != null) { + FLOWMOD_DEFAULT_IDLE_TIMEOUT = Short.parseShort(idleTimeout); + } + } catch (NumberFormatException e) { + log.warn("Error parsing flow idle timeout, " + + "using default of {} seconds", + FLOWMOD_DEFAULT_IDLE_TIMEOUT); + } + try { + String hardTimeout = configOptions.get("hardtimeout"); + if (hardTimeout != null) { + FLOWMOD_DEFAULT_HARD_TIMEOUT = Short.parseShort(hardTimeout); + } + } catch (NumberFormatException e) { + log.warn("Error parsing flow hard timeout, " + + "using default of {} seconds", + FLOWMOD_DEFAULT_HARD_TIMEOUT); + } + try { + String priority = configOptions.get("priority"); + if (priority != null) { + FLOWMOD_PRIORITY = Short.parseShort(priority); + } + } catch (NumberFormatException e) { + log.warn("Error parsing flow priority, " + + "using default of {}", + FLOWMOD_PRIORITY); + } + log.debug("FlowMod idle timeout set to {} seconds", + FLOWMOD_DEFAULT_IDLE_TIMEOUT); + log.debug("FlowMod hard timeout set to {} seconds", + FLOWMOD_DEFAULT_HARD_TIMEOUT); + log.debug("FlowMod priority set to {}", + FLOWMOD_PRIORITY); + } +} diff --git a/pyretic/kinetic/other_ctrl_apps/LoadBalancer.java b/pyretic/kinetic/other_ctrl_apps/LoadBalancer.java new file mode 100644 index 00000000..f287ba18 --- /dev/null +++ b/pyretic/kinetic/other_ctrl_apps/LoadBalancer.java @@ -0,0 +1,1196 @@ +public class LoadBalancer implements IFloodlightModule, + ILoadBalancerService, IOFMessageListener { + + protected static Logger log = LoggerFactory.getLogger(LoadBalancer.class); + + // Our dependencies + protected IFloodlightProviderService floodlightProvider; + protected IRestApiService restApi; + + protected ICounterStoreService counterStore; + protected OFMessageDamper messageDamper; + protected IDeviceService deviceManager; + protected IRoutingService routingEngine; + protected ITopologyService topology; + protected IStaticFlowEntryPusherService sfp; + + protected HashMap vips; + protected HashMap pools; + protected HashMap members; + protected HashMap vipIpToId; + protected HashMap vipIpToMac; + protected HashMap memberIpToId; + protected HashMap clientToMember; + + //Copied from Forwarding with message damper routine for pushing proxy Arp + protected static int OFMESSAGE_DAMPER_CAPACITY = 10000; // ms. 
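+    // The OFMessageDamper configured in init() is meant to avoid re-sending an
+    // identical OpenFlow message within the timeout window below; the capacity
+    // above is the size of its message cache rather than a duration (assumption
+    // based on how the damper is constructed in init()).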
+ protected static int OFMESSAGE_DAMPER_TIMEOUT = 250; // ms + protected static String LB_ETHER_TYPE = "0x800"; + protected static int LB_PRIORITY = 32768; + + // Comparator for sorting by SwitchCluster + public Comparator clusterIdComparator = + new Comparator() { + @Override + public int compare(SwitchPort d1, SwitchPort d2) { + Long d1ClusterId = + topology.getL2DomainId(d1.getSwitchDPID()); + Long d2ClusterId = + topology.getL2DomainId(d2.getSwitchDPID()); + return d1ClusterId.compareTo(d2ClusterId); + } + }; + + // data structure for storing connected + public class IPClient { + int ipAddress; + byte nw_proto; + short srcPort; // tcp/udp src port. icmp type (OFMatch convention) + short targetPort; // tcp/udp dst port, icmp code (OFMatch convention) + + public IPClient() { + ipAddress = 0; + nw_proto = 0; + srcPort = -1; + targetPort = -1; + } + } + + @Override + public String getName() { + return "loadbalancer"; + } + + @Override + public boolean isCallbackOrderingPrereq(OFType type, String name) { + return (type.equals(OFType.PACKET_IN) && + (name.equals("topology") || + name.equals("devicemanager") || + name.equals("virtualizer"))); + } + + @Override + public boolean isCallbackOrderingPostreq(OFType type, String name) { + return (type.equals(OFType.PACKET_IN) && name.equals("forwarding")); + } + + @Override + public net.floodlightcontroller.core.IListener.Command + receive(IOFSwitch sw, OFMessage msg, FloodlightContext cntx) { + switch (msg.getType()) { + case PACKET_IN: + return processPacketIn(sw, (OFPacketIn)msg, cntx); + default: + break; + } + log.warn("Received unexpected message {}", msg); + return Command.CONTINUE; + } + + private net.floodlightcontroller.core.IListener.Command + processPacketIn(IOFSwitch sw, OFPacketIn pi, + FloodlightContext cntx) { + + Ethernet eth = IFloodlightProviderService.bcStore.get(cntx, + IFloodlightProviderService.CONTEXT_PI_PAYLOAD); + IPacket pkt = eth.getPayload(); + + if (eth.isBroadcast() || eth.isMulticast()) { + // handle ARP for VIP + if (pkt instanceof ARP) { + // retrieve arp to determine target IP address + ARP arpRequest = (ARP) eth.getPayload(); + + int targetProtocolAddress = IPv4.toIPv4Address(arpRequest + .getTargetProtocolAddress()); + + if (vipIpToId.containsKey(targetProtocolAddress)) { + String vipId = vipIpToId.get(targetProtocolAddress); + vipProxyArpReply(sw, pi, cntx, vipId); + return Command.STOP; + } + } + } else { + // currently only load balance IPv4 packets - no-op for other traffic + if (pkt instanceof IPv4) { + IPv4 ip_pkt = (IPv4) pkt; + + // If match Vip and port, check pool and choose member + int destIpAddress = ip_pkt.getDestinationAddress(); + + if (vipIpToId.containsKey(destIpAddress)){ + IPClient client = new IPClient(); + client.ipAddress = ip_pkt.getSourceAddress(); + client.nw_proto = ip_pkt.getProtocol(); + if (ip_pkt.getPayload() instanceof TCP) { + TCP tcp_pkt = (TCP) ip_pkt.getPayload(); + client.srcPort = tcp_pkt.getSourcePort(); + client.targetPort = tcp_pkt.getDestinationPort(); + } + if (ip_pkt.getPayload() instanceof UDP) { + UDP udp_pkt = (UDP) ip_pkt.getPayload(); + client.srcPort = udp_pkt.getSourcePort(); + client.targetPort = udp_pkt.getDestinationPort(); + } + if (ip_pkt.getPayload() instanceof ICMP) { + client.srcPort = 8; + client.targetPort = 0; + } + + LBVip vip = vips.get(vipIpToId.get(destIpAddress)); + LBPool pool = pools.get(vip.pickPool(client)); + LBMember member = members.get(pool.pickMember(client)); + + // for chosen member, check device manager and find and push routes, in 
both directions + pushBidirectionalVipRoutes(sw, pi, cntx, client, member); + + // packet out based on table rule + pushPacket(pkt, sw, pi.getBufferId(), pi.getInPort(), OFPort.OFPP_TABLE.getValue(), + cntx, true); + + return Command.STOP; + } + } + } + // bypass non-load-balanced traffic for normal processing (forwarding) + return Command.CONTINUE; + } + + + protected void vipProxyArpReply(IOFSwitch sw, OFPacketIn pi, FloodlightContext cntx, String vipId) { + log.debug("vipProxyArpReply"); + + Ethernet eth = IFloodlightProviderService.bcStore.get(cntx, + IFloodlightProviderService.CONTEXT_PI_PAYLOAD); + + // retrieve original arp to determine host configured gw IP address + if (! (eth.getPayload() instanceof ARP)) + return; + ARP arpRequest = (ARP) eth.getPayload(); + + // have to do proxy arp reply since at this point we cannot determine the requesting application type + byte[] vipProxyMacBytes = vips.get(vipId).proxyMac.toBytes(); + + // generate proxy ARP reply + IPacket arpReply = new Ethernet() + .setSourceMACAddress(vipProxyMacBytes) + .setDestinationMACAddress(eth.getSourceMACAddress()) + .setEtherType(Ethernet.TYPE_ARP) + .setVlanID(eth.getVlanID()) + .setPriorityCode(eth.getPriorityCode()) + .setPayload( + new ARP() + .setHardwareType(ARP.HW_TYPE_ETHERNET) + .setProtocolType(ARP.PROTO_TYPE_IP) + .setHardwareAddressLength((byte) 6) + .setProtocolAddressLength((byte) 4) + .setOpCode(ARP.OP_REPLY) + .setSenderHardwareAddress(vipProxyMacBytes) + .setSenderProtocolAddress( + arpRequest.getTargetProtocolAddress()) + .setTargetHardwareAddress( + eth.getSourceMACAddress()) + .setTargetProtocolAddress( + arpRequest.getSenderProtocolAddress())); + + // push ARP reply out + pushPacket(arpReply, sw, OFPacketOut.BUFFER_ID_NONE, OFPort.OFPP_NONE.getValue(), + pi.getInPort(), cntx, true); + log.debug("proxy ARP reply pushed as {}", IPv4.fromIPv4Address(vips.get(vipId).address)); + + return; + } + + + public void pushPacket(IPacket packet, + IOFSwitch sw, + int bufferId, + short inPort, + short outPort, + FloodlightContext cntx, + boolean flush) { + if (log.isTraceEnabled()) { + log.trace("PacketOut srcSwitch={} inPort={} outPort={}", + new Object[] {sw, inPort, outPort}); + } + + OFPacketOut po = + (OFPacketOut) floodlightProvider.getOFMessageFactory() + .getMessage(OFType.PACKET_OUT); + + // set actions + List actions = new ArrayList(); + actions.add(new OFActionOutput(outPort, (short) 0xffff)); + + po.setActions(actions) + .setActionsLength((short) OFActionOutput.MINIMUM_LENGTH); + short poLength = + (short) (po.getActionsLength() + OFPacketOut.MINIMUM_LENGTH); + + // set buffer_id, in_port + po.setBufferId(bufferId); + po.setInPort(inPort); + + // set data - only if buffer_id == -1 + if (po.getBufferId() == OFPacketOut.BUFFER_ID_NONE) { + if (packet == null) { + log.error("BufferId is not set and packet data is null. " + + "Cannot send packetOut. 
" + + "srcSwitch={} inPort={} outPort={}", + new Object[] {sw, inPort, outPort}); + return; + } + byte[] packetData = packet.serialize(); + poLength += packetData.length; + po.setPacketData(packetData); + } + + po.setLength(poLength); + + try { + counterStore.updatePktOutFMCounterStoreLocal(sw, po); + messageDamper.write(sw, po, cntx, flush); + } catch (IOException e) { + log.error("Failure writing packet out", e); + } + } + + protected void pushBidirectionalVipRoutes(IOFSwitch sw, OFPacketIn pi, FloodlightContext cntx, IPClient client, LBMember member) { + + // borrowed code from Forwarding to retrieve src and dst device entities + // Check if we have the location of the destination + IDevice srcDevice = null; + IDevice dstDevice = null; + + // retrieve all known devices + Collection allDevices = deviceManager + .getAllDevices(); + + for (IDevice d : allDevices) { + for (int j = 0; j < d.getIPv4Addresses().length; j++) { + if (srcDevice == null && client.ipAddress == d.getIPv4Addresses()[j]) + srcDevice = d; + if (dstDevice == null && member.address == d.getIPv4Addresses()[j]) { + dstDevice = d; + member.macString = dstDevice.getMACAddressString(); + } + if (srcDevice != null && dstDevice != null) + break; + } + } + + // srcDevice and/or dstDevice is null, no route can be pushed + if (srcDevice == null || dstDevice == null) return; + + Long srcIsland = topology.getL2DomainId(sw.getId()); + + if (srcIsland == null) { + log.debug("No openflow island found for source {}/{}", + sw.getStringId(), pi.getInPort()); + return; + } + + // Validate that we have a destination known on the same island + // Validate that the source and destination are not on the same switchport + boolean on_same_island = false; + boolean on_same_if = false; + for (SwitchPort dstDap : dstDevice.getAttachmentPoints()) { + long dstSwDpid = dstDap.getSwitchDPID(); + Long dstIsland = topology.getL2DomainId(dstSwDpid); + if ((dstIsland != null) && dstIsland.equals(srcIsland)) { + on_same_island = true; + if ((sw.getId() == dstSwDpid) && + (pi.getInPort() == dstDap.getPort())) { + on_same_if = true; + } + break; + } + } + + if (!on_same_island) { + // Flood since we don't know the dst device + if (log.isTraceEnabled()) { + log.trace("No first hop island found for destination " + + "device {}, Action = flooding", dstDevice); + } + return; + } + + if (on_same_if) { + if (log.isTraceEnabled()) { + log.trace("Both source and destination are on the same " + + "switch/port {}/{}, Action = NOP", + sw.toString(), pi.getInPort()); + } + return; + } + + // Install all the routes where both src and dst have attachment + // points. Since the lists are stored in sorted order we can + // traverse the attachment points in O(m+n) time + SwitchPort[] srcDaps = srcDevice.getAttachmentPoints(); + Arrays.sort(srcDaps, clusterIdComparator); + SwitchPort[] dstDaps = dstDevice.getAttachmentPoints(); + Arrays.sort(dstDaps, clusterIdComparator); + + int iSrcDaps = 0, iDstDaps = 0; + + // following Forwarding's same routing routine, retrieve both in-bound and out-bound routes for + // all clusters. 
+ while ((iSrcDaps < srcDaps.length) && (iDstDaps < dstDaps.length)) { + SwitchPort srcDap = srcDaps[iSrcDaps]; + SwitchPort dstDap = dstDaps[iDstDaps]; + Long srcCluster = + topology.getL2DomainId(srcDap.getSwitchDPID()); + Long dstCluster = + topology.getL2DomainId(dstDap.getSwitchDPID()); + + int srcVsDest = srcCluster.compareTo(dstCluster); + if (srcVsDest == 0) { + if (!srcDap.equals(dstDap) && + (srcCluster != null) && + (dstCluster != null)) { + Route routeIn = + routingEngine.getRoute(srcDap.getSwitchDPID(), + (short)srcDap.getPort(), + dstDap.getSwitchDPID(), + (short)dstDap.getPort(), 0); + Route routeOut = + routingEngine.getRoute(dstDap.getSwitchDPID(), + (short)dstDap.getPort(), + srcDap.getSwitchDPID(), + (short)srcDap.getPort(), 0); + + // use static flow entry pusher to push flow mod along in and out path + // in: match src client (ip, port), rewrite dest from vip ip/port to member ip/port, forward + // out: match dest client (ip, port), rewrite src from member ip/port to vip ip/port, forward + + if (routeIn != null) { + pushStaticVipRoute(true, routeIn, client, member, sw.getId()); + } + + if (routeOut != null) { + pushStaticVipRoute(false, routeOut, client, member, sw.getId()); + } + + } + iSrcDaps++; + iDstDaps++; + } else if (srcVsDest < 0) { + iSrcDaps++; + } else { + iDstDaps++; + } + } + return; + } + + + public void pushStaticVipRoute(boolean inBound, Route route, IPClient client, LBMember member, long pinSwitch) { + List path = route.getPath(); + if (path.size()>0) { + for (int i = 0; i < path.size(); i+=2) { + + long sw = path.get(i).getNodeId(); + String swString = HexString.toHexString(path.get(i).getNodeId()); + String entryName; + String matchString = null; + String actionString = null; + + OFFlowMod fm = (OFFlowMod) floodlightProvider.getOFMessageFactory() + .getMessage(OFType.FLOW_MOD); + + fm.setIdleTimeout((short) 0); // infinite + fm.setHardTimeout((short) 0); // infinite + fm.setBufferId(OFPacketOut.BUFFER_ID_NONE); + fm.setCommand((short) 0); + fm.setFlags((short) 0); + fm.setOutPort(OFPort.OFPP_NONE.getValue()); + fm.setCookie((long) 0); + fm.setPriority(Short.MAX_VALUE); + + if (inBound) { + entryName = "inbound-vip-"+ member.vipId+"-client-"+client.ipAddress+"-port-"+client.targetPort + +"-srcswitch-"+path.get(0).getNodeId()+"-sw-"+sw; + matchString = "nw_src="+IPv4.fromIPv4Address(client.ipAddress)+"," + + "nw_proto="+String.valueOf(client.nw_proto)+"," + + "tp_src="+String.valueOf(client.srcPort & 0xffff)+"," + + "dl_type="+LB_ETHER_TYPE+"," + + "in_port="+String.valueOf(path.get(i).getPortId()); + + if (sw == pinSwitch) { + actionString = "set-dst-ip="+IPv4.fromIPv4Address(member.address)+"," + + "set-dst-mac="+member.macString+"," + + "output="+path.get(i+1).getPortId(); + } else { + actionString = + "output="+path.get(i+1).getPortId(); + } + } else { + entryName = "outbound-vip-"+ member.vipId+"-client-"+client.ipAddress+"-port-"+client.targetPort + +"-srcswitch-"+path.get(0).getNodeId()+"-sw-"+sw; + matchString = "nw_dst="+IPv4.fromIPv4Address(client.ipAddress)+"," + + "nw_proto="+String.valueOf(client.nw_proto)+"," + + "tp_dst="+String.valueOf(client.srcPort & 0xffff)+"," + + "dl_type="+LB_ETHER_TYPE+"," + + "in_port="+String.valueOf(path.get(i).getPortId()); + + if (sw == pinSwitch) { + actionString = "set-src-ip="+IPv4.fromIPv4Address(vips.get(member.vipId).address)+"," + + "set-src-mac="+vips.get(member.vipId).proxyMac.toString()+"," + + "output="+path.get(i+1).getPortId(); + } else { + actionString = "output="+path.get(i+1).getPortId(); + 
} + + } + + parseActionString(fm, actionString, log); + + fm.setPriority(U16.t(LB_PRIORITY)); + + OFMatch ofMatch = new OFMatch(); + try { + ofMatch.fromString(matchString); + } catch (IllegalArgumentException e) { + log.debug("ignoring flow entry {} on switch {} with illegal OFMatch() key: " + + matchString, entryName, swString); + } + + fm.setMatch(ofMatch); + sfp.addFlow(entryName, fm, swString); + + } + } + return; + } + + + @Override + public Collection listVips() { + return vips.values(); + } + + @Override + public Collection listVip(String vipId) { + Collection result = new HashSet(); + result.add(vips.get(vipId)); + return result; + } + + @Override + public LBVip createVip(LBVip vip) { + if (vip == null) + vip = new LBVip(); + + vips.put(vip.id, vip); + vipIpToId.put(vip.address, vip.id); + vipIpToMac.put(vip.address, vip.proxyMac); + + return vip; + } + + @Override + public LBVip updateVip(LBVip vip) { + vips.put(vip.id, vip); + return vip; + } + + @Override + public int removeVip(String vipId) { + if(vips.containsKey(vipId)){ + vips.remove(vipId); + return 0; + } else { + return -1; + } + } + + @Override + public Collection listPools() { + return pools.values(); + } + + @Override + public Collection listPool(String poolId) { + Collection result = new HashSet(); + result.add(pools.get(poolId)); + return result; + } + + @Override + public LBPool createPool(LBPool pool) { + if (pool==null) + pool = new LBPool(); + + pools.put(pool.id, pool); + if (pool.vipId != null && vips.containsKey(pool.vipId)) + vips.get(pool.vipId).pools.add(pool.id); + else { + log.error("specified vip-id must exist"); + pool.vipId = null; + pools.put(pool.id, pool); + } + return pool; + } + + @Override + public LBPool updatePool(LBPool pool) { + pools.put(pool.id, pool); + return null; + } + + @Override + public int removePool(String poolId) { + LBPool pool; + if(pools!=null){ + pool = pools.get(poolId); + if (pool.vipId != null) + vips.get(pool.vipId).pools.remove(poolId); + pools.remove(poolId); + return 0; + } else { + return -1; + } + } + + @Override + public Collection listMembers() { + return members.values(); + } + + @Override + public Collection listMember(String memberId) { + Collection result = new HashSet(); + result.add(members.get(memberId)); + return result; + } + + @Override + public Collection listMembersByPool(String poolId) { + Collection result = new HashSet(); + + if(pools.containsKey(poolId)) { + ArrayList memberIds = pools.get(poolId).members; + for (int i=0; i listMonitors() { + // TODO Auto-generated method stub + return null; + } + + @Override + public Collection listMonitor(String monitorId) { + // TODO Auto-generated method stub + return null; + } + + @Override + public LBMonitor createMonitor(LBMonitor monitor) { + // TODO Auto-generated method stub + return null; + } + + @Override + public LBMonitor updateMonitor(LBMonitor monitor) { + // TODO Auto-generated method stub + return null; + } + + @Override + public int removeMonitor(String monitorId) { + // TODO Auto-generated method stub + return 0; + } + + @Override + public Collection> + getModuleServices() { + Collection> l = + new ArrayList>(); + l.add(ILoadBalancerService.class); + return l; + } + + @Override + public Map, IFloodlightService> + getServiceImpls() { + Map, IFloodlightService> m = + new HashMap, + IFloodlightService>(); + m.put(ILoadBalancerService.class, this); + return m; + } + + @Override + public Collection> + getModuleDependencies() { + Collection> l = + new ArrayList>(); + 
l.add(IFloodlightProviderService.class); + l.add(IRestApiService.class); + l.add(ICounterStoreService.class); + l.add(IDeviceService.class); + l.add(ITopologyService.class); + l.add(IRoutingService.class); + l.add(IStaticFlowEntryPusherService.class); + + return l; + } + + @Override + public void init(FloodlightModuleContext context) + throws FloodlightModuleException { + floodlightProvider = context.getServiceImpl(IFloodlightProviderService.class); + restApi = context.getServiceImpl(IRestApiService.class); + counterStore = context.getServiceImpl(ICounterStoreService.class); + deviceManager = context.getServiceImpl(IDeviceService.class); + routingEngine = context.getServiceImpl(IRoutingService.class); + topology = context.getServiceImpl(ITopologyService.class); + sfp = context.getServiceImpl(IStaticFlowEntryPusherService.class); + + messageDamper = new OFMessageDamper(OFMESSAGE_DAMPER_CAPACITY, + EnumSet.of(OFType.FLOW_MOD), + OFMESSAGE_DAMPER_TIMEOUT); + + vips = new HashMap(); + pools = new HashMap(); + members = new HashMap(); + vipIpToId = new HashMap(); + vipIpToMac = new HashMap(); + memberIpToId = new HashMap(); + } + + @Override + public void startUp(FloodlightModuleContext context) { + floodlightProvider.addOFMessageListener(OFType.PACKET_IN, this); + restApi.addRestletRoutable(new LoadBalancerWebRoutable()); + } + + // Utilities borrowed from StaticFlowEntries + + private static class SubActionStruct { + OFAction action; + int len; + } + + + public static void parseActionString(OFFlowMod flowMod, String actionstr, Logger log) { + List actions = new LinkedList(); + int actionsLength = 0; + if (actionstr != null) { + actionstr = actionstr.toLowerCase(); + for (String subaction : actionstr.split(",")) { + String action = subaction.split("[=:]")[0]; + SubActionStruct subaction_struct = null; + + if (action.equals("output")) { + subaction_struct = decode_output(subaction, log); + } + else if (action.equals("enqueue")) { + subaction_struct = decode_enqueue(subaction, log); + } + else if (action.equals("strip-vlan")) { + subaction_struct = decode_strip_vlan(subaction, log); + } + else if (action.equals("set-vlan-id")) { + subaction_struct = decode_set_vlan_id(subaction, log); + } + else if (action.equals("set-vlan-priority")) { + subaction_struct = decode_set_vlan_priority(subaction, log); + } + else if (action.equals("set-src-mac")) { + subaction_struct = decode_set_src_mac(subaction, log); + } + else if (action.equals("set-dst-mac")) { + subaction_struct = decode_set_dst_mac(subaction, log); + } + else if (action.equals("set-tos-bits")) { + subaction_struct = decode_set_tos_bits(subaction, log); + } + else if (action.equals("set-src-ip")) { + subaction_struct = decode_set_src_ip(subaction, log); + } + else if (action.equals("set-dst-ip")) { + subaction_struct = decode_set_dst_ip(subaction, log); + } + else if (action.equals("set-src-port")) { + subaction_struct = decode_set_src_port(subaction, log); + } + else if (action.equals("set-dst-port")) { + subaction_struct = decode_set_dst_port(subaction, log); + } + else { + log.error("Unexpected action '{}', '{}'", action, subaction); + } + + if (subaction_struct != null) { + actions.add(subaction_struct.action); + actionsLength += subaction_struct.len; + } + } + } + log.debug("action {}", actions); + + flowMod.setActions(actions); + flowMod.setLengthU(OFFlowMod.MINIMUM_LENGTH + actionsLength); + } + + private static SubActionStruct decode_output(String subaction, Logger log) { + SubActionStruct sa = null; + Matcher n; + + n = 
Pattern.compile("output=(?:((?:0x)?\\d+)|(all)|(controller)|(local)|(ingress-port)|(normal)|(flood))").matcher(subaction); + if (n.matches()) { + OFActionOutput action = new OFActionOutput(); + action.setMaxLength(Short.MAX_VALUE); + short port = OFPort.OFPP_NONE.getValue(); + if (n.group(1) != null) { + try { + port = get_short(n.group(1)); + } + catch (NumberFormatException e) { + log.debug("Invalid port in: '{}' (error ignored)", subaction); + return null; + } + } + else if (n.group(2) != null) + port = OFPort.OFPP_ALL.getValue(); + else if (n.group(3) != null) + port = OFPort.OFPP_CONTROLLER.getValue(); + else if (n.group(4) != null) + port = OFPort.OFPP_LOCAL.getValue(); + else if (n.group(5) != null) + port = OFPort.OFPP_IN_PORT.getValue(); + else if (n.group(6) != null) + port = OFPort.OFPP_NORMAL.getValue(); + else if (n.group(7) != null) + port = OFPort.OFPP_FLOOD.getValue(); + action.setPort(port); + log.debug("action {}", action); + + sa = new SubActionStruct(); + sa.action = action; + sa.len = OFActionOutput.MINIMUM_LENGTH; + } + else { + log.error("Invalid subaction: '{}'", subaction); + return null; + } + + return sa; + } + + private static SubActionStruct decode_enqueue(String subaction, Logger log) { + SubActionStruct sa = null; + Matcher n; + + n = Pattern.compile("enqueue=(?:((?:0x)?\\d+)\\:((?:0x)?\\d+))").matcher(subaction); + if (n.matches()) { + short portnum = 0; + if (n.group(1) != null) { + try { + portnum = get_short(n.group(1)); + } + catch (NumberFormatException e) { + log.debug("Invalid port-num in: '{}' (error ignored)", subaction); + return null; + } + } + + int queueid = 0; + if (n.group(2) != null) { + try { + queueid = get_int(n.group(2)); + } + catch (NumberFormatException e) { + log.debug("Invalid queue-id in: '{}' (error ignored)", subaction); + return null; + } + } + + OFActionEnqueue action = new OFActionEnqueue(); + action.setPort(portnum); + action.setQueueId(queueid); + log.debug("action {}", action); + + sa = new SubActionStruct(); + sa.action = action; + sa.len = OFActionEnqueue.MINIMUM_LENGTH; + } + else { + log.debug("Invalid action: '{}'", subaction); + return null; + } + + return sa; + } + + private static SubActionStruct decode_strip_vlan(String subaction, Logger log) { + SubActionStruct sa = null; + Matcher n = Pattern.compile("strip-vlan").matcher(subaction); + + if (n.matches()) { + OFActionStripVirtualLan action = new OFActionStripVirtualLan(); + log.debug("action {}", action); + + sa = new SubActionStruct(); + sa.action = action; + sa.len = OFActionStripVirtualLan.MINIMUM_LENGTH; + } + else { + log.debug("Invalid action: '{}'", subaction); + return null; + } + + return sa; + } + + private static SubActionStruct decode_set_vlan_id(String subaction, Logger log) { + SubActionStruct sa = null; + Matcher n = Pattern.compile("set-vlan-id=((?:0x)?\\d+)").matcher(subaction); + + if (n.matches()) { + if (n.group(1) != null) { + try { + short vlanid = get_short(n.group(1)); + OFActionVirtualLanIdentifier action = new OFActionVirtualLanIdentifier(); + action.setVirtualLanIdentifier(vlanid); + log.debug(" action {}", action); + + sa = new SubActionStruct(); + sa.action = action; + sa.len = OFActionVirtualLanIdentifier.MINIMUM_LENGTH; + } + catch (NumberFormatException e) { + log.debug("Invalid VLAN in: {} (error ignored)", subaction); + return null; + } + } + } + else { + log.debug("Invalid action: '{}'", subaction); + return null; + } + + return sa; + } + + private static SubActionStruct decode_set_vlan_priority(String subaction, Logger log) { + 
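+        // The 802.1p priority code point (PCP) is a 3-bit field in the VLAN tag,
+        // so valid values are 0-7; the parsed number is narrowed to a byte by
+        // get_byte() before being placed in the action.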
SubActionStruct sa = null; + Matcher n = Pattern.compile("set-vlan-priority=((?:0x)?\\d+)").matcher(subaction); + + if (n.matches()) { + if (n.group(1) != null) { + try { + byte prior = get_byte(n.group(1)); + OFActionVirtualLanPriorityCodePoint action = new OFActionVirtualLanPriorityCodePoint(); + action.setVirtualLanPriorityCodePoint(prior); + log.debug(" action {}", action); + + sa = new SubActionStruct(); + sa.action = action; + sa.len = OFActionVirtualLanPriorityCodePoint.MINIMUM_LENGTH; + } + catch (NumberFormatException e) { + log.debug("Invalid VLAN priority in: {} (error ignored)", subaction); + return null; + } + } + } + else { + log.debug("Invalid action: '{}'", subaction); + return null; + } + + return sa; + } + + private static SubActionStruct decode_set_src_mac(String subaction, Logger log) { + SubActionStruct sa = null; + Matcher n = Pattern.compile("set-src-mac=(?:(\\p{XDigit}+)\\:(\\p{XDigit}+)\\:(\\p{XDigit}+)\\:(\\p{XDigit}+)\\:(\\p{XDigit}+)\\:(\\p{XDigit}+))").matcher(subaction); + + if (n.matches()) { + byte[] macaddr = get_mac_addr(n, subaction, log); + if (macaddr != null) { + OFActionDataLayerSource action = new OFActionDataLayerSource(); + action.setDataLayerAddress(macaddr); + log.debug("action {}", action); + + sa = new SubActionStruct(); + sa.action = action; + sa.len = OFActionDataLayerSource.MINIMUM_LENGTH; + } + } + else { + log.debug("Invalid action: '{}'", subaction); + return null; + } + + return sa; + } + + private static SubActionStruct decode_set_dst_mac(String subaction, Logger log) { + SubActionStruct sa = null; + Matcher n = Pattern.compile("set-dst-mac=(?:(\\p{XDigit}+)\\:(\\p{XDigit}+)\\:(\\p{XDigit}+)\\:(\\p{XDigit}+)\\:(\\p{XDigit}+)\\:(\\p{XDigit}+))").matcher(subaction); + + if (n.matches()) { + byte[] macaddr = get_mac_addr(n, subaction, log); + if (macaddr != null) { + OFActionDataLayerDestination action = new OFActionDataLayerDestination(); + action.setDataLayerAddress(macaddr); + log.debug(" action {}", action); + + sa = new SubActionStruct(); + sa.action = action; + sa.len = OFActionDataLayerDestination.MINIMUM_LENGTH; + } + } + else { + log.debug("Invalid action: '{}'", subaction); + return null; + } + + return sa; + } + + private static SubActionStruct decode_set_tos_bits(String subaction, Logger log) { + SubActionStruct sa = null; + Matcher n = Pattern.compile("set-tos-bits=((?:0x)?\\d+)").matcher(subaction); + + if (n.matches()) { + if (n.group(1) != null) { + try { + byte tosbits = get_byte(n.group(1)); + OFActionNetworkTypeOfService action = new OFActionNetworkTypeOfService(); + action.setNetworkTypeOfService(tosbits); + log.debug(" action {}", action); + + sa = new SubActionStruct(); + sa.action = action; + sa.len = OFActionNetworkTypeOfService.MINIMUM_LENGTH; + } + catch (NumberFormatException e) { + log.debug("Invalid dst-port in: {} (error ignored)", subaction); + return null; + } + } + } + else { + log.debug("Invalid action: '{}'", subaction); + return null; + } + + return sa; + } + + private static SubActionStruct decode_set_src_ip(String subaction, Logger log) { + SubActionStruct sa = null; + Matcher n = Pattern.compile("set-src-ip=(?:(\\d+)\\.(\\d+)\\.(\\d+)\\.(\\d+))").matcher(subaction); + + if (n.matches()) { + int ipaddr = get_ip_addr(n, subaction, log); + OFActionNetworkLayerSource action = new OFActionNetworkLayerSource(); + action.setNetworkAddress(ipaddr); + log.debug(" action {}", action); + + sa = new SubActionStruct(); + sa.action = action; + sa.len = OFActionNetworkLayerSource.MINIMUM_LENGTH; + } + else { + 
log.debug("Invalid action: '{}'", subaction); + return null; + } + + return sa; + } + + private static SubActionStruct decode_set_dst_ip(String subaction, Logger log) { + SubActionStruct sa = null; + Matcher n = Pattern.compile("set-dst-ip=(?:(\\d+)\\.(\\d+)\\.(\\d+)\\.(\\d+))").matcher(subaction); + + if (n.matches()) { + int ipaddr = get_ip_addr(n, subaction, log); + OFActionNetworkLayerDestination action = new OFActionNetworkLayerDestination(); + action.setNetworkAddress(ipaddr); + log.debug("action {}", action); + + sa = new SubActionStruct(); + sa.action = action; + sa.len = OFActionNetworkLayerDestination.MINIMUM_LENGTH; + } + else { + log.debug("Invalid action: '{}'", subaction); + return null; + } + + return sa; + } + + private static SubActionStruct decode_set_src_port(String subaction, Logger log) { + SubActionStruct sa = null; + Matcher n = Pattern.compile("set-src-port=((?:0x)?\\d+)").matcher(subaction); + + if (n.matches()) { + if (n.group(1) != null) { + try { + short portnum = get_short(n.group(1)); + OFActionTransportLayerSource action = new OFActionTransportLayerSource(); + action.setTransportPort(portnum); + log.debug("action {}", action); + + sa = new SubActionStruct(); + sa.action = action; + sa.len = OFActionTransportLayerSource.MINIMUM_LENGTH;; + } + catch (NumberFormatException e) { + log.debug("Invalid src-port in: {} (error ignored)", subaction); + return null; + } + } + } + else { + log.debug("Invalid action: '{}'", subaction); + return null; + } + + return sa; + } + + private static SubActionStruct decode_set_dst_port(String subaction, Logger log) { + SubActionStruct sa = null; + Matcher n = Pattern.compile("set-dst-port=((?:0x)?\\d+)").matcher(subaction); + + if (n.matches()) { + if (n.group(1) != null) { + try { + short portnum = get_short(n.group(1)); + OFActionTransportLayerDestination action = new OFActionTransportLayerDestination(); + action.setTransportPort(portnum); + log.debug("action {}", action); + + sa = new SubActionStruct(); + sa.action = action; + sa.len = OFActionTransportLayerDestination.MINIMUM_LENGTH;; + } + catch (NumberFormatException e) { + log.debug("Invalid dst-port in: {} (error ignored)", subaction); + return null; + } + } + } + else { + log.debug("Invalid action: '{}'", subaction); + return null; + } + + return sa; + } + + private static byte[] get_mac_addr(Matcher n, String subaction, Logger log) { + byte[] macaddr = new byte[6]; + + for (int i=0; i<6; i++) { + if (n.group(i+1) != null) { + try { + macaddr[i] = get_byte("0x" + n.group(i+1)); + } + catch (NumberFormatException e) { + log.debug("Invalid src-mac in: '{}' (error ignored)", subaction); + return null; + } + } + else { + log.debug("Invalid src-mac in: '{}' (null, error ignored)", subaction); + return null; + } + } + + return macaddr; + } + + private static int get_ip_addr(Matcher n, String subaction, Logger log) { + int ipaddr = 0; + + for (int i=0; i<4; i++) { + if (n.group(i+1) != null) { + try { + ipaddr = ipaddr<<8; + ipaddr = ipaddr | get_int(n.group(i+1)); + } + catch (NumberFormatException e) { + log.debug("Invalid src-ip in: '{}' (error ignored)", subaction); + return 0; + } + } + else { + log.debug("Invalid src-ip in: '{}' (null, error ignored)", subaction); + return 0; + } + } + + return ipaddr; + } + + // Parse int as decimal, hex (start with 0x or #) or octal (starts with 0) + private static int get_int(String str) { + return Integer.decode(str); + } + + // Parse short as decimal, hex (start with 0x or #) or octal (starts with 0) + private static short 
get_short(String str) { + return (short)(int)Integer.decode(str); + } + + // Parse byte as decimal, hex (start with 0x or #) or octal (starts with 0) + private static byte get_byte(String str) { + return Integer.decode(str).byteValue(); + } + + +} diff --git a/pyretic/kinetic/other_ctrl_apps/firewall.py b/pyretic/kinetic/other_ctrl_apps/firewall.py new file mode 100644 index 00000000..4137a440 --- /dev/null +++ b/pyretic/kinetic/other_ctrl_apps/firewall.py @@ -0,0 +1,44 @@ + +from pox.core import core +import pox.openflow.libopenflow_01 as of +from pox.lib.revent import * +from pox.lib.util import dpidToStr +from pox.lib.addresses import EthAddr +from pox.lib.addresses import IPAddr +from collections import namedtuple +import os + +import csv + +log = core.getLogger() +policyFile = "%s/pox/pox/misc/firewall-policies.csv" % os.environ[ 'HOME' ] + +class Firewall (EventMixin): + + def __init__ (self): + self.listenTo(core.openflow) + log.debug("Enabling Firewall Module") + self.deny = [] + with open(policyFile, 'rb') as f: + reader = csv.DictReader(f) + for row in reader: + self.deny.append((IPAddr(row['ip_0']), IPAddr(row['ip_1']))) + self.deny.append((IPAddr(row['ip_1']), IPAddr(row['ip_0']))) + + def _handle_ConnectionUp (self, event): + for (src, dst) in self.deny: + match = of.ofp_match() +# match.dl_src = IPAddr("00:00:00:00:00:q01") + match.dl_type = 0x800 + match.nw_src = src + match.nw_dst = dst + msg = of.ofp_flow_mod() + msg.match = match + event.connection.send(msg) + log.debug("Firewall rules installed on %s", dpidToStr(event.dpid)) + +def launch (): + ''' + Starting the Firewall module + ''' + core.registerNew(Firewall) diff --git a/pyretic/kinetic/other_ctrl_apps/ip_loadbalancer.py b/pyretic/kinetic/other_ctrl_apps/ip_loadbalancer.py new file mode 100644 index 00000000..1db99cf9 --- /dev/null +++ b/pyretic/kinetic/other_ctrl_apps/ip_loadbalancer.py @@ -0,0 +1,338 @@ +# Copyright 2013 James McCauley +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +A very sloppy IP load balancer. + +Run it with --ip= --servers=IP1,IP2,... + +Please submit improvements. :) +""" + +from pox.core import core +import pox +log = core.getLogger("iplb") + +from pox.lib.packet.ethernet import ethernet, ETHER_BROADCAST +from pox.lib.packet.ipv4 import ipv4 +from pox.lib.packet.arp import arp +from pox.lib.addresses import IPAddr, EthAddr +from pox.lib.util import str_to_bool, dpid_to_str + +import pox.openflow.libopenflow_01 as of + +import time +import random + +FLOW_IDLE_TIMEOUT = 10 +FLOW_MEMORY_TIMEOUT = 60 * 5 + + + +class MemoryEntry (object): + """ + Record for flows we are balancing + + Table entries in the switch "remember" flows for a period of time, but + rather than set their expirations to some long value (potentially leading + to lots of rules for dead connections), we let them expire from the + switch relatively quickly and remember them here in the controller for + longer. 
+ + Another tactic would be to increase the timeouts on the switch and use + the Nicira extension which can match packets with FIN set to remove them + when the connection closes. + """ + def __init__ (self, server, first_packet, client_port): + self.server = server + self.first_packet = first_packet + self.client_port = client_port + self.refresh() + + def refresh (self): + self.timeout = time.time() + FLOW_MEMORY_TIMEOUT + + @property + def is_expired (self): + return time.time() > self.timeout + + @property + def key1 (self): + ethp = self.first_packet + ipp = ethp.find('ipv4') + tcpp = ethp.find('tcp') + + return ipp.srcip,ipp.dstip,tcpp.srcport,tcpp.dstport + + @property + def key2 (self): + ethp = self.first_packet + ipp = ethp.find('ipv4') + tcpp = ethp.find('tcp') + + return self.server,ipp.srcip,tcpp.dstport,tcpp.srcport + + +class iplb (object): + """ + A simple IP load balancer + + Give it a service_ip and a list of server IP addresses. New TCP flows + to service_ip will be randomly redirected to one of the servers. + + We probe the servers to see if they're alive by sending them ARPs. + """ + def __init__ (self, connection, service_ip, servers = []): + self.service_ip = IPAddr(service_ip) + self.servers = [IPAddr(a) for a in servers] + self.con = connection + self.mac = self.con.eth_addr + self.live_servers = {} # IP -> MAC,port + + try: + self.log = log.getChild(dpid_to_str(self.con.dpid)) + except: + # Be nice to Python 2.6 (ugh) + self.log = log + + self.outstanding_probes = {} # IP -> expire_time + + # How quickly do we probe? + self.probe_cycle_time = 5 + + # How long do we wait for an ARP reply before we consider a server dead? + self.arp_timeout = 3 + + # We remember where we directed flows so that if they start up again, + # we can send them to the same server if it's still up. Alternate + # approach: hashing. + self.memory = {} # (srcip,dstip,srcport,dstport) -> MemoryEntry + + self._do_probe() # Kick off the probing + + # As part of a gross hack, we now do this from elsewhere + #self.con.addListeners(self) + + def _do_expire (self): + """ + Expire probes and "memorized" flows + + Each of these should only have a limited lifetime. 
+ """ + t = time.time() + + # Expire probes + for ip,expire_at in self.outstanding_probes.items(): + if t > expire_at: + self.outstanding_probes.pop(ip, None) + if ip in self.live_servers: + self.log.warn("Server %s down", ip) + del self.live_servers[ip] + + # Expire old flows + c = len(self.memory) + self.memory = {k:v for k,v in self.memory.items() + if not v.is_expired} + if len(self.memory) != c: + self.log.debug("Expired %i flows", c-len(self.memory)) + + def _do_probe (self): + """ + Send an ARP to a server to see if it's still up + """ + self._do_expire() + + server = self.servers.pop(0) + self.servers.append(server) + + r = arp() + r.hwtype = r.HW_TYPE_ETHERNET + r.prototype = r.PROTO_TYPE_IP + r.opcode = r.REQUEST + r.hwdst = ETHER_BROADCAST + r.protodst = server + r.hwsrc = self.mac + r.protosrc = self.service_ip + e = ethernet(type=ethernet.ARP_TYPE, src=self.mac, + dst=ETHER_BROADCAST) + e.set_payload(r) + #self.log.debug("ARPing for %s", server) + msg = of.ofp_packet_out() + msg.data = e.pack() + msg.actions.append(of.ofp_action_output(port = of.OFPP_FLOOD)) + msg.in_port = of.OFPP_NONE + self.con.send(msg) + + self.outstanding_probes[server] = time.time() + self.arp_timeout + + core.callDelayed(self._probe_wait_time, self._do_probe) + + @property + def _probe_wait_time (self): + """ + Time to wait between probes + """ + r = self.probe_cycle_time / float(len(self.servers)) + r = max(.25, r) # Cap it at four per second + return r + + def _pick_server (self, key, inport): + """ + Pick a server for a (hopefully) new connection + """ + return random.choice(self.live_servers.keys()) + + def _handle_PacketIn (self, event): + inport = event.port + packet = event.parsed + + def drop (): + if event.ofp.buffer_id is not None: + # Kill the buffer + msg = of.ofp_packet_out(data = event.ofp) + self.con.send(msg) + return None + + tcpp = packet.find('tcp') + if not tcpp: + arpp = packet.find('arp') + if arpp: + # Handle replies to our server-liveness probes + if arpp.opcode == arpp.REPLY: + if arpp.protosrc in self.outstanding_probes: + # A server is (still?) up; cool. + del self.outstanding_probes[arpp.protosrc] + if (self.live_servers.get(arpp.protosrc, (None,None)) + == (arpp.hwsrc,inport)): + # Ah, nothing new here. + pass + else: + # Ooh, new server. + self.live_servers[arpp.protosrc] = arpp.hwsrc,inport + self.log.info("Server %s up", arpp.protosrc) + return + + # Not TCP and not ARP. Don't know what to do with this. Drop it. + return drop() + + # It's TCP. + + ipp = packet.find('ipv4') + + if ipp.srcip in self.servers: + # It's FROM one of our balanced servers. + # Rewrite it BACK to the client + + key = ipp.srcip,ipp.dstip,tcpp.srcport,tcpp.dstport + entry = self.memory.get(key) + + if entry is None: + # We either didn't install it, or we forgot about it. + self.log.debug("No client for %s", key) + return drop() + + # Refresh time timeout and reinstall. 
+ entry.refresh() + + #self.log.debug("Install reverse flow for %s", key) + + # Install reverse table entry + mac,port = self.live_servers[entry.server] + + actions = [] + actions.append(of.ofp_action_dl_addr.set_src(self.mac)) + actions.append(of.ofp_action_nw_addr.set_src(self.service_ip)) + actions.append(of.ofp_action_output(port = entry.client_port)) + match = of.ofp_match.from_packet(packet, inport) + + msg = of.ofp_flow_mod(command=of.OFPFC_ADD, + idle_timeout=FLOW_IDLE_TIMEOUT, + hard_timeout=of.OFP_FLOW_PERMANENT, + data=event.ofp, + actions=actions, + match=match) + self.con.send(msg) + + elif ipp.dstip == self.service_ip: + # Ah, it's for our service IP and needs to be load balanced + + # Do we already know this flow? + key = ipp.srcip,ipp.dstip,tcpp.srcport,tcpp.dstport + entry = self.memory.get(key) + if entry is None or entry.server not in self.live_servers: + # Don't know it (hopefully it's new!) + if len(self.live_servers) == 0: + self.log.warn("No servers!") + return drop() + + # Pick a server for this flow + server = self._pick_server(key, inport) + self.log.debug("Directing traffic to %s", server) + entry = MemoryEntry(server, packet, inport) + self.memory[entry.key1] = entry + self.memory[entry.key2] = entry + + # Update timestamp + entry.refresh() + + # Set up table entry towards selected server + mac,port = self.live_servers[entry.server] + + actions = [] + actions.append(of.ofp_action_dl_addr.set_dst(mac)) + actions.append(of.ofp_action_nw_addr.set_dst(entry.server)) + actions.append(of.ofp_action_output(port = port)) + match = of.ofp_match.from_packet(packet, inport) + + msg = of.ofp_flow_mod(command=of.OFPFC_ADD, + idle_timeout=FLOW_IDLE_TIMEOUT, + hard_timeout=of.OFP_FLOW_PERMANENT, + data=event.ofp, + actions=actions, + match=match) + self.con.send(msg) + + +# Remember which DPID we're operating on (first one to connect) +_dpid = None + +def launch (ip, servers): + servers = servers.replace(","," ").split() + servers = [IPAddr(x) for x in servers] + ip = IPAddr(ip) + + # Boot up ARP Responder + from proto.arp_responder import launch as arp_launch + arp_launch(eat_packets=False,**{str(ip):True}) + import logging + logging.getLogger("proto.arp_responder").setLevel(logging.WARN) + + def _handle_ConnectionUp (event): + global _dpid + if _dpid is None: + log.info("IP Load Balancer Ready.") + core.registerNew(iplb, event.connection, IPAddr(ip), servers) + _dpid = event.dpid + + if _dpid != event.dpid: + log.warn("Ignoring switch %s", event.connection) + else: + log.info("Load Balancing on %s", event.connection) + + # Gross hack + core.iplb.con = event.connection + event.connection.addListeners(core.iplb) + + + core.openflow.addListenerByName("ConnectionUp", _handle_ConnectionUp) diff --git a/pyretic/kinetic/other_ctrl_apps/l2_learning.py b/pyretic/kinetic/other_ctrl_apps/l2_learning.py new file mode 100644 index 00000000..f16817ed --- /dev/null +++ b/pyretic/kinetic/other_ctrl_apps/l2_learning.py @@ -0,0 +1,201 @@ +# Copyright 2011-2012 James McCauley +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+An L2 learning switch.
+
+It is derived from one written live for an SDN crash course.
+It is somewhat similar to NOX's pyswitch in that it installs
+exact-match rules for each flow.
+"""
+
+from pox.core import core
+import pox.openflow.libopenflow_01 as of
+from pox.lib.util import dpid_to_str
+from pox.lib.util import str_to_bool
+import time
+
+log = core.getLogger()
+
+# We don't want to flood immediately when a switch connects.
+# Can be overridden on the command line.
+_flood_delay = 0
+
+class LearningSwitch (object):
+  """
+  The learning switch "brain" associated with a single OpenFlow switch.
+
+  When we see a packet, we'd like to output it on a port which will
+  eventually lead to the destination. To accomplish this, we build a
+  table that maps addresses to ports.
+
+  We populate the table by observing traffic. When we see a packet
+  from some source coming from some port, we know that source is out
+  that port.
+
+  When we want to forward traffic, we look up the destination in our
+  table. If we don't know the port, we simply send the message out
+  all ports except the one it came in on. (In the presence of loops,
+  this is bad!).
+
+  In short, our algorithm looks like this:
+
+  For each packet from the switch:
+  1) Use source address and switch port to update address/port table
+  2) Is transparent = False and either Ethertype is LLDP or the packet's
+     destination address is a Bridge Filtered address?
+     Yes:
+        2a) Drop packet -- don't forward link-local traffic (LLDP, 802.1x)
+            DONE
+  3) Is destination multicast?
+     Yes:
+        3a) Flood the packet
+            DONE
+  4) Port for destination address in our address/port table?
+     No:
+        4a) Flood the packet
+            DONE
+  5) Is output port the same as input port?
+     Yes:
+        5a) Drop packet and similar ones for a while
+  6) Install flow table entry in the switch so that this
+     flow goes out the appropriate port
+     6a) Send the packet out appropriate port
+  """
+  def __init__ (self, connection, transparent):
+    # Switch we'll be adding L2 learning switch capabilities to
+    self.connection = connection
+    self.transparent = transparent
+
+    # Our table
+    self.macToPort = {}
+
+    # We want to hear PacketIn messages, so we listen
+    # to the connection
+    connection.addListeners(self)
+
+    # We just use this to know when to log a helpful message
+    self.hold_down_expired = _flood_delay == 0
+
+    #log.debug("Initializing LearningSwitch, transparent=%s",
+    #          str(self.transparent))
+
+  def _handle_PacketIn (self, event):
+    """
+    Handle packet in messages from the switch to implement above algorithm.
+    """
+
+    packet = event.parsed
+
+    def flood (message = None):
+      """ Floods the packet """
+      msg = of.ofp_packet_out()
+      if time.time() - self.connection.connect_time >= _flood_delay:
+        # Only flood if we've been connected for a little while...
+
+        if self.hold_down_expired is False:
+          # Oh yes it is!
+          self.hold_down_expired = True
+          log.info("%s: Flood hold-down expired -- flooding",
+              dpid_to_str(event.dpid))
+
+        if message is not None: log.debug(message)
+        #log.debug("%i: flood %s -> %s", event.dpid,packet.src,packet.dst)
+        # OFPP_FLOOD is optional; on some switches you may need to change
+        # this to OFPP_ALL.
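+        # (OFPP_FLOOD floods out every port except the ingress port and any
+        # port with flooding disabled; OFPP_ALL ignores the no-flood flag.)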
+ msg.actions.append(of.ofp_action_output(port = of.OFPP_FLOOD)) + else: + pass + #log.info("Holding down flood for %s", dpid_to_str(event.dpid)) + msg.data = event.ofp + msg.in_port = event.port + self.connection.send(msg) + + def drop (duration = None): + """ + Drops this packet and optionally installs a flow to continue + dropping similar ones for a while + """ + if duration is not None: + if not isinstance(duration, tuple): + duration = (duration,duration) + msg = of.ofp_flow_mod() + msg.match = of.ofp_match.from_packet(packet) + msg.idle_timeout = duration[0] + msg.hard_timeout = duration[1] + msg.buffer_id = event.ofp.buffer_id + self.connection.send(msg) + elif event.ofp.buffer_id is not None: + msg = of.ofp_packet_out() + msg.buffer_id = event.ofp.buffer_id + msg.in_port = event.port + self.connection.send(msg) + + self.macToPort[packet.src] = event.port # 1 + + if not self.transparent: # 2 + if packet.type == packet.LLDP_TYPE or packet.dst.isBridgeFiltered(): + drop() # 2a + return + + if packet.dst.is_multicast: + flood() # 3a + else: + if packet.dst not in self.macToPort: # 4 + flood("Port for %s unknown -- flooding" % (packet.dst,)) # 4a + else: + port = self.macToPort[packet.dst] + if port == event.port: # 5 + # 5a + log.warning("Same port for packet from %s -> %s on %s.%s. Drop." + % (packet.src, packet.dst, dpid_to_str(event.dpid), port)) + drop(10) + return + # 6 + log.debug("installing flow for %s.%i -> %s.%i" % + (packet.src, event.port, packet.dst, port)) + msg = of.ofp_flow_mod() + msg.match = of.ofp_match.from_packet(packet, event.port) + msg.idle_timeout = 10 + msg.hard_timeout = 30 + msg.actions.append(of.ofp_action_output(port = port)) + msg.data = event.ofp # 6a + self.connection.send(msg) + + +class l2_learning (object): + """ + Waits for OpenFlow switches to connect and makes them learning switches. + """ + def __init__ (self, transparent): + core.openflow.addListeners(self) + self.transparent = transparent + + def _handle_ConnectionUp (self, event): + log.debug("Connection %s" % (event.connection,)) + LearningSwitch(event.connection, self.transparent) + + +def launch (transparent=False, hold_down=_flood_delay): + """ + Starts an L2 learning switch. + """ + try: + global _flood_delay + _flood_delay = int(str(hold_down), 10) + assert _flood_delay >= 0 + except: + raise RuntimeError("Expected hold-down to be a number") + + core.registerNew(l2_learning, str_to_bool(transparent)) diff --git a/pyretic/kinetic/resettableTimer.py b/pyretic/kinetic/resettableTimer.py new file mode 100644 index 00000000..ce366fb9 --- /dev/null +++ b/pyretic/kinetic/resettableTimer.py @@ -0,0 +1,70 @@ +from threading import Thread, Event, Timer +import time + +def TimerReset(*args, **kwargs): + """ Global function for Timer """ + return _TimerReset(*args, **kwargs) + + +class _TimerReset(Thread): + """Call a function after a specified number of seconds: + + t = TimerReset(30.0, f, args=[], kwargs={}) + t.start() + t.cancel() # stop the timer's action if it's still waiting + """ + + def __init__(self, interval, function, args=[], kwargs={}): + Thread.__init__(self) + self.interval = interval + self.function = function + self.args = args + self.kwargs = kwargs + self.finished = Event() + self.resetted = True + + def cancel(self): + """Stop the timer if it hasn't finished yet""" + self.finished.set() + +# def run(self): +# print "Time: %s - timer running..." % time.asctime() +# +# while self.resetted: +# print "Time: %s - timer waiting for timeout in %.2f..." 
% (time.asctime(), self.interval) +# self.resetted = False +# self.finished.wait(self.interval) +# +# if not self.finished.isSet(): +# self.function(*self.args, **self.kwargs) +# self.finished.set() +# print "Time: %s - timer finished!" % time.asctime() + + def reset(self, interval=None): + """ Reset the timer """ + + if interval: +# print "Time: %s - timer resetting to %.2f..." % (time.asctime(), interval) + self.interval = interval + else: + pass +# print "Time: %s - timer resetting..." % time.asctime() + + self.resetted = True + self.finished.set() + self.finished.clear() + + + def run(self): + while not self.finished.isSet(): +# print "Time: %s - timer running..." % time.asctime() + + self.resetted = True + while self.resetted: +# print "Time: %s - timer waiting for timeout in %.2f..." % (time.asctime(), self.interval) + self.resetted = False + self.finished.wait(self.interval) + + if not self.finished.isSet(): + self.function(*self.args, **self.kwargs) +# print "Time: %s - timer finished!" % time.asctime() diff --git a/pyretic/kinetic/scripts/evaluation/compile_time.py b/pyretic/kinetic/scripts/evaluation/compile_time.py new file mode 100644 index 00000000..a55dd008 --- /dev/null +++ b/pyretic/kinetic/scripts/evaluation/compile_time.py @@ -0,0 +1,60 @@ +from optparse import OptionParser +import socket +import sys +import json +import re +import ipaddr +import subprocess + + +#python json_sender.py -n authenticated -l True --flow="{srcip=10.0.0.2}" -a 127.0.0.1 -p 50001 +BASE_CMD = "python ../../json_sender.py -a 127.0.0.1 -p 50001 " + +def main(): + desc = ( 'Measure compile time' ) + usage = ( '%prog [options]\n' + '(type %prog -h for details)' ) + op = OptionParser( description=desc, usage=usage ) + + # Options + op.add_option( '--num', '-n', action="store",\ + dest="num_events", help = 'Number of events' ) + + if len(sys.argv) != 3: + print "Wrong number of arguments" + op.print_help() + sys.exit() + + # Parsing and processing + options, args = op.parse_args() + + # Events and values + ev_tuple_list = [] + ev_tuple_list.append(('authenticated','True')) +# ev_tuple_list.append(('infected','True')) + + # Set starting IP address + the_IP = ipaddr.IPAddress('1.1.1.1') + + # Start sending events + for i in range(int(options.num_events)): + for j in ev_tuple_list: + cmd = BASE_CMD + "-n " + j[0] + " -l " + j[1] + " --flow='{srcip="+str(the_IP)+"}'" + + p = subprocess.Popen(cmd, stdout=subprocess.PIPE,shell=True) + out, err = p.communicate() + + the_IP = the_IP + 1 + if str(the_IP).endswith('0') or str(the_IP).endswith('255'): + the_IP = the_IP + 1 + + # End and flush. 
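+    # ("endofworld" is a sentinel event; the assumption is that the listening
+    # Kinetic app uses it to mark the end of the measurement run.)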
+ end_cmd = "python ../../json_sender.py -a 127.0.0.1 -p 50001 -n endofworld -l True --flow='{srcip=1.1.1.1}'" + p = subprocess.Popen(end_cmd, stdout=subprocess.PIPE,shell=True) + out, err = p.communicate() + + + +if __name__ == '__main__': + main() + diff --git a/pyretic/kinetic/scripts/evaluation/output/dumm b/pyretic/kinetic/scripts/evaluation/output/dumm new file mode 100644 index 00000000..e69de29b diff --git a/pyretic/kinetic/scripts/evaluation/survey_parser.py b/pyretic/kinetic/scripts/evaluation/survey_parser.py new file mode 100755 index 00000000..d6b91d52 --- /dev/null +++ b/pyretic/kinetic/scripts/evaluation/survey_parser.py @@ -0,0 +1,75 @@ +#!/usr/bin/python + +import re +import sys +from collections import defaultdict, OrderedDict +import operator + +PRINT_PARSING_EXCEPTION=False +FUZZY_SPELLING=True + +if FUZZY_SPELLING: + kinetic_p = re.compile('k\w+tic',re.I) + pyretic_p = re.compile('p\w+tic',re.I) +else: + kinetic_p = re.compile('kinetic',re.I) + pyretic_p = re.compile('pyretic',re.I) +pox_p = re.compile('pox',re.I) +integer_p = re.compile('[0-9]+') +tab_p = re.compile('\t') + +if __name__ == '__main__': + + file_name = sys.argv[1] + + results = defaultdict(lambda: 0) + ordering = defaultdict(lambda: defaultdict(lambda: 0)) + + f = open(file_name,'r') + for line in f.xreadlines(): + + try: + count,line = tab_p.split(line,1) + count = int(count) + except: +# print 'skipping' + continue + + position = dict() + + kinetic_m = kinetic_p.search(line) + pyretic_m = pyretic_p.search(line) + pox_m = pox_p.search(line) + numbering = integer_p.findall(line) + numbering = map(int,numbering) + + if kinetic_m: + position['kinetic'] = kinetic_m.start() + if pyretic_m: + position['pyretic'] = pyretic_m.start() + if pox_m: + position['pox'] = pox_m.start() + + position = OrderedDict(sorted(position.items(),key=operator.itemgetter(1))) + order = [k for k,v in position.items()] + + if len(numbering) == 0 or len(numbering) > 3 or max(numbering) > 3: + numbering = [1,2,3] + + if len(order) != len(numbering): + if PRINT_PARSING_EXCEPTION: + print '---- parsing exception -----' + print line, + print len(order),len(numbering) + print '----------------------------' + continue + + ranked = zip(order,numbering) + + for platform,rank in ranked: + ordering[rank][platform] += count + + print '---- count of first/second/third place ---------' + for rank,platform_count in sorted(ordering.items()): + print 'Rank %d' % rank, + print [(platform,count) for platform,count in platform_count.items()] diff --git a/pyretic/kinetic/scripts/evaluation/survey_parser_multiple_choice.py b/pyretic/kinetic/scripts/evaluation/survey_parser_multiple_choice.py new file mode 100755 index 00000000..4444c7e3 --- /dev/null +++ b/pyretic/kinetic/scripts/evaluation/survey_parser_multiple_choice.py @@ -0,0 +1,51 @@ +#!/usr/bin/python + +import re +import sys +from collections import defaultdict, OrderedDict +import operator + +PRINT_PARSING_EXCEPTION=False +FUZZY_SPELLING=True + +answer_p = re.compile('\d+\t[\w\s]+', re.I) +answer_p = re.compile('(\d+)\t(.+)\n', re.I) + +if __name__ == '__main__': + + file_name = sys.argv[1] + f = open(file_name,'r') + + value_number_map = {} + total_responses = 0 + + for line in f.xreadlines(): + answer_m = answer_p.search(line) + + # Actual entry + if answer_m: + value = int(answer_m.group(1).rstrip('\t')) + entry_name = answer_m.group(2).rstrip('\t') + value_number_map[entry_name] = value + + total_responses = total_responses + value + else: + if line!='\n': + # Very likely a the question 
itself.
+                print '\n============================'
+                print 'Question:',line.rstrip('\n')
+                print '============================\n'
+
+    print 'Total responses:',total_responses,'\n'
+    print 'Entry_Name & ' + 'Number_of_responses & ' + 'Percentage\n'
+    entry_string = ''
+    entry_string = ''
+
+    for k in value_number_map:
+        percentage = (float(value_number_map[k])/float(total_responses) ) * 100.0
+        line_str = ''
+        line_str += k + ' & ' + str(value_number_map[k]) + ' & ' + "%.2f" % percentage + '\\\\'
+        print line_str
+        print '\hline'
+    print 'Total & ' + str(total_responses) + ' & 100\\\\'
+    print '\n'
diff --git a/pyretic/kinetic/scripts/evaluation/topo.py b/pyretic/kinetic/scripts/evaluation/topo.py
new file mode 100644
index 00000000..332c3fd6
--- /dev/null
+++ b/pyretic/kinetic/scripts/evaluation/topo.py
@@ -0,0 +1,184 @@
+from mininet.topo import Topo
+from mininet.net import Mininet
+from mininet.node import RemoteController
+from mininet.node import CPULimitedHost
+from mininet.link import TCLink
+from mininet.cli import CLI
+from mininet.util import irange,dumpNodeConnections
+from mininet.log import setLogLevel
+import time
+import math
+from optparse import OptionParser
+
+
+## Make linear topology
+class EventTopo(Topo):
+    def __init__(self, N, **opts):
+        # Initialize topology and default options
+        Topo.__init__(self, **opts)
+
+        # Create switches and hosts
+        hosts = [ self.addHost( 'h%s' % h )
+                  for h in irange( 1, N ) ]
+        switches = [ self.addSwitch( 's%s' % s )
+                     for s in irange( 1, N ) ]
+
+        # Wire up switches
+        last = None
+        for switch in switches:
+            if last:
+                self.addLink( last, switch )
+            last = switch
+
+        # Wire up hosts
+        for host, switch in zip( hosts, switches ):
+            self.addLink( host, switch )
+
+topos = { 'mytopo': ( lambda: EventTopo(5) ) }
+
+
+### Start ping between hosts
+def startpings( host, targetip, wait_time):
+    "Tell host to repeatedly ping targets"
+
+    # Simple ping loop
+    cmd = ( 'while true; do '
+            ' echo -n %s "->" %s ' % (host.IP(), targetip.IP()) +
+            ' `ping %s -i %s -W 0.9 -c 50 >> ./output/%s_%s`;' % (targetip.IP(), str(wait_time), host.IP(),targetip.IP()) +
+            ' break;'
+            'done &' )
+
+    print ( '*** Host %s (%s) will be pinging ips: %s' %
+            ( host.name, host.IP(), targetip.IP() ) )
+
+    host.cmd( cmd )
+
+
+### RTT test
+def RTTTest(n, wait_time):
+
+    print "a. Firing up Mininet"
+    net = Mininet(topo=EventTopo(n), controller=lambda name: RemoteController( 'c0', '127.0.0.1' ), host=CPULimitedHost, link=TCLink)
+    net.start()
+
+    h1 = net.get('h1')
+    time.sleep(5)
+
+    # Start pings
+    print "b. Starting Test"
+    hosts = net.hosts
+
+    for idx1,h1 in enumerate(hosts):
+        for idx2,h2 in enumerate(hosts):
+            if h1!=h2 and idx1 < idx2:
+                startpings(h1, h2, wait_time)
+
+    # (teardown assumed: drop to the CLI, then stop the network)
+    CLI(net)
+    print "c. Stopping Mininet"
+    net.stop()
+
+
+### Bandwidth test
+def BWTest(n, wait_time):
+
+    print "a. Firing up Mininet"
+    net = Mininet(topo=EventTopo(n), controller=lambda name: RemoteController( 'c0', '127.0.0.1' ), host=CPULimitedHost, link=TCLink)
+    net.start()
+
+    time.sleep(5)
+
+    print "b. Starting Test"
+    hosts = net.hosts
+
+    for idx1,h1 in enumerate(hosts):
+        for idx2,h2 in enumerate(hosts):
+            if h1!=h2 and idx1 < idx2:
+                # h2 serves, h1 measures; the iperf options here are assumed
+                h2.cmd( 'iperf -s &' )
+                cmd = ('iperf -c %s > ./output/iperf_%s_%s &') %(h2.IP(), h1.IP(), h2.IP())
+
+                print ( '*** Host %s (%s) will do iperf to: %s' %
+                        ( h1.name, h1.IP(), h2.IP() ) )
+
+                h1.cmd( cmd )
+
+    CLI(net)
+    print "c. Stopping Mininet"
+    net.stop()
+
+
+def main():
+    desc = ( 'Generate Mininet Testbed' )
+    usage = ( '%prog [options]\n'
+              '(type %prog -h for details)' )
+    op = OptionParser( description=desc, usage=usage )
+
+    ### Options
+    op.add_option( '--rate', '-r', action="store", \
+                   dest="rate", help = "Set rate. S for (n/second), M for (n/minute). Don't include the brackets when specifying n" )
+
+    op.add_option( '--switchNum', '-s', action="store", \
+                   dest="switchnum", help = "Specify the number of switches for this linear topology."
) + + op.add_option( '--mode', '-m', action="store", \ + dest="mode", help = "rtt or bw" ) + + + wait_time = 0.0 + options, args = op.parse_args() + + if options.rate is not None: + if options.rate.endswith('S'): + num_str = options.rate.rstrip('S') + wait_time = 1.0/float(num_str) + elif options.rate.endswith('M'): + num_str = options.rate.rstrip('M') + wait_time = 60.0/float(num_str) + else: + print 'Wrong rate format. Abort.' + op.print_usage() + return + else: + print '\nNo rate given. Abort.\n' + op.print_usage() + return + + if options.switchnum is not None and options.mode is not None and options.rate is not None: + setLogLevel('info') + if options.mode == 'rtt': + RTTTest(int(options.switchnum), wait_time) + elif options.mode == 'bw': + BWTest(int(options.switchnum), wait_time) + else: + print "wrong mode. exit" + op.print_usage() + return + + else: + print '\nNo switch number given. Abort.\n' + op.print_usage() + +if __name__ == '__main__': + main() + diff --git a/pyretic/kinetic/scripts/linenum_calc.py b/pyretic/kinetic/scripts/linenum_calc.py new file mode 100644 index 00000000..e586f7cc --- /dev/null +++ b/pyretic/kinetic/scripts/linenum_calc.py @@ -0,0 +1,75 @@ +import os,sys,re +ofile = open('lines.txt','w+') + +def getlines(dirs): + n = 0 + ln=0 + ifile = open(dirs,'r') + flag = 0 + main_flag = 0 + comma_flag = 0 + for line in ifile.readlines(): + ln+=1 + if flag== 0 and main_flag == 0: + line = line.strip(' ') + match_1 = re.match('(from .* )*import ',line) + match_pr = re.match('print ',line) + match_2 = re.search(',\s*$',line) + if line.startswith('#') or line.startswith('//') or line =='\n' or line.startswith('"""') or line.startswith("'''"): + if line.startswith('"""') or line.startswith("'''"): + if line.count('"""')!=2: + flag=1 + #continue + elif line.startswith('def main') or line.startswith('def launch'): + main_flag=1 + continue + + # import statements + elif match_1: + continue + + # print statements + elif match_pr: + continue + + + elif match_2: + comma_flag = 1 + continue + + else: + n+=1 + print line + elif main_flag == 1: + if line.startswith('def ') or line.startswith('class '): + main_flag=0 + elif comma_flag == 1: + match_3 = re.search(',\s*$',line) + if match_3 is None: + print line + n+=1 + comma_flag = 0 + + elif flag ==1: + if '"""' in line or "'''" in line: + flag=0 + else: + print 'what?' + + return n + +fdir_prev='' +for r,d,f in os.walk(os.getcwd()): + for files in f: + if files.endswith('.py') or files.endswith('.java'): + print files + dirs = os.path.join(r,files) + fdir = dirs.split(files)[0] + if fdir!=fdir_prev: + ofile.write('\n\n## Dir: '+fdir+'\n') + + n_line = getlines(dirs) + ofile.write(files+': '+str(n_line)+'\n') + + + fdir_prev= fdir diff --git a/pyretic/kinetic/scripts/mn-sflow.sh b/pyretic/kinetic/scripts/mn-sflow.sh new file mode 100755 index 00000000..047a7f20 --- /dev/null +++ b/pyretic/kinetic/scripts/mn-sflow.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +case "$1" in + enable) + echo "Enabling sFlow ..." + ovs-vsctl -- --id=@sflow create sflow agent=eth3 target=\"127.0.0.1:6343\" \ + sampling=2 polling=20 -- -- set bridge $2 sflow=@sflow + ;; + disable) + echo "Disabling sFlow ..." 
+ ovs-vsctl -- clear bridge $2 sflow + ;; + *) + echo "Usage: mn-sflow.sh {enable|disable} {switch-id}" + ;; +esac diff --git a/pyretic/kinetic/scripts/ov-switch.sh b/pyretic/kinetic/scripts/ov-switch.sh new file mode 100755 index 00000000..19c9d59a --- /dev/null +++ b/pyretic/kinetic/scripts/ov-switch.sh @@ -0,0 +1,55 @@ +#!/bin/bash + +case "$1" in + start) + echo "Starting Open vSwitch ..." + # Clear eth1 + ifconfig eth1 0 + ifconfig eth1 promisc + # Clear eth2 + ifconfig eth2 0 + ifconfig eth2 promisc + # Clear eth0 + ifconfig eth0 0 + ifconfig eth0 promisc + # Configure "ov-switch" + ovs-vsctl add-br ov-switch + ovs-vsctl add-port ov-switch eth1 + ovs-vsctl add-port ov-switch eth2 + ovs-vsctl add-port ov-switch eth0 + # Bring up "ov-switch" + ifconfig ov-switch up + # Assign remote controller + ovs-vsctl set-controller ov-switch tcp:127.0.0.1:6633 + ovs-vsctl set-fail-mode ov-switch secure + ;; + stop) + echo "Stopping Open vSwitch ..." + # Delete remote controller + ovs-vsctl del-controller ov-switch + ovs-vsctl del-fail-mode ov-switch + # Turn down "ov-switch" + ifconfig ov-switch down + # Delete "ov-switch" + ovs-vsctl del-br ov-switch + # Restart Network Service + ifconfig eth1 -promisc + ifconfig eth1 down + ifconfig eth2 -promisc + ifconfig eth2 down + ifconfig eth0 -promisc + ifconfig eth0 down + ;; + enable-sflow) + echo "Enabling sFlow ..." + ovs-vsctl -- --id=@sflow create sflow agent=eth3 target=\"127.0.0.1:6343\" \ + sampling=2 polling=20 -- -- set bridge ov-switch sflow=@sflow + ;; + disable-sflow) + echo "Disabling sFlow ..." + ovs-vsctl -- clear bridge ov-switch sflow + ;; + *) + echo "Usage: ov-switch.sh {start|stop}" + ;; +esac diff --git a/pyretic/kinetic/smv/NuSMV_32 b/pyretic/kinetic/smv/NuSMV_32 new file mode 100755 index 00000000..ce065cab Binary files /dev/null and b/pyretic/kinetic/smv/NuSMV_32 differ diff --git a/pyretic/kinetic/smv/NuSMV_64 b/pyretic/kinetic/smv/NuSMV_64 new file mode 100755 index 00000000..08b72766 Binary files /dev/null and b/pyretic/kinetic/smv/NuSMV_64 differ diff --git a/pyretic/kinetic/smv/__init__.py b/pyretic/kinetic/smv/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/pyretic/kinetic/smv/model_checker.py b/pyretic/kinetic/smv/model_checker.py new file mode 100644 index 00000000..796b44a6 --- /dev/null +++ b/pyretic/kinetic/smv/model_checker.py @@ -0,0 +1,48 @@ +import subprocess +import platform +import os +import sys + +class ModelChecker(object): + + def __init__(self, smv_str, appname_str): + arch_str = platform.architecture() + if arch_str: + if not os.environ.has_key('KINETICPATH'): + print 'KINETICPATH env variable not set. 
Set it with export command.\n' + sys.exit() + return + kinetic_path_str = os.environ['KINETICPATH'] + if arch_str[0].startswith('32'): + self.exec_cmd = kinetic_path_str + '/smv/NuSMV_32' + else: + self.exec_cmd = kinetic_path_str + '/smv/NuSMV_64' + self.smv_file_directory = kinetic_path_str + '/smv/smv_files/' + self.filename = self.smv_file_directory + appname_str + '.smv' + self.smv_str = smv_str +'\n' + + def add_spec(self,spec_str): + self.smv_str = self.smv_str + spec_str + '\n' + + def save_as_smv_file(self): + fd = open(self.filename, 'w') + fd.write(self.smv_str) + fd.close() + + def verify(self): + print self.smv_str + print '========================== NuSMV OUTPUT ==========================\n' + + import datetime as dt + n1=dt.datetime.now() + + p = subprocess.Popen([self.exec_cmd, '-r', self.filename], stdout=subprocess.PIPE) + out, err = p.communicate() + + n2=dt.datetime.now() + print "\n=== Verification takes (ms): ",float((n2-n1).microseconds)/1000.0,"===" + print "=== Verification takes (s): ",float((n2-n1).seconds),"===\n" + + print out + + print '======================== NuSMV OUTPUT END ========================\n' diff --git a/pyretic/kinetic/smv/smv_files/NOTES b/pyretic/kinetic/smv/smv_files/NOTES new file mode 100644 index 00000000..38559207 --- /dev/null +++ b/pyretic/kinetic/smv/smv_files/NOTES @@ -0,0 +1,3 @@ +=============================================================== +smv files that are input to NuSMV goes in this directory. +=============================================================== diff --git a/pyretic/kinetic/util/__init__.py b/pyretic/kinetic/util/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/pyretic/kinetic/util/resetting_q.py b/pyretic/kinetic/util/resetting_q.py new file mode 100644 index 00000000..1bd99239 --- /dev/null +++ b/pyretic/kinetic/util/resetting_q.py @@ -0,0 +1,27 @@ +from pyretic.lib.corelib import * +from pyretic.lib.std import * +import pyretic.lib.query as query + +class resetting_q(DynamicFilter): + def __init__(self,cls,**kwargs): + self.cls = cls + self.kwargs = kwargs + super(resetting_q,self).__init__() + + def register_callback(self,cb): + self.callback = cb + self.reset() + + def reset(self): + # policy = self.cls(self.kwargs) + # print type(policy) + # print policy + # self.policy = policy + self.policy = query.packets(limit=1,group_by=['srcmac','switch']) + # print type(self.policy) + # print self.policy + self.policy.register_callback(self.callback) + + def set_network(self,network): + self.reset() + diff --git a/pyretic/kinetic/util/rewriting.py b/pyretic/kinetic/util/rewriting.py new file mode 100644 index 00000000..4e57a63c --- /dev/null +++ b/pyretic/kinetic/util/rewriting.py @@ -0,0 +1,45 @@ +from pyretic.lib.corelib import * +from pyretic.lib.std import * + +def getMACFromIP(ip): + ip_str = str(ip) + part = ip_str.split('.')[3] + mac_str = "00:00:00:00:00:" + "{:02x}".format(int(part)) + return mac_str + +def rewriteDstIPAndMAC_Public(client_ips, public_ip_str, target_ip_str): + target_ips = [IP(target_ip_str)]*len(client_ips) + d = zip(client_ips, target_ips) + return intersection([subsp(c,r,IP(public_ip_str)) for c,r in d]) + +def rewriteDstIPAndMAC(client_ips, target_ip_str): + target_ips = [IP(target_ip_str)]*len(client_ips) + d = zip(client_ips, target_ips) + pol = None + for ip in client_ips: + if pol == None: + pol = intersection([subs(c,r, IP(ip)) for c,r in d]) + else: + pol = pol + intersection([subs(c,r, IP(ip)) for c,r in d]) + return pol + + +# subroutine of rewrite() +def 
subs(c,r,p): + c_to_p = match(srcip=c,dstip=p) + r_to_c = match(srcip=r,dstip=c) + rewrite_mac_policy = if_(match(dstip=IP(r),ethtype=2048), + modify(dstmac=MAC(getMACFromIP(r))),if_(match(ethtype=2054),passthrough,drop)) + + return (((c_to_p >> modify(dstip=r))+(r_to_c >> modify(srcip=p))+(~r_to_c >> ~c_to_p))) >> rewrite_mac_policy + + +# subroutine of rewrite() +def subsp(c,r,p): + c_to_p = match(srcip=c,dstip=p) + r_to_c = match(srcip=r,dstip=c) + rewrite_mac_policy = if_(match(dstip=IP(r),ethtype=2048), + modify(dstmac=MAC(getMACFromIP(r))),passthrough) + + return (((c_to_p >> modify(dstip=r))+(r_to_c >> modify(srcip=p))+(~r_to_c >> ~c_to_p))) >> rewrite_mac_policy +
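+
+# Example (sketch): rewriteDstIPAndMAC_Public composes one subsp() policy per
+# client to redirect traffic addressed to a public-facing IP onto a target
+# host. The addresses below are made up for illustration only:
+#
+#   clients = [IP('10.0.0.1'), IP('10.0.0.2')]
+#   redirect = rewriteDstIPAndMAC_Public(clients, '10.0.0.100', '10.0.0.3')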