This repository was archived by the owner on Jun 27, 2018. It is now read-only.

Commit feb20a5

Joshua Reich authored and committed
major refactor of sequential composition of classifiers
1 parent 83a50ea commit feb20a5

File tree

1 file changed: +97 -98 lines changed

pyretic/core/classifier.py

Lines changed: 97 additions & 98 deletions
@@ -176,114 +176,113 @@ def _cross(r1,r2):
 
     def __rshift__(c1, c2):
         from pyretic.core.language import match, modify, drop, identity, Controller, CountBucket, DerivedPolicy
-        def _sequence_action_classifier(act, c2):
-            # given a test b and an action p, return a test
-            # b' such that p >> b == b' >> p.
-            def _commute_test(act, pkts):
-                while isinstance(act, DerivedPolicy):
-                    act = act.policy
-                if act == identity:
-                    return pkts
-                elif act == drop:
-                    return drop
-                elif act == Controller or isinstance(act, CountBucket):
+        # given a test b and an action p, return a test
+        # b' such that p >> b == b' >> p.
+        def _commute_test(act, pkts):
+            while isinstance(act, DerivedPolicy):
+                act = act.policy
+            if act == identity:
+                return pkts
+            elif act == drop:
+                return drop
+            elif act == Controller or isinstance(act, CountBucket):
+                return identity
+            elif isinstance(act, modify):
+                new_match_dict = {}
+                if pkts == identity:
                     return identity
-                elif isinstance(act, modify):
-                    new_match_dict = {}
-                    if pkts == identity:
-                        return identity
-                    elif pkts == drop:
+                elif pkts == drop:
+                    return drop
+                for f, v in pkts.map.iteritems():
+                    if f in act.map and act.map[f] == v:
+                        continue
+                    elif f in act.map and act.map[f] != v:
                         return drop
-                    for f, v in pkts.map.iteritems():
-                        if f in act.map and act.map[f] == v:
-                            continue
-                        elif f in act.map and act.map[f] != v:
-                            return drop
-                        else:
-                            new_match_dict[f] = v
-                    if len(new_match_dict) == 0:
-                        return identity
-                    return match(**new_match_dict)
-                else:
-                    # TODO (cole) use compile error.
-                    # TODO (cole) what actions are allowable?
-                    raise TypeError
-
-            # sequentially compose actions. a1 must be a
-            # single action. Returns a list of actions.
-            def _sequence_actions(a1, as2):
-                while isinstance(a1, DerivedPolicy):
-                    a1 = a1.policy
-                # TODO: be uniform about returning copied or modified objects.
-                new_actions = []
-                if a1 == drop:
-                    return [drop]
-                elif a1 == identity:
-                    return as2
-                elif a1 == Controller or isinstance(a1, CountBucket):
-                    return [a1]
-                elif isinstance(a1, modify):
-                    for a2 in as2:
-                        while isinstance(a2, DerivedPolicy):
-                            a2 = a2.policy
+                    else:
+                        new_match_dict[f] = v
+                if len(new_match_dict) == 0:
+                    return identity
+                return match(**new_match_dict)
+            else:
+                # TODO (cole) use compile error.
+                # TODO (cole) what actions are allowable?
+                raise TypeError
+
+        # sequentially compose actions. a1 must be a
+        # single action. Returns a list of actions.
+        def _sequence_actions(a1, as2):
+            while isinstance(a1, DerivedPolicy):
+                a1 = a1.policy
+            # TODO: be uniform about returning copied or modified objects.
+            new_actions = []
+            if a1 == drop:
+                return [drop]
+            elif a1 == Controller or isinstance(a1, CountBucket):
+                return [a1]
+            elif a1 == identity:
+                return as2
+            elif isinstance(a1, modify):
+                for a2 in as2:
+                    while isinstance(a2, DerivedPolicy):
+                        a2 = a2.policy
+                    if a2 == drop:
+                        new_actions.append(drop)
+                    elif a2 == Controller or isinstance(a2, CountBucket):
+                        new_actions.append(a2)
+                    elif a2 == identity:
+                        new_actions.append(a1)
+                    elif isinstance(a2, modify):
                         new_a1 = modify(**a1.map.copy())
-                        if a2 == drop:
-                            new_actions.append(drop)
-                        elif a2 == Controller or isinstance(a2, CountBucket):
-                            new_actions.append(a2)
-                        elif a2 == identity:
-                            new_actions.append(new_a1)
-                        elif isinstance(a2, modify):
-                            new_a1.map.update(a2.map)
-                            new_actions.append(new_a1)
-                        else:
-                            raise TypeError
-                    return new_actions
-                else:
-                    raise TypeError
-            # END _commute_test and _sequence_actions
-
-            c3 = Classifier()
-            for r2 in c2.rules:
-                pkts = _commute_test(act, r2.match)
-                if pkts == identity:
-                    acts = _sequence_actions(act, r2.actions)
-                    c3.append(Rule(identity, acts))
-                    break
-                elif pkts == drop:
-                    continue
-                else:
-                    acts = _sequence_actions(act, r2.actions)
-                    c3.append(Rule(pkts, acts))
-            if len(c3) == 0:
-                c3.append(Rule(identity, [drop]))
-            return c3
+                        new_a1.map.update(a2.map)
+                        new_actions.append(new_a1)
+                    else:
+                        raise TypeError
+                return new_actions
+            else:
+                raise TypeError
+
+        # generates a (potentially) non-total classifier
+        # containing a single rule, or None
+        def _cross_act(r1,act,r2):
+            m = r1.match.intersect(_commute_test(act, r2.match))
+            actions = _sequence_actions(act,r2.actions)
+            if actions == [drop]:
+                return Classifier([Rule(r1.match,actions)])
+            elif m == drop:
+                return None
+            else:
+                return Classifier([Rule(m,actions)])
 
-        # END _sequence_action_classifier
+        # returns a potentially non-total classifier
+        # suitable for concatenating w/ other potentially non-total classifiers
+        def _cross(r1,r2):
+            c = None
+            for act in r1.actions:
+                cross = _cross_act(r1,act,r2)
+                if c is None:
+                    c = cross
+                elif not cross is None:
+                    # parallel compose to get c_tmp
+                    c_tmp = c + cross
+                    # but since both c and cross were potentially non-total
+                    # we need to append both c and cross to c_tmp
+                    c_tmp.append(c)
+                    c_tmp.append(cross)
+                    # set c to c_tmp and optimize
+                    c = c_tmp
+                    c = c.optimize()
+            return c
 
         # core __rshift__ logic begins here.
+
         # start with an empty set of rules for the output classifier
         # then for each rule in the first classifier (self)
         c3 = Classifier()
         for r1 in c1.rules:
-            # sequence the actions in second classifier c2 w/ respect to r1
-            c2_seqd = Classifier([Rule(identity, [drop])])
-            for act in r1.actions:
-                c2_seqd = c2_seqd + _sequence_action_classifier(act, c2)
-
-            # for each rule in the sequenced c2,
-            # intersect the rule's match with r1's match
-            for r2 in c2_seqd.rules:
-                r2.match = r2.match.intersect(r1.match)
-
-            # filter out rules that cannot match any packet
-            c_tmp = Classifier(r2 for r2 in c2_seqd.rules
-                               if r2.match != drop)
-            c_tmp = c_tmp.optimize()
-
-            # append the optimized rules
-            c3.append(c_tmp)
-
+            for r2 in c2.rules:
+                c_tmp = _cross(r1,r2)
+                if not c_tmp is None:
+                    c3.append(c_tmp)
         # when all rules in c1 and c2 have been crossed
         # optimize c3
         c3 = c3.optimize()
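To make the commuting step concrete: below is a minimal standalone sketch of what the new `_commute_test` does in its `modify` branch, using plain dicts of field -> value in place of Pyretic's `match`/`modify` map objects. The helper name `commute_test` and the use of `True`/`False` to stand in for `identity`/`drop` are illustrative conventions only, not part of this repository.

```python
# Sketch of the rewrite rule p >> b == b' >> p for a modify action p and
# a match test b: compute the test b' that must hold *before* the write.
def commute_test(mod, test):
    residual = {}
    for f, v in test.items():
        if f in mod and mod[f] == v:
            continue          # the write guarantees this field; drop the check
        elif f in mod and mod[f] != v:
            return False      # the write contradicts the test: nothing matches
        else:
            residual[f] = v   # field untouched by the write: test it up front
    return residual if residual else True

# modify(dstport=80) >> match(dstport=80) behaves like identity >> modify(dstport=80)
assert commute_test({'dstport': 80}, {'dstport': 80}) is True
# modify(dstport=22) >> match(dstport=80) can never match: drop
assert commute_test({'dstport': 22}, {'dstport': 80}) is False
# fields the write never touches must still be tested before it runs
assert commute_test({'dstport': 80}, {'srcip': '10.0.0.1'}) == {'srcip': '10.0.0.1'}
```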

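The action-sequencing step admits a similarly small sketch. When `_sequence_actions` composes two `modify` actions, the later write wins on any field both of them set (the `new_a1.map.update(a2.map)` line in the diff). Again plain dicts stand in for Pyretic's modify maps, and `sequence_modifies` is an illustrative name, not Pyretic API.

```python
# Sketch of modify-then-modify composition: copy the first write (mirroring
# modify(**a1.map.copy())), then let the second write shadow it.
def sequence_modifies(a1, a2):
    composed = dict(a1)   # copy first, so the input action is not mutated
    composed.update(a2)   # later writes shadow earlier ones
    return composed

# modify(dstport=80) >> modify(dstport=22, tos=4) collapses to one write
assert sequence_modifies({'dstport': 80}, {'dstport': 22, 'tos': 4}) == {'dstport': 22, 'tos': 4}
# fields only the first write sets survive the composition
assert sequence_modifies({'vlan': 2}, {'dstport': 22}) == {'vlan': 2, 'dstport': 22}
```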
Comments (0)