Skip to content
This repository was archived by the owner on Jun 27, 2018. It is now read-only.

Commit 9d8402d

Browse files
author
Joshua Reich
committed
Merge branch 'refactor_compilation'
Conflicts: pyretic/core/classifier.py
2 parents 28fa6ab + e703697 commit 9d8402d

File tree

3 files changed

+224
-183
lines changed

3 files changed

+224
-183
lines changed

pyretic/core/classifier.py

Lines changed: 197 additions & 150 deletions
Original file line number | Diff line number | Diff line change
@@ -1,3 +1,7 @@
1+
2+
from collections import deque
3+
import copy
4+
15
###############################################################################
26
# Classifiers
37
# an intermediate representation for proactive compilation.
@@ -9,8 +13,10 @@ class Rule(object):
913
Pyretic actions.
1014
"""
1115

12-
# Matches m should be of the match class. Actions acts should be a list of
13-
# either modify, identity, or drop policies.
16+
# Matches m should be of the match class. Actions acts should be a set of
17+
# modify, identity, and/or Controller/CountBucket/FwdBucket policies.
18+
# Actions in Rule are semantically meant to run in parallel
19+
# unlike OpenFlow rules.
1420
def __init__(self,m,acts):
1521
self.match = m
1622
self.actions = acts
@@ -54,11 +60,13 @@ class Classifier(object):
5460
tables.
5561
"""
5662

57-
def __init__(self, new_rules=[]):
63+
def __init__(self, new_rules=list()):
5864
import types
5965
if isinstance(new_rules, types.GeneratorType):
60-
self.rules = [r for r in new_rules]
66+
self.rules = deque([r for r in new_rules])
6167
elif isinstance(new_rules,list):
68+
self.rules = deque(new_rules)
69+
elif isinstance(new_rules,deque):
6270
self.rules = new_rules
6371
else:
6472
raise TypeError
@@ -81,153 +89,204 @@ def __ne__(self, other):
8189
"""Based on syntactic equality of policies."""
8290
return not (self == other)
8391

84-
def __add__(self,c2):
85-
from pyretic.core.language import drop
86-
c1 = self
87-
if c2 is None:
88-
return None
89-
c = Classifier([])
90-
# TODO (cole): make classifiers iterable
91-
for r1 in c1.rules:
92-
for r2 in c2.rules:
93-
intersection = r1.match.intersect(r2.match)
94-
if intersection != drop:
95-
# TODO (josh) logic for detecting when sets of actions can't be combined
96-
# e.g., [modify(dstip='10.0.0.1'),fwd(1)] + [modify(srcip='10.0.0.2'),fwd(2)]
97-
actions = r1.actions + r2.actions
98-
actions = filter(lambda a: a != drop,actions)
99-
if len(actions) == 0:
100-
actions = [drop]
101-
c.rules.append(Rule(intersection, actions))
102-
return c.optimize()
103-
104-
# Helper function for rshift: given a test b and an action p, return a test
105-
# b' such that p >> b == b' >> p.
106-
def _commute_test(self, act, pkts):
107-
from pyretic.core.language import modify, drop, identity, Controller, CountBucket, DerivedPolicy, match
108-
while isinstance(act, DerivedPolicy):
109-
act = act.policy
110-
if act == identity:
111-
return pkts
112-
elif act == drop:
113-
return drop
114-
elif act == Controller or isinstance(act, CountBucket):
115-
return identity
116-
elif isinstance(act, modify):
117-
new_match_dict = {}
118-
if pkts == identity:
119-
return identity
120-
elif pkts == drop:
121-
return drop
122-
for f, v in pkts.map.iteritems():
123-
if f in act.map and act.map[f] == v:
124-
continue
125-
elif f in act.map and act.map[f] != v:
126-
return drop
127-
else:
128-
new_match_dict[f] = v
129-
if len(new_match_dict) == 0:
130-
return identity
131-
return match(**new_match_dict)
92+
def eval(self, in_pkt):
93+
"""
94+
Evaluate against each rule in the classifier, starting with the
95+
highest priority. Return the set of packets resulting from applying
96+
the actions of the first rule that matches.
97+
"""
98+
for rule in self.rules:
99+
pkts = rule.eval(in_pkt)
100+
if pkts is not None:
101+
return pkts
102+
raise TypeError('Classifier is not total.')
103+
104+
def prepend(self, item):
105+
if isinstance(item, Rule):
106+
self.rules.appendleft(item)
107+
elif isinstance(item, Classifier):
108+
self.rules.extendleft(item.rules)
132109
else:
133-
# TODO (cole) use compile error.
134-
# TODO (cole) what actions are allowable?
135-
raise TypeError
110+
raise TypeError
136111

137-
# Helper function for rshift: sequentially compose actions. a1 must be a
138-
# single action. Returns a list of actions.
139-
def _sequence_actions(self, a1, as2):
140-
from pyretic.core.language import modify, drop, identity, Controller, CountBucket, DerivedPolicy
141-
while isinstance(a1, DerivedPolicy):
142-
a1 = a1.policy
143-
# TODO: be uniform about returning copied or modified objects.
144-
new_actions = []
145-
if a1 == drop:
146-
return [drop]
147-
elif a1 == identity:
148-
return as2
149-
elif a1 == Controller or isinstance(a1, CountBucket):
150-
return [a1]
151-
elif isinstance(a1, modify):
152-
for a2 in as2:
153-
while isinstance(a2, DerivedPolicy):
154-
a2 = a2.policy
155-
new_a1 = modify(**a1.map.copy())
156-
if a2 == drop:
157-
new_actions.append(drop)
158-
elif a2 == Controller or isinstance(a2, CountBucket):
159-
new_actions.append(a2)
160-
elif a2 == identity:
161-
new_actions.append(new_a1)
162-
elif isinstance(a2, modify):
163-
new_a1.map.update(a2.map)
164-
new_actions.append(new_a1)
165-
elif isinstance(a2, fwd):
166-
new_a1.map['outport'] = a2.outport
167-
new_actions.append(new_a1)
168-
else:
169-
raise TypeError
170-
return new_actions
112+
def append(self, item):
113+
if isinstance(item, Rule):
114+
self.rules.append(item)
115+
elif isinstance(item, Classifier):
116+
self.rules.extend(item.rules)
171117
else:
172118
raise TypeError
173119

174-
# Returns a classifier.
175-
def _sequence_action_classifier(self, act, c):
120+
def remove_last_rule(self):
121+
self.rules.pop()
122+
123+
def __copy__(self):
124+
copied_rules = map(copy.copy,self.rules)
125+
return Classifier(copied_rules)
126+
127+
128+
### NEGATE ###
129+
def __invert__(self):
130+
from pyretic.core.language import identity
131+
c = copy.copy(self)
132+
for r in c.rules:
133+
if len(r.actions) == 0:
134+
r.actions = {identity}
135+
elif r.actions == {identity}:
136+
r.actions = set()
137+
else:
138+
raise TypeError # TODO MAKE A CompileError TYPE
139+
return c
140+
141+
142+
### PARALLEL COMPOSITION
143+
144+
def __add__(c1, c2):
176145
from pyretic.core.language import drop, identity
177-
# TODO (cole): make classifiers easier to use w.r.t. adding/removing
178-
# rules.
179-
if len(c.rules) == 0:
180-
return Classifier([Rule(identity, [drop])])
181-
new_rules = []
182-
for rule in c.rules:
183-
pkts = self._commute_test(act, rule.match)
184-
if pkts == identity:
185-
acts = self._sequence_actions(act, rule.actions)
186-
new_rules += [Rule(identity, acts)]
187-
break
188-
elif pkts == drop:
189-
continue
146+
def _cross(r1,r2):
147+
intersection = r1.match.intersect(r2.match)
148+
if intersection != drop:
149+
actions = r1.actions | r2.actions
150+
return Rule(intersection, actions)
190151
else:
191-
acts = self._sequence_actions(act, rule.actions)
192-
new_rules += [Rule(pkts, acts)]
193-
if new_rules == []:
194-
return Classifier([Rule(identity, [drop])])
152+
return None
153+
154+
# start with an empty set of rules for the output classifier
155+
c3 = Classifier()
156+
assert(not (c1 is None and c2 is None))
157+
# then cross all pairs of rules in the first and second classifiers
158+
for r1 in c1.rules:
159+
for r2 in c2.rules:
160+
crossed_r = _cross(r1,r2)
161+
if crossed_r:
162+
c3.append(crossed_r)
163+
# if the classifier is empty, add a drop-all rule
164+
if len(c3) == 0:
165+
c3.append(Rule(identity,set()))
166+
# and optimize the classifier
195167
else:
196-
return Classifier(new_rules)
197-
198-
def _sequence_actions_classifier(self, acts, c):
199-
from pyretic.core.language import drop, identity
200-
empty_classifier = Classifier([Rule(identity, [drop])])
201-
if acts == []:
202-
# Treat the empty list of actions as drop.
203-
return empty_classifier
204-
acc = empty_classifier
205-
for act in acts:
206-
acc = acc + self._sequence_action_classifier(act, c)
207-
return acc
208-
209-
def _sequence_rule_classifier(self, r, c):
210-
from pyretic.core.language import drop
211-
c2 = self._sequence_actions_classifier(r.actions, c)
212-
for rule in c2.rules:
213-
rule.match = rule.match.intersect(r.match)
214-
c2.rules = [r2 for r2 in c2.rules if r2.match != drop]
215-
return c2.optimize()
216-
217-
def __rshift__(self, c2):
218-
new_rules = []
219-
for rule in self.rules:
220-
c3 = self._sequence_rule_classifier(rule, c2)
221-
new_rules = new_rules + c3.rules
222-
rv = Classifier(new_rules)
223-
return rv.optimize()
168+
c3 = c3.optimize()
169+
return c3
170+
171+
172+
### SEQUENTIAL COMPOSITION
173+
174+
def __rshift__(c1, c2):
175+
from pyretic.core.language import match, modify, drop, identity, Controller, CountBucket, DerivedPolicy
176+
# given a test b and an action p, return a test
177+
# b' such that p >> b == b' >> p.
178+
def _commute_test(act, pkts):
179+
while isinstance(act, DerivedPolicy):
180+
act = act.policy
181+
if act == identity:
182+
return pkts
183+
elif act == Controller or isinstance(act, CountBucket):
184+
return identity
185+
elif isinstance(act, modify):
186+
new_match_dict = {}
187+
if pkts == identity:
188+
return identity
189+
elif pkts == drop:
190+
return drop
191+
for f, v in pkts.map.iteritems():
192+
if f in act.map and act.map[f] == v:
193+
continue
194+
elif f in act.map and act.map[f] != v:
195+
return drop
196+
else:
197+
new_match_dict[f] = v
198+
if len(new_match_dict) == 0:
199+
return identity
200+
return match(**new_match_dict)
201+
else:
202+
raise TypeError
203+
204+
# sequentially compose actions. a1 must be a
205+
# single action. Returns a list of actions.
206+
def _sequence_actions(a1, as2):
207+
while isinstance(a1, DerivedPolicy):
208+
a1 = a1.policy
209+
# TODO: be uniform about returning copied or modified objects.
210+
if a1 == Controller or isinstance(a1, CountBucket):
211+
return {a1}
212+
elif a1 == identity:
213+
return copy.copy(as2)
214+
elif isinstance(a1, modify):
215+
new_actions = set()
216+
for a2 in as2:
217+
while isinstance(a2, DerivedPolicy):
218+
a2 = a2.policy
219+
if a2 == Controller or isinstance(a2, CountBucket):
220+
new_actions.add(a2)
221+
elif a2 == identity:
222+
new_actions.add(a1)
223+
elif isinstance(a2, modify):
224+
new_a1 = modify(**a1.map.copy())
225+
new_a1.map.update(a2.map)
226+
new_actions.add(new_a1)
227+
else:
228+
raise TypeError
229+
return new_actions
230+
else:
231+
raise TypeError
232+
233+
# generates a (potentially) non-total classifier
234+
# containing a single rule, or None
235+
def _cross_act(r1,act,r2):
236+
m = r1.match.intersect(_commute_test(act, r2.match))
237+
actions = _sequence_actions(act,r2.actions)
238+
if m == drop:
239+
return None
240+
else:
241+
return Classifier([Rule(m,actions)])
242+
243+
# returns a potentially non-total classifier
244+
# suitable for concatenating w/ other potentially non-total classifiers
245+
def _cross(r1,r2):
246+
c = None
247+
for act in r1.actions:
248+
cross = _cross_act(r1,act,r2)
249+
if c is None:
250+
c = cross
251+
elif not cross is None:
252+
# parallel compose to get c_tmp
253+
c_tmp = c + cross
254+
# but since both c and cross were potentially non-total
255+
# we need to append both c and cross to c_tmp
256+
c_tmp.append(c)
257+
c_tmp.append(cross)
258+
# set c to c_tmp and optimize
259+
c = c_tmp
260+
c = c.optimize()
261+
return c
262+
263+
# core __rshift__ logic begins here.
264+
265+
# start with an empty set of rules for the output classifier
266+
# then for each rule in the first classifier (self)
267+
c3 = Classifier()
268+
for r1 in c1.rules:
269+
if len(r1.actions) == 0:
270+
c3.append(r1)
271+
else:
272+
for r2 in c2.rules:
273+
c_tmp = _cross(r1,r2)
274+
if not c_tmp is None:
275+
c3.append(c_tmp)
276+
# when all rules in c1 and c2 have been crossed
277+
# optimize c3
278+
c3 = c3.optimize()
279+
return c3
280+
281+
282+
### SHADOW OPTIMIZATION
224283

225284
def optimize(self):
226285
return self.remove_shadowed_cover_single()
227286

228287
def remove_shadowed_exact_single(self):
229288
# Eliminate every rule exactly matched by some higher priority rule
230-
opt_c = Classifier([])
289+
opt_c = Classifier()
231290
for r in self.rules:
232291
if not reduce(lambda acc, new_r: acc or
233292
new_r.match == r.match,
@@ -238,23 +297,11 @@ def remove_shadowed_exact_single(self):
238297

239298
def remove_shadowed_cover_single(self):
240299
# Eliminate every rule completely covered by some higher priority rule
241-
opt_c = Classifier([])
300+
opt_c = Classifier()
242301
for r in self.rules:
243302
if not reduce(lambda acc, new_r: acc or
244303
new_r.match.covers(r.match),
245304
opt_c.rules,
246305
False):
247306
opt_c.rules.append(r)
248307
return opt_c
249-
250-
def eval(self, in_pkt):
251-
"""
252-
Evaluate against each rule in the classifier, starting with the
253-
highest priority. Return the set of packets resulting from applying
254-
the actions of the first rule that matches.
255-
"""
256-
for rule in self.rules:
257-
pkts = rule.eval(in_pkt)
258-
if pkts is not None:
259-
return pkts
260-
raise TypeError('Classifier is not total.')

0 commit comments

Comments
 (0)