
Commit c190ae9

Author: Joshua Reich (committed)

refactoring core

1 parent eae4c0d commit c190ae9

File tree

4 files changed (+380, -373 lines)


pyretic/core/classifier.py

Lines changed: 263 additions & 0 deletions
@@ -0,0 +1,263 @@
###############################################################################
# Classifiers
# an intermediate representation for proactive compilation.

class Rule(object):
    """
    A rule contains a filter and the parallel composition of zero or more
    Pyretic actions.
    """

    # Matches m should be of the match class. Actions acts should be a list of
    # either modify, identity, or drop policies.
    def __init__(self,m,acts):
        self.match = m
        self.actions = acts

    def __str__(self):
        return str(self.match) + '\n -> ' + str(self.actions)

    def __repr__(self):
        return str(self)

    def __eq__(self, other):
        """Based on syntactic equality of policies."""
        return ( id(self) == id(other)
            or ( self.match == other.match
                 and self.actions == other.actions ) )

    def __ne__(self, other):
        """Based on syntactic equality of policies."""
        return not (self == other)

    def eval(self, in_pkt):
        """
        If this rule matches the packet, then return the union of the sets
        of packets produced by the actions. Otherwise, return None.
        """
        filtered_pkt = self.match.eval(in_pkt)
        if len(filtered_pkt) == 0:
            return None
        rv = set()
        for pkt in filtered_pkt:
            for act in self.actions:
                rv |= act.eval(pkt)
        return rv
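
# Usage sketch (illustrative, assuming the match, modify, and fwd policies
# exported by pyretic.core.language):
#
#   >>> from pyretic.core.language import match, modify, fwd
#   >>> r = Rule(match(srcip='10.0.0.1'), [modify(dstip='10.0.0.2'), fwd(1)])
#
# Per Rule.eval above, r.eval(pkt) returns None when the filter rejects pkt,
# and otherwise the union of the packet sets produced by the two actions.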

class Classifier(object):
    """
    A classifier contains a list of rules, where the order of the list implies
    the relative priorities of the rules. Semantically, classifiers are
    functions from packets to sets of packets, similar to OpenFlow flow
    tables.
    """

    def __init__(self, new_rules=[]):
        import types
        if isinstance(new_rules, types.GeneratorType):
            self.rules = [r for r in new_rules]
        elif isinstance(new_rules,list):
            self.rules = new_rules
        else:
            raise TypeError

    def __len__(self):
        return len(self.rules)

    def __str__(self):
        return '\n '.join(map(str,self.rules))

    def __repr__(self):
        return str(self)

    def __eq__(self, other):
        """Based on syntactic equality of policies."""
        return ( id(self) == id(other)
            or ( self.rules == other.rules ) )

    def __ne__(self, other):
        """Based on syntactic equality of policies."""
        return not (self == other)

    def __add__(self,c2):
        from pyretic.core.language import drop
        c1 = self
        if c2 is None:
            return None
        c = Classifier([])
        # TODO (cole): make classifiers iterable
        for r1 in c1.rules:
            for r2 in c2.rules:
                intersection = r1.match.intersect(r2.match)
                if intersection != drop:
                    # TODO (josh) logic for detecting when sets of actions can't be combined
                    # e.g., [modify(dstip='10.0.0.1'),fwd(1)] + [modify(srcip='10.0.0.2'),fwd(2)]
                    actions = r1.actions + r2.actions
                    actions = filter(lambda a: a != drop,actions)
                    if len(actions) == 0:
                        actions = [drop]
                    c.rules.append(Rule(intersection, actions))
        for r1 in c1.rules:
            c.rules.append(r1)
        for r2 in c2.rules:
            c.rules.append(r2)
        return c.optimize()
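
    # Sketch of what __add__ (parallel composition) computes, with
    # hypothetical rules and assuming match.intersect yields the conjunction
    # of the two matches:
    #
    #   >>> c1 = Classifier([Rule(match(srcip='10.0.0.1'), [fwd(1)])])
    #   >>> c2 = Classifier([Rule(match(dstip='10.0.0.2'), [fwd(2)])])
    #   >>> c3 = c1 + c2
    #
    # The highest-priority rule of c3 matches srcip 10.0.0.1 AND dstip
    # 10.0.0.2 and carries both actions [fwd(1), fwd(2)]; the two original
    # rules follow as lower-priority fallbacks.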

    # Helper function for rshift: given a test b and an action p, return a test
    # b' such that p >> b == b' >> p.
    def _commute_test(self, act, pkts):
        from pyretic.core.language import drop, identity, match, modify, Controller, CountBucket, DerivedPolicy
        while isinstance(act, DerivedPolicy):
            act = act.policy
        if act == identity:
            return pkts
        elif act == drop:
            return drop
        elif act == Controller or isinstance(act, CountBucket):
            return identity
        elif isinstance(act, modify):
            new_match_dict = {}
            if pkts == identity:
                return identity
            elif pkts == drop:
                return drop
            for f, v in pkts.map.iteritems():
                if f in act.map and act.map[f] == v:
                    continue
                elif f in act.map and act.map[f] != v:
                    return drop
                else:
                    new_match_dict[f] = v
            if len(new_match_dict) == 0:
                return identity
            return match(**new_match_dict)
        else:
            # TODO (cole) use compile error.
            # TODO (cole) what actions are allowable?
            raise TypeError
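
    # Worked sketch of _commute_test with hypothetical field values: commuting
    # the test match(dstip='10.0.0.2', switch=1) past the action
    # modify(dstip='10.0.0.2') drops the dstip field, which the modify already
    # guarantees, and returns match(switch=1); commuting it past
    # modify(dstip='10.0.0.3') returns drop, since no packet can pass the test
    # after that rewrite; if every tested field is guaranteed, the result is
    # identity.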

    # Helper function for rshift: sequentially compose actions. a1 must be a
    # single action. Returns a list of actions.
    def _sequence_actions(self, a1, as2):
        from pyretic.core.language import drop, identity, modify, fwd, Controller, CountBucket, DerivedPolicy
        while isinstance(a1, DerivedPolicy):
            a1 = a1.policy
        # TODO: be uniform about returning copied or modified objects.
        new_actions = []
        if a1 == drop:
            return [drop]
        elif a1 == identity:
            return as2
        elif a1 == Controller or isinstance(a1, CountBucket):
            return [a1]
        elif isinstance(a1, modify):
            for a2 in as2:
                while isinstance(a2, DerivedPolicy):
                    a2 = a2.policy
                new_a1 = modify(**a1.map.copy())
                if a2 == drop:
                    new_actions.append(drop)
                elif a2 == Controller or isinstance(a2, CountBucket):
                    new_actions.append(a2)
                elif a2 == identity:
                    new_actions.append(new_a1)
                elif isinstance(a2, modify):
                    new_a1.map.update(a2.map)
                    new_actions.append(new_a1)
                elif isinstance(a2, fwd):
                    new_a1.map['outport'] = a2.outport
                    new_actions.append(new_a1)
                else:
                    raise TypeError
            return new_actions
        else:
            raise TypeError
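
    # Sketch of _sequence_actions with hypothetical values: sequencing the
    # single action modify(srcip='10.0.0.7') with the action list
    # [modify(dstip='10.0.0.2'), fwd(1)] merges field maps left to right,
    # giving roughly [modify(srcip='10.0.0.7', dstip='10.0.0.2'),
    # modify(srcip='10.0.0.7', outport=1)]; a drop in as2 stays drop, and an
    # identity in as2 leaves the left-hand modify unchanged.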

    # Returns a classifier.
    def _sequence_action_classifier(self, act, c):
        from pyretic.core.language import drop, identity
        # TODO (cole): make classifiers easier to use w.r.t. adding/removing
        # rules.
        if len(c.rules) == 0:
            return Classifier([Rule(identity, [drop])])
        new_rules = []
        for rule in c.rules:
            pkts = self._commute_test(act, rule.match)
            if pkts == identity:
                acts = self._sequence_actions(act, rule.actions)
                new_rules += [Rule(identity, acts)]
                break
            elif pkts == drop:
                continue
            else:
                acts = self._sequence_actions(act, rule.actions)
                new_rules += [Rule(pkts, acts)]
        if new_rules == []:
            return Classifier([Rule(identity, [drop])])
        else:
            return Classifier(new_rules)

    def _sequence_actions_classifier(self, acts, c):
        from pyretic.core.language import drop, identity
        empty_classifier = Classifier([Rule(identity, [drop])])
        if acts == []:
            # Treat the empty list of actions as drop.
            return empty_classifier
        acc = empty_classifier
        for act in acts:
            acc = acc + self._sequence_action_classifier(act, c)
        return acc

    def _sequence_rule_classifier(self, r, c):
        from pyretic.core.language import drop
        c2 = self._sequence_actions_classifier(r.actions, c)
        for rule in c2.rules:
            rule.match = rule.match.intersect(r.match)
        c2.rules = [r2 for r2 in c2.rules if r2.match != drop]
        return c2.optimize()

    def __rshift__(self, c2):
        new_rules = []
        for rule in self.rules:
            c3 = self._sequence_rule_classifier(rule, c2)
            new_rules = new_rules + c3.rules
        rv = Classifier(new_rules)
        return rv.optimize()
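
    # Sketch of __rshift__ (sequential composition), with hypothetical rules
    # and match, modify, fwd, identity, drop as in pyretic.core.language:
    #
    #   >>> c1 = Classifier([Rule(match(switch=1), [modify(dstip='10.0.0.2')]),
    #   ...                  Rule(identity, [drop])])
    #   >>> c2 = Classifier([Rule(match(dstip='10.0.0.2'), [fwd(1)]),
    #   ...                  Rule(identity, [drop])])
    #   >>> c3 = c1 >> c2
    #
    # Each rule of c1 is pushed through c2: the modify guarantees c2's first
    # test, so c3 rewrites dstip and forwards switch-1 packets out port 1,
    # and drops everything else.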

    def optimize(self):
        return self.remove_shadowed_cover_single()

    def remove_shadowed_exact_single(self):
        # Eliminate every rule exactly matched by some higher priority rule
        opt_c = Classifier([])
        for r in self.rules:
            if not reduce(lambda acc, new_r: acc or
                              new_r.match == r.match,
                          opt_c.rules,
                          False):
                opt_c.rules.append(r)
        return opt_c

    def remove_shadowed_cover_single(self):
        # Eliminate every rule completely covered by some higher priority rule
        opt_c = Classifier([])
        for r in self.rules:
            if not reduce(lambda acc, new_r: acc or
                              new_r.match.covers(r.match),
                          opt_c.rules,
                          False):
                opt_c.rules.append(r)
        return opt_c
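
    # Sketch of shadow removal with hypothetical rules: in
    # [Rule(match(srcip='10.0.0.1'), [fwd(1)]), Rule(identity, [fwd(2)]),
    #  Rule(match(srcip='10.0.0.3'), [fwd(3)])], the identity rule covers the
    # last rule, so remove_shadowed_cover_single() keeps only the first two,
    # while remove_shadowed_exact_single() keeps all three because no two
    # matches are syntactically equal.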

    def eval(self, in_pkt):
        """
        Evaluate against each rule in the classifier, starting with the
        highest priority. Return the set of packets resulting from applying
        the actions of the first rule that matches.
        """
        for rule in self.rules:
            pkts = rule.eval(in_pkt)
            if pkts is not None:
                return pkts
        raise TypeError('Classifier is not total.')
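
# End-to-end sketch with hypothetical values (match, fwd, identity, drop as
# in pyretic.core.language):
#
#   >>> c = Classifier([Rule(match(dstip='10.0.0.2'), [fwd(1)]),
#   ...                 Rule(identity, [drop])])
#
# Classifier.eval scans rules from highest priority down: a packet destined
# to 10.0.0.2 yields the output set produced by fwd(1), while any other
# packet falls through to the identity rule and is dropped (an empty set).
# The trailing identity rule keeps the classifier total, so eval never
# raises.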
