
Commit ce7f1f1

Merge pull request #1127 from vloncar/attrs_desc
Introduce optional description to layer attributes
2 parents 2fc8941 + cf91c3b
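The mechanism is simple: the attribute classes gain an optional description keyword, each backend passes shared description strings when registering layer attributes, and a new documentation generator renders them. A minimal sketch of the attribute side (simplified; the real classes in hls4ml/model/attributes.py take more arguments):

import numbers


class Attribute:
    def __init__(self, name, value_type=numbers.Integral, default=None, configurable=False, description=None):
        self.name = name
        self.value_type = value_type
        self.default = default
        self.configurable = configurable
        # New in this PR: optional human-readable text, picked up by docs/attr_doc_gen.py
        self.description = description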

File tree: 10 files changed, +308 -44 lines changed


docs/attr_doc_gen.py

Lines changed: 149 additions & 0 deletions
@@ -0,0 +1,149 @@
import numbers

import hls4ml.backends as backends
import hls4ml.model.attributes as attributes
import hls4ml.model.layers as layers


class AttrList:
    def __init__(self, cls_name, cls_attrs) -> None:
        self.cls_name = cls_name
        self.config_attrs = [attr for attr in cls_attrs if attr.configurable is True]
        self.type_attrs = [attr for attr in cls_attrs if attr.__class__.__name__ == 'TypeAttribute']
        self.weight_attrs = [attr for attr in cls_attrs if attr.__class__.__name__ == 'WeightAttribute']
        self.base_attrs = [attr for attr in cls_attrs if attr not in self.config_attrs + self.type_attrs + self.weight_attrs]
        self.backend_attrs = {}
        self.reverse_backend_attrs = []  # Will hold (attr, backend_name) pairs, used temporarily
        self.unique_backend_attrs = []

    def add_backend_attrs(self, backend_name, backend_attrs):
        self.backend_attrs[backend_name] = backend_attrs

        for attr in backend_attrs:
            self.reverse_backend_attrs.append((attr, backend_name))

    def sift_backend_attrs(self):
        grouped_dict = {}
        for attr, backend_name in self.reverse_backend_attrs:
            if attr not in grouped_dict:
                grouped_dict[attr] = []
            grouped_dict[attr].append(backend_name)

        for attr, backend_names in grouped_dict.items():
            attr.available_in = backend_names
            self.unique_backend_attrs.append(attr)

    @property
    def only_configurable(self):
        all_attrs = self.config_attrs + self.type_attrs + self.unique_backend_attrs
        return [attr for attr in all_attrs if attr.configurable is True]


def convert_to_attr_list():
    all_backends = backends.get_available_backends()
    # Removing duplicates but preserving order
    all_layers = list(dict.fromkeys(layers.layer_map.values()))
    all_layers_attrs = []

    for layer_cls in all_layers:
        base_attrs = layer_cls.expected_attributes

        attr_list = AttrList(layer_cls.__name__, base_attrs)

        for backend_name in all_backends:
            backend = backends.get_backend(backend_name)

            backend_cls = backend.create_layer_class(layer_cls)
            backend_attrs = backend_cls.expected_attributes

            diff_atts = [
                attr for attr in backend_attrs if attr not in base_attrs
            ]  # Sets are faster, but don't preserve order
            if len(diff_atts) > 0:
                attr_list.add_backend_attrs(backend.name, diff_atts)

        all_layers_attrs.append(attr_list)

    for attr_list in all_layers_attrs:
        attr_list.sift_backend_attrs()

    return all_layers_attrs


def print_attrs(attrs, file):
    for attr in attrs:
        if attr.value_type == numbers.Integral:
            vtype = 'int'
        elif attr.__class__ == attributes.ChoiceAttribute:
            choices = ','.join([str(c) for c in attr.choices])
            vtype = f'list [{choices}]'
        else:
            vtype = attr.value_type.__name__ if hasattr(attr.value_type, '__name__') else str(attr.value_type)

        if attr.default is None:
            file.write('* ' + attr.name + ': ' + vtype + '\n\n')
        else:
            file.write('* ' + attr.name + ': ' + vtype + ' (Default: ' + str(attr.default) + ')\n\n')

        if attr.description is not None:
            file.write('  * ' + attr.description + '\n\n')

        if hasattr(attr, 'available_in'):
            file.write('  * Available in: ' + ', '.join(attr.available_in) + '\n\n')


def write_all_attributes(all_layers_attrs):
    with open('attributes.rst', mode='w') as file:
        file.write('================\n')
        file.write('Layer attributes\n')
        file.write('================\n\n\n')

        for attr_list in all_layers_attrs:
            file.write(attr_list.cls_name + '\n')
            file.write('=' * len(attr_list.cls_name) + '\n')
            if len(attr_list.base_attrs) > 0:
                file.write('Base attributes\n')
                file.write('---------------\n')
                print_attrs(attr_list.base_attrs, file)

            if len(attr_list.type_attrs) > 0:
                file.write('Type attributes\n')
                file.write('---------------\n')
                print_attrs(attr_list.type_attrs, file)
            if len(attr_list.weight_attrs) > 0:
                file.write('Weight attributes\n')
                file.write('-----------------\n')
                print_attrs(attr_list.weight_attrs, file)

            if len(attr_list.config_attrs) > 0:
                file.write('Configurable attributes\n')
                file.write('-----------------------\n')
                print_attrs(attr_list.config_attrs, file)

            if len(attr_list.backend_attrs) > 0:
                file.write('Backend-specific attributes\n')
                file.write('---------------------------\n')
                print_attrs(attr_list.unique_backend_attrs, file)


def write_only_configurable(all_layers_attrs):
    with open('attributes.rst', mode='w') as file:
        file.write('================\n')
        file.write('Layer attributes\n')
        file.write('================\n\n\n')

        for attr_list in all_layers_attrs:
            file.write(attr_list.cls_name + '\n')
            file.write('=' * len(attr_list.cls_name) + '\n')

            config_attrs = attr_list.only_configurable
            if len(config_attrs) > 0:
                print_attrs(config_attrs, file)


if __name__ == '__main__':
    all_layers_attrs = convert_to_attr_list()
    write_all_attributes(all_layers_attrs)
    # write_only_configurable(all_layers_attrs)
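Running the script from the docs directory (python attr_doc_gen.py) writes attributes.rst next to it. Given print_attrs above, an attribute that carries a description and appears in several backends renders roughly as follows (layer name, description wording, and backend list are illustrative):

Dense
=====
Configurable attributes
-----------------------
* reuse_factor: int (Default: 1)

  * <description text from hls4ml.utils.attribute_descriptions>

  * Available in: Vivado, Quartus, Catapult, oneAPI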

hls4ml/backends/catapult/catapult_backend.py

Lines changed: 16 additions & 7 deletions
@@ -32,6 +32,7 @@
 from hls4ml.model.optimizer import get_backend_passes, layer_optimizer
 from hls4ml.model.types import FixedPrecisionType, IntegerPrecisionType, NamedType, PackedType
 from hls4ml.report import parse_catapult_report
+from hls4ml.utils import attribute_descriptions as descriptions
 from hls4ml.utils.fixed_point_utils import ceil_log2

@@ -51,10 +52,12 @@ def _register_layer_attributes(self):

         for layer in rnn_layers:
             attrs = self.attribute_map.get(layer, [])
-            attrs.append(ConfigurableAttribute('recurrent_reuse_factor', default=1))
-            attrs.append(ConfigurableAttribute('static', value_type=bool, default=True))
-            attrs.append(ConfigurableAttribute('table_size', default=1024))
-            attrs.append(TypeAttribute('table', default=FixedPrecisionType(18, 8)))
+            attrs.append(ConfigurableAttribute('recurrent_reuse_factor', default=1, description=descriptions.reuse_factor))
+            attrs.append(
+                ConfigurableAttribute('static', value_type=bool, default=True, description=descriptions.recurrent_static)
+            )
+            attrs.append(ConfigurableAttribute('table_size', default=1024, description=descriptions.table_size))
+            attrs.append(TypeAttribute('table', default=FixedPrecisionType(18, 8), description=descriptions.table_type))
             self.attribute_map[layer] = attrs

         # Add ParallelizationFactor to Conv1D/2D
@@ -65,16 +68,22 @@ def _register_layer_attributes(self):

         for layer in pf_layers:
             attrs = self.attribute_map.get(layer, [])
-            attrs.append(ConfigurableAttribute('parallelization_factor', default=1))
+            attrs.append(ConfigurableAttribute('parallelization_factor', default=1, description=descriptions.conv_pf))
             self.attribute_map[layer] = attrs

         # Add ConvImplementation to Convolution+Pooling layers
         cnn_layers = [Conv1D, Conv2D, SeparableConv1D, SeparableConv2D, DepthwiseConv2D, Pooling1D, Pooling2D]

         for layer in cnn_layers:
             attrs = self.attribute_map.get(layer, [])
-            # attrs.append(ConfigurableAttribute('conv_implementation', value_type=str, default='LineBuffer'))
-            attrs.append(ChoiceAttribute('conv_implementation', choices=['LineBuffer', 'Encoded'], default='LineBuffer'))
+            attrs.append(
+                ChoiceAttribute(
+                    'conv_implementation',
+                    choices=['LineBuffer', 'Encoded'],
+                    default='LineBuffer',
+                    description=descriptions.conv_implementation,
+                )
+            )
             self.attribute_map[layer] = attrs

         sep_conv_layers = [SeparableConv1D, SeparableConv2D]
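The ChoiceAttribute used for conv_implementation constrains the attribute to a fixed set of values, which attr_doc_gen.py renders as list [LineBuffer,Encoded]. A minimal sketch of that behavior, building on the simplified Attribute above (hypothetical; the real class lives in hls4ml/model/attributes.py):

class ChoiceAttribute(Attribute):
    def __init__(self, name, choices, default=None, configurable=True, description=None):
        super().__init__(name, default=default, configurable=configurable, description=description)
        # The default must itself be one of the legal choices
        assert default is None or default in choices
        self.choices = choices

    def validate_value(self, value):
        # e.g. only 'LineBuffer' or 'Encoded' are accepted for conv_implementation
        return value in self.choices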

hls4ml/backends/fpga/fpga_backend.py

Lines changed: 18 additions & 6 deletions
@@ -45,6 +45,7 @@
     UnspecifiedPrecisionType,
     XnorPrecisionType,
 )
+from hls4ml.utils import attribute_descriptions as descriptions
 from hls4ml.writer import get_writer

@@ -74,7 +75,7 @@ def __init__(self, name):

         for layer in accum_layers:
             attrs = self.attribute_map.get(layer, [])
-            attrs.append(TypeAttribute('accum'))
+            attrs.append(TypeAttribute('accum', description=descriptions.accum_type))
             self.attribute_map[layer] = attrs

         rf_layers = accum_layers + [
@@ -90,7 +91,7 @@ def __init__(self, name):

         for layer in rf_layers:
             attrs = self.attribute_map.get(layer, [])
-            attrs.append(ConfigurableAttribute('reuse_factor', default=1))
+            attrs.append(ConfigurableAttribute('reuse_factor', default=1, description=descriptions.reuse_factor))
             self.attribute_map[layer] = attrs

         # seperable is kind of special because it is effectively two layers that will be split
@@ -104,23 +105,34 @@ def __init__(self, name):
             self.attribute_map[layer] = attrs

         act_attrs = self.attribute_map.get(Activation, [])
-        act_attrs.append(ConfigurableAttribute('table_size', default=1024))
-        act_attrs.append(TypeAttribute('table', default=FixedPrecisionType(18, 8)))
+        act_attrs.append(ConfigurableAttribute('table_size', default=1024, description=descriptions.table_size))
+        act_attrs.append(TypeAttribute('table', default=FixedPrecisionType(18, 8), description=descriptions.table_type))
         self.attribute_map[Activation] = act_attrs

         softmax_attrs = self.attribute_map.get(Softmax, [])
-        softmax_attrs.append(ChoiceAttribute('implementation', ['latency', 'stable', 'argmax', 'legacy'], default='stable'))
-        softmax_attrs.append(ConfigurableAttribute('skip', value_type=bool, default=False))
+        softmax_attrs.append(
+            ChoiceAttribute(
+                'implementation',
+                ['latency', 'stable', 'argmax', 'legacy'],
+                default='stable',
+                description=descriptions.softmax_implementation,
+            )
+        )
+        softmax_attrs.append(
+            ConfigurableAttribute('skip', value_type=bool, default=False, description=descriptions.softmax_skip)
+        )
         softmax_attrs.append(
             TypeAttribute(
                 'exp_table',
                 default=FixedPrecisionType(18, 8, rounding_mode=RoundingMode.RND, saturation_mode=SaturationMode.SAT),
+                description=descriptions.table_type,
             )
         )
         softmax_attrs.append(
             TypeAttribute(
                 'inv_table',
                 default=FixedPrecisionType(18, 8, rounding_mode=RoundingMode.RND, saturation_mode=SaturationMode.SAT),
+                description=descriptions.table_type,
             )
         )
         self.attribute_map[Softmax] = softmax_attrs
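Every backend file in this commit imports hls4ml.utils.attribute_descriptions, a module added by the PR but not shown in this view. Judging from the names referenced in these diffs, a plausible minimal sketch (module contents assumed; the strings are illustrative, not the PR's actual wording):

# hls4ml/utils/attribute_descriptions.py (sketch)

reuse_factor = 'Number of times each multiplier is reused; higher values save resources at the cost of throughput.'
recurrent_static = 'Whether the recurrent layer is implemented as a single static instance shared between invocations.'
accum_type = 'Precision of the accumulation variable.'
table_size = 'Number of entries in the lookup table used to approximate the activation function.'
table_type = 'Precision of the values stored in the lookup table.'
conv_pf = 'Parallelization factor: number of output pixels a convolutional layer computes in parallel.'
conv_implementation = "Strategy for implementing convolution: 'LineBuffer' or 'Encoded'."
softmax_implementation = "Softmax implementation variant: 'latency', 'stable', 'argmax' or 'legacy'."
softmax_skip = 'If enabled, the softmax is skipped and the preceding scores are passed through.'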

hls4ml/backends/oneapi/oneapi_backend.py

Lines changed: 5 additions & 4 deletions
@@ -10,6 +10,7 @@
 from hls4ml.model.layers import GRU, LSTM, Activation, Conv1D, Conv2D, Dense, Embedding, Layer, SimpleRNN, Softmax
 from hls4ml.model.optimizer import get_backend_passes, layer_optimizer
 from hls4ml.model.types import FixedPrecisionType, IntegerPrecisionType, NamedType
+from hls4ml.utils import attribute_descriptions as descriptions

 # from hls4ml.report import parse_oneapi_report

@@ -30,9 +31,9 @@ def _register_layer_attributes(self):

         for layer in rnn_layers:
             attrs = self.attribute_map.get(layer, [])
-            attrs.append(ConfigurableAttribute('recurrent_reuse_factor', default=1))
-            attrs.append(ConfigurableAttribute('table_size', default=1024))
-            attrs.append(TypeAttribute('table', default=FixedPrecisionType(18, 8)))
+            attrs.append(ConfigurableAttribute('recurrent_reuse_factor', default=1, description=descriptions.reuse_factor))
+            attrs.append(ConfigurableAttribute('table_size', default=1024, description=descriptions.table_size))
+            attrs.append(TypeAttribute('table', default=FixedPrecisionType(18, 8), description=descriptions.table_type))
             self.attribute_map[layer] = attrs

         # Add ParallelizationFactor to Conv1D/2D
@@ -43,7 +44,7 @@ def _register_layer_attributes(self):

         for layer in pf_layers:
             attrs = self.attribute_map.get(layer, [])
-            attrs.append(ConfigurableAttribute('parallelization_factor', default=1))
+            attrs.append(ConfigurableAttribute('parallelization_factor', default=1, description=descriptions.conv_pf))
             self.attribute_map[layer] = attrs

     def _register_flows(self):

hls4ml/backends/quartus/quartus_backend.py

Lines changed: 4 additions & 3 deletions
@@ -11,6 +11,7 @@
 from hls4ml.model.optimizer import get_backend_passes, layer_optimizer
 from hls4ml.model.types import FixedPrecisionType, IntegerPrecisionType, NamedType
 from hls4ml.report import parse_quartus_report
+from hls4ml.utils import attribute_descriptions as descriptions


 @contextmanager
@@ -39,9 +40,9 @@ def _register_layer_attributes(self):

         for layer in rnn_layers:
             attrs = self.attribute_map.get(layer, [])
-            attrs.append(ConfigurableAttribute('recurrent_reuse_factor', default=1))
-            attrs.append(ConfigurableAttribute('table_size', default=1024))
-            attrs.append(TypeAttribute('table', default=FixedPrecisionType(18, 8)))
+            attrs.append(ConfigurableAttribute('recurrent_reuse_factor', default=1, description=descriptions.reuse_factor))
+            attrs.append(ConfigurableAttribute('table_size', default=1024, description=descriptions.table_size))
+            attrs.append(TypeAttribute('table', default=FixedPrecisionType(18, 8), description=descriptions.table_type))
             self.attribute_map[layer] = attrs

     def _register_flows(self):

hls4ml/backends/vivado/vivado_backend.py

Lines changed: 16 additions & 6 deletions
@@ -31,6 +31,7 @@
 from hls4ml.model.optimizer import get_backend_passes, layer_optimizer
 from hls4ml.model.types import FixedPrecisionType, IntegerPrecisionType, NamedType, PackedType
 from hls4ml.report import parse_vivado_report
+from hls4ml.utils import attribute_descriptions as descriptions


 class VivadoBackend(FPGABackend):
@@ -49,10 +50,12 @@ def _register_layer_attributes(self):

         for layer in rnn_layers:
             attrs = self.attribute_map.get(layer, [])
-            attrs.append(ConfigurableAttribute('recurrent_reuse_factor', default=1))
-            attrs.append(ConfigurableAttribute('static', value_type=bool, default=True))
-            attrs.append(ConfigurableAttribute('table_size', default=1024))
-            attrs.append(TypeAttribute('table', default=FixedPrecisionType(18, 8)))
+            attrs.append(ConfigurableAttribute('recurrent_reuse_factor', default=1, description=descriptions.reuse_factor))
+            attrs.append(
+                ConfigurableAttribute('static', value_type=bool, default=True, description=descriptions.recurrent_static)
+            )
+            attrs.append(ConfigurableAttribute('table_size', default=1024, description=descriptions.table_size))
+            attrs.append(TypeAttribute('table', default=FixedPrecisionType(18, 8), description=descriptions.table_type))
             self.attribute_map[layer] = attrs

         # Add ParallelizationFactor to Conv1D/2D
@@ -63,14 +66,21 @@ def _register_layer_attributes(self):

         for layer in pf_layers:
             attrs = self.attribute_map.get(layer, [])
-            attrs.append(ConfigurableAttribute('parallelization_factor', default=1))
+            attrs.append(ConfigurableAttribute('parallelization_factor', default=1, description=descriptions.conv_pf))
             self.attribute_map[layer] = attrs

         # Add ConvImplementation to Convolution+Pooling layers
         cnn_layers = [Conv1D, Conv2D, SeparableConv1D, SeparableConv2D, DepthwiseConv2D, Pooling1D, Pooling2D]
         for layer in cnn_layers:
             attrs = self.attribute_map.get(layer, [])
-            attrs.append(ChoiceAttribute('conv_implementation', choices=['LineBuffer', 'Encoded'], default='LineBuffer'))
+            attrs.append(
+                ChoiceAttribute(
+                    'conv_implementation',
+                    choices=['LineBuffer', 'Encoded'],
+                    default='LineBuffer',
+                    description=descriptions.conv_implementation,
+                )
+            )
             self.attribute_map[layer] = attrs

     def _register_flows(self):
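For users, the configurable attributes registered above are the same knobs exposed through the hls4ml configuration dictionary, and the new descriptions document what those knobs do. A typical way to set one when converting a model (standard hls4ml API; the model object and the layer name 'dense1' are placeholders):

import hls4ml

config = hls4ml.utils.config_from_keras_model(model, granularity='name')
config['LayerName']['dense1']['ReuseFactor'] = 4  # the 'reuse_factor' attribute above
hls_model = hls4ml.converters.convert_from_keras_model(model, hls_config=config, backend='Vivado')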
