Commit cc9f2d9

Merge branch 'sonic-net:master' into dev-reset-local-users-password
2 parents 0a3c4a3 + 099d40c commit cc9f2d9

184 files changed
+1601006 −2656 lines changed

.azure-pipelines/pre-commit-check.yml

Lines changed: 35 additions & 0 deletions
@@ -0,0 +1,35 @@
+steps:
+- checkout: self
+  clean: true
+  displayName: 'checkout sonic-utilities repo'
+
+- script: |
+    set -x
+    sudo pip install pre-commit
+    pre-commit install-hooks
+  displayName: 'Prepare pre-commit check'
+
+- script: |
+    # Run pre-commit check and capture the output
+    out=`pre-commit run --color never --from-ref HEAD^ --to-ref HEAD 2>&1`
+    RC=$?
+    if [[ $RC -ne 0 ]]; then
+      echo -e "The [pre-commit](http://pre-commit.com/) check detected issues in the files touched by this pull request.\n\
+      The pre-commit check is a mandatory check, please fix detected issues.\n\
+      \n\
+      To run the pre-commit checks locally, you can follow below steps:\n\
+      1. Ensure that default python is python3.\n\
+      2. Ensure that the 'pre-commit' package is installed:\n\
+         sudo pip install pre-commit\n\
+      3. Go to repository root folder\n\
+      4. Install the pre-commit hooks:\n\
+         pre-commit install\n\
+      5. Use pre-commit to check staged file:\n\
+         pre-commit\n\
+      6. Alternatively, you can check committed files using:\n\
+         pre-commit run --from-ref <commit_id> --to-ref <commit_id>\n"
+    fi
+    echo "Pre-commit check results:"
+    echo "$out"
+    exit $RC
+  displayName: 'Run pre-commit check'
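
The same check can be approximated locally with a short Python helper. This is an illustrative sketch only, not part of the commit; it assumes pre-commit is installed and the repository has at least two commits so HEAD^ resolves.

# sketch: reproduce the 'Run pre-commit check' CI step locally
import subprocess
import sys

def run_precommit_on_last_commit() -> int:
    # same invocation as the pipeline step above
    proc = subprocess.run(
        ["pre-commit", "run", "--color", "never",
         "--from-ref", "HEAD^", "--to-ref", "HEAD"],
        capture_output=True, text=True,
    )
    print("Pre-commit check results:")
    print(proc.stdout + proc.stderr)
    return proc.returncode

if __name__ == "__main__":
    sys.exit(run_precommit_on_last_commit())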

.github/workflows/semgrep.yml

Lines changed: 1 addition & 1 deletion
@@ -18,4 +18,4 @@ jobs:
     - uses: actions/checkout@v3
     - run: semgrep ci
       env:
-        SEMGREP_RULES: p/default
+        SEMGREP_RULES: "p/default r/python.lang.security.audit.dangerous-system-call-audit.dangerous-system-call-audit"

.pre-commit-config.yaml

Lines changed: 9 additions & 0 deletions
@@ -0,0 +1,9 @@
+# See https://pre-commit.com for more information
+# See https://pre-commit.com/hooks.html for more hooks
+repos:
+- repo: https://github.com/PyCQA/flake8
+  rev: 4.0.1
+  hooks:
+  - id: flake8
+    entry: bash -c 'git diff HEAD^ HEAD -U0 -- "$@" | flake8 --diff "$@"' --
+    args: ["--max-line-length=120"]
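
The hook's entry command is the notable part: it pipes a zero-context diff of the files pre-commit passes in ("$@") into flake8 --diff, so only lines changed between HEAD^ and HEAD are flagged, not whole files. Roughly equivalent logic as a hedged Python sketch (not from the commit; assumes git on PATH and a flake8 release older than 6.0, which still supports --diff):

# sketch: report flake8 findings only for lines changed in the last commit
import subprocess
import sys

def flake8_changed_lines(files):
    diff = subprocess.run(
        ["git", "diff", "HEAD^", "HEAD", "-U0", "--", *files],
        capture_output=True, text=True, check=True,
    ).stdout
    result = subprocess.run(
        ["flake8", "--diff", "--max-line-length=120"],
        input=diff, capture_output=True, text=True,
    )
    print(result.stdout, end="")
    return result.returncode

if __name__ == "__main__":
    sys.exit(flake8_changed_lines(sys.argv[1:]))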

acl_loader/main.py

Lines changed: 25 additions & 11 deletions
@@ -413,17 +413,17 @@ def parse_acl_json(filename):
             raise AclLoaderException("Invalid input file %s" % filename)
         return yang_acl

-    def load_rules_from_file(self, filename):
+    def load_rules_from_file(self, filename, skip_action_validation=False):
         """
         Load file with ACL rules configuration in openconfig ACL format. Convert rules
         to Config DB schema.
         :param filename: File in openconfig ACL format
         :return:
         """
         self.yang_acl = AclLoader.parse_acl_json(filename)
-        self.convert_rules()
+        self.convert_rules(skip_action_validation)

-    def convert_action(self, table_name, rule_idx, rule):
+    def convert_action(self, table_name, rule_idx, rule, skip_validation=False):
         rule_props = {}

         if rule.actions.config.forwarding_action == "ACCEPT":
@@ -452,13 +452,13 @@ def convert_action(self, table_name, rule_idx, rule):
             raise AclLoaderException("Unknown rule action {} in table {}, rule {}".format(
                 rule.actions.config.forwarding_action, table_name, rule_idx))

-        if not self.validate_actions(table_name, rule_props):
+        if not self.validate_actions(table_name, rule_props, skip_validation):
             raise AclLoaderException("Rule action {} is not supported in table {}, rule {}".format(
                 rule.actions.config.forwarding_action, table_name, rule_idx))

         return rule_props

-    def validate_actions(self, table_name, action_props):
+    def validate_actions(self, table_name, action_props, skip_validation=False):
         if self.is_table_control_plane(table_name):
             return True

@@ -481,6 +481,11 @@ def validate_actions(self, table_name, action_props):
         else:
             aclcapability = self.statedb.get_all(self.statedb.STATE_DB, "{}|{}".format(self.ACL_STAGE_CAPABILITY_TABLE, stage.upper()))
             switchcapability = self.statedb.get_all(self.statedb.STATE_DB, "{}|switch".format(self.SWITCH_CAPABILITY_TABLE))
+            # In the load_minigraph path, it's possible that the STATE_DB entry haven't pop up because orchagent is stopped
+            # before loading acl.json. So we skip the validation if any table is empty
+            if skip_validation and (not aclcapability or not switchcapability):
+                warning("Skipped action validation as capability table is not present in STATE_DB")
+                return True
         for action_key in dict(action_props):
             action_list_key = self.ACL_ACTIONS_CAPABILITY_FIELD
             if action_list_key not in aclcapability:
@@ -598,6 +603,14 @@ def convert_icmp(self, table_name, rule_idx, rule):
                     is_rule_v6 = True
             except Exception as e:
                 pass
+            else:
+                # get the IP version type using IP_PROTOCOL.
+                try:
+                    ip_protocol = rule.ip.config.protocol
+                    if ip_protocol == "IP_ICMPV6" or int(ip_protocol) == self.ip_protocol_map["IP_ICMPV6"]:
+                        is_rule_v6 = True
+                except Exception as e:
+                    pass

         type_key = "ICMPV6_TYPE" if is_rule_v6 else "ICMP_TYPE"
         code_key = "ICMPV6_CODE" if is_rule_v6 else "ICMP_CODE"
@@ -691,7 +704,7 @@ def validate_rule_fields(self, rule_props):
         if ("ICMPV6_TYPE" in rule_props or "ICMPV6_CODE" in rule_props) and protocol != 58:
             raise AclLoaderException("IP_PROTOCOL={} is not ICMPV6, but ICMPV6 fields were provided".format(protocol))

-    def convert_rule_to_db_schema(self, table_name, rule):
+    def convert_rule_to_db_schema(self, table_name, rule, skip_action_validation=False):
         """
         Convert rules format from openconfig ACL to Config DB schema
         :param table_name: ACL table name to which rule belong
@@ -721,7 +734,7 @@ def convert_rule_to_db_schema(self, table_name, rule):
         elif self.is_table_l3(table_name):
             rule_props["ETHER_TYPE"] = str(self.ethertype_map["ETHERTYPE_IPV4"])

-        deep_update(rule_props, self.convert_action(table_name, rule_idx, rule))
+        deep_update(rule_props, self.convert_action(table_name, rule_idx, rule, skip_action_validation))
         deep_update(rule_props, self.convert_l2(table_name, rule_idx, rule))
         deep_update(rule_props, self.convert_ip(table_name, rule_idx, rule))
         deep_update(rule_props, self.convert_icmp(table_name, rule_idx, rule))
@@ -753,7 +766,7 @@ def deny_rule(self, table_name):
             return {}  # Don't add default deny rule if table is not [L3, L3V6]
         return rule_data

-    def convert_rules(self):
+    def convert_rules(self, skip_aciton_validation=False):
         """
         Convert rules in openconfig ACL format to Config DB schema
         :return:
@@ -772,7 +785,7 @@ def convert_rules(self):
             for acl_entry_name in acl_set.acl_entries.acl_entry:
                 acl_entry = acl_set.acl_entries.acl_entry[acl_entry_name]
                 try:
-                    rule = self.convert_rule_to_db_schema(table_name, acl_entry)
+                    rule = self.convert_rule_to_db_schema(table_name, acl_entry, skip_aciton_validation)
                     deep_update(self.rules_info, rule)
                 except AclLoaderException as ex:
                     error("Error processing rule %s: %s. Skipped." % (acl_entry_name, ex))
@@ -1141,8 +1154,9 @@ def update(ctx):
 @click.option('--session_name', type=click.STRING, required=False)
 @click.option('--mirror_stage', type=click.Choice(["ingress", "egress"]), default="ingress")
 @click.option('--max_priority', type=click.INT, required=False)
+@click.option('--skip_action_validation', is_flag=True, default=False, help="Skip action validation")
 @click.pass_context
-def full(ctx, filename, table_name, session_name, mirror_stage, max_priority):
+def full(ctx, filename, table_name, session_name, mirror_stage, max_priority, skip_action_validation):
     """
     Full update of ACL rules configuration.
     If a table_name is provided, the operation will be restricted in the specified table.
@@ -1160,7 +1174,7 @@ def full(ctx, filename, table_name, session_name, mirror_stage, max_priority):
     if max_priority:
         acl_loader.set_max_priority(max_priority)

-    acl_loader.load_rules_from_file(filename)
+    acl_loader.load_rules_from_file(filename, skip_action_validation)
     acl_loader.full_update()
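
Taken together, the new --skip_action_validation flag is threaded from the full command through load_rules_from_file() and convert_rules() down to validate_actions(), where an empty capability table in STATE_DB is tolerated instead of aborting the load (the load_minigraph case where orchagent has not populated the tables yet). A simplified, self-contained sketch of that guard, not the actual class:

# sketch only: the real method reads the capability tables from STATE_DB
def validate_actions(aclcapability, switchcapability, action_props, skip_validation=False):
    if skip_validation and (not aclcapability or not switchcapability):
        # orchagent may not have populated STATE_DB yet (load_minigraph path),
        # so treat missing capability data as "nothing to validate"
        print("Skipped action validation as capability table is not present in STATE_DB")
        return True
    # ...the existing per-action capability checks would run here...
    return True

# With empty capability tables and the flag set, the rule load proceeds:
assert validate_actions({}, {}, {"PACKET_ACTION": "FORWARD"}, skip_validation=True)

On a device this would typically be exercised as something like "acl-loader update full /etc/sonic/acl.json --skip_action_validation" (executable name and path assumed, based on the click command shown above).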

azure-pipelines.yml

Lines changed: 19 additions & 3 deletions
@@ -13,7 +13,24 @@ resources:
       name: sonic-net/sonic-swss
       endpoint: sonic-net

+variables:
+- name: BUILD_BRANCH
+  ${{ if eq(variables['Build.Reason'], 'PullRequest') }}:
+    value: $(System.PullRequest.TargetBranch)
+  ${{ else }}:
+    value: $(Build.SourceBranchName)
+
 stages:
+- stage: Pretest
+  jobs:
+  - job: static_analysis
+    displayName: "Static Analysis"
+    timeoutInMinutes: 10
+    continueOnError: true
+    pool: ubuntu-20.04
+    steps:
+    - template: .azure-pipelines/pre-commit-check.yml
+
 - stage: Build

   jobs:
@@ -26,7 +43,7 @@ stages:
       vmImage: ubuntu-20.04

     container:
-      image: sonicdev-microsoft.azurecr.io:443/sonic-slave-bullseye:latest
+      image: sonicdev-microsoft.azurecr.io:443/sonic-slave-bullseye:$(BUILD_BRANCH)

     steps:
     - script: |
@@ -52,12 +69,11 @@ stages:

     - script: |
         set -xe
-        sudo apt-get -y purge libhiredis-dev libnl-3-dev libnl-route-3-dev || true
+        sudo apt-get -y purge libnl-3-dev libnl-route-3-dev || true
         sudo dpkg -i libnl-3-200_*.deb
         sudo dpkg -i libnl-genl-3-200_*.deb
         sudo dpkg -i libnl-route-3-200_*.deb
        sudo dpkg -i libnl-nf-3-200_*.deb
-        sudo dpkg -i libhiredis0.14_*.deb
         sudo dpkg -i libyang_1.0.73_amd64.deb
         sudo dpkg -i libyang-cpp_1.0.73_amd64.deb
         sudo dpkg -i python3-yang_1.0.73_amd64.deb
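
The new BUILD_BRANCH variable makes the build container follow the branch under test: for pull requests it resolves to the PR's target branch, otherwise to the branch being built, and it is substituted into the sonic-slave-bullseye image tag. A small Python sketch of that selection logic (illustrative only; the branch names are made-up examples):

def resolve_build_branch(build_reason, pr_target_branch, source_branch_name):
    # mirrors the ${{ if eq(variables['Build.Reason'], 'PullRequest') }} template expression
    if build_reason == "PullRequest":
        return pr_target_branch
    return source_branch_name

# e.g. a PR targeting "202305" pulls sonic-slave-bullseye:202305 instead of :latest
tag = resolve_build_branch("PullRequest", "202305", "my-feature-branch")
image = "sonicdev-microsoft.azurecr.io:443/sonic-slave-bullseye:" + tag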

config/fabric.py

Lines changed: 121 additions & 9 deletions
@@ -2,7 +2,10 @@
 import utilities_common.cli as clicommon
 import utilities_common.multi_asic as multi_asic_util
 from sonic_py_common import multi_asic
-from swsscommon.swsscommon import SonicV2Connector, ConfigDBConnector
+from swsscommon.swsscommon import SonicV2Connector, ConfigDBConnector, APP_FABRIC_PORT_TABLE_NAME
+
+FABRIC_PORT_STATUS_TABLE_PREFIX = APP_FABRIC_PORT_TABLE_NAME+"|"
+

 #
 # 'config fabric ...'
@@ -66,19 +69,13 @@ def isolate(portid, namespace):
 #
 @port.command()
 @click.argument('portid', metavar='<portid>', required=True)
+@click.option('-f', '--force', is_flag=True, default=False, help='Force to unisolate a link even if it is auto isolated.')
 @multi_asic_util.multi_asic_click_option_namespace
-def unisolate(portid, namespace):
+def unisolate(portid, namespace, force):
     """FABRIC PORT unisolate <portid>"""

     ctx = click.get_current_context()

-    if not portid.isdigit():
-        ctx.fail("Invalid portid")
-
-    n_asics = multi_asic.get_num_asics()
-    if n_asics > 1 and namespace is None:
-        ctx.fail('Must specify asic')
-
     # Connect to config database
     config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace)
     config_db.connect()
@@ -87,6 +84,37 @@ def unisolate(portid, namespace):
     state_db = SonicV2Connector(use_unix_socket_path=True, namespace=namespace)
     state_db.connect(state_db.STATE_DB, False)

+    n_asics = multi_asic.get_num_asics()
+    if n_asics > 1 and namespace is None:
+        ctx.fail( 'Must specify asic' )
+
+    # If "all" is specified then unisolate all ports.
+    if portid == "all":
+        port_keys = state_db.keys(state_db.STATE_DB, FABRIC_PORT_STATUS_TABLE_PREFIX + '*')
+        for port_key in port_keys:
+            port_data = state_db.get_all(state_db.STATE_DB, port_key)
+            if "REMOTE_PORT" in port_data:
+                port_number = int( port_key.replace( "FABRIC_PORT_TABLE|PORT", "" ) )
+
+                # Make sure configuration data exists
+                portName = f'Fabric{port_number}'
+                portConfigData = config_db.get_all(config_db.CONFIG_DB, "FABRIC_PORT|" + portName)
+                if not bool( portConfigData ):
+                    ctx.fail( "Fabric monitor configuration data not present" )
+
+                # Update entry
+                config_db.mod_entry( "FABRIC_PORT", portName, {'isolateStatus': False} )
+                if force:
+                    forceShutCnt = int( portConfigData['forceUnisolateStatus'] )
+                    forceShutCnt += 1
+                    config_db.mod_entry( "FABRIC_PORT", portName,
+                                         {'forceUnisolateStatus': forceShutCnt})
+
+        return
+
+    if not portid.isdigit():
+        ctx.fail( "Invalid portid" )
+
     # check if the port is actually in use
     portName = f'PORT{portid}'
     portStateData = state_db.get_all(state_db.STATE_DB, "FABRIC_PORT_TABLE|" + portName)
@@ -102,6 +130,15 @@ def unisolate(portid, namespace):
     # Update entry
     config_db.mod_entry("FABRIC_PORT", portName, {'isolateStatus': False})

+    if force:
+        forceShutCnt = int( portConfigData['forceUnisolateStatus'] )
+        forceShutCnt += 1
+        config_db.mod_entry( "FABRIC_PORT", portName,
+                             {'forceUnisolateStatus': forceShutCnt})
+
+        click.echo("Force unisolate the link.")
+        click.echo("It will clear all fabric link monitoring status for this link!")
+
 #
 # 'config fabric port monitor ...'
 #
@@ -157,6 +194,39 @@ def error_threshold(crccells, rxcells, namespace):
     config_db.mod_entry("FABRIC_MONITOR", "FABRIC_MONITOR_DATA",
                         {'monErrThreshCrcCells': crccells, 'monErrThreshRxCells': rxcells})

+def setFabricPortMonitorState(state, namespace, ctx):
+    """ set the fabric port monitor state"""
+    # Connect to config database
+    config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace)
+    config_db.connect()
+
+    # Make sure configuration data exists
+    monitorData = config_db.get_all(config_db.CONFIG_DB, "FABRIC_MONITOR|FABRIC_MONITOR_DATA")
+    if not bool(monitorData):
+        ctx.fail("Fabric monitor configuration data not present")
+
+    # Update entry
+    config_db.mod_entry("FABRIC_MONITOR", "FABRIC_MONITOR_DATA",
+                        {'monState': state})
+
+#
+# 'config fabric port montior state <enable/disable>'
+#
+@monitor.command()
+@click.argument('state', metavar='<state>', required=True)
+@multi_asic_util.multi_asic_click_option_namespace
+def state(state, namespace):
+    """FABRIC PORT MONITOR STATE configuration tasks"""
+    ctx = click.get_current_context()
+
+    n_asics = multi_asic.get_num_asics()
+    if n_asics > 1 and namespace is None:
+        ns_list = multi_asic.get_namespace_list()
+        for namespace in ns_list:
+            setFabricPortMonitorState(state, namespace, ctx)
+    else:
+        setFabricPortMonitorState(state, namespace, ctx)
+
 #
 # 'config fabric port monitor poll ...'
 #
@@ -245,3 +315,45 @@ def recovery(pollcount, namespace):
                         {"monPollThreshRecovery": pollcount})


+#
+# 'config fabric monitor ...'
+#
+@fabric.group(cls=clicommon.AbbreviationGroup, name='monitor')
+def capacity_monitor():
+    """FABRIC MONITOR configuration tasks"""
+    pass
+
+#
+# 'config fabric monitor capacity...'
+#
+@capacity_monitor.group(cls=clicommon.AbbreviationGroup)
+def capacity():
+    """FABRIC MONITOR CAPACITY configuration tasks"""
+    pass
+
+#
+# 'config fabric monitor capacity threshold <capcityThresh>'
+#
+@capacity.command()
+@click.argument('capacitythreshold', metavar='<capacityThreshold>', required=True, type=int)
+def threshold(capacitythreshold):
+    """FABRIC CAPACITY MONITOR THRESHOLD configuration tasks"""
+    ctx = click.get_current_context()
+
+    if capacitythreshold < 5 or capacitythreshold > 250:
+        ctx.fail("threshold must be in range 5...250")
+
+    namespaces = multi_asic.get_namespace_list()
+    for idx, namespace in enumerate(namespaces, start=1):
+        # Connect to config database
+        config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace)
+        config_db.connect()
+
+        # Make sure configuration data exists
+        monitorData = config_db.get_all(config_db.CONFIG_DB, "FABRIC_MONITOR|FABRIC_MONITOR_DATA")
+        if not bool(monitorData):
+            ctx.fail("Fabric monitor configuration data not present")
+
+        # Update entry
+        config_db.mod_entry("FABRIC_MONITOR", "FABRIC_MONITOR_DATA",
+                            {"monCapacityThreshWarn": capacitythreshold})
