Skip to content
This repository was archived by the owner on Aug 15, 2025. It is now read-only.
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
35 changes: 0 additions & 35 deletions interface-definitions/include/vpp_host_resources.xml.i

This file was deleted.

1 change: 0 additions & 1 deletion interface-definitions/vpp.xml.in
Original file line number Diff line number Diff line change
Expand Up @@ -449,7 +449,6 @@
</leafNode>
</children>
</node>
#include <include/vpp_host_resources.xml.i>
<tagNode name="interface">
<properties>
<help>Interface</help>
Expand Down
8 changes: 0 additions & 8 deletions python/vyos/vpp/config_resource_checks/memory.py
Original file line number Diff line number Diff line change
Expand Up @@ -51,14 +51,6 @@ def get_total_hugepages_free_memory() -> int:
return hugepage_size * hugepages_free


def get_hugepages_total() -> int:
    """
    Return the total count of hugepages configured on the host.

    Reads the 'HugePages_Total' field from the parsed hugepages info
    (as exposed in /proc/meminfo — TODO confirm against
    get_hugepages_info()). Falls back to 0 when the field is absent so
    the declared ``int`` return type always holds and callers doing
    arithmetic (e.g. ``2 * get_hugepages_total()``) never receive None.
    """
    info = get_hugepages_info()
    # .get() without a default could return None, violating the `-> int`
    # annotation and raising TypeError in arithmetic at call sites
    return int(info.get('HugePages_Total', 0))


def get_numa_count():
"""
Run `numactl --hardware` and parse the 'available:' line.
Expand Down
15 changes: 0 additions & 15 deletions python/vyos/vpp/config_verify.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,6 @@
import psutil

from vyos import ConfigError
from vyos.base import Warning
from vyos.utils.cpu import get_core_count as total_core_count

from vyos.vpp.control_host import get_eth_driver
Expand Down Expand Up @@ -390,17 +389,3 @@ def verify_vpp_interfaces_dpdk_num_queues(qtype: str, num_queues: int, workers:
f'The number of {qtype} queues cannot be greater than the number of configured VPP workers: '
f'workers: {workers}, queues: {num_queues}'
)


def verify_vpp_host_resources(config: dict):
    """
    Warn when the configured vm.max_map_count looks too low for the
    host's hugepages.

    The check compares the CLI-provided max-map-count against
    2 * HugePages_Total. It emits a non-fatal Warning rather than
    raising ConfigError, so an undersized value does not block a commit.

    :param config: VPP config dict containing
                   ['settings']['host_resources']['max_map_count']
    """
    max_map_count = int(config['settings']['host_resources']['max_map_count'])

    # Total count of hugepages currently configured on the host
    hugepages = mem_checks.get_hugepages_total()

    if max_map_count < 2 * hugepages:
        # Fixed grammar of the user-facing message
        # (was: "VPP could work not properly. Please set up ...")
        Warning(
            'The max-map-count should be greater than or equal to (2 * HugePages_Total) '
            'or VPP may not work properly. Please set '
            f'"vpp settings host-resources max-map-count" to {2 * hugepages} or higher'
        )
28 changes: 28 additions & 0 deletions smoketest/scripts/cli/test_vpp.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,7 @@
from vyos.utils.process import process_named_running
from vyos.utils.file import read_file
from vyos.utils.process import rc_cmd
from vyos.utils.system import sysctl_read
from vyos.vpp.utils import human_page_memory_to_bytes

sys.path.append(os.getenv('vyos_completion_dir'))
Expand Down Expand Up @@ -1417,6 +1418,33 @@ def test_18_vpp_sflow(self):
self.cli_delete(base_sflow)
self.cli_commit()

def test_19_host_resources(self):
    # CLI path and the explicit values we will configure
    hr_path = ['system', 'option', 'host-resources']
    custom_map_count = '100000'
    custom_shmmax = '55555555555555'

    # Expected auto-calculated values when no CLI value is set:
    # max-map-count is derived automatically but never drops below
    # 65530; kernel.shmmax is expected to be 8589934592.
    default_map_count = '65530'
    default_shmmax = '8589934592'

    # Without explicit configuration the defaults must be in effect
    self.assertEqual(sysctl_read('vm.max_map_count'), default_map_count)
    self.assertEqual(sysctl_read('kernel.shmmax'), default_shmmax)

    # Configure both sysctls via CLI and verify they are applied
    self.cli_set(hr_path + ['max-map-count', custom_map_count])
    self.cli_set(hr_path + ['shmmax', custom_shmmax])
    self.cli_commit()

    self.assertEqual(sysctl_read('vm.max_map_count'), custom_map_count)
    self.assertEqual(sysctl_read('kernel.shmmax'), custom_shmmax)

    # Deleting the CLI nodes must restore the auto-calculated defaults
    self.cli_delete(hr_path + ['max-map-count'])
    self.cli_delete(hr_path + ['shmmax'])
    self.cli_commit()

    self.assertEqual(sysctl_read('vm.max_map_count'), default_map_count)
    self.assertEqual(sysctl_read('kernel.shmmax'), default_shmmax)


if __name__ == '__main__':
unittest.main(verbosity=2)
26 changes: 0 additions & 26 deletions src/conf_mode/vpp.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,6 @@
from vyos.template import render
from vyos.utils.boot import boot_configuration_complete
from vyos.utils.process import call
from vyos.utils.system import sysctl_read, sysctl_apply

from vyos.vpp import VPPControl
from vyos.vpp import control_host
Expand All @@ -49,7 +48,6 @@
verify_vpp_memory,
verify_vpp_statseg_size,
verify_vpp_interfaces_dpdk_num_queues,
verify_vpp_host_resources,
)
from vyos.vpp.config_filter import iface_filter_eth
from vyos.vpp.utils import EthtoolGDrvinfo
Expand Down Expand Up @@ -358,9 +356,6 @@ def verify(config):
# Check if available memory is enough for current VPP config
verify_vpp_memory(config)

if 'max_map_count' in config['settings'].get('host_resources', {}):
verify_vpp_host_resources(config)

if 'statseg' in config['settings']:
verify_vpp_statseg_size(config['settings'])

Expand Down Expand Up @@ -471,27 +466,6 @@ def generate(config):
render(service_conf, 'vpp/startup.conf.j2', config['settings'])
render(systemd_override, 'vpp/override.conf.j2', config)

# apply sysctl values
# default: https://github.com/FDio/vpp/blob/v23.10/src/vpp/conf/80-vpp.conf
# vm.nr_hugepages are now configured in section
# 'set system option kernel memory hugepage-size 2M hugepage-count <count>'
sysctl_config: dict[str, str] = {
'vm.max_map_count': config['settings']['host_resources']['max_map_count'],
'vm.hugetlb_shm_group': '0',
'kernel.shmmax': config['settings']['host_resources']['shmmax'],
}
# we do not want to lower current values
for sysctl_key, sysctl_value in sysctl_config.items():
# perform check only for quantitative params
if sysctl_key == 'vm.hugetlb_shm_group':
pass
current_value = sysctl_read(sysctl_key)
if int(current_value) > int(sysctl_value):
sysctl_config[sysctl_key] = current_value

if not sysctl_apply(sysctl_config):
raise ConfigError('Cannot configure sysctl parameters for VPP')

return None


Expand Down