From 4c3d037f036e84c77333a400b35bb1a628a1a118 Mon Sep 17 00:00:00 2001 From: Nicolas Fort Date: Fri, 30 Aug 2024 17:54:17 +0000 Subject: [PATCH 01/85] T6687: add fqdn support to nat rules. --- data/templates/firewall/nftables-nat.j2 | 13 +++ interface-definitions/include/nat-rule.xml.i | 2 + python/vyos/firewall.py | 47 ++++---- python/vyos/nat.py | 7 ++ smoketest/scripts/cli/test_nat.py | 26 +++++ src/conf_mode/firewall.py | 21 ++-- src/conf_mode/nat.py | 20 ++++ src/helpers/vyos-domain-resolver.py | 107 ++++++++++--------- src/systemd/vyos-domain-resolver.service | 1 + 9 files changed, 167 insertions(+), 77 deletions(-) diff --git a/data/templates/firewall/nftables-nat.j2 b/data/templates/firewall/nftables-nat.j2 index 4254f6a0e00..8c8dd3a8bc1 100644 --- a/data/templates/firewall/nftables-nat.j2 +++ b/data/templates/firewall/nftables-nat.j2 @@ -19,6 +19,12 @@ table ip vyos_nat { {% endfor %} {% endif %} } +{% for set_name in ip_fqdn %} + set FQDN_nat_{{ set_name }} { + type ipv4_addr + flags interval + } +{% endfor %} # # Source NAT rules build up here @@ -31,7 +37,14 @@ table ip vyos_nat { {{ config | nat_rule(rule, 'source') }} {% endfor %} {% endif %} + + } +{% for set_name in ip_fqdn %} + set FQDN_nat_{{ set_name }} { + type ipv4_addr + flags interval } +{% endfor %} chain VYOS_PRE_DNAT_HOOK { return diff --git a/interface-definitions/include/nat-rule.xml.i b/interface-definitions/include/nat-rule.xml.i index deb13529d8f..0a7179ff1ec 100644 --- a/interface-definitions/include/nat-rule.xml.i +++ b/interface-definitions/include/nat-rule.xml.i @@ -18,6 +18,7 @@ NAT destination parameters + #include #include #include #include @@ -315,6 +316,7 @@ NAT source parameters + #include #include #include #include diff --git a/python/vyos/firewall.py b/python/vyos/firewall.py index f0cf3c924be..b9439d42b78 100755 --- a/python/vyos/firewall.py +++ b/python/vyos/firewall.py @@ -53,25 +53,32 @@ def conntrack_required(conf): # Domain Resolver -def fqdn_config_parse(firewall): - firewall['ip_fqdn'] = {} - firewall['ip6_fqdn'] = {} - - for domain, path in dict_search_recursive(firewall, 'fqdn'): - hook_name = path[1] - priority = path[2] - - fw_name = path[2] - rule = path[4] - suffix = path[5][0] - set_name = f'{hook_name}_{priority}_{rule}_{suffix}' - - if (path[0] == 'ipv4') and (path[1] == 'forward' or path[1] == 'input' or path[1] == 'output' or path[1] == 'name'): - firewall['ip_fqdn'][set_name] = domain - elif (path[0] == 'ipv6') and (path[1] == 'forward' or path[1] == 'input' or path[1] == 'output' or path[1] == 'name'): - if path[1] == 'name': - set_name = f'name6_{priority}_{rule}_{suffix}' - firewall['ip6_fqdn'][set_name] = domain +def fqdn_config_parse(config, node): + config['ip_fqdn'] = {} + config['ip6_fqdn'] = {} + + for domain, path in dict_search_recursive(config, 'fqdn'): + if node != 'nat': + hook_name = path[1] + priority = path[2] + + rule = path[4] + suffix = path[5][0] + set_name = f'{hook_name}_{priority}_{rule}_{suffix}' + + if (path[0] == 'ipv4') and (path[1] == 'forward' or path[1] == 'input' or path[1] == 'output' or path[1] == 'name'): + config['ip_fqdn'][set_name] = domain + elif (path[0] == 'ipv6') and (path[1] == 'forward' or path[1] == 'input' or path[1] == 'output' or path[1] == 'name'): + if path[1] == 'name': + set_name = f'name6_{priority}_{rule}_{suffix}' + config['ip6_fqdn'][set_name] = domain + else: + # Parse FQDN for NAT + nat_direction = path[0] + nat_rule = path[2] + suffix = path[3][0] + set_name = f'{nat_direction}_{nat_rule}_{suffix}' + 
config['ip_fqdn'][set_name] = domain def fqdn_resolve(fqdn, ipv6=False): try: @@ -80,8 +87,6 @@ def fqdn_resolve(fqdn, ipv6=False): except: return None -# End Domain Resolver - def find_nftables_rule(table, chain, rule_matches=[]): # Find rule in table/chain that matches all criteria and return the handle results = cmd(f'sudo nft --handle list chain {table} {chain}').split("\n") diff --git a/python/vyos/nat.py b/python/vyos/nat.py index e5454878877..4fe21ef138b 100644 --- a/python/vyos/nat.py +++ b/python/vyos/nat.py @@ -236,6 +236,13 @@ def parse_nat_rule(rule_conf, rule_id, nat_type, ipv6=False): output.append(f'{proto} {prefix}port {operator} @P_{group_name}') + if 'fqdn' in side_conf: + fqdn = side_conf['fqdn'] + operator = '' + if fqdn[0] == '!': + operator = '!=' + output.append(f' ip {prefix}addr {operator} @FQDN_nat_{nat_type}_{rule_id}_{prefix}') + output.append('counter') if 'log' in rule_conf: diff --git a/smoketest/scripts/cli/test_nat.py b/smoketest/scripts/cli/test_nat.py index 5161e47fdc2..0beafcc6c1d 100755 --- a/smoketest/scripts/cli/test_nat.py +++ b/smoketest/scripts/cli/test_nat.py @@ -304,5 +304,31 @@ def test_snat_net_port_map(self): self.verify_nftables(nftables_search, 'ip vyos_nat') + def test_nat_fqdn(self): + source_domain = 'vyos.dev' + destination_domain = 'vyos.io' + + self.cli_set(src_path + ['rule', '1', 'outbound-interface', 'name', 'eth0']) + self.cli_set(src_path + ['rule', '1', 'source', 'fqdn', source_domain]) + self.cli_set(src_path + ['rule', '1', 'translation', 'address', 'masquerade']) + + self.cli_set(dst_path + ['rule', '1', 'destination', 'fqdn', destination_domain]) + self.cli_set(dst_path + ['rule', '1', 'source', 'fqdn', source_domain]) + self.cli_set(dst_path + ['rule', '1', 'destination', 'port', '5122']) + self.cli_set(dst_path + ['rule', '1', 'protocol', 'tcp']) + self.cli_set(dst_path + ['rule', '1', 'translation', 'address', '198.51.100.1']) + self.cli_set(dst_path + ['rule', '1', 'translation', 'port', '22']) + + + self.cli_commit() + + nftables_search = [ + ['set FQDN_nat_destination_1_d'], + ['set FQDN_nat_source_1_s'], + ['oifname "eth0"', 'ip saddr @FQDN_nat_source_1_s', 'masquerade', 'comment "SRC-NAT-1"'], + ['tcp dport 5122', 'ip saddr @FQDN_nat_destination_1_s', 'ip daddr @FQDN_nat_destination_1_d', 'dnat to 198.51.100.1:22', 'comment "DST-NAT-1"'] + ] + + self.verify_nftables(nftables_search, 'ip vyos_nat') if __name__ == '__main__': unittest.main(verbosity=2) diff --git a/src/conf_mode/firewall.py b/src/conf_mode/firewall.py index 5638a96685b..ffbd915a2d7 100755 --- a/src/conf_mode/firewall.py +++ b/src/conf_mode/firewall.py @@ -36,11 +36,15 @@ from vyos.utils.process import rc_cmd from vyos import ConfigError from vyos import airbag +from pathlib import Path from subprocess import run as subp_run airbag.enable() nftables_conf = '/run/nftables.conf' +domain_resolver_usage = '/run/use-vyos-domain-resolver-firewall' +domain_resolver_usage_nat = '/run/use-vyos-domain-resolver-nat' + sysctl_file = r'/run/sysctl/10-vyos-firewall.conf' valid_groups = [ @@ -128,7 +132,7 @@ def get_config(config=None): firewall['geoip_updated'] = geoip_updated(conf, firewall) - fqdn_config_parse(firewall) + fqdn_config_parse(firewall, 'firewall') set_dependents('conntrack', conf) @@ -570,12 +574,15 @@ def apply(firewall): call_dependents() - # T970 Enable a resolver (systemd daemon) that checks - # domain-group/fqdn addresses and update entries for domains by timeout - # If router loaded without internet connection or for synchronization - 
domain_action = 'stop' - if dict_search_args(firewall, 'group', 'domain_group') or firewall['ip_fqdn'] or firewall['ip6_fqdn']: - domain_action = 'restart' + ## DOMAIN RESOLVER + domain_action = 'restart' + if dict_search_args(firewall, 'group', 'domain_group') or firewall['ip_fqdn'].items() or firewall['ip6_fqdn'].items(): + text = f'# Automatically generated by firewall.py\nThis file indicates that vyos-domain-resolver service is used by the firewall.\n' + Path(domain_resolver_usage).write_text(text) + else: + Path(domain_resolver_usage).unlink(missing_ok=True) + if not Path('/run').glob('use-vyos-domain-resolver*'): + domain_action = 'stop' call(f'systemctl {domain_action} vyos-domain-resolver.service') if firewall['geoip_updated']: diff --git a/src/conf_mode/nat.py b/src/conf_mode/nat.py index 39803fa0262..98b2f3f2936 100755 --- a/src/conf_mode/nat.py +++ b/src/conf_mode/nat.py @@ -26,10 +26,13 @@ from vyos.utils.kernel import check_kmod from vyos.utils.dict import dict_search from vyos.utils.dict import dict_search_args +from vyos.utils.file import write_file from vyos.utils.process import cmd from vyos.utils.process import run +from vyos.utils.process import call from vyos.utils.network import is_addr_assigned from vyos.utils.network import interface_exists +from vyos.firewall import fqdn_config_parse from vyos import ConfigError from vyos import airbag @@ -39,6 +42,8 @@ nftables_nat_config = '/run/nftables_nat.conf' nftables_static_nat_conf = '/run/nftables_static-nat-rules.nft' +domain_resolver_usage = '/run/use-vyos-domain-resolver-nat' +domain_resolver_usage_firewall = '/run/use-vyos-domain-resolver-firewall' valid_groups = [ 'address_group', @@ -71,6 +76,8 @@ def get_config(config=None): if 'dynamic_group' in nat['firewall_group']: del nat['firewall_group']['dynamic_group'] + fqdn_config_parse(nat, 'nat') + return nat def verify_rule(config, err_msg, groups_dict): @@ -251,6 +258,19 @@ def apply(nat): call_dependents() + # DOMAIN RESOLVER + if nat and 'deleted' not in nat: + domain_action = 'restart' + if nat['ip_fqdn'].items(): + text = f'# Automatically generated by nat.py\nThis file indicates that vyos-domain-resolver service is used by nat.\n' + write_file(domain_resolver_usage, text) + elif os.path.exists(domain_resolver_usage): + os.unlink(domain_resolver_usage) + if not os.path.exists(domain_resolver_usage_firewall): + # Firewall not using domain resolver + domain_action = 'stop' + call(f'systemctl {domain_action} vyos-domain-resolver.service') + return None if __name__ == '__main__': diff --git a/src/helpers/vyos-domain-resolver.py b/src/helpers/vyos-domain-resolver.py index 57cfcabd7d3..f5a1d9297ff 100755 --- a/src/helpers/vyos-domain-resolver.py +++ b/src/helpers/vyos-domain-resolver.py @@ -30,6 +30,8 @@ base = ['firewall'] timeout = 300 cache = False +base_firewall = ['firewall'] +base_nat = ['nat'] domain_state = {} @@ -46,25 +48,25 @@ 'ip6 raw' } -def get_config(conf): - firewall = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True, +def get_config(conf, node): + node_config = conf.get_config_dict(node, key_mangling=('-', '_'), get_first_key=True, no_tag_node_value_mangle=True) - default_values = get_defaults(base, get_first_key=True) + default_values = get_defaults(node, get_first_key=True) - firewall = dict_merge(default_values, firewall) + node_config = dict_merge(default_values, node_config) global timeout, cache - if 'resolver_interval' in firewall: - timeout = int(firewall['resolver_interval']) + if 'resolver_interval' in node_config: + 
timeout = int(node_config['resolver_interval']) - if 'resolver_cache' in firewall: + if 'resolver_cache' in node_config: cache = True - fqdn_config_parse(firewall) + fqdn_config_parse(node_config, node[0]) - return firewall + return node_config def resolve(domains, ipv6=False): global domain_state @@ -108,55 +110,60 @@ def nft_valid_sets(): except: return [] -def update(firewall): +def update_fqdn(config, node): conf_lines = [] count = 0 - valid_sets = nft_valid_sets() - domain_groups = dict_search_args(firewall, 'group', 'domain_group') - if domain_groups: - for set_name, domain_config in domain_groups.items(): - if 'address' not in domain_config: - continue - - nft_set_name = f'D_{set_name}' - domains = domain_config['address'] - - ip_list = resolve(domains, ipv6=False) - for table in ipv4_tables: - if (table, nft_set_name) in valid_sets: - conf_lines += nft_output(table, nft_set_name, ip_list) - - ip6_list = resolve(domains, ipv6=True) - for table in ipv6_tables: - if (table, nft_set_name) in valid_sets: - conf_lines += nft_output(table, nft_set_name, ip6_list) + if node == 'firewall': + domain_groups = dict_search_args(config, 'group', 'domain_group') + if domain_groups: + for set_name, domain_config in domain_groups.items(): + if 'address' not in domain_config: + continue + nft_set_name = f'D_{set_name}' + domains = domain_config['address'] + + ip_list = resolve(domains, ipv6=False) + for table in ipv4_tables: + if (table, nft_set_name) in valid_sets: + conf_lines += nft_output(table, nft_set_name, ip_list) + ip6_list = resolve(domains, ipv6=True) + for table in ipv6_tables: + if (table, nft_set_name) in valid_sets: + conf_lines += nft_output(table, nft_set_name, ip6_list) + count += 1 + + for set_name, domain in config['ip_fqdn'].items(): + table = 'ip vyos_filter' + nft_set_name = f'FQDN_{set_name}' + ip_list = resolve([domain], ipv6=False) + if (table, nft_set_name) in valid_sets: + conf_lines += nft_output(table, nft_set_name, ip_list) count += 1 - for set_name, domain in firewall['ip_fqdn'].items(): - table = 'ip vyos_filter' - nft_set_name = f'FQDN_{set_name}' - - ip_list = resolve([domain], ipv6=False) - - if (table, nft_set_name) in valid_sets: - conf_lines += nft_output(table, nft_set_name, ip_list) - count += 1 - - for set_name, domain in firewall['ip6_fqdn'].items(): - table = 'ip6 vyos_filter' - nft_set_name = f'FQDN_{set_name}' + for set_name, domain in config['ip6_fqdn'].items(): + table = 'ip6 vyos_filter' + nft_set_name = f'FQDN_{set_name}' + ip_list = resolve([domain], ipv6=True) + if (table, nft_set_name) in valid_sets: + conf_lines += nft_output(table, nft_set_name, ip_list) + count += 1 - ip_list = resolve([domain], ipv6=True) - if (table, nft_set_name) in valid_sets: - conf_lines += nft_output(table, nft_set_name, ip_list) - count += 1 + else: + # It's NAT + for set_name, domain in config['ip_fqdn'].items(): + table = 'ip vyos_nat' + nft_set_name = f'FQDN_nat_{set_name}' + ip_list = resolve([domain], ipv6=False) + if (table, nft_set_name) in valid_sets: + conf_lines += nft_output(table, nft_set_name, ip_list) + count += 1 nft_conf_str = "\n".join(conf_lines) + "\n" code = run(f'nft --file -', input=nft_conf_str) - print(f'Updated {count} sets - result: {code}') + print(f'Updated {count} sets in {node} - result: {code}') if __name__ == '__main__': print(f'VyOS domain resolver') @@ -169,10 +176,12 @@ def update(firewall): time.sleep(1) conf = ConfigTreeQuery() - firewall = get_config(conf) + firewall = get_config(conf, base_firewall) + nat = get_config(conf, 
base_nat) print(f'interval: {timeout}s - cache: {cache}') while True: - update(firewall) + update_fqdn(firewall, 'firewall') + update_fqdn(nat, 'nat') time.sleep(timeout) diff --git a/src/systemd/vyos-domain-resolver.service b/src/systemd/vyos-domain-resolver.service index c56b51f0c3e..e63ae5e34cd 100644 --- a/src/systemd/vyos-domain-resolver.service +++ b/src/systemd/vyos-domain-resolver.service @@ -1,6 +1,7 @@ [Unit] Description=VyOS firewall domain resolver After=vyos-router.service +ConditionPathExistsGlob=/run/use-vyos-domain-resolver* [Service] Type=simple From a0c15a159e54cb1b6abba7a0a6a0a793a53915d0 Mon Sep 17 00:00:00 2001 From: rebortg Date: Wed, 11 Sep 2024 14:15:07 +0200 Subject: [PATCH 02/85] T973: add basic node_exporter implementation --- .../node_exporter/node_exporter.service.j2 | 17 +++ debian/vyos-1x.postinst | 5 + .../service_monitoring_node_exporter.xml.in | 28 +++++ .../test_service_monitoring_node-exporter.py | 64 +++++++++++ .../service_monitoring_node-exporter.py | 104 ++++++++++++++++++ 5 files changed, 218 insertions(+) create mode 100644 data/templates/node_exporter/node_exporter.service.j2 create mode 100644 interface-definitions/service_monitoring_node_exporter.xml.in create mode 100755 smoketest/scripts/cli/test_service_monitoring_node-exporter.py create mode 100755 src/conf_mode/service_monitoring_node-exporter.py diff --git a/data/templates/node_exporter/node_exporter.service.j2 b/data/templates/node_exporter/node_exporter.service.j2 new file mode 100644 index 00000000000..01dad5aa23c --- /dev/null +++ b/data/templates/node_exporter/node_exporter.service.j2 @@ -0,0 +1,17 @@ +{% set vrf_command = 'ip vrf exec ' ~ vrf ~ ' runuser -u node_exporter -- ' if vrf is vyos_defined else '' %} +[Unit] +Description=Node Exporter +Documentation=https://github.com/prometheus/node_exporter +After=network.target + +[Service] +{% if vrf is not vyos_defined %} +User=node_exporter +{% endif %} +ExecStart={{ vrf_command }}/usr/sbin/node_exporter \ +{% for address in listen_address %} + --web.listen-address={{ address }}:{{ port }} +{% endfor %} + +[Install] +WantedBy=multi-user.target diff --git a/debian/vyos-1x.postinst b/debian/vyos-1x.postinst index dc8ada2673b..d83634cfc50 100644 --- a/debian/vyos-1x.postinst +++ b/debian/vyos-1x.postinst @@ -21,6 +21,11 @@ if ! grep -q '^openvpn' /etc/passwd; then adduser --quiet --firstuid 100 --system --group --shell /usr/sbin/nologin openvpn fi +# node_exporter should get its own user +if ! grep -q '^node_exporter' /etc/passwd; then + adduser --quiet --firstuid 100 --system --group --shell /bin/false node_exporter +fi + # We need to have a group for RADIUS service users to use it inside PAM rules if ! 
grep -q '^radius' /etc/group; then addgroup --firstgid 1000 --quiet radius diff --git a/interface-definitions/service_monitoring_node_exporter.xml.in b/interface-definitions/service_monitoring_node_exporter.xml.in new file mode 100644 index 00000000000..ee6d06d6d4f --- /dev/null +++ b/interface-definitions/service_monitoring_node_exporter.xml.in @@ -0,0 +1,28 @@ + + + + + + + + + Prometheus exporter for hardware and operating system metrics + 1280 + + + #include + + 0.0.0.0 + + #include + + 9100 + + #include + + + + + + + diff --git a/smoketest/scripts/cli/test_service_monitoring_node-exporter.py b/smoketest/scripts/cli/test_service_monitoring_node-exporter.py new file mode 100755 index 00000000000..6b96e3130a4 --- /dev/null +++ b/smoketest/scripts/cli/test_service_monitoring_node-exporter.py @@ -0,0 +1,64 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2020-2024 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import unittest + +from base_vyostest_shim import VyOSUnitTestSHIM +from vyos.utils.process import process_named_running +from vyos.utils.file import read_file + +PROCESS_NAME = 'node_exporter' +base_path = ['service', 'monitoring', 'node-exporter'] +service_file = '/etc/systemd/system/node_exporter.service' +listen_if = 'dum3421' +listen_ip = '192.0.2.1' + + +class TestMonitoringNodeExporter(VyOSUnitTestSHIM.TestCase): + @classmethod + def setUpClass(cls): + # call base-classes classmethod + super(TestMonitoringNodeExporter, cls).setUpClass() + # create a test interfaces + cls.cli_set( + cls, ['interfaces', 'dummy', listen_if, 'address', listen_ip + '/32'] + ) + + @classmethod + def tearDownClass(cls): + cls.cli_delete(cls, ['interfaces', 'dummy', listen_if]) + super(TestMonitoringNodeExporter, cls).tearDownClass() + + def tearDown(self): + self.cli_delete(base_path) + self.cli_commit() + self.assertFalse(process_named_running(PROCESS_NAME)) + + def test_01_basic_config(self): + self.cli_set(base_path + ['listen-address', listen_ip]) + + # commit changes + self.cli_commit() + + file_content = read_file(service_file) + self.assertIn(f'{listen_ip}:9100', file_content) + + # Check for running process + self.assertTrue(process_named_running(PROCESS_NAME)) + + +if __name__ == '__main__': + unittest.main(verbosity=2) diff --git a/src/conf_mode/service_monitoring_node-exporter.py b/src/conf_mode/service_monitoring_node-exporter.py new file mode 100755 index 00000000000..e8d7c15b43d --- /dev/null +++ b/src/conf_mode/service_monitoring_node-exporter.py @@ -0,0 +1,104 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2021-2024 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import os + +from sys import exit + +from vyos.config import Config +from vyos.configdict import is_node_changed +from vyos.configverify import verify_vrf +from vyos.template import render +from vyos.utils.process import call +from vyos import ConfigError +from vyos import airbag + + +airbag.enable() + +service_file = '/etc/systemd/system/node_exporter.service' +systemd_service = 'node_exporter.service' + + +def get_config(config=None): + if config: + conf = config + else: + conf = Config() + base = ['service', 'monitoring', 'node-exporter'] + if not conf.exists(base): + return None + + config_data = conf.get_config_dict( + base, key_mangling=('-', '_'), get_first_key=True + ) + config_data = conf.merge_defaults(config_data, recursive=True) + + tmp = is_node_changed(conf, base + ['vrf']) + if tmp: + config_data.update({'restart_required': {}}) + + return config_data + + +def verify(config_data): + # bail out early - looks like removal from running config + if not config_data: + return None + + verify_vrf(config_data) + return None + + +def generate(config_data): + if not config_data: + # Delete systemd files + if os.path.isfile(service_file): + os.unlink(service_file) + return None + + # Render node_exporter service_file + render(service_file, 'node_exporter/node_exporter.service.j2', config_data) + return None + + +def apply(config_data): + # Reload systemd manager configuration + call('systemctl daemon-reload') + if not config_data: + call(f'systemctl stop {systemd_service}') + return + + # we need to restart the service if e.g. the VRF name changed + systemd_action = 'reload-or-restart' + if 'restart_required' in config_data: + systemd_action = 'restart' + + call(f'systemctl {systemd_action} {systemd_service}') + + # Telegraf include custom rsyslog config changes + call('systemctl reload-or-restart rsyslog') + + +if __name__ == '__main__': + try: + c = get_config() + verify(c) + generate(c) + apply(c) + except ConfigError as e: + print(e) + exit(1) From 0b7a2a052d0f07c448b371d35863e0f8a324675e Mon Sep 17 00:00:00 2001 From: Daniil Baturin Date: Fri, 27 Sep 2024 16:51:50 +0100 Subject: [PATCH 03/85] validators: T6743: use native ipaddrcheck validator options for ranges --- src/validators/ipv4-range | 42 ++++++--------------------------------- src/validators/ipv6-range | 26 ++++++++---------------- 2 files changed, 14 insertions(+), 54 deletions(-) diff --git a/src/validators/ipv4-range b/src/validators/ipv4-range index 6492bfc5226..7bf271bbb3e 100755 --- a/src/validators/ipv4-range +++ b/src/validators/ipv4-range @@ -1,40 +1,10 @@ -#!/bin/bash +#!/bin/sh -# snippet from https://stackoverflow.com/questions/10768160/ip-address-converter -ip2dec () { - local a b c d ip=$@ - IFS=. read -r a b c d <<< "$ip" - printf '%d\n' "$((a * 256 ** 3 + b * 256 ** 2 + c * 256 + d))" -} +ipaddrcheck --verbose --is-ipv4-range "$1" -error_exit() { - echo "Error: $1 is not a valid IPv4 address range" - exit 1 -} - -# Only run this if there is a hypen present in $1 -if [[ "$1" =~ "-" ]]; then - # This only works with real bash (<<<) - split IP addresses into array with - # hyphen as delimiter - readarray -d - -t strarr <<< $1 - - ipaddrcheck --is-ipv4-single ${strarr[0]} - if [ $? -gt 0 ]; then - error_exit $1 - fi - - ipaddrcheck --is-ipv4-single ${strarr[1]} - if [ $? 
-gt 0 ]; then - error_exit $1 - fi - - start=$(ip2dec ${strarr[0]}) - stop=$(ip2dec ${strarr[1]}) - if [ $start -ge $stop ]; then - error_exit $1 - fi - - exit 0 +if [ $? -gt 0 ]; then + echo "Error: $1 is not a valid IPv4 address range" + exit 1 fi -error_exit $1 +exit 0 diff --git a/src/validators/ipv6-range b/src/validators/ipv6-range index 7080860c4c4..0d2eb638416 100755 --- a/src/validators/ipv6-range +++ b/src/validators/ipv6-range @@ -1,20 +1,10 @@ -#!/usr/bin/env python3 +#!/bin/sh -from ipaddress import IPv6Address -from sys import argv, exit +ipaddrcheck --verbose --is-ipv6-range "$1" -if __name__ == '__main__': - if len(argv) > 1: - # try to pass validation and raise an error if failed - try: - ipv6_range = argv[1] - range_left = ipv6_range.split('-')[0] - range_right = ipv6_range.split('-')[1] - if not IPv6Address(range_left) < IPv6Address(range_right): - raise ValueError(f'left element {range_left} must be less than right element {range_right}') - except Exception as err: - print(f'Error: {ipv6_range} is not a valid IPv6 range: {err}') - exit(1) - else: - print('Error: an IPv6 range argument must be provided') - exit(1) +if [ $? -gt 0 ]; then + echo "Error: $1 is not a valid IPv6 address range" + exit 1 +fi + +exit 0 From 3ad911a20620a67b6a019e86da815e2a25047de7 Mon Sep 17 00:00:00 2001 From: John Estabrook Date: Mon, 23 Sep 2024 12:53:20 -0500 Subject: [PATCH 04/85] http-api: T6736: update for deprecated/renamed in Pydantic V2 --- src/services/vyos-http-api-server | 37 ++++++++++++++++--------------- 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/src/services/vyos-http-api-server b/src/services/vyos-http-api-server index 91100410c80..42c4cf387a5 100755 --- a/src/services/vyos-http-api-server +++ b/src/services/vyos-http-api-server @@ -33,7 +33,7 @@ from fastapi import BackgroundTasks from fastapi.responses import HTMLResponse from fastapi.exceptions import RequestValidationError from fastapi.routing import APIRoute -from pydantic import BaseModel, StrictStr, validator, model_validator +from pydantic import BaseModel, StrictStr, field_validator, model_validator from starlette.middleware.cors import CORSMiddleware from starlette.datastructures import FormData from starlette.formparsers import FormParser, MultiPartParser @@ -91,9 +91,9 @@ def success(data): return HTMLResponse(resp) # Pydantic models for validation -# Pydantic will cast when possible, so use StrictStr -# validators added as needed for additional constraints -# schema_extra adds anotations to OpenAPI, to add examples +# Pydantic will cast when possible, so use StrictStr validators added as +# needed for additional constraints +# json_schema_extra adds anotations to OpenAPI to add examples class ApiModel(BaseModel): key: StrictStr @@ -102,8 +102,9 @@ class BasePathModel(BaseModel): op: StrictStr path: List[StrictStr] - @validator("path") - def check_non_empty(cls, path): + @field_validator("path") + @classmethod + def check_non_empty(cls, path: str) -> str: if not len(path) > 0: raise ValueError('path must be non-empty') return path @@ -113,7 +114,7 @@ class BaseConfigureModel(BasePathModel): class ConfigureModel(ApiModel, BaseConfigureModel): class Config: - schema_extra = { + json_schema_extra = { "example": { "key": "id_key", "op": "set | delete | comment", @@ -125,7 +126,7 @@ class ConfigureListModel(ApiModel): commands: List[BaseConfigureModel] class Config: - schema_extra = { + json_schema_extra = { "example": { "key": "id_key", "commands": "list of commands", @@ -155,7 +156,7 @@ class 
RetrieveModel(ApiModel): configFormat: StrictStr = None class Config: - schema_extra = { + json_schema_extra = { "example": { "key": "id_key", "op": "returnValue | returnValues | exists | showConfig", @@ -170,7 +171,7 @@ class ConfigFileModel(ApiModel): file: StrictStr = None class Config: - schema_extra = { + json_schema_extra = { "example": { "key": "id_key", "op": "save | load", @@ -203,7 +204,7 @@ class ImageModel(ApiModel): return self class Config: - schema_extra = { + json_schema_extra = { "example": { "key": "id_key", "op": "add | delete | show | set_default", @@ -218,7 +219,7 @@ class ImportPkiModel(ApiModel): passphrase: StrictStr = None class Config: - schema_extra = { + json_schema_extra = { "example": { "key": "id_key", "op": "import_pki", @@ -233,7 +234,7 @@ class ContainerImageModel(ApiModel): name: StrictStr = None class Config: - schema_extra = { + json_schema_extra = { "example": { "key": "id_key", "op": "add | delete | show", @@ -246,7 +247,7 @@ class GenerateModel(ApiModel): path: List[StrictStr] class Config: - schema_extra = { + json_schema_extra = { "example": { "key": "id_key", "op": "generate", @@ -259,7 +260,7 @@ class ShowModel(ApiModel): path: List[StrictStr] class Config: - schema_extra = { + json_schema_extra = { "example": { "key": "id_key", "op": "show", @@ -272,7 +273,7 @@ class RebootModel(ApiModel): path: List[StrictStr] class Config: - schema_extra = { + json_schema_extra = { "example": { "key": "id_key", "op": "reboot", @@ -285,7 +286,7 @@ class ResetModel(ApiModel): path: List[StrictStr] class Config: - schema_extra = { + json_schema_extra = { "example": { "key": "id_key", "op": "reset", @@ -298,7 +299,7 @@ class PoweroffModel(ApiModel): path: List[StrictStr] class Config: - schema_extra = { + json_schema_extra = { "example": { "key": "id_key", "op": "poweroff", From fc9885f859617bab36c971f4eaa56240741f52c4 Mon Sep 17 00:00:00 2001 From: John Estabrook Date: Tue, 24 Sep 2024 22:48:25 -0500 Subject: [PATCH 05/85] http-api: T6736: separate REST API and GraphQL API activation The GraphQL API was implemented as an addition to the existing REST API. As there is no necessary dependency, separate the initialization of the respective endpoints. Factor out the REST Pydantic models and FastAPI routes for symmetry and clarity. 
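As a rough illustration of the new wiring (this sketch is not part of the change set itself), the server entry point can now bring up each API independently on the shared FastAPI application. The import paths follow the modules added below; the omitted startup code is assumed to have already populated SessionState (API keys, auth type, GraphQL origins/introspection) from the CLI configuration.

    # Minimal sketch, assuming SessionState has been initialized by the server
    # startup code; configuration parsing and error handling are omitted.
    from fastapi import FastAPI

    from api.rest.routers import router as rest_router
    from api.graphql.routers import graphql_init

    app = FastAPI()
    app.include_router(rest_router)   # REST endpoints: /configure, /retrieve, ...
    graphql_init(app)                 # mounts /graphql independently of the REST routes

Dropping either API is then just a matter of skipping the corresponding call.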
--- src/services/api/__init__.py | 0 src/services/api/graphql/bindings.py | 21 +- .../graphql/graphql/auth_token_mutation.py | 8 +- src/services/api/graphql/graphql/mutations.py | 13 +- src/services/api/graphql/graphql/queries.py | 13 +- src/services/api/graphql/libs/__init__.py | 0 src/services/api/graphql/libs/key_auth.py | 22 +- src/services/api/graphql/libs/token_auth.py | 41 +- src/services/api/graphql/routers.py | 54 + src/services/api/graphql/session/session.py | 6 +- src/services/api/graphql/state.py | 4 - src/services/api/rest/__init__.py | 0 src/services/api/rest/models.py | 276 +++++ src/services/api/rest/routers.py | 685 ++++++++++++ src/services/api/session.py | 40 + src/services/vyos-http-api-server | 972 ++---------------- 16 files changed, 1206 insertions(+), 949 deletions(-) create mode 100644 src/services/api/__init__.py create mode 100644 src/services/api/graphql/libs/__init__.py create mode 100644 src/services/api/graphql/routers.py delete mode 100644 src/services/api/graphql/state.py create mode 100644 src/services/api/rest/__init__.py create mode 100644 src/services/api/rest/models.py create mode 100644 src/services/api/rest/routers.py create mode 100644 src/services/api/session.py diff --git a/src/services/api/__init__.py b/src/services/api/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/src/services/api/graphql/bindings.py b/src/services/api/graphql/bindings.py index ef496646623..93dd0fbfb55 100644 --- a/src/services/api/graphql/bindings.py +++ b/src/services/api/graphql/bindings.py @@ -1,4 +1,4 @@ -# Copyright 2021 VyOS maintainers and contributors +# Copyright 2021-2024 VyOS maintainers and contributors # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public @@ -13,24 +13,35 @@ # You should have received a copy of the GNU Lesser General Public License # along with this library. If not, see . + import vyos.defaults + +from ariadne import make_executable_schema +from ariadne import load_schema_from_path +from ariadne import snake_case_fallback_resolvers + from . graphql.queries import query from . graphql.mutations import mutation from . graphql.directives import directives_dict from . graphql.errors import op_mode_error from . graphql.auth_token_mutation import auth_token_mutation from . libs.token_auth import init_secret -from . import state -from ariadne import make_executable_schema, load_schema_from_path, snake_case_fallback_resolvers + +from .. session import SessionState + def generate_schema(): + state = SessionState() api_schema_dir = vyos.defaults.directories['api_schema'] - if state.settings['app'].state.vyos_auth_type == 'token': + if state.auth_type == 'token': init_secret() type_defs = load_schema_from_path(api_schema_dir) - schema = make_executable_schema(type_defs, query, op_mode_error, mutation, auth_token_mutation, snake_case_fallback_resolvers, directives=directives_dict) + schema = make_executable_schema(type_defs, query, op_mode_error, + mutation, auth_token_mutation, + snake_case_fallback_resolvers, + directives=directives_dict) return schema diff --git a/src/services/api/graphql/graphql/auth_token_mutation.py b/src/services/api/graphql/graphql/auth_token_mutation.py index a53fa4d60aa..16496021745 100644 --- a/src/services/api/graphql/graphql/auth_token_mutation.py +++ b/src/services/api/graphql/graphql/auth_token_mutation.py @@ -21,7 +21,7 @@ from .. libs.token_auth import generate_token from .. session.session import get_user_info -from .. 
import state +from ... session import SessionState auth_token_mutation = ObjectType("Mutation") @@ -31,8 +31,10 @@ def auth_token_resolver(obj: Any, info: GraphQLResolveInfo, data: Dict): user = data['username'] passwd = data['password'] - secret = state.settings['secret'] - exp_interval = int(state.settings['app'].state.vyos_token_exp) + state = SessionState() + + secret = getattr(state, 'secret', '') + exp_interval = int(state.token_exp) expiration = (datetime.datetime.now(tz=datetime.timezone.utc) + datetime.timedelta(seconds=exp_interval)) diff --git a/src/services/api/graphql/graphql/mutations.py b/src/services/api/graphql/graphql/mutations.py index d115a8e94b2..62031ada3c0 100644 --- a/src/services/api/graphql/graphql/mutations.py +++ b/src/services/api/graphql/graphql/mutations.py @@ -21,10 +21,10 @@ from typing import Any, Dict, Optional # pylint: disable=W0611 from graphql import GraphQLResolveInfo # pylint: disable=W0611 -from .. import state +from ... session import SessionState from .. libs import key_auth -from api.graphql.session.session import Session -from api.graphql.session.errors.op_mode_errors import op_mode_err_msg, op_mode_err_code +from .. session.session import Session +from .. session.errors.op_mode_errors import op_mode_err_msg, op_mode_err_code from vyos.opmode import Error as OpModeError mutation = ObjectType("Mutation") @@ -45,12 +45,13 @@ def make_mutation_resolver(mutation_name, class_name, session_func): func_base_name = convert_camel_case_to_snake(class_name) resolver_name = f'resolve_{func_base_name}' func_sig = '(obj: Any, info: GraphQLResolveInfo, data: Optional[Dict]=None)' + state = SessionState() @mutation.field(mutation_name) @with_signature(func_sig, func_name=resolver_name) async def func_impl(*args, **kwargs): try: - auth_type = state.settings['app'].state.vyos_auth_type + auth_type = state.auth_type if auth_type == 'key': data = kwargs['data'] @@ -86,11 +87,11 @@ async def func_impl(*args, **kwargs): } else: # AtrributeError will have already been raised if no - # vyos_auth_type; validation and defaultValue ensure it is + # auth_type; validation and defaultValue ensure it is # one of the previous cases, so this is never reached. pass - session = state.settings['app'].state.vyos_session + session = state.session # one may override the session functions with a local subclass try: diff --git a/src/services/api/graphql/graphql/queries.py b/src/services/api/graphql/graphql/queries.py index 717098259a4..1e9036574be 100644 --- a/src/services/api/graphql/graphql/queries.py +++ b/src/services/api/graphql/graphql/queries.py @@ -21,10 +21,10 @@ from typing import Any, Dict, Optional # pylint: disable=W0611 from graphql import GraphQLResolveInfo # pylint: disable=W0611 -from .. import state +from ... session import SessionState from .. libs import key_auth -from api.graphql.session.session import Session -from api.graphql.session.errors.op_mode_errors import op_mode_err_msg, op_mode_err_code +from .. session.session import Session +from .. 
session.errors.op_mode_errors import op_mode_err_msg, op_mode_err_code from vyos.opmode import Error as OpModeError query = ObjectType("Query") @@ -45,12 +45,13 @@ def make_query_resolver(query_name, class_name, session_func): func_base_name = convert_camel_case_to_snake(class_name) resolver_name = f'resolve_{func_base_name}' func_sig = '(obj: Any, info: GraphQLResolveInfo, data: Optional[Dict]=None)' + state = SessionState() @query.field(query_name) @with_signature(func_sig, func_name=resolver_name) async def func_impl(*args, **kwargs): try: - auth_type = state.settings['app'].state.vyos_auth_type + auth_type = state.auth_type if auth_type == 'key': data = kwargs['data'] @@ -86,11 +87,11 @@ async def func_impl(*args, **kwargs): } else: # AtrributeError will have already been raised if no - # vyos_auth_type; validation and defaultValue ensure it is + # auth_type; validation and defaultValue ensure it is # one of the previous cases, so this is never reached. pass - session = state.settings['app'].state.vyos_session + session = state.session # one may override the session functions with a local subclass try: diff --git a/src/services/api/graphql/libs/__init__.py b/src/services/api/graphql/libs/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/src/services/api/graphql/libs/key_auth.py b/src/services/api/graphql/libs/key_auth.py index 2db0f7d48b2..9e49a1203e5 100644 --- a/src/services/api/graphql/libs/key_auth.py +++ b/src/services/api/graphql/libs/key_auth.py @@ -1,5 +1,20 @@ +# Copyright 2021-2024 VyOS maintainers and contributors +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this library. If not, see . -from .. import state + +from ... session import SessionState def check_auth(key_list, key): if not key_list: @@ -11,8 +26,9 @@ def check_auth(key_list, key): return key_id def auth_required(key): + state = SessionState() api_keys = None - api_keys = state.settings['app'].state.vyos_keys + api_keys = state.keys key_id = check_auth(api_keys, key) - state.settings['app'].state.vyos_id = key_id + state.id = key_id return key_id diff --git a/src/services/api/graphql/libs/token_auth.py b/src/services/api/graphql/libs/token_auth.py index 8585485c96d..2d772e0357e 100644 --- a/src/services/api/graphql/libs/token_auth.py +++ b/src/services/api/graphql/libs/token_auth.py @@ -1,33 +1,52 @@ +# Copyright 2021-2024 VyOS maintainers and contributors +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public License +# along with this library. If not, see . + + import jwt import uuid import pam from secrets import token_hex -from .. import state +from ... session import SessionState + def _check_passwd_pam(username: str, passwd: str) -> bool: if pam.authenticate(username, passwd): return True return False + def init_secret(): - length = int(state.settings['app'].state.vyos_secret_len) + state = SessionState() + length = int(state.secret_len) secret = token_hex(length) - state.settings['secret'] = secret + state.secret = secret + def generate_token(user: str, passwd: str, secret: str, exp: int) -> dict: if user is None or passwd is None: return {} + state = SessionState() if _check_passwd_pam(user, passwd): - app = state.settings['app'] try: - users = app.state.vyos_token_users + users = state.token_users except AttributeError: - app.state.vyos_token_users = {} - users = app.state.vyos_token_users + users = state.token_users = {} user_id = uuid.uuid1().hex payload_data = {'iss': user, 'sub': user_id, 'exp': exp} - secret = state.settings.get('secret') + secret = getattr(state, 'secret', None) if secret is None: return {"errors": ['missing secret']} token = jwt.encode(payload=payload_data, key=secret, algorithm="HS256") @@ -37,10 +56,12 @@ def generate_token(user: str, passwd: str, secret: str, exp: int) -> dict: else: return {"errors": ['failed pam authentication']} + def get_user_context(request): context = {} context['request'] = request context['user'] = None + state = SessionState() if 'Authorization' in request.headers: auth = request.headers['Authorization'] scheme, token = auth.split() @@ -48,7 +69,7 @@ def get_user_context(request): return context try: - secret = state.settings.get('secret') + secret = getattr(state, 'secret', None) payload = jwt.decode(token, secret, algorithms=["HS256"]) user_id: str = payload.get('sub') if user_id is None: @@ -59,7 +80,7 @@ def get_user_context(request): except jwt.PyJWTError: return context try: - users = state.settings['app'].state.vyos_token_users + users = state.token_users except AttributeError: return context diff --git a/src/services/api/graphql/routers.py b/src/services/api/graphql/routers.py new file mode 100644 index 00000000000..d04375a49e6 --- /dev/null +++ b/src/services/api/graphql/routers.py @@ -0,0 +1,54 @@ +# Copyright 2024 VyOS maintainers and contributors +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this library. If not, see . + +# pylint: disable=import-outside-toplevel + + +import typing + +from ariadne.asgi import GraphQL +from starlette.middleware.cors import CORSMiddleware + + +if typing.TYPE_CHECKING: + from fastapi import FastAPI + + +def graphql_init(app: "FastAPI"): + from .. 
session import SessionState + from .libs.token_auth import get_user_context + + state = SessionState() + + # import after initializaion of state + from .bindings import generate_schema + schema = generate_schema() + + in_spec = state.introspection + + if state.origins: + origins = state.origins + app.add_route('/graphql', CORSMiddleware(GraphQL(schema, + context_value=get_user_context, + debug=True, + introspection=in_spec), + allow_origins=origins, + allow_methods=("GET", "POST", "OPTIONS"), + allow_headers=("Authorization",))) + else: + app.add_route('/graphql', GraphQL(schema, + context_value=get_user_context, + debug=True, + introspection=in_spec)) diff --git a/src/services/api/graphql/session/session.py b/src/services/api/graphql/session/session.py index 6ae44b9bfca..6e2875f3c29 100644 --- a/src/services/api/graphql/session/session.py +++ b/src/services/api/graphql/session/session.py @@ -138,7 +138,6 @@ def delete_system_image(self): return res def show_user_info(self): - session = self._session data = self._data user_info = {} @@ -151,10 +150,9 @@ def show_user_info(self): return user_info def system_status(self): - import api.graphql.session.composite.system_status as system_status + from api.graphql.session.composite import system_status session = self._session - data = self._data status = {} status['host_name'] = session.show(['host', 'name']).strip() @@ -165,7 +163,6 @@ def system_status(self): return status def gen_op_query(self): - session = self._session data = self._data name = self._name op_mode_list = self._op_mode_list @@ -189,7 +186,6 @@ def gen_op_query(self): return res def gen_op_mutation(self): - session = self._session data = self._data name = self._name op_mode_list = self._op_mode_list diff --git a/src/services/api/graphql/state.py b/src/services/api/graphql/state.py deleted file mode 100644 index 63db9f4ef5e..00000000000 --- a/src/services/api/graphql/state.py +++ /dev/null @@ -1,4 +0,0 @@ - -def init(): - global settings - settings = {} diff --git a/src/services/api/rest/__init__.py b/src/services/api/rest/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/src/services/api/rest/models.py b/src/services/api/rest/models.py new file mode 100644 index 00000000000..034e3fcdb3c --- /dev/null +++ b/src/services/api/rest/models.py @@ -0,0 +1,276 @@ +# Copyright 2024 VyOS maintainers and contributors +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this library. If not, see . 
+ + +# pylint: disable=too-few-public-methods + +import json +from enum import Enum +from typing import List +from typing import Union +from typing import Dict +from typing import Self + +from pydantic import BaseModel +from pydantic import StrictStr +from pydantic import field_validator +from pydantic import model_validator +from fastapi.responses import HTMLResponse + + +def error(code, msg): + resp = {"success": False, "error": msg, "data": None} + resp = json.dumps(resp) + return HTMLResponse(resp, status_code=code) + +def success(data): + resp = {"success": True, "data": data, "error": None} + resp = json.dumps(resp) + return HTMLResponse(resp) + +# Pydantic models for validation +# Pydantic will cast when possible, so use StrictStr validators added as +# needed for additional constraints +# json_schema_extra adds anotations to OpenAPI to add examples + +class ApiModel(BaseModel): + key: StrictStr + +class BasePathModel(BaseModel): + op: StrictStr + path: List[StrictStr] + + @field_validator("path") + @classmethod + def check_non_empty(cls, path: str) -> str: + if not len(path) > 0: + raise ValueError('path must be non-empty') + return path + +class BaseConfigureModel(BasePathModel): + value: StrictStr = None + +class ConfigureModel(ApiModel, BaseConfigureModel): + class Config: + json_schema_extra = { + "example": { + "key": "id_key", + "op": "set | delete | comment", + "path": ["config", "mode", "path"], + } + } + +class ConfigureListModel(ApiModel): + commands: List[BaseConfigureModel] + + class Config: + json_schema_extra = { + "example": { + "key": "id_key", + "commands": "list of commands", + } + } + +class BaseConfigSectionModel(BasePathModel): + section: Dict + +class ConfigSectionModel(ApiModel, BaseConfigSectionModel): + pass + +class ConfigSectionListModel(ApiModel): + commands: List[BaseConfigSectionModel] + +class BaseConfigSectionTreeModel(BaseModel): + op: StrictStr + mask: Dict + config: Dict + +class ConfigSectionTreeModel(ApiModel, BaseConfigSectionTreeModel): + pass + +class RetrieveModel(ApiModel): + op: StrictStr + path: List[StrictStr] + configFormat: StrictStr = None + + class Config: + json_schema_extra = { + "example": { + "key": "id_key", + "op": "returnValue | returnValues | exists | showConfig", + "path": ["config", "mode", "path"], + "configFormat": "json (default) | json_ast | raw", + + } + } + +class ConfigFileModel(ApiModel): + op: StrictStr + file: StrictStr = None + + class Config: + json_schema_extra = { + "example": { + "key": "id_key", + "op": "save | load", + "file": "filename", + } + } + + +class ImageOp(str, Enum): + add = "add" + delete = "delete" + show = "show" + set_default = "set_default" + + +class ImageModel(ApiModel): + op: ImageOp + url: StrictStr = None + name: StrictStr = None + + @model_validator(mode='after') + def check_data(self) -> Self: + if self.op == 'add': + if not self.url: + raise ValueError("Missing required field \"url\"") + elif self.op in ['delete', 'set_default']: + if not self.name: + raise ValueError("Missing required field \"name\"") + + return self + + class Config: + json_schema_extra = { + "example": { + "key": "id_key", + "op": "add | delete | show | set_default", + "url": "imagelocation", + "name": "imagename", + } + } + +class ImportPkiModel(ApiModel): + op: StrictStr + path: List[StrictStr] + passphrase: StrictStr = None + + class Config: + json_schema_extra = { + "example": { + "key": "id_key", + "op": "import_pki", + "path": ["op", "mode", "path"], + "passphrase": "passphrase", + } + } + + +class 
ContainerImageModel(ApiModel): + op: StrictStr + name: StrictStr = None + + class Config: + json_schema_extra = { + "example": { + "key": "id_key", + "op": "add | delete | show", + "name": "imagename", + } + } + +class GenerateModel(ApiModel): + op: StrictStr + path: List[StrictStr] + + class Config: + json_schema_extra = { + "example": { + "key": "id_key", + "op": "generate", + "path": ["op", "mode", "path"], + } + } + +class ShowModel(ApiModel): + op: StrictStr + path: List[StrictStr] + + class Config: + json_schema_extra = { + "example": { + "key": "id_key", + "op": "show", + "path": ["op", "mode", "path"], + } + } + +class RebootModel(ApiModel): + op: StrictStr + path: List[StrictStr] + + class Config: + json_schema_extra = { + "example": { + "key": "id_key", + "op": "reboot", + "path": ["op", "mode", "path"], + } + } + +class ResetModel(ApiModel): + op: StrictStr + path: List[StrictStr] + + class Config: + json_schema_extra = { + "example": { + "key": "id_key", + "op": "reset", + "path": ["op", "mode", "path"], + } + } + +class PoweroffModel(ApiModel): + op: StrictStr + path: List[StrictStr] + + class Config: + json_schema_extra = { + "example": { + "key": "id_key", + "op": "poweroff", + "path": ["op", "mode", "path"], + } + } + + +class Success(BaseModel): + success: bool + data: Union[str, bool, Dict] + error: str + +class Error(BaseModel): + success: bool = False + data: Union[str, bool, Dict] + error: str + +responses = { + 200: {'model': Success}, + 400: {'model': Error}, + 422: {'model': Error, 'description': 'Validation Error'}, + 500: {'model': Error} +} diff --git a/src/services/api/rest/routers.py b/src/services/api/rest/routers.py new file mode 100644 index 00000000000..1568f7d0c35 --- /dev/null +++ b/src/services/api/rest/routers.py @@ -0,0 +1,685 @@ +# Copyright 2024 VyOS maintainers and contributors +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this library. If not, see . + + +# pylint: disable=line-too-long,raise-missing-from,invalid-name +# pylint: disable=wildcard-import,unused-wildcard-import +# pylint: disable=broad-exception-caught + +import copy +import logging +import traceback +from threading import Lock +from typing import Callable +from typing import TYPE_CHECKING + +from fastapi import Depends +from fastapi import Request +from fastapi import Response +from fastapi import HTTPException +from fastapi import APIRouter +from fastapi import BackgroundTasks +from fastapi.routing import APIRoute +from starlette.datastructures import FormData +from starlette.formparsers import FormParser +from starlette.formparsers import MultiPartParser +from starlette.formparsers import MultiPartException +from multipart.multipart import parse_options_header + +from vyos.config import Config +from vyos.configtree import ConfigTree +from vyos.configdiff import get_config_diff +from vyos.configsession import ConfigSessionError + +from .. session import SessionState +from . 
models import * + +if TYPE_CHECKING: + from fastapi import FastAPI + + +LOG = logging.getLogger('http_api.routers') + +lock = Lock() + + +def check_auth(key_list, key): + key_id = None + for k in key_list: + if k['key'] == key: + key_id = k['id'] + return key_id + + +def auth_required(data: ApiModel): + session = SessionState() + key = data.key + api_keys = session.keys + key_id = check_auth(api_keys, key) + if not key_id: + raise HTTPException(status_code=401, detail="Valid API key is required") + session.id = key_id + + +# override Request and APIRoute classes in order to convert form request to json; +# do all explicit validation here, for backwards compatability of error messages; +# the explicit validation may be dropped, if desired, in favor of native +# validation by FastAPI/Pydantic, as is used for application/json requests +class MultipartRequest(Request): + """Override Request class to convert form request to json""" + # pylint: disable=attribute-defined-outside-init + # pylint: disable=too-many-branches,too-many-statements + + _form_err = () + @property + def form_err(self): + return self._form_err + + @form_err.setter + def form_err(self, val): + if not self._form_err: + self._form_err = val + + @property + def orig_headers(self): + self._orig_headers = super().headers + return self._orig_headers + + @property + def headers(self): + self._headers = super().headers.mutablecopy() + self._headers['content-type'] = 'application/json' + return self._headers + + async def _get_form( + self, *, max_files: int | float = 1000, max_fields: int | float = 1000 + ) -> FormData: + if self._form is None: + assert ( + parse_options_header is not None + ), "The `python-multipart` library must be installed to use form parsing." + content_type_header = self.orig_headers.get("Content-Type") + content_type: bytes + content_type, _ = parse_options_header(content_type_header) + if content_type == b"multipart/form-data": + try: + multipart_parser = MultiPartParser( + self.orig_headers, + self.stream(), + max_files=max_files, + max_fields=max_fields, + ) + self._form = await multipart_parser.parse() + except MultiPartException as exc: + if "app" in self.scope: + raise HTTPException(status_code=400, detail=exc.message) + raise exc + elif content_type == b"application/x-www-form-urlencoded": + form_parser = FormParser(self.orig_headers, self.stream()) + self._form = await form_parser.parse() + else: + self._form = FormData() + return self._form + + async def body(self) -> bytes: + if not hasattr(self, "_body"): + forms = {} + merge = {} + body = await super().body() + self._body = body + + form_data = await self.form() + if form_data: + endpoint = self.url.path + LOG.debug("processing form data") + for k, v in form_data.multi_items(): + forms[k] = v + + if 'data' not in forms: + self.form_err = (422, "Non-empty data field is required") + return self._body + try: + tmp = json.loads(forms['data']) + except json.JSONDecodeError as e: + self.form_err = (400, f'Failed to parse JSON: {e}') + return self._body + if isinstance(tmp, list): + merge['commands'] = tmp + else: + merge = tmp + + if 'commands' in merge: + cmds = merge['commands'] + else: + cmds = copy.deepcopy(merge) + cmds = [cmds] + + for c in cmds: + if not isinstance(c, dict): + self.form_err = (400, + f"Malformed command '{c}': any command must be JSON of dict") + return self._body + if 'op' not in c: + self.form_err = (400, + f"Malformed command '{c}': missing 'op' field") + if endpoint not in ('/config-file', '/container-image', + '/image', 
'/configure-section'): + if 'path' not in c: + self.form_err = (400, + f"Malformed command '{c}': missing 'path' field") + elif not isinstance(c['path'], list): + self.form_err = (400, + f"Malformed command '{c}': 'path' field must be a list") + elif not all(isinstance(el, str) for el in c['path']): + self.form_err = (400, + f"Malformed command '{0}': 'path' field must be a list of strings") + if endpoint in ('/configure'): + if not c['path']: + self.form_err = (400, + f"Malformed command '{c}': 'path' list must be non-empty") + if 'value' in c and not isinstance(c['value'], str): + self.form_err = (400, + f"Malformed command '{c}': 'value' field must be a string") + if endpoint in ('/configure-section'): + if 'section' not in c and 'config' not in c: + self.form_err = (400, + f"Malformed command '{c}': missing 'section' or 'config' field") + + if 'key' not in forms and 'key' not in merge: + self.form_err = (401, "Valid API key is required") + if 'key' in forms and 'key' not in merge: + merge['key'] = forms['key'] + + new_body = json.dumps(merge) + new_body = new_body.encode() + self._body = new_body + + return self._body + + +class MultipartRoute(APIRoute): + """Override APIRoute class to convert form request to json""" + + def get_route_handler(self) -> Callable: + original_route_handler = super().get_route_handler() + + async def custom_route_handler(request: Request) -> Response: + request = MultipartRequest(request.scope, request.receive) + try: + response: Response = await original_route_handler(request) + except HTTPException as e: + return error(e.status_code, e.detail) + except Exception as e: + form_err = request.form_err + if form_err: + return error(*form_err) + raise e + + return response + + return custom_route_handler + + +router = APIRouter(route_class=MultipartRoute, + responses={**responses}, + dependencies=[Depends(auth_required)]) + + +self_ref_msg = "Requested HTTP API server configuration change; commit will be called in the background" + +def call_commit(s: SessionState): + try: + s.session.commit() + except ConfigSessionError as e: + s.session.discard() + if s.debug: + LOG.warning(f"ConfigSessionError:\n {traceback.format_exc()}") + else: + LOG.warning(f"ConfigSessionError: {e}") + + +def _configure_op(data: Union[ConfigureModel, ConfigureListModel, + ConfigSectionModel, ConfigSectionListModel, + ConfigSectionTreeModel], + _request: Request, background_tasks: BackgroundTasks): + # pylint: disable=too-many-branches,too-many-locals,too-many-nested-blocks,too-many-statements + # pylint: disable=consider-using-with + + state = SessionState() + session = state.session + env = session.get_session_env() + + # Allow users to pass just one command + if not isinstance(data, (ConfigureListModel, ConfigSectionListModel)): + data = [data] + else: + data = data.commands + + # We don't want multiple people/apps to be able to commit at once, + # or modify the shared session while someone else is doing the same, + # so the lock is really global + lock.acquire() + + config = Config(session_env=env) + + status = 200 + msg = None + error_msg = None + try: + for c in data: + op = c.op + if not isinstance(c, BaseConfigSectionTreeModel): + path = c.path + + if isinstance(c, BaseConfigureModel): + if c.value: + value = c.value + else: + value = "" + # For vyos.configsession calls that have no separate value arguments, + # and for type checking too + cfg_path = " ".join(path + [value]).strip() + + elif isinstance(c, BaseConfigSectionModel): + section = c.section + + elif isinstance(c, 
BaseConfigSectionTreeModel): + mask = c.mask + config = c.config + + if isinstance(c, BaseConfigureModel): + if op == 'set': + session.set(path, value=value) + elif op == 'delete': + if state.strict and not config.exists(cfg_path): + raise ConfigSessionError(f"Cannot delete [{cfg_path}]: path/value does not exist") + session.delete(path, value=value) + elif op == 'comment': + session.comment(path, value=value) + else: + raise ConfigSessionError(f"'{op}' is not a valid operation") + + elif isinstance(c, BaseConfigSectionModel): + if op == 'set': + session.set_section(path, section) + elif op == 'load': + session.load_section(path, section) + else: + raise ConfigSessionError(f"'{op}' is not a valid operation") + + elif isinstance(c, BaseConfigSectionTreeModel): + if op == 'set': + session.set_section_tree(config) + elif op == 'load': + session.load_section_tree(mask, config) + else: + raise ConfigSessionError(f"'{op}' is not a valid operation") + # end for + config = Config(session_env=env) + d = get_config_diff(config) + + if d.is_node_changed(['service', 'https']): + background_tasks.add_task(call_commit, state) + msg = self_ref_msg + else: + # capture non-fatal warnings + out = session.commit() + msg = out if out else msg + + LOG.info(f"Configuration modified via HTTP API using key '{state.id}'") + except ConfigSessionError as e: + session.discard() + status = 400 + if state.debug: + LOG.critical(f"ConfigSessionError:\n {traceback.format_exc()}") + error_msg = str(e) + except Exception: + session.discard() + LOG.critical(traceback.format_exc()) + status = 500 + + # Don't give the details away to the outer world + error_msg = "An internal error occured. Check the logs for details." + finally: + lock.release() + + if status != 200: + return error(status, error_msg) + + return success(msg) + + +def create_path_import_pki_no_prompt(path): + correct_paths = ['ca', 'certificate', 'key-pair'] + if path[1] not in correct_paths: + return False + path[1] = '--' + path[1].replace('-', '') + path[3] = '--key-filename' + return path[1:] + + +@router.post('/configure') +def configure_op(data: Union[ConfigureModel, + ConfigureListModel], + request: Request, background_tasks: BackgroundTasks): + return _configure_op(data, request, background_tasks) + + +@router.post('/configure-section') +def configure_section_op(data: Union[ConfigSectionModel, + ConfigSectionListModel, + ConfigSectionTreeModel], + request: Request, background_tasks: BackgroundTasks): + return _configure_op(data, request, background_tasks) + + +@router.post("/retrieve") +async def retrieve_op(data: RetrieveModel): + state = SessionState() + session = state.session + env = session.get_session_env() + config = Config(session_env=env) + + op = data.op + path = " ".join(data.path) + + try: + if op == 'returnValue': + res = config.return_value(path) + elif op == 'returnValues': + res = config.return_values(path) + elif op == 'exists': + res = config.exists(path) + elif op == 'showConfig': + config_format = 'json' + if data.configFormat: + config_format = data.configFormat + + res = session.show_config(path=data.path) + if config_format == 'json': + config_tree = ConfigTree(res) + res = json.loads(config_tree.to_json()) + elif config_format == 'json_ast': + config_tree = ConfigTree(res) + res = json.loads(config_tree.to_json_ast()) + elif config_format == 'raw': + pass + else: + return error(400, f"'{config_format}' is not a valid config format") + else: + return error(400, f"'{op}' is not a valid operation") + except ConfigSessionError as e: 
+ return error(400, str(e)) + except Exception: + LOG.critical(traceback.format_exc()) + return error(500, "An internal error occured. Check the logs for details.") + + return success(res) + + +@router.post('/config-file') +def config_file_op(data: ConfigFileModel, background_tasks: BackgroundTasks): + state = SessionState() + session = state.session + env = session.get_session_env() + op = data.op + msg = None + + try: + if op == 'save': + if data.file: + path = data.file + else: + path = '/config/config.boot' + msg = session.save_config(path) + elif op == 'load': + if data.file: + path = data.file + else: + return error(400, "Missing required field \"file\"") + + session.migrate_and_load_config(path) + + config = Config(session_env=env) + d = get_config_diff(config) + + if d.is_node_changed(['service', 'https']): + background_tasks.add_task(call_commit, state) + msg = self_ref_msg + else: + session.commit() + else: + return error(400, f"'{op}' is not a valid operation") + except ConfigSessionError as e: + return error(400, str(e)) + except Exception: + LOG.critical(traceback.format_exc()) + return error(500, "An internal error occured. Check the logs for details.") + + return success(msg) + + +@router.post('/image') +def image_op(data: ImageModel): + state = SessionState() + session = state.session + + op = data.op + + try: + if op == 'add': + res = session.install_image(data.url) + elif op == 'delete': + res = session.remove_image(data.name) + elif op == 'show': + res = session.show(["system", "image"]) + elif op == 'set_default': + res = session.set_default_image(data.name) + except ConfigSessionError as e: + return error(400, str(e)) + except Exception: + LOG.critical(traceback.format_exc()) + return error(500, "An internal error occured. Check the logs for details.") + + return success(res) + + +@router.post('/container-image') +def container_image_op(data: ContainerImageModel): + state = SessionState() + session = state.session + + op = data.op + + try: + if op == 'add': + if data.name: + name = data.name + else: + return error(400, "Missing required field \"name\"") + res = session.add_container_image(name) + elif op == 'delete': + if data.name: + name = data.name + else: + return error(400, "Missing required field \"name\"") + res = session.delete_container_image(name) + elif op == 'show': + res = session.show_container_image() + else: + return error(400, f"'{op}' is not a valid operation") + except ConfigSessionError as e: + return error(400, str(e)) + except Exception: + LOG.critical(traceback.format_exc()) + return error(500, "An internal error occured. Check the logs for details.") + + return success(res) + + +@router.post('/generate') +def generate_op(data: GenerateModel): + state = SessionState() + session = state.session + + op = data.op + path = data.path + + try: + if op == 'generate': + res = session.generate(path) + else: + return error(400, f"'{op}' is not a valid operation") + except ConfigSessionError as e: + return error(400, str(e)) + except Exception: + LOG.critical(traceback.format_exc()) + return error(500, "An internal error occured. 
Check the logs for details.") + + return success(res) + + +@router.post('/show') +def show_op(data: ShowModel): + state = SessionState() + session = state.session + + op = data.op + path = data.path + + try: + if op == 'show': + res = session.show(path) + else: + return error(400, f"'{op}' is not a valid operation") + except ConfigSessionError as e: + return error(400, str(e)) + except Exception: + LOG.critical(traceback.format_exc()) + return error(500, "An internal error occured. Check the logs for details.") + + return success(res) + + +@router.post('/reboot') +def reboot_op(data: RebootModel): + state = SessionState() + session = state.session + + op = data.op + path = data.path + + try: + if op == 'reboot': + res = session.reboot(path) + else: + return error(400, f"'{op}' is not a valid operation") + except ConfigSessionError as e: + return error(400, str(e)) + except Exception: + LOG.critical(traceback.format_exc()) + return error(500, "An internal error occured. Check the logs for details.") + + return success(res) + + +@router.post('/reset') +def reset_op(data: ResetModel): + state = SessionState() + session = state.session + + op = data.op + path = data.path + + try: + if op == 'reset': + res = session.reset(path) + else: + return error(400, f"'{op}' is not a valid operation") + except ConfigSessionError as e: + return error(400, str(e)) + except Exception: + LOG.critical(traceback.format_exc()) + return error(500, "An internal error occured. Check the logs for details.") + + return success(res) + + +@router.post('/import-pki') +def import_pki(data: ImportPkiModel): + # pylint: disable=consider-using-with + + state = SessionState() + session = state.session + + op = data.op + path = data.path + + lock.acquire() + + try: + if op == 'import-pki': + # need to get rid or interactive mode for private key + if len(path) == 5 and path[3] in ['key-file', 'private-key']: + path_no_prompt = create_path_import_pki_no_prompt(path) + if not path_no_prompt: + return error(400, f"Invalid command: {' '.join(path)}") + if data.passphrase: + path_no_prompt += ['--passphrase', data.passphrase] + res = session.import_pki_no_prompt(path_no_prompt) + else: + res = session.import_pki(path) + if not res[0].isdigit(): + return error(400, res) + # commit changes + session.commit() + res = res.split('. ')[0] + else: + return error(400, f"'{op}' is not a valid operation") + except ConfigSessionError as e: + return error(400, str(e)) + except Exception: + LOG.critical(traceback.format_exc()) + return error(500, "An internal error occured. Check the logs for details.") + finally: + lock.release() + + return success(res) + + +@router.post('/poweroff') +def poweroff_op(data: PoweroffModel): + state = SessionState() + session = state.session + + op = data.op + path = data.path + + try: + if op == 'poweroff': + res = session.poweroff(path) + else: + return error(400, f"'{op}' is not a valid operation") + except ConfigSessionError as e: + return error(400, str(e)) + except Exception: + LOG.critical(traceback.format_exc()) + return error(500, "An internal error occured. 
Check the logs for details.") + + return success(res) + + +def rest_init(app: "FastAPI"): + app.include_router(router) diff --git a/src/services/api/session.py b/src/services/api/session.py new file mode 100644 index 00000000000..dcdc7246c5c --- /dev/null +++ b/src/services/api/session.py @@ -0,0 +1,40 @@ +# Copyright 2024 VyOS maintainers and contributors +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this library. If not, see . + +class SessionState: + # pylint: disable=attribute-defined-outside-init + # pylint: disable=too-many-instance-attributes,too-few-public-methods + + _instance = None + + def __new__(cls): + if cls._instance is None: + cls._instance = super(SessionState, cls).__new__(cls) + cls._instance._initialize() + return cls._instance + + def _initialize(self): + self.session = None + self.keys = [] + self.id = None + self.rest = False + self.debug = False + self.strict = False + self.graphql = False + self.origins = [] + self.introspection = False + self.auth_type = None + self.token_exp = None + self.secret_len = None diff --git a/src/services/vyos-http-api-server b/src/services/vyos-http-api-server index 42c4cf387a5..456ea3d17bf 100755 --- a/src/services/vyos-http-api-server +++ b/src/services/vyos-http-api-server @@ -17,918 +17,51 @@ import os import sys import grp -import copy import json import logging import signal -import traceback -import threading -from enum import Enum - from time import sleep -from typing import List, Union, Callable, Dict, Self -from fastapi import FastAPI, Depends, Request, Response, HTTPException -from fastapi import BackgroundTasks -from fastapi.responses import HTMLResponse +from fastapi import FastAPI from fastapi.exceptions import RequestValidationError -from fastapi.routing import APIRoute -from pydantic import BaseModel, StrictStr, field_validator, model_validator -from starlette.middleware.cors import CORSMiddleware -from starlette.datastructures import FormData -from starlette.formparsers import FormParser, MultiPartParser -from multipart.multipart import parse_options_header from uvicorn import Config as UvicornConfig from uvicorn import Server as UvicornServer -from ariadne.asgi import GraphQL - -from vyos.config import Config -from vyos.configtree import ConfigTree -from vyos.configdiff import get_config_diff from vyos.configsession import ConfigSession -from vyos.configsession import ConfigSessionError from vyos.defaults import api_config_state -import api.graphql.state +from api.session import SessionState +from api.rest.models import error CFG_GROUP = 'vyattacfg' debug = True -logger = logging.getLogger(__name__) +LOG = logging.getLogger('http_api') logs_handler = logging.StreamHandler() -logger.addHandler(logs_handler) +LOG.addHandler(logs_handler) if debug: - logger.setLevel(logging.DEBUG) + LOG.setLevel(logging.DEBUG) else: - logger.setLevel(logging.INFO) + LOG.setLevel(logging.INFO) -# Giant lock! 
-lock = threading.Lock() def load_server_config(): with open(api_config_state) as f: config = json.load(f) return config -def check_auth(key_list, key): - key_id = None - for k in key_list: - if k['key'] == key: - key_id = k['id'] - return key_id - -def error(code, msg): - resp = {"success": False, "error": msg, "data": None} - resp = json.dumps(resp) - return HTMLResponse(resp, status_code=code) - -def success(data): - resp = {"success": True, "data": data, "error": None} - resp = json.dumps(resp) - return HTMLResponse(resp) - -# Pydantic models for validation -# Pydantic will cast when possible, so use StrictStr validators added as -# needed for additional constraints -# json_schema_extra adds anotations to OpenAPI to add examples - -class ApiModel(BaseModel): - key: StrictStr - -class BasePathModel(BaseModel): - op: StrictStr - path: List[StrictStr] - - @field_validator("path") - @classmethod - def check_non_empty(cls, path: str) -> str: - if not len(path) > 0: - raise ValueError('path must be non-empty') - return path - -class BaseConfigureModel(BasePathModel): - value: StrictStr = None - -class ConfigureModel(ApiModel, BaseConfigureModel): - class Config: - json_schema_extra = { - "example": { - "key": "id_key", - "op": "set | delete | comment", - "path": ['config', 'mode', 'path'], - } - } - -class ConfigureListModel(ApiModel): - commands: List[BaseConfigureModel] - - class Config: - json_schema_extra = { - "example": { - "key": "id_key", - "commands": "list of commands", - } - } - -class BaseConfigSectionModel(BasePathModel): - section: Dict - -class ConfigSectionModel(ApiModel, BaseConfigSectionModel): - pass - -class ConfigSectionListModel(ApiModel): - commands: List[BaseConfigSectionModel] - -class BaseConfigSectionTreeModel(BaseModel): - op: StrictStr - mask: Dict - config: Dict - -class ConfigSectionTreeModel(ApiModel, BaseConfigSectionTreeModel): - pass - -class RetrieveModel(ApiModel): - op: StrictStr - path: List[StrictStr] - configFormat: StrictStr = None - - class Config: - json_schema_extra = { - "example": { - "key": "id_key", - "op": "returnValue | returnValues | exists | showConfig", - "path": ['config', 'mode', 'path'], - "configFormat": "json (default) | json_ast | raw", - - } - } - -class ConfigFileModel(ApiModel): - op: StrictStr - file: StrictStr = None - - class Config: - json_schema_extra = { - "example": { - "key": "id_key", - "op": "save | load", - "file": "filename", - } - } - - -class ImageOp(str, Enum): - add = "add" - delete = "delete" - show = "show" - set_default = "set_default" - - -class ImageModel(ApiModel): - op: ImageOp - url: StrictStr = None - name: StrictStr = None - - @model_validator(mode='after') - def check_data(self) -> Self: - if self.op == 'add': - if not self.url: - raise ValueError("Missing required field \"url\"") - elif self.op in ['delete', 'set_default']: - if not self.name: - raise ValueError("Missing required field \"name\"") - - return self - - class Config: - json_schema_extra = { - "example": { - "key": "id_key", - "op": "add | delete | show | set_default", - "url": "imagelocation", - "name": "imagename", - } - } - -class ImportPkiModel(ApiModel): - op: StrictStr - path: List[StrictStr] - passphrase: StrictStr = None - - class Config: - json_schema_extra = { - "example": { - "key": "id_key", - "op": "import_pki", - "path": ["op", "mode", "path"], - "passphrase": "passphrase", - } - } - - -class ContainerImageModel(ApiModel): - op: StrictStr - name: StrictStr = None - - class Config: - json_schema_extra = { - "example": { - 
"key": "id_key", - "op": "add | delete | show", - "name": "imagename", - } - } - -class GenerateModel(ApiModel): - op: StrictStr - path: List[StrictStr] - - class Config: - json_schema_extra = { - "example": { - "key": "id_key", - "op": "generate", - "path": ["op", "mode", "path"], - } - } - -class ShowModel(ApiModel): - op: StrictStr - path: List[StrictStr] - - class Config: - json_schema_extra = { - "example": { - "key": "id_key", - "op": "show", - "path": ["op", "mode", "path"], - } - } - -class RebootModel(ApiModel): - op: StrictStr - path: List[StrictStr] - - class Config: - json_schema_extra = { - "example": { - "key": "id_key", - "op": "reboot", - "path": ["op", "mode", "path"], - } - } - -class ResetModel(ApiModel): - op: StrictStr - path: List[StrictStr] - - class Config: - json_schema_extra = { - "example": { - "key": "id_key", - "op": "reset", - "path": ["op", "mode", "path"], - } - } - -class PoweroffModel(ApiModel): - op: StrictStr - path: List[StrictStr] - - class Config: - json_schema_extra = { - "example": { - "key": "id_key", - "op": "poweroff", - "path": ["op", "mode", "path"], - } - } - - -class Success(BaseModel): - success: bool - data: Union[str, bool, Dict] - error: str - -class Error(BaseModel): - success: bool = False - data: Union[str, bool, Dict] - error: str - -responses = { - 200: {'model': Success}, - 400: {'model': Error}, - 422: {'model': Error, 'description': 'Validation Error'}, - 500: {'model': Error} -} - -def auth_required(data: ApiModel): - key = data.key - api_keys = app.state.vyos_keys - key_id = check_auth(api_keys, key) - if not key_id: - raise HTTPException(status_code=401, detail="Valid API key is required") - app.state.vyos_id = key_id - -# override Request and APIRoute classes in order to convert form request to json; -# do all explicit validation here, for backwards compatability of error messages; -# the explicit validation may be dropped, if desired, in favor of native -# validation by FastAPI/Pydantic, as is used for application/json requests -class MultipartRequest(Request): - _form_err = () - @property - def form_err(self): - return self._form_err - - @form_err.setter - def form_err(self, val): - if not self._form_err: - self._form_err = val - - @property - def orig_headers(self): - self._orig_headers = super().headers - return self._orig_headers - - @property - def headers(self): - self._headers = super().headers.mutablecopy() - self._headers['content-type'] = 'application/json' - return self._headers - - async def form(self) -> FormData: - if self._form is None: - assert ( - parse_options_header is not None - ), "The `python-multipart` library must be installed to use form parsing." 
- content_type_header = self.orig_headers.get("Content-Type") - content_type, options = parse_options_header(content_type_header) - if content_type == b"multipart/form-data": - multipart_parser = MultiPartParser(self.orig_headers, self.stream()) - self._form = await multipart_parser.parse() - elif content_type == b"application/x-www-form-urlencoded": - form_parser = FormParser(self.orig_headers, self.stream()) - self._form = await form_parser.parse() - else: - self._form = FormData() - return self._form - - async def body(self) -> bytes: - if not hasattr(self, "_body"): - forms = {} - merge = {} - body = await super().body() - self._body = body - - form_data = await self.form() - if form_data: - endpoint = self.url.path - logger.debug("processing form data") - for k, v in form_data.multi_items(): - forms[k] = v - - if 'data' not in forms: - self.form_err = (422, "Non-empty data field is required") - return self._body - else: - try: - tmp = json.loads(forms['data']) - except json.JSONDecodeError as e: - self.form_err = (400, f'Failed to parse JSON: {e}') - return self._body - if isinstance(tmp, list): - merge['commands'] = tmp - else: - merge = tmp - - if 'commands' in merge: - cmds = merge['commands'] - else: - cmds = copy.deepcopy(merge) - cmds = [cmds] - - for c in cmds: - if not isinstance(c, dict): - self.form_err = (400, - f"Malformed command '{c}': any command must be JSON of dict") - return self._body - if 'op' not in c: - self.form_err = (400, - f"Malformed command '{c}': missing 'op' field") - if endpoint not in ('/config-file', '/container-image', - '/image', '/configure-section'): - if 'path' not in c: - self.form_err = (400, - f"Malformed command '{c}': missing 'path' field") - elif not isinstance(c['path'], list): - self.form_err = (400, - f"Malformed command '{c}': 'path' field must be a list") - elif not all(isinstance(el, str) for el in c['path']): - self.form_err = (400, - f"Malformed command '{0}': 'path' field must be a list of strings") - if endpoint in ('/configure'): - if not c['path']: - self.form_err = (400, - f"Malformed command '{c}': 'path' list must be non-empty") - if 'value' in c and not isinstance(c['value'], str): - self.form_err = (400, - f"Malformed command '{c}': 'value' field must be a string") - if endpoint in ('/configure-section'): - if 'section' not in c and 'config' not in c: - self.form_err = (400, - f"Malformed command '{c}': missing 'section' or 'config' field") - - if 'key' not in forms and 'key' not in merge: - self.form_err = (401, "Valid API key is required") - if 'key' in forms and 'key' not in merge: - merge['key'] = forms['key'] - - new_body = json.dumps(merge) - new_body = new_body.encode() - self._body = new_body - - return self._body - -class MultipartRoute(APIRoute): - def get_route_handler(self) -> Callable: - original_route_handler = super().get_route_handler() - - async def custom_route_handler(request: Request) -> Response: - request = MultipartRequest(request.scope, request.receive) - try: - response: Response = await original_route_handler(request) - except HTTPException as e: - return error(e.status_code, e.detail) - except Exception as e: - form_err = request.form_err - if form_err: - return error(*form_err) - raise e - - return response - - return custom_route_handler app = FastAPI(debug=True, title="VyOS API", - version="0.1.0", - responses={**responses}, - dependencies=[Depends(auth_required)]) - -app.router.route_class = MultipartRoute + version="0.1.0") @app.exception_handler(RequestValidationError) -async def 
validation_exception_handler(request, exc): +async def validation_exception_handler(_request, exc): return error(400, str(exc.errors()[0])) -self_ref_msg = "Requested HTTP API server configuration change; commit will be called in the background" - -def call_commit(s: ConfigSession): - try: - s.commit() - except ConfigSessionError as e: - s.discard() - if app.state.vyos_debug: - logger.warning(f"ConfigSessionError:\n {traceback.format_exc()}") - else: - logger.warning(f"ConfigSessionError: {e}") - -def _configure_op(data: Union[ConfigureModel, ConfigureListModel, - ConfigSectionModel, ConfigSectionListModel, - ConfigSectionTreeModel], - request: Request, background_tasks: BackgroundTasks): - session = app.state.vyos_session - env = session.get_session_env() - - endpoint = request.url.path - - # Allow users to pass just one command - if not isinstance(data, (ConfigureListModel, ConfigSectionListModel)): - data = [data] - else: - data = data.commands - - # We don't want multiple people/apps to be able to commit at once, - # or modify the shared session while someone else is doing the same, - # so the lock is really global - lock.acquire() - - config = Config(session_env=env) - - status = 200 - msg = None - error_msg = None - try: - for c in data: - op = c.op - if not isinstance(c, BaseConfigSectionTreeModel): - path = c.path - - if isinstance(c, BaseConfigureModel): - if c.value: - value = c.value - else: - value = "" - # For vyos.configsession calls that have no separate value arguments, - # and for type checking too - cfg_path = " ".join(path + [value]).strip() - - elif isinstance(c, BaseConfigSectionModel): - section = c.section - - elif isinstance(c, BaseConfigSectionTreeModel): - mask = c.mask - config = c.config - - if isinstance(c, BaseConfigureModel): - if op == 'set': - session.set(path, value=value) - elif op == 'delete': - if app.state.vyos_strict and not config.exists(cfg_path): - raise ConfigSessionError(f"Cannot delete [{cfg_path}]: path/value does not exist") - session.delete(path, value=value) - elif op == 'comment': - session.comment(path, value=value) - else: - raise ConfigSessionError(f"'{op}' is not a valid operation") - - elif isinstance(c, BaseConfigSectionModel): - if op == 'set': - session.set_section(path, section) - elif op == 'load': - session.load_section(path, section) - else: - raise ConfigSessionError(f"'{op}' is not a valid operation") - - elif isinstance(c, BaseConfigSectionTreeModel): - if op == 'set': - session.set_section_tree(config) - elif op == 'load': - session.load_section_tree(mask, config) - else: - raise ConfigSessionError(f"'{op}' is not a valid operation") - # end for - config = Config(session_env=env) - d = get_config_diff(config) - - if d.is_node_changed(['service', 'https']): - background_tasks.add_task(call_commit, session) - msg = self_ref_msg - else: - # capture non-fatal warnings - out = session.commit() - msg = out if out else msg - - logger.info(f"Configuration modified via HTTP API using key '{app.state.vyos_id}'") - except ConfigSessionError as e: - session.discard() - status = 400 - if app.state.vyos_debug: - logger.critical(f"ConfigSessionError:\n {traceback.format_exc()}") - error_msg = str(e) - except Exception as e: - session.discard() - logger.critical(traceback.format_exc()) - status = 500 - - # Don't give the details away to the outer world - error_msg = "An internal error occured. Check the logs for details." 
- finally: - lock.release() - - if status != 200: - return error(status, error_msg) - - return success(msg) - -def create_path_import_pki_no_prompt(path): - correct_paths = ['ca', 'certificate', 'key-pair'] - if path[1] not in correct_paths: - return False - path[1] = '--' + path[1].replace('-', '') - path[3] = '--key-filename' - return path[1:] - -@app.post('/configure') -def configure_op(data: Union[ConfigureModel, - ConfigureListModel], - request: Request, background_tasks: BackgroundTasks): - return _configure_op(data, request, background_tasks) - -@app.post('/configure-section') -def configure_section_op(data: Union[ConfigSectionModel, - ConfigSectionListModel, - ConfigSectionTreeModel], - request: Request, background_tasks: BackgroundTasks): - return _configure_op(data, request, background_tasks) - -@app.post("/retrieve") -async def retrieve_op(data: RetrieveModel): - session = app.state.vyos_session - env = session.get_session_env() - config = Config(session_env=env) - - op = data.op - path = " ".join(data.path) - try: - if op == 'returnValue': - res = config.return_value(path) - elif op == 'returnValues': - res = config.return_values(path) - elif op == 'exists': - res = config.exists(path) - elif op == 'showConfig': - config_format = 'json' - if data.configFormat: - config_format = data.configFormat - - res = session.show_config(path=data.path) - if config_format == 'json': - config_tree = ConfigTree(res) - res = json.loads(config_tree.to_json()) - elif config_format == 'json_ast': - config_tree = ConfigTree(res) - res = json.loads(config_tree.to_json_ast()) - elif config_format == 'raw': - pass - else: - return error(400, f"'{config_format}' is not a valid config format") - else: - return error(400, f"'{op}' is not a valid operation") - except ConfigSessionError as e: - return error(400, str(e)) - except Exception as e: - logger.critical(traceback.format_exc()) - return error(500, "An internal error occured. Check the logs for details.") - - return success(res) - -@app.post('/config-file') -def config_file_op(data: ConfigFileModel, background_tasks: BackgroundTasks): - session = app.state.vyos_session - env = session.get_session_env() - op = data.op - msg = None - - try: - if op == 'save': - if data.file: - path = data.file - else: - path = '/config/config.boot' - msg = session.save_config(path) - elif op == 'load': - if data.file: - path = data.file - else: - return error(400, "Missing required field \"file\"") - - session.migrate_and_load_config(path) - - config = Config(session_env=env) - d = get_config_diff(config) - - if d.is_node_changed(['service', 'https']): - background_tasks.add_task(call_commit, session) - msg = self_ref_msg - else: - session.commit() - else: - return error(400, f"'{op}' is not a valid operation") - except ConfigSessionError as e: - return error(400, str(e)) - except Exception as e: - logger.critical(traceback.format_exc()) - return error(500, "An internal error occured. 
Check the logs for details.") - - return success(msg) - -@app.post('/image') -def image_op(data: ImageModel): - session = app.state.vyos_session - - op = data.op - - try: - if op == 'add': - res = session.install_image(data.url) - elif op == 'delete': - res = session.remove_image(data.name) - elif op == 'show': - res = session.show(["system", "image"]) - elif op == 'set_default': - res = session.set_default_image(data.name) - except ConfigSessionError as e: - return error(400, str(e)) - except Exception as e: - logger.critical(traceback.format_exc()) - return error(500, "An internal error occured. Check the logs for details.") - - return success(res) - -@app.post('/container-image') -def container_image_op(data: ContainerImageModel): - session = app.state.vyos_session - - op = data.op - - try: - if op == 'add': - if data.name: - name = data.name - else: - return error(400, "Missing required field \"name\"") - res = session.add_container_image(name) - elif op == 'delete': - if data.name: - name = data.name - else: - return error(400, "Missing required field \"name\"") - res = session.delete_container_image(name) - elif op == 'show': - res = session.show_container_image() - else: - return error(400, f"'{op}' is not a valid operation") - except ConfigSessionError as e: - return error(400, str(e)) - except Exception as e: - logger.critical(traceback.format_exc()) - return error(500, "An internal error occured. Check the logs for details.") - - return success(res) - -@app.post('/generate') -def generate_op(data: GenerateModel): - session = app.state.vyos_session - - op = data.op - path = data.path - - try: - if op == 'generate': - res = session.generate(path) - else: - return error(400, f"'{op}' is not a valid operation") - except ConfigSessionError as e: - return error(400, str(e)) - except Exception as e: - logger.critical(traceback.format_exc()) - return error(500, "An internal error occured. Check the logs for details.") - - return success(res) - -@app.post('/show') -def show_op(data: ShowModel): - session = app.state.vyos_session - - op = data.op - path = data.path - - try: - if op == 'show': - res = session.show(path) - else: - return error(400, f"'{op}' is not a valid operation") - except ConfigSessionError as e: - return error(400, str(e)) - except Exception as e: - logger.critical(traceback.format_exc()) - return error(500, "An internal error occured. Check the logs for details.") - - return success(res) - -@app.post('/reboot') -def reboot_op(data: RebootModel): - session = app.state.vyos_session - - op = data.op - path = data.path - - try: - if op == 'reboot': - res = session.reboot(path) - else: - return error(400, f"'{op}' is not a valid operation") - except ConfigSessionError as e: - return error(400, str(e)) - except Exception as e: - logger.critical(traceback.format_exc()) - return error(500, "An internal error occured. Check the logs for details.") - - return success(res) - -@app.post('/reset') -def reset_op(data: ResetModel): - session = app.state.vyos_session - - op = data.op - path = data.path - - try: - if op == 'reset': - res = session.reset(path) - else: - return error(400, f"'{op}' is not a valid operation") - except ConfigSessionError as e: - return error(400, str(e)) - except Exception as e: - logger.critical(traceback.format_exc()) - return error(500, "An internal error occured. 
Check the logs for details.") - - return success(res) - -@app.post('/import-pki') -def import_pki(data: ImportPkiModel): - session = app.state.vyos_session - - op = data.op - path = data.path - - lock.acquire() - - try: - if op == 'import-pki': - # need to get rid or interactive mode for private key - if len(path) == 5 and path[3] in ['key-file', 'private-key']: - path_no_prompt = create_path_import_pki_no_prompt(path) - if not path_no_prompt: - return error(400, f"Invalid command: {' '.join(path)}") - if data.passphrase: - path_no_prompt += ['--passphrase', data.passphrase] - res = session.import_pki_no_prompt(path_no_prompt) - else: - res = session.import_pki(path) - if not res[0].isdigit(): - return error(400, res) - # commit changes - session.commit() - res = res.split('. ')[0] - else: - return error(400, f"'{op}' is not a valid operation") - except ConfigSessionError as e: - return error(400, str(e)) - except Exception as e: - logger.critical(traceback.format_exc()) - return error(500, "An internal error occured. Check the logs for details.") - finally: - lock.release() - - return success(res) - -@app.post('/poweroff') -def poweroff_op(data: PoweroffModel): - session = app.state.vyos_session - - op = data.op - path = data.path - - try: - if op == 'poweroff': - res = session.poweroff(path) - else: - return error(400, f"'{op}' is not a valid operation") - except ConfigSessionError as e: - return error(400, str(e)) - except Exception as e: - logger.critical(traceback.format_exc()) - return error(500, "An internal error occured. Check the logs for details.") - - return success(res) - - -### -# GraphQL integration -### - -def graphql_init(app: FastAPI = app): - from api.graphql.libs.token_auth import get_user_context - api.graphql.state.init() - api.graphql.state.settings['app'] = app - - # import after initializaion of state - from api.graphql.bindings import generate_schema - schema = generate_schema() - - in_spec = app.state.vyos_introspection - - if app.state.vyos_origins: - origins = app.state.vyos_origins - app.add_route('/graphql', CORSMiddleware(GraphQL(schema, - context_value=get_user_context, - debug=True, - introspection=in_spec), - allow_origins=origins, - allow_methods=("GET", "POST", "OPTIONS"), - allow_headers=("Authorization",))) - else: - app.add_route('/graphql', GraphQL(schema, - context_value=get_user_context, - debug=True, - introspection=in_spec)) ### # Modify uvicorn to allow reloading server within the configsession ### @@ -936,30 +69,41 @@ def graphql_init(app: FastAPI = app): server = None shutdown = False + class ApiServerConfig(UvicornConfig): pass + class ApiServer(UvicornServer): def install_signal_handlers(self): pass + def reload_handler(signum, frame): + # pylint: disable=global-statement + global server - logger.debug('Reload signal received...') + LOG.debug('Reload signal received...') if server is not None: server.handle_exit(signum, frame) server = None - logger.info('Server stopping for reload...') + LOG.info('Server stopping for reload...') else: - logger.warning('Reload called for non-running server...') + LOG.warning('Reload called for non-running server...') + def shutdown_handler(signum, frame): + # pylint: disable=global-statement + global shutdown - logger.debug('Shutdown signal received...') + LOG.debug('Shutdown signal received...') server.handle_exit(signum, frame) - logger.info('Server shutdown...') + LOG.info('Server shutdown...') shutdown = True +# end modify uvicorn + + def flatten_keys(d: dict) -> list[dict]: keys_list = [] for el in 
list(d['keys'].get('id', {})): @@ -968,49 +112,62 @@ def flatten_keys(d: dict) -> list[dict]: keys_list.append({'id': el, 'key': key}) return keys_list -def initialization(session: ConfigSession, app: FastAPI = app): + +def initialization(session: SessionState, app: FastAPI = app): + # pylint: disable=global-statement,broad-exception-caught,import-outside-toplevel + global server try: server_config = load_server_config() except Exception as e: - logger.critical(f'Failed to load the HTTP API server config: {e}') + LOG.critical(f'Failed to load the HTTP API server config: {e}') sys.exit(1) - app.state.vyos_session = session - app.state.vyos_keys = [] - if 'keys' in server_config: - app.state.vyos_keys = flatten_keys(server_config) + session.keys = flatten_keys(server_config) + + rest_config = server_config.get('rest', {}) + session.debug = bool('debug' in rest_config) + session.strict = bool('strict' in rest_config) + + graphql_config = server_config.get('graphql', {}) + session.origins = graphql_config.get('cors', {}).get('allow_origin', []) + + if 'rest' in server_config: + session.rest = True + else: + session.rest = False - app.state.vyos_debug = bool('debug' in server_config) - app.state.vyos_strict = bool('strict' in server_config) - app.state.vyos_origins = server_config.get('cors', {}).get('allow_origin', []) if 'graphql' in server_config: - app.state.vyos_graphql = True + session.graphql = True if isinstance(server_config['graphql'], dict): if 'introspection' in server_config['graphql']: - app.state.vyos_introspection = True + session.introspection = True else: - app.state.vyos_introspection = False + session.introspection = False # default values if not set explicitly - app.state.vyos_auth_type = server_config['graphql']['authentication']['type'] - app.state.vyos_token_exp = server_config['graphql']['authentication']['expiration'] - app.state.vyos_secret_len = server_config['graphql']['authentication']['secret_length'] + session.auth_type = server_config['graphql']['authentication']['type'] + session.token_exp = server_config['graphql']['authentication']['expiration'] + session.secret_len = server_config['graphql']['authentication']['secret_length'] else: - app.state.vyos_graphql = False + session.graphql = False + + # pass session state + app.state = session - if app.state.vyos_graphql: + # add REST routes + if session.rest: + from api.rest.routers import rest_init + rest_init(app) + + # add GraphQL route + if session.graphql: + from api.graphql.routers import graphql_init graphql_init(app) config = ApiServerConfig(app, uds="/run/api.sock", proxy_headers=True) server = ApiServer(config) -def run_server(): - try: - server.run() - except OSError as e: - logger.critical(e) - sys.exit(1) if __name__ == '__main__': # systemd's user and group options don't work, do it by hand here, @@ -1025,13 +182,14 @@ if __name__ == '__main__': signal.signal(signal.SIGHUP, reload_handler) signal.signal(signal.SIGTERM, shutdown_handler) - config_session = ConfigSession(os.getpid()) + session_state = SessionState() + session_state.session = ConfigSession(os.getpid()) while True: - logger.debug('Enter main loop...') + LOG.debug('Enter main loop...') if shutdown: break if server is None: - initialization(config_session) + initialization(session_state) server.run() sleep(1) From 954be34bc938acc9e14d9fb3b32c8f96cd999959 Mon Sep 17 00:00:00 2001 From: John Estabrook Date: Wed, 25 Sep 2024 14:02:52 -0500 Subject: [PATCH 06/85] http-api: T6736: remove routes on config delete Avoid duplicate entries in the 
list of routes when adding/deleting endpoints. --- src/services/api/graphql/routers.py | 10 ++++++++++ src/services/api/rest/routers.py | 8 ++++++++ src/services/vyos-http-api-server | 10 ++++++++++ 3 files changed, 28 insertions(+) diff --git a/src/services/api/graphql/routers.py b/src/services/api/graphql/routers.py index d04375a49e6..f02380cdc88 100644 --- a/src/services/api/graphql/routers.py +++ b/src/services/api/graphql/routers.py @@ -38,6 +38,10 @@ def graphql_init(app: "FastAPI"): in_spec = state.introspection + # remove route and reinstall below, for any changes; alternatively, test + # for config_diff before proceeding + graphql_clear(app) + if state.origins: origins = state.origins app.add_route('/graphql', CORSMiddleware(GraphQL(schema, @@ -52,3 +56,9 @@ def graphql_init(app: "FastAPI"): context_value=get_user_context, debug=True, introspection=in_spec)) + + +def graphql_clear(app: "FastAPI"): + for r in app.routes: + if r.path == '/graphql': + app.routes.remove(r) diff --git a/src/services/api/rest/routers.py b/src/services/api/rest/routers.py index 1568f7d0c35..38b10ef7d25 100644 --- a/src/services/api/rest/routers.py +++ b/src/services/api/rest/routers.py @@ -682,4 +682,12 @@ def poweroff_op(data: PoweroffModel): def rest_init(app: "FastAPI"): + if all(r in app.routes for r in router.routes): + return app.include_router(router) + + +def rest_clear(app: "FastAPI"): + for r in router.routes: + if r in app.routes: + app.routes.remove(r) diff --git a/src/services/vyos-http-api-server b/src/services/vyos-http-api-server index 456ea3d17bf..6bfc2c4352c 100755 --- a/src/services/vyos-http-api-server +++ b/src/services/vyos-http-api-server @@ -159,11 +159,21 @@ def initialization(session: SessionState, app: FastAPI = app): if session.rest: from api.rest.routers import rest_init rest_init(app) + else: + from api.rest.routers import rest_clear + rest_clear(app) # add GraphQL route if session.graphql: from api.graphql.routers import graphql_init graphql_init(app) + else: + from api.graphql.routers import graphql_clear + graphql_clear(app) + + LOG.debug('Active routes are:') + for r in app.routes: + LOG.debug(f'{r.path}') config = ApiServerConfig(app, uds="/run/api.sock", proxy_headers=True) server = ApiServer(config) From 9f7767c8022f5e6ad70253ef1c94fc22dbd04f1e Mon Sep 17 00:00:00 2001 From: John Estabrook Date: Wed, 25 Sep 2024 22:10:34 -0500 Subject: [PATCH 07/85] http-api: T6736: regenerate openapi docs --- src/services/vyos-http-api-server | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/src/services/vyos-http-api-server b/src/services/vyos-http-api-server index 6bfc2c4352c..5585611824a 100755 --- a/src/services/vyos-http-api-server +++ b/src/services/vyos-http-api-server @@ -113,6 +113,19 @@ def flatten_keys(d: dict) -> list[dict]: return keys_list +def regenerate_docs(app: FastAPI) -> None: + docs = ('/openapi.json', '/docs', '/docs/oauth2-redirect', '/redoc') + remove = [] + for r in app.routes: + if r.path in docs: + remove.append(r) + for r in remove: + app.routes.remove(r) + + app.openapi_schema = None + app.setup() + + def initialization(session: SessionState, app: FastAPI = app): # pylint: disable=global-statement,broad-exception-caught,import-outside-toplevel @@ -171,6 +184,8 @@ def initialization(session: SessionState, app: FastAPI = app): from api.graphql.routers import graphql_clear graphql_clear(app) + regenerate_docs(app) + LOG.debug('Active routes are:') for r in app.routes: LOG.debug(f'{r.path}') From 8183e2cc8178f5da67a918ee37c21797a05c142d Mon 
Sep 17 00:00:00 2001 From: John Estabrook Date: Wed, 25 Sep 2024 08:43:41 -0500 Subject: [PATCH 08/85] http-api: T6736: add distinct XML path for REST API --- interface-definitions/service_https.xml.in | 53 ++++++++++++---------- 1 file changed, 30 insertions(+), 23 deletions(-) diff --git a/interface-definitions/service_https.xml.in b/interface-definitions/service_https.xml.in index afe430c0c83..7bb63fa5a65 100644 --- a/interface-definitions/service_https.xml.in +++ b/interface-definitions/service_https.xml.in @@ -32,22 +32,29 @@ - + - Enforce strict path checking - + REST API - - - - Debug - - - - + + + + Enforce strict path checking + + + + + + Debug + + + + + + - GraphQL support + GraphQL API @@ -109,19 +116,19 @@ - - - - - Set CORS options - - - + - Allow resource request from origin - + Set CORS options - + + + + Allow resource request from origin + + + + + From 8322cc4d82067d7efe5051250c58d8cfbc895bce Mon Sep 17 00:00:00 2001 From: John Estabrook Date: Sun, 29 Sep 2024 16:44:29 -0500 Subject: [PATCH 09/85] http-api: T6736: update smoketest for syntax change --- smoketest/scripts/cli/test_service_https.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/smoketest/scripts/cli/test_service_https.py b/smoketest/scripts/cli/test_service_https.py index 8a6386e4f91..2f4fcd6abcf 100755 --- a/smoketest/scripts/cli/test_service_https.py +++ b/smoketest/scripts/cli/test_service_https.py @@ -154,6 +154,8 @@ def test_api_auth(self): key = 'MySuperSecretVyOS' self.cli_set(base_path + ['api', 'keys', 'id', 'key-01', 'key', key]) + self.cli_set(base_path + ['api', 'rest']) + self.cli_set(base_path + ['listen-address', address]) self.cli_commit() @@ -304,6 +306,7 @@ def test_api_add_delete(self): self.assertEqual(r.status_code, 503) self.cli_set(base_path + ['api', 'keys', 'id', 'key-01', 'key', key]) + self.cli_set(base_path + ['api', 'rest']) self.cli_commit() sleep(2) @@ -326,6 +329,7 @@ def test_api_show(self): headers = {} self.cli_set(base_path + ['api', 'keys', 'id', 'key-01', 'key', key]) + self.cli_set(base_path + ['api', 'rest']) self.cli_commit() payload = { @@ -343,6 +347,7 @@ def test_api_generate(self): headers = {} self.cli_set(base_path + ['api', 'keys', 'id', 'key-01', 'key', key]) + self.cli_set(base_path + ['api', 'rest']) self.cli_commit() payload = { @@ -362,6 +367,7 @@ def test_api_configure(self): conf_address = '192.0.2.44/32' self.cli_set(base_path + ['api', 'keys', 'id', 'key-01', 'key', key]) + self.cli_set(base_path + ['api', 'rest']) self.cli_commit() payload_path = [ @@ -385,6 +391,7 @@ def test_api_config_file(self): headers = {} self.cli_set(base_path + ['api', 'keys', 'id', 'key-01', 'key', key]) + self.cli_set(base_path + ['api', 'rest']) self.cli_commit() payload = { @@ -402,6 +409,7 @@ def test_api_reset(self): headers = {} self.cli_set(base_path + ['api', 'keys', 'id', 'key-01', 'key', key]) + self.cli_set(base_path + ['api', 'rest']) self.cli_commit() payload = { @@ -419,6 +427,7 @@ def test_api_image(self): headers = {} self.cli_set(base_path + ['api', 'keys', 'id', 'key-01', 'key', key]) + self.cli_set(base_path + ['api', 'rest']) self.cli_commit() payload = { @@ -462,6 +471,7 @@ def test_api_config_file_load_http(self): headers = {} self.cli_set(base_path + ['api', 'keys', 'id', 'key-01', 'key', key]) + self.cli_set(base_path + ['api', 'rest']) self.cli_commit() # load config via HTTP requires nginx config From 40d966310cb5d8d758f7fa801facd0a560795783 Mon Sep 17 00:00:00 2001 From: John Estabrook Date: Sun, 29 Sep 2024 20:49:30 -0500 Subject: 
[PATCH 10/85] http-api: T6736: add migration script and update version --- .../include/version/https-version.xml.i | 2 +- src/migration-scripts/https/6-to-7 | 43 +++++++++++++++++++ 2 files changed, 44 insertions(+), 1 deletion(-) create mode 100644 src/migration-scripts/https/6-to-7 diff --git a/interface-definitions/include/version/https-version.xml.i b/interface-definitions/include/version/https-version.xml.i index 525314dbd98..a889a7805f2 100644 --- a/interface-definitions/include/version/https-version.xml.i +++ b/interface-definitions/include/version/https-version.xml.i @@ -1,3 +1,3 @@ - + diff --git a/src/migration-scripts/https/6-to-7 b/src/migration-scripts/https/6-to-7 new file mode 100644 index 00000000000..571f3b6ae51 --- /dev/null +++ b/src/migration-scripts/https/6-to-7 @@ -0,0 +1,43 @@ +# Copyright 2024 VyOS maintainers and contributors +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this library. If not, see . + +# T6736: move REST API to distinct node + + +from vyos.configtree import ConfigTree + + +base = ['service', 'https', 'api'] + +def migrate(config: ConfigTree) -> None: + if not config.exists(base): + # Nothing to do + return + + # Move REST API configuration to new node + # REST API was previously enabled if base path exists + config.set(['service', 'https', 'api', 'rest']) + for entry in ('debug', 'strict'): + if config.exists(base + [entry]): + config.set(base + ['rest', entry]) + config.delete(base + [entry]) + + # Move CORS settings under GraphQL + # CORS is not implemented for REST API + if config.exists(base + ['cors']): + old_base = base + ['cors'] + new_base = base + ['graphql', 'cors'] + config.copy(old_base, new_base) + config.delete(old_base) From 3e94e5e318b852dfca36e64d078728d4f5d5304c Mon Sep 17 00:00:00 2001 From: Christian Breunig Date: Mon, 30 Sep 2024 16:11:55 +0200 Subject: [PATCH 11/85] policy: T6751: add missing completion helpers for community-list Add all missing, well-known values for the community-list regex. 
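For reference, each AA:NN string in the completion help that follows is simply the upper and lower 16-bit halves of the corresponding 32-bit well-known community value. A minimal sketch of the conversion (illustrative only, not part of this patch; the helper name is hypothetical):

    def community_to_aann(value: int) -> str:
        """Render a 32-bit BGP community value in AA:NN notation."""
        return f'{value >> 16}:{value & 0xFFFF}'

    # Matches the values listed in the XML below
    assert community_to_aann(0xFFFF029A) == '65535:666'    # BLACKHOLE
    assert community_to_aann(0xFFFFFF04) == '65535:65284'  # NOPEER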
--- interface-definitions/policy.xml.in | 48 +++++++++++++++++++++++++++-- 1 file changed, 46 insertions(+), 2 deletions(-) diff --git a/interface-definitions/policy.xml.in b/interface-definitions/policy.xml.in index eb907cb9e40..cbab6173f64 100644 --- a/interface-definitions/policy.xml.in +++ b/interface-definitions/policy.xml.in @@ -202,11 +202,11 @@ Regular expression to match against a community-list - local-AS no-advertise no-export internet additive + local-AS no-advertise no-export internet graceful-shutdown accept-own-nexthop accept-own route-filter-translated-v4 route-filter-v4 route-filter-translated-v6 route-filter-v6 llgr-stale no-llgr blackhole no-peer additive <aa:nn> - Community number in AA:NN format + Community number in AA:NN format where AA and NN are (0-65535) local-AS @@ -224,6 +224,50 @@ internet Well-known communities value 0 + + graceful-shutdown + Well-known communities value GRACEFUL_SHUTDOWN 0xFFFF0000 + + + accept-own-nexthop + Well-known communities value ACCEPT_OWN_NEXTHOP 0xFFFF0008 + + + accept-own + Well-known communities value ACCEPT_OWN 0xFFFF0001 65535:1 + + + route-filter-translated-v4 + Well-known communities value ROUTE_FILTER_TRANSLATED_v4 0xFFFF0002 65535:2 + + + route-filter-v4 + Well-known communities value ROUTE_FILTER_v4 0xFFFF0003 65535:3 + + + route-filter-translated-v6 + Well-known communities value ROUTE_FILTER_TRANSLATED_v6 0xFFFF0004 65535:4 + + + route-filter-v6 + Well-known communities value ROUTE_FILTER_v6 0xFFFF0005 65535:5 + + + llgr-stale + Well-known communities value LLGR_STALE 0xFFFF0006 65535:6 + + + no-llgr + Well-known communities value NO_LLGR 0xFFFF0007 65535:7 + + + blackhole + Well-known communities value BLACKHOLE 0xFFFF029A 65535:666 + + + no-peer + Well-known communities value NOPEER 0xFFFFFF04 65535:65284 + additive New value is appended to the existing value From 7e23fd9da028b3c623b69fda8a6bcfd887f1c18c Mon Sep 17 00:00:00 2001 From: John Estabrook Date: Mon, 30 Sep 2024 20:40:02 -0500 Subject: [PATCH 12/85] http-api: T6736: normalize formatting --- smoketest/scripts/cli/test_service_https.py | 75 +++++-- src/services/api/graphql/bindings.py | 29 ++- .../graphql/graphql/auth_token_mutation.py | 31 +-- src/services/api/graphql/graphql/mutations.py | 71 +++--- src/services/api/graphql/graphql/queries.py | 71 +++--- src/services/api/graphql/libs/key_auth.py | 4 +- src/services/api/graphql/libs/token_auth.py | 10 +- src/services/api/graphql/routers.py | 41 ++-- src/services/api/graphql/session/session.py | 33 ++- src/services/api/rest/models.py | 143 +++++++----- src/services/api/rest/routers.py | 209 +++++++++++------- src/services/api/session.py | 1 + 12 files changed, 427 insertions(+), 291 deletions(-) diff --git a/smoketest/scripts/cli/test_service_https.py b/smoketest/scripts/cli/test_service_https.py index 2f4fcd6abcf..04c4a2e5191 100755 --- a/smoketest/scripts/cli/test_service_https.py +++ b/smoketest/scripts/cli/test_service_https.py @@ -89,6 +89,7 @@ PROCESS_NAME = 'nginx' + class TestHTTPSService(VyOSUnitTestSHIM.TestCase): @classmethod def setUpClass(cls): @@ -120,19 +121,29 @@ def test_certificate(self): # verify() - certificates do not exist (yet) with self.assertRaises(ConfigSessionError): self.cli_commit() - self.cli_set(pki_base + ['certificate', cert_name, 'certificate', cert_data.replace('\n','')]) - self.cli_set(pki_base + ['certificate', cert_name, 'private', 'key', key_data.replace('\n','')]) + self.cli_set( + pki_base + + ['certificate', cert_name, 'certificate', cert_data.replace('\n', '')] + ) + 
self.cli_set( + pki_base + + ['certificate', cert_name, 'private', 'key', key_data.replace('\n', '')] + ) self.cli_set(base_path + ['certificates', 'dh-params', dh_name]) # verify() - dh-params do not exist (yet) with self.assertRaises(ConfigSessionError): self.cli_commit() - self.cli_set(pki_base + ['dh', dh_name, 'parameters', dh_1024.replace('\n','')]) + self.cli_set( + pki_base + ['dh', dh_name, 'parameters', dh_1024.replace('\n', '')] + ) # verify() - dh-param minimum length is 2048 bit with self.assertRaises(ConfigSessionError): self.cli_commit() - self.cli_set(pki_base + ['dh', dh_name, 'parameters', dh_2048.replace('\n','')]) + self.cli_set( + pki_base + ['dh', dh_name, 'parameters', dh_2048.replace('\n', '')] + ) self.cli_commit() self.assertTrue(process_named_running(PROCESS_NAME)) @@ -162,7 +173,7 @@ def test_api_auth(self): nginx_config = read_file('/etc/nginx/sites-enabled/default') self.assertIn(f'listen {address}:{port} ssl;', nginx_config) - self.assertIn(f'ssl_protocols TLSv1.2 TLSv1.3;', nginx_config) # default + self.assertIn('ssl_protocols TLSv1.2 TLSv1.3;', nginx_config) # default url = f'https://{address}/retrieve' payload = {'data': '{"op": "showConfig", "path": []}', 'key': f'{key}'} @@ -182,11 +193,16 @@ def test_api_auth(self): self.assertEqual(r.status_code, 401) # Check path config - payload = {'data': '{"op": "showConfig", "path": ["system", "login"]}', 'key': f'{key}'} + payload = { + 'data': '{"op": "showConfig", "path": ["system", "login"]}', + 'key': f'{key}', + } r = request('POST', url, verify=False, headers=headers, data=payload) response = r.json() vyos_user_exists = 'vyos' in response.get('data', {}).get('user', {}) - self.assertTrue(vyos_user_exists, "The 'vyos' user does not exist in the response.") + self.assertTrue( + vyos_user_exists, "The 'vyos' user does not exist in the response." 
+ ) # GraphQL auth test: a missing key will return status code 400, as # 'key' is a non-nullable field in the schema; an incorrect key is @@ -210,7 +226,13 @@ def test_api_auth(self): }} """ - r = request('POST', graphql_url, verify=False, headers=headers, json={'query': query_valid_key}) + r = request( + 'POST', + graphql_url, + verify=False, + headers=headers, + json={'query': query_valid_key}, + ) success = r.json()['data']['SystemStatus']['success'] self.assertTrue(success) @@ -226,7 +248,13 @@ def test_api_auth(self): } """ - r = request('POST', graphql_url, verify=False, headers=headers, json={'query': query_invalid_key}) + r = request( + 'POST', + graphql_url, + verify=False, + headers=headers, + json={'query': query_invalid_key}, + ) success = r.json()['data']['SystemStatus']['success'] self.assertFalse(success) @@ -242,7 +270,13 @@ def test_api_auth(self): } """ - r = request('POST', graphql_url, verify=False, headers=headers, json={'query': query_no_key}) + r = request( + 'POST', + graphql_url, + verify=False, + headers=headers, + json={'query': query_no_key}, + ) success = r.json()['data']['SystemStatus']['success'] self.assertFalse(success) @@ -263,7 +297,9 @@ def test_api_auth(self): } } """ - r = request('POST', graphql_url, verify=False, headers=headers, json={'query': mutation}) + r = request( + 'POST', graphql_url, verify=False, headers=headers, json={'query': mutation} + ) token = r.json()['data']['AuthToken']['data']['result']['token'] @@ -286,7 +322,9 @@ def test_api_auth(self): } """ - r = request('POST', graphql_url, verify=False, headers=headers, json={'query': query}) + r = request( + 'POST', graphql_url, verify=False, headers=headers, json={'query': query} + ) success = r.json()['data']['ShowVersion']['success'] self.assertTrue(success) @@ -371,14 +409,14 @@ def test_api_configure(self): self.cli_commit() payload_path = [ - "interfaces", - "dummy", - f"{conf_interface}", - "address", - f"{conf_address}", + 'interfaces', + 'dummy', + f'{conf_interface}', + 'address', + f'{conf_address}', ] - payload = {'data': json.dumps({"op": "set", "path": payload_path}), 'key': key} + payload = {'data': json.dumps({'op': 'set', 'path': payload_path}), 'key': key} r = request('POST', url, verify=False, headers=headers, data=payload) self.assertEqual(r.status_code, 200) @@ -508,5 +546,6 @@ def test_api_config_file_load_http(self): call(f'sudo rm -f {nginx_tmp_site}') call('sudo systemctl reload nginx') + if __name__ == '__main__': unittest.main(verbosity=5) diff --git a/src/services/api/graphql/bindings.py b/src/services/api/graphql/bindings.py index 93dd0fbfb55..ebf745f3251 100644 --- a/src/services/api/graphql/bindings.py +++ b/src/services/api/graphql/bindings.py @@ -20,18 +20,18 @@ from ariadne import load_schema_from_path from ariadne import snake_case_fallback_resolvers -from . graphql.queries import query -from . graphql.mutations import mutation -from . graphql.directives import directives_dict -from . graphql.errors import op_mode_error -from . graphql.auth_token_mutation import auth_token_mutation -from . libs.token_auth import init_secret +from .graphql.queries import query +from .graphql.mutations import mutation +from .graphql.directives import directives_dict +from .graphql.errors import op_mode_error +from .graphql.auth_token_mutation import auth_token_mutation +from .libs.token_auth import init_secret -from .. 
session import SessionState +from ..session import SessionState def generate_schema(): - state = SessionState() + state = SessionState() api_schema_dir = vyos.defaults.directories['api_schema'] if state.auth_type == 'token': @@ -39,9 +39,14 @@ def generate_schema(): type_defs = load_schema_from_path(api_schema_dir) - schema = make_executable_schema(type_defs, query, op_mode_error, - mutation, auth_token_mutation, - snake_case_fallback_resolvers, - directives=directives_dict) + schema = make_executable_schema( + type_defs, + query, + op_mode_error, + mutation, + auth_token_mutation, + snake_case_fallback_resolvers, + directives=directives_dict, + ) return schema diff --git a/src/services/api/graphql/graphql/auth_token_mutation.py b/src/services/api/graphql/graphql/auth_token_mutation.py index 16496021745..c74364603d8 100644 --- a/src/services/api/graphql/graphql/auth_token_mutation.py +++ b/src/services/api/graphql/graphql/auth_token_mutation.py @@ -19,11 +19,12 @@ from ariadne import ObjectType from graphql import GraphQLResolveInfo -from .. libs.token_auth import generate_token -from .. session.session import get_user_info -from ... session import SessionState +from ..libs.token_auth import generate_token +from ..session.session import get_user_info +from ...session import SessionState + +auth_token_mutation = ObjectType('Mutation') -auth_token_mutation = ObjectType("Mutation") @auth_token_mutation.field('AuthToken') def auth_token_resolver(obj: Any, info: GraphQLResolveInfo, data: Dict): @@ -35,8 +36,9 @@ def auth_token_resolver(obj: Any, info: GraphQLResolveInfo, data: Dict): secret = getattr(state, 'secret', '') exp_interval = int(state.token_exp) - expiration = (datetime.datetime.now(tz=datetime.timezone.utc) + - datetime.timedelta(seconds=exp_interval)) + expiration = datetime.datetime.now(tz=datetime.timezone.utc) + datetime.timedelta( + seconds=exp_interval + ) res = generate_token(user, passwd, secret, expiration) try: @@ -46,18 +48,9 @@ def auth_token_resolver(obj: Any, info: GraphQLResolveInfo, data: Dict): pass if 'token' in res: data['result'] = res - return { - "success": True, - "data": data - } + return {'success': True, 'data': data} if 'errors' in res: - return { - "success": False, - "errors": res['errors'] - } - - return { - "success": False, - "errors": ['token generation failed'] - } + return {'success': False, 'errors': res['errors']} + + return {'success': False, 'errors': ['token generation failed']} diff --git a/src/services/api/graphql/graphql/mutations.py b/src/services/api/graphql/graphql/mutations.py index 62031ada3c0..0b391c07089 100644 --- a/src/services/api/graphql/graphql/mutations.py +++ b/src/services/api/graphql/graphql/mutations.py @@ -14,20 +14,23 @@ # along with this library. If not, see . from importlib import import_module -from ariadne import ObjectType, convert_camel_case_to_snake -from makefun import with_signature # used below by func_sig -from typing import Any, Dict, Optional # pylint: disable=W0611 -from graphql import GraphQLResolveInfo # pylint: disable=W0611 +from typing import Any, Dict, Optional # pylint: disable=W0611 # noqa: F401 +from graphql import GraphQLResolveInfo # pylint: disable=W0611 # noqa: F401 + +from ariadne import ObjectType, convert_camel_case_to_snake +from makefun import with_signature -from ... session import SessionState -from .. libs import key_auth -from .. session.session import Session -from .. 
session.errors.op_mode_errors import op_mode_err_msg, op_mode_err_code from vyos.opmode import Error as OpModeError -mutation = ObjectType("Mutation") +from ...session import SessionState +from ..libs import key_auth +from ..session.session import Session +from ..session.errors.op_mode_errors import op_mode_err_msg, op_mode_err_code + +mutation = ObjectType('Mutation') + def make_mutation_resolver(mutation_name, class_name, session_func): """Dynamically generate a resolver for the mutation named in the @@ -59,10 +62,7 @@ async def func_impl(*args, **kwargs): auth = key_auth.auth_required(key) if auth is None: - return { - "success": False, - "errors": ['invalid API key'] - } + return {'success': False, 'errors': ['invalid API key']} # We are finished with the 'key' entry, and may remove so as to # pass the rest of data (if any) to function. @@ -77,14 +77,8 @@ async def func_impl(*args, **kwargs): if user is None: error = info.context.get('error') if error is not None: - return { - "success": False, - "errors": [error] - } - return { - "success": False, - "errors": ['not authenticated'] - } + return {'success': False, 'errors': [error]} + return {'success': False, 'errors': ['not authenticated']} else: # AtrributeError will have already been raised if no # auth_type; validation and defaultValue ensure it is @@ -106,35 +100,36 @@ async def func_impl(*args, **kwargs): result = method() data['result'] = result - return { - "success": True, - "data": data - } + return {'success': True, 'data': data} except OpModeError as e: typename = type(e).__name__ msg = str(e) return { - "success": False, - "errore": ['op_mode_error'], - "op_mode_error": {"name": f"{typename}", - "message": msg if msg else op_mode_err_msg.get(typename, "Unknown"), - "vyos_code": op_mode_err_code.get(typename, 9999)} + 'success': False, + 'errore': ['op_mode_error'], + 'op_mode_error': { + 'name': f'{typename}', + 'message': msg if msg else op_mode_err_msg.get(typename, 'Unknown'), + 'vyos_code': op_mode_err_code.get(typename, 9999), + }, } except Exception as error: - return { - "success": False, - "errors": [repr(error)] - } + return {'success': False, 'errors': [repr(error)]} return func_impl + def make_config_session_mutation_resolver(mutation_name): - return make_mutation_resolver(mutation_name, mutation_name, - convert_camel_case_to_snake(mutation_name)) + return make_mutation_resolver( + mutation_name, mutation_name, convert_camel_case_to_snake(mutation_name) + ) + def make_gen_op_mutation_resolver(mutation_name): return make_mutation_resolver(mutation_name, mutation_name, 'gen_op_mutation') + def make_composite_mutation_resolver(mutation_name): - return make_mutation_resolver(mutation_name, mutation_name, - convert_camel_case_to_snake(mutation_name)) + return make_mutation_resolver( + mutation_name, mutation_name, convert_camel_case_to_snake(mutation_name) + ) diff --git a/src/services/api/graphql/graphql/queries.py b/src/services/api/graphql/graphql/queries.py index 1e9036574be..9303fe90990 100644 --- a/src/services/api/graphql/graphql/queries.py +++ b/src/services/api/graphql/graphql/queries.py @@ -14,20 +14,23 @@ # along with this library. If not, see . 
from importlib import import_module -from ariadne import ObjectType, convert_camel_case_to_snake -from makefun import with_signature # used below by func_sig -from typing import Any, Dict, Optional # pylint: disable=W0611 -from graphql import GraphQLResolveInfo # pylint: disable=W0611 +from typing import Any, Dict, Optional # pylint: disable=W0611 # noqa: F401 +from graphql import GraphQLResolveInfo # pylint: disable=W0611 # noqa: F401 + +from ariadne import ObjectType, convert_camel_case_to_snake +from makefun import with_signature -from ... session import SessionState -from .. libs import key_auth -from .. session.session import Session -from .. session.errors.op_mode_errors import op_mode_err_msg, op_mode_err_code from vyos.opmode import Error as OpModeError -query = ObjectType("Query") +from ...session import SessionState +from ..libs import key_auth +from ..session.session import Session +from ..session.errors.op_mode_errors import op_mode_err_msg, op_mode_err_code + +query = ObjectType('Query') + def make_query_resolver(query_name, class_name, session_func): """Dynamically generate a resolver for the query named in the @@ -59,10 +62,7 @@ async def func_impl(*args, **kwargs): auth = key_auth.auth_required(key) if auth is None: - return { - "success": False, - "errors": ['invalid API key'] - } + return {'success': False, 'errors': ['invalid API key']} # We are finished with the 'key' entry, and may remove so as to # pass the rest of data (if any) to function. @@ -77,14 +77,8 @@ async def func_impl(*args, **kwargs): if user is None: error = info.context.get('error') if error is not None: - return { - "success": False, - "errors": [error] - } - return { - "success": False, - "errors": ['not authenticated'] - } + return {'success': False, 'errors': [error]} + return {'success': False, 'errors': ['not authenticated']} else: # AtrributeError will have already been raised if no # auth_type; validation and defaultValue ensure it is @@ -106,35 +100,36 @@ async def func_impl(*args, **kwargs): result = method() data['result'] = result - return { - "success": True, - "data": data - } + return {'success': True, 'data': data} except OpModeError as e: typename = type(e).__name__ msg = str(e) return { - "success": False, - "errors": ['op_mode_error'], - "op_mode_error": {"name": f"{typename}", - "message": msg if msg else op_mode_err_msg.get(typename, "Unknown"), - "vyos_code": op_mode_err_code.get(typename, 9999)} + 'success': False, + 'errors': ['op_mode_error'], + 'op_mode_error': { + 'name': f'{typename}', + 'message': msg if msg else op_mode_err_msg.get(typename, 'Unknown'), + 'vyos_code': op_mode_err_code.get(typename, 9999), + }, } except Exception as error: - return { - "success": False, - "errors": [repr(error)] - } + return {'success': False, 'errors': [repr(error)]} return func_impl + def make_config_session_query_resolver(query_name): - return make_query_resolver(query_name, query_name, - convert_camel_case_to_snake(query_name)) + return make_query_resolver( + query_name, query_name, convert_camel_case_to_snake(query_name) + ) + def make_gen_op_query_resolver(query_name): return make_query_resolver(query_name, query_name, 'gen_op_query') + def make_composite_query_resolver(query_name): - return make_query_resolver(query_name, query_name, - convert_camel_case_to_snake(query_name)) + return make_query_resolver( + query_name, query_name, convert_camel_case_to_snake(query_name) + ) diff --git a/src/services/api/graphql/libs/key_auth.py b/src/services/api/graphql/libs/key_auth.py index 
9e49a1203e5..ffd7f32b2e5 100644 --- a/src/services/api/graphql/libs/key_auth.py +++ b/src/services/api/graphql/libs/key_auth.py @@ -14,7 +14,8 @@ # along with this library. If not, see . -from ... session import SessionState +from ...session import SessionState + def check_auth(key_list, key): if not key_list: @@ -25,6 +26,7 @@ def check_auth(key_list, key): key_id = k['id'] return key_id + def auth_required(key): state = SessionState() api_keys = None diff --git a/src/services/api/graphql/libs/token_auth.py b/src/services/api/graphql/libs/token_auth.py index 2d772e0357e..4f743a09641 100644 --- a/src/services/api/graphql/libs/token_auth.py +++ b/src/services/api/graphql/libs/token_auth.py @@ -19,7 +19,7 @@ import pam from secrets import token_hex -from ... session import SessionState +from ...session import SessionState def _check_passwd_pam(username: str, passwd: str) -> bool: @@ -48,13 +48,13 @@ def generate_token(user: str, passwd: str, secret: str, exp: int) -> dict: payload_data = {'iss': user, 'sub': user_id, 'exp': exp} secret = getattr(state, 'secret', None) if secret is None: - return {"errors": ['missing secret']} - token = jwt.encode(payload=payload_data, key=secret, algorithm="HS256") + return {'errors': ['missing secret']} + token = jwt.encode(payload=payload_data, key=secret, algorithm='HS256') users |= {user_id: user} return {'token': token} else: - return {"errors": ['failed pam authentication']} + return {'errors': ['failed pam authentication']} def get_user_context(request): @@ -70,7 +70,7 @@ def get_user_context(request): try: secret = getattr(state, 'secret', None) - payload = jwt.decode(token, secret, algorithms=["HS256"]) + payload = jwt.decode(token, secret, algorithms=['HS256']) user_id: str = payload.get('sub') if user_id is None: return context diff --git a/src/services/api/graphql/routers.py b/src/services/api/graphql/routers.py index f02380cdc88..ed3ee1e8cf3 100644 --- a/src/services/api/graphql/routers.py +++ b/src/services/api/graphql/routers.py @@ -26,14 +26,15 @@ from fastapi import FastAPI -def graphql_init(app: "FastAPI"): - from .. 
session import SessionState +def graphql_init(app: 'FastAPI'): + from ..session import SessionState from .libs.token_auth import get_user_context state = SessionState() # import after initializaion of state from .bindings import generate_schema + schema = generate_schema() in_spec = state.introspection @@ -44,21 +45,33 @@ def graphql_init(app: "FastAPI"): if state.origins: origins = state.origins - app.add_route('/graphql', CORSMiddleware(GraphQL(schema, - context_value=get_user_context, - debug=True, - introspection=in_spec), - allow_origins=origins, - allow_methods=("GET", "POST", "OPTIONS"), - allow_headers=("Authorization",))) + app.add_route( + '/graphql', + CORSMiddleware( + GraphQL( + schema, + context_value=get_user_context, + debug=True, + introspection=in_spec, + ), + allow_origins=origins, + allow_methods=('GET', 'POST', 'OPTIONS'), + allow_headers=('Authorization',), + ), + ) else: - app.add_route('/graphql', GraphQL(schema, - context_value=get_user_context, - debug=True, - introspection=in_spec)) + app.add_route( + '/graphql', + GraphQL( + schema, + context_value=get_user_context, + debug=True, + introspection=in_spec, + ), + ) -def graphql_clear(app: "FastAPI"): +def graphql_clear(app: 'FastAPI'): for r in app.routes: if r.path == '/graphql': app.routes.remove(r) diff --git a/src/services/api/graphql/session/session.py b/src/services/api/graphql/session/session.py index 6e2875f3c29..619534f43fe 100644 --- a/src/services/api/graphql/session/session.py +++ b/src/services/api/graphql/session/session.py @@ -28,34 +28,45 @@ op_mode_include_file = os.path.join(directories['data'], 'op-mode-standardized.json') -def get_config_dict(path=[], effective=False, key_mangling=None, - get_first_key=False, no_multi_convert=False, - no_tag_node_value_mangle=False): + +def get_config_dict( + path=[], + effective=False, + key_mangling=None, + get_first_key=False, + no_multi_convert=False, + no_tag_node_value_mangle=False, +): config = Config() - return config.get_config_dict(path=path, effective=effective, - key_mangling=key_mangling, - get_first_key=get_first_key, - no_multi_convert=no_multi_convert, - no_tag_node_value_mangle=no_tag_node_value_mangle) + return config.get_config_dict( + path=path, + effective=effective, + key_mangling=key_mangling, + get_first_key=get_first_key, + no_multi_convert=no_multi_convert, + no_tag_node_value_mangle=no_tag_node_value_mangle, + ) + def get_user_info(user): user_info = {} - info = get_config_dict(['system', 'login', 'user', user], - get_first_key=True) + info = get_config_dict(['system', 'login', 'user', user], get_first_key=True) if not info: - raise ValueError("No such user") + raise ValueError('No such user') user_info['user'] = user user_info['full_name'] = info.get('full-name', '') return user_info + class Session: """ Wrapper for calling configsession functions based on GraphQL requests. Non-nullable fields in the respective schema allow avoiding a key check in 'data'. 
""" + def __init__(self, session, data): self._session = session self._data = data diff --git a/src/services/api/rest/models.py b/src/services/api/rest/models.py index 034e3fcdb3c..d65d6e1ec73 100644 --- a/src/services/api/rest/models.py +++ b/src/services/api/rest/models.py @@ -31,75 +31,88 @@ def error(code, msg): - resp = {"success": False, "error": msg, "data": None} + resp = {'success': False, 'error': msg, 'data': None} resp = json.dumps(resp) return HTMLResponse(resp, status_code=code) + def success(data): - resp = {"success": True, "data": data, "error": None} + resp = {'success': True, 'data': data, 'error': None} resp = json.dumps(resp) return HTMLResponse(resp) + # Pydantic models for validation # Pydantic will cast when possible, so use StrictStr validators added as # needed for additional constraints # json_schema_extra adds anotations to OpenAPI to add examples + class ApiModel(BaseModel): key: StrictStr + class BasePathModel(BaseModel): op: StrictStr path: List[StrictStr] - @field_validator("path") + @field_validator('path') @classmethod def check_non_empty(cls, path: str) -> str: if not len(path) > 0: raise ValueError('path must be non-empty') return path + class BaseConfigureModel(BasePathModel): value: StrictStr = None + class ConfigureModel(ApiModel, BaseConfigureModel): class Config: json_schema_extra = { - "example": { - "key": "id_key", - "op": "set | delete | comment", - "path": ["config", "mode", "path"], + 'example': { + 'key': 'id_key', + 'op': 'set | delete | comment', + 'path': ['config', 'mode', 'path'], } } + class ConfigureListModel(ApiModel): commands: List[BaseConfigureModel] class Config: json_schema_extra = { - "example": { - "key": "id_key", - "commands": "list of commands", + 'example': { + 'key': 'id_key', + 'commands': 'list of commands', } } + class BaseConfigSectionModel(BasePathModel): section: Dict + class ConfigSectionModel(ApiModel, BaseConfigSectionModel): pass + class ConfigSectionListModel(ApiModel): commands: List[BaseConfigSectionModel] + class BaseConfigSectionTreeModel(BaseModel): op: StrictStr mask: Dict config: Dict + class ConfigSectionTreeModel(ApiModel, BaseConfigSectionTreeModel): pass + class RetrieveModel(ApiModel): op: StrictStr path: List[StrictStr] @@ -107,34 +120,34 @@ class RetrieveModel(ApiModel): class Config: json_schema_extra = { - "example": { - "key": "id_key", - "op": "returnValue | returnValues | exists | showConfig", - "path": ["config", "mode", "path"], - "configFormat": "json (default) | json_ast | raw", - + 'example': { + 'key': 'id_key', + 'op': 'returnValue | returnValues | exists | showConfig', + 'path': ['config', 'mode', 'path'], + 'configFormat': 'json (default) | json_ast | raw', } } + class ConfigFileModel(ApiModel): op: StrictStr file: StrictStr = None class Config: json_schema_extra = { - "example": { - "key": "id_key", - "op": "save | load", - "file": "filename", + 'example': { + 'key': 'id_key', + 'op': 'save | load', + 'file': 'filename', } } class ImageOp(str, Enum): - add = "add" - delete = "delete" - show = "show" - set_default = "set_default" + add = 'add' + delete = 'delete' + show = 'show' + set_default = 'set_default' class ImageModel(ApiModel): @@ -146,23 +159,24 @@ class ImageModel(ApiModel): def check_data(self) -> Self: if self.op == 'add': if not self.url: - raise ValueError("Missing required field \"url\"") + raise ValueError('Missing required field "url"') elif self.op in ['delete', 'set_default']: if not self.name: - raise ValueError("Missing required field \"name\"") + raise 
ValueError('Missing required field "name"') return self class Config: json_schema_extra = { - "example": { - "key": "id_key", - "op": "add | delete | show | set_default", - "url": "imagelocation", - "name": "imagename", + 'example': { + 'key': 'id_key', + 'op': 'add | delete | show | set_default', + 'url': 'imagelocation', + 'name': 'imagename', } } + class ImportPkiModel(ApiModel): op: StrictStr path: List[StrictStr] @@ -170,11 +184,11 @@ class ImportPkiModel(ApiModel): class Config: json_schema_extra = { - "example": { - "key": "id_key", - "op": "import_pki", - "path": ["op", "mode", "path"], - "passphrase": "passphrase", + 'example': { + 'key': 'id_key', + 'op': 'import_pki', + 'path': ['op', 'mode', 'path'], + 'passphrase': 'passphrase', } } @@ -185,75 +199,80 @@ class ContainerImageModel(ApiModel): class Config: json_schema_extra = { - "example": { - "key": "id_key", - "op": "add | delete | show", - "name": "imagename", + 'example': { + 'key': 'id_key', + 'op': 'add | delete | show', + 'name': 'imagename', } } + class GenerateModel(ApiModel): op: StrictStr path: List[StrictStr] class Config: json_schema_extra = { - "example": { - "key": "id_key", - "op": "generate", - "path": ["op", "mode", "path"], + 'example': { + 'key': 'id_key', + 'op': 'generate', + 'path': ['op', 'mode', 'path'], } } + class ShowModel(ApiModel): op: StrictStr path: List[StrictStr] class Config: json_schema_extra = { - "example": { - "key": "id_key", - "op": "show", - "path": ["op", "mode", "path"], + 'example': { + 'key': 'id_key', + 'op': 'show', + 'path': ['op', 'mode', 'path'], } } + class RebootModel(ApiModel): op: StrictStr path: List[StrictStr] class Config: json_schema_extra = { - "example": { - "key": "id_key", - "op": "reboot", - "path": ["op", "mode", "path"], + 'example': { + 'key': 'id_key', + 'op': 'reboot', + 'path': ['op', 'mode', 'path'], } } + class ResetModel(ApiModel): op: StrictStr path: List[StrictStr] class Config: json_schema_extra = { - "example": { - "key": "id_key", - "op": "reset", - "path": ["op", "mode", "path"], + 'example': { + 'key': 'id_key', + 'op': 'reset', + 'path': ['op', 'mode', 'path'], } } + class PoweroffModel(ApiModel): op: StrictStr path: List[StrictStr] class Config: json_schema_extra = { - "example": { - "key": "id_key", - "op": "poweroff", - "path": ["op", "mode", "path"], + 'example': { + 'key': 'id_key', + 'op': 'poweroff', + 'path': ['op', 'mode', 'path'], } } @@ -263,14 +282,16 @@ class Success(BaseModel): data: Union[str, bool, Dict] error: str + class Error(BaseModel): success: bool = False data: Union[str, bool, Dict] error: str + responses = { 200: {'model': Success}, 400: {'model': Error}, 422: {'model': Error, 'description': 'Validation Error'}, - 500: {'model': Error} + 500: {'model': Error}, } diff --git a/src/services/api/rest/routers.py b/src/services/api/rest/routers.py index 38b10ef7d25..da981d5bff3 100644 --- a/src/services/api/rest/routers.py +++ b/src/services/api/rest/routers.py @@ -18,10 +18,12 @@ # pylint: disable=wildcard-import,unused-wildcard-import # pylint: disable=broad-exception-caught +import json import copy import logging import traceback from threading import Lock +from typing import Union from typing import Callable from typing import TYPE_CHECKING @@ -43,8 +45,30 @@ from vyos.configdiff import get_config_diff from vyos.configsession import ConfigSessionError -from .. session import SessionState -from . 
models import * +from ..session import SessionState +from .models import success +from .models import error +from .models import responses +from .models import ApiModel +from .models import ConfigureModel +from .models import ConfigureListModel +from .models import ConfigSectionModel +from .models import ConfigSectionListModel +from .models import ConfigSectionTreeModel +from .models import BaseConfigSectionTreeModel +from .models import BaseConfigureModel +from .models import BaseConfigSectionModel +from .models import RetrieveModel +from .models import ConfigFileModel +from .models import ImageModel +from .models import ContainerImageModel +from .models import GenerateModel +from .models import ShowModel +from .models import RebootModel +from .models import ResetModel +from .models import ImportPkiModel +from .models import PoweroffModel + if TYPE_CHECKING: from fastapi import FastAPI @@ -69,7 +93,7 @@ def auth_required(data: ApiModel): api_keys = session.keys key_id = check_auth(api_keys, key) if not key_id: - raise HTTPException(status_code=401, detail="Valid API key is required") + raise HTTPException(status_code=401, detail='Valid API key is required') session.id = key_id @@ -79,10 +103,12 @@ def auth_required(data: ApiModel): # validation by FastAPI/Pydantic, as is used for application/json requests class MultipartRequest(Request): """Override Request class to convert form request to json""" + # pylint: disable=attribute-defined-outside-init # pylint: disable=too-many-branches,too-many-statements _form_err = () + @property def form_err(self): return self._form_err @@ -104,16 +130,16 @@ def headers(self): return self._headers async def _get_form( - self, *, max_files: int | float = 1000, max_fields: int | float = 1000 - ) -> FormData: + self, *, max_files: int | float = 1000, max_fields: int | float = 1000 + ) -> FormData: if self._form is None: assert ( parse_options_header is not None - ), "The `python-multipart` library must be installed to use form parsing." - content_type_header = self.orig_headers.get("Content-Type") + ), 'The `python-multipart` library must be installed to use form parsing.' 
+ content_type_header = self.orig_headers.get('Content-Type') content_type: bytes content_type, _ = parse_options_header(content_type_header) - if content_type == b"multipart/form-data": + if content_type == b'multipart/form-data': try: multipart_parser = MultiPartParser( self.orig_headers, @@ -123,10 +149,10 @@ async def _get_form( ) self._form = await multipart_parser.parse() except MultiPartException as exc: - if "app" in self.scope: + if 'app' in self.scope: raise HTTPException(status_code=400, detail=exc.message) raise exc - elif content_type == b"application/x-www-form-urlencoded": + elif content_type == b'application/x-www-form-urlencoded': form_parser = FormParser(self.orig_headers, self.stream()) self._form = await form_parser.parse() else: @@ -134,7 +160,7 @@ async def _get_form( return self._form async def body(self) -> bytes: - if not hasattr(self, "_body"): + if not hasattr(self, '_body'): forms = {} merge = {} body = await super().body() @@ -143,12 +169,12 @@ async def body(self) -> bytes: form_data = await self.form() if form_data: endpoint = self.url.path - LOG.debug("processing form data") + LOG.debug('processing form data') for k, v in form_data.multi_items(): forms[k] = v if 'data' not in forms: - self.form_err = (422, "Non-empty data field is required") + self.form_err = (422, 'Non-empty data field is required') return self._body try: tmp = json.loads(forms['data']) @@ -168,37 +194,57 @@ async def body(self) -> bytes: for c in cmds: if not isinstance(c, dict): - self.form_err = (400, - f"Malformed command '{c}': any command must be JSON of dict") + self.form_err = ( + 400, + f"Malformed command '{c}': any command must be JSON of dict", + ) return self._body if 'op' not in c: - self.form_err = (400, - f"Malformed command '{c}': missing 'op' field") - if endpoint not in ('/config-file', '/container-image', - '/image', '/configure-section'): + self.form_err = ( + 400, + f"Malformed command '{c}': missing 'op' field", + ) + if endpoint not in ( + '/config-file', + '/container-image', + '/image', + '/configure-section', + ): if 'path' not in c: - self.form_err = (400, - f"Malformed command '{c}': missing 'path' field") + self.form_err = ( + 400, + f"Malformed command '{c}': missing 'path' field", + ) elif not isinstance(c['path'], list): - self.form_err = (400, - f"Malformed command '{c}': 'path' field must be a list") + self.form_err = ( + 400, + f"Malformed command '{c}': 'path' field must be a list", + ) elif not all(isinstance(el, str) for el in c['path']): - self.form_err = (400, - f"Malformed command '{0}': 'path' field must be a list of strings") + self.form_err = ( + 400, + f"Malformed command '{0}': 'path' field must be a list of strings", + ) if endpoint in ('/configure'): if not c['path']: - self.form_err = (400, - f"Malformed command '{c}': 'path' list must be non-empty") + self.form_err = ( + 400, + f"Malformed command '{c}': 'path' list must be non-empty", + ) if 'value' in c and not isinstance(c['value'], str): - self.form_err = (400, - f"Malformed command '{c}': 'value' field must be a string") + self.form_err = ( + 400, + f"Malformed command '{c}': 'value' field must be a string", + ) if endpoint in ('/configure-section'): if 'section' not in c and 'config' not in c: - self.form_err = (400, - f"Malformed command '{c}': missing 'section' or 'config' field") + self.form_err = ( + 400, + f"Malformed command '{c}': missing 'section' or 'config' field", + ) if 'key' not in forms and 'key' not in merge: - self.form_err = (401, "Valid API key is required") + 
self.form_err = (401, 'Valid API key is required') if 'key' in forms and 'key' not in merge: merge['key'] = forms['key'] @@ -232,12 +278,15 @@ async def custom_route_handler(request: Request) -> Response: return custom_route_handler -router = APIRouter(route_class=MultipartRoute, - responses={**responses}, - dependencies=[Depends(auth_required)]) +router = APIRouter( + route_class=MultipartRoute, + responses={**responses}, + dependencies=[Depends(auth_required)], +) -self_ref_msg = "Requested HTTP API server configuration change; commit will be called in the background" +self_ref_msg = 'Requested HTTP API server configuration change; commit will be called in the background' + def call_commit(s: SessionState): try: @@ -245,15 +294,22 @@ def call_commit(s: SessionState): except ConfigSessionError as e: s.session.discard() if s.debug: - LOG.warning(f"ConfigSessionError:\n {traceback.format_exc()}") + LOG.warning(f'ConfigSessionError:\n {traceback.format_exc()}') else: - LOG.warning(f"ConfigSessionError: {e}") - - -def _configure_op(data: Union[ConfigureModel, ConfigureListModel, - ConfigSectionModel, ConfigSectionListModel, - ConfigSectionTreeModel], - _request: Request, background_tasks: BackgroundTasks): + LOG.warning(f'ConfigSessionError: {e}') + + +def _configure_op( + data: Union[ + ConfigureModel, + ConfigureListModel, + ConfigSectionModel, + ConfigSectionListModel, + ConfigSectionTreeModel, + ], + _request: Request, + background_tasks: BackgroundTasks, +): # pylint: disable=too-many-branches,too-many-locals,too-many-nested-blocks,too-many-statements # pylint: disable=consider-using-with @@ -287,10 +343,10 @@ def _configure_op(data: Union[ConfigureModel, ConfigureListModel, if c.value: value = c.value else: - value = "" + value = '' # For vyos.configsession calls that have no separate value arguments, # and for type checking too - cfg_path = " ".join(path + [value]).strip() + cfg_path = ' '.join(path + [value]).strip() elif isinstance(c, BaseConfigSectionModel): section = c.section @@ -304,7 +360,9 @@ def _configure_op(data: Union[ConfigureModel, ConfigureListModel, session.set(path, value=value) elif op == 'delete': if state.strict and not config.exists(cfg_path): - raise ConfigSessionError(f"Cannot delete [{cfg_path}]: path/value does not exist") + raise ConfigSessionError( + f'Cannot delete [{cfg_path}]: path/value does not exist' + ) session.delete(path, value=value) elif op == 'comment': session.comment(path, value=value) @@ -343,7 +401,7 @@ def _configure_op(data: Union[ConfigureModel, ConfigureListModel, session.discard() status = 400 if state.debug: - LOG.critical(f"ConfigSessionError:\n {traceback.format_exc()}") + LOG.critical(f'ConfigSessionError:\n {traceback.format_exc()}') error_msg = str(e) except Exception: session.discard() @@ -351,7 +409,7 @@ def _configure_op(data: Union[ConfigureModel, ConfigureListModel, status = 500 # Don't give the details away to the outer world - error_msg = "An internal error occured. Check the logs for details." + error_msg = 'An internal error occured. Check the logs for details.' 
finally: lock.release() @@ -371,21 +429,24 @@ def create_path_import_pki_no_prompt(path): @router.post('/configure') -def configure_op(data: Union[ConfigureModel, - ConfigureListModel], - request: Request, background_tasks: BackgroundTasks): +def configure_op( + data: Union[ConfigureModel, ConfigureListModel], + request: Request, + background_tasks: BackgroundTasks, +): return _configure_op(data, request, background_tasks) @router.post('/configure-section') -def configure_section_op(data: Union[ConfigSectionModel, - ConfigSectionListModel, - ConfigSectionTreeModel], - request: Request, background_tasks: BackgroundTasks): +def configure_section_op( + data: Union[ConfigSectionModel, ConfigSectionListModel, ConfigSectionTreeModel], + request: Request, + background_tasks: BackgroundTasks, +): return _configure_op(data, request, background_tasks) -@router.post("/retrieve") +@router.post('/retrieve') async def retrieve_op(data: RetrieveModel): state = SessionState() session = state.session @@ -393,7 +454,7 @@ async def retrieve_op(data: RetrieveModel): config = Config(session_env=env) op = data.op - path = " ".join(data.path) + path = ' '.join(data.path) try: if op == 'returnValue': @@ -424,7 +485,7 @@ async def retrieve_op(data: RetrieveModel): return error(400, str(e)) except Exception: LOG.critical(traceback.format_exc()) - return error(500, "An internal error occured. Check the logs for details.") + return error(500, 'An internal error occured. Check the logs for details.') return success(res) @@ -448,7 +509,7 @@ def config_file_op(data: ConfigFileModel, background_tasks: BackgroundTasks): if data.file: path = data.file else: - return error(400, "Missing required field \"file\"") + return error(400, 'Missing required field "file"') session.migrate_and_load_config(path) @@ -466,7 +527,7 @@ def config_file_op(data: ConfigFileModel, background_tasks: BackgroundTasks): return error(400, str(e)) except Exception: LOG.critical(traceback.format_exc()) - return error(500, "An internal error occured. Check the logs for details.") + return error(500, 'An internal error occured. Check the logs for details.') return success(msg) @@ -484,14 +545,14 @@ def image_op(data: ImageModel): elif op == 'delete': res = session.remove_image(data.name) elif op == 'show': - res = session.show(["system", "image"]) + res = session.show(['system', 'image']) elif op == 'set_default': res = session.set_default_image(data.name) except ConfigSessionError as e: return error(400, str(e)) except Exception: LOG.critical(traceback.format_exc()) - return error(500, "An internal error occured. Check the logs for details.") + return error(500, 'An internal error occured. Check the logs for details.') return success(res) @@ -508,13 +569,13 @@ def container_image_op(data: ContainerImageModel): if data.name: name = data.name else: - return error(400, "Missing required field \"name\"") + return error(400, 'Missing required field "name"') res = session.add_container_image(name) elif op == 'delete': if data.name: name = data.name else: - return error(400, "Missing required field \"name\"") + return error(400, 'Missing required field "name"') res = session.delete_container_image(name) elif op == 'show': res = session.show_container_image() @@ -524,7 +585,7 @@ def container_image_op(data: ContainerImageModel): return error(400, str(e)) except Exception: LOG.critical(traceback.format_exc()) - return error(500, "An internal error occured. Check the logs for details.") + return error(500, 'An internal error occured. 
Check the logs for details.') return success(res) @@ -546,7 +607,7 @@ def generate_op(data: GenerateModel): return error(400, str(e)) except Exception: LOG.critical(traceback.format_exc()) - return error(500, "An internal error occured. Check the logs for details.") + return error(500, 'An internal error occured. Check the logs for details.') return success(res) @@ -568,7 +629,7 @@ def show_op(data: ShowModel): return error(400, str(e)) except Exception: LOG.critical(traceback.format_exc()) - return error(500, "An internal error occured. Check the logs for details.") + return error(500, 'An internal error occured. Check the logs for details.') return success(res) @@ -590,7 +651,7 @@ def reboot_op(data: RebootModel): return error(400, str(e)) except Exception: LOG.critical(traceback.format_exc()) - return error(500, "An internal error occured. Check the logs for details.") + return error(500, 'An internal error occured. Check the logs for details.') return success(res) @@ -612,7 +673,7 @@ def reset_op(data: ResetModel): return error(400, str(e)) except Exception: LOG.critical(traceback.format_exc()) - return error(500, "An internal error occured. Check the logs for details.") + return error(500, 'An internal error occured. Check the logs for details.') return success(res) @@ -652,7 +713,7 @@ def import_pki(data: ImportPkiModel): return error(400, str(e)) except Exception: LOG.critical(traceback.format_exc()) - return error(500, "An internal error occured. Check the logs for details.") + return error(500, 'An internal error occured. Check the logs for details.') finally: lock.release() @@ -676,18 +737,18 @@ def poweroff_op(data: PoweroffModel): return error(400, str(e)) except Exception: LOG.critical(traceback.format_exc()) - return error(500, "An internal error occured. Check the logs for details.") + return error(500, 'An internal error occured. Check the logs for details.') return success(res) -def rest_init(app: "FastAPI"): +def rest_init(app: 'FastAPI'): if all(r in app.routes for r in router.routes): return app.include_router(router) -def rest_clear(app: "FastAPI"): +def rest_clear(app: 'FastAPI'): for r in router.routes: if r in app.routes: app.routes.remove(r) diff --git a/src/services/api/session.py b/src/services/api/session.py index dcdc7246c5c..ad3ef660c3b 100644 --- a/src/services/api/session.py +++ b/src/services/api/session.py @@ -13,6 +13,7 @@ # You should have received a copy of the GNU Lesser General Public License # along with this library. If not, see . 
+ class SessionState: # pylint: disable=attribute-defined-outside-init # pylint: disable=too-many-instance-attributes,too-few-public-methods From 431a78cad78fb2cb4188c82dc878041b9778b4a7 Mon Sep 17 00:00:00 2001 From: Viacheslav Hletenko Date: Wed, 2 Oct 2024 08:47:55 +0000 Subject: [PATCH 13/85] T6755: Change vyos mirror URL Change vyos mirror URL --- .github/workflows/package-smoketest.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/package-smoketest.yml b/.github/workflows/package-smoketest.yml index 289ad70f35b..3595bcf0ebd 100644 --- a/.github/workflows/package-smoketest.yml +++ b/.github/workflows/package-smoketest.yml @@ -17,7 +17,7 @@ env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # Needed for PR comments BUILD_BY: autobuild@vyos.net DEBIAN_MIRROR: http://deb.debian.org/debian/ - VYOS_MIRROR: https://rolling-packages.vyos.net/current/ + VYOS_MIRROR: https://packages.vyos.net/repositories/current/ jobs: build_iso: From 289ca9987b14f7b0d94f528c131d080f51b1a0db Mon Sep 17 00:00:00 2001 From: Nicolas Fort Date: Wed, 2 Oct 2024 13:22:27 +0000 Subject: [PATCH 14/85] T6757: Openconnect: fix template for correct config parsing while configuring source address for radius authentication. --- data/templates/ocserv/radius_conf.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/templates/ocserv/radius_conf.j2 b/data/templates/ocserv/radius_conf.j2 index 1ab322f6917..c86929e4724 100644 --- a/data/templates/ocserv/radius_conf.j2 +++ b/data/templates/ocserv/radius_conf.j2 @@ -22,7 +22,7 @@ authserver {{ authsrv }} {% endif %} {% endfor %} radius_timeout {{ authentication['radius']['timeout'] }} -{% if source_address %} +{% if authentication.radius.source_address is vyos_defined %} bindaddr {{ authentication['radius']['source_address'] }} {% else %} bindaddr * From 34bbc3be98d20b7ce704cc498f070e70bf8f0213 Mon Sep 17 00:00:00 2001 From: Nataliia Solomko Date: Wed, 2 Oct 2024 16:18:32 +0300 Subject: [PATCH 15/85] ipsec: T6101: Add validation for proposal option used in IKE group --- smoketest/scripts/cli/test_vpn_ipsec.py | 8 +++++++- src/conf_mode/vpn_ipsec.py | 13 +++++++++++++ 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/smoketest/scripts/cli/test_vpn_ipsec.py b/smoketest/scripts/cli/test_vpn_ipsec.py index 3b8687b93e9..de18d042744 100755 --- a/smoketest/scripts/cli/test_vpn_ipsec.py +++ b/smoketest/scripts/cli/test_vpn_ipsec.py @@ -947,7 +947,8 @@ def test_remote_access_dhcp_fail_handling(self): self.cli_set(base_path + ['ike-group', ike_group, 'lifetime', ike_lifetime]) self.cli_set(base_path + ['ike-group', ike_group, 'proposal', '1', 'dh-group', '14']) self.cli_set(base_path + ['ike-group', ike_group, 'proposal', '1', 'encryption', 'aes256']) - self.cli_set(base_path + ['ike-group', ike_group, 'proposal', '1', 'hash', 'sha512']) + # a hash algorithm that cannot be mapped to an equivalent PRF + self.cli_set(base_path + ['ike-group', ike_group, 'proposal', '1', 'hash', 'aes192gmac']) # ESP self.cli_set(base_path + ['esp-group', esp_group, 'lifetime', eap_lifetime]) @@ -968,6 +969,11 @@ def test_remote_access_dhcp_fail_handling(self): self.cli_set(base_path + ['remote-access', 'pool', ip_pool_name, 'name-server', name_server]) self.cli_set(base_path + ['remote-access', 'pool', ip_pool_name, 'prefix', prefix]) + # verify() - IKE group use not mapped hash algorithm + with self.assertRaises(ConfigSessionError): + self.cli_commit() + + self.cli_set(base_path + ['ike-group', ike_group, 'proposal', '1', 'hash', 'sha512']) 
self.cli_commit() self.assertTrue(os.path.exists(dhcp_interfaces_file)) diff --git a/src/conf_mode/vpn_ipsec.py b/src/conf_mode/vpn_ipsec.py index ca0c3657fa1..e22b7550c97 100755 --- a/src/conf_mode/vpn_ipsec.py +++ b/src/conf_mode/vpn_ipsec.py @@ -214,6 +214,19 @@ def verify(ipsec): else: verify_interface_exists(ipsec, interface) + # need to use a pseudo-random function (PRF) with an authenticated encryption algorithm. + # If a hash algorithm is defined then it will be mapped to an equivalent PRF + if 'ike_group' in ipsec: + for _, ike_config in ipsec['ike_group'].items(): + for proposal, proposal_config in ike_config.get('proposal', {}).items(): + if 'encryption' in proposal_config and 'prf' not in proposal_config: + # list of hash algorithms that cannot be mapped to an equivalent PRF + algs = ['aes128gmac', 'aes192gmac', 'aes256gmac', 'sha256_96'] + if 'hash' in proposal_config and proposal_config['hash'] in algs: + raise ConfigError( + f"A PRF algorithm is mandatory in IKE proposal {proposal}" + ) + if 'l2tp' in ipsec: if 'esp_group' in ipsec['l2tp']: if 'esp_group' not in ipsec or ipsec['l2tp']['esp_group'] not in ipsec['esp_group']: From e0c643a34f89c9e5f0d2d22f5129354b3f38054e Mon Sep 17 00:00:00 2001 From: Daniil Baturin Date: Thu, 3 Oct 2024 14:50:14 +0100 Subject: [PATCH 16/85] vyos.configtree: T6742: add bindings for create_node and is_leaf/set_leaf (#4109) --- python/vyos/configtree.py | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/python/vyos/configtree.py b/python/vyos/configtree.py index bd77ab8994c..ee8ca8b837d 100644 --- a/python/vyos/configtree.py +++ b/python/vyos/configtree.py @@ -88,6 +88,10 @@ def __init__(self, config_string=None, address=None, libpath=LIBPATH): self.__to_json_ast.argtypes = [c_void_p] self.__to_json_ast.restype = c_char_p + self.__create_node = self.__lib.create_node + self.__create_node.argtypes = [c_void_p, c_char_p] + self.__create_node.restype = c_int + self.__set_add_value = self.__lib.set_add_value self.__set_add_value.argtypes = [c_void_p, c_char_p, c_char_p] self.__set_add_value.restype = c_int @@ -140,6 +144,14 @@ def __init__(self, config_string=None, address=None, libpath=LIBPATH): self.__set_tag.argtypes = [c_void_p, c_char_p] self.__set_tag.restype = c_int + self.__is_leaf = self.__lib.is_leaf + self.__is_leaf.argtypes = [c_void_p, c_char_p] + self.__is_leaf.restype = c_bool + + self.__set_leaf = self.__lib.set_leaf + self.__set_leaf.argtypes = [c_void_p, c_char_p, c_bool] + self.__set_leaf.restype = c_int + self.__get_subtree = self.__lib.get_subtree self.__get_subtree.argtypes = [c_void_p, c_char_p] self.__get_subtree.restype = c_void_p @@ -197,6 +209,14 @@ def to_json(self): def to_json_ast(self): return self.__to_json_ast(self.__config).decode() + def create_node(self, path): + check_path(path) + path_str = " ".join(map(str, path)).encode() + + res = self.__create_node(self.__config, path_str) + if (res != 0): + raise ConfigTreeError(f"Path already exists: {path}") + def set(self, path, value=None, replace=True): """Set new entry in VyOS configuration. path: configuration path e.g. 
'system dns forwarding listen-address' @@ -349,6 +369,22 @@ def set_tag(self, path): else: raise ConfigTreeError("Path [{}] doesn't exist".format(path_str)) + def is_leaf(self, path): + check_path(path) + path_str = " ".join(map(str, path)).encode() + + return self.__is_leaf(self.__config, path_str) + + def set_leaf(self, path, value): + check_path(path) + path_str = " ".join(map(str, path)).encode() + + res = self.__set_leaf(self.__config, path_str, value) + if (res == 0): + return True + else: + raise ConfigTreeError("Path [{}] doesn't exist".format(path_str)) + def get_subtree(self, path, with_node=False): check_path(path) path_str = " ".join(map(str, path)).encode() From c21fa1fb77264c0a92653b064824ac3bce5086ce Mon Sep 17 00:00:00 2001 From: John Estabrook Date: Mon, 30 Sep 2024 21:51:56 -0500 Subject: [PATCH 17/85] http-api: T6736: sanitize error message containing user input --- src/services/api/rest/models.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/services/api/rest/models.py b/src/services/api/rest/models.py index d65d6e1ec73..23ae9be9d9c 100644 --- a/src/services/api/rest/models.py +++ b/src/services/api/rest/models.py @@ -17,6 +17,7 @@ # pylint: disable=too-few-public-methods import json +from html import escape from enum import Enum from typing import List from typing import Union @@ -31,6 +32,7 @@ def error(code, msg): + msg = escape(msg, quote=False) resp = {'success': False, 'error': msg, 'data': None} resp = json.dumps(resp) return HTMLResponse(resp, status_code=code) From e846d2c1500df83742eb803684980396857cc0f1 Mon Sep 17 00:00:00 2001 From: Nicolas Fort Date: Thu, 3 Oct 2024 14:51:55 +0000 Subject: [PATCH 18/85] T6760: firewall: add packet modifications existing in policy route to regular firewall ruleset. --- .../firewall/bridge-hook-forward.xml.i | 6 ++ .../include/firewall/bridge-hook-output.xml.i | 6 ++ .../firewall/bridge-hook-prerouting.xml.i | 5 + .../include/firewall/ipv4-custom-name.xml.i | 6 ++ .../include/firewall/ipv4-hook-forward.xml.i | 5 + .../include/firewall/ipv4-hook-output.xml.i | 10 ++ .../firewall/ipv4-hook-prerouting.xml.i | 4 + .../include/firewall/ipv6-custom-name.xml.i | 5 + .../include/firewall/ipv6-hook-forward.xml.i | 5 + .../include/firewall/ipv6-hook-output.xml.i | 10 ++ .../firewall/ipv6-hook-prerouting.xml.i | 4 + .../set-packet-modifications-conn-mark.xml.i | 21 ++++ .../set-packet-modifications-dscp.xml.i | 21 ++++ .../set-packet-modifications-hop-limit.xml.i | 21 ++++ .../set-packet-modifications-mark.xml.i | 21 ++++ ...t-packet-modifications-table-and-vrf.xml.i | 48 ++++++++++ .../set-packet-modifications-tcp-mss.xml.i | 21 ++++ .../set-packet-modifications-ttl.xml.i | 21 ++++ .../firewall/set-packet-modifications.xml.i | 96 ------------------- .../include/policy/route-common.xml.i | 6 +- python/vyos/firewall.py | 6 ++ smoketest/scripts/cli/test_firewall.py | 16 +++- 22 files changed, 262 insertions(+), 102 deletions(-) create mode 100644 interface-definitions/include/firewall/set-packet-modifications-conn-mark.xml.i create mode 100644 interface-definitions/include/firewall/set-packet-modifications-dscp.xml.i create mode 100755 interface-definitions/include/firewall/set-packet-modifications-hop-limit.xml.i create mode 100644 interface-definitions/include/firewall/set-packet-modifications-mark.xml.i create mode 100644 interface-definitions/include/firewall/set-packet-modifications-table-and-vrf.xml.i create mode 100644 interface-definitions/include/firewall/set-packet-modifications-tcp-mss.xml.i create mode 100755 
interface-definitions/include/firewall/set-packet-modifications-ttl.xml.i delete mode 100644 interface-definitions/include/firewall/set-packet-modifications.xml.i diff --git a/interface-definitions/include/firewall/bridge-hook-forward.xml.i b/interface-definitions/include/firewall/bridge-hook-forward.xml.i index fcc9819254f..03ac26cf62d 100644 --- a/interface-definitions/include/firewall/bridge-hook-forward.xml.i +++ b/interface-definitions/include/firewall/bridge-hook-forward.xml.i @@ -32,6 +32,12 @@ #include #include #include + #include + #include + #include + #include + #include + #include diff --git a/interface-definitions/include/firewall/bridge-hook-output.xml.i b/interface-definitions/include/firewall/bridge-hook-output.xml.i index 38b8b08cad7..85331598943 100644 --- a/interface-definitions/include/firewall/bridge-hook-output.xml.i +++ b/interface-definitions/include/firewall/bridge-hook-output.xml.i @@ -31,6 +31,12 @@ #include #include #include + #include + #include + #include + #include + #include + #include diff --git a/interface-definitions/include/firewall/bridge-hook-prerouting.xml.i b/interface-definitions/include/firewall/bridge-hook-prerouting.xml.i index ea567644f7a..7a45f5cd1f1 100644 --- a/interface-definitions/include/firewall/bridge-hook-prerouting.xml.i +++ b/interface-definitions/include/firewall/bridge-hook-prerouting.xml.i @@ -28,6 +28,11 @@ #include #include #include + #include + #include + #include + #include + #include diff --git a/interface-definitions/include/firewall/ipv4-custom-name.xml.i b/interface-definitions/include/firewall/ipv4-custom-name.xml.i index 8046b2d6c3d..b08262e2dc9 100644 --- a/interface-definitions/include/firewall/ipv4-custom-name.xml.i +++ b/interface-definitions/include/firewall/ipv4-custom-name.xml.i @@ -36,6 +36,12 @@ #include #include #include + #include + #include + #include + #include + #include + diff --git a/interface-definitions/include/firewall/ipv4-hook-forward.xml.i b/interface-definitions/include/firewall/ipv4-hook-forward.xml.i index b0e240a03a4..a2da4b7012f 100644 --- a/interface-definitions/include/firewall/ipv4-hook-forward.xml.i +++ b/interface-definitions/include/firewall/ipv4-hook-forward.xml.i @@ -31,6 +31,11 @@ #include #include #include + #include + #include + #include + #include + #include diff --git a/interface-definitions/include/firewall/ipv4-hook-output.xml.i b/interface-definitions/include/firewall/ipv4-hook-output.xml.i index ee9157592e9..f68136557cb 100644 --- a/interface-definitions/include/firewall/ipv4-hook-output.xml.i +++ b/interface-definitions/include/firewall/ipv4-hook-output.xml.i @@ -28,6 +28,11 @@ #include #include #include + #include + #include + #include + #include + #include @@ -56,6 +61,11 @@ #include #include #include + #include + #include + #include + #include + #include diff --git a/interface-definitions/include/firewall/ipv4-hook-prerouting.xml.i b/interface-definitions/include/firewall/ipv4-hook-prerouting.xml.i index b431303ae69..6f9fe68428c 100644 --- a/interface-definitions/include/firewall/ipv4-hook-prerouting.xml.i +++ b/interface-definitions/include/firewall/ipv4-hook-prerouting.xml.i @@ -35,6 +35,10 @@ #include #include #include + #include + #include + #include + #include Set jump target. 
Action jump must be defined to use this setting diff --git a/interface-definitions/include/firewall/ipv6-custom-name.xml.i b/interface-definitions/include/firewall/ipv6-custom-name.xml.i index fb8740c3870..d49267b5291 100644 --- a/interface-definitions/include/firewall/ipv6-custom-name.xml.i +++ b/interface-definitions/include/firewall/ipv6-custom-name.xml.i @@ -36,6 +36,11 @@ #include #include #include + #include + #include + #include + #include + #include diff --git a/interface-definitions/include/firewall/ipv6-hook-forward.xml.i b/interface-definitions/include/firewall/ipv6-hook-forward.xml.i index 7efc2614ec9..79898d6914f 100644 --- a/interface-definitions/include/firewall/ipv6-hook-forward.xml.i +++ b/interface-definitions/include/firewall/ipv6-hook-forward.xml.i @@ -31,6 +31,11 @@ #include #include #include + #include + #include + #include + #include + #include diff --git a/interface-definitions/include/firewall/ipv6-hook-output.xml.i b/interface-definitions/include/firewall/ipv6-hook-output.xml.i index d3c4c1eada7..9a6d0bb77dd 100644 --- a/interface-definitions/include/firewall/ipv6-hook-output.xml.i +++ b/interface-definitions/include/firewall/ipv6-hook-output.xml.i @@ -28,6 +28,11 @@ #include #include #include + #include + #include + #include + #include + #include @@ -56,6 +61,11 @@ #include #include #include + #include + #include + #include + #include + #include diff --git a/interface-definitions/include/firewall/ipv6-hook-prerouting.xml.i b/interface-definitions/include/firewall/ipv6-hook-prerouting.xml.i index 21f8de6f9e6..15454bbbfc4 100644 --- a/interface-definitions/include/firewall/ipv6-hook-prerouting.xml.i +++ b/interface-definitions/include/firewall/ipv6-hook-prerouting.xml.i @@ -35,6 +35,10 @@ #include #include #include + #include + #include + #include + #include Set jump target. 
Action jump must be defined to use this setting diff --git a/interface-definitions/include/firewall/set-packet-modifications-conn-mark.xml.i b/interface-definitions/include/firewall/set-packet-modifications-conn-mark.xml.i new file mode 100644 index 00000000000..dff95d324ff --- /dev/null +++ b/interface-definitions/include/firewall/set-packet-modifications-conn-mark.xml.i @@ -0,0 +1,21 @@ + + + + Packet modifications + + + + + Set connection mark + + u32:0-2147483647 + Connection mark + + + + + + + + + diff --git a/interface-definitions/include/firewall/set-packet-modifications-dscp.xml.i b/interface-definitions/include/firewall/set-packet-modifications-dscp.xml.i new file mode 100644 index 00000000000..5082806fb8a --- /dev/null +++ b/interface-definitions/include/firewall/set-packet-modifications-dscp.xml.i @@ -0,0 +1,21 @@ + + + + Packet modifications + + + + + Set DSCP (Packet Differentiated Services Codepoint) bits + + u32:0-63 + DSCP number + + + + + + + + + diff --git a/interface-definitions/include/firewall/set-packet-modifications-hop-limit.xml.i b/interface-definitions/include/firewall/set-packet-modifications-hop-limit.xml.i new file mode 100755 index 00000000000..8a6e5347a88 --- /dev/null +++ b/interface-definitions/include/firewall/set-packet-modifications-hop-limit.xml.i @@ -0,0 +1,21 @@ + + + + Packet modifications + + + + + Set hop limit + + u32:0-255 + Hop limit number + + + + + + + + + diff --git a/interface-definitions/include/firewall/set-packet-modifications-mark.xml.i b/interface-definitions/include/firewall/set-packet-modifications-mark.xml.i new file mode 100644 index 00000000000..b229d05797a --- /dev/null +++ b/interface-definitions/include/firewall/set-packet-modifications-mark.xml.i @@ -0,0 +1,21 @@ + + + + Packet modifications + + + + + Set packet mark + + u32:1-2147483647 + Packet mark + + + + + + + + + diff --git a/interface-definitions/include/firewall/set-packet-modifications-table-and-vrf.xml.i b/interface-definitions/include/firewall/set-packet-modifications-table-and-vrf.xml.i new file mode 100644 index 00000000000..c7875b31d02 --- /dev/null +++ b/interface-definitions/include/firewall/set-packet-modifications-table-and-vrf.xml.i @@ -0,0 +1,48 @@ + + + + Packet modifications + + + + + Set the routing table for matched packets + + u32:1-200 + Table number + + + main + Main table + + + + (main) + + + main + protocols static table + + + + + + VRF to forward packet with + + txt + VRF instance name + + + default + Forward into default global VRF + + + default + vrf name + + #include + + + + + diff --git a/interface-definitions/include/firewall/set-packet-modifications-tcp-mss.xml.i b/interface-definitions/include/firewall/set-packet-modifications-tcp-mss.xml.i new file mode 100644 index 00000000000..06ffdfede38 --- /dev/null +++ b/interface-definitions/include/firewall/set-packet-modifications-tcp-mss.xml.i @@ -0,0 +1,21 @@ + + + + Packet modifications + + + + + Set TCP Maximum Segment Size + + u32:500-1460 + Explicitly set TCP MSS value + + + + + + + + + diff --git a/interface-definitions/include/firewall/set-packet-modifications-ttl.xml.i b/interface-definitions/include/firewall/set-packet-modifications-ttl.xml.i new file mode 100755 index 00000000000..e2f14050bb9 --- /dev/null +++ b/interface-definitions/include/firewall/set-packet-modifications-ttl.xml.i @@ -0,0 +1,21 @@ + + + + Packet modifications + + + + + Set TTL (time to live) + + u32:0-255 + TTL number + + + + + + + + + diff --git a/interface-definitions/include/firewall/set-packet-modifications.xml.i 
b/interface-definitions/include/firewall/set-packet-modifications.xml.i deleted file mode 100644 index ee019b64ee7..00000000000 --- a/interface-definitions/include/firewall/set-packet-modifications.xml.i +++ /dev/null @@ -1,96 +0,0 @@ - - - - Packet modifications - - - - - Set connection mark - - u32:0-2147483647 - Connection mark - - - - - - - - - Set DSCP (Packet Differentiated Services Codepoint) bits - - u32:0-63 - DSCP number - - - - - - - - - Set packet mark - - u32:1-2147483647 - Packet mark - - - - - - - - - Set the routing table for matched packets - - u32:1-200 - Table number - - - main - Main table - - - - (main) - - - main - protocols static table - - - - - - VRF to forward packet with - - txt - VRF instance name - - - default - Forward into default global VRF - - - default - vrf name - - #include - - - - - Set TCP Maximum Segment Size - - u32:500-1460 - Explicitly set TCP MSS value - - - - - - - - - \ No newline at end of file diff --git a/interface-definitions/include/policy/route-common.xml.i b/interface-definitions/include/policy/route-common.xml.i index 19ffc05069a..5c69a527906 100644 --- a/interface-definitions/include/policy/route-common.xml.i +++ b/interface-definitions/include/policy/route-common.xml.i @@ -66,7 +66,11 @@ -#include +#include +#include +#include +#include +#include #include #include #include diff --git a/python/vyos/firewall.py b/python/vyos/firewall.py index 34d0b73f6ea..314e8dfe3d7 100755 --- a/python/vyos/firewall.py +++ b/python/vyos/firewall.py @@ -583,6 +583,12 @@ def parse_rule(rule_conf, hook, fw_name, rule_id, ip_name): if 'tcp_mss' in rule_conf['set']: mss = rule_conf['set']['tcp_mss'] output.append(f'tcp option maxseg size set {mss}') + if 'ttl' in rule_conf['set']: + ttl = rule_conf['set']['ttl'] + output.append(f'ip ttl set {ttl}') + if 'hop_limit' in rule_conf['set']: + hoplimit = rule_conf['set']['hop_limit'] + output.append(f'ip6 hoplimit set {hoplimit}') if 'action' in rule_conf: if rule_conf['action'] == 'offload': diff --git a/smoketest/scripts/cli/test_firewall.py b/smoketest/scripts/cli/test_firewall.py index 3e9ec293554..2d18f049569 100755 --- a/smoketest/scripts/cli/test_firewall.py +++ b/smoketest/scripts/cli/test_firewall.py @@ -248,6 +248,7 @@ def test_ipv4_basic_rules(self): self.cli_set(['firewall', 'ipv4', 'prerouting', 'raw', 'rule', '1', 'action', 'notrack']) self.cli_set(['firewall', 'ipv4', 'prerouting', 'raw', 'rule', '1', 'protocol', 'tcp']) self.cli_set(['firewall', 'ipv4', 'prerouting', 'raw', 'rule', '1', 'destination', 'port', '23']) + self.cli_set(['firewall', 'ipv4', 'prerouting', 'raw', 'rule', '1', 'set', 'mark', '55']) self.cli_commit() @@ -275,7 +276,7 @@ def test_ipv4_basic_rules(self): ['OUT-raw default-action drop', 'drop'], ['chain VYOS_PREROUTING_raw'], ['type filter hook prerouting priority raw; policy accept;'], - ['tcp dport 23', 'notrack'], + ['tcp dport 23', 'meta mark set 0x00000037', 'notrack'], ['PRE-raw default-action accept', 'accept'], ['chain NAME_smoketest'], ['saddr 172.16.20.10', 'daddr 172.16.10.10', 'log prefix "[ipv4-NAM-smoketest-1-A]" log level debug', 'ip ttl 15', 'accept'], @@ -315,6 +316,7 @@ def test_ipv4_advanced(self): self.cli_set(['firewall', 'ipv4', 'forward', 'filter', 'rule', '1', 'mark', '1010']) self.cli_set(['firewall', 'ipv4', 'forward', 'filter', 'rule', '1', 'action', 'jump']) self.cli_set(['firewall', 'ipv4', 'forward', 'filter', 'rule', '1', 'jump-target', name]) + self.cli_set(['firewall', 'ipv4', 'forward', 'filter', 'rule', '1', 'set', 'dscp', '32']) 
self.cli_set(['firewall', 'ipv4', 'input', 'filter', 'rule', '2', 'protocol', 'tcp']) self.cli_set(['firewall', 'ipv4', 'input', 'filter', 'rule', '2', 'mark', '!98765']) @@ -331,7 +333,7 @@ def test_ipv4_advanced(self): nftables_search = [ ['chain VYOS_FORWARD_filter'], ['type filter hook forward priority filter; policy accept;'], - ['ip saddr 198.51.100.1-198.51.100.50', 'meta mark 0x000003f2', f'jump NAME_{name}'], + ['ip saddr 198.51.100.1-198.51.100.50', 'meta mark 0x000003f2', 'ip dscp set cs4', f'jump NAME_{name}'], ['FWD-filter default-action drop', 'drop'], ['chain VYOS_INPUT_filter'], ['type filter hook input priority filter; policy accept;'], @@ -485,6 +487,7 @@ def test_ipv6_basic_rules(self): self.cli_set(['firewall', 'ipv6', 'prerouting', 'raw', 'rule', '1', 'action', 'drop']) self.cli_set(['firewall', 'ipv6', 'prerouting', 'raw', 'rule', '1', 'protocol', 'tcp']) self.cli_set(['firewall', 'ipv6', 'prerouting', 'raw', 'rule', '1', 'destination', 'port', '23']) + self.cli_set(['firewall', 'ipv6', 'prerouting', 'raw', 'rule', '1', 'set', 'hop-limit', '79']) self.cli_commit() @@ -507,7 +510,7 @@ def test_ipv6_basic_rules(self): ['OUT-raw default-action drop', 'drop'], ['chain VYOS_IPV6_PREROUTING_raw'], ['type filter hook prerouting priority raw; policy accept;'], - ['tcp dport 23', 'drop'], + ['tcp dport 23', 'ip6 hoplimit set 79', 'drop'], ['PRE-raw default-action accept', 'accept'], [f'chain NAME6_{name}'], ['saddr 2002::1-2002::10', 'daddr 2002::1:1', 'log prefix "[ipv6-NAM-v6-smoketest-1-A]" log level crit', 'accept'], @@ -722,9 +725,12 @@ def test_bridge_firewall(self): self.cli_set(['firewall', 'bridge', 'forward', 'filter', 'rule', '1', 'action', 'accept']) self.cli_set(['firewall', 'bridge', 'forward', 'filter', 'rule', '1', 'vlan', 'id', vlan_id]) self.cli_set(['firewall', 'bridge', 'forward', 'filter', 'rule', '1', 'vlan', 'ethernet-type', 'ipv4']) + self.cli_set(['firewall', 'bridge', 'forward', 'filter', 'rule', '1', 'set', 'connection-mark', '123123']) + self.cli_set(['firewall', 'bridge', 'forward', 'filter', 'rule', '2', 'action', 'jump']) self.cli_set(['firewall', 'bridge', 'forward', 'filter', 'rule', '2', 'jump-target', name]) self.cli_set(['firewall', 'bridge', 'forward', 'filter', 'rule', '2', 'vlan', 'priority', vlan_prior]) + self.cli_set(['firewall', 'bridge', 'forward', 'filter', 'rule', '2', 'set', 'ttl', '128']) self.cli_set(['firewall', 'bridge', 'input', 'filter', 'rule', '1', 'action', 'accept']) self.cli_set(['firewall', 'bridge', 'input', 'filter', 'rule', '1', 'inbound-interface', 'name', interface_in]) @@ -746,8 +752,8 @@ def test_bridge_firewall(self): ['chain VYOS_FORWARD_filter'], ['type filter hook forward priority filter; policy accept;'], ['jump VYOS_STATE_POLICY'], - [f'vlan id {vlan_id}', 'vlan type ip', 'accept'], - [f'vlan pcp {vlan_prior}', f'jump NAME_{name}'], + [f'vlan id {vlan_id}', 'vlan type ip', 'ct mark set 0x0001e0f3', 'accept'], + [f'vlan pcp {vlan_prior}', 'ip ttl set 128', f'jump NAME_{name}'], ['log prefix "[bri-FWD-filter-default-D]"', 'drop', 'FWD-filter default-action drop'], [f'chain NAME_{name}'], [f'ether saddr {mac_address}', f'iifname "{interface_in}"', f'log prefix "[bri-NAM-{name}-1-A]" log level crit', 'accept'], From 9821aa7a47fddb2f9e9d36bfffc1d55a17682667 Mon Sep 17 00:00:00 2001 From: Daniil Baturin Date: Thu, 3 Oct 2024 16:43:39 +0100 Subject: [PATCH 19/85] cli: T6740: add a converter from set commands to config --- debian/vyos-1x.install | 1 + python/vyos/utils/config.py | 41 +++++++++++++++++++++++++ 
python/vyos/xml_ref/definition.py | 28 ++++++++++++++++- src/utils/vyos-commands-to-config | 50 +++++++++++++++++++++++++++++++ 4 files changed, 119 insertions(+), 1 deletion(-) create mode 100755 src/utils/vyos-commands-to-config diff --git a/debian/vyos-1x.install b/debian/vyos-1x.install index 7171911dc9e..a945cad18fd 100644 --- a/debian/vyos-1x.install +++ b/debian/vyos-1x.install @@ -28,6 +28,7 @@ usr/bin/initial-setup usr/bin/vyos-config-file-query usr/bin/vyos-config-to-commands usr/bin/vyos-config-to-json +usr/bin/vyos-commands-to-config usr/bin/vyos-hostsd-client usr/lib usr/libexec/vyos/activate diff --git a/python/vyos/utils/config.py b/python/vyos/utils/config.py index 33047010b1b..b02621e7b3b 100644 --- a/python/vyos/utils/config.py +++ b/python/vyos/utils/config.py @@ -37,3 +37,44 @@ def read_saved_value(path: list): if len(res) == 1: return ' '.join(res) return res + +def parse_commands(cmds: str) -> dict: + from re import split as re_split + from shlex import split as shlex_split + + from vyos.xml_ref import definition + from vyos.xml_ref.pkg_cache.vyos_1x_cache import reference + + ref_tree = definition.Xml() + ref_tree.define(reference) + + res = [] + + cmds = re_split(r'\n+', cmds) + for c in cmds: + cmd_parts = shlex_split(c) + + if not cmd_parts: + # Ignore empty lines + continue + + path = cmd_parts[1:] + op = cmd_parts[0] + + try: + path, value = ref_tree.split_path(path) + except ValueError as e: + raise ValueError(f'Incorrect command: {e}') + + entry = {} + entry["op"] = op + entry["path"] = path + entry["value"] = value + + entry["is_multi"] = ref_tree.is_multi(path) + entry["is_leaf"] = ref_tree.is_leaf(path) + entry["is_tag"] = ref_tree.is_tag(path) + + res.append(entry) + + return res diff --git a/python/vyos/xml_ref/definition.py b/python/vyos/xml_ref/definition.py index 5ff28daed1d..4e755ab7208 100644 --- a/python/vyos/xml_ref/definition.py +++ b/python/vyos/xml_ref/definition.py @@ -13,7 +13,7 @@ # You should have received a copy of the GNU Lesser General Public License # along with this library. If not, see . -from typing import Optional, Union, Any, TYPE_CHECKING +from typing import Tuple, Optional, Union, Any, TYPE_CHECKING # https://peps.python.org/pep-0484/#forward-references # for type 'ConfigDict' @@ -90,6 +90,32 @@ def _is_tag_node(self, node: dict) -> bool: res = self._get_ref_node_data(node, 'node_type') return res == 'tag' + def exists(self, path: list) -> bool: + try: + _ = self._get_ref_path(path) + return True + except ValueError: + return False + + def split_path(self, path: list) -> Tuple[list, Optional[str]]: + """ Splits a list into config path and value components """ + + # First, check if the complete path is valid by itself + if self.exists(path): + if self.is_valueless(path) or not self.is_leaf(path): + # It's a complete path for a valueless node + # or a path to an empy non-leaf node + return (path, None) + else: + raise ValueError(f'Path "{path}" needs a value or children') + else: + # If the complete path doesn't exist, it's probably a path with a value + if self.exists(path[0:-1]): + return (path[0:-1], path[-1]) + else: + # Or not a valid path at all + raise ValueError(f'Path "{path}" is incorrect') + def is_tag(self, path: list) -> bool: ref_path = path.copy() d = self.ref diff --git a/src/utils/vyos-commands-to-config b/src/utils/vyos-commands-to-config new file mode 100755 index 00000000000..85613b49d26 --- /dev/null +++ b/src/utils/vyos-commands-to-config @@ -0,0 +1,50 @@ +#! 
/usr/bin/python3 +# +# Copyright (C) 2024 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +import sys +import json + +from vyos.configtree import ConfigTree +from vyos.utils.config import parse_commands + + +def commands_to_config(cmds): + ct = ConfigTree('') + cmds = parse_commands(cmds) + + for c in cmds: + if c["op"] == "set": + if c["is_leaf"]: + replace = False if c["is_multi"] else True + ct.set(c["path"], value=c["value"], replace=replace) + else: + ct.create_node(c["path"]) + else: + raise ValueError( + f"\"{c['op']}\" is not a supported config operation") + + return ct + + +if __name__ == '__main__': + try: + cmds = sys.stdin.read() + ct = commands_to_config(cmds) + print(str(ct)) + except Exception as e: + print(e) + sys.exit(1) From 23e4ab655e3ef428250763150ae0a6a69fa59859 Mon Sep 17 00:00:00 2001 From: Viacheslav Hletenko Date: Thu, 3 Oct 2024 17:33:46 +0000 Subject: [PATCH 20/85] T6761: Add timeout for OSPF smoketest fail From time to time the smoektest script checks frrconfig to early. I.e. FRR does not fully load the config during checking or the OSPF daemon is not started at the time of checking. --- smoketest/scripts/cli/test_protocols_ospf.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/smoketest/scripts/cli/test_protocols_ospf.py b/smoketest/scripts/cli/test_protocols_ospf.py index 905eaf2e932..c3ae54e1213 100755 --- a/smoketest/scripts/cli/test_protocols_ospf.py +++ b/smoketest/scripts/cli/test_protocols_ospf.py @@ -15,6 +15,7 @@ # along with this program. If not, see . import unittest +import time from base_vyostest_shim import VyOSUnitTestSHIM @@ -558,6 +559,22 @@ def test_ospf_17_duplicate_area_network(self): # Verify FRR ospfd configuration frrconfig = self.getFRRconfig('router ospf', daemon=PROCESS_NAME) + # Required to prevent the race condition T6761 + retry_count = 0 + max_retries = 60 + + while not frrconfig and retry_count < max_retries: + # Log every 10 seconds + if retry_count % 10 == 0: + print(f"Attempt {retry_count}: FRR config is still empty. 
Retrying...") + + retry_count += 1 + time.sleep(1) + frrconfig = self.getFRRconfig('router ospf', daemon=PROCESS_NAME) + + if not frrconfig: + print("Failed to retrieve FRR config after 60 seconds") + self.assertIn(f'router ospf', frrconfig) self.assertIn(f' network {network} area {area1}', frrconfig) From 5b10912a00ef5621c84aed2129ba3daaa7f06b98 Mon Sep 17 00:00:00 2001 From: Daniil Baturin Date: Thu, 3 Oct 2024 19:01:20 +0100 Subject: [PATCH 21/85] cli: T6752: add a wrapper for the show command (#4111) --- debian/vyos-1x.install | 1 + src/utils/vyos-show-config | 57 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 58 insertions(+) create mode 100755 src/utils/vyos-show-config diff --git a/debian/vyos-1x.install b/debian/vyos-1x.install index 7171911dc9e..fff6ebeab73 100644 --- a/debian/vyos-1x.install +++ b/debian/vyos-1x.install @@ -25,6 +25,7 @@ srv/localui usr/sbin usr/bin/config-mgmt usr/bin/initial-setup +usr/bin/vyos-show-config usr/bin/vyos-config-file-query usr/bin/vyos-config-to-commands usr/bin/vyos-config-to-json diff --git a/src/utils/vyos-show-config b/src/utils/vyos-show-config new file mode 100755 index 00000000000..152322fc14a --- /dev/null +++ b/src/utils/vyos-show-config @@ -0,0 +1,57 @@ +#!/usr/bin/env python3 +# Copyright (C) 2024 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+# + +import os +import sys +import argparse + +from signal import signal, SIGPIPE, SIG_DFL + +def get_config(path): + from vyos.utils.process import rc_cmd + res, out = rc_cmd(f"cli-shell-api showCfg {path}") + if res > 0: + print("Error: failed to retrieve the config", file=sys.stderr) + sys.exit(1) + else: + return out + +def strip_config(config): + from vyos.utils.strip_config import strip_config_source + return strip_config_source(config) + +if __name__ == '__main__': + signal(SIGPIPE,SIG_DFL) + + parser = argparse.ArgumentParser() + parser.add_argument("--strip-private", + help="Strip private information from the config", + action="store_true") + + args, path_args = parser.parse_known_args() + + config = get_config(" ".join(path_args)) + + if args.strip_private: + edit_level = os.getenv("VYATTA_EDIT_LEVEL") + if (edit_level != "/") or (len(path_args) > 0): + print("Error: show --strip-private only works at the top level", + file=sys.stderr) + sys.exit(1) + else: + print(strip_config(config)) + else: + print(config) From 1749c3a99b88c8376b505d0d776cc9b4d5f167cd Mon Sep 17 00:00:00 2001 From: rebortg Date: Mon, 30 Sep 2024 22:31:16 +0200 Subject: [PATCH 22/85] T973: remove irrelevant standard values --- data/templates/node_exporter/node_exporter.service.j2 | 9 ++++++--- .../service_monitoring_node_exporter.xml.in | 3 --- .../scripts/cli/test_service_monitoring_node-exporter.py | 2 +- src/conf_mode/service_monitoring_node-exporter.py | 5 +---- 4 files changed, 8 insertions(+), 11 deletions(-) diff --git a/data/templates/node_exporter/node_exporter.service.j2 b/data/templates/node_exporter/node_exporter.service.j2 index 01dad5aa23c..62e7e67746f 100644 --- a/data/templates/node_exporter/node_exporter.service.j2 +++ b/data/templates/node_exporter/node_exporter.service.j2 @@ -9,9 +9,12 @@ After=network.target User=node_exporter {% endif %} ExecStart={{ vrf_command }}/usr/sbin/node_exporter \ -{% for address in listen_address %} +{% if listen_address is vyos_defined %} +{% for address in listen_address %} --web.listen-address={{ address }}:{{ port }} -{% endfor %} - +{% endfor %} +{% else %} + --web.listen-address=:{{ port }} +{% endif %} [Install] WantedBy=multi-user.target diff --git a/interface-definitions/service_monitoring_node_exporter.xml.in b/interface-definitions/service_monitoring_node_exporter.xml.in index ee6d06d6d4f..a11d2304fe6 100644 --- a/interface-definitions/service_monitoring_node_exporter.xml.in +++ b/interface-definitions/service_monitoring_node_exporter.xml.in @@ -11,9 +11,6 @@ #include - - 0.0.0.0 - #include 9100 diff --git a/smoketest/scripts/cli/test_service_monitoring_node-exporter.py b/smoketest/scripts/cli/test_service_monitoring_node-exporter.py index 6b96e3130a4..e18a3f7a2a8 100755 --- a/smoketest/scripts/cli/test_service_monitoring_node-exporter.py +++ b/smoketest/scripts/cli/test_service_monitoring_node-exporter.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# Copyright (C) 2020-2024 VyOS maintainers and contributors +# Copyright (C) 2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as diff --git a/src/conf_mode/service_monitoring_node-exporter.py b/src/conf_mode/service_monitoring_node-exporter.py index e8d7c15b43d..db34bb5d0d2 100755 --- a/src/conf_mode/service_monitoring_node-exporter.py +++ b/src/conf_mode/service_monitoring_node-exporter.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# Copyright (C) 2021-2024 VyOS maintainers 
and contributors +# Copyright (C) 2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as @@ -89,9 +89,6 @@ def apply(config_data): call(f'systemctl {systemd_action} {systemd_service}') - # Telegraf include custom rsyslog config changes - call('systemctl reload-or-restart rsyslog') - if __name__ == '__main__': try: From a3b79255fae48dea35b6fd240c6671e226382cfe Mon Sep 17 00:00:00 2001 From: Viacheslav Hletenko Date: Fri, 4 Oct 2024 07:15:25 +0000 Subject: [PATCH 23/85] T6763: Delete Jenkins file --- Jenkinsfile | 23 ----------------------- 1 file changed, 23 deletions(-) delete mode 100644 Jenkinsfile diff --git a/Jenkinsfile b/Jenkinsfile deleted file mode 100644 index 21a6829c09e..00000000000 --- a/Jenkinsfile +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (C) 2020-2021 VyOS maintainers and contributors -// -// This program is free software; you can redistribute it and/or modify -// in order to easy exprort images built to "external" world -// it under the terms of the GNU General Public License version 2 or later as -// published by the Free Software Foundation. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . -@NonCPS - -// Using a version specifier library, use 'current' branch. The underscore (_) -// is not a typo! You need this underscore if the line immediately after the -// @Library annotation is not an import statement! 
-@Library('vyos-build@current')_ - -// Start package build using library function from https://github.com/vyos/vyos-build -buildPackage(null, null, null, true) From f9c81b121b146fbcf3c458273b2b47e6e571f2e2 Mon Sep 17 00:00:00 2001 From: John Estabrook Date: Wed, 2 Oct 2024 12:20:15 -0500 Subject: [PATCH 24/85] config-mgmt: T5976: normalize formatting --- python/vyos/config_mgmt.py | 271 +++++++++++++--------- python/vyos/configsession.py | 78 +++++-- src/conf_mode/system_config-management.py | 13 +- src/helpers/commit-confirm-notify.py | 18 +- 4 files changed, 236 insertions(+), 144 deletions(-) diff --git a/python/vyos/config_mgmt.py b/python/vyos/config_mgmt.py index d518737ca51..920a19fec35 100644 --- a/python/vyos/config_mgmt.py +++ b/python/vyos/config_mgmt.py @@ -49,8 +49,10 @@ # created by vyatta-cfg-postinst commit_post_hook_dir = '/etc/commit/post-hooks.d' -commit_hooks = {'commit_revision': '01vyos-commit-revision', - 'commit_archive': '02vyos-commit-archive'} +commit_hooks = { + 'commit_revision': '01vyos-commit-revision', + 'commit_archive': '02vyos-commit-archive', +} DEFAULT_TIME_MINUTES = 10 timer_name = 'commit-confirm' @@ -72,6 +74,7 @@ ch.setFormatter(formatter) logger.addHandler(ch) + def save_config(target, json_out=None): if json_out is None: cmd = f'{SAVE_CONFIG} {target}' @@ -81,6 +84,7 @@ def save_config(target, json_out=None): if rc != 0: logger.critical(f'save config failed: {out}') + def unsaved_commits(allow_missing_config=False) -> bool: if get_full_version_data()['boot_via'] == 'livecd': return False @@ -92,6 +96,7 @@ def unsaved_commits(allow_missing_config=False) -> bool: os.unlink(tmp_save) return ret + def get_file_revision(rev: int): revision = os.path.join(archive_dir, f'config.boot.{rev}.gz') try: @@ -102,12 +107,15 @@ def get_file_revision(rev: int): return '' return r + def get_config_tree_revision(rev: int): c = get_file_revision(rev) return ConfigTree(c) + def is_node_revised(path: list = [], rev1: int = 1, rev2: int = 0) -> bool: from vyos.configtree import DiffTree + left = get_config_tree_revision(rev1) right = get_config_tree_revision(rev2) diff_tree = DiffTree(left, right) @@ -115,9 +123,11 @@ def is_node_revised(path: list = [], rev1: int = 1, rev2: int = 0) -> bool: return True return False + class ConfigMgmtError(Exception): pass + class ConfigMgmt: def __init__(self, session_env=None, config=None): if session_env: @@ -128,15 +138,14 @@ def __init__(self, session_env=None, config=None): if config is None: config = Config() - d = config.get_config_dict(['system', 'config-management'], - key_mangling=('-', '_'), - get_first_key=True) + d = config.get_config_dict( + ['system', 'config-management'], key_mangling=('-', '_'), get_first_key=True + ) self.max_revisions = int(d.get('commit_revisions', 0)) self.num_revisions = 0 self.locations = d.get('commit_archive', {}).get('location', []) - self.source_address = d.get('commit_archive', - {}).get('source_address', '') + self.source_address = d.get('commit_archive', {}).get('source_address', '') if config.exists(['system', 'host-name']): self.hostname = config.return_value(['system', 'host-name']) if config.exists(['system', 'domain-name']): @@ -156,15 +165,16 @@ def __init__(self, session_env=None, config=None): # a call to compare without args is edit_level aware edit_level = os.getenv('VYATTA_EDIT_LEVEL', '') - self.edit_path = [l for l in edit_level.split('/') if l] + self.edit_path = [l for l in edit_level.split('/') if l] # noqa: E741 self.active_config = config._running_config 
self.working_config = config._session_config # Console script functions # - def commit_confirm(self, minutes: int=DEFAULT_TIME_MINUTES, - no_prompt: bool=False) -> Tuple[str,int]: + def commit_confirm( + self, minutes: int = DEFAULT_TIME_MINUTES, no_prompt: bool = False + ) -> Tuple[str, int]: """Commit with reboot to saved config in 'minutes' minutes if 'confirm' call is not issued. """ @@ -177,10 +187,10 @@ def commit_confirm(self, minutes: int=DEFAULT_TIME_MINUTES, else: W = '' - prompt_str = f''' + prompt_str = f""" commit-confirm will automatically reboot in {minutes} minutes unless changes are confirmed.\n -Proceed ?''' +Proceed ?""" prompt_str = W + prompt_str if not no_prompt and not ask_yes_no(prompt_str, default=True): msg = 'commit-confirm canceled' @@ -199,7 +209,7 @@ def commit_confirm(self, minutes: int=DEFAULT_TIME_MINUTES, msg = f'Initialized commit-confirm; {minutes} minutes to confirm before reboot' return msg, 0 - def confirm(self) -> Tuple[str,int]: + def confirm(self) -> Tuple[str, int]: """Do not reboot to saved config following 'commit-confirm'. Update commit log and archive. """ @@ -227,9 +237,8 @@ def confirm(self) -> Tuple[str,int]: msg = 'Reboot timer stopped' return msg, 0 - def revert(self) -> Tuple[str,int]: - """Reboot to saved config, dropping commits from 'commit-confirm'. - """ + def revert(self) -> Tuple[str, int]: + """Reboot to saved config, dropping commits from 'commit-confirm'.""" _ = self._read_tmp_log_entry() # archived config will be reverted on boot @@ -239,13 +248,14 @@ def revert(self) -> Tuple[str,int]: return '', 0 - def rollback(self, rev: int, no_prompt: bool=False) -> Tuple[str,int]: - """Reboot to config revision 'rev'. - """ + def rollback(self, rev: int, no_prompt: bool = False) -> Tuple[str, int]: + """Reboot to config revision 'rev'.""" msg = '' if not self._check_revision_number(rev): - msg = f'Invalid revision number {rev}: must be 0 < rev < {self.num_revisions}' + msg = ( + f'Invalid revision number {rev}: must be 0 < rev < {self.num_revisions}' + ) return msg, 1 prompt_str = 'Proceed with reboot ?' @@ -274,12 +284,13 @@ def rollback(self, rev: int, no_prompt: bool=False) -> Tuple[str,int]: return msg, 0 def rollback_soft(self, rev: int): - """Rollback without reboot (rollback-soft) - """ + """Rollback without reboot (rollback-soft)""" msg = '' if not self._check_revision_number(rev): - msg = f'Invalid revision number {rev}: must be 0 < rev < {self.num_revisions}' + msg = ( + f'Invalid revision number {rev}: must be 0 < rev < {self.num_revisions}' + ) return msg, 1 rollback_ct = self._get_config_tree_revision(rev) @@ -292,9 +303,13 @@ def rollback_soft(self, rev: int): return msg, 0 - def compare(self, saved: bool=False, commands: bool=False, - rev1: Optional[int]=None, - rev2: Optional[int]=None) -> Tuple[str,int]: + def compare( + self, + saved: bool = False, + commands: bool = False, + rev1: Optional[int] = None, + rev2: Optional[int] = None, + ) -> Tuple[str, int]: """General compare function for config file revisions: revision n vs. revision m; working version vs. active version; or working version vs. saved version. @@ -335,7 +350,7 @@ def compare(self, saved: bool=False, commands: bool=False, return msg, 0 - def wrap_compare(self, options) -> Tuple[str,int]: + def wrap_compare(self, options) -> Tuple[str, int]: """Interface to vyatta-cfg-run: args collected as 'options' to parse for compare. 
""" @@ -343,7 +358,7 @@ def wrap_compare(self, options) -> Tuple[str,int]: r1 = None r2 = None if 'commands' in options: - cmnds=True + cmnds = True options.remove('commands') for i in options: if not i.isnumeric(): @@ -358,8 +373,7 @@ def wrap_compare(self, options) -> Tuple[str,int]: # Initialization and post-commit hooks for conf-mode # def initialize_revision(self): - """Initialize config archive, logrotate conf, and commit log. - """ + """Initialize config archive, logrotate conf, and commit log.""" mask = os.umask(0o002) os.makedirs(archive_dir, exist_ok=True) json_dir = os.path.dirname(config_json) @@ -371,8 +385,7 @@ def initialize_revision(self): self._add_logrotate_conf() - if (not os.path.exists(commit_log_file) or - self._get_number_of_revisions() == 0): + if not os.path.exists(commit_log_file) or self._get_number_of_revisions() == 0: user = self._get_user() via = 'init' comment = '' @@ -399,8 +412,7 @@ def commit_revision(self): self._update_archive() def commit_archive(self): - """Upload config to remote archive. - """ + """Upload config to remote archive.""" from vyos.remote import upload hostname = self.hostname @@ -410,20 +422,23 @@ def commit_archive(self): source_address = self.source_address if self.effective_locations: - print("Archiving config...") + print('Archiving config...') for location in self.effective_locations: url = urlsplit(location) - _, _, netloc = url.netloc.rpartition("@") + _, _, netloc = url.netloc.rpartition('@') redacted_location = urlunsplit(url._replace(netloc=netloc)) - print(f" {redacted_location}", end=" ", flush=True) - upload(archive_config_file, f'{location}/{remote_file}', - source_host=source_address) + print(f' {redacted_location}', end=' ', flush=True) + upload( + archive_config_file, + f'{location}/{remote_file}', + source_host=source_address, + ) # op-mode functions # def get_raw_log_data(self) -> list: """Return list of dicts of log data: - keys: [timestamp, user, commit_via, commit_comment] + keys: [timestamp, user, commit_via, commit_comment] """ log = self._get_log_entries() res_l = [] @@ -435,20 +450,20 @@ def get_raw_log_data(self) -> list: @staticmethod def format_log_data(data: list) -> str: - """Return formatted log data as str. - """ + """Return formatted log data as str.""" res_l = [] - for l_no, l in enumerate(data): - time_d = datetime.fromtimestamp(int(l['timestamp'])) - time_str = time_d.strftime("%Y-%m-%d %H:%M:%S") + for l_no, l_val in enumerate(data): + time_d = datetime.fromtimestamp(int(l_val['timestamp'])) + time_str = time_d.strftime('%Y-%m-%d %H:%M:%S') - res_l.append([l_no, time_str, - f"by {l['user']}", f"via {l['commit_via']}"]) + res_l.append( + [l_no, time_str, f"by {l_val['user']}", f"via {l_val['commit_via']}"] + ) - if l['commit_comment'] != 'commit': # default comment - res_l.append([None, l['commit_comment']]) + if l_val['commit_comment'] != 'commit': # default comment + res_l.append([None, l_val['commit_comment']]) - ret = tabulate(res_l, tablefmt="plain") + ret = tabulate(res_l, tablefmt='plain') return ret @staticmethod @@ -459,23 +474,25 @@ def format_log_data_brief(data: list) -> str: 'rollback'. 
""" res_l = [] - for l_no, l in enumerate(data): - time_d = datetime.fromtimestamp(int(l['timestamp'])) - time_str = time_d.strftime("%Y-%m-%d %H:%M:%S") + for l_no, l_val in enumerate(data): + time_d = datetime.fromtimestamp(int(l_val['timestamp'])) + time_str = time_d.strftime('%Y-%m-%d %H:%M:%S') - res_l.append(['\t', l_no, time_str, - f"{l['user']}", f"by {l['commit_via']}"]) + res_l.append( + ['\t', l_no, time_str, f"{l_val['user']}", f"by {l_val['commit_via']}"] + ) - ret = tabulate(res_l, tablefmt="plain") + ret = tabulate(res_l, tablefmt='plain') return ret - def show_commit_diff(self, rev: int, rev2: Optional[int]=None, - commands: bool=False) -> str: + def show_commit_diff( + self, rev: int, rev2: Optional[int] = None, commands: bool = False + ) -> str: """Show commit diff at revision number, compared to previous revision, or to another revision. """ if rev2 is None: - out, _ = self.compare(commands=commands, rev1=rev, rev2=(rev+1)) + out, _ = self.compare(commands=commands, rev1=rev, rev2=(rev + 1)) return out out, _ = self.compare(commands=commands, rev1=rev, rev2=rev2) @@ -519,8 +536,9 @@ def _add_logrotate_conf(self): conf_file.chmod(0o644) def _archive_active_config(self) -> bool: - save_to_tmp = (boot_configuration_complete() or not - os.path.isfile(archive_config_file)) + save_to_tmp = boot_configuration_complete() or not os.path.isfile( + archive_config_file + ) mask = os.umask(0o113) ext = os.getpid() @@ -560,15 +578,14 @@ def _archive_active_config(self) -> bool: @staticmethod def _update_archive(): - cmd = f"sudo logrotate -f -s {logrotate_state} {logrotate_conf}" + cmd = f'sudo logrotate -f -s {logrotate_state} {logrotate_conf}' rc, out = rc_cmd(cmd) if rc != 0: logger.critical(f'logrotate failure: {out}') @staticmethod def _get_log_entries() -> list: - """Return lines of commit log as list of strings - """ + """Return lines of commit log as list of strings""" entries = [] if os.path.exists(commit_log_file): with open(commit_log_file) as f: @@ -577,8 +594,8 @@ def _get_log_entries() -> list: return entries def _get_number_of_revisions(self) -> int: - l = self._get_log_entries() - return len(l) + log_entries = self._get_log_entries() + return len(log_entries) def _check_revision_number(self, rev: int) -> bool: self.num_revisions = self._get_number_of_revisions() @@ -599,9 +616,14 @@ def _get_user() -> str: user = 'unknown' return user - def _new_log_entry(self, user: str='', commit_via: str='', - commit_comment: str='', timestamp: Optional[int]=None, - tmp_file: str=None) -> Optional[str]: + def _new_log_entry( + self, + user: str = '', + commit_via: str = '', + commit_comment: str = '', + timestamp: Optional[int] = None, + tmp_file: str = None, + ) -> Optional[str]: # Format log entry and return str or write to file. # # Usage is within a post-commit hook, using env values. 
In case of @@ -647,12 +669,12 @@ def _get_log_entry(line: str) -> dict: logger.critical(f'Invalid log format {line}') return {} - timestamp, user, commit_via, commit_comment = ( - tuple(line.strip().strip('|').split('|'))) + timestamp, user, commit_via, commit_comment = tuple( + line.strip().strip('|').split('|') + ) commit_comment = commit_comment.replace('%%', '|') - d = dict(zip(keys, [user, commit_via, - commit_comment, timestamp])) + d = dict(zip(keys, [user, commit_via, commit_comment, timestamp])) return d @@ -666,13 +688,21 @@ def _read_tmp_log_entry(self) -> dict: return self._get_log_entry(entry) - def _add_log_entry(self, user: str='', commit_via: str='', - commit_comment: str='', timestamp: Optional[int]=None): + def _add_log_entry( + self, + user: str = '', + commit_via: str = '', + commit_comment: str = '', + timestamp: Optional[int] = None, + ): mask = os.umask(0o113) - entry = self._new_log_entry(user=user, commit_via=commit_via, - commit_comment=commit_comment, - timestamp=timestamp) + entry = self._new_log_entry( + user=user, + commit_via=commit_via, + commit_comment=commit_comment, + timestamp=timestamp, + ) log_entries = self._get_log_entries() log_entries.insert(0, entry) @@ -687,6 +717,7 @@ def _add_log_entry(self, user: str='', commit_via: str='', os.umask(mask) + # entry_point for console script # def run(): @@ -706,43 +737,53 @@ def run(): parser = ArgumentParser() subparsers = parser.add_subparsers(dest='subcommand') - commit_confirm = subparsers.add_parser('commit_confirm', - help="Commit with opt-out reboot to saved config") - commit_confirm.add_argument('-t', dest='minutes', type=int, - default=DEFAULT_TIME_MINUTES, - help="Minutes until reboot, unless 'confirm'") - commit_confirm.add_argument('-y', dest='no_prompt', action='store_true', - help="Execute without prompt") - - subparsers.add_parser('confirm', help="Confirm commit") - subparsers.add_parser('revert', help="Revert commit-confirm") - - rollback = subparsers.add_parser('rollback', - help="Rollback to earlier config") - rollback.add_argument('--rev', type=int, - help="Revision number for rollback") - rollback.add_argument('-y', dest='no_prompt', action='store_true', - help="Excute without prompt") - - rollback_soft = subparsers.add_parser('rollback_soft', - help="Rollback to earlier config") - rollback_soft.add_argument('--rev', type=int, - help="Revision number for rollback") - - compare = subparsers.add_parser('compare', - help="Compare config files") - - compare.add_argument('--saved', action='store_true', - help="Compare session config with saved config") - compare.add_argument('--commands', action='store_true', - help="Show difference between commands") - compare.add_argument('--rev1', type=int, default=None, - help="Compare revision with session config or other revision") - compare.add_argument('--rev2', type=int, default=None, - help="Compare revisions") - - wrap_compare = subparsers.add_parser('wrap_compare', - help="Wrapper interface for vyatta-cfg-run") + commit_confirm = subparsers.add_parser( + 'commit_confirm', help='Commit with opt-out reboot to saved config' + ) + commit_confirm.add_argument( + '-t', + dest='minutes', + type=int, + default=DEFAULT_TIME_MINUTES, + help="Minutes until reboot, unless 'confirm'", + ) + commit_confirm.add_argument( + '-y', dest='no_prompt', action='store_true', help='Execute without prompt' + ) + + subparsers.add_parser('confirm', help='Confirm commit') + subparsers.add_parser('revert', help='Revert commit-confirm') + + rollback = 
subparsers.add_parser('rollback', help='Rollback to earlier config') + rollback.add_argument('--rev', type=int, help='Revision number for rollback') + rollback.add_argument( + '-y', dest='no_prompt', action='store_true', help='Excute without prompt' + ) + + rollback_soft = subparsers.add_parser( + 'rollback_soft', help='Rollback to earlier config' + ) + rollback_soft.add_argument('--rev', type=int, help='Revision number for rollback') + + compare = subparsers.add_parser('compare', help='Compare config files') + + compare.add_argument( + '--saved', action='store_true', help='Compare session config with saved config' + ) + compare.add_argument( + '--commands', action='store_true', help='Show difference between commands' + ) + compare.add_argument( + '--rev1', + type=int, + default=None, + help='Compare revision with session config or other revision', + ) + compare.add_argument('--rev2', type=int, default=None, help='Compare revisions') + + wrap_compare = subparsers.add_parser( + 'wrap_compare', help='Wrapper interface for vyatta-cfg-run' + ) wrap_compare.add_argument('--options', nargs=REMAINDER) args = vars(parser.parse_args()) diff --git a/python/vyos/configsession.py b/python/vyos/configsession.py index 7d51b94e401..c0d3c7ecb27 100644 --- a/python/vyos/configsession.py +++ b/python/vyos/configsession.py @@ -32,15 +32,34 @@ LOAD_CONFIG = ['/bin/cli-shell-api', 'loadFile'] MIGRATE_LOAD_CONFIG = ['/usr/libexec/vyos/vyos-load-config.py'] SAVE_CONFIG = ['/usr/libexec/vyos/vyos-save-config.py'] -INSTALL_IMAGE = ['/usr/libexec/vyos/op_mode/image_installer.py', - '--action', 'add', '--no-prompt', '--image-path'] +INSTALL_IMAGE = [ + '/usr/libexec/vyos/op_mode/image_installer.py', + '--action', + 'add', + '--no-prompt', + '--image-path', +] IMPORT_PKI = ['/opt/vyatta/bin/vyatta-op-cmd-wrapper', 'import'] -IMPORT_PKI_NO_PROMPT = ['/usr/libexec/vyos/op_mode/pki.py', - '--action', 'import', '--no-prompt'] -REMOVE_IMAGE = ['/usr/libexec/vyos/op_mode/image_manager.py', - '--action', 'delete', '--no-prompt', '--image-name'] -SET_DEFAULT_IMAGE = ['/usr/libexec/vyos/op_mode/image_manager.py', - '--action', 'set', '--no-prompt', '--image-name'] +IMPORT_PKI_NO_PROMPT = [ + '/usr/libexec/vyos/op_mode/pki.py', + '--action', + 'import', + '--no-prompt', +] +REMOVE_IMAGE = [ + '/usr/libexec/vyos/op_mode/image_manager.py', + '--action', + 'delete', + '--no-prompt', + '--image-name', +] +SET_DEFAULT_IMAGE = [ + '/usr/libexec/vyos/op_mode/image_manager.py', + '--action', + 'set', + '--no-prompt', + '--image-name', +] GENERATE = ['/opt/vyatta/bin/vyatta-op-cmd-wrapper', 'generate'] SHOW = ['/opt/vyatta/bin/vyatta-op-cmd-wrapper', 'show'] RESET = ['/opt/vyatta/bin/vyatta-op-cmd-wrapper', 'reset'] @@ -50,7 +69,8 @@ OP_CMD_DELETE = ['/opt/vyatta/bin/vyatta-op-cmd-wrapper', 'delete'] # Default "commit via" string -APP = "vyos-http-api" +APP = 'vyos-http-api' + # When started as a service rather than from a user shell, # the process lacks the VyOS-specific environment that comes @@ -61,7 +81,7 @@ def inject_vyos_env(env): env['VYATTA_USER_LEVEL_DIR'] = '/opt/vyatta/etc/shell/level/admin' env['VYATTA_PROCESS_CLIENT'] = 'gui2_rest' env['VYOS_HEADLESS_CLIENT'] = 'vyos_http_api' - env['vyatta_bindir']= '/opt/vyatta/bin' + env['vyatta_bindir'] = '/opt/vyatta/bin' env['vyatta_cfg_templates'] = '/opt/vyatta/share/vyatta-cfg/templates' env['vyatta_configdir'] = directories['vyos_configdir'] env['vyatta_datadir'] = '/opt/vyatta/share' @@ -78,7 +98,7 @@ def inject_vyos_env(env): env['vyos_configdir'] = 
directories['vyos_configdir'] env['vyos_conf_scripts_dir'] = '/usr/libexec/vyos/conf_mode' env['vyos_datadir'] = '/opt/vyatta/share' - env['vyos_datarootdir']= '/opt/vyatta/share' + env['vyos_datarootdir'] = '/opt/vyatta/share' env['vyos_libdir'] = '/opt/vyatta/lib' env['vyos_libexec_dir'] = '/usr/libexec/vyos' env['vyos_op_scripts_dir'] = '/usr/libexec/vyos/op_mode' @@ -102,6 +122,7 @@ class ConfigSession(object): """ The write API of VyOS. """ + def __init__(self, session_id, app=APP): """ Creates a new config session. @@ -116,7 +137,9 @@ def __init__(self, session_id, app=APP): and used the PID for the session identifier. """ - env_str = subprocess.check_output([CLI_SHELL_API, 'getSessionEnv', str(session_id)]) + env_str = subprocess.check_output( + [CLI_SHELL_API, 'getSessionEnv', str(session_id)] + ) self.__session_id = session_id # Extract actual variables from the chunk of shell it outputs @@ -129,20 +152,39 @@ def __init__(self, session_id, app=APP): session_env[k] = v self.__session_env = session_env - self.__session_env["COMMIT_VIA"] = app + self.__session_env['COMMIT_VIA'] = app self.__run_command([CLI_SHELL_API, 'setupSession']) def __del__(self): try: - output = subprocess.check_output([CLI_SHELL_API, 'teardownSession'], env=self.__session_env).decode().strip() + output = ( + subprocess.check_output( + [CLI_SHELL_API, 'teardownSession'], env=self.__session_env + ) + .decode() + .strip() + ) if output: - print("cli-shell-api teardownSession output for sesion {0}: {1}".format(self.__session_id, output), file=sys.stderr) + print( + 'cli-shell-api teardownSession output for sesion {0}: {1}'.format( + self.__session_id, output + ), + file=sys.stderr, + ) except Exception as e: - print("Could not tear down session {0}: {1}".format(self.__session_id, e), file=sys.stderr) + print( + 'Could not tear down session {0}: {1}'.format(self.__session_id, e), + file=sys.stderr, + ) def __run_command(self, cmd_list): - p = subprocess.Popen(cmd_list, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=self.__session_env) + p = subprocess.Popen( + cmd_list, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + env=self.__session_env, + ) (stdout_data, stderr_data) = p.communicate() output = stdout_data.decode() result = p.wait() @@ -204,7 +246,7 @@ def load_section_tree(self, mask: dict, d: dict): def comment(self, path, value=None): if not value: - value = [""] + value = [''] else: value = [value] self.__run_command([COMMENT] + path + value) diff --git a/src/conf_mode/system_config-management.py b/src/conf_mode/system_config-management.py index c681a84052d..99f25bef6d7 100755 --- a/src/conf_mode/system_config-management.py +++ b/src/conf_mode/system_config-management.py @@ -22,6 +22,7 @@ from vyos.config_mgmt import ConfigMgmt from vyos.config_mgmt import commit_post_hook_dir, commit_hooks + def get_config(config=None): if config: conf = config @@ -36,22 +37,24 @@ def get_config(config=None): return mgmt + def verify(_mgmt): return + def generate(mgmt): if mgmt is None: return mgmt.initialize_revision() + def apply(mgmt): if mgmt is None: return locations = mgmt.locations - archive_target = os.path.join(commit_post_hook_dir, - commit_hooks['commit_archive']) + archive_target = os.path.join(commit_post_hook_dir, commit_hooks['commit_archive']) if locations: try: os.symlink('/usr/bin/config-mgmt', archive_target) @@ -68,8 +71,9 @@ def apply(mgmt): raise ConfigError from exc revisions = mgmt.max_revisions - revision_target = os.path.join(commit_post_hook_dir, - commit_hooks['commit_revision']) 
+ revision_target = os.path.join( + commit_post_hook_dir, commit_hooks['commit_revision'] + ) if revisions > 0: try: os.symlink('/usr/bin/config-mgmt', revision_target) @@ -85,6 +89,7 @@ def apply(mgmt): except OSError as exc: raise ConfigError from exc + if __name__ == '__main__': try: c = get_config() diff --git a/src/helpers/commit-confirm-notify.py b/src/helpers/commit-confirm-notify.py index 8d7626c7853..69dda511232 100755 --- a/src/helpers/commit-confirm-notify.py +++ b/src/helpers/commit-confirm-notify.py @@ -6,15 +6,19 @@ # Minutes before reboot to trigger notification. intervals = [1, 5, 15, 60] + def notify(interval): - s = "" if interval == 1 else "s" + s = '' if interval == 1 else 's' time.sleep((minutes - interval) * 60) - message = ('"[commit-confirm] System is going to reboot in ' - f'{interval} minute{s} to rollback the last commit.\n' - 'Confirm your changes to cancel the reboot."') - os.system("wall -n " + message) + message = ( + '"[commit-confirm] System is going to reboot in ' + f'{interval} minute{s} to rollback the last commit.\n' + 'Confirm your changes to cancel the reboot."' + ) + os.system('wall -n ' + message) + -if __name__ == "__main__": +if __name__ == '__main__': # Must be run as root to call wall(1) without a banner. if len(sys.argv) != 2 or os.getuid() != 0: print('This script requires superuser privileges.', file=sys.stderr) @@ -27,5 +31,5 @@ def notify(interval): for interval in sorted(intervals, reverse=True): if minutes >= interval: notify(interval) - minutes -= (minutes - interval) + minutes -= minutes - interval exit(0) From 4d5f2a58bbf5a366c43871ef27b75d31b3b2a114 Mon Sep 17 00:00:00 2001 From: John Estabrook Date: Wed, 2 Oct 2024 12:36:41 -0500 Subject: [PATCH 25/85] config-mgmt: T5976: add option for commit-confirm to use 'soft' rollback Commit-confirm will restore a previous configuration if a confirmation is not received in N minutes. Traditionally, this was restored by a reboot into the last configuration on disk; add a configurable option to reload the last completed commit without a reboot. The default setting is to reboot. 
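For reference, the reload path added by this patch replays the most recent completed revision through a config session instead of rebooting. Below is a minimal, illustrative sketch of that flow, mirroring the revert_soft()/load_explicit() calls introduced here; it is not part of the patch and assumes a VyOS system with a non-zero commit-revisions setting, since commits made under commit-confirm only enter the revision log once confirmed:

    #!/usr/bin/env python3
    # Illustrative sketch of the soft-revert flow introduced by this patch.
    import os

    from vyos.config_mgmt import get_config_tree_revision
    from vyos.configsession import ConfigSession, ConfigSessionError

    # Revision 0 is the last confirmed commit; unconfirmed commit-confirm
    # commits are not appended to the revision log.
    revert_ct = get_config_tree_revision(0)

    session = ConfigSession(os.getpid(), app='config-mgmt')
    try:
        # load_explicit() replaces the session config with the given tree
        # (rather than merging it), so the follow-up commit restores it
        # in place without a reboot.
        session.load_explicit(revert_ct)
        session.commit()
    except ConfigSessionError as e:
        print(f'soft revert failed: {e}')

On the CLI side the behaviour is selected with "set system config-management commit-confirm reload", which verify() only accepts together with a non-zero commit-revisions value.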
--- .../system_config-management.xml.in | 20 +++++ python/vyos/config_mgmt.py | 83 ++++++++++++++++--- python/vyos/configsession.py | 9 ++ src/conf_mode/system_config-management.py | 6 +- src/helpers/commit-confirm-notify.py | 42 +++++++--- 5 files changed, 137 insertions(+), 23 deletions(-) diff --git a/interface-definitions/system_config-management.xml.in b/interface-definitions/system_config-management.xml.in index e666633b78f..b8fb6cdb5e3 100644 --- a/interface-definitions/system_config-management.xml.in +++ b/interface-definitions/system_config-management.xml.in @@ -67,6 +67,26 @@ Number of revisions must be between 0 and 65535 + + + Commit confirm rollback type if no confirmation + + reload reboot + + + reload + Reload previous configuration if not confirmed + + + reboot + Reboot to saved configuration if not confirmed + + + (reload|reboot) + + + reboot + diff --git a/python/vyos/config_mgmt.py b/python/vyos/config_mgmt.py index 920a19fec35..851ac21341f 100644 --- a/python/vyos/config_mgmt.py +++ b/python/vyos/config_mgmt.py @@ -33,6 +33,8 @@ from vyos.config import Config from vyos.configtree import ConfigTree from vyos.configtree import ConfigTreeError +from vyos.configsession import ConfigSession +from vyos.configsession import ConfigSessionError from vyos.configtree import show_diff from vyos.load_config import load from vyos.load_config import LoadConfigError @@ -139,13 +141,19 @@ def __init__(self, session_env=None, config=None): config = Config() d = config.get_config_dict( - ['system', 'config-management'], key_mangling=('-', '_'), get_first_key=True + ['system', 'config-management'], + key_mangling=('-', '_'), + get_first_key=True, + with_defaults=True, ) self.max_revisions = int(d.get('commit_revisions', 0)) self.num_revisions = 0 self.locations = d.get('commit_archive', {}).get('location', []) self.source_address = d.get('commit_archive', {}).get('source_address', '') + self.reboot_unconfirmed = bool(d.get('commit_confirm') == 'reboot') + self.config_dict = d + if config.exists(['system', 'host-name']): self.hostname = config.return_value(['system', 'host-name']) if config.exists(['system', 'domain-name']): @@ -175,42 +183,63 @@ def __init__(self, session_env=None, config=None): def commit_confirm( self, minutes: int = DEFAULT_TIME_MINUTES, no_prompt: bool = False ) -> Tuple[str, int]: - """Commit with reboot to saved config in 'minutes' minutes if + """Commit with reload/reboot to saved config in 'minutes' minutes if 'confirm' call is not issued. """ if is_systemd_service_active(f'{timer_name}.timer'): msg = 'Another confirm is pending' return msg, 1 - if unsaved_commits(): + if self.reboot_unconfirmed and unsaved_commits(): W = '\nYou should save previous commits before commit-confirm !\n' else: W = '' - prompt_str = f""" + if self.reboot_unconfirmed: + prompt_str = f""" commit-confirm will automatically reboot in {minutes} minutes unless changes -are confirmed.\n +are confirmed. +Proceed ?""" + else: + prompt_str = f""" +commit-confirm will automatically reload previous config in {minutes} minutes +unless changes are confirmed. 
Proceed ?""" + prompt_str = W + prompt_str if not no_prompt and not ask_yes_no(prompt_str, default=True): msg = 'commit-confirm canceled' return msg, 1 - action = 'sg vyattacfg "/usr/bin/config-mgmt revert"' + if self.reboot_unconfirmed: + action = 'sg vyattacfg "/usr/bin/config-mgmt revert"' + else: + action = 'sg vyattacfg "/usr/bin/config-mgmt revert_soft"' + cmd = f'sudo systemd-run --quiet --on-active={minutes}m --unit={timer_name} {action}' rc, out = rc_cmd(cmd) if rc != 0: raise ConfigMgmtError(out) # start notify - cmd = f'sudo -b /usr/libexec/vyos/commit-confirm-notify.py {minutes}' + if self.reboot_unconfirmed: + cmd = ( + f'sudo -b /usr/libexec/vyos/commit-confirm-notify.py --reboot {minutes}' + ) + else: + cmd = f'sudo -b /usr/libexec/vyos/commit-confirm-notify.py {minutes}' + os.system(cmd) - msg = f'Initialized commit-confirm; {minutes} minutes to confirm before reboot' + if self.reboot_unconfirmed: + msg = f'Initialized commit-confirm; {minutes} minutes to confirm before reboot' + else: + msg = f'Initialized commit-confirm; {minutes} minutes to confirm before reload' + return msg, 0 def confirm(self) -> Tuple[str, int]: - """Do not reboot to saved config following 'commit-confirm'. + """Do not reboot/reload to saved/completed config following 'commit-confirm'. Update commit log and archive. """ if not is_systemd_service_active(f'{timer_name}.timer'): @@ -234,7 +263,11 @@ def confirm(self) -> Tuple[str, int]: self._add_log_entry(**entry) self._update_archive() - msg = 'Reboot timer stopped' + if self.reboot_unconfirmed: + msg = 'Reboot timer stopped' + else: + msg = 'Reload timer stopped' + return msg, 0 def revert(self) -> Tuple[str, int]: @@ -248,6 +281,28 @@ def revert(self) -> Tuple[str, int]: return '', 0 + def revert_soft(self) -> Tuple[str, int]: + """Reload last revision, dropping commits from 'commit-confirm'.""" + _ = self._read_tmp_log_entry() + + # commits under commit-confirm are not added to revision list unless + # confirmed, hence a soft revert is to revision 0 + revert_ct = self._get_config_tree_revision(0) + + mask = os.umask(0o002) + session = ConfigSession(os.getpid(), app='config-mgmt') + + try: + session.load_explicit(revert_ct) + session.commit() + except ConfigSessionError as e: + raise ConfigMgmtError(e) from e + finally: + os.umask(mask) + del session + + return '', 0 + def rollback(self, rev: int, no_prompt: bool = False) -> Tuple[str, int]: """Reboot to config revision 'rev'.""" msg = '' @@ -684,7 +739,10 @@ def _read_tmp_log_entry(self) -> dict: entry = f.read() os.unlink(tmp_log_entry) except OSError as e: - logger.critical(f'error on file {tmp_log_entry}: {e}') + logger.info(f'error on file {tmp_log_entry}: {e}') + # fail gracefully in corner case: + # delete commit-revisions; commit-confirm + return {} return self._get_log_entry(entry) @@ -752,7 +810,8 @@ def run(): ) subparsers.add_parser('confirm', help='Confirm commit') - subparsers.add_parser('revert', help='Revert commit-confirm') + subparsers.add_parser('revert', help='Revert commit-confirm with reboot') + subparsers.add_parser('revert_soft', help='Revert commit-confirm with reload') rollback = subparsers.add_parser('rollback', help='Rollback to earlier config') rollback.add_argument('--rev', type=int, help='Revision number for rollback') diff --git a/python/vyos/configsession.py b/python/vyos/configsession.py index c0d3c7ecb27..9c56d246a5c 100644 --- a/python/vyos/configsession.py +++ b/python/vyos/configsession.py @@ -268,6 +268,15 @@ def load_config(self, file_path): out = 
self.__run_command(LOAD_CONFIG + [file_path]) return out + def load_explicit(self, file_path): + from vyos.load_config import load + from vyos.load_config import LoadConfigError + + try: + load(file_path, switch='explicit') + except LoadConfigError as e: + raise ConfigSessionError(e) from e + def migrate_and_load_config(self, file_path): out = self.__run_command(MIGRATE_LOAD_CONFIG + [file_path]) return out diff --git a/src/conf_mode/system_config-management.py b/src/conf_mode/system_config-management.py index 99f25bef6d7..a1ee136cd95 100755 --- a/src/conf_mode/system_config-management.py +++ b/src/conf_mode/system_config-management.py @@ -38,7 +38,11 @@ def get_config(config=None): return mgmt -def verify(_mgmt): +def verify(mgmt): + d = mgmt.config_dict + if d.get('commit_confirm', '') == 'reload' and 'commit_revisions' not in d: + raise ConfigError('commit-confirm reload requires non-zero commit-revisions') + return diff --git a/src/helpers/commit-confirm-notify.py b/src/helpers/commit-confirm-notify.py index 69dda511232..af61676510f 100755 --- a/src/helpers/commit-confirm-notify.py +++ b/src/helpers/commit-confirm-notify.py @@ -2,34 +2,56 @@ import os import sys import time +from argparse import ArgumentParser # Minutes before reboot to trigger notification. intervals = [1, 5, 15, 60] +parser = ArgumentParser() +parser.add_argument( + 'minutes', type=int, help='minutes before rollback to trigger notification' +) +parser.add_argument( + '--reboot', action='store_true', help="use 'soft' rollback instead of reboot" +) -def notify(interval): + +def notify(interval, reboot=False): s = '' if interval == 1 else 's' time.sleep((minutes - interval) * 60) - message = ( - '"[commit-confirm] System is going to reboot in ' - f'{interval} minute{s} to rollback the last commit.\n' - 'Confirm your changes to cancel the reboot."' - ) - os.system('wall -n ' + message) + if reboot: + message = ( + '"[commit-confirm] System will reboot in ' + f'{interval} minute{s}\nto rollback the last commit.\n' + 'Confirm your changes to cancel the reboot."' + ) + os.system('wall -n ' + message) + else: + message = ( + '"[commit-confirm] System will reload previous config in ' + f'{interval} minute{s}\nto rollback the last commit.\n' + 'Confirm your changes to cancel the reload."' + ) + os.system('wall -n ' + message) if __name__ == '__main__': # Must be run as root to call wall(1) without a banner. - if len(sys.argv) != 2 or os.getuid() != 0: + if os.getuid() != 0: print('This script requires superuser privileges.', file=sys.stderr) exit(1) - minutes = int(sys.argv[1]) + + args = parser.parse_args() + + minutes = args.minutes + reboot = args.reboot + # Drop the argument from the list so that the notification # doesn't kick in immediately. if minutes in intervals: intervals.remove(minutes) for interval in sorted(intervals, reverse=True): if minutes >= interval: - notify(interval) + notify(interval, reboot=reboot) minutes -= minutes - interval exit(0) From 810d9a819ee378460a05ed6c7e064fb105058fc2 Mon Sep 17 00:00:00 2001 From: Christian Breunig Date: Sat, 5 Oct 2024 10:03:49 +0200 Subject: [PATCH 26/85] smoketest: T4576: add guard timeout for systemd in log level tests Systemd comes with a default of 5 restarts in 10 seconds policy, this limit can be hit by this reastart sequence, slow down a bit. 
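The numbers behind the fix: systemd defaults to DefaultStartLimitBurst=5 and DefaultStartLimitIntervalSec=10, so five service restarts within a ten second window put the unit into the start-limit-hit state. A small sketch of the pacing arithmetic follows (illustrative only, not part of the patch; the loop body stands in for the cli_set/cli_commit calls that trigger the restarts):

    from time import sleep

    BURST = 5    # DefaultStartLimitBurst
    WINDOW = 10  # DefaultStartLimitIntervalSec, in seconds

    # Spacing restarts slightly wider than WINDOW / BURST keeps any ten second
    # window below five starts; the fixed sleep(5) used in the test is simply a
    # more conservative application of the same idea.
    pause = WINDOW / BURST + 0.5

    for log_level in range(0, 5):
        # cli_set(...) / cli_commit() would restart the accel-ppp service here
        sleep(pause)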
--- smoketest/scripts/cli/base_accel_ppp_test.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/smoketest/scripts/cli/base_accel_ppp_test.py b/smoketest/scripts/cli/base_accel_ppp_test.py index c6f6cb80458..750702e98d6 100644 --- a/smoketest/scripts/cli/base_accel_ppp_test.py +++ b/smoketest/scripts/cli/base_accel_ppp_test.py @@ -14,6 +14,7 @@ import re +from time import sleep from base_vyostest_shim import VyOSUnitTestSHIM from configparser import ConfigParser @@ -641,6 +642,11 @@ def test_accel_log_level(self): for log_level in range(0, 5): self.set(['log', 'level', str(log_level)]) self.cli_commit() + + # Systemd comes with a default of 5 restarts in 10 seconds policy, + # this limit can be hit by this reastart sequence, slow down a bit + sleep(5) + # Validate configuration values conf = ConfigParser(allow_no_value=True) conf.read(self._config_file) From 1601ede1ecc1fdfae2170fa1fdb3f9715eade6a7 Mon Sep 17 00:00:00 2001 From: Christian Breunig Date: Sat, 5 Oct 2024 10:17:09 +0200 Subject: [PATCH 27/85] Debian: T973: add missing dependency on node-exporter package Extend commit a0c15a159 ("T973: add basic node_exporter implementation") by adding the required dependency to install node-exporter binary. --- debian/control | 3 +++ 1 file changed, 3 insertions(+) diff --git a/debian/control b/debian/control index f8cfb876c88..15fb5d72e48 100644 --- a/debian/control +++ b/debian/control @@ -235,6 +235,9 @@ Depends: squidclient, squidguard, # End "service webproxy" +# For "service monitoring node-exporter" + node-exporter, +# End "service monitoring node-exporter" # For "service monitoring telegraf" telegraf (>= 1.20), # End "service monitoring telegraf" From 490ee3ec5ba7ea28002890841eab8e46f775a129 Mon Sep 17 00:00:00 2001 From: Christian Breunig Date: Sat, 5 Oct 2024 20:34:43 +0200 Subject: [PATCH 28/85] static: T4283: fix missing f'ormat string This fixes the error message: Can not use both blackhole and reject for prefix "{prefix}"! Added in commit bb78f3a9ad28 ("static: T4283: support "reject" routes - emit an ICMP unreachable when matched") --- src/conf_mode/protocols_static.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/conf_mode/protocols_static.py b/src/conf_mode/protocols_static.py index a2373218ae8..430cc69d4f8 100755 --- a/src/conf_mode/protocols_static.py +++ b/src/conf_mode/protocols_static.py @@ -88,7 +88,7 @@ def verify(static): if {'blackhole', 'reject'} <= set(prefix_options): raise ConfigError(f'Can not use both blackhole and reject for '\ - 'prefix "{prefix}"!') + f'prefix "{prefix}"!') return None From 17c9b444ecc7883f1d8af01fefc8d00f6f1ef49b Mon Sep 17 00:00:00 2001 From: "Nataliia S." 
<81954790+natali-rs1985@users.noreply.github.com> Date: Sat, 5 Oct 2024 22:48:43 +0300 Subject: [PATCH 29/85] op-mode: T6753: Fix json output for mtr / monitor traceroute (#4122) --- src/op_mode/mtr.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/src/op_mode/mtr.py b/src/op_mode/mtr.py index de139f2fa7b..baf9672a1ef 100644 --- a/src/op_mode/mtr.py +++ b/src/op_mode/mtr.py @@ -178,6 +178,7 @@ 6: '/bin/mtr -6', } + class List(list): def first(self): return self.pop(0) if self else '' @@ -218,12 +219,15 @@ def complete(prefix): def convert(command, args): + to_json = False while args: shortname = args.first() longnames = complete(shortname) if len(longnames) != 1: expension_failure(shortname, longnames) longname = longnames[0] + if longname == 'json': + to_json = True if options[longname]['type'] == 'noarg': command = options[longname]['mtr'].format( command=command, value='') @@ -232,7 +236,7 @@ def convert(command, args): else: command = options[longname]['mtr'].format( command=command, value=args.first()) - return command + return command, to_json if __name__ == '__main__': @@ -242,7 +246,6 @@ def convert(command, args): if not host: sys.exit("mtr: Missing host") - if host == '--get-options' or host == '--get-options-nested': if host == '--get-options-nested': args.first() # pop monitor @@ -302,5 +305,8 @@ def convert(command, args): except ValueError: sys.exit(f'mtr: Unknown host: {host}') - command = convert(mtr[version], args) - call(f'{command} --curses --displaymode 0 {host}') + command, to_json = convert(mtr[version], args) + if to_json: + call(f'{command} {host}') + else: + call(f'{command} --curses --displaymode 0 {host}') From 1c83b39f30880b7e5297db3fffc3afd2cd699f55 Mon Sep 17 00:00:00 2001 From: Christian Breunig Date: Sun, 6 Oct 2024 15:47:48 +0200 Subject: [PATCH 30/85] T6759: add support for italian keymap set system option keyboard-layout it --- interface-definitions/system_option.xml.in | 8 ++++++-- smoketest/scripts/cli/test_system_option.py | 2 +- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/interface-definitions/system_option.xml.in b/interface-definitions/system_option.xml.in index dc9958ff5a6..064d9ff4029 100644 --- a/interface-definitions/system_option.xml.in +++ b/interface-definitions/system_option.xml.in @@ -88,7 +88,7 @@ System keyboard layout, type ISO2 - us uk fr de es fi jp106 no dk se-latin1 dvorak + us uk fr de es fi it jp106 no dk se-latin1 dvorak us @@ -114,6 +114,10 @@ fi Finland + + it + Italy + jp106 Japan @@ -135,7 +139,7 @@ Dvorak - (us|uk|fr|de|es|fi|jp106|no|dk|se-latin1|dvorak) + (us|uk|fr|de|es|fi|it|jp106|no|dk|se-latin1|dvorak) Invalid keyboard layout diff --git a/smoketest/scripts/cli/test_system_option.py b/smoketest/scripts/cli/test_system_option.py index ffb1d76aecc..ed028062866 100755 --- a/smoketest/scripts/cli/test_system_option.py +++ b/smoketest/scripts/cli/test_system_option.py @@ -96,4 +96,4 @@ def test_ssh_client_options(self): if __name__ == '__main__': - unittest.main(verbosity=2, failfast=True) + unittest.main(verbosity=2) From 72a132e46ad186dc1447be58b1b1daf65a01ee8c Mon Sep 17 00:00:00 2001 From: Christian Breunig Date: Sun, 6 Oct 2024 16:15:12 +0200 Subject: [PATCH 31/85] GitHub: T6494: add parallel step to run interface based smoketests --- .github/workflows/package-smoketest.yml | 41 ++++++++++++++++++++++--- 1 file changed, 37 insertions(+), 4 deletions(-) diff --git a/.github/workflows/package-smoketest.yml b/.github/workflows/package-smoketest.yml index 
3595bcf0ebd..91c968c8224 100644 --- a/.github/workflows/package-smoketest.yml +++ b/.github/workflows/package-smoketest.yml @@ -83,12 +83,43 @@ jobs: with: name: vyos-${{ needs.build_iso.outputs.build_version }} path: build - - name: VyOS CLI smoketests + - name: VyOS CLI smoketests (no interfaces) id: test shell: bash run: | set -e - sudo make test + sudo make test-no-interfaces + if [[ $? == 0 ]]; then + echo "exit_code=success" >> $GITHUB_OUTPUT + else + echo "exit_code=fail" >> $GITHUB_OUTPUT + fi + + test_interfaces_cli: + needs: build_iso + runs-on: ubuntu-24.04 + timeout-minutes: 180 + container: + image: vyos/vyos-build:current + options: --sysctl net.ipv6.conf.lo.disable_ipv6=0 --privileged + outputs: + exit_code: ${{ steps.test.outputs.exit_code }} + steps: + # We need the test script from vyos-build repo + - name: Clone vyos-build source code + uses: actions/checkout@v4 + with: + repository: vyos/vyos-build + - uses: actions/download-artifact@v4 + with: + name: vyos-${{ needs.build_iso.outputs.build_version }} + path: build + - name: VyOS CLI smoketests (interfaces only) + id: test + shell: bash + run: | + set -e + sudo make test-interfaces if [[ $? == 0 ]]; then echo "exit_code=success" >> $GITHUB_OUTPUT else @@ -191,6 +222,7 @@ jobs: result: needs: - test_smoketest_cli + - test_interfaces_cli - test_config_load - test_raid1_install - test_encrypted_config_tpm @@ -203,13 +235,14 @@ jobs: uses: mshick/add-pr-comment@v2 with: message: | - CI integration ${{ needs.test_smoketest_cli.outputs.exit_code == 'success' && needs.test_config_load.outputs.exit_code == 'success' && needs.test_raid1_install.outputs.exit_code == 'success' && '👍 passed!' || '❌ failed!' }} + CI integration ${{ needs.test_smoketest_cli.outputs.exit_code == 'success' && needs.test_interfaces_cli.outputs.exit_code == 'success' && needs.test_config_load.outputs.exit_code == 'success' && needs.test_raid1_install.outputs.exit_code == 'success' && '👍 passed!' || '❌ failed!' }} ### Details [CI logs](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}) - * CLI Smoketests ${{ needs.test_smoketest_cli.outputs.exit_code == 'success' && '👍 passed' || '❌ failed' }} + * CLI Smoketests (no interfaces) ${{ needs.test_smoketest_cli.outputs.exit_code == 'success' && '👍 passed' || '❌ failed' }} + * CLI Smoketests (interfaces only) ${{ needs.test_interfaces_cli.outputs.exit_code == 'success' && '👍 passed' || '❌ failed' }} * Config tests ${{ needs.test_config_load.outputs.exit_code == 'success' && '👍 passed' || '❌ failed' }} * RAID1 tests ${{ needs.test_raid1_install.outputs.exit_code == 'success' && '👍 passed' || '❌ failed' }} * TPM tests ${{ needs.test_encrypted_config_tpm.outputs.exit_code == 'success' && '👍 passed' || '❌ failed' }} From 875764b07f937fc599e2e62c667e7b811ddc2ed3 Mon Sep 17 00:00:00 2001 From: Christian Breunig Date: Sun, 6 Oct 2024 20:13:56 +0200 Subject: [PATCH 32/85] pki: T6481: auto import ACME certificate chain into CLI When using an ACME based certificate with VyOS we provide the necessary PEM files opaque in the background when using the internal tools. This however will not properly work with the CA chain portion, as the system is based on the "pki certificate acme" CLI node of a certificate but CA chains reside under "pki ca". This adds support for importing the PEM data of a CA chain issued via ACME into the "pki ca AUTOCHAIN_ certificate" subsystem so it can be queried by other daemons. 
Importing the chain only happens, when the chain was not already added manually by the user. ACME certificate chains that are automatically added to the CLI are all prefixed using AUTOCHAIN_certname so they can be consumed by any daemon. This also adds a safeguard when the intermediate CA changes, the referenced name on the CLI stays consitent for any pending daemon updates. --- src/conf_mode/pki.py | 33 ++++++++++++++++++++++++++++++++- src/op_mode/pki.py | 17 +++++++++++++---- 2 files changed, 45 insertions(+), 5 deletions(-) diff --git a/src/conf_mode/pki.py b/src/conf_mode/pki.py index 215b22b37f0..233d73ba897 100755 --- a/src/conf_mode/pki.py +++ b/src/conf_mode/pki.py @@ -27,6 +27,7 @@ from vyos.configdiff import Diff from vyos.configdiff import get_config_diff from vyos.defaults import directories +from vyos.pki import encode_certificate from vyos.pki import is_ca_certificate from vyos.pki import load_certificate from vyos.pki import load_public_key @@ -36,9 +37,11 @@ from vyos.pki import load_crl from vyos.pki import load_dh_parameters from vyos.utils.boot import boot_configuration_complete +from vyos.utils.configfs import add_cli_node from vyos.utils.dict import dict_search from vyos.utils.dict import dict_search_args from vyos.utils.dict import dict_search_recursive +from vyos.utils.file import read_file from vyos.utils.process import call from vyos.utils.process import cmd from vyos.utils.process import is_systemd_service_active @@ -446,9 +449,37 @@ def generate(pki): # Get foldernames under vyos_certbot_dir which each represent a certbot cert if os.path.exists(f'{vyos_certbot_dir}/live'): for cert in certbot_list_on_disk: + # ACME certificate is no longer in use by CLI remove it if cert not in certbot_list: - # certificate is no longer active on the CLI - remove it certbot_delete(cert) + continue + # ACME not enabled for individual certificate - bail out early + if 'acme' not in pki['certificate'][cert]: + continue + + # Read in ACME certificate chain information + tmp = read_file(f'{vyos_certbot_dir}/live/{cert}/chain.pem') + tmp = load_certificate(tmp, wrap_tags=False) + cert_chain_base64 = "".join(encode_certificate(tmp).strip().split("\n")[1:-1]) + + # Check if CA chain certificate is already present on CLI to avoid adding + # a duplicate. 
This only checks for manual added CA certificates and not + # auto added ones with the AUTOCHAIN_ prefix + autochain_prefix = 'AUTOCHAIN_' + ca_cert_present = False + if 'ca' in pki: + for ca_base64, cli_path in dict_search_recursive(pki['ca'], 'certificate'): + # Ignore automatic added CA certificates + if any(item.startswith(autochain_prefix) for item in cli_path): + continue + if cert_chain_base64 == ca_base64: + ca_cert_present = True + + if not ca_cert_present: + tmp = dict_search_args(pki, 'ca', f'{autochain_prefix}{cert}', 'certificate') + if not bool(tmp) or tmp != cert_chain_base64: + print(f'Adding/replacing automatically imported CA certificate for "{cert}" ...') + add_cli_node(['pki', 'ca', f'{autochain_prefix}{cert}', 'certificate'], value=cert_chain_base64) return None diff --git a/src/op_mode/pki.py b/src/op_mode/pki.py index ab613e5c45d..5652a5d7462 100755 --- a/src/op_mode/pki.py +++ b/src/op_mode/pki.py @@ -26,13 +26,22 @@ from vyos.config import Config from vyos.config import config_dict_mangle_acme -from vyos.pki import encode_certificate, encode_public_key, encode_private_key, encode_dh_parameters +from vyos.pki import encode_certificate +from vyos.pki import encode_public_key +from vyos.pki import encode_private_key +from vyos.pki import encode_dh_parameters from vyos.pki import get_certificate_fingerprint -from vyos.pki import create_certificate, create_certificate_request, create_certificate_revocation_list +from vyos.pki import create_certificate +from vyos.pki import create_certificate_request +from vyos.pki import create_certificate_revocation_list from vyos.pki import create_private_key from vyos.pki import create_dh_parameters -from vyos.pki import load_certificate, load_certificate_request, load_private_key -from vyos.pki import load_crl, load_dh_parameters, load_public_key +from vyos.pki import load_certificate +from vyos.pki import load_certificate_request +from vyos.pki import load_private_key +from vyos.pki import load_crl +from vyos.pki import load_dh_parameters +from vyos.pki import load_public_key from vyos.pki import verify_certificate from vyos.utils.io import ask_input from vyos.utils.io import ask_yes_no From 051cb6fbe2a9e4b8008bf21cec33eb2b8385305d Mon Sep 17 00:00:00 2001 From: Andrew Topp Date: Wed, 4 Sep 2024 04:09:46 +1000 Subject: [PATCH 33/85] pbr: T6430: Local IP rules routing into VRFs by name * This is the `policy local-route*` part of T6430, manipulating ip rules, another PR covers firewall-backed `policy route*` for similar functionality * Local PBR (policy local-route*) can only target table IDs up to 200 and the previous PR to extend the range was rejected * PBR with this PR can now also target VRFs directly by name, working around targeting problems for VRF table IDs outside the overlapping 100-200 range * Validation ensures rules can't target both a table ID and a VRF name (internally they are handled the same) * Relocated TestPolicyRoute.verify_rules() into VyOSUnitTestSHIM.TestCase, extended to allow lookups in other address families (IPv6 in the new tests). verify_rules() is used by original pbr and new lpbr smoketests in this PR. 
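The kernel-level effect is easiest to see as a plain ip rule whose lookup target is the VRF name rather than a numeric table ID; iproute2 registers VRF names as routing table aliases, so either form is valid after "lookup". A minimal sketch of the pattern the updated conf_mode script emits follows; the priority, prefix and VRF name are invented for illustration and are not taken from this PR:

    from vyos.utils.process import call

    def add_local_route_rule(prio: int, dst: str, target: str) -> None:
        # RIB rules treat the 'default' VRF as the 'main' routing table
        lookup = 'main' if target == 'default' else target
        call(f'ip rule add prio {prio} to {dst} lookup {lookup}')

    # hypothetical example: steer traffic for 198.51.100.0/24 into VRF "blue"
    add_local_route_rule(100, '198.51.100.0/24', 'blue')
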
--- .../policy_local-route.xml.in | 36 ++++ smoketest/scripts/cli/base_vyostest_shim.py | 12 ++ .../scripts/cli/test_policy_local-route.py | 171 ++++++++++++++++++ smoketest/scripts/cli/test_policy_route.py | 15 +- src/conf_mode/policy_local-route.py | 45 ++++- 5 files changed, 257 insertions(+), 22 deletions(-) create mode 100644 smoketest/scripts/cli/test_policy_local-route.py diff --git a/interface-definitions/policy_local-route.xml.in b/interface-definitions/policy_local-route.xml.in index 7a019154acf..5235a668af1 100644 --- a/interface-definitions/policy_local-route.xml.in +++ b/interface-definitions/policy_local-route.xml.in @@ -39,6 +39,24 @@ + + + VRF to forward packet with + + txt + VRF instance name + + + default + Forward into default global VRF + + + default + vrf name + + #include + + @@ -113,6 +131,24 @@ + + + VRF to forward packet with + + txt + VRF instance name + + + default + Forward into default global VRF + + + default + vrf name + + #include + + diff --git a/smoketest/scripts/cli/base_vyostest_shim.py b/smoketest/scripts/cli/base_vyostest_shim.py index 940306ac3dd..a383e596cb2 100644 --- a/smoketest/scripts/cli/base_vyostest_shim.py +++ b/smoketest/scripts/cli/base_vyostest_shim.py @@ -147,6 +147,18 @@ def verify_nftables_chain(self, nftables_search, table, chain, inverse=False, ar break self.assertTrue(not matched if inverse else matched, msg=search) + # Verify ip rule output + def verify_rules(self, rules_search, inverse=False, addr_family='inet'): + rule_output = cmd(f'ip -family {addr_family} rule show') + + for search in rules_search: + matched = False + for line in rule_output.split("\n"): + if all(item in line for item in search): + matched = True + break + self.assertTrue(not matched if inverse else matched, msg=search) + # standard construction; typing suggestion: https://stackoverflow.com/a/70292317 def ignore_warning(warning: Type[Warning]): import warnings diff --git a/smoketest/scripts/cli/test_policy_local-route.py b/smoketest/scripts/cli/test_policy_local-route.py new file mode 100644 index 00000000000..8d6ba40dcb6 --- /dev/null +++ b/smoketest/scripts/cli/test_policy_local-route.py @@ -0,0 +1,171 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2024 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+ +import unittest + +from base_vyostest_shim import VyOSUnitTestSHIM + +interface = 'eth0' +mark = '100' +table_id = '101' +extra_table_id = '102' +vrf_name = 'LPBRVRF' +vrf_rt_id = '202' + +class TestPolicyLocalRoute(VyOSUnitTestSHIM.TestCase): + @classmethod + def setUpClass(cls): + super(TestPolicyLocalRoute, cls).setUpClass() + # Clear out current configuration to allow running this test on a live system + cls.cli_delete(cls, ['policy', 'local-route']) + cls.cli_delete(cls, ['policy', 'local-route6']) + + cls.cli_set(cls, ['vrf', 'name', vrf_name, 'table', vrf_rt_id]) + + @classmethod + def tearDownClass(cls): + cls.cli_delete(cls, ['vrf', 'name', vrf_name]) + + super(TestPolicyLocalRoute, cls).tearDownClass() + + def tearDown(self): + self.cli_delete(['policy', 'local-route']) + self.cli_delete(['policy', 'local-route6']) + self.cli_commit() + + ip_rule_search = [ + [f'lookup {table_id}'] + ] + + self.verify_rules(ip_rule_search, inverse=True) + self.verify_rules(ip_rule_search, inverse=True, addr_family='inet6') + + def test_local_pbr_matching_criteria(self): + self.cli_set(['policy', 'local-route', 'rule', '4', 'inbound-interface', interface]) + self.cli_set(['policy', 'local-route', 'rule', '4', 'protocol', 'udp']) + self.cli_set(['policy', 'local-route', 'rule', '4', 'fwmark', mark]) + self.cli_set(['policy', 'local-route', 'rule', '4', 'destination', 'address', '198.51.100.0/24']) + self.cli_set(['policy', 'local-route', 'rule', '4', 'destination', 'port', '111']) + self.cli_set(['policy', 'local-route', 'rule', '4', 'source', 'address', '198.51.100.1']) + self.cli_set(['policy', 'local-route', 'rule', '4', 'source', 'port', '443']) + self.cli_set(['policy', 'local-route', 'rule', '4', 'set', 'table', table_id]) + + self.cli_set(['policy', 'local-route6', 'rule', '6', 'inbound-interface', interface]) + self.cli_set(['policy', 'local-route6', 'rule', '6', 'protocol', 'tcp']) + self.cli_set(['policy', 'local-route6', 'rule', '6', 'fwmark', mark]) + self.cli_set(['policy', 'local-route6', 'rule', '6', 'destination', 'address', '2001:db8::/64']) + self.cli_set(['policy', 'local-route6', 'rule', '6', 'destination', 'port', '123']) + self.cli_set(['policy', 'local-route6', 'rule', '6', 'source', 'address', '2001:db8::1']) + self.cli_set(['policy', 'local-route6', 'rule', '6', 'source', 'port', '80']) + self.cli_set(['policy', 'local-route6', 'rule', '6', 'set', 'table', table_id]) + + self.cli_commit() + + rule_lookup = f'lookup {table_id}' + rule_fwmark = 'fwmark ' + hex(int(mark)) + rule_interface = f'iif {interface}' + + ip4_rule_search = [ + ['from 198.51.100.1', 'to 198.51.100.0/24', rule_fwmark, rule_interface, 'ipproto udp', 'sport 443', 'dport 111', rule_lookup] + ] + + self.verify_rules(ip4_rule_search) + + ip6_rule_search = [ + ['from 2001:db8::1', 'to 2001:db8::/64', rule_fwmark, rule_interface, 'ipproto tcp', 'sport 80', 'dport 123', rule_lookup] + ] + + self.verify_rules(ip6_rule_search, addr_family='inet6') + + def test_local_pbr_rule_removal(self): + self.cli_set(['policy', 'local-route', 'rule', '1', 'destination', 'address', '198.51.100.1']) + self.cli_set(['policy', 'local-route', 'rule', '1', 'set', 'table', table_id]) + + self.cli_set(['policy', 'local-route', 'rule', '2', 'destination', 'address', '198.51.100.2']) + self.cli_set(['policy', 'local-route', 'rule', '2', 'set', 'table', table_id]) + + self.cli_set(['policy', 'local-route', 'rule', '3', 'destination', 'address', '198.51.100.3']) + self.cli_set(['policy', 'local-route', 'rule', '3', 'set', 'table', 
table_id]) + + self.cli_commit() + + rule_lookup = f'lookup {table_id}' + + ip_rule_search = [ + ['to 198.51.100.1', rule_lookup], + ['to 198.51.100.2', rule_lookup], + ['to 198.51.100.3', rule_lookup], + ] + + self.verify_rules(ip_rule_search) + + self.cli_delete(['policy', 'local-route', 'rule', '2']) + self.cli_commit() + + ip_rule_missing = [ + ['to 198.51.100.2', rule_lookup], + ] + + self.verify_rules(ip_rule_missing, inverse=True) + + def test_local_pbr_rule_changes(self): + self.cli_set(['policy', 'local-route', 'rule', '1', 'destination', 'address', '198.51.100.0/24']) + self.cli_set(['policy', 'local-route', 'rule', '1', 'set', 'table', table_id]) + + self.cli_commit() + + self.cli_set(['policy', 'local-route', 'rule', '1', 'set', 'table', extra_table_id]) + self.cli_commit() + + ip_rule_search_extra = [ + ['to 198.51.100.0/24', f'lookup {extra_table_id}'] + ] + + self.verify_rules(ip_rule_search_extra) + + ip_rule_search_orig = [ + ['to 198.51.100.0/24', f'lookup {table_id}'] + ] + + self.verify_rules(ip_rule_search_orig, inverse=True) + + self.cli_delete(['policy', 'local-route', 'rule', '1', 'set', 'table']) + self.cli_set(['policy', 'local-route', 'rule', '1', 'set', 'vrf', vrf_name]) + + self.cli_commit() + + ip_rule_search_vrf = [ + ['to 198.51.100.0/24', f'lookup {vrf_name}'] + ] + + self.verify_rules(ip_rule_search_extra, inverse=True) + self.verify_rules(ip_rule_search_vrf) + + def test_local_pbr_target_vrf(self): + self.cli_set(['policy', 'local-route', 'rule', '1', 'destination', 'address', '198.51.100.0/24']) + self.cli_set(['policy', 'local-route', 'rule', '1', 'set', 'vrf', vrf_name]) + + self.cli_commit() + + ip_rule_search = [ + ['to 198.51.100.0/24', f'lookup {vrf_name}'] + ] + + self.verify_rules(ip_rule_search) + + +if __name__ == '__main__': + unittest.main(verbosity=2) diff --git a/smoketest/scripts/cli/test_policy_route.py b/smoketest/scripts/cli/test_policy_route.py index 797ab97704c..672865eb005 100755 --- a/smoketest/scripts/cli/test_policy_route.py +++ b/smoketest/scripts/cli/test_policy_route.py @@ -18,8 +18,6 @@ from base_vyostest_shim import VyOSUnitTestSHIM -from vyos.utils.process import cmd - mark = '100' conn_mark = '555' conn_mark_set = '111' @@ -41,7 +39,7 @@ def setUpClass(cls): cls.cli_set(cls, ['interfaces', 'ethernet', interface, 'address', interface_ip]) cls.cli_set(cls, ['protocols', 'static', 'table', table_id, 'route', '0.0.0.0/0', 'interface', interface]) - + cls.cli_set(cls, ['vrf', 'name', vrf, 'table', vrf_table_id]) @classmethod @@ -73,17 +71,6 @@ def tearDown(self): self.verify_rules(ip_rule_search, inverse=True) - def verify_rules(self, rules_search, inverse=False): - rule_output = cmd('ip rule show') - - for search in rules_search: - matched = False - for line in rule_output.split("\n"): - if all(item in line for item in search): - matched = True - break - self.assertTrue(not matched if inverse else matched, msg=search) - def test_pbr_group(self): self.cli_set(['firewall', 'group', 'network-group', 'smoketest_network', 'network', '172.16.99.0/24']) self.cli_set(['firewall', 'group', 'network-group', 'smoketest_network1', 'network', '172.16.101.0/24']) diff --git a/src/conf_mode/policy_local-route.py b/src/conf_mode/policy_local-route.py index 331fd972dd0..9be2bc227a8 100755 --- a/src/conf_mode/policy_local-route.py +++ b/src/conf_mode/policy_local-route.py @@ -54,6 +54,7 @@ def get_config(config=None): dst = leaf_node_changed(conf, base_rule + [rule, 'destination', 'address']) dst_port = leaf_node_changed(conf, base_rule + [rule, 
'destination', 'port']) table = leaf_node_changed(conf, base_rule + [rule, 'set', 'table']) + vrf = leaf_node_changed(conf, base_rule + [rule, 'set', 'vrf']) proto = leaf_node_changed(conf, base_rule + [rule, 'protocol']) rule_def = {} if src: @@ -70,6 +71,8 @@ def get_config(config=None): rule_def = dict_merge({'destination': {'port': dst_port}}, rule_def) if table: rule_def = dict_merge({'table' : table}, rule_def) + if vrf: + rule_def = dict_merge({'vrf' : vrf}, rule_def) if proto: rule_def = dict_merge({'protocol' : proto}, rule_def) dict = dict_merge({dict_id : {rule : rule_def}}, dict) @@ -90,6 +93,7 @@ def get_config(config=None): dst = leaf_node_changed(conf, base_rule + [rule, 'destination', 'address']) dst_port = leaf_node_changed(conf, base_rule + [rule, 'destination', 'port']) table = leaf_node_changed(conf, base_rule + [rule, 'set', 'table']) + vrf = leaf_node_changed(conf, base_rule + [rule, 'set', 'vrf']) proto = leaf_node_changed(conf, base_rule + [rule, 'protocol']) # keep track of changes in configuration # otherwise we might remove an existing node although nothing else has changed @@ -179,6 +183,15 @@ def get_config(config=None): if len(table) > 0: rule_def = dict_merge({'table' : table}, rule_def) + # vrf + if vrf is None: + if 'set' in rule_config and 'vrf' in rule_config['set']: + rule_def = dict_merge({'vrf': [rule_config['set']['vrf']]}, rule_def) + else: + changed = True + if len(vrf) > 0: + rule_def = dict_merge({'vrf' : vrf}, rule_def) + # protocol if proto is None: if 'protocol' in rule_config: @@ -218,8 +231,15 @@ def verify(pbr): ): raise ConfigError('Source or destination address or fwmark or inbound-interface or protocol is required!') - if 'set' not in pbr_route['rule'][rule] or 'table' not in pbr_route['rule'][rule]['set']: - raise ConfigError('Table set is required!') + if 'set' not in pbr_route['rule'][rule]: + raise ConfigError('Either set table or set vrf is required!') + + set_tgts = pbr_route['rule'][rule]['set'] + if 'table' not in set_tgts and 'vrf' not in set_tgts: + raise ConfigError('Either set table or set vrf is required!') + + if 'table' in set_tgts and 'vrf' in set_tgts: + raise ConfigError('set table and set vrf cannot both be set!') if 'inbound_interface' in pbr_route['rule'][rule]: interface = pbr_route['rule'][rule]['inbound_interface'] @@ -250,11 +270,14 @@ def apply(pbr): fwmark = rule_config.get('fwmark', ['']) inbound_interface = rule_config.get('inbound_interface', ['']) protocol = rule_config.get('protocol', ['']) - table = rule_config.get('table', ['']) + # VRF 'default' is actually table 'main' for RIB rules + vrf = [ 'main' if x == 'default' else x for x in rule_config.get('vrf', ['']) ] + # See generate section below for table/vrf overlap explanation + table_or_vrf = rule_config.get('table', vrf) - for src, dst, src_port, dst_port, fwmk, iif, proto, table in product( + for src, dst, src_port, dst_port, fwmk, iif, proto, table_or_vrf in product( source, destination, source_port, destination_port, - fwmark, inbound_interface, protocol, table): + fwmark, inbound_interface, protocol, table_or_vrf): f_src = '' if src == '' else f' from {src} ' f_src_port = '' if src_port == '' else f' sport {src_port} ' f_dst = '' if dst == '' else f' to {dst} ' @@ -262,7 +285,7 @@ def apply(pbr): f_fwmk = '' if fwmk == '' else f' fwmark {fwmk} ' f_iif = '' if iif == '' else f' iif {iif} ' f_proto = '' if proto == '' else f' ipproto {proto} ' - f_table = '' if table == '' else f' lookup {table} ' + f_table = '' if table_or_vrf == '' else f' lookup 
{table_or_vrf} ' call(f'ip{v6} rule del prio {rule} {f_src}{f_dst}{f_proto}{f_src_port}{f_dst_port}{f_fwmk}{f_iif}{f_table}') @@ -276,7 +299,13 @@ def apply(pbr): if 'rule' in pbr_route: for rule, rule_config in pbr_route['rule'].items(): - table = rule_config['set'].get('table', '') + # VRFs get configred as route table alias names for iproute2 and only + # one 'set' can get past validation. Either can be fed to lookup. + vrf = rule_config['set'].get('vrf', '') + if vrf == 'default': + table_or_vrf = 'main' + else: + table_or_vrf = rule_config['set'].get('table', vrf) source = rule_config.get('source', {}).get('address', ['all']) source_port = rule_config.get('source', {}).get('port', '') destination = rule_config.get('destination', {}).get('address', ['all']) @@ -295,7 +324,7 @@ def apply(pbr): f_iif = f' iif {inbound_interface} ' if inbound_interface else '' f_proto = f' ipproto {protocol} ' if protocol else '' - call(f'ip{v6} rule add prio {rule}{f_src}{f_dst}{f_proto}{f_src_port}{f_dst_port}{f_fwmk}{f_iif} lookup {table}') + call(f'ip{v6} rule add prio {rule}{f_src}{f_dst}{f_proto}{f_src_port}{f_dst_port}{f_fwmk}{f_iif} lookup {table_or_vrf}') return None From 9c291d115d987cc635d1ef56898119c7d2bdfee6 Mon Sep 17 00:00:00 2001 From: Christian Breunig Date: Mon, 7 Oct 2024 17:13:14 +0200 Subject: [PATCH 34/85] xml: T6430: add re-usable vrf CLI node for firewall and pbr --- ...t-packet-modifications-table-and-vrf.xml.i | 19 +--------- .../include/firewall/vrf.xml.i | 20 ++++++++++ .../policy_local-route.xml.in | 38 +------------------ 3 files changed, 23 insertions(+), 54 deletions(-) create mode 100644 interface-definitions/include/firewall/vrf.xml.i diff --git a/interface-definitions/include/firewall/set-packet-modifications-table-and-vrf.xml.i b/interface-definitions/include/firewall/set-packet-modifications-table-and-vrf.xml.i index c7875b31d02..5eb1984a563 100644 --- a/interface-definitions/include/firewall/set-packet-modifications-table-and-vrf.xml.i +++ b/interface-definitions/include/firewall/set-packet-modifications-table-and-vrf.xml.i @@ -25,24 +25,7 @@ - - - VRF to forward packet with - - txt - VRF instance name - - - default - Forward into default global VRF - - - default - vrf name - - #include - - + #include diff --git a/interface-definitions/include/firewall/vrf.xml.i b/interface-definitions/include/firewall/vrf.xml.i new file mode 100644 index 00000000000..af8ce3ab495 --- /dev/null +++ b/interface-definitions/include/firewall/vrf.xml.i @@ -0,0 +1,20 @@ + + + + VRF to forward packet with + + txt + VRF instance name + + + default + Forward into default global VRF + + + default + vrf name + + #include + + + diff --git a/interface-definitions/policy_local-route.xml.in b/interface-definitions/policy_local-route.xml.in index 5235a668af1..9f6588db86f 100644 --- a/interface-definitions/policy_local-route.xml.in +++ b/interface-definitions/policy_local-route.xml.in @@ -39,24 +39,7 @@ - - - VRF to forward packet with - - txt - VRF instance name - - - default - Forward into default global VRF - - - default - vrf name - - #include - - + #include @@ -131,24 +114,7 @@ - - - VRF to forward packet with - - txt - VRF instance name - - - default - Forward into default global VRF - - - default - vrf name - - #include - - + #include From 8013e228320a414087e3456c1632b479483b856c Mon Sep 17 00:00:00 2001 From: John Estabrook Date: Thu, 3 Oct 2024 15:27:27 -0500 Subject: [PATCH 35/85] cli: T6740: set_tag on created paths and add parse step for ordering Signed-off-by: Daniil Baturin --- 
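The new helpers in vyos/utils/config.py mark every tag node along a freshly created path so that vyos-commands-to-config emits a correctly structured tree. A rough worked example of the prefix expansion they rely on, using a command path chosen purely for illustration:

    def flag(l: list) -> list:
        # all prefixes of the path, shortest first
        return [l[0:i] for i, _ in enumerate(l, start=1)]

    path = ['interfaces', 'ethernet', 'eth0', 'address']
    print(flag(path))
    # [['interfaces'],
    #  ['interfaces', 'ethernet'],
    #  ['interfaces', 'ethernet', 'eth0'],
    #  ['interfaces', 'ethernet', 'eth0', 'address']]

    # set_tags() checks each prefix with vyos.xml_ref.is_tag() and calls
    # ConfigTree.set_tag() on the prefixes that are tag nodes (here that
    # would be 'interfaces ethernet').
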
python/vyos/utils/config.py | 25 +++++++++++++++++++++++++ src/utils/vyos-commands-to-config | 7 +++++-- 2 files changed, 30 insertions(+), 2 deletions(-) diff --git a/python/vyos/utils/config.py b/python/vyos/utils/config.py index b02621e7b3b..deda13c1325 100644 --- a/python/vyos/utils/config.py +++ b/python/vyos/utils/config.py @@ -14,8 +14,14 @@ # License along with this library. If not, see . import os +from typing import TYPE_CHECKING + from vyos.defaults import directories +# https://peps.python.org/pep-0484/#forward-references +if TYPE_CHECKING: + from vyos.configtree import ConfigTree + config_file = os.path.join(directories['config'], 'config.boot') def read_saved_value(path: list): @@ -38,6 +44,25 @@ def read_saved_value(path: list): return ' '.join(res) return res +def flag(l: list) -> list: + res = [l[0:i] for i,_ in enumerate(l, start=1)] + return res + +def tag_node_of_path(p: list) -> list: + from vyos.xml_ref import is_tag + + fl = flag(p) + res = list(map(is_tag, fl)) + + return res + +def set_tags(ct: 'ConfigTree', path: list) -> None: + fl = flag(path) + if_tag = tag_node_of_path(path) + for condition, target in zip(if_tag, fl): + if condition: + ct.set_tag(target) + def parse_commands(cmds: str) -> dict: from re import split as re_split from shlex import split as shlex_split diff --git a/src/utils/vyos-commands-to-config b/src/utils/vyos-commands-to-config index 85613b49d26..927d9bd7068 100755 --- a/src/utils/vyos-commands-to-config +++ b/src/utils/vyos-commands-to-config @@ -20,7 +20,7 @@ import json from vyos.configtree import ConfigTree from vyos.utils.config import parse_commands - +from vyos.utils.config import set_tags def commands_to_config(cmds): ct = ConfigTree('') @@ -31,8 +31,10 @@ def commands_to_config(cmds): if c["is_leaf"]: replace = False if c["is_multi"] else True ct.set(c["path"], value=c["value"], replace=replace) + set_tags(ct, c["path"]) else: ct.create_node(c["path"]) + set_tags(ct, c["path"]) else: raise ValueError( f"\"{c['op']}\" is not a supported config operation") @@ -44,7 +46,8 @@ if __name__ == '__main__': try: cmds = sys.stdin.read() ct = commands_to_config(cmds) - print(str(ct)) + out = ConfigTree(ct.to_string()) + print(str(out)) except Exception as e: print(e) sys.exit(1) From 64196ec5fd90a8caedef88edb9068a32e4e6abfa Mon Sep 17 00:00:00 2001 From: John Estabrook Date: Mon, 7 Oct 2024 12:15:44 -0500 Subject: [PATCH 36/85] config-mgmt: T5976: move commit-confirm revert action to subnode --- .../system_config-management.xml.in | 43 +++++++++++-------- src/conf_mode/system_config-management.py | 3 +- 2 files changed, 27 insertions(+), 19 deletions(-) diff --git a/interface-definitions/system_config-management.xml.in b/interface-definitions/system_config-management.xml.in index b8fb6cdb5e3..a23d44aeab1 100644 --- a/interface-definitions/system_config-management.xml.in +++ b/interface-definitions/system_config-management.xml.in @@ -67,26 +67,33 @@ Number of revisions must be between 0 and 65535 - + - Commit confirm rollback type if no confirmation - - reload reboot - - - reload - Reload previous configuration if not confirmed - - - reboot - Reboot to saved configuration if not confirmed - - - (reload|reboot) - + Commit confirm options - reboot - + + + + Commit confirm revert action + + reload reboot + + + reload + Reload previous configuration if not confirmed + + + reboot + Reboot to saved configuration if not confirmed + + + (reload|reboot) + + + reboot + + + diff --git a/src/conf_mode/system_config-management.py 
b/src/conf_mode/system_config-management.py index a1ee136cd95..8de4e534299 100755 --- a/src/conf_mode/system_config-management.py +++ b/src/conf_mode/system_config-management.py @@ -40,7 +40,8 @@ def get_config(config=None): def verify(mgmt): d = mgmt.config_dict - if d.get('commit_confirm', '') == 'reload' and 'commit_revisions' not in d: + confirm = d.get('commit_confirm', {}) + if confirm.get('action', '') == 'reload' and 'commit_revisions' not in d: raise ConfigError('commit-confirm reload requires non-zero commit-revisions') return From d979117ff9988a2633471202b8be8ca4a0d1a923 Mon Sep 17 00:00:00 2001 From: Nataliia Solomko Date: Mon, 7 Oct 2024 13:35:58 +0300 Subject: [PATCH 37/85] vyos.configtree: T4318: Allow set tag flag to true or false --- python/vyos/configtree.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/python/vyos/configtree.py b/python/vyos/configtree.py index ee8ca8b837d..3e02fbba612 100644 --- a/python/vyos/configtree.py +++ b/python/vyos/configtree.py @@ -141,7 +141,7 @@ def __init__(self, config_string=None, address=None, libpath=LIBPATH): self.__is_tag.restype = c_int self.__set_tag = self.__lib.set_tag - self.__set_tag.argtypes = [c_void_p, c_char_p] + self.__set_tag.argtypes = [c_void_p, c_char_p, c_bool] self.__set_tag.restype = c_int self.__is_leaf = self.__lib.is_leaf @@ -359,11 +359,11 @@ def is_tag(self, path): else: return False - def set_tag(self, path): + def set_tag(self, path, value=True): check_path(path) path_str = " ".join(map(str, path)).encode() - res = self.__set_tag(self.__config, path_str) + res = self.__set_tag(self.__config, path_str, value) if (res == 0): return True else: From dfba19b3e89f47690c0abdee5f6410278237122e Mon Sep 17 00:00:00 2001 From: John Estabrook Date: Tue, 8 Oct 2024 10:16:52 -0500 Subject: [PATCH 38/85] config-mgmt: T5976: display message when reverting to previous config --- python/vyos/config_mgmt.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/python/vyos/config_mgmt.py b/python/vyos/config_mgmt.py index 851ac21341f..1c2b70fdf06 100644 --- a/python/vyos/config_mgmt.py +++ b/python/vyos/config_mgmt.py @@ -289,6 +289,9 @@ def revert_soft(self) -> Tuple[str, int]: # confirmed, hence a soft revert is to revision 0 revert_ct = self._get_config_tree_revision(0) + message = '[commit-confirm] Reverting to previous config now' + os.system('wall -n ' + message) + mask = os.umask(0o002) session = ConfigSession(os.getpid(), app='config-mgmt') From bb28e001b1127eea3efbb7924c1871db3e8d2b82 Mon Sep 17 00:00:00 2001 From: sarthurdev <965089+sarthurdev@users.noreply.github.com> Date: Wed, 9 Oct 2024 11:15:20 +0200 Subject: [PATCH 39/85] pki: T6766: Add support for ECDSA private keys --- python/vyos/pki.py | 37 +++++++++++++++++++++++++++++-------- 1 file changed, 29 insertions(+), 8 deletions(-) diff --git a/python/vyos/pki.py b/python/vyos/pki.py index 5a0e2ddda7e..55dc02631fc 100644 --- a/python/vyos/pki.py +++ b/python/vyos/pki.py @@ -33,6 +33,8 @@ CERT_END='\n-----END CERTIFICATE-----' KEY_BEGIN='-----BEGIN PRIVATE KEY-----\n' KEY_END='\n-----END PRIVATE KEY-----' +KEY_EC_BEGIN='-----BEGIN EC PRIVATE KEY-----\n' +KEY_EC_END='\n-----END EC PRIVATE KEY-----' KEY_ENC_BEGIN='-----BEGIN ENCRYPTED PRIVATE KEY-----\n' KEY_ENC_END='\n-----END ENCRYPTED PRIVATE KEY-----' KEY_PUB_BEGIN='-----BEGIN PUBLIC KEY-----\n' @@ -228,8 +230,18 @@ def create_dh_parameters(bits=2048): def wrap_public_key(raw_data): return KEY_PUB_BEGIN + raw_data + KEY_PUB_END -def wrap_private_key(raw_data, passphrase=None): - return 
(KEY_ENC_BEGIN if passphrase else KEY_BEGIN) + raw_data + (KEY_ENC_END if passphrase else KEY_END) +def wrap_private_key(raw_data, passphrase=None, ec=False): + begin = KEY_BEGIN + end = KEY_END + + if passphrase: + begin = KEY_ENC_BEGIN + end = KEY_ENC_END + elif ec: + begin = KEY_EC_BEGIN + end = KEY_EC_END + + return begin + raw_data + end def wrap_openssh_public_key(raw_data, type): return f'{type} {raw_data}' @@ -262,17 +274,26 @@ def load_public_key(raw_data, wrap_tags=True): except ValueError: return False -def load_private_key(raw_data, passphrase=None, wrap_tags=True): - if wrap_tags: - raw_data = wrap_private_key(raw_data, passphrase) +def _load_private_key(raw_data, passphrase): + try: + return serialization.load_pem_private_key(bytes(raw_data, 'utf-8'), password=passphrase) + except (ValueError, TypeError): + return False +def load_private_key(raw_data, passphrase=None, wrap_tags=True): if passphrase is not None: passphrase = bytes(passphrase, 'utf-8') - try: - return serialization.load_pem_private_key(bytes(raw_data, 'utf-8'), password=passphrase) - except (ValueError, TypeError): + result = False + + if wrap_tags: + for ec_test in [False, True]: + wrapped_data = wrap_private_key(raw_data, passphrase, ec_test) + if result := _load_private_key(wrapped_data, passphrase): + return result return False + else: + return _load_private_key(raw_data, passphrase) def load_openssh_public_key(raw_data, type): try: From 90a4827284acd3cb072cdfeef323c522802c6449 Mon Sep 17 00:00:00 2001 From: sarthurdev <965089+sarthurdev@users.noreply.github.com> Date: Wed, 9 Oct 2024 14:55:11 +0200 Subject: [PATCH 40/85] haproxy: T6745: Rename `reverse-proxy` to `haproxy` --- data/config-mode-dependencies/vyos-1x.json | 2 +- data/op-mode-standardized.json | 2 +- data/templates/load-balancing/haproxy.cfg.j2 | 2 +- debian/control | 4 +-- .../version/reverseproxy-version.xml.i | 2 +- ...y.xml.in => load-balancing_haproxy.xml.in} | 8 +++--- ...-proxy.xml.in => load-balacing_haproxy.in} | 12 ++++----- ...roxy.py => test_load-balancing_haproxy.py} | 2 +- ...rse-proxy.py => load-balancing_haproxy.py} | 2 +- src/conf_mode/pki.py | 2 +- src/migration-scripts/reverse-proxy/1-to-2 | 27 +++++++++++++++++++ ...erseproxy.py => load-balancing_haproxy.py} | 4 +-- src/op_mode/restart.py | 10 +++---- 13 files changed, 53 insertions(+), 26 deletions(-) rename interface-definitions/{load-balancing_reverse-proxy.xml.in => load-balancing_haproxy.xml.in} (98%) rename op-mode-definitions/{reverse-proxy.xml.in => load-balacing_haproxy.in} (55%) rename smoketest/scripts/cli/{test_load-balancing_reverse-proxy.py => test_load-balancing_haproxy.py} (99%) rename src/conf_mode/{load-balancing_reverse-proxy.py => load-balancing_haproxy.py} (99%) mode change 100755 => 100644 create mode 100755 src/migration-scripts/reverse-proxy/1-to-2 rename src/op_mode/{reverseproxy.py => load-balancing_haproxy.py} (97%) diff --git a/data/config-mode-dependencies/vyos-1x.json b/data/config-mode-dependencies/vyos-1x.json index 2981a0851f0..cbd14f7c6bc 100644 --- a/data/config-mode-dependencies/vyos-1x.json +++ b/data/config-mode-dependencies/vyos-1x.json @@ -26,10 +26,10 @@ "pki": { "ethernet": ["interfaces_ethernet"], "openvpn": ["interfaces_openvpn"], + "haproxy": ["load-balancing_haproxy"], "https": ["service_https"], "ipsec": ["vpn_ipsec"], "openconnect": ["vpn_openconnect"], - "reverse_proxy": ["load-balancing_reverse-proxy"], "rpki": ["protocols_rpki"], "sstp": ["vpn_sstp"], "sstpc": ["interfaces_sstpc"], diff --git a/data/op-mode-standardized.json 
b/data/op-mode-standardized.json index baa1e9110de..35587b63c9d 100644 --- a/data/op-mode-standardized.json +++ b/data/op-mode-standardized.json @@ -25,7 +25,7 @@ "otp.py", "qos.py", "reset_vpn.py", -"reverseproxy.py", +"load-balancing_haproxy.py", "route.py", "storage.py", "system.py", diff --git a/data/templates/load-balancing/haproxy.cfg.j2 b/data/templates/load-balancing/haproxy.cfg.j2 index 5137966c1cc..786ebfb2109 100644 --- a/data/templates/load-balancing/haproxy.cfg.j2 +++ b/data/templates/load-balancing/haproxy.cfg.j2 @@ -1,4 +1,4 @@ -### Autogenerated by load-balancing_reverse-proxy.py ### +### Autogenerated by load-balancing_haproxy.py ### global chroot /var/lib/haproxy diff --git a/debian/control b/debian/control index 15fb5d72e48..20cfcdc4355 100644 --- a/debian/control +++ b/debian/control @@ -202,9 +202,9 @@ Depends: # For "service router-advert" radvd, # End "service route-advert" -# For "load-balancing reverse-proxy" +# For "load-balancing haproxy" haproxy, -# End "load-balancing reverse-proxy" +# End "load-balancing haproxy" # For "load-balancing wan" vyatta-wanloadbalance, # End "load-balancing wan" diff --git a/interface-definitions/include/version/reverseproxy-version.xml.i b/interface-definitions/include/version/reverseproxy-version.xml.i index 907ea1e5ee8..4f09f2848af 100644 --- a/interface-definitions/include/version/reverseproxy-version.xml.i +++ b/interface-definitions/include/version/reverseproxy-version.xml.i @@ -1,3 +1,3 @@ - + diff --git a/interface-definitions/load-balancing_reverse-proxy.xml.in b/interface-definitions/load-balancing_haproxy.xml.in similarity index 98% rename from interface-definitions/load-balancing_reverse-proxy.xml.in rename to interface-definitions/load-balancing_haproxy.xml.in index 18274622cd4..7422724362c 100644 --- a/interface-definitions/load-balancing_reverse-proxy.xml.in +++ b/interface-definitions/load-balancing_haproxy.xml.in @@ -2,9 +2,9 @@ - + - Configure reverse-proxy + Configure haproxy 900 @@ -26,10 +26,10 @@ Backend name must be alphanumeric and can contain hyphen and underscores txt - Name of reverse-proxy backend system + Name of haproxy backend system - load-balancing reverse-proxy backend + load-balancing haproxy backend diff --git a/op-mode-definitions/reverse-proxy.xml.in b/op-mode-definitions/load-balacing_haproxy.in similarity index 55% rename from op-mode-definitions/reverse-proxy.xml.in rename to op-mode-definitions/load-balacing_haproxy.in index b45ce107f1b..c3d6c799b85 100644 --- a/op-mode-definitions/reverse-proxy.xml.in +++ b/op-mode-definitions/load-balacing_haproxy.in @@ -2,21 +2,21 @@ - + - Restart reverse-proxy service + Restart haproxy service - sudo ${vyos_op_scripts_dir}/restart.py restart_service --name reverse_proxy + sudo ${vyos_op_scripts_dir}/restart.py restart_service --name haproxy - + - Show load-balancing reverse-proxy + Show load-balancing haproxy - sudo ${vyos_op_scripts_dir}/reverseproxy.py show + sudo ${vyos_op_scripts_dir}/load-balacing_haproxy.py show diff --git a/smoketest/scripts/cli/test_load-balancing_reverse-proxy.py b/smoketest/scripts/cli/test_load-balancing_haproxy.py similarity index 99% rename from smoketest/scripts/cli/test_load-balancing_reverse-proxy.py rename to smoketest/scripts/cli/test_load-balancing_haproxy.py index 34f77b95dfa..967eb3869a3 100755 --- a/smoketest/scripts/cli/test_load-balancing_reverse-proxy.py +++ b/smoketest/scripts/cli/test_load-balancing_haproxy.py @@ -24,7 +24,7 @@ PROCESS_NAME = 'haproxy' HAPROXY_CONF = '/run/haproxy/haproxy.cfg' -base_path = 
['load-balancing', 'reverse-proxy'] +base_path = ['load-balancing', 'haproxy'] proxy_interface = 'eth1' valid_ca_cert = """ diff --git a/src/conf_mode/load-balancing_reverse-proxy.py b/src/conf_mode/load-balancing_haproxy.py old mode 100755 new mode 100644 similarity index 99% rename from src/conf_mode/load-balancing_reverse-proxy.py rename to src/conf_mode/load-balancing_haproxy.py index 17226efe9a4..45042dd52d6 --- a/src/conf_mode/load-balancing_reverse-proxy.py +++ b/src/conf_mode/load-balancing_haproxy.py @@ -48,7 +48,7 @@ def get_config(config=None): else: conf = Config() - base = ['load-balancing', 'reverse-proxy'] + base = ['load-balancing', 'haproxy'] if not conf.exists(base): return None lb = conf.get_config_dict(base, diff --git a/src/conf_mode/pki.py b/src/conf_mode/pki.py index 233d73ba897..45e0129a331 100755 --- a/src/conf_mode/pki.py +++ b/src/conf_mode/pki.py @@ -71,7 +71,7 @@ }, { 'keys': ['certificate', 'ca_certificate'], - 'path': ['load_balancing', 'reverse_proxy'], + 'path': ['load_balancing', 'haproxy'], }, { 'keys': ['key'], diff --git a/src/migration-scripts/reverse-proxy/1-to-2 b/src/migration-scripts/reverse-proxy/1-to-2 new file mode 100755 index 00000000000..61612bc360c --- /dev/null +++ b/src/migration-scripts/reverse-proxy/1-to-2 @@ -0,0 +1,27 @@ +# Copyright 2024 VyOS maintainers and contributors +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this library. If not, see . 
+ +# T6745: Rename base node to haproxy + +from vyos.configtree import ConfigTree + +base = ['load-balancing', 'reverse-proxy'] + +def migrate(config: ConfigTree) -> None: + if not config.exists(base): + # Nothing to do + return + + config.rename(base, 'haproxy') diff --git a/src/op_mode/reverseproxy.py b/src/op_mode/load-balancing_haproxy.py similarity index 97% rename from src/op_mode/reverseproxy.py rename to src/op_mode/load-balancing_haproxy.py index 19704182a88..ae6734e16dd 100755 --- a/src/op_mode/reverseproxy.py +++ b/src/op_mode/load-balancing_haproxy.py @@ -217,8 +217,8 @@ def _get_formatted_output(data): def show(raw: bool): config = ConfigTreeQuery() - if not config.exists('load-balancing reverse-proxy'): - raise vyos.opmode.UnconfiguredSubsystem('Reverse-proxy is not configured') + if not config.exists('load-balancing haproxy'): + raise vyos.opmode.UnconfiguredSubsystem('Haproxy is not configured') data = _get_raw_data() if raw: diff --git a/src/op_mode/restart.py b/src/op_mode/restart.py index a83c8b9d8f5..3b0031f347f 100755 --- a/src/op_mode/restart.py +++ b/src/op_mode/restart.py @@ -41,6 +41,10 @@ 'systemd_service': 'pdns-recursor', 'path': ['service', 'dns', 'forwarding'], }, + 'haproxy': { + 'systemd_service': 'haproxy', + 'path': ['load-balancing', 'haproxy'], + }, 'igmp_proxy': { 'systemd_service': 'igmpproxy', 'path': ['protocols', 'igmp-proxy'], @@ -53,10 +57,6 @@ 'systemd_service': 'avahi-daemon', 'path': ['service', 'mdns', 'repeater'], }, - 'reverse_proxy': { - 'systemd_service': 'haproxy', - 'path': ['load-balancing', 'reverse-proxy'], - }, 'router_advert': { 'systemd_service': 'radvd', 'path': ['service', 'router-advert'], @@ -83,10 +83,10 @@ 'dhcpv6', 'dns_dynamic', 'dns_forwarding', + 'haproxy', 'igmp_proxy', 'ipsec', 'mdns_repeater', - 'reverse_proxy', 'router_advert', 'snmp', 'ssh', From 7765e037b9fc9913b58a39bfa39209ebfd3b3f71 Mon Sep 17 00:00:00 2001 From: sarthurdev <965089+sarthurdev@users.noreply.github.com> Date: Wed, 9 Oct 2024 16:00:15 +0200 Subject: [PATCH 41/85] haproxy: T6745: Add haproxy migration to config test --- smoketest/configs/basic-vyos | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/smoketest/configs/basic-vyos b/smoketest/configs/basic-vyos index e95d7458f99..242f3d1defd 100644 --- a/smoketest/configs/basic-vyos +++ b/smoketest/configs/basic-vyos @@ -32,6 +32,27 @@ interfaces { loopback lo { } } +load-balancing { + reverse-proxy { + backend bk-01 { + balance "round-robin" + mode "tcp" + server srv01 { + address "192.0.2.11" + port "8881" + } + server srv02 { + address "192.0.2.12" + port "8882" + } + } + service my-tcp-api { + backend "bk-01" + mode "tcp" + port "8888" + } + } +} protocols { static { arp 192.168.0.20 { From e5d2ac54150922640c08bacab124e96c7bbd1f7f Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Fri, 11 Oct 2024 08:23:46 +0300 Subject: [PATCH 42/85] T6712: Add nonproduction banner (#4149) (cherry picked from commit 3abe7c72c95c3d9b825db08b092c555786e9fbcf) Co-authored-by: Viacheslav Hletenko --- data/templates/login/motd_vyos_nonproduction.j2 | 3 +++ src/conf_mode/system_login_banner.py | 4 ++++ 2 files changed, 7 insertions(+) create mode 100644 data/templates/login/motd_vyos_nonproduction.j2 diff --git a/data/templates/login/motd_vyos_nonproduction.j2 b/data/templates/login/motd_vyos_nonproduction.j2 new file mode 100644 index 00000000000..32922f27f3d --- /dev/null +++ b/data/templates/login/motd_vyos_nonproduction.j2 @@ -0,0 +1,3 @@ + +--- 
+Warning: This VyOS system is not a stable long-term support version and is not intended for production use. diff --git a/src/conf_mode/system_login_banner.py b/src/conf_mode/system_login_banner.py index 923e1bf57f0..5826d8042a2 100755 --- a/src/conf_mode/system_login_banner.py +++ b/src/conf_mode/system_login_banner.py @@ -28,6 +28,7 @@ PRELOGIN_FILE = r'/etc/issue' PRELOGIN_NET_FILE = r'/etc/issue.net' POSTLOGIN_FILE = r'/etc/motd' +POSTLOGIN_VYOS_FILE = r'/run/motd.d/01-vyos-nonproduction' default_config_data = { 'issue': 'Welcome to VyOS - \\n \\l\n\n', @@ -94,6 +95,9 @@ def apply(banner): render(POSTLOGIN_FILE, 'login/default_motd.j2', banner, permission=0o644, user='root', group='root') + render(POSTLOGIN_VYOS_FILE, 'login/motd_vyos_nonproduction.j2', banner, + permission=0o644, user='root', group='root') + return None if __name__ == '__main__': From 60cd669d2cee2a17d4e6ab6fce9101069d311e23 Mon Sep 17 00:00:00 2001 From: Nataliia Solomko Date: Fri, 11 Oct 2024 13:24:43 +0300 Subject: [PATCH 43/85] T6695: Machine-readable operational mode support for traceroute --- data/templates/https/nginx.default.j2 | 2 +- python/vyos/configsession.py | 14 ++ python/vyos/opmode.py | 5 +- src/op_mode/mtr.py | 2 +- src/op_mode/mtr_execute.py | 217 ++++++++++++++++++++++++++ src/services/api/rest/models.py | 14 ++ src/services/api/rest/routers.py | 24 +++ 7 files changed, 275 insertions(+), 3 deletions(-) create mode 100644 src/op_mode/mtr_execute.py diff --git a/data/templates/https/nginx.default.j2 b/data/templates/https/nginx.default.j2 index 1dde66ebf1e..51da4694668 100644 --- a/data/templates/https/nginx.default.j2 +++ b/data/templates/https/nginx.default.j2 @@ -48,7 +48,7 @@ server { ssl_ciphers 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!3DES:!MD5:!PSK'; # proxy settings for HTTP API, if enabled; 503, if not - location ~ ^/(retrieve|configure|config-file|image|import-pki|container-image|generate|show|reboot|reset|poweroff|docs|openapi.json|redoc|graphql) { + location ~ ^/(retrieve|configure|config-file|image|import-pki|container-image|generate|show|reboot|reset|poweroff|traceroute|docs|openapi.json|redoc|graphql) { {% if api is vyos_defined %} proxy_pass http://unix:/run/api.sock; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; diff --git a/python/vyos/configsession.py b/python/vyos/configsession.py index 7d51b94e401..6bfca520045 100644 --- a/python/vyos/configsession.py +++ b/python/vyos/configsession.py @@ -48,6 +48,16 @@ POWEROFF = ['/opt/vyatta/bin/vyatta-op-cmd-wrapper', 'poweroff'] OP_CMD_ADD = ['/opt/vyatta/bin/vyatta-op-cmd-wrapper', 'add'] OP_CMD_DELETE = ['/opt/vyatta/bin/vyatta-op-cmd-wrapper', 'delete'] +TRACEROUTE = [ + '/usr/libexec/vyos/op_mode/mtr_execute.py', + 'mtr', + '--for-api', + '--report-mode', + '--report-cycles', + '1', + '--json', + '--host', +] # Default "commit via" string APP = "vyos-http-api" @@ -285,3 +295,7 @@ def delete_container_image(self, name): def show_container_image(self): out = self.__run_command(SHOW + ['container', 'image']) return out + + def traceroute(self, host): 
+ out = self.__run_command(TRACEROUTE + [host]) + return out diff --git a/python/vyos/opmode.py b/python/vyos/opmode.py index 066c8058f23..83e25fb5055 100644 --- a/python/vyos/opmode.py +++ b/python/vyos/opmode.py @@ -89,7 +89,10 @@ class InternalError(Error): def _is_op_mode_function_name(name): - if re.match(r"^(show|clear|reset|restart|add|update|delete|generate|set|renew|release|execute)", name): + if re.match( + r'^(show|clear|reset|restart|add|update|delete|generate|set|renew|release|execute|mtr)', + name, + ): return True else: return False diff --git a/src/op_mode/mtr.py b/src/op_mode/mtr.py index baf9672a1ef..eea80c4dac9 100644 --- a/src/op_mode/mtr.py +++ b/src/op_mode/mtr.py @@ -23,7 +23,7 @@ from vyos.utils.process import call options = { - 'report': { + 'report-mode': { 'mtr': '{command} --report', 'type': 'noarg', 'help': 'This option puts mtr into report mode. When in this mode, mtr will run for the number of cycles specified by the -c option, and then print statistics and exit.' diff --git a/src/op_mode/mtr_execute.py b/src/op_mode/mtr_execute.py new file mode 100644 index 00000000000..2585a7ee4b0 --- /dev/null +++ b/src/op_mode/mtr_execute.py @@ -0,0 +1,217 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2024 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+ +import ipaddress +import socket +import sys +import typing + +from json import loads + +from vyos.utils.network import interface_list +from vyos.utils.network import vrf_list +from vyos.utils.process import cmd +from vyos.utils.process import call + +import vyos.opmode + +ArgProtocol = typing.Literal['tcp', 'udp', 'sctp'] +noargs_list = [ + 'report_mode', + 'json', + 'report_wide', + 'split', + 'raw', + 'no_dns', + 'aslookup', +] + + +def vrf_list_default(): + return vrf_list() + ['default'] + + +options = { + 'report_mode': { + 'mtr': '{command} --report', + }, + 'protocol': { + 'mtr': '{command} --{value}', + }, + 'json': { + 'mtr': '{command} --json', + }, + 'report_wide': { + 'mtr': '{command} --report-wide', + }, + 'raw': { + 'mtr': '{command} --raw', + }, + 'split': { + 'mtr': '{command} --split', + }, + 'no_dns': { + 'mtr': '{command} --no-dns', + }, + 'show_ips': { + 'mtr': '{command} --show-ips {value}', + }, + 'ipinfo': { + 'mtr': '{command} --ipinfo {value}', + }, + 'aslookup': { + 'mtr': '{command} --aslookup', + }, + 'interval': { + 'mtr': '{command} --interval {value}', + }, + 'report_cycles': { + 'mtr': '{command} --report-cycles {value}', + }, + 'psize': { + 'mtr': '{command} --psize {value}', + }, + 'bitpattern': { + 'mtr': '{command} --bitpattern {value}', + }, + 'gracetime': { + 'mtr': '{command} --gracetime {value}', + }, + 'tos': { + 'mtr': '{command} --tos {value}', + }, + 'mpls': { + 'mtr': '{command} --mpls {value}', + }, + 'interface': { + 'mtr': '{command} --interface {value}', + 'helpfunction': interface_list, + }, + 'address': { + 'mtr': '{command} --address {value}', + }, + 'first_ttl': { + 'mtr': '{command} --first-ttl {value}', + }, + 'max_ttl': { + 'mtr': '{command} --max-ttl {value}', + }, + 'max_unknown': { + 'mtr': '{command} --max-unknown {value}', + }, + 'port': { + 'mtr': '{command} --port {value}', + }, + 'localport': { + 'mtr': '{command} --localport {value}', + }, + 'timeout': { + 'mtr': '{command} --timeout {value}', + }, + 'mark': { + 'mtr': '{command} --mark {value}', + }, + 'vrf': { + 'mtr': 'sudo ip vrf exec {value} {command}', + 'helpfunction': vrf_list_default, + 'dflt': 'default', + }, +} + +mtr_command = { + 4: '/bin/mtr -4', + 6: '/bin/mtr -6', +} + + +def mtr( + host: str, + for_api: typing.Optional[bool], + report_mode: typing.Optional[bool], + protocol: typing.Optional[ArgProtocol], + report_wide: typing.Optional[bool], + raw: typing.Optional[bool], + json: typing.Optional[bool], + split: typing.Optional[bool], + no_dns: typing.Optional[bool], + show_ips: typing.Optional[str], + ipinfo: typing.Optional[str], + aslookup: typing.Optional[bool], + interval: typing.Optional[str], + report_cycles: typing.Optional[str], + psize: typing.Optional[str], + bitpattern: typing.Optional[str], + gracetime: typing.Optional[str], + tos: typing.Optional[str], + mpl: typing.Optional[bool], + interface: typing.Optional[str], + address: typing.Optional[str], + first_ttl: typing.Optional[str], + max_ttl: typing.Optional[str], + max_unknown: typing.Optional[str], + port: typing.Optional[str], + localport: typing.Optional[str], + timeout: typing.Optional[str], + mark: typing.Optional[str], + vrf: typing.Optional[str], +): + args = locals() + for name, option in options.items(): + if 'dflt' in option and not args[name]: + args[name] = option['dflt'] + + try: + ip = socket.gethostbyname(host) + except UnicodeError: + raise vyos.opmode.InternalError(f'Unknown host: {host}') + except socket.gaierror: + ip = host + + try: + version = 
ipaddress.ip_address(ip).version + except ValueError: + raise vyos.opmode.InternalError(f'Unknown host: {host}') + + command = mtr_command[version] + + for key, val in args.items(): + if key in options and val: + if 'helpfunction' in options[key]: + allowed_values = options[key]['helpfunction']() + if val not in allowed_values: + raise vyos.opmode.InternalError( + f'Invalid argument for option {key} - {val}' + ) + value = '' if key in noargs_list else val + command = options[key]['mtr'].format(command=command, value=val) + + if json: + output = cmd(f'{command} {host}') + if for_api: + output = loads(output) + print(output) + else: + call(f'{command} --curses --displaymode 0 {host}') + + +if __name__ == '__main__': + try: + res = vyos.opmode.run(sys.modules[__name__]) + if res: + print(res) + except (ValueError, vyos.opmode.Error) as e: + print(e) + sys.exit(1) diff --git a/src/services/api/rest/models.py b/src/services/api/rest/models.py index 23ae9be9d9c..27d9fb5ee27 100644 --- a/src/services/api/rest/models.py +++ b/src/services/api/rest/models.py @@ -279,6 +279,20 @@ class Config: } +class TracerouteModel(ApiModel): + op: StrictStr + host: StrictStr + + class Config: + schema_extra = { + 'example': { + 'key': 'id_key', + 'op': 'traceroute', + 'host': 'host', + } + } + + class Success(BaseModel): success: bool data: Union[str, bool, Dict] diff --git a/src/services/api/rest/routers.py b/src/services/api/rest/routers.py index da981d5bff3..47d06b7e921 100644 --- a/src/services/api/rest/routers.py +++ b/src/services/api/rest/routers.py @@ -68,6 +68,7 @@ from .models import ResetModel from .models import ImportPkiModel from .models import PoweroffModel +from .models import TracerouteModel if TYPE_CHECKING: @@ -209,6 +210,7 @@ async def body(self) -> bytes: '/container-image', '/image', '/configure-section', + '/traceroute', ): if 'path' not in c: self.form_err = ( @@ -742,6 +744,28 @@ def poweroff_op(data: PoweroffModel): return success(res) +@router.post('/traceroute') +def traceroute_op(data: TracerouteModel): + state = SessionState() + session = state.session + + op = data.op + host = data.host + + try: + if op == 'traceroute': + res = session.traceroute(host) + else: + return error(400, f"'{op}' is not a valid operation") + except ConfigSessionError as e: + return error(400, str(e)) + except Exception: + LOG.critical(traceback.format_exc()) + return error(500, 'An internal error occurred. Check the logs for details.') + + return success(res) + + def rest_init(app: 'FastAPI'): if all(r in app.routes for r in router.routes): return From 5c76607a9faef1fb5dc07459a38d37261ce988c1 Mon Sep 17 00:00:00 2001 From: Nataliia Solomko Date: Fri, 11 Oct 2024 17:03:35 +0300 Subject: [PATCH 44/85] T6695: normalize formatting --- python/vyos/opmode.py | 163 ++++++++++++++++++++++++++---------------- src/op_mode/mtr.py | 74 +++++++++---------- 2 files changed, 139 insertions(+), 98 deletions(-) diff --git a/python/vyos/opmode.py b/python/vyos/opmode.py index 83e25fb5055..58c1e2c9d9f 100644 --- a/python/vyos/opmode.py +++ b/python/vyos/opmode.py @@ -20,71 +20,90 @@ class Error(Exception): - """ Any error that makes requested operation impossible to complete - for reasons unrelated to the user input or script logic. + """Any error that makes requested operation impossible to complete + for reasons unrelated to the user input or script logic. - This is the base class, scripts should not use it directly - and should raise more specific errors instead, - whenever possible. 
+ This is the base class, scripts should not use it directly + and should raise more specific errors instead, + whenever possible. """ + pass + class UnconfiguredSubsystem(Error): - """ Requested operation is valid, but cannot be completed - because corresponding subsystem is not configured - and thus is not running. + """Requested operation is valid, but cannot be completed + because corresponding subsystem is not configured + and thus is not running. """ + pass + class UnconfiguredObject(UnconfiguredSubsystem): - """ Requested operation is valid but cannot be completed - because its parameter refers to an object that does not exist - in the system configuration. + """Requested operation is valid but cannot be completed + because its parameter refers to an object that does not exist + in the system configuration. """ + pass + class DataUnavailable(Error): - """ Requested operation is valid, but cannot be completed - because data for it is not available. - This error MAY be treated as temporary because such issues - are often caused by transient events such as service restarts. + """Requested operation is valid, but cannot be completed + because data for it is not available. + This error MAY be treated as temporary because such issues + are often caused by transient events such as service restarts. """ + pass + class PermissionDenied(Error): - """ Requested operation is valid, but the caller has no permission - to perform it. + """Requested operation is valid, but the caller has no permission + to perform it. """ + pass + class InsufficientResources(Error): - """ Requested operation and its arguments are valid but the system - does not have enough resources (such as drive space or memory) - to complete it. + """Requested operation and its arguments are valid but the system + does not have enough resources (such as drive space or memory) + to complete it. """ + pass + class UnsupportedOperation(Error): - """ Requested operation is technically valid but is not implemented yet. """ + """Requested operation is technically valid but is not implemented yet.""" + pass + class IncorrectValue(Error): - """ Requested operation is valid, but an argument provided has an - incorrect value, preventing successful completion. + """Requested operation is valid, but an argument provided has an + incorrect value, preventing successful completion. """ + pass + class CommitInProgress(Error): - """ Requested operation is valid, but not possible at the time due + """Requested operation is valid, but not possible at the time due to a commit being in progress. """ + pass + class InternalError(Error): - """ Any situation when VyOS detects that it could not perform - an operation correctly due to logic errors in its own code - or errors in underlying software. + """Any situation when VyOS detects that it could not perform + an operation correctly due to logic errors in its own code + or errors in underlying software. 
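These error classes, together with run() below, define the contract that the op-mode rewrites in this patch series (mtr, VRRP, PKI) follow. A minimal module written against that contract might look like the sketch below; the function name and the returned data are placeholders for illustration, not part of this change:

    #!/usr/bin/env python3
    # Illustrative op-mode module sketch (not part of this patch)
    import sys
    import typing

    import vyos.opmode

    def show_example(raw: bool, name: typing.Optional[str] = None):
        # raw: bool surfaces as a --raw flag; when it is set, run()
        # serializes the returned dict/list to JSON for the caller.
        data = {'name': name or 'all', 'status': 'ok'}
        if raw:
            return data
        return f"{data['name']}: {data['status']}"

    if __name__ == '__main__':
        try:
            res = vyos.opmode.run(sys.modules[__name__])
            if res:
                print(res)
        except (ValueError, vyos.opmode.Error) as e:
            print(e)
            sys.exit(1)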
""" + pass @@ -97,12 +116,14 @@ def _is_op_mode_function_name(name): else: return False + def _capture_output(name): - if re.match(r"^(show|generate)", name): + if re.match(r'^(show|generate)', name): return True else: return False + def _get_op_mode_functions(module): from inspect import getmembers, isfunction @@ -113,32 +134,35 @@ def _get_op_mode_functions(module): funcs = list(filter(lambda ft: _is_op_mode_function_name(ft[0]), funcs)) funcs_dict = {} - for (name, thunk) in funcs: + for name, thunk in funcs: funcs_dict[name] = thunk return funcs_dict + def _is_optional_type(t): # Optional[t] is internally an alias for Union[t, NoneType] # and there's no easy way to get union members it seems - if (type(t) == typing._UnionGenericAlias): - if (len(t.__args__) == 2): - if t.__args__[1] == type(None): + if type(t) is typing._UnionGenericAlias: + if len(t.__args__) == 2: + if t.__args__[1] is type(None): return True return False + def _get_arg_type(t): - """ Returns the type itself if it's a primitive type, - or the "real" type of typing.Optional + """Returns the type itself if it's a primitive type, + or the "real" type of typing.Optional - Doesn't work with anything else at the moment! + Doesn't work with anything else at the moment! """ if _is_optional_type(t): return t.__args__[0] else: return t + def _is_literal_type(t): if _is_optional_type(t): t = _get_arg_type(t) @@ -148,9 +172,9 @@ def _is_literal_type(t): return False + def _get_literal_values(t): - """ Returns the tuple of allowed values for a Literal type - """ + """Returns the tuple of allowed values for a Literal type""" if not _is_literal_type(t): return tuple() if _is_optional_type(t): @@ -158,6 +182,7 @@ def _get_literal_values(t): return typing.get_args(t) + def _normalize_field_name(name): # Convert the name to string if it is not # (in some cases they may be numbers) @@ -182,6 +207,7 @@ def _normalize_field_name(name): return name + def _normalize_dict_field_names(old_dict): new_dict = {} @@ -191,10 +217,11 @@ def _normalize_dict_field_names(old_dict): # Sanity check if len(old_dict) != len(new_dict): - raise InternalError("Dictionary fields do not allow unique normalization") + raise InternalError('Dictionary fields do not allow unique normalization') else: return new_dict + def _normalize_field_names(value): if isinstance(value, dict): return _normalize_dict_field_names(value) @@ -203,16 +230,19 @@ def _normalize_field_names(value): else: return value + def run(module): from argparse import ArgumentParser functions = _get_op_mode_functions(module) parser = ArgumentParser() - subparsers = parser.add_subparsers(dest="subcommand") + subparsers = parser.add_subparsers(dest='subcommand') for function_name in functions: - subparser = subparsers.add_parser(function_name, help=functions[function_name].__doc__) + subparser = subparsers.add_parser( + function_name, help=functions[function_name].__doc__ + ) type_hints = typing.get_type_hints(functions[function_name]) if 'return' in type_hints: @@ -225,62 +255,73 @@ def run(module): # Without this, we'd get options like "--foo_bar" opt = re.sub(r'_', '-', opt) - if _get_arg_type(th) == bool: - subparser.add_argument(f"--{opt}", action='store_true') + if _get_arg_type(th) is bool: + subparser.add_argument(f'--{opt}', action='store_true') else: if _is_optional_type(th): if _is_literal_type(th): - subparser.add_argument(f"--{opt}", - choices=list(_get_literal_values(th)), - default=None) + subparser.add_argument( + f'--{opt}', + choices=list(_get_literal_values(th)), + default=None, 
+ ) else: - subparser.add_argument(f"--{opt}", - type=_get_arg_type(th), default=None) + subparser.add_argument( + f'--{opt}', + type=_get_arg_type(th), + default=None, + ) else: if _is_literal_type(th): - subparser.add_argument(f"--{opt}", - choices=list(_get_literal_values(th)), - required=True) + subparser.add_argument( + f'--{opt}', + choices=list(_get_literal_values(th)), + required=True, + ) else: - subparser.add_argument(f"--{opt}", - type=_get_arg_type(th), required=True) + subparser.add_argument( + f'--{opt}', type=_get_arg_type(th), required=True + ) # Get options as a dict rather than a namespace, # so that we can modify it and pack for passing to functions args = vars(parser.parse_args()) - if not args["subcommand"]: - print("Subcommand required!") + if not args['subcommand']: + print('Subcommand required!') parser.print_usage() sys.exit(1) - function_name = args["subcommand"] + function_name = args['subcommand'] func = functions[function_name] # Remove the subcommand from the arguments, # it would cause an extra argument error when we pass the dict to a function - del args["subcommand"] + del args['subcommand'] # Show and generate commands must always get the "raw" argument, # but other commands (clear/reset/restart/add/delete) should not, # because they produce no output and it makes no sense for them. - if ("raw" not in args) and _capture_output(function_name): - args["raw"] = False + if ('raw' not in args) and _capture_output(function_name): + args['raw'] = False if _capture_output(function_name): # Show and generate commands are slightly special: # they may return human-formatted output # or a raw dict that we need to serialize in JSON for printing res = func(**args) - if not args["raw"]: + if not args['raw']: return res else: if not isinstance(res, dict) and not isinstance(res, list): - raise InternalError(f"Bare literal is not an acceptable raw output, must be a list or an object.\ - The output was:{res}") + raise InternalError( + f'Bare literal is not an acceptable raw output, must be a list or an object.\ + The output was:{res}' + ) res = decamelize(res) res = _normalize_field_names(res) from json import dumps + return dumps(res, indent=4) else: # Other functions should not return anything, diff --git a/src/op_mode/mtr.py b/src/op_mode/mtr.py index eea80c4dac9..522cbe008f5 100644 --- a/src/op_mode/mtr.py +++ b/src/op_mode/mtr.py @@ -26,152 +26,152 @@ 'report-mode': { 'mtr': '{command} --report', 'type': 'noarg', - 'help': 'This option puts mtr into report mode. When in this mode, mtr will run for the number of cycles specified by the -c option, and then print statistics and exit.' + 'help': 'This option puts mtr into report mode. When in this mode, mtr will run for the number of cycles specified by the -c option, and then print statistics and exit.', }, 'report-wide': { 'mtr': '{command} --report-wide', 'type': 'noarg', - 'help': 'This option puts mtr into wide report mode. When in this mode, mtr will not cut hostnames in the report.' + 'help': 'This option puts mtr into wide report mode. When in this mode, mtr will not cut hostnames in the report.', }, 'raw': { 'mtr': '{command} --raw', 'type': 'noarg', - 'help': 'Use the raw output format. This format is better suited for archival of the measurement results.' + 'help': 'Use the raw output format. This format is better suited for archival of the measurement results.', }, 'json': { 'mtr': '{command} --json', 'type': 'noarg', - 'help': 'Use this option to tell mtr to use the JSON output format.' 
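    # The 'mtr' entries in this table are plain str.format() templates; convert()
    # further down expands them one matched option at a time, e.g. (illustrative
    # values, not from an actual run):
    #   options['port']['mtr'].format(command='/bin/mtr -4', value='443')
    #   -> '/bin/mtr -4 --port 443'
    # Options of type 'noarg' are formatted with value='' so only the flag is added.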
+ 'help': 'Use this option to tell mtr to use the JSON output format.', }, 'split': { 'mtr': '{command} --split', 'type': 'noarg', - 'help': 'Use this option to set mtr to spit out a format that is suitable for a split-user interface.' + 'help': 'Use this option to set mtr to spit out a format that is suitable for a split-user interface.', }, 'no-dns': { 'mtr': '{command} --no-dns', 'type': 'noarg', - 'help': 'Use this option to force mtr to display numeric IP numbers and not try to resolve the host names.' + 'help': 'Use this option to force mtr to display numeric IP numbers and not try to resolve the host names.', }, 'show-ips': { 'mtr': '{command} --show-ips {value}', 'type': '', - 'help': 'Use this option to tell mtr to display both the host names and numeric IP numbers.' + 'help': 'Use this option to tell mtr to display both the host names and numeric IP numbers.', }, 'ipinfo': { 'mtr': '{command} --ipinfo {value}', 'type': '', - 'help': 'Displays information about each IP hop.' + 'help': 'Displays information about each IP hop.', }, 'aslookup': { 'mtr': '{command} --aslookup', 'type': 'noarg', - 'help': 'Displays the Autonomous System (AS) number alongside each hop. Equivalent to --ipinfo 0.' + 'help': 'Displays the Autonomous System (AS) number alongside each hop. Equivalent to --ipinfo 0.', }, 'interval': { 'mtr': '{command} --interval {value}', 'type': '', - 'help': 'Use this option to specify the positive number of seconds between ICMP ECHO requests. The default value for this parameter is one second. The root user may choose values between zero and one.' + 'help': 'Use this option to specify the positive number of seconds between ICMP ECHO requests. The default value for this parameter is one second. The root user may choose values between zero and one.', }, 'report-cycles': { 'mtr': '{command} --report-cycles {value}', 'type': '', - 'help': 'Use this option to set the number of pings sent to determine both the machines on the network and the reliability of those machines. Each cycle lasts one second.' + 'help': 'Use this option to set the number of pings sent to determine both the machines on the network and the reliability of those machines. Each cycle lasts one second.', }, 'psize': { 'mtr': '{command} --psize {value}', 'type': '', - 'help': 'This option sets the packet size used for probing. It is in bytes, inclusive IP and ICMP headers. If set to a negative number, every iteration will use a different, random packet size up to that number.' + 'help': 'This option sets the packet size used for probing. It is in bytes, inclusive IP and ICMP headers. If set to a negative number, every iteration will use a different, random packet size up to that number.', }, 'bitpattern': { 'mtr': '{command} --bitpattern {value}', 'type': '', - 'help': 'Specifies bit pattern to use in payload. Should be within range 0 - 255. If NUM is greater than 255, a random pattern is used.' + 'help': 'Specifies bit pattern to use in payload. Should be within range 0 - 255. If NUM is greater than 255, a random pattern is used.', }, 'gracetime': { 'mtr': '{command} --gracetime {value}', 'type': '', - 'help': 'Use this option to specify the positive number of seconds to wait for responses after the final request. The default value is five seconds.' + 'help': 'Use this option to specify the positive number of seconds to wait for responses after the final request. 
The default value is five seconds.', }, 'tos': { 'mtr': '{command} --tos {value}', 'type': '', - 'help': 'Specifies value for type of service field in IP header. Should be within range 0 - 255.' + 'help': 'Specifies value for type of service field in IP header. Should be within range 0 - 255.', }, 'mpls': { 'mtr': '{command} --mpls {value}', 'type': 'noarg', - 'help': 'Use this option to tell mtr to display information from ICMP extensions for MPLS (RFC 4950) that are encoded in the response packets.' + 'help': 'Use this option to tell mtr to display information from ICMP extensions for MPLS (RFC 4950) that are encoded in the response packets.', }, 'interface': { 'mtr': '{command} --interface {value}', 'type': '', 'helpfunction': interface_list, - 'help': 'Use the network interface with a specific name for sending network probes. This can be useful when you have multiple network interfaces with routes to your destination, for example both wired Ethernet and WiFi, and wish to test a particular interface.' + 'help': 'Use the network interface with a specific name for sending network probes. This can be useful when you have multiple network interfaces with routes to your destination, for example both wired Ethernet and WiFi, and wish to test a particular interface.', }, 'address': { 'mtr': '{command} --address {value}', 'type': ' ', - 'help': 'Use this option to bind the outgoing socket to ADDRESS, so that all packets will be sent with ADDRESS as source address.' + 'help': 'Use this option to bind the outgoing socket to ADDRESS, so that all packets will be sent with ADDRESS as source address.', }, 'first-ttl': { 'mtr': '{command} --first-ttl {value}', 'type': '', - 'help': 'Specifies with what TTL to start. Defaults to 1.' + 'help': 'Specifies with what TTL to start. Defaults to 1.', }, 'max-ttl': { 'mtr': '{command} --max-ttl {value}', 'type': '', - 'help': 'Specifies the maximum number of hops or max time-to-live value mtr will probe. Default is 30.' + 'help': 'Specifies the maximum number of hops or max time-to-live value mtr will probe. Default is 30.', }, 'max-unknown': { 'mtr': '{command} --max-unknown {value}', 'type': '', - 'help': 'Specifies the maximum unknown host. Default is 5.' + 'help': 'Specifies the maximum unknown host. Default is 5.', }, 'udp': { 'mtr': '{command} --udp', 'type': 'noarg', - 'help': 'Use UDP datagrams instead of ICMP ECHO.' + 'help': 'Use UDP datagrams instead of ICMP ECHO.', }, 'tcp': { 'mtr': '{command} --tcp', 'type': 'noarg', - 'help': ' Use TCP SYN packets instead of ICMP ECHO. PACKETSIZE is ignored, since SYN packets can not contain data.' + 'help': ' Use TCP SYN packets instead of ICMP ECHO. PACKETSIZE is ignored, since SYN packets can not contain data.', }, 'sctp': { 'mtr': '{command} --sctp', 'type': 'noarg', - 'help': 'Use Stream Control Transmission Protocol packets instead of ICMP ECHO.' + 'help': 'Use Stream Control Transmission Protocol packets instead of ICMP ECHO.', }, 'port': { 'mtr': '{command} --port {value}', 'type': '', - 'help': 'The target port number for TCP/SCTP/UDP traces.' + 'help': 'The target port number for TCP/SCTP/UDP traces.', }, 'localport': { 'mtr': '{command} --localport {value}', 'type': '', - 'help': 'The source port number for UDP traces.' + 'help': 'The source port number for UDP traces.', }, 'timeout': { 'mtr': '{command} --timeout {value}', 'type': '', - 'help': ' The number of seconds to keep probe sockets open before giving up on the connection.' 
+ 'help': ' The number of seconds to keep probe sockets open before giving up on the connection.', }, 'mark': { 'mtr': '{command} --mark {value}', 'type': '', - 'help': ' Set the mark for each packet sent through this socket similar to the netfilter MARK target but socket-based. MARK is 32 unsigned integer.' + 'help': ' Set the mark for each packet sent through this socket similar to the netfilter MARK target but socket-based. MARK is 32 unsigned integer.', }, 'vrf': { 'mtr': 'sudo ip vrf exec {value} {command}', 'type': '', 'help': 'Use specified VRF table', 'helpfunction': vrf_list, - 'dflt': 'default' - } - } + 'dflt': 'default', + }, +} mtr = { 4: '/bin/mtr -4', @@ -204,8 +204,8 @@ def completion_failure(option: str) -> None: def expension_failure(option, completions): reason = 'Ambiguous' if completions else 'Invalid' sys.stderr.write( - '\n\n {} command: {} [{}]\n\n'.format(reason, ' '.join(sys.argv), - option)) + '\n\n {} command: {} [{}]\n\n'.format(reason, ' '.join(sys.argv), option) + ) if completions: sys.stderr.write(' Possible completions:\n ') sys.stderr.write('\n '.join(completions)) @@ -229,13 +229,13 @@ def convert(command, args): if longname == 'json': to_json = True if options[longname]['type'] == 'noarg': - command = options[longname]['mtr'].format( - command=command, value='') + command = options[longname]['mtr'].format(command=command, value='') elif not args: sys.exit(f'mtr: missing argument for {longname} option') else: command = options[longname]['mtr'].format( - command=command, value=args.first()) + command=command, value=args.first() + ) return command, to_json @@ -244,7 +244,7 @@ def convert(command, args): host = args.first() if not host: - sys.exit("mtr: Missing host") + sys.exit('mtr: Missing host') if host == '--get-options' or host == '--get-options-nested': if host == '--get-options-nested': From 3f933f1642debfca5c9e3873790a5742ef242c75 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20G=C3=B6hler?= Date: Thu, 17 Oct 2024 08:35:47 +0200 Subject: [PATCH 45/85] T973: add basic frr_exporter implementation (#4150) --- .../frr_exporter/frr_exporter.service.j2 | 20 ++++ debian/control | 3 + .../service_monitoring_frr_exporter.xml.in | 25 +++++ .../test_service_monitoring_frr-exporter.py | 64 +++++++++++ .../service_monitoring_frr-exporter.py | 101 ++++++++++++++++++ 5 files changed, 213 insertions(+) create mode 100644 data/templates/frr_exporter/frr_exporter.service.j2 create mode 100644 interface-definitions/service_monitoring_frr_exporter.xml.in create mode 100755 smoketest/scripts/cli/test_service_monitoring_frr-exporter.py create mode 100755 src/conf_mode/service_monitoring_frr-exporter.py diff --git a/data/templates/frr_exporter/frr_exporter.service.j2 b/data/templates/frr_exporter/frr_exporter.service.j2 new file mode 100644 index 00000000000..c3892e42b93 --- /dev/null +++ b/data/templates/frr_exporter/frr_exporter.service.j2 @@ -0,0 +1,20 @@ +{% set vrf_command = 'ip vrf exec ' ~ vrf ~ ' runuser -u frr -- ' if vrf is vyos_defined else '' %} +[Unit] +Description=FRR Exporter +Documentation=https://github.com/tynany/frr_exporter +After=network.target + +[Service] +{% if vrf is not vyos_defined %} +User=frr +{% endif %} +ExecStart={{ vrf_command }}/usr/sbin/frr_exporter \ +{% if listen_address is vyos_defined %} +{% for address in listen_address %} + --web.listen-address={{ address }}:{{ port }} +{% endfor %} +{% else %} + --web.listen-address=:{{ port }} +{% endif %} +[Install] +WantedBy=multi-user.target diff --git a/debian/control b/debian/control index 
20cfcdc4355..a194614121e 100644 --- a/debian/control +++ b/debian/control @@ -238,6 +238,9 @@ Depends: # For "service monitoring node-exporter" node-exporter, # End "service monitoring node-exporter" +# For "service monitoring frr-exporter" + frr-exporter, +# End "service monitoring frr-exporter" # For "service monitoring telegraf" telegraf (>= 1.20), # End "service monitoring telegraf" diff --git a/interface-definitions/service_monitoring_frr_exporter.xml.in b/interface-definitions/service_monitoring_frr_exporter.xml.in new file mode 100644 index 00000000000..96aee3ab473 --- /dev/null +++ b/interface-definitions/service_monitoring_frr_exporter.xml.in @@ -0,0 +1,25 @@ + + + + + + + + + Prometheus exporter for FRR metrics + 1280 + + + #include + #include + + 9342 + + #include + + + + + + + diff --git a/smoketest/scripts/cli/test_service_monitoring_frr-exporter.py b/smoketest/scripts/cli/test_service_monitoring_frr-exporter.py new file mode 100755 index 00000000000..230171c11af --- /dev/null +++ b/smoketest/scripts/cli/test_service_monitoring_frr-exporter.py @@ -0,0 +1,64 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2024 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import unittest + +from base_vyostest_shim import VyOSUnitTestSHIM +from vyos.utils.process import process_named_running +from vyos.utils.file import read_file + +PROCESS_NAME = 'frr_exporter' +base_path = ['service', 'monitoring', 'frr-exporter'] +service_file = '/etc/systemd/system/frr_exporter.service' +listen_if = 'dum3421' +listen_ip = '192.0.2.1' + + +class TestMonitoringFrrExporter(VyOSUnitTestSHIM.TestCase): + @classmethod + def setUpClass(cls): + # call base-classes classmethod + super(TestMonitoringFrrExporter, cls).setUpClass() + # create a test interfaces + cls.cli_set( + cls, ['interfaces', 'dummy', listen_if, 'address', listen_ip + '/32'] + ) + + @classmethod + def tearDownClass(cls): + cls.cli_delete(cls, ['interfaces', 'dummy', listen_if]) + super(TestMonitoringFrrExporter, cls).tearDownClass() + + def tearDown(self): + self.cli_delete(base_path) + self.cli_commit() + self.assertFalse(process_named_running(PROCESS_NAME)) + + def test_01_basic_config(self): + self.cli_set(base_path + ['listen-address', listen_ip]) + + # commit changes + self.cli_commit() + + file_content = read_file(service_file) + self.assertIn(f'{listen_ip}:9342', file_content) + + # Check for running process + self.assertTrue(process_named_running(PROCESS_NAME)) + + +if __name__ == '__main__': + unittest.main(verbosity=2) diff --git a/src/conf_mode/service_monitoring_frr-exporter.py b/src/conf_mode/service_monitoring_frr-exporter.py new file mode 100755 index 00000000000..01527d5799a --- /dev/null +++ b/src/conf_mode/service_monitoring_frr-exporter.py @@ -0,0 +1,101 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2024 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as 
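# For illustration (values borrowed from the smoketest above, not part of this
# script): with 'set service monitoring frr-exporter listen-address 192.0.2.1'
# and the default port 9342, frr_exporter.service.j2 would be expected to render
# an ExecStart roughly like:
#   ExecStart=/usr/sbin/frr_exporter \
#       --web.listen-address=192.0.2.1:9342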
+# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import os + +from sys import exit + +from vyos.config import Config +from vyos.configdict import is_node_changed +from vyos.configverify import verify_vrf +from vyos.template import render +from vyos.utils.process import call +from vyos import ConfigError +from vyos import airbag + + +airbag.enable() + +service_file = '/etc/systemd/system/frr_exporter.service' +systemd_service = 'frr_exporter.service' + + +def get_config(config=None): + if config: + conf = config + else: + conf = Config() + base = ['service', 'monitoring', 'frr-exporter'] + if not conf.exists(base): + return None + + config_data = conf.get_config_dict( + base, key_mangling=('-', '_'), get_first_key=True + ) + config_data = conf.merge_defaults(config_data, recursive=True) + + tmp = is_node_changed(conf, base + ['vrf']) + if tmp: + config_data.update({'restart_required': {}}) + + return config_data + + +def verify(config_data): + # bail out early - looks like removal from running config + if not config_data: + return None + + verify_vrf(config_data) + return None + + +def generate(config_data): + if not config_data: + # Delete systemd files + if os.path.isfile(service_file): + os.unlink(service_file) + return None + + # Render frr_exporter service_file + render(service_file, 'frr_exporter/frr_exporter.service.j2', config_data) + return None + + +def apply(config_data): + # Reload systemd manager configuration + call('systemctl daemon-reload') + if not config_data: + call(f'systemctl stop {systemd_service}') + return + + # we need to restart the service if e.g. 
the VRF name changed + systemd_action = 'reload-or-restart' + if 'restart_required' in config_data: + systemd_action = 'restart' + + call(f'systemctl {systemd_action} {systemd_service}') + + +if __name__ == '__main__': + try: + c = get_config() + verify(c) + generate(c) + apply(c) + except ConfigError as e: + print(e) + exit(1) From e42e32d48489329686ce6642848b189c0d262196 Mon Sep 17 00:00:00 2001 From: khramshinr Date: Tue, 15 Oct 2024 17:37:13 +0600 Subject: [PATCH 46/85] T4583: Rewrite VRRP op-mode to vyos.opmode format --- op-mode-definitions/vrrp.xml.in | 25 ++- python/vyos/ifconfig/vrrp.py | 6 +- src/op_mode/vrrp.py | 355 ++++++++++++++++++++++++++++---- 3 files changed, 341 insertions(+), 45 deletions(-) diff --git a/op-mode-definitions/vrrp.xml.in b/op-mode-definitions/vrrp.xml.in index 158e7093e09..fb777b2e4c2 100644 --- a/op-mode-definitions/vrrp.xml.in +++ b/op-mode-definitions/vrrp.xml.in @@ -2,23 +2,42 @@ + + + Show specified VRRP (Virtual Router Redundancy Protocol) group information + + + + + Show VRRP statistics + + sudo ${vyos_op_scripts_dir}/vrrp.py show_statistics --group-name="$3" + + + + Show detailed VRRP state information + + sudo ${vyos_op_scripts_dir}/vrrp.py show_detail --group-name="$3" + + + Show VRRP (Virtual Router Redundancy Protocol) information - sudo ${vyos_op_scripts_dir}/vrrp.py --summary + sudo ${vyos_op_scripts_dir}/vrrp.py show_summary Show VRRP statistics - sudo ${vyos_op_scripts_dir}/vrrp.py --statistics + sudo ${vyos_op_scripts_dir}/vrrp.py show_statistics Show detailed VRRP state information - sudo ${vyos_op_scripts_dir}/vrrp.py --data + sudo ${vyos_op_scripts_dir}/vrrp.py show_detail diff --git a/python/vyos/ifconfig/vrrp.py b/python/vyos/ifconfig/vrrp.py index ee9336d1a35..d3d31cc0734 100644 --- a/python/vyos/ifconfig/vrrp.py +++ b/python/vyos/ifconfig/vrrp.py @@ -99,11 +99,11 @@ def collect(cls, what): timeout=30) return read_file(fname) + except FileNotFoundError: + raise VRRPNoData("VRRP data is not available (process not running or no active groups)") except OSError: # raised by vyos.utils.file.read_file raise VRRPNoData("VRRP data is not available (wait time exceeded)") - except FileNotFoundError: - raise VRRPNoData("VRRP data is not available (process not running or no active groups)") except Exception: name = cls._name[what] raise VRRPError(f'VRRP {name} is not available') @@ -136,7 +136,7 @@ def format(cls, data): headers = ["Name", "Interface", "VRID", "State", "Priority", "Last Transition"] groups = [] - data = json.loads(data) + data = json.loads(data) if isinstance(data, str) else data for group in data: data = group['data'] diff --git a/src/op_mode/vrrp.py b/src/op_mode/vrrp.py index 60be8606518..ef1338e23f3 100755 --- a/src/op_mode/vrrp.py +++ b/src/op_mode/vrrp.py @@ -13,47 +13,324 @@ # # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
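One detail worth noting in the vyos/ifconfig/vrrp.py hunk above: the FileNotFoundError handler has to be listed before the OSError handler because FileNotFoundError is a subclass of OSError, so with the previous ordering the more specific branch could never be reached:

    # quick check of the subclass relationship
    issubclass(FileNotFoundError, OSError)   # True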
- +import json import sys -import argparse +import typing + +from jinja2 import Template -from vyos.configquery import ConfigTreeQuery -from vyos.ifconfig.vrrp import VRRP +import vyos.opmode +from vyos.ifconfig import VRRP from vyos.ifconfig.vrrp import VRRPNoData -parser = argparse.ArgumentParser() -group = parser.add_mutually_exclusive_group() -group.add_argument("-s", "--summary", action="store_true", help="Print VRRP summary") -group.add_argument("-t", "--statistics", action="store_true", help="Print VRRP statistics") -group.add_argument("-d", "--data", action="store_true", help="Print detailed VRRP data") - -args = parser.parse_args() - -def is_configured(): - """ Check if VRRP is configured """ - config = ConfigTreeQuery() - if not config.exists(['high-availability', 'vrrp', 'group']): - return False - return True - -# Exit early if VRRP is dead or not configured -if is_configured() == False: - print('VRRP not configured!') - exit(0) -if not VRRP.is_running(): - print('VRRP is not running') - sys.exit(0) - -try: - if args.summary: - print(VRRP.format(VRRP.collect('json'))) - elif args.statistics: - print(VRRP.collect('stats')) - elif args.data: - print(VRRP.collect('state')) - else: - parser.print_help() + +stat_template = Template(""" +{% for rec in instances %} +VRRP Instance: {{rec.instance}} + Advertisements: + Received: {{rec.advert_rcvd}} + Sent: {{rec.advert_sent}} + Became master: {{rec.become_master}} + Released master: {{rec.release_master}} + Packet Errors: + Length: {{rec.packet_len_err}} + TTL: {{rec.ip_ttl_err}} + Invalid Type: {{rec.invalid_type_rcvd}} + Advertisement Interval: {{rec.advert_interval_err}} + Address List: {{rec.addr_list_err}} + Authentication Errors: + Invalid Type: {{rec.invalid_authtype}} + Type Mismatch: {{rec.authtype_mismatch}} + Failure: {{rec.auth_failure}} + Priority Zero: + Received: {{rec.pri_zero_rcvd}} + Sent: {{rec.pri_zero_sent}} +{% endfor %} +""") + +detail_template = Template(""" +{%- for rec in instances %} + VRRP Instance: {{rec.iname}} + VRRP Version: {{rec.version}} + State: {{rec.state}} + {% if rec.state == 'BACKUP' -%} + Master priority: {{ rec.master_priority }} + {% if rec.version == 3 -%} + Master advert interval: {{ rec.master_adver_int }} + {% endif -%} + {% endif -%} + Wantstate: {{rec.wantstate}} + Last transition: {{rec.last_transition}} + Interface: {{rec.ifp_ifname}} + {% if rec.dont_track_primary > 0 -%} + VRRP interface tracking disabled + {% endif -%} + {% if rec.skip_check_adv_addr > 0 -%} + Skip checking advert IP addresses + {% endif -%} + {% if rec.strict_mode > 0 -%} + Enforcing strict VRRP compliance + {% endif -%} + Gratuitous ARP delay: {{rec.garp_delay}} + Gratuitous ARP repeat: {{rec.garp_rep}} + Gratuitous ARP refresh: {{rec.garp_refresh}} + Gratuitous ARP refresh repeat: {{rec.garp_refresh_rep}} + Gratuitous ARP lower priority delay: {{rec.garp_lower_prio_delay}} + Gratuitous ARP lower priority repeat: {{rec.garp_lower_prio_rep}} + Send advert after receive lower priority advert: {{rec.lower_prio_no_advert}} + Send advert after receive higher priority advert: {{rec.higher_prio_send_advert}} + Virtual Router ID: {{rec.vrid}} + Priority: {{rec.base_priority}} + Effective priority: {{rec.effective_priority}} + Advert interval: {{rec.adver_int}} sec + Accept: {{rec.accept}} + Preempt: {{rec.nopreempt}} + {% if rec.preempt_delay -%} + Preempt delay: {{rec.preempt_delay}} + {% endif -%} + Promote secondaries: {{rec.promote_secondaries}} + Authentication type: {{rec.auth_type}} + {% if rec.vips %} + Virtual IP 
({{ rec.vips | length }}): + {% for ip in rec.vips -%} + {{ip}} + {% endfor -%} + {% endif -%} + {% if rec.evips %} + Virtual IP Excluded: + {% for ip in rec.evips -%} + {{ip}} + {% endfor -%} + {% endif -%} + {% if rec.vroutes %} + Virtual Routes: + {% for route in rec.vroutes -%} + {{route}} + {% endfor -%} + {% endif -%} + {% if rec.vrules %} + Virtual Rules: + {% for rule in rec.vrules -%} + {{rule}} + {% endfor -%} + {% endif -%} + {% if rec.track_ifp %} + Tracked interfaces: + {% for ifp in rec.track_ifp -%} + {{ifp}} + {% endfor -%} + {% endif -%} + {% if rec.track_script %} + Tracked scripts: + {% for script in rec.track_script -%} + {{script}} + {% endfor -%} + {% endif %} + Using smtp notification: {{rec.smtp_alert}} + Notify deleted: {{rec.notify_deleted}} +{% endfor %} +""") + +# https://github.com/acassen/keepalived/blob/59c39afe7410f927c9894a1bafb87e398c6f02be/keepalived/include/vrrp.h#L126 +VRRP_AUTH_NONE = 0 +VRRP_AUTH_PASS = 1 +VRRP_AUTH_AH = 2 + +# https://github.com/acassen/keepalived/blob/59c39afe7410f927c9894a1bafb87e398c6f02be/keepalived/include/vrrp.h#L417 +VRRP_STATE_INIT = 0 +VRRP_STATE_BACK = 1 +VRRP_STATE_MAST = 2 +VRRP_STATE_FAULT = 3 + +VRRP_AUTH_TO_NAME = { + VRRP_AUTH_NONE: 'NONE', + VRRP_AUTH_PASS: 'SIMPLE_PASSWORD', + VRRP_AUTH_AH: 'IPSEC_AH', +} + +VRRP_STATE_TO_NAME = { + VRRP_STATE_INIT: 'INIT', + VRRP_STATE_BACK: 'BACKUP', + VRRP_STATE_MAST: 'MASTER', + VRRP_STATE_FAULT: 'FAULT', +} + + +def _get_raw_data(group_name: str = None) -> list: + """ + Retrieve raw JSON data for all VRRP groups. + + Args: + group_name (str, optional): If provided, filters the data to only + include the specified vrrp group. + + Returns: + list: A list of raw JSON data for VRRP groups, filtered by group_name + if specified. + """ + try: + output = VRRP.collect('json') + except VRRPNoData as e: + raise vyos.opmode.DataUnavailable(f'{e}') + + data = json.loads(output) + + if not data: + return [] + + if group_name is not None: + for rec in data: + if rec['data'].get('iname') == group_name: + return [rec] + return [] + return data + + +def _get_formatted_statistics_output(data: list) -> str: + """ + Prepare formatted statistics output from the given data. + + Args: + data (list): A list of dictionaries containing vrrp grop information + and statistics. + + Returns: + str: Rendered statistics output based on the provided data. + """ + instances = list() + for instance in data: + instances.append( + {'instance': instance['data'].get('iname'), **instance['stats']} + ) + + return stat_template.render(instances=instances) + + +def _process_field(data: dict, field: str, true_value: str, false_value: str): + """ + Updates the given field in the data dictionary with a specified value based + on its truthiness. + + Args: + data (dict): The dictionary containing the field to be processed. + field (str): The key representing the field in the dictionary. + true_value (str): The value to set if the field's value is truthy. + false_value (str): The value to set if the field's value is falsy. + + Returns: + None: The function modifies the dictionary in place. + """ + data[field] = true_value if data.get(field) else false_value + + +def _get_formatted_detail_output(data: list) -> str: + """ + Prepare formatted detail information output from the given data. + + Args: + data (list): A list of dictionaries containing vrrp grop information + and statistics. + + Returns: + str: Rendered detail info output based on the provided data. 
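    Each element these helpers receive is one keepalived JSON record; judging
    from the fields accessed here and in VRRP.format(), a record looks roughly
    like the following (group name and values are illustrative only):

        {
            'data': {
                'iname': 'GROUP01', 'ifp_ifname': 'eth0', 'vrid': 10,
                'state': 2, 'effective_priority': 150, 'last_transition': 0.0,
                # plus the wantstate/auth_type/vips/... fields rendered above
            },
            'stats': {
                'advert_rcvd': 0, 'advert_sent': 10, 'become_master': 1,
                # plus the remaining counters used by stat_template
            },
        }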
+ """ + instances = list() + for instance in data: + instance['data']['state'] = VRRP_STATE_TO_NAME.get( + instance['data'].get('state'), 'unknown' + ) + instance['data']['wantstate'] = VRRP_STATE_TO_NAME.get( + instance['data'].get('wantstate'), 'unknown' + ) + instance['data']['auth_type'] = VRRP_AUTH_TO_NAME.get( + instance['data'].get('auth_type'), 'unknown' + ) + _process_field(instance['data'], 'lower_prio_no_advert', 'false', 'true') + _process_field(instance['data'], 'higher_prio_send_advert', 'true', 'false') + _process_field(instance['data'], 'accept', 'Enabled', 'Disabled') + _process_field(instance['data'], 'notify_deleted', 'Deleted', 'Fault') + _process_field(instance['data'], 'smtp_alert', 'yes', 'no') + _process_field(instance['data'], 'nopreempt', 'Disabled', 'Enabled') + _process_field(instance['data'], 'promote_secondaries', 'Enabled', 'Disabled') + instance['data']['vips'] = instance['data'].get('vips', False) + instance['data']['evips'] = instance['data'].get('evips', False) + instance['data']['vroutes'] = instance['data'].get('vroutes', False) + instance['data']['vrules'] = instance['data'].get('vrules', False) + + instances.append(instance['data']) + + return detail_template.render(instances=instances) + + +def show_detail( + raw: bool, group_name: typing.Optional[str] = None +) -> typing.Union[list, str]: + """ + Display detailed information about the VRRP group. + + Args: + raw (bool): If True, return raw data instead of formatted output. + group_name (str, optional): Filter the data by a specific group name, + if provided. + + Returns: + list or str: Raw data if `raw` is True, otherwise a formatted detail + output. + """ + data = _get_raw_data(group_name) + + if raw: + return data + + return _get_formatted_detail_output(data) + + +def show_statistics( + raw: bool, group_name: typing.Optional[str] = None +) -> typing.Union[list, str]: + """ + Display VRRP group statistics. + + Args: + raw (bool): If True, return raw data instead of formatted output. + group_name (str, optional): Filter the data by a specific group name, + if provided. + + Returns: + list or str: Raw data if `raw` is True, otherwise a formatted statistic + output. + """ + data = _get_raw_data(group_name) + + if raw: + return data + + return _get_formatted_statistics_output(data) + + +def show_summary(raw: bool) -> typing.Union[list, str]: + """ + Display a summary of VRRP group. + + Args: + raw (bool): If True, return raw data instead of formatted output. + + Returns: + list or str: Raw data if `raw` is True, otherwise a formatted summary output. 
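    Because these are show_* functions, vyos.opmode.run() exposes each one as a
    subcommand, turns underscores in parameter names into dashed options and
    maps the raw parameter to a --raw flag, so the rewritten script can be
    exercised directly, for example (GROUP is a placeholder group name):

        sudo ${vyos_op_scripts_dir}/vrrp.py show_summary
        sudo ${vyos_op_scripts_dir}/vrrp.py show_detail --group-name GROUP --raw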
+ """ + data = _get_raw_data() + + if raw: + return data + + return VRRP.format(data) + + +if __name__ == '__main__': + try: + res = vyos.opmode.run(sys.modules[__name__]) + if res: + print(res) + except (ValueError, vyos.opmode.Error) as e: + print(e) sys.exit(1) -except VRRPNoData as e: - print(e) - sys.exit(1) From a5a484a50f976b4df7abd0a00a89dfb1512d84cb Mon Sep 17 00:00:00 2001 From: khramshinr Date: Thu, 17 Oct 2024 15:30:28 +0600 Subject: [PATCH 47/85] T4583: Rewrite VRRP op-mode to vyos.opmode format reformat file by linter rules --- python/vyos/ifconfig/vrrp.py | 64 ++++++++++++++++++++++-------------- 1 file changed, 40 insertions(+), 24 deletions(-) diff --git a/python/vyos/ifconfig/vrrp.py b/python/vyos/ifconfig/vrrp.py index d3d31cc0734..a3657370f54 100644 --- a/python/vyos/ifconfig/vrrp.py +++ b/python/vyos/ifconfig/vrrp.py @@ -26,34 +26,37 @@ from vyos.utils.file import wait_for_file_write_complete from vyos.utils.process import process_running + class VRRPError(Exception): pass + class VRRPNoData(VRRPError): pass + class VRRP(object): _vrrp_prefix = '00:00:5E:00:01:' location = { - 'pid': '/run/keepalived/keepalived.pid', - 'fifo': '/run/keepalived/keepalived_notify_fifo', - 'state': '/tmp/keepalived.data', - 'stats': '/tmp/keepalived.stats', - 'json': '/tmp/keepalived.json', - 'daemon': '/etc/default/keepalived', - 'config': '/run/keepalived/keepalived.conf', + 'pid': '/run/keepalived/keepalived.pid', + 'fifo': '/run/keepalived/keepalived_notify_fifo', + 'state': '/tmp/keepalived.data', + 'stats': '/tmp/keepalived.stats', + 'json': '/tmp/keepalived.json', + 'daemon': '/etc/default/keepalived', + 'config': '/run/keepalived/keepalived.conf', } _signal = { - 'state': signal.SIGUSR1, - 'stats': signal.SIGUSR2, - 'json': signal.SIGRTMIN + 2, + 'state': signal.SIGUSR1, + 'stats': signal.SIGUSR2, + 'json': signal.SIGRTMIN + 2, } _name = { 'state': 'information', 'stats': 'statistics', - 'json': 'data', + 'json': 'data', } state = { @@ -64,7 +67,7 @@ class VRRP(object): # UNKNOWN } - def __init__(self,ifname): + def __init__(self, ifname): self.ifname = ifname def enabled(self): @@ -79,7 +82,7 @@ def active_interfaces(cls): @classmethod def decode_state(cls, code): - return cls.state.get(code,'UNKNOWN') + return cls.state.get(code, 'UNKNOWN') # used in conf mode @classmethod @@ -94,16 +97,20 @@ def collect(cls, what): try: # send signal to generate the configuration file pid = read_file(cls.location['pid']) - wait_for_file_write_complete(fname, - pre_hook=(lambda: os.kill(int(pid), cls._signal[what])), - timeout=30) + wait_for_file_write_complete( + fname, + pre_hook=(lambda: os.kill(int(pid), cls._signal[what])), + timeout=30, + ) return read_file(fname) except FileNotFoundError: - raise VRRPNoData("VRRP data is not available (process not running or no active groups)") + raise VRRPNoData( + 'VRRP data is not available (process not running or no active groups)' + ) except OSError: # raised by vyos.utils.file.read_file - raise VRRPNoData("VRRP data is not available (wait time exceeded)") + raise VRRPNoData('VRRP data is not available (wait time exceeded)') except Exception: name = cls._name[what] raise VRRPError(f'VRRP {name} is not available') @@ -118,22 +125,31 @@ def disabled(cls): conf = ConfigTreeQuery() if conf.exists(base): # Read VRRP configuration directly from CLI - vrrp_config_dict = conf.get_config_dict(base, key_mangling=('-', '_'), - get_first_key=True) + vrrp_config_dict = conf.get_config_dict( + base, key_mangling=('-', '_'), get_first_key=True + ) # add disabled 
groups to the list if 'group' in vrrp_config_dict: for group, group_config in vrrp_config_dict['group'].items(): if 'disable' not in group_config: continue - disabled.append([group, group_config['interface'], group_config['vrid'], 'DISABLED', '']) + disabled.append( + [ + group, + group_config['interface'], + group_config['vrid'], + 'DISABLED', + '', + ] + ) # return list with disabled instances return disabled @classmethod def format(cls, data): - headers = ["Name", "Interface", "VRID", "State", "Priority", "Last Transition"] + headers = ['Name', 'Interface', 'VRID', 'State', 'Priority', 'Last Transition'] groups = [] data = json.loads(data) if isinstance(data, str) else data @@ -143,7 +159,7 @@ def format(cls, data): name = data['iname'] intf = data['ifp_ifname'] vrid = data['vrid'] - state = cls.decode_state(data["state"]) + state = cls.decode_state(data['state']) priority = data['effective_priority'] since = int(time() - float(data['last_transition'])) @@ -153,4 +169,4 @@ def format(cls, data): # add to the active list disabled instances groups.extend(cls.disabled()) - return(tabulate(groups, headers)) + return tabulate(groups, headers) From 8943446c8c79c6daad2326ddc7a59950123b35d2 Mon Sep 17 00:00:00 2001 From: Nataliia Solomko Date: Fri, 18 Oct 2024 15:42:59 +0300 Subject: [PATCH 48/85] pki: T4914: Rewrite the PKI op mode in the new style --- op-mode-definitions/pki.xml.in | 108 +++++------ python/vyos/configsession.py | 3 +- python/vyos/opmode.py | 5 +- src/op_mode/pki.py | 295 +++++++++++++++++-------------- src/services/api/rest/routers.py | 4 +- 5 files changed, 224 insertions(+), 191 deletions(-) diff --git a/op-mode-definitions/pki.xml.in b/op-mode-definitions/pki.xml.in index 254ef08cc16..866f482bf53 100644 --- a/op-mode-definitions/pki.xml.in +++ b/op-mode-definitions/pki.xml.in @@ -27,7 +27,7 @@ <filename> - sudo -E ${vyos_op_scripts_dir}/pki.py --action generate --ca "$7" --sign "$5" --file + sudo -E ${vyos_op_scripts_dir}/pki.py generate_pki --pki-type ca --name "$7" --sign "$5" --file @@ -36,10 +36,10 @@ <certificate name> - ${vyos_op_scripts_dir}/pki.py --action generate --ca "$7" --sign "$5" --install + ${vyos_op_scripts_dir}/pki.py generate_pki --pki-type ca --name "$7" --sign "$5" --install - ${vyos_op_scripts_dir}/pki.py --action generate --ca "noname" --sign "$5" + ${vyos_op_scripts_dir}/pki.py generate_pki --pki-type ca --sign "$5" @@ -48,7 +48,7 @@ <filename> - sudo -E ${vyos_op_scripts_dir}/pki.py --action generate --ca "$5" --file + sudo -E ${vyos_op_scripts_dir}/pki.py generate_pki --pki-type ca --name "$5" --file @@ -57,10 +57,10 @@ <CA name> - ${vyos_op_scripts_dir}/pki.py --action generate --ca "$5" --install + ${vyos_op_scripts_dir}/pki.py generate_pki --pki-type ca --name "$5" --install - ${vyos_op_scripts_dir}/pki.py --action generate --ca "noname" + ${vyos_op_scripts_dir}/pki.py generate_pki --pki-type ca @@ -79,7 +79,7 @@ <filename> - sudo -E ${vyos_op_scripts_dir}/pki.py --action generate --certificate "$6" --self-sign --file + sudo -E ${vyos_op_scripts_dir}/pki.py generate_pki --pki-type certificate --name "$6" --self-sign --file @@ -88,10 +88,10 @@ <certificate name> - ${vyos_op_scripts_dir}/pki.py --action generate --certificate "$6" --self-sign --install + ${vyos_op_scripts_dir}/pki.py generate_pki --pki-type certificate --name "$6" --self-sign --install - ${vyos_op_scripts_dir}/pki.py --action generate --certificate "noname" --self-sign + ${vyos_op_scripts_dir}/pki.py generate_pki --pki-type certificate --self-sign @@ -108,7 +108,7 @@ <filename> - 
sudo -E ${vyos_op_scripts_dir}/pki.py --action generate --certificate "$7" --sign "$5" --file + sudo -E ${vyos_op_scripts_dir}/pki.py generate_pki --pki-type certificate --name "$7" --sign "$5" --file @@ -117,10 +117,10 @@ <certificate name> - ${vyos_op_scripts_dir}/pki.py --action generate --certificate "$7" --sign "$5" --install + ${vyos_op_scripts_dir}/pki.py generate_pki --pki-type certificate --name "$7" --sign "$5" --install - ${vyos_op_scripts_dir}/pki.py --action generate --certificate "noname" --sign "$5" + ${vyos_op_scripts_dir}/pki.py generate_pki --pki-type certificate --sign "$5" @@ -129,7 +129,7 @@ <filename> - sudo -E ${vyos_op_scripts_dir}/pki.py --action generate --certificate "$5" --file + sudo -E ${vyos_op_scripts_dir}/pki.py generate_pki --pki-type certificate --name "$5" --file @@ -138,10 +138,10 @@ <certificate name> - ${vyos_op_scripts_dir}/pki.py --action generate --certificate "$5" --install + ${vyos_op_scripts_dir}/pki.py generate_pki --pki-type certificate --name "$5" --install - ${vyos_op_scripts_dir}/pki.py --action generate --certificate "noname" + ${vyos_op_scripts_dir}/pki.py generate_pki --pki-type certificate @@ -158,16 +158,16 @@ <filename> - sudo -E ${vyos_op_scripts_dir}/pki.py --action generate --crl "$4" --file + sudo -E ${vyos_op_scripts_dir}/pki.py generate_pki --pki-type crl --name "$4" --file Commands for installing generated CRL into running configuration - ${vyos_op_scripts_dir}/pki.py --action generate --crl "$4" --install + ${vyos_op_scripts_dir}/pki.py generate_pki --pki-type crl --name "$4" --install - ${vyos_op_scripts_dir}/pki.py --action generate --crl "$4" + ${vyos_op_scripts_dir}/pki.py generate_pki --pki-type crl --name "$4" @@ -181,7 +181,7 @@ <filename> - sudo -E ${vyos_op_scripts_dir}/pki.py --action generate --dh "$5" --file + sudo -E ${vyos_op_scripts_dir}/pki.py generate_pki --pki-type dh --name "$5" --file @@ -190,10 +190,10 @@ <DH name> - ${vyos_op_scripts_dir}/pki.py --action generate --dh "$5" --install + ${vyos_op_scripts_dir}/pki.py generate_pki --pki-type dh --name "$5" --install - ${vyos_op_scripts_dir}/pki.py --action generate --dh "noname" + ${vyos_op_scripts_dir}/pki.py generate_pki --pki-type dh @@ -207,7 +207,7 @@ <filename> - sudo -E ${vyos_op_scripts_dir}/pki.py --action generate --keypair "$5" --file + sudo -E ${vyos_op_scripts_dir}/pki.py generate_pki --pki-type key-pair --name "$5" --file @@ -216,10 +216,10 @@ <key name> - ${vyos_op_scripts_dir}/pki.py --action generate --keypair "$5" --install + ${vyos_op_scripts_dir}/pki.py generate_pki --pki-type key-pair --name "$5" --install - ${vyos_op_scripts_dir}/pki.py --action generate --keypair "noname" + ${vyos_op_scripts_dir}/pki.py generate_pki --pki-type key-pair @@ -238,7 +238,7 @@ <filename> - sudo -E ${vyos_op_scripts_dir}/pki.py --action generate --openvpn "$6" --file + sudo -E ${vyos_op_scripts_dir}/pki.py generate_pki --pki-type openvpn --name "$6" --file @@ -247,10 +247,10 @@ <key name> - ${vyos_op_scripts_dir}/pki.py --action generate --openvpn "$6" --install + ${vyos_op_scripts_dir}/pki.py generate_pki --pki-type openvpn --name "$6" --install - ${vyos_op_scripts_dir}/pki.py --action generate --openvpn "noname" + ${vyos_op_scripts_dir}/pki.py generate_pki --pki-type openvpn @@ -266,7 +266,7 @@ <filename> - sudo -E ${vyos_op_scripts_dir}/pki.py --action generate --ssh "$5" --file + sudo -E ${vyos_op_scripts_dir}/pki.py generate_pki --pki-type ssh --name "$5" --file @@ -275,10 +275,10 @@ <key name> - ${vyos_op_scripts_dir}/pki.py --action generate --ssh "$5" 
--install + ${vyos_op_scripts_dir}/pki.py generate_pki --pki-type ssh --name "$5" --install - ${vyos_op_scripts_dir}/pki.py --action generate --ssh "noname" + ${vyos_op_scripts_dir}/pki.py generate_pki --pki-type ssh @@ -302,12 +302,12 @@ interfaces wireguard - ${vyos_op_scripts_dir}/pki.py --action generate --wireguard --key --interface "$7" --install + ${vyos_op_scripts_dir}/pki.py generate_pki --pki-type wireguard --key --interface "$7" --install - ${vyos_op_scripts_dir}/pki.py --action generate --wireguard --key + ${vyos_op_scripts_dir}/pki.py generate_pki --pki-type wireguard --key @@ -334,14 +334,14 @@ interfaces wireguard ${COMP_WORDS[COMP_CWORD-2]} peer - ${vyos_op_scripts_dir}/pki.py --action generate --wireguard --psk --interface "$7" --peer "$9" --install + ${vyos_op_scripts_dir}/pki.py generate_pki --pki-type wireguard --psk --interface "$7" --peer "$9" --install - ${vyos_op_scripts_dir}/pki.py --action generate --wireguard --psk + ${vyos_op_scripts_dir}/pki.py generate_pki --pki-type wireguard --psk @@ -371,13 +371,13 @@ Path to CA certificate file - sudo -E ${vyos_op_scripts_dir}/pki.py --action import --ca "$4" --filename "$6" + sudo -E ${vyos_op_scripts_dir}/pki.py import_pki --pki-type ca --name "$4" --filename "$6" Path to private key file - sudo -E ${vyos_op_scripts_dir}/pki.py --action import --ca "$4" --key-filename "$6" + sudo -E ${vyos_op_scripts_dir}/pki.py import_pki --pki-type ca --name "$4" --key-filename "$6" @@ -393,13 +393,13 @@ Path to certificate file - sudo -E ${vyos_op_scripts_dir}/pki.py --action import --certificate "$4" --filename "$6" + sudo -E ${vyos_op_scripts_dir}/pki.py import_pki --pki-type certificate --name "$4" --filename "$6" Path to private key file - sudo -E ${vyos_op_scripts_dir}/pki.py --action import --certificate "$4" --key-filename "$6" + sudo -E ${vyos_op_scripts_dir}/pki.py import_pki --pki-type certificate --name "$4" --key-filename "$6" @@ -415,7 +415,7 @@ Path to CRL file - sudo -E ${vyos_op_scripts_dir}/pki.py --action import --crl "$4" --filename "$6" + sudo -E ${vyos_op_scripts_dir}/pki.py import_pki --pki-type crl --name "$4" --filename "$6" @@ -431,7 +431,7 @@ Path to DH parameters file - sudo -E ${vyos_op_scripts_dir}/pki.py --action import --dh "$4" --filename "$6" + sudo -E ${vyos_op_scripts_dir}/pki.py import_pki --pki-type dh --name "$4" --filename "$6" @@ -447,13 +447,13 @@ Path to public key file - sudo -E ${vyos_op_scripts_dir}/pki.py --action import --keypair "$4" --filename "$6" + sudo -E ${vyos_op_scripts_dir}/pki.py import_pki --pki-type key-pair --name "$4" --filename "$6" Path to private key file - sudo -E ${vyos_op_scripts_dir}/pki.py --action import --keypair "$4" --key-filename "$6" + sudo -E ${vyos_op_scripts_dir}/pki.py import_pki --pki-type key-pair --name "$4" --key-filename "$6" @@ -474,7 +474,7 @@ Path to shared secret key file - sudo -E ${vyos_op_scripts_dir}/pki.py --action import --openvpn "$5" --filename "$7" + sudo -E ${vyos_op_scripts_dir}/pki.py import_pki --pki-type openvpn --name "$5" --filename "$7" @@ -490,13 +490,13 @@ Show PKI x509 certificates - sudo ${vyos_op_scripts_dir}/pki.py --action show + sudo ${vyos_op_scripts_dir}/pki.py show_all Show x509 CA certificates - sudo ${vyos_op_scripts_dir}/pki.py --action show --ca "all" + sudo ${vyos_op_scripts_dir}/pki.py show_certificate_authority @@ -505,13 +505,13 @@ pki ca - sudo ${vyos_op_scripts_dir}/pki.py --action show --ca "$4" + sudo ${vyos_op_scripts_dir}/pki.py show_certificate_authority --name "$4" Show x509 CA certificate in PEM format - 
sudo ${vyos_op_scripts_dir}/pki.py --action show --ca "$4" --pem + sudo ${vyos_op_scripts_dir}/pki.py show_certificate_authority --name "$4" --pem @@ -519,7 +519,7 @@ Show x509 certificates - sudo ${vyos_op_scripts_dir}/pki.py --action show --certificate "all" + sudo ${vyos_op_scripts_dir}/pki.py show_certificate @@ -528,13 +528,13 @@ pki certificate - sudo ${vyos_op_scripts_dir}/pki.py --action show --certificate "$4" + sudo ${vyos_op_scripts_dir}/pki.py show_certificate --name "$4" Show x509 certificate in PEM format - sudo ${vyos_op_scripts_dir}/pki.py --action show --certificate "$4" --pem + sudo ${vyos_op_scripts_dir}/pki.py show_certificate --name "$4" --pem @@ -543,7 +543,7 @@ sha256 sha384 sha512 - sudo ${vyos_op_scripts_dir}/pki.py --action show --certificate "$4" --fingerprint "$6" + sudo ${vyos_op_scripts_dir}/pki.py show_certificate --name "$4" --fingerprint "$6" @@ -551,7 +551,7 @@ Show x509 certificate revocation lists - ${vyos_op_scripts_dir}/pki.py --action show --crl "all" + ${vyos_op_scripts_dir}/pki.py show_crl @@ -560,13 +560,13 @@ pki ca - ${vyos_op_scripts_dir}/pki.py --action show --crl "$4" + ${vyos_op_scripts_dir}/pki.py show_crl --name "$4" Show x509 certificate revocation lists by CA name in PEM format - ${vyos_op_scripts_dir}/pki.py --action show --crl "$4" --pem + ${vyos_op_scripts_dir}/pki.py show_crl --name "$4" --pem diff --git a/python/vyos/configsession.py b/python/vyos/configsession.py index 9c56d246a5c..5876dc5b04a 100644 --- a/python/vyos/configsession.py +++ b/python/vyos/configsession.py @@ -42,8 +42,7 @@ IMPORT_PKI = ['/opt/vyatta/bin/vyatta-op-cmd-wrapper', 'import'] IMPORT_PKI_NO_PROMPT = [ '/usr/libexec/vyos/op_mode/pki.py', - '--action', - 'import', + 'import_pki', '--no-prompt', ] REMOVE_IMAGE = [ diff --git a/python/vyos/opmode.py b/python/vyos/opmode.py index 066c8058f23..136ac35e23b 100644 --- a/python/vyos/opmode.py +++ b/python/vyos/opmode.py @@ -89,7 +89,10 @@ class InternalError(Error): def _is_op_mode_function_name(name): - if re.match(r"^(show|clear|reset|restart|add|update|delete|generate|set|renew|release|execute)", name): + if re.match( + r'^(show|clear|reset|restart|add|update|delete|generate|set|renew|release|execute|import)', + name, + ): return True else: return False diff --git a/src/op_mode/pki.py b/src/op_mode/pki.py index 5652a5d7462..56b873bb125 100755 --- a/src/op_mode/pki.py +++ b/src/op_mode/pki.py @@ -14,16 +14,18 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
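The opmode.py regex change above adds import to the verbs _is_op_mode_function_name() accepts, which is what allows import_pki() to be dispatched at all. Note also that the set of values accepted by --pki-type is not spelled out in argparse code in the rewritten script; it comes from the typing.Literal aliases declared below (ArgsPkiType / ArgsPkiTypeGen), which vyos.opmode.run() translates into an argparse choices list, roughly:

    pki_type: ArgsPkiType
    # run() generates: --pki-type {ca,certificate,dh,key-pair,openvpn,crl}, required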
-import argparse import ipaddress import os import re import sys import tabulate +import typing from cryptography import x509 from cryptography.x509.oid import ExtendedKeyUsageOID +import vyos.opmode + from vyos.config import Config from vyos.config import config_dict_mangle_acme from vyos.pki import encode_certificate @@ -51,8 +53,36 @@ CERT_REQ_END = '-----END CERTIFICATE REQUEST-----' auth_dir = '/config/auth' +ArgsPkiType = typing.Literal['ca', 'certificate', 'dh', 'key-pair', 'openvpn', 'crl'] +ArgsPkiTypeGen = typing.Literal[ArgsPkiType, typing.Literal['ssh', 'wireguard']] +ArgsFingerprint = typing.Literal['sha256', 'sha384', 'sha512'] + # Helper Functions conf = Config() + + +def _verify(target): + """Decorator checks if config for PKI exists""" + from functools import wraps + + if target not in ['ca', 'certificate']: + raise ValueError('Invalid PKI') + + def _verify_target(func): + @wraps(func) + def _wrapper(*args, **kwargs): + name = kwargs.get('name') + unconf_message = f'PKI {target} "{name}" does not exist!' + if name: + if not conf.exists(['pki', target, name]): + raise vyos.opmode.UnconfiguredSubsystem(unconf_message) + return func(*args, **kwargs) + + return _wrapper + + return _verify_target + + def get_default_values(): # Fetch default x509 values base = ['pki', 'x509', 'default'] @@ -872,8 +902,111 @@ def import_openvpn_secret(name, path): install_openvpn_key(name, key_data, key_version) -# Show functions -def show_certificate_authority(name=None, pem=False): + +def generate_pki( + raw: bool, + pki_type: ArgsPkiTypeGen, + name: typing.Optional[str], + file: typing.Optional[bool], + install: typing.Optional[bool], + sign: typing.Optional[str], + self_sign: typing.Optional[bool], + key: typing.Optional[bool], + psk: typing.Optional[bool], + interface: typing.Optional[str], + peer: typing.Optional[str], +): + try: + if pki_type == 'ca': + if sign: + generate_ca_certificate_sign(name, sign, install=install, file=file) + else: + generate_ca_certificate(name, install=install, file=file) + elif pki_type == 'certificate': + if sign: + generate_certificate_sign(name, sign, install=install, file=file) + elif self_sign: + generate_certificate_selfsign(name, install=install, file=file) + else: + generate_certificate_request(name=name, install=install, file=file) + + elif pki_type == 'crl': + generate_certificate_revocation_list(name, install=install, file=file) + + elif pki_type == 'ssh': + generate_ssh_keypair(name, install=install, file=file) + + elif pki_type == 'dh': + generate_dh_parameters(name, install=install, file=file) + + elif pki_type == 'key-pair': + generate_keypair(name, install=install, file=file) + + elif pki_type == 'openvpn': + generate_openvpn_key(name, install=install, file=file) + + elif pki_type == 'wireguard': + # WireGuard supports writing key directly into the CLI, but this + # requires the vyos_libexec_dir environment variable to be set + os.environ['vyos_libexec_dir'] = '/usr/libexec/vyos' + + if key: + generate_wireguard_key(interface, install=install) + if psk: + generate_wireguard_psk(interface, peer=peer, install=install) + except KeyboardInterrupt: + print('Aborted') + sys.exit(0) + + +def import_pki( + name: str, + pki_type: ArgsPkiType, + filename: typing.Optional[str], + key_filename: typing.Optional[str], + no_prompt: typing.Optional[bool], + passphrase: typing.Optional[str], +): + try: + if pki_type == 'ca': + import_ca_certificate( + name, + path=filename, + key_path=key_filename, + no_prompt=no_prompt, + passphrase=passphrase, + ) + elif 
pki_type == 'certificate': + import_certificate( + name, + path=filename, + key_path=key_filename, + no_prompt=no_prompt, + passphrase=passphrase, + ) + elif pki_type == 'crl': + import_crl(name, filename) + elif pki_type == 'dh': + import_dh_parameters(name, filename) + elif pki_type == 'key-pair': + import_keypair( + name, + path=filename, + key_path=key_filename, + no_prompt=no_prompt, + passphrase=passphrase, + ) + elif pki_type == 'openvpn': + import_openvpn_secret(name, filename) + except KeyboardInterrupt: + print('Aborted') + sys.exit(0) + + +@_verify('ca') +def show_certificate_authority( + raw: bool, name: typing.Optional[str] = None, pem: typing.Optional[bool] = False +): headers = ['Name', 'Subject', 'Issuer CN', 'Issued', 'Expiry', 'Private Key', 'Parent'] data = [] certs = get_config_ca_certificate() @@ -905,7 +1038,14 @@ def show_certificate_authority(name=None, pem=False): print("Certificate Authorities:") print(tabulate.tabulate(data, headers)) -def show_certificate(name=None, pem=False, fingerprint_hash=None): + +@_verify('certificate') +def show_certificate( + raw: bool, + name: typing.Optional[str] = None, + pem: typing.Optional[bool] = False, + fingerprint: typing.Optional[ArgsFingerprint] = None, +): headers = ['Name', 'Type', 'Subject CN', 'Issuer CN', 'Issued', 'Expiry', 'Revoked', 'Private Key', 'CA Present'] data = [] certs = get_config_certificate() @@ -926,8 +1066,8 @@ def show_certificate(name=None, pem=False, fingerprint_hash=None): if name and pem: print(encode_certificate(cert)) return - elif name and fingerprint_hash: - print(get_certificate_fingerprint(cert, fingerprint_hash)) + elif name and fingerprint: + print(get_certificate_fingerprint(cert, fingerprint)) return ca_name = get_certificate_ca(cert, ca_certs) @@ -955,7 +1095,10 @@ def show_certificate(name=None, pem=False, fingerprint_hash=None): print("Certificates:") print(tabulate.tabulate(data, headers)) -def show_crl(name=None, pem=False): + +def show_crl( + raw: bool, name: typing.Optional[str] = None, pem: typing.Optional[bool] = False +): headers = ['CA Name', 'Updated', 'Revokes'] data = [] certs = get_config_ca_certificate() @@ -989,132 +1132,20 @@ def show_crl(name=None, pem=False): print("Certificate Revocation Lists:") print(tabulate.tabulate(data, headers)) -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--action', help='PKI action', required=True) - - # X509 - parser.add_argument('--ca', help='Certificate Authority', required=False) - parser.add_argument('--certificate', help='Certificate', required=False) - parser.add_argument('--crl', help='Certificate Revocation List', required=False) - parser.add_argument('--sign', help='Sign certificate with specified CA', required=False) - parser.add_argument('--self-sign', help='Self-sign the certificate', action='store_true') - parser.add_argument('--pem', help='Output using PEM encoding', action='store_true') - parser.add_argument('--fingerprint', help='Show fingerprint and exit', action='store') - # SSH - parser.add_argument('--ssh', help='SSH Key', required=False) +def show_all(raw: bool): + show_certificate_authority(raw) + print('\n') + show_certificate(raw) + print('\n') + show_crl(raw) - # DH - parser.add_argument('--dh', help='DH Parameters', required=False) - - # Key pair - parser.add_argument('--keypair', help='Key pair', required=False) - - # OpenVPN - parser.add_argument('--openvpn', help='OpenVPN TLS key', required=False) - - # WireGuard - parser.add_argument('--wireguard', help='Wireguard', 
action='store_true') - group = parser.add_mutually_exclusive_group() - group.add_argument('--key', help='Wireguard key pair', action='store_true', required=False) - group.add_argument('--psk', help='Wireguard pre shared key', action='store_true', required=False) - parser.add_argument('--interface', help='Install generated keys into running-config for named interface', action='store') - parser.add_argument('--peer', help='Install generated keys into running-config for peer', action='store') - - # Global - parser.add_argument('--file', help='Write generated keys into specified filename', action='store_true') - parser.add_argument('--install', help='Install generated keys into running-config', action='store_true') - - parser.add_argument('--filename', help='Write certificate into specified filename', action='store') - parser.add_argument('--key-filename', help='Write key into specified filename', action='store') - - parser.add_argument('--no-prompt', action='store_true', help='Perform action non-interactively') - parser.add_argument('--passphrase', help='A passphrase to decrypt the private key') - - args = parser.parse_args() +if __name__ == '__main__': try: - if args.action == 'generate': - if args.ca: - if args.sign: - generate_ca_certificate_sign(args.ca, args.sign, install=args.install, file=args.file) - else: - generate_ca_certificate(args.ca, install=args.install, file=args.file) - elif args.certificate: - if args.sign: - generate_certificate_sign(args.certificate, args.sign, install=args.install, file=args.file) - elif args.self_sign: - generate_certificate_selfsign(args.certificate, install=args.install, file=args.file) - else: - generate_certificate_request(name=args.certificate, install=args.install, file=args.file) - - elif args.crl: - generate_certificate_revocation_list(args.crl, install=args.install, file=args.file) - - elif args.ssh: - generate_ssh_keypair(args.ssh, install=args.install, file=args.file) - - elif args.dh: - generate_dh_parameters(args.dh, install=args.install, file=args.file) - - elif args.keypair: - generate_keypair(args.keypair, install=args.install, file=args.file) - - elif args.openvpn: - generate_openvpn_key(args.openvpn, install=args.install, file=args.file) - - elif args.wireguard: - # WireGuard supports writing key directly into the CLI, but this - # requires the vyos_libexec_dir environment variable to be set - os.environ["vyos_libexec_dir"] = "/usr/libexec/vyos" - - if args.key: - generate_wireguard_key(args.interface, install=args.install) - if args.psk: - generate_wireguard_psk(args.interface, peer=args.peer, install=args.install) - elif args.action == 'import': - if args.ca: - import_ca_certificate(args.ca, path=args.filename, key_path=args.key_filename, - no_prompt=args.no_prompt, passphrase=args.passphrase) - elif args.certificate: - import_certificate(args.certificate, path=args.filename, key_path=args.key_filename, - no_prompt=args.no_prompt, passphrase=args.passphrase) - elif args.crl: - import_crl(args.crl, args.filename) - elif args.dh: - import_dh_parameters(args.dh, args.filename) - elif args.keypair: - import_keypair(args.keypair, path=args.filename, key_path=args.key_filename, - no_prompt=args.no_prompt, passphrase=args.passphrase) - elif args.openvpn: - import_openvpn_secret(args.openvpn, args.filename) - elif args.action == 'show': - if args.ca: - ca_name = None if args.ca == 'all' else args.ca - if ca_name: - if not conf.exists(['pki', 'ca', ca_name]): - print(f'CA "{ca_name}" does not exist!') - exit(1) - 
show_certificate_authority(ca_name, args.pem) - elif args.certificate: - cert_name = None if args.certificate == 'all' else args.certificate - if cert_name: - if not conf.exists(['pki', 'certificate', cert_name]): - print(f'Certificate "{cert_name}" does not exist!') - exit(1) - if args.fingerprint is None: - show_certificate(None if args.certificate == 'all' else args.certificate, args.pem) - else: - show_certificate(args.certificate, fingerprint_hash=args.fingerprint) - elif args.crl: - show_crl(None if args.crl == 'all' else args.crl, args.pem) - else: - show_certificate_authority() - print('\n') - show_certificate() - print('\n') - show_crl() - except KeyboardInterrupt: - print("Aborted") - sys.exit(0) + res = vyos.opmode.run(sys.modules[__name__]) + if res: + print(res) + except (ValueError, vyos.opmode.Error) as e: + print(e) + sys.exit(1) diff --git a/src/services/api/rest/routers.py b/src/services/api/rest/routers.py index da981d5bff3..5612e947ce5 100644 --- a/src/services/api/rest/routers.py +++ b/src/services/api/rest/routers.py @@ -423,9 +423,9 @@ def create_path_import_pki_no_prompt(path): correct_paths = ['ca', 'certificate', 'key-pair'] if path[1] not in correct_paths: return False - path[1] = '--' + path[1].replace('-', '') path[3] = '--key-filename' - return path[1:] + path.insert(2, '--name') + return ['--pki-type'] + path[1:] @router.post('/configure') From 52e3ba35c09237744340c2cce4cba1783ec3f2be Mon Sep 17 00:00:00 2001 From: Nataliia Solomko Date: Fri, 18 Oct 2024 16:46:39 +0300 Subject: [PATCH 49/85] pki: T4914: reformat file by linter rules --- src/op_mode/pki.py | 517 ++++++++++++++++++++++++++++++++------------- 1 file changed, 375 insertions(+), 142 deletions(-) diff --git a/src/op_mode/pki.py b/src/op_mode/pki.py index 56b873bb125..49a461e9eda 100755 --- a/src/op_mode/pki.py +++ b/src/op_mode/pki.py @@ -86,13 +86,17 @@ def _wrapper(*args, **kwargs): def get_default_values(): # Fetch default x509 values base = ['pki', 'x509', 'default'] - x509_defaults = conf.get_config_dict(base, key_mangling=('-', '_'), - no_tag_node_value_mangle=True, - get_first_key=True, - with_recursive_defaults=True) + x509_defaults = conf.get_config_dict( + base, + key_mangling=('-', '_'), + no_tag_node_value_mangle=True, + get_first_key=True, + with_recursive_defaults=True, + ) return x509_defaults + def get_config_ca_certificate(name=None): # Fetch ca certificates from config base = ['pki', 'ca'] @@ -101,12 +105,15 @@ def get_config_ca_certificate(name=None): if name: base = base + [name] - if not conf.exists(base + ['private', 'key']) or not conf.exists(base + ['certificate']): + if not conf.exists(base + ['private', 'key']) or not conf.exists( + base + ['certificate'] + ): return False - return conf.get_config_dict(base, key_mangling=('-', '_'), - get_first_key=True, - no_tag_node_value_mangle=True) + return conf.get_config_dict( + base, key_mangling=('-', '_'), get_first_key=True, no_tag_node_value_mangle=True + ) + def get_config_certificate(name=None): # Get certificates from config @@ -116,18 +123,21 @@ def get_config_certificate(name=None): if name: base = base + [name] - if not conf.exists(base + ['private', 'key']) or not conf.exists(base + ['certificate']): + if not conf.exists(base + ['private', 'key']) or not conf.exists( + base + ['certificate'] + ): return False - pki = conf.get_config_dict(base, key_mangling=('-', '_'), - get_first_key=True, - no_tag_node_value_mangle=True) + pki = conf.get_config_dict( + base, key_mangling=('-', '_'), get_first_key=True, 
no_tag_node_value_mangle=True + ) if pki: for certificate in pki: pki[certificate] = config_dict_mangle_acme(certificate, pki[certificate]) return pki + def get_certificate_ca(cert, ca_certs): # Find CA certificate for given certificate if not ca_certs: @@ -146,6 +156,7 @@ def get_certificate_ca(cert, ca_certs): return ca_name return None + def get_config_revoked_certificates(): # Fetch revoked certificates from config ca_base = ['pki', 'ca'] @@ -154,19 +165,26 @@ def get_config_revoked_certificates(): certs = [] if conf.exists(ca_base): - ca_certificates = conf.get_config_dict(ca_base, key_mangling=('-', '_'), - get_first_key=True, - no_tag_node_value_mangle=True) + ca_certificates = conf.get_config_dict( + ca_base, + key_mangling=('-', '_'), + get_first_key=True, + no_tag_node_value_mangle=True, + ) certs.extend(ca_certificates.values()) if conf.exists(cert_base): - certificates = conf.get_config_dict(cert_base, key_mangling=('-', '_'), - get_first_key=True, - no_tag_node_value_mangle=True) + certificates = conf.get_config_dict( + cert_base, + key_mangling=('-', '_'), + get_first_key=True, + no_tag_node_value_mangle=True, + ) certs.extend(certificates.values()) return [cert_dict for cert_dict in certs if 'revoke' in cert_dict] + def get_revoked_by_serial_numbers(serial_numbers=[]): # Return serial numbers of revoked certificates certs_out = [] @@ -190,113 +208,153 @@ def get_revoked_by_serial_numbers(serial_numbers=[]): certs_out.append(cert_name) return certs_out -def install_certificate(name, cert='', private_key=None, key_type=None, key_passphrase=None, is_ca=False): + +def install_certificate( + name, cert='', private_key=None, key_type=None, key_passphrase=None, is_ca=False +): # Show/install conf commands for certificate prefix = 'ca' if is_ca else 'certificate' - base = f"pki {prefix} {name}" + base = f'pki {prefix} {name}' config_paths = [] if cert: - cert_pem = "".join(encode_certificate(cert).strip().split("\n")[1:-1]) + cert_pem = ''.join(encode_certificate(cert).strip().split('\n')[1:-1]) config_paths.append(f"{base} certificate '{cert_pem}'") if private_key: - key_pem = "".join(encode_private_key(private_key, passphrase=key_passphrase).strip().split("\n")[1:-1]) + key_pem = ''.join( + encode_private_key(private_key, passphrase=key_passphrase) + .strip() + .split('\n')[1:-1] + ) config_paths.append(f"{base} private key '{key_pem}'") if key_passphrase: - config_paths.append(f"{base} private password-protected") + config_paths.append(f'{base} private password-protected') install_into_config(conf, config_paths) + def install_crl(ca_name, crl): # Show/install conf commands for crl - crl_pem = "".join(encode_certificate(crl).strip().split("\n")[1:-1]) + crl_pem = ''.join(encode_certificate(crl).strip().split('\n')[1:-1]) install_into_config(conf, [f"pki ca {ca_name} crl '{crl_pem}'"]) + def install_dh_parameters(name, params): # Show/install conf commands for dh params - dh_pem = "".join(encode_dh_parameters(params).strip().split("\n")[1:-1]) + dh_pem = ''.join(encode_dh_parameters(params).strip().split('\n')[1:-1]) install_into_config(conf, [f"pki dh {name} parameters '{dh_pem}'"]) + def install_ssh_key(name, public_key, private_key, passphrase=None): # Show/install conf commands for ssh key - key_openssh = encode_public_key(public_key, encoding='OpenSSH', key_format='OpenSSH') + key_openssh = encode_public_key( + public_key, encoding='OpenSSH', key_format='OpenSSH' + ) username = os.getlogin() - type_key_split = key_openssh.split(" ") - - base = f"system login user {username} 
authentication public-keys {name}" - install_into_config(conf, [ - f"{base} key '{type_key_split[1]}'", - f"{base} type '{type_key_split[0]}'" - ]) - print(encode_private_key(private_key, encoding='PEM', key_format='OpenSSH', passphrase=passphrase)) - -def install_keypair(name, key_type, private_key=None, public_key=None, passphrase=None, prompt=True): + type_key_split = key_openssh.split(' ') + + base = f'system login user {username} authentication public-keys {name}' + install_into_config( + conf, + [f"{base} key '{type_key_split[1]}'", f"{base} type '{type_key_split[0]}'"], + ) + print( + encode_private_key( + private_key, encoding='PEM', key_format='OpenSSH', passphrase=passphrase + ) + ) + + +def install_keypair( + name, key_type, private_key=None, public_key=None, passphrase=None, prompt=True +): # Show/install conf commands for key-pair config_paths = [] if public_key: - install_public_key = not prompt or ask_yes_no('Do you want to install the public key?', default=True) + install_public_key = not prompt or ask_yes_no( + 'Do you want to install the public key?', default=True + ) public_key_pem = encode_public_key(public_key) if install_public_key: - install_public_pem = "".join(public_key_pem.strip().split("\n")[1:-1]) - config_paths.append(f"pki key-pair {name} public key '{install_public_pem}'") + install_public_pem = ''.join(public_key_pem.strip().split('\n')[1:-1]) + config_paths.append( + f"pki key-pair {name} public key '{install_public_pem}'" + ) else: - print("Public key:") + print('Public key:') print(public_key_pem) if private_key: - install_private_key = not prompt or ask_yes_no('Do you want to install the private key?', default=True) + install_private_key = not prompt or ask_yes_no( + 'Do you want to install the private key?', default=True + ) private_key_pem = encode_private_key(private_key, passphrase=passphrase) if install_private_key: - install_private_pem = "".join(private_key_pem.strip().split("\n")[1:-1]) - config_paths.append(f"pki key-pair {name} private key '{install_private_pem}'") + install_private_pem = ''.join(private_key_pem.strip().split('\n')[1:-1]) + config_paths.append( + f"pki key-pair {name} private key '{install_private_pem}'" + ) if passphrase: - config_paths.append(f"pki key-pair {name} private password-protected") + config_paths.append(f'pki key-pair {name} private password-protected') else: - print("Private key:") + print('Private key:') print(private_key_pem) install_into_config(conf, config_paths) + def install_openvpn_key(name, key_data, key_version='1'): config_paths = [ f"pki openvpn shared-secret {name} key '{key_data}'", - f"pki openvpn shared-secret {name} version '{key_version}'" + f"pki openvpn shared-secret {name} version '{key_version}'", ] install_into_config(conf, config_paths) + def install_wireguard_key(interface, private_key, public_key): # Show conf commands for installing wireguard key pairs from vyos.ifconfig import Section + if Section.section(interface) != 'wireguard': print(f'"{interface}" is not a WireGuard interface name!') exit(1) # Check if we are running in a config session - if yes, we can directly write to the CLI - install_into_config(conf, [f"interfaces wireguard {interface} private-key '{private_key}'"]) + install_into_config( + conf, [f"interfaces wireguard {interface} private-key '{private_key}'"] + ) print(f"Corresponding public-key to use on peer system is: '{public_key}'") + def install_wireguard_psk(interface, peer, psk): from vyos.ifconfig import Section + if Section.section(interface) != 'wireguard': 
print(f'"{interface}" is not a WireGuard interface name!') exit(1) # Check if we are running in a config session - if yes, we can directly write to the CLI - install_into_config(conf, [f"interfaces wireguard {interface} peer {peer} preshared-key '{psk}'"]) + install_into_config( + conf, [f"interfaces wireguard {interface} peer {peer} preshared-key '{psk}'"] + ) + def ask_passphrase(): passphrase = None - print("Note: If you plan to use the generated key on this router, do not encrypt the private key.") + print( + 'Note: If you plan to use the generated key on this router, do not encrypt the private key.' + ) if ask_yes_no('Do you want to encrypt the private key with a passphrase?'): passphrase = ask_input('Enter passphrase:') return passphrase + def write_file(filename, contents): full_path = os.path.join(auth_dir, filename) directory = os.path.dirname(full_path) @@ -305,7 +363,9 @@ def write_file(filename, contents): print('Failed to write file: directory does not exist') return False - if os.path.exists(full_path) and not ask_yes_no('Do you want to overwrite the existing file?'): + if os.path.exists(full_path) and not ask_yes_no( + 'Do you want to overwrite the existing file?' + ): return False with open(full_path, 'w') as f: @@ -313,10 +373,14 @@ def write_file(filename, contents): print(f'File written to {full_path}') -# Generation functions +# Generation functions def generate_private_key(): - key_type = ask_input('Enter private key type: [rsa, dsa, ec]', default='rsa', valid_responses=['rsa', 'dsa', 'ec']) + key_type = ask_input( + 'Enter private key type: [rsa, dsa, ec]', + default='rsa', + valid_responses=['rsa', 'dsa', 'ec'], + ) size_valid = [] size_default = 0 @@ -328,28 +392,43 @@ def generate_private_key(): size_default = 256 size_valid = [224, 256, 384, 521] - size = ask_input('Enter private key bits:', default=size_default, numeric_only=True, valid_responses=size_valid) + size = ask_input( + 'Enter private key bits:', + default=size_default, + numeric_only=True, + valid_responses=size_valid, + ) return create_private_key(key_type, size), key_type + def parse_san_string(san_string): if not san_string: return None output = [] - san_split = san_string.strip().split(",") + san_split = san_string.strip().split(',') for pair_str in san_split: - tag, value = pair_str.strip().split(":", 1) + tag, value = pair_str.strip().split(':', 1) if tag == 'ipv4': output.append(ipaddress.IPv4Address(value)) elif tag == 'ipv6': output.append(ipaddress.IPv6Address(value)) elif tag == 'dns' or tag == 'rfc822': output.append(value) - return output + return -def generate_certificate_request(private_key=None, key_type=None, return_request=False, name=None, install=False, file=False, ask_san=True): + +def generate_certificate_request( + private_key=None, + key_type=None, + return_request=False, + name=None, + install=False, + file=False, + ask_san=True, +): if not private_key: private_key, key_type = generate_private_key() @@ -358,18 +437,24 @@ def generate_certificate_request(private_key=None, key_type=None, return_request while True: country = ask_input('Enter country code:', default=default_values['country']) if len(country) != 2: - print("Country name must be a 2 character country code") + print('Country name must be a 2 character country code') continue subject['country'] = country break subject['state'] = ask_input('Enter state:', default=default_values['state']) - subject['locality'] = ask_input('Enter locality:', default=default_values['locality']) - subject['organization'] = ask_input('Enter 
organization name:', default=default_values['organization']) + subject['locality'] = ask_input( + 'Enter locality:', default=default_values['locality'] + ) + subject['organization'] = ask_input( + 'Enter organization name:', default=default_values['organization'] + ) subject['common_name'] = ask_input('Enter common name:', default='vyos.io') subject_alt_names = None if ask_san and ask_yes_no('Do you want to configure Subject Alternative Names?'): - print("Enter alternative names in a comma separate list, example: ipv4:1.1.1.1,ipv6:fe80::1,dns:vyos.net,rfc822:user@vyos.net") + print( + 'Enter alternative names in a comma separate list, example: ipv4:1.1.1.1,ipv6:fe80::1,dns:vyos.net,rfc822:user@vyos.net' + ) san_string = ask_input('Enter Subject Alternative Names:') subject_alt_names = parse_san_string(san_string) @@ -386,24 +471,48 @@ def generate_certificate_request(private_key=None, key_type=None, return_request return None if install: - print("Certificate request:") - print(encode_certificate(cert_req) + "\n") - install_certificate(name, private_key=private_key, key_type=key_type, key_passphrase=passphrase, is_ca=False) + print('Certificate request:') + print(encode_certificate(cert_req) + '\n') + install_certificate( + name, + private_key=private_key, + key_type=key_type, + key_passphrase=passphrase, + is_ca=False, + ) if file: write_file(f'{name}.csr', encode_certificate(cert_req)) - write_file(f'{name}.key', encode_private_key(private_key, passphrase=passphrase)) + write_file( + f'{name}.key', encode_private_key(private_key, passphrase=passphrase) + ) -def generate_certificate(cert_req, ca_cert, ca_private_key, is_ca=False, is_sub_ca=False): - valid_days = ask_input('Enter how many days certificate will be valid:', default='365' if not is_ca else '1825', numeric_only=True) + +def generate_certificate( + cert_req, ca_cert, ca_private_key, is_ca=False, is_sub_ca=False +): + valid_days = ask_input( + 'Enter how many days certificate will be valid:', + default='365' if not is_ca else '1825', + numeric_only=True, + ) cert_type = None if not is_ca: - cert_type = ask_input('Enter certificate type: (client, server)', default='server', valid_responses=['client', 'server']) - return create_certificate(cert_req, ca_cert, ca_private_key, valid_days, cert_type, is_ca, is_sub_ca) + cert_type = ask_input( + 'Enter certificate type: (client, server)', + default='server', + valid_responses=['client', 'server'], + ) + return create_certificate( + cert_req, ca_cert, ca_private_key, valid_days, cert_type, is_ca, is_sub_ca + ) + def generate_ca_certificate(name, install=False, file=False): private_key, key_type = generate_private_key() - cert_req = generate_certificate_request(private_key, key_type, return_request=True, ask_san=False) + cert_req = generate_certificate_request( + private_key, key_type, return_request=True, ask_san=False + ) cert = generate_certificate(cert_req, cert_req, private_key, is_ca=True) passphrase = ask_passphrase() @@ -413,11 +522,16 @@ def generate_ca_certificate(name, install=False, file=False): return None if install: - install_certificate(name, cert, private_key, key_type, key_passphrase=passphrase, is_ca=True) + install_certificate( + name, cert, private_key, key_type, key_passphrase=passphrase, is_ca=True + ) if file: write_file(f'{name}.pem', encode_certificate(cert)) - write_file(f'{name}.key', encode_private_key(private_key, passphrase=passphrase)) + write_file( + f'{name}.key', encode_private_key(private_key, passphrase=passphrase) + ) + def 
generate_ca_certificate_sign(name, ca_name, install=False, file=False): ca_dict = get_config_ca_certificate(ca_name) @@ -429,17 +543,19 @@ def generate_ca_certificate_sign(name, ca_name, install=False, file=False): ca_cert = load_certificate(ca_dict['certificate']) if not ca_cert: - print("Failed to load signing CA certificate, aborting") + print('Failed to load signing CA certificate, aborting') return None ca_private = ca_dict['private'] ca_private_passphrase = None if 'password_protected' in ca_private: ca_private_passphrase = ask_input('Enter signing CA private key passphrase:') - ca_private_key = load_private_key(ca_private['key'], passphrase=ca_private_passphrase) + ca_private_key = load_private_key( + ca_private['key'], passphrase=ca_private_passphrase + ) if not ca_private_key: - print("Failed to load signing CA private key, aborting") + print('Failed to load signing CA private key, aborting') return None private_key = None @@ -448,9 +564,11 @@ def generate_ca_certificate_sign(name, ca_name, install=False, file=False): cert_req = None if not ask_yes_no('Do you already have a certificate request?'): private_key, key_type = generate_private_key() - cert_req = generate_certificate_request(private_key, key_type, return_request=True, ask_san=False) + cert_req = generate_certificate_request( + private_key, key_type, return_request=True, ask_san=False + ) else: - print("Paste certificate request and press enter:") + print('Paste certificate request and press enter:') lines = [] curr_line = '' while True: @@ -460,17 +578,21 @@ def generate_ca_certificate_sign(name, ca_name, install=False, file=False): lines.append(curr_line) if not lines: - print("Aborted") + print('Aborted') return None - wrap = lines[0].find('-----') < 0 # Only base64 pasted, add the CSR tags for parsing - cert_req = load_certificate_request("\n".join(lines), wrap) + wrap = ( + lines[0].find('-----') < 0 + ) # Only base64 pasted, add the CSR tags for parsing + cert_req = load_certificate_request('\n'.join(lines), wrap) if not cert_req: - print("Invalid certificate request") + print('Invalid certificate request') return None - cert = generate_certificate(cert_req, ca_cert, ca_private_key, is_ca=True, is_sub_ca=True) + cert = generate_certificate( + cert_req, ca_cert, ca_private_key, is_ca=True, is_sub_ca=True + ) passphrase = None if private_key is not None: @@ -483,12 +605,17 @@ def generate_ca_certificate_sign(name, ca_name, install=False, file=False): return None if install: - install_certificate(name, cert, private_key, key_type, key_passphrase=passphrase, is_ca=True) + install_certificate( + name, cert, private_key, key_type, key_passphrase=passphrase, is_ca=True + ) if file: write_file(f'{name}.pem', encode_certificate(cert)) if private_key is not None: - write_file(f'{name}.key', encode_private_key(private_key, passphrase=passphrase)) + write_file( + f'{name}.key', encode_private_key(private_key, passphrase=passphrase) + ) + def generate_certificate_sign(name, ca_name, install=False, file=False): ca_dict = get_config_ca_certificate(ca_name) @@ -500,17 +627,19 @@ def generate_certificate_sign(name, ca_name, install=False, file=False): ca_cert = load_certificate(ca_dict['certificate']) if not ca_cert: - print("Failed to load CA certificate, aborting") + print('Failed to load CA certificate, aborting') return None ca_private = ca_dict['private'] ca_private_passphrase = None if 'password_protected' in ca_private: ca_private_passphrase = ask_input('Enter CA private key passphrase:') - ca_private_key = 
load_private_key(ca_private['key'], passphrase=ca_private_passphrase) + ca_private_key = load_private_key( + ca_private['key'], passphrase=ca_private_passphrase + ) if not ca_private_key: - print("Failed to load CA private key, aborting") + print('Failed to load CA private key, aborting') return None private_key = None @@ -519,9 +648,11 @@ def generate_certificate_sign(name, ca_name, install=False, file=False): cert_req = None if not ask_yes_no('Do you already have a certificate request?'): private_key, key_type = generate_private_key() - cert_req = generate_certificate_request(private_key, key_type, return_request=True) + cert_req = generate_certificate_request( + private_key, key_type, return_request=True + ) else: - print("Paste certificate request and press enter:") + print('Paste certificate request and press enter:') lines = [] curr_line = '' while True: @@ -531,18 +662,20 @@ def generate_certificate_sign(name, ca_name, install=False, file=False): lines.append(curr_line) if not lines: - print("Aborted") + print('Aborted') return None - wrap = lines[0].find('-----') < 0 # Only base64 pasted, add the CSR tags for parsing - cert_req = load_certificate_request("\n".join(lines), wrap) + wrap = ( + lines[0].find('-----') < 0 + ) # Only base64 pasted, add the CSR tags for parsing + cert_req = load_certificate_request('\n'.join(lines), wrap) if not cert_req: - print("Invalid certificate request") + print('Invalid certificate request') return None cert = generate_certificate(cert_req, ca_cert, ca_private_key, is_ca=False) - + passphrase = None if private_key is not None: passphrase = ask_passphrase() @@ -554,12 +687,17 @@ def generate_certificate_sign(name, ca_name, install=False, file=False): return None if install: - install_certificate(name, cert, private_key, key_type, key_passphrase=passphrase, is_ca=False) + install_certificate( + name, cert, private_key, key_type, key_passphrase=passphrase, is_ca=False + ) if file: write_file(f'{name}.pem', encode_certificate(cert)) if private_key is not None: - write_file(f'{name}.key', encode_private_key(private_key, passphrase=passphrase)) + write_file( + f'{name}.key', encode_private_key(private_key, passphrase=passphrase) + ) + def generate_certificate_selfsign(name, install=False, file=False): private_key, key_type = generate_private_key() @@ -573,11 +711,21 @@ def generate_certificate_selfsign(name, install=False, file=False): return None if install: - install_certificate(name, cert, private_key=private_key, key_type=key_type, key_passphrase=passphrase, is_ca=False) + install_certificate( + name, + cert, + private_key=private_key, + key_type=key_type, + key_passphrase=passphrase, + is_ca=False, + ) if file: write_file(f'{name}.pem', encode_certificate(cert)) - write_file(f'{name}.key', encode_private_key(private_key, passphrase=passphrase)) + write_file( + f'{name}.key', encode_private_key(private_key, passphrase=passphrase) + ) + def generate_certificate_revocation_list(ca_name, install=False, file=False): ca_dict = get_config_ca_certificate(ca_name) @@ -589,17 +737,19 @@ def generate_certificate_revocation_list(ca_name, install=False, file=False): ca_cert = load_certificate(ca_dict['certificate']) if not ca_cert: - print("Failed to load CA certificate, aborting") + print('Failed to load CA certificate, aborting') return None ca_private = ca_dict['private'] ca_private_passphrase = None if 'password_protected' in ca_private: ca_private_passphrase = ask_input('Enter CA private key passphrase:') - ca_private_key = 
load_private_key(ca_private['key'], passphrase=ca_private_passphrase) + ca_private_key = load_private_key( + ca_private['key'], passphrase=ca_private_passphrase + ) if not ca_private_key: - print("Failed to load CA private key, aborting") + print('Failed to load CA private key, aborting') return None revoked_certs = get_config_revoked_certificates() @@ -620,13 +770,13 @@ def generate_certificate_revocation_list(ca_name, install=False, file=False): continue if not to_revoke: - print("No revoked certificates to add to the CRL") + print('No revoked certificates to add to the CRL') return None crl = create_certificate_revocation_list(ca_cert, ca_private_key, to_revoke) if not crl: - print("Failed to create CRL") + print('Failed to create CRL') return None if not install and not file: @@ -637,7 +787,8 @@ def generate_certificate_revocation_list(ca_name, install=False, file=False): install_crl(ca_name, crl) if file: - write_file(f'{name}.crl', encode_certificate(crl)) + write_file(f'{ca_name}.crl', encode_certificate(crl)) + def generate_ssh_keypair(name, install=False, file=False): private_key, key_type = generate_private_key() @@ -646,29 +797,42 @@ def generate_ssh_keypair(name, install=False, file=False): if not install and not file: print(encode_public_key(public_key, encoding='OpenSSH', key_format='OpenSSH')) - print("") - print(encode_private_key(private_key, encoding='PEM', key_format='OpenSSH', passphrase=passphrase)) + print('') + print( + encode_private_key( + private_key, encoding='PEM', key_format='OpenSSH', passphrase=passphrase + ) + ) return None if install: install_ssh_key(name, public_key, private_key, passphrase) if file: - write_file(f'{name}.pem', encode_public_key(public_key, encoding='OpenSSH', key_format='OpenSSH')) - write_file(f'{name}.key', encode_private_key(private_key, encoding='PEM', key_format='OpenSSH', passphrase=passphrase)) + write_file( + f'{name}.pem', + encode_public_key(public_key, encoding='OpenSSH', key_format='OpenSSH'), + ) + write_file( + f'{name}.key', + encode_private_key( + private_key, encoding='PEM', key_format='OpenSSH', passphrase=passphrase + ), + ) + def generate_dh_parameters(name, install=False, file=False): bits = ask_input('Enter DH parameters key size:', default=2048, numeric_only=True) - print("Generating parameters...") + print('Generating parameters...') dh_params = create_dh_parameters(bits) if not dh_params: - print("Failed to create DH parameters") + print('Failed to create DH parameters') return None if not install and not file: - print("DH Parameters:") + print('DH Parameters:') print(encode_dh_parameters(dh_params)) if install: @@ -677,6 +841,7 @@ def generate_dh_parameters(name, install=False, file=False): if file: write_file(f'{name}.pem', encode_dh_parameters(dh_params)) + def generate_keypair(name, install=False, file=False): private_key, key_type = generate_private_key() public_key = private_key.public_key() @@ -684,7 +849,7 @@ def generate_keypair(name, install=False, file=False): if not install and not file: print(encode_public_key(public_key)) - print("") + print('') print(encode_private_key(private_key, passphrase=passphrase)) return None @@ -693,13 +858,16 @@ def generate_keypair(name, install=False, file=False): if file: write_file(f'{name}.pem', encode_public_key(public_key)) - write_file(f'{name}.key', encode_private_key(private_key, passphrase=passphrase)) + write_file( + f'{name}.key', encode_private_key(private_key, passphrase=passphrase) + ) + def generate_openvpn_key(name, install=False, file=False): result = 
cmd('openvpn --genkey secret /dev/stdout | grep -o "^[^#]*"') if not result: - print("Failed to generate OpenVPN key") + print('Failed to generate OpenVPN key') return None if not install and not file: @@ -707,11 +875,13 @@ def generate_openvpn_key(name, install=False, file=False): return None if install: - key_lines = result.split("\n") - key_data = "".join(key_lines[1:-1]) # Remove wrapper tags and line endings + key_lines = result.split('\n') + key_data = ''.join(key_lines[1:-1]) # Remove wrapper tags and line endings key_version = '1' - version_search = re.search(r'BEGIN OpenVPN Static key V(\d+)', result) # Future-proofing (hopefully) + version_search = re.search( + r'BEGIN OpenVPN Static key V(\d+)', result + ) # Future-proofing (hopefully) if version_search: key_version = version_search[1] @@ -720,6 +890,7 @@ def generate_openvpn_key(name, install=False, file=False): if file: write_file(f'{name}.key', result) + def generate_wireguard_key(interface=None, install=False): private_key = cmd('wg genkey') public_key = cmd('wg pubkey', input=private_key) @@ -730,6 +901,7 @@ def generate_wireguard_key(interface=None, install=False): print(f'Private key: {private_key}') print(f'Public key: {public_key}', end='\n\n') + def generate_wireguard_psk(interface=None, peer=None, install=False): psk = cmd('wg genpsk') if interface and peer and install: @@ -737,8 +909,11 @@ def generate_wireguard_psk(interface=None, peer=None, install=False): else: print(f'Pre-shared key: {psk}') + # Import functions -def import_ca_certificate(name, path=None, key_path=None, no_prompt=False, passphrase=None): +def import_ca_certificate( + name, path=None, key_path=None, no_prompt=False, passphrase=None +): if path: if not os.path.exists(path): print(f'File not found: {path}') @@ -775,7 +950,10 @@ def import_ca_certificate(name, path=None, key_path=None, no_prompt=False, passp install_certificate(name, private_key=key, is_ca=True) -def import_certificate(name, path=None, key_path=None, no_prompt=False, passphrase=None): + +def import_certificate( + name, path=None, key_path=None, no_prompt=False, passphrase=None +): if path: if not os.path.exists(path): print(f'File not found: {path}') @@ -812,6 +990,7 @@ def import_certificate(name, path=None, key_path=None, no_prompt=False, passphra install_certificate(name, private_key=key, is_ca=False) + def import_crl(name, path): if not os.path.exists(path): print(f'File not found: {path}') @@ -829,6 +1008,7 @@ def import_crl(name, path): install_crl(name, crl) + def import_dh_parameters(name, path): if not os.path.exists(path): print(f'File not found: {path}') @@ -846,6 +1026,7 @@ def import_dh_parameters(name, path): install_dh_parameters(name, dh) + def import_keypair(name, path=None, key_path=None, no_prompt=False, passphrase=None): if path: if not os.path.exists(path): @@ -883,6 +1064,7 @@ def import_keypair(name, path=None, key_path=None, no_prompt=False, passphrase=N install_keypair(name, None, private_key=key, prompt=False) + def import_openvpn_secret(name, path): if not os.path.exists(path): print(f'File not found: {path}') @@ -892,11 +1074,15 @@ def import_openvpn_secret(name, path): key_version = '1' with open(path) as f: - key_lines = f.read().strip().split("\n") - key_lines = list(filter(lambda line: not line.strip().startswith('#'), key_lines)) # Remove commented lines - key_data = "".join(key_lines[1:-1]) # Remove wrapper tags and line endings - - version_search = re.search(r'BEGIN OpenVPN Static key V(\d+)', key_lines[0]) # Future-proofing (hopefully) + key_lines 
= f.read().strip().split('\n') + key_lines = list( + filter(lambda line: not line.strip().startswith('#'), key_lines) + ) # Remove commented lines + key_data = ''.join(key_lines[1:-1]) # Remove wrapper tags and line endings + + version_search = re.search( + r'BEGIN OpenVPN Static key V(\d+)', key_lines[0] + ) # Future-proofing (hopefully) if version_search: key_version = version_search[1] @@ -1007,7 +1193,15 @@ def import_pki( def show_certificate_authority( raw: bool, name: typing.Optional[str] = None, pem: typing.Optional[bool] = False ): - headers = ['Name', 'Subject', 'Issuer CN', 'Issued', 'Expiry', 'Private Key', 'Parent'] + headers = [ + 'Name', + 'Subject', + 'Issuer CN', + 'Issued', + 'Expiry', + 'Private Key', + 'Parent', + ] data = [] certs = get_config_ca_certificate() if certs: @@ -1024,7 +1218,7 @@ def show_certificate_authority( return parent_ca_name = get_certificate_ca(cert, certs) - cert_issuer_cn = cert.issuer.rfc4514_string().split(",")[0] + cert_issuer_cn = cert.issuer.rfc4514_string().split(',')[0] if not parent_ca_name or parent_ca_name == cert_name: parent_ca_name = 'N/A' @@ -1032,10 +1226,24 @@ def show_certificate_authority( if not cert: continue - have_private = 'Yes' if 'private' in cert_dict and 'key' in cert_dict['private'] else 'No' - data.append([cert_name, cert.subject.rfc4514_string(), cert_issuer_cn, cert.not_valid_before, cert.not_valid_after, have_private, parent_ca_name]) + have_private = ( + 'Yes' + if 'private' in cert_dict and 'key' in cert_dict['private'] + else 'No' + ) + data.append( + [ + cert_name, + cert.subject.rfc4514_string(), + cert_issuer_cn, + cert.not_valid_before, + cert.not_valid_after, + have_private, + parent_ca_name, + ] + ) - print("Certificate Authorities:") + print('Certificate Authorities:') print(tabulate.tabulate(data, headers)) @@ -1046,7 +1254,17 @@ def show_certificate( pem: typing.Optional[bool] = False, fingerprint: typing.Optional[ArgsFingerprint] = None, ): - headers = ['Name', 'Type', 'Subject CN', 'Issuer CN', 'Issued', 'Expiry', 'Revoked', 'Private Key', 'CA Present'] + headers = [ + 'Name', + 'Type', + 'Subject CN', + 'Issuer CN', + 'Issued', + 'Expiry', + 'Revoked', + 'Private Key', + 'CA Present', + ] data = [] certs = get_config_certificate() if certs: @@ -1071,8 +1289,8 @@ def show_certificate( return ca_name = get_certificate_ca(cert, ca_certs) - cert_subject_cn = cert.subject.rfc4514_string().split(",")[0] - cert_issuer_cn = cert.issuer.rfc4514_string().split(",")[0] + cert_subject_cn = cert.subject.rfc4514_string().split(',')[0] + cert_issuer_cn = cert.issuer.rfc4514_string().split(',')[0] cert_type = 'Unknown' try: @@ -1081,18 +1299,31 @@ def show_certificate( cert_type = 'Server' elif ext and ExtendedKeyUsageOID.CLIENT_AUTH in ext.value: cert_type = 'Client' - except: + except Exception: pass revoked = 'Yes' if 'revoke' in cert_dict else 'No' - have_private = 'Yes' if 'private' in cert_dict and 'key' in cert_dict['private'] else 'No' + have_private = ( + 'Yes' + if 'private' in cert_dict and 'key' in cert_dict['private'] + else 'No' + ) have_ca = f'Yes ({ca_name})' if ca_name else 'No' - data.append([ - cert_name, cert_type, cert_subject_cn, cert_issuer_cn, - cert.not_valid_before, cert.not_valid_after, - revoked, have_private, have_ca]) + data.append( + [ + cert_name, + cert_type, + cert_subject_cn, + cert_issuer_cn, + cert.not_valid_before, + cert.not_valid_after, + revoked, + have_private, + have_ca, + ] + ) - print("Certificates:") + print('Certificates:') print(tabulate.tabulate(data, headers)) @@ 
-1123,13 +1354,15 @@ def show_crl( print(encode_certificate(crl)) continue - certs = get_revoked_by_serial_numbers([revoked.serial_number for revoked in crl]) - data.append([cert_name, crl.last_update, ", ".join(certs)]) + certs = get_revoked_by_serial_numbers( + [revoked.serial_number for revoked in crl] + ) + data.append([cert_name, crl.last_update, ', '.join(certs)]) if name and pem: return - print("Certificate Revocation Lists:") + print('Certificate Revocation Lists:') print(tabulate.tabulate(data, headers)) From fa272f5297177354714ee7867104f43e4e322df6 Mon Sep 17 00:00:00 2001 From: Vijayakumar A <36878324+kumvijaya@users.noreply.github.com> Date: Sat, 19 Oct 2024 20:45:22 +0530 Subject: [PATCH 50/85] T6784: enabled repo-sync wokrflow only for current and equuleus (#4163) --- .github/workflows/repo-sync.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/repo-sync.yml b/.github/workflows/repo-sync.yml index 6da2fb40d38..752cf947a4e 100644 --- a/.github/workflows/repo-sync.yml +++ b/.github/workflows/repo-sync.yml @@ -6,6 +6,7 @@ on: - closed branches: - current + - equuleus workflow_dispatch: jobs: From 9cc550079bbb533b629c942ff1c82019552dc846 Mon Sep 17 00:00:00 2001 From: khramshinr Date: Mon, 21 Oct 2024 16:33:12 +0600 Subject: [PATCH 51/85] T6791: Extend fair-queue hash-interval --- interface-definitions/qos.xml.in | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/interface-definitions/qos.xml.in b/interface-definitions/qos.xml.in index 927594c11ea..907fd5e4ce3 100644 --- a/interface-definitions/qos.xml.in +++ b/interface-definitions/qos.xml.in @@ -201,13 +201,13 @@ No perturbation - u32:1-127 + u32:1-2147483647 Interval in seconds for queue algorithm perturbation (advised: 10) - + - Interval must be in range 0 to 127 + Interval must be in range 0 to 2147483647 0 From 64d291e78291123b0db7f72de0af0129c8e046ed Mon Sep 17 00:00:00 2001 From: Aslan Hajiyev <126803786+aslanvyos@users.noreply.github.com> Date: Thu, 24 Oct 2024 10:50:53 +0400 Subject: [PATCH 52/85] T6807: allow a trailing slash character in system login Allow the use of a trailing slash `("/")` at the of the user's home directory path. 
For example `/home/test/` --- interface-definitions/system_login.xml.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/interface-definitions/system_login.xml.in b/interface-definitions/system_login.xml.in index f6c8021d325..9865e3d3279 100644 --- a/interface-definitions/system_login.xml.in +++ b/interface-definitions/system_login.xml.in @@ -190,7 +190,7 @@ Path to home directory - \/$|(\/[a-zA-Z_0-9-.]+)+ + (\/[a-zA-Z_0-9-.]+)+\/?$ From 59c0c0b2e5e0793804785bec0d702b879114a795 Mon Sep 17 00:00:00 2001 From: Nataliia Solomko Date: Thu, 24 Oct 2024 15:25:41 +0300 Subject: [PATCH 53/85] op_mode: T6808: Console server op mode commands throw errors when console server is not configured --- op-mode-definitions/show-console-server.xml.in | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/op-mode-definitions/show-console-server.xml.in b/op-mode-definitions/show-console-server.xml.in index eae6fd5364b..03dd97d83e3 100644 --- a/op-mode-definitions/show-console-server.xml.in +++ b/op-mode-definitions/show-console-server.xml.in @@ -21,13 +21,13 @@ Examine console ports and configured baud rates - /usr/bin/console -x + if cli-shell-api existsActive service console-server; then /usr/bin/console -x; else echo "Console server is not configured"; fi Show users on various consoles - /usr/bin/console -u + if cli-shell-api existsActive service console-server; then /usr/bin/console -u; else echo "Console server is not configured"; fi From 9911bf34df960c89a47f8aa8bd0795a01aabdda5 Mon Sep 17 00:00:00 2001 From: Viacheslav Hletenko Date: Fri, 25 Oct 2024 17:34:33 +0300 Subject: [PATCH 54/85] T6812: Fix smoketest iproute2 check (#4174) In the new iproute2 package the link to `ip` was changed $ file /usr/sbin/ip /usr/sbin/ip: symbolic link to ../bin/ip Fix smoketest --- smoketest/scripts/system/test_iproute2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/smoketest/scripts/system/test_iproute2.py b/smoketest/scripts/system/test_iproute2.py index 2d2fe195b53..f4fa0f3ba76 100755 --- a/smoketest/scripts/system/test_iproute2.py +++ b/smoketest/scripts/system/test_iproute2.py @@ -21,7 +21,7 @@ class TestIproute2(unittest.TestCase): def test_ip_is_symlink(self): # For an unknown reason VyOS 1.3.0-rc2 did not have a symlink from # /usr/sbin/ip -> /bin/ip - verify this now and forever - real_file = '/bin/ip' + real_file = '../bin/ip' symlink = '/usr/sbin/ip' self.assertTrue(os.path.islink(symlink)) self.assertFalse(os.path.islink(real_file)) From 41a651314085e020386cd1f1938a7412ce503b13 Mon Sep 17 00:00:00 2001 From: Christian Breunig Date: Sun, 27 Oct 2024 19:42:27 +0100 Subject: [PATCH 55/85] login: T6712: honor 80x25 terminal size for nonproduction banner message --- data/templates/login/motd_vyos_nonproduction.j2 | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/data/templates/login/motd_vyos_nonproduction.j2 b/data/templates/login/motd_vyos_nonproduction.j2 index 32922f27f3d..3f10423ffea 100644 --- a/data/templates/login/motd_vyos_nonproduction.j2 +++ b/data/templates/login/motd_vyos_nonproduction.j2 @@ -1,3 +1,4 @@ --- -Warning: This VyOS system is not a stable long-term support version and is not intended for production use. +WARNING: This VyOS system is not a stable long-term support version and + is not intended for production use. 
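As an aside on the home-directory validation relaxed in the system login patch above (T6807), the practical effect of the new pattern can be checked with a few sample paths. This assumes the CLI validator anchors the expression the same way fullmatch() does.

    # Quick check of the relaxed home-directory pattern from T6807.
    import re

    pattern = re.compile(r'(\/[a-zA-Z_0-9-.]+)+\/?$')

    for path in ('/home/test', '/home/test/', 'home/test'):
        print(path, bool(pattern.fullmatch(path)))
    # /home/test  True
    # /home/test/ True   (trailing slash now accepted)
    # home/test   False  (relative paths still rejected)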
From 1bd594c22c7a51d2b92e237df2294a6c2f7b37c8 Mon Sep 17 00:00:00 2001 From: Nataliia Solomko Date: Tue, 5 Nov 2024 16:41:26 +0200 Subject: [PATCH 56/85] dhcp_server: T6852: Add op mode command "show dhcpv6 server statistics" --- op-mode-definitions/dhcp.xml.in | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/op-mode-definitions/dhcp.xml.in b/op-mode-definitions/dhcp.xml.in index b3438ab808e..63b1f62bb02 100644 --- a/op-mode-definitions/dhcp.xml.in +++ b/op-mode-definitions/dhcp.xml.in @@ -228,6 +228,23 @@ + + + Show DHCPv6 server statistics + + ${vyos_op_scripts_dir}/dhcp.py show_pool_statistics --family inet6 + + + + Show DHCPv6 server statistics for a specific pool + + service dhcpv6-server shared-network-name + + + ${vyos_op_scripts_dir}/dhcp.py show_pool_statistics --family inet6 --pool $6 + + + From 09ca8e2a01fcef509d6f39e0abbe7a65fa1eb5b8 Mon Sep 17 00:00:00 2001 From: Viacheslav Hletenko Date: Tue, 5 Nov 2024 20:25:18 +0200 Subject: [PATCH 57/85] T6764: Fix unhandled exception on ethtool output parsing for Xen NICs (#4182) Not all NICs could provide ring-buffers info requested by ethtool in JSON format For example 'vif' Xen/XCP-NG interfaces Fix it --- python/vyos/ethtool.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/python/vyos/ethtool.py b/python/vyos/ethtool.py index 21272cc5b13..4710a5d402b 100644 --- a/python/vyos/ethtool.py +++ b/python/vyos/ethtool.py @@ -23,7 +23,7 @@ # flow control settings _drivers_without_speed_duplex_flow = ['vmxnet3', 'virtio_net', 'xen_netfront', 'iavf', 'ice', 'i40e', 'hv_netvsc', 'veth', 'ixgbevf', - 'tun'] + 'tun', 'vif'] class Ethtool: """ @@ -101,8 +101,9 @@ def __init__(self, ifname): self._features = loads(out)[0] # Get information about NIC ring buffers - out, _ = popen(f'ethtool --json --show-ring {ifname}') - self._ring_buffer = loads(out)[0] + out, err = popen(f'ethtool --json --show-ring {ifname}') + if not bool(err): + self._ring_buffer = loads(out)[0] # Get current flow control settings, but this is not supported by # all NICs (e.g. 
vmxnet3 does not support is) From 029cde4608c450a16495e0676b410a4b38183960 Mon Sep 17 00:00:00 2001 From: Nataliia Solomko Date: Mon, 28 Oct 2024 15:18:19 +0200 Subject: [PATCH 58/85] T3501: Allow using more than one tuned profile --- .../include/version/system-version.xml.i | 2 +- interface-definitions/system_option.xml.in | 21 +++++++++--- .../config-tests/dialup-router-wireguard-ipv6 | 2 +- .../configs/dialup-router-wireguard-ipv6 | 2 +- smoketest/scripts/cli/test_system_option.py | 10 +++--- src/conf_mode/system_option.py | 12 ++++++- src/migration-scripts/system/27-to-28 | 33 +++++++++++++++++++ 7 files changed, 70 insertions(+), 12 deletions(-) create mode 100644 src/migration-scripts/system/27-to-28 diff --git a/interface-definitions/include/version/system-version.xml.i b/interface-definitions/include/version/system-version.xml.i index fcb24abe2c9..3ecf124c79f 100644 --- a/interface-definitions/include/version/system-version.xml.i +++ b/interface-definitions/include/version/system-version.xml.i @@ -1,3 +1,3 @@ - + diff --git a/interface-definitions/system_option.xml.in b/interface-definitions/system_option.xml.in index 064d9ff4029..638ac1a3d8b 100644 --- a/interface-definitions/system_option.xml.in +++ b/interface-definitions/system_option.xml.in @@ -149,19 +149,32 @@ Tune system performance - throughput latency + network-throughput network-latency power-save virtual-host virtual-guest - throughput + network-throughput Tune for maximum network throughput - latency + network-latency Tune for low network latency + + power-save + Tune for low power consumption + + + virtual-guest + Tune for running inside a virtual machine + + + virtual-host + Tune for running guest virtual machines + - (throughput|latency) + (network-throughput|network-latency|power-save|virtual-guest|virtual-host) + diff --git a/smoketest/config-tests/dialup-router-wireguard-ipv6 b/smoketest/config-tests/dialup-router-wireguard-ipv6 index ff4bf89c269..c2cf2e9d8a8 100644 --- a/smoketest/config-tests/dialup-router-wireguard-ipv6 +++ b/smoketest/config-tests/dialup-router-wireguard-ipv6 @@ -688,7 +688,7 @@ set system login user vyos authentication encrypted-password '$6$2Ta6TWHd/U$NmrX set system login user vyos authentication plaintext-password '' set system name-server '172.16.254.30' set system option ctrl-alt-delete 'ignore' -set system option performance 'latency' +set system option performance 'network-latency' set system option reboot-on-panic set system option startup-beep set system syslog global facility all level 'debug' diff --git a/smoketest/configs/dialup-router-wireguard-ipv6 b/smoketest/configs/dialup-router-wireguard-ipv6 index 0585821487d..76760634181 100644 --- a/smoketest/configs/dialup-router-wireguard-ipv6 +++ b/smoketest/configs/dialup-router-wireguard-ipv6 @@ -1470,7 +1470,7 @@ system { } option { ctrl-alt-delete ignore - performance latency + performance network-latency reboot-on-panic startup-beep } diff --git a/smoketest/scripts/cli/test_system_option.py b/smoketest/scripts/cli/test_system_option.py index ed028062866..f3112cf0bce 100755 --- a/smoketest/scripts/cli/test_system_option.py +++ b/smoketest/scripts/cli/test_system_option.py @@ -23,6 +23,7 @@ base_path = ['system', 'option'] + class TestSystemOption(VyOSUnitTestSHIM.TestCase): def tearDown(self): self.cli_delete(base_path) @@ -59,6 +60,7 @@ def test_reboot_on_panic(self): def test_performance(self): tuned_service = 'tuned.service' + path = ['system', 'sysctl', 'parameter'] self.assertFalse(is_systemd_service_active(tuned_service)) @@ 
-67,11 +69,11 @@ def test_performance(self): gc_thresh2 = '262000' gc_thresh3 = '524000' - self.cli_set(['system', 'sysctl', 'parameter', 'net.ipv4.neigh.default.gc_thresh1', 'value', gc_thresh1]) - self.cli_set(['system', 'sysctl', 'parameter', 'net.ipv4.neigh.default.gc_thresh2', 'value', gc_thresh2]) - self.cli_set(['system', 'sysctl', 'parameter', 'net.ipv4.neigh.default.gc_thresh3', 'value', gc_thresh3]) + self.cli_set(path + ['net.ipv4.neigh.default.gc_thresh1', 'value', gc_thresh1]) + self.cli_set(path + ['net.ipv4.neigh.default.gc_thresh2', 'value', gc_thresh2]) + self.cli_set(path + ['net.ipv4.neigh.default.gc_thresh3', 'value', gc_thresh3]) - self.cli_set(base_path + ['performance', 'throughput']) + self.cli_set(base_path + ['performance', 'network-throughput']) self.cli_commit() self.assertTrue(is_systemd_service_active(tuned_service)) diff --git a/src/conf_mode/system_option.py b/src/conf_mode/system_option.py index a84572f831d..e2832cde6af 100755 --- a/src/conf_mode/system_option.py +++ b/src/conf_mode/system_option.py @@ -46,6 +46,13 @@ usb_autosuspend = r'/etc/udev/rules.d/40-usb-autosuspend.rules' kernel_dynamic_debug = r'/sys/kernel/debug/dynamic_debug/control' time_format_to_locale = {'12-hour': 'en_US.UTF-8', '24-hour': 'en_GB.UTF-8'} +tuned_profiles = { + 'power-save': 'powersave', + 'network-latency': 'network-latency', + 'network-throughput': 'network-throughput', + 'virtual-guest': 'virtual-guest', + 'virtual-host': 'virtual-host', +} def get_config(config=None): @@ -171,7 +178,10 @@ def apply(options): # wait until daemon has started before sending configuration while not is_systemd_service_running('tuned.service'): sleep(0.250) - cmd('tuned-adm profile network-{performance}'.format(**options)) + performance = ' '.join( + list(tuned_profiles[profile] for profile in options['performance']) + ) + cmd(f'tuned-adm profile {performance}') else: cmd('systemctl stop tuned.service') diff --git a/src/migration-scripts/system/27-to-28 b/src/migration-scripts/system/27-to-28 new file mode 100644 index 00000000000..0a5be48ab90 --- /dev/null +++ b/src/migration-scripts/system/27-to-28 @@ -0,0 +1,33 @@ +# Copyright 2023-2024 VyOS maintainers and contributors +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this library. If not, see . 
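Before the migration body below, it may help to see how the renamed values end up being consumed by the conf_mode change shown earlier in this patch: several CLI selections are mapped through tuned_profiles and joined into a single tuned-adm call. A small worked example with a hypothetical selection:

    # Worked example of how several CLI performance values collapse into one
    # tuned-adm call (mapping copied from the conf_mode change above; the
    # selected profiles are a hypothetical configuration).
    tuned_profiles = {
        'power-save': 'powersave',
        'network-latency': 'network-latency',
        'network-throughput': 'network-throughput',
        'virtual-guest': 'virtual-guest',
        'virtual-host': 'virtual-host',
    }

    selected = ['network-throughput', 'virtual-host']
    performance = ' '.join(tuned_profiles[p] for p in selected)
    print(f'tuned-adm profile {performance}')
    # tuned-adm profile network-throughput virtual-host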
+ +# rename 'system option performance' leaf nodes to new names + +from vyos.configtree import ConfigTree + +base = ['system', 'option', 'performance'] + +def migrate(config: ConfigTree) -> None: + if not config.exists(base): + return + + replace = { + 'throughput' : 'network-throughput', + 'latency' : 'network-latency' + } + + for old_name, new_name in replace.items(): + if config.return_value(base) == old_name: + config.set(base, new_name) From 12f9bdf93aa248309fae16883c27f69de4cdc338 Mon Sep 17 00:00:00 2001 From: Christian Breunig Date: Thu, 7 Nov 2024 22:04:42 +0100 Subject: [PATCH 59/85] smoketest: T6719: use read_file() over 'sudo cat FILENAME' --- smoketest/scripts/cli/test_system_syslog.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/smoketest/scripts/cli/test_system_syslog.py b/smoketest/scripts/cli/test_system_syslog.py index c802ceeeb6d..d9e16eef8fb 100755 --- a/smoketest/scripts/cli/test_system_syslog.py +++ b/smoketest/scripts/cli/test_system_syslog.py @@ -20,7 +20,6 @@ from base_vyostest_shim import VyOSUnitTestSHIM from vyos.utils.file import read_file -from vyos.utils.process import cmd from vyos.utils.process import process_named_running PROCESS_NAME = 'rsyslogd' @@ -80,20 +79,22 @@ def test_syslog_basic(self): self.assertTrue(process_named_running(PROCESS_NAME)) def test_syslog_global(self): - self.cli_set(['system', 'host-name', 'vyos']) - self.cli_set(['system', 'domain-name', 'example.local']) + hostname = 'vyos123' + domainname = 'example.local' + self.cli_set(['system', 'host-name', hostname]) + self.cli_set(['system', 'domain-name', domainname]) self.cli_set(base_path + ['global', 'marker', 'interval', '600']) self.cli_set(base_path + ['global', 'preserve-fqdn']) self.cli_set(base_path + ['global', 'facility', 'kern', 'level', 'err']) self.cli_commit() - config = cmd(f'sudo cat {RSYSLOG_CONF}') + config = read_file(RSYSLOG_CONF) expected = [ '$MarkMessagePeriod 600', '$PreserveFQDN on', 'kern.err', - '$LocalHostName vyos.example.local', + f'$LocalHostName {hostname}.{domainname}', ] for e in expected: From 9ba81b263f4e0dd7b5dbf2e766702eedff96538b Mon Sep 17 00:00:00 2001 From: Christian Breunig Date: Thu, 7 Nov 2024 22:05:00 +0100 Subject: [PATCH 60/85] syslog: T6858: bugfix remote syslog using TCP Commit 042be39cc ("syslog: T5367: add format option to include timezone in message") added an invalid, outer if-statement when rendering the rsyslog configuration option for TCP. Remote hosts only got added when the format option "octet-counting" was defined in addition to the TCP protocol. This has been fix and now TCP transport is decoupled from octet-counting mode. 
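The difference the fix makes can be reproduced with a cut-down render of the template logic. The snippet below is a simplified stand-in for the real rsyslog template: the vyos_defined test is replaced by a plain boolean and only one selector/host pair is rendered.

    # Minimal reproduction of the bug: the old nested if drops TCP hosts that
    # do not enable octet-counting, the fixed version keeps them.
    from jinja2 import Template

    old_tpl = Template(
        '{% if proto == "tcp" %}{% if octet_counted %}'
        '*.* @@(o){{ host }}:{{ port }}'
        '{% endif %}{% else %}'
        '*.* @{{ host }}:{{ port }}{% endif %}'
    )
    new_tpl = Template(
        '{% if proto == "tcp" %}'
        '*.* @@{{ "(o)" if octet_counted }}{{ host }}:{{ port }}'
        '{% else %}'
        '*.* @{{ host }}:{{ port }}{% endif %}'
    )

    ctx = dict(proto='tcp', octet_counted=False, host='169.254.0.1', port=514)
    print(repr(old_tpl.render(**ctx)))  # ''  -> remote host silently dropped
    print(repr(new_tpl.render(**ctx)))  # '*.* @@169.254.0.1:514'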
--- data/templates/rsyslog/rsyslog.conf.j2 | 2 -- smoketest/scripts/cli/test_system_syslog.py | 24 +++++++++++++++++++++ 2 files changed, 24 insertions(+), 2 deletions(-) diff --git a/data/templates/rsyslog/rsyslog.conf.j2 b/data/templates/rsyslog/rsyslog.conf.j2 index 7fd592d1ff9..253a4bee2ca 100644 --- a/data/templates/rsyslog/rsyslog.conf.j2 +++ b/data/templates/rsyslog/rsyslog.conf.j2 @@ -57,9 +57,7 @@ $outchannel {{ file_name }},/var/log/user/{{ file_name }},{{ file_options.archiv {% endfor %} {% endif %} {% if host_options.protocol is vyos_defined('tcp') %} -{% if host_options.format.octet_counted is vyos_defined %} {{ tmp | join(';') }} @@{{ '(o)' if host_options.format.octet_counted is vyos_defined }}{{ host_name | bracketize_ipv6 }}:{{ host_options.port }}{{ ';RSYSLOG_SyslogProtocol23Format' if host_options.format.include_timezone is vyos_defined }} -{% endif %} {% else %} {{ tmp | join(';') }} @{{ host_name | bracketize_ipv6 }}:{{ host_options.port }}{{ ';RSYSLOG_SyslogProtocol23Format' if host_options.format.include_timezone is vyos_defined }} {% endif %} diff --git a/smoketest/scripts/cli/test_system_syslog.py b/smoketest/scripts/cli/test_system_syslog.py index d9e16eef8fb..a8671111952 100755 --- a/smoketest/scripts/cli/test_system_syslog.py +++ b/smoketest/scripts/cli/test_system_syslog.py @@ -21,6 +21,7 @@ from vyos.utils.file import read_file from vyos.utils.process import process_named_running +from vyos.xml_ref import default_value PROCESS_NAME = 'rsyslogd' RSYSLOG_CONF = '/etc/rsyslog.d/00-vyos.conf' @@ -102,6 +103,29 @@ def test_syslog_global(self): # Check for running process self.assertTrue(process_named_running(PROCESS_NAME)) + def test_syslog_remote(self): + rhost = '169.254.0.1' + default_port = default_value(base_path + ['host', rhost, 'port']) + + self.cli_set(base_path + ['global', 'facility', 'all', 'level', 'info']) + self.cli_set(base_path + ['global', 'facility', 'local7', 'level', 'debug']) + self.cli_set(base_path + ['host', rhost, 'facility', 'all', 'level', 'all']) + self.cli_set(base_path + ['host', rhost, 'protocol', 'tcp']) + + self.cli_commit() + + config = read_file(RSYSLOG_CONF) + self.assertIn(f'*.* @@{rhost}:{default_port}', config) + + # Change default port and enable "octet-counting" mode + port = '10514' + self.cli_set(base_path + ['host', rhost, 'port', port]) + self.cli_set(base_path + ['host', rhost, 'format', 'octet-counted']) + self.cli_commit() + + config = read_file(RSYSLOG_CONF) + self.assertIn(f'*.* @@(o){rhost}:{port}', config) + if __name__ == '__main__': unittest.main(verbosity=2) From 5fac74ef9d3356a66a39e8ea6ddcb40d6944e23b Mon Sep 17 00:00:00 2001 From: "Nataliia S." <81954790+natali-rs1985@users.noreply.github.com> Date: Fri, 8 Nov 2024 10:30:29 +0200 Subject: [PATCH 61/85] dhcp_server: T6031: DHCP Option Failures - windows-static-route (#4183) In "option-def" parameter "record-types" field is an array of uint8 in format ", , , , , , " where with the value 0 is omitted, so the minimal length of array is 7 (e.g. 
for routing 10.1.0.0/16 via 10.1.6.4 "record-types": "16,10,1,10,1,6,4") --- data/templates/dhcp-server/kea-dhcp4.conf.j2 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/data/templates/dhcp-server/kea-dhcp4.conf.j2 b/data/templates/dhcp-server/kea-dhcp4.conf.j2 index bf37b94f660..29cf5e082be 100644 --- a/data/templates/dhcp-server/kea-dhcp4.conf.j2 +++ b/data/templates/dhcp-server/kea-dhcp4.conf.j2 @@ -29,14 +29,14 @@ "code": 121, "type": "record", "array": true, - "record-types": "uint8,uint8,uint8,uint8,uint8,uint8,uint8,uint8" + "record-types": "uint8,uint8,uint8,uint8,uint8,uint8,uint8" }, { "name": "windows-static-route", "code": 249, "type": "record", "array": true, - "record-types": "uint8,uint8,uint8,uint8,uint8,uint8,uint8,uint8" + "record-types": "uint8,uint8,uint8,uint8,uint8,uint8,uint8" }, { "name": "wpad-url", From 4139e1c12c3f8d6abdf42dc3febfffc097a41c7a Mon Sep 17 00:00:00 2001 From: Roman Khramshin Date: Fri, 8 Nov 2024 14:50:14 +0600 Subject: [PATCH 62/85] T6802: Fix QoS Policy Round-Robin with Default Configuration (#4177) - Resolved unhandled exception occurring with default round-robin policy config. - Added default filter to ensure proper round-robin policy. --- python/vyos/qos/roundrobin.py | 13 ++++- smoketest/scripts/cli/test_qos.py | 82 ++++++++++++++++++++++++++++--- 2 files changed, 87 insertions(+), 8 deletions(-) diff --git a/python/vyos/qos/roundrobin.py b/python/vyos/qos/roundrobin.py index 80814ddfbb7..509c4069f52 100644 --- a/python/vyos/qos/roundrobin.py +++ b/python/vyos/qos/roundrobin.py @@ -15,6 +15,7 @@ from vyos.qos.base import QoSBase + class RoundRobin(QoSBase): _parent = 1 @@ -34,11 +35,21 @@ def update(self, config, direction): if 'default' in config: class_id_max = self._get_class_max_id(config) - default_cls_id = int(class_id_max) +1 + default_cls_id = int(class_id_max) + 1 if class_id_max else 1 # class ID via CLI is in range 1-4095, thus 1000 hex = 4096 tmp = f'tc class replace dev {self._interface} parent 1:1 classid 1:{default_cls_id:x} drr' self._cmd(tmp) + # You need to add at least one filter to classify packets + # otherwise, all packets will be dropped. 
+ filter_cmd = ( + f'tc filter replace dev {self._interface} ' + f'parent {self._parent:x}: prio {default_cls_id} protocol all ' + 'u32 match u32 0 0 ' + f'flowid {self._parent}:{default_cls_id}' + ) + self._cmd(filter_cmd) + # call base class super().update(config, direction, priority=True) diff --git a/smoketest/scripts/cli/test_qos.py b/smoketest/scripts/cli/test_qos.py index b98c0e9b797..77d384024b1 100755 --- a/smoketest/scripts/cli/test_qos.py +++ b/smoketest/scripts/cli/test_qos.py @@ -26,25 +26,42 @@ base_path = ['qos'] -def get_tc_qdisc_json(interface) -> dict: +def get_tc_qdisc_json(interface, all=False) -> dict: tmp = cmd(f'tc -detail -json qdisc show dev {interface}') tmp = loads(tmp) + + if all: + return tmp + return next(iter(tmp)) -def get_tc_filter_json(interface, direction) -> list: - if direction not in ['ingress', 'egress']: + +def get_tc_filter_json(interface, direction=None) -> list: + if direction not in ['ingress', 'egress', None]: raise ValueError() - tmp = cmd(f'tc -detail -json filter show dev {interface} {direction}') + + cmd_stmt = f'tc -detail -json filter show dev {interface}' + if direction: + cmd_stmt += f' {direction}' + + tmp = cmd(cmd_stmt) tmp = loads(tmp) return tmp -def get_tc_filter_details(interface, direction) -> list: + +def get_tc_filter_details(interface, direction=None) -> list: # json doesn't contain all params, such as mtu - if direction not in ['ingress', 'egress']: + if direction not in ['ingress', 'egress', None]: raise ValueError() - tmp = cmd(f'tc -details filter show dev {interface} {direction}') + + cmd_stmt = f'tc -details filter show dev {interface}' + if direction: + cmd_stmt += f' {direction}' + + tmp = cmd(cmd_stmt) return tmp + class TestQoS(VyOSUnitTestSHIM.TestCase): @classmethod def setUpClass(cls): @@ -854,6 +871,57 @@ def test_16_wrong_traffic_match_group(self): self.cli_set(['qos', 'traffic-match-group', '3', 'match-group', 'unexpected']) self.cli_commit() + def test_20_round_robin_policy_default(self): + interface = self._interfaces[0] + policy_name = f'qos-policy-{interface}' + + self.cli_set(base_path + ['interface', interface, 'egress', policy_name]) + self.cli_set( + base_path + + ['policy', 'round-robin', policy_name, 'description', 'default policy'] + ) + + # commit changes + self.cli_commit() + + tmp = get_tc_qdisc_json(interface, all=True) + + self.assertEqual(2, len(tmp)) + self.assertEqual('drr', tmp[0]['kind']) + self.assertDictEqual({}, tmp[0]['options']) + self.assertEqual('sfq', tmp[1]['kind']) + self.assertDictEqual( + { + 'limit': 127, + 'quantum': 1514, + 'depth': 127, + 'flows': 128, + 'divisor': 1024, + }, + tmp[1]['options'], + ) + + tmp = get_tc_filter_json(interface) + self.assertEqual(3, len(tmp)) + + for rec in tmp: + self.assertEqual('u32', rec['kind']) + self.assertEqual(1, rec['pref']) + self.assertEqual('all', rec['protocol']) + + self.assertDictEqual( + { + 'fh': '800::800', + 'order': 2048, + 'key_ht': '800', + 'bkt': '0', + 'flowid': '1:1', + 'not_in_hw': True, + 'match': {'value': '0', 'mask': '0', 'offmask': '', 'off': 0}, + }, + tmp[2]['options'], + ) + if __name__ == '__main__': unittest.main(verbosity=2) From a48b8fdc18a654cfe6b4d7cfefecd9338b623c55 Mon Sep 17 00:00:00 2001 From: Date Huang Date: Fri, 8 Nov 2024 21:19:03 +0800 Subject: [PATCH 63/85] T6861: op-mode: ignore error code 255 if this UEFI doesn't support secure boot `mokutil --sb-state` will output "This system doesn't support Secure Boot" to stderr. and return error code 255 if the UEFI system doesn't support secure boot. 
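Note: the change below passes an expected-exit-code list to cmd() so that 255 is tolerated. A simplified stand-in for that behaviour, using only the standard library (helper name is illustrative; the real implementation lives in vyos.utils.process):

    import subprocess

    def run_tolerant(command: list[str], allowed: tuple[int, ...] = (0,)) -> str:
        """Run a command; raise only if the exit code is not in 'allowed'."""
        proc = subprocess.run(command, capture_output=True, text=True)
        if proc.returncode not in allowed:
            raise RuntimeError(f'{command} exited with unexpected code {proc.returncode}')
        # combine stdout and stderr here because mokutil prints the
        # "doesn't support Secure Boot" notice to stderr
        return (proc.stdout + proc.stderr).strip()

    # e.g. secure_boot = 'enabled' in run_tolerant(['mokutil', '--sb-state'], allowed=(0, 255))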
Signed-off-by: Date Huang --- python/vyos/utils/system.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/vyos/utils/system.py b/python/vyos/utils/system.py index 7b12efb1479..a556a8ec4e6 100644 --- a/python/vyos/utils/system.py +++ b/python/vyos/utils/system.py @@ -145,5 +145,5 @@ def get_secure_boot_state() -> bool: from vyos.utils.boot import is_uefi_system if not is_uefi_system(): return False - tmp = cmd('mokutil --sb-state') + tmp = cmd('mokutil --sb-state', expect=[255]) return bool('enabled' in tmp) From c6a097ee7d9b6d2f4c4b8ee63ca45ccfb6fdda34 Mon Sep 17 00:00:00 2001 From: Date Huang Date: Tue, 12 Nov 2024 18:01:27 +0800 Subject: [PATCH 64/85] T6861: op-mode: add 0 into errno expect list (#4189) Signed-off-by: Date Huang --- python/vyos/utils/system.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/vyos/utils/system.py b/python/vyos/utils/system.py index a556a8ec4e6..6c112334bd4 100644 --- a/python/vyos/utils/system.py +++ b/python/vyos/utils/system.py @@ -145,5 +145,5 @@ def get_secure_boot_state() -> bool: from vyos.utils.boot import is_uefi_system if not is_uefi_system(): return False - tmp = cmd('mokutil --sb-state', expect=[255]) + tmp = cmd('mokutil --sb-state', expect=[0, 255]) return bool('enabled' in tmp) From 5e9c71fb28676340ef4ebf02a1d1359b11f09d5b Mon Sep 17 00:00:00 2001 From: Christian Breunig Date: Mon, 11 Nov 2024 20:25:12 +0100 Subject: [PATCH 65/85] babel: T4977: we do not have EIGRP support - remove redistribution option --- interface-definitions/protocols_babel.xml.in | 6 ------ 1 file changed, 6 deletions(-) diff --git a/interface-definitions/protocols_babel.xml.in b/interface-definitions/protocols_babel.xml.in index 49fffe2306e..edea5742581 100644 --- a/interface-definitions/protocols_babel.xml.in +++ b/interface-definitions/protocols_babel.xml.in @@ -83,12 +83,6 @@ - - - Redistribute EIGRP routes - - - Redistribute IS-IS routes From 0cb568c0eb9168c82fa3f12de77737bbab46c53d Mon Sep 17 00:00:00 2001 From: Christian Breunig Date: Mon, 11 Nov 2024 20:26:06 +0100 Subject: [PATCH 66/85] babel: T4977: use common include for IPv4/IPv6 route redistribution --- .../include/babel/redistribute-common.xml.i | 38 +++++++++ interface-definitions/protocols_babel.xml.in | 78 +------------------ 2 files changed, 42 insertions(+), 74 deletions(-) create mode 100644 interface-definitions/include/babel/redistribute-common.xml.i diff --git a/interface-definitions/include/babel/redistribute-common.xml.i b/interface-definitions/include/babel/redistribute-common.xml.i new file mode 100644 index 00000000000..93efe68dd40 --- /dev/null +++ b/interface-definitions/include/babel/redistribute-common.xml.i @@ -0,0 +1,38 @@ + + + + Border Gateway Protocol (BGP) + + + + + + Connected routes (directly attached subnet or host) + + + + + + Intermediate System to Intermediate System (IS-IS) + + + + + + Redistribute Kernel routes (not installed via the zebra RIB) + + + + + + OpenFabric Routing Protocol + + + + + + Statically configured routes + + + + diff --git a/interface-definitions/protocols_babel.xml.in b/interface-definitions/protocols_babel.xml.in index edea5742581..2795a7dd495 100644 --- a/interface-definitions/protocols_babel.xml.in +++ b/interface-definitions/protocols_babel.xml.in @@ -71,36 +71,7 @@ Redistribute IPv4 routes - - - Redistribute BGP routes - - - - - - Redistribute connected routes - - - - - - Redistribute IS-IS routes - - - - - - Redistribute kernel routes - - - - - - Redistribute NHRP routes - - - + #include Redistribute 
OSPF routes @@ -113,12 +84,6 @@ - - - Redistribute static routes - - - @@ -126,51 +91,16 @@ Redistribute IPv6 routes - - - Redistribute BGP routes - - - - - - Redistribute connected routes - - - - - - Redistribute IS-IS routes - - - - - - Redistribute kernel routes - - - - - - Redistribute NHRP routes - - - + #include - Redistribute OSPFv3 routes + Open Shortest Path First (IPv6) (OSPFv3) - Redistribute RIPng routes - - - - - - Redistribute static routes + Routing Information Protocol next-generation (IPv6) (RIPng) From 02a3eaf7662fbe05d4f4930f361e6fce813cc509 Mon Sep 17 00:00:00 2001 From: Christian Breunig Date: Mon, 11 Nov 2024 20:26:33 +0100 Subject: [PATCH 67/85] babel: T4977: add missing smoketests --- smoketest/scripts/cli/test_protocols_babel.py | 131 ++++++++++++++++++ 1 file changed, 131 insertions(+) create mode 100755 smoketest/scripts/cli/test_protocols_babel.py diff --git a/smoketest/scripts/cli/test_protocols_babel.py b/smoketest/scripts/cli/test_protocols_babel.py new file mode 100755 index 00000000000..b4dbe529efd --- /dev/null +++ b/smoketest/scripts/cli/test_protocols_babel.py @@ -0,0 +1,131 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2024 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import unittest + +from base_vyostest_shim import VyOSUnitTestSHIM + +from vyos.ifconfig import Section +from vyos.utils.process import process_named_running +from vyos.xml_ref import default_value + +PROCESS_NAME = 'babeld' +base_path = ['protocols', 'babel'] + +class TestProtocolsBABEL(VyOSUnitTestSHIM.TestCase): + @classmethod + def setUpClass(cls): + cls._interfaces = Section.interfaces('ethernet', vlan=False) + # call base-classes classmethod + super(TestProtocolsBABEL, cls).setUpClass() + # Retrieve FRR daemon PID - it is not allowed to crash, thus PID must remain the same + cls.daemon_pid = process_named_running(PROCESS_NAME) + # ensure we can also run this test on a live system - so lets clean + # out the current configuration :) + cls.cli_delete(cls, base_path) + + def tearDown(self): + # always destroy the entire babel configuration to make the processes + # life as hard as possible + self.cli_delete(base_path) + self.cli_commit() + + # check process health and continuity + self.assertEqual(self.daemon_pid, process_named_running(PROCESS_NAME)) + + def test_babel_interfaces(self): + def_update_interval = default_value(base_path + ['interface', 'eth0', 'update-interval']) + channel = '20' + hello_interval = '1000' + max_rtt_penalty = '100' + rtt_decay = '23' + rtt_max = '119' + rtt_min = '11' + rxcost = '40000' + type = 'wired' + + for interface in self._interfaces: + self.cli_set(base_path + ['interface', interface]) + self.cli_set(base_path + ['interface', interface, 'channel', channel]) + self.cli_set(base_path + ['interface', interface, 'enable-timestamps']) + self.cli_set(base_path + ['interface', interface, 'hello-interval', hello_interval]) + self.cli_set(base_path + ['interface', interface, 'max-rtt-penalty', max_rtt_penalty]) + 
self.cli_set(base_path + ['interface', interface, 'rtt-decay', rtt_decay]) + self.cli_set(base_path + ['interface', interface, 'rtt-max', rtt_max]) + self.cli_set(base_path + ['interface', interface, 'rtt-min', rtt_min]) + self.cli_set(base_path + ['interface', interface, 'enable-timestamps']) + self.cli_set(base_path + ['interface', interface, 'rxcost', rxcost]) + self.cli_set(base_path + ['interface', interface, 'split-horizon', 'disable']) + self.cli_set(base_path + ['interface', interface, 'type', type]) + + self.cli_commit() + + frrconfig = self.getFRRconfig('router babel', daemon=PROCESS_NAME) + for interface in self._interfaces: + self.assertIn(f' network {interface}', frrconfig) + + iface_config = self.getFRRconfig(f'interface {interface}', daemon=PROCESS_NAME) + self.assertIn(f' babel channel {channel}', iface_config) + self.assertIn(f' babel enable-timestamps', iface_config) + self.assertIn(f' babel update-interval {def_update_interval}', iface_config) + self.assertIn(f' babel hello-interval {hello_interval}', iface_config) + self.assertIn(f' babel rtt-decay {rtt_decay}', iface_config) + self.assertIn(f' babel rtt-max {rtt_max}', iface_config) + self.assertIn(f' babel rtt-min {rtt_min}', iface_config) + self.assertIn(f' babel rxcost {rxcost}', iface_config) + self.assertIn(f' babel max-rtt-penalty {max_rtt_penalty}', iface_config) + self.assertIn(f' no babel split-horizon', iface_config) + self.assertIn(f' babel {type}', iface_config) + + def test_babel_redistribute(self): + ipv4_protos = ['bgp', 'connected', 'isis', 'kernel', 'ospf', 'rip', 'static'] + ipv6_protos = ['bgp', 'connected', 'isis', 'kernel', 'ospfv3', 'ripng', 'static'] + + for protocol in ipv4_protos: + self.cli_set(base_path + ['redistribute', 'ipv4', protocol]) + for protocol in ipv6_protos: + self.cli_set(base_path + ['redistribute', 'ipv6', protocol]) + + self.cli_commit() + + frrconfig = self.getFRRconfig('router babel', daemon=PROCESS_NAME) + for protocol in ipv4_protos: + self.assertIn(f' redistribute ipv4 {protocol}', frrconfig) + for protocol in ipv6_protos: + if protocol == 'ospfv3': + protocol = 'ospf6' + self.assertIn(f' redistribute ipv6 {protocol}', frrconfig) + + def test_babel_basic(self): + diversity_factor = '64' + resend_delay = '100' + smoothing_half_life = '400' + + self.cli_set(base_path + ['parameters', 'diversity']) + self.cli_set(base_path + ['parameters', 'diversity-factor', diversity_factor]) + self.cli_set(base_path + ['parameters', 'resend-delay', resend_delay]) + self.cli_set(base_path + ['parameters', 'smoothing-half-life', smoothing_half_life]) + + self.cli_commit() + + frrconfig = self.getFRRconfig('router babel', daemon=PROCESS_NAME) + self.assertIn(f' babel diversity', frrconfig) + self.assertIn(f' babel diversity-factor {diversity_factor}', frrconfig) + self.assertIn(f' babel resend-delay {resend_delay}', frrconfig) + self.assertIn(f' babel smoothing-half-life {smoothing_half_life}', frrconfig) + +if __name__ == '__main__': + unittest.main(verbosity=2) From 1686dac1a67d2dbf21d0386fdf7ec9d6aad851b8 Mon Sep 17 00:00:00 2001 From: Christian Breunig Date: Mon, 11 Nov 2024 21:36:11 +0100 Subject: [PATCH 68/85] babel: T6866: IPv6 distribute-lists in access-list6 format have names not numbers --- .../include/rip/access-list6.xml.i | 22 ++--- smoketest/scripts/cli/test_protocols_babel.py | 87 +++++++++++++++++++ 2 files changed, 95 insertions(+), 14 deletions(-) diff --git a/interface-definitions/include/rip/access-list6.xml.i b/interface-definitions/include/rip/access-list6.xml.i index 
732135253de..395d21c146c 100644 --- a/interface-definitions/include/rip/access-list6.xml.i +++ b/interface-definitions/include/rip/access-list6.xml.i @@ -7,31 +7,25 @@ Access list to apply to input packets - - u32 - Access list to apply to input packets - policy access-list6 - - - + + txt + Name of IPv6 access-list + Access list to apply to output packets - - u32 - Access list to apply to output packets - policy access-list6 - - - + + txt + Name of IPv6 access-list + diff --git a/smoketest/scripts/cli/test_protocols_babel.py b/smoketest/scripts/cli/test_protocols_babel.py index b4dbe529efd..606c1efd33b 100755 --- a/smoketest/scripts/cli/test_protocols_babel.py +++ b/smoketest/scripts/cli/test_protocols_babel.py @@ -36,11 +36,15 @@ def setUpClass(cls): # ensure we can also run this test on a live system - so lets clean # out the current configuration :) cls.cli_delete(cls, base_path) + cls.cli_delete(cls, ['policy', 'prefix-list']) + cls.cli_delete(cls, ['policy', 'prefix-list6']) def tearDown(self): # always destroy the entire babel configuration to make the processes # life as hard as possible self.cli_delete(base_path) + self.cli_delete(['policy', 'prefix-list']) + self.cli_delete(['policy', 'prefix-list6']) self.cli_commit() # check process health and continuity @@ -127,5 +131,88 @@ def test_babel_basic(self): self.assertIn(f' babel resend-delay {resend_delay}', frrconfig) self.assertIn(f' babel smoothing-half-life {smoothing_half_life}', frrconfig) + def test_babel_distribute_list(self): + access_list_in4 = '40' + access_list_out4 = '50' + access_list_in4_iface = '44' + access_list_out4_iface = '55' + access_list_in6 = 'AL-foo-in6' + access_list_out6 = 'AL-foo-out6' + + prefix_list_in4 = 'PL-foo-in4' + prefix_list_out4 = 'PL-foo-out4' + prefix_list_in6 = 'PL-foo-in6' + prefix_list_out6 = 'PL-foo-out6' + + self.cli_set(['policy', 'access-list', access_list_in4]) + self.cli_set(['policy', 'access-list', access_list_out4]) + self.cli_set(['policy', 'access-list6', access_list_in6]) + self.cli_set(['policy', 'access-list6', access_list_out6]) + + self.cli_set(['policy', 'access-list', f'{access_list_in4_iface}']) + self.cli_set(['policy', 'access-list', f'{access_list_out4_iface}']) + + self.cli_set(['policy', 'prefix-list', prefix_list_in4]) + self.cli_set(['policy', 'prefix-list', prefix_list_out4]) + self.cli_set(['policy', 'prefix-list6', prefix_list_in6]) + self.cli_set(['policy', 'prefix-list6', prefix_list_out6]) + + self.cli_set(base_path + ['distribute-list', 'ipv4', 'access-list', 'in', access_list_in4]) + self.cli_set(base_path + ['distribute-list', 'ipv4', 'access-list', 'out', access_list_out4]) + self.cli_set(base_path + ['distribute-list', 'ipv6', 'access-list', 'in', access_list_in6]) + self.cli_set(base_path + ['distribute-list', 'ipv6', 'access-list', 'out', access_list_out6]) + + self.cli_set(base_path + ['distribute-list', 'ipv4', 'prefix-list', 'in', prefix_list_in4]) + self.cli_set(base_path + ['distribute-list', 'ipv4', 'prefix-list', 'out', prefix_list_out4]) + self.cli_set(base_path + ['distribute-list', 'ipv6', 'prefix-list', 'in', prefix_list_in6]) + self.cli_set(base_path + ['distribute-list', 'ipv6', 'prefix-list', 'out', prefix_list_out6]) + + for interface in self._interfaces: + self.cli_set(base_path + ['interface', interface]) + + self.cli_set(['policy', 'access-list6', f'{access_list_in6}-{interface}']) + self.cli_set(['policy', 'access-list6', f'{access_list_out6}-{interface}']) + + self.cli_set(['policy', 'prefix-list', f'{prefix_list_in4}-{interface}']) 
+ self.cli_set(['policy', 'prefix-list', f'{prefix_list_out4}-{interface}']) + self.cli_set(['policy', 'prefix-list6', f'{prefix_list_in6}-{interface}']) + self.cli_set(['policy', 'prefix-list6', f'{prefix_list_out6}-{interface}']) + + tmp_path = base_path + ['distribute-list', 'ipv4', 'interface', interface] + self.cli_set(tmp_path + ['access-list', 'in', f'{access_list_in4_iface}']) + self.cli_set(tmp_path + ['access-list', 'out', f'{access_list_out4_iface}']) + self.cli_set(tmp_path + ['prefix-list', 'in', f'{prefix_list_in4}-{interface}']) + self.cli_set(tmp_path + ['prefix-list', 'out', f'{prefix_list_out4}-{interface}']) + + tmp_path = base_path + ['distribute-list', 'ipv6', 'interface', interface] + self.cli_set(tmp_path + ['access-list', 'in', f'{access_list_in6}-{interface}']) + self.cli_set(tmp_path + ['access-list', 'out', f'{access_list_out6}-{interface}']) + self.cli_set(tmp_path + ['prefix-list', 'in', f'{prefix_list_in6}-{interface}']) + self.cli_set(tmp_path + ['prefix-list', 'out', f'{prefix_list_out6}-{interface}']) + + self.cli_commit() + + frrconfig = self.getFRRconfig('router babel', daemon=PROCESS_NAME) + self.assertIn(f' distribute-list {access_list_in4} in', frrconfig) + self.assertIn(f' distribute-list {access_list_out4} out', frrconfig) + self.assertIn(f' ipv6 distribute-list {access_list_in6} in', frrconfig) + self.assertIn(f' ipv6 distribute-list {access_list_out6} out', frrconfig) + + self.assertIn(f' distribute-list prefix {prefix_list_in4} in', frrconfig) + self.assertIn(f' distribute-list prefix {prefix_list_out4} out', frrconfig) + self.assertIn(f' ipv6 distribute-list prefix {prefix_list_in6} in', frrconfig) + self.assertIn(f' ipv6 distribute-list prefix {prefix_list_out6} out', frrconfig) + + for interface in self._interfaces: + self.assertIn(f' distribute-list {access_list_in4_iface} in {interface}', frrconfig) + self.assertIn(f' distribute-list {access_list_out4_iface} out {interface}', frrconfig) + self.assertIn(f' ipv6 distribute-list {access_list_in6}-{interface} in {interface}', frrconfig) + self.assertIn(f' ipv6 distribute-list {access_list_out6}-{interface} out {interface}', frrconfig) + + self.assertIn(f' distribute-list prefix {prefix_list_in4}-{interface} in {interface}', frrconfig) + self.assertIn(f' distribute-list prefix {prefix_list_out4}-{interface} out {interface}', frrconfig) + self.assertIn(f' ipv6 distribute-list prefix {prefix_list_in6}-{interface} in {interface}', frrconfig) + self.assertIn(f' ipv6 distribute-list prefix {prefix_list_out6}-{interface} out {interface}', frrconfig) + if __name__ == '__main__': unittest.main(verbosity=2) From 3fae9e812c21508e12e88b69697cbeb73c19720e Mon Sep 17 00:00:00 2001 From: khramshinr Date: Wed, 13 Nov 2024 13:33:08 +0800 Subject: [PATCH 69/85] T6795: Fix duplicate entries in class match filters --- python/vyos/qos/base.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/python/vyos/qos/base.py b/python/vyos/qos/base.py index 98e486e42b7..322cdca4494 100644 --- a/python/vyos/qos/base.py +++ b/python/vyos/qos/base.py @@ -248,6 +248,8 @@ def update(self, config, direction, priority=None): if 'match' in cls_config: has_filter = False + has_action_policy = any(tmp in ['exceed', 'bandwidth', 'burst'] for tmp in cls_config) + max_index = len(cls_config['match']) for index, (match, match_config) in enumerate(cls_config['match'].items(), start=1): filter_cmd = filter_cmd_base if not has_filter: @@ -335,15 +337,16 @@ def update(self, config, direction, priority=None): elif af 
== 'ipv6': filter_cmd += f' match u8 {mask} {mask} at 53' - cls = int(cls) - filter_cmd += f' flowid {self._parent:x}:{cls:x}' - self._cmd(filter_cmd) + if index != max_index or not has_action_policy: + # avoid duplicate last match rule + cls = int(cls) + filter_cmd += f' flowid {self._parent:x}:{cls:x}' + self._cmd(filter_cmd) vlan_expression = "match.*.vif" match_vlan = jmespath.search(vlan_expression, cls_config) - if any(tmp in ['exceed', 'bandwidth', 'burst'] for tmp in cls_config) \ - and has_filter: + if has_action_policy and has_filter: # For "vif" "basic match" is used instead of "action police" T5961 if not match_vlan: filter_cmd += f' action police' From e6558a535594c1cd25133979f07663cf6958ce75 Mon Sep 17 00:00:00 2001 From: khramshinr Date: Thu, 14 Nov 2024 10:25:38 +0800 Subject: [PATCH 70/85] T6801: QoS: Policy rate-control is broken by default - Fixed unhandled exception for policy rate-control without params --- smoketest/scripts/cli/test_qos.py | 21 +++++++++++++++++++++ src/conf_mode/qos.py | 3 +++ 2 files changed, 24 insertions(+) diff --git a/smoketest/scripts/cli/test_qos.py b/smoketest/scripts/cli/test_qos.py index 77d384024b1..aaeebcdae2b 100755 --- a/smoketest/scripts/cli/test_qos.py +++ b/smoketest/scripts/cli/test_qos.py @@ -922,6 +922,27 @@ def test_20_round_robin_policy_default(self): tmp[2]['options'], ) + def test_22_rate_control_default(self): + interface = self._interfaces[0] + policy_name = f'qos-policy-{interface}' + bandwidth = 5000 + + self.cli_set(base_path + ['interface', interface, 'egress', policy_name]) + self.cli_set(base_path + ['policy', 'rate-control', policy_name]) + with self.assertRaises(ConfigSessionError): + # Bandwidth not defined + self.cli_commit() + + self.cli_set(base_path + ['policy', 'rate-control', policy_name, 'bandwidth', str(bandwidth)]) + # commit changes + self.cli_commit() + + tmp = get_tc_qdisc_json(interface) + + self.assertEqual('tbf', tmp['kind']) + # TC store rates as a 32-bit unsigned integer in bps (Bytes per second) + self.assertEqual(int(bandwidth * 125), tmp['options']['rate']) + if __name__ == '__main__': unittest.main(verbosity=2) diff --git a/src/conf_mode/qos.py b/src/conf_mode/qos.py index 7dfad3180e4..a4d5f44e7d2 100755 --- a/src/conf_mode/qos.py +++ b/src/conf_mode/qos.py @@ -255,6 +255,9 @@ def verify(qos): if policy_type in ['priority_queue']: if 'default' not in policy_config: raise ConfigError(f'Policy {policy} misses "default" class!') + if policy_type in ['rate_control']: + if 'bandwidth' not in policy_config: + raise ConfigError('Bandwidth not defined') if 'default' in policy_config: if 'bandwidth' not in policy_config['default'] and policy_type not in ['priority_queue', 'round_robin', 'shaper_hfsc']: raise ConfigError('Bandwidth not defined for default traffic!') From 92631e7516bfe8aebc0332d41c17ba92c93ab7bb Mon Sep 17 00:00:00 2001 From: Viacheslav Hletenko Date: Thu, 14 Nov 2024 14:37:30 +0000 Subject: [PATCH 71/85] T6876: DHCP-server increase retries that Kea makes to open a socket KEA DHCP-server sometimes could be in the race condition when an interface is not in the UP state. The server tries to open the socket 5 times with an interval of 5 seconds, then just starts as it is. In this case, users cannot get leases from the interface, which was in the DOWN state before starting KEA, but it is now in the UP state. 
Increase 'service-sockets-max-retries' from 5 to 60 --- data/templates/dhcp-server/kea-dhcp4.conf.j2 | 2 +- data/templates/dhcp-server/kea-dhcp6.conf.j2 | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/data/templates/dhcp-server/kea-dhcp4.conf.j2 b/data/templates/dhcp-server/kea-dhcp4.conf.j2 index 29cf5e082be..2e10d58e00e 100644 --- a/data/templates/dhcp-server/kea-dhcp4.conf.j2 +++ b/data/templates/dhcp-server/kea-dhcp4.conf.j2 @@ -11,7 +11,7 @@ "interfaces": [ "*" ], "dhcp-socket-type": "raw", {% endif %} - "service-sockets-max-retries": 5, + "service-sockets-max-retries": 60, "service-sockets-retry-wait-time": 5000 }, "control-socket": { diff --git a/data/templates/dhcp-server/kea-dhcp6.conf.j2 b/data/templates/dhcp-server/kea-dhcp6.conf.j2 index 2f0de6b3067..4745d693ca6 100644 --- a/data/templates/dhcp-server/kea-dhcp6.conf.j2 +++ b/data/templates/dhcp-server/kea-dhcp6.conf.j2 @@ -6,7 +6,7 @@ {% else %} "interfaces": [ "*" ], {% endif %} - "service-sockets-max-retries": 5, + "service-sockets-max-retries": 60, "service-sockets-retry-wait-time": 5000 }, "control-socket": { From 14fc23e43c5934f8c6871ec7c0a3e568bebba763 Mon Sep 17 00:00:00 2001 From: khramshinr Date: Fri, 15 Nov 2024 11:02:07 +0800 Subject: [PATCH 72/85] T6878: Stop conntrack logging service --- src/conf_mode/system_conntrack.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/conf_mode/system_conntrack.py b/src/conf_mode/system_conntrack.py index 2529445bf92..f25ed8d103c 100755 --- a/src/conf_mode/system_conntrack.py +++ b/src/conf_mode/system_conntrack.py @@ -258,6 +258,8 @@ def apply(conntrack): if 'log' in conntrack: call(f'systemctl restart vyos-conntrack-logger.service') + else: + call(f'systemctl stop vyos-conntrack-logger.service') return None From 82b73dc8d7ad06b7d621d34063db01e88321d7df Mon Sep 17 00:00:00 2001 From: Nicolas Vollmar Date: Sun, 17 Nov 2024 10:47:22 +0100 Subject: [PATCH 73/85] T6884: adds mtu option for container networks --- interface-definitions/container.xml.in | 1 + smoketest/scripts/cli/test_container.py | 16 ++++++++++++++++ src/conf_mode/container.py | 6 ++++++ 3 files changed, 23 insertions(+) diff --git a/interface-definitions/container.xml.in b/interface-definitions/container.xml.in index 3dd1b3249dd..2441ac410b0 100644 --- a/interface-definitions/container.xml.in +++ b/interface-definitions/container.xml.in @@ -501,6 +501,7 @@ #include + #include Prefix which allocated to that network diff --git a/smoketest/scripts/cli/test_container.py b/smoketest/scripts/cli/test_container.py index c03b9eb4491..0541384da91 100755 --- a/smoketest/scripts/cli/test_container.py +++ b/smoketest/scripts/cli/test_container.py @@ -224,6 +224,22 @@ def test_no_name_server(self): n = cmd_to_json(f'sudo podman network inspect {net_name}') self.assertEqual(n['dns_enabled'], False) + def test_network_mtu(self): + prefix = '192.0.2.0/24' + base_name = 'ipv4' + net_name = 'NET01' + + self.cli_set(base_path + ['network', net_name, 'prefix', prefix]) + self.cli_set(base_path + ['network', net_name, 'mtu', '1280']) + + name = f'{base_name}-2' + self.cli_set(base_path + ['name', name, 'image', cont_image]) + self.cli_set(base_path + ['name', name, 'network', net_name, 'address', str(ip_interface(prefix).ip + 2)]) + self.cli_commit() + + n = cmd_to_json(f'sudo podman network inspect {net_name}') + self.assertEqual(n['options']['mtu'], '1280') + def test_uid_gid(self): cont_name = 'uid-test' gid = '100' diff --git a/src/conf_mode/container.py b/src/conf_mode/container.py index 
14387cbbf62..a7dc33d9d47 100755 --- a/src/conf_mode/container.py +++ b/src/conf_mode/container.py @@ -419,12 +419,18 @@ def generate(container): 'dns_enabled': True, 'ipam_options': { 'driver': 'host-local' + }, + 'options': { + 'mtu': '1500' } } if 'no_name_server' in network_config: tmp['dns_enabled'] = False + if 'mtu' in network_config: + tmp['options']['mtu'] = network_config['mtu'] + for prefix in network_config['prefix']: net = {'subnet': prefix, 'gateway': inc_ip(prefix, 1)} tmp['subnets'].append(net) From f3da16e0da459027c1552b50f8a722cd75050630 Mon Sep 17 00:00:00 2001 From: Nicolas Vollmar Date: Mon, 18 Nov 2024 11:40:09 +0100 Subject: [PATCH 74/85] T6884: enables 16000 mtu --- interface-definitions/container.xml.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/interface-definitions/container.xml.in b/interface-definitions/container.xml.in index 2441ac410b0..bd2ff820da5 100644 --- a/interface-definitions/container.xml.in +++ b/interface-definitions/container.xml.in @@ -501,7 +501,7 @@ #include - #include + #include Prefix which allocated to that network From f38a402f158dad178165e330f292393278705281 Mon Sep 17 00:00:00 2001 From: John Estabrook Date: Mon, 18 Nov 2024 15:37:05 -0600 Subject: [PATCH 75/85] configd: T6899: use multipart message instead of extra exchange --- src/services/vyos-configd | 28 +++++++++------------------- src/shim/vyshim.c | 12 ++++-------- 2 files changed, 13 insertions(+), 27 deletions(-) diff --git a/src/services/vyos-configd b/src/services/vyos-configd index cb23642dc44..d977ba2cb10 100755 --- a/src/services/vyos-configd +++ b/src/services/vyos-configd @@ -56,6 +56,7 @@ else: SOCKET_PATH = 'ipc:///run/vyos-configd.sock' MAX_MSG_SIZE = 65535 +PAD_MSG_SIZE = 6 # Response error codes R_SUCCESS = 1 @@ -256,25 +257,14 @@ def process_node_data(config, data, _last: bool = False) -> tuple[int, str]: def send_result(sock, err, msg): - msg_size = min(MAX_MSG_SIZE, len(msg)) if msg else 0 - - err_rep = err.to_bytes(1, byteorder=sys.byteorder) - logger.debug(f'Sending reply: {err}') - sock.send(err_rep) - - # size req from vyshim client - size_req = sock.recv().decode() - logger.debug(f'Received request: {size_req}') - msg_size_rep = hex(msg_size).encode() - sock.send(msg_size_rep) - logger.debug(f'Sending reply: {msg_size}') - - if msg_size > 0: - # send req is sent from vyshim client only if msg_size > 0 - send_req = sock.recv().decode() - logger.debug(f'Received request: {send_req}') - sock.send(msg.encode()) - logger.debug('Sending reply with output') + msg = msg if msg else '' + msg_size = min(MAX_MSG_SIZE, len(msg)) + + err_rep = err.to_bytes(1) + msg_size_rep = f'{msg_size:#0{PAD_MSG_SIZE}x}' + + logger.debug(f'Sending reply: error_code {err} with output') + sock.send_multipart([err_rep, msg_size_rep.encode(), msg.encode()]) write_stdout_log(script_stdout_log, msg) diff --git a/src/shim/vyshim.c b/src/shim/vyshim.c index 68e6c401586..1eb653cbfad 100644 --- a/src/shim/vyshim.c +++ b/src/shim/vyshim.c @@ -119,21 +119,17 @@ int main(int argc, char* argv[]) zmq_send(requester, string_node_data_msg, strlen(string_node_data_msg), 0); zmq_recv(requester, error_code, 1, 0); - debug_print("Received node data receipt\n"); + debug_print("Received node data receipt with error_code\n"); char msg_size_str[7]; - zmq_send(requester, "msg_size", 8, 0); zmq_recv(requester, msg_size_str, 6, 0); msg_size_str[6] = '\0'; int msg_size = (int)strtol(msg_size_str, NULL, 16); debug_print("msg_size: %d\n", msg_size); - if (msg_size > 0) { - zmq_send(requester, 
"send", 4, 0); - char *msg = s_recv_string(requester, msg_size); - printf("%s", msg); - free(msg); - } + char *msg = s_recv_string(requester, msg_size); + printf("%s", msg); + free(msg); free(string_node_data_msg); From 2419c2f42b12604aac6ac2e32bac977e91ae8bc0 Mon Sep 17 00:00:00 2001 From: sskaje Date: Wed, 20 Nov 2024 03:40:34 +0800 Subject: [PATCH 76/85] T6490: Allow creation of wireguard interfaces without requiring peers (#4194) * T6490: Allow creation of wireguard interfaces without requiring peers --- python/vyos/ifconfig/wireguard.py | 3 +++ src/conf_mode/interfaces_wireguard.py | 36 +++++++++++++-------------- 2 files changed, 20 insertions(+), 19 deletions(-) diff --git a/python/vyos/ifconfig/wireguard.py b/python/vyos/ifconfig/wireguard.py index 9030b130248..cccac361dd0 100644 --- a/python/vyos/ifconfig/wireguard.py +++ b/python/vyos/ifconfig/wireguard.py @@ -195,6 +195,9 @@ def update(self, config): base_cmd += f' private-key {tmp_file.name}' base_cmd = base_cmd.format(**config) + # T6490: execute command to ensure interface configured + self._cmd(base_cmd) + if 'peer' in config: for peer, peer_config in config['peer'].items(): # T4702: No need to configure this peer when it was explicitly diff --git a/src/conf_mode/interfaces_wireguard.py b/src/conf_mode/interfaces_wireguard.py index 7abdfdbfa10..b6fd6b0b20a 100755 --- a/src/conf_mode/interfaces_wireguard.py +++ b/src/conf_mode/interfaces_wireguard.py @@ -70,9 +70,6 @@ def verify(wireguard): if 'private_key' not in wireguard: raise ConfigError('Wireguard private-key not defined') - if 'peer' not in wireguard: - raise ConfigError('At least one Wireguard peer is required!') - if 'port' in wireguard and 'port_changed' in wireguard: listen_port = int(wireguard['port']) if check_port_availability('0.0.0.0', listen_port, 'udp') is not True: @@ -80,28 +77,29 @@ def verify(wireguard): 'cannot be used for the interface!') # run checks on individual configured WireGuard peer - public_keys = [] - for tmp in wireguard['peer']: - peer = wireguard['peer'][tmp] + if 'peer' in wireguard: + public_keys = [] + for tmp in wireguard['peer']: + peer = wireguard['peer'][tmp] - if 'allowed_ips' not in peer: - raise ConfigError(f'Wireguard allowed-ips required for peer "{tmp}"!') + if 'allowed_ips' not in peer: + raise ConfigError(f'Wireguard allowed-ips required for peer "{tmp}"!') - if 'public_key' not in peer: - raise ConfigError(f'Wireguard public-key required for peer "{tmp}"!') + if 'public_key' not in peer: + raise ConfigError(f'Wireguard public-key required for peer "{tmp}"!') - if ('address' in peer and 'port' not in peer) or ('port' in peer and 'address' not in peer): - raise ConfigError('Both Wireguard port and address must be defined ' - f'for peer "{tmp}" if either one of them is set!') + if ('address' in peer and 'port' not in peer) or ('port' in peer and 'address' not in peer): + raise ConfigError('Both Wireguard port and address must be defined ' + f'for peer "{tmp}" if either one of them is set!') - if peer['public_key'] in public_keys: - raise ConfigError(f'Duplicate public-key defined on peer "{tmp}"') + if peer['public_key'] in public_keys: + raise ConfigError(f'Duplicate public-key defined on peer "{tmp}"') - if 'disable' not in peer: - if is_wireguard_key_pair(wireguard['private_key'], peer['public_key']): - raise ConfigError(f'Peer "{tmp}" has the same public key as the interface "{wireguard["ifname"]}"') + if 'disable' not in peer: + if is_wireguard_key_pair(wireguard['private_key'], peer['public_key']): + raise 
ConfigError(f'Peer "{tmp}" has the same public key as the interface "{wireguard["ifname"]}"') - public_keys.append(peer['public_key']) + public_keys.append(peer['public_key']) def generate(wireguard): return None From 1c8321a8679e132cec1b769ab364149e794558cc Mon Sep 17 00:00:00 2001 From: Roman Khramshin Date: Wed, 20 Nov 2024 15:21:37 +0600 Subject: [PATCH 77/85] T6790: QoS: Improve CAKE Policy (#4173) - Fixed handling of flow isolation parameters. - Corrected support for `nat` and `nonat` in flow isolation. - Extended RTT values to cover the full range supported by `tc`. - Make migration script 2-to-3 qos --- .../include/version/qos-version.xml.i | 2 +- interface-definitions/qos.xml.in | 111 ++++++++---------- python/vyos/qos/cake.py | 47 ++++---- smoketest/scripts/cli/test_qos.py | 63 ++++++++++ src/migration-scripts/qos/2-to-3 | 34 ++++++ 5 files changed, 174 insertions(+), 83 deletions(-) create mode 100644 src/migration-scripts/qos/2-to-3 diff --git a/interface-definitions/include/version/qos-version.xml.i b/interface-definitions/include/version/qos-version.xml.i index c67e61e9156..127f771a9f2 100644 --- a/interface-definitions/include/version/qos-version.xml.i +++ b/interface-definitions/include/version/qos-version.xml.i @@ -1,3 +1,3 @@ - + diff --git a/interface-definitions/qos.xml.in b/interface-definitions/qos.xml.in index 907fd5e4ce3..c6ecb742e3a 100644 --- a/interface-definitions/qos.xml.in +++ b/interface-definitions/qos.xml.in @@ -85,78 +85,67 @@ #include #include - + Flow isolation settings + + blind src-host dst-host host flow dual-src-host dual-dst-host triple-isolate + + + blind + Disables flow isolation, all traffic passes through a single queue + + + src-host + Flows are defined only by source address + + + dst-host + Flows are defined only by destination address + + + host + Flows are defined by source-destination host pairs + + + flow + Flows are defined by the entire 5-tuple + + + dual-src-host + Flows are defined by the 5-tuple, fairness is applied first over source addresses, then over individual flows + + + dual-dst-host + Flows are defined by the 5-tuple, fairness is applied first over destination addresses, then over individual flows + + + triple-isolate + Flows are defined by the 5-tuple, fairness is applied over source and destination addresses and also over individual flows (default) + + + (blind|src-host|dst-host|host|flow|dual-src-host|dual-dst-host|triple-isolate) + - - - - Disables flow isolation, all traffic passes through a single queue - - - - - - Flows are defined only by source address - - - - - - Flows are defined only by destination address - - - - - - Flows are defined by source-destination host pairs - - - - - - Flows are defined by the entire 5-tuple - - - - - - Flows are defined by the 5-tuple, fairness is applied first over source addresses, then over individual flows - - - - - - Flows are defined by the 5-tuple, fairness is applied first over destination addresses, then over individual flows - - - - - - Flows are defined by the 5-tuple, fairness is applied over source and destination addresses and also over individual flows (default) - - - - - - Perform NAT lookup before applying flow-isolation rules - - - - - + triple-isolate + + + + Perform NAT lookup before applying flow-isolation rules + + + Round-Trip-Time for Active Queue Management (AQM) - u32:1-3600000 + u32:1-1000000000 RTT in ms - + - RTT must be in range 1 to 3600000 milli-seconds + RTT must be in range 1 to 1000000000 milli-seconds 100 diff --git a/python/vyos/qos/cake.py 
b/python/vyos/qos/cake.py index 1ee7d0fc3cc..ca5a2691744 100644 --- a/python/vyos/qos/cake.py +++ b/python/vyos/qos/cake.py @@ -15,10 +15,25 @@ from vyos.qos.base import QoSBase + class CAKE(QoSBase): + """ + https://man7.org/linux/man-pages/man8/tc-cake.8.html + """ + _direction = ['egress'] - # https://man7.org/linux/man-pages/man8/tc-cake.8.html + flow_isolation_map = { + 'blind': 'flowblind', + 'src-host': 'srchost', + 'dst-host': 'dsthost', + 'dual-dst-host': 'dual-dsthost', + 'dual-src-host': 'dual-srchost', + 'triple-isolate': 'triple-isolate', + 'flow': 'flows', + 'host': 'hosts', + } + def update(self, config, direction): tmp = f'tc qdisc add dev {self._interface} root handle 1: cake {direction}' if 'bandwidth' in config: @@ -30,26 +45,16 @@ def update(self, config, direction): tmp += f' rtt {rtt}ms' if 'flow_isolation' in config: - if 'blind' in config['flow_isolation']: - tmp += f' flowblind' - if 'dst_host' in config['flow_isolation']: - tmp += f' dsthost' - if 'dual_dst_host' in config['flow_isolation']: - tmp += f' dual-dsthost' - if 'dual_src_host' in config['flow_isolation']: - tmp += f' dual-srchost' - if 'triple_isolate' in config['flow_isolation']: - tmp += f' triple-isolate' - if 'flow' in config['flow_isolation']: - tmp += f' flows' - if 'host' in config['flow_isolation']: - tmp += f' hosts' - if 'nat' in config['flow_isolation']: - tmp += f' nat' - if 'src_host' in config['flow_isolation']: - tmp += f' srchost ' - else: - tmp += f' nonat' + isolation_value = self.flow_isolation_map.get(config['flow_isolation']) + + if isolation_value is not None: + tmp += f' {isolation_value}' + else: + raise ValueError( + f'Invalid flow isolation parameter: {config["flow_isolation"]}' + ) + + tmp += ' nat' if 'flow_isolation_nat' in config else ' nonat' self._cmd(tmp) diff --git a/smoketest/scripts/cli/test_qos.py b/smoketest/scripts/cli/test_qos.py index aaeebcdae2b..9c3e848cdb2 100755 --- a/smoketest/scripts/cli/test_qos.py +++ b/smoketest/scripts/cli/test_qos.py @@ -22,6 +22,7 @@ from vyos.configsession import ConfigSessionError from vyos.ifconfig import Section +from vyos.qos import CAKE from vyos.utils.process import cmd base_path = ['qos'] @@ -871,6 +872,68 @@ def test_16_wrong_traffic_match_group(self): self.cli_set(['qos', 'traffic-match-group', '3', 'match-group', 'unexpected']) self.cli_commit() + def test_17_cake_updates(self): + bandwidth = 1000000 + rtt = 200 + interface = self._interfaces[0] + policy_name = f'qos-policy-{interface}' + + self.cli_set(base_path + ['interface', interface, 'egress', policy_name]) + self.cli_set( + base_path + ['policy', 'cake', policy_name, 'bandwidth', str(bandwidth)] + ) + self.cli_set(base_path + ['policy', 'cake', policy_name, 'rtt', str(rtt)]) + + # commit changes + self.cli_commit() + + tmp = get_tc_qdisc_json(interface) + + self.assertEqual('cake', tmp['kind']) + # TC store rates as a 32-bit unsigned integer in bps (Bytes per second) + self.assertEqual(int(bandwidth * 125), tmp['options']['bandwidth']) + # RTT internally is in us + self.assertEqual(int(rtt * 1000), tmp['options']['rtt']) + self.assertEqual('triple-isolate', tmp['options']['flowmode']) + self.assertFalse(tmp['options']['ingress']) + self.assertFalse(tmp['options']['nat']) + self.assertTrue(tmp['options']['raw']) + + nat = True + for flow_isolation in [ + 'blind', + 'src-host', + 'dst-host', + 'dual-dst-host', + 'dual-src-host', + 'triple-isolate', + 'flow', + 'host', + ]: + self.cli_set( + base_path + + ['policy', 'cake', policy_name, 'flow-isolation', flow_isolation] 
+ ) + + if nat: + self.cli_set( + base_path + ['policy', 'cake', policy_name, 'flow-isolation-nat'] + ) + else: + self.cli_delete( + base_path + ['policy', 'cake', policy_name, 'flow-isolation-nat'] + ) + + self.cli_commit() + + tmp = get_tc_qdisc_json(interface) + self.assertEqual( + CAKE.flow_isolation_map.get(flow_isolation), tmp['options']['flowmode'] + ) + + self.assertEqual(nat, tmp['options']['nat']) + nat = not nat + def test_20_round_robin_policy_default(self): interface = self._interfaces[0] policy_name = f'qos-policy-{interface}' diff --git a/src/migration-scripts/qos/2-to-3 b/src/migration-scripts/qos/2-to-3 new file mode 100644 index 00000000000..284fe828e6e --- /dev/null +++ b/src/migration-scripts/qos/2-to-3 @@ -0,0 +1,34 @@ +# Copyright 2024 VyOS maintainers and contributors +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this library. If not, see . + +from vyos.configtree import ConfigTree + + +def migrate(config: ConfigTree) -> None: + base = ['qos', 'policy', 'cake'] + if config.exists(base): + for policy in config.list_nodes(base): + if config.exists(base + [policy, 'flow-isolation']): + isolation = None + for isol in config.list_nodes(base + [policy, 'flow-isolation']): + if isol == 'nat': + config.set(base + [policy, 'flow-isolation-nat']) + else: + isolation = isol + + config.delete(base + [policy, 'flow-isolation']) + + if isolation: + config.set(base + [policy, 'flow-isolation'], value=isolation) From f6ee516ca4409265b8b2b883c132357bf76655a3 Mon Sep 17 00:00:00 2001 From: sarthurdev <965089+sarthurdev@users.noreply.github.com> Date: Wed, 20 Nov 2024 17:33:48 +0100 Subject: [PATCH 78/85] serial: T3397: Remove `--keep-baud` which could result in unexpected baud rate --- data/templates/getty/serial-getty.service.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/templates/getty/serial-getty.service.j2 b/data/templates/getty/serial-getty.service.j2 index 0183eae7d89..687b05b6dc9 100644 --- a/data/templates/getty/serial-getty.service.j2 +++ b/data/templates/getty/serial-getty.service.j2 @@ -22,7 +22,7 @@ Before=rescue.service # The '-o' option value tells agetty to replace 'login' arguments with an # option to preserve environment (-p), followed by '--' for safety, and then # the entered username. -ExecStart=-/sbin/agetty -o '-p -- \\u' --keep-baud {{ speed }} %I $TERM +ExecStart=-/sbin/agetty -o '-p -- \\u' %I {{ speed }} $TERM Type=idle Restart=always UtmpIdentifier=%I From eceb410fd5e7f74b1e0655bc60ce5b7e34fb422f Mon Sep 17 00:00:00 2001 From: John Estabrook Date: Wed, 20 Nov 2024 18:32:02 -0600 Subject: [PATCH 79/85] op-mode: T6900: remove uninformative 'show configuration files' The legacy node and behavior under 'show configuration files' is not useful as is; remove node and drop script to allow for a useful repurpose in the future. 
--- op-mode-definitions/show-configuration.xml.in | 7 ------- src/op_mode/show_configuration_files.sh | 10 ---------- 2 files changed, 17 deletions(-) delete mode 100755 src/op_mode/show_configuration_files.sh diff --git a/op-mode-definitions/show-configuration.xml.in b/op-mode-definitions/show-configuration.xml.in index 5a2fdedfade..7ec7188902a 100644 --- a/op-mode-definitions/show-configuration.xml.in +++ b/op-mode-definitions/show-configuration.xml.in @@ -23,13 +23,6 @@ cli-shell-api showCfg --show-active-only | vyos-config-to-commands - - - Show available saved configurations - - - ${vyos_op_scripts_dir}/show_configuration_files.sh - Show running configuration in JSON format diff --git a/src/op_mode/show_configuration_files.sh b/src/op_mode/show_configuration_files.sh deleted file mode 100755 index ad8e0747cc6..00000000000 --- a/src/op_mode/show_configuration_files.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash - -# Wrapper script for the show configuration files command -find ${vyatta_sysconfdir}/config/ \ - -type f \ - -not -name ".*" \ - -not -name "config.boot.*" \ - -printf "%f\t(%Tc)\t%T@\n" \ - | sort -r -k3 \ - | awk -F"\t" '{printf ("%-20s\t%s\n", $1,$2) ;}' From b51cf400b42d7b2d05237169a813d1e952213558 Mon Sep 17 00:00:00 2001 From: Roman Khramshin Date: Thu, 21 Nov 2024 13:41:10 +0600 Subject: [PATCH 80/85] T6796: QoS: match filter by interface(iif) (#4188) --- python/vyos/ifconfig/interface.py | 15 +++++++++++++++ python/vyos/qos/base.py | 8 +++++++- smoketest/scripts/cli/test_qos.py | 26 +++++++++++++++++++++++++- src/conf_mode/qos.py | 8 +++++++- 4 files changed, 54 insertions(+), 3 deletions(-) diff --git a/python/vyos/ifconfig/interface.py b/python/vyos/ifconfig/interface.py index 002d3da9e88..cd562e1fead 100644 --- a/python/vyos/ifconfig/interface.py +++ b/python/vyos/ifconfig/interface.py @@ -98,6 +98,10 @@ class Interface(Control): 'shellcmd': 'ip -json -detail link list dev {ifname}', 'format': lambda j: jmespath.search('[*].ifalias | [0]', json.loads(j)) or '', }, + 'ifindex': { + 'shellcmd': 'ip -json -detail link list dev {ifname}', + 'format': lambda j: jmespath.search('[*].ifindex | [0]', json.loads(j)) or '', + }, 'mac': { 'shellcmd': 'ip -json -detail link list dev {ifname}', 'format': lambda j: jmespath.search('[*].address | [0]', json.loads(j)), @@ -428,6 +432,17 @@ def _add_interface_to_ct_iface_map(self, vrf_table_id: int): nft_command = f'add element inet vrf_zones ct_iface_map {{ "{self.ifname}" : {vrf_table_id} }}' self._nft_check_and_run(nft_command) + def get_ifindex(self): + """ + Get interface index by name + + Example: + >>> from vyos.ifconfig import Interface + >>> Interface('eth0').get_ifindex() + '2' + """ + return int(self.get_interface('ifindex')) + def get_min_mtu(self): """ Get hardware minimum supported MTU diff --git a/python/vyos/qos/base.py b/python/vyos/qos/base.py index 322cdca4494..35cc5be18af 100644 --- a/python/vyos/qos/base.py +++ b/python/vyos/qos/base.py @@ -17,6 +17,7 @@ import jmespath from vyos.base import Warning +from vyos.ifconfig import Interface from vyos.utils.process import cmd from vyos.utils.dict import dict_search from vyos.utils.file import read_file @@ -253,7 +254,7 @@ def update(self, config, direction, priority=None): for index, (match, match_config) in enumerate(cls_config['match'].items(), start=1): filter_cmd = filter_cmd_base if not has_filter: - for key in ['mark', 'vif', 'ip', 'ipv6']: + for key in ['mark', 'vif', 'ip', 'ipv6', 'interface']: if key in match_config: has_filter = True break @@ -263,9 +264,14 
@@ def update(self, config, direction, priority=None): if 'mark' in match_config: mark = match_config['mark'] filter_cmd += f' handle {mark} fw' + if 'vif' in match_config: vif = match_config['vif'] filter_cmd += f' basic match "meta(vlan mask 0xfff eq {vif})"' + elif 'interface' in match_config: + iif_name = match_config['interface'] + iif = Interface(iif_name).get_ifindex() + filter_cmd += f' basic match "meta(rt_iif eq {iif})"' for af in ['ip', 'ipv6']: tc_af = af diff --git a/smoketest/scripts/cli/test_qos.py b/smoketest/scripts/cli/test_qos.py index 9c3e848cdb2..53b2c18b5bc 100755 --- a/smoketest/scripts/cli/test_qos.py +++ b/smoketest/scripts/cli/test_qos.py @@ -21,7 +21,7 @@ from base_vyostest_shim import VyOSUnitTestSHIM from vyos.configsession import ConfigSessionError -from vyos.ifconfig import Section +from vyos.ifconfig import Section, Interface from vyos.qos import CAKE from vyos.utils.process import cmd @@ -1006,6 +1006,30 @@ def test_22_rate_control_default(self): # TC store rates as a 32-bit unsigned integer in bps (Bytes per second) self.assertEqual(int(bandwidth * 125), tmp['options']['rate']) + def test_23_policy_limiter_iif_filter(self): + policy_name = 'smoke_test' + base_policy_path = ['qos', 'policy', 'limiter', policy_name] + + self.cli_set(['qos', 'interface', self._interfaces[0], 'ingress', policy_name]) + self.cli_set(base_policy_path + ['class', '100', 'bandwidth', '20gbit']) + self.cli_set(base_policy_path + ['class', '100', 'burst', '3760k']) + self.cli_set(base_policy_path + ['class', '100', 'match', 'test', 'interface', self._interfaces[0]]) + self.cli_set(base_policy_path + ['class', '100', 'priority', '20']) + self.cli_set(base_policy_path + ['default', 'bandwidth', '1gbit']) + self.cli_set(base_policy_path + ['default', 'burst', '125000000b']) + self.cli_commit() + + iif = Interface(self._interfaces[0]).get_ifindex() + tc_filters = cmd(f'tc filter show dev {self._interfaces[0]} ingress') + + # class 100 + self.assertIn('filter parent ffff: protocol all pref 20 basic chain 0', tc_filters) + self.assertIn(f'meta(rt_iif eq {iif})', tc_filters) + self.assertIn('action order 1: police 0x1 rate 20Gbit burst 3847500b mtu 2Kb action drop overhead 0b', tc_filters) + # default + self.assertIn('filter parent ffff: protocol all pref 255 basic chain 0', tc_filters) + self.assertIn('action order 1: police 0x2 rate 1Gbit burst 125000000b mtu 2Kb action drop overhead 0b', tc_filters) + if __name__ == '__main__': unittest.main(verbosity=2) diff --git a/src/conf_mode/qos.py b/src/conf_mode/qos.py index a4d5f44e7d2..0019873d9cc 100755 --- a/src/conf_mode/qos.py +++ b/src/conf_mode/qos.py @@ -198,10 +198,16 @@ def get_config(config=None): def _verify_match(cls_config: dict) -> None: if 'match' in cls_config: for match, match_config in cls_config['match'].items(): - if {'ip', 'ipv6'} <= set(match_config): + filters = set(match_config) + if {'ip', 'ipv6'} <= filters: raise ConfigError( f'Can not use both IPv6 and IPv4 in one match ({match})!') + if {'interface', 'vif'} & filters: + if {'ip', 'ipv6', 'ether'} & filters: + raise ConfigError( + f'Can not combine protocol and interface or vlan tag match ({match})!') + def _verify_match_group_exist(cls_config, qos): if 'match_group' in cls_config: From df0ef8a25f8f431ed9216b307f817a25d280acd8 Mon Sep 17 00:00:00 2001 From: Roman Khramshin Date: Thu, 21 Nov 2024 13:44:31 +0600 Subject: [PATCH 81/85] T6806: Rework QoS Policy for HFSC Shaper (#4181) - Removed default `m1` and `m2` values from interface definitions - Adjusted filter 
priorities for shapers - Fixed SFQ qdisc and HFSC class creation to fully support `m1`, `d`, and `m2` parameters - Added validation logic similar to VyOS 1.3 to improve error handling and user experience --- .../include/qos/hfsc-m1.xml.i | 1 - .../include/qos/hfsc-m2.xml.i | 1 - python/vyos/qos/base.py | 2 +- python/vyos/qos/trafficshaper.py | 116 +++++++--------- smoketest/scripts/cli/test_qos.py | 131 ++++++++++++++++++ src/conf_mode/qos.py | 47 ++++++- 6 files changed, 225 insertions(+), 73 deletions(-) diff --git a/interface-definitions/include/qos/hfsc-m1.xml.i b/interface-definitions/include/qos/hfsc-m1.xml.i index 21b9c4f324c..ca37f6ecfc4 100644 --- a/interface-definitions/include/qos/hfsc-m1.xml.i +++ b/interface-definitions/include/qos/hfsc-m1.xml.i @@ -27,6 +27,5 @@ bps(8),kbps(8*10^3),mbps(8*10^6), gbps, tbps - Byte/sec - 0bit diff --git a/interface-definitions/include/qos/hfsc-m2.xml.i b/interface-definitions/include/qos/hfsc-m2.xml.i index 24e8f5d6328..816546657b2 100644 --- a/interface-definitions/include/qos/hfsc-m2.xml.i +++ b/interface-definitions/include/qos/hfsc-m2.xml.i @@ -27,6 +27,5 @@ bps(8),kbps(8*10^3),mbps(8*10^6), gbps, tbps - Byte/sec - 100% diff --git a/python/vyos/qos/base.py b/python/vyos/qos/base.py index 35cc5be18af..12d940e3c7b 100644 --- a/python/vyos/qos/base.py +++ b/python/vyos/qos/base.py @@ -259,7 +259,7 @@ def update(self, config, direction, priority=None): has_filter = True break - if self.qostype == 'shaper' and 'prio ' not in filter_cmd: + if self.qostype in ['shaper', 'shaper_hfsc'] and 'prio ' not in filter_cmd: filter_cmd += f' prio {index}' if 'mark' in match_config: mark = match_config['mark'] diff --git a/python/vyos/qos/trafficshaper.py b/python/vyos/qos/trafficshaper.py index 8b0333c217d..9f92ccd8b1c 100644 --- a/python/vyos/qos/trafficshaper.py +++ b/python/vyos/qos/trafficshaper.py @@ -126,91 +126,71 @@ def update(self, config, direction): # call base class super().update(config, direction) + class TrafficShaperHFSC(QoSBase): + """ + Traffic shaper using Hierarchical Fair Service Curve (HFSC). + Documentation: https://man7.org/linux/man-pages/man8/tc-hfsc.8.html + """ + _parent = 1 qostype = 'shaper_hfsc' - # https://man7.org/linux/man-pages/man8/tc-hfsc.8.html - def update(self, config, direction): - class_id_max = 0 - if 'class' in config: - tmp = list(config['class']) - tmp.sort() - class_id_max = tmp[-1] + criteria = ['linkshare', 'realtime', 'upperlimit'] + short_criterion = { + 'linkshare': 'ls', + 'realtime': 'rt', + 'upperlimit': 'ul', + } + + def _gen_class(self, cls: int, cls_config: dict): + """ + Generate HFSC class and add Stochastic Fair Queueing (SFQ) qdisc. 
+ + Args: + cls (int): Class ID + cls_config (dict): Configuration for the class + """ + tmp = f'tc class replace dev {self._interface} parent {self._parent:x}:1 classid {self._parent:x}:{cls:x} hfsc' + + for crit in self.criteria: + param = cls_config.get(crit) + if param: + tmp += ( + f' {self.short_criterion[crit]}' + f' m1 {self._rate_convert(param["m1"]) if param.get("m1") else 0}' + f' d {param.get("d", 0)}ms' + f' m2 {self._rate_convert(param["m2"])}' + ) - r2q = 10 - # bandwidth is a mandatory CLI node - speed = self._rate_convert(config['bandwidth']) - speed_bps = int(speed) // 8 + self._cmd(tmp) - # need a bigger r2q if going fast than 16 mbits/sec - if (speed_bps // r2q) >= MAXQUANTUM: # integer division - r2q = ceil(speed_bps // MAXQUANTUM) - else: - # if there is a slow class then may need smaller value - if 'class' in config: - min_speed = speed_bps - for cls, cls_options in config['class'].items(): - # find class with the lowest bandwidth used - if 'bandwidth' in cls_options: - bw_bps = int(self._rate_convert(cls_options['bandwidth'])) // 8 # bandwidth in bytes per second - if bw_bps < min_speed: - min_speed = bw_bps + tmp = f'tc qdisc replace dev {self._interface} parent {self._parent:x}:{cls:x} sfq perturb 10' + self._cmd(tmp) - while (r2q > 1) and (min_speed // r2q) < MINQUANTUM: - tmp = r2q -1 - if (speed_bps // tmp) >= MAXQUANTUM: - break - r2q = tmp + def update(self, config, direction): + class_id_max = self._get_class_max_id(config) + default_cls_id = int(class_id_max) + 1 if class_id_max else 2 - default_minor_id = int(class_id_max) +1 - tmp = f'tc qdisc replace dev {self._interface} root handle {self._parent:x}: hfsc default {default_minor_id:x}' # default is in hex + speed = self._rate_convert(config['bandwidth']) + + tmp = f'tc qdisc replace dev {self._interface} root handle {self._parent:x}: hfsc default {default_cls_id:x}' # default is in hex self._cmd(tmp) tmp = f'tc class replace dev {self._interface} parent {self._parent:x}: classid {self._parent:x}:1 hfsc sc rate {speed} ul rate {speed}' self._cmd(tmp) + # tmp = f'tc qdisc add dev {self._interface} parent {self._parent:x}:1 handle f1: sfq perturb 10' + # self._cmd(tmp) + if 'class' in config: for cls, cls_config in config['class'].items(): - # class id is used later on and passed as hex, thus this needs to be an int - cls = int(cls) - # ls m1 - if cls_config.get('linkshare', {}).get('m1').endswith('%'): - percent = cls_config['linkshare']['m1'].rstrip('%') - m_one_rate = self._rate_convert(config['bandwidth']) * int(percent) // 100 - else: - m_one_rate = cls_config['linkshare']['m1'] - # ls m2 - if cls_config.get('linkshare', {}).get('m2').endswith('%'): - percent = cls_config['linkshare']['m2'].rstrip('%') - m_two_rate = self._rate_convert(config['bandwidth']) * int(percent) // 100 - else: - m_two_rate = self._rate_convert(cls_config['linkshare']['m2']) - - tmp = f'tc class replace dev {self._interface} parent {self._parent:x}:1 classid {self._parent:x}:{cls:x} hfsc ls m1 {m_one_rate} m2 {m_two_rate} ' - self._cmd(tmp) - - tmp = f'tc qdisc replace dev {self._interface} parent {self._parent:x}:{cls:x} sfq perturb 10' - self._cmd(tmp) + self._gen_class(cls=int(cls), cls_config=cls_config) if 'default' in config: - # ls m1 - if config.get('default', {}).get('linkshare', {}).get('m1').endswith('%'): - percent = config['default']['linkshare']['m1'].rstrip('%') - m_one_rate = self._rate_convert(config['default']['linkshare']['m1']) * int(percent) // 100 - else: - m_one_rate = config['default']['linkshare']['m1'] 
- # ls m2 - if config.get('default', {}).get('linkshare', {}).get('m2').endswith('%'): - percent = config['default']['linkshare']['m2'].rstrip('%') - m_two_rate = self._rate_convert(config['default']['linkshare']['m2']) * int(percent) // 100 - else: - m_two_rate = self._rate_convert(config['default']['linkshare']['m2']) - tmp = f'tc class replace dev {self._interface} parent {self._parent:x}:1 classid {self._parent:x}:{default_minor_id:x} hfsc ls m1 {m_one_rate} m2 {m_two_rate} ' - self._cmd(tmp) - - tmp = f'tc qdisc replace dev {self._interface} parent {self._parent:x}:{default_minor_id:x} sfq perturb 10' - self._cmd(tmp) + self._gen_class( + cls=int(default_cls_id), cls_config=config.get('default', {}) + ) # call base class super().update(config, direction) diff --git a/smoketest/scripts/cli/test_qos.py b/smoketest/scripts/cli/test_qos.py index 53b2c18b5bc..79b79128882 100755 --- a/smoketest/scripts/cli/test_qos.py +++ b/smoketest/scripts/cli/test_qos.py @@ -985,6 +985,137 @@ def test_20_round_robin_policy_default(self): tmp[2]['options'], ) + def test_21_shaper_hfsc(self): + interface = self._interfaces[0] + policy_name = f'qos-policy-{interface}' + ul = { + 'm1': '100kbit', + 'm2': '150kbit', + 'd': '100', + } + ls = {'m2': '120kbit'} + rt = { + 'm1': '110kbit', + 'm2': '130kbit', + 'd': '75', + } + self.cli_set(base_path + ['interface', interface, 'egress', policy_name]) + self.cli_set(base_path + ['policy', 'shaper-hfsc', policy_name]) + + # Policy {policy_name} misses "default" class! + with self.assertRaises(ConfigSessionError): + self.cli_commit() + + self.cli_set( + base_path + ['policy', 'shaper-hfsc', policy_name, 'default', 'upperlimit'] + ) + + # At least one m2 value needs to be set for class: {class_name} + with self.assertRaises(ConfigSessionError): + self.cli_commit() + + self.cli_set( + base_path + ['policy', 'shaper-hfsc', policy_name, 'default', 'upperlimit', 'm1', ul['m1']] + ) + # {class_name} upperlimit m1 value is set, but no m2 was found! + with self.assertRaises(ConfigSessionError): + self.cli_commit() + + self.cli_set( + base_path + ['policy', 'shaper-hfsc', policy_name, 'default', 'upperlimit', 'm2', ul['m2']] + ) + # {class_name} upperlimit m1 value is set, but no d was found! 
+ with self.assertRaises(ConfigSessionError): + self.cli_commit() + + self.cli_set( + base_path + ['policy', 'shaper-hfsc', policy_name, 'default', 'upperlimit', 'd', ul['d']] + ) + # Linkshare m2 needs to be defined to use upperlimit m2 for class: {class_name} + with self.assertRaises(ConfigSessionError): + self.cli_commit() + + self.cli_set( + base_path + ['policy', 'shaper-hfsc', policy_name, 'default', 'linkshare', 'm2', ls['m2']] + ) + self.cli_commit() + + # use raw because tc json is incorrect here + tmp = cmd(f'tc -details qdisc show dev {interface}') + for rec in tmp.split('\n'): + rec = rec.strip() + if 'root' in rec: + self.assertEqual(rec, 'qdisc hfsc 1: root refcnt 2 default 2') + else: + self.assertRegex( + rec, + r'qdisc sfq \S+: parent 1:2 limit 127p quantum 1514b depth 127 flows 128 divisor 1024 perturb 10sec', + ) + # use raw because tc json is incorrect here + tmp = cmd(f'tc -details class show dev {interface}') + for rec in tmp.split('\n'): + rec = rec.strip().lower() + if 'root' in rec: + self.assertEqual(rec, 'class hfsc 1: root') + elif 'hfsc 1:1' in rec: + # m2 \S+bit is auto bandwidth + self.assertRegex( + rec, + r'class hfsc 1:1 parent 1: sc m1 0bit d 0us m2 \S+bit ul m1 0bit d 0us m2 \S+bit', + ) + else: + self.assertRegex( + rec, + rf'class hfsc 1:2 parent 1:1 leaf \S+: ls m1 0bit d 0us m2 {ls["m2"]} ul m1 {ul["m1"]} d {ul["d"]}ms m2 {ul["m2"]}', + ) + + for key, val in rt.items(): + self.cli_set( + base_path + ['policy', 'shaper-hfsc', policy_name, 'default', 'realtime', key, val] + ) + self.cli_commit() + + tmp = cmd(f'tc -details class show dev {interface}') + for rec in tmp.split('\n'): + rec = rec.strip().lower() + if 'hfsc 1:2' in rec: + self.assertTrue( + f'rt m1 {rt["m1"]} d {rt["d"]}ms m2 {rt["m2"]} ls m1 0bit d 0us m2 {ls["m2"]} ul m1 {ul["m1"]} d {ul["d"]}ms m2 {ul["m2"]}' + in rec + ) + + # add some class + self.cli_set( + base_path + ['policy', 'shaper-hfsc', policy_name, 'class', '10', 'linkshare', 'm2', '300kbit'] + ) + self.cli_set( + base_path + ['policy', 'shaper-hfsc', policy_name, 'class', '10', 'match', 'tst', 'ip', 'dscp', 'internet'] + ) + + self.cli_set( + base_path + ['policy', 'shaper-hfsc', policy_name, 'class', '30', 'realtime', 'm2', '250kbit'] + ) + self.cli_set( + base_path + ['policy', 'shaper-hfsc', policy_name, 'class', '30', 'realtime', 'd', '77'] + ) + self.cli_set( + base_path + ['policy', 'shaper-hfsc', policy_name, 'class', '30', 'match', 'tst30', 'ip', 'dscp', 'critical'] + ) + self.cli_commit() + + tmp = cmd(f'tc -details qdisc show dev {interface}') + self.assertEqual(4, len(tmp.split('\n'))) + + tmp = cmd(f'tc -details class show dev {interface}') + tmp = tmp.lower() + + self.assertTrue( + f'rt m1 {rt["m1"]} d {rt["d"]}ms m2 {rt["m2"]} ls m1 0bit d 0us m2 {ls["m2"]} ul m1 {ul["m1"]} d {ul["d"]}ms m2 {ul["m2"]}' + in tmp + ) + self.assertTrue(': ls m1 0bit d 0us m2 300kbit' in tmp) + self.assertTrue(': rt m1 0bit d 77ms m2 250kbit' in tmp) + def test_22_rate_control_default(self): interface = self._interfaces[0] policy_name = f'qos-policy-{interface}' diff --git a/src/conf_mode/qos.py b/src/conf_mode/qos.py index 0019873d9cc..59e307a39f1 100755 --- a/src/conf_mode/qos.py +++ b/src/conf_mode/qos.py @@ -216,6 +216,46 @@ def _verify_match_group_exist(cls_config, qos): Warning(f'Match group "{group}" does not exist!') +def _verify_default_policy_exist(policy, policy_config): + if 'default' not in policy_config: + raise ConfigError(f'Policy {policy} misses "default" class!') + + +def _check_shaper_hfsc_rate(cls, cls_conf): + 
is_m2_exist = False + for crit in TrafficShaperHFSC.criteria: + if cls_conf.get(crit, {}).get('m2') is not None: + is_m2_exist = True + + if cls_conf.get(crit, {}).get('m1') is not None: + for crit_val in ['m2', 'd']: + if cls_conf.get(crit, {}).get(crit_val) is None: + raise ConfigError( + f'{cls} {crit} m1 value is set, but no {crit_val} was found!' + ) + + if not is_m2_exist: + raise ConfigError(f'At least one m2 value needs to be set for class: {cls}') + + if ( + cls_conf.get('upperlimit', {}).get('m2') is not None + and cls_conf.get('linkshare', {}).get('m2') is None + ): + raise ConfigError( + f'Linkshare m2 needs to be defined to use upperlimit m2 for class: {cls}' + ) + + +def _verify_shaper_hfsc(policy, policy_config): + _verify_default_policy_exist(policy, policy_config) + + _check_shaper_hfsc_rate('default', policy_config.get('default')) + + if 'class' in policy_config: + for cls, cls_conf in policy_config['class'].items(): + _check_shaper_hfsc_rate(cls, cls_conf) + + def verify(qos): if not qos or 'interface' not in qos: return None @@ -259,11 +299,13 @@ def verify(qos): if queue_lim < max_tr: raise ConfigError(f'Policy "{policy}" uses queue-limit "{queue_lim}" < max-threshold "{max_tr}"!') if policy_type in ['priority_queue']: - if 'default' not in policy_config: - raise ConfigError(f'Policy {policy} misses "default" class!') + _verify_default_policy_exist(policy, policy_config) if policy_type in ['rate_control']: if 'bandwidth' not in policy_config: raise ConfigError('Bandwidth not defined') + if policy_type in ['shaper_hfsc']: + _verify_shaper_hfsc(policy, policy_config) + if 'default' in policy_config: if 'bandwidth' not in policy_config['default'] and policy_type not in ['priority_queue', 'round_robin', 'shaper_hfsc']: raise ConfigError('Bandwidth not defined for default traffic!') @@ -299,6 +341,7 @@ def generate(qos): return None + def apply(qos): # Always delete "old" shapers first for interface in interfaces(): From e8e72e27a7f45607e75cb68b836108213481e1b8 Mon Sep 17 00:00:00 2001 From: Nataliia Solomko Date: Mon, 18 Nov 2024 13:02:39 +0200 Subject: [PATCH 82/85] ipoe_server: T6872: Add the ability to configure LUA scripts and username --- data/templates/accel-ppp/ipoe.config.j2 | 6 +++- .../service_ipoe-server.xml.in | 24 +++++++++++++ src/conf_mode/service_ipoe-server.py | 35 ++++++++++++++----- 3 files changed, 56 insertions(+), 9 deletions(-) diff --git a/data/templates/accel-ppp/ipoe.config.j2 b/data/templates/accel-ppp/ipoe.config.j2 index 81f63c53b05..a10dcf2c169 100644 --- a/data/templates/accel-ppp/ipoe.config.j2 +++ b/data/templates/accel-ppp/ipoe.config.j2 @@ -38,6 +38,9 @@ level={{ log.level }} [ipoe] verbose=1 +{% if lua_file is vyos_defined %} +lua-file={{ lua_file }} +{% endif %} {% if interface is vyos_defined %} {% for iface, iface_config in interface.items() %} {% set tmp = 'interface=' %} @@ -55,7 +58,8 @@ verbose=1 {% set range = 'range=' ~ iface_config.client_subnet ~ ',' if iface_config.client_subnet is vyos_defined else '' %} {% set relay = ',' ~ 'relay=' ~ iface_config.external_dhcp.dhcp_relay if iface_config.external_dhcp.dhcp_relay is vyos_defined else '' %} {% set giaddr = ',' ~ 'giaddr=' ~ iface_config.external_dhcp.giaddr if iface_config.external_dhcp.giaddr is vyos_defined else '' %} -{{ tmp }},{{ shared }}mode={{ iface_config.mode | upper }},ifcfg=1,{{ range }}start=dhcpv4,ipv6=1{{ relay }}{{ giaddr }} +{% set username = ',' ~ 'username=lua:' ~ iface_config.lua_username if iface_config.lua_username is vyos_defined else '' %} +{{ tmp }},{{ 
shared }}mode={{ iface_config.mode | upper }},ifcfg=1,{{ range }}start=dhcpv4,ipv6=1{{ relay }}{{ giaddr }}{{ username }} {% if iface_config.vlan_mon is vyos_defined %} vlan-mon={{ iface }},{{ iface_config.vlan | join(',') }} {% endif %} diff --git a/interface-definitions/service_ipoe-server.xml.in b/interface-definitions/service_ipoe-server.xml.in index 25bc43cc650..27a654f9268 100644 --- a/interface-definitions/service_ipoe-server.xml.in +++ b/interface-definitions/service_ipoe-server.xml.in @@ -174,10 +174,34 @@ + + + Username function + + txt + Name of the function in the Lua file to construct usernames with + + + #include + + + #include #include + + + Lua script file for constructing user names + + filename + File with Lua script in /config/scripts directory + + + + + + #include #include #include diff --git a/src/conf_mode/service_ipoe-server.py b/src/conf_mode/service_ipoe-server.py index c7e3ef0336c..a14d4b5b631 100755 --- a/src/conf_mode/service_ipoe-server.py +++ b/src/conf_mode/service_ipoe-server.py @@ -31,6 +31,7 @@ from vyos.accel_ppp_util import verify_accel_ppp_authentication from vyos import ConfigError from vyos import airbag + airbag.enable() @@ -52,7 +53,9 @@ def get_config(config=None): if dict_search('client_ip_pool', ipoe): # Multiple named pools require ordered values T5099 - ipoe['ordered_named_pools'] = get_pools_in_order(dict_search('client_ip_pool', ipoe)) + ipoe['ordered_named_pools'] = get_pools_in_order( + dict_search('client_ip_pool', ipoe) + ) ipoe['server_type'] = 'ipoe' return ipoe @@ -68,11 +71,23 @@ def verify(ipoe): for interface, iface_config in ipoe['interface'].items(): verify_interface_exists(ipoe, interface, warning_only=True) if 'client_subnet' in iface_config and 'vlan' in iface_config: - raise ConfigError('Option "client-subnet" and "vlan" are mutually exclusive, ' - 'use "client-ip-pool" instead!') - if 'vlan_mon' in iface_config and not 'vlan' in iface_config: + raise ConfigError( + 'Options "client-subnet" and "vlan" are mutually exclusive, ' + 'use "client-ip-pool" instead!' + ) + if 'vlan_mon' in iface_config and 'vlan' not in iface_config: raise ConfigError('Option "vlan-mon" requires "vlan" to be set!') + if 'lua_username' in iface_config: + if 'lua_file' not in ipoe: + raise ConfigError( + 'Option "lua-username" requires "lua-file" to be set!' + ) + if dict_search('authentication.mode', ipoe) != 'radius': + raise ConfigError( + 'Can configure username with Lua script only for RADIUS authentication' + ) + verify_accel_ppp_authentication(ipoe, local_users=False) verify_accel_ppp_ip_pool(ipoe) verify_accel_ppp_name_servers(ipoe) @@ -88,14 +103,15 @@ def generate(ipoe): render(ipoe_conf, 'accel-ppp/ipoe.config.j2', ipoe) if dict_search('authentication.mode', ipoe) == 'local': - render(ipoe_chap_secrets, 'accel-ppp/chap-secrets.ipoe.j2', - ipoe, permission=0o640) + render( + ipoe_chap_secrets, 'accel-ppp/chap-secrets.ipoe.j2', ipoe, permission=0o640 + ) return None def apply(ipoe): systemd_service = 'accel-ppp@ipoe.service' - if ipoe == None: + if ipoe is None: call(f'systemctl stop {systemd_service}') for file in [ipoe_conf, ipoe_chap_secrets]: if os.path.exists(file): @@ -103,7 +119,10 @@ def apply(ipoe): return None - call(f'systemctl reload-or-restart {systemd_service}') + # Accel-pppd does not do soft-reload correctly. 
+ # Most of the changes require restarting the service + call(f'systemctl restart {systemd_service}') + if __name__ == '__main__': try: From 5c7647bcc242d4b26cd9afdde1f084ef93916727 Mon Sep 17 00:00:00 2001 From: Viacheslav Hletenko Date: Tue, 19 Nov 2024 17:44:58 +0000 Subject: [PATCH 83/85] T264: IPsec add base64 encoded secret-type feature Add the ability to configure base64 encoded passwords for VPN IPSec site-to-site peers authentication psk PSK secret 'xxxxx==' authentication psk PSK secret-type --- data/templates/ipsec/swanctl.conf.j2 | 4 ++++ interface-definitions/vpn_ipsec.xml.in | 12 ++++++++++++ python/vyos/utils/convert.py | 26 +++++++++++++++++++++++++ smoketest/scripts/cli/test_vpn_ipsec.py | 7 +++++-- 4 files changed, 47 insertions(+), 2 deletions(-) diff --git a/data/templates/ipsec/swanctl.conf.j2 b/data/templates/ipsec/swanctl.conf.j2 index 698a9135efa..64e7ea860cb 100644 --- a/data/templates/ipsec/swanctl.conf.j2 +++ b/data/templates/ipsec/swanctl.conf.j2 @@ -87,7 +87,11 @@ secrets { id-{{ gen_uuid }} = "{{ id }}" {% endfor %} {% endif %} +{% if psk_config.secret_type is vyos_defined('base64') %} + secret = 0s{{ psk_config.secret }} +{% elif psk_config.secret_type is vyos_defined('plaintext') %} secret = "{{ psk_config.secret }}" +{% endif %} } {% endfor %} {% endif %} diff --git a/interface-definitions/vpn_ipsec.xml.in b/interface-definitions/vpn_ipsec.xml.in index d9d6fd93bb7..5540021e230 100644 --- a/interface-definitions/vpn_ipsec.xml.in +++ b/interface-definitions/vpn_ipsec.xml.in @@ -41,6 +41,18 @@ + + + Secret type + + base64 plaintext + + + (base64|plaintext) + + + plaintext + diff --git a/python/vyos/utils/convert.py b/python/vyos/utils/convert.py index dd4266f57c6..2f587405d4e 100644 --- a/python/vyos/utils/convert.py +++ b/python/vyos/utils/convert.py @@ -235,3 +235,29 @@ def convert_data(data) -> dict | list | tuple | str | int | float | bool | None: # which cannot be converted to JSON # for example: complex | range | memoryview return + + +def encode_to_base64(input_string): + """ + Encodes a given string to its base64 representation. + + Args: + input_string (str): The string to be encoded. + + Returns: + str: The base64-encoded version of the input string. + + Example: + input_string = "Hello, World!" 
+ encoded_string = encode_to_base64(input_string) + print(encoded_string) # Output: SGVsbG8sIFdvcmxkIQ== + """ + import base64 + # Convert the string to bytes + byte_string = input_string.encode('utf-8') + + # Encode the byte string to base64 + encoded_string = base64.b64encode(byte_string) + + # Decode the base64 bytes back to a string + return encoded_string.decode('utf-8') diff --git a/smoketest/scripts/cli/test_vpn_ipsec.py b/smoketest/scripts/cli/test_vpn_ipsec.py index de18d042744..f2bea58d1ce 100755 --- a/smoketest/scripts/cli/test_vpn_ipsec.py +++ b/smoketest/scripts/cli/test_vpn_ipsec.py @@ -21,6 +21,7 @@ from vyos.configsession import ConfigSessionError from vyos.ifconfig import Interface +from vyos.utils.convert import encode_to_base64 from vyos.utils.process import process_named_running from vyos.utils.file import read_file @@ -495,6 +496,7 @@ def test_flex_vpn_vips(self): local_id = 'vyos-r1' remote_id = 'vyos-r2' peer_base_path = base_path + ['site-to-site', 'peer', connection_name] + secret_base64 = encode_to_base64(secret) self.cli_set(tunnel_path + ['tun1', 'encapsulation', 'gre']) self.cli_set(tunnel_path + ['tun1', 'source-address', local_address]) @@ -509,7 +511,8 @@ def test_flex_vpn_vips(self): self.cli_set(base_path + ['authentication', 'psk', connection_name, 'id', remote_id]) self.cli_set(base_path + ['authentication', 'psk', connection_name, 'id', local_address]) self.cli_set(base_path + ['authentication', 'psk', connection_name, 'id', peer_ip]) - self.cli_set(base_path + ['authentication', 'psk', connection_name, 'secret', secret]) + self.cli_set(base_path + ['authentication', 'psk', connection_name, 'secret', secret_base64]) + self.cli_set(base_path + ['authentication', 'psk', connection_name, 'secret-type', 'base64']) self.cli_set(peer_base_path + ['authentication', 'local-id', local_id]) self.cli_set(peer_base_path + ['authentication', 'mode', 'pre-shared-secret']) @@ -546,7 +549,7 @@ def test_flex_vpn_vips(self): f'id-{regex_uuid4} = "{remote_id}"', f'id-{regex_uuid4} = "{peer_ip}"', f'id-{regex_uuid4} = "{local_address}"', - f'secret = "{secret}"', + f'secret = 0s{secret_base64}', ] for line in swanctl_secrets_lines: From 4e49794fcf554b038b1f6613af7fec848f24cefe Mon Sep 17 00:00:00 2001 From: sarthurdev <965089+sarthurdev@users.noreply.github.com> Date: Thu, 21 Nov 2024 15:30:41 +0100 Subject: [PATCH 84/85] dhcp: T6692: Fix range options not present when `exclude` is used Add smoketest to verify range options are present with `exclude` --- .../scripts/cli/test_service_dhcp-server.py | 23 ++++++++++++++++--- src/conf_mode/service_dhcp-server.py | 8 +++++++ 2 files changed, 28 insertions(+), 3 deletions(-) diff --git a/smoketest/scripts/cli/test_service_dhcp-server.py b/smoketest/scripts/cli/test_service_dhcp-server.py index 46c4e25a1f8..f891bf2954f 100755 --- a/smoketest/scripts/cli/test_service_dhcp-server.py +++ b/smoketest/scripts/cli/test_service_dhcp-server.py @@ -557,6 +557,7 @@ def test_dhcp_exclude_not_in_range(self): self.cli_set(pool + ['subnet-id', '1']) self.cli_set(pool + ['option', 'default-router', router]) self.cli_set(pool + ['exclude', router]) + self.cli_set(pool + ['range', '0', 'option', 'default-router', router]) self.cli_set(pool + ['range', '0', 'start', range_0_start]) self.cli_set(pool + ['range', '0', 'stop', range_0_stop]) @@ -569,6 +570,11 @@ def test_dhcp_exclude_not_in_range(self): self.verify_config_value(obj, ['Dhcp4', 'shared-networks'], 'name', 'EXCLUDE-TEST') self.verify_config_value(obj, ['Dhcp4', 'shared-networks', 0, 
'subnet4'], 'subnet', subnet) + pool_obj = { + 'pool': f'{range_0_start} - {range_0_stop}', + 'option-data': [{'name': 'routers', 'data': router}] + } + # Verify options self.verify_config_object( obj, @@ -579,7 +585,7 @@ def test_dhcp_exclude_not_in_range(self): self.verify_config_object( obj, ['Dhcp4', 'shared-networks', 0, 'subnet4', 0, 'pools'], - {'pool': f'{range_0_start} - {range_0_stop}'}) + pool_obj) # Check for running process self.assertTrue(process_named_running(PROCESS_NAME)) @@ -600,6 +606,7 @@ def test_dhcp_exclude_in_range(self): self.cli_set(pool + ['subnet-id', '1']) self.cli_set(pool + ['option', 'default-router', router]) self.cli_set(pool + ['exclude', exclude_addr]) + self.cli_set(pool + ['range', '0', 'option', 'default-router', router]) self.cli_set(pool + ['range', '0', 'start', range_0_start]) self.cli_set(pool + ['range', '0', 'stop', range_0_stop]) @@ -612,6 +619,16 @@ def test_dhcp_exclude_in_range(self): self.verify_config_value(obj, ['Dhcp4', 'shared-networks'], 'name', 'EXCLUDE-TEST-2') self.verify_config_value(obj, ['Dhcp4', 'shared-networks', 0, 'subnet4'], 'subnet', subnet) + pool_obj = { + 'pool': f'{range_0_start} - {range_0_stop_excl}', + 'option-data': [{'name': 'routers', 'data': router}] + } + + pool_exclude_obj = { + 'pool': f'{range_0_start_excl} - {range_0_stop}', + 'option-data': [{'name': 'routers', 'data': router}] + } + # Verify options self.verify_config_object( obj, @@ -621,12 +638,12 @@ def test_dhcp_exclude_in_range(self): self.verify_config_object( obj, ['Dhcp4', 'shared-networks', 0, 'subnet4', 0, 'pools'], - {'pool': f'{range_0_start} - {range_0_stop_excl}'}) + pool_obj) self.verify_config_object( obj, ['Dhcp4', 'shared-networks', 0, 'subnet4', 0, 'pools'], - {'pool': f'{range_0_start_excl} - {range_0_stop}'}) + pool_exclude_obj) # Check for running process self.assertTrue(process_named_running(PROCESS_NAME)) diff --git a/src/conf_mode/service_dhcp-server.py b/src/conf_mode/service_dhcp-server.py index e89448e2d17..9c59aa63d3e 100755 --- a/src/conf_mode/service_dhcp-server.py +++ b/src/conf_mode/service_dhcp-server.py @@ -87,6 +87,10 @@ def dhcp_slice_range(exclude_list, range_dict): 'start' : range_start, 'stop' : str(ip_address(e) -1) } + + if 'option' in range_dict: + r['option'] = range_dict['option'] + # On the next run our address range will start one address after # the exclude address range_start = str(ip_address(e) + 1) @@ -104,6 +108,10 @@ def dhcp_slice_range(exclude_list, range_dict): 'start': str(ip_address(e) + 1), 'stop': str(range_stop) } + + if 'option' in range_dict: + r['option'] = range_dict['option'] + if not (ip_address(r['start']) > ip_address(r['stop'])): output.append(r) else: From ec18cc393591052fd1f021c4a62220ab2e537a2e Mon Sep 17 00:00:00 2001 From: Christian Breunig Date: Sat, 23 Nov 2024 18:36:33 +0100 Subject: [PATCH 85/85] avahi: T6908: add option to define max-cache entries (#4207) * avahi: T6908: reduce runtime overhead in smoketests by using setUpClass/tearDownClass * avahi: T6908: add option to define mdns-repeater max-cache entries Add CLI option to configure `cache-entries-max` entries in Avahi daemon configuration. Default value of 4096 for "cache-entries" CVLI node was retrieved from source code. 
--- .../mdns-repeater/avahi-daemon.conf.j2 | 3 + .../service_mdns_repeater.xml.in | 17 +++++ .../scripts/cli/test_service_mdns_repeater.py | 76 ++++++++++++++----- 3 files changed, 79 insertions(+), 17 deletions(-) diff --git a/data/templates/mdns-repeater/avahi-daemon.conf.j2 b/data/templates/mdns-repeater/avahi-daemon.conf.j2 index cc64958170f..a5031945c1b 100644 --- a/data/templates/mdns-repeater/avahi-daemon.conf.j2 +++ b/data/templates/mdns-repeater/avahi-daemon.conf.j2 @@ -6,6 +6,9 @@ allow-interfaces={{ interface | join(', ') }} {% if browse_domain is vyos_defined and browse_domain | length %} browse-domains={{ browse_domain | join(', ') }} {% endif %} +{% if cache_entries is vyos_defined %} +cache-entries-max={{ cache_entries }} +{% endif %} disallow-other-stacks=no [wide-area] diff --git a/interface-definitions/service_mdns_repeater.xml.in b/interface-definitions/service_mdns_repeater.xml.in index 5d6f61d7451..9d626bf6a18 100644 --- a/interface-definitions/service_mdns_repeater.xml.in +++ b/interface-definitions/service_mdns_repeater.xml.in @@ -67,6 +67,23 @@ + + + Number of resource records cached per interface + + u32:0 + Disable caching + + + u32:1-65535 + Resource records to cache per interface + + + + + + 4096 + Disables mDNS repeater on VRRP interfaces not in MASTER state diff --git a/smoketest/scripts/cli/test_service_mdns_repeater.py b/smoketest/scripts/cli/test_service_mdns_repeater.py index f2fb3b50914..30e48683f70 100755 --- a/smoketest/scripts/cli/test_service_mdns_repeater.py +++ b/smoketest/scripts/cli/test_service_mdns_repeater.py @@ -21,36 +21,45 @@ from configparser import ConfigParser from vyos.configsession import ConfigSessionError from vyos.utils.process import process_named_running +from vyos.xml_ref import default_value base_path = ['service', 'mdns', 'repeater'] intf_base = ['interfaces', 'dummy'] config_file = '/run/avahi-daemon/avahi-daemon.conf' - class TestServiceMDNSrepeater(VyOSUnitTestSHIM.TestCase): - def setUp(self): - # Start with a clean CLI instance - self.cli_delete(base_path) + @classmethod + def setUpClass(cls): + super(TestServiceMDNSrepeater, cls).setUpClass() - # Service required a configured IP address on the interface - self.cli_set(intf_base + ['dum10', 'address', '192.0.2.1/30']) - self.cli_set(intf_base + ['dum10', 'ipv6', 'address', 'no-default-link-local']) - self.cli_set(intf_base + ['dum20', 'address', '192.0.2.5/30']) - self.cli_set(intf_base + ['dum20', 'address', '2001:db8:0:2::5/64']) - self.cli_set(intf_base + ['dum30', 'address', '192.0.2.9/30']) - self.cli_set(intf_base + ['dum30', 'address', '2001:db8:0:2::9/64']) - self.cli_set(intf_base + ['dum40', 'address', '2001:db8:0:2::11/64']) - self.cli_commit() + # ensure we can also run this test on a live system - so lets clean + # out the current configuration :) + cls.cli_delete(cls, base_path) + + cls.cli_set(cls, intf_base + ['dum10', 'address', '192.0.2.1/30']) + cls.cli_set(cls, intf_base + ['dum10', 'ipv6', 'address', 'no-default-link-local']) + cls.cli_set(cls, intf_base + ['dum20', 'address', '192.0.2.5/30']) + cls.cli_set(cls, intf_base + ['dum20', 'address', '2001:db8:0:2::5/64']) + cls.cli_set(cls, intf_base + ['dum30', 'address', '192.0.2.9/30']) + cls.cli_set(cls, intf_base + ['dum30', 'address', '2001:db8:0:2::9/64']) + cls.cli_set(cls, intf_base + ['dum40', 'address', '2001:db8:0:2::11/64']) + + cls.cli_commit(cls) + + @classmethod + def tearDownClass(cls): + cls.cli_delete(cls, intf_base + ['dum10']) + cls.cli_delete(cls, intf_base + ['dum20']) + 
cls.cli_delete(cls, intf_base + ['dum30']) + cls.cli_delete(cls, intf_base + ['dum40']) + + cls.cli_commit(cls) def tearDown(self): # Check for running process self.assertTrue(process_named_running('avahi-daemon')) self.cli_delete(base_path) - self.cli_delete(intf_base + ['dum10']) - self.cli_delete(intf_base + ['dum20']) - self.cli_delete(intf_base + ['dum30']) - self.cli_delete(intf_base + ['dum40']) self.cli_commit() # Check that there is no longer a running process @@ -130,5 +139,38 @@ def test_service_ipv6(self): self.assertEqual(conf['server']['allow-interfaces'], 'dum30, dum40') self.assertEqual(conf['reflector']['enable-reflector'], 'yes') + def test_service_max_cache_entries(self): + cli_default_max_cache = default_value(base_path + ['cache-entries']) + self.cli_set(base_path) + + # Need at least two interfaces + with self.assertRaises(ConfigSessionError): + self.cli_commit() + self.cli_set(base_path + ['interface', 'dum20']) + + # Need at least two interfaces + with self.assertRaises(ConfigSessionError): + self.cli_commit() + self.cli_set(base_path + ['interface', 'dum30']) + + self.cli_commit() + + # Validate configuration values + conf = ConfigParser(delimiters='=') + conf.read(config_file) + self.assertEqual(conf['server']['cache-entries-max'], cli_default_max_cache) + + # Set max cache entries + cache_entries = '1234' + self.cli_set(base_path + ['cache-entries', cache_entries]) + + self.cli_commit() + + # Validate configuration values + conf = ConfigParser(delimiters='=') + conf.read(config_file) + + self.assertEqual(conf['server']['cache-entries-max'], cache_entries) + if __name__ == '__main__': unittest.main(verbosity=2)
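
For reference, a minimal sketch of how the new `cache-entries` node from PATCH 85/85 is expected to be used; the interface names and the value 1234 mirror the smoketest above and are illustrative only, not defaults. Per the template diff, omitting `cache-entries` should render the default of 4096.

    # hypothetical CLI session
    set service mdns repeater interface dum20
    set service mdns repeater interface dum30
    set service mdns repeater cache-entries 1234
    commit

    # expected fragment of /run/avahi-daemon/avahi-daemon.conf
    [server]
    allow-interfaces=dum20, dum30
    cache-entries-max=1234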