diff --git a/.github/workflows/ci-ui-tests.yaml b/.github/workflows/ci-ui-tests.yaml
index 1656560fb..3aef7742f 100644
--- a/.github/workflows/ci-ui-tests.yaml
+++ b/.github/workflows/ci-ui-tests.yaml
@@ -21,7 +21,22 @@ jobs:
         uses: actions/setup-python@v2
         with:
           python-version: "3.10"
+#
+      - name: run install_microk8s.sh
+        run: |
+          sudo snap install microk8s --classic --channel=1.25/stable
+          sudo apt-get install snmp -y
+          sudo apt-get install python3-dev -y
+
+      - name: run automatic_setup.sh
+        working-directory: integration_tests
+        run: integration_tests/automatic_setup.sh
+#      - name: run tests
+#        working-directory: integration_tests
+#        run: |
+#          poetry run pytest --splunk_host="localhost" --splunk_password="changeme2" --trap_external_ip="$(hostname -I | cut -d " " -f1)"
+#
       - name: install dependencies
         working-directory: ui_tests
         run: pip install -r requirements.txt
@@ -29,4 +44,4 @@ jobs:
       - name: run tests
         working-directory: ui_tests
         run: |
-          pytest
\ No newline at end of file
+          pytest --splunk-user=admin --splunk_password="changeme2" --splunk_host="localhost" --trap_external_ip="$(hostname -I | cut -d " " -f1)" -k current
\ No newline at end of file
diff --git a/integration_tests/values.yaml b/integration_tests/values.yaml
index 8f92d8a6c..4f69e02a0 100644
--- a/integration_tests/values.yaml
+++ b/integration_tests/values.yaml
@@ -1,3 +1,33 @@
+UI:
+  enable: true
+  frontEnd:
+    NodePort: 30001
+    repository: ghcr.io/splunk/sc4snmp-ui/frontend/container
+    tag: "develop"
+    pullPolicy: "Always"
+  backEnd:
+    NodePort: 30002
+    repository: ghcr.io/splunk/sc4snmp-ui/backend/container
+    tag: "develop"
+    pullPolicy: "Always"
+  init:
+    image: registry.access.redhat.com/ubi9/ubi
+    pullPolicy: IfNotPresent
+
+  # valuesFileDirectory is obligatory if the UI is used. It is an absolute directory path on the host machine
+  # where values.yaml is located and where configuration files from the UI will be generated.
+  valuesFileDirectory: "/home/splunker"
+
+  # valuesFileName is the exact name of the YAML file with the user's configuration, located inside the directory
+  # specified in valuesFileDirectory. It is optional. If it is provided, this file will be updated with the
+  # configuration from the UI. If valuesFileName is empty, or the provided file name can't be found inside the
+  # valuesFileDirectory directory, then the configuration from the UI will be saved in several files, one file
+  # per section, inside the valuesFileDirectory directory.
+  valuesFileName: "new_values.yaml"
+
+  # If keepSectionFiles is set to true, separate configuration files for the different sections will be saved in the
+  # valuesFileDirectory directory regardless of whether valuesFileName is properly configured.
+  keepSectionFiles: false
 splunk:
   enabled: true
   protocol: https
@@ -6,11 +36,14 @@ splunk:
   insecureSSL: "true"
   port: "8088"
 image:
-  repository: "snmp-local"
-  tag: "latest"
-  pullPolicy: "Never"
+  repository: wzya732/sc4snmp
+  pullPolicy: Always
+  tag: "ui"
 traps:
-  replicaCount: 1
+  #service:
+  #  type: NodePort
+  #  externalTrafficPolicy: Cluster
+  #  nodePort: 30000
   communities:
     2c:
       - public
@@ -22,67 +55,189 @@ traps:
   #loadBalancerIP: The IP address in the metallb pool
   loadBalancerIP: ###LOAD_BALANCER_ID###
 worker:
-  poller:
-    replicaCount: 1
-    #changed replicaCount from 4 to 1
-    concurrency: 4
-    prefetch: 1
+  # There are 3 types of workers
   trap:
-    autoscaling:
-      enabled: false
+    # replicaCount: number of trap-worker pods which consume trap tasks
+    replicaCount: 1
+    #autoscaling: use it instead of replicaCount in order to make the pods scale automatically
+    #autoscaling:
+    #  enabled: true
+    #  minReplicas: 2
+    #  maxReplicas: 10
+    #  targetCPUUtilizationPercentage: 80
+  poller:
+    # replicaCount: number of poller-worker pods which consume polling tasks
     replicaCount: 1
-    concurrency: 8
-    prefetch: 60
+    #autoscaling: use it instead of replicaCount in order to make the pods scale automatically
+    #autoscaling:
+    #  enabled: true
+    #  minReplicas: 2
+    #  maxReplicas: 10
+    #  targetCPUUtilizationPercentage: 80
   sender:
+    # replicaCount: number of sender-worker pods which consume sending tasks
     replicaCount: 1
-    concurrency: 4
-    prefetch: 60
-  profilesReloadDelay: 1
-  # replicas: Number of replicas for worker container should two or more
+    # autoscaling: use it instead of replicaCount in order to make the pods scale automatically
+    #autoscaling:
+    #  enabled: true
+    #  minReplicas: 2
+    #  maxReplicas: 10
+    #  targetCPUUtilizationPercentage: 80
   # udpConnectionTimeout: timeout in seconds for SNMP operations
   #udpConnectionTimeout: 5
   logLevel: "DEBUG"
 scheduler:
-  logLevel: "INFO"
-  customTranslations:
-    IP-MIB:
-      icmpOutEchoReps: myCustomName1
-  profiles: |
-    v3profile:
-      frequency: 5
-      varBinds:
-        - ['IF-MIB']
-        - ['TCP-MIB']
-        - ['UDP-MIB']
-#  profiles: |
-#    generic_switch:
-#      frequency: 60
-#      varBinds:
-#        - ['SNMPv2-MIB', 'sysDescr']
-#        - ['SNMPv2-MIB', 'sysName', 0]
-#        - ['IF-MIB']
-#        - ['TCP-MIB']
+  logLevel: "DEBUG"
+  profiles: |
+    DCN_profile:
+      frequency: 100
+      varBinds:
+        - ['CISCO-ENTITY-SENSOR-MIB','entSensorValue']
+        - ['CISCO-ENTITY-SENSOR-MIB','entSensorThresholdValue']
+        - ['CISCO-ENTITY-SENSOR-MIB','entSensorScale']
+        - ['CISCO-ENTITY-SENSOR-MIB','entSensorPrecision']
+        - ['CISCO-PROCESS-MIB','cpmCPULoadAvg1min']
+        - ['CISCO-PROCESS-MIB','cpmCPULoadAvg5min']
+        - ['CISCO-PROCESS-MIB','cpmCPULoadAvg15min']
+        - ['CISCO-PROCESS-MIB','cpmCPUMemoryUsed']
+        - ['CISCO-PROCESS-MIB','cpmCPUMemoryFree']
+        - ['ENTITY-MIB','entPhysicalDescr']
+    single_metric:
+      frequency: 60
+      varBinds:
+        - ["IF-MIB","ifMtu",1]
+        - ["IF-MIB","ifIndex",1]
+    DCN_walk:
+      condition:
+        type: "walk"
+      varBinds:
+        - ['IP-MIB']
+        - ['IF-MIB']
+        - ['TCP-MIB']
+        - ['UDP-MIB']
+        - ['CISCO-ENTITY-SENSOR-MIB']
+        - [ 'CISCO-PROCESS-MIB','cpmCPULoadAvg1min' ]
+        - [ 'CISCO-PROCESS-MIB','cpmCPULoadAvg5min' ]
+        - [ 'CISCO-PROCESS-MIB','cpmCPULoadAvg15min' ]
+        - [ 'CISCO-PROCESS-MIB','cpmCPUMemoryUsed' ]
+        - [ 'CISCO-PROCESS-MIB','cpmCPUMemoryFree' ]
+    small_walk:
+      condition:
+        type: "walk"
+      varBinds:
+        - ['IP-MIB']
+        - ['IF-MIB']
+        - ['TCP-MIB']
+        - ['UDP-MIB']
+    gt_profile:
+      frequency: 10
+      conditions:
+        - field: IF-MIB.ifIndex
+          operation: "gt"
+          value: 1
+      varBinds:
+        - ['IF-MIB', 'ifOutDiscards']
+    lt_profile:
+      frequency: 10
+      conditions:
+        - field: IF-MIB.ifIndex
+          operation: "lt"
+          value: 2
+      varBinds:
+        - ['IF-MIB', 'ifOutDiscards']
+    in_profile:
+      frequency: 10
+      conditions:
+        - field: IF-MIB.ifDescr
+          operation: "in"
+          value:
+            - "eth0"
+            - "test value"
+      varBinds:
+        - ['IF-MIB', 'ifOutDiscards']
+    multiple_conditions:
+      frequency: 10
+      conditions:
+        - field: IF-MIB.ifIndex
+          operation: "gt"
+          value: 1
+        - field: IF-MIB.ifDescr
+          operation: "in"
+          value:
+            - "eth0"
+            - "test value"
+      varBinds:
+        - ['IF-MIB', 'ifOutDiscards']
+    eq_profile_lab:
+      frequency: 10
+      conditions:
+        - field: IF-MIB.ifDescr
+          operation: "equals"
+          value: "Loopback16"
+      varBinds:
+        - ['IF-MIB', 'ifOutDiscards']
+    lt_profile2:
+      frequency: 10
+      conditions:
+        - field: IF-MIB.ifIndex
+          operation: "lt"
+          value: 200
+      varBinds:
+        - ['IF-MIB', 'ifOutDiscards']
+    lt_profile3:
+      frequency: 10
+      conditions:
+        - field: IF-MIB.ifIndex
+          operation: "lt"
+          value: 3
+      varBinds:
+        - ['IF-MIB', 'ifOutDiscards']
+    IF_profile:
+      frequency: 30
+      conditions:
+        - field: IF-MIB.ifAdminStatus
+          operation: "equals"
+          value: "up"
+        - field: IF-MIB.ifOperStatus
+          operation: "equals"
+          value: "up"
+      varBinds:
+        - ["IF-MIB", "ifDescr"]
+        - ["IF-MIB", "ifAdminStatus"]
+        - ["IF-MIB", "ifOperStatus"]
+        - ["IF-MIB", "ifName"]
+        - ["IF-MIB", "ifAlias"]
+        - ["IF-MIB", "ifIndex"]
+        - ["IF-MIB", "ifSpeed"]
+        - ["IF-MIB", "ifInDiscards"]
+        - ["IF-MIB", "ifInErrors"]
+        - ["IF-MIB", "ifInOctets"]
+        - ["IF-MIB", "ifOutDiscards"]
+        - ["IF-MIB", "ifOutErrors"]
+        - ["IF-MIB", "ifOutOctets"]
+        - ["IF-MIB", "ifOutQLen"]
+        - ["IF-MIB", "ifLastChange"]
 poller:
-  usernameSecrets:
-    - sv3poller
+#  metricsIndexingEnabled: true
+#  usernameSecrets:
+#    - testv3
 #    - sc4snmp-hlab-sha-aes
 #    - sc4snmp-hlab-sha-des
   inventory: |
     address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete
-    ###LOAD_BALANCER_ID###,,2c,public,,,600,,,
+    10.202.1.154,,2c,public,,,300,,,
+#    group1,,2c,public,,,3000,generic_switch,,
+#    group2,163,2c,public,,,3000,generic_switch,,
+
 #    10.0.0.100,,3,,sc4snmp-hlab-sha-des,,1800,,,
 sim:
   # sim must be enabled if you want to use signalFx
   enabled: false
-#  signalfxToken: BCwaJ_Ands4Xh7Nrg
-#  signalfxRealm: us0
+  signalfxToken: _wgJ0QUHdccbtKPXFGHUCA
+  signalfxRealm: us1
 mongodb:
   pdb:
     create: true
   persistence:
     storageClass: "microk8s-hostpath"
   volumePermissions:
-    enabled: true
-redis:
-  architecture: standalone
-  auth:
-    enabled: false
\ No newline at end of file
+    enabled: true
\ No newline at end of file
diff --git a/ui_tests/config/config.py b/ui_tests/config/config.py
index 91353be56..d8b04e8a4 100644
--- a/ui_tests/config/config.py
+++ b/ui_tests/config/config.py
@@ -13,7 +13,8 @@ def get_execution_type():
 # EXECUTION_TYPE_LOCAL = "remote"
 EXECUTION_TYPE = get_execution_type()
 
-UI_URL = "http://10.202.2.199:30001/"
+# UI_URL = "http://10.202.2.199:30001/"
+UI_URL = "http://localhost:30001/"
 
 EVENT_INDEX = "netops"
 LOGS_INDEX = "em_logs"
diff --git a/ui_tests/tests/test_error_handling_and_complex_scenarios.py b/ui_tests/tests/test_error_handling_and_complex_scenarios.py
index 28dac0952..1e7d34e48 100644
--- a/ui_tests/tests/test_error_handling_and_complex_scenarios.py
+++ b/ui_tests/tests/test_error_handling_and_complex_scenarios.py
@@ -20,7 +20,7 @@
 values_reader = YamlValuesReader()
 
 
-@pytest.mark.current
+# @pytest.mark.current
 def test_trying_to_configure_profle_with_the_same_name():
     """
     Configure profile
@@ -54,7 +54,7 @@ def test_trying_to_configure_profle_with_the_same_name():
     p_profiles.delete_profile_from_list(profile_name)
 
 
-@pytest.mark.current
+# @pytest.mark.current
 def test_trying_to_configure_group_with_the_same_name():
     """
     Configure group
@@ -85,7 +85,7 @@ def test_trying_to_configure_group_with_the_same_name():
     p_groups.delete_group_from_list(group_name)
 
 
-@pytest.mark.current
+# @pytest.mark.current
 def test_trying_to_add_group_device_which_already_exists():
     """
     Configure group with device
@@ -131,7 +131,7 @@ def test_trying_to_add_group_device_which_already_exists():
     p_groups.delete_group_from_list(group_name)
 
 
-@pytest.mark.current
+# @pytest.mark.current
 def test_trying_to_add_inventory_with_host_which_already_exists():
     """
     Configure inventory with host
@@ -165,7 +165,7 @@ def test_trying_to_add_inventory_with_host_which_already_exists():
     p_inventory.delete_entry_from_list(host_ip)
 
 
-@pytest.mark.current
+# @pytest.mark.current
 def test_trying_to_add_inventory_with_group_which_is_already_added():
     """
     Configure inventory with group
@@ -209,7 +209,7 @@ def test_trying_to_add_inventory_with_group_which_is_already_added():
     p_groups.delete_group_from_list(group_name)
 
 
-@pytest.mark.current
+# @pytest.mark.current
 def test_trying_to_add_inventory_group_with_host_which_is_configured_as_host():
     """
     Configure inventory with group with host
@@ -260,7 +260,7 @@ def test_trying_to_add_inventory_group_with_host_which_is_configured_as_host():
     p_groups.delete_group_from_list(group_name)
 
 
-@pytest.mark.current
+# @pytest.mark.current
 def test_removing_group_which_is_configured_in_inventory():
     """
     Configure inventory -> add group as inventory entry
@@ -302,7 +302,7 @@ def test_removing_group_which_is_configured_in_inventory():
     assert is_on_list is False
 
 
-@pytest.mark.current
+# @pytest.mark.current
 def test_removing_profile_which_is_configured_in_inventory():
     """
     Configure inventory with profile
@@ -352,7 +352,7 @@ def test_removing_profile_which_is_configured_in_inventory():
     p_inventory.delete_entry_from_list(host)
 
 
-@pytest.mark.current
+# @pytest.mark.current
 def test_try_to_add_to_inventory_group_which_does_not_exist():
     """
     Configure inventory with group which does not exist
@@ -377,7 +377,7 @@ def test_try_to_add_to_inventory_group_which_does_not_exist():
     assert is_on_list is False
 
 
-@pytest.mark.current
+# @pytest.mark.current
 def test_trying_to_edit_profile_name_into_profile_name_that_exists():
     """
     Configure two profiles
@@ -415,7 +415,7 @@ def test_trying_to_edit_profile_name_into_profile_name_that_exists():
     p_profiles.delete_profile_from_list(profile_name_2)
 
 
-@pytest.mark.current
+# @pytest.mark.current
 def test_trying_to_edit_group_name_into_another_group_name():
     """
     Configure two groups
@@ -449,7 +449,7 @@ def test_trying_to_edit_group_name_into_another_group_name():
     p_groups.delete_group_from_list(group_name_2)
 
 
-@pytest.mark.current
+# @pytest.mark.current
 def test_trying_to_edit_inventory_host_into_host_which_exists():
     """
     Configure two inventory hosts
diff --git a/ui_tests/tests/test_groups_basic.py b/ui_tests/tests/test_groups_basic.py
index 98d282c0b..a79db309d 100644
--- a/ui_tests/tests/test_groups_basic.py
+++ b/ui_tests/tests/test_groups_basic.py
@@ -69,7 +69,7 @@ def test_change_group_name():
     is_on_list = p_groups.check_if_groups_is_on_list(new_group_name)
     assert is_on_list is False
 
-
+# @pytest.mark.current
 def test_try_adding_device_to_group_with_no_data():
     """
     Test that user is not able to add device with no data
@@ -77,7 +77,7 @@ def test_try_adding_device_to_group_with_no_data():
     then click cancel
     check no device on list
     """
-    group_name = f"test-group-device-with-no-data"
+    group_name = f"device-with-no-data"
f"device-with-no-data" p_header.switch_to_groups() is_on_list = p_groups.check_if_groups_is_on_list(group_name) assert is_on_list is False diff --git a/ui_tests/tests/test_inventory_basic.py b/ui_tests/tests/test_inventory_basic.py index f9138e9e7..396fd7a00 100644 --- a/ui_tests/tests/test_inventory_basic.py +++ b/ui_tests/tests/test_inventory_basic.py @@ -117,7 +117,7 @@ def test_add_group_into_inventory_entry(): is_on_list = p_groups.check_if_groups_is_on_list(group_name) assert is_on_list is False - +@pytest.mark.current def test_try_to_add_device_with_no_data_into_inventory(): """ Test that user is not able to add inventory entry with no data @@ -140,8 +140,6 @@ def test_try_to_add_device_with_no_data_into_inventory(): assert is_on_list is False p_inventory.set_host_or_group_name(host) - error = p_inventory.get_host_missing_error() - assert error is None p_inventory.click_submit_button_for_add_entry() error = p_inventory.get_host_missing_error() assert error is None