From f59eaeac44b30443686103abd56ab63e167defc4 Mon Sep 17 00:00:00 2001 From: togashidm Date: Wed, 31 Jul 2024 18:19:07 +0000 Subject: [PATCH] Release 0.3.0 - Versions bump for K8s 1.30 and Go 1.22. - Security patches. - Dependency Updates - Enabling Power KPIs in Custom Resource Definitions (CRDs). - Logs improvements. - Added timeout to HTTP clients. - Test timeout fixes. - Comprehensive Framework Testing. - Fine tuning on horizontal scaling model. - Increase data range usage for CPU rightsizing model. - Resource type changed to Int64. - Plugins refactoring. - Documentation updates. Co-authored-by: tmetsch tmetsch@users.noreply.github.com Co-authored-by: togashidm togashidm@users.noreply.github.com Co-authored-by: kcollinsInt kcollinsInt@users.noreply.github.com Co-authored-by: andrepcb andrepcb@users.noreply.github.com --- .github/workflows/sca.yml | 20 +- .github/workflows/test-build.yml | 14 +- Dockerfile | 2 +- Makefile | 4 +- README.md | 6 +- artefacts/deploy/manifest.yaml | 2 +- artefacts/intents_crds_v1alpha1.yaml | 5 +- cmd/main.go | 3 +- docs/actuators.md | 24 +- docs/pluggability.md | 75 +- go.mod | 72 +- go.sum | 251 ++--- hack/generate_code.sh | 21 +- hack/generate_protobuf.sh | 4 +- .../plugins/v1alpha1/actuator_client_stub.go | 6 +- .../plugins/v1alpha1/actuator_plugin_stub.go | 4 +- pkg/api/plugins/v1alpha1/plugin_manager.go | 2 +- .../plugins/v1alpha1/plugin_manager_test.go | 5 +- pkg/api/plugins/v1alpha1/protobufs/api.pb.go | 18 +- pkg/api/plugins/v1alpha1/protobufs/api.proto | 7 +- .../plugins/v1alpha1/protobufs/api_grpc.pb.go | 30 +- pkg/api/plugins/v1alpha1/test_utils.go | 12 +- pkg/common/config.go | 2 +- pkg/common/ttl_cache.go | 2 +- pkg/common/ttl_cache_test.go | 4 +- pkg/common/types.go | 17 +- pkg/common/types_test.go | 50 +- pkg/controller/intent_controller.go | 12 +- pkg/controller/intent_controller_test.go | 28 +- pkg/controller/intent_monitor.go | 2 +- pkg/controller/intent_monitor_test.go | 6 +- pkg/controller/kpi.go | 4 +- pkg/controller/kpi_test.go | 8 +- pkg/controller/pod_monitor.go | 2 +- pkg/controller/pod_monitor_test.go | 6 +- pkg/controller/profile_monitor.go | 4 +- pkg/controller/profile_monitor_test.go | 6 +- pkg/controller/state_helper.go | 10 +- pkg/controller/state_helper_test.go | 6 +- pkg/controller/telemetry.go | 5 +- pkg/controller/telemetry_test.go | 2 +- pkg/controller/tracer_test.go | 3 +- .../clientset/versioned/clientset.go | 8 +- pkg/generated/clientset/versioned/doc.go | 17 - .../versioned/fake/clientset_generated.go | 5 +- .../clientset/versioned/fake/register.go | 1 - .../clientset/versioned/scheme/register.go | 1 - .../intents/v1alpha1/fake/fake_intent.go | 6 +- .../v1alpha1/fake/fake_intents_client.go | 1 - .../intents/v1alpha1/fake/fake_kpiprofile.go | 6 +- .../typed/intents/v1alpha1/intent.go | 1 - .../typed/intents/v1alpha1/intents_client.go | 5 +- .../typed/intents/v1alpha1/kpiprofile.go | 1 - .../informers/externalversions/factory.go | 92 +- .../informers/externalversions/generic.go | 1 - .../intents/v1alpha1/intent.go | 1 - .../intents/v1alpha1/kpiprofile.go | 1 - .../internalinterfaces/factory_interfaces.go | 1 - .../listers/intents/v1alpha1/intent.go | 1 - .../listers/intents/v1alpha1/kpiprofile.go | 1 - pkg/planner/actuators/platform/analyze.py | 2 +- pkg/planner/actuators/platform/rdt.go | 11 +- pkg/planner/actuators/platform/rdt_test.go | 10 +- .../scaling/analytics/cpu_rightsizing.py | 27 +- .../scaling/analytics/horizontal_scaling.py | 17 +- pkg/planner/actuators/scaling/cpu_scale.go | 41 +- 
.../actuators/scaling/cpu_scale_test.go | 102 +- pkg/planner/actuators/scaling/rm_pod.go | 8 +- pkg/planner/actuators/scaling/scale_out.go | 30 +- .../actuators/scaling/scale_out_test.go | 18 +- pkg/planner/actuators/types.go | 2 +- pkg/planner/astar/astar_planner.go | 4 +- pkg/planner/astar/astar_planner_test.go | 25 +- pkg/planner/astar/astar_test.go | 4 +- pkg/planner/astar/priority_queue_test.go | 10 +- pkg/planner/astar/state_graph_test.go | 6 +- pkg/tests/dummy_rm_pod_plugin.go | 55 -- pkg/tests/dummy_scale_out_plugin.go | 54 -- pkg/tests/full_framework_test.go | 739 ++++++++++++++ pkg/tests/full_planner_test.go | 58 +- pkg/tests/traces/cpu_scale.json | 8 + pkg/tests/traces/defaults.json | 20 + pkg/tests/traces/queries.json | 18 + pkg/tests/traces/rm_pod.json | 4 + pkg/tests/traces/scale_out.json | 7 + pkg/tests/traces/trace_0/effects.json | 80 ++ pkg/tests/traces/trace_0/events.json | 97 ++ pkg/tests/traces/trace_1/effects.json | 198 ++++ pkg/tests/traces/trace_1/events.json | 905 ++++++++++++++++++ plugins/cpu_scale/Dockerfile | 11 +- plugins/cpu_scale/cmd/cpu_scale.go | 127 +-- plugins/cpu_scale/cmd/cpu_scale_test.go | 114 ++- .../cpu_scale/cpu-scale-actuator-plugin.yaml | 48 +- plugins/plugins_helper.go | 58 +- plugins/plugins_helper_test.go | 191 ++-- plugins/rdt/Dockerfile | 10 +- plugins/rdt/cmd/rdt.go | 102 +- plugins/rdt/cmd/rdt_test.go | 55 ++ plugins/rdt/rdt-actuator-plugin.yaml | 54 +- plugins/rm_pod/Dockerfile | 2 +- plugins/rm_pod/cmd/rm_pod.go | 92 +- plugins/rm_pod/cmd/rm_pod_test.go | 50 + plugins/rm_pod/rmpod-actuator-plugin.yaml | 32 +- plugins/scale_out/Dockerfile | 10 +- plugins/scale_out/cmd/scale_out.go | 103 +- plugins/scale_out/cmd/scale_out_test.go | 60 +- .../scale_out/scaleout-actuator-plugin.yaml | 46 +- 107 files changed, 3359 insertions(+), 1282 deletions(-) delete mode 100644 pkg/generated/clientset/versioned/doc.go delete mode 100644 pkg/tests/dummy_rm_pod_plugin.go delete mode 100644 pkg/tests/dummy_scale_out_plugin.go create mode 100644 pkg/tests/full_framework_test.go create mode 100644 pkg/tests/traces/cpu_scale.json create mode 100644 pkg/tests/traces/defaults.json create mode 100644 pkg/tests/traces/queries.json create mode 100644 pkg/tests/traces/rm_pod.json create mode 100644 pkg/tests/traces/scale_out.json create mode 100644 pkg/tests/traces/trace_0/effects.json create mode 100644 pkg/tests/traces/trace_0/events.json create mode 100644 pkg/tests/traces/trace_1/effects.json create mode 100644 pkg/tests/traces/trace_1/events.json create mode 100644 plugins/rdt/cmd/rdt_test.go create mode 100644 plugins/rm_pod/cmd/rm_pod_test.go diff --git a/.github/workflows/sca.yml b/.github/workflows/sca.yml index 7b9d319..a46d50f 100644 --- a/.github/workflows/sca.yml +++ b/.github/workflows/sca.yml @@ -11,7 +11,7 @@ jobs: name: Shellcheck runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - run: | wget -q https://github.com/koalaman/shellcheck/releases/download/stable/shellcheck-stable.linux.x86_64.tar.xz tar -xvf shellcheck-stable.linux.x86_64.tar.xz @@ -21,18 +21,18 @@ jobs: runs-on: ubuntu-latest name: Hadolint steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - run: wget -q https://github.com/hadolint/hadolint/releases/download/v2.12.0/hadolint-Linux-x86_64 -O hadolint; chmod +x hadolint ; find . 
-type f \( -name "Dockerfile*" \) -print0 | xargs -n 1 -0 ./hadolint ; gofmt-imports: runs-on: ubuntu-latest name: Go Fmt and Go Import steps: - - uses: actions/checkout@v3 - - uses: actions/setup-go@v3 + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 with: - go-version: 1.19 + go-version-file: 'go.mod' - run: | - go install golang.org/x/tools/cmd/goimports@v0.6.0 && goimports -l . && gofmt -l . + go install golang.org/x/tools/cmd/goimports@v0.20.0 && goimports -l . && gofmt -l . shell: bash golangci: permissions: @@ -41,12 +41,12 @@ jobs: runs-on: ubuntu-latest name: lint steps: - - uses: actions/setup-go@v3 + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 with: - go-version: 1.19 - - uses: actions/checkout@v3 + go-version-file: 'go.mod' - name: golangci-lint run: | - go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.51.2 + go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.57.2 make golangci-lint shell: bash diff --git a/.github/workflows/test-build.yml b/.github/workflows/test-build.yml index 3436258..7264e33 100644 --- a/.github/workflows/test-build.yml +++ b/.github/workflows/test-build.yml @@ -10,9 +10,9 @@ jobs: build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up Go - uses: actions/setup-go@v3 + uses: actions/setup-go@v5 with: go-version-file: 'go.mod' - name: Build @@ -20,9 +20,9 @@ jobs: utests: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up Go - uses: actions/setup-go@v3 + uses: actions/setup-go@v5 with: go-version-file: 'go.mod' - name: Unit test @@ -30,10 +30,10 @@ jobs: race: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up Go - uses: actions/setup-go@v3 + uses: actions/setup-go@v5 with: go-version-file: 'go.mod' - name: Test race - run: go test -count=1 -race ./... + run: go test -count=1 -parallel 1 -race ./... diff --git a/Dockerfile b/Dockerfile index 9738e12..bbdfe76 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,7 +1,7 @@ # Copyright (c) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -FROM golang:1.19 AS build +FROM golang:1.22 AS build WORKDIR /app diff --git a/Makefile b/Makefile index 54844d8..e20b431 100644 --- a/Makefile +++ b/Makefile @@ -4,7 +4,7 @@ RMPOD_PLUGIN=rm_pod RDT_PLUGIN=rdt CPU_PLUGIN=cpu_scale GO_CILINT_CHECKERS=errcheck,goimports,gosec,gosimple,govet,ineffassign,nilerr,revive,staticcheck,unused -DOCKER_IMAGE_VERSION=0.2.0 +DOCKER_IMAGE_VERSION=0.3.0 api: hack/generate_code.sh @@ -57,7 +57,7 @@ prepare-build: go mod tidy utest: - go test -count=1 -v ./... + go test -count=1 -parallel 1 -v ./... test: hack/run_test.sh diff --git a/README.md b/README.md index f36f6d0..9bed2e8 100644 --- a/README.md +++ b/README.md @@ -67,9 +67,9 @@ Step 3) deploy the actuators of interest using: These steps should be followed by setting up your default profiles (if needed). -We recommend the usage of a service mesh like [Linkerd](https://linkerd.io/) or [Istio](https://istio.io/) to ensure -encryption and monitoring capabilities for the subcomponents of the planning framework themselves. After creating the -namespace, enable auto-injection; For Linkerd do: +We recommend the usage of a service mesh like [Linkerd](https://linkerd.io/) or [Istio](https://istio.io/) to ensure +robust authentication, encryption and monitoring capabilities for the subcomponents of the planning framework +themselves. 
After creating the namespace, + plugins.StartActuatorPlugin(actuator, "localhost", 12345, "localhost", "33333") diff --git a/go.mod b/go.mod index ae2d7a1..1a1c54e 100644 --- a/go.mod +++ b/go.mod @@ -1,38 +1,44 @@ module github.com/intel/intent-driven-orchestration -go 1.19 +go 1.22.0 require ( - github.com/stretchr/testify v1.8.1 - go.mongodb.org/mongo-driver v1.11.2 - google.golang.org/grpc v1.56.3 + github.com/stretchr/testify v1.9.0 + go.mongodb.org/mongo-driver v1.15.0 + google.golang.org/grpc v1.63.2 google.golang.org/protobuf v1.33.0 - k8s.io/api v0.26.1 - k8s.io/apimachinery v0.26.1 - k8s.io/client-go v0.26.1 - k8s.io/code-generator v0.26.1 - k8s.io/klog/v2 v2.90.0 + k8s.io/api v0.30.0 + k8s.io/apimachinery v0.30.0 + k8s.io/client-go v0.30.0 + k8s.io/code-generator v0.30.0 + k8s.io/klog/v2 v2.120.1 +) + +require ( + github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/uuid v1.6.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de // indirect + k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 // indirect ) require ( github.com/davecgh/go-spew v1.1.1 // indirect - github.com/emicklei/go-restful/v3 v3.9.0 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/evanphx/json-patch v4.12.0+incompatible // indirect - github.com/go-logr/logr v1.2.3 // indirect - github.com/go-openapi/jsonpointer v0.19.5 // indirect - github.com/go-openapi/jsonreference v0.20.0 // indirect - github.com/go-openapi/swag v0.19.14 // indirect + github.com/go-logr/logr v1.4.1 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/swag v0.22.3 // indirect; indirectgo mod tidy github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/protobuf v1.5.4 // indirect github.com/golang/snappy v0.0.1 // indirect - github.com/google/gnostic v0.5.7-v3refs // indirect - github.com/google/go-cmp v0.5.9 // indirect - github.com/google/gofuzz v1.1.0 // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/google/gofuzz v1.2.0 // indirect github.com/imdario/mergo v0.3.6 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/compress v1.13.6 // indirect - github.com/mailru/easyjson v0.7.6 // indirect + github.com/mailru/easyjson v0.7.7 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe // indirect @@ -40,30 +46,28 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/stretchr/objx v0.5.0 // indirect + github.com/stretchr/objx v0.5.2 // indirect github.com/xdg-go/pbkdf2 v1.0.0 // indirect - github.com/xdg-go/scram v1.1.1 // indirect - github.com/xdg-go/stringprep v1.0.3 // indirect + github.com/xdg-go/scram v1.1.2 // indirect + github.com/xdg-go/stringprep v1.0.4 // indirect github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d // indirect golang.org/x/crypto v0.21.0 // indirect - golang.org/x/mod v0.8.0 // indirect + golang.org/x/mod v0.15.0 // indirect golang.org/x/net v0.23.0 // indirect - golang.org/x/oauth2 v0.7.0 // indirect - golang.org/x/sync v0.1.0 // indirect + golang.org/x/oauth2 v0.17.0 // indirect + golang.org/x/sync v0.6.0 // indirect 
golang.org/x/sys v0.18.0 // indirect golang.org/x/term v0.18.0 // indirect golang.org/x/text v0.14.0 // indirect - golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect - golang.org/x/tools v0.6.0 // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect + golang.org/x/time v0.3.0 // indirect + golang.org/x/tools v0.18.0 // indirect + google.golang.org/appengine v1.6.8 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/gengo v0.0.0-20220902162205-c0856e24416d // indirect - k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect - k8s.io/utils v0.0.0-20221107191617-1a15be271d1d // indirect - sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect + k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect + k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect sigs.k8s.io/yaml v1.3.0 // indirect ) diff --git a/go.sum b/go.sum index 8454b73..d4c3da8 100644 --- a/go.sum +++ b/go.sum @@ -1,63 +1,42 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= -github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= -github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= -github.com/go-openapi/jsonpointer v0.19.5/go.mod 
h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA= -github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng= -github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= -github.com/google/gnostic v0.5.7-v3refs/go.mod 
h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= -github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= @@ -68,17 +47,15 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= 
-github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= -github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -88,181 +65,141 @@ github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe h1:iruDEfMl2E6f github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs= -github.com/onsi/gomega v1.23.0 h1:/oxKu9c2HVap+F3PfKort2Hw5DEU+HGlW8n+tguWsys= +github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY= +github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM= +github.com/onsi/gomega v1.31.0 h1:54UJxxj6cPInHS3a35wm6BK/F9nHYueZ1NVujHDrnXE= +github.com/onsi/gomega v1.31.0/go.mod h1:DW9aCi7U6Yi40wNVAvT6kzFnEVEI5n3DloYBiKiT6zk= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify 
v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= -github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= -github.com/xdg-go/scram v1.1.1 h1:VOMT+81stJgXW3CpHyqHN3AXDYIMsx56mEFrB37Mb/E= -github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= -github.com/xdg-go/stringprep v1.0.3 h1:kdwGpVNwPFtjs98xCGkHjQtGKh86rDcRZN17QEMCOIs= -github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= +github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY= +github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= +github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8= +github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d h1:splanxYIlg+5LfHAM6xpdFEAYOk8iySO56hMFq6uLyA= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.mongodb.org/mongo-driver v1.11.2 h1:+1v2rDQUWNcGW7/7E0Jvdz51V38XXxJfhzbV17aNHCw= -go.mongodb.org/mongo-driver v1.11.2/go.mod h1:s7p5vEtfbeR1gYi6pnj3c3/urpbLv2T5Sfd6Rp2HBB8= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.mongodb.org/mongo-driver v1.15.0 h1:rJCKC8eEliewXjZGf0ddURtl7tTVy1TK3bfl0gkUSLc= +go.mongodb.org/mongo-driver v1.15.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod 
h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= +golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= -golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ= +golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync 
v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44= -golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools 
v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= +golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= -google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= -google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de 
h1:cZGRis4/ot9uVm639a+rHCUaG0JJHEsdyzSQTMX+suY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:H4O17MA/PE9BsGx3w+a+W2VOLLD1Qf7oJneAoU6WktY= +google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM= +google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/api v0.26.1 h1:f+SWYiPd/GsiWwVRz+NbFyCgvv75Pk9NK6dlkZgpCRQ= -k8s.io/api v0.26.1/go.mod h1:xd/GBNgR0f707+ATNyPmQ1oyKSgndzXij81FzWGsejg= -k8s.io/apimachinery v0.26.1 h1:8EZ/eGJL+hY/MYCNwhmDzVqq2lPl3N3Bo8rvweJwXUQ= -k8s.io/apimachinery v0.26.1/go.mod h1:tnPmbONNJ7ByJNz9+n9kMjNP8ON+1qoAIIC70lztu74= -k8s.io/client-go v0.26.1 h1:87CXzYJnAMGaa/IDDfRdhTzxk/wzGZ+/HUQpqgVSZXU= -k8s.io/client-go v0.26.1/go.mod h1:IWNSglg+rQ3OcvDkhY6+QLeasV4OYHDjdqeWkDQZwGE= -k8s.io/code-generator v0.26.1 h1:dusFDsnNSKlMFYhzIM0jAO1OlnTN5WYwQQ+Ai12IIlo= -k8s.io/code-generator v0.26.1/go.mod h1:OMoJ5Dqx1wgaQzKgc+ZWaZPfGjdRq/Y3WubFrZmeI3I= -k8s.io/gengo v0.0.0-20220902162205-c0856e24416d h1:U9tB195lKdzwqicbJvyJeOXV7Klv+wNAWENRnXEGi08= -k8s.io/gengo v0.0.0-20220902162205-c0856e24416d/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.90.0 h1:VkTxIV/FjRXn1fgNNcKGM8cfmL1Z33ZjXRTVxKCoF5M= -k8s.io/klog/v2 v2.90.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= 
-k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+OGxg8HsuBr/5f6tVAjDu6E= -k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4= -k8s.io/utils v0.0.0-20221107191617-1a15be271d1d h1:0Smp/HP1OH4Rvhe+4B8nWGERtlqAGSftbSbbmm45oFs= -k8s.io/utils v0.0.0-20221107191617-1a15be271d1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= -sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= +k8s.io/api v0.30.0 h1:siWhRq7cNjy2iHssOB9SCGNCl2spiF1dO3dABqZ8niA= +k8s.io/api v0.30.0/go.mod h1:OPlaYhoHs8EQ1ql0R/TsUgaRPhpKNxIMrKQfWUp8QSE= +k8s.io/apimachinery v0.30.0 h1:qxVPsyDM5XS96NIh9Oj6LavoVFYff/Pon9cZeDIkHHA= +k8s.io/apimachinery v0.30.0/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= +k8s.io/client-go v0.30.0 h1:sB1AGGlhY/o7KCyCEQ0bPWzYDL0pwOZO4vAtTSh/gJQ= +k8s.io/client-go v0.30.0/go.mod h1:g7li5O5256qe6TYdAMyX/otJqMhIiGgTapdLchhmOaY= +k8s.io/code-generator v0.30.0 h1:3VUVqHvWFSVSm9kqL/G6kD4ZwNdHF6J/jPyo3Jgjy3k= +k8s.io/code-generator v0.30.0/go.mod h1:mBMZhfRR4IunJUh2+7LVmdcWwpouCH5+LNPkZ3t/v7Q= +k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 h1:NGrVE502P0s0/1hudf8zjgwki1X/TByhmAoILTarmzo= +k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70/go.mod h1:VH3AT8AaQOqiGjMF9p0/IM1Dj+82ZwjfxUP1IxaHE+8= +k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= +k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/hack/generate_code.sh b/hack/generate_code.sh index 50e751b..d3f5df6 100755 --- a/hack/generate_code.sh +++ b/hack/generate_code.sh @@ -5,11 +5,16 @@ if ! [ -d "./vendor/k8s.io/code-generator/" ]; then exit 1 fi -SCRIPT_DIR=$(dirname "${BASH_SOURCE[0]}") - -bash ./vendor/k8s.io/code-generator/generate-groups.sh "deepcopy,client,informer,lister" \ - github.com/intel/intent-driven-orchestration/pkg/generated github.com/intel/intent-driven-orchestration/pkg/api \ - intents:v1alpha1 \ - --output-base "$SCRIPT_DIR/../../../.." 
\ - --go-header-file "$SCRIPT_DIR/header.go.txt" \ - -v 2 +REPO_DIR=$(git rev-parse --show-toplevel) +source "${REPO_DIR}/vendor/k8s.io/code-generator/kube_codegen.sh" + +kube::codegen::gen_helpers \ + ${REPO_DIR}/pkg/api/ \ + --boilerplate "${REPO_DIR}/hack/header.go.txt" + +kube::codegen::gen_client \ + ${REPO_DIR}/pkg/api/ \ + --with-watch \ + --output-dir "${REPO_DIR}"/pkg/generated \ + --output-pkg github.com/intel/intent-driven-orchestration/pkg/generated \ + --boilerplate "${REPO_DIR}/hack/header.go.txt" diff --git a/hack/generate_protobuf.sh b/hack/generate_protobuf.sh index bc062e7..36cc142 100755 --- a/hack/generate_protobuf.sh +++ b/hack/generate_protobuf.sh @@ -4,8 +4,8 @@ MAIN_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )/../" &> /dev/null && pwd set -e function protoc::ensure_installed { - if [[ ! -x "$(command -v protoc)" || "$(protoc --version)" != "libprotoc 3."* ]]; then - echo "Generating api requires protoc 3 compiler. Please follow the instructions at" + if [[ ! -x "$(command -v protoc)" || "$(protoc --version)" != "libprotoc 27."* ]]; then + echo "Generating api requires an up-to-date protoc compiler. Please follow the instructions at" echo " https://grpc.io/docs/protoc-installation/" exit 1 fi diff --git a/pkg/api/plugins/v1alpha1/actuator_client_stub.go b/pkg/api/plugins/v1alpha1/actuator_client_stub.go index 766306b..ee886aa 100644 --- a/pkg/api/plugins/v1alpha1/actuator_client_stub.go +++ b/pkg/api/plugins/v1alpha1/actuator_client_stub.go @@ -78,7 +78,7 @@ func toGrpcState(s *common.State) *protobufs.State { }, CurrentPods: make(map[string]*protobufs.PodState), CurrentData: make(map[string]*protobufs.DataEntry), - Resources: make(map[string]string), + Resources: make(map[string]int64), Annotations: make(map[string]string), } @@ -144,7 +144,7 @@ func getNextStateRequest(state *common.State, goal *common.State, profiles map[s func toGrpcActions(actions []planner.Action) []*protobufs.Action { var res []*protobufs.Action for _, a := range actions { - iProp, iok := a.Properties.(map[string]int32) + iProp, iok := a.Properties.(map[string]int64) sProp, _ := a.Properties.(map[string]string) p := protobufs.ActionProperties{ Type: protobufs.PropertyType_INT_PROPERTY, @@ -177,7 +177,7 @@ func getNextStateResponse(r *protobufs.NextStateResponse) ([]common.State, []flo }, CurrentPods: make(map[string]common.PodState), CurrentData: make(map[string]map[string]float64), - Resources: make(map[string]string), + Resources: make(map[string]int64), Annotations: make(map[string]string), } for kp, vp := range v.CurrentPods { diff --git a/pkg/api/plugins/v1alpha1/actuator_plugin_stub.go b/pkg/api/plugins/v1alpha1/actuator_plugin_stub.go index 18c37c3..2d35662 100644 --- a/pkg/api/plugins/v1alpha1/actuator_plugin_stub.go +++ b/pkg/api/plugins/v1alpha1/actuator_plugin_stub.go @@ -100,7 +100,7 @@ func (s *ActuatorPluginStub) Start() error { var lastDialErr error // TODO: make configurable. 
- err = wait.PollImmediate(1*time.Second, 10*time.Second, func() (bool, error) { + err = wait.PollUntilContextTimeout(context.Background(), 1*time.Second, 10*time.Second, true, func(_ context.Context) (bool, error) { var conn *grpc.ClientConn conn, lastDialErr = grpc.Dial(fmt.Sprintf("%s:%d", s.endpoint, s.port), grpc.WithTransportCredentials(insecure.NewCredentials())) if lastDialErr != nil { @@ -206,7 +206,7 @@ func toState(s *protobufs.State) *common.State { }, CurrentPods: make(map[string]common.PodState), CurrentData: make(map[string]map[string]float64), - Resources: make(map[string]string), + Resources: make(map[string]int64), Annotations: make(map[string]string), } diff --git a/pkg/api/plugins/v1alpha1/plugin_manager.go b/pkg/api/plugins/v1alpha1/plugin_manager.go index efe40e2..9a3d8c0 100644 --- a/pkg/api/plugins/v1alpha1/plugin_manager.go +++ b/pkg/api/plugins/v1alpha1/plugin_manager.go @@ -126,7 +126,7 @@ func (pm *PluginManagerServer) Start() error { }() go wait.Until(func() { pm.refreshRegisteredPlugin(pm.retries) }, pm.reconcilePeriod, pm.stop) var lastDialErr error - err = wait.PollImmediate(1*time.Second, 10*time.Second, func() (bool, error) { // TODO: make configurable. + err = wait.PollUntilContextTimeout(context.Background(), 1*time.Second, 10*time.Second, true, func(_ context.Context) (bool, error) { // TODO: make configurable. var conn *grpc.ClientConn conn, lastDialErr = grpc.Dial(fmt.Sprintf("localhost:%d", pm.port), grpc.WithTransportCredentials(insecure.NewCredentials())) if lastDialErr != nil { diff --git a/pkg/api/plugins/v1alpha1/plugin_manager_test.go b/pkg/api/plugins/v1alpha1/plugin_manager_test.go index cc090d9..51fc596 100644 --- a/pkg/api/plugins/v1alpha1/plugin_manager_test.go +++ b/pkg/api/plugins/v1alpha1/plugin_manager_test.go @@ -46,7 +46,10 @@ func newTestPluginManager(ctx context.Context) (protobufs.RegistrationClient, fu go func() { err := s.Serve(listener) if err != nil { - klog.Exit(err) + if err == grpc.ErrServerStopped { + klog.Info("Server stopped") + } + klog.Errorf("Server serve error: %v", err) } }() diff --git a/pkg/api/plugins/v1alpha1/protobufs/api.pb.go b/pkg/api/plugins/v1alpha1/protobufs/api.pb.go index 55a71d1..53065a7 100644 --- a/pkg/api/plugins/v1alpha1/protobufs/api.pb.go +++ b/pkg/api/plugins/v1alpha1/protobufs/api.pb.go @@ -2,8 +2,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.12 +// protoc-gen-go v1.33.0 +// protoc v4.25.1 // source: pkg/api/plugins/v1alpha1/protobufs/api.proto package plugins @@ -482,7 +482,7 @@ type Profile struct { unknownFields protoimpl.UnknownFields Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - ProfileType ProfileType `protobuf:"varint,2,opt,name=profile_type,json=profileType,proto3,enum=plugins.ProfileType" json:"profile_type,omitempty"` + ProfileType ProfileType `protobuf:"varint,2,opt,name=profile_type,json=profileType,proto3,enum=plugins.ProfileType" json:"profile_type,omitempty"` // We are not copying over endpoints, query etc. for security reasons; those are not needed by the actuators. 
} func (x *Profile) Reset() { @@ -660,7 +660,7 @@ type State struct { Intent *Intent `protobuf:"bytes,1,opt,name=intent,proto3" json:"intent,omitempty"` CurrentPods map[string]*PodState `protobuf:"bytes,2,rep,name=current_pods,json=currentPods,proto3" json:"current_pods,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` CurrentData map[string]*DataEntry `protobuf:"bytes,3,rep,name=current_data,json=currentData,proto3" json:"current_data,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - Resources map[string]string `protobuf:"bytes,4,rep,name=resources,proto3" json:"resources,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Resources map[string]int64 `protobuf:"bytes,4,rep,name=resources,proto3" json:"resources,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` Annotations map[string]string `protobuf:"bytes,5,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } @@ -717,7 +717,7 @@ func (x *State) GetCurrentData() map[string]*DataEntry { return nil } -func (x *State) GetResources() map[string]string { +func (x *State) GetResources() map[string]int64 { if x != nil { return x.Resources } @@ -738,7 +738,7 @@ type ActionProperties struct { unknownFields protoimpl.UnknownFields Type PropertyType `protobuf:"varint,1,opt,name=type,proto3,enum=plugins.PropertyType" json:"type,omitempty"` - IntProperties map[string]int32 `protobuf:"bytes,2,rep,name=intProperties,proto3" json:"intProperties,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + IntProperties map[string]int64 `protobuf:"bytes,2,rep,name=intProperties,proto3" json:"intProperties,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` StrProperties map[string]string `protobuf:"bytes,3,rep,name=strProperties,proto3" json:"strProperties,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } @@ -781,7 +781,7 @@ func (x *ActionProperties) GetType() PropertyType { return PropertyType_INT_PROPERTY } -func (x *ActionProperties) GetIntProperties() map[string]int32 { +func (x *ActionProperties) GetIntProperties() map[string]int64 { if x != nil { return x.IntProperties } @@ -1187,7 +1187,7 @@ var file_pkg_api_plugins_v1alpha1_protobufs_api_proto_rawDesc = []byte{ 0x3c, 0x0a, 0x0e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3e, 0x0a, + 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3e, 0x0a, 0x10, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, @@ -1209,7 +1209,7 @@ var file_pkg_api_plugins_v1alpha1_protobufs_api_proto_rawDesc = []byte{ 0x72, 0x74, 0x69, 0x65, 0x73, 0x1a, 0x40, 0x0a, 0x12, 0x49, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x70, 0x65, 
0x72, 0x74, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x40, 0x0a, 0x12, 0x53, 0x74, 0x72, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, diff --git a/pkg/api/plugins/v1alpha1/protobufs/api.proto b/pkg/api/plugins/v1alpha1/protobufs/api.proto index 0f6bc74..e448960 100644 --- a/pkg/api/plugins/v1alpha1/protobufs/api.proto +++ b/pkg/api/plugins/v1alpha1/protobufs/api.proto @@ -14,7 +14,6 @@ message Empty { // Registration may fail when ido plugin version is not supported by // ido controller or the registered plugin name is already taken by another // active ido plugin. IDO plugin is expected to terminate upon registration failure - service Registration { rpc Register(RegisterRequest) returns (RegistrationStatusResponse) {} } @@ -95,8 +94,8 @@ message State { Intent intent = 1; map current_pods = 2; map current_data = 3; - map resources = 4; - map annotations = 5; + map resources = 4; + map annotations = 5; } // PropertyType type of property: integer or string @@ -108,7 +107,7 @@ enum PropertyType{ // ActionProperties action properties message ActionProperties{ PropertyType type = 1; - map intProperties = 2; + map intProperties = 2; map strProperties = 3; } diff --git a/pkg/api/plugins/v1alpha1/protobufs/api_grpc.pb.go b/pkg/api/plugins/v1alpha1/protobufs/api_grpc.pb.go index 48cc3c6..02adcb3 100644 --- a/pkg/api/plugins/v1alpha1/protobufs/api_grpc.pb.go +++ b/pkg/api/plugins/v1alpha1/protobufs/api_grpc.pb.go @@ -1,7 +1,9 @@ +// To regenerate api.pb.go run hack/generate_protobuf.sh or make api in the root of the repository + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.21.12 +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.25.1 // source: pkg/api/plugins/v1alpha1/protobufs/api.proto package plugins @@ -18,6 +20,10 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + Registration_Register_FullMethodName = "/plugins.Registration/Register" +) + // RegistrationClient is the client API for Registration service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -35,7 +41,7 @@ func NewRegistrationClient(cc grpc.ClientConnInterface) RegistrationClient { func (c *registrationClient) Register(ctx context.Context, in *RegisterRequest, opts ...grpc.CallOption) (*RegistrationStatusResponse, error) { out := new(RegistrationStatusResponse) - err := c.cc.Invoke(ctx, "/plugins.Registration/Register", in, out, opts...) + err := c.cc.Invoke(ctx, Registration_Register_FullMethodName, in, out, opts...) 
if err != nil { return nil, err } @@ -80,7 +86,7 @@ func _Registration_Register_Handler(srv interface{}, ctx context.Context, dec fu } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/plugins.Registration/Register", + FullMethod: Registration_Register_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(RegistrationServer).Register(ctx, req.(*RegisterRequest)) @@ -104,6 +110,12 @@ var Registration_ServiceDesc = grpc.ServiceDesc{ Metadata: "pkg/api/plugins/v1alpha1/protobufs/api.proto", } +const ( + ActuatorPlugin_NextState_FullMethodName = "/plugins.ActuatorPlugin/NextState" + ActuatorPlugin_Perform_FullMethodName = "/plugins.ActuatorPlugin/Perform" + ActuatorPlugin_Effect_FullMethodName = "/plugins.ActuatorPlugin/Effect" +) + // ActuatorPluginClient is the client API for ActuatorPlugin service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -125,7 +137,7 @@ func NewActuatorPluginClient(cc grpc.ClientConnInterface) ActuatorPluginClient { } func (c *actuatorPluginClient) NextState(ctx context.Context, opts ...grpc.CallOption) (ActuatorPlugin_NextStateClient, error) { - stream, err := c.cc.NewStream(ctx, &ActuatorPlugin_ServiceDesc.Streams[0], "/plugins.ActuatorPlugin/NextState", opts...) + stream, err := c.cc.NewStream(ctx, &ActuatorPlugin_ServiceDesc.Streams[0], ActuatorPlugin_NextState_FullMethodName, opts...) if err != nil { return nil, err } @@ -157,7 +169,7 @@ func (x *actuatorPluginNextStateClient) Recv() (*NextStateResponse, error) { func (c *actuatorPluginClient) Perform(ctx context.Context, in *PerformRequest, opts ...grpc.CallOption) (*Empty, error) { out := new(Empty) - err := c.cc.Invoke(ctx, "/plugins.ActuatorPlugin/Perform", in, out, opts...) + err := c.cc.Invoke(ctx, ActuatorPlugin_Perform_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -166,7 +178,7 @@ func (c *actuatorPluginClient) Perform(ctx context.Context, in *PerformRequest, func (c *actuatorPluginClient) Effect(ctx context.Context, in *EffectRequest, opts ...grpc.CallOption) (*Empty, error) { out := new(Empty) - err := c.cc.Invoke(ctx, "/plugins.ActuatorPlugin/Effect", in, out, opts...) + err := c.cc.Invoke(ctx, ActuatorPlugin_Effect_FullMethodName, in, out, opts...) 
if err != nil { return nil, err } @@ -248,7 +260,7 @@ func _ActuatorPlugin_Perform_Handler(srv interface{}, ctx context.Context, dec f } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/plugins.ActuatorPlugin/Perform", + FullMethod: ActuatorPlugin_Perform_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ActuatorPluginServer).Perform(ctx, req.(*PerformRequest)) @@ -266,7 +278,7 @@ func _ActuatorPlugin_Effect_Handler(srv interface{}, ctx context.Context, dec fu } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/plugins.ActuatorPlugin/Effect", + FullMethod: ActuatorPlugin_Effect_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ActuatorPluginServer).Effect(ctx, req.(*EffectRequest)) diff --git a/pkg/api/plugins/v1alpha1/test_utils.go b/pkg/api/plugins/v1alpha1/test_utils.go index fa6fa36..787453f 100644 --- a/pkg/api/plugins/v1alpha1/test_utils.go +++ b/pkg/api/plugins/v1alpha1/test_utils.go @@ -41,7 +41,7 @@ func generateActuatorGrpcValidationSet() *ActuatorGrpcValidationSet { Data: map[string]float64{"host0": 20.0}, }, }, - Resources: map[string]string{"cpu": "23"}, + Resources: map[string]int64{"cpu": 23}, Annotations: map[string]string{"foo": "bar"}, }, goal: &protobufs.State{ @@ -70,7 +70,7 @@ func generateActuatorGrpcValidationSet() *ActuatorGrpcValidationSet { }, CurrentPods: map[string]*protobufs.PodState{"pod_0": {Availability: 0.6}}, CurrentData: map[string]*protobufs.DataEntry{"cpu_value": {Data: map[string]float64{"host0": 21.3}}}, - Resources: map[string]string{"cpu": "12"}, + Resources: map[string]int64{"cpu": 12}, Annotations: map[string]string{"foo": "bar"}, }, }, @@ -87,7 +87,7 @@ func generateActuatorGrpcValidationSet() *ActuatorGrpcValidationSet { Name: "action 2", Properties: &protobufs.ActionProperties{ Type: protobufs.PropertyType_INT_PROPERTY, - IntProperties: map[string]int32{"option3": 42}, + IntProperties: map[string]int64{"option3": 42}, }, }, }, @@ -109,7 +109,7 @@ func generateActuatorValidationSet() *ActuatorValidationSet { }, CurrentPods: map[string]common.PodState{"pod_0": {Availability: 0.7}}, CurrentData: map[string]map[string]float64{"cpu_value": {"host0": 20.0}}, - Resources: map[string]string{"cpu": "23"}, + Resources: map[string]int64{"cpu": 23}, Annotations: map[string]string{"foo": "bar"}, }, goal: &common.State{ @@ -138,7 +138,7 @@ func generateActuatorValidationSet() *ActuatorValidationSet { }, CurrentPods: map[string]common.PodState{"pod_0": {Availability: 0.6}}, CurrentData: map[string]map[string]float64{"cpu_value": {"host0": 21.3}}, - Resources: map[string]string{"cpu": "12"}, + Resources: map[string]int64{"cpu": 12}, Annotations: map[string]string{"foo": "bar"}, }, }, @@ -150,7 +150,7 @@ func generateActuatorValidationSet() *ActuatorValidationSet { }, { Name: "action 2", - Properties: map[string]int32{"option3": 42}, + Properties: map[string]int64{"option3": 42}, }, }, } diff --git a/pkg/common/config.go b/pkg/common/config.go index f4cd5bf..30ba866 100644 --- a/pkg/common/config.go +++ b/pkg/common/config.go @@ -126,7 +126,7 @@ func ParseConfig(filename string) (Config, error) { result.Planner.AStar.OpportunisticCandidates > 1000 { return *result, fmt.Errorf("invalid input value: Out of the provided limits") } - if result.Planner.AStar.PluginManagerPort < 0 || + if result.Planner.AStar.PluginManagerPort < 1 || result.Planner.AStar.PluginManagerPort > 65535 { return *result, fmt.Errorf("invalid input value: Port 
number is not in a valid range: %d", result.Planner.AStar.PluginManagerPort) } diff --git a/pkg/common/ttl_cache.go b/pkg/common/ttl_cache.go index 3bea06d..c0aed63 100644 --- a/pkg/common/ttl_cache.go +++ b/pkg/common/ttl_cache.go @@ -19,7 +19,7 @@ func NewCache(ttl int, tick time.Duration) (*TTLCache, chan struct{}) { cache := &TTLCache{entries: make(map[string]int64)} done := make(chan struct{}) if tick <= 0 || ttl <= 0 || tick > MaxPlanCacheTimeout || ttl > MaxPlanCacheTTL { - klog.Error("invalid time value.") + klog.Error("invalid timing values.") return cache, done } diff --git a/pkg/common/ttl_cache_test.go b/pkg/common/ttl_cache_test.go index 25c068c..b8d7d3a 100644 --- a/pkg/common/ttl_cache_test.go +++ b/pkg/common/ttl_cache_test.go @@ -8,14 +8,14 @@ import ( // Tests for success. // TestPutForSuccess tests for success. -func TestPutForSuccess(t *testing.T) { +func TestPutForSuccess(_ *testing.T) { cache, done := NewCache(10, time.Duration(100)) defer close(done) cache.Put("foo") } // TestIsInForSuccess tests for success. -func TestIsInForSuccess(t *testing.T) { +func TestIsInForSuccess(_ *testing.T) { cache, done := NewCache(10, time.Duration(100)) defer close(done) cache.Put("foo") diff --git a/pkg/common/types.go b/pkg/common/types.go index ad6dec1..15b5fa5 100644 --- a/pkg/common/types.go +++ b/pkg/common/types.go @@ -4,8 +4,6 @@ import ( "math" "strings" "time" - - "k8s.io/apimachinery/pkg/api/resource" ) // ProfileType defines the type of KPIProfiles. @@ -75,7 +73,7 @@ type State struct { Intent Intent CurrentPods map[string]PodState CurrentData map[string]map[string]float64 - Resources map[string]string + Resources map[string]int64 Annotations map[string]string } @@ -98,7 +96,7 @@ func (one *State) DeepCopy() State { objective, map[string]PodState{}, map[string]map[string]float64{}, - map[string]string{}, + map[string]int64{}, map[string]string{}, } @@ -155,14 +153,15 @@ func (one *State) Distance(another *State, profiles map[string]Profile) float64 return math.Sqrt(squaresSum) } -// IsBetter compares the objectives of one state to another - returns true if all latency related objective targets are smaller or equal, and all others are larger or equal. +// IsBetter compares the objectives of one state to another - returns true if all latency or power related objective targets are smaller or equal, and all others are larger or equal. func (one *State) IsBetter(another *State, profiles map[string]Profile) bool { if len(one.Intent.Objectives) != len(another.Intent.Objectives) { return false } res := false for k, v := range one.Intent.Objectives { - if profiles[k].ProfileType == ProfileTypeFromText("latency") { + // TODO: make this configurable through the KPI profiles for which we define larger or smaller is better. 
+ if profiles[k].ProfileType == ProfileTypeFromText("latency") || profiles[k].ProfileType == ProfileTypeFromText("power") { if v <= another.Intent.Objectives[k] { res = true } else { @@ -187,11 +186,7 @@ func (one *State) LessResources(another *State) bool { if !ok { return false } - oneVal := resource.MustParse(v) - oneFloat := oneVal.AsApproximateFloat64() - anotherVal := resource.MustParse(tmp) - anotherFloat := anotherVal.AsApproximateFloat64() - if oneFloat <= anotherFloat { + if v <= tmp { res = true } else { return false diff --git a/pkg/common/types_test.go b/pkg/common/types_test.go index 2023a55..bbfa17f 100644 --- a/pkg/common/types_test.go +++ b/pkg/common/types_test.go @@ -15,7 +15,7 @@ func TestProfileTypeFromTextForSuccess(t *testing.T) { } // TestDistanceForSuccess tests for success. -func TestDistanceForSuccess(t *testing.T) { +func TestDistanceForSuccess(_ *testing.T) { s0 := State{ Intent: Intent{ Key: "default/foo", @@ -51,7 +51,7 @@ func TestDistanceForSuccess(t *testing.T) { } // TestDeepCopyStateForSuccess tests for success. -func TestDeepCopyStateForSuccess(t *testing.T) { +func TestDeepCopyStateForSuccess(_ *testing.T) { state := State{ Intent: Intent{ Key: "default/foo", @@ -65,7 +65,7 @@ func TestDeepCopyStateForSuccess(t *testing.T) { } // TestIsBetterStateForSuccess tests for success. -func TestIsBetterStateForSuccess(t *testing.T) { +func TestIsBetterStateForSuccess(_ *testing.T) { s0 := State{ Intent: Intent{ Key: "default/foo", @@ -85,15 +85,15 @@ func TestIsBetterStateForSuccess(t *testing.T) { } // TestLessResourcesForSuccess tests for success. -func TestLessResourcesForSuccess(t *testing.T) { +func TestLessResourcesForSuccess(_ *testing.T) { s0 := State{ - Resources: map[string]string{ - "0_cpu": "1", + Resources: map[string]int64{ + "0_cpu": 1, }, } s1 := State{ - Resources: map[string]string{ - "0_cpu": "2", + Resources: map[string]int64{ + "0_cpu": 2, }, } s0.LessResources(&s1) @@ -202,7 +202,7 @@ func TestDeepCopyStateForSanity(t *testing.T) { }, }, CurrentData: map[string]map[string]float64{"cpu_value": {"host0": 20.0}}, - Resources: map[string]string{"0_cpu": "100Mi"}, + Resources: map[string]int64{"0_cpu": 100}, Annotations: map[string]string{"llc": "0x1"}, } res := state.DeepCopy() @@ -270,8 +270,8 @@ func TestIsBetterStateForSanity(t *testing.T) { "availability": 0.99, }, }, - Resources: map[string]string{ - "cpu": "1", + Resources: map[string]int64{ + "cpu": 1, }, } s1 := s0.DeepCopy() @@ -285,10 +285,20 @@ func TestIsBetterStateForSanity(t *testing.T) { s5.Intent.Objectives["availability"] = 0.96 s6 := s0.DeepCopy() delete(s6.Intent.Objectives, "p99") + s7 := s0.DeepCopy() + s7.Intent.Objectives["power"] = 75 + s8 := s7.DeepCopy() + s8.Intent.Objectives["power"] = 100 + s9 := s0.DeepCopy() + s9.Intent.Objectives["rps"] = 12 + s10 := s9.DeepCopy() + s10.Intent.Objectives["rps"] = 10 profiles := map[string]Profile{ "p99": {ProfileType: ProfileTypeFromText("latency")}, "availability": {ProfileType: ProfileTypeFromText("availability")}, + "power": {ProfileType: ProfileTypeFromText("power")}, + "rps": {ProfileType: ProfileTypeFromText("throughput")}, } // deep-copy should be equal. @@ -308,21 +318,29 @@ func TestIsBetterStateForSanity(t *testing.T) { if s6.IsBetter(&s0, profiles) != false { t.Errorf("Should be uncomparable --> false.") } + // s7 has better power then s8. + if s7.IsBetter(&s8, profiles) != true { + t.Errorf("Should be better as s8: %v - %v.", s7, s8) + } + // s9 has better throughput then s10. 
+ if s9.IsBetter(&s10, profiles) != true { + t.Errorf("Should be better as s10: %v - %v.", s9, s10) + } } // TestLessResourcesForSanity tests for sanity. func TestLessResourcesForSanity(t *testing.T) { s0 := State{ - Resources: map[string]string{ - "0_cpu": "1024", - "1_cpu": "1024", + Resources: map[string]int64{ + "0_cpu": 1024, + "1_cpu": 1024, }, } s1 := s0.DeepCopy() s2 := s1.DeepCopy() - s2.Resources["0_cpu"] = "2048" + s2.Resources["0_cpu"] = 2048 s3 := s0.DeepCopy() - s3.Resources["2_cpu"] = "2" + s3.Resources["2_cpu"] = 2 // deep-copy should be equal. res := s0.LessResources(&s1) diff --git a/pkg/controller/intent_controller.go b/pkg/controller/intent_controller.go index 4c72b23..12ca944 100644 --- a/pkg/controller/intent_controller.go +++ b/pkg/controller/intent_controller.go @@ -27,7 +27,7 @@ var warmupLock = sync.RWMutex{} // IntentController defines the overall intent controller. type IntentController struct { cfg common.Config - clientSet *kubernetes.Clientset + clientSet kubernetes.Interface podInformer v1.PodInformer tasks chan string intents map[string]common.Intent @@ -43,7 +43,7 @@ type IntentController struct { } // NewController initializes a new IntentController. -func NewController(cfg common.Config, clientSet *kubernetes.Clientset, informer v1.PodInformer) *IntentController { +func NewController(cfg common.Config, tracer Tracer, clientSet kubernetes.Interface, informer v1.PodInformer) *IntentController { if cfg.Controller.TaskChannelLength <= 0 || cfg.Controller.TaskChannelLength > common.MaxTaskChannelLen { klog.Error("invalid input value. Check documentation for the allowed limit") @@ -58,7 +58,7 @@ func NewController(cfg common.Config, clientSet *kubernetes.Clientset, informer intents: make(map[string]common.Intent), profiles: make(map[string]common.Profile), podErrors: make(map[string][]common.PodError), - tracer: NewMongoTracer(cfg.Generic.MongoEndpoint), + tracer: tracer, } c.planCache, _ = common.NewCache(cfg.Controller.PlanCacheTTL, time.Duration(cfg.Controller.PlanCacheTimeout)) return c @@ -159,8 +159,10 @@ func (c *IntentController) worker(id int, tasks <-chan string) { klog.Info("no planner configured") continue } + c.intentsLock.Lock() current := getCurrentState(c.cfg.Controller, c.clientSet, c.podInformer, c.intents[key], c.podErrors, c.profiles) desired := getDesiredState(c.intents[key]) + c.intentsLock.Unlock() plan := planner.CreatePlan(current, desired, c.profiles) klog.Infof("Planner output for %s was: %v", key, plan) _, err := os.Stat(lockFile) @@ -191,12 +193,12 @@ func (c *IntentController) Run(nWorkers int, stopper <-chan struct{}) { return case t := <-ticker.C: klog.V(2).Infof("Tick at: %s", t) + warmupLock.Lock() if !warmupDone { // This is stupid - to many ifs; but works for now. - warmupLock.Lock() warmupDone = true - warmupLock.Unlock() } + warmupLock.Unlock() c.processIntents() } runtime.Gosched() diff --git a/pkg/controller/intent_controller_test.go b/pkg/controller/intent_controller_test.go index b358423..ff86a36 100644 --- a/pkg/controller/intent_controller_test.go +++ b/pkg/controller/intent_controller_test.go @@ -16,6 +16,9 @@ import ( "k8s.io/klog/v2" ) +// TIMEOUT used to ensure updates get processed by the controller before asserting the results. +const TIMEOUT = 100 + // dummyTracer for testing. type dummyTracer struct{} @@ -57,16 +60,15 @@ func newTestController() *IntentController { client := fake.NewSimpleClientset(nil...) 
informer := informers.NewSharedInformerFactory(client, func() time.Duration { return 0 }()) dummyPlanner := dummyPlanner{} - controller := NewController(cfg, nil, informer.Core().V1().Pods()) + controller := NewController(cfg, dummyTracer{}, nil, informer.Core().V1().Pods()) controller.SetPlanner(dummyPlanner) - controller.tracer = dummyTracer{} return controller } // Tests for success. // TestUpdateProfileForSuccess tests for success. -func TestUpdateProfileForSuccess(t *testing.T) { +func TestUpdateProfileForSuccess(_ *testing.T) { stopChannel := make(chan struct{}) defer close(stopChannel) c := newTestController() @@ -75,7 +77,7 @@ func TestUpdateProfileForSuccess(t *testing.T) { } // TestUpdatePodErrorForSuccess tests for success. -func TestUpdatePodErrorForSuccess(t *testing.T) { +func TestUpdatePodErrorForSuccess(_ *testing.T) { stopChannel := make(chan struct{}) defer close(stopChannel) c := newTestController() @@ -84,7 +86,7 @@ func TestUpdatePodErrorForSuccess(t *testing.T) { } // TestUpdateIntentForSuccess tests for success. -func TestUpdateIntentForSuccess(t *testing.T) { +func TestUpdateIntentForSuccess(_ *testing.T) { stopChannel := make(chan struct{}) defer close(stopChannel) c := newTestController() @@ -94,7 +96,7 @@ func TestUpdateIntentForSuccess(t *testing.T) { } // TestRunForSuccess tests for success. -func TestRunControllerForSuccess(t *testing.T) { +func TestRunControllerForSuccess(_ *testing.T) { stopChannel := make(chan struct{}) defer close(stopChannel) c := newTestController() @@ -102,7 +104,7 @@ func TestRunControllerForSuccess(t *testing.T) { } // TestProcessIntentsForSuccess tests for success. -func TestProcessIntentsForSuccess(t *testing.T) { +func TestProcessIntentsForSuccess(_ *testing.T) { stopChannel := make(chan struct{}) defer close(stopChannel) c := newTestController() @@ -123,6 +125,7 @@ func TestUpdateProfileForSanity(t *testing.T) { c.Run(1, stopChannel) c.UpdateProfile() <- common.Profile{Key: "test", ProfileType: common.Latency, Query: "foo", External: true} + time.Sleep(TIMEOUT * time.Millisecond) c.profilesLock.Lock() if len(c.profiles) != 1 { t.Error("Profile not added to profiles map.") @@ -130,6 +133,7 @@ func TestUpdateProfileForSanity(t *testing.T) { c.profilesLock.Unlock() c.UpdateProfile() <- common.Profile{Key: "test", ProfileType: common.Obsolete, Query: "foo", External: true} + time.Sleep(TIMEOUT * time.Millisecond) c.profilesLock.Lock() if len(c.profiles) != 0 { t.Error("Profile should have been removed.") @@ -145,6 +149,7 @@ func TestUpdatePodErrorForSanity(t *testing.T) { c.Run(1, stopChannel) c.UpdatePodError() <- common.PodError{Key: "test", Start: time.Now(), End: time.Now()} + time.Sleep(TIMEOUT * time.Millisecond) c.podErrorLock.Lock() if len(c.podErrors["test"]) == 0 { t.Error("These should be a POD error in the map.") @@ -152,6 +157,7 @@ func TestUpdatePodErrorForSanity(t *testing.T) { c.podErrorLock.Unlock() c.UpdatePodError() <- common.PodError{Key: "test"} + time.Sleep(TIMEOUT * time.Millisecond) c.podErrorLock.Lock() if len(c.podErrors) != 0 { t.Error("Should removed the POD from the map.") @@ -167,6 +173,7 @@ func TestUpdateIntentForSanity(t *testing.T) { c.Run(1, stopChannel) c.UpdateIntent() <- common.Intent{Key: "test", Priority: 1.0, TargetKey: "frontend", TargetKind: "Deployment"} + time.Sleep(TIMEOUT * time.Millisecond) c.intentsLock.Lock() if len(c.intents) != 1 { t.Error("Intent has not been added to intents map.") @@ -174,6 +181,7 @@ func TestUpdateIntentForSanity(t *testing.T) { c.intentsLock.Unlock() 
c.UpdateIntent() <- common.Intent{Key: "test", Priority: -1.0} + time.Sleep(TIMEOUT * time.Millisecond) c.intentsLock.Lock() if len(c.intents) != 0 { t.Error("Intent should have been removed.") @@ -182,7 +190,7 @@ func TestUpdateIntentForSanity(t *testing.T) { } // TestRunControllerForSanity tests for sanity. -func TestRunControllerForSanity(t *testing.T) { +func TestRunControllerForSanity(_ *testing.T) { stopChannel := make(chan struct{}) defer close(stopChannel) c := newTestController() @@ -193,7 +201,7 @@ func TestRunControllerForSanity(t *testing.T) { } // TestProcessIntentsForSanity tests for sanity. -func TestProcessIntentsForSanity(t *testing.T) { +func TestProcessIntentsForSanity(_ *testing.T) { stopChannel := make(chan struct{}) defer close(stopChannel) c := newTestController() @@ -303,7 +311,7 @@ func TestNewControllerForFailure(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - controller := NewController(tt.args.cfg, tt.args.clientSet, tt.args.informer) + controller := NewController(tt.args.cfg, nil, tt.args.clientSet, tt.args.informer) if controller != nil { if got := controller.planner; !reflect.DeepEqual(got, tt.want) { t.Errorf("Planner in NewController() = %v, want %v", got, tt.want) diff --git a/pkg/controller/intent_monitor.go b/pkg/controller/intent_monitor.go index ba413eb..2836fd9 100644 --- a/pkg/controller/intent_monitor.go +++ b/pkg/controller/intent_monitor.go @@ -34,7 +34,7 @@ func NewIntentMonitor(intentClient clientSet.Interface, intentInformer informers intentClient: intentClient, intentLister: intentInformer.Lister(), intentSynced: intentInformer.Informer().HasSynced, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "Intents"), + queue: workqueue.NewRateLimitingQueueWithConfig(workqueue.DefaultControllerRateLimiter(), workqueue.RateLimitingQueueConfig{Name: "Intents"}), update: ch, } mon.syncHandler = mon.processIntent diff --git a/pkg/controller/intent_monitor_test.go b/pkg/controller/intent_monitor_test.go index b4e5dab..6370517 100644 --- a/pkg/controller/intent_monitor_test.go +++ b/pkg/controller/intent_monitor_test.go @@ -143,7 +143,7 @@ func TestRunIntentMonitorForSuccess(t *testing.T) { defer close(stopper) mon, faker := f.newMonitor(stopper) - mon.syncHandler = func(key string) error { + mon.syncHandler = func(_ string) error { return nil } @@ -173,7 +173,7 @@ func TestRunIntentMonitorForFailure(t *testing.T) { // syncHandler bails out. mon, faker := f.newMonitor(stopChannel) - mon.syncHandler = func(key string) error { + mon.syncHandler = func(_ string) error { return errors.New("oh") } @@ -205,7 +205,7 @@ func TestRunIntentMonitorForSanity(t *testing.T) { mon, faker := f.newMonitor(stopChannel) // syncHandler "replaced" by sth simpler... - mon.syncHandler = func(key string) error { + mon.syncHandler = func(_ string) error { return nil } diff --git a/pkg/controller/kpi.go b/pkg/controller/kpi.go index 89db96d..528d90c 100644 --- a/pkg/controller/kpi.go +++ b/pkg/controller/kpi.go @@ -30,7 +30,9 @@ type httpClient interface { // init makes sure we use the "real" http client when not testing. func init() { - Client = &http.Client{} + Client = &http.Client{ + Timeout: 5 * time.Second, + } } // getFloat returns a float64 from that weird Prometheus string. diff --git a/pkg/controller/kpi_test.go b/pkg/controller/kpi_test.go index 8283959..db8050b 100644 --- a/pkg/controller/kpi_test.go +++ b/pkg/controller/kpi_test.go @@ -15,8 +15,8 @@ func init() { // Tests for success. 
// TestDoQueryForSuccess tests for success. -func TestDoQueryForSuccess(t *testing.T) { - responseBody := "{\"data\": {\"result\": [{\"value\": [1645019125.000, 1.23456780]}]}}" +func TestDoQueryForSuccess(_ *testing.T) { + responseBody := "{\"data\": {\"result\": [{\"value\": [1645019125.000, \"1.23456780\"]}]}}" MockResponse(responseBody, 200) prof := common.Profile{ Key: "default/my-p99-compliance", @@ -36,13 +36,13 @@ func TestDoQueryForSuccess(t *testing.T) { } // TestPodAvailabilityForSuccess tests for success. -func TestPodAvailabilityForSuccess(t *testing.T) { +func TestPodAvailabilityForSuccess(_ *testing.T) { var errors []common.PodError podAvailability(errors, time.Now()) } // TestPodSetAvailabilityForSuccess tests for success. -func TestPodSetAvailabilityForSuccess(t *testing.T) { +func TestPodSetAvailabilityForSuccess(_ *testing.T) { PodSetAvailability(map[string]common.PodState{"pod0": {Availability: 0.8}}) } diff --git a/pkg/controller/pod_monitor.go b/pkg/controller/pod_monitor.go index 1d62152..4050792 100644 --- a/pkg/controller/pod_monitor.go +++ b/pkg/controller/pod_monitor.go @@ -61,7 +61,7 @@ func NewPodMonitor(podClient kubernetes.Interface, informer coreInformer.PodInfo podClient: podClient, podLister: informer.Lister(), podSynced: informer.Informer().HasSynced, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "Pods"), + queue: workqueue.NewRateLimitingQueueWithConfig(workqueue.DefaultControllerRateLimiter(), workqueue.RateLimitingQueueConfig{Name: "Pods"}), update: ch, podsWithError: make(map[string]bool), cacheLock: sync.Mutex{}, diff --git a/pkg/controller/pod_monitor_test.go b/pkg/controller/pod_monitor_test.go index af85f5c..a646f69 100644 --- a/pkg/controller/pod_monitor_test.go +++ b/pkg/controller/pod_monitor_test.go @@ -129,7 +129,7 @@ func TestRunPodMonitorForSuccess(t *testing.T) { mon, faker := f.newMonitor(stop) // syncHandler "replaced" by sth simpler... - mon.syncHandler = func(key string) error { + mon.syncHandler = func(_ string) error { return nil } @@ -171,7 +171,7 @@ func TestRunPodMonitorForFailure(t *testing.T) { // syncHandler will raise an error. mon, faker := f.newMonitor(stop) - mon.syncHandler = func(key string) error { + mon.syncHandler = func(_ string) error { return errors.New("oops") } @@ -212,7 +212,7 @@ func TestRunPodMonitorForSanity(t *testing.T) { mon, faker := f.newMonitor(stop) // syncHandler "replaced" by sth simpler... 
- mon.syncHandler = func(key string) error { + mon.syncHandler = func(_ string) error { return nil } diff --git a/pkg/controller/profile_monitor.go b/pkg/controller/profile_monitor.go index 544cdcb..4cb86b6 100644 --- a/pkg/controller/profile_monitor.go +++ b/pkg/controller/profile_monitor.go @@ -51,7 +51,7 @@ func NewKPIProfileMonitor(cfg common.MonitorConfig, profileClient clientSet.Inte profileClient: profileClient, profileLister: profileInformer.Lister(), profileSynced: profileInformer.Informer().HasSynced, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "KPIProfiles"), + queue: workqueue.NewRateLimitingQueueWithConfig(workqueue.DefaultControllerRateLimiter(), workqueue.RateLimitingQueueConfig{Name: "KPIProfiles"}), update: ch, defaultProfiles: result, } @@ -173,7 +173,7 @@ func (mon *KPIProfileMonitor) processProfile(key string) error { mon.updateStatus(profile, true, "ok") mon.update <- parsedProfile } else { - mon.updateStatus(profile, false, "Both and query endpoint and a query need to be defined.") + mon.updateStatus(profile, false, "Both a endpoint and a query need to be defined.") mon.update <- common.Profile{Key: key, ProfileType: common.Obsolete, External: true} } } diff --git a/pkg/controller/profile_monitor_test.go b/pkg/controller/profile_monitor_test.go index b21b2ba..267e221 100644 --- a/pkg/controller/profile_monitor_test.go +++ b/pkg/controller/profile_monitor_test.go @@ -184,7 +184,7 @@ func TestRunKPIProfileMonitorForSuccess(t *testing.T) { mon, faker := f.newMonitor(stopChannel) // syncHandler "replaced" by sth simpler... - mon.syncHandler = func(key string) error { + mon.syncHandler = func(_ string) error { return nil } @@ -229,7 +229,7 @@ func TestRunKPIProfileMonitorForFailure(t *testing.T) { // syncHandler bails out. mon, faker := f.newMonitor(stopChannel) - mon.syncHandler = func(key string) error { + mon.syncHandler = func(_ string) error { return errors.New("whoops") } @@ -261,7 +261,7 @@ func TestRunKPIProfileMonitorForSanity(t *testing.T) { mon, faker := f.newMonitor(stopChannel) // syncHandler "replaced" by sth simpler... - mon.syncHandler = func(key string) error { + mon.syncHandler = func(_ string) error { return nil } diff --git a/pkg/controller/state_helper.go b/pkg/controller/state_helper.go index 23d49d1..284c868 100644 --- a/pkg/controller/state_helper.go +++ b/pkg/controller/state_helper.go @@ -2,7 +2,6 @@ package controller import ( "context" - "fmt" "strconv" "strings" "time" @@ -23,7 +22,7 @@ func getPods(clientSet kubernetes.Interface, informer v1.PodInformer, targetKey string, targetKind string, - podErrors map[string][]common.PodError) (map[string]common.PodState, map[string]string, map[string]string, []string) { + podErrors map[string][]common.PodError) (map[string]common.PodState, map[string]string, map[string]int64, []string) { podStates := map[string]common.PodState{} var hosts []string tmp := strings.Split(targetKey, "/") @@ -51,7 +50,7 @@ func getPods(clientSet kubernetes.Interface, // Will ignore errors as pod list will be empty anyhow. 
selector, _ := metaV1.LabelSelectorAsSelector(labels) pods, _ := informer.Lister().Pods(tmp[0]).List(selector) - containerResources := map[string]string{} + containerResources := map[string]int64{} var annotations map[string]string for _, pod := range pods { if len(annotations) != len(pod.ObjectMeta.Annotations) { @@ -61,10 +60,10 @@ func getPods(clientSet kubernetes.Interface, if len(containerResources) < 1 { for i, container := range pod.Spec.Containers { for name, requests := range container.Resources.Requests { - containerResources[strings.Join([]string{strconv.Itoa(i), name.String(), "requests"}, resourceDelimiter)] = fmt.Sprint(requests.MilliValue()) + containerResources[strings.Join([]string{strconv.Itoa(i), name.String(), "requests"}, resourceDelimiter)] = requests.MilliValue() } for name, limits := range container.Resources.Limits { - containerResources[strings.Join([]string{strconv.Itoa(i), name.String(), "limits"}, resourceDelimiter)] = fmt.Sprint(limits.MilliValue()) + containerResources[strings.Join([]string{strconv.Itoa(i), name.String(), "limits"}, resourceDelimiter)] = limits.MilliValue() } } } @@ -131,6 +130,5 @@ func getCurrentState( // getDesiredState returns the desired state for an objective. func getDesiredState(objective common.Intent) common.State { - klog.Infof("getting a desired state %v for Planner to create a plan from an objective %v: ", common.State{Intent: objective}, objective) return common.State{Intent: objective} } diff --git a/pkg/controller/state_helper_test.go b/pkg/controller/state_helper_test.go index d7f13f8..272dc3d 100644 --- a/pkg/controller/state_helper_test.go +++ b/pkg/controller/state_helper_test.go @@ -108,7 +108,7 @@ func k8sShim(podSet runtime.Object, pods []*coreV1.Pod) (kubernetes.Interface, v // Tests for success. // TestGetPodsForSuccess tests for success. -func TestGetPodsForSuccess(t *testing.T) { +func TestGetPodsForSuccess(_ *testing.T) { deployment, pods := createDummies("Deployment", map[string]string{"foo": "bar"}, 1) podErrors := map[string][]common.PodError{} client, informer := k8sShim(deployment, pods) @@ -116,7 +116,7 @@ func TestGetPodsForSuccess(t *testing.T) { } // TestGetDesiredStateForSuccess test for success. -func TestGetDesiredStateForSuccess(t *testing.T) { +func TestGetDesiredStateForSuccess(_ *testing.T) { objective := common.Intent{ Objectives: map[string]float64{"P99compliance": 100.0}, } @@ -156,7 +156,7 @@ func TestGetPodsForSanity(t *testing.T) { if _, ok := annotations["sample-annotation"]; !ok { t.Errorf("Annotation should have been set - was: %v", annotations) } - if len(resources) != 4 || resources["0_foo_requests"] != "2000" || resources["1_bar_limits"] != "1048576000000" { + if len(resources) != 4 || resources["0_foo_requests"] != 2000 || resources["1_bar_limits"] != 1048576000000 { t.Errorf("Expected 4 resoure entries, one with foo another with bar resource requests & limits - was: %+v", resources) } if hosts[0] != "node0" { diff --git a/pkg/controller/telemetry.go b/pkg/controller/telemetry.go index da3fbd2..1b1b5f1 100644 --- a/pkg/controller/telemetry.go +++ b/pkg/controller/telemetry.go @@ -7,13 +7,16 @@ import ( "net/http" "net/url" "strings" + "time" "k8s.io/klog/v2" ) // init makes sure we use the "real" http client when not testing. func init() { - Client = &http.Client{} + Client = &http.Client{ + Timeout: 5 * time.Second, + } } // getHostTelemetry returns (optional) information for a set of hosts. 
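The hunks above (types.go, state_helper.go and their tests) move resource bookkeeping from quantity strings to int64 milli-values. As a rough illustration of the unit convention behind the new map[string]int64 representation — a minimal standalone sketch, not part of this patch, assuming k8s.io/apimachinery's resource package and reusing the index_resource_kind key layout seen in state_helper.go:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Requests/limits are flattened into int64 milli-values:
	// "2" CPUs -> 2000, "100Mi" of memory -> 104857600000.
	resources := map[string]int64{
		"0_cpu_requests":  resource.MustParse("2").MilliValue(),
		"0_memory_limits": resource.MustParse("100Mi").MilliValue(),
	}
	fmt.Println(resources["0_cpu_requests"])  // 2000
	fmt.Println(resources["0_memory_limits"]) // 104857600000
}

With values stored this way, comparisons such as LessResources can operate directly on integers instead of reparsing quantity strings on every call.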
diff --git a/pkg/controller/telemetry_test.go b/pkg/controller/telemetry_test.go index e410988..77685d5 100644 --- a/pkg/controller/telemetry_test.go +++ b/pkg/controller/telemetry_test.go @@ -12,7 +12,7 @@ func init() { // Tests for success. // TestGetHostTelemetryForSuccess tests for success. -func TestGetHostTelemetryForSuccess(t *testing.T) { +func TestGetHostTelemetryForSuccess(_ *testing.T) { responseBody := "{\"data\": {\"result\": [{\"metric\": {\"exported_instance\": \"node0\"}, \"value\": [1645019125.000, 25.0]}]}}" MockResponse(responseBody, 200) query := "avg(collectd_cpu_percent{exported_instance=~\"%s\"})by(exported_instance)" diff --git a/pkg/controller/tracer_test.go b/pkg/controller/tracer_test.go index ace78e8..7de57a9 100644 --- a/pkg/controller/tracer_test.go +++ b/pkg/controller/tracer_test.go @@ -77,7 +77,7 @@ func TestTraceEventForSanity(t1 *testing.T) { {name: "tc2", client: nil, args: args{plan: []planner.Action{}}}, } for _, tt := range tests { - t1.Run(tt.name, func(t1 *testing.T) { + t1.Run(tt.name, func(_ *testing.T) { t := MongoTracer{ client: tt.client, } @@ -89,7 +89,6 @@ func TestTraceEventForSanity(t1 *testing.T) { // TestNewMongoTracerForSanity tests for failure. func TestNewMongoTracerForSanity(t *testing.T) { mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock)) - defer mt.Close() mt.Client, _ = mongo.Connect(context.TODO(), options.Client().ApplyURI(MongoURIForTesting)) tests := []struct { name string diff --git a/pkg/generated/clientset/versioned/clientset.go b/pkg/generated/clientset/versioned/clientset.go index c1856b2..b940b1f 100644 --- a/pkg/generated/clientset/versioned/clientset.go +++ b/pkg/generated/clientset/versioned/clientset.go @@ -20,9 +20,6 @@ import ( "net/http" idov1alpha1 "github.com/intel/intent-driven-orchestration/pkg/generated/clientset/versioned/typed/intents/v1alpha1" - - "k8s.io/klog/v2" - discovery "k8s.io/client-go/discovery" rest "k8s.io/client-go/rest" flowcontrol "k8s.io/client-go/util/flowcontrol" @@ -33,8 +30,7 @@ type Interface interface { IdoV1alpha1() idov1alpha1.IdoV1alpha1Interface } -// Clientset contains the clients for groups. Each group has exactly one -// version included in a Clientset. +// Clientset contains the clients for groups. type Clientset struct { *discovery.DiscoveryClient idoV1alpha1 *idov1alpha1.IdoV1alpha1Client @@ -106,7 +102,7 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, func NewForConfigOrDie(c *rest.Config) *Clientset { cs, err := NewForConfig(c) if err != nil { - klog.Fatal(err) + panic(err) } return cs } diff --git a/pkg/generated/clientset/versioned/doc.go b/pkg/generated/clientset/versioned/doc.go deleted file mode 100644 index 97628cd..0000000 --- a/pkg/generated/clientset/versioned/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -/* -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated clientset. 
-package versioned diff --git a/pkg/generated/clientset/versioned/fake/clientset_generated.go b/pkg/generated/clientset/versioned/fake/clientset_generated.go index 59faaf3..3720bfa 100644 --- a/pkg/generated/clientset/versioned/fake/clientset_generated.go +++ b/pkg/generated/clientset/versioned/fake/clientset_generated.go @@ -19,9 +19,6 @@ import ( clientset "github.com/intel/intent-driven-orchestration/pkg/generated/clientset/versioned" idov1alpha1 "github.com/intel/intent-driven-orchestration/pkg/generated/clientset/versioned/typed/intents/v1alpha1" fakeidov1alpha1 "github.com/intel/intent-driven-orchestration/pkg/generated/clientset/versioned/typed/intents/v1alpha1/fake" - - "k8s.io/klog/v2" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/discovery" @@ -37,7 +34,7 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset { o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) for _, obj := range objects { if err := o.Add(obj); err != nil { - klog.Fatal(err) + panic(err) } } diff --git a/pkg/generated/clientset/versioned/fake/register.go b/pkg/generated/clientset/versioned/fake/register.go index 4dbd6fa..4904b13 100644 --- a/pkg/generated/clientset/versioned/fake/register.go +++ b/pkg/generated/clientset/versioned/fake/register.go @@ -17,7 +17,6 @@ package fake import ( idov1alpha1 "github.com/intel/intent-driven-orchestration/pkg/api/intents/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" diff --git a/pkg/generated/clientset/versioned/scheme/register.go b/pkg/generated/clientset/versioned/scheme/register.go index 0bee7dc..5b56e58 100644 --- a/pkg/generated/clientset/versioned/scheme/register.go +++ b/pkg/generated/clientset/versioned/scheme/register.go @@ -17,7 +17,6 @@ package scheme import ( idov1alpha1 "github.com/intel/intent-driven-orchestration/pkg/api/intents/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" diff --git a/pkg/generated/clientset/versioned/typed/intents/v1alpha1/fake/fake_intent.go b/pkg/generated/clientset/versioned/typed/intents/v1alpha1/fake/fake_intent.go index 0ab6c0b..e55aa4f 100644 --- a/pkg/generated/clientset/versioned/typed/intents/v1alpha1/fake/fake_intent.go +++ b/pkg/generated/clientset/versioned/typed/intents/v1alpha1/fake/fake_intent.go @@ -19,10 +19,8 @@ import ( "context" v1alpha1 "github.com/intel/intent-driven-orchestration/pkg/api/intents/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" testing "k8s.io/client-go/testing" @@ -34,9 +32,9 @@ type FakeIntents struct { ns string } -var intentsResource = schema.GroupVersionResource{Group: "ido.intel.com", Version: "v1alpha1", Resource: "intents"} +var intentsResource = v1alpha1.SchemeGroupVersion.WithResource("intents") -var intentsKind = schema.GroupVersionKind{Group: "ido.intel.com", Version: "v1alpha1", Kind: "Intent"} +var intentsKind = v1alpha1.SchemeGroupVersion.WithKind("Intent") // Get takes name of the intent, and returns the corresponding intent object, and an error if there is any. 
func (c *FakeIntents) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Intent, err error) { diff --git a/pkg/generated/clientset/versioned/typed/intents/v1alpha1/fake/fake_intents_client.go b/pkg/generated/clientset/versioned/typed/intents/v1alpha1/fake/fake_intents_client.go index b2450ea..190f930 100644 --- a/pkg/generated/clientset/versioned/typed/intents/v1alpha1/fake/fake_intents_client.go +++ b/pkg/generated/clientset/versioned/typed/intents/v1alpha1/fake/fake_intents_client.go @@ -17,7 +17,6 @@ package fake import ( v1alpha1 "github.com/intel/intent-driven-orchestration/pkg/generated/clientset/versioned/typed/intents/v1alpha1" - rest "k8s.io/client-go/rest" testing "k8s.io/client-go/testing" ) diff --git a/pkg/generated/clientset/versioned/typed/intents/v1alpha1/fake/fake_kpiprofile.go b/pkg/generated/clientset/versioned/typed/intents/v1alpha1/fake/fake_kpiprofile.go index 4ce2d13..2b75aa1 100644 --- a/pkg/generated/clientset/versioned/typed/intents/v1alpha1/fake/fake_kpiprofile.go +++ b/pkg/generated/clientset/versioned/typed/intents/v1alpha1/fake/fake_kpiprofile.go @@ -19,10 +19,8 @@ import ( "context" v1alpha1 "github.com/intel/intent-driven-orchestration/pkg/api/intents/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" testing "k8s.io/client-go/testing" @@ -34,9 +32,9 @@ type FakeKPIProfiles struct { ns string } -var kpiprofilesResource = schema.GroupVersionResource{Group: "ido.intel.com", Version: "v1alpha1", Resource: "kpiprofiles"} +var kpiprofilesResource = v1alpha1.SchemeGroupVersion.WithResource("kpiprofiles") -var kpiprofilesKind = schema.GroupVersionKind{Group: "ido.intel.com", Version: "v1alpha1", Kind: "KPIProfile"} +var kpiprofilesKind = v1alpha1.SchemeGroupVersion.WithKind("KPIProfile") // Get takes name of the kPIProfile, and returns the corresponding kPIProfile object, and an error if there is any. 
func (c *FakeKPIProfiles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.KPIProfile, err error) { diff --git a/pkg/generated/clientset/versioned/typed/intents/v1alpha1/intent.go b/pkg/generated/clientset/versioned/typed/intents/v1alpha1/intent.go index ffea406..db7e5ea 100644 --- a/pkg/generated/clientset/versioned/typed/intents/v1alpha1/intent.go +++ b/pkg/generated/clientset/versioned/typed/intents/v1alpha1/intent.go @@ -21,7 +21,6 @@ import ( v1alpha1 "github.com/intel/intent-driven-orchestration/pkg/api/intents/v1alpha1" scheme "github.com/intel/intent-driven-orchestration/pkg/generated/clientset/versioned/scheme" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" diff --git a/pkg/generated/clientset/versioned/typed/intents/v1alpha1/intents_client.go b/pkg/generated/clientset/versioned/typed/intents/v1alpha1/intents_client.go index 27b5ed6..d4e6fe3 100644 --- a/pkg/generated/clientset/versioned/typed/intents/v1alpha1/intents_client.go +++ b/pkg/generated/clientset/versioned/typed/intents/v1alpha1/intents_client.go @@ -20,9 +20,6 @@ import ( v1alpha1 "github.com/intel/intent-driven-orchestration/pkg/api/intents/v1alpha1" "github.com/intel/intent-driven-orchestration/pkg/generated/clientset/versioned/scheme" - - "k8s.io/klog/v2" - rest "k8s.io/client-go/rest" ) @@ -79,7 +76,7 @@ func NewForConfigAndClient(c *rest.Config, h *http.Client) (*IdoV1alpha1Client, func NewForConfigOrDie(c *rest.Config) *IdoV1alpha1Client { client, err := NewForConfig(c) if err != nil { - klog.Fatal(err) + panic(err) } return client } diff --git a/pkg/generated/clientset/versioned/typed/intents/v1alpha1/kpiprofile.go b/pkg/generated/clientset/versioned/typed/intents/v1alpha1/kpiprofile.go index d2f6e1f..1912cbf 100644 --- a/pkg/generated/clientset/versioned/typed/intents/v1alpha1/kpiprofile.go +++ b/pkg/generated/clientset/versioned/typed/intents/v1alpha1/kpiprofile.go @@ -21,7 +21,6 @@ import ( v1alpha1 "github.com/intel/intent-driven-orchestration/pkg/api/intents/v1alpha1" scheme "github.com/intel/intent-driven-orchestration/pkg/generated/clientset/versioned/scheme" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" diff --git a/pkg/generated/informers/externalversions/factory.go b/pkg/generated/informers/externalversions/factory.go index 9eb915c..f68f936 100644 --- a/pkg/generated/informers/externalversions/factory.go +++ b/pkg/generated/informers/externalversions/factory.go @@ -23,7 +23,6 @@ import ( versioned "github.com/intel/intent-driven-orchestration/pkg/generated/clientset/versioned" intents "github.com/intel/intent-driven-orchestration/pkg/generated/informers/externalversions/intents" internalinterfaces "github.com/intel/intent-driven-orchestration/pkg/generated/informers/externalversions/internalinterfaces" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" @@ -40,11 +39,17 @@ type sharedInformerFactory struct { lock sync.Mutex defaultResync time.Duration customResync map[reflect.Type]time.Duration + transform cache.TransformFunc informers map[reflect.Type]cache.SharedIndexInformer // startedInformers is used for tracking which informers have been started. // This allows Start() to be called multiple times safely. startedInformers map[reflect.Type]bool + // wg tracks how many goroutines were started. 
+ wg sync.WaitGroup + // shuttingDown is true when Shutdown has been called. It may still be running + // because it needs to wait for goroutines. + shuttingDown bool } // WithCustomResyncConfig sets a custom resync period for the specified informer types. @@ -73,6 +78,14 @@ func WithNamespace(namespace string) SharedInformerOption { } } +// WithTransform sets a transform on all informers. +func WithTransform(transform cache.TransformFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.transform = transform + return factory + } +} + // NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { return NewSharedInformerFactoryWithOptions(client, defaultResync) @@ -105,20 +118,39 @@ func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResy return factory } -// Start initializes all requested informers. func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { f.lock.Lock() defer f.lock.Unlock() + if f.shuttingDown { + return + } + for informerType, informer := range f.informers { if !f.startedInformers[informerType] { - go informer.Run(stopCh) + f.wg.Add(1) + // We need a new variable in each loop iteration, + // otherwise the goroutine would use the loop variable + // and that keeps changing. + informer := informer + go func() { + defer f.wg.Done() + informer.Run(stopCh) + }() f.startedInformers[informerType] = true } } } -// WaitForCacheSync waits for all started informers' cache were synced. +func (f *sharedInformerFactory) Shutdown() { + f.lock.Lock() + f.shuttingDown = true + f.lock.Unlock() + + // Will return immediately if there is nothing to wait for. + f.wg.Wait() +} + func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { informers := func() map[reflect.Type]cache.SharedIndexInformer { f.lock.Lock() @@ -140,7 +172,7 @@ func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[ref return res } -// InternalInformerFor returns the SharedIndexInformer for obj using an internal +// InformerFor returns the SharedIndexInformer for obj using an internal // client. func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer { f.lock.Lock() @@ -158,6 +190,7 @@ func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internal } informer = newFunc(f.client, resyncPeriod) + informer.SetTransform(f.transform) f.informers[informerType] = informer return informer @@ -165,11 +198,58 @@ func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internal // SharedInformerFactory provides shared informers for resources in all known // API group versions. +// +// It is typically used like this: +// +// ctx, cancel := context.Background() +// defer cancel() +// factory := NewSharedInformerFactory(client, resyncPeriod) +// defer factory.WaitForStop() // Returns immediately if nothing was started. +// genericInformer := factory.ForResource(resource) +// typedInformer := factory.SomeAPIGroup().V1().SomeType() +// factory.Start(ctx.Done()) // Start processing these informers. 
+// synced := factory.WaitForCacheSync(ctx.Done()) +// for v, ok := range synced { +// if !ok { +// fmt.Fprintf(os.Stderr, "caches failed to sync: %v", v) +// return +// } +// } +// +// // Creating informers can also be created after Start, but then +// // Start must be called again: +// anotherGenericInformer := factory.ForResource(resource) +// factory.Start(ctx.Done()) type SharedInformerFactory interface { internalinterfaces.SharedInformerFactory - ForResource(resource schema.GroupVersionResource) (GenericInformer, error) + + // Start initializes all requested informers. They are handled in goroutines + // which run until the stop channel gets closed. + Start(stopCh <-chan struct{}) + + // Shutdown marks a factory as shutting down. At that point no new + // informers can be started anymore and Start will return without + // doing anything. + // + // In addition, Shutdown blocks until all goroutines have terminated. For that + // to happen, the close channel(s) that they were started with must be closed, + // either before Shutdown gets called or while it is waiting. + // + // Shutdown may be called multiple times, even concurrently. All such calls will + // block until all goroutines have terminated. + Shutdown() + + // WaitForCacheSync blocks until all started informers' caches were synced + // or the stop channel gets closed. WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool + // ForResource gives generic access to a shared informer of the matching type. + ForResource(resource schema.GroupVersionResource) (GenericInformer, error) + + // InformerFor returns the SharedIndexInformer for obj using an internal + // client. + InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer + Ido() intents.Interface } diff --git a/pkg/generated/informers/externalversions/generic.go b/pkg/generated/informers/externalversions/generic.go index 0522dcd..15fcb79 100644 --- a/pkg/generated/informers/externalversions/generic.go +++ b/pkg/generated/informers/externalversions/generic.go @@ -19,7 +19,6 @@ import ( "fmt" v1alpha1 "github.com/intel/intent-driven-orchestration/pkg/api/intents/v1alpha1" - schema "k8s.io/apimachinery/pkg/runtime/schema" cache "k8s.io/client-go/tools/cache" ) diff --git a/pkg/generated/informers/externalversions/intents/v1alpha1/intent.go b/pkg/generated/informers/externalversions/intents/v1alpha1/intent.go index 9877a07..6861369 100644 --- a/pkg/generated/informers/externalversions/intents/v1alpha1/intent.go +++ b/pkg/generated/informers/externalversions/intents/v1alpha1/intent.go @@ -23,7 +23,6 @@ import ( versioned "github.com/intel/intent-driven-orchestration/pkg/generated/clientset/versioned" internalinterfaces "github.com/intel/intent-driven-orchestration/pkg/generated/informers/externalversions/internalinterfaces" v1alpha1 "github.com/intel/intent-driven-orchestration/pkg/generated/listers/intents/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" diff --git a/pkg/generated/informers/externalversions/intents/v1alpha1/kpiprofile.go b/pkg/generated/informers/externalversions/intents/v1alpha1/kpiprofile.go index 7e93d4e..f474294 100644 --- a/pkg/generated/informers/externalversions/intents/v1alpha1/kpiprofile.go +++ b/pkg/generated/informers/externalversions/intents/v1alpha1/kpiprofile.go @@ -23,7 +23,6 @@ import ( versioned "github.com/intel/intent-driven-orchestration/pkg/generated/clientset/versioned" internalinterfaces 
"github.com/intel/intent-driven-orchestration/pkg/generated/informers/externalversions/internalinterfaces" v1alpha1 "github.com/intel/intent-driven-orchestration/pkg/generated/listers/intents/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" diff --git a/pkg/generated/informers/externalversions/internalinterfaces/factory_interfaces.go b/pkg/generated/informers/externalversions/internalinterfaces/factory_interfaces.go index 26b641f..af57e35 100644 --- a/pkg/generated/informers/externalversions/internalinterfaces/factory_interfaces.go +++ b/pkg/generated/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -19,7 +19,6 @@ import ( time "time" versioned "github.com/intel/intent-driven-orchestration/pkg/generated/clientset/versioned" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" cache "k8s.io/client-go/tools/cache" diff --git a/pkg/generated/listers/intents/v1alpha1/intent.go b/pkg/generated/listers/intents/v1alpha1/intent.go index f1a9a71..e25365d 100644 --- a/pkg/generated/listers/intents/v1alpha1/intent.go +++ b/pkg/generated/listers/intents/v1alpha1/intent.go @@ -17,7 +17,6 @@ package v1alpha1 import ( v1alpha1 "github.com/intel/intent-driven-orchestration/pkg/api/intents/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" diff --git a/pkg/generated/listers/intents/v1alpha1/kpiprofile.go b/pkg/generated/listers/intents/v1alpha1/kpiprofile.go index 54172e4..c607342 100644 --- a/pkg/generated/listers/intents/v1alpha1/kpiprofile.go +++ b/pkg/generated/listers/intents/v1alpha1/kpiprofile.go @@ -17,7 +17,6 @@ package v1alpha1 import ( v1alpha1 "github.com/intel/intent-driven-orchestration/pkg/api/intents/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" diff --git a/pkg/planner/actuators/platform/analyze.py b/pkg/planner/actuators/platform/analyze.py index e27993e..62a1499 100644 --- a/pkg/planner/actuators/platform/analyze.py +++ b/pkg/planner/actuators/platform/analyze.py @@ -125,7 +125,7 @@ def store_data(data, args): 'group': 'rdt', 'data': data, 'static': False, - 'timestamp': datetime.datetime.now()} + 'timestamp': datetime.datetime.utcnow()} try: coll.insert_one(doc) except errors.ExecutionTimeout as err: diff --git a/pkg/planner/actuators/platform/rdt.go b/pkg/planner/actuators/platform/rdt.go index 618f78a..f0f233d 100644 --- a/pkg/planner/actuators/platform/rdt.go +++ b/pkg/planner/actuators/platform/rdt.go @@ -33,11 +33,11 @@ type RdtConfig struct { Analytics string `json:"analytics_script"` Prediction string `json:"prediction_script"` Options []string `json:"options"` - Port int `json:"port"` Endpoint string `json:"endpoint"` - MongoEndpoint string `json:"mongo_endpoint"` + Port int `json:"port"` PluginManagerEndpoint string `json:"plugin_manager_endpoint"` PluginManagerPort int `json:"plugin_manager_port"` + MongoEndpoint string `json:"mongo_endpoint"` } // RdtActuator represents the actual RDT actuator. @@ -74,9 +74,13 @@ type responseBody struct { // doQuery calls the prediction function. 
func doQuery(body requestBody) float64 { tmp, _ := json.Marshal(body) - resp, err := http.Post("http://localhost:8000", "application/json", bytes.NewBuffer(tmp)) + client := http.Client{ + Timeout: 5 * time.Second, + } + resp, err := client.Post("http://localhost:8000", "application/json", bytes.NewBuffer(tmp)) if err != nil { klog.Errorf("Could not reach prediction endpoint: %s.", err) + return -1.0 } defer func(Body io.ReadCloser) { err := Body.Close() @@ -87,6 +91,7 @@ func doQuery(body requestBody) float64 { respBody, err := io.ReadAll(resp.Body) if err != nil { klog.Errorf("Error to read the body: %s", err) + return -1.0 } var res responseBody err = json.Unmarshal(respBody, &res) diff --git a/pkg/planner/actuators/platform/rdt_test.go b/pkg/planner/actuators/platform/rdt_test.go index 2463d8f..ce21034 100644 --- a/pkg/planner/actuators/platform/rdt_test.go +++ b/pkg/planner/actuators/platform/rdt_test.go @@ -63,7 +63,7 @@ func (f *rdtActuatorFixture) newRdtTestActuator() *RdtActuator { // Tests for success. // TestRdtNextStateForSuccess tests for success. -func TestRdtNextStateForSuccess(t *testing.T) { +func TestRdtNextStateForSuccess(_ *testing.T) { f := newRdtActuatorFixture() actuator := f.newRdtTestActuator() @@ -95,7 +95,7 @@ func TestRdtNextStateForSuccess(t *testing.T) { } // TestRdtPerformForSuccess tests for success. -func TestRdtPerformForSuccess(t *testing.T) { +func TestRdtPerformForSuccess(_ *testing.T) { f := newRdtActuatorFixture() f.objects = []runtime.Object{ &coreV1.Pod{ @@ -115,7 +115,7 @@ func TestRdtPerformForSuccess(t *testing.T) { } // TestRdtEffectForSuccess tests for success. -func TestRdtEffectForSuccess(t *testing.T) { +func TestRdtEffectForSuccess(_ *testing.T) { f := newRdtActuatorFixture() actuator := f.newRdtTestActuator() state := common.State{ @@ -162,7 +162,7 @@ func TestRdtPerformForFailure(t *testing.T) { } // TestRdtEffectForFailure tests for failure. -func TestRdtEffectForFailure(t *testing.T) { +func TestRdtEffectForFailure(_ *testing.T) { f := newRdtActuatorFixture() // not much to do here, as this will "just" trigger a python script. state := common.State{ @@ -413,7 +413,7 @@ func TestRdtPerformForSanity(t *testing.T) { } // TestRdtEffectForSanity tests for sanity. -func TestRdtEffectForSanity(t *testing.T) { +func TestRdtEffectForSanity(_ *testing.T) { f := newRdtActuatorFixture() // not much to do here, as this will "just" trigger a python script. state := common.State{ diff --git a/pkg/planner/actuators/scaling/analytics/cpu_rightsizing.py b/pkg/planner/actuators/scaling/analytics/cpu_rightsizing.py index 7073e19..2c563a5 100644 --- a/pkg/planner/actuators/scaling/analytics/cpu_rightsizing.py +++ b/pkg/planner/actuators/scaling/analytics/cpu_rightsizing.py @@ -42,6 +42,7 @@ def _get_cpu(resources): tmp = key.split('_') if int(tmp[0]) > last and tmp[1] == 'cpu' and tmp[2] == 'limits': res = int(val) / 1000 + last = int(tmp[0]) return res @@ -86,10 +87,16 @@ def get_data(args): 'cpus')], inplace=True) - # Keeping first will lead to the latest value being taken. - data.drop_duplicates(subset=["cpus"], inplace=True, keep="first") + if data.empty: + return None data = data[(data[args.latency] != -1.0)] - data.dropna(inplace=True) + data = data.dropna() + + # keep the 10% of lowest values. 
+ n_items = round(len(data)*0.1) + data = data.groupby(['cpus']).apply( + lambda x: x.nsmallest(n=n_items, + columns=args.latency)).reset_index(drop=True) return data @@ -108,7 +115,7 @@ def store_result(popt, latency_range = (min(data[args.latency]), max(data[args.latency])) cpu_range = (min(data["cpus"]), max(data["cpus"])) training_features = ["cpus"] - timestamp = datetime.datetime.now() + timestamp = datetime.datetime.utcnow() doc = {"name": args.name, "profileName": args.latency, "group": "vertical_scaling", @@ -144,7 +151,8 @@ def analyse(data, args): try: popt, _ = optimize.curve_fit(latency_func, data["cpus"], - data[args.latency], method="trf") + data[args.latency], + bounds=([0, 0.2, 0], [np.inf, 5, np.inf])) except (ValueError, RuntimeError) as err: logging.warning("Could not curve fit: %s.", err) return None, None @@ -161,8 +169,7 @@ def plot_results(data, popt, args): """ Visualize the results and return base6 encoded img. """ - fig = plt.Figure(figsize=FIG_SIZE) - axes = fig.add_subplot() + fig, axes = plt.subplots(1, 1, figsize=FIG_SIZE) axes.scatter(data["cpus"], data[args.latency], marker="o", color="black", alpha=0.5) @@ -188,6 +195,10 @@ def main(args): Main logic. """ data = get_data(args) + if data is None: + logging.info("Not enough data collected for: %s - %s.", + args.name, args.latency) + return popt, data = analyse(data, args) if popt is not None: img = plot_results(data, popt, args) @@ -231,7 +242,7 @@ def from_args(args): help="Name of the objective.") parser.add_argument("latency", type=str, help="Name of the latency objective.") - parser.add_argument("--min_vals", type=int, default=10, + parser.add_argument("--min_vals", type=int, default=20, help="Amount of features we want to collect before " "even training a model") parser.add_argument("--max_vals", type=int, default=500, diff --git a/pkg/planner/actuators/scaling/analytics/horizontal_scaling.py b/pkg/planner/actuators/scaling/analytics/horizontal_scaling.py index e729cd8..9e05f71 100644 --- a/pkg/planner/actuators/scaling/analytics/horizontal_scaling.py +++ b/pkg/planner/actuators/scaling/analytics/horizontal_scaling.py @@ -32,15 +32,13 @@ logging.basicConfig(format=FORMAT, level=logging.INFO) -def latency_func(data, p_0, p_1, p_2, p_3, p_4): +def latency_func(data, p_0, p_1, p_2, p_3): """ Latency function relating throughput, replicas and latency. """ tput = data[0, :] n_pods = data[1, :] - # 1st part: higher traffic --> higher latency. - # 2nd part: more replicas --> lower latency. 
- return p_0 + p_1 * np.exp(p_2 * tput) + p_3 * np.exp(-p_4 * n_pods) + return (p_0 * np.exp(p_1 * tput)) / (p_2 * np.exp(p_3 * tput * n_pods)) def get_data(args): @@ -94,7 +92,7 @@ def store_result(popt, (max(data[args.throughput]) - scale[1]) / scale[0]) replica_range = (min(data["replicas"]), max(data["replicas"])) training_features = [args.throughput, "replicas"] - timestamp = datetime.datetime.now() + timestamp = datetime.datetime.utcnow() doc = {"name": args.name, "profileName": args.latency, "group": "scaling", @@ -150,13 +148,13 @@ def analyse(data, args): tmp = [df_0[args.throughput], df_0["replicas"]] try: popt, _ = optimize.curve_fit(latency_func, tmp, df_0[args.latency], - bounds=(0, [np.inf, np.inf, 2, np.inf, 2])) + bounds=(0, np.inf)) except (RuntimeError, ValueError) as err: logging.warning("Could not curve fit: %s.", err) return None, None, None # check if we have a proper model - if popt[2] > 0.0 and popt.sum() != 1.0: + if all(x > 0.0 for x in popt): return popt, df_0, scale logging.warning("Found inverted plane for: %s:%s (%s) - will discard.", args.name, args.latency, popt) @@ -167,8 +165,9 @@ def plot_results(data, popt, scale, args): """ Visualize the results and return base6 encoded img. """ - fig = plt.Figure(figsize=FIG_SIZE) - axes = fig.add_subplot(projection="3d") + + fig, axes = plt.subplots(1, 1, + subplot_kw={'projection': '3d'}, figsize=FIG_SIZE) tput_min, tput_max = data[args.throughput].min(), data[ args.throughput].max() diff --git a/pkg/planner/actuators/scaling/cpu_scale.go b/pkg/planner/actuators/scaling/cpu_scale.go index b54ad64..5ad4f4b 100644 --- a/pkg/planner/actuators/scaling/cpu_scale.go +++ b/pkg/planner/actuators/scaling/cpu_scale.go @@ -44,12 +44,12 @@ type CPUScaleConfig struct { CPUSafeGuardFactor float64 `json:"cpu_safeguard_factor"` MaxProActiveCPU int64 `json:"max_proactive_cpu"` ProActiveLatencyPercentage float64 `json:"proactive_latency_percentage"` - Port int `json:"port"` - Endpoint string `json:"endpoint"` - MongoEndpoint string `json:"mongo_endpoint"` LookBack int `json:"look_back"` + Endpoint string `json:"endpoint"` + Port int `json:"port"` PluginManagerEndpoint string `json:"plugin_manager_endpoint"` PluginManagerPort int `json:"plugin_manager_port"` + MongoEndpoint string `json:"mongo_endpoint"` } // CPUScaleEffect describes the data that is stored in the knowledge base. @@ -100,12 +100,11 @@ func getResourceValues(state *common.State) int64 { return 0 } if items[1] == "cpu" && index >= lastIndex { - tmp := resource.MustParse(value) if items[2] == "requests" { - cpuRequest = tmp.Value() + cpuRequest = value } else if items[2] == "limits" { - cpuLimit = tmp.Value() - cpuRequest = tmp.Value() + cpuLimit = value + cpuRequest = value } lastIndex = index } @@ -231,8 +230,12 @@ func (cs CPUScaleActuator) findState( index = tmp } } - newState.Resources[strings.Join([]string{strconv.Itoa(index), "cpu", "limits"}, delimiter)] = strconv.FormatInt(newCPUValue, 10) - newState.Resources[strings.Join([]string{strconv.Itoa(index), "cpu", "requests"}, delimiter)] = strconv.FormatInt(newCPUValue, 10) + // resources can be nil so need a quick check here. 
+ if newState.Resources == nil { + newState.Resources = make(map[string]int64) + } + newState.Resources[strings.Join([]string{strconv.Itoa(index), "cpu", "limits"}, delimiter)] = newCPUValue + newState.Resources[strings.Join([]string{strconv.Itoa(index), "cpu", "requests"}, delimiter)] = newCPUValue newState.CurrentData[cs.Name()] = map[string]float64{cs.Name(): 1} return newState, newCPUValue, nil } @@ -282,8 +285,8 @@ func (cs CPUScaleActuator) proactiveScaling( actionPlan := []planner.Action{ { Name: cs.Name(), - Properties: map[string]int32{ - "value": int32(newCPULim), + Properties: map[string]int64{ + "value": newCPULim, "proactive": 1, }, }, @@ -300,8 +303,11 @@ func (cs CPUScaleActuator) NextState(state *common.State, goal *common.State, if _, ok := state.CurrentData[cs.Name()]; ok { return nil, nil, nil } - - // let's find a follow-up state + // we don't need to do anything if there are no PODs. + if len(state.CurrentPods) == 0 { + return nil, nil, nil + } + // let's find a follow-up state. currentValue := getResourceValues(state) newState, newValue, err := cs.findState(state, goal, currentValue, profiles) if newValue != 0 && err == nil { @@ -310,7 +316,7 @@ func (cs CPUScaleActuator) NextState(state *common.State, goal *common.State, utility *= 1.0 / goal.Intent.Priority } return []common.State{newState}, []float64{utility}, []planner.Action{ - {Name: cs.Name(), Properties: map[string]int32{"value": int32(newValue)}}, + {Name: cs.Name(), Properties: map[string]int64{"value": newValue}}, } } // if the actuator is allowed to proactively scale - let's try that. @@ -327,7 +333,7 @@ func (cs CPUScaleActuator) NextState(state *common.State, goal *common.State, func (cs CPUScaleActuator) Perform(state *common.State, plan []planner.Action) { for _, item := range plan { if item.Name == actionName { - a := item.Properties.(map[string]int32) + a := item.Properties.(map[string]int64) if val, ok := a["value"]; ok { cs.setResourceValues(state, int(val)) } @@ -337,6 +343,11 @@ func (cs CPUScaleActuator) Perform(state *common.State, plan []planner.Action) { } func (cs CPUScaleActuator) Effect(state *common.State, profiles map[string]common.Profile) { + if cs.cfg.Script == "None" { + klog.V(2).Infof("Effect calculation is disabled - will not run analytics.") + return + } + var latencyObjectives []string for k := range state.Intent.Objectives { if profiles[k].ProfileType == common.ProfileTypeFromText("latency") { diff --git a/pkg/planner/actuators/scaling/cpu_scale_test.go b/pkg/planner/actuators/scaling/cpu_scale_test.go index 88248b0..40905b2 100644 --- a/pkg/planner/actuators/scaling/cpu_scale_test.go +++ b/pkg/planner/actuators/scaling/cpu_scale_test.go @@ -166,12 +166,12 @@ func TestCPUScalePerformForSuccess(t *testing.T) { s0 := common.State{ Intent: common.Intent{TargetKey: "default/my-deployment", TargetKind: "Deployment"}, CurrentPods: map[string]common.PodState{"pod_0": {}}, - Resources: map[string]string{ - "1_cpu_limits": "100", + Resources: map[string]int64{ + "1_cpu_limits": 100, }, } s0.Intent.TargetKind = "Deployment" - plan := []planner.Action{{Name: actionName, Properties: map[string]int32{"value": 2000}}} + plan := []planner.Action{{Name: actionName, Properties: map[string]int64{"value": 2000}}} actuator.Perform(&s0, plan) } @@ -185,8 +185,8 @@ func TestCPUScaleEffectForSuccess(t *testing.T) { } // TestCPUScaleGetResourcesForSuccess tests for success. 
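A note on the recurring change in the tests below: the generic Resources map moves from string quantities to plain int64 values (see the getResourceValues/findState hunks above). Keys follow the <containerIndex>_<resource>_<requests|limits> pattern and, judging by the framework test further down, values are treated as milli-CPU. A hypothetical state in the new convention:

    s0 := common.State{
        Resources: map[string]int64{
            "0_cpu_requests": 200, // container 0 requests 200m CPU
            "0_cpu_limits":   400, // container 0 is capped at 400m CPU
            "1_cpu_limits":   100, // highest container index wins in getResourceValues
        },
    }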
-func TestCPUScaleGetResourcesForSuccess(t *testing.T) { - s0 := common.State{Resources: map[string]string{}} +func TestCPUScaleGetResourcesForSuccess(_ *testing.T) { + s0 := common.State{Resources: map[string]int64{}} getResourceValues(&s0) } @@ -218,9 +218,9 @@ func TestCPUScaleNextStateForFailure(t *testing.T) { State: "Running", }, }, - Resources: map[string]string{ - "1_cpu_requests": "100m", - "1_cpu_limits": "100m", + Resources: map[string]int64{ + "1_cpu_requests": 100, + "1_cpu_limits": 100, }, } goal := common.State{} @@ -244,8 +244,8 @@ func TestCPUScaleNextStateForFailure(t *testing.T) { } // negative resource limit. - state.Resources = map[string]string{ - "1_cpu_limits": "-100", + state.Resources = map[string]int64{ + "1_cpu_limits": -100, } states, _, _ = actuator.NextState(&state, &goal, profiles) if len(state.CurrentData) > 0 { @@ -253,8 +253,8 @@ func TestCPUScaleNextStateForFailure(t *testing.T) { } // too high resource limit - state.Resources = map[string]string{ - "1_cpu_limits": "100000", + state.Resources = map[string]int64{ + "1_cpu_limits": 100000, } states, _, _ = actuator.NextState(&state, &goal, profiles) if len(state.CurrentData) > 0 { @@ -274,7 +274,7 @@ func TestCPUScalePerformForFailure(t *testing.T) { CurrentPods: map[string]common.PodState{"pod_0": {}}, } plan := []planner.Action{ - {Name: actionName, Properties: map[string]int32{"value": 750}}, + {Name: actionName, Properties: map[string]int64{"value": 750}}, } actuator.Perform(&s0, plan) expectedActions := []string{"get"} @@ -293,7 +293,7 @@ func TestCPUScalePerformForFailure(t *testing.T) { Intent: common.Intent{TargetKey: "default/my-rs", TargetKind: "ReplicaSet"}, } plan = []planner.Action{ - {Name: actionName, Properties: map[string]int32{"value": 750}}, + {Name: actionName, Properties: map[string]int64{"value": 750}}, } actuator.Perform(&s1, plan) expectedActions = []string{"get"} @@ -309,7 +309,7 @@ func TestCPUScalePerformForFailure(t *testing.T) { // plan property is invalid. 
f.client.ClearActions() plan = []planner.Action{ - {Name: actionName, Properties: map[string]int32{"booja": 200}}, + {Name: actionName, Properties: map[string]int64{"booja": 200}}, } actuator.Perform(&s0, plan) if len(f.client.Actions()) != 0 { @@ -319,7 +319,7 @@ func TestCPUScalePerformForFailure(t *testing.T) { // TestCPUScaleGetResourcesForFailure tests for failure func TestCPUScaleGetResourcesForFailure(t *testing.T) { - s0 := common.State{Resources: map[string]string{"a_cpu_limits": "100"}} + s0 := common.State{Resources: map[string]int64{"a_cpu_limits": 100}} res := getResourceValues(&s0) if res != 0 { t.Errorf("Should have been 0 - was: %d.", res) @@ -354,9 +354,9 @@ func TestCPUScaleNextStateForSanity(t *testing.T) { State: "Running", }, }, - Resources: map[string]string{ - "1_cpu_limits": "1600", - "1_cpu_requests": "1600", + Resources: map[string]int64{ + "1_cpu_limits": 1600, + "1_cpu_requests": 1600, }, CurrentData: make(map[string]map[string]float64), } @@ -386,7 +386,7 @@ func TestCPUScaleNextStateForSanity(t *testing.T) { goal.Intent.Objectives["default/p99"] = 120 delete(state.CurrentData, actionName) _, _, actions = actuator.NextState(&state, &goal, profiles) - if len(actions) != 1 || actions[0].Properties.(map[string]int32)["value"] != 800 { + if len(actions) != 1 || actions[0].Properties.(map[string]int64)["value"] != 800 { t.Errorf("Extpected one action to set 800 - got: %v", actions) } @@ -401,7 +401,7 @@ func TestCPUScaleNextStateForSanity(t *testing.T) { actuator = f.newCPUScaleTestActuator(true) goal.Intent.Objectives["default/p99"] = 20 _, _, actions = actuator.NextState(&state, &goal, profiles) - if len(actions) != 1 || actions[0].Properties.(map[string]int32)["proactive"] != 1 || actions[0].Properties.(map[string]int32)["value"] != 1800 { + if len(actions) != 1 || actions[0].Properties.(map[string]int64)["proactive"] != 1 || actions[0].Properties.(map[string]int64)["value"] != 1800 { t.Errorf("Should contain 1 proactive action; was: %v", actions) } @@ -414,8 +414,8 @@ func TestCPUScaleNextStateForSanity(t *testing.T) { // maxProactive reached. delete(state.CurrentPods, "proactiveResourceAlloc") - state.Resources = map[string]string{ - "1_cpu_limits": fmt.Sprint(actuator.cfg.MaxProActiveCPU), + state.Resources = map[string]int64{ + "1_cpu_limits": actuator.cfg.MaxProActiveCPU, } states, utilities, actions = actuator.NextState(&state, &goal, profiles) if len(states) != 0 || len(utilities) != 0 || len(actions) != 0 { @@ -428,9 +428,33 @@ func TestCPUScaleNextStateForSanity(t *testing.T) { state.Intent.Objectives["default/p95"] = 100 goal.Intent.Objectives["default/p95"] = 200 _, _, actions = actuator.NextState(&state, &goal, profiles) - if len(actions) != 1 || actions[0].Properties.(map[string]int32)["proactive"] != 1 || actions[0].Properties.(map[string]int32)["value"] < 1800 || actions[0].Properties.(map[string]int32)["value"] > 1900 { + if len(actions) != 1 || actions[0].Properties.(map[string]int64)["proactive"] != 1 || actions[0].Properties.(map[string]int64)["value"] < 1800 || actions[0].Properties.(map[string]int64)["value"] > 1900 { t.Errorf("Should contain 1 proactive action; was: %v", actions) } + + // ensure an "empty" state does not crash the actuator. 
+ actuator = f.newCPUScaleTestActuator(false) + delete(goal.Intent.Objectives, "default/p95") + goal.Intent.Objectives["default/p99"] = 120 + emptyState := common.State{ + Intent: struct { + Key string + Priority float64 + TargetKey string + TargetKind string + Objectives map[string]float64 + }{ + Key: "default/my-objective", + Priority: 1.0, + TargetKey: "default/my-deployment", + TargetKind: "Deployment", + Objectives: map[string]float64{"default/p99": 250.0}, + }, + } + _, _, actions = actuator.NextState(&emptyState, &goal, profiles) + if len(actions) != 0 { + t.Errorf("Should contain no action; was: %v", actions) + } } // TestCPUScalePerformForSanity tests for sanity. @@ -444,7 +468,7 @@ func TestCPUScalePerformForSanity(t *testing.T) { Intent: common.Intent{TargetKey: "default/my-deployment", TargetKind: "Deployment"}, } plan := []planner.Action{ - {Name: actionName, Properties: map[string]int32{"value": 1000}}, + {Name: actionName, Properties: map[string]int64{"value": 1000}}, } actuator.Perform(&s0, plan) expectedActions := []string{"get", "update"} @@ -546,30 +570,34 @@ func TestCPUScaleEffectForSanity(t *testing.T) { profiles := map[string]common.Profile{"p99": {ProfileType: common.ProfileTypeFromText("latency")}} actuator := f.newCPUScaleTestActuator(false) actuator.Effect(&state, profiles) + + // check with None. + actuator.cfg.Script = "None" + actuator.Effect(&state, profiles) } // TestCPUScaleGetResourcesForSuccess tests for sanity. func TestCPUScaleGetResourcesForSanity(t *testing.T) { - s0 := common.State{Resources: map[string]string{}} + s0 := common.State{Resources: map[string]int64{}} res := getResourceValues(&s0) if res != 0 { t.Errorf("Should have been 0 - was: %v", res) } // request defined. - s0.Resources["0_cpu_requests"] = "200" + s0.Resources["0_cpu_requests"] = 200 res = getResourceValues(&s0) if res != 200 { t.Errorf("Should have been 200 - was: %v", res) } // limits defined. - s0.Resources["0_cpu_limits"] = "400" + s0.Resources["0_cpu_limits"] = 400 res = getResourceValues(&s0) if res != 400 { t.Errorf("Should have been 400 - was: %v", res) } // the last container matters. 
- s0.Resources["1_cpu_limits"] = "100" + s0.Resources["1_cpu_limits"] = 100 res = getResourceValues(&s0) if res != 100 { t.Errorf("Should have been 100 - was: %v", res) @@ -608,9 +636,9 @@ func TestCPUScaleActuator_NextState(t *testing.T) { }, }, CurrentData: map[string]map[string]float64{"cpu_usage": {}}, - Resources: map[string]string{ - "1_cpu_limits": "1000", - "1_cpu_requests": "1000", + Resources: map[string]int64{ + "1_cpu_limits": 1000, + "1_cpu_requests": 1000, }, Annotations: nil, } @@ -649,9 +677,9 @@ func TestCPUScaleActuator_NextState(t *testing.T) { }, }, CurrentData: map[string]map[string]float64{"cpu_usage": {}}, - Resources: map[string]string{ - "1_cpu_limits": "500", - "1_cpu_requests": "500", + Resources: map[string]int64{ + "1_cpu_limits": 500, + "1_cpu_requests": 500, }, Annotations: nil, }, @@ -665,7 +693,7 @@ func TestCPUScaleActuator_NextState(t *testing.T) { want: []common.State{newState}, want1: []float64{0.5}, want2: []planner.Action{{Name: actionName, - Properties: map[string]int32{"value": 1000}}, + Properties: map[string]int64{"value": 1000}}, }, }, } @@ -680,7 +708,7 @@ func TestCPUScaleActuator_NextState(t *testing.T) { newState.Intent.Objectives["default/p95latency"] = cs.predictLatency( []float64{400, 2, 30}, 900) newState.Intent.Objectives["default/availability"] = 1 - got, got1, got2 := cs.NextState(&tt.args.state, &tt.args.goal, tt.args.profiles) + got, got1, got2 := cs.NextState(&tt.args.state, &tt.args.goal, tt.args.profiles) //#nosec G601 -- NA as this is a test. if !reflect.DeepEqual(got[0].Resources, tt.want[0].Resources) { t.Errorf("NextState() got = %v, want %v", got, tt.want) diff --git a/pkg/planner/actuators/scaling/rm_pod.go b/pkg/planner/actuators/scaling/rm_pod.go index fe996a4..3e05202 100644 --- a/pkg/planner/actuators/scaling/rm_pod.go +++ b/pkg/planner/actuators/scaling/rm_pod.go @@ -19,13 +19,13 @@ const rmPodActionName = "rmPod" // RmPodConfig represents the configuration for this actuator. type RmPodConfig struct { - LookBack int `json:"look_back"` MinPods int `json:"min_pods"` - Port int `json:"port"` + LookBack int `json:"look_back"` Endpoint string `json:"endpoint"` - MongoEndpoint string `json:"mongo_endpoint"` + Port int `json:"port"` PluginManagerEndpoint string `json:"plugin_manager_endpoint"` PluginManagerPort int `json:"plugin_manager_port"` + MongoEndpoint string `json:"mongo_endpoint"` } // RmPodActuator is an actuator that can remove particular PODs. 
@@ -72,7 +72,7 @@ func (rm RmPodActuator) NextState(state *common.State, goal *common.State, profi res, err := rm.tracer.GetEffect(state.Intent.Key, rm.Group(), k, rm.cfg.LookBack, func() interface{} { return &ScaleOutEffect{} }) - if err != nil || len(res.(*ScaleOutEffect).ReplicaRange) < 1 { + if err != nil { klog.Warningf("No valid effect data found in knowledge base: %v.", res) return states, utilities, actions } diff --git a/pkg/planner/actuators/scaling/scale_out.go b/pkg/planner/actuators/scaling/scale_out.go index 8d0becc..31ec31d 100644 --- a/pkg/planner/actuators/scaling/scale_out.go +++ b/pkg/planner/actuators/scaling/scale_out.go @@ -38,11 +38,11 @@ type ScaleOutConfig struct { LookBack int `json:"look_back"` MaxProActiveScaleOut int `json:"max_proactive_scale_out"` ProActiveLatencyFactor float64 `json:"proactive_latency_factor"` - Port int `json:"port"` Endpoint string `json:"endpoint"` - MongoEndpoint string `json:"mongo_endpoint"` + Port int `json:"port"` PluginManagerEndpoint string `json:"plugin_manager_endpoint"` PluginManagerPort int `json:"plugin_manager_port"` + MongoEndpoint string `json:"mongo_endpoint"` } // ScaleOutEffect describes the data that is stored in the knowledge base. @@ -52,7 +52,7 @@ type ScaleOutEffect struct { ThroughputRange [2]float64 ThroughputScale [2]float64 ReplicaRange [2]int - Popt [5]float64 + Popt [4]float64 TrainingFeatures [2]string TargetFeature string Image string @@ -85,12 +85,12 @@ func averageAvailability(pods map[string]common.PodState) float64 { } // predictLatency uses the knowledge base to forecast the latency. -func predictLatency(popt [5]float64, throughput float64, numPods int) float64 { +func predictLatency(popt [4]float64, throughput float64, numPods int) float64 { // TODO: predict future throughput. if numPods == 0 || throughput == 0 { return 0.0 } - return popt[0] + popt[1]*math.Exp(popt[2]*throughput) + popt[3]*math.Exp(-popt[4]*float64(numPods)) + return (popt[0] * math.Exp(popt[1]*throughput)) / (popt[2] * math.Exp(popt[3]*throughput*float64(numPods))) } // findState tries to determine the best possible # of replicas. 
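For intuition on the reworked model above, which mirrors the new latency_func in horizontal_scaling.py: throughput drives the numerator up while the replica count grows the denominator, so more pods means lower predicted latency. A small illustrative loop, using the parameter set from the dummy tracer in the tests below (popt = [2, 1, 1, 0.2]) and a scaled throughput of 1.0, yields roughly 3.64 for 2 replicas and 2.00 for 5:

    popt := [4]float64{2, 1, 1, 0.2}
    for _, pods := range []int{2, 5} {
        l := (popt[0] * math.Exp(popt[1]*1.0)) / (popt[2] * math.Exp(popt[3]*1.0*float64(pods)))
        fmt.Printf("replicas=%d -> predicted latency %.2f\n", pods, l)
    }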
@@ -104,7 +104,7 @@ func (scale ScaleOutActuator) findState(state *common.State, goal *common.State, res, err := scale.tracer.GetEffect(state.Intent.Key, scale.Group(), k, scale.cfg.LookBack, func() interface{} { return &ScaleOutEffect{} }) - if err != nil || len(res.(*ScaleOutEffect).ReplicaRange) < 1 { + if err != nil { return common.State{}, fmt.Errorf("no valid effect data found in knowledge base: %s - %v", err, res) } if len(newState.CurrentPods) > res.(*ScaleOutEffect).ReplicaRange[1] { @@ -151,25 +151,25 @@ func (scale ScaleOutActuator) NextState(state *common.State, goal *common.State, tempState.Intent.Objectives[name] *= scale.cfg.ProActiveLatencyFactor } } - return []common.State{tempState}, []float64{0.1}, []planner.Action{{Name: scale.Name(), Properties: map[string]int32{"factor": 1, "proactive": 1}}} + return []common.State{tempState}, []float64{0.1}, []planner.Action{{Name: scale.Name(), Properties: map[string]int64{"factor": 1, "proactive": 1}}} } return nil, nil, nil } utility := 0.9 + (float64(len(newState.CurrentPods))/float64(scale.cfg.MaxPods))*(1.0/goal.Intent.Priority) return []common.State{newState}, []float64{utility}, []planner.Action{ - {Name: scale.Name(), Properties: map[string]int32{"factor": int32(len(newState.CurrentPods) - len(state.CurrentPods))}}, + {Name: scale.Name(), Properties: map[string]int64{"factor": int64(len(newState.CurrentPods) - len(state.CurrentPods))}}, } } func (scale ScaleOutActuator) Perform(state *common.State, plan []planner.Action) { // calculate the scale factor - var factor int32 + var factor int64 factor = 0 for _, item := range plan { if item.Name == rmPodActionName { factor-- } else if item.Name == scaleOutActionName { - factor += item.Properties.(map[string]int32)["factor"] + factor += item.Properties.(map[string]int64)["factor"] } } @@ -188,7 +188,8 @@ func (scale ScaleOutActuator) Perform(state *common.State, plan []planner.Action klog.Errorf("failed to get latest version of: %v", err) return err } - res.Spec.Replicas = getInt32Pointer(*res.Spec.Replicas + factor) + // conversion to int32 is ok - as we have a MaxPods defined + res.Spec.Replicas = getInt32Pointer(*res.Spec.Replicas + int32(factor)) if *res.Spec.Replicas > 0 { _, updateErr := scale.apps.AppsV1().Deployments(namespace).Update(context.TODO(), res, metaV1.UpdateOptions{}) return updateErr @@ -205,7 +206,8 @@ func (scale ScaleOutActuator) Perform(state *common.State, plan []planner.Action klog.Errorf("failed to get latest version of: %v", err) return err } - res.Spec.Replicas = getInt32Pointer(*res.Spec.Replicas + factor) + // conversion to int32 is ok - as we have a MaxPods defined + res.Spec.Replicas = getInt32Pointer(*res.Spec.Replicas + int32(factor)) if *res.Spec.Replicas > 0 { _, updateErr := scale.apps.AppsV1().ReplicaSets(namespace).Update(context.TODO(), res, metaV1.UpdateOptions{}) return updateErr @@ -219,6 +221,10 @@ func (scale ScaleOutActuator) Perform(state *common.State, plan []planner.Action } func (scale ScaleOutActuator) Effect(state *common.State, profiles map[string]common.Profile) { + if scale.cfg.Script == "None" { + klog.V(2).Infof("Effect calculation is disabled - will not run analytics.") + return + } throughputObjective := "" var latencyObjectives []string diff --git a/pkg/planner/actuators/scaling/scale_out_test.go b/pkg/planner/actuators/scaling/scale_out_test.go index 53def7c..a1778b0 100644 --- a/pkg/planner/actuators/scaling/scale_out_test.go +++ b/pkg/planner/actuators/scaling/scale_out_test.go @@ -28,8 +28,8 @@ func (d dummyTracer) 
GetEffect(_ string, _ string, profileName string, _ int, co } tmp := constructor().(*ScaleOutEffect) tmp.ThroughputScale = [2]float64{0.01, 0.0} - tmp.ReplicaRange = [2]int{1, 7} - tmp.Popt = [5]float64{0.01, 1, 1, 4, 1} + tmp.ReplicaRange = [2]int{1, 5} + tmp.Popt = [4]float64{2., 1., 1., 0.2} return tmp, nil } @@ -179,7 +179,7 @@ func TestScalePerformForFailure(t *testing.T) { CurrentPods: map[string]common.PodState{"pod_0": {}}, } plan := []planner.Action{ - {Name: scaleOutActionName, Properties: map[string]int32{"factor": 2}}, + {Name: scaleOutActionName, Properties: map[string]int64{"factor": 2}}, {Name: rmPodActionName}, } actuator.Perform(&s0, plan) @@ -253,11 +253,11 @@ func TestScaleNextStateForSanity(t *testing.T) { t.Errorf("Resultsets are empty: %v, %v, %v.", states, utilities, actions) } // check if results match for scale-out - if len(states[0].CurrentPods) != 3 || actions[0].Name != actuator.Name() || actions[0].Properties.(map[string]int32)["factor"] != 2 { + if len(states[0].CurrentPods) != 3 || actions[0].Name != actuator.Name() || actions[0].Properties.(map[string]int64)["factor"] != 2 { t.Errorf("Expected a scale out by factor of 2 - got: %v.", actions[0]) } if utilities[0] > 0.95 { - t.Errorf("Expected utiltiy to be < 1.0 - got: %v.", utilities) + t.Errorf("Expected utility to be < 1.0 - got: %v.", utilities) } // empty results if no solution can be found. @@ -294,7 +294,7 @@ func TestScaleNextStateForSanity(t *testing.T) { if len(states) != 1 || len(utilities) != 1 || len(actions) != 1 { t.Errorf("Resultsets should not be empty: %v, %v, %v.", states, utilities, actions) } - if actions[0].Properties.(map[string]int32)["proactive"] != 1 || utilities[0] != 0.1 { + if actions[0].Properties.(map[string]int64)["proactive"] != 1 || utilities[0] != 0.1 { t.Errorf("Action should be marked as being proactive, utiltiy == 0.01 -got %v, %v.", states[0], utilities[0]) } } @@ -330,7 +330,7 @@ func TestScalePerformForSanity(t *testing.T) { CurrentPods: map[string]common.PodState{"pod_0": {}}, } plan := []planner.Action{ - {Name: scaleOutActionName, Properties: map[string]int32{"factor": 2}}, + {Name: scaleOutActionName, Properties: map[string]int64{"factor": 2}}, {Name: rmPodActionName}, } actuator.Perform(&s0, plan) @@ -374,4 +374,8 @@ func TestScaleEffectForSanity(t *testing.T) { state.Intent.Objectives["default/rps"] = 100 profiles["default/rps"] = common.Profile{ProfileType: common.ProfileTypeFromText("throughput")} actuator.Effect(&state, profiles) + + // check with None. + actuator.cfg.Script = "None" + actuator.Effect(&state, profiles) } diff --git a/pkg/planner/actuators/types.go b/pkg/planner/actuators/types.go index b5efe32..13d00c7 100644 --- a/pkg/planner/actuators/types.go +++ b/pkg/planner/actuators/types.go @@ -13,7 +13,7 @@ type Plugin interface { Group() string } -// ActuatorPlugin defines the interface for the actuators. +// Actuator defines the interface for the actuators. type Actuator interface { Plugin // NextState should return a set of potential follow-up states for a given state if this actuator would potentially be used. 
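For readers tracking the comment rename above, a minimal sketch of a type that would satisfy the Actuator interface; the method set is inferred from the plugin handlers and call sites elsewhere in this patch, so treat it as an assumption rather than the definitive contract:

    type noopActuator struct{}

    func (n noopActuator) Name() string  { return "noop" }
    func (n noopActuator) Group() string { return "noop" }
    func (n noopActuator) NextState(state *common.State, goal *common.State, profiles map[string]common.Profile) ([]common.State, []float64, []planner.Action) {
        return nil, nil, nil // proposes no follow-up states
    }
    func (n noopActuator) Perform(state *common.State, plan []planner.Action)             {}
    func (n noopActuator) Effect(state *common.State, profiles map[string]common.Profile) {}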
diff --git a/pkg/planner/astar/astar_planner.go b/pkg/planner/astar/astar_planner.go index 3deb101..5305d9c 100644 --- a/pkg/planner/astar/astar_planner.go +++ b/pkg/planner/astar/astar_planner.go @@ -1,9 +1,8 @@ package astar import ( - "reflect" - "container/heap" + "reflect" "k8s.io/klog/v2" @@ -164,7 +163,6 @@ func (p APlanner) CreatePlan(current common.State, desired common.State, profile klog.V(2).Infof("State graph has %d nodes.", len(sg.nodes)) if goal { _, actions := solve(sg, s0, g0, h, true, profiles) - // sg.toDot(path, "tmp.dot") plan = actions } else { klog.Warning("No path to goal state possible!") diff --git a/pkg/planner/astar/astar_planner_test.go b/pkg/planner/astar/astar_planner_test.go index 4da2c70..bce930c 100644 --- a/pkg/planner/astar/astar_planner_test.go +++ b/pkg/planner/astar/astar_planner_test.go @@ -236,9 +236,9 @@ func (res resourceAction) NextState(state *common.State, _ *common.State, _ map[ // Set the right CPU resource allocations if not already the case... current, ok := state.Resources["cpu"] if ok { - if current != "2" { + if current != 2 { newState := state.DeepCopy() - newState.Resources["cpu"] = "2" + newState.Resources["cpu"] = 2 followUpStates = append(followUpStates, newState) utilities = append(utilities, 0.0) @@ -418,14 +418,14 @@ func getPlannerTestCases(enableOpportunistic bool) []testCaseData { { name: "local actuators", fixture: f1, - plannerCrt: func(f *aStarPlannerFixture) *APlanner { return f1.newTestPlanner(enableOpportunistic) }, - stubsCrt: func(f *aStarPlannerFixture) []*plugins.ActuatorPluginStub { return []*plugins.ActuatorPluginStub{} }, + plannerCrt: func(_ *aStarPlannerFixture) *APlanner { return f1.newTestPlanner(enableOpportunistic) }, + stubsCrt: func(_ *aStarPlannerFixture) []*plugins.ActuatorPluginStub { return []*plugins.ActuatorPluginStub{} }, }, { name: "grpc actuators", fixture: f2, - plannerCrt: func(f *aStarPlannerFixture) *APlanner { return f2.newTestPlannerGrpc(enableOpportunistic) }, - stubsCrt: func(f *aStarPlannerFixture) []*plugins.ActuatorPluginStub { return newTestActuatorsGrpc(f2) }, + plannerCrt: func(_ *aStarPlannerFixture) *APlanner { return f2.newTestPlannerGrpc(enableOpportunistic) }, + stubsCrt: func(_ *aStarPlannerFixture) []*plugins.ActuatorPluginStub { return newTestActuatorsGrpc(f2) }, }, } } @@ -433,14 +433,14 @@ func getPlannerTestCases(enableOpportunistic bool) []testCaseData { // Tests for success. // TestGetNodeForStateForSuccess tests for sanity. -func TestGetNodeForStateForSuccess(t *testing.T) { +func TestGetNodeForStateForSuccess(_ *testing.T) { sg := newStateGraph() s0 := common.State{} getNodeForState(*sg, s0) } // TestCreatePlanForSuccess tests for success. -func TestCreatePlanForSuccess(t *testing.T) { +func TestCreatePlanForSuccess(_ *testing.T) { f := newAStarPlannerFixture() start := common.State{ Intent: common.Intent{ @@ -475,7 +475,7 @@ func TestCreatePlanForSuccess(t *testing.T) { } // TestExecutePlanForSuccess tests for success. -func TestExecutePlanForSuccess(t *testing.T) { +func TestExecutePlanForSuccess(_ *testing.T) { f := newAStarPlannerFixture() state := common.State{Intent: common.Intent{ Key: "foo", @@ -491,7 +491,7 @@ func TestExecutePlanForSuccess(t *testing.T) { } // TestTriggerEffectForSuccess tests for success. 
-func TestTriggerEffectForSuccess(t *testing.T) { +func TestTriggerEffectForSuccess(_ *testing.T) { f := newAStarPlannerFixture() state := common.State{Intent: common.Intent{ Key: "foo", @@ -643,7 +643,7 @@ func TestShortCutForSanity(t *testing.T) { }, CurrentPods: map[string]common.PodState{"pod_0": {Availability: 1.0}}, CurrentData: map[string]map[string]float64{"cpu_value": {"host0": 20.0}}, - Resources: map[string]string{"cpu": "4"}, + Resources: map[string]int64{"cpu": 4}, } goal := common.State{ Intent: common.Intent{ @@ -761,12 +761,12 @@ func TestFaultyActuatorOverGrpcForSanity(t *testing.T) { cfg.Planner.AStar.PluginManagerEndpoint = "localhost" cfg.Planner.AStar.PluginManagerPort = 33337 aPlanner := NewAPlanner(actuatorList, cfg) + defer aPlanner.Stop() faultyStub, err := createPlugin("faulty", 3338, faulty, 33337) if err != nil { klog.Errorf("Cannot create faulty plugin over grpc") } - defer aPlanner.Stop() start := common.State{} goal := common.State{} @@ -787,7 +787,6 @@ func TestFaultyActuatorOverGrpcForSanity(t *testing.T) { "and goal, that means Nextstate() should be called 4 times as we only take 2 candidates from each call. "+ "Was: %v", len(f.triggeredUpdates)) } - } // BenchmarkCreatePlan benchmarks the planner. diff --git a/pkg/planner/astar/astar_test.go b/pkg/planner/astar/astar_test.go index f46472a..2a17786 100644 --- a/pkg/planner/astar/astar_test.go +++ b/pkg/planner/astar/astar_test.go @@ -52,14 +52,14 @@ func testHeuristic(one, _ Node, _ map[string]common.Profile) float64 { // Tests for success. // TestSolveForSuccess tests for success. -func TestSolveForSuccess(t *testing.T) { +func TestSolveForSuccess(_ *testing.T) { sg, start, goal := newTestGraph() profiles := map[string]common.Profile{} solve(sg, start, goal, testHeuristic, true, profiles) } // TestResolvePathForSuccess tests for success. -func TestResolvePathForSuccess(t *testing.T) { +func TestResolvePathForSuccess(_ *testing.T) { node0 := Node{value: "a"} node1 := Node{value: "b"} node2 := Node{value: "c"} diff --git a/pkg/planner/astar/priority_queue_test.go b/pkg/planner/astar/priority_queue_test.go index 2d8b5d4..b5b895a 100644 --- a/pkg/planner/astar/priority_queue_test.go +++ b/pkg/planner/astar/priority_queue_test.go @@ -8,14 +8,14 @@ import ( // Tests for success. // TestLenForSuccess tests for success. -func TestLenForSuccess(t *testing.T) { +func TestLenForSuccess(_ *testing.T) { queue := make(PriorityQueue, 0) heap.Init(&queue) queue.Len() } // TestLessForSuccess tests for success. -func TestLessForSuccess(t *testing.T) { +func TestLessForSuccess(_ *testing.T) { queue := make(PriorityQueue, 0) heap.Init(&queue) item1 := &Item{ @@ -32,7 +32,7 @@ func TestLessForSuccess(t *testing.T) { } // TestSwapForSuccess tests for success. -func TestSwapForSuccess(t *testing.T) { +func TestSwapForSuccess(_ *testing.T) { queue := make(PriorityQueue, 0) heap.Init(&queue) item1 := &Item{ @@ -49,7 +49,7 @@ func TestSwapForSuccess(t *testing.T) { } // TestPushForSuccess tests for success. -func TestPushForSuccess(t *testing.T) { +func TestPushForSuccess(_ *testing.T) { queue := make(PriorityQueue, 0) heap.Init(&queue) item := &Item{ @@ -60,7 +60,7 @@ func TestPushForSuccess(t *testing.T) { } // TestPopForSuccess tests for success. 
-func TestPopForSuccess(t *testing.T) { +func TestPopForSuccess(_ *testing.T) { queue := make(PriorityQueue, 0) heap.Init(&queue) item := &Item{ diff --git a/pkg/planner/astar/state_graph_test.go b/pkg/planner/astar/state_graph_test.go index e66d996..6c4f6fd 100644 --- a/pkg/planner/astar/state_graph_test.go +++ b/pkg/planner/astar/state_graph_test.go @@ -10,7 +10,7 @@ import ( // Tests for success. // TestAddNodeForSuccess tests for success. -func TestAddNodeForSuccess(t *testing.T) { +func TestAddNodeForSuccess(_ *testing.T) { g := newStateGraph() node0 := Node{"foo"} node1 := Node{"bar"} @@ -19,7 +19,7 @@ func TestAddNodeForSuccess(t *testing.T) { } // TestAddEdgeForSuccess tests for success. -func TestAddEdgeForSuccess(t *testing.T) { +func TestAddEdgeForSuccess(_ *testing.T) { g := newStateGraph() node0 := Node{"foo"} node1 := Node{"bar"} @@ -96,7 +96,7 @@ func TestToDotForSanity(t *testing.T) { } // TestStatGraphForSanity tests for sanity. -func TestStatGraphForSanity(t *testing.T) { +func TestStatGraphForSanity(_ *testing.T) { g := newStateGraph() start := Node{"DeploymentWithOnePod"} morePods := Node{"DeploymentWithTwoPods"} diff --git a/pkg/tests/dummy_rm_pod_plugin.go b/pkg/tests/dummy_rm_pod_plugin.go deleted file mode 100644 index e36bbc7..0000000 --- a/pkg/tests/dummy_rm_pod_plugin.go +++ /dev/null @@ -1,55 +0,0 @@ -package tests - -import ( - plugins "github.com/intel/intent-driven-orchestration/pkg/api/plugins/v1alpha1" - "github.com/intel/intent-driven-orchestration/pkg/controller" - - "github.com/intel/intent-driven-orchestration/pkg/common" - "github.com/intel/intent-driven-orchestration/pkg/planner" - "github.com/intel/intent-driven-orchestration/pkg/planner/actuators" - "github.com/intel/intent-driven-orchestration/pkg/planner/actuators/scaling" - - "k8s.io/klog/v2" -) - -type DummyRemovePluginHandler struct { - actuator actuators.Actuator -} - -func (s *DummyRemovePluginHandler) NextState(state *common.State, goal *common.State, profiles map[string]common.Profile) ([]common.State, []float64, []planner.Action) { - return s.actuator.NextState(state, goal, profiles) -} - -func (s *DummyRemovePluginHandler) Perform(state *common.State, plan []planner.Action) { - s.actuator.Perform(state, plan) -} - -func (s *DummyRemovePluginHandler) Effect(state *common.State, profiles map[string]common.Profile) { - s.actuator.Effect(state, profiles) -} - -// startRemovePodPlugin initializes a remove pod actuator. 
-func startRemovePodPlugin(tracer controller.Tracer, port int, pluginManagerPort int) *plugins.ActuatorPluginStub { - cfg := scaling.RmPodConfig{ - LookBack: 20, - MinPods: 1, - } - p := &DummyRemovePluginHandler{ - actuator: scaling.NewRmPodActuator(nil, tracer, cfg), - } - stub := plugins.NewActuatorPluginStub("rmpod", "localhost", port, "localhost", pluginManagerPort) - stub.SetNextStateFunc(p.NextState) - stub.SetPerformFunc(p.Perform) - stub.SetEffectFunc(p.Effect) - err := stub.Start() - if err != nil { - klog.Fatalf("Error starting plugin: %s", err) - return nil - } - err = stub.Register() - if err != nil { - klog.Fatalf("Error registering plugin: %s", err) - return nil - } - return stub -} diff --git a/pkg/tests/dummy_scale_out_plugin.go b/pkg/tests/dummy_scale_out_plugin.go deleted file mode 100644 index 2be0c8a..0000000 --- a/pkg/tests/dummy_scale_out_plugin.go +++ /dev/null @@ -1,54 +0,0 @@ -package tests - -import ( - plugins "github.com/intel/intent-driven-orchestration/pkg/api/plugins/v1alpha1" - "github.com/intel/intent-driven-orchestration/pkg/controller" - - "github.com/intel/intent-driven-orchestration/pkg/common" - "github.com/intel/intent-driven-orchestration/pkg/planner" - "github.com/intel/intent-driven-orchestration/pkg/planner/actuators" - "github.com/intel/intent-driven-orchestration/pkg/planner/actuators/scaling" - - "k8s.io/klog/v2" -) - -type DummyScaleOutPluginHandler struct { - actuator actuators.Actuator -} - -func (s *DummyScaleOutPluginHandler) NextState(state *common.State, goal *common.State, profiles map[string]common.Profile) ([]common.State, []float64, []planner.Action) { - return s.actuator.NextState(state, goal, profiles) -} - -func (s *DummyScaleOutPluginHandler) Perform(state *common.State, plan []planner.Action) { - s.actuator.Perform(state, plan) -} - -func (s *DummyScaleOutPluginHandler) Effect(state *common.State, profiles map[string]common.Profile) { - s.actuator.Effect(state, profiles) -} - -// startScaleOutPlugin initiates a scale_ou actuator. 
-func startScaleOutPlugin(tracer controller.Tracer, port int, pluginManagerPort int) *plugins.ActuatorPluginStub { - cfg := scaling.ScaleOutConfig{ - MaxPods: 128, - } - p := &DummyScaleOutPluginHandler{ - actuator: scaling.NewScaleOutActuator(nil, tracer, cfg), - } - stub := plugins.NewActuatorPluginStub("scale_out", "localhost", port, "localhost", pluginManagerPort) - stub.SetNextStateFunc(p.NextState) - stub.SetPerformFunc(p.Perform) - stub.SetEffectFunc(p.Effect) - err := stub.Start() - if err != nil { - klog.Fatalf("Error starting plugin: %s", err) - return nil - } - err = stub.Register() - if err != nil { - klog.Fatalf("Error registering plugin: %s", err) - return nil - } - return stub -} diff --git a/pkg/tests/full_framework_test.go b/pkg/tests/full_framework_test.go new file mode 100644 index 0000000..0b124ba --- /dev/null +++ b/pkg/tests/full_framework_test.go @@ -0,0 +1,739 @@ +package tests + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "os" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "testing" + "time" + + "k8s.io/klog/v2" + + "github.com/intel/intent-driven-orchestration/pkg/api/intents/v1alpha1" + "github.com/intel/intent-driven-orchestration/pkg/common" + "github.com/intel/intent-driven-orchestration/pkg/controller" + "github.com/intel/intent-driven-orchestration/pkg/generated/clientset/versioned/fake" + informers "github.com/intel/intent-driven-orchestration/pkg/generated/informers/externalversions" + "github.com/intel/intent-driven-orchestration/pkg/planner" + "github.com/intel/intent-driven-orchestration/pkg/planner/actuators" + "github.com/intel/intent-driven-orchestration/pkg/planner/actuators/scaling" + "github.com/intel/intent-driven-orchestration/pkg/planner/astar" + appsV1 "k8s.io/api/apps/v1" + coreV1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + k8sInformers "k8s.io/client-go/informers" + k8sFake "k8s.io/client-go/kubernetes/fake" + core "k8s.io/client-go/testing" +) + +// tries defines the number of times we'll try to add objects. +const tries = 60 + +// timeoutInMillis defines the timout between retries. +const timeoutInMillis = 750 + +type planEvent struct { + plan []planner.Action + name string +} + +// fileTracer is a simplified version of the knowledge base. 
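The tracer below stands in for the MongoDB-backed knowledge base during replay; actuators consume it through GetEffect, roughly as in this sketch (the intent and profile names are hypothetical, the call shape mirrors scale_out.go above):

    res, err := tracer.GetEffect("default/my-objective", "scaling", "default/p99latency", 20,
        func() interface{} { return &scaling.ScaleOutEffect{} })
    if err == nil {
        effect := res.(*scaling.ScaleOutEffect)
        _ = effect.Popt // model parameters fitted by the analytics scripts
    }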
+type fileTracer struct { + ch chan planEvent + data map[string]map[string]map[string][]interface{} + indexer map[string]map[string]map[string]int + indexerMutex *sync.RWMutex +} + +func (t fileTracer) TraceEvent(_ common.State, desired common.State, plan []planner.Action) { + t.ch <- planEvent{ + plan: plan, + name: desired.Intent.Key, + } +} + +func (t fileTracer) GetEffect(name string, group string, profileName string, _ int, constructor func() interface{}) (interface{}, error) { + if _, ok := t.indexer[group]; !ok { + return nil, fmt.Errorf("group not found in dataset: %v", group) + } + t.indexerMutex.RLock() + index := t.indexer[group][profileName][name] + t.indexerMutex.RUnlock() + + data := t.data[group][profileName][name][index].(map[string]interface{}) + if group == "scaling" { + tmp := constructor().(*scaling.ScaleOutEffect) + popt := [4]float64{} + for i, v := range data["popt"].([]interface{}) { + popt[i] = v.(float64) + } + tmp.Popt = popt + replicaRange := [2]int{} + for i, v := range data["replicaRange"].([]interface{}) { + replicaRange[i] = int(v.(float64)) + } + tmp.ReplicaRange = replicaRange + throughputScale := [2]float64{} + for i, v := range data["throughputScale"].([]interface{}) { + throughputScale[i] = v.(float64) + } + tmp.ThroughputScale = throughputScale + return tmp, nil + } else if group == "vertical_scaling" { + tmp := constructor().(*scaling.CPUScaleEffect) + popt := [3]float64{} + for i, v := range data["popt"].([]interface{}) { + popt[i] = v.(float64) + } + tmp.Popt = popt + return tmp, nil + } + return nil, nil +} + +// stepIndex increments the index, so we can retrieve updated models. +func (t fileTracer) stepIndex(index int) { + t.indexerMutex.Lock() + defer t.indexerMutex.Unlock() + for k1, v1 := range t.indexer { + for k2, v2 := range v1 { + for k3 := range v2 { + if index < len(t.data[k1][k2][k3]) { + t.indexer[k1][k2][k3] = index + } else { + klog.Infof("Cannot find another entry for %v-%v-%v will use last!", k1, k2, k3) + } + } + } + } +} + +// actuatorSetup contains information about the way to initialize and configure actuators. +type actuatorSetup struct { + initFunc interface{} + cfg interface{} +} + +// testEnvironment holds all info needed for the replay. +type testEnvironment struct { + name string + effectsFilename string + eventsFilename string + defaults *common.Config + actuators map[string]actuatorSetup +} + +// testFixture for the replay test. +type testFixture struct { + test *testing.T + objects []runtime.Object + k8sClient *k8sFake.Clientset + intentClient *fake.Clientset + k8sInformer k8sInformers.SharedInformerFactory + intentInformer informers.SharedInformerFactory + tracer fileTracer + prometheus prometheusDummy + prometheusServer *http.Server + ticker chan planEvent +} + +// newTestFixture creates a new fixture for testing. +func newTestFixture(test *testing.T) testFixture { + f := testFixture{} + f.test = test + return f +} + +// prometheusDummy enables serving values to the framework. +type prometheusDummy struct { + vals map[string]float64 + valLock *sync.RWMutex +} + +// prometheusValues holds the values the prometheus dummy can return. +type prometheusValues struct { + vals map[string]float64 +} + +// updateValues enables the values that are going to be returned to the framework. 
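Hypothetical usage of the helper defined next (prom is assumed to be a *prometheusDummy): the test pushes KPI readings into the dummy, and the /query handler further below serves them back to the monitors in Prometheus' response format:

    updates := prom.updateValues()
    updates <- prometheusValues{vals: map[string]float64{"default/p99latency": 150.0}}
    // a later GET /query?query=default/p99latency now reports 150.0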
+func (p *prometheusDummy) updateValues() chan<- prometheusValues { + updates := make(chan prometheusValues) + go func() { + for e := range updates { + p.valLock.Lock() + for key, val := range e.vals { + p.vals[key] = val + } + p.valLock.Unlock() + } + }() + return updates +} + +// serve handles the HTTP requests - mimics a prometheus server. +func (p *prometheusDummy) serve() *http.Server { + mux := http.NewServeMux() + // return KPI related information. + mux.HandleFunc("/query", func(w http.ResponseWriter, r *http.Request) { + p.valLock.Lock() + key := strings.Split(r.URL.Query().Get("query"), "&")[0] + _, err := fmt.Fprintf(w, "{\"data\": {\"result\": [{\"value\": [1680347032.000, \"%f\"]}]}}", p.vals[key]) + if err != nil { + return + } + p.valLock.Unlock() + }) + // returns host based telemetry information - assumes query contains a host and metrics name seperated by @. + mux.HandleFunc("/data", func(w http.ResponseWriter, r *http.Request) { + p.valLock.Lock() + key := strings.Split(r.URL.Query().Get("query"), "&")[0] + host := strings.Split(key, "@")[1] + _, err := fmt.Fprintf(w, "{\"data\": {\"result\": [{\"metric\": {\"host\": \"%v\"}, \"value\": [1680347032.000, \"%f\"]}]}}", host, p.vals[key]) + if err != nil { + return + } + p.valLock.Unlock() + }) + + server := &http.Server{ + Addr: ":39090", + Handler: mux, + ReadHeaderTimeout: 10 * time.Second, + } + go func() { + if err := server.ListenAndServe(); err != http.ErrServerClosed { + if strings.Contains(err.Error(), "address already in use") { + klog.Errorf("Assigned Port address already in use in here, skipping test: %v", err) + } else { + klog.Fatalf("Could not serve: %v", err) + } + + } + }() + return server +} + +// checkPrometheus makes sure the dummy prometheus server can be reached. +func (f *testFixture) checkPrometheus() { + ready := false + for i := 0; i < tries; i++ { + time.Sleep(time.Millisecond * timeoutInMillis) + resp, err := http.Get("http://127.0.0.1:39090/query") + if err != nil || resp.Status != "200 OK" { + klog.Warningf("Could not reach prometheus: %v - %v.", err, resp.Status) + } else { + ready = true + break + } + } + if !ready { + f.test.Errorf("Failed to reach prometheus dummy web server!") + } +} + +// checkProfiles makes sure that all KPI profiles have been added. +func (f *testFixture) checkProfiles(profiles map[string]float64) { + // now let's wait till we've seen the status updates to the KPI profiles... + counter := 0 + ready := false + for i := 0; i < tries; i++ { + for _, action := range f.intentClient.Actions() { + if action.GetSubresource() == "status" && action.GetVerb() == "update" { + counter++ + } + if counter == len(profiles) { + ready = true + } + } + if ready { + break + } + time.Sleep(time.Millisecond * timeoutInMillis) + } + if !ready { + f.test.Errorf("Profiles were not added in time: %v.", profiles) + } +} + +// newTestSetup sets up a new version of the test. +func (f *testFixture) newTestSetup(env testEnvironment, stopper chan struct{}) context.CancelFunc { + ctx, cancel := context.WithCancel(context.Background()) + + // load trace. + effects, indexer, err := parseEffects(env.effectsFilename) + if err != nil { + f.test.Errorf("Could not load trace: %s.", err) + } + + // fake environment... + f.ticker = make(chan planEvent) + f.tracer = fileTracer{ch: f.ticker, data: effects, indexer: indexer, indexerMutex: &sync.RWMutex{}} + f.prometheus = prometheusDummy{valLock: &sync.RWMutex{}, vals: make(map[string]float64)} + f.k8sClient = k8sFake.NewSimpleClientset(f.objects...) 
+ f.intentClient = fake.NewSimpleClientset(f.objects...) + fakeWatch := watch.NewFake() + f.intentClient.PrependWatchReactor("KpiProfile", core.DefaultWatchReactor(fakeWatch, nil)) + f.intentClient.PrependWatchReactor("Intents", core.DefaultWatchReactor(fakeWatch, nil)) + + // informers + f.k8sInformer = k8sInformers.NewSharedInformerFactory(f.k8sClient, func() time.Duration { return 0 }()) + f.intentInformer = informers.NewSharedInformerFactory(f.intentClient, func() time.Duration { return 0 }()) + + // we provide the actuators with a dummy client that does nothing; so the framework test can make sure for each cycle the right state is setup. + dummyK8sClient := k8sFake.NewSimpleClientset(f.objects...) + + // plnr and actuator setup + var actuatorList []actuators.Actuator + for _, actuatorConfig := range env.actuators { + in := make([]reflect.Value, 3) + in[0] = reflect.ValueOf(dummyK8sClient) + in[1] = reflect.ValueOf(f.tracer) + in[2] = reflect.ValueOf(actuatorConfig.cfg) + + var res []reflect.Value + f := reflect.ValueOf(actuatorConfig.initFunc) + res = f.Call(in) + result := res[0].Interface() + actuatorList = append(actuatorList, result.(actuators.Actuator)) + } + plnr := astar.NewAPlanner(actuatorList, *env.defaults) + defer plnr.Stop() + + // intent controller... + ctlr := controller.NewController(*env.defaults, f.tracer, f.k8sClient, f.k8sInformer.Core().V1().Pods()) + ctlr.SetPlanner(plnr) + go ctlr.Run(1, stopper) + + // profile monitor... + profileMonitor := controller.NewKPIProfileMonitor(env.defaults.Monitor, f.intentClient, f.intentInformer.Ido().V1alpha1().KPIProfiles(), ctlr.UpdateProfile()) + go profileMonitor.Run(1, stopper) + + // intent monitor... + intentMonitor := controller.NewIntentMonitor(f.intentClient, f.intentInformer.Ido().V1alpha1().Intents(), ctlr.UpdateIntent()) + go intentMonitor.Run(1, stopper) + + // pod monitor... + podMonitor := controller.NewPodMonitor(f.k8sClient, f.k8sInformer.Core().V1().Pods(), ctlr.UpdatePodError()) + go podMonitor.Run(1, stopper) + + // start a prometheus dummy and run the framework + f.prometheusServer = f.prometheus.serve() + + f.k8sInformer.Start(ctx.Done()) + f.intentInformer.Start(ctx.Done()) + + // let's give all workers a chance to spin up... + time.Sleep(time.Millisecond * timeoutInMillis) + + return cancel +} + +// Event represents an entry from an events' collection. +type Event struct { + Intent string `json:"name"` + Current map[string]float64 `json:"current_objectives"` + Desired map[string]float64 `json:"desired_objectives"` + Pods map[string]map[string]interface{} `json:"pods"` + Resources map[string]int64 `json:"resources"` + Plan []map[string]interface{} `json:"plan"` + Data map[string]map[string]float64 `json:"data"` +} + +// Effect represents an entry in an effects' collection. +type Effect struct { + Name string `json:"name"` + ProfileName string `json:"profileName"` + Group string `json:"group"` + Data map[string]interface{} `json:"data"` +} + +// parseTrace reads a trace from a json file. 
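To make the trace format concrete, a single entry of an events.json file would decode into something like the following Event (all values are made up; the real traces live under pkg/tests/traces/):

    event := Event{
        Intent:    "default/my-objective",
        Current:   map[string]float64{"default/p99latency": 210.0},
        Desired:   map[string]float64{"default/p99latency": 150.0},
        Pods:      map[string]map[string]interface{}{"pod_0": {"state": "Running", "qosclass": "Guaranteed", "nodename": "node0"}},
        Resources: map[string]int64{"0_cpu_requests": 500, "0_cpu_limits": 500},
        Plan:      []map[string]interface{}{{"name": "rmPod", "properties": nil}},
        Data:      map[string]map[string]float64{"cpu_value": {"node0": 20.0}},
    }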
+func parseTrace(filename string) []Event { + trace, err := os.Open(filename) + if err != nil { + klog.Errorf("Could not open events trace: %v.", err) + } + defer func(trace *os.File) { + err := trace.Close() + if err != nil { + klog.Errorf("Now this should not happen: %v.", err) + } + }(trace) + + var events []Event + tmp, err1 := io.ReadAll(trace) + err2 := json.Unmarshal(tmp, &events) + if err1 != nil || err2 != nil { + klog.Errorf("Could not read and/or unmarshal trace: %v-%v.", err1, err2) + } + return events +} + +// parseEffects reads a set of effects from a json file. +func parseEffects(filename string) (map[string]map[string]map[string][]interface{}, map[string]map[string]map[string]int, error) { + effects, err := os.Open(filename) + if err != nil { + return nil, nil, fmt.Errorf("could not open effects effects: %v", err) + } + defer func(trace *os.File) { + err := trace.Close() + if err != nil { + klog.Errorf("Now this should not happen: %v.", err) + } + }(effects) + + var events []Effect + tmp, err1 := io.ReadAll(effects) + err2 := json.Unmarshal(tmp, &events) + if err1 != nil || err2 != nil { + return nil, nil, fmt.Errorf("could not read and/or unmarshal effects trace: %v-%v", err1, err2) + } + + data := make(map[string]map[string]map[string][]interface{}) + indexes := make(map[string]map[string]map[string]int) + for _, effect := range events { + if _, ok := data[effect.Group]; !ok { + data[effect.Group] = make(map[string]map[string][]interface{}) + indexes[effect.Group] = make(map[string]map[string]int) + } + if _, ok := data[effect.Group][effect.ProfileName]; !ok { + data[effect.Group][effect.ProfileName] = make(map[string][]interface{}) + indexes[effect.Group][effect.ProfileName] = make(map[string]int) + } + indexes[effect.Group][effect.ProfileName][effect.Name] = 0 + data[effect.Group][effect.ProfileName][effect.Name] = append(data[effect.Group][effect.ProfileName][effect.Name], effect.Data) + } + + return data, indexes, nil +} + +// setupProfiles initially defines the profiles. +func (f *testFixture) setupProfiles(profiles map[string]float64) { + // FIXME: we should store all information related to the profiles ano not use names to infer types. + for name := range profiles { + tmp := strings.Split(name, "/") + typeName := "throughput" + if strings.Contains(tmp[1], "latency") { + typeName = "latency" + } + profile := &v1alpha1.KPIProfile{ + ObjectMeta: metaV1.ObjectMeta{ + Name: tmp[1], + Namespace: tmp[0], + }, + Spec: v1alpha1.KPIProfileSpec{ + KPIType: typeName, + }, + } + _, err := f.intentClient.IdoV1alpha1().KPIProfiles(tmp[0]).Create(context.TODO(), profile, metaV1.CreateOptions{}) + if err != nil { + f.test.Errorf("Could not add profile: %v.", err) + } + klog.Infof("Adding profile: %s.", name) // For some weird reason this logging is important for timing reasons. + } + f.checkProfiles(profiles) +} + +// setWorkloadState updates the deployment and pod specs. +func (f *testFixture) setWorkloadState(pods map[string]map[string]interface{}, resources map[string]int64) { + // FIXME: current we do not store information on the workload - we could pick it up from a manifest later on. + // if deployment does not exist - add it. 
+ res, err := f.k8sClient.AppsV1().Deployments("default").Get(context.TODO(), "function-deployment", metaV1.GetOptions{}) + if err != nil || res == nil { + repl := int32(len(pods)) + deployment := &appsV1.Deployment{ + ObjectMeta: metaV1.ObjectMeta{ + Name: "function-deployment", + Namespace: metaV1.NamespaceDefault, + }, + Spec: appsV1.DeploymentSpec{ + Replicas: &repl, + Selector: &metaV1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "sample-function", + }, + }, + Template: coreV1.PodTemplateSpec{ + Spec: coreV1.PodSpec{ + Containers: []coreV1.Container{ + { + Name: "function", + Resources: coreV1.ResourceRequirements{ + Limits: make(map[coreV1.ResourceName]resource.Quantity), + Requests: make(map[coreV1.ResourceName]resource.Quantity), + }, + }, + }, + }, + }, + }, + } + _, err := f.k8sClient.AppsV1().Deployments("default").Create(context.TODO(), deployment, metaV1.CreateOptions{}) + if err != nil { + klog.Errorf("Could not add deployment: %v", err) + } + } + // add all pods from trace. + for key := range pods { + res, err := f.k8sClient.CoreV1().Pods("default").Get(context.TODO(), key, metaV1.GetOptions{}) + if err != nil || res == nil { + pod := &coreV1.Pod{ + ObjectMeta: metaV1.ObjectMeta{ + Name: key, + Labels: map[string]string{"app": "sample-function"}, + Namespace: "default", + }, + Status: coreV1.PodStatus{ + Phase: coreV1.PodPhase(pods[key]["state"].(string)), + QOSClass: coreV1.PodQOSClass(pods[key]["qosclass"].(string)), + }, + Spec: coreV1.PodSpec{ + NodeName: pods[key]["nodename"].(string), + }, + } + var containers []coreV1.Container + keys := make([]string, 0) + for k := range resources { + keys = append(keys, k) + } + sort.Strings(keys) + for _, key := range keys { + val := resources[key] + tmp := strings.Split(key, "_") + index, err := strconv.Atoi(tmp[0]) + if err != nil { + klog.Infof("Error parsing container index: %s.", err) + } + if index > len(containers)-1 { + containers = append(containers, coreV1.Container{ + Name: tmp[0], + Resources: coreV1.ResourceRequirements{ + Limits: make(map[coreV1.ResourceName]resource.Quantity), + Requests: make(map[coreV1.ResourceName]resource.Quantity), + }, + }) + } + if tmp[2] == "limits" { + quan := resource.NewMilliQuantity(val, resource.DecimalSI) + containers[index].Resources.Limits[coreV1.ResourceName(tmp[1])] = *quan + } else if tmp[2] == "requests" { + quan := resource.NewMilliQuantity(val, resource.DecimalSI) + containers[index].Resources.Requests[coreV1.ResourceName(tmp[1])] = *quan + } + } + pod.Spec.Containers = containers + _, err := f.k8sClient.CoreV1().Pods("default").Create(context.TODO(), pod, metaV1.CreateOptions{}) + if err != nil { + klog.Errorf("Could not add pod: %v", err) + } + } + } + // remove all pods that should no longer exist. + activePods, err := f.k8sClient.CoreV1().Pods("default").List(context.TODO(), metaV1.ListOptions{}) + if err != nil { + klog.Errorf("Now this should never happen: %v.", err) + } + for _, active := range activePods.Items { + _, ok := pods[active.Name] + if !ok { + err := f.k8sClient.CoreV1().Pods("default").Delete(context.TODO(), active.Name, metaV1.DeleteOptions{}) + if err != nil { + klog.Errorf("Now this should never happen: %v.", err) + } + } + } +} + +// setupIntent defines the initial intent.
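+// It targets the default/function-deployment Deployment and adds one objective per entry in the objectives map.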
+func (f *testFixture) setupIntent(name string, objectives map[string]float64) { + tmp := strings.Split(name, "/") + myIntent := &v1alpha1.Intent{ + ObjectMeta: metaV1.ObjectMeta{ + Name: tmp[1], + Namespace: tmp[0], + }, + Spec: v1alpha1.IntentSpec{ + Priority: 1.0, + TargetRef: v1alpha1.TargetRef(struct { + Kind string + Name string + }{Kind: "Deployment", Name: "default/function-deployment"}), + }, + } + for key, val := range objectives { + tmp := v1alpha1.TargetObjective{ + Name: key, + MeasuredBy: key, + Value: val, + } + myIntent.Spec.Objectives = append(myIntent.Spec.Objectives, tmp) + } + _, err := f.intentClient.IdoV1alpha1().Intents(tmp[0]).Create(context.TODO(), myIntent, metaV1.CreateOptions{}) + if err != nil { + klog.Errorf("Could not add intent: %v.", err) + } +} + +// setDesiredObjectives updates an existing intent. +func (f *testFixture) setDesiredObjectives(name string, objectives map[string]float64) { + tmp := strings.Split(name, "/") + myIntent, err := f.intentClient.IdoV1alpha1().Intents(tmp[0]).Get(context.TODO(), tmp[1], metaV1.GetOptions{}) + if err != nil { + klog.Errorf("Could not retrieve previously set intent: %v.", err) + } + changed := false + myIntent = myIntent.DeepCopy() + myIntent.ResourceVersion += "1" + for i, objv := range myIntent.Spec.Objectives { + if objectives[objv.Name] != objv.Value { + myIntent.Spec.Objectives[i].Value = objectives[objv.Name] + changed = true + } + } + if changed { + _, err = f.intentClient.IdoV1alpha1().Intents(tmp[0]).Update(context.TODO(), myIntent, metaV1.UpdateOptions{}) + if err != nil { + klog.Errorf("Could not update intent: %v.", err) + } + } else { + klog.Infof("No change in intent - will not update: %v", objectives) + } +} + +// setCurrentObjectives updates the values that the prometheus dummy will report back. +func (f *testFixture) setCurrentObjectives(values map[string]float64) { + f.prometheus.updateValues() <- prometheusValues{vals: values} +} + +// setCurrentData updates the values that the prometheus dummy will report back for host-related metrics. +func (f *testFixture) setCurrentData(vals map[string]map[string]float64) { + data := make(map[string]float64) + for metric, entry := range vals { + for host, val := range entry { + data[metric+"@"+host] = val + } + } + f.prometheus.updateValues() <- prometheusValues{vals: data} +} + +// comparePlans compares two plans and returns false if they are not the same. +func comparePlans(old []map[string]interface{}, new []planner.Action) bool { + if len(new) != len(old) { + return false + } + var oldPlan []planner.Action + for _, entry := range old { + tmp := planner.Action{ + Name: entry["name"].(string), + Properties: entry["properties"], + } + oldPlan = append(oldPlan, tmp) + } + for i, item := range oldPlan { + if item.Name != new[i].Name { + klog.Infof("Expected action name: %v - got %v", item.Name, new[i].Name) + return false + } + one := fmt.Sprintf("%v", item.Properties) + another := fmt.Sprintf("%v", new[i].Properties) + if one != another && item.Name != "rmPod" { + klog.Infof("Expected property: %v - got %v", one, another) + return false + } else if item.Name == "rmPod" { + if one != another && len(one) == len(another) { + klog.Warningf("Not super sure - but looks ok: %v - %v", item.Properties, new[i].Properties) + } else if one != another { + klog.Infof("This does not look right; expected: %v - got %v", one, another) + return false + } + } + } + return true +} + +// runTrace tries to retrace a single trace.
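+// It uses the first event to bootstrap profiles, intent and workload state, then replays the remaining events and compares each generated plan with the recorded one.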
+func runTrace(env testEnvironment, t *testing.T) { + f := newTestFixture(t) + stopChannel := make(chan struct{}) + defer close(stopChannel) + + cancel := f.newTestSetup(env, stopChannel) + + events := parseTrace(env.eventsFilename) + // We'll use the first entry in the trace to set up the system. + f.setupProfiles(events[0].Desired) + f.setupIntent(events[0].Intent, events[0].Desired) + f.setCurrentObjectives(events[0].Current) + f.setCurrentData(events[0].Data) + f.setWorkloadState(events[0].Pods, events[0].Resources) + f.checkPrometheus() + <-f.ticker // although we use the first entry for setup, we should wait for the first plan; no need to compare it. + + // now replay rest of the trace. + for i := 1; i < len(events); i++ { + fmt.Println(strconv.Itoa(i) + "----") + f.setCurrentObjectives(events[i].Current) + f.setCurrentData(events[i].Data) + f.setWorkloadState(events[i].Pods, events[i].Resources) + f.setDesiredObjectives(events[i].Intent, events[i].Desired) + f.tracer.stepIndex(i) + + planEvent := <-f.ticker + if !comparePlans(events[i].Plan, planEvent.plan) { + t.Errorf("Expected %v - got %v.", events[i].Plan, planEvent.plan) + } + } + + err := f.prometheusServer.Shutdown(context.TODO()) + if err != nil { + klog.Errorf("Error while shutting down the prometheus server: %v", err) + } + stopChannel <- struct{}{} + cancel() +} + +// TestTracesForSanity checks if various sets of traces work. +func TestTracesForSanity(t *testing.T) { + defaultsConfig, err1 := common.LoadConfig("traces/defaults.json", func() interface{} { + return &common.Config{} + }) + scaleOutConfig, err2 := common.LoadConfig("traces/scale_out.json", func() interface{} { + return &scaling.ScaleOutConfig{} + }) + rmPodConfig, err3 := common.LoadConfig("traces/rm_pod.json", func() interface{} { + return &scaling.RmPodConfig{} + }) + cpuScaleConfig, err4 := common.LoadConfig("traces/cpu_scale.json", func() interface{} { + return &scaling.CPUScaleConfig{} + }) + if err1 != nil || err2 != nil || err3 != nil || err4 != nil { + t.Errorf("Could not load config files!") + } + + var tests = []testEnvironment{ + {name: "fabricated_trace", effectsFilename: "traces/trace_0/effects.json", eventsFilename: "traces/trace_0/events.json", defaults: defaultsConfig.(*common.Config), actuators: map[string]actuatorSetup{ + "NewCPUScaleActuator": {scaling.NewCPUScaleActuator, *cpuScaleConfig.(*scaling.CPUScaleConfig)}}, + }, + {name: "horizontal_vertical_scaling", effectsFilename: "traces/trace_1/effects.json", eventsFilename: "traces/trace_1/events.json", defaults: defaultsConfig.(*common.Config), actuators: map[string]actuatorSetup{ + "NewCPUScaleActuator": {scaling.NewCPUScaleActuator, *cpuScaleConfig.(*scaling.CPUScaleConfig)}, + "NewRmPodActuator": {scaling.NewRmPodActuator, *rmPodConfig.(*scaling.RmPodConfig)}, + "NewScaleOutActuator": {scaling.NewScaleOutActuator, *scaleOutConfig.(*scaling.ScaleOutConfig)}, + }}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + runTrace(tt, t) + }) + } +} diff --git a/pkg/tests/full_planner_test.go b/pkg/tests/full_planner_test.go index 7a70329..6624947 100644 --- a/pkg/tests/full_planner_test.go +++ b/pkg/tests/full_planner_test.go @@ -1,8 +1,13 @@ package tests import ( + "os" + "syscall" "testing" + + "github.com/intel/intent-driven-orchestration/pkg/controller" + pluginsHelper "github.com/intel/intent-driven-orchestration/plugins" + + "github.com/intel/intent-driven-orchestration/pkg/common" "github.com/intel/intent-driven-orchestration/pkg/planner"
"github.com/intel/intent-driven-orchestration/pkg/planner/actuators" @@ -24,9 +29,9 @@ func (d dummyTracer) GetEffect(_ string, _ string, _ string, _ int, constructor // these numbers are from a test of the recommendation service of the GCP online boutique microservice demo app. tmp.ReplicaRange = [2]int{1, 12} // Note that back in the days we did measure latency in s not ms. - tmp.Popt = [5]float64{0., 4.22303386, 0.14946586, 27.25801409, 0.32679364} + tmp.Popt = [4]float64{0.306355135, 0.193240314, 53.0816499, 0.0147372746} tmp.ThroughputRange = [2]float64{0.3, 6064.0} - tmp.ThroughputScale = [2]float64{0.002033898305084746, 0.7966101694915254} + tmp.ThroughputScale = [2]float64{0.002143905536223758, 0.9993568283391329} return tmp, nil } @@ -47,7 +52,7 @@ func setupTestCase() (common.State, common.State, map[string]common.Profile) { goal := common.State{ Intent: common.Intent{ Objectives: map[string]float64{ - "p99": 30, + "p99": 0.03, "rps": 0, "availability": 0.999, }, @@ -62,12 +67,31 @@ func setupTestCase() (common.State, common.State, map[string]common.Profile) { return start, goal, profiles } +func startScaleOutPlugin(tracer controller.Tracer, port, endpoint int) chan os.Signal { + scaleCfg := scaling.ScaleOutConfig{ + MaxPods: 128, + } + actuator0 := scaling.NewScaleOutActuator(nil, tracer, scaleCfg) + stopper := pluginsHelper.StartActuatorPlugin(actuator0, "localhost", port, "localhost", endpoint) + return stopper +} + +func startRemovePodPlugin(tracer controller.Tracer, port, endpoint int) chan os.Signal { + rmPodCfg := scaling.RmPodConfig{ + LookBack: 20, + MinPods: 1, + } + actuator1 := scaling.NewRmPodActuator(nil, tracer, rmPodCfg) + stopper := pluginsHelper.StartActuatorPlugin(actuator1, "localhost", port, "localhost", endpoint) + return stopper +} + func executeBenchmark(b *testing.B, planner *astar.APlanner) { start, goal, profiles := setupTestCase() // quick test if the planner actually does sth... res := planner.CreatePlan(start, goal, profiles) - if len(res) != 1 || res[0].Name != "scaleOut" || res[0].Properties.(map[string]int32)["factor"] != 2 { + if len(res) != 1 || res[0].Name != "scaleOut" || res[0].Properties.(map[string]int64)["factor"] != 2 { b.Errorf("benchmarks will fail - planner did not run correctly; result was: %v.", res) } @@ -98,14 +122,8 @@ func BenchmarkAStarGrpcCreatePlan(b *testing.B) { executeBenchmark(b, myPlanner) }) - err := pS.Stop() - if err != nil { - klog.ErrorS(err, "error stopping scale_out") - } - err = pR.Stop() - if err != nil { - klog.ErrorS(err, "error stopping rm_pod") - } + pS <- syscall.SIGTERM + pR <- syscall.SIGTERM } // BenchmarkAStarCreatePlan benchmarks the planner including actuators. @@ -156,19 +174,13 @@ func TestAStarGrpcCreatePlan(t *testing.T) { s0, g0, profiles := setupTestCase() res := myPlanner.CreatePlan(s0, g0, profiles) - if len(res) != 1 || res[0].Name != "scaleOut" || res[0].Properties.(map[string]int32)["factor"] != 2 { + if len(res) != 1 || res[0].Name != "scaleOut" || res[0].Properties.(map[string]int64)["factor"] != 2 { t.Errorf("Planner did not run correctly; result was: %v.", res) } - err := pS.Stop() - if err != nil { - klog.ErrorS(err, "error stopping scale_out") - } - err = pR.Stop() - if err != nil { - klog.ErrorS(err, "error stopping rm_pod") - } - + // stop plugins. + pS <- syscall.SIGTERM + pR <- syscall.SIGTERM } // TestAStarCreatePlan tests the planner including actuators. 
@@ -193,7 +205,7 @@ func TestAStarCreatePlan(t *testing.T) { start, goal, profiles := setupTestCase() myPlanner := astar.NewAPlanner(actuatorList, cfg) res := myPlanner.CreatePlan(start, goal, profiles) - if len(res) != 1 || res[0].Name != "scaleOut" || res[0].Properties.(map[string]int32)["factor"] != 2 { + if len(res) != 1 || res[0].Name != "scaleOut" || res[0].Properties.(map[string]int64)["factor"] != 2 { t.Errorf("Planner did not run correctly; result was: %v.", res) } myPlanner.Stop() diff --git a/pkg/tests/traces/cpu_scale.json b/pkg/tests/traces/cpu_scale.json new file mode 100644 index 0000000..9270904 --- /dev/null +++ b/pkg/tests/traces/cpu_scale.json @@ -0,0 +1,8 @@ +{ + "interpreter": "echo", + "analytics_script": "", + "cpu_max": 4000, + "cpu_rounding": 100, + "cpu_safeguard_factor": 0.95, + "max_proactive_cpu": 0 +} diff --git a/pkg/tests/traces/defaults.json b/pkg/tests/traces/defaults.json new file mode 100644 index 0000000..508be6d --- /dev/null +++ b/pkg/tests/traces/defaults.json @@ -0,0 +1,20 @@ +{ + "controller": { + "task_channel_length": 100, + "controller_timeout": 3, + "plan_cache_ttl": 1000, + "plan_cache_timeout": 100 + }, + "monitor": { + "profile": { + "queries": "traces/queries.json" + } + }, + "planner": { + "astar": { + "opportunistic_candidates": 0, + "max_states": 2000, + "max_candidates": 10 + } + } +} diff --git a/pkg/tests/traces/queries.json b/pkg/tests/traces/queries.json new file mode 100644 index 0000000..f893029 --- /dev/null +++ b/pkg/tests/traces/queries.json @@ -0,0 +1,18 @@ +{ + "default/p50latency": { + "query": "default/p50latency&namespace=\"%s\"&type=\"%s\"&name=\"%s\"&?by=\"%s\"", + "endpoint": "http://127.0.0.1:39090/query" + }, + "default/p95latency": { + "query": "default/p95latency&namespace=\"%s\"&type=\"%s\"&name=\"%s\"&?by=\"%s\"", + "endpoint": "http://127.0.0.1:39090/query" + }, + "default/p99latency": { + "query": "default/p99latency&namespace=\"%s\"&type=\"%s\"&name=\"%s\"&?by=\"%s\"", + "endpoint": "http://127.0.0.1:39090/query" + }, + "default/throughput": { + "query": "default/throughput&namespace=\"%s\"&type=\"%s\"&name=\"%s\"&?by=\"%s\"", + "endpoint": "http://127.0.0.1:39090/query" + } +} diff --git a/pkg/tests/traces/rm_pod.json b/pkg/tests/traces/rm_pod.json new file mode 100644 index 0000000..e1d06ef --- /dev/null +++ b/pkg/tests/traces/rm_pod.json @@ -0,0 +1,4 @@ +{ + "look_back": 20, + "min_pods": 1 +} diff --git a/pkg/tests/traces/scale_out.json b/pkg/tests/traces/scale_out.json new file mode 100644 index 0000000..9ad9738 --- /dev/null +++ b/pkg/tests/traces/scale_out.json @@ -0,0 +1,7 @@ +{ + "interpreter": "echo", + "analytics_script": "", + "max_pods": 10, + "look_back": 20, + "max_proactive_scale_out": 0 +} diff --git a/pkg/tests/traces/trace_0/effects.json b/pkg/tests/traces/trace_0/effects.json new file mode 100644 index 0000000..6a6aa42 --- /dev/null +++ b/pkg/tests/traces/trace_0/effects.json @@ -0,0 +1,80 @@ +[ + { + "name": "default/intent", + "profileName": "default/p99latency", + "group": "vertical_scaling", + "data": { + "latencyRange": [ + 50, + 150 + ], + "cpuRange": [ + 1, + 10 + ], + "popt": [ + 100, + 0.2, + 50 + ], + "trainingFeatures": [ + "cpus" + ], + "targetFeature": "default/p99latency" + }, + "static": true, + "timestamp": "0" + }, + { + "name": "default/intent", + "profileName": "default/p99latency", + "group": "vertical_scaling", + "data": { + "latencyRange": [ + 50, + 150 + ], + "cpuRange": [ + 1, + 10 + ], + "popt": [ + 100, + 0.2, + 50 + ], + "trainingFeatures": [ + "cpus" + ], + 
"targetFeature": "default/p99latency" + }, + "static": true, + "timestamp": "0" + }, + { + "name": "default/intent", + "profileName": "default/p99latency", + "group": "vertical_scaling", + "data": { + "latencyRange": [ + 50, + 250 + ], + "cpuRange": [ + 1, + 10 + ], + "popt": [ + 200, + 0.2, + 50 + ], + "trainingFeatures": [ + "cpus" + ], + "targetFeature": "default/p99latency" + }, + "static": true, + "timestamp": "1" + } +] diff --git a/pkg/tests/traces/trace_0/events.json b/pkg/tests/traces/trace_0/events.json new file mode 100644 index 0000000..ffd5897 --- /dev/null +++ b/pkg/tests/traces/trace_0/events.json @@ -0,0 +1,97 @@ +[ + { + "name": "default/intent", + "timestamp": "0", + "current_objectives": { + "default/p99latency": 200, + "default/throughput": 100 + }, + "desired_objectives": { + "default/p99latency": 100.0, + "default/throughput": 0.0 + }, + "resources": { + "0_cpu_requests": 1000, + "0_cpu_limits": 1000 + }, + "annotations": {}, + "pods": { + "pod_0": { + "availability": 1.0, + "nodename": "node_0", + "state": "Running", + "qosclass": "Guaranteed" + } + }, + "data": {}, + "plan": null + }, + { + "name": "default/intent", + "timestamp": "0", + "current_objectives": { + "default/p99latency": 200, + "default/throughput": 100 + }, + "desired_objectives": { + "default/p99latency": 150.0, + "default/throughput": 0.0 + }, + "resources": { + "0_cpu_requests": 1000, + "0_cpu_limits": 1000 + }, + "annotations": {}, + "pods": { + "pod_0": { + "availability": 1.0, + "nodename": "node_0", + "state": "Running", + "qosclass": "Guaranteed" + } + }, + "data": {}, + "plan": [ + { + "name": "scaleCPU", + "properties": { + "value": 400 + } + } + ] + }, + { + "name": "default/intent", + "timestamp": "0", + "current_objectives": { + "default/p99latency": 200, + "default/throughput": 100 + }, + "desired_objectives": { + "default/p99latency": 100.0, + "default/throughput": 0.0 + }, + "resources": { + "0_cpu_requests": 1000, + "0_cpu_limits": 1000 + }, + "annotations": {}, + "pods": { + "pod_0": { + "availability": 1.0, + "nodename": "node_0", + "state": "Running", + "qosclass": "Guaranteed" + } + }, + "data": {}, + "plan": [ + { + "name": "scaleCPU", + "properties": { + "value": 7500 + } + } + ] + } +] diff --git a/pkg/tests/traces/trace_1/effects.json b/pkg/tests/traces/trace_1/effects.json new file mode 100644 index 0000000..f00bb55 --- /dev/null +++ b/pkg/tests/traces/trace_1/effects.json @@ -0,0 +1,198 @@ +[{ + "_id": { + "$oid": "65b7a9d5d8c0a5577562fb5e" + }, + "name": "default/my-intent", + "profileName": "default/p50latency", + "group": "scaling", + "data": { + "throughputRange": [ + 11.7, + 37.4 + ], + "throughputScale": [ + 0.1556420233463035, + -0.8210116731517509 + ], + "replicaRange": [ + 1, + 4 + ], + "popt": [ + 1.7953060759736734, + 0.7107664907145342, + 0.08418771034672608, + 0.16712977859691586 + ], + "trainingFeatures": [ + "default/throughput", + "replicas" + ], + "targetFeature": "default/p50latency" + }, + "timestamp": { + "$date": "2024-01-29T14:36:21.811Z" + }, + "static": true +},{ + "_id": { + "$oid": "65b7a9d22130241fce26d847" + }, + "name": "default/my-intent", + "profileName": "default/p95latency", + "group": "scaling", + "data": { + "throughputRange": [ + 6.4, + 37.4 + ], + "throughputScale": [ + 0.12903225806451613, + 0.17419354838709677 + ], + "replicaRange": [ + 1, + 4 + ], + "popt": [ + 1.876236766451076, + 0.3479978871129307, + 0.03630452074498126, + 0.05612263537403011 + ], + "trainingFeatures": [ + "default/throughput", + "replicas" + ], + "targetFeature": 
"default/p95latency" + }, + "timestamp": { + "$date": "2024-01-29T14:36:18.291Z" + }, + "static": true +},{ + "_id": { + "$oid": "65b7a9d8f5a8afa4802bee95" + }, + "name": "default/my-intent", + "profileName": "default/p99latency", + "group": "scaling", + "data": { + "throughputRange": [ + 6.4, + 37.2 + ], + "throughputScale": [ + 0.12987012987012986, + 0.1688311688311689 + ], + "replicaRange": [ + 1, + 4 + ], + "popt": [ + 1.7340978705359251, + 0.32188728156949387, + 0.02992459259216999, + 0.042336575288062825 + ], + "trainingFeatures": [ + "default/throughput", + "replicas" + ], + "targetFeature": "default/p99latency" + }, + "timestamp": { + "$date": "2024-01-29T14:36:24.871Z" + }, + "static": true +},{ + "_id": { + "$oid": "65b7a9d6c34376a54780332c" + }, + "name": "default/my-intent", + "profileName": "default/p50latency", + "group": "vertical_scaling", + "data": { + "latencyRange": [ + 35.043, + 149.407 + ], + "cpuRange": [ + 0.25, + 2.5 + ], + "popt": [ + 272.3754065441032, + 3.3408376505041804, + 32.917177116938596 + ], + "trainingFeatures": [ + "cpus" + ], + "targetFeature": "default/p50latency" + }, + "timestamp": { + "$date": "2024-01-29T14:36:22.920Z" + }, + "static": true +},{ + "_id": { + "$oid": "65b7a9d4b9bd0848eca85768" + }, + "name": "default/my-intent", + "profileName": "default/p95latency", + "group": "vertical_scaling", + "data": { + "latencyRange": [ + 39.815, + 245.714 + ], + "cpuRange": [ + 0.25, + 2.5 + ], + "popt": [ + 269.59131728063625, + 2.225499837045876, + 54.44974387771397 + ], + "trainingFeatures": [ + "cpus" + ], + "targetFeature": "default/p95latency"}, + "timestamp": { + "$date": "2024-01-29T14:36:20.311Z" + }, + "static": true +},{ + "_id": { + "$oid": "65b7a9d14895657c322f7e6c" + }, + "name": "default/my-intent", + "profileName": "default/p99latency", + "group": "vertical_scaling", + "data": { + "latencyRange": [ + 46.925, + 289.143 + ], + "cpuRange": [ + 0.25, + 2.5 + ], + "popt": [ + 248.69930654974348, + 1.8325502392260382, + 66.53386004054035 + ], + "trainingFeatures": [ + "cpus" + ], + "targetFeature": "default/p99latency" + }, + "timestamp": { + "$date": "2024-01-29T14:36:17.619Z" + }, + "static": true +}] diff --git a/pkg/tests/traces/trace_1/events.json b/pkg/tests/traces/trace_1/events.json new file mode 100644 index 0000000..cf80d93 --- /dev/null +++ b/pkg/tests/traces/trace_1/events.json @@ -0,0 +1,905 @@ +[{ + "_id": { + "$oid": "65b8e70d1a454775358ae4a1" + }, + "name": "default/my-intent", + "timestamp": { + "$date": "2024-01-30T12:09:49.704Z" + }, + "current_objectives": { + "default/p99latency": -1.0, + "default/throughput": -1.0 + }, + "desired_objectives": { + "default/p99latency": 100.0, + "default/throughput": 0.0 + }, + "resources": { + "0_cpu_requests": 500, + "0_cpu_limits": 500, + "1_cpu_requests": 500, + "1_cpu_limits": 500 + }, + "annotations": { + "linkerd.io/created-by": "linkerd/proxy-injector stable-2.14.8", + "linkerd.io/inject": "enabled", + "linkerd.io/proxy-version": "stable-2.14.8", + "viz.linkerd.io/tap-enabled": "true", + "kubernetes.io/limit-ranger": "LimitRanger plugin set: cpu request for container linkerd-proxy; cpu limit for container linkerd-proxy" + }, + "pods": { + "function-deployment-6fc9444fd4-4lmrz": { + "availability": 1.0, + "nodename": "", + "state": "Pending", + "qosclass": "Burstable" + } + }, + "data": { + "cpu_value": {} + }, + "plan": null +},{ + "_id": { + "$oid": "65b8e7311a454775358ae4a2" + }, + "name": "default/my-intent", + "timestamp": { + "$date": "2024-01-30T12:10:25.527Z" + }, + 
"current_objectives": { + "default/p99latency": 0.99, + "default/throughput": 0.3 + }, + "desired_objectives": { + "default/p99latency": 100.0, + "default/throughput": 0.0 + }, + "resources": { + "0_cpu_requests": 500, + "0_cpu_limits": 500, + "1_cpu_requests": 500, + "1_cpu_limits": 500 + }, + "annotations": { + "linkerd.io/inject": "enabled", + "linkerd.io/proxy-version": "stable-2.14.8", + "viz.linkerd.io/tap-enabled": "true", + "kubernetes.io/limit-ranger": "LimitRanger plugin set: cpu request for container linkerd-proxy; cpu limit for container linkerd-proxy", + "linkerd.io/created-by": "linkerd/proxy-injector stable-2.14.8" + }, + "pods": { + "function-deployment-6fc9444fd4-4lmrz": { + "availability": 1.0, + "nodename": "comet1", + "state": "Running", + "qosclass": "Burstable" + } + }, + "data": { + "cpu_value": { + "comet1": 13.062482793132801 + } + }, + "plan": [ + { + "name": "scaleCPU", + "properties": { + "value": 1200 + } + } + ] +},{ + "_id": { + "$oid": "65b8e78b1a454775358ae4a3" + }, + "name": "default/my-intent", + "timestamp": { + "$date": "2024-01-30T12:11:55.556Z" + }, + "current_objectives": { + "default/throughput": 12.3, + "default/p99latency": 91.214 + }, + "desired_objectives": { + "default/p99latency": 100.0, + "default/throughput": 0.0 + }, + "resources": { + "0_cpu_limits": 500, + "1_cpu_requests": 1200, + "1_cpu_limits": 1200, + "0_cpu_requests": 500 + }, + "annotations": { + "viz.linkerd.io/tap-enabled": "true", + "kubernetes.io/limit-ranger": "LimitRanger plugin set: cpu request for container linkerd-proxy; cpu limit for container linkerd-proxy", + "linkerd.io/created-by": "linkerd/proxy-injector stable-2.14.8", + "linkerd.io/inject": "enabled", + "linkerd.io/proxy-version": "stable-2.14.8" + }, + "pods": { + "function-deployment-5876876568-mb4zc": { + "availability": 1.0, + "nodename": "comet0", + "state": "Running", + "qosclass": "Burstable" + } + }, + "data": { + "cpu_value": { + "comet0": 12.715407145003718 + } + }, + "plan": null +},{ + "_id": { + "$oid": "65b8e7b81a454775358ae4a4" + }, + "name": "default/my-intent", + "timestamp": { + "$date": "2024-01-30T12:12:40.486Z" + }, + "current_objectives": { + "default/throughput": 12.3, + "default/p99latency": 84.625 + }, + "desired_objectives": { + "default/p99latency": 100.0, + "default/throughput": 0.0 + }, + "resources": { + "1_cpu_requests": 1200, + "1_cpu_limits": 1200, + "0_cpu_requests": 500, + "0_cpu_limits": 500 + }, + "annotations": { + "kubernetes.io/limit-ranger": "LimitRanger plugin set: cpu request for container linkerd-proxy; cpu limit for container linkerd-proxy", + "linkerd.io/created-by": "linkerd/proxy-injector stable-2.14.8", + "linkerd.io/inject": "enabled", + "linkerd.io/proxy-version": "stable-2.14.8", + "viz.linkerd.io/tap-enabled": "true" + }, + "pods": { + "function-deployment-5876876568-mb4zc": { + "availability": 1.0, + "nodename": "comet0", + "state": "Running", + "qosclass": "Burstable" + } + }, + "data": { + "cpu_value": { + "comet0": 12.503563267721876 + } + }, + "plan": null +},{ + "_id": { + "$oid": "65b8e7e51a454775358ae4a5" + }, + "name": "default/my-intent", + "timestamp": { + "$date": "2024-01-30T12:13:25.561Z" + }, + "current_objectives": { + "default/throughput": 24.3, + "default/p99latency": 139.25 + }, + "desired_objectives": { + "default/p99latency": 100.0, + "default/throughput": 0.0 + }, + "resources": { + "1_cpu_limits": 1200, + "0_cpu_requests": 500, + "0_cpu_limits": 500, + "1_cpu_requests": 1200 + }, + "annotations": { + "kubernetes.io/limit-ranger": 
"LimitRanger plugin set: cpu request for container linkerd-proxy; cpu limit for container linkerd-proxy", + "linkerd.io/created-by": "linkerd/proxy-injector stable-2.14.8", + "linkerd.io/inject": "enabled", + "linkerd.io/proxy-version": "stable-2.14.8", + "viz.linkerd.io/tap-enabled": "true" + }, + "pods": { + "function-deployment-5876876568-mb4zc": { + "availability": 1.0, + "nodename": "comet0", + "state": "Running", + "qosclass": "Burstable" + } + }, + "data": { + "cpu_value": { + "comet0": 27.074713069055015 + } + }, + "plan": [ + { + "name": "scaleOut", + "properties": { + "factor": 3 + } + } + ] +},{ + "_id": { + "$oid": "65b8e8401a454775358ae4a6" + }, + "name": "default/my-intent", + "timestamp": { + "$date": "2024-01-30T12:14:56.295Z" + }, + "current_objectives": { + "default/p99latency": 94.386, + "default/throughput": 24.7 + }, + "desired_objectives": { + "default/p99latency": 100.0, + "default/throughput": 0.0 + }, + "resources": { + "0_cpu_requests": 500, + "0_cpu_limits": 500, + "1_cpu_requests": 1200, + "1_cpu_limits": 1200 + }, + "annotations": { + "linkerd.io/created-by": "linkerd/proxy-injector stable-2.14.8", + "linkerd.io/inject": "enabled", + "linkerd.io/proxy-version": "stable-2.14.8", + "viz.linkerd.io/tap-enabled": "true", + "kubernetes.io/limit-ranger": "LimitRanger plugin set: cpu request for container linkerd-proxy; cpu limit for container linkerd-proxy" + }, + "pods": { + "function-deployment-5876876568-x96sc": { + "availability": 1.0, + "nodename": "comet1", + "state": "Running", + "qosclass": "Burstable" + }, + "function-deployment-5876876568-zcc7m": { + "availability": 1.0, + "nodename": "comet1", + "state": "Running", + "qosclass": "Burstable" + }, + "function-deployment-5876876568-5pqb7": { + "availability": 1.0, + "nodename": "comet0", + "state": "Running", + "qosclass": "Burstable" + }, + "function-deployment-5876876568-mb4zc": { + "availability": 1.0, + "nodename": "comet0", + "state": "Running", + "qosclass": "Burstable" + } + }, + "data": { + "cpu_value": { + "comet0": 13.742450629440548, + "comet1": 14.330834547076375 + } + }, + "plan": null +},{ + "_id": { + "$oid": "65b8e86d1a454775358ae4a7" + }, + "name": "default/my-intent", + "timestamp": { + "$date": "2024-01-30T12:15:41.037Z" + }, + "current_objectives": { + "default/p99latency": 95.241, + "default/throughput": 25.9 + }, + "desired_objectives": { + "default/p99latency": 100.0, + "default/throughput": 0.0 + }, + "resources": { + "0_cpu_requests": 500, + "0_cpu_limits": 500, + "1_cpu_requests": 1200, + "1_cpu_limits": 1200 + }, + "annotations": { + "viz.linkerd.io/tap-enabled": "true", + "kubernetes.io/limit-ranger": "LimitRanger plugin set: cpu request for container linkerd-proxy; cpu limit for container linkerd-proxy", + "linkerd.io/created-by": "linkerd/proxy-injector stable-2.14.8", + "linkerd.io/inject": "enabled", + "linkerd.io/proxy-version": "stable-2.14.8" + }, + "pods": { + "function-deployment-5876876568-mb4zc": { + "availability": 1.0, + "nodename": "comet0", + "state": "Running", + "qosclass": "Burstable" + }, + "function-deployment-5876876568-x96sc": { + "availability": 1.0, + "nodename": "comet1", + "state": "Running", + "qosclass": "Burstable" + }, + "function-deployment-5876876568-zcc7m": { + "availability": 1.0, + "nodename": "comet1", + "state": "Running", + "qosclass": "Burstable" + }, + "function-deployment-5876876568-5pqb7": { + "availability": 1.0, + "nodename": "comet0", + "state": "Running", + "qosclass": "Burstable" + } + }, + "data": { + "cpu_value": { + "comet1": 
13.067782364694686, + "comet0": 14.785801654440816 + } + }, + "plan": null +},{ + "_id": { + "$oid": "65b8e89a1a454775358ae4a8" + }, + "name": "default/my-intent", + "timestamp": { + "$date": "2024-01-30T12:16:26.551Z" + }, + "current_objectives": { + "default/p99latency": 96.258, + "default/throughput": 25.0 + }, + "desired_objectives": { + "default/p99latency": 100.0, + "default/throughput": 0.0 + }, + "resources": { + "0_cpu_requests": 500, + "0_cpu_limits": 500, + "1_cpu_requests": 1200, + "1_cpu_limits": 1200 + }, + "annotations": { + "kubernetes.io/limit-ranger": "LimitRanger plugin set: cpu request for container linkerd-proxy; cpu limit for container linkerd-proxy", + "linkerd.io/created-by": "linkerd/proxy-injector stable-2.14.8", + "linkerd.io/inject": "enabled", + "linkerd.io/proxy-version": "stable-2.14.8", + "viz.linkerd.io/tap-enabled": "true" + }, + "pods": { + "function-deployment-5876876568-x96sc": { + "availability": 1.0, + "nodename": "comet1", + "state": "Running", + "qosclass": "Burstable" + }, + "function-deployment-5876876568-zcc7m": { + "availability": 1.0, + "nodename": "comet1", + "state": "Running", + "qosclass": "Burstable" + }, + "function-deployment-5876876568-5pqb7": { + "availability": 1.0, + "nodename": "comet0", + "state": "Running", + "qosclass": "Burstable" + }, + "function-deployment-5876876568-mb4zc": { + "availability": 1.0, + "nodename": "comet0", + "state": "Running", + "qosclass": "Burstable" + } + }, + "data": { + "cpu_value": { + "comet0": 12.679994287253427, + "comet1": 14.838800568485551 + } + }, + "plan": null +},{ + "_id": { + "$oid": "65b8e8c71a454775358ae4a9" + }, + "name": "default/my-intent", + "timestamp": { + "$date": "2024-01-30T12:17:11.058Z" + }, + "current_objectives": { + "default/p99latency": 95.062, + "default/throughput": 23.8 + }, + "desired_objectives": { + "default/p99latency": 100.0, + "default/throughput": 0.0 + }, + "resources": { + "0_cpu_limits": 500, + "1_cpu_requests": 1200, + "1_cpu_limits": 1200, + "0_cpu_requests": 500 + }, + "annotations": { + "kubernetes.io/limit-ranger": "LimitRanger plugin set: cpu request for container linkerd-proxy; cpu limit for container linkerd-proxy", + "linkerd.io/created-by": "linkerd/proxy-injector stable-2.14.8", + "linkerd.io/inject": "enabled", + "linkerd.io/proxy-version": "stable-2.14.8", + "viz.linkerd.io/tap-enabled": "true" + }, + "pods": { + "function-deployment-5876876568-zcc7m": { + "availability": 1.0, + "nodename": "comet1", + "state": "Running", + "qosclass": "Burstable" + }, + "function-deployment-5876876568-5pqb7": { + "availability": 1.0, + "nodename": "comet0", + "state": "Running", + "qosclass": "Burstable" + }, + "function-deployment-5876876568-mb4zc": { + "availability": 1.0, + "nodename": "comet0", + "state": "Running", + "qosclass": "Burstable" + }, + "function-deployment-5876876568-x96sc": { + "availability": 1.0, + "nodename": "comet1", + "state": "Running", + "qosclass": "Burstable" + } + }, + "data": { + "cpu_value": { + "comet0": 12.292454262744858, + "comet1": 15.220247878961262 + } + }, + "plan": null +},{ + "_id": { + "$oid": "65b8e8d11a454775358ae4aa" + }, + "name": "default/my-intent", + "timestamp": { + "$date": "2024-01-30T12:17:21.981Z" + }, + "current_objectives": { + "default/p99latency": 99.017, + "default/throughput": 26.1 + }, + "desired_objectives": { + "default/throughput": 0.0, + "default/p99latency": 100.0 + }, + "resources": { + "0_cpu_requests": 500, + "0_cpu_limits": 500, + "1_cpu_requests": 1200, + "1_cpu_limits": 1200 + }, + 
"annotations": { + "viz.linkerd.io/tap-enabled": "true", + "kubernetes.io/limit-ranger": "LimitRanger plugin set: cpu request for container linkerd-proxy; cpu limit for container linkerd-proxy", + "linkerd.io/created-by": "linkerd/proxy-injector stable-2.14.8", + "linkerd.io/inject": "enabled", + "linkerd.io/proxy-version": "stable-2.14.8" + }, + "pods": { + "function-deployment-5876876568-x96sc": { + "availability": 1.0, + "nodename": "comet1", + "state": "Running", + "qosclass": "Burstable" + }, + "function-deployment-5876876568-zcc7m": { + "availability": 1.0, + "nodename": "comet1", + "state": "Running", + "qosclass": "Burstable" + }, + "function-deployment-5876876568-5pqb7": { + "availability": 1.0, + "nodename": "comet0", + "state": "Running", + "qosclass": "Burstable" + }, + "function-deployment-5876876568-mb4zc": { + "availability": 1.0, + "nodename": "comet0", + "state": "Running", + "qosclass": "Burstable" + } + }, + "data": { + "cpu_value": { + "comet0": 15.171684274897899, + "comet1": 14.505257472795826 + } + }, + "plan": null +},{ + "_id": { + "$oid": "65b8e8f31a454775358ae4ab" + }, + "name": "default/my-intent", + "timestamp": { + "$date": "2024-01-30T12:17:55.579Z" + }, + "current_objectives": { + "default/p99latency": 49.758, + "default/throughput": 12.5 + }, + "desired_objectives": { + "default/p99latency": 100.0, + "default/throughput": 0.0 + }, + "resources": { + "0_cpu_requests": 500, + "0_cpu_limits": 500, + "1_cpu_requests": 1200, + "1_cpu_limits": 1200 + }, + "annotations": { + "viz.linkerd.io/tap-enabled": "true", + "kubernetes.io/limit-ranger": "LimitRanger plugin set: cpu request for container linkerd-proxy; cpu limit for container linkerd-proxy", + "linkerd.io/created-by": "linkerd/proxy-injector stable-2.14.8", + "linkerd.io/inject": "enabled", + "linkerd.io/proxy-version": "stable-2.14.8" + }, + "pods": { + "function-deployment-5876876568-mb4zc": { + "availability": 1.0, + "nodename": "comet0", + "state": "Running", + "qosclass": "Burstable" + }, + "function-deployment-5876876568-x96sc": { + "availability": 1.0, + "nodename": "comet1", + "state": "Running", + "qosclass": "Burstable" + }, + "function-deployment-5876876568-zcc7m": { + "availability": 1.0, + "nodename": "comet1", + "state": "Running", + "qosclass": "Burstable" + }, + "function-deployment-5876876568-5pqb7": { + "availability": 1.0, + "nodename": "comet0", + "state": "Running", + "qosclass": "Burstable" + } + }, + "data": { + "cpu_value": { + "comet0": 6.9707200098048405, + "comet1": 6.38656047921196 + } + }, + "plan": [ + { + "name": "rmPod", + "properties": { + "name": "function-deployment-5876876568-mb4zc" + } + } + ] +},{ + "_id": { + "$oid": "65b8e94d1a454775358ae4ac" + }, + "name": "default/my-intent", + "timestamp": { + "$date": "2024-01-30T12:19:25.655Z" + }, + "current_objectives": { + "default/p99latency": 68.25, + "default/throughput": 12.7 + }, + "desired_objectives": { + "default/p99latency": 100.0, + "default/throughput": 0.0 + }, + "resources": { + "0_cpu_requests": 500, + "0_cpu_limits": 500, + "1_cpu_requests": 1200, + "1_cpu_limits": 1200 + }, + "annotations": { + "linkerd.io/created-by": "linkerd/proxy-injector stable-2.14.8", + "linkerd.io/inject": "enabled", + "linkerd.io/proxy-version": "stable-2.14.8", + "viz.linkerd.io/tap-enabled": "true", + "kubernetes.io/limit-ranger": "LimitRanger plugin set: cpu request for container linkerd-proxy; cpu limit for container linkerd-proxy" + }, + "pods": { + "function-deployment-5876876568-zcc7m": { + "availability": 1.0, + "nodename": 
"comet1", + "state": "Running", + "qosclass": "Burstable" + }, + "function-deployment-5876876568-5pqb7": { + "availability": 1.0, + "nodename": "comet0", + "state": "Running", + "qosclass": "Burstable" + }, + "function-deployment-5876876568-x96sc": { + "availability": 1.0, + "nodename": "comet1", + "state": "Running", + "qosclass": "Burstable" + } + }, + "data": { + "cpu_value": { + "comet0": 3.8579247401675874, + "comet1": 10.100545776820109 + } + }, + "plan": [ + { + "name": "rmPod", + "properties": { + "name": "function-deployment-5876876568-5pqb7" + } + } + ] +},{ + "_id": { + "$oid": "65b8e9a71a454775358ae4ad" + }, + "name": "default/my-intent", + "timestamp": { + "$date": "2024-01-30T12:20:55.519Z" + }, + "current_objectives": { + "default/p99latency": 80.667, + "default/throughput": 11.7 + }, + "desired_objectives": { + "default/p99latency": 100.0, + "default/throughput": 0.0 + }, + "resources": { + "1_cpu_limits": 1200, + "0_cpu_requests": 500, + "0_cpu_limits": 500, + "1_cpu_requests": 1200 + }, + "annotations": { + "linkerd.io/proxy-version": "stable-2.14.8", + "viz.linkerd.io/tap-enabled": "true", + "kubernetes.io/limit-ranger": "LimitRanger plugin set: cpu request for container linkerd-proxy; cpu limit for container linkerd-proxy", + "linkerd.io/created-by": "linkerd/proxy-injector stable-2.14.8", + "linkerd.io/inject": "enabled" + }, + "pods": { + "function-deployment-5876876568-x96sc": { + "availability": 1.0, + "nodename": "comet1", + "state": "Running", + "qosclass": "Burstable" + }, + "function-deployment-5876876568-zcc7m": { + "availability": 1.0, + "nodename": "comet1", + "state": "Running", + "qosclass": "Burstable" + } + }, + "data": { + "cpu_value": { + "comet1": 12.827335315982339 + } + }, + "plan": [ + { + "name": "rmPod", + "properties": { + "name": "function-deployment-5876876568-x96sc" + } + } + ] +},{ + "_id": { + "$oid": "65b8ea011a454775358ae4ae" + }, + "name": "default/my-intent", + "timestamp": { + "$date": "2024-01-30T12:22:25.513Z" + }, + "current_objectives": { + "default/p99latency": 69.5, + "default/throughput": 12.2 + }, + "desired_objectives": { + "default/throughput": 0.0, + "default/p99latency": 100.0 + }, + "resources": { + "0_cpu_limits": 500, + "1_cpu_requests": 1200, + "1_cpu_limits": 1200, + "0_cpu_requests": 500 + }, + "annotations": { + "linkerd.io/inject": "enabled", + "linkerd.io/proxy-version": "stable-2.14.8", + "viz.linkerd.io/tap-enabled": "true", + "kubernetes.io/limit-ranger": "LimitRanger plugin set: cpu request for container linkerd-proxy; cpu limit for container linkerd-proxy", + "linkerd.io/created-by": "linkerd/proxy-injector stable-2.14.8" + }, + "pods": { + "function-deployment-5876876568-zcc7m": { + "availability": 1.0, + "nodename": "comet1", + "state": "Running", + "qosclass": "Burstable" + } + }, + "data": { + "cpu_value": { + "comet1": 12.547019655726091 + } + }, + "plan": null +},{ + "_id": { + "$oid": "65b8ea1b1a454775358ae4af" + }, + "name": "default/my-intent", + "timestamp": { + "$date": "2024-01-30T12:22:51.164Z" + }, + "current_objectives": { + "default/p99latency": 89.75, + "default/throughput": 12.4 + }, + "desired_objectives": { + "default/p99latency": 150.0, + "default/throughput": 0.0 + }, + "resources": { + "1_cpu_requests": 1200, + "1_cpu_limits": 1200, + "0_cpu_requests": 500, + "0_cpu_limits": 500 + }, + "annotations": { + "linkerd.io/proxy-version": "stable-2.14.8", + "viz.linkerd.io/tap-enabled": "true", + "kubernetes.io/limit-ranger": "LimitRanger plugin set: cpu request for container linkerd-proxy; cpu 
limit for container linkerd-proxy", + "linkerd.io/created-by": "linkerd/proxy-injector stable-2.14.8", + "linkerd.io/inject": "enabled" + }, + "pods": { + "function-deployment-5876876568-zcc7m": { + "availability": 1.0, + "nodename": "comet1", + "state": "Running", + "qosclass": "Burstable" + } + }, + "data": { + "cpu_value": { + "comet1": 12.720645567046084 + } + }, + "plan": [ + { + "name": "scaleCPU", + "properties": { + "value": 700 + } + } + ] +},{ + "_id": { + "$oid": "65b8ea5b1a454775358ae4b0" + }, + "name": "default/my-intent", + "timestamp": { + "$date": "2024-01-30T12:23:55.476Z" + }, + "current_objectives": { + "default/throughput": 12.4, + "default/p99latency": 98.949 + }, + "desired_objectives": { + "default/p99latency": 150.0, + "default/throughput": 0.0 + }, + "resources": { + "1_cpu_requests": 700, + "1_cpu_limits": 700, + "0_cpu_requests": 500, + "0_cpu_limits": 500 + }, + "annotations": { + "kubernetes.io/limit-ranger": "LimitRanger plugin set: cpu request for container linkerd-proxy; cpu limit for container linkerd-proxy", + "linkerd.io/created-by": "linkerd/proxy-injector stable-2.14.8", + "linkerd.io/inject": "enabled", + "linkerd.io/proxy-version": "stable-2.14.8", + "viz.linkerd.io/tap-enabled": "true" + }, + "pods": { + "function-deployment-69668858c6-gpfvs": { + "availability": 1.0, + "nodename": "comet0", + "state": "Running", + "qosclass": "Burstable" + } + }, + "data": { + "cpu_value": { + "comet0": 12.875166204743469 + } + }, + "plan": null +},{ + "_id": { + "$oid": "65b8ea881a454775358ae4b1" + }, + "name": "default/my-intent", + "timestamp": { + "$date": "2024-01-30T12:24:40.469Z" + }, + "current_objectives": { + "default/p99latency": 98.94, + "default/throughput": 12.3 + }, + "desired_objectives": { + "default/p99latency": 150.0, + "default/throughput": 0.0 + }, + "resources": { + "0_cpu_requests": 500, + "0_cpu_limits": 500, + "1_cpu_requests": 700, + "1_cpu_limits": 700 + }, + "annotations": { + "kubernetes.io/limit-ranger": "LimitRanger plugin set: cpu request for container linkerd-proxy; cpu limit for container linkerd-proxy", + "linkerd.io/created-by": "linkerd/proxy-injector stable-2.14.8", + "linkerd.io/inject": "enabled", + "linkerd.io/proxy-version": "stable-2.14.8", + "viz.linkerd.io/tap-enabled": "true" + }, + "pods": { + "function-deployment-69668858c6-gpfvs": { + "availability": 1.0, + "nodename": "comet0", + "state": "Running", + "qosclass": "Burstable" + } + }, + "data": { + "cpu_value": { + "comet0": 13.32175415893714 + } + }, + "plan": null +}] diff --git a/plugins/cpu_scale/Dockerfile b/plugins/cpu_scale/Dockerfile index cb10250..779b13f 100644 --- a/plugins/cpu_scale/Dockerfile +++ b/plugins/cpu_scale/Dockerfile @@ -1,7 +1,7 @@ # Copyright (c) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -FROM golang:1.19 AS builder +FROM golang:1.22 AS builder WORKDIR /plugins @@ -11,11 +11,12 @@ RUN make prepare-build build-plugins \ && go run github.com/google/go-licenses@v1.6.0 save "./..." 
--save_path licenses \ && hack/additional-licenses.sh -FROM alpine:3.17 +FROM alpine:3.20 -RUN apk add --upgrade --no-cache openssl=~3.0 && apk add --no-cache python3=~3.10 py3-matplotlib=~3.6 \ - py3-pip=~22.3 py3-scikit-learn=~1.1 -RUN pip install --no-cache-dir pymongo~=4.3 +RUN adduser -D nonroot +RUN apk add --upgrade --no-cache openssl=~3.3 && apk add --no-cache python3=~3.12 py3-matplotlib=~3.7 \ + py3-pip=~24.0 py3-scikit-learn=~1.3 +RUN pip install --break-system-packages --no-cache-dir pymongo~=4.6 WORKDIR /plugins diff --git a/plugins/cpu_scale/cmd/cpu_scale.go b/plugins/cpu_scale/cmd/cpu_scale.go index 4c52ff7..cbf156b 100644 --- a/plugins/cpu_scale/cmd/cpu_scale.go +++ b/plugins/cpu_scale/cmd/cpu_scale.go @@ -4,49 +4,34 @@ import ( "flag" "fmt" "os" - "os/signal" - - "k8s.io/client-go/rest" "github.com/intel/intent-driven-orchestration/pkg/controller" - val "github.com/intel/intent-driven-orchestration/plugins" + pluginsHelper "github.com/intel/intent-driven-orchestration/plugins" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/clientcmd" - plugins "github.com/intel/intent-driven-orchestration/pkg/api/plugins/v1alpha1" "github.com/intel/intent-driven-orchestration/pkg/common" - "github.com/intel/intent-driven-orchestration/pkg/planner" - "github.com/intel/intent-driven-orchestration/pkg/planner/actuators" "github.com/intel/intent-driven-orchestration/pkg/planner/actuators/scaling" "k8s.io/klog/v2" ) +// maxCPUValue defines the maximum amount of CPU units. +const maxCPUValue = int64(1000 * 1024) + +// maxLookBack defines the maximum age (in minutes) a model in the knowledge base can have (1 week). +const maxLookBack = 10080 + var ( kubeConfig string config string ) -type CPUScalePluginHandler struct { - actuator actuators.Actuator -} - -func (s *CPUScalePluginHandler) NextState(state *common.State, goal *common.State, - profiles map[string]common.Profile) ([]common.State, []float64, []planner.Action) { - klog.InfoS("From plugin: Invoked CPUScale Next State Callback") - return s.actuator.NextState(state, goal, profiles) -} - -func (s *CPUScalePluginHandler) Perform(state *common.State, plan []planner.Action) { - klog.InfoS("From plugin: Invoked CPUScale Perform Callback") - s.actuator.Perform(state, plan) -} - -func (s *CPUScalePluginHandler) Effect(state *common.State, profiles map[string]common.Profile) { - klog.InfoS("From plugin: Invoked CPUScale Effect Callback") - s.actuator.Effect(state, profiles) +func init() { + flag.StringVar(&kubeConfig, "kubeConfig", "", "Path to a kube config file.") + flag.StringVar(&config, "config", "", "Path to configuration file.") } func main() { @@ -56,96 +41,74 @@ func main() { tmp, err := common.LoadConfig(config, func() interface{} { return &scaling.CPUScaleConfig{} }) - if err != nil { klog.Fatalf("Error loading configuration for actuator: %s", err) } - cfg := tmp.(*scaling.CPUScaleConfig) - err = isValidConf(cfg.CPUMax, cfg.CPURounding, cfg.MaxProActiveCPU, - cfg.CPUSafeGuardFactor, cfg.ProActiveLatencyPercentage) + // validate configuration.
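+ // generic checks (endpoints, ports, mongo URI) first, followed by the actuator specific checks.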
+ err = pluginsHelper.IsValidGenericConf(cfg.Endpoint, cfg.Port, cfg.PluginManagerEndpoint, cfg.PluginManagerPort, cfg.MongoEndpoint) if err != nil { - klog.Fatalf("Error on configuration for actuator: %s", err) + klog.Fatalf("Error on generic configuration for actuator: %s", err) } - - err = val.IsValidGenericConf(cfg.LookBack, cfg.PluginManagerPort, cfg.Port, - cfg.PythonInterpreter, cfg.Script, cfg.Endpoint, cfg.PluginManagerEndpoint, cfg.MongoEndpoint) + err = isValidConf(cfg.PythonInterpreter, cfg.Script, cfg.CPUMax, cfg.CPURounding, cfg.MaxProActiveCPU, + cfg.CPUSafeGuardFactor, cfg.ProActiveLatencyPercentage, cfg.LookBack) if err != nil { - klog.Fatalf("Error on generic configuration for actuator: %s", err) + klog.Fatalf("Error on configuration for actuator: %s", err) } - mt := controller.NewMongoTracer(cfg.MongoEndpoint) - var config *rest.Config - config, err = clientcmd.BuildConfigFromFlags("", kubeConfig) - + // get K8s config. + config, err := clientcmd.BuildConfigFromFlags("", kubeConfig) if err != nil { klog.Fatalf("Error getting Kubernetes config: %s", err) } - - var clusterClient *kubernetes.Clientset - clusterClient, err = kubernetes.NewForConfig(config) - + clusterClient, err := kubernetes.NewForConfig(config) if err != nil { klog.Fatalf("Error creating Kubernetes cluster client: %s", err) } - p := &CPUScalePluginHandler{ - actuator: scaling.NewCPUScaleActuator(clusterClient, mt, *cfg), - } - stub := plugins.NewActuatorPluginStub(p.actuator.Name(), cfg.Endpoint, cfg.Port, - cfg.PluginManagerEndpoint, cfg.PluginManagerPort) - stub.SetNextStateFunc(p.NextState) - stub.SetPerformFunc(p.Perform) - stub.SetEffectFunc(p.Effect) - err = stub.Start() + // once configuration is ready & valid start the plugin mechanism. + mt := controller.NewMongoTracer(cfg.MongoEndpoint) + actuator := scaling.NewCPUScaleActuator(clusterClient, mt, *cfg) + signal := pluginsHelper.StartActuatorPlugin(actuator, cfg.Endpoint, cfg.Port, cfg.PluginManagerEndpoint, cfg.PluginManagerPort) + <-signal +} - if err != nil { - klog.Fatalf("Error starting plugin server: %s", err) +func isValidConf(interpreter, script string, confCPUMax, confCPURounding, confMaxProActiveCPU int64, + confCPUSafeGuardFactor, configProActiveLatencyPercentage float64, lookBack int) error { + if !pluginsHelper.IsStrConfigValid(interpreter) { + return fmt.Errorf("invalid path to python interpreter: %s", interpreter) } - err = stub.Register() - - if err != nil { - klog.Fatalf("Error registering plugin: %s", err) + if script != "None" { + _, err := os.Stat(script) + if err != nil { + return fmt.Errorf("invalid script %s", err) + } } - signalChan := make(chan os.Signal, 1) - signal.Notify(signalChan, os.Interrupt) - <-signalChan - - err = stub.Stop() - - if err != nil { - klog.Fatalf("Error stopping plugin server: %s", err) + if confCPUMax <= 0 || confCPUMax > maxCPUValue { + return fmt.Errorf("invalid cpu numbers: %d", confCPUMax) } -} -func init() { - flag.StringVar(&kubeConfig, "kubeConfig", "", "Path to a kube config file.") - flag.StringVar(&config, "config", "", "Path to configuration file.") -} - -func isValidConf(confCPUMax, confCPURounding, confMaxProActiveCPU int64, - confCPUSafeGuardFactor, configProActiveLatencyPercentage float64) error { - if confCPUMax <= 0 || confCPUMax > int64(1000*1024) { - return fmt.Errorf("invalid cpu numbers") + if confCPURounding <= 0 || confCPURounding > 1000 || confCPURounding%10 != 0 { + return fmt.Errorf("invalid round base: %d", confCPURounding) } - if confCPURounding <= 0 || confCPURounding > 
1000 || confCPURounding%10 != 0 { - return fmt.Errorf("invalid round base") + if confMaxProActiveCPU < 0 || confMaxProActiveCPU > confCPUMax { + return fmt.Errorf("invalid max proactive value: %d", confMaxProActiveCPU) } if confCPUSafeGuardFactor <= 0 || confCPUSafeGuardFactor > 1 { - return fmt.Errorf("invalid safeguard factor") + return fmt.Errorf("invalid safeguard factor: %f", confCPUSafeGuardFactor) } - if confMaxProActiveCPU < 0 || confMaxProActiveCPU > confCPUMax { - return fmt.Errorf("invalid max proactive value") + if configProActiveLatencyPercentage < 0 || configProActiveLatencyPercentage > 1 { + return fmt.Errorf("invalid fraction value for proactive latency: %f", configProActiveLatencyPercentage) } - if configProActiveLatencyPercentage < 0 || configProActiveLatencyPercentage > 1 { - return fmt.Errorf("invalid fraction value for proactive latency") + if lookBack <= 0 || lookBack > maxLookBack { + return fmt.Errorf("invalid lookback value: %d", lookBack) } return nil diff --git a/plugins/cpu_scale/cmd/cpu_scale_test.go b/plugins/cpu_scale/cmd/cpu_scale_test.go index 14f14a0..ff44b76 100644 --- a/plugins/cpu_scale/cmd/cpu_scale_test.go +++ b/plugins/cpu_scale/cmd/cpu_scale_test.go @@ -4,117 +4,125 @@ import ( "testing" ) -type specCPUConfig struct { - CPUMax int64 - CPURounding int64 - CPUSafeGuardFactor float64 - MaxProActiveCPU int64 - ProActiveLatencyPercentage float64 -} +// pathToAnalyticsScript defines the path to an existing script for this actuator. +const pathToAnalyticsScript = "../../../pkg/planner/actuators/scaling/analytics/cpu_rightsizing.py" -func setCPUConfigValues(CPUMax, CPURounding, MaxProActiveCPU int64, - CPUSafeGuardFactor, ProActiveLatencyPercentage float64) specCPUConfig { - return specCPUConfig{ - CPUMax: CPUMax, - CPURounding: CPURounding, - CPUSafeGuardFactor: CPUSafeGuardFactor, - MaxProActiveCPU: MaxProActiveCPU, - ProActiveLatencyPercentage: ProActiveLatencyPercentage, +func TestIsValidConf(t *testing.T) { + type args struct { + interpreter string + script string + cpuMax int64 + cpuRounding int64 + maxProActiveCPU int64 + cpuSafeGuardFactor float64 + proActiveLatencyPercentage float64 + lookBack int } -} - -func Test_isValidConf(t *testing.T) { tests := []struct { name string - args specCPUConfig + args args wantErr bool }{ { - name: "tc", - args: setCPUConfigValues(4000, 100, 0, 0.95, 0.1), + name: "tc-0", + args: args{"python3", pathToAnalyticsScript, 4000, 100, 0, 0.95, 0.1, 10000}, wantErr: false, }, { name: "tc-1", - args: setCPUConfigValues(-10, 100, 0, 0.95, 0.1), - wantErr: true, // negative cpu + args: args{"", pathToAnalyticsScript, 4000, 100, 0, 0.95, 0.1, 10000}, + wantErr: true, }, { name: "tc-2", - args: setCPUConfigValues(0, 100, 0, 0.95, 0.1), - wantErr: true, // zero cpu + args: args{"python3", "", 4000, 100, 0, 0.95, 0.1, 10000}, + wantErr: true, }, { - // over uplimit name: "tc-3", - args: setCPUConfigValues(999999999999999999, 100, 0, 0.95, 0.1), - wantErr: true, // over limit cpu + args: args{"python3", pathToAnalyticsScript, -1, 100, 0, 0.95, 0.1, 10000}, + wantErr: true, // negative cpu }, { name: "tc-4", - args: setCPUConfigValues(4000, -10, 0, 0.95, 0.1), - wantErr: true, // negative round base + args: args{"python3", pathToAnalyticsScript, 0, 100, 0, 0.95, 0.1, 10000}, + wantErr: true, // zero cpu }, { name: "tc-5", - args: setCPUConfigValues(4000, 0, 0, 0.95, 0.1), - wantErr: true, // zero round base + args: args{"python3", pathToAnalyticsScript, 999999999, 100, 0, 0.95, 0.1, 10000}, + wantErr: true, // over limit cpu }, { 
name: "tc-6", - args: setCPUConfigValues(4000, 1001, 0, 0.95, 0.1), - wantErr: true, // over limit round base + args: args{"python3", pathToAnalyticsScript, 4000, -1, 0, 0.95, 0.1, 10000}, + wantErr: true, // negative round base }, { name: "tc-7", - args: setCPUConfigValues(4000, 101, 0, 0.95, 0.1), - wantErr: true, // not round base 10 + args: args{"python3", pathToAnalyticsScript, 4000, 0, 0, 0.95, 0.1, 10000}, + wantErr: true, // zero round base }, { name: "tc-8", - args: setCPUConfigValues(4000, 100, -10, 0.95, 0.1), - wantErr: true, // negative cpu for proactive + args: args{"python3", pathToAnalyticsScript, 4000, 1001, 0, 0.95, 0.1, 10000}, + wantErr: true, // over limit round base }, { name: "tc-9", - args: setCPUConfigValues(4000, 100, 100000, 0.95, 0.1), - wantErr: true, // over limit cpu for proactive + args: args{"python3", pathToAnalyticsScript, 4000, 101, 0, 0.95, 0.1, 10000}, + wantErr: true, // not round base 10 }, { name: "tc-10", - args: setCPUConfigValues(4000, 100, 1700, -0.9, 0.1), - wantErr: true, // negative value for safeguard + args: args{"python3", pathToAnalyticsScript, 4000, 100, -1, 0.95, 0.1, 10000}, + wantErr: true, // negative cpu for proactive }, { name: "tc-11", - args: setCPUConfigValues(4000, 100, 1700, 0, 0.1), - wantErr: true, // zero value for safeguard + args: args{"python3", pathToAnalyticsScript, 4000, 100, 10000, 0.95, 0.1, 10000}, + wantErr: true, // over limit cpu for proactive }, { name: "tc-12", - args: setCPUConfigValues(4000, 100, 1700, 2.12, 0.1), - wantErr: true, // over limit for safeguard + args: args{"python3", pathToAnalyticsScript, 4000, 100, 0, -1.0, 0.1, 10000}, + wantErr: true, // negative value for safeguard }, { name: "tc-13", - args: setCPUConfigValues(4000, 100, 1700, 0.12, -0.21), - wantErr: true, // negative proactive latency fraction + args: args{"python3", pathToAnalyticsScript, 4000, 100, 0, 0.0, 0.1, 10000}, + wantErr: true, // zero value for safeguard }, { name: "tc-14", - args: setCPUConfigValues(4000, 100, 1700, 0.12, 0), - wantErr: false, // aceptable proactive latency fraction + args: args{"python3", pathToAnalyticsScript, 4000, 100, 0, 2.0, 0.1, 10000}, + wantErr: true, // over limit for safeguard }, { name: "tc-15", - args: setCPUConfigValues(4000, 100, 1700, 0.12, 2), + args: args{"python3", pathToAnalyticsScript, 4000, 100, 0, 0.95, -1.0, 10000}, + wantErr: true, // negative proactive latency fraction + }, + { + name: "tc-16", + args: args{"python3", pathToAnalyticsScript, 4000, 100, 0, 0.95, 1.01, 10000}, wantErr: true, // over limit proactive latency fraction }, + { + name: "tc-17", + args: args{"python3", pathToAnalyticsScript, 4000, 100, 0, 0.95, 1.0, -1}, + wantErr: true, // negative lookback. + }, + { + name: "tc-18", + args: args{"python3", pathToAnalyticsScript, 4000, 100, 0, 0.95, 1.0, 999999}, + wantErr: true, // over limit lookback. 
+ }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if err := isValidConf(tt.args.CPUMax, tt.args.CPURounding, tt.args.MaxProActiveCPU, - tt.args.CPUSafeGuardFactor, tt.args.ProActiveLatencyPercentage); (err != nil) != tt.wantErr { + if err := isValidConf(tt.args.interpreter, tt.args.script, tt.args.cpuMax, tt.args.cpuRounding, tt.args.maxProActiveCPU, + tt.args.cpuSafeGuardFactor, tt.args.proActiveLatencyPercentage, tt.args.lookBack); (err != nil) != tt.wantErr { t.Errorf("isValidConf() error = %v, wantErr %v", err, tt.wantErr) } }) diff --git a/plugins/cpu_scale/cpu-scale-actuator-plugin.yaml b/plugins/cpu_scale/cpu-scale-actuator-plugin.yaml index 750454d..54e9731 100644 --- a/plugins/cpu_scale/cpu-scale-actuator-plugin.yaml +++ b/plugins/cpu_scale/cpu-scale-actuator-plugin.yaml @@ -29,13 +29,13 @@ metadata: spec: serviceAccountName: planner-service-account containers: - - name: cpu-scale-actuator - image: 127.0.0.1:5000/cpuscale:0.2.0 - imagePullPolicy: Always - args: [ "-config", "/config/defaults.json", "-v", "2" ] - ports: - - containerPort: 33334 - securityContext: + - name: cpu-scale-actuator + image: 127.0.0.1:5000/cpuscale:0.3.0 + imagePullPolicy: Always + args: [ "-config", "/config/defaults.json", "-v", "2" ] + ports: + - containerPort: 33334 + securityContext: capabilities: drop: [ 'ALL' ] seccompProfile: @@ -45,22 +45,24 @@ spec: runAsNonRoot: true runAsUser: 10001 runAsGroup: 10001 - resources: - limits: - memory: "1024Mi" - cpu: "2000m" - requests: - memory: "512Mi" - cpu: "500m" - volumeMounts: - - name: cpu-scale-configmap-volume - mountPath: /config/ - - name: matplotlib-tmp - mountPath: /var/tmp - env: - # Needed for the analytics python script. - - name: MONGO_URL - value: "mongodb://planner-mongodb-service:27017/" + resources: + limits: + memory: "1024Mi" + cpu: "2000m" + requests: + memory: "512Mi" + cpu: "500m" + volumeMounts: + - name: cpu-scale-configmap-volume + mountPath: /config/ + - name: matplotlib-tmp + mountPath: /var/tmp + env: + # Needed for the analytics python script. + - name: MONGO_URL + value: "mongodb://planner-mongodb-service:27017/" + - name: MPLCONFIGDIR + value: /var/tmp volumes: - name: matplotlib-tmp emptyDir: diff --git a/plugins/plugins_helper.go b/plugins/plugins_helper.go index 6d175e3..e7db25c 100644 --- a/plugins/plugins_helper.go +++ b/plugins/plugins_helper.go @@ -1,54 +1,70 @@ -package pluginsHelper +package pluginshelper import ( "fmt" "net/url" "os" -) + "os/signal" + "syscall" -func IsValidGenericConf(lookBack, pluginManagerPort, port int, - pythonInterpreter, script, endpoint, pluginManagerEndpoint, mongo string) error { + plugins "github.com/intel/intent-driven-orchestration/pkg/api/plugins/v1alpha1" + "github.com/intel/intent-driven-orchestration/pkg/planner/actuators" + "k8s.io/klog/v2" +) - if script != "none" { - _, err := os.Stat(script) - if err != nil { - return fmt.Errorf("invalid %s", err) - } +// StartActuatorPlugin starts the necessary Stubs and registers the plugin with the plugin manager. 
+func StartActuatorPlugin(actuator actuators.Actuator, endpoint string, port int, serverEndpoint string, serverPort int) chan os.Signal { + stub := plugins.NewActuatorPluginStub(actuator.Name(), endpoint, port, serverEndpoint, serverPort) + stub.SetNextStateFunc(actuator.NextState) + stub.SetPerformFunc(actuator.Perform) + stub.SetEffectFunc(actuator.Effect) + err := stub.Start() + if err != nil { + klog.Fatalf("Error starting plugin server: %s", err) } - - if lookBack <= 0 || lookBack > 60*24*7 { - return fmt.Errorf("invalid lookback value") + err = stub.Register() + if err != nil { + klog.Fatalf("Error registering plugin: %s", err) } + signalChan := make(chan os.Signal, 1) + signal.Notify(signalChan, os.Interrupt, syscall.SIGTERM) + go func() { + <-signalChan + err = stub.Stop() + if err != nil { + klog.Fatalf("Error stopping plugin server: %s", err) + } + }() + return signalChan +} +// IsValidGenericConf checks if a set of generic configuration fields are valid. +func IsValidGenericConf(endpoint string, port int, pluginManagerEndpoint string, pluginManagerPort int, mongo string) error { if !isPortNumValid(port) || !isPortNumValid(pluginManagerPort) { return fmt.Errorf("invalid port value") } _, err := url.ParseRequestURI(mongo) - if err != nil { - return fmt.Errorf("invalid %s", err) - } - - if !isStrValid(pythonInterpreter) { - return fmt.Errorf("invalid analytical script interpreter") + return fmt.Errorf("invalid uri: %s", err) } - if !isStrValid(endpoint) || - !isStrValid(pluginManagerEndpoint) { + if !IsStrConfigValid(endpoint) || !IsStrConfigValid(pluginManagerEndpoint) { return fmt.Errorf("invalid endpoint value") } return nil } -func isStrValid(str string) bool { +// IsStrConfigValid checks if string property of a configuration is valid. +func IsStrConfigValid(str string) bool { if str != "" && len(str) < 101 { return true } return false } +// isPortNumValid checks if a port definition is > 0 and smaller 65536. 
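// A minimal sketch of how a plugin's main function is expected to wire these helpers
// together, assuming cfg has already been loaded via common.LoadConfig and myActuator is
// any concrete actuators.Actuator implementation (both names are placeholders):
//
//	if err := pluginshelper.IsValidGenericConf(cfg.Endpoint, cfg.Port,
//		cfg.PluginManagerEndpoint, cfg.PluginManagerPort, cfg.MongoEndpoint); err != nil {
//		klog.Fatalf("Error on generic configuration for actuator: %s", err)
//	}
//	signalChan := pluginshelper.StartActuatorPlugin(myActuator, cfg.Endpoint, cfg.Port,
//		cfg.PluginManagerEndpoint, cfg.PluginManagerPort)
//	<-signalChan // keeps the process alive until SIGINT/SIGTERM arrives.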
func isPortNumValid(num int) bool { if num > 0 && num < 65536 { return true diff --git a/plugins/plugins_helper_test.go b/plugins/plugins_helper_test.go index 3a13e8e..14e8e7a 100644 --- a/plugins/plugins_helper_test.go +++ b/plugins/plugins_helper_test.go @@ -1,141 +1,93 @@ -package pluginsHelper +package pluginshelper import ( + "syscall" "testing" -) + "time" -type genericConf struct { - lookBack int - pluginManagerPort int - port int - pythonInterpreter string - script string - endpoint string - pluginManagerEndpoint string - mongo string -} + plugins "github.com/intel/intent-driven-orchestration/pkg/api/plugins/v1alpha1" + "github.com/intel/intent-driven-orchestration/pkg/common" + "github.com/intel/intent-driven-orchestration/pkg/planner" + "github.com/intel/intent-driven-orchestration/pkg/planner/actuators" +) var ( PluginManagerPort = 33333 Port = 33344 - LookBack = 20 - Script = "../pkg/planner/actuators/scaling/analytics/cpu_rightsizing.py" - Endpoint = "_XxIXS.10HMR1Nt0jaXi+ DKSvscN5312cB3TrQPEpSfEr/!|NXhZIZhEpeqNaxFNaxz9CMHo64iiCMgP9NfYVCiJzgRSFFFsxnb" - PythonInterpreter = "python3" + Endpoint = "some-endpoint" PluginManagerEndpoint = "some-service-ep" MongoEndpoint = "mongodb://planner-mongodb-service:27017/" ) -func setConfigValues(lookBack, port, pluginManagerPort int, pythonInterpreter, script, endpoint, pluginManagerEndpoint, mongo string) genericConf { - return genericConf{ - lookBack: lookBack, - pluginManagerPort: pluginManagerPort, - port: port, - pythonInterpreter: pythonInterpreter, - script: script, - endpoint: endpoint, - pluginManagerEndpoint: pluginManagerEndpoint, - mongo: mongo, +func TestIsValidGenericConf(t *testing.T) { + type genericConf struct { + endpoint string + port int + pluginManagerEndpoint string + pluginManagerPort int + mongo string } -} - -func TestIsValidCPUGenericConf(t *testing.T) { tests := []struct { name string args genericConf wantErr bool }{ { - name: "tc", - args: setConfigValues(20, Port, PluginManagerPort, PythonInterpreter, Script, - Endpoint, PluginManagerEndpoint, MongoEndpoint), - wantErr: false, // negative value for time - }, - { - name: "tc-1", - args: setConfigValues(-20, Port, PluginManagerPort, PythonInterpreter, Script, - Endpoint, PluginManagerEndpoint, MongoEndpoint), - wantErr: true, // negative value for time - }, - { - name: "tc-2", - args: setConfigValues(0, Port, PluginManagerPort, PythonInterpreter, Script, - Endpoint, PluginManagerEndpoint, MongoEndpoint), - wantErr: true, // zero value for time - }, - { - name: "tc-3", - args: setConfigValues(2222222222222220, Port, PluginManagerPort, PythonInterpreter, Script, - Endpoint, PluginManagerEndpoint, MongoEndpoint), - wantErr: true, // over limit time value - }, - { - name: "tc-4", - args: setConfigValues(20, Port, PluginManagerPort, PythonInterpreter, "script", - Endpoint, PluginManagerEndpoint, MongoEndpoint), - wantErr: true, // wrong Script path + name: "tc-0", + args: genericConf{Endpoint, Port, PluginManagerEndpoint, PluginManagerPort, "MongoEndpoint"}, + wantErr: true, // wrong url }, { - name: "tc-5", - args: setConfigValues(20, Port, PluginManagerPort, PythonInterpreter, "", - Endpoint, PluginManagerEndpoint, MongoEndpoint), - wantErr: true, // wrong Script path + name: "tc-1", + args: genericConf{Endpoint, Port, PluginManagerEndpoint, PluginManagerPort, ""}, + wantErr: true, // wrong url }, { - name: "tc-6", - args: setConfigValues(20, Port, PluginManagerPort, PythonInterpreter, Script, - Endpoint, PluginManagerEndpoint, "MongoEndpoint"), - wantErr: true, // 
wrong url + name: "tc-2", + args: genericConf{Endpoint, Port, PluginManagerEndpoint, PluginManagerPort, MongoEndpoint}, + wantErr: false, // all good. }, { - name: "tc-7", - args: setConfigValues(20, Port, PluginManagerPort, PythonInterpreter, Script, - Endpoint, PluginManagerEndpoint, ""), - wantErr: true, // wrong url + name: "tc-3", + args: genericConf{Endpoint, -10, PluginManagerEndpoint, PluginManagerPort, MongoEndpoint}, + wantErr: true, // negative port value }, { - name: "tc-8", - args: setConfigValues(20, Port, PluginManagerPort, PythonInterpreter, Script, - Endpoint, PluginManagerEndpoint, "someUrl.co:23454"), - wantErr: false, // wrong url + name: "tc-4", + args: genericConf{"", Port, PluginManagerEndpoint, PluginManagerPort, MongoEndpoint}, + wantErr: true, // wrong endpoint }, { - name: "tc-9", - args: setConfigValues(20, -1, PluginManagerPort, PythonInterpreter, Script, - Endpoint, PluginManagerEndpoint, MongoEndpoint), - wantErr: true, // wrong url + name: "tc-5", + args: genericConf{Endpoint, 10000000, PluginManagerEndpoint, PluginManagerPort, MongoEndpoint}, + wantErr: true, // port to high. }, { - name: "tc-9", - args: setConfigValues(20, -1, PluginManagerPort, PythonInterpreter, Script, - Endpoint, PluginManagerEndpoint, MongoEndpoint), + name: "tc-6", + args: genericConf{Endpoint, Port, PluginManagerEndpoint, -20, MongoEndpoint}, wantErr: true, // negative port value }, { - name: "tc-10", - args: setConfigValues(20, Port, PluginManagerPort, PythonInterpreter, Script, - "", PluginManagerEndpoint, MongoEndpoint), - wantErr: true, // no endpoint defined + name: "tc-7", + args: genericConf{Endpoint, Port, "", PluginManagerPort, MongoEndpoint}, + wantErr: true, // wrong endpoint }, { - name: "tc-11", - args: setConfigValues(20, Port, PluginManagerPort, "", Script, - Endpoint, PluginManagerEndpoint, MongoEndpoint), - wantErr: true, // no python + name: "tc-8", + args: genericConf{Endpoint, Port, PluginManagerEndpoint, 10000000, MongoEndpoint}, + wantErr: true, // port to high. 
}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if err := IsValidGenericConf(tt.args.lookBack, tt.args.pluginManagerPort, tt.args.port, - tt.args.pythonInterpreter, tt.args.script, tt.args.endpoint, tt.args.pluginManagerEndpoint, - tt.args.mongo); (err != nil) != tt.wantErr { - t.Errorf("IsValidCPUGenericConf() error = %v, wantErr %v", err, tt.wantErr) + if err := IsValidGenericConf(tt.args.endpoint, tt.args.port, tt.args.pluginManagerEndpoint, tt.args.pluginManagerPort, tt.args.mongo); (err != nil) != tt.wantErr { + t.Errorf("IsValidGenericConf() error = %v, wantErr %v", err, tt.wantErr) } }) } } -func Test_isStrValid(t *testing.T) { +func TestIsStrConfigValid(t *testing.T) { type args struct { str string } @@ -144,20 +96,20 @@ func Test_isStrValid(t *testing.T) { args args want bool }{ + {name: "tc-0", args: args{str: Endpoint}, want: true}, {name: "tc-1", args: args{str: ""}, want: false}, - {name: "tc-2", args: args{str: Endpoint}, want: true}, - {name: "tc-3", args: args{str: Endpoint + "1"}, want: false}, + {name: "tc-2", args: args{str: "garbage inputs with very long line so that this should invalid given this is longer then one hundred and ten chars."}, want: false}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if got := isStrValid(tt.args.str); got != tt.want { + if got := IsStrConfigValid(tt.args.str); got != tt.want { t.Errorf("isStrValid() = %v, want %v", got, tt.want) } }) } } -func Test_isPortNumValid(t *testing.T) { +func TestIsPortNumValid(t *testing.T) { type args struct { num int } @@ -189,3 +141,48 @@ func Test_isPortNumValid(t *testing.T) { }) } } + +type DummyActuator struct { +} + +func (d DummyActuator) Name() string { + return "dummy" +} + +func (d DummyActuator) Group() string { + return "dummies" +} + +func (d DummyActuator) NextState(_ *common.State, _ *common.State, _ map[string]common.Profile) ([]common.State, []float64, []planner.Action) { + return nil, nil, nil +} + +func (d DummyActuator) Perform(_ *common.State, _ []planner.Action) { + +} + +func (d DummyActuator) Effect(_ *common.State, _ map[string]common.Profile) { + +} + +func TestStartActuatorPluginForSuccess(t *testing.T) { + var tmp []actuators.Actuator + pluginManager := plugins.NewPluginManagerServer(tmp, "localhost", 33350) + err := pluginManager.Start() + if err != nil { + t.Fatalf("Could not start plugin manager error was: %v", err) + } + + actuator := DummyActuator{} + exitChannel := StartActuatorPlugin(actuator, "localhost", 3350, "localhost", 33350) + if err != nil { + t.Errorf("Error should have been nil, was: %v", err) + } + time.Sleep(250 * time.Millisecond) + exitChannel <- syscall.SIGINT + + err = pluginManager.Stop() + if err != nil { + t.Fatalf("Could not stop plugin manager error was: %v", err) + } +} diff --git a/plugins/rdt/Dockerfile b/plugins/rdt/Dockerfile index 0a0f67d..003e4fe 100644 --- a/plugins/rdt/Dockerfile +++ b/plugins/rdt/Dockerfile @@ -1,7 +1,7 @@ # Copyright (c) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -FROM golang:1.19 AS builder +FROM golang:1.22 AS builder WORKDIR /plugins @@ -11,12 +11,12 @@ RUN make prepare-build build-plugins \ && go run github.com/google/go-licenses@v1.6.0 save "./..." 
--save_path licenses \ && hack/additional-licenses.sh -FROM alpine:3.17 +FROM alpine:3.20 RUN adduser -D nonroot -RUN apk add --upgrade --no-cache openssl=~3.0 && apk add --no-cache python3=~3.10 py3-matplotlib=~3.6 \ - py3-pip=~22.3 py3-scikit-learn=~1.1 -RUN pip install --no-cache-dir pymongo~=4.3 +RUN apk add --upgrade --no-cache openssl=~3.3 && apk add --no-cache python3=~3.12 py3-matplotlib=~3.7 \ + py3-pip=~24.0 py3-scikit-learn=~1.3 +RUN pip install --break-system-packages --no-cache-dir pymongo~=4.6 WORKDIR /plugins diff --git a/plugins/rdt/cmd/rdt.go b/plugins/rdt/cmd/rdt.go index b4bf000..e50cba9 100644 --- a/plugins/rdt/cmd/rdt.go +++ b/plugins/rdt/cmd/rdt.go @@ -2,19 +2,19 @@ package main import ( "flag" + "fmt" + "os" + + pluginsHelper "github.com/intel/intent-driven-orchestration/plugins" "github.com/intel/intent-driven-orchestration/pkg/controller" + val "github.com/intel/intent-driven-orchestration/plugins" + "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/clientcmd" - "os" - "os/signal" - - plugins "github.com/intel/intent-driven-orchestration/pkg/api/plugins/v1alpha1" "github.com/intel/intent-driven-orchestration/pkg/common" - "github.com/intel/intent-driven-orchestration/pkg/planner" - "github.com/intel/intent-driven-orchestration/pkg/planner/actuators" "github.com/intel/intent-driven-orchestration/pkg/planner/actuators/platform" "k8s.io/klog/v2" @@ -25,71 +25,73 @@ var ( config string ) -// RdtPluginHandler represents the actual actuator. -type RdtPluginHandler struct { - actuator actuators.Actuator -} - -func (s *RdtPluginHandler) NextState(state *common.State, goal *common.State, profiles map[string]common.Profile) ([]common.State, []float64, []planner.Action) { - klog.InfoS("Invoked rdt Next State Callback") - return s.actuator.NextState(state, goal, profiles) -} - -func (s *RdtPluginHandler) Perform(state *common.State, plan []planner.Action) { - klog.InfoS("Invoked rdt Perform Callback") - s.actuator.Perform(state, plan) -} - -func (s *RdtPluginHandler) Effect(state *common.State, profiles map[string]common.Profile) { - klog.InfoS("Invoked rdt Effect Callback") - s.actuator.Effect(state, profiles) +func init() { + flag.StringVar(&kubeConfig, "kubeConfig", "", "Path to a kube config file.") + flag.StringVar(&config, "config", "", "Path to configuration file.") } func main() { klog.InitFlags(nil) flag.Parse() + tmp, err := common.LoadConfig(config, func() interface{} { return &platform.RdtConfig{} }) - cfg := tmp.(*platform.RdtConfig) if err != nil { klog.Fatalf("Error loading configuration for actuator: %s", err) } - mt := controller.NewMongoTracer(cfg.MongoEndpoint) + cfg := tmp.(*platform.RdtConfig) + + // validate configuration. + err = val.IsValidGenericConf(cfg.Endpoint, cfg.Port, cfg.PluginManagerEndpoint, cfg.PluginManagerPort, cfg.MongoEndpoint) + if err != nil { + klog.Fatalf("Error on generic configuration for actuator: %s", err) + } + err = isValidConf(cfg.Interpreter, cfg.Analytics, cfg.Prediction, cfg.Options) + if err != nil { + klog.Fatalf("Error on configuration for actuator: %s", err) + } + + // get K8s config. 
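	// When the kubeConfig flag is left empty, clientcmd.BuildConfigFromFlags falls back to the
	// in-cluster configuration, so the same binary runs both as an in-cluster pod and against a
	// local kubeconfig.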
config, err := clientcmd.BuildConfigFromFlags("", kubeConfig) if err != nil { - klog.Fatalf("Error getting incluster k8s config: %s", err) + klog.Fatalf("Error getting Kubernetes config: %s", err) } clusterClient, err := kubernetes.NewForConfig(config) if err != nil { - klog.Fatalf("Error creating k8s cluster client: %s", err) + klog.Fatalf("Error creating Kubernetes cluster client: %s", err) } - p := &RdtPluginHandler{ - actuator: platform.NewRdtActuator(clusterClient, mt, *cfg), + // once configuration is ready & valid start the plugin mechanism. + mt := controller.NewMongoTracer(cfg.MongoEndpoint) + actuator := platform.NewRdtActuator(clusterClient, mt, *cfg) + signal := pluginsHelper.StartActuatorPlugin(actuator, cfg.Endpoint, cfg.Port, cfg.PluginManagerEndpoint, cfg.PluginManagerPort) + <-signal +} + +func isValidConf(interpreter, analyticsScript, predictionScript string, options []string) error { + // TODO: implement! + if !val.IsStrConfigValid(interpreter) { + return fmt.Errorf("invalid path to python interpreter: %s", interpreter) } - stub := plugins.NewActuatorPluginStub(p.actuator.Name(), cfg.Endpoint, cfg.Port, cfg.PluginManagerEndpoint, cfg.PluginManagerPort) - stub.SetNextStateFunc(p.NextState) - stub.SetPerformFunc(p.Perform) - stub.SetEffectFunc(p.Effect) - err = stub.Start() - if err != nil { - klog.Fatalf("Error starting plugin server: %s", err) + + if analyticsScript != "None" { + _, err := os.Stat(analyticsScript) + if err != nil { + return fmt.Errorf("invalid analytics script %s", err) + } } - err = stub.Register() - if err != nil { - klog.Fatalf("Error registering plugin: %s", err) + + if predictionScript != "None" { + _, err := os.Stat(predictionScript) + if err != nil { + return fmt.Errorf("invalid prediction script %s", err) + } } - signalChan := make(chan os.Signal, 1) - signal.Notify(signalChan, os.Interrupt) - <-signalChan - err = stub.Stop() - if err != nil { - klog.Fatalf("Error stopping plugin server: %s", err) + + if len(options) == 0 { + return fmt.Errorf("not enough options defined: %v", options) } -} -func init() { - flag.StringVar(&kubeConfig, "kubeConfig", "", "Path to a kube config file.") - flag.StringVar(&config, "config", "", "Path to configuration file.") + return nil } diff --git a/plugins/rdt/cmd/rdt_test.go b/plugins/rdt/cmd/rdt_test.go new file mode 100644 index 0000000..7e19aa2 --- /dev/null +++ b/plugins/rdt/cmd/rdt_test.go @@ -0,0 +1,55 @@ +package main + +import ( + "testing" +) + +// pathToAnalyticsScript defines the path to an existing script for this actuator. 
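// isValidConf treats the literal string "None" as a sentinel: a script set to "None" skips
// the os.Stat existence check, so in a quick sketch only the empty options slice fails:
//
//	isValidConf("python3", "None", "None", []string{"COS1"}) // nil: nothing to stat, options present
//	isValidConf("python3", "None", "None", nil)              // error: not enough options defined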
+const pathToAnalyticsScript = "../../../pkg/planner/actuators/platform/analyze.py" + +func TestIsValidConf(t *testing.T) { + type args struct { + interpreter string + analyticsScript string + predictionScript string + options []string + } + tests := []struct { + name string + args args + wantErr bool + }{ + { + name: "tc-0", + args: args{"python3", pathToAnalyticsScript, "../../../pkg/planner/actuators/platform/predict.py", []string{"cos0", "cos1"}}, + wantErr: false, + }, + { + name: "tc-1", + args: args{"", pathToAnalyticsScript, "../../../pkg/planner/actuators/platform/predict.py", []string{"cos0", "cos1"}}, + wantErr: true, // wrong interpreter + }, + { + name: "tc-2", + args: args{"python3", "", "../../../pkg/planner/actuators/platform/predict.py", []string{"cos0", "cos1"}}, + wantErr: true, // invalid analytics script + }, + { + name: "tc-3", + args: args{"python3", pathToAnalyticsScript, "", []string{"cos0", "cos1"}}, + wantErr: true, // invalid prediction script + }, + { + name: "tc-4", + args: args{"python3", pathToAnalyticsScript, "../../../pkg/planner/actuators/platform/predict.py", []string{}}, + wantErr: true, // invalid cos options + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if err := isValidConf(tt.args.interpreter, tt.args.analyticsScript, tt.args.predictionScript, tt.args.options); (err != nil) != tt.wantErr { + t.Errorf("isValidConf() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} diff --git a/plugins/rdt/rdt-actuator-plugin.yaml b/plugins/rdt/rdt-actuator-plugin.yaml index 95a7a42..ba6b2b7 100644 --- a/plugins/rdt/rdt-actuator-plugin.yaml +++ b/plugins/rdt/rdt-actuator-plugin.yaml @@ -8,7 +8,11 @@ data: "interpreter": "python3", "analytics_script": "./pkg/planner/actuators/platform/analyze.py", "prediction_script": "./pkg/planner/actuators/platform/predict.py", - "options": ["None", "COS1", "COS2", "COS3"], + "options": [ + "None", + "COS1", + "COS2", + "COS3"], "endpoint": "rdt-actuator-service", "port": 33334, "mongo_endpoint": "mongodb://planner-mongodb-service:27017/", @@ -25,12 +29,12 @@ metadata: spec: serviceAccountName: planner-service-account containers: - - name: rdt-actuator - image: 127.0.0.1:5000/rdt:0.2.0 - args: [ "-config", "/config/defaults.json" ] - ports: - - containerPort: 33334 - securityContext: + - name: rdt-actuator + image: 127.0.0.1:5000/rdt:0.3.0 + args: [ "-config", "/config/defaults.json" ] + ports: + - containerPort: 33334 + securityContext: capabilities: drop: [ 'ALL' ] seccompProfile: @@ -40,24 +44,24 @@ spec: runAsNonRoot: true runAsUser: 10001 runAsGroup: 10001 - resources: - limits: - memory: "1024Mi" - cpu: "2000m" - requests: - memory: "512Mi" - cpu: "500m" - volumeMounts: - - name: matplotlib-tmp - mountPath: /var/tmp - - name: rdt-configmap-volume - mountPath: /config/ - env: - # Needed for the analytics python script. - - name: MONGO_URL - value: "mongodb://planner-mongodb-service:27017/" - - name: MPLCONFIGDIR - value: /var/tmp + resources: + limits: + memory: "1024Mi" + cpu: "2000m" + requests: + memory: "512Mi" + cpu: "500m" + volumeMounts: + - name: matplotlib-tmp + mountPath: /var/tmp + - name: rdt-configmap-volume + mountPath: /config/ + env: + # Needed for the analytics python script. 
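        # MONGO_URL points the analytics script at the knowledge base, and MPLCONFIGDIR gives
        # matplotlib a writable config directory on the emptyDir mounted at /var/tmp, since the
        # container runs as a non-root user and a writable default location is not guaranteed.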
+ - name: MONGO_URL + value: "mongodb://planner-mongodb-service:27017/" + - name: MPLCONFIGDIR + value: /var/tmp volumes: - name: matplotlib-tmp emptyDir: diff --git a/plugins/rm_pod/Dockerfile b/plugins/rm_pod/Dockerfile index 8fe28e0..ded46b7 100644 --- a/plugins/rm_pod/Dockerfile +++ b/plugins/rm_pod/Dockerfile @@ -1,7 +1,7 @@ # Copyright (c) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -FROM golang:1.19 AS builder +FROM golang:1.22 AS builder WORKDIR /plugins diff --git a/plugins/rm_pod/cmd/rm_pod.go b/plugins/rm_pod/cmd/rm_pod.go index acab46e..630ec7c 100644 --- a/plugins/rm_pod/cmd/rm_pod.go +++ b/plugins/rm_pod/cmd/rm_pod.go @@ -9,66 +9,53 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/clientcmd" - "os" - "os/signal" + pluginsHelper "github.com/intel/intent-driven-orchestration/plugins" - val "github.com/intel/intent-driven-orchestration/plugins" - - plugins "github.com/intel/intent-driven-orchestration/pkg/api/plugins/v1alpha1" "github.com/intel/intent-driven-orchestration/pkg/common" - "github.com/intel/intent-driven-orchestration/pkg/planner" - "github.com/intel/intent-driven-orchestration/pkg/planner/actuators" "github.com/intel/intent-driven-orchestration/pkg/planner/actuators/scaling" "k8s.io/klog/v2" ) +// maxLookBack defines the maximum age a model in the knowledge base can have (1 week) +const maxLookBack = 10080 + +// maxScaleOut defines the maximum number of replicas. +const maxScaleOut = 128 + var ( kubeConfig string config string ) -// RmpodPluginHandler represents the actual actuator. -type RmpodPluginHandler struct { - actuator actuators.Actuator -} - -func (s *RmpodPluginHandler) NextState(state *common.State, goal *common.State, profiles map[string]common.Profile) ([]common.State, []float64, []planner.Action) { - klog.V(1).InfoS("Invoked Rmpod Next State Callback") - return s.actuator.NextState(state, goal, profiles) -} - -func (s *RmpodPluginHandler) Perform(state *common.State, plan []planner.Action) { - klog.V(1).InfoS("Invoked Rmpod Perform Callback") - s.actuator.Perform(state, plan) -} - -func (s *RmpodPluginHandler) Effect(state *common.State, profiles map[string]common.Profile) { - klog.V(1).InfoS("Invoked Rmpod Effect Callback") - s.actuator.Effect(state, profiles) +func init() { + flag.StringVar(&kubeConfig, "kubeConfig", "", "Path to a kube config file.") + flag.StringVar(&config, "config", "", "Path to configuration file.") } func main() { klog.InitFlags(nil) flag.Parse() + tmp, err := common.LoadConfig(config, func() interface{} { return &scaling.RmPodConfig{} }) - cfg := tmp.(*scaling.RmPodConfig) if err != nil { klog.Fatalf("Error loading configuration for actuator: %s", err) } - err = isValidConf(cfg.MinPods) + cfg := tmp.(*scaling.RmPodConfig) + + // validate configuration. + err = pluginsHelper.IsValidGenericConf(cfg.Endpoint, cfg.Port, cfg.PluginManagerEndpoint, cfg.PluginManagerPort, cfg.MongoEndpoint) if err != nil { - klog.Fatalf("Error on configuration for actuator: %s", err) + klog.Fatalf("Error on generic configuration for actuator: %s", err) } - err = val.IsValidGenericConf(cfg.LookBack, cfg.PluginManagerPort, cfg.Port, "none", "none", - cfg.Endpoint, cfg.PluginManagerEndpoint, cfg.MongoEndpoint) + err = isValidConf(cfg.MinPods, cfg.LookBack) if err != nil { - klog.Fatalf("Error on generic configuration for actuator: %s", err) + klog.Fatalf("Error on configuration for actuator: %s", err) } - mt := controller.NewMongoTracer(cfg.MongoEndpoint) + // get K8s config. 
config, err := clientcmd.BuildConfigFromFlags("", kubeConfig) if err != nil { klog.Fatalf("Error getting Kubernetes config: %s", err) @@ -78,38 +65,21 @@ func main() { klog.Fatalf("Error creating Kubernetes cluster client: %s", err) } - p := &RmpodPluginHandler{ - actuator: scaling.NewRmPodActuator(clusterClient, mt, *cfg), - } - stub := plugins.NewActuatorPluginStub(p.actuator.Name(), cfg.Endpoint, cfg.Port, cfg.PluginManagerEndpoint, cfg.PluginManagerPort) - stub.SetNextStateFunc(p.NextState) - stub.SetPerformFunc(p.Perform) - stub.SetEffectFunc(p.Effect) - err = stub.Start() - if err != nil { - klog.Fatalf("Error starting plugin server: %s", err) - } - err = stub.Register() - if err != nil { - klog.Fatalf("Error registering plugin: %s", err) - } - signalChan := make(chan os.Signal, 1) - signal.Notify(signalChan, os.Interrupt) - <-signalChan - err = stub.Stop() - if err != nil { - klog.Fatalf("Error stopping plugin server: %s", err) - } + // once configuration is ready & valid start the plugin mechanism. + mt := controller.NewMongoTracer(cfg.MongoEndpoint) + actuator := scaling.NewRmPodActuator(clusterClient, mt, *cfg) + signal := pluginsHelper.StartActuatorPlugin(actuator, cfg.Endpoint, cfg.Port, cfg.PluginManagerEndpoint, cfg.PluginManagerPort) + <-signal } -func init() { - flag.StringVar(&kubeConfig, "kubeConfig", "", "Path to a kube config file.") - flag.StringVar(&config, "config", "", "Path to configuration file.") -} +func isValidConf(confMinPods, lookBack int) error { + if confMinPods <= 0 || confMinPods > maxScaleOut { + return fmt.Errorf("invalid pods number: %d", confMinPods) + } -func isValidConf(confMinPods int) error { - if confMinPods <= 0 || confMinPods > 128 { - return fmt.Errorf("invalid pods number") + if lookBack <= 0 || lookBack > maxLookBack { + return fmt.Errorf("invalid lookback value: %d", lookBack) } + return nil } diff --git a/plugins/rm_pod/cmd/rm_pod_test.go b/plugins/rm_pod/cmd/rm_pod_test.go new file mode 100644 index 0000000..66aafbc --- /dev/null +++ b/plugins/rm_pod/cmd/rm_pod_test.go @@ -0,0 +1,50 @@ +package main + +import ( + "testing" +) + +func TestIsValidConf(t *testing.T) { + type args struct { + minPods int + lookBack int + } + tests := []struct { + name string + args args + wantErr bool + }{ + { + name: "tc-0", + args: args{1, 1000}, + wantErr: false, + }, + { + name: "tc-1", + args: args{0, 1000}, + wantErr: true, // min pods to small. + }, + { + name: "tc-2", + args: args{1024, 1000}, + wantErr: true, // min pods to large. + }, + { + name: "tc-3", + args: args{1, -1}, + wantErr: true, // lookback negative. + }, + { + name: "tc-4", + args: args{1, 9999999}, + wantErr: true, // lookback to large. 
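// Both bounds come from rm_pod.go: minPods must lie in (0, maxScaleOut] with maxScaleOut = 128,
// and lookBack in (0, maxLookBack] with maxLookBack = 10080 minutes (60*24*7, one week of model
// history). Assuming those constants stay as defined, the exact boundary values behave like this:
//
//	isValidConf(128, 10080) // nil: both at their upper bounds
//	isValidConf(129, 10080) // error: invalid pods number
//	isValidConf(128, 10081) // error: invalid lookback value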
+ }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if err := isValidConf(tt.args.minPods, tt.args.lookBack); (err != nil) != tt.wantErr { + t.Errorf("isValidConf() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} diff --git a/plugins/rm_pod/rmpod-actuator-plugin.yaml b/plugins/rm_pod/rmpod-actuator-plugin.yaml index cc73ea6..8a19aa2 100644 --- a/plugins/rm_pod/rmpod-actuator-plugin.yaml +++ b/plugins/rm_pod/rmpod-actuator-plugin.yaml @@ -23,12 +23,12 @@ metadata: spec: serviceAccountName: planner-service-account containers: - - name: rmpod-actuator - image: 127.0.0.1:5000/rmpod:0.2.0 - args: [ "-config", "/config/defaults.json" ] - ports: - - containerPort: 33334 - securityContext: + - name: rmpod-actuator + image: 127.0.0.1:5000/rmpod:0.3.0 + args: [ "-config", "/config/defaults.json" ] + ports: + - containerPort: 33334 + securityContext: capabilities: drop: [ 'ALL' ] seccompProfile: @@ -38,16 +38,16 @@ spec: runAsNonRoot: true runAsUser: 10001 runAsGroup: 10001 - resources: - limits: - memory: "1024Mi" - cpu: "2000m" - requests: - memory: "512Mi" - cpu: "500m" - volumeMounts: - - name: rmpod-configmap-volume - mountPath: /config/ + resources: + limits: + memory: "1024Mi" + cpu: "2000m" + requests: + memory: "512Mi" + cpu: "500m" + volumeMounts: + - name: rmpod-configmap-volume + mountPath: /config/ volumes: - name: rmpod-configmap-volume configMap: diff --git a/plugins/scale_out/Dockerfile b/plugins/scale_out/Dockerfile index 0877874..fcc345e 100644 --- a/plugins/scale_out/Dockerfile +++ b/plugins/scale_out/Dockerfile @@ -1,7 +1,7 @@ # Copyright (c) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -FROM golang:1.19 AS builder +FROM golang:1.22 AS builder WORKDIR /plugins @@ -11,12 +11,12 @@ RUN make prepare-build build-plugins \ && go run github.com/google/go-licenses@v1.6.0 save "./..." --save_path licenses \ && hack/additional-licenses.sh -FROM alpine:3.17 +FROM alpine:3.20 RUN adduser -D nonroot -RUN apk add --upgrade --no-cache openssl=~3.0 && apk add --no-cache python3=~3.10 py3-matplotlib=~3.6 \ - py3-pip=~22.3 py3-scikit-learn=~1.1 -RUN pip install --no-cache-dir pymongo~=4.3 +RUN apk add --upgrade --no-cache openssl=~3.3 && apk add --no-cache python3=~3.12 py3-matplotlib=~3.7 \ + py3-pip=~24.0 py3-scikit-learn=~1.3 +RUN pip install --break-system-packages --no-cache-dir pymongo~=4.6 WORKDIR /plugins diff --git a/plugins/scale_out/cmd/scale_out.go b/plugins/scale_out/cmd/scale_out.go index e64b8e5..f0e4266 100644 --- a/plugins/scale_out/cmd/scale_out.go +++ b/plugins/scale_out/cmd/scale_out.go @@ -4,73 +4,59 @@ import ( "flag" "fmt" "os" - "os/signal" "github.com/intel/intent-driven-orchestration/pkg/controller" - val "github.com/intel/intent-driven-orchestration/plugins" + pluginsHelper "github.com/intel/intent-driven-orchestration/plugins" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/clientcmd" - plugins "github.com/intel/intent-driven-orchestration/pkg/api/plugins/v1alpha1" "github.com/intel/intent-driven-orchestration/pkg/common" - "github.com/intel/intent-driven-orchestration/pkg/planner" - "github.com/intel/intent-driven-orchestration/pkg/planner/actuators" "github.com/intel/intent-driven-orchestration/pkg/planner/actuators/scaling" "k8s.io/klog/v2" ) +// maxLookBack defines the maximum age a model in the knowledge base can have (1 week) +const maxLookBack = 10080 + +// maxScaleOut defines the maximum number of replicas. 
+const maxScaleOut = 128 + var ( kubeConfig string config string ) -type ScaleOutPluginHandler struct { - actuator actuators.Actuator -} - -func (s *ScaleOutPluginHandler) NextState(state *common.State, goal *common.State, profiles map[string]common.Profile) ([]common.State, []float64, []planner.Action) { - klog.InfoS("Invoked ScaleOut Next State Callback") - return s.actuator.NextState(state, goal, profiles) -} - -func (s *ScaleOutPluginHandler) Perform(state *common.State, plan []planner.Action) { - klog.InfoS("Invoked ScaleOut Perform Callback") - s.actuator.Perform(state, plan) -} - -func (s *ScaleOutPluginHandler) Effect(state *common.State, profiles map[string]common.Profile) { - klog.InfoS("Invoked ScaleOut Effect Callback") - s.actuator.Effect(state, profiles) +func init() { + flag.StringVar(&kubeConfig, "kubeConfig", "", "Path to a kube config file.") + flag.StringVar(&config, "config", "", "Path to configuration file.") } func main() { klog.InitFlags(nil) flag.Parse() + tmp, err := common.LoadConfig(config, func() interface{} { return &scaling.ScaleOutConfig{} }) - if err != nil { klog.Fatalf("Error loading configuration for actuator: %s", err) } - cfg := tmp.(*scaling.ScaleOutConfig) - err = isValidConf(cfg.MaxPods, cfg.MaxProActiveScaleOut, cfg.ProActiveLatencyFactor) + // validate configuration. + err = pluginsHelper.IsValidGenericConf(cfg.Endpoint, cfg.Port, cfg.PluginManagerEndpoint, cfg.PluginManagerPort, cfg.MongoEndpoint) if err != nil { - klog.Fatalf("Error on configuration for actuator: %s", err) + klog.Fatalf("Error on generic configuration for actuator: %s", err) } - - err = val.IsValidGenericConf(cfg.LookBack, cfg.PluginManagerPort, cfg.Port, - cfg.PythonInterpreter, cfg.Script, cfg.Endpoint, cfg.PluginManagerEndpoint, cfg.MongoEndpoint) + err = isValidConf(cfg.PythonInterpreter, cfg.Script, cfg.MaxPods, cfg.MaxProActiveScaleOut, cfg.LookBack, cfg.ProActiveLatencyFactor) if err != nil { - klog.Fatalf("Error on generic configuration for actuator: %s", err) + klog.Fatalf("Error on configuration for actuator: %s", err) } - mt := controller.NewMongoTracer(cfg.MongoEndpoint) + // get K8s config. config, err := clientcmd.BuildConfigFromFlags("", kubeConfig) if err != nil { klog.Fatalf("Error getting Kubernetes config: %s", err) @@ -80,46 +66,39 @@ func main() { klog.Fatalf("Error creating Kubernetes cluster client: %s", err) } - p := &ScaleOutPluginHandler{ - actuator: scaling.NewScaleOutActuator(clusterClient, mt, *cfg), - } - stub := plugins.NewActuatorPluginStub(p.actuator.Name(), cfg.Endpoint, cfg.Port, cfg.PluginManagerEndpoint, cfg.PluginManagerPort) - stub.SetNextStateFunc(p.NextState) - stub.SetPerformFunc(p.Perform) - stub.SetEffectFunc(p.Effect) - err = stub.Start() - if err != nil { - klog.Fatalf("Error starting plugin server: %s", err) - } - err = stub.Register() - if err != nil { - klog.Fatalf("Error registering plugin: %s", err) - } - signalChan := make(chan os.Signal, 1) - signal.Notify(signalChan, os.Interrupt) - <-signalChan - err = stub.Stop() - if err != nil { - klog.Fatalf("Error stopping plugin server: %s", err) - } + // once configuration is ready & valid start the plugin mechanism. 
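	// NewMongoTracer connects the actuator to the MongoDB knowledge base; StartActuatorPlugin
	// then starts the plugin stub, registers it with the plugin manager and returns the channel
	// that signal.Notify feeds, so the receive below keeps the process alive until SIGINT/SIGTERM.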
+ mt := controller.NewMongoTracer(cfg.MongoEndpoint) + actuator := scaling.NewScaleOutActuator(clusterClient, mt, *cfg) + signal := pluginsHelper.StartActuatorPlugin(actuator, cfg.Endpoint, cfg.Port, cfg.PluginManagerEndpoint, cfg.PluginManagerPort) + <-signal } -func init() { - flag.StringVar(&kubeConfig, "kubeConfig", "", "Path to a kube config file.") - flag.StringVar(&config, "config", "", "Path to configuration file.") -} +func isValidConf(interpreter, script string, confMaxPods, confMaxProactiveScaleOut, lookBack int, confProActiveLatencyFactor float64) error { + if !pluginsHelper.IsStrConfigValid(interpreter) { + return fmt.Errorf("invalid path to python interpreter: %s", interpreter) + } -func isValidConf(confMaxPods, confMaxProactiveScaleOut int, confProActiveLatencyFactor float64) error { - if confMaxPods <= 0 || confMaxPods > 128 { - return fmt.Errorf("invalid pods number") + if script != "None" { + _, err := os.Stat(script) + if err != nil { + return fmt.Errorf("invalid script %s", err) + } + } + + if confMaxPods <= 0 || confMaxPods > maxScaleOut { + return fmt.Errorf("invalid pods number: %d", confMaxPods) } if confMaxProactiveScaleOut < 0 || confMaxProactiveScaleOut > confMaxPods { - return fmt.Errorf("invalid max proactive value") + return fmt.Errorf("invalid max proactive value: %d", confMaxProactiveScaleOut) + } + + if lookBack <= 0 || lookBack > maxLookBack { + return fmt.Errorf("invalid lookback value: %d", lookBack) } if confProActiveLatencyFactor < 0 || confProActiveLatencyFactor > 1 { - return fmt.Errorf("invalid fraction value for proactive latency") + return fmt.Errorf("invalid fraction value for proactive latency: %f", confProActiveLatencyFactor) } return nil diff --git a/plugins/scale_out/cmd/scale_out_test.go b/plugins/scale_out/cmd/scale_out_test.go index 1c3c448..c944d42 100644 --- a/plugins/scale_out/cmd/scale_out_test.go +++ b/plugins/scale_out/cmd/scale_out_test.go @@ -2,10 +2,16 @@ package main import "testing" -func Test_isValidConf(t *testing.T) { +// pathToAnalyticsScript defines the path to an existing script for this actuator. 
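// Note that the proactive ceiling is validated against the configured pod ceiling rather than a
// fixed constant. Assuming the analytics script at pathToAnalyticsScript (defined just below)
// exists, a sketch of that coupling (argument order: interpreter, script, maxPods,
// maxProActiveScaleOut, lookBack, proActiveLatencyFactor):
//
//	isValidConf("python3", pathToAnalyticsScript, 16, 16, 1000, 0.5) // nil: proactive ceiling equals maxPods
//	isValidConf("python3", pathToAnalyticsScript, 16, 17, 1000, 0.5) // error: invalid max proactive value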
+const pathToAnalyticsScript = "../../../pkg/planner/actuators/scaling/analytics/horizontal_scaling.py" + +func TestIsValidConf(t *testing.T) { type args struct { + interpreter string + script string confMaxPods int confMaxProactiveScaleOut int + lookBack int confProActiveLatencyFactor float64 } tests := []struct { @@ -13,50 +19,70 @@ func Test_isValidConf(t *testing.T) { args args wantErr bool }{ + { + name: "tc-0", + args: args{interpreter: "python3", script: pathToAnalyticsScript, confMaxPods: 11, confMaxProactiveScaleOut: 9, lookBack: 1000, confProActiveLatencyFactor: 0.0}, + wantErr: false, + }, { name: "tc-1", - args: args{confMaxPods: -1, confMaxProactiveScaleOut: 0, confProActiveLatencyFactor: 0}, - wantErr: true, // negative pods number + args: args{interpreter: "", script: pathToAnalyticsScript, confMaxPods: 128, confMaxProactiveScaleOut: 0, lookBack: 1000, confProActiveLatencyFactor: 0}, + wantErr: true, // missing interpreter }, { name: "tc-2", - args: args{confMaxPods: 0, confMaxProactiveScaleOut: 0, confProActiveLatencyFactor: 0}, - wantErr: true, // zero pods number + args: args{interpreter: "python3", script: "", confMaxPods: 128, confMaxProactiveScaleOut: 0, lookBack: 1000, confProActiveLatencyFactor: 0}, + wantErr: true, // wrong script }, { name: "tc-3", - args: args{confMaxPods: 1110, confMaxProactiveScaleOut: 0, confProActiveLatencyFactor: 0}, - wantErr: true, // over the limit pods number + args: args{interpreter: "python3", script: pathToAnalyticsScript, confMaxPods: -1, confMaxProactiveScaleOut: 0, lookBack: 1000, confProActiveLatencyFactor: 0}, + wantErr: true, // negative pods number }, { name: "tc-4", - args: args{confMaxPods: 11, confMaxProactiveScaleOut: -1, confProActiveLatencyFactor: 0}, - wantErr: true, // negative proactive number + args: args{interpreter: "python3", script: pathToAnalyticsScript, confMaxPods: 0, confMaxProactiveScaleOut: 0, lookBack: 1000, confProActiveLatencyFactor: 0}, + wantErr: true, // zero pods number }, { name: "tc-5", - args: args{confMaxPods: 11, confMaxProactiveScaleOut: 129, confProActiveLatencyFactor: 0}, - wantErr: true, // over the limit + args: args{interpreter: "python3", script: pathToAnalyticsScript, confMaxPods: 99999, confMaxProactiveScaleOut: 0, lookBack: 1000, confProActiveLatencyFactor: 0}, + wantErr: true, // over the limit pods number }, { name: "tc-6", - args: args{confMaxPods: 11, confMaxProactiveScaleOut: 9, confProActiveLatencyFactor: -1.0}, - wantErr: true, // negative factor + args: args{interpreter: "python3", script: pathToAnalyticsScript, confMaxPods: -1, confMaxProactiveScaleOut: -1, lookBack: 1000, confProActiveLatencyFactor: 0}, + wantErr: true, // negative proactive number }, { name: "tc-7", - args: args{confMaxPods: 11, confMaxProactiveScaleOut: 9, confProActiveLatencyFactor: 0.0}, - wantErr: false, + args: args{interpreter: "python3", script: pathToAnalyticsScript, confMaxPods: 128, confMaxProactiveScaleOut: 256, lookBack: 1000, confProActiveLatencyFactor: 0}, + wantErr: true, // over the limit }, { name: "tc-8", - args: args{confMaxPods: 11, confMaxProactiveScaleOut: 9, confProActiveLatencyFactor: 1.01}, + args: args{interpreter: "python3", script: pathToAnalyticsScript, confMaxPods: 128, confMaxProactiveScaleOut: 0, lookBack: 999999, confProActiveLatencyFactor: 0}, + wantErr: true, // lookback to long + }, + { + name: "tc-9", + args: args{interpreter: "python3", script: pathToAnalyticsScript, confMaxPods: 128, confMaxProactiveScaleOut: 0, lookBack: -1, confProActiveLatencyFactor: 0}, + wantErr: 
true, // lookback negative + }, + { + name: "tc-10", + args: args{interpreter: "python3", script: pathToAnalyticsScript, confMaxPods: 128, confMaxProactiveScaleOut: 0, lookBack: 1000, confProActiveLatencyFactor: -1.0}, + wantErr: true, // negative factor + }, + { + name: "tc-11", + args: args{interpreter: "python3", script: pathToAnalyticsScript, confMaxPods: 128, confMaxProactiveScaleOut: 0, lookBack: 1000, confProActiveLatencyFactor: 1.2}, wantErr: true, // over the limit }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if err := isValidConf(tt.args.confMaxPods, tt.args.confMaxProactiveScaleOut, tt.args.confProActiveLatencyFactor); (err != nil) != tt.wantErr { + if err := isValidConf(tt.args.interpreter, tt.args.script, tt.args.confMaxPods, tt.args.confMaxProactiveScaleOut, tt.args.lookBack, tt.args.confProActiveLatencyFactor); (err != nil) != tt.wantErr { t.Errorf("isValidConf() error = %v, wantErr %v", err, tt.wantErr) } }) diff --git a/plugins/scale_out/scaleout-actuator-plugin.yaml b/plugins/scale_out/scaleout-actuator-plugin.yaml index b0b7eb7..9471fa6 100644 --- a/plugins/scale_out/scaleout-actuator-plugin.yaml +++ b/plugins/scale_out/scaleout-actuator-plugin.yaml @@ -27,12 +27,12 @@ metadata: spec: serviceAccountName: planner-service-account containers: - - name: scaleout-actuator - image: 127.0.0.1:5000/scaleout:0.2.0 - args: [ "-config", "/config/defaults.json" ] - ports: - - containerPort: 33334 - securityContext: + - name: scaleout-actuator + image: 127.0.0.1:5000/scaleout:0.3.0 + args: [ "-config", "/config/defaults.json" ] + ports: + - containerPort: 33334 + securityContext: capabilities: drop: [ 'ALL' ] seccompProfile: @@ -42,22 +42,24 @@ spec: runAsNonRoot: true runAsUser: 10001 runAsGroup: 10001 - resources: - limits: - memory: "1024Mi" - cpu: "2000m" - requests: - memory: "512Mi" - cpu: "500m" - volumeMounts: - - name: matplotlib-tmp - mountPath: /var/tmp - - name: scaleout-configmap-volume - mountPath: /config/ - env: - # Needed for the analytics python script. - - name: MONGO_URL - value: "mongodb://planner-mongodb-service:27017/" + resources: + limits: + memory: "1024Mi" + cpu: "2000m" + requests: + memory: "512Mi" + cpu: "500m" + volumeMounts: + - name: matplotlib-tmp + mountPath: /var/tmp + - name: scaleout-configmap-volume + mountPath: /config/ + env: + # Needed for the analytics python script. + - name: MONGO_URL + value: "mongodb://planner-mongodb-service:27017/" + - name: MPLCONFIGDIR + value: /var/tmp volumes: - name: matplotlib-tmp emptyDir: