diff --git a/api/v1alpha1/managedcluster_types.go b/api/v1alpha1/managedcluster_types.go index 779c25676..60920c732 100644 --- a/api/v1alpha1/managedcluster_types.go +++ b/api/v1alpha1/managedcluster_types.go @@ -40,6 +40,8 @@ const ( // CredentialReadyCondition indicates if referenced Credential exists and has Ready state CredentialReadyCondition = "CredentialReady" + // CredentialsPropagatedCondition indicates that CCM credentials were delivered to managed cluster + CredentialsPropagatedCondition = "CredentialsApplied" // TemplateReadyCondition indicates the referenced Template exists and valid. TemplateReadyCondition = "TemplateReady" // HelmChartReadyCondition indicates the corresponding HelmChart is valid and ready. diff --git a/cmd/main.go b/cmd/main.go index 21af5d9e0..ab2b66de1 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -29,6 +29,8 @@ import ( "k8s.io/client-go/dynamic" clientgoscheme "k8s.io/client-go/kubernetes/scheme" _ "k8s.io/client-go/plugin/pkg/client/auth" + capz "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" + capv "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/healthz" "sigs.k8s.io/controller-runtime/pkg/log/zap" @@ -58,6 +60,8 @@ func init() { utilruntime.Must(sourcev1.AddToScheme(scheme)) utilruntime.Must(hcv2.AddToScheme(scheme)) utilruntime.Must(sveltosv1beta1.AddToScheme(scheme)) + utilruntime.Must(capz.AddToScheme(scheme)) + utilruntime.Must(capv.AddToScheme(scheme)) // +kubebuilder:scaffold:scheme } @@ -215,9 +219,10 @@ func main() { os.Exit(1) } if err = (&controller.ManagedClusterReconciler{ - Client: mgr.GetClient(), - Config: mgr.GetConfig(), - DynamicClient: dc, + Client: mgr.GetClient(), + Config: mgr.GetConfig(), + DynamicClient: dc, + SystemNamespace: currentNamespace, }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "ManagedCluster") os.Exit(1) diff --git a/go.mod b/go.mod index 
4bdaa60c8..77c05adc8 100644 --- a/go.mod +++ b/go.mod @@ -27,6 +27,8 @@ require ( k8s.io/client-go v0.31.1 k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 sigs.k8s.io/cluster-api v1.8.4 + sigs.k8s.io/cluster-api-provider-azure v1.17.0 + sigs.k8s.io/cluster-api-provider-vsphere v1.11.1 sigs.k8s.io/controller-runtime v0.19.0 sigs.k8s.io/yaml v1.4.0 ) @@ -34,6 +36,9 @@ require ( require ( dario.cat/mergo v1.0.1 // indirect github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 // indirect github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect github.com/BurntSushi/toml v1.4.0 // indirect github.com/MakeNowJust/heredoc v1.0.0 // indirect @@ -154,6 +159,7 @@ require ( go.uber.org/zap v1.27.0 // indirect golang.org/x/crypto v0.28.0 // indirect golang.org/x/exp v0.0.0-20241004190924-225e2abe05e6 // indirect + golang.org/x/mod v0.21.0 // indirect golang.org/x/net v0.30.0 // indirect golang.org/x/oauth2 v0.23.0 // indirect golang.org/x/sync v0.8.0 // indirect diff --git a/go.sum b/go.sum index cc338049b..235e54120 100644 --- a/go.sum +++ b/go.sum @@ -4,8 +4,25 @@ filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 h1:He8afgbRMd7mFxO99hRNu+6tazq8nFF9lIwo9JFroBk= github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= +github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 h1:nyQWyZvwGTvunIMxi1Y9uXkcyr+I7TeNrr/foo4Kpk8= 
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0/go.mod h1:QyiQdW4f4/BIfB8ZutZ2s+28RAgfa/pT+zS++ZHyM1I= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0 h1:PTFGRSlMKCQelWwxUyYVEUqseBJVemLyqWJjvMyt0do= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0/go.mod h1:LRr2FzBTQlONPPa5HREE5+RjSCTXl7BwOvYOaWTqCaI= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0 h1:Dd+RhdJn0OTtVGaeDLZpcumkIVCtA/3/Fo42+eoYvVM= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0/go.mod h1:5kakwfW5CjC9KK+Q4wjXAg+ShuIm2mBMua0ZFj2C8PE= +github.com/Azure/azure-service-operator/v2 v2.8.0 h1:BcyB8LvRmtgVIIUaXwWIJz5eHvknyno0qq5LkDuvM/s= +github.com/Azure/azure-service-operator/v2 v2.8.0/go.mod h1:ezbJS56PcORFFqLV8XZmM9xZ12m6aGAkg353fQhWD/8= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod 
h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0= github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU= @@ -32,6 +49,8 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPd github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= +github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -158,6 +177,8 @@ github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJA github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= +github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= 
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -206,7 +227,7 @@ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+l github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= -github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= github.com/hashicorp/golang-lru/arc/v2 v2.0.5 h1:l2zaLDubNhW4XO3LnliVj0GXO3+/CGNJAg1dcN2Fpfw= github.com/hashicorp/golang-lru/arc/v2 v2.0.5/go.mod h1:ny6zBSQZi2JxIeYcv7kt2sH2PXJtirBN7RDhRpxPkxU= github.com/hashicorp/golang-lru/v2 v2.0.5 h1:wW7h1TG88eUIJ2i69gaE3uNVtEPIagzhGvHgwfx2Vm4= @@ -305,6 +326,8 @@ github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+v github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI= github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -417,8 +440,8 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0 h1:m0yTi 
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0/go.mod h1:wBQbT4UekBfegL2nx0Xk1vBcnzyBPsIVm9hRG4fYcr4= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0 h1:umZgi92IyxfXd/l4kaDhnKgY8rnN/cZcF1LKc6I8OQ8= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0/go.mod h1:4lVs6obhSVRb1EW5FhOuBTyiQhtRtAnnva9vD3yRfq8= -go.opentelemetry.io/otel/exporters/prometheus v0.44.0 h1:08qeJgaPC0YEBu2PQMbqU3rogTlyzpjhCI2b58Yn00w= -go.opentelemetry.io/otel/exporters/prometheus v0.44.0/go.mod h1:ERL2uIeBtg4TxZdojHUwzZfIFlUIjZtxubT5p4h1Gjg= +go.opentelemetry.io/otel/exporters/prometheus v0.46.0 h1:I8WIFXR351FoLJYuloU4EgXbtNX2URfU/85pUPheIEQ= +go.opentelemetry.io/otel/exporters/prometheus v0.46.0/go.mod h1:ztwVUHe5DTR/1v7PeuGRnU5Bbd4QKYwApWmuutKsJSs= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v0.44.0 h1:dEZWPjVN22urgYCza3PXRUGEyCB++y1sAqm6guWFesk= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v0.44.0/go.mod h1:sTt30Evb7hJB/gEk27qLb1+l9n4Tb8HvHkR0Wx3S6CU= go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.21.0 h1:VhlEQAPp9R1ktYfrPk5SOryw1e9LDDTZCbIPFrho0ec= @@ -427,8 +450,8 @@ go.opentelemetry.io/otel/metric v1.30.0 h1:4xNulvn9gjzo4hjg+wzIKG7iNFEaBMX00Qd4Q go.opentelemetry.io/otel/metric v1.30.0/go.mod h1:aXTfST94tswhWEb+5QjlSqG+cZlmyXy/u8jFpor3WqQ= go.opentelemetry.io/otel/sdk v1.30.0 h1:cHdik6irO49R5IysVhdn8oaiR9m8XluDaJAs4DfOrYE= go.opentelemetry.io/otel/sdk v1.30.0/go.mod h1:p14X4Ok8S+sygzblytT1nqG98QG2KYKv++HE0LY/mhg= -go.opentelemetry.io/otel/sdk/metric v1.21.0 h1:smhI5oD714d6jHE6Tie36fPx4WDFIg+Y6RfAY4ICcR0= -go.opentelemetry.io/otel/sdk/metric v1.21.0/go.mod h1:FJ8RAsoPGv/wYMgBdUJXOm+6pzFY3YdljnXtv1SBE8Q= +go.opentelemetry.io/otel/sdk/metric v1.27.0 h1:5uGNOlpXi+Hbo/DRoI31BSb1v+OGcpv2NemcCrOL8gI= +go.opentelemetry.io/otel/sdk/metric v1.27.0/go.mod h1:we7jJVrYN2kh3mVBlswtPU22K0SA+769l93J6bsyvqw= go.opentelemetry.io/otel/trace v1.30.0 h1:7UBkkYzeg3C7kQX8VAidWh2biiQbtAKjyIML8dQ9wmc= 
go.opentelemetry.io/otel/trace v1.30.0/go.mod h1:5EyKqTzzmyqB9bwtCCq6pDLktPK6fmGf/Dph+8VI02o= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= @@ -556,6 +579,10 @@ oras.land/oras-go v1.2.6 h1:z8cmxQXBU8yZ4mkytWqXfo6tZcamPwjsuxYU81xJ8Lk= oras.land/oras-go v1.2.6/go.mod h1:OVPc1PegSEe/K8YiLfosrlqlqTN9PUyFvOw5Y9gwrT8= sigs.k8s.io/cluster-api v1.8.4 h1:jBKQH1H/HUdUFk8T6qDzIxZJfWw1F5ZP0ZpYQJDmTHs= sigs.k8s.io/cluster-api v1.8.4/go.mod h1:pXv5LqLxuIbhGIXykyNKiJh+KrLweSBajVHHitPLyoY= +sigs.k8s.io/cluster-api-provider-azure v1.17.0 h1:joiKhPM2E0WDtiESRCODgwEliDCWgNIldBVKVMZb6Hw= +sigs.k8s.io/cluster-api-provider-azure v1.17.0/go.mod h1:+mHDLC7mm7YXst0j0/R9PvEuRHibuhz+mVqJne544go= +sigs.k8s.io/cluster-api-provider-vsphere v1.11.1 h1:hwQEMOiC0zNIRNl5qG1x+IyAAoN32WhQrXo2ANjD+CQ= +sigs.k8s.io/cluster-api-provider-vsphere v1.11.1/go.mod h1:YU5i6KB5MnjuyI4ushaDOwdX1rscnSaJmnF3SlrnLPU= sigs.k8s.io/controller-runtime v0.19.0 h1:nWVM7aq+Il2ABxwiCizrVDSlmDcshi9llbaFbC0ji/Q= sigs.k8s.io/controller-runtime v0.19.0/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= sigs.k8s.io/gateway-api v1.1.0 h1:DsLDXCi6jR+Xz8/xd0Z1PYl2Pn0TyaFMOPPZIj4inDM= diff --git a/internal/controller/credential_controller.go b/internal/controller/credential_controller.go index 53df59b11..24992a194 100644 --- a/internal/controller/credential_controller.go +++ b/internal/controller/credential_controller.go @@ -38,7 +38,6 @@ func (r *CredentialReconciler) Reconcile(ctx context.Context, req ctrl.Request) cred := &hmc.Credential{} if err := r.Client.Get(ctx, req.NamespacedName, cred); err != nil { - l.Error(err, "unable to fetch Credential") return ctrl.Result{}, client.IgnoreNotFound(err) } diff --git a/internal/controller/managedcluster_controller.go b/internal/controller/managedcluster_controller.go index ec72fe524..aff940e42 100644 --- a/internal/controller/managedcluster_controller.go +++ b/internal/controller/managedcluster_controller.go @@ -15,10 +15,12 @@ package 
controller import ( + "bytes" "context" "encoding/json" "errors" "fmt" + texttemplate "text/template" "time" "github.com/Mirantis/hmc/internal/sveltos" @@ -38,12 +40,17 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/dynamic" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + capz "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" + capv "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/yaml" hmc "github.com/Mirantis/hmc/api/v1alpha1" "github.com/Mirantis/hmc/internal/helm" @@ -57,8 +64,9 @@ const ( // ManagedClusterReconciler reconciles a ManagedCluster object type ManagedClusterReconciler struct { client.Client - Config *rest.Config - DynamicClient *dynamic.DynamicClient + Config *rest.Config + DynamicClient *dynamic.DynamicClient + SystemNamespace string } var ( @@ -365,6 +373,11 @@ func (r *ManagedClusterReconciler) Update(ctx context.Context, managedCluster *h return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, nil } + result, err := r.reconcileCredentialPropagation(ctx, managedCluster) + if err != nil { + return result, err + } + return r.updateServices(ctx, managedCluster) } @@ -621,6 +634,349 @@ func (r *ManagedClusterReconciler) objectsAvailable(ctx context.Context, namespa return len(itemsList.Items) != 0, nil } +func (r *ManagedClusterReconciler) reconcileCredentialPropagation(ctx context.Context, managedCluster *hmc.ManagedCluster) (ctrl.Result, error) { + l := ctrl.LoggerFrom(ctx) + l.Info("Reconciling CCM credentials propagation") + + providers, err := r.getProviders(ctx, managedCluster.Namespace, managedCluster.Spec.Template) + if err != nil { + return ctrl.Result{}, + 
fmt.Errorf("failed to get cluster providers for cluster %s/%s: %s", managedCluster.Namespace, managedCluster.Name, err) + } + + kubeconfSecret := &corev1.Secret{} + if err := r.Client.Get(ctx, client.ObjectKey{ + Name: fmt.Sprintf("%s-kubeconfig", managedCluster.Name), + Namespace: managedCluster.Namespace, + }, kubeconfSecret); err != nil { + return ctrl.Result{}, + fmt.Errorf("failed to get kubeconfig secret for cluster %s/%s: %s", managedCluster.Namespace, managedCluster.Name, err) + } + + for _, provider := range providers.InfrastructureProviders { + switch provider.Name { + case "aws": + l.Info("Skipping creds propagation for AWS") + continue + case "azure": + l.Info("Azure creds propagation start") + err := r.propagateAzureSecrets(ctx, managedCluster, kubeconfSecret) + if err != nil { + errMsg := fmt.Sprintf("failed to create Azure CCM credentials: %s", err) + apimeta.SetStatusCondition(managedCluster.GetConditions(), metav1.Condition{ + Type: hmc.CredentialsPropagatedCondition, + Status: metav1.ConditionFalse, + Reason: hmc.FailedReason, + Message: errMsg, + }) + return ctrl.Result{}, errors.New(errMsg) + } + apimeta.SetStatusCondition(managedCluster.GetConditions(), metav1.Condition{ + Type: hmc.CredentialsPropagatedCondition, + Status: metav1.ConditionTrue, + Reason: hmc.SucceededReason, + Message: "Azure CCM credentials created", + }) + continue + case "vsphere": + l.Info("vSphere creds propagation start") + err := r.propagateVSphereSecrets(ctx, managedCluster, kubeconfSecret) + if err != nil { + errMsg := fmt.Sprintf("failed to create vSphere CCM credentials: %s", err) + apimeta.SetStatusCondition(managedCluster.GetConditions(), metav1.Condition{ + Type: hmc.CredentialsPropagatedCondition, + Status: metav1.ConditionFalse, + Reason: hmc.FailedReason, + Message: errMsg, + }) + return ctrl.Result{}, errors.New(errMsg) + } + apimeta.SetStatusCondition(managedCluster.GetConditions(), metav1.Condition{ + Type: hmc.CredentialsPropagatedCondition, + Status: 
metav1.ConditionTrue, + Reason: hmc.SucceededReason, + Message: "vSphere CCM credentials created", + }) + continue + default: + errMsg := fmt.Sprintf("unsupported infrastructure provider %s", provider) + apimeta.SetStatusCondition(managedCluster.GetConditions(), metav1.Condition{ + Type: hmc.CredentialsPropagatedCondition, + Status: metav1.ConditionFalse, + Reason: hmc.FailedReason, + Message: errMsg, + }) + continue + } + } + l.Info("CCM credentials reconcile finished") + return ctrl.Result{}, nil +} + +func (r *ManagedClusterReconciler) propagateAzureSecrets(ctx context.Context, managedCluster *hmc.ManagedCluster, kubeconfSecret *corev1.Secret) error { + azureCluster := &capz.AzureCluster{} + if err := r.Client.Get(ctx, client.ObjectKey{ + Name: managedCluster.Name, + Namespace: managedCluster.Namespace, + }, azureCluster); err != nil { + return fmt.Errorf("failed to get AzureCluster %s: %s", managedCluster.Name, err) + } + + azureClIdty := &capz.AzureClusterIdentity{} + if err := r.Client.Get(ctx, client.ObjectKey{ + Name: azureCluster.Spec.IdentityRef.Name, + Namespace: azureCluster.Spec.IdentityRef.Namespace, + }, azureClIdty); err != nil { + return fmt.Errorf("failed to get AzureClusterIdentity %s: %s", azureCluster.Spec.IdentityRef.Name, err) + } + + azureSecret := &corev1.Secret{} + if err := r.Client.Get(ctx, client.ObjectKey{ + Name: azureClIdty.Spec.ClientSecret.Name, + Namespace: azureClIdty.Spec.ClientSecret.Namespace, + }, azureSecret); err != nil { + return fmt.Errorf("failed to get azure Secret %s: %s", azureClIdty.Spec.ClientSecret.Name, err) + } + + ccmSecret, err := generateAzureCCMSecret(azureCluster, azureClIdty, azureSecret) + if err != nil { + return fmt.Errorf("failed to generate Azure CCM secret: %s", err) + } + + if err := applyCCMConfigs(ctx, kubeconfSecret, ccmSecret); err != nil { + return fmt.Errorf("failed to apply Azure CCM secret: %s", err) + } + + return nil +} + +func generateAzureCCMSecret(azureCluster *capz.AzureCluster, 
azureClIdty *capz.AzureClusterIdentity, azureSecret *corev1.Secret) (*corev1.Secret, error) { + azureJSONMap := map[string]any{ + "cloud": azureCluster.Spec.AzureEnvironment, + "tenantId": azureClIdty.Spec.TenantID, + "subscriptionId": azureCluster.Spec.SubscriptionID, + "aadClientId": azureClIdty.Spec.ClientID, + "aadClientSecret": string(azureSecret.Data["clientSecret"]), + "resourceGroup": azureCluster.Spec.ResourceGroup, + "securityGroupName": azureCluster.Spec.NetworkSpec.Subnets[0].SecurityGroup.Name, + "securityGroupResourceGroup": azureCluster.Spec.NetworkSpec.Vnet.ResourceGroup, + "location": azureCluster.Spec.Location, + "vmType": "vmss", + "vnetName": azureCluster.Spec.NetworkSpec.Vnet.Name, + "vnetResourceGroup": azureCluster.Spec.NetworkSpec.Vnet.ResourceGroup, + "subnetName": azureCluster.Spec.NetworkSpec.Subnets[0].Name, + "loadBalancerSku": "Standard", + "loadBalancerName": "", + "maximumLoadBalancerRuleCount": 250, + "useManagedIdentityExtension": false, + "useInstanceMetadata": true, + } + azureJSON, err := json.Marshal(azureJSONMap) + if err != nil { + return nil, fmt.Errorf("error marshalling azure.json: %s", err) + } + + secretData := map[string][]byte{ + "cloud-config": azureJSON, + } + + return makeSecret("azure-cloud-provider", metav1.NamespaceSystem, secretData), nil +} + +func (r *ManagedClusterReconciler) propagateVSphereSecrets(ctx context.Context, managedCluster *hmc.ManagedCluster, kubeconfSecret *corev1.Secret) error { + vsphereCluster := &capv.VSphereCluster{} + if err := r.Client.Get(ctx, client.ObjectKey{ + Name: managedCluster.Name, + Namespace: managedCluster.Namespace, + }, vsphereCluster); err != nil { + return fmt.Errorf("failed to get VSphereCluster %s: %s", managedCluster.Name, err) + } + + vsphereClIdty := &capv.VSphereClusterIdentity{} + if err := r.Client.Get(ctx, client.ObjectKey{ + Name: vsphereCluster.Spec.IdentityRef.Name, + }, vsphereClIdty); err != nil { + return fmt.Errorf("failed to get VSphereClusterIdentity %s: 
%s", vsphereCluster.Spec.IdentityRef.Name, err) + } + + vsphereSecret := &corev1.Secret{} + if err := r.Client.Get(ctx, client.ObjectKey{ + Name: vsphereClIdty.Spec.SecretName, + Namespace: r.SystemNamespace, + }, vsphereSecret); err != nil { + return fmt.Errorf("failed to get VSphere Secret %s: %s", vsphereClIdty.Spec.SecretName, err) + } + + vsphereMachines := &capv.VSphereMachineList{} + if err := r.Client.List( + ctx, + vsphereMachines, + &client.ListOptions{ + Namespace: managedCluster.Namespace, + LabelSelector: labels.SelectorFromSet(map[string]string{ + hmc.ClusterNameLabelKey: managedCluster.Name, + }), + Limit: 1, + }, + ); err != nil { + return fmt.Errorf("failed to list VSphereMachines for cluster %s: %s", managedCluster.Name, err) + } + ccmSecret, ccmConfig, err := generateVSphereCCMConfigs(vsphereCluster, vsphereSecret, &vsphereMachines.Items[0]) + if err != nil { + return fmt.Errorf("failed to generate VSphere CCM config: %s", err) + } + csiSecret, err := generateVSphereCSISecret(vsphereCluster, vsphereSecret, &vsphereMachines.Items[0]) + if err != nil { + return fmt.Errorf("failed to generate VSphere CSI secret: %s", err) + } + + if err := applyCCMConfigs(ctx, kubeconfSecret, ccmSecret, ccmConfig, csiSecret); err != nil { + return fmt.Errorf("failed to apply VSphere CCM/CSI secrets: %s", err) + } + + return nil +} + +func generateVSphereCCMConfigs(vCl *capv.VSphereCluster, vScrt *corev1.Secret, vMa *capv.VSphereMachine) (*corev1.Secret, *corev1.ConfigMap, error) { + secretName := "vsphere-cloud-secret" + secretData := map[string][]byte{ + fmt.Sprintf("%s.username", vCl.Spec.Server): vScrt.Data["username"], + fmt.Sprintf("%s.password", vCl.Spec.Server): vScrt.Data["password"], + } + ccmCfg := map[string]any{ + "global": map[string]any{ + "port": 443, + "insecureFlag": true, + "secretName": secretName, + "secretNamespace": metav1.NamespaceSystem, + }, + "vcenter": map[string]any{ + vCl.Spec.Server: map[string]any{ + "server": vCl.Spec.Server, + 
"datacenters": []string{ + vMa.Spec.Datacenter, + }, + }, + }, + "labels": map[string]any{ + "region": "k8s-region", + "zone": "k8s-zone", + }, + } + + ccmCfgYaml, err := yaml.Marshal(ccmCfg) + if err != nil { + return nil, nil, fmt.Errorf("failed to marshal CCM config: %s", err) + } + + cmData := map[string]string{ + "vsphere.conf": string(ccmCfgYaml), + } + return makeSecret(secretName, metav1.NamespaceSystem, secretData), + makeConfigMap("cloud-config", metav1.NamespaceSystem, cmData), + nil +} + +func generateVSphereCSISecret(vCl *capv.VSphereCluster, vScrt *corev1.Secret, vMa *capv.VSphereMachine) (*corev1.Secret, error) { + csiCfg := ` +[Global] +cluster-id = "{{ .ClusterID }}" + +[VirtualCenter "{{ .Vcenter }}"] +insecure-flag = "true" +user = "{{ .Username }}" +password = "{{ .Password }}" +port = "443" +datacenters = "{{ .Datacenter }}" +` + type CSIFields struct { + ClusterID, Vcenter, Username, Password, Datacenter string + } + + fields := CSIFields{ + ClusterID: vCl.Name, + Vcenter: vCl.Spec.Server, + Username: string(vScrt.Data["username"]), + Password: string(vScrt.Data["password"]), + Datacenter: vMa.Spec.Datacenter, + } + + tmpl, err := texttemplate.New("csiCfg").Parse(csiCfg) + if err != nil { + return nil, fmt.Errorf("failed to generate CSI secret (tmpl parse): %s", err) + } + var buf bytes.Buffer + if err := tmpl.Execute(&buf, fields); err != nil { + return nil, fmt.Errorf("failed to generate CSI secret (tmpl execute): %s", err) + } + + secretData := map[string][]byte{ + "csi-vsphere.conf": buf.Bytes(), + } + + return makeSecret("vcenter-config-secret", metav1.NamespaceSystem, secretData), nil +} + +func applyCCMConfigs(ctx context.Context, kubeconfSecret *corev1.Secret, objects ...client.Object) error { + clnt, err := makeClientFromSecret(kubeconfSecret) + if err != nil { + return fmt.Errorf("failed to create k8s client: %s", err) + } + for _, object := range objects { + if err := clnt.Patch( + ctx, + object, + client.Apply, + 
client.FieldOwner("hmc-controller"), + ); err != nil { + return fmt.Errorf("failed to apply CCM config object %s: %s", object.GetName(), err) + } + } + return nil +} + +func makeSecret(name, namespace string, data map[string][]byte) *corev1.Secret { + s := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Data: data, + } + s.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Secret")) + return s +} + +func makeConfigMap(name, namespace string, data map[string]string) *corev1.ConfigMap { + c := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Data: data, + } + c.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ConfigMap")) + return c +} + +func makeClientFromSecret(kubeconfSecret *corev1.Secret) (client.Client, error) { + scheme := runtime.NewScheme() + if err := clientgoscheme.AddToScheme(scheme); err != nil { + return nil, err + } + restConfig, err := clientcmd.RESTConfigFromKubeConfig(kubeconfSecret.Data["value"]) + if err != nil { + return nil, err + } + cl, err := client.New(restConfig, client.Options{ + Scheme: scheme, + }) + if err != nil { + return nil, err + } + return cl, nil +} + func setIdentityHelmValues(values *apiextensionsv1.JSON, idRef *corev1.ObjectReference) (*apiextensionsv1.JSON, error) { var valuesJSON map[string]any err := json.Unmarshal(values.Raw, &valuesJSON) diff --git a/templates/cluster/azure-hosted-cp/templates/azurecluster.yaml b/templates/cluster/azure-hosted-cp/templates/azurecluster.yaml index b1734116d..3ec9a7a22 100644 --- a/templates/cluster/azure-hosted-cp/templates/azurecluster.yaml +++ b/templates/cluster/azure-hosted-cp/templates/azurecluster.yaml @@ -7,10 +7,10 @@ metadata: finalizers: - hmc.mirantis.com/cleanup spec: + {{- with .Values.clusterIdentity }} identityRef: - kind: AzureClusterIdentity - name: {{ .Values.clusterIdentity.name }} - namespace: {{ .Values.clusterIdentity.namespace }} + {{- toYaml . 
| nindent 4 }} + {{- end }} networkSpec: vnet: resourceGroup: {{ .Values.resourceGroup }} diff --git a/templates/cluster/azure-hosted-cp/templates/k0smotroncontrolplane.yaml b/templates/cluster/azure-hosted-cp/templates/k0smotroncontrolplane.yaml index c353a085b..83f4ad67b 100644 --- a/templates/cluster/azure-hosted-cp/templates/k0smotroncontrolplane.yaml +++ b/templates/cluster/azure-hosted-cp/templates/k0smotroncontrolplane.yaml @@ -33,10 +33,11 @@ spec: - name: cloud-provider-azure namespace: kube-system chartname: cloud-provider-azure/cloud-provider-azure - version: 1.30.4 + version: 1.31.2 order: 1 values: | cloudControllerManager: + cloudConfigSecretName: azure-cloud-provider nodeSelector: node-role.kubernetes.io/control-plane: null - name: azuredisk-csi-driver @@ -45,5 +46,9 @@ spec: version: 1.30.3 order: 2 values: | + controller: + cloudConfigSecretName: azure-cloud-provider + node: + cloudConfigSecretName: azure-cloud-provider linux: kubelet: "/var/lib/k0s/kubelet" diff --git a/templates/cluster/azure-hosted-cp/templates/k0sworkerconfigtemplate.yaml b/templates/cluster/azure-hosted-cp/templates/k0sworkerconfigtemplate.yaml index 17618007e..562e4fdeb 100644 --- a/templates/cluster/azure-hosted-cp/templates/k0sworkerconfigtemplate.yaml +++ b/templates/cluster/azure-hosted-cp/templates/k0sworkerconfigtemplate.yaml @@ -9,10 +9,3 @@ spec: args: - --enable-cloud-provider - --kubelet-extra-args="--cloud-provider=external" - files: - - path: "/etc/kubernetes/azure.json" - permissions: "0644" - contentFrom: - secretRef: - key: worker-node-azure.json - name: {{ include "azuremachinetemplate.name" . 
}}-azure-json diff --git a/templates/cluster/azure-standalone-cp/templates/azurecluster.yaml b/templates/cluster/azure-standalone-cp/templates/azurecluster.yaml index 75769b590..bb940f8cc 100644 --- a/templates/cluster/azure-standalone-cp/templates/azurecluster.yaml +++ b/templates/cluster/azure-standalone-cp/templates/azurecluster.yaml @@ -3,10 +3,10 @@ kind: AzureCluster metadata: name: {{ include "cluster.name" . }} spec: + {{- with .Values.clusterIdentity }} identityRef: - kind: AzureClusterIdentity - name: {{ .Values.clusterIdentity.name }} - namespace: {{ .Values.clusterIdentity.namespace }} + {{- toYaml . | nindent 4 }} + {{- end }} location: {{ .Values.location }} {{- if .Values.bastion.enabled }} {{- with .Values.bastion.bastionSpec }} diff --git a/templates/cluster/azure-standalone-cp/templates/k0scontrolplane.yaml b/templates/cluster/azure-standalone-cp/templates/k0scontrolplane.yaml index 95294cf93..78baf6484 100644 --- a/templates/cluster/azure-standalone-cp/templates/k0scontrolplane.yaml +++ b/templates/cluster/azure-standalone-cp/templates/k0scontrolplane.yaml @@ -11,13 +11,6 @@ spec: - --enable-cloud-provider - --kubelet-extra-args="--cloud-provider=external" - --disable-components=konnectivity-server - files: - - path: "/etc/kubernetes/azure.json" - permissions: "0644" - contentFrom: - secretRef: - key: control-plane-azure.json - name: {{ include "k0scontrolplane.name" . 
}}-0-azure-json k0s: apiVersion: k0s.k0sproject.io/v1beta1 kind: ClusterConfig @@ -42,10 +35,11 @@ spec: - name: cloud-provider-azure namespace: kube-system chartname: cloud-provider-azure/cloud-provider-azure - version: 1.30.4 + version: 1.31.2 order: 1 values: | cloudControllerManager: + cloudConfigSecretName: azure-cloud-provider nodeSelector: node-role.kubernetes.io/control-plane: "true" - name: azuredisk-csi-driver @@ -54,6 +48,10 @@ spec: version: 1.30.3 order: 2 values: | + controller: + cloudConfigSecretName: azure-cloud-provider + node: + cloudConfigSecretName: azure-cloud-provider linux: kubelet: "/var/lib/k0s/kubelet" machineTemplate: diff --git a/templates/cluster/azure-standalone-cp/templates/k0sworkerconfigtemplate.yaml b/templates/cluster/azure-standalone-cp/templates/k0sworkerconfigtemplate.yaml index 9072c62e3..562e4fdeb 100644 --- a/templates/cluster/azure-standalone-cp/templates/k0sworkerconfigtemplate.yaml +++ b/templates/cluster/azure-standalone-cp/templates/k0sworkerconfigtemplate.yaml @@ -9,10 +9,3 @@ spec: args: - --enable-cloud-provider - --kubelet-extra-args="--cloud-provider=external" - files: - - path: "/etc/kubernetes/azure.json" - permissions: "0644" - contentFrom: - secretRef: - key: worker-node-azure.json - name: {{ include "azuremachinetemplate.worker.name" . 
}}-azure-json diff --git a/templates/cluster/vsphere-hosted-cp/templates/k0smotroncontrolplane.yaml b/templates/cluster/vsphere-hosted-cp/templates/k0smotroncontrolplane.yaml index bf60832a4..375afe4bc 100644 --- a/templates/cluster/vsphere-hosted-cp/templates/k0smotroncontrolplane.yaml +++ b/templates/cluster/vsphere-hosted-cp/templates/k0smotroncontrolplane.yaml @@ -37,11 +37,7 @@ spec: namespace: kube-system values: | config: - enabled: true - vcenter: {{ .Values.vsphere.server }} - datacenter: {{ .Values.vsphere.datacenter }} - username: "{{ .Values.vsphere.username }}" - password: "{{ .Values.vsphere.password }}" + enabled: false daemonset: affinity: null tolerations: @@ -67,12 +63,7 @@ spec: namespace: kube-system values: | vcenterConfig: - enabled: true - clusterID: {{ include "cluster.name" . }} - vcenter: {{ .Values.vsphere.server }} - user: "{{ .Values.vsphere.username }}" - password: "{{ .Values.vsphere.password }}" - datacenters: {{ .Values.vsphere.datacenter }} + enabled: false controller: nodeAffinity: null node: diff --git a/templates/cluster/vsphere-hosted-cp/values.schema.json b/templates/cluster/vsphere-hosted-cp/values.schema.json index 81a5c0953..1d89a3f28 100644 --- a/templates/cluster/vsphere-hosted-cp/values.schema.json +++ b/templates/cluster/vsphere-hosted-cp/values.schema.json @@ -79,9 +79,7 @@ "datacenter", "datastore", "resourcePool", - "folder", - "username", - "password" + "folder" ], "properties": { "server": { @@ -101,12 +99,6 @@ }, "folder": { "type": "string" - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" } } }, diff --git a/templates/cluster/vsphere-hosted-cp/values.yaml b/templates/cluster/vsphere-hosted-cp/values.yaml index 94c91bef4..9af1941b7 100644 --- a/templates/cluster/vsphere-hosted-cp/values.yaml +++ b/templates/cluster/vsphere-hosted-cp/values.yaml @@ -20,8 +20,6 @@ vsphere: datastore: "" resourcePool: "" folder: "" - username: "" - password: "" controlPlaneEndpointIP: "" # vSphere 
machines parameters diff --git a/templates/cluster/vsphere-standalone-cp/templates/k0scontrolplane.yaml b/templates/cluster/vsphere-standalone-cp/templates/k0scontrolplane.yaml index 038fb1a93..0b18d1813 100644 --- a/templates/cluster/vsphere-standalone-cp/templates/k0scontrolplane.yaml +++ b/templates/cluster/vsphere-standalone-cp/templates/k0scontrolplane.yaml @@ -72,11 +72,7 @@ spec: namespace: kube-system values: | config: - enabled: true - vcenter: {{ .Values.vsphere.server }} - datacenter: {{ .Values.vsphere.datacenter }} - username: "{{ .Values.vsphere.username }}" - password: "{{ .Values.vsphere.password }}" + enabled: false daemonset: tolerations: - effect: NoSchedule @@ -101,12 +97,7 @@ spec: namespace: kube-system values: | vcenterConfig: - enabled: true - clusterID: {{ include "cluster.name" . }} - vcenter: {{ .Values.vsphere.server }} - user: "{{ .Values.vsphere.username }}" - password: "{{ .Values.vsphere.password }}" - datacenters: {{ .Values.vsphere.datacenter }} + enabled: false node: kubeletPath: /var/lib/k0s/kubelet defaultStorageClass: diff --git a/templates/cluster/vsphere-standalone-cp/values.schema.json b/templates/cluster/vsphere-standalone-cp/values.schema.json index c5207bd78..f2be23fb4 100644 --- a/templates/cluster/vsphere-standalone-cp/values.schema.json +++ b/templates/cluster/vsphere-standalone-cp/values.schema.json @@ -73,9 +73,7 @@ "datacenter", "datastore", "resourcePool", - "folder", - "username", - "password" + "folder" ], "properties": { "server": { @@ -95,12 +93,6 @@ }, "folder": { "type": "string" - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" } } }, diff --git a/templates/cluster/vsphere-standalone-cp/values.yaml b/templates/cluster/vsphere-standalone-cp/values.yaml index 0ee9c2d3d..67d95c472 100644 --- a/templates/cluster/vsphere-standalone-cp/values.yaml +++ b/templates/cluster/vsphere-standalone-cp/values.yaml @@ -20,8 +20,6 @@ vsphere: datastore: "" resourcePool: "" folder: "" - username: 
"" - password: "" controlPlaneEndpointIP: "" # vSphere machines parameters diff --git a/templates/provider/hmc/templates/rbac/controller/roles.yaml b/templates/provider/hmc/templates/rbac/controller/roles.yaml index 48c52a7cd..49c3f27a1 100644 --- a/templates/provider/hmc/templates/rbac/controller/roles.yaml +++ b/templates/provider/hmc/templates/rbac/controller/roles.yaml @@ -124,6 +124,8 @@ rules: resources: - awsclusters - azureclusters + - vsphereclusters + - vspheremachines verbs: - get - list @@ -192,6 +194,11 @@ rules: - get - patch - update +- apiGroups: + - "" + resources: + - secrets + verbs: {{ include "rbac.viewerVerbs" . | nindent 4 }} --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role