[main] Add prebootstrap provisioning tests (rancher#48098)
* add prebootstrap provisioning test + gha run specifically for them

* extract common functionality out

* switch to using CATTLE_FEATURES passed through to job
thatmidwesterncoder authored Dec 4, 2024
1 parent a12e86e commit 966420b
Showing 3 changed files with 196 additions and 1 deletion.
9 changes: 8 additions & 1 deletion .github/workflows/provisioning-tests.yml
@@ -26,7 +26,13 @@ jobs:
           V2PROV_TEST_RUN_REGEX: "^Test_Operation_SetA_.*$"
         - V2PROV_TEST_DIST: "rke2"
           V2PROV_TEST_RUN_REGEX: "^Test_Operation_SetB_.*$"
-    name: Provisioning tests
+        - V2PROV_TEST_DIST: "rke2"
+          V2PROV_TEST_RUN_REGEX: "^Test_PreBootstrap_.*$"
+          CATTLE_FEATURES: "provisioningprebootstrap=true"
+        - V2PROV_TEST_DIST: "k3s"
+          V2PROV_TEST_RUN_REGEX: "^Test_PreBootstrap_.*$"
+          CATTLE_FEATURES: "provisioningprebootstrap=true"
+    name: Provisioning tests
     runs-on: runs-on,runner=16cpu-linux-x64,ram=64,run-id=${{ github.run_id }}
     steps:
       - name: Checkout
@@ -47,3 +53,4 @@ jobs:
           DRONE_BUILD_EVENT: "${{ github.event_name }}"
           V2PROV_TEST_RUN_REGEX: "${{ matrix.V2PROV_TEST_RUN_REGEX }}"
           V2PROV_TEST_DIST: "${{ matrix.V2PROV_TEST_DIST }}"
+          CATTLE_FEATURES: "${{ matrix.CATTLE_FEATURES }}"
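
For context: CATTLE_FEATURES is conventionally a comma-separated list of name=bool pairs, so "provisioningprebootstrap=true" enables a single feature flag. A minimal sketch of parsing that format (illustrative only, not Rancher's actual parser; parseFeatures and the flag name "foo" are hypothetical):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseFeatures splits a CATTLE_FEATURES-style string such as
// "provisioningprebootstrap=true,foo=false" into a name->bool map,
// skipping malformed entries.
func parseFeatures(s string) map[string]bool {
	features := map[string]bool{}
	for _, pair := range strings.Split(s, ",") {
		name, value, ok := strings.Cut(pair, "=")
		if !ok {
			continue
		}
		enabled, err := strconv.ParseBool(strings.TrimSpace(value))
		if err != nil {
			continue
		}
		features[strings.TrimSpace(name)] = enabled
	}
	return features
}

func main() {
	fmt.Println(parseFeatures("provisioningprebootstrap=true"))
	// Output: map[provisioningprebootstrap:true]
}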
1 change: 1 addition & 0 deletions scripts/provisioning-tests
@@ -58,6 +58,7 @@ export DIST=${V2PROV_TEST_DIST}
 export SOME_K8S_VERSION=${SOME_K8S_VERSION}
 export TB_ORG=${TB_ORG}
 export CATTLE_CHART_DEFAULT_URL=${CATTLE_CHART_DEFAULT_URL}
+export CATTLE_FEATURES=${CATTLE_FEATURES}
 
 # Tell Rancher to use the recently-built Rancher cluster agent image. This image is built as part of CI and will be
 # copied to the in-cluster registry during test setup below.
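
The re-export presumably matters because this script sets up the Rancher environment under test: the CATTLE_FEATURES value supplied by the workflow matrix has to be exported again here so it reaches that environment rather than staying confined to the CI job.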
187 changes: 187 additions & 0 deletions tests/v2prov/tests/prebootstrap/prebootstrap_test.go
@@ -0,0 +1,187 @@
package prebootstrap

import (
"context"
"encoding/json"
"fmt"
"testing"

provisioningv1 "github.com/rancher/rancher/pkg/apis/provisioning.cattle.io/v1"
rkev1 "github.com/rancher/rancher/pkg/apis/rke.cattle.io/v1"

"github.com/rancher/rancher/pkg/capr"
"github.com/rancher/rancher/tests/v2prov/clients"
"github.com/rancher/rancher/tests/v2prov/cluster"
"github.com/rancher/rancher/tests/v2prov/systemdnode"
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/clientcmd"
)

func Test_PreBootstrap_Provisioning_Flow(t *testing.T) {
t.Parallel()

tests := []struct {
name string
cluster *provisioningv1.Cluster
}{
{
name: "Generic_Secret_Sync",
cluster: &provisioningv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "test-prebootstrap-secret-sync",
},
Spec: provisioningv1.ClusterSpec{
RKEConfig: &provisioningv1.RKEConfig{},
},
},
},
{
name: "ACE",
cluster: &provisioningv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "test-prebootstrap-ace",
},
Spec: provisioningv1.ClusterSpec{
RKEConfig: &provisioningv1.RKEConfig{},
LocalClusterAuthEndpoint: rkev1.LocalClusterAuthEndpoint{
Enabled: true,
},
},
},
},
}

// Run each test case in parallel
for _, tt := range tests {
tt := tt // Capture range variable
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
prebootstrapSetupAndCheck(t, tt.cluster)
})
}
}

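// prebootstrapSetupAndCheck provisions a custom cluster, creates a secret
// annotated for pre-bootstrap synchronization, and verifies the secret is
// synced into the downstream cluster with its templated values substituted.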
func prebootstrapSetupAndCheck(t *testing.T, c *provisioningv1.Cluster) {
client, err := clients.New()
if err != nil {
t.Fatal(err)
}
defer client.Close()

c, err = cluster.New(client, c)
if err != nil {
t.Fatal(err)
}

feature, err := client.Mgmt.Feature().Get("provisioningprebootstrap", metav1.GetOptions{})
if err != nil {
t.Fatal(err)
}

	// Ensure the feature flag is enabled before running the test.
	// .Status.Default is false by default (hardcoded) but is updated to true when the flag is
	// set via the environment; if the user enables it explicitly instead, it shows up in .Spec.Value.
if !feature.Status.Default && (feature.Spec.Value != nil && !*feature.Spec.Value) {
t.Fatalf("provisioningprebootstrap flag needs to be enabled for this test to run")
}

// Create a secret with sync annotations and data
_, err = client.Core.Secret().Create(&v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "sync-me",
Namespace: c.Namespace,
Annotations: map[string]string{
"provisioning.cattle.io/sync-bootstrap": "true",
"provisioning.cattle.io/sync-target-namespace": "kube-system",
"provisioning.cattle.io/sync-target-name": "hello-ive-been-synced",
"rke.cattle.io/object-authorized-for-clusters": c.Name,
},
},
StringData: map[string]string{
"hello": "world",
// This is to test that the value gets swapped out properly when synchronized.
"clusterId": "{{clusterId}}",
},
})
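	// Clean up the secret when the test finishes; the deletion error is ignored.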
defer client.Core.Secret().Delete(c.Namespace, "sync-me", &metav1.DeleteOptions{})

if err != nil {
t.Fatal(err)
}

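	// Build the registration command a custom node runs to join this cluster.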
command, err := cluster.CustomCommand(client, c)
if err != nil {
t.Fatal(err)
}

assert.NotEmpty(t, command)

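	// Start a systemd-based node that joins with the etcd, control plane, and
	// worker roles, plus a custom label used to verify label propagation below.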
_, err = systemdnode.New(client, c.Namespace, "#!/usr/bin/env sh\n"+command+" --worker --etcd --controlplane --label foobar=bazqux", map[string]string{"custom-cluster-name": c.Name}, nil)
if err != nil {
t.Fatal(err)
}

_, err = cluster.WaitForCreate(client, c)
if err != nil {
t.Fatal(err)
}

machines, err := cluster.Machines(client, c)
if err != nil {
t.Fatal(err)
}

assert.Len(t, machines.Items, 1)
m := machines.Items[0]
assert.Equal(t, m.Labels[capr.WorkerRoleLabel], "true")
assert.Equal(t, m.Labels[capr.ControlPlaneRoleLabel], "true")
assert.Equal(t, m.Labels[capr.EtcdRoleLabel], "true")
assert.NotNil(t, machines.Items[0].Spec.Bootstrap.ConfigRef)

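	// The machine's plan secret carries the node labels as a JSON-encoded annotation.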
secret, err := client.Core.Secret().Get(machines.Items[0].Namespace, capr.PlanSecretFromBootstrapName(machines.Items[0].Spec.Bootstrap.ConfigRef.Name), metav1.GetOptions{})
assert.NoError(t, err)

assert.NotEmpty(t, secret.Annotations[capr.LabelsAnnotation])
var labels map[string]string
if err := json.Unmarshal([]byte(secret.Annotations[capr.LabelsAnnotation]), &labels); err != nil {
t.Error(err)
}
assert.Equal(t, labels, map[string]string{"cattle.io/os": "linux", "foobar": "bazqux"})

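	// Provisioning should not have generated an excessive number of update conflicts.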
err = cluster.EnsureMinimalConflictsWithThreshold(client, c, cluster.SaneConflictMessageThreshold)
assert.NoError(t, err)

// Retrieve the kubeconfig for the downstream cluster
sec, err := client.Core.Secret().Get(c.Namespace, fmt.Sprintf("%s-kubeconfig", c.Name), metav1.GetOptions{})
if err != nil {
t.Fatalf("failed to get kubeconfig for cluster %s", c.Name)
}

kubeconfig, ok := sec.Data["value"]
assert.True(t, ok)

// Create a client for the downstream cluster using the kubeconfig
downstreamConfig, err := clientcmd.NewClientConfigFromBytes(kubeconfig)
assert.NoError(t, err)
downstreamClient, err := clients.NewForConfig(context.Background(), downstreamConfig)
if err != nil {
t.Fatalf("failed to create downstream cluster client")
}

// Retrieve the synchronized secret from the downstream cluster.
synced, err := downstreamClient.Core.Secret().Get("kube-system", "hello-ive-been-synced", metav1.GetOptions{})
if err != nil {
t.Fatalf("failed to get synchronized downstream secret: %v", err)
}

// Retrieve the latest state of the cluster object
c, err = client.Provisioning.Cluster().Get(c.Namespace, c.Name, metav1.GetOptions{})
assert.NoError(t, err)

// ..and finally, assert the synchronized secret has the expected data
assert.Equal(t, 2, len(synced.Data))
assert.Equal(t, string(synced.Data["hello"]), "world")
assert.Equal(t, string(synced.Data["clusterId"]), c.Status.ClusterName)
}
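
Judging from the workflow matrix above, running just these tests locally would presumably look something like V2PROV_TEST_DIST=k3s V2PROV_TEST_RUN_REGEX='^Test_PreBootstrap_.*$' CATTLE_FEATURES=provisioningprebootstrap=true ./scripts/provisioning-tests, mirroring the environment CI passes through.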
