Commit
DRY validation implementation, add nuke docs
Signed-off-by: Kyle Squizzato <[email protected]>
squizzi committed Aug 30, 2024
1 parent 12118bb commit b9b2fc8
Showing 6 changed files with 30 additions and 12 deletions.
7 changes: 7 additions & 0 deletions docs/aws/nuke.md
@@ -0,0 +1,7 @@
+# Nuking AWS resources
+If you'd like to forcefully clean up all AWS resources created by HMC, you can
+use the following command:
+
+```
+CLUSTER_NAME=<deployment-name> make dev-aws-nuke
+```
1 change: 1 addition & 0 deletions docs/dev.md
@@ -59,3 +59,4 @@ export KUBECONFIG=~/.kube/config
 ```
 kubectl --kubeconfig ~/.kube/config get secret -n hmc-system <deployment-name>-kubeconfig -o=jsonpath={.data.value} | base64 -d > kubeconfig
+```
7 changes: 5 additions & 2 deletions test/deployment/validate_deleted.go
@@ -20,7 +20,6 @@ import (

"github.com/Mirantis/hmc/test/kubeclient"
"github.com/Mirantis/hmc/test/utils"
. "github.com/onsi/ginkgo/v2"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

@@ -49,7 +48,11 @@ func validateClusterDeleted(ctx context.Context, kc *kubeclient.KubeClient, clus
 	if cluster != nil {
 		phase, _, _ := unstructured.NestedString(cluster.Object, "status", "phase")
 		if phase != "Deleting" {
-			Fail(fmt.Sprintf("cluster %q exists, but is not in 'Deleting' phase", clusterName))
+			// TODO: We should have a threshold error system for situations
+			// like this; we probably don't want to wait out the full
+			// Eventually timeout, but we can't immediately fail the test
+			// either.
+			return fmt.Errorf("cluster %q exists, but is not in 'Deleting' phase", clusterName)
 		}
 
 		conditions, err := utils.GetConditionsFromUnstructured(cluster)
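The TODO above hints at a middle ground between failing the spec immediately and waiting out the whole Eventually. This is not part of the commit, but a minimal sketch of what such a threshold error system could look like; all names here are illustrative, not the repo's actual code:

```go
package deployment

import (
	"fmt"

	. "github.com/onsi/ginkgo/v2"
)

// thresholdTracker is a hypothetical helper: it tolerates consecutive
// validation errors up to a limit, then ends the spec early with Fail.
type thresholdTracker struct {
	consecutive int
	limit       int
}

// check returns transient errors to the surrounding Eventually until the
// limit is hit, at which point the spec is failed outright.
func (t *thresholdTracker) check(err error) error {
	if err == nil {
		t.consecutive = 0 // reset on any success
		return nil
	}
	t.consecutive++
	if t.consecutive >= t.limit {
		Fail(fmt.Sprintf("giving up after %d consecutive failures: %v", t.consecutive, err))
	}
	return err
}
```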
4 changes: 3 additions & 1 deletion test/deployment/validate_deployed.go
@@ -58,7 +58,9 @@ func VerifyProviderDeployed(ctx context.Context, kc *kubeclient.KubeClient, clus
 // In some cases it may be necessary to end the Eventually block early if the
 // resource will never reach a ready state; in these instances Ginkgo's Fail
 // should be used to end the spec early.
-func verifyProviderAction(ctx context.Context, kc *kubeclient.KubeClient, clusterName string, resourcesToValidate map[string]resourceValidationFunc, order []string) error {
+func verifyProviderAction(
+	ctx context.Context, kc *kubeclient.KubeClient, clusterName string,
+	resourcesToValidate map[string]resourceValidationFunc, order []string) error {
 	// Sequentially validate each resource type, only returning the first error
 	// so as not to move on to the next resource type until the first is resolved.
 	// We use []string here since order is important.
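For context, not part of the commit: the doc comment above describes the contract between verifyProviderAction and Gomega's Eventually. A rough sketch of how a caller might drive it, assuming Gomega's dot-import and the time package; the validator names and the timeout and polling values are assumptions, not the repo's actual values:

```go
// Returning a non-nil error makes Eventually retry; a Fail inside a
// validator ends the spec immediately, as the doc comment describes.
Eventually(func() error {
	return verifyProviderAction(ctx, kc, clusterName,
		map[string]resourceValidationFunc{
			"clusters": validateCluster,  // hypothetical validators
			"machines": validateMachines,
		},
		[]string{"clusters", "machines"}, // validated in this order
	)
}).WithTimeout(30 * time.Minute).WithPolling(10 * time.Second).Should(Succeed())
```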
21 changes: 13 additions & 8 deletions test/e2e/e2e_test.go
@@ -193,7 +193,8 @@ func verifyControllerUp(kc *kubeclient.KubeClient, labelSelector string, name st

 // collectLogArtifacts collects log output from each of the HMC controller,
 // CAPI controller and the provider controller(s) as well as output from clusterctl
-// and stores them in the test/e2e directory as artifacts.
+// and stores them in the test/e2e directory as artifacts. If it fails, it
+// writes a warning message to the GinkgoWriter but does not fail the test.
 // We could do this at the end or we could use Kubernetes' CopyPodLogs from
 // https://github.com/kubernetes/kubernetes/blob/v1.31.0/test/e2e/storage/podlogs/podlogs.go#L88
 // to stream the logs to GinkgoWriter during the test.
@@ -216,24 +217,28 @@ func collectLogArtifacts(kc *kubeclient.KubeClient, clusterName string, provider
 				TailLines: ptr.To(int64(1000)),
 			})
 			podLogs, err := req.Stream(context.Background())
-			Expect(err).NotTo(HaveOccurred(), "failed to get log stream for pod %s", pod.Name)
-			defer Expect(podLogs.Close()).NotTo(HaveOccurred())
+			warnError(fmt.Errorf("failed to get log stream for pod %s: %w", pod.Name, err))
+			defer podLogs.Close() //nolint:errcheck
 
 			output, err := os.Create(fmt.Sprintf("test/e2e/%s.log", pod.Name))
-			Expect(err).NotTo(HaveOccurred(), "failed to create log file for pod %s", pod.Name)
-			defer Expect(output.Close()).NotTo(HaveOccurred())
+			warnError(fmt.Errorf("failed to create log file for pod %s: %w", pod.Name, err))
+			defer output.Close() //nolint:errcheck
 
 			r := bufio.NewReader(podLogs)
 			_, err = r.WriteTo(output)
-			Expect(err).NotTo(HaveOccurred(), "failed to write log file for pod %s", pod.Name)
+			warnError(fmt.Errorf("failed to write log file for pod %s: %w", pod.Name, err))
 		}
 	}
 
 	cmd := exec.Command("./bin/clusterctl",
 		"describe", "cluster", clusterName, "--show-conditions=all")
 	output, err := utils.Run(cmd)
-	Expect(err).NotTo(HaveOccurred(), "failed to get clusterctl log")
+	warnError(fmt.Errorf("failed to get clusterctl log: %w", err))
 
 	err = os.WriteFile(filepath.Join("test/e2e", "clusterctl.log"), output, 0644)
-	Expect(err).NotTo(HaveOccurred(), "failed to write clusterctl log")
+	warnError(fmt.Errorf("failed to write clusterctl log: %w", err))
 }
+
+func warnError(err error) {
+	_, _ = fmt.Fprintf(GinkgoWriter, "Warning: %v\n", err)
+}
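Not part of the commit: the comment above this function mentions streaming logs to GinkgoWriter during the test as an alternative to collecting them at the end. A rough sketch of that approach with client-go, where the helper name, namespace handling, and clientset wiring are assumptions rather than the repo's actual code:

```go
package e2e

import (
	"context"
	"fmt"
	"io"

	. "github.com/onsi/ginkgo/v2"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
)

// streamPodLogs follows a pod's logs in the background and copies them to
// GinkgoWriter; errors are surfaced via warnError rather than failing the
// test, matching the pattern this commit introduces.
func streamPodLogs(ctx context.Context, clientset *kubernetes.Clientset, namespace, podName string) {
	req := clientset.CoreV1().Pods(namespace).GetLogs(podName, &corev1.PodLogOptions{
		Follow: true,
	})
	podLogs, err := req.Stream(ctx)
	if err != nil {
		warnError(fmt.Errorf("failed to stream logs for pod %s: %w", podName, err))
		return
	}
	go func() {
		defer podLogs.Close() //nolint:errcheck
		// Copy until the pod stops or the stream is closed.
		_, _ = io.Copy(GinkgoWriter, podLogs)
	}()
}
```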
2 changes: 1 addition & 1 deletion test/utils/utils.go
@@ -141,7 +141,7 @@ func GetConditionsFromUnstructured(unstrObj *unstructured.Unstructured) ([]metav
 		return nil, fmt.Errorf("failed to get status conditions for %s: %s: %w", objKind, objName, err)
 	}
 
-	var conditions []metav1.Condition
+	conditions := make([]metav1.Condition, 0, len(unstrConditions))
 
 	for _, condition := range unstrConditions {
 		conditionMap, ok := condition.(map[string]interface{})
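An aside, not from the commit: the make call above preallocates the slice's backing array in one go instead of letting append grow it repeatedly, and it yields an empty rather than nil slice. A generic illustration:

```go
package main

import "fmt"

func main() {
	vals := []any{"a", "b", 3} // stand-in for unstrConditions
	// Capacity hint: one allocation up front, length starts at 0.
	out := make([]string, 0, len(vals))
	for _, v := range vals {
		if s, ok := v.(string); ok {
			out = append(out, s) // never reallocates: cap covers the worst case
		}
	}
	fmt.Println(out) // [a b]
}
```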
