Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

cli: enable constellation apply to create new clusters #2549

Merged
merged 22 commits into from
Nov 20, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 7 additions & 2 deletions .github/actions/constellation_create/action.yml
Original file line number Diff line number Diff line change
Expand Up @@ -138,7 +138,12 @@ runs:
if: inputs.selfManagedInfra != 'true'
shell: bash
run: |
constellation create -y --debug --tf-log=DEBUG
# TODO(v2.14): Remove workaround for CLIs not supporting apply command
cmd='apply --skip-phases="init,attestationconfig,certsans,helm,image,k8s"'
if constellation --help | grep -q init; then
cmd=create
fi
constellation $cmd -y --debug --tf-log=DEBUG

- name: Constellation create (self-managed)
if: inputs.selfManagedInfra == 'true'
Expand All @@ -163,7 +168,7 @@ runs:
shell: bash
run: |
# TODO(v2.14): Remove workaround for CLIs not supporting apply command
cmd=apply
cmd="apply --skip-phases=infrastructure"
if constellation --help | grep -q init; then
cmd=init
fi
Expand Down
5 changes: 5 additions & 0 deletions cli/internal/cloudcmd/apply.go
Original file line number Diff line number Diff line change
Expand Up @@ -121,6 +121,11 @@ func (a *Applier) RestoreWorkspace() error {
return restoreBackup(a.fileHandler, a.workingDir, filepath.Join(a.backupDir, constants.TerraformUpgradeBackupDir))
}

// WorkingDirIsEmpty reports whether the Applier's working directory
// contains no entries, delegating the check to the Applier's file handler.
func (a *Applier) WorkingDirIsEmpty() (bool, error) {
	empty, err := a.fileHandler.IsEmpty(a.workingDir)
	return empty, err
}

func (a *Applier) terraformApplyVars(ctx context.Context, conf *config.Config) (terraform.Variables, error) {
imageRef, err := a.imageFetcher.FetchReference(
ctx,
Expand Down
7 changes: 4 additions & 3 deletions cli/internal/cloudcmd/tfplan.go
Original file line number Diff line number Diff line change
Expand Up @@ -53,9 +53,9 @@ func plan(
return false, fmt.Errorf("terraform plan: %w", err)
}

// If we are planning in a new workspace, we don't want to show a diff
// If we are planning in a new workspace, we don't want to show the plan
if isNewWorkspace {
return false, nil
return hasDiff, nil
daniel-weisse marked this conversation as resolved.
Show resolved Hide resolved
}

if hasDiff {
Expand All @@ -67,14 +67,15 @@ func plan(
}

// restoreBackup replaces the existing Terraform workspace with the backup.
// If no backup exists, this function simply removes workingDir.
func restoreBackup(fileHandler file.Handler, workingDir, backupDir string) error {
if err := fileHandler.RemoveAll(workingDir); err != nil {
return fmt.Errorf("removing existing workspace: %w", err)
}
if err := fileHandler.CopyDir(
backupDir,
workingDir,
); err != nil {
); err != nil && !errors.Is(err, os.ErrNotExist) { // ignore not found error because backup does not exist for new clusters
return fmt.Errorf("replacing terraform workspace with backup: %w", err)
}

Expand Down
33 changes: 22 additions & 11 deletions cli/internal/cloudcmd/tfplan_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@ package cloudcmd
import (
"context"
"io"
"os"
"path/filepath"
"testing"

Expand Down Expand Up @@ -123,40 +124,42 @@ func TestTFPlan(t *testing.T) {
func TestRestoreBackup(t *testing.T) {
existingWorkspace := "foo"
backupDir := "bar"
testFile := "file"

testCases := map[string]struct {
prepareFs func(require *require.Assertions) file.Handler
wantErr bool
prepareFs func(require *require.Assertions) file.Handler
wantRemoveWorkingDir bool
wantErr bool
}{
"success": {
prepareFs: func(require *require.Assertions) file.Handler {
fs := file.NewHandler(afero.NewMemMapFs())
require.NoError(fs.MkdirAll(existingWorkspace))
require.NoError(fs.MkdirAll(backupDir))
require.NoError(fs.Write(filepath.Join(existingWorkspace, testFile), []byte{}, file.OptMkdirAll))
require.NoError(fs.Write(filepath.Join(backupDir, testFile), []byte{}, file.OptMkdirAll))
return fs
},
},
"existing workspace does not exist": {
"only backup exists": {
prepareFs: func(require *require.Assertions) file.Handler {
fs := file.NewHandler(afero.NewMemMapFs())
require.NoError(fs.MkdirAll(backupDir))
require.NoError(fs.Write(filepath.Join(backupDir, testFile), []byte{}, file.OptMkdirAll))
return fs
},
},
"backup dir does not exist": {
"only existingWorkspace exists": {
prepareFs: func(require *require.Assertions) file.Handler {
fs := file.NewHandler(afero.NewMemMapFs())
require.NoError(fs.MkdirAll(existingWorkspace))
require.NoError(fs.Write(filepath.Join(existingWorkspace, testFile), []byte{}, file.OptMkdirAll))
return fs
},
wantErr: true,
wantRemoveWorkingDir: true,
},
"read only file system": {
prepareFs: func(require *require.Assertions) file.Handler {
memFS := afero.NewMemMapFs()
fs := file.NewHandler(memFS)
require.NoError(fs.MkdirAll(existingWorkspace))
require.NoError(fs.MkdirAll(backupDir))
require.NoError(fs.Write(filepath.Join(existingWorkspace, testFile), []byte{}, file.OptMkdirAll))
require.NoError(fs.Write(filepath.Join(backupDir, testFile), []byte{}, file.OptMkdirAll))
return file.NewHandler(afero.NewReadOnlyFs(memFS))
},
wantErr: true,
Expand All @@ -174,6 +177,14 @@ func TestRestoreBackup(t *testing.T) {
return
}
assert.NoError(err)
_, err = fs.Stat(filepath.Join(backupDir, testFile))
assert.ErrorIs(err, os.ErrNotExist)
_, err = fs.Stat(filepath.Join(existingWorkspace, testFile))
if tc.wantRemoveWorkingDir {
assert.ErrorIs(err, os.ErrNotExist)
} else {
assert.NoError(err)
}
})
}
}
Expand Down
Loading