Refactor SDK #228
Closed · wants to merge 2 commits
4 changes: 2 additions & 2 deletions Makefile
@@ -16,7 +16,7 @@ init-examples:

generate-sdk:
@echo "==> Generating castai sdk client"
@API_TAGS=ExternalClusterAPI,PoliciesAPI,NodeConfigurationAPI,NodeTemplatesAPI,AuthTokenAPI,ScheduledRebalancingAPI,InventoryAPI,UsersAPI,OperationsAPI go generate castai/sdk/generate.go
go generate castai/sdk/generate.go

# The following command also rewrites existing documentation
generate-docs:
@@ -46,4 +46,4 @@ validate-terraform-examples:
terraform validate; \
cd -; \
done \
done
done
Binary file added castai/__debug_bin1667702632
Binary file not shown.
13 changes: 7 additions & 6 deletions castai/cluster.go
@@ -29,7 +29,7 @@ func resourceCastaiClusterDelete(ctx context.Context, data *schema.ResourceData,

err := retry.RetryContext(ctx, data.Timeout(schema.TimeoutDelete), func() *retry.RetryError {
clusterResponse, err := client.ExternalClusterAPIGetClusterWithResponse(ctx, clusterId)
if checkErr := sdk.CheckOKResponse(clusterResponse, err); checkErr != nil {
if checkErr := sdk.CheckOKResponse(clusterResponse.HTTPResponse, err); checkErr != nil {
return retry.NonRetryableError(err)
}

@@ -45,7 +45,8 @@ func resourceCastaiClusterDelete(ctx context.Context, data *schema.ResourceData,

triggerDelete := func() *retry.RetryError {
log.Printf("[INFO] Deleting cluster.")
if err := sdk.CheckResponseNoContent(client.ExternalClusterAPIDeleteClusterWithResponse(ctx, clusterId)); err != nil {
resp, err := client.ExternalClusterAPIDeleteClusterWithResponse(ctx, clusterId)
if err := sdk.CheckResponseNoContent(resp.HTTPResponse, err); err != nil {
return retry.NonRetryableError(err)
}
return retry.RetryableError(fmt.Errorf("triggered cluster deletion"))
@@ -75,7 +76,7 @@ func resourceCastaiClusterDelete(ctx context.Context, data *schema.ResourceData,
DeleteProvisionedNodes: getOptionalBool(data, FieldDeleteNodesOnDisconnect, false),
KeepKubernetesResources: toPtr(true),
})
if checkErr := sdk.CheckOKResponse(response, err); checkErr != nil {
if checkErr := sdk.CheckOKResponse(response.HTTPResponse, err); checkErr != nil {
return retry.NonRetryableError(err)
}

@@ -96,7 +97,7 @@ func resourceCastaiClusterDelete(ctx context.Context, data *schema.ResourceData,
return nil
}

func fetchClusterData(ctx context.Context, client *sdk.ClientWithResponses, clusterID string) (*sdk.ExternalClusterAPIGetClusterResponse, error) {
func fetchClusterData(ctx context.Context, client sdk.ClientWithResponsesInterface, clusterID string) (*sdk.ExternalClusterAPIGetClusterResponse, error) {
resp, err := client.ExternalClusterAPIGetClusterWithResponse(ctx, clusterID)
if err != nil {
return nil, err
@@ -107,7 +108,7 @@ func fetchClusterData(ctx context.Context, client *sdk.ClientWithResponses, clus
return nil, nil
}

if checkErr := sdk.CheckOKResponse(resp, err); checkErr != nil {
if checkErr := sdk.CheckOKResponse(resp.HTTPResponse, err); checkErr != nil {
return nil, checkErr
}

@@ -119,7 +120,7 @@ func fetchClusterData(ctx context.Context, client *sdk.ClientWithResponses, clus
return resp, nil
}

func createClusterToken(ctx context.Context, client *sdk.ClientWithResponses, clusterID string) (string, error) {
func createClusterToken(ctx context.Context, client sdk.ClientWithResponsesInterface, clusterID string) (string, error) {
resp, err := client.ExternalClusterAPICreateClusterTokenWithResponse(ctx, clusterID)
if err != nil {
return "", fmt.Errorf("creating cluster token: %w", err)
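The recurring pattern of this refactor is visible above: helpers now accept the generated sdk.ClientWithResponsesInterface instead of the concrete *sdk.ClientWithResponses, and the response-checking helpers are handed the embedded *http.Response (resp.HTTPResponse) rather than the typed response itself. The sketch below shows one way such a CheckOKResponse helper could look; it is an illustration inferred from the call sites in this diff, not the actual implementation in castai/sdk.

```go
package sdk

import (
	"fmt"
	"io"
	"net/http"
)

// CheckOKResponse validates a raw *http.Response plus the transport error,
// matching the shape the refactored call sites pass in (resp.HTTPResponse, err).
// Error wording and exact behaviour of the real helper may differ.
func CheckOKResponse(resp *http.Response, requestErr error) error {
	if requestErr != nil {
		return fmt.Errorf("api request error: %w", requestErr)
	}
	if resp == nil {
		return fmt.Errorf("api request error: nil response")
	}
	if resp.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(resp.Body)
		_ = resp.Body.Close()
		return fmt.Errorf("expected status code %d, received: status=%d body=%s",
			http.StatusOK, resp.StatusCode, string(body))
	}
	return nil
}
```

Accepting the interface rather than the concrete client is also what allows fetchClusterData and createClusterToken to be exercised against the gomock-generated client used in the tests further down.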
2 changes: 1 addition & 1 deletion castai/data_source_eks_cluster_userarn.go
@@ -40,7 +40,7 @@ func dataSourceCastaiEKSUserARN(ctx context.Context, data *schema.ResourceData,
clusterID := data.Get(EKSClusterUserARNFieldClusterID).(string)

resp, err := client.ExternalClusterAPIGetAssumeRoleUserWithResponse(ctx, clusterID)
if checkErr := sdk.CheckOKResponse(resp, err); checkErr != nil {
if checkErr := sdk.CheckOKResponse(resp.HTTPResponse, err); checkErr != nil {
return diag.FromErr(checkErr)
}

6 changes: 3 additions & 3 deletions castai/data_source_organization.go
@@ -32,15 +32,15 @@
func dataSourceOrganizationRead(ctx context.Context, data *schema.ResourceData, meta interface{}) diag.Diagnostics {
client := meta.(*ProviderConfig).api

resp, err := client.ListOrganizationsWithResponse(ctx)
if err := sdk.CheckOKResponse(resp, err); err != nil {
resp, err := client.UsersAPIListOrganizationsWithResponse(ctx, &sdk.UsersAPIListOrganizationsParams{})

Check failure on line 35 in castai/data_source_organization.go (GitHub Actions / Build): client.UsersAPIListOrganizationsWithResponse undefined (type sdk.ClientWithResponsesInterface has no field or method UsersAPIListOrganizationsWithResponse)
Check failure on line 35 in castai/data_source_organization.go (GitHub Actions / Build): undefined: sdk.UsersAPIListOrganizationsParams
if err := sdk.CheckOKResponse(resp.HTTPResponse, err); err != nil {
return diag.FromErr(fmt.Errorf("retrieving organizations: %w", err))
}

organizationName := data.Get(FieldOrganizationName).(string)

var organizationID string
for _, organization := range resp.JSON200.Organizations {
for _, organization := range *resp.JSON200.Organizations {
if organizationName == organization.Name {
organizationID = *organization.Id
break
4 changes: 2 additions & 2 deletions castai/data_source_organization_test.go
@@ -48,7 +48,7 @@ func TestOrganizationDataSourceRead(t *testing.T) {
}`)))

mockClient.EXPECT().
ListOrganizations(gomock.Any()).
UsersAPIListOrganizations(gomock.Any(), gomock.Any()).
Return(&http.Response{StatusCode: 200, Body: body, Header: map[string][]string{"Content-Type": {"json"}}}, nil)

state := terraform.NewInstanceStateShimmedFromValue(cty.ObjectVal(map[string]cty.Value{}), 0)
@@ -97,7 +97,7 @@ func TestOrganizationDataSourceReadError(t *testing.T) {
}`)))

mockClient.EXPECT().
ListOrganizations(gomock.Any()).
UsersAPIListOrganizations(gomock.Any(), gomock.Any()).
Return(&http.Response{StatusCode: 200, Body: body, Header: map[string][]string{"Content-Type": {"json"}}}, nil)

state := terraform.NewInstanceStateShimmedFromValue(cty.ObjectVal(map[string]cty.Value{}), 0)
2 changes: 1 addition & 1 deletion castai/provider.go
@@ -12,7 +12,7 @@ import (
)

type ProviderConfig struct {
api *sdk.ClientWithResponses
api sdk.ClientWithResponsesInterface
}

func Provider(version string) *schema.Provider {
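Switching the ProviderConfig field to the interface type is the enabler for the test changes below: anything that satisfies sdk.ClientWithResponsesInterface can now be injected, including the gomock-generated mock. A hedged sketch of how the provider's configure step could populate the field with the real generated client follows; the helper name, header name and module path are assumptions, since provider configuration is not part of this diff.

```go
package castai

import (
	"context"
	"fmt"
	"net/http"

	"github.com/castai/terraform-provider-castai/castai/sdk"
)

// configureProvider is illustrative only; it shows that the interface-typed
// field can still be populated with the concrete generated client.
func configureProvider(apiURL, apiToken string) (*ProviderConfig, error) {
	// NewClientWithResponses and WithRequestEditorFn are the constructors
	// oapi-codegen generates for the sdk package.
	client, err := sdk.NewClientWithResponses(
		apiURL,
		sdk.WithRequestEditorFn(func(ctx context.Context, req *http.Request) error {
			req.Header.Set("X-API-Key", apiToken) // header name assumed
			return nil
		}),
	)
	if err != nil {
		return nil, fmt.Errorf("creating CAST AI API client: %w", err)
	}

	// *sdk.ClientWithResponses satisfies sdk.ClientWithResponsesInterface, so
	// production code keeps the generated client while tests can substitute a
	// mock implementation.
	return &ProviderConfig{api: client}, nil
}
```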
6 changes: 3 additions & 3 deletions castai/resource_aks_cluster.go
@@ -154,7 +154,7 @@ func resourceCastaiAKSClusterCreate(ctx context.Context, data *schema.ResourceDa
log.Printf("[INFO] Registering new external AKS cluster: %#v", req)

resp, err := client.ExternalClusterAPIRegisterClusterWithResponse(ctx, req)
if checkErr := sdk.CheckOKResponse(resp, err); checkErr != nil {
if checkErr := sdk.CheckOKResponse(resp.HTTPResponse, err); checkErr != nil {
return diag.FromErr(checkErr)
}

Expand Down Expand Up @@ -186,7 +186,7 @@ func resourceCastaiAKSClusterUpdate(ctx context.Context, data *schema.ResourceDa
return resourceCastaiAKSClusterRead(ctx, data, meta)
}

func updateAKSClusterSettings(ctx context.Context, data *schema.ResourceData, client *sdk.ClientWithResponses) error {
func updateAKSClusterSettings(ctx context.Context, data *schema.ResourceData, client sdk.ClientWithResponsesInterface) error {
if !data.HasChanges(
FieldAKSClusterClientID,
FieldAKSClusterClientSecret,
@@ -217,7 +217,7 @@ func updateAKSClusterSettings(ctx context.Context, data *schema.ResourceData, cl
b := backoff.WithContext(backoff.WithMaxRetries(backoff.NewConstantBackOff(10*time.Second), 30), ctx)
if err = backoff.Retry(func() error {
response, err := client.ExternalClusterAPIUpdateClusterWithResponse(ctx, data.Id(), req)
return sdk.CheckOKResponse(response, err)
return sdk.CheckOKResponse(response.HTTPResponse, err)
}, b); err != nil {
return fmt.Errorf("updating cluster configuration: %w", err)
}
12 changes: 6 additions & 6 deletions castai/resource_autoscaler.go
@@ -109,18 +109,18 @@ func resourceCastaiAutoscalerUpdate(ctx context.Context, data *schema.ResourceDa
return nil
}

func getCurrentPolicies(ctx context.Context, client *sdk.ClientWithResponses, clusterId string) ([]byte, error) {
func getCurrentPolicies(ctx context.Context, client sdk.ClientWithResponsesInterface, clusterId string) ([]byte, error) {
log.Printf("[INFO] Getting cluster autoscaler information.")

resp, err := client.PoliciesAPIGetClusterPolicies(ctx, clusterId)
resp, err := client.PoliciesAPIGetClusterPoliciesWithResponse(ctx, clusterId)
if err != nil {
return nil, err
} else if resp.StatusCode == http.StatusNotFound {
} else if resp.StatusCode() == http.StatusNotFound {
return nil, fmt.Errorf("cluster %s policies do not exist at CAST AI", clusterId)
}

bytes, err := io.ReadAll(resp.Body)
defer resp.Body.Close()
bytes, err := io.ReadAll(resp.HTTPResponse.Body)
defer resp.HTTPResponse.Body.Close()
if err != nil {
return nil, fmt.Errorf("reading response body: %w", err)
}
@@ -161,7 +161,7 @@ func upsertPolicies(ctx context.Context, meta interface{}, clusterId string, cha
client := meta.(*ProviderConfig).api

resp, err := client.PoliciesAPIUpsertClusterPoliciesWithBodyWithResponse(ctx, clusterId, "application/json", bytes.NewReader([]byte(changedPoliciesJSON)))
if checkErr := sdk.CheckOKResponse(resp, err); checkErr != nil {
if checkErr := sdk.CheckOKResponse(resp.HTTPResponse, err); checkErr != nil {
return checkErr
}

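getCurrentPolicies now calls the WithResponse variant, so it receives a typed response whose StatusCode() helper and embedded HTTPResponse it inspects, instead of a bare *http.Response. For orientation, this is roughly the shape oapi-codegen gives such a response type; the field names follow the usage above and in the tests below, but the snippet is a sketch, not a copy of the generated SDK.

```go
package sdk

import "net/http"

// PoliciesAPIGetClusterPoliciesResponse sketches the typed response used by
// the refactored autoscaler code: raw body bytes, the underlying
// *http.Response, and the decoded payload for a 200 reply.
type PoliciesAPIGetClusterPoliciesResponse struct {
	Body         []byte
	HTTPResponse *http.Response
	JSON200      *PoliciesV1Policies
}

// StatusCode mirrors the helper oapi-codegen attaches to every typed
// response; getCurrentPolicies uses it to detect a 404.
func (r PoliciesAPIGetClusterPoliciesResponse) StatusCode() int {
	if r.HTTPResponse != nil {
		return r.HTTPResponse.StatusCode
	}
	return 0
}
```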
88 changes: 55 additions & 33 deletions castai/resource_autoscaler_test.go
@@ -21,7 +21,7 @@ import (
)

func TestAutoscalerResource_PoliciesUpdateAction(t *testing.T) {
currentPolicies := `
currentPoliciesJSON := `
{
"enabled": true,
"isScopedMode": false,
@@ -77,7 +77,7 @@ func TestAutoscalerResource_PoliciesUpdateAction(t *testing.T) {
// 3. enable spot backups
// 4. change spot cloud to aws - just to test if we can do change on arrays
// 5. enable the spot interruption predictions
policyChanges := `{
policyChangesJSON := `{
"isScopedMode":true,
"unschedulablePods": {
"nodeConstraints": {
@@ -97,7 +97,7 @@ func TestAutoscalerResource_PoliciesUpdateAction(t *testing.T) {
}
}`

updatedPolicies := `
updatedPoliciesJSON := `
{
"enabled": true,
"isScopedMode": true,
@@ -154,35 +154,43 @@

r := require.New(t)
mockctrl := gomock.NewController(t)
mockClient := mock_sdk.NewMockClientInterface(mockctrl)
mockClient := mock_sdk.NewMockClientWithResponsesInterface(mockctrl)

ctx := context.Background()
provider := &ProviderConfig{
api: &sdk.ClientWithResponses{
ClientInterface: mockClient,
},
api: mockClient,
}

resource := resourceAutoscaler()

clusterId := "cluster_id"
val := cty.ObjectVal(map[string]cty.Value{
FieldAutoscalerPoliciesJSON: cty.StringVal(policyChanges),
FieldAutoscalerPoliciesJSON: cty.StringVal(policyChangesJSON),
FieldClusterId: cty.StringVal(clusterId),
})
state := terraform.NewInstanceStateShimmedFromValue(val, 0)
data := resource.Data(state)

body := io.NopCloser(bytes.NewReader([]byte(currentPolicies)))
response := &http.Response{StatusCode: 200, Body: body}

policiesUpdated := false

mockClient.EXPECT().PoliciesAPIGetClusterPolicies(gomock.Any(), clusterId, gomock.Any()).Return(response, nil).Times(1)
mockClient.EXPECT().PoliciesAPIUpsertClusterPoliciesWithBody(gomock.Any(), clusterId, "application/json", gomock.Any()).
DoAndReturn(func(ctx context.Context, clusterId string, contentType string, body io.Reader) (*http.Response, error) {
var currentPolicies sdk.PoliciesV1Policies
err := json.Unmarshal([]byte(currentPoliciesJSON), &currentPolicies)
r.NoError(err)

responsePayload := &sdk.PoliciesAPIGetClusterPoliciesResponse{
JSON200: &currentPolicies,
Body: []byte(currentPoliciesJSON),
HTTPResponse: &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewReader([]byte(currentPoliciesJSON))),
},
}

mockClient.EXPECT().PoliciesAPIGetClusterPoliciesWithResponse(gomock.Any(), clusterId, gomock.Any()).Return(responsePayload, nil).Times(1)
mockClient.EXPECT().PoliciesAPIUpsertClusterPoliciesWithBodyWithResponse(gomock.Any(), clusterId, "application/json", gomock.Any()).
DoAndReturn(func(ctx context.Context, clusterId string, contentType string, body io.Reader) (*sdk.PoliciesAPIUpsertClusterPoliciesResponse, error) {
got, _ := io.ReadAll(body)
expected := []byte(updatedPolicies)
expected := []byte(updatedPoliciesJSON)

eq, err := JSONBytesEqual(got, expected)
r.NoError(err)
@@ -191,9 +199,11 @@ func TestAutoscalerResource_PoliciesUpdateAction(t *testing.T) {

policiesUpdated = true

return &http.Response{
StatusCode: 200,
Body: io.NopCloser(bytes.NewReader([]byte(""))),
return &sdk.PoliciesAPIUpsertClusterPoliciesResponse{
HTTPResponse: &http.Response{
StatusCode: http.StatusOK,
},
Body: []byte(""),
}, nil
}).Times(1)

@@ -203,7 +213,7 @@ func TestAutoscalerResource_PoliciesUpdateAction(t *testing.T) {
}

func TestAutoscalerResource_PoliciesUpdateAction_Fail(t *testing.T) {
currentPolicies := `
currentPoliciesJSON := `
{
"enabled": true,
"isScopedMode": false,
@@ -254,7 +264,7 @@ func TestAutoscalerResource_PoliciesUpdateAction_Fail(t *testing.T) {
}
}`

policyChanges := `{
policyChangesJSON := `{
"isScopedMode":true,
"unschedulablePods": {
"nodeConstraints": {
@@ -272,34 +282,46 @@ func TestAutoscalerResource_PoliciesUpdateAction_Fail(t *testing.T) {

r := require.New(t)
mockctrl := gomock.NewController(t)
mockClient := mock_sdk.NewMockClientInterface(mockctrl)
mockClient := mock_sdk.NewMockClientWithResponsesInterface(mockctrl)

ctx := context.Background()
provider := &ProviderConfig{
api: &sdk.ClientWithResponses{
ClientInterface: mockClient,
},
api: mockClient,
}

resource := resourceAutoscaler()

clusterId := "cluster_id"
val := cty.ObjectVal(map[string]cty.Value{
FieldAutoscalerPoliciesJSON: cty.StringVal(policyChanges),
FieldAutoscalerPoliciesJSON: cty.StringVal(policyChangesJSON),
FieldClusterId: cty.StringVal(clusterId),
})
state := terraform.NewInstanceStateShimmedFromValue(val, 0)
data := resource.Data(state)

body := io.NopCloser(bytes.NewReader([]byte(currentPolicies)))
response := &http.Response{StatusCode: 200, Body: body}
var currentPolicies sdk.PoliciesV1Policies
err := json.Unmarshal([]byte(currentPoliciesJSON), &currentPolicies)
r.NoError(err)

responsePayload := &sdk.PoliciesAPIGetClusterPoliciesResponse{
JSON200: &currentPolicies,
Body: []byte(currentPoliciesJSON),
HTTPResponse: &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewReader([]byte(currentPoliciesJSON))),
},
}

mockClient.EXPECT().PoliciesAPIGetClusterPoliciesWithResponse(gomock.Any(), clusterId, gomock.Any()).Return(responsePayload, nil).Times(1)
mockClient.EXPECT().PoliciesAPIUpsertClusterPoliciesWithBodyWithResponse(gomock.Any(), clusterId, "application/json", gomock.Any()).
DoAndReturn(func(ctx context.Context, clusterId string, contentType string, body io.Reader) (*sdk.PoliciesAPIUpsertClusterPoliciesResponse, error) {

mockClient.EXPECT().PoliciesAPIGetClusterPolicies(gomock.Any(), clusterId, gomock.Any()).Return(response, nil).Times(1)
mockClient.EXPECT().PoliciesAPIUpsertClusterPoliciesWithBody(gomock.Any(), clusterId, "application/json", gomock.Any()).
DoAndReturn(func(ctx context.Context, clusterId string, contentType string, body io.Reader) (*http.Response, error) {
return &http.Response{
StatusCode: 400,
Body: io.NopCloser(bytes.NewReader([]byte(`{"message":"policies config: Evictor policy management is not allowed: Evictor installed externally. Uninstall Evictor first and try again.","fieldViolations":[]`))),
return &sdk.PoliciesAPIUpsertClusterPoliciesResponse{
HTTPResponse: &http.Response{
StatusCode: http.StatusBadRequest,
Body: io.NopCloser(bytes.NewReader([]byte(`{"message":"policies config: Evictor policy management is not allowed: Evictor installed externally. Uninstall Evictor first and try again.","fieldViolations":[]`))),
},
Body: []byte(""),
}, nil
}).Times(1)

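Both tests now construct their mock with mock_sdk.NewMockClientWithResponsesInterface, which implies the mock package has been (or needs to be) regenerated to cover ClientWithResponsesInterface in addition to ClientInterface. A hedged sketch of a mockgen directive that would produce such a package is shown below; the destination path, package name and module path are assumptions, not taken from this pull request.

```go
package sdk

// Illustrative regeneration directive for the mocks used in these tests.
//
//go:generate go run go.uber.org/mock/mockgen -destination=mock/client.go -package=mock_sdk github.com/castai/terraform-provider-castai/castai/sdk ClientInterface,ClientWithResponsesInterface
```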
6 changes: 3 additions & 3 deletions castai/resource_eks_cluster.go
@@ -100,7 +100,7 @@ func resourceCastaiEKSClusterCreate(ctx context.Context, data *schema.ResourceDa
log.Printf("[INFO] Registering new external cluster: %#v", req)

resp, err := client.ExternalClusterAPIRegisterClusterWithResponse(ctx, req)
if checkErr := sdk.CheckOKResponse(resp, err); checkErr != nil {
if checkErr := sdk.CheckOKResponse(resp.HTTPResponse, err); checkErr != nil {
return diag.FromErr(checkErr)
}

Expand Down Expand Up @@ -174,7 +174,7 @@ func resourceCastaiEKSClusterUpdate(ctx context.Context, data *schema.ResourceDa
return resourceCastaiEKSClusterRead(ctx, data, meta)
}

func updateClusterSettings(ctx context.Context, data *schema.ResourceData, client *sdk.ClientWithResponses) error {
func updateClusterSettings(ctx context.Context, data *schema.ResourceData, client sdk.ClientWithResponsesInterface) error {
if !data.HasChanges(
FieldEKSClusterAssumeRoleArn,
FieldClusterCredentialsId,
Expand All @@ -199,7 +199,7 @@ func updateClusterSettings(ctx context.Context, data *schema.ResourceData, clien
if err != nil {
return err
}
err = sdk.StatusOk(response)
err = sdk.StatusOk(response.HTTPResponse)
// In case of malformed user request return error to user right away.
if response.StatusCode() == 400 {
return backoff.Permanent(err)
2 changes: 1 addition & 1 deletion castai/resource_eks_cluster_id.go
@@ -52,7 +52,7 @@ func resourceEKSClusterIDCreate(ctx context.Context, data *schema.ResourceData,
}

resp, err := client.ExternalClusterAPIRegisterClusterWithResponse(ctx, req)
if checkErr := sdk.CheckOKResponse(resp, err); checkErr != nil {
if checkErr := sdk.CheckOKResponse(resp.HTTPResponse, err); checkErr != nil {
return diag.FromErr(checkErr)
}

Expand Down
Loading
Loading