APPS-1436 Add static fields validation in update config handler (#299)
* use default for max size

* add validation

* add empty test

* add test for validation

* linter warning

* add lock to applier

* use regular mutex
korotkov-aerospike authored Jan 1, 2025
1 parent 71e33b4 commit f575d4c
Showing 8 changed files with 171 additions and 129 deletions.
11 changes: 11 additions & 0 deletions internal/server/handlers/config.go

@@ -71,6 +71,17 @@ func (s *Service) updateConfig(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
+	// validate static fields.
+	oldConfig := dto.NewConfigFromModel(s.config)
+	if err := validation.ValidateStaticFieldChanges(oldConfig, newConfig); err != nil {
+		hLogger.Error("static configuration has changed",
+			slog.Any("error", err),
+		)
+		err := fmt.Errorf("static configuration has changed: %w", err)
+		http.Error(w, err.Error(), http.StatusBadRequest)
+		return
+	}
+
 	newConfigModel, err := newConfig.ToModel(s.nsValidator)
 	if err != nil {
 		http.Error(w, err.Error(), http.StatusBadRequest)
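The validation.ValidateStaticFieldChanges implementation is not part of this diff. A rough sketch of the shape such a check might take; the static sections chosen below are illustrative assumptions, not the repository's actual list:

// Hypothetical sketch; the real implementation lives in the service's
// validation package and may differ.
package validation

import (
	"fmt"
	"reflect"

	"github.com/aerospike/aerospike-backup-service/v3/pkg/dto"
)

// ValidateStaticFieldChanges rejects updates that modify configuration
// sections assumed here to be static (fixed at startup).
func ValidateStaticFieldChanges(oldCfg, newCfg *dto.Config) error {
	if !reflect.DeepEqual(oldCfg.ServiceConfig.HTTPServer, newCfg.ServiceConfig.HTTPServer) {
		return fmt.Errorf("HTTP server settings cannot be changed at runtime")
	}
	if !reflect.DeepEqual(oldCfg.ServiceConfig.Logger, newCfg.ServiceConfig.Logger) {
		return fmt.Errorf("logger settings cannot be changed at runtime")
	}
	return nil
}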
2 changes: 1 addition & 1 deletion internal/util/util.go

@@ -36,7 +36,7 @@ func logWriter(config *model.LoggerConfig) io.Writer {
 	if config.FileWriter != nil {
 		fileWriter := &lumberjack.Logger{
 			Filename:   config.FileWriter.Filename,
-			MaxSize:    config.FileWriter.MaxSize,
+			MaxSize:    config.FileWriter.GetMaxSizeOrDefault(),
 			MaxBackups: config.FileWriter.MaxBackups,
 			MaxAge:     config.FileWriter.MaxAge,
 			Compress:   config.FileWriter.Compress,
9 changes: 9 additions & 0 deletions pkg/dto/compare_test.go

@@ -359,6 +359,15 @@ func TestLoggerConfig_Compare(t *testing.T) {
 				"Filename changed: app.log -> new.log",
 			},
 		},
+		{
+			name: "empty configs",
+			current: &LoggerConfig{
+				FileWriter: &FileLoggerConfig{},
+			},
+			other: &LoggerConfig{
+				FileWriter: &FileLoggerConfig{},
+			},
+		},
 	}
 
 	for _, tt := range tests {
258 changes: 134 additions & 124 deletions pkg/dto/convert_test.go

@@ -8,161 +8,171 @@ import (

The sample configuration moves from the body of TestConfigModelConversionIsLossless to package-level vars shared with a new TestConfigValidation, and assert.Equal is replaced by require.Equal (dropping the assert import). The changed region after the commit:

import (
	"github.com/aerospike/aerospike-backup-service/v3/pkg/util"
	saClient "github.com/aerospike/backup-go/pkg/secret-agent"
	"github.com/aws/smithy-go/ptr"
	"github.com/stretchr/testify/require"
)

var secretAgentConfig = SecretAgentConfig{
	SecretAgentName: util.Ptr("agent1"),
}

var originalConfig = &Config{
	ServiceConfig: BackupServiceConfig{
		HTTPServer: &HTTPServerConfig{
			Address: ptr.String("localhost"),
		},
		Logger: &LoggerConfig{
			Level: util.Ptr("DEBUG"),
			FileWriter: &FileLoggerConfig{
				Filename: "log",
			},
		},
	},
	AerospikeClusters: map[string]*AerospikeCluster{
		"cluster1": {
			SeedNodes: []SeedNode{{HostName: "host", Port: 80}},
			Credentials: &Credentials{
				User: util.Ptr("tester"),
				Password: util.Ptr("psw"),
				SecretAgentConfig: secretAgentConfig,
			},
		},
		"cluster2": {
			SeedNodes: []SeedNode{{HostName: "host", Port: 80}},
			Credentials: &Credentials{
				User: util.Ptr("tester"),
				Password: util.Ptr("psw"),
				SecretAgentConfig: SecretAgentConfig{
					SecretAgent: &SecretAgent{
						Address: "host2",
						ConnectionType: saClient.ConnectionTypeTCP,
					},
				},
			},
		},
		"cluster3": {
			ClusterLabel: util.Ptr("No credentials"),
			SeedNodes: []SeedNode{{HostName: "host", Port: 80}},
		},
	},
	Storage: map[string]*Storage{
		"aws 0": { // no secret agent
			S3Storage: &S3Storage{
				Bucket: "bucket",
				S3Region: "region",
				AccessKeyID: util.Ptr("id"),
				SecretAccessKey: util.Ptr("key"),
			},
		},
		"aws 1": { // secret agent by name
			S3Storage: &S3Storage{
				Bucket: "bucket",
				S3Region: "region",
				AccessKeyID: util.Ptr("id"),
				SecretAccessKey: util.Ptr("key"),
				SecretAgentConfig: secretAgentConfig,
			},
		},
		"aws 2": { // secret agent by full definition
			S3Storage: &S3Storage{
				Bucket: "bucket2",
				S3Region: "region2",
				AccessKeyID: util.Ptr("id2"),
				SecretAccessKey: util.Ptr("key2"),
				SecretAgentConfig: SecretAgentConfig{
					SecretAgent: &SecretAgent{
						Address: "host3",
						ConnectionType: saClient.ConnectionTypeTCP,
					},
				},
			},
		},
		"gcp": {
			GcpStorage: &GcpStorage{
				KeyFile: "key-file",
				BucketName: "bucket",
				Path: "path",
				Endpoint: "http://localhost",
				SecretAgentConfig: secretAgentConfig,
			},
		},
		"gcp2": {
			GcpStorage: &GcpStorage{
				Key: "key-json",
				BucketName: "bucket",
				Path: "path",
				Endpoint: "http://localhost",
				SecretAgentConfig: SecretAgentConfig{
					SecretAgent: &SecretAgent{
						Address: "host3",
						ConnectionType: saClient.ConnectionTypeTCP,
					},
				},
			},
		},
		"azure 1": {
			AzureStorage: &AzureStorage{
				SecretAgentConfig: secretAgentConfig,
				Endpoint: "http://localhost",
				ContainerName: "container",
				Path: "backup",
				AccountName: "hello",
				AccountKey: "world",
				TenantID: "",
				ClientID: "",
				ClientSecret: "",
			},
		},
		"azure 2": {
			AzureStorage: &AzureStorage{
				SecretAgentConfig: SecretAgentConfig{
					SecretAgent: &SecretAgent{
						Address: "host4",
						ConnectionType: saClient.ConnectionTypeUDS,
					},
				},
				Endpoint: "http://localhost",
				ContainerName: "container",
				Path: "backup",
				AccountName: "",
				AccountKey: "",
				TenantID: "1",
				ClientID: "2",
				ClientSecret: "3",
			},
		},
	},
	BackupPolicies: map[string]*BackupPolicy{"policy1": {}},
	BackupRoutines: map[string]*BackupRoutine{"routine1": {
		BackupPolicy: "policy1",
		SourceCluster: "cluster1",
		Storage: "aws 1",
		IntervalCron: "@daily",
	}},
	SecretAgents: map[string]*SecretAgent{"agent1": {
		Address: "host",
		ConnectionType: saClient.ConnectionTypeTCP,
	}},
}

func TestConfigModelConversionIsLossless(t *testing.T) {
	configJSON, _ := json.MarshalIndent(originalConfig, "", " ")
	t.Logf("\nOriginal config:\n%s\n", string(configJSON))

	// Convert the Config to a model.Config
	nsValidator := &aerospike.NoopNamespaceValidator{}
	modelConfig, err := originalConfig.ToModel(nsValidator)
	require.NoError(t, err, "toModel should not return an error")

	// Convert the model.Config back to a Config
	newConfig := NewConfigFromModel(modelConfig)

	// Assert that the new Config matches the original Config
	require.Equal(t, originalConfig, newConfig, "Config -> model.Config -> Config should be lossless")
}

func TestConfigValidation(t *testing.T) {
	configJSON, _ := json.Marshal(originalConfig)
	require.NoError(t, originalConfig.validate())
	configJSONAfter, _ := json.Marshal(originalConfig)
	require.Equal(t, string(configJSON), string(configJSONAfter), "validation should not change the config")
}
3 changes: 0 additions & 3 deletions pkg/dto/logger_config.go

@@ -123,9 +123,6 @@ func (f *FileLoggerConfig) Validate() error {
 	if f.MaxSize < 0 {
 		return fmt.Errorf("negative logger MaxSize: %d", f.MaxSize)
 	}
-	if f.MaxSize == 0 {
-		f.MaxSize = 100 // set default in Mb
-	}
 	if f.MaxAge < 0 {
 		return fmt.Errorf("negative logger MaxAge: %d", f.MaxAge)
 	}
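This removal is the behavioral core of the commit: Validate previously wrote the 100 MB default back into the struct, so validating a config could change it. Validation is now read-only, which is exactly what the new TestConfigValidation above asserts; the default moves to read time via the model's GetMaxSizeOrDefault (next files). A before/after sketch, not repository code:

cfg := &FileLoggerConfig{Filename: "log"} // MaxSize left at zero
_ = cfg.Validate()
// Before this commit: cfg.MaxSize == 100, set as a side effect of Validate.
// After this commit:  cfg.MaxSize == 0; the effective size is resolved when
// the log writer is built, via model.FileLoggerConfig.GetMaxSizeOrDefault.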
3 changes: 3 additions & 0 deletions pkg/model/config_default.go

@@ -29,6 +29,9 @@ var defaultConfig = struct {
 		Level: util.Ptr("DEBUG"),
 		Format: util.Ptr("PLAIN"),
 		StdoutWriter: util.Ptr(true),
+		FileWriter: &FileLoggerConfig{
+			MaxSize: 0,
+		},
 	},
 	backupPolicy: BackupPolicy{
 		RetryPolicy: &models.RetryPolicy{
8 changes: 8 additions & 0 deletions pkg/model/logger_config.go

@@ -60,3 +60,11 @@ type FileLoggerConfig struct {
 	// using gzip. The default is not to perform compression.
 	Compress bool
 }
+
+func (f *FileLoggerConfig) GetMaxSizeOrDefault() int {
+	if f.MaxSize != 0 {
+		return f.MaxSize
+	}
+
+	return defaultConfig.logger.FileWriter.MaxSize
+}
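Note that defaultConfig (previous file) sets FileWriter.MaxSize to 0, so the fallback returned here is itself zero; lumberjack documents a MaxSize of 0 as "use the built-in 100 MB default", so the effective rollover size should be unchanged. A usage sketch under that assumption:

f := &FileLoggerConfig{MaxSize: 50}
_ = f.GetMaxSizeOrDefault() // 50: an explicit value wins

f = &FileLoggerConfig{}
_ = f.GetMaxSizeOrDefault() // 0: defaultConfig's FileWriter.MaxSize, which
                            // lumberjack treats as its 100 MB default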
6 changes: 5 additions & 1 deletion pkg/service/config_applier.go

@@ -18,6 +18,7 @@ type ConfigApplier interface {
 }
 
 type DefaultConfigApplier struct {
+	mu            sync.Mutex
 	scheduler     quartz.Scheduler
 	backends      BackendsHolder
 	clientManager aerospike.ClientManager

@@ -39,6 +40,9 @@ func NewDefaultConfigApplier(
 }
 
 func (a *DefaultConfigApplier) ApplyNewRoutines(ctx context.Context, routines map[string]*model.BackupRoutine) error {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+
 	err := a.clearPeriodicSchedulerJobs()
 	if err != nil {
 		return fmt.Errorf("failed to clear periodic jobs: %w", err)

@@ -71,7 +75,7 @@ func (a *DefaultConfigApplier) clearPeriodicSchedulerJobs() error {
 	for _, key := range keys {
 		err = a.scheduler.DeleteJob(key)
 		if err != nil {
-			return fmt.Errorf("cannot delete job: %w", err)
+			return fmt.Errorf("cannot delete job %q: %w", key, err)
 		}
 	}
 	return nil
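The new mutex makes the clear-then-reschedule sequence atomic: without it, two concurrent updateConfig requests could interleave, one deleting jobs the other had just scheduled. A minimal sketch of the race being prevented (illustrative; applier construction and routine fields elided):

var applier ConfigApplier // a DefaultConfigApplier in practice
ctx := context.Background()
routinesA := map[string]*model.BackupRoutine{"a": {}}
routinesB := map[string]*model.BackupRoutine{"b": {}}

// With the lock, one ApplyNewRoutines finishes its whole clear-and-reschedule
// cycle before the other starts; without it, the scheduler could be left
// holding a mix of jobs from the two configs.
go func() { _ = applier.ApplyNewRoutines(ctx, routinesA) }()
go func() { _ = applier.ApplyNewRoutines(ctx, routinesB) }()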
