From 528e838a1a309f4bf0f0e4a1816cc5cfbf44794c Mon Sep 17 00:00:00 2001
From: Andrey Borodin
Date: Mon, 18 Mar 2024 11:21:34 +0500
Subject: [PATCH 1/3] Skip archive_command check on standby (#1662)

WAL archival may be configured on the primary only.
---
 internal/databases/postgres/connect.go | 31 +++++++++++++++++++++-----
 1 file changed, 26 insertions(+), 5 deletions(-)

diff --git a/internal/databases/postgres/connect.go b/internal/databases/postgres/connect.go
index 967ac8bae..f1cb1a1e4 100644
--- a/internal/databases/postgres/connect.go
+++ b/internal/databases/postgres/connect.go
@@ -41,13 +41,35 @@ func Connect(configOptions ...func(config *pgx.ConnConfig) error) (*pgx.Conn, er
 		}
 	}
 
-	var archiveMode string
+	err = checkArchiveCommand(conn)
+	if err != nil {
+		return nil, err
+	}
+	return conn, nil
+}
+
+func checkArchiveCommand(conn *pgx.Conn) error {
 	// TODO: Move this logic to queryRunner
+
+	var standby bool
+
+	err := conn.QueryRow("select pg_is_in_recovery()").Scan(&standby)
+	if err != nil {
+		return errors.Wrap(err, "Connect: postgres standby test failed")
+	}
+
+	if standby {
+		// archive_mode may be configured on primary
+		return nil
+	}
+
+	var archiveMode string
+
 	err = conn.QueryRow("show archive_mode").Scan(&archiveMode)
 
 	if err != nil {
-		return nil, errors.Wrap(err, "Connect: postgres archive_mode test failed")
+		return errors.Wrap(err, "Connect: postgres archive_mode test failed")
 	}
 
 	if archiveMode != "on" && archiveMode != "always" {
@@ -60,7 +82,7 @@ func Connect(configOptions ...func(config *pgx.ConnConfig) error) (*pgx.Conn, er
 	err = conn.QueryRow("show archive_command").Scan(&archiveCommand)
 
 	if err != nil {
-		return nil, errors.Wrap(err, "Connect: postgres archive_mode test failed")
+		return errors.Wrap(err, "Connect: postgres archive_command test failed")
 	}
 
 	if len(archiveCommand) == 0 || archiveCommand == "(disabled)" {
@@ -69,8 +91,7 @@ func Connect(configOptions ...func(config *pgx.ConnConfig) error) (*pgx.Conn, er
 			" Please consider configuring WAL archiving.")
 		}
 	}
-
-	return conn, nil
+	return nil
 }
 
 // nolint:gocritic

From 8270f7772cad2aaed7b21f21081a88117b69a059 Mon Sep 17 00:00:00 2001
From: Stepan Filippov <43007025+debebantur@users.noreply.github.com>
Date: Tue, 19 Mar 2024 18:39:55 +0500
Subject: [PATCH 2/3] fixes for ao check config (#1664)

* fixes for ao check config

* fixed style

* fix linters
---
 cmd/gp/check_ao_length.go                |  5 ++++-
 .../greenplum/ao_check_length_handler.go | 21 ++++++++++---------
 2 files changed, 15 insertions(+), 11 deletions(-)

diff --git a/cmd/gp/check_ao_length.go b/cmd/gp/check_ao_length.go
index 9aedb5988..a182db7f9 100644
--- a/cmd/gp/check_ao_length.go
+++ b/cmd/gp/check_ao_length.go
@@ -2,8 +2,10 @@ package gp
 
 import (
 	"github.com/spf13/cobra"
+	"github.com/spf13/viper"
 	"github.com/wal-g/tracelog"
 	"github.com/wal-g/wal-g/internal"
+	conf "github.com/wal-g/wal-g/internal/config"
 	"github.com/wal-g/wal-g/internal/databases/greenplum"
 )
@@ -24,7 +26,8 @@ var checkAOTableLengthMasterCmd = &cobra.Command{
 }
 
 func init() {
-	checkAOTableLengthMasterCmd.PersistentFlags().StringVarP(&logsDir, "logs", "l", "/var/log/greenplum", `directory to store logs`)
+	checkAOTableLengthMasterCmd.PersistentFlags().StringVarP(&logsDir, "logs", "l", viper.GetString(conf.GPLogsDirectory),
+		"directory to store logs")
 	checkAOTableLengthMasterCmd.PersistentFlags().BoolVar(&runBackupCheck, "check-backup", false,
 		"if the flag is set, checks backup`s length")
 	checkAOTableLengthMasterCmd.PersistentFlags().StringVarP(&name, "backup-name", "n", internal.LatestString,
diff --git a/internal/databases/greenplum/ao_check_length_handler.go b/internal/databases/greenplum/ao_check_length_handler.go
index bff2c6a89..60f621c4b 100644
--- a/internal/databases/greenplum/ao_check_length_handler.go
+++ b/internal/databases/greenplum/ao_check_length_handler.go
@@ -13,7 +13,6 @@ import (
 )
 
 type AOLengthCheckHandler struct {
-	logsDir     string
 	checkBackup bool
 	backupName  string
 }
@@ -21,7 +20,6 @@ type AOLengthCheckHandler struct {
 func NewAOLengthCheckHandler(logsDir string, checkBackup bool, backupName string) (*AOLengthCheckHandler, error) {
 	initGpLog(logsDir)
 	return &AOLengthCheckHandler{
-		logsDir:     logsDir,
 		checkBackup: checkBackup,
 		backupName:  backupName,
 	}, nil
@@ -44,15 +42,18 @@ func (checker *AOLengthCheckHandler) CheckAOTableLength() {
 		tracelog.ErrorLogger.FatalfOnError("could not get cluster info %v", err)
 	}
 
-	segmentsBaccups, err := getSegmentBackupNames(checker.backupName)
-	if err != nil {
-		tracelog.ErrorLogger.FatalfOnError("could not get segment`s backups %v", err)
+	segmentsBackups := make(map[int]string)
+	if checker.checkBackup {
+		segmentsBackups, err = getSegmentBackupNames(checker.backupName)
+		if err != nil {
+			tracelog.ErrorLogger.FatalfOnError("could not get segment's backups %v", err)
+		}
 	}
 
 	remoteOutput := globalCluster.GenerateAndExecuteCommand("Run ao/aocs length check",
 		cluster.ON_SEGMENTS,
 		func(contentID int) string {
-			return checker.buildCheckAOLengthCmd(contentID, segmentsBaccups[contentID], globalCluster)
+			return checker.buildCheckAOLengthCmd(contentID, segmentsBackups, globalCluster)
 		})
 
 	for _, command := range remoteOutput.Commands {
@@ -68,16 +69,16 @@ func (checker *AOLengthCheckHandler) CheckAOTableLength() {
 	}
 }
 
-func (checker *AOLengthCheckHandler) buildCheckAOLengthCmd(contentID int, backupName string, globalCluster *cluster.Cluster) string {
+func (checker *AOLengthCheckHandler) buildCheckAOLengthCmd(contentID int, backupNames map[int]string,
+	globalCluster *cluster.Cluster) string {
 	segment := globalCluster.ByContent[contentID][0]
-
 	runCheckArgs := []string{
 		fmt.Sprintf("--port=%d", segment.Port),
 		fmt.Sprintf("--segnum=%d", segment.ContentID),
 	}
 
 	if checker.checkBackup {
-		runCheckArgs = append(runCheckArgs, "--check-backup", fmt.Sprintf("--backup-name=%s", backupName))
+		runCheckArgs = append(runCheckArgs, "--check-backup", fmt.Sprintf("--backup-name=%s", backupNames[contentID]))
 	}
 
 	runCheckArgsLine := strings.Join(runCheckArgs, " ")
@@ -89,7 +90,7 @@ func (checker *AOLengthCheckHandler) buildCheckAOLengthCmd(contentID int, backup
 		fmt.Sprintf("--config=%s", conf.CfgFile),
 		// method
 		"check-ao-aocs-length-segment",
-		// actual arguments to be passed to the backup-push command
+		// actual arguments to be passed to the check-ao command
 		runCheckArgsLine,
 		// forward stdout and stderr to the log file
 		"&>>", formatSegmentLogPath(contentID),

From b77e6375819caff68ca8b46352588ec309ccb333 Mon Sep 17 00:00:00 2001
From: Stepan Filippov <43007025+debebantur@users.noreply.github.com>
Date: Thu, 21 Mar 2024 17:04:24 +0500
Subject: [PATCH 3/3] Fix for GP incremental backups (#1666)

* adding ao files

* style fixes

* added test
---
 .../greenplum/ao_storage_uploader.go      |  6 ++
 .../greenplum/ao_storage_uploader_test.go | 58 ++++++++++++++++---
 2 files changed, 56 insertions(+), 8 deletions(-)

diff --git a/internal/databases/greenplum/ao_storage_uploader.go b/internal/databases/greenplum/ao_storage_uploader.go
index 92d5da2d8..c49f7cdf4 100644
--- a/internal/databases/greenplum/ao_storage_uploader.go
+++ b/internal/databases/greenplum/ao_storage_uploader.go
@@ -76,6 +76,12 @@ func (u *AoStorageUploader) addFile(cfi *internal.ComposeFileInfo, aoMeta AoRelF
 		return u.regularAoUpload(cfi, aoMeta, location)
 	}
 
+	if !u.isIncremental && remoteFile.IsIncremented {
+		tracelog.DebugLogger.Printf("%s: backup isIncremental: %t, remote file isIncremented: %t, will perform a regular upload",
+			cfi.Header.Name, u.isIncremental, remoteFile.IsIncremented)
+		return u.regularAoUpload(cfi, aoMeta, location)
+	}
+
 	if aoMeta.modCount != remoteFile.ModCount {
 		if !u.isIncremental || aoMeta.modCount == 0 {
 			tracelog.DebugLogger.Printf("%s: isIncremental: %t, modCount: %d, will perform a regular upload",
diff --git a/internal/databases/greenplum/ao_storage_uploader_test.go b/internal/databases/greenplum/ao_storage_uploader_test.go
index 31566980f..7bf117dc2 100644
--- a/internal/databases/greenplum/ao_storage_uploader_test.go
+++ b/internal/databases/greenplum/ao_storage_uploader_test.go
@@ -102,7 +102,7 @@ func TestRegularAoUpload(t *testing.T) {
 			ModCount:      4,
 		},
 	}
-	runSingleTest(t, baseFiles, bundleFiles, testFiles, expectedResults, deduplicationAgeLimit)
+	runSingleTest(t, baseFiles, bundleFiles, testFiles, expectedResults, deduplicationAgeLimit, true)
 }
 
 func TestAoUpload_MaxAge(t *testing.T) {
@@ -176,7 +176,7 @@ func TestAoUpload_MaxAge(t *testing.T) {
 			ModCount:      5,
 		},
 	}
-	runSingleTest(t, baseFiles, bundleFiles, testFiles, expectedResults, deduplicationAgeLimit)
+	runSingleTest(t, baseFiles, bundleFiles, testFiles, expectedResults, deduplicationAgeLimit, true)
 }
 
 func TestIncrementalAoUpload(t *testing.T) {
@@ -256,7 +256,7 @@ func TestIncrementalAoUpload(t *testing.T) {
 			ModCount:      5,
 		},
 	}
-	runSingleTest(t, baseFiles, bundleFiles, testFiles, expectedResults, deduplicationAgeLimit)
+	runSingleTest(t, baseFiles, bundleFiles, testFiles, expectedResults, deduplicationAgeLimit, true)
 }
 
 func TestIncrementalAoUpload_EqualEof_DifferentModCount(t *testing.T) {
@@ -298,7 +298,7 @@ func TestIncrementalAoUpload_EqualEof_DifferentModCount(t *testing.T) {
 			ModCount:      5,
 		},
 	}
-	runSingleTest(t, baseFiles, bundleFiles, testFiles, expectedResults, deduplicationAgeLimit)
+	runSingleTest(t, baseFiles, bundleFiles, testFiles, expectedResults, deduplicationAgeLimit, true)
 }
 
 func TestIncrementalAoUpload_DifferentEof_EqualModCount(t *testing.T) {
@@ -340,7 +340,49 @@ func TestIncrementalAoUpload_DifferentEof_EqualModCount(t *testing.T) {
 			ModCount:      4,
 		},
 	}
-	runSingleTest(t, baseFiles, bundleFiles, testFiles, expectedResults, deduplicationAgeLimit)
+	runSingleTest(t, baseFiles, bundleFiles, testFiles, expectedResults, deduplicationAgeLimit, true)
+}
+
+func TestIncrementalAoUpload_FullAfterDelta(t *testing.T) {
+	baseFiles := greenplum.BackupAOFiles{
+		"1663.1": {
+			StoragePath:     "1009_13_md5summock_1663_1_4_test_D_aoseg",
+			IsSkipped:       false,
+			IsIncremented:   true,
+			MTime:           time.Now(),
+			StorageType:     greenplum.ColumnOriented,
+			EOF:             70,
+			ModCount:        4,
+			Compressor:      "",
+			FileMode:        420,
+			InitialUploadTS: time.Now(),
+		},
+	}
+	bundleFiles := &internal.RegularBundleFiles{}
+	testFiles := map[string]TestFileInfo{
+		"1663.1": {
+			AoRelFileMetadata: greenplum.NewAoRelFileMetadata("md5summock", greenplum.ColumnOriented, 70, 4),
+			BlockLocation: walparser.BlockLocation{
+				RelationFileNode: walparser.RelFileNode{
+					SpcNode: 1009,
+					DBNode:  13,
+					RelNode: 1663,
+				},
+				BlockNo: 1,
+			},
+		},
+	}
+	expectedResults := map[string]ExpectedResult{
+		"1663.1": {
+			StoragePath:   "1009_13_md5summock_1663_1_4_test_aoseg",
+			IsSkipped:     false,
+			IsIncremented: false,
+			StorageType:   greenplum.ColumnOriented,
+			EOF:           70,
+			ModCount:      4,
+		},
+	}
+	runSingleTest(t, baseFiles, bundleFiles, testFiles, expectedResults, deduplicationAgeLimit, false)
 }
 
 func TestAoUpload_SkippedFile(t *testing.T) {
@@ -382,7 +424,7 @@ func TestAoUpload_SkippedFile(t *testing.T) {
 			ModCount:      4,
 		},
 	}
-	runSingleTest(t, baseFiles, bundleFiles, testFiles, expectedResults, deduplicationAgeLimit)
+	runSingleTest(t, baseFiles, bundleFiles, testFiles, expectedResults, deduplicationAgeLimit, true)
 }
 
 func TestAoUpload_NotExistFile(t *testing.T) {
@@ -429,8 +471,8 @@ func TestAoUpload_NotExistFile(t *testing.T) {
 
 func runSingleTest(t *testing.T, baseFiles greenplum.BackupAOFiles, bundleFiles *internal.RegularBundleFiles,
 	testFiles map[string]TestFileInfo, expectedResults map[string]ExpectedResult,
-	deduplicationAgeLimit time.Duration) {
-	uploader := newAoStorageUploader(baseFiles, bundleFiles, true, deduplicationAgeLimit)
+	deduplicationAgeLimit time.Duration, isUploaderIncremental bool) {
+	uploader := newAoStorageUploader(baseFiles, bundleFiles, isUploaderIncremental, deduplicationAgeLimit)
 
 	testDir, testFiles := generateData("data", testFiles, t)
 	defer os.RemoveAll(testDir)