Skip to content

Commit

Permalink
enrich GC logging messages
Browse files Browse the repository at this point in the history
when there is a lot of data to be GC'd, it
would be useful to have a richer understanding
of what's happening via logs. This commit
changes the completed log messages to info,
since this is something operators would want
to monitor under normal operations, and
also enriches the error messages so it is clear
which table deletion is stuck on.
  • Loading branch information
vroldanbet committed Sep 29, 2023
1 parent 7005ec6 commit 4a3dc5d
Show file tree
Hide file tree
Showing 3 changed files with 14 additions and 10 deletions.
5 changes: 4 additions & 1 deletion internal/datastore/common/gc.go
Original file line number Diff line number Diff line change
Expand Up @@ -123,6 +123,9 @@ func startGarbageCollectorWithMaxElapsedTime(ctx context.Context, gc GarbageColl

case <-time.After(nextInterval):
log.Ctx(ctx).Info().
Dur("interval", nextInterval).
Dur("window", window).
Dur("timeout", timeout).
Msg("running garbage collection worker")

err := RunGarbageCollection(gc, window, timeout)
Expand Down Expand Up @@ -178,7 +181,7 @@ func RunGarbageCollection(gc GarbageCollector, window, timeout time.Duration) er
}

collectionDuration := time.Since(startTime)
log.Ctx(ctx).Debug().
log.Ctx(ctx).Info().
Stringer("highestTxID", watermark).
Dur("duration", collectionDuration).
Time("nowTime", now).
Expand Down
17 changes: 9 additions & 8 deletions internal/datastore/postgres/gc.go
Original file line number Diff line number Diff line change
Expand Up @@ -66,11 +66,12 @@ func (pgd *pgDatastore) TxIDBefore(ctx context.Context, before time.Time) (datas
return postgresRevision{snapshot}, nil
}

func (pgd *pgDatastore) DeleteBeforeTx(ctx context.Context, txID datastore.Revision) (removed common.DeletionCounts, err error) {
func (pgd *pgDatastore) DeleteBeforeTx(ctx context.Context, txID datastore.Revision) (common.DeletionCounts, error) {
revision := txID.(postgresRevision)

minTxAlive := newXid8(revision.snapshot.xmin)

removed := common.DeletionCounts{}
var err error
// Delete any relationship rows that were already dead when this transaction started
removed.Relationships, err = pgd.batchDelete(
ctx,
Expand All @@ -79,7 +80,7 @@ func (pgd *pgDatastore) DeleteBeforeTx(ctx context.Context, txID datastore.Revis
sq.Lt{colDeletedXid: minTxAlive},
)
if err != nil {
return
return removed, fmt.Errorf("failed to GC relationships table: %w", err)
}

// Delete all transaction rows with ID < the transaction ID.
Expand All @@ -93,7 +94,7 @@ func (pgd *pgDatastore) DeleteBeforeTx(ctx context.Context, txID datastore.Revis
sq.Lt{colXID: minTxAlive},
)
if err != nil {
return
return removed, fmt.Errorf("failed to GC transactions table: %w", err)
}

// Delete any namespace rows with deleted_transaction <= the transaction ID.
Expand All @@ -104,10 +105,10 @@ func (pgd *pgDatastore) DeleteBeforeTx(ctx context.Context, txID datastore.Revis
sq.Lt{colDeletedXid: minTxAlive},
)
if err != nil {
return
return removed, fmt.Errorf("failed to GC namespaces table: %w", err)
}

return
return removed, err
}

func (pgd *pgDatastore) batchDelete(
Expand All @@ -116,7 +117,7 @@ func (pgd *pgDatastore) batchDelete(
pkCols []string,
filter sqlFilter,
) (int64, error) {
sql, args, err := psql.Select(pkCols...).From(tableName).Where(filter).Limit(batchDeleteSize).ToSql()
sql, args, err := psql.Select(pkCols...).From(tableName).Where(filter).Limit(gcBatchDeleteSize).ToSql()
if err != nil {
return -1, err
}
Expand All @@ -137,7 +138,7 @@ func (pgd *pgDatastore) batchDelete(

rowsDeleted := cr.RowsAffected()
deletedCount += rowsDeleted
if rowsDeleted < batchDeleteSize {
if rowsDeleted < gcBatchDeleteSize {
break
}
}
Expand Down
2 changes: 1 addition & 1 deletion internal/datastore/postgres/postgres.go
Original file line number Diff line number Diff line change
Expand Up @@ -69,7 +69,7 @@ const (

tracingDriverName = "postgres-tracing"

batchDeleteSize = 1000
gcBatchDeleteSize = 1000

livingTupleConstraint = "uq_relation_tuple_living_xid"
)
Expand Down

0 comments on commit 4a3dc5d

Please sign in to comment.