diff --git a/e2e/go.mod b/e2e/go.mod
index 2656aa95a3..c600153eef 100644
--- a/e2e/go.mod
+++ b/e2e/go.mod
@@ -5,7 +5,7 @@ go 1.21
 toolchain go1.21.1
 
 require (
-	github.com/authzed/authzed-go v0.9.1-0.20230830212047-e1e7da6e877d
+	github.com/authzed/authzed-go v0.10.1-0.20231003161414-9c9116f212b7
 	github.com/authzed/grpcutil v0.0.0-20230908193239-4286bb1d6403
 	github.com/authzed/spicedb v1.23.1
 	github.com/brianvoe/gofakeit/v6 v6.23.0
diff --git a/e2e/go.sum b/e2e/go.sum
index ae3680feac..c765d488a2 100644
--- a/e2e/go.sum
+++ b/e2e/go.sum
@@ -7,8 +7,8 @@ cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2Aawl
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230512164433-5d1fd1a340c9 h1:goHVqTbFX3AIo0tzGr14pgfAW2ZfPChKO21Z9MGf/gk=
 github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230512164433-5d1fd1a340c9/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM=
-github.com/authzed/authzed-go v0.9.1-0.20230830212047-e1e7da6e877d h1:wSt3hHgOOS2vFvRZC0r5hzr9BijmHn9El7pRs88ublI=
-github.com/authzed/authzed-go v0.9.1-0.20230830212047-e1e7da6e877d/go.mod h1:9Pl5jDQJHrjbMDuCrsa+Q6Tqmi1f2pDdIn/qNGI++vA=
+github.com/authzed/authzed-go v0.10.1-0.20231003161414-9c9116f212b7 h1:2sKwbLnVbfe0LkPuWkpntlXRby2bSjkIb7aotZUQQ7I=
+github.com/authzed/authzed-go v0.10.1-0.20231003161414-9c9116f212b7/go.mod h1:9Pl5jDQJHrjbMDuCrsa+Q6Tqmi1f2pDdIn/qNGI++vA=
 github.com/authzed/cel-go v0.17.5 h1:lfpkNrR99B5QRHg5qdG9oLu/kguVlZC68VJuMk8tH9Y=
 github.com/authzed/cel-go v0.17.5/go.mod h1:XL/zEq5hKGVF8aOdMbG7w+BQPihLjY2W8N+UIygDA2I=
 github.com/authzed/grpcutil v0.0.0-20230908193239-4286bb1d6403 h1:bQeIwWWRI9bl93poTqpix4sYHi+gnXUPK7N6bMtXzBE=
diff --git a/go.mod b/go.mod
index 1302b14509..fbf4df0200 100644
--- a/go.mod
+++ b/go.mod
@@ -7,7 +7,7 @@ require (
 	cloud.google.com/go/spanner v1.47.0
 	github.com/IBM/pgxpoolprometheus v1.1.1
 	github.com/Masterminds/squirrel v1.5.4
-	github.com/authzed/authzed-go v0.9.1-0.20230830212047-e1e7da6e877d
+	github.com/authzed/authzed-go v0.10.1-0.20231003161414-9c9116f212b7
 	github.com/authzed/cel-go v0.17.5
 	github.com/authzed/consistent v0.1.0
 	github.com/authzed/grpcutil v0.0.0-20230908193239-4286bb1d6403
diff --git a/go.sum b/go.sum
index af13a2500d..963e9f8e86 100644
--- a/go.sum
+++ b/go.sum
@@ -453,8 +453,8 @@ github.com/ashanbrown/forbidigo v1.6.0 h1:D3aewfM37Yb3pxHujIPSpTf6oQk9sc9WZi8ger
 github.com/ashanbrown/forbidigo v1.6.0/go.mod h1:Y8j9jy9ZYAEHXdu723cUlraTqbzjKF1MUyfOKL+AjcU=
 github.com/ashanbrown/makezero v1.1.1 h1:iCQ87C0V0vSyO+M9E/FZYbu65auqH0lnsOkf5FcB28s=
 github.com/ashanbrown/makezero v1.1.1/go.mod h1:i1bJLCRSCHOcOa9Y6MyF2FTfMZMFdHvxKHxgO5Z1axI=
-github.com/authzed/authzed-go v0.9.1-0.20230830212047-e1e7da6e877d h1:wSt3hHgOOS2vFvRZC0r5hzr9BijmHn9El7pRs88ublI=
-github.com/authzed/authzed-go v0.9.1-0.20230830212047-e1e7da6e877d/go.mod h1:9Pl5jDQJHrjbMDuCrsa+Q6Tqmi1f2pDdIn/qNGI++vA=
+github.com/authzed/authzed-go v0.10.1-0.20231003161414-9c9116f212b7 h1:2sKwbLnVbfe0LkPuWkpntlXRby2bSjkIb7aotZUQQ7I=
+github.com/authzed/authzed-go v0.10.1-0.20231003161414-9c9116f212b7/go.mod h1:9Pl5jDQJHrjbMDuCrsa+Q6Tqmi1f2pDdIn/qNGI++vA=
 github.com/authzed/cel-go v0.17.5 h1:lfpkNrR99B5QRHg5qdG9oLu/kguVlZC68VJuMk8tH9Y=
 github.com/authzed/cel-go v0.17.5/go.mod h1:XL/zEq5hKGVF8aOdMbG7w+BQPihLjY2W8N+UIygDA2I=
 github.com/authzed/consistent v0.1.0 h1:tlh1wvKoRbjRhMm2P+X5WQQyR54SRoS4MyjLOg17Mp8=
diff --git a/internal/datastore/common/errors.go b/internal/datastore/common/errors.go
index 430b7f2daa..402e99ae85 100644
--- a/internal/datastore/common/errors.go
+++ b/internal/datastore/common/errors.go
@@ -13,6 +13,32 @@ import (
 	"github.com/authzed/spicedb/pkg/tuple"
 )
 
+// SerializationError is returned when there's been a serialization
+// error while performing a datastore operation
+type SerializationError struct {
+	error
+}
+
+func (err SerializationError) GRPCStatus() *status.Status {
+	return spiceerrors.WithCodeAndDetails(
+		err,
+		codes.Aborted,
+		spiceerrors.ForReason(
+			v1.ErrorReason_ERROR_REASON_SERIALIZATION_FAILURE,
+			map[string]string{},
+		),
+	)
+}
+
+func (err SerializationError) Unwrap() error {
+	return err.error
+}
+
+// NewSerializationError creates a new SerializationError
+func NewSerializationError(err error) error {
+	return SerializationError{err}
+}
+
 // CreateRelationshipExistsError is an error returned when attempting to CREATE an already-existing
 // relationship.
 type CreateRelationshipExistsError struct {
diff --git a/internal/datastore/common/gc.go b/internal/datastore/common/gc.go
index 6413852a24..a4be724c2d 100644
--- a/internal/datastore/common/gc.go
+++ b/internal/datastore/common/gc.go
@@ -123,6 +123,9 @@ func startGarbageCollectorWithMaxElapsedTime(ctx context.Context, gc GarbageColl
 		case <-time.After(nextInterval):
 			log.Ctx(ctx).Info().
+				Dur("interval", nextInterval).
+				Dur("window", window).
+				Dur("timeout", timeout).
 				Msg("running garbage collection worker")
 
 			err := RunGarbageCollection(gc, window, timeout)
@@ -178,7 +181,7 @@ func RunGarbageCollection(gc GarbageCollector, window, timeout time.Duration) er
 	}
 
 	collectionDuration := time.Since(startTime)
-	log.Ctx(ctx).Debug().
+	log.Ctx(ctx).Info().
 		Stringer("highestTxID", watermark).
 		Dur("duration", collectionDuration).
 		Time("nowTime", now).
diff --git a/internal/datastore/crdb/pool/pool.go b/internal/datastore/crdb/pool/pool.go
index a2c179357e..443a681166 100644
--- a/internal/datastore/crdb/pool/pool.go
+++ b/internal/datastore/crdb/pool/pool.go
@@ -8,13 +8,13 @@ import (
 	"sync"
 	"time"
 
-	"github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/retry"
 	"github.com/jackc/pgx/v5"
 	"github.com/jackc/pgx/v5/pgconn"
 	"github.com/jackc/pgx/v5/pgxpool"
 	"github.com/prometheus/client_golang/prometheus"
 	"golang.org/x/time/rate"
 
+	"github.com/authzed/spicedb/internal/datastore/postgres/common"
 	log "github.com/authzed/spicedb/internal/logging"
 )
 
@@ -291,7 +291,7 @@ func (p *RetryPool) withRetries(ctx context.Context, fn func(conn *pgxpool.Conn)
 				p.healthTracker.SetNodeHealth(nodeID, false)
 			}
 
-			SleepOnErr(ctx, err, retries)
+			common.SleepOnErr(ctx, err, retries)
 
 			conn, err = p.acquireFromDifferentNode(ctx, nodeID)
 			if err != nil {
@@ -301,7 +301,7 @@ func (p *RetryPool) withRetries(ctx context.Context, fn func(conn *pgxpool.Conn)
 		}
 		if errors.As(err, &retryable) {
 			log.Ctx(ctx).Info().Err(err).Uint8("retries", retries).Msg("retryable error")
-			SleepOnErr(ctx, err, retries)
+			common.SleepOnErr(ctx, err, retries)
 			continue
 		}
 		conn.Release()
@@ -323,13 +323,6 @@ func (p *RetryPool) GC(conn *pgx.Conn) {
 	delete(p.nodeForConn, conn)
 }
 
-// SleepOnErr sleeps for a short period of time after an error has occurred.
-func SleepOnErr(ctx context.Context, err error, retries uint8) {
-	after := retry.BackoffExponentialWithJitter(100*time.Millisecond, 0.5)(ctx, uint(retries+1)) // add one so we always wait at least a little bit
-	log.Ctx(ctx).Warn().Err(err).Dur("after", after).Msg("retrying on database error")
-	time.Sleep(after)
-}
-
 func (p *RetryPool) acquireFromDifferentNode(ctx context.Context, nodeID uint32) (*pgxpool.Conn, error) {
 	log.Ctx(ctx).Info().Uint32("node_id", nodeID).Msg("acquiring a connection from a different node")
 	for {
diff --git a/internal/datastore/crdb/watch.go b/internal/datastore/crdb/watch.go
index 51f3b9526b..25c03e1955 100644
--- a/internal/datastore/crdb/watch.go
+++ b/internal/datastore/crdb/watch.go
@@ -15,6 +15,7 @@ import (
 	"github.com/authzed/spicedb/internal/datastore/common"
 	"github.com/authzed/spicedb/internal/datastore/crdb/pool"
+	pgxcommon "github.com/authzed/spicedb/internal/datastore/postgres/common"
 	"github.com/authzed/spicedb/pkg/datastore"
 	core "github.com/authzed/spicedb/pkg/proto/core/v1"
 	"github.com/authzed/spicedb/pkg/spiceerrors"
@@ -270,7 +271,7 @@ func (cds *crdbDatastore) WatchSchema(ctx context.Context, afterRevision datasto
 				running = true
 
 				// Sleep a bit for retrying.
-				pool.SleepOnErr(ctx, err, uint8(retryCount))
+				pgxcommon.SleepOnErr(ctx, err, uint8(retryCount))
 				return
 			}
diff --git a/internal/datastore/postgres/common/errors.go b/internal/datastore/postgres/common/errors.go
index 550fa685a5..f7ed9f41dc 100644
--- a/internal/datastore/postgres/common/errors.go
+++ b/internal/datastore/postgres/common/errors.go
@@ -14,6 +14,7 @@ import (
 const (
 	pgUniqueConstraintViolation = "23505"
 	pgSerializationFailure      = "40001"
+	pgTransactionAborted        = "25P02"
 )
 
 var (
diff --git a/internal/datastore/postgres/common/pgx.go b/internal/datastore/postgres/common/pgx.go
index 156b8d7af0..65baacc16e 100644
--- a/internal/datastore/postgres/common/pgx.go
+++ b/internal/datastore/postgres/common/pgx.go
@@ -8,6 +8,7 @@ import (
 	"time"
 
 	"github.com/exaring/otelpgx"
+	"github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/retry"
 	zerologadapter "github.com/jackc/pgx-zerolog"
 	"github.com/jackc/pgx/v5"
 	"github.com/jackc/pgx/v5/pgconn"
@@ -87,16 +88,13 @@ func ConfigurePGXLogger(connConfig *pgx.ConnConfig) {
 			level = tracelog.LogLevelDebug
 		}
 
-		// do not log cancelled queries as errors
-		// do not log serialization failues as errors
+		truncateLargeSQL(data)
+
+		// log cancellation and serialization errors at debug level
 		if errArg, ok := data["err"]; ok {
 			err, ok := errArg.(error)
-			if ok && errors.Is(err, context.Canceled) {
-				return
-			}
-
-			var pgerr *pgconn.PgError
-			if errors.As(err, &pgerr) && pgerr.SQLState() == pgSerializationFailure {
+			if ok && (IsCancellationError(err) || IsSerializationError(err)) {
+				logger.Log(ctx, tracelog.LogLevelDebug, msg, data)
 				return
 			}
 		}
@@ -109,6 +107,57 @@ func ConfigurePGXLogger(connConfig *pgx.ConnConfig) {
 	addTracer(connConfig, &tracelog.TraceLog{Logger: levelMappingFn(l), LogLevel: tracelog.LogLevelInfo})
 }
 
+// truncateLargeSQL takes arguments of a SQL statement provided via pgx's tracelog.LoggerFunc and
+// replaces SQL statements and SQL arguments with placeholders when the statements and/or arguments
+// exceed a certain length. This helps de-clutter logs when statements have hundreds to thousands of placeholders.
+// The change is done in place.
+func truncateLargeSQL(data map[string]any) {
+	const (
+		maxSQLLen     = 350
+		maxSQLArgsLen = 50
+	)
+
+	if sqlData, ok := data["sql"]; ok {
+		sqlString, ok := sqlData.(string)
+		if ok && len(sqlString) > maxSQLLen {
+			data["sql"] = sqlString[:maxSQLLen] + "..."
+		}
+	}
+	if argsData, ok := data["args"]; ok {
+		argsSlice, ok := argsData.([]any)
+		if ok && len(argsSlice) > maxSQLArgsLen {
+			data["args"] = argsSlice[:maxSQLArgsLen]
+		}
+	}
+}
+
+// IsCancellationError determines if an error returned by pgx has been caused by context cancellation.
+func IsCancellationError(err error) bool {
+	if errors.Is(err, context.Canceled) ||
+		errors.Is(err, context.DeadlineExceeded) ||
+		err.Error() == "conn closed" { // conns are sometimes closed async upon cancellation
+		return true
+	}
+	return false
+}
+
+func IsSerializationError(err error) bool {
+	var pgerr *pgconn.PgError
+	if errors.As(err, &pgerr) &&
+		// We need to check unique constraint here because some versions of postgres have an error where
+		// unique constraint violations are raised instead of serialization errors.
+		// (e.g. https://www.postgresql.org/message-id/flat/CAGPCyEZG76zjv7S31v_xPeLNRuzj-m%3DY2GOY7PEzu7vhB%3DyQog%40mail.gmail.com)
+		(pgerr.SQLState() == pgSerializationFailure || pgerr.SQLState() == pgUniqueConstraintViolation || pgerr.SQLState() == pgTransactionAborted) {
+		return true
+	}
+
+	if errors.Is(err, pgx.ErrTxCommitRollback) {
+		return true
+	}
+
+	return false
+}
+
 // ConfigureOTELTracer adds OTEL tracing to a pgx.ConnConfig
 func ConfigureOTELTracer(connConfig *pgx.ConnConfig) {
 	addTracer(connConfig, otelpgx.NewTracer(otelpgx.WithTrimSQLInSpanName()))
 }
@@ -233,3 +282,14 @@ func (t *QuerierFuncs) QueryRowFunc(ctx context.Context, rowFunc func(ctx contex
 func QuerierFuncsFor(d Querier) DBFuncQuerier {
 	return &QuerierFuncs{d: d}
 }
+
+// SleepOnErr sleeps for a short period of time after an error has occurred.
+func SleepOnErr(ctx context.Context, err error, retries uint8) {
+	after := retry.BackoffExponentialWithJitter(25*time.Millisecond, 0.5)(ctx, uint(retries+1)) // add one so we always wait at least a little bit
+	log.Ctx(ctx).Debug().Err(err).Dur("after", after).Uint8("retry", retries+1).Msg("retrying on database error")
+
+	select {
+	case <-time.After(after):
+	case <-ctx.Done():
+	}
+}
diff --git a/internal/datastore/postgres/gc.go b/internal/datastore/postgres/gc.go
index e105f6bdf4..3f217a3aaa 100644
--- a/internal/datastore/postgres/gc.go
+++ b/internal/datastore/postgres/gc.go
@@ -66,11 +66,12 @@ func (pgd *pgDatastore) TxIDBefore(ctx context.Context, before time.Time) (datas
 	return postgresRevision{snapshot}, nil
 }
 
-func (pgd *pgDatastore) DeleteBeforeTx(ctx context.Context, txID datastore.Revision) (removed common.DeletionCounts, err error) {
+func (pgd *pgDatastore) DeleteBeforeTx(ctx context.Context, txID datastore.Revision) (common.DeletionCounts, error) {
 	revision := txID.(postgresRevision)
 
 	minTxAlive := newXid8(revision.snapshot.xmin)
-
+	removed := common.DeletionCounts{}
+	var err error
 	// Delete any relationship rows that were already dead when this transaction started
 	removed.Relationships, err = pgd.batchDelete(
 		ctx,
@@ -79,7 +80,7 @@ func (pgd *pgDatastore) DeleteBeforeTx(ctx context.Context, txID datastore.Revis
 		sq.Lt{colDeletedXid: minTxAlive},
 	)
 	if err != nil {
-		return
+		return removed, fmt.Errorf("failed to GC relationships table: %w", err)
 	}
 
 	// Delete all transaction rows with ID < the transaction ID.
@@ -93,7 +94,7 @@ func (pgd *pgDatastore) DeleteBeforeTx(ctx context.Context, txID datastore.Revis
 		sq.Lt{colXID: minTxAlive},
 	)
 	if err != nil {
-		return
+		return removed, fmt.Errorf("failed to GC transactions table: %w", err)
 	}
 
 	// Delete any namespace rows with deleted_transaction <= the transaction ID.
@@ -104,10 +105,10 @@ func (pgd *pgDatastore) DeleteBeforeTx(ctx context.Context, txID datastore.Revis
 		sq.Lt{colDeletedXid: minTxAlive},
 	)
 	if err != nil {
-		return
+		return removed, fmt.Errorf("failed to GC namespaces table: %w", err)
 	}
 
-	return
+	return removed, err
 }
 
 func (pgd *pgDatastore) batchDelete(
@@ -116,7 +117,7 @@ func (pgd *pgDatastore) batchDelete(
 	pkCols []string,
 	filter sqlFilter,
 ) (int64, error) {
-	sql, args, err := psql.Select(pkCols...).From(tableName).Where(filter).Limit(batchDeleteSize).ToSql()
+	sql, args, err := psql.Select(pkCols...).From(tableName).Where(filter).Limit(gcBatchDeleteSize).ToSql()
 	if err != nil {
 		return -1, err
 	}
@@ -137,7 +138,7 @@ func (pgd *pgDatastore) batchDelete(
 		rowsDeleted := cr.RowsAffected()
 		deletedCount += rowsDeleted
-		if rowsDeleted < batchDeleteSize {
+		if rowsDeleted < gcBatchDeleteSize {
 			break
 		}
 	}
diff --git a/internal/datastore/postgres/migrations/zz_migration.0017_add_index_tuple_alive_by_resource_rel_subject.go b/internal/datastore/postgres/migrations/zz_migration.0017_add_index_tuple_alive_by_resource_rel_subject.go
new file mode 100644
index 0000000000..56c352d87a
--- /dev/null
+++ b/internal/datastore/postgres/migrations/zz_migration.0017_add_index_tuple_alive_by_resource_rel_subject.go
@@ -0,0 +1,27 @@
+package migrations
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/jackc/pgx/v5"
+)
+
+const createAliveRelByResourceRelationSubjectIndex = `CREATE INDEX CONCURRENTLY
+	IF NOT EXISTS ix_relation_tuple_alive_by_resource_rel_subject_covering
+	ON relation_tuple (namespace, relation, userset_namespace)
+	INCLUDE (userset_object_id, userset_relation, caveat_name, caveat_context)
+	WHERE deleted_xid = '9223372036854775807'::xid8;`
+
+func init() {
+	if err := DatabaseMigrations.Register("add-rel-by-alive-resource-relation-subject", "add-tuned-gc-index",
+		func(ctx context.Context, conn *pgx.Conn) error {
+			if _, err := conn.Exec(ctx, createAliveRelByResourceRelationSubjectIndex); err != nil {
+				return fmt.Errorf("failed to create index for alive relationships by resource/relation/subject: %w", err)
+			}
+			return nil
+		},
+		noTxMigration); err != nil {
+		panic("failed to register migration: " + err.Error())
+	}
+}
diff --git a/internal/datastore/postgres/postgres.go b/internal/datastore/postgres/postgres.go
index 08ae8bdbbe..5ed8917db9 100644
--- a/internal/datastore/postgres/postgres.go
+++ b/internal/datastore/postgres/postgres.go
@@ -69,10 +69,7 @@ const (
 	tracingDriverName = "postgres-tracing"
 
-	batchDeleteSize = 1000
-
-	pgSerializationFailure      = "40001"
-	pgUniqueConstraintViolation = "23505"
+	gcBatchDeleteSize = 1000
 
 	livingTupleConstraint = "uq_relation_tuple_living_xid"
 )
@@ -328,7 +325,7 @@ func (pgd *pgDatastore) ReadWriteTx(
 	for i := uint8(0); i <= pgd.maxRetries; i++ {
 		var newXID xid8
 		var newSnapshot pgSnapshot
-		err = pgx.BeginTxFunc(ctx, pgd.writePool, pgx.TxOptions{IsoLevel: pgx.Serializable}, func(tx pgx.Tx) error {
+		err = wrapError(pgx.BeginTxFunc(ctx, pgd.writePool, pgx.TxOptions{IsoLevel: pgx.Serializable}, func(tx pgx.Tx) error {
 			var err error
 			newXID, newSnapshot, err = createNewTransaction(ctx, tx)
 			if err != nil {
@@ -351,22 +348,50 @@ func (pgd *pgDatastore) ReadWriteTx(
 			}
 
 			return fn(rwt)
-		})
+		}))
+
 		if err != nil {
 			if !config.DisableRetries && errorRetryable(err) {
+				pgxcommon.SleepOnErr(ctx, err, i)
 				continue
 			}
+
 			return datastore.NoRevision, err
 		}
+		if i > 0 {
+			log.Debug().Uint8("retries", i).Msg("transaction succeeded after retry")
+		}
+
 		return postgresRevision{newSnapshot.markComplete(newXID.Uint64)}, nil
 	}
+
 	if !config.DisableRetries {
 		err = fmt.Errorf("max retries exceeded: %w", err)
 	}
+
 	return datastore.NoRevision, err
 }
 
+func wrapError(err error) error {
+	if pgxcommon.IsSerializationError(err) {
+		return common.NewSerializationError(err)
+	}
+
+	// hack: pgx asyncClose usually happens after cancellation,
+	// but the reason for it being closed is not propagated
+	// and all we get is attempting to perform an operation
+	// on cancelled connection. This keeps the same error,
+	// but wrapped along a cancellation so that:
+	// - pgx logger does not log it
+	// - response is sent as canceled back to the client
+	if err != nil && err.Error() == "conn closed" {
+		return errors.Join(err, context.Canceled)
+	}
+
+	return err
+}
+
 func (pgd *pgDatastore) Close() error {
 	pgd.cancelGc()
@@ -381,16 +406,20 @@ func (pgd *pgDatastore) Close() error {
 }
 
 func errorRetryable(err error) bool {
-	var pgerr *pgconn.PgError
-	if !errors.As(err, &pgerr) {
-		log.Debug().Err(err).Msg("couldn't determine a sqlstate error code")
+	if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
 		return false
 	}
 
-	// We need to check unique constraint here because some versions of postgres have an error where
-	// unique constraint violations are raised instead of serialization errors.
-	// (e.g. https://www.postgresql.org/message-id/flat/CAGPCyEZG76zjv7S31v_xPeLNRuzj-m%3DY2GOY7PEzu7vhB%3DyQog%40mail.gmail.com)
-	return pgerr.SQLState() == pgSerializationFailure || pgerr.SQLState() == pgUniqueConstraintViolation
+	if pgconn.SafeToRetry(err) {
+		return true
+	}
+
+	if pgxcommon.IsSerializationError(err) {
+		return true
+	}
+
+	log.Warn().Err(err).Msg("unable to determine if pgx error is retryable")
+	return false
 }
 
 func (pgd *pgDatastore) ReadyState(ctx context.Context) (datastore.ReadyState, error) {
diff --git a/internal/datastore/postgres/postgres_test.go b/internal/datastore/postgres/postgres_test.go
index ad00fbde38..891073d026 100644
--- a/internal/datastore/postgres/postgres_test.go
+++ b/internal/datastore/postgres/postgres_test.go
@@ -34,6 +34,8 @@ import (
 	"github.com/authzed/spicedb/pkg/tuple"
 )
 
+const pgSerializationFailure = "40001"
+
 // Implement the TestableDatastore interface
 func (pgd *pgDatastore) ExampleRetryableError() error {
 	return &pgconn.PgError{
diff --git a/tools/analyzers/go.work.sum b/tools/analyzers/go.work.sum
index 6e5bfc15f8..f6297e6d2a 100644
--- a/tools/analyzers/go.work.sum
+++ b/tools/analyzers/go.work.sum
@@ -1,3 +1,5 @@
+buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.31.0-20230802163732-1c33ebd9ecfa.1 h1:tdpHgTbmbvEIARu+bixzmleMi14+3imnpoFXz+Qzjp4=
+buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.31.0-20230802163732-1c33ebd9ecfa.1/go.mod h1:xafc+XIsTxTy76GJQ1TKgvJWsSugFBqMaN27WhUblew=
 cloud.google.com/go/accessapproval v1.7.1 h1:/5YjNhR6lzCvmJZAnByYkfEgWjfAKwYP6nkuTk6nKFE=
 cloud.google.com/go/accessapproval v1.7.1/go.mod h1:JYczztsHRMK7NTXb6Xw+dwbs/WnOJxbo/2mTI+Kgg68=
 cloud.google.com/go/accesscontextmanager v1.8.1 h1:WIAt9lW9AXtqw/bnvrEUaE8VG/7bAAeMzRCBGMkc4+w=
@@ -232,6 +234,8 @@ dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9 h1:VpgP7xuJadIUu
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802 h1:1BDTz0u9nC3//pOCMdNH+CiXJVYJh5UQNCOBG7jbELc=
 github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46 h1:lsxEuwrXEAokXB9qhlbKWPpo3KMLZQ5WB5WLQRW1uq0=
 github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
+github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
+github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
 github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
 github.com/alecthomas/kingpin/v2 v2.3.2 h1:H0aULhgmSzN8xQ3nX1uxtdlTHYoPLu5AhHxWrKI6ocU=
 github.com/alecthomas/kingpin/v2 v2.3.2/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE=
@@ -247,8 +251,12 @@ github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4
 github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
 github.com/authzed/authzed-go v0.9.1-0.20230727112349-acfb99d1263b/go.mod h1:9Pl5jDQJHrjbMDuCrsa+Q6Tqmi1f2pDdIn/qNGI++vA=
 github.com/authzed/cel-go v0.17.3/go.mod h1:XL/zEq5hKGVF8aOdMbG7w+BQPihLjY2W8N+UIygDA2I=
+github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
+github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
 github.com/bradleyfalzon/ghinstallation/v2 v2.0.4 h1:tXKVfhE7FcSkhkv0UwkLvPDeZ4kz6OXd0PKPlFqf81M=
 github.com/bradleyfalzon/ghinstallation/v2 v2.0.4/go.mod h1:B40qPqJxWE0jDZgOR1JmaMy+4AY1eBP+IByOvqyAKp0=
+github.com/bufbuild/protovalidate-go v0.2.1 h1:pJr07sYhliyfj/STAM7hU4J3FKpVeLVKvOBmOTN8j+s=
+github.com/bufbuild/protovalidate-go v0.2.1/go.mod h1:e7XXDtlxj5vlEyAgsrxpzayp4cEMKCSSb8ZCkin+MVA=
 github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs=
 github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
 github.com/checkpoint-restore/go-criu/v5 v5.3.0 h1:wpFFOoomK3389ue2lAb0Boag6XPht5QYpipxmSNL4d8=
@@ -264,6 +272,8 @@ github.com/containerd/console v1.0.3 h1:lIr7SlA5PxZyMV30bDW0MGbiOPXwc63yRuCP0ARu
 github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
 github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
 github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
+github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
 github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
 github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
 github.com/cristalhq/acmd v0.11.1 h1:DJ4fh2Pv0nPKmqT646IU/0Vh5FNdGblxvF+3/W3NAUI=
@@ -290,6 +300,8 @@ github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzw
 github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
 github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
 github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
+github.com/google/cel-go v0.17.1 h1:s2151PDGy/eqpCI80/8dl4VL3xTkqI/YubXLXCFw0mw=
+github.com/google/cel-go v0.17.1/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY=
 github.com/google/go-github/v41 v41.0.0 h1:HseJrM2JFf2vfiZJ8anY2hqBjdfY1Vlj/K27ueww4gg=
 github.com/google/go-github/v41 v41.0.0/go.mod h1:XgmCA5H323A9rtgExdTcnDkcqp6S30AVACCBDOonIxg=
 github.com/google/go-pkcs11 v0.2.0 h1:5meDPB26aJ98f+K9G21f0AqZwo/S5BJMJh8nuhMbdsI=
@@ -438,8 +450,16 @@ google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0 h1:M1YKkFIboKNieVO5DLUEVzQf
 gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
 gopkg.in/errgo.v2 v2.1.0 h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8=
 gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce h1:xcEWjVhvbDy+nHP67nPDDpbYrY+ILlfndk4bRioVHaU=
+gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
+gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
+k8s.io/apiserver v0.28.1 h1:dw2/NKauDZCnOUAzIo2hFhtBRUo6gQK832NV8kuDbGM=
+k8s.io/apiserver v0.28.1/go.mod h1:d8aizlSRB6yRgJ6PKfDkdwCy2DXt/d1FDR6iJN9kY1w=
 k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c h1:GohjlNKauSai7gN4wsJkeZ3WAJx4Sh+oT/b5IYn5suA=
 k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
+k8s.io/kms v0.28.1 h1:QLNTIc0k7Yebkt9yobj9Y9qBoRCMB4dq+pFCxVXVBnY=
+k8s.io/kms v0.28.1/go.mod h1:I2TwA8oerDRInHWWBOqSUzv1EJDC1+55FQKYkxaPxh0=
 rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE=
 rsc.io/quote/v3 v3.1.0 h1:9JKUTTIUgS6kzR9mK1YuGKv6Nl+DijDNIc0ghT58FaY=
 rsc.io/sampler v1.3.0 h1:7uVkIFmeBqHfdjD+gZwtXXI+RODJ2Wc4O7MPEh/QiW4=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 h1:trsWhjU5jZrx6UvFu4WzQDrN7Pga4a7Qg+zcfcj64PA=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2/go.mod h1:+qG7ISXqCDVVcyO8hLn12AKVYYUjM7ftlqsqmrhMZE0=
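
Background for reviewers: the retry classification in this change keys off three SQLSTATEs — 40001 (serialization_failure), 23505 (unique_violation, which some Postgres versions raise in place of a serialization error), and 25P02 (in_failed_sql_transaction). Below is a minimal, self-contained sketch of that classification; isRetryableSerializationFailure is a hypothetical stand-in for the new IsSerializationError helper, which additionally treats pgx.ErrTxCommitRollback as retryable.

// Sketch only; not the SpiceDB helper itself.
package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/jackc/pgx/v5/pgconn"
)

// isRetryableSerializationFailure mirrors the SQLSTATE checks named in the diff.
func isRetryableSerializationFailure(err error) bool {
	const (
		pgSerializationFailure      = "40001"
		pgUniqueConstraintViolation = "23505" // some Postgres versions raise this instead of 40001
		pgTransactionAborted        = "25P02"
	)

	var pgerr *pgconn.PgError
	if !errors.As(err, &pgerr) {
		return false
	}
	state := pgerr.SQLState()
	return state == pgSerializationFailure ||
		state == pgUniqueConstraintViolation ||
		state == pgTransactionAborted
}

func main() {
	serr := &pgconn.PgError{Code: "40001", Message: "restart transaction"}
	fmt.Println(isRetryableSerializationFailure(serr))             // true: retry the transaction
	fmt.Println(isRetryableSerializationFailure(context.Canceled)) // false: surface to the caller
}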
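The new common.SerializationError surfaces to gRPC clients as codes.Aborted with an ErrorReason_ERROR_REASON_SERIALIZATION_FAILURE detail. The sketch below shows only the GRPCStatus() mechanism in isolation; it is a simplified illustration, not the SpiceDB type — the real implementation builds its status via spiceerrors.WithCodeAndDetails and attaches the reason detail.

// Sketch only; simplified from common.SerializationError in this diff.
package main

import (
	"errors"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// serializationError wraps the underlying error and reports codes.Aborted via GRPCStatus().
type serializationError struct{ error }

// GRPCStatus is honored by google.golang.org/grpc/status.FromError.
func (e serializationError) GRPCStatus() *status.Status {
	return status.New(codes.Aborted, e.Error())
}

func (e serializationError) Unwrap() error { return e.error }

func main() {
	err := serializationError{errors.New("restart transaction (SQLSTATE 40001)")}
	st, _ := status.FromError(err)
	fmt.Println(st.Code())    // Aborted
	fmt.Println(st.Message()) // restart transaction (SQLSTATE 40001)
}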
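SleepOnErr now lives in internal/datastore/postgres/common, starts from a 25ms scalar, and waits inside a select so a cancelled context cuts the backoff short. The sketch below reproduces that shape with hand-rolled backoff math as an assumption — the real helper delegates the duration to retry.BackoffExponentialWithJitter from go-grpc-middleware — and backoff/sleepOnErr are illustrative names, not the repository's API.

// Sketch only; assumes 25ms * 2^attempt with roughly ±50% jitter.
package main

import (
	"context"
	"fmt"
	"math/rand"
	"time"
)

// backoff returns an exponentially growing, jittered wait.
func backoff(attempt uint) time.Duration {
	base := 25 * time.Millisecond * time.Duration(1<<attempt)
	jitter := time.Duration(rand.Int63n(int64(base))) - base/2
	return base + jitter
}

// sleepOnErr waits out the backoff unless ctx is cancelled first.
func sleepOnErr(ctx context.Context, attempt uint) {
	select {
	case <-time.After(backoff(attempt + 1)): // +1 so the first retry still waits a little
	case <-ctx.Done():
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
	defer cancel()
	for attempt := uint(0); attempt < 5; attempt++ {
		fmt.Println("retrying, attempt", attempt)
		sleepOnErr(ctx, attempt)
		if ctx.Err() != nil {
			fmt.Println("giving up:", ctx.Err())
			return
		}
	}
}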