Add better error messaging and tests for memdb serialization error #1547

Merged · 1 commit · Sep 26, 2023
internal/datastore/memdb/errors.go (37 additions, 0 deletions)
@@ -0,0 +1,37 @@
package memdb

import (
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"

	v1 "github.com/authzed/authzed-go/proto/authzed/api/v1"

	"github.com/authzed/spicedb/pkg/spiceerrors"
)

// ErrSerializationMaxRetriesReached occurs when a write request has reached its maximum number
// of retries due to serialization errors.
type ErrSerializationMaxRetriesReached struct {
	error
}

// NewSerializationMaxRetriesReachedErr constructs a new max retries reached error.
func NewSerializationMaxRetriesReachedErr(baseErr error) error {
	return ErrSerializationMaxRetriesReached{
		error: baseErr,
	}
}

// GRPCStatus implements retrieving the gRPC status for the error.
func (err ErrSerializationMaxRetriesReached) GRPCStatus() *status.Status {
	return spiceerrors.WithCodeAndDetails(
		err,
		codes.DeadlineExceeded,
		spiceerrors.ForReason(
			v1.ErrorReason_ERROR_REASON_UNSPECIFIED,
			map[string]string{
				"details": "too many updates were made to the in-memory datastore at once; this datastore has limited write throughput capability",
			},
		),
	)
}
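
For readers wiring this up on the consuming side, the sketch below shows one way a gRPC caller could surface the attached explanation. It is illustrative only, not part of this change: it assumes the reason/metadata pair built by spiceerrors.ForReason arrives as a standard google.rpc ErrorInfo detail, and the package and helper names are hypothetical.

package example

import (
	"fmt"

	"google.golang.org/genproto/googleapis/rpc/errdetails"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// describeSerializationError (hypothetical) unpacks the status produced by
// GRPCStatus above and returns the human-readable "details" metadata, if present.
func describeSerializationError(err error) string {
	if err == nil {
		return ""
	}
	st, ok := status.FromError(err)
	if !ok || st.Code() != codes.DeadlineExceeded {
		return err.Error()
	}
	for _, detail := range st.Details() {
		// Assumption: the detail is packed as a google.rpc ErrorInfo message.
		if info, ok := detail.(*errdetails.ErrorInfo); ok {
			if msg, found := info.Metadata["details"]; found {
				return fmt.Sprintf("%s: %s", st.Message(), msg)
			}
		}
	}
	return st.Message()
}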
internal/datastore/memdb/memdb.go (1 addition, 1 deletion)
@@ -248,7 +248,7 @@ func (mdb *memdbDatastore) ReadWriteTx(
		return newRevision, nil
	}

-	return datastore.NoRevision, errors.New("serialization max retries exceeded")
+	return datastore.NoRevision, NewSerializationMaxRetriesReachedErr(errors.New("serialization max retries exceeded; please reduce your parallel writes"))
}
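
Code embedding the datastore can now react to the typed error instead of matching on the message string. A minimal sketch, assuming it lives inside the spicedb module (the memdb package is internal and not importable elsewhere) and that the attempt count and sleep durations are arbitrary illustrative values:

package example

import (
	"context"
	"errors"
	"time"

	"github.com/authzed/spicedb/internal/datastore/memdb"
	"github.com/authzed/spicedb/pkg/datastore"
)

// writeWithBackoff (hypothetical) re-runs a transaction a few extra times, pausing
// between attempts, when memdb reports that its internal serialization retries
// were exhausted; any other error is returned immediately.
func writeWithBackoff(ctx context.Context, ds datastore.Datastore, fn func(rwt datastore.ReadWriteTransaction) error) (datastore.Revision, error) {
	var lastErr error
	for attempt := 0; attempt < 5; attempt++ {
		rev, err := ds.ReadWriteTx(ctx, fn)
		if err == nil {
			return rev, nil
		}
		if !errors.As(err, &memdb.ErrSerializationMaxRetriesReached{}) {
			return datastore.NoRevision, err
		}
		lastErr = err
		time.Sleep(time.Duration(attempt+1) * 50 * time.Millisecond)
	}
	return datastore.NoRevision, lastErr
}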

func (mdb *memdbDatastore) ReadyState(_ context.Context) (datastore.ReadyState, error) {
internal/datastore/memdb/memdb_test.go (36 additions, 0 deletions)
@@ -12,9 +12,11 @@ import (
"golang.org/x/sync/errgroup"

"github.com/authzed/spicedb/pkg/datastore"
"github.com/authzed/spicedb/pkg/datastore/options"
test "github.com/authzed/spicedb/pkg/datastore/test"
ns "github.com/authzed/spicedb/pkg/namespace"
corev1 "github.com/authzed/spicedb/pkg/proto/core/v1"
"github.com/authzed/spicedb/pkg/tuple"
)

type memDBTest struct{}
@@ -77,3 +79,37 @@ func TestConcurrentWritePanic(t *testing.T) {
	}, 1*time.Second, 10*time.Millisecond)
	require.ErrorIs(err, recoverErr)
}

func TestConcurrentWriteRelsError(t *testing.T) {
	require := require.New(t)

	ds, err := NewMemdbDatastore(0, 1*time.Hour, 1*time.Hour)
	require.NoError(err)

	ctx := context.Background()

	// Kick off a number of writes to ensure at least one hits an error.
	g := errgroup.Group{}

	for i := 0; i < 50; i++ {
		i := i
		g.Go(func() error {
			_, err := ds.ReadWriteTx(ctx, func(rwt datastore.ReadWriteTransaction) error {
				updates := []*corev1.RelationTupleUpdate{}
				for j := 0; j < 500; j++ {
					updates = append(updates, &corev1.RelationTupleUpdate{
						Operation: corev1.RelationTupleUpdate_TOUCH,
						Tuple:     tuple.MustParse(fmt.Sprintf("document:doc-%d-%d#viewer@user:tom", i, j)),
					})
				}

				return rwt.WriteRelationships(ctx, updates)
			}, options.WithDisableRetries(true))
			return err
		})
	}

	werr := g.Wait()
	require.Error(werr)
	require.ErrorContains(werr, "serialization max retries exceeded")
}
internal/services/v1/relationships_test.go (36 additions, 0 deletions)
@@ -12,6 +12,7 @@ import (
v1 "github.com/authzed/authzed-go/proto/authzed/api/v1"
"github.com/authzed/grpcutil"
"github.com/stretchr/testify/require"
"golang.org/x/sync/errgroup"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
@@ -1462,3 +1463,38 @@ func standardTuplesWithout(without map[string]struct{}) map[string]struct{} {
	}
	return out
}

func TestManyConcurrentWriteRelationshipsReturnsSerializationErrorOnMemdb(t *testing.T) {
	require := require.New(t)

	conn, cleanup, _, _ := testserver.NewTestServer(require, 0, memdb.DisableGC, true, tf.StandardDatastoreWithData)
	client := v1.NewPermissionsServiceClient(conn)
	t.Cleanup(cleanup)

	// Kick off a number of writes to ensure at least one hits an error, as memdb's write throughput
	// is limited.
	g := errgroup.Group{}

	for i := 0; i < 50; i++ {
		i := i
		g.Go(func() error {
			updates := []*v1.RelationshipUpdate{}
			for j := 0; j < 500; j++ {
				updates = append(updates, &v1.RelationshipUpdate{
					Operation:    v1.RelationshipUpdate_OPERATION_CREATE,
					Relationship: tuple.MustToRelationship(tuple.MustParse(fmt.Sprintf("document:doc-%d-%d#viewer@user:tom", i, j))),
				})
			}

			_, err := client.WriteRelationships(context.Background(), &v1.WriteRelationshipsRequest{
				Updates: updates,
			})
			return err
		})
	}

	werr := g.Wait()
	require.Error(werr)
	require.ErrorContains(werr, "serialization max retries exceeded")
	grpcutil.RequireStatus(t, codes.DeadlineExceeded, werr)
}
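
At the API layer the same condition now surfaces as a DeadlineExceeded status, so a client that must issue many parallel writes against the in-memory datastore could choose to treat it as retryable. A rough sketch with the authzed-go client follows; the attempt count, backoff, and helper name are assumptions, not guidance from this change.

package example

import (
	"context"
	"time"

	v1 "github.com/authzed/authzed-go/proto/authzed/api/v1"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// writeRelationshipsWithRetry (hypothetical) retries a write a few times when the
// server answers with DeadlineExceeded, the code the memdb serialization error now carries.
func writeRelationshipsWithRetry(ctx context.Context, client v1.PermissionsServiceClient, req *v1.WriteRelationshipsRequest) (*v1.WriteRelationshipsResponse, error) {
	var lastErr error
	for attempt := 0; attempt < 5; attempt++ {
		resp, err := client.WriteRelationships(ctx, req)
		if err == nil {
			return resp, nil
		}
		if status.Code(err) != codes.DeadlineExceeded {
			return nil, err
		}
		lastErr = err
		time.Sleep(time.Duration(attempt+1) * 100 * time.Millisecond)
	}
	return nil, lastErr
}

In practice the simpler fix is the one the new error message points at: reduce write parallelism or send fewer updates per call, since the in-memory datastore has limited write throughput.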