diff --git a/CHANGELOG.md b/CHANGELOG.md
index a04644d40..f6c7d8bca 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -11,7 +11,6 @@ The `Unreleased` section name is replaced by the expected version of next releas
 ### Added
 - `Equinox`: `Decider.Transact`, `TransactAsync`, `TransactExAsync` overloads [#325](https://github.com/jet/equinox/pull/325)
-- `Equinox`: `StreamId` replaces usage of `FsCodec.StreamName` [#353](https://github.com/jet/equinox/pull/353) [#378](https://github.com/jet/equinox/pull/378)
 - `Equinox.LoadOption.RequireLeader`: support for requesting a consistent read of a stream [#341](https://github.com/jet/equinox/pull/341)
 - `Equinox.LoadOption.AllowStale`: Read mode that limits reads to a maximum of one retrieval per the defined time window [#386](https://github.com/jet/equinox/pull/386)
 - `Equinox.Core`: `Category` base class, with `Decider` and `Stream` helper `module`s [#337](https://github.com/jet/equinox/pull/337)
@@ -21,6 +20,7 @@ The `Unreleased` section name is replaced by the expected version of next releas
 - `CosmosStore.Exceptions`: Active patterns to simplify classification in the context of Propulsion handlers [#416](https://github.com/jet/equinox/pull/416)
 - `CosmosStore.Prometheus`: Add `rut` tag to enable filtering/grouping by Read vs Write activity as per `DynamoStore` [#321](https://github.com/jet/equinox/pull/321)
 - `DynamoStore`/`DynamoStore.Prometheus`: Implements the majority of the `CosmosStore` functionality via `FSharp.AWS.DynamoDB` [#321](https://github.com/jet/equinox/pull/321)
+- `EventStore`: Revise test rig to target a Docker-hosted cluster [#317](https://github.com/jet/equinox/pull/317)
 - `EventStoreDb`: As per `EventStore` module, but using the modern `EventStore.Client.Grpc.Streams` client [#196](https://github.com/jet/equinox/pull/196)
 - `MessageDb`: Implements a [message-db](http://docs.eventide-project.org/user-guide/message-db/) storage backend [#339](https://github.com/jet/equinox/pull/339) with OpenTelemetry tracing and snapshotting support [#348](https://github.com/jet/equinox/pull/348) :pray: [@nordfjord](https://github.com/nordfjord)
 - `eqx dump`: `-s` flag is now optional
@@ -31,16 +31,16 @@ The `Unreleased` section name is replaced by the expected version of next releas
 - Change surface APIs that use`'event list` or `'event seq` to `'event[]` [#411](https://github.com/jet/equinox/pull/411)
 - Raise `FSharp.Core` req to `6.0.7`, framework req to `net6.0` [#310](https://github.com/jet/equinox/pull/310) [#337](https://github.com/jet/equinox/pull/337) [#33](https://github.com/jet/equinox/pull/33) [#411](https://github.com/jet/equinox/pull/411)
 - Replace `AsyncSeq` usage with `FSharp.Control.TaskSeq` v `0.4.0` [#361](https://github.com/jet/equinox/pull/361) [#391](https://github.com/jet/equinox/pull/391)
-- `Equinox`: Push `Serilog` dependency out to `Equinox.Core` [#337](https://github.com/jet/equinox/pull/337)
+- `Equinox`: Move `Serilog` dependency from `Decider` constructor to `Category`/`Decider.forStream` [#337](https://github.com/jet/equinox/pull/337) [#419](https://github.com/jet/equinox/pull/419)
+- `Equinox`: `FsCodec.StreamId` replaces usage of `FsCodec.StreamName` [#353](https://github.com/jet/equinox/pull/353) [#378](https://github.com/jet/equinox/pull/378) [#419](https://github.com/jet/equinox/pull/419)
 - `Equinox.ResolveOption`: rename to `LoadOption` [#308](https://github.com/jet/equinox/pull/308) [#413](https://github.com/jet/equinox/pull/413)
 - `Equinox.LoadOption`: Rename `AllowStale` to `AnyCachedValue`
[#386](https://github.com/jet/equinox/pull/386) -- `Equinox.Decider`: `log` is now supplied via `Equinox.Category` [#337](https://github.com/jet/equinox/pull/337) - `Equinox.Decider`: Replace `'event list` with `'event[]` [#411](https://github.com/jet/equinox/pull/411) - `Equinox.Decider`: Replace `maxAttempts` with a default policy and an optional argument on `Transact*` APIs [#337](https://github.com/jet/equinox/pull/337) - `Equinox.Core`: push `FsCodec` dependency out to concrete stores [#337](https://github.com/jet/equinox/pull/337) - `Equinox.Core.AsyncBatchingGate`: renamed to `Batching.Batcher` [#390](https://github.com/jet/equinox/pull/390) - Stores: Change Event Body types, requiring `FsCodec` v `3.0.0`, with [`EventBody` types switching from `byte[]` to `ReadOnlyMemory` and/or `JsonElement` see FsCodec#75](https://github.com/jet/FsCodec/pull/75) [#323](https://github.com/jet/equinox/pull/323) -- Stores: `*Category.Resolve`: Replace `Resolve(sn, ?ResolveOption)` with `?load = LoadOption` parameter on all `Transact` and `Query` methods [#308](https://github.com/jet/equinox/pull/308) +- Stores: `*Category.Resolve`: Replace `Resolve(sn, ?ResolveOption, ?context)` with `?load = LoadOption` parameter on all `Transact` and `Query` methods, and `Decider.forStream`/`Decider.forContext` to convey context [#308](https://github.com/jet/equinox/pull/308) - Stores: `*Category` ctor: Add mandatory `name` argument, and `Name` property [#410](https://github.com/jet/equinox/pull/410) - Stores: `*Category` ctor: Change `caching` to be last argument, to reflect that it is applied over the top [#410](https://github.com/jet/equinox/pull/410) - Stores: `*Category` ctor: Change `caching` and `access` to be mandatory, adding `NoCaching` and `Unoptimized` modes to represent the former defaults [#417](https://github.com/jet/equinox/pull/417) @@ -62,7 +62,6 @@ The `Unreleased` section name is replaced by the expected version of next releas ### Fixed -- `EventStore`: Revise test rig to target a Docker-hosted cluster [#317](https://github.com/jet/equinox/pull/317) - `EventStore/SqlStreamStore`: rename `Equinox.XXXStore.Log.Event` -> `Metric` to match `CosmosStore` [#311](https://github.com/jet/equinox/pull/311) - `SqlStreamStore`: Fix `Metric` key to be `ssEvt` (was `esEvt`) [#311](https://github.com/jet/equinox/pull/311) diff --git a/DOCUMENTATION.md b/DOCUMENTATION.md index 56f7fda61..d54dd0469 100755 --- a/DOCUMENTATION.md +++ b/DOCUMENTATION.md @@ -315,10 +315,9 @@ highly recommended to use the following canonical skeleton layout: ```fsharp module Aggregate -(* StreamName section *) - -let [] Category = "category" -let streamId = Equinox.StreamId.gen Id.toString +module Stream = + let [] Category = "category" + let id = FsCodec.StreamId.gen Id.toString (* Optionally, Helpers/Types *) @@ -386,7 +385,7 @@ type Service internal (resolve: Id -> Equinox.Decider> Equinox.Decider.forStream Serilog.Log.Logger category) +let create category = Service(Stream.id >> Equinox.Decider.forStream Serilog.Log.Logger category) ``` - `Service`'s constructor is `internal`; `create` is the main way in which one @@ -417,12 +416,12 @@ let cacheStrategy = Equinox.CosmosStore.CachingStrategy.SlidingWindow (cache, de module EventStore = let accessStrategy = Equinox.EventStoreDb.AccessStrategy.RollingSnapshots (Fold.isOrigin, Fold.snapshot) let category (context, cache) = - Equinox.EventStore.EventStoreCategory(context, Category, Events.codec, Fold.fold, Fold.initial, cacheStrategy, accessStrategy) + 
Equinox.EventStore.EventStoreCategory(context, Stream.Category, Events.codec, Fold.fold, Fold.initial, cacheStrategy, accessStrategy) module Cosmos = let accessStrategy = Equinox.CosmosStore.AccessStrategy.Snapshot Fold.Snapshot.config let category (context, cache) = - Equinox.CosmosStore.CosmosStoreCategory(context, Category, Events.codec, Fold.fold, Fold.initial, accessStrategy, cacheStrategy) + Equinox.CosmosStore.CosmosStoreCategory(context, Stream.Category, Events.codec, Fold.fold, Fold.initial, accessStrategy, cacheStrategy) ### `MemoryStore` Storage Binding Module @@ -445,10 +444,10 @@ In F#, independent of the Store being used, the Equinox programming model involves (largely by convention, see [FAQ](README.md#FAQ)), per aggregation of events on a given category of stream: -- `Category`: the common part of the [Stream Name](https://github.com/fscodec#streamname), +- `Stream.Category`: the common part of the [Stream Name](https://github.com/fscodec#streamname), i.e., the `"Favorites"` part of the `"Favorites-clientId"` -- `streamId`: function responsible for mapping from the input elements that define the Aggregate's identity +- `Stream.id`: function responsible for mapping from the input elements that define the Aggregate's identity to the `streamId` portion of the `{categoryName}-{streamId}` StreamName that's used within the concrete store. In general, the inputs should be [strongly typed ids](https://github.com/jet/FsCodec#strongly-typed-stream-ids-using-fsharpumx) @@ -542,8 +541,9 @@ brevity, that implements all the relevant functions above: ```fsharp (* Event stream naming + schemas *) -let [] Category = "Favorites" -let streamId = Equinox.StreamId.gen ClientId.toString +module Stream = + let [] Category = "Favorites" + let id = FsCodec.StreamId.gen ClientId.toString type Item = { id: int; name: string; added: DateTimeOffset } type Event = @@ -589,7 +589,7 @@ let toSnapshot state = [| Event.Snapshotted (Array.ofList state) |] (* * The Service defines operations in business terms, neutral to any concrete * store selection or implementation supplied only a `resolve` function that can - * be used to map from ids (as supplied to the `streamId` function) to an + * be used to map from ids (as supplied to the `Stream.id` function) to an * Equinox.Decider; Typically the service should be a stateless Singleton *) @@ -613,7 +613,7 @@ type Service internal (resolve: ClientId -> Equinox.Decider> resolve Category) + Service(Stream.id >> resolve) ``` @@ -692,13 +692,13 @@ Equinox’s Command Handling consists of < 200 lines including interfaces and comments in https://github.com/jet/equinox/tree/master/src/Equinox - the elements you'll touch in a normal application are: -- [`module Impl`](https://github.com/jet/equinox/blob/master/src/Equinox/Core.fs#L33) - +- [`module Stream`](https://github.com/jet/equinox/blob/master/src/Equinox/Stream.fs#L30) - internal implementation of Optimistic Concurrency Control / retry loop used by `Decider`. It's recommended to at least scan this file as it defines the Transaction semantics that are central to Equinox and the overall `Decider` concept. 
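To make the retry loop that `module Stream` encapsulates concrete, here is a minimal conceptual sketch of the Optimistic Concurrency Control cycle that `Decider.Transact` drives. This is not the actual Equinox implementation; `load`, `trySync`, `SyncResult` and `maxAttempts` are illustrative placeholders for the real internals:

```fsharp
// Conceptual sketch of the OCC retry cycle described above (placeholder names, not Equinox.Core signatures)
type SyncResult<'state> =
    | Written of 'state   // the decided events were accepted at the expected stream version
    | Conflict of 'state  // a concurrent writer advanced the stream; carries the re-loaded state

let transact maxAttempts (load: unit -> 'state) (trySync: 'state -> 'event[] -> SyncResult<'state>) decide =
    let rec loop attempt state =
        match decide state with
        | [||] -> state                                           // decision yielded no events; nothing to sync
        | events ->
            match trySync state events with
            | Written state' -> state'                            // accepted: the loop terminates
            | Conflict state' when attempt < maxAttempts ->
                loop (attempt + 1) state'                         // conflict: re-run the decision on fresher state
            | Conflict _ -> failwith "exceeded max sync attempts" // the real Decider surfaces a dedicated exception here
    loop 1 (load ())
```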
-- [`type Decider`](https://github.com/jet/equinox/blob/master/src/Equinox/Decider.fs#L7) - +- [`type Decider`](https://github.com/jet/equinox/blob/master/src/Equinox/Decider.fs#L11) - surface API one uses to `Transact` or `Query` against a specific stream's state -- [`type LoadOption` Discriminated Union](https://github.com/jet/equinox/blob/master/src/Equinox/Decider.fs#L110) - +- [`type LoadOption` Discriminated Union](https://github.com/jet/equinox/blob/master/src/Equinox/Decider.fs#L218) - used to specify optimization overrides to be applied when a `Decider`'s `Query` or `Transact` operations establishes the state of the stream Its recommended to read the examples in conjunction with perusing the code in @@ -828,8 +828,9 @@ context #### `Decider` usage ```fsharp -let [] Category = "Favorites" -let streamId = Equinox.StreamId.gen ClientId.toString +module Stream = + let [] Category = "Favorites" + let id = FsCodec.StreamId.gen ClientId.toString type Service internal (resolve: ClientId -> Equinox.Decider) = @@ -841,7 +842,7 @@ type Service internal (resolve: ClientId -> Equinox.Decider> resolve Category) +let create resolve = Service(Stream.id >> resolve) ``` `Read` above will do a roundtrip to the Store in order to fetch the most recent @@ -1082,7 +1083,7 @@ type Service internal (resolve: ClientId -> Equinox.Decider= `3.27`, `FsCodec`, `System.Text.Json`, `FSharp.Control.TaskSeq`) - `Equinox.CosmosStore.Prometheus` [![CosmosStore.Prometheus NuGet](https://img.shields.io/nuget/v/Equinox.CosmosStore.Prometheus.svg)](https://www.nuget.org/packages/Equinox.CosmosStore.Prometheus/): Integration package providing a `Serilog.Core.ILogEventSink` that extracts detailed metrics information attached to the `LogEvent`s and feeds them to the `prometheus-net`'s `Prometheus.Metrics` static instance. ([depends](https://www.fuget.org/packages/Equinox.CosmosStore.Prometheus) on `Equinox.CosmosStore`, `prometheus-net >= 3.6.0`) @@ -875,7 +875,7 @@ Ouch, not looking forward to reading all that logic :frown: ? [Have a read, it's > I'm having some trouble understanding how Equinox+ESDB handles "expected version". Most of the examples use `Equinox.Decider.Transact` which is storage agnostic and doesn't offer any obvious concurrency checking. In `Equinox.EventStore.Context`, there's a `Sync` that takes a `Token` which holds a `streamVersion`. Should I be be using that instead of `Transact`? -The bulk of the implementation is in [`Equinox/Decider.fs`](https://github.com/jet/equinox/blob/master/src/Equinox/Decider.fs), see the `let run` function. +The bulk of the implementation is in [`Equinox/Stream.fs`](https://github.com/jet/equinox/blob/master/src/Equinox/Stream.fs#L32), see the `let run` function. There are [sequence diagrams in Documentation MD](https://github.com/jet/equinox/blob/master/DOCUMENTATION.md#code-diagrams-for-equinoxeventstore--equinoxsqlstreamstore) but I'll summarize here: @@ -958,7 +958,7 @@ As teased in both, there will hopefully eventually (but hopefully not [inevitabl #### In Equinox The Equinox `type Decider` exposes an [API that covers the needs of making Consistent Decisions against a State derived from Events on a Stream]( -https://github.com/jet/equinox/blob/master/src/Equinox/Decider.fs#L22-L56). At a high level, we have: +https://github.com/jet/equinox/blob/master/src/Equinox/Decider.fs#L11-L96). 
At a high level, we have: - `Transact*` functions - these run a decision function that may result in a change to the State, including management of the retry cycle when a consistency violation occurs during the syncing of the state with the backing store (See [Optmimistic Concurrency Control](https://en.wikipedia.org/wiki/Optimistic_concurrency_control)). Some variants can also yield an outcome to the caller after the syncing to the store has taken place. - `Query*` functions - these run a render function projecting from the State that the Decider manages (but can't mutate it or trigger changes). The concept of [CQRS](https://martinfowler.com/bliki/CQRS.html) is a consideration here - using the Decider to read state should not be a default approach (but equally should not be considered off limits). diff --git a/samples/Infrastructure/Services.fs b/samples/Infrastructure/Services.fs index 50cb6819b..ff7ed7313 100644 --- a/samples/Infrastructure/Services.fs +++ b/samples/Infrastructure/Services.fs @@ -37,21 +37,21 @@ type ServiceBuilder(storageConfig, handlerLog) = member _.CreateFavoritesService() = let fold, initial = Favorites.Fold.fold, Favorites.Fold.initial let snapshot = Favorites.Fold.Snapshot.config - store.Category(Favorites.Category, Favorites.Events.codec, fold, initial, snapshot) + store.Category(Favorites.Stream.Category, Favorites.Events.codec, fold, initial, snapshot) |> Decider.forStream handlerLog |> Favorites.create member _.CreateSaveForLaterService() = let fold, initial = SavedForLater.Fold.fold, SavedForLater.Fold.initial - let snapshot = SavedForLater.Fold.isOrigin, SavedForLater.Fold.compact - store.Category(SavedForLater.Category, SavedForLater.Events.codec, fold, initial, snapshot) + let snapshot = SavedForLater.Fold.Snapshot.config + store.Category(SavedForLater.Stream.Category, SavedForLater.Events.codec, fold, initial, snapshot) |> Decider.forStream handlerLog |> SavedForLater.create 50 member _.CreateTodosService() = let fold, initial = TodoBackend.Fold.fold, TodoBackend.Fold.initial let snapshot = TodoBackend.Fold.Snapshot.config - store.Category(TodoBackend.Category, TodoBackend.Events.codec, fold, initial, snapshot) + store.Category(TodoBackend.Stream.Category, TodoBackend.Events.codec, fold, initial, snapshot) |> Decider.forStream handlerLog |> TodoBackend.create diff --git a/samples/Infrastructure/Store.fs b/samples/Infrastructure/Store.fs index cb3445126..f438399a2 100644 --- a/samples/Infrastructure/Store.fs +++ b/samples/Infrastructure/Store.fs @@ -108,7 +108,7 @@ module Cosmos = let createClient (a : Arguments) connectionString = let connector = CosmosStoreConnector(Discovery.ConnectionString connectionString, a.Timeout, a.Retries, a.MaxRetryWaitTime, ?mode=a.Mode) connector.CreateUninitialized() - let connect (log : ILogger) (a : Arguments) = + let connect (log: ILogger) (a: Arguments) = let primaryClient, primaryDatabase, primaryContainer as primary = createClient a a.Connection, a.Database, a.Container logContainer log "Primary" (a.Mode, primaryClient.Endpoint, primaryDatabase, primaryContainer) let archive = @@ -119,15 +119,17 @@ module Cosmos = archive |> Option.iter (fun (client, db, container) -> logContainer log "Archive" (a.Mode, client.Endpoint, db, container)) primary, archive let config (log : ILogger) (cache, unfolds) (a : Arguments) = - let connection = + let context = match connect log a with | (client, databaseId, containerId), None -> - CosmosStoreClient(client, databaseId, containerId) + let c = CosmosStoreClient client + 
CosmosStoreContext(c, databaseId, containerId, a.TipMaxEvents, queryMaxItems = a.QueryMaxItems, tipMaxJsonLength = a.TipMaxJsonLength) | (client, databaseId, containerId), Some (aClient, aDatabaseId, aContainerId) -> - CosmosStoreClient(client, databaseId, containerId, archiveClient = aClient, archiveDatabaseId = aDatabaseId, archiveContainerId = aContainerId) + let c = CosmosStoreClient(client, aClient) + CosmosStoreContext(c, databaseId, containerId, a.TipMaxEvents, queryMaxItems = a.QueryMaxItems, tipMaxJsonLength = a.TipMaxJsonLength, + archiveDatabaseId = aDatabaseId, archiveContainerId = aContainerId) log.Information("CosmosStore Max Events in Tip: {maxTipEvents}e {maxTipJsonLength}b Items in Query: {queryMaxItems}", a.TipMaxEvents, a.TipMaxJsonLength, a.QueryMaxItems) - let context = CosmosStoreContext(connection, a.TipMaxEvents, queryMaxItems = a.QueryMaxItems, tipMaxJsonLength = a.TipMaxJsonLength) let cacheStrategy = match cache with Some c -> CachingStrategy.SlidingWindow (c, TimeSpan.FromMinutes 20.) | None -> CachingStrategy.NoCaching Context.Cosmos (context, cacheStrategy, unfolds) @@ -135,18 +137,14 @@ module Dynamo = type Equinox.DynamoStore.DynamoStoreConnector with - member x.LogConfiguration(log : ILogger) = + member x.LogConfiguration(log: ILogger) = log.Information("DynamoStore {endpoint} Timeout {timeoutS}s Retries {retries}", x.Endpoint, (let t = x.Timeout in t.TotalSeconds), x.Retries) - type Equinox.DynamoStore.DynamoStoreClient with - - member internal x.LogConfiguration(role, log : ILogger) = - log.Information("DynamoStore {role:l} Table {table} Archive {archive}", role, x.TableName, Option.toObj x.ArchiveTableName) - type Equinox.DynamoStore.DynamoStoreContext with - member internal x.LogConfiguration(log : ILogger) = + member internal x.LogConfiguration(role, log: ILogger) = + log.Information("DynamoStore {role:l} Table {table} Archive {archive}", role, x.TableName, Option.toObj x.ArchiveTableName) log.Information("DynamoStore Tip thresholds: {maxTipBytes}b {maxTipEvents}e Query Paging {queryMaxItems} items", x.TipOptions.MaxBytes, Option.toNullable x.TipOptions.MaxEvents, x.QueryOptions.MaxItems) @@ -212,11 +210,10 @@ module Dynamo = let config (log : ILogger) (cache, unfolds) (a : Arguments) = a.Connector.LogConfiguration(log) - let client = a.Connector.CreateClient() - let storeClient = DynamoStoreClient(client, a.Table, ?archiveTableName = a.ArchiveTable) - storeClient.LogConfiguration("Main", log) - let context = DynamoStoreContext(storeClient, maxBytes = a.TipMaxBytes, queryMaxItems = a.QueryMaxItems, ?tipMaxEvents = a.TipMaxEvents) - context.LogConfiguration(log) + let client = a.Connector.CreateDynamoDbClient() |> DynamoStoreClient + let context = DynamoStoreContext(client, a.Table, maxBytes = a.TipMaxBytes, queryMaxItems = a.QueryMaxItems, + ?tipMaxEvents = a.TipMaxEvents, ?archiveTableName = a.ArchiveTable) + context.LogConfiguration("Main", log) let cacheStrategy = match cache with Some c -> CachingStrategy.SlidingWindow (c, TimeSpan.FromMinutes 20.) 
| None -> CachingStrategy.NoCaching Context.Dynamo (context, cacheStrategy, unfolds) diff --git a/samples/Store/Domain/Cart.fs b/samples/Store/Domain/Cart.fs index 0bfe6bec2..c13fe59ea 100644 --- a/samples/Store/Domain/Cart.fs +++ b/samples/Store/Domain/Cart.fs @@ -1,7 +1,9 @@ module Domain.Cart -let [] Category = "Cart" -let streamId = Equinox.StreamId.gen CartId.toString +module Stream = + let [] Category = "Cart" + let id = FsCodec.StreamId.gen CartId.toString + let name = id >> FsCodec.StreamName.create Category // NOTE - these types and the union case names reflect the actual storage formats and hence need to be versioned with care [] @@ -159,7 +161,7 @@ type Service internal (resolve: CartId -> Equinox.Decider = @@ -176,4 +178,4 @@ type Service internal (resolve: CartId -> Equinox.Decider> resolve) + Service(Stream.id >> resolve) diff --git a/samples/Store/Domain/ContactPreferences.fs b/samples/Store/Domain/ContactPreferences.fs index 29377b86f..2aeec7f56 100644 --- a/samples/Store/Domain/ContactPreferences.fs +++ b/samples/Store/Domain/ContactPreferences.fs @@ -3,8 +3,10 @@ type ClientId = ClientId of email: string module ClientId = let toString (ClientId email) = email -let [] Category = "ContactPreferences" -let streamId = Equinox.StreamId.gen ClientId.toString // TODO hash >> base64 +module Stream = + let [] Category = "ContactPreferences" + let id = FsCodec.StreamId.gen ClientId.toString // TODO hash >> base64 + let name = id >> FsCodec.StreamName.create Category // NOTE - these types and the union case names reflect the actual storage formats and hence need to be versioned with care module Events = @@ -57,7 +59,7 @@ type Service internal (resolve: ClientId -> Equinox.Decider> resolve) + Service(Stream.id >> resolve) diff --git a/samples/Store/Domain/Domain.fsproj b/samples/Store/Domain/Domain.fsproj index fedbcff61..b750acd7f 100644 --- a/samples/Store/Domain/Domain.fsproj +++ b/samples/Store/Domain/Domain.fsproj @@ -17,8 +17,8 @@ - - + + diff --git a/samples/Store/Domain/Favorites.fs b/samples/Store/Domain/Favorites.fs index 0cd9da9e5..dd8b8b4f5 100644 --- a/samples/Store/Domain/Favorites.fs +++ b/samples/Store/Domain/Favorites.fs @@ -1,7 +1,9 @@ module Domain.Favorites -let [] Category = "Favorites" -let streamId = Equinox.StreamId.gen ClientId.toString +module Stream = + let [] Category = "Favorites" + let id = FsCodec.StreamId.gen ClientId.toString + let name = id >> FsCodec.StreamName.create Category // NOTE - these types and the union case names reflect the actual storage formats and hence need to be versioned with care module Events = @@ -83,4 +85,4 @@ type Service internal (resolve: ClientId -> Equinox.Decider (), decideUnfavorite sku c.State), fun () c -> c.Version) let create resolve = - Service(streamId >> resolve) + Service(Stream.id >> resolve) diff --git a/samples/Store/Domain/Infrastructure.fs b/samples/Store/Domain/Infrastructure.fs index 9eec285ec..71f6b07be 100644 --- a/samples/Store/Domain/Infrastructure.fs +++ b/samples/Store/Domain/Infrastructure.fs @@ -8,7 +8,7 @@ open System /// Endows any type that inherits this class with standard .NET comparison semantics using a supplied token identifier [] type Comparable<'TComp, 'Token when 'TComp :> Comparable<'TComp, 'Token> and 'Token : comparison>(token: 'Token) = - member private _.Token = token + member val private Token = token override x.Equals y = match y with :? 
Comparable<'TComp, 'Token> as y -> x.Token = y.Token | _ -> false override _.GetHashCode() = hash token interface IComparable with @@ -32,9 +32,9 @@ module Guid = /// - Guards against XSS by only permitting initialization based on Guid.Parse /// - Implements comparison/equality solely to enable tests to leverage structural equality [); System.Text.Json.Serialization.JsonConverter(typeof)>] -type SkuId private (id: string) = - inherit StringId(id) - new(value: Guid) = SkuId(value.ToString "N") +type SkuId = + inherit StringId + new(value: Guid) = { inherit StringId(value.ToString "N") } /// Required to support empty [] new() = SkuId(Guid.NewGuid()) /// Represent as a Guid.ToString("N") output externally diff --git a/samples/Store/Domain/InventoryItem.fs b/samples/Store/Domain/InventoryItem.fs index 0b847c475..404a3b27a 100644 --- a/samples/Store/Domain/InventoryItem.fs +++ b/samples/Store/Domain/InventoryItem.fs @@ -3,8 +3,9 @@ module Domain.InventoryItem open System -let [] Category = "InventoryItem" -let streamId = Equinox.StreamId.gen InventoryItemId.toString +module Stream = + let [] Category = "InventoryItem" + let id = FsCodec.StreamId.gen InventoryItemId.toString // NOTE - these types and the union case names reflect the actual storage formats and hence need to be versioned with care module Events = @@ -67,4 +68,4 @@ type Service internal (resolve: InventoryItemId -> Equinox.Decider> resolve Category) + Service(Stream.id >> resolve) diff --git a/samples/Store/Domain/SavedForLater.fs b/samples/Store/Domain/SavedForLater.fs index fd1565c0c..6c939a040 100644 --- a/samples/Store/Domain/SavedForLater.fs +++ b/samples/Store/Domain/SavedForLater.fs @@ -3,8 +3,9 @@ open System open System.Collections.Generic -let [] Category = "SavedForLater" -let streamId = Equinox.StreamId.gen ClientId.toString +module Stream = + let [] Category = "SavedForLater" + let id = FsCodec.StreamId.gen ClientId.toString // NOTE - these types and the union case names reflect the actual storage formats and hence need to be versioned with care module Events = @@ -53,6 +54,13 @@ module Fold = type State = Item [] let initial = Array.empty + + module Snapshot = + + let generate state = Compacted { items = state } + let isOrigin = function Compacted _ -> true | _ -> false + let config = isOrigin, generate + let fold (state: State) (events: seq): State = let index = InternalState state for event in events do @@ -66,8 +74,6 @@ module Fold = let proposedEventsWouldExceedLimit maxSavedItems events state = let newState = fold state events Array.length newState > maxSavedItems - let isOrigin = function Compacted _ -> true | _ -> false - let compact state = Compacted { items = state } type Command = | Merge of merges: Events.Item [] @@ -144,4 +150,4 @@ type Service internal (resolve: ClientId -> Equinox.Decider> resolve, maxSavedItems) + Service(Stream.id >> resolve, maxSavedItems) diff --git a/samples/Store/Integration/CartIntegration.fs b/samples/Store/Integration/CartIntegration.fs index 65b30dc48..810f1f763 100644 --- a/samples/Store/Integration/CartIntegration.fs +++ b/samples/Store/Integration/CartIntegration.fs @@ -11,7 +11,7 @@ let snapshot = Cart.Fold.Snapshot.config let createMemoryStore () = MemoryStore.VolatileStore>() let createServiceMemory log store = - MemoryStore.MemoryStoreCategory(store, Cart.Category, Cart.Events.codec, fold, initial) + MemoryStore.MemoryStoreCategory(store, Cart.Stream.Category, Cart.Events.codec, fold, initial) |> Decider.forStream log |> Cart.create @@ -19,14 +19,14 @@ let codec = 
Cart.Events.codec let codecJe = Cart.Events.codecJe let categoryGesStreamWithRollingSnapshots context = - EventStoreDb.EventStoreCategory(context, Cart.Category, codec, fold, initial, EventStoreDb.AccessStrategy.RollingSnapshots snapshot, CachingStrategy.NoCaching) + EventStoreDb.EventStoreCategory(context, Cart.Stream.Category, codec, fold, initial, EventStoreDb.AccessStrategy.RollingSnapshots snapshot, CachingStrategy.NoCaching) let categoryGesStreamWithoutCustomAccessStrategy context = - EventStoreDb.EventStoreCategory(context, Cart.Category, codec, fold, initial, EventStoreDb.AccessStrategy.Unoptimized, CachingStrategy.NoCaching) + EventStoreDb.EventStoreCategory(context, Cart.Stream.Category, codec, fold, initial, EventStoreDb.AccessStrategy.Unoptimized, CachingStrategy.NoCaching) let categoryCosmosStreamWithSnapshotStrategy context = - CosmosStore.CosmosStoreCategory(context, Cart.Category, codecJe, fold, initial, CosmosStore.AccessStrategy.Snapshot snapshot, CachingStrategy.NoCaching) + CosmosStore.CosmosStoreCategory(context, Cart.Stream.Category, codecJe, fold, initial, CosmosStore.AccessStrategy.Snapshot snapshot, CachingStrategy.NoCaching) let categoryCosmosStreamWithoutCustomAccessStrategy context = - CosmosStore.CosmosStoreCategory(context, Cart.Category, codecJe, fold, initial, CosmosStore.AccessStrategy.Unoptimized, CachingStrategy.NoCaching) + CosmosStore.CosmosStoreCategory(context, Cart.Stream.Category, codecJe, fold, initial, CosmosStore.AccessStrategy.Unoptimized, CachingStrategy.NoCaching) let addAndThenRemoveItemsManyTimesExceptTheLastOne context cartId skuId (service: Cart.Service) count = service.ExecuteManyAsync(cartId, false, seq { diff --git a/samples/Store/Integration/ContactPreferencesIntegration.fs b/samples/Store/Integration/ContactPreferencesIntegration.fs index b5a0fab94..95cbeee54 100644 --- a/samples/Store/Integration/ContactPreferencesIntegration.fs +++ b/samples/Store/Integration/ContactPreferencesIntegration.fs @@ -7,13 +7,13 @@ open Swensen.Unquote let fold, initial = ContactPreferences.Fold.fold, ContactPreferences.Fold.initial +let Category = ContactPreferences.Stream.Category let createMemoryStore () = MemoryStore.VolatileStore<_>() let createServiceMemory log store = - MemoryStore.MemoryStoreCategory(store, ContactPreferences.Category, FsCodec.Box.Codec.Create(), fold, initial) + MemoryStore.MemoryStoreCategory(store, Category, FsCodec.Box.Codec.Create(), fold, initial) |> Decider.forStream log |> ContactPreferences.create -let Category = ContactPreferences.Category let codec = ContactPreferences.Events.codec let codecJe = ContactPreferences.Events.codecJe let categoryGesWithOptimizedStorageSemantics context = diff --git a/samples/Store/Integration/FavoritesIntegration.fs b/samples/Store/Integration/FavoritesIntegration.fs index 82a1b0b4b..190217dbc 100644 --- a/samples/Store/Integration/FavoritesIntegration.fs +++ b/samples/Store/Integration/FavoritesIntegration.fs @@ -5,7 +5,7 @@ open Equinox open Equinox.CosmosStore.Integration.CosmosFixtures open Swensen.Unquote -let [] Category = Favorites.Category +let [] Category = Favorites.Stream.Category let fold, initial = Favorites.Fold.fold, Favorites.Fold.initial let snapshot = Favorites.Fold.Snapshot.config diff --git a/samples/TodoBackend/Todo.fs b/samples/TodoBackend/Todo.fs index 49cea809e..ff196877a 100644 --- a/samples/TodoBackend/Todo.fs +++ b/samples/TodoBackend/Todo.fs @@ -4,8 +4,9 @@ open Domain // The TodoBackend spec does not dictate having multiple lists, tenants or clients // Here, we 
implement such a discriminator in order to allow each virtual client to maintain independent state -let [] Category = "Todos" -let streamId = Equinox.StreamId.gen ClientId.toString +module Stream = + let [] Category = "Todos" + let id = FsCodec.StreamId.gen ClientId.toString // NOTE - these types and the union case names reflect the actual storage formats and hence need to be versioned with care module Events = @@ -84,4 +85,4 @@ type Service internal (resolve: ClientId -> Equinox.Decider x.id = item.id) state' } -let create resolve = Service(streamId >> resolve) +let create resolve = Service(Stream.id >> resolve) diff --git a/samples/Tutorial/AsAt.fsx b/samples/Tutorial/AsAt.fsx index 3c01cd709..87e55e852 100644 --- a/samples/Tutorial/AsAt.fsx +++ b/samples/Tutorial/AsAt.fsx @@ -41,8 +41,9 @@ open System -let [] Category = "Account" -let streamId = Equinox.StreamId.gen id +module Stream = + let [] Category = "Account" + let id = FsCodec.StreamId.gen id module Events = @@ -141,6 +142,8 @@ module Log = let [] AppName = "equinox-tutorial" let cache = Equinox.Cache(AppName, 20) +// cache so normal read pattern is to read from whatever we've built in memory +let cacheStrategy = Equinox.CachingStrategy.SlidingWindow (cache, TimeSpan.FromMinutes 20.) // OR CachingStrategy.NoCaching module EventStore = @@ -152,11 +155,9 @@ module EventStore = let esc = connector.Connect(AppName, Discovery.ConnectionString "esdb://localhost:2111,localhost:2112,localhost:2113?tls=true&tlsVerifyCert=false") let connection = EventStoreConnection(esc) let context = EventStoreContext(connection, batchSize = snapshotWindow) - // cache so normal read pattern is to read from whatever we've built in memory - let cacheStrategy = Equinox.CachingStrategy.SlidingWindow (cache, TimeSpan.FromMinutes 20.) // OR CachingStrategy.NoCaching // rig snapshots to be injected as events into the stream every `snapshotWindow` events let accessStrategy = AccessStrategy.RollingSnapshots (Fold.isValid,Fold.snapshot) - let cat = EventStoreCategory(context, Category, Events.codec, Fold.fold, Fold.initial, accessStrategy, cacheStrategy) + let cat = EventStoreCategory(context, Stream.Category, Events.codec, Fold.fold, Fold.initial, accessStrategy, cacheStrategy) let resolve = Equinox.Decider.forStream Log.log cat module Cosmos = @@ -166,15 +167,15 @@ module Cosmos = let read key = Environment.GetEnvironmentVariable key |> Option.ofObj |> Option.get let discovery = Discovery.ConnectionString (read "EQUINOX_COSMOS_CONNECTION") let connector = CosmosStoreConnector(discovery, TimeSpan.FromSeconds 5., 2, TimeSpan.FromSeconds 5., Microsoft.Azure.Cosmos.ConnectionMode.Gateway) - let storeClient = CosmosStoreClient.Connect(connector.CreateAndInitialize, read "EQUINOX_COSMOS_DATABASE", read "EQUINOX_COSMOS_CONTAINER") |> Async.RunSynchronously - let context = CosmosStoreContext(storeClient, tipMaxEvents = 10) - let cacheStrategy = Equinox.CachingStrategy.SlidingWindow (cache, TimeSpan.FromMinutes 20.) 
// OR CachingStrategy.NoCaching + let databaseId, containerId = read "EQUINOX_COSMOS_DATABASE", read "EQUINOX_COSMOS_CONTAINER" + let storeClient = CosmosStoreClient.Connect(connector.CreateAndInitialize, databaseId, containerId) |> Async.RunSynchronously + let context = CosmosStoreContext(storeClient, databaseId, containerId, tipMaxEvents = 10) let accessStrategy = AccessStrategy.Snapshot (Fold.isValid,Fold.snapshot) - let cat = CosmosStoreCategory(context, Category, Events.codecJe, Fold.fold, Fold.initial, accessStrategy, cacheStrategy) + let cat = CosmosStoreCategory(context, Stream.Category, Events.codecJe, Fold.fold, Fold.initial, accessStrategy, cacheStrategy) let resolve = Equinox.Decider.forStream Log.log cat -let service = Service(streamId >> EventStore.resolve) -//let service= Service(streamId >> Cosmos.resolve) +let service = Service(Stream.id >> EventStore.resolve) +//let service= Service(Stream.id >> Cosmos.resolve) let client = "ClientA" service.Add(client, 1) |> Async.RunSynchronously diff --git a/samples/Tutorial/Cosmos.fsx b/samples/Tutorial/Cosmos.fsx index ecab85245..56aa3d285 100644 --- a/samples/Tutorial/Cosmos.fsx +++ b/samples/Tutorial/Cosmos.fsx @@ -39,8 +39,9 @@ module Log = module Favorites = - let Category = "Favorites" - let streamId = Equinox.StreamId.gen id + module Stream = + let Category = "Favorites" + let id = FsCodec.StreamId.gen id module Events = @@ -80,7 +81,7 @@ module Favorites = let decider = resolve clientId decider.Query id - let create cat = Service(streamId >> Equinox.Decider.forStream Log.log cat) + let create cat = Service(Stream.id >> Equinox.Decider.forStream Log.log cat) module Cosmos = @@ -88,7 +89,7 @@ module Favorites = let accessStrategy = AccessStrategy.Unoptimized // Or Snapshot etc https://github.com/jet/equinox/blob/master/DOCUMENTATION.md#access-strategies let category (context, cache) = let cacheStrategy = Equinox.CachingStrategy.SlidingWindow (cache, System.TimeSpan.FromMinutes 20.) 
// OR CachingStrategy.NoCaching - CosmosStoreCategory(context, Category, Events.codec, Fold.fold, Fold.initial, accessStrategy, cacheStrategy) + CosmosStoreCategory(context, Stream.Category, Events.codec, Fold.fold, Fold.initial, accessStrategy, cacheStrategy) let [] appName = "equinox-tutorial" @@ -101,8 +102,9 @@ module Store = let discovery = Discovery.ConnectionString (read "EQUINOX_COSMOS_CONNECTION") let connector = CosmosStoreConnector(discovery, System.TimeSpan.FromSeconds 5., 2, System.TimeSpan.FromSeconds 5., Microsoft.Azure.Cosmos.ConnectionMode.Gateway) - let storeClient = CosmosStoreClient.Connect(connector.CreateAndInitialize, read "EQUINOX_COSMOS_DATABASE", read "EQUINOX_COSMOS_CONTAINER") |> Async.RunSynchronously - let context = CosmosStoreContext(storeClient, tipMaxEvents = 10) + let databaseId, containerId = read "EQUINOX_COSMOS_DATABASE", read "EQUINOX_COSMOS_CONTAINER" + let storeClient = CosmosStoreClient.Connect(connector.CreateAndInitialize, databaseId, containerId) |> Async.RunSynchronously + let context = CosmosStoreContext(storeClient, databaseId, containerId, tipMaxEvents = 10) let cache = Equinox.Cache(appName, 20) let service = Favorites.Cosmos.category (Store.context, Store.cache) |> Favorites.create diff --git a/samples/Tutorial/Counter.fsx b/samples/Tutorial/Counter.fsx index 9b9abbe02..b8f10624e 100644 --- a/samples/Tutorial/Counter.fsx +++ b/samples/Tutorial/Counter.fsx @@ -12,7 +12,6 @@ #r "FSharp.UMX.dll" #r "FsCodec.dll" #r "FsCodec.Box.dll" -#r "FsCodec.NewtonsoftJson.dll" #else #r "nuget:Equinox.MemoryStore, *-*" #r "nuget:FsCodec.Box" @@ -32,12 +31,13 @@ type Event = | Cleared of Cleared interface TypeShape.UnionContract.IUnionContract -// Events for a given DDD aggregate are considered to be in the same 'Category' for indexing purposes -// When reacting to events (using Propulsion), the Category will be a key thing to filter events based on -let [] Category = "Counter" -// Maps from an app-level counter name (perhaps a strongly typed id), to a well-formed StreamId that can be stored in the Event Store -// For this sample, we let callers just pass a string, and we trust it's suitable for use as a StreamId directly -let streamId = Equinox.StreamId.gen id +module Stream = + // Events for a given DDD aggregate are considered to be in the same 'Category' for indexing purposes + // When reacting to events (using Propulsion), the Category will be a key thing to filter events based on + let [] Category = "Counter" + // Maps from an app-level counter name (perhaps a strongly typed id), to a well-formed StreamId that can be stored in the Event Store + // For this sample, we let callers just pass a string, and we trust it's suitable for use as a StreamId directly + let id = FsCodec.StreamId.gen id type State = State of int let initial : State = State 0 @@ -99,8 +99,8 @@ let logEvents sn (events : FsCodec.ITimelineEvent<_>[]) = let store = Equinox.MemoryStore.VolatileStore() let _ = store.Committed.Subscribe(fun struct (sn, xs) -> logEvents sn xs) let codec = FsCodec.Box.Codec.Create() -let cat = Equinox.MemoryStore.MemoryStoreCategory(store, Category, codec, fold, initial) -let service = Service(streamId >> Equinox.Decider.forStream log cat) +let cat = Equinox.MemoryStore.MemoryStoreCategory(store, Stream.Category, codec, fold, initial) +let service = Service(Stream.id >> Equinox.Decider.forStream log cat) let clientId = "ClientA" service.Read(clientId) |> Async.RunSynchronously diff --git a/samples/Tutorial/Favorites.fsx b/samples/Tutorial/Favorites.fsx 
index 6fac04ede..3c11eb0a9 100644 --- a/samples/Tutorial/Favorites.fsx +++ b/samples/Tutorial/Favorites.fsx @@ -95,7 +95,7 @@ let log = LoggerConfiguration().WriteTo.Console().CreateLogger() // related streams are termed a Category; Each client will have it's own Stream. let Category = "Favorites" -let clientAFavoritesStreamId = Equinox.StreamId.gen id "ClientA" +let clientAFavoritesStreamId = FsCodec.StreamId.gen id "ClientA" // For test purposes, we use the in-memory store let store = Equinox.MemoryStore.VolatileStore() @@ -161,7 +161,7 @@ type Service(deciderFor : string -> Handler) = (* See Counter.fsx and Cosmos.fsx for a more compact representation which makes the Handler wiring less obtrusive *) let handlerFor (clientId: string) = - let streamId = Equinox.StreamId.gen id clientId + let streamId = FsCodec.StreamId.gen id clientId let decider = Equinox.Decider.forStream log cat streamId Handler(decider) diff --git a/samples/Tutorial/FulfilmentCenter.fsx b/samples/Tutorial/FulfilmentCenter.fsx index 906723d7a..f6d86797f 100644 --- a/samples/Tutorial/FulfilmentCenter.fsx +++ b/samples/Tutorial/FulfilmentCenter.fsx @@ -10,7 +10,6 @@ #r "FSharp.UMX.dll" #r "FsCodec.dll" #r "TypeShape.dll" -#r "FsCodec.NewtonsoftJson.dll" #r "FsCodec.SystemTextJson.dll" #r "Microsoft.Azure.Cosmos.Client.dll" #r "Serilog.Sinks.Seq.dll" @@ -52,8 +51,9 @@ module Types = module FulfilmentCenter = - let [] Category = "FulfilmentCenter" - let streamId = Equinox.StreamId.gen id + module Stream = + let [] Category = "FulfilmentCenter" + let id = FsCodec.StreamId.gen id module Events = @@ -137,16 +137,17 @@ module Store = let read key = Environment.GetEnvironmentVariable key |> Option.ofObj |> Option.get let appName = "equinox-tutorial" let connector = CosmosStoreConnector(Discovery.ConnectionString (read "EQUINOX_COSMOS_CONNECTION"), TimeSpan.FromSeconds 5., 2, TimeSpan.FromSeconds 5.) - let storeClient = CosmosStoreClient.Connect(connector.CreateAndInitialize, read "EQUINOX_COSMOS_DATABASE", read "EQUINOX_COSMOS_CONTAINER") |> Async.RunSynchronously - let context = CosmosStoreContext(storeClient, tipMaxEvents = 256) + let databaseId, containerId = read "EQUINOX_COSMOS_DATABASE", read "EQUINOX_COSMOS_CONTAINER" + let storeClient = CosmosStoreClient.Connect(connector.CreateAndInitialize, databaseId, containerId) |> Async.RunSynchronously + let context = CosmosStoreContext(storeClient, databaseId, containerId, tipMaxEvents = 256) let cache = Equinox.Cache(appName, 20) let cacheStrategy = Equinox.CachingStrategy.SlidingWindow (cache, TimeSpan.FromMinutes 20.) 
// OR CachingStrategy.NoCaching open FulfilmentCenter let service = - let cat = CosmosStoreCategory(Store.context, Category, Events.codec, Fold.fold, Fold.initial, AccessStrategy.Unoptimized, Store.cacheStrategy) - Service(streamId >> Equinox.Decider.forStream Log.log cat) + let cat = CosmosStoreCategory(Store.context, Stream.Category, Events.codec, Fold.fold, Fold.initial, AccessStrategy.Unoptimized, Store.cacheStrategy) + Service(Stream.id >> Equinox.Decider.forStream Log.log cat) let fc = "fc0" service.UpdateName(fc, { code="FC000"; name="Head" }) |> Async.RunSynchronously @@ -157,15 +158,16 @@ Log.dumpMetrics () /// Manages ingestion of summary events tagged with the version emitted from FulfilmentCenter.Service.QueryWithVersion module FulfilmentCenterSummary = - let [] Category = "FulfilmentCenter" - let streamId = Equinox.StreamId.gen id + module Stream = + let [] Category = "FulfilmentCenter" + let id = FsCodec.StreamId.gen id module Events = type UpdatedData = { version : int64; state : Summary } type Event = | Updated of UpdatedData interface TypeShape.UnionContract.IUnionContract - let codec = FsCodec.NewtonsoftJson.Codec.Create() + let codec = FsCodec.SystemTextJson.Codec.Create() type StateSummary = { version : int64; state : Types.Summary } type State = StateSummary option diff --git a/samples/Tutorial/Gapless.fs b/samples/Tutorial/Gapless.fs index 67c246932..f5359f451 100644 --- a/samples/Tutorial/Gapless.fs +++ b/samples/Tutorial/Gapless.fs @@ -4,8 +4,9 @@ module Gapless open System -let [] Category = "Gapless" -let streamId = Equinox.StreamId.gen SequenceId.toString +module Stream = + let [] Category = "Gapless" + let id = FsCodec.StreamId.gen SequenceId.toString // NOTE - these types and the union case names reflect the actual storage formats and hence need to be versioned with care module Events = @@ -78,14 +79,14 @@ type Service internal (resolve: SequenceId -> Equinox.Decider] appName = "equinox-tutorial-gapless" -let create cat = Service(streamId >> Equinox.Decider.forStream Serilog.Log.Logger cat) +let create cat = Service(Stream.id >> Equinox.Decider.forStream Serilog.Log.Logger cat) module Cosmos = open Equinox.CosmosStore let private category (context, cache, accessStrategy) = let cacheStrategy = Equinox.CachingStrategy.SlidingWindow (cache, TimeSpan.FromMinutes 20.) 
// OR CachingStrategy.NoCaching - CosmosStoreCategory(context, Category, Events.codec, Fold.fold, Fold.initial, accessStrategy, cacheStrategy) + CosmosStoreCategory(context, Stream.Category, Events.codec, Fold.fold, Fold.initial, accessStrategy, cacheStrategy) module Snapshot = diff --git a/samples/Tutorial/Index.fs b/samples/Tutorial/Index.fs index ad7b93fb5..88cde7276 100644 --- a/samples/Tutorial/Index.fs +++ b/samples/Tutorial/Index.fs @@ -1,7 +1,8 @@ module Index -let [] Category = "Index" -let streamId = Equinox.StreamId.gen IndexId.toString +module Stream = + let [] Category = "Index" + let id = FsCodec.StreamId.gen IndexId.toString // NOTE - these types and the union case names reflect the actual storage formats and hence need to be versioned with care module Events = @@ -46,7 +47,7 @@ type Service<'t> internal (decider : Equinox.Decider, Fold.Stat decider.Query id let create<'t> indexId cat = - Service(streamId indexId |> Equinox.Decider.forStream Serilog.Log.Logger cat) + Service(Stream.id indexId |> Equinox.Decider.forStream Serilog.Log.Logger cat) module Cosmos = @@ -54,9 +55,9 @@ module Cosmos = let category (context,cache) = let cacheStrategy = Equinox.CachingStrategy.SlidingWindow (cache, System.TimeSpan.FromMinutes 20.) let accessStrategy = AccessStrategy.RollingState Fold.snapshot - CosmosStoreCategory(context, Category, Events.codec, Fold.fold, Fold.initial, accessStrategy, cacheStrategy) + CosmosStoreCategory(context, Stream.Category, Events.codec, Fold.fold, Fold.initial, accessStrategy, cacheStrategy) module MemoryStore = let category store = - Equinox.MemoryStore.MemoryStoreCategory(store, Category, Events.codec, Fold.fold, Fold.initial) + Equinox.MemoryStore.MemoryStoreCategory(store, Stream.Category, Events.codec, Fold.fold, Fold.initial) diff --git a/samples/Tutorial/Infrastructure.fs b/samples/Tutorial/Infrastructure.fs index e12258878..3d651f81a 100644 --- a/samples/Tutorial/Infrastructure.fs +++ b/samples/Tutorial/Infrastructure.fs @@ -27,4 +27,4 @@ module EventCodec = /// For stores other than CosmosStore, we encode to UTF-8 and have the store do the right thing let gen<'t when 't :> TypeShape.UnionContract.IUnionContract> = - FsCodec.NewtonsoftJson.Codec.Create<'t>() + FsCodec.SystemTextJson.Codec.Create<'t>() diff --git a/samples/Tutorial/Sequence.fs b/samples/Tutorial/Sequence.fs index 769db083c..6af218e2f 100644 --- a/samples/Tutorial/Sequence.fs +++ b/samples/Tutorial/Sequence.fs @@ -4,8 +4,9 @@ module Sequence open System -let [] Category = "Sequence" -let streamId = Equinox.StreamId.gen SequenceId.toString +module Stream = + let [] Category = "Sequence" + let id = FsCodec.StreamId.gen SequenceId.toString // NOTE - these types and the union case names reflect the actual storage formats and hence need to be versioned with care module Events = @@ -36,14 +37,14 @@ type Service internal (resolve : SequenceId -> Equinox.Decider> Equinox.Decider.forStream (Serilog.Log.ForContext()) cat) +let create cat = Service(Stream.id >> Equinox.Decider.forStream (Serilog.Log.ForContext()) cat) module Cosmos = open Equinox.CosmosStore let private create (context, cache, accessStrategy) = let cacheStrategy = Equinox.CachingStrategy.SlidingWindow (cache, TimeSpan.FromMinutes 20.) 
// OR CachingStrategy.NoCaching - CosmosStoreCategory(context, Category, Events.codec, Fold.fold, Fold.initial, accessStrategy, cacheStrategy) + CosmosStoreCategory(context, Stream.Category, Events.codec, Fold.fold, Fold.initial, accessStrategy, cacheStrategy) module LatestKnownEvent = diff --git a/samples/Tutorial/Set.fs b/samples/Tutorial/Set.fs index 614e8e82f..5ea548944 100644 --- a/samples/Tutorial/Set.fs +++ b/samples/Tutorial/Set.fs @@ -1,7 +1,8 @@ module Set -let [] Category = "Set" -let streamId = Equinox.StreamId.gen SetId.toString +module Stream = + let [] Category = "Set" + let id = FsCodec.StreamId.gen SetId.toString // NOTE - these types and the union case names reflect the actual storage formats and hence need to be versioned with care module Events = @@ -53,16 +54,16 @@ type Service internal (decider: Equinox.Decider) = decider.Query id let create setId cat = - Service(streamId setId |> Equinox.Decider.forStream Serilog.Log.Logger cat) + Service(Stream.id setId |> Equinox.Decider.forStream Serilog.Log.Logger cat) module Cosmos = let category (context, cache) = let cacheStrategy = Equinox.CachingStrategy.SlidingWindow (cache, System.TimeSpan.FromMinutes 20.) let accessStrategy = Equinox.CosmosStore.AccessStrategy.RollingState Fold.Snapshot.generate - Equinox.CosmosStore.CosmosStoreCategory(context, Category, Events.codec, Fold.fold, Fold.initial, accessStrategy, cacheStrategy) + Equinox.CosmosStore.CosmosStoreCategory(context, Stream.Category, Events.codec, Fold.fold, Fold.initial, accessStrategy, cacheStrategy) module MemoryStore = let category store = - Equinox.MemoryStore.MemoryStoreCategory(store, Category, Events.codec, Fold.fold, Fold.initial) + Equinox.MemoryStore.MemoryStoreCategory(store, Stream.Category, Events.codec, Fold.fold, Fold.initial) diff --git a/samples/Tutorial/Todo.fsx b/samples/Tutorial/Todo.fsx index eaa637bce..d589bda20 100644 --- a/samples/Tutorial/Todo.fsx +++ b/samples/Tutorial/Todo.fsx @@ -27,8 +27,9 @@ open System (* NB It's recommended to look at Favorites.fsx first as it establishes the groundwork This tutorial stresses different aspects *) -let Category = "Todos" -let streamId = Equinox.StreamId.gen id +module Stream = + let Category = "Todos" + let id = FsCodec.StreamId.gen id type Todo = { id: int; order: int; title: string; completed: bool } type DeletedInfo = { id: int } @@ -132,14 +133,15 @@ module Store = let read key = Environment.GetEnvironmentVariable key |> Option.ofObj |> Option.get let discovery = Discovery.ConnectionString (read "EQUINOX_COSMOS_CONNECTION") let connector = CosmosStoreConnector(discovery, TimeSpan.FromSeconds 5., 2, TimeSpan.FromSeconds 5.) - let storeClient = CosmosStoreClient.Connect(connector.CreateAndInitialize, read "EQUINOX_COSMOS_DATABASE", read "EQUINOX_COSMOS_CONTAINER") |> Async.RunSynchronously - let context = CosmosStoreContext(storeClient, tipMaxEvents = 100) // Keep up to 100 events in tip before moving events to a new document + let databaseId, containerId = read "EQUINOX_COSMOS_DATABASE", read "EQUINOX_COSMOS_CONTAINER" + let storeClient = CosmosStoreClient.Connect(connector.CreateAndInitialize, databaseId, containerId) |> Async.RunSynchronously + let context = CosmosStoreContext(storeClient, databaseId, containerId, tipMaxEvents = 100) // Keep up to 100 events in tip before moving events to a new document let cacheStrategy = Equinox.CachingStrategy.SlidingWindow (cache, TimeSpan.FromMinutes 20.) 
let access = AccessStrategy.Snapshot Snapshot.config - let category = CosmosStoreCategory(context, Category, codec, fold, initial, access, cacheStrategy) + let category = CosmosStoreCategory(context, Stream.Category, codec, fold, initial, access, cacheStrategy) -let service = Service(streamId >> Equinox.Decider.forStream log Store.category) +let service = Service(Stream.id >> Equinox.Decider.forStream log Store.category) let client = "ClientJ" let item = { id = 0; order = 0; title = "Feed cat"; completed = false } diff --git a/samples/Tutorial/Tutorial.fsproj b/samples/Tutorial/Tutorial.fsproj index 26b8332f3..c6b32cd8f 100644 --- a/samples/Tutorial/Tutorial.fsproj +++ b/samples/Tutorial/Tutorial.fsproj @@ -28,8 +28,7 @@ - - + diff --git a/samples/Tutorial/Upload.fs b/samples/Tutorial/Upload.fs index 227dd4941..ea7cf101a 100644 --- a/samples/Tutorial/Upload.fs +++ b/samples/Tutorial/Upload.fs @@ -14,8 +14,9 @@ and [] companyId module CompanyId = let toString (value : CompanyId) : string = %value -let [] Category = "Upload" -let streamId = Equinox.StreamId.gen2 CompanyId.toString PurchaseOrderId.toString +module Stream = + let [] Category = "Upload" + let id = FsCodec.StreamId.gen2 CompanyId.toString PurchaseOrderId.toString type UploadId = string and [] uploadId @@ -52,16 +53,16 @@ type Service internal (resolve : CompanyId * PurchaseOrderId -> Equinox.Decider< let decider = resolve (companyId, purchaseOrderId) decider.Transact(decide value) -let create cat = Service(streamId >> Equinox.Decider.forStream Serilog.Log.Logger cat) +let create cat = Service(Stream.id >> Equinox.Decider.forStream Serilog.Log.Logger cat) module Cosmos = open Equinox.CosmosStore let category (context, cache) = let cacheStrategy = Equinox.CachingStrategy.SlidingWindow (cache, TimeSpan.FromMinutes 20.) 
// OR CachingStrategy.NoCaching - CosmosStoreCategory(context, Category, Events.codecJe, Fold.fold, Fold.initial, AccessStrategy.LatestKnownEvent, cacheStrategy) + CosmosStoreCategory(context, Stream.Category, Events.codecJe, Fold.fold, Fold.initial, AccessStrategy.LatestKnownEvent, cacheStrategy) module EventStore = open Equinox.EventStoreDb let category context = - EventStoreCategory(context, Category, Events.codec, Fold.fold, Fold.initial, AccessStrategy.LatestKnownEvent, Equinox.CachingStrategy.NoCaching) + EventStoreCategory(context, Stream.Category, Events.codec, Fold.fold, Fold.initial, AccessStrategy.LatestKnownEvent, Equinox.CachingStrategy.NoCaching) diff --git a/src/Equinox.Core/Batching.fs b/src/Equinox.Core/Batching.fs index 868c38822..5251c25ee 100644 --- a/src/Equinox.Core/Batching.fs +++ b/src/Equinox.Core/Batching.fs @@ -43,7 +43,7 @@ type Batcher<'Req, 'Res>(dispatch: Func<'Req[], CancellationToken, Task<'Res[]>> let lingerMs = match linger with None -> 1 | Some x -> int x.TotalMilliseconds let mutable cell = AsyncBatch<'Req, 'Res>() - new (dispatch: 'Req[] -> Async<'Res[]>, ?linger) = Batcher((fun items ct -> Async.StartImmediateAsTask(dispatch items, ct)), ?linger = linger) + new(dispatch: 'Req[] -> Async<'Res[]>, ?linger) = Batcher((fun items ct -> Async.StartImmediateAsTask(dispatch items, ct)), ?linger = linger) /// Include an item in the batch; await the collective dispatch (subject to the configured linger time) member x.ExecuteAsync(req, ct) = task { @@ -75,7 +75,7 @@ type BatcherDictionary<'Id, 'Entry>(create: Func<'Id, 'Entry>) = /// NOTE if the number of items is bounded, BatcherDictionary is significantly more efficient type BatcherCache<'Id, 'Entry>(cache: Cache<'Entry>, toKey: Func<'Id, string>, create: Func<'Id, 'Entry>, ?cacheWindow) = let cacheWindow = defaultArg cacheWindow (TimeSpan.FromMinutes 1) - let cachePolicy = Caching.policySlidingExpiration cacheWindow () + let cachePolicy = System.Runtime.Caching.CacheItemPolicy(SlidingExpiration = cacheWindow) /// Maintains the entries in an internal cache limited to the specified size, with entries identified by "{id}" new(name, create: Func<'Id, 'Entry>, sizeMb: int, ?cacheWindow) = diff --git a/src/Equinox.Core/Cache.fs b/src/Equinox.Core/Cache.fs index 38576f2db..fb320184e 100755 --- a/src/Equinox.Core/Cache.fs +++ b/src/Equinox.Core/Cache.fs @@ -53,7 +53,7 @@ type private CacheEntry<'state>(initialToken: StreamToken, initialState: 'state, x.MergeUpdates(isStale, timestamp, token, state) // merge observed result into the cache return res } -type Cache private (inner: System.Runtime.Caching.MemoryCache) = +type Cache(inner: System.Runtime.Caching.MemoryCache) = let tryLoad key = match inner.Get key with | null -> ValueNone @@ -73,10 +73,12 @@ type Cache private (inner: System.Runtime.Caching.MemoryCache) = match addOrGet key options entry with | Ok _ -> () // Our fresh one got added | Error existingEntry -> existingEntry.MergeUpdates(isStale, timestamp, token, state) - new (name, sizeMb: int) = + new(name, sizeMb: int) = let config = System.Collections.Specialized.NameValueCollection(1) config.Add("cacheMemoryLimitMegabytes", string sizeMb); Cache(new System.Runtime.Caching.MemoryCache(name, config)) + /// Exposes the internal MemoryCache + member val Inner = inner // if there's a non-zero maxAge, concurrent read attempts share the roundtrip (and its fate, if it throws) member internal _.Load(key, maxAge, isStale, policy, loadOrReload, ct) = task { let loadOrReload maybeBaseState = task { @@ -97,9 +99,6 @@ 
type Cache private (inner: System.Runtime.Caching.MemoryCache) = member internal _.Save(key, isStale, policy, timestamp, token, state) = addOrMergeCacheEntry isStale key policy timestamp (token, state) - /// Exposes the internal MemoryCache - member val Inner = inner - type [] CachingStrategy = /// Do not apply any caching strategy for this Category. | NoCaching diff --git a/src/Equinox.Core/Caching.fs b/src/Equinox.Core/Caching.fs index 7f85019a0..51db51746 100644 --- a/src/Equinox.Core/Caching.fs +++ b/src/Equinox.Core/Caching.fs @@ -33,7 +33,7 @@ type private Decorator<'event, 'state, 'context, 'cat when 'cat :> ICategory<'ev let private mkKey prefix streamName = prefix + streamName -let internal policySlidingExpiration (slidingExpiration: System.TimeSpan) () = +let private policySlidingExpiration (slidingExpiration: System.TimeSpan) () = System.Runtime.Caching.CacheItemPolicy(SlidingExpiration = slidingExpiration) let private policyFixedTimeSpan (period: System.TimeSpan) () = let expirationPoint = let creationDate = System.DateTimeOffset.UtcNow in creationDate.Add period diff --git a/src/Equinox.Core/Equinox.Core.fsproj b/src/Equinox.Core/Equinox.Core.fsproj index 24bfe9ebc..fe7aceba0 100644 --- a/src/Equinox.Core/Equinox.Core.fsproj +++ b/src/Equinox.Core/Equinox.Core.fsproj @@ -5,9 +5,7 @@ - - @@ -24,8 +22,7 @@ - - + diff --git a/src/Equinox.CosmosStore/CosmosStore.fs b/src/Equinox.CosmosStore/CosmosStore.fs index b900ac8ed..2e3ce82e0 100644 --- a/src/Equinox.CosmosStore/CosmosStore.fs +++ b/src/Equinox.CosmosStore/CosmosStore.fs @@ -178,7 +178,7 @@ module Log = type Measurement = { database: string; container: string; stream: string interval: StopwatchInterval; bytes: int; count: int; ru: float } - member x.Category = StreamName.category (FSharp.UMX.UMX.tag x.stream) + member x.Category = x.stream |> StreamName.Internal.trust |> StreamName.Category.ofStreamName [] type Metric = /// Individual read request for the Tip @@ -575,12 +575,12 @@ module Initialization = let! d = createOrProvisionDatabase client dName mode return! createOrProvisionContainer d (cName, "/id", applyAuxContainerProperties) mode } // as per Cosmos team, Partition Key must be "/id" - /// Holds Container state, coordinating initialization activities - type internal ContainerInitializerGuard(container: Container, fallback: Container option, ?initContainer: Container -> CancellationToken -> Task) = + /// Per Container, we need to ensure the stored procedure has been created exactly once (per process lifetime) + type internal ContainerInitializerGuard(container: Container, ?initContainer: Container -> CancellationToken -> Task) = let initGuard = initContainer |> Option.map (fun init -> AsyncCacheCell(init container)) - member _.Container = container - member _.Fallback = fallback - member internal _.Initialize(ct): System.Threading.Tasks.ValueTask = + member val Container = container + /// Coordinates max of one in flight call to the init logic, retrying on next request if it fails. 
Calls after it has succeeded noop + member _.Initialize(ct): System.Threading.Tasks.ValueTask = match initGuard with | Some g when not (g.IsValid()) -> g.Await(ct) |> ValueTask.ofTask |> ValueTask.ignore | _ -> System.Threading.Tasks.ValueTask.CompletedTask @@ -1010,7 +1010,7 @@ type TipOptions member val ReadRetryPolicy = readRetryPolicy member val WriteRetryPolicy = writeRetryPolicy -type StoreClient(container: Container, archive: Container option, query: QueryOptions, tip: TipOptions) = +type StoreClient(container: Container, fallback: Container option, query: QueryOptions, tip: TipOptions) = let loadTip log stream pos = Tip.tryLoad log tip.ReadRetryPolicy (container, stream) (pos, None) let ignoreMissing = tip.IgnoreMissingEvents @@ -1021,7 +1021,7 @@ type StoreClient(container: Container, archive: Container option, query: QueryOp let includeTip = Option.isNone tip let walk log container = Query.scan log (container, stream) includeTip query.MaxItems query.MaxRequests direction (tryDecode, isOrigin) let walkFallback = - match archive with + match fallback with | None -> Choice1Of2 ignoreMissing | Some f -> Choice2Of2 (walk (log |> Log.prop "fallback" true) f) @@ -1065,7 +1065,7 @@ type StoreClient(container: Container, archive: Container option, query: QueryOp member _.Prune(log, stream, index, ct) = Prune.until log (container, stream) query.MaxItems index ct -type internal Category<'event, 'state, 'context> +type internal StoreCategory<'event, 'state, 'context> ( store: StoreClient, createStoredProcIfNotExistsExactlyOnce: CancellationToken -> System.Threading.Tasks.ValueTask, codec: IEventCodec<'event, EventBody, 'context>, fold: 'state -> 'event[] -> 'state, initial: 'state, isOrigin: 'event -> bool, checkUnfolds, compressUnfolds, mapUnfolds: Choice 'state -> 'event[], 'event[] -> 'state -> 'event[] * 'event[]>) = @@ -1217,77 +1217,50 @@ type CosmosStoreConnector member _.CreateUninitialized() = factory.CreateUninitialized(discovery) /// Creates and validates a Client [including loading metadata](https://devblogs.microsoft.com/cosmosdb/improve-net-sdk-initialization) for the specified containers - member _.CreateAndInitialize(containers) = factory.CreateAndInitialize(discovery, containers) + member _.CreateAndInitialize(containers): Async = factory.CreateAndInitialize(discovery, containers) /// Holds all relevant state for a Store within a given CosmosDB Database /// - The CosmosDB CosmosClient (there should be a single one of these per process, plus an optional fallback one for pruning scenarios) /// - The (singleton) per Container Stored Procedure initialization state type CosmosStoreClient - ( // Facilitates custom mapping of Stream Category Name to underlying Cosmos Database/Container names - categoryAndStreamNameToDatabaseContainerStream: string * string -> string * string * string, - createContainer: string * string -> Container, - createFallbackContainer: string * string -> Container option, - [] ?primaryDatabaseAndContainerToArchive: string * string -> string * string, - // Admits a hook to enable customization of how Equinox.CosmosStore handles the low level interactions with the underlying CosmosContainer. 
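`Initialize` above exists so the stored procedure provisioning runs at most once per process, with a failed attempt retried on the next request and subsequent calls becoming noops once it has succeeded. A standalone sketch of that pattern using an illustrative `InitGuard` (not the library's `AsyncCacheCell`, which additionally exposes the `IsValid` fast path used in the hunk):

```fsharp
open System.Threading
open System.Threading.Tasks

/// Illustrative stand-in for the init coordination above: runs `init` at most once,
/// but lets a later caller retry if the previous attempt ended in failure
type InitGuard(init: CancellationToken -> Task) =
    let gate = obj ()
    let mutable current: Task option = None
    member _.Initialize(ct: CancellationToken): Task =
        lock gate (fun () ->
            match current with
            | Some t when not (t.IsFaulted || t.IsCanceled) -> t // in flight or already succeeded: share it
            | _ ->                                               // first call, or retry after a failure
                let t = init ct
                current <- Some t
                t)
```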
- [] ?createGateway, - // Inhibit CreateStoredProcedureIfNotExists when a given Container is used for the first time - [] ?disableInitialization) = - let createGateway = match createGateway with Some creator -> creator | None -> id - let primaryDatabaseAndContainerToArchive = defaultArg primaryDatabaseAndContainerToArchive id - // Index of database*container -> Initialization Context - let containerInitGuards = System.Collections.Concurrent.ConcurrentDictionary() - new(client, databaseId: string, containerId: string, - // Inhibit CreateStoredProcedureIfNotExists when a given Container is used for the first time - [] ?disableInitialization, - // Admits a hook to enable customization of how Equinox.CosmosStore handles the low level interactions with the underlying CosmosContainer. - [] ?createGateway: Container -> Container, + ( client: CosmosClient, // Client to use for fallback Containers. Default: use client [] ?archiveClient: CosmosClient, - // Database Name to use for locating missing events. Default: use databaseId - [] ?archiveDatabaseId, - // Container Name to use for locating missing events. Default: use containerId - [] ?archiveContainerId) = - let genStreamName (categoryName, streamId) = if categoryName = null then streamId else StreamName.render categoryName streamId - let catAndStreamToDatabaseContainerStream (categoryName, streamId) = databaseId, containerId, genStreamName (categoryName, streamId) - let primaryContainer (d, c) = (client: CosmosClient).GetDatabase(d).GetContainer(c) - let fallbackContainer = - if Option.isNone archiveClient && Option.isNone archiveDatabaseId && Option.isNone archiveContainerId then fun (_, _) -> None - else fun (d, c) -> Some ((defaultArg archiveClient client).GetDatabase(defaultArg archiveDatabaseId d).GetContainer(defaultArg archiveContainerId c)) - CosmosStoreClient(catAndStreamToDatabaseContainerStream, primaryContainer, fallbackContainer, - ?disableInitialization = disableInitialization, ?createGateway = createGateway) - member internal _.ResolveContainerGuardAndStreamName(categoryName, streamId): struct (Initialization.ContainerInitializerGuard * string) = - let createContainerInitializerGuard (d, c) = - let init = - if Some true = disableInitialization then None - else Some (Initialization.createSyncStoredProcIfNotExists None) - let archiveD, archiveC = primaryDatabaseAndContainerToArchive (d, c) - let primaryContainer, fallbackContainer = createContainer (d, c), createFallbackContainer (archiveD, archiveC) - Initialization.ContainerInitializerGuard(createGateway primaryContainer, Option.map createGateway fallbackContainer, ?initContainer = init) - let databaseId, containerId, streamName = categoryAndStreamNameToDatabaseContainerStream (categoryName, streamId) - let g = containerInitGuards.GetOrAdd((databaseId, containerId), createContainerInitializerGuard) - struct (g, streamName) - - /// Connect to an Equinox.CosmosStore in the specified Container + // Admits a hook to enable customization of how Equinox.CosmosStore handles the low level interactions with the underlying Cosmos Container. 
+ [] ?customize: Container -> Container, + // Inhibit CreateStoredProcedureIfNotExists when a given Container is used for the first time + [] ?disableInitialization) = + + let containerInitGuards = System.Collections.Concurrent.ConcurrentDictionary() + let customize = defaultArg customize id + let fallbackClient = defaultArg archiveClient client + let createContainer (client: CosmosClient) struct (d, c) = + client.GetDatabase(d).GetContainer(c) |> customize + + member val CosmosClient = client + member internal x.GetOrAddPrimaryContainer(databaseId, containerId): Initialization.ContainerInitializerGuard = + let createContainerInitializerGuard databaseIdAndContainerId = + let init = match disableInitialization with Some true -> None | _ -> Some (Initialization.createSyncStoredProcIfNotExists None) + Initialization.ContainerInitializerGuard(createContainer client databaseIdAndContainerId, ?initContainer = init) + containerInitGuards.GetOrAdd(struct (databaseId, containerId), createContainerInitializerGuard) + member internal _.CreateFallbackContainer(databaseId, containerId) = + createContainer fallbackClient (databaseId, containerId) + + /// Connect to an Equinox.CosmosStore in the specified databaseId/containerId, including warmup to establish the metadata etc. /// NOTE: The returned CosmosStoreClient instance should be held as a long-lived singleton within the application. /// /// let createStoreClient (connectionString, database, container) = /// let connector = CosmosStoreConnector(Discovery.ConnectionString connectionString, System.TimeSpan.FromSeconds 5., 2, System.TimeSpan.FromSeconds 5.) /// CosmosStoreClient.Connect(connector.CreateAndInitialize, database, container) /// - static member Connect(connectContainers, databaseId: string, containerId: string): Async = async { - let! client = connectContainers [| struct (databaseId, containerId) |] - return CosmosStoreClient(client, databaseId, containerId) } - - /// Connect to a hot-warm CosmosStore pair within the same account - /// Events that have been archived and purged (and hence are determined to be missing from the primary) are retrieved from the archive via a fallback request where necessary. - /// NOTE: The returned CosmosStoreClient instance should be held as a long-lived singleton within the application. - static member Connect(connectContainers, databaseId: string, containerId: string, archiveContainerId): Async = async { - let! client = connectContainers [| struct (databaseId, containerId); struct (databaseId, archiveContainerId) |] - return CosmosStoreClient(client, databaseId, containerId, archiveContainerId = archiveContainerId) } - -/// Defines a set of related access policies for a given CosmosDB, together with a Containers map defining mappings from (category,id) to (databaseId,containerId,streamName) -type CosmosStoreContext(storeClient: CosmosStoreClient, tipOptions, queryOptions) = - new(storeClient: CosmosStoreClient, + static member Connect(createClientWithContainersInitialized, databaseId: string, [] containerIds: string[]): Async = async { + let! client = createClientWithContainersInitialized [| for c in containerIds -> struct (databaseId, c) |] + return CosmosStoreClient(client) } + +/// Defines the policies for accessing a given Container (And optional fallback Container for retrieval of archived data). 
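Connection is now the two-step affair described in the doc comment above: a `CosmosStoreConnector` supplies the discovery/timeout/retry policy, `CreateAndInitialize` warms up the listed containers by loading their metadata, and `Connect` wraps the resulting `CosmosClient`. A sketch with placeholder names; the 5s/2-retry settings are illustrative, not recommendations:

```fsharp
open System
open Equinox.CosmosStore

let connect (connectionString, databaseId, containerId) = async {
    let connector = CosmosStoreConnector(Discovery.ConnectionString connectionString, TimeSpan.FromSeconds 5., 2, TimeSpan.FromSeconds 5.)
    // CreateAndInitialize pre-loads metadata for each (database, container) pair supplied via the ParamArray
    return! CosmosStoreClient.Connect(connector.CreateAndInitialize, databaseId, containerId) }
```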
+type CosmosStoreContext(client: CosmosStoreClient, databaseId, containerId, tipOptions, queryOptions, ?archive) = + let containerGuard = client.GetOrAddPrimaryContainer(databaseId, containerId) + new(client: CosmosStoreClient, databaseId, containerId, // Maximum number of events permitted in Tip. When this is exceeded, events are moved out to a standalone Batch. // NOTE Equinox.Cosmos versions <= 3.0.0 cannot read events in Tip, hence using a non-zero value will not be interoperable. tipMaxEvents, @@ -1298,17 +1271,26 @@ type CosmosStoreContext(storeClient: CosmosStoreClient, tipOptions, queryOptions // Max number of Batches to return per paged query response. Default: 10. [] ?queryMaxItems, // Maximum number of trips to permit when slicing the work into multiple responses limited by `queryMaxItems`. Default: unlimited. - [] ?queryMaxRequests) = + [] ?queryMaxRequests, + // Database Name to use for locating missing events. Default: use databaseId, if archiveContainerId specified. + [] ?archiveDatabaseId, + // Container Name to use for locating missing events. Default: use containerId, if archiveDatabaseId specified. + [] ?archiveContainerId) = let tipOptions = TipOptions(maxEvents = tipMaxEvents, ?maxJsonLength = tipMaxJsonLength, ?ignoreMissingEvents = ignoreMissingEvents) let queryOptions = QueryOptions(?maxItems = queryMaxItems, ?maxRequests = queryMaxRequests) - CosmosStoreContext(storeClient, tipOptions, queryOptions) - member val StoreClient = storeClient + let archive = + match archiveDatabaseId, archiveContainerId with + | None, None -> None + | None, Some c -> Some (databaseId, c) + | Some d, c -> Some (d, defaultArg c containerId) + CosmosStoreContext(client, databaseId, containerId, tipOptions, queryOptions, ?archive = archive) member val QueryOptions = queryOptions member val TipOptions = tipOptions - member internal x.ResolveStoreClientAndStreamNameAndInit(categoryName, streamId) = - let struct (cg, streamName) = storeClient.ResolveContainerGuardAndStreamName(categoryName, streamId) - let store = StoreClient(cg.Container, cg.Fallback, x.QueryOptions, x.TipOptions) - struct (store, streamName, cg.Initialize) + member val internal StoreClient = + let fallback = archive |> Option.map client.CreateFallbackContainer + StoreClient(containerGuard.Container, fallback, queryOptions, tipOptions) + // Writes go through the stored proc, which we need to provision per container + member internal _.EnsureStoredProcedureInitialized ct = containerGuard.Initialize ct [] type AccessStrategy<'event, 'state> = @@ -1342,8 +1324,8 @@ type AccessStrategy<'event, 'state> = /// | Custom of isOrigin: ('event -> bool) * transmute: ('event[] -> 'state -> 'event[] * 'event[]) -type CosmosStoreCategory<'event, 'state, 'context> internal (name, resolveStream) = - inherit Equinox.Category<'event, 'state, 'context>(name, resolveStream = resolveStream) +type CosmosStoreCategory<'event, 'state, 'context> = + inherit Equinox.Category<'event, 'state, 'context> new(context: CosmosStoreContext, name, codec, fold, initial, access, // For CosmosDB, caching is typically a central aspect of managing RU consumption to maintain performance and capacity. 
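Archive fallback moves from the client onto the context: `archiveDatabaseId`/`archiveContainerId` determine where events missing from the primary container are looked up. A hot/warm sketch within a single database, with placeholder names and an illustrative `tipMaxEvents` value:

```fsharp
open Equinox.CosmosStore

let createContext (client: CosmosStoreClient) =
    // events that have been archived and purged from "events" are retrieved from "events-archive" on demand
    CosmosStoreContext(client, "db", "events", tipMaxEvents = 256, archiveContainerId = "events-archive")
```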
// The cache holds the Tip document's etag, which enables use of etag-contingent Reads (which cost only 1RU in the case where the document is unchanged) @@ -1367,16 +1349,9 @@ type CosmosStoreCategory<'event, 'state, 'context> internal (name, resolveStream | AccessStrategy.MultiSnapshot (isOrigin, unfold) -> isOrigin, true, Choice2Of3 (fun _ state -> unfold state) | AccessStrategy.RollingState toSnapshot -> (fun _ -> true), true, Choice3Of3 (fun _ state -> Array.empty, toSnapshot state |> Array.singleton) | AccessStrategy.Custom (isOrigin, transmute) -> isOrigin, true, Choice3Of3 transmute - let categories = System.Collections.Concurrent.ConcurrentDictionary>() - let resolveInner struct (container, categoryName, init) = - let createCategory _name: ICategory<_, _, 'context> = - Category<'event, 'state, 'context>(container, init, codec, fold, initial, isOrigin, checkUnfolds, compressUnfolds, mapUnfolds) - |> Caching.apply Token.isStale caching - categories.GetOrAdd(categoryName, createCategory) - let resolveStream streamId = - let struct (_, streamName, _) as args = context.ResolveStoreClientAndStreamNameAndInit(name, streamId) - struct (resolveInner args, streamName) - CosmosStoreCategory(name, resolveStream) + { inherit Equinox.Category<'event, 'state, 'context>(name, + StoreCategory<'event, 'state, 'context>(context.StoreClient, context.EnsureStoredProcedureInitialized, codec, fold, initial, isOrigin, checkUnfolds, compressUnfolds, mapUnfolds) + |> Caching.apply Token.isStale caching) } module Exceptions = @@ -1403,10 +1378,11 @@ type AppendResult<'t> = | ConflictUnknown of index: 't /// Encapsulates the core facilities Equinox.CosmosStore offers for operating directly on Events in Streams. -type EventsContext internal - ( context: Equinox.CosmosStore.CosmosStoreContext, store: StoreClient, +type EventsContext + ( context: Equinox.CosmosStore.CosmosStoreContext, // Logger to write to - see https://github.com/serilog/serilog/wiki/Provided-Sinks for how to wire to your logger log: Serilog.ILogger) = + let resolve streamName = context.StoreClient, StreamName.toString streamName do if log = null then nullArg "log" let maxCountPredicate count = let acc = ref (max (count-1) 0) @@ -1425,21 +1401,13 @@ type EventsContext internal | Direction.Forward -> startPos, None | Direction.Backward -> None, startPos - new (context: Equinox.CosmosStore.CosmosStoreContext, log) = - let struct (store, _streamId, _init) = context.ResolveStoreClientAndStreamNameAndInit(null, null) - EventsContext(context, store, log) - - member _.ResolveStream(streamName) = - let struct (_cc, streamName, init) = context.ResolveStoreClientAndStreamNameAndInit(null, streamName) - struct (streamName, init) - member x.StreamId(streamName): string = x.ResolveStream streamName |> ValueTuple.fst - - member internal _.GetLazy(stream, ?ct, ?queryMaxItems, ?direction, ?minIndex, ?maxIndex): IAsyncEnumerable[]> = + member internal _.GetLazy(streamName, ?ct, ?queryMaxItems, ?direction, ?minIndex, ?maxIndex): IAsyncEnumerable[]> = let direction = defaultArg direction Direction.Forward let batching = match queryMaxItems with Some qmi -> QueryOptions(qmi) | _ -> context.QueryOptions + let store, stream = resolve streamName store.ReadLazy(log, batching, stream, direction, (Some, fun _ -> false), ?ct = ct, ?minIndex = minIndex, ?maxIndex = maxIndex) - member internal _.GetInternal((stream, startPos), ?ct, ?maxCount, ?direction) = task { + member internal _.GetInternal((streamName, startPos), ?ct, ?maxCount, ?direction) = task { let direction = 
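`CosmosStoreCategory` now derives directly from `Equinox.Category`, with the mandatory category `name` up front and `caching` supplied as the final argument, applied over the top of the access strategy. A wiring sketch that leaves `codec`/`fold`/`initial` to the caller; the use of `AccessStrategy.Unoptimized`, `Equinox.Cache` and `Equinox.CachingStrategy.SlidingWindow` is assumed from the surrounding changes rather than shown in this hunk:

```fsharp
open System
open Equinox.CosmosStore

// codec/fold/initial are whatever the aggregate module supplies; the point is the argument order:
// context, mandatory name, codec, fold, initial, access strategy, then caching last
let createCategory (context: CosmosStoreContext) (cache: Equinox.Cache) name codec fold initial =
    CosmosStoreCategory(context, name, codec, fold, initial, AccessStrategy.Unoptimized,
                        Equinox.CachingStrategy.SlidingWindow(cache, TimeSpan.FromMinutes 20.))
```

From there, `Equinox.Decider.forStream` takes a logger, the category and a `StreamId` to produce the `Decider` consumed by the service layer.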
defaultArg direction Direction.Forward if maxCount = Some 0 then // Search semantics include the first hit so we need to special case this anyway @@ -1450,32 +1418,32 @@ type EventsContext internal | Some limit -> maxCountPredicate limit | None -> fun _ -> false let minIndex, maxIndex = getRange direction startPos + let store, stream = resolve streamName let! token, events = store.Read(log, stream, direction, (ValueSome, isOrigin), ?ct = ct, ?minIndex = minIndex, ?maxIndex = maxIndex) if direction = Direction.Backward then System.Array.Reverse events return token, events } /// Establishes the current position of the stream in as efficient a manner as possible /// (The ideal situation is that the preceding token is supplied as input in order to avail of 1RU low latency validation in the case of an unchanged Tip) - member _.Sync(stream, ct, [] ?position: Position): Task = task { + member _.Sync(streamName, ct, [] ?position: Position): Task = task { + let store, stream = resolve streamName let! Token.Unpack pos' = store.GetPosition(log, stream, ct, ?pos = position) return pos' } /// Query (with MaxItems set to `queryMaxItems`) from the specified `Position`, allowing the reader to efficiently walk away from a running query /// ... NB as long as they Dispose! - member x.Walk(stream, queryMaxItems, [] ?ct, [] ?minIndex, [] ?maxIndex, [] ?direction): IAsyncEnumerable[]> = - x.GetLazy(stream, ?ct = ct, queryMaxItems = queryMaxItems, ?direction = direction, ?minIndex = minIndex, ?maxIndex = maxIndex) + member x.Walk(streamName, queryMaxItems, [] ?ct, [] ?minIndex, [] ?maxIndex, [] ?direction): IAsyncEnumerable[]> = + x.GetLazy(streamName, ?ct = ct, queryMaxItems = queryMaxItems, ?direction = direction, ?minIndex = minIndex, ?maxIndex = maxIndex) /// Reads all Events from a `Position` in a given `direction` - member x.Read(stream, [] ?ct, [] ?position, [] ?maxCount, [] ?direction): Task[]> = - x.GetInternal((stream, position), ?ct = ct, ?maxCount = maxCount, ?direction = direction) |> yieldPositionAndData + member x.Read(streamName, [] ?ct, [] ?position, [] ?maxCount, [] ?direction): Task[]> = + x.GetInternal((streamName, position), ?ct = ct, ?maxCount = maxCount, ?direction = direction) |> yieldPositionAndData /// Appends the supplied batch of events, subject to a consistency check based on the `position` /// Callers should implement appropriate idempotent handling, or use Equinox.Decider for that purpose - member x.Sync(stream, position, events: IEventData<_>[], ct): Task> = task { - // Writes go through the stored proc, which we need to provision per container - // The way this is routed is definitely hacky, but the entire existence of this API is pretty questionable, so ugliness is apppropiate - let struct (_, createStoredProcIfNotExistsExactlyOnce) = x.ResolveStream(stream) - do! createStoredProcIfNotExistsExactlyOnce ct + member x.Sync(streamName, position, events: IEventData<_>[], ct): Task> = task { + do! context.EnsureStoredProcedureInitialized ct + let store, stream = resolve streamName let batch = Sync.mkBatch stream events Seq.empty match! store.Sync(log, stream, SyncExp.Version position.index, batch, ct) with | InternalSyncResult.Written (Token.Unpack pos) -> return AppendResult.Ok pos @@ -1484,12 +1452,13 @@ type EventsContext internal /// Low level, non-idempotent call appending events to a stream without a concurrency control mechanism in play /// NB Should be used sparingly; Equinox.Decider enables building equivalent equivalent idempotent handling with minimal code. 
- member x.NonIdempotentAppend(stream, events: IEventData<_>[], ct): Task = task { - match! x.Sync(stream, Position.fromAppendAtEnd, events, ct) with + member x.NonIdempotentAppend(streamName, events: IEventData<_>[], ct): Task = task { + match! x.Sync(streamName, Position.fromAppendAtEnd, events, ct) with | AppendResult.Ok token -> return token - | x -> return x |> sprintf "Conflict despite it being disabled %A" |> invalidOp } + | x -> return invalidOp $"Conflict despite it being disabled %A{x}" } - member _.Prune(stream, index, ct): Task = + member _.Prune(streamName, index, ct): Task = + let store, stream = resolve streamName store.Prune(log, stream, index, ct) /// Provides mechanisms for building `EventData` records to be supplied to the `Events` API @@ -1524,52 +1493,52 @@ module Events = /// reading in batches of the specified size. /// Returns an empty sequence if the stream is empty or if the sequence number is larger than the largest /// sequence number in the stream. - let getAll (ctx: EventsContext) (streamName: string) (index: int64) (batchSize: int): Async[]>> = async { + let getAll (ctx: EventsContext) (streamName: StreamName) (index: int64) (batchSize: int): Async[]>> = async { let! ct = Async.CancellationToken - return ctx.Walk(ctx.StreamId streamName, batchSize, ct, minIndex = index) } + return ctx.Walk(streamName, batchSize, ct, minIndex = index) } /// Returns an async array of events in the stream starting at the specified sequence number, /// number of events to read is specified by batchSize /// Returns an empty sequence if the stream is empty or if the sequence number is larger than the largest /// sequence number in the stream. - let get (ctx: EventsContext) (streamName: string) (MinPosition index: int64) (maxCount: int): Async[]> = - Async.call (fun ct -> ctx.Read(ctx.StreamId streamName, ct, ?position = index, maxCount = maxCount) |> dropPosition) + let get (ctx: EventsContext) (streamName: StreamName) (MinPosition index: int64) (maxCount: int): Async[]> = + Async.call (fun ct -> ctx.Read(streamName, ct, ?position = index, maxCount = maxCount) |> dropPosition) /// Appends a batch of events to a stream at the specified expected sequence number. /// If the specified expected sequence number does not match the stream, the events are not appended /// and a failure is returned. - let append (ctx: EventsContext) (streamName: string) (index: int64) (events: IEventData<_>[]): Async> = - Async.call (fun ct -> ctx.Sync(ctx.StreamId streamName, Position.fromI index, events, ct) |> stripSyncResult) + let append (ctx: EventsContext) (streamName: StreamName) (index: int64) (events: IEventData<_>[]): Async> = + Async.call (fun ct -> ctx.Sync(streamName, Position.fromI index, events, ct) |> stripSyncResult) /// Appends a batch of events to a stream at the the present Position without any conflict checks. 
/// NB typically, it is recommended to ensure idempotency of operations by using the `append` and related API as /// this facilitates ensuring consistency is maintained, and yields reduced latency and Request Charges impacts /// (See equivalent APIs on `Context` that yield `Position` values) - let appendAtEnd (ctx: EventsContext) (streamName: string) (events: IEventData<_>[]): Async = - Async.call (fun ct -> ctx.NonIdempotentAppend(ctx.StreamId streamName, events, ct) |> stripPosition) + let appendAtEnd (ctx: EventsContext) (streamName: StreamName) (events: IEventData<_>[]): Async = + Async.call (fun ct -> ctx.NonIdempotentAppend(streamName, events, ct) |> stripPosition) /// Requests deletion of events up and including the specified index. /// Due to the need to preserve ordering of data in the stream, only complete Batches will be removed. /// If the index is within the Tip, events are removed via an etag-checked update. Does not alter the unfolds held in the Tip, or remove the Tip itself. /// Returns count of events deleted this time, events that could not be deleted due to partial batches, and the stream's lowest remaining sequence number. - let pruneUntil (ctx: EventsContext) (streamName: string) (index: int64): Async = - Async.call (fun ct -> ctx.Prune(ctx.StreamId streamName, index, ct)) + let pruneUntil (ctx: EventsContext) (streamName: StreamName) (index: int64): Async = + Async.call (fun ct -> ctx.Prune(streamName, index, ct)) /// Returns an async sequence of events in the stream backwards starting from the specified sequence number, /// reading in batches of the specified size. /// Returns an empty sequence if the stream is empty or if the sequence number is smaller than the smallest /// sequence number in the stream. - let getAllBackwards (ctx: EventsContext) (streamName: string) (index: int64) (batchSize: int): Async[]>> = async { + let getAllBackwards (ctx: EventsContext) (streamName: StreamName) (index: int64) (batchSize: int): Async[]>> = async { let! ct = Async.CancellationToken - return ctx.Walk(ctx.StreamId streamName, batchSize, ct, maxIndex = index, direction = Direction.Backward) } + return ctx.Walk(streamName, batchSize, ct, maxIndex = index, direction = Direction.Backward) } /// Returns an async array of events in the stream backwards starting from the specified sequence number, /// number of events to read is specified by batchSize /// Returns an empty sequence if the stream is empty or if the sequence number is smaller than the smallest /// sequence number in the stream. 
- let getBackwards (ctx: EventsContext) (streamName: string) (MaxPosition index: int64) (maxCount: int): Async[]> = - Async.call (fun ct -> ctx.Read(ctx.StreamId streamName, ct, ?position = index, maxCount = maxCount, direction = Direction.Backward) |> dropPosition) + let getBackwards (ctx: EventsContext) (streamName: StreamName) (MaxPosition index: int64) (maxCount: int): Async[]> = + Async.call (fun ct -> ctx.Read(streamName, ct, ?position = index, maxCount = maxCount, direction = Direction.Backward) |> dropPosition) /// Obtains the `index` from the current write Position - let getNextIndex (ctx: EventsContext) (streamName: string): Async = - Async.call (fun ct -> ctx.Sync(ctx.StreamId streamName, ct = ct) |> stripPosition) + let getNextIndex (ctx: EventsContext) (streamName: StreamName): Async = + Async.call (fun ct -> ctx.Sync(streamName, ct = ct) |> stripPosition) diff --git a/src/Equinox.CosmosStore/Equinox.CosmosStore.fsproj b/src/Equinox.CosmosStore/Equinox.CosmosStore.fsproj index 23bbe90e7..768d45791 100644 --- a/src/Equinox.CosmosStore/Equinox.CosmosStore.fsproj +++ b/src/Equinox.CosmosStore/Equinox.CosmosStore.fsproj @@ -20,7 +20,6 @@ - diff --git a/src/Equinox.DynamoStore/DynamoStore.fs b/src/Equinox.DynamoStore/DynamoStore.fs index dc11bbc68..4a9c5cbba 100644 --- a/src/Equinox.DynamoStore/DynamoStore.fs +++ b/src/Equinox.DynamoStore/DynamoStore.fs @@ -213,7 +213,7 @@ module Log = type Measurement = { table: string; stream: string interval: StopwatchInterval; bytes: int; count: int; ru: float } - member x.Category = StreamName.category (FSharp.UMX.UMX.tag x.stream) + member x.Category = x.stream |> StreamName.Internal.trust |> StreamName.Category.ofStreamName let inline metric table stream t bytes count rc: Measurement = { table = table; stream = stream; interval = t; bytes = bytes; count = count; ru = rc.total } [] @@ -424,15 +424,15 @@ module private Async = let inline executeAsTask ct (computation: Async<'T>) : Task<'T> = startImmediateAsTask computation ct type internal BatchIndices = { isTip: bool; index: int64; n: int64 } -type Container(tableName, createContext: (RequestMetrics -> unit) -> TableContext) = +type StoreTable(name, createContext: (RequestMetrics -> unit) -> TableContext) = member _.Context(collector) = createContext collector - member _.TableName = tableName + member _.Name = name /// As per Equinox.CosmosStore, we assume the table to be provisioned correctly (see DynamoStoreClient.Connect(ConnectMode) re validating on startup) static member Create(client, tableName) = let createContext collector = TableContext(client, tableName, metricsCollector = collector) - Container(tableName, createContext) + StoreTable(tableName, createContext) member x.TryGetTip(stream: string, consistentRead, ct): Task = task { let rm = Metrics() @@ -563,11 +563,11 @@ module internal Sync = type private Res = | Written of etag': string | ConflictUnknown - let private transact (container: Container, stream: string) requestArgs ct: Task = task { + let private transact (table: StoreTable, stream: string) requestArgs ct: Task = task { let etag' = let g = Guid.NewGuid() in g.ToString "N" let actions = generateRequests stream requestArgs etag' let rm = Metrics() - try do! let context = container.Context(rm.Add) + try do! 
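The `Events` helpers above now take an `FsCodec.StreamName` rather than a raw `string`, so callers parse and validate the stream identity up front instead of routing through `ctx.StreamId`. A read-only usage sketch, assuming `EventsContext`/`Events` live under `Equinox.CosmosStore.Core`; the stream name is illustrative:

```fsharp
open Equinox.CosmosStore.Core

let readSummary (context: Equinox.CosmosStore.CosmosStoreContext) (log: Serilog.ILogger) = async {
    let ctx = EventsContext(context, log)
    let streamName = FsCodec.StreamName.parse "Cart-clientA"   // "{category}-{id}" form; parse validates the shape
    let! events = Events.get ctx streamName 0L 100             // up to 100 events, forward from index 0
    let! next = Events.getNextIndex ctx streamName             // the stream's current write position
    return next, [ for e in events -> e.Index, e.EventType ] }
```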
let context = table.Context(rm.Add) match actions with | [ TransactWrite.Put (item, Some cond) ] -> context.PutItemAsync(item, cond) |> Async.Ignore | [ TransactWrite.Update (key, Some cond, updateExpr) ] -> context.UpdateItemAsync(key, updateExpr, cond) |> Async.Ignore @@ -577,9 +577,9 @@ module internal Sync = with DynamoDbConflict -> return rm.Consumed, Res.ConflictUnknown } - let private transactLogged (container, stream) (baseBytes, baseEvents, req, unfolds, exp, b', n', ct) (log: ILogger) + let private transactLogged (table, stream) (baseBytes, baseEvents, req, unfolds, exp, b', n', ct) (log: ILogger) : Task = task { - let! t, ({ total = ru } as rc, result) = transact (container, stream) (req, unfolds, exp, b', n') |> Stopwatch.time ct + let! t, ({ total = ru } as rc, result) = transact (table, stream) (req, unfolds, exp, b', n') |> Stopwatch.time ct let calfBytes, calfCount, tipBytes, tipEvents, appended = req |> function | Req.Append (_tipWasEmpty, appends) -> 0, 0, baseBytes + Event.arrayBytes appends, baseEvents + appends.Length, appends | Req.Calve (calf, tip) -> Event.arrayBytes calf, calf.Length, baseBytes + Event.arrayBytes tip, tip.Length, tip @@ -587,7 +587,7 @@ module internal Sync = | Exp.Etag etag -> "e="+etag, log |> Log.prop "expectedEtag" etag | Exp.Version ev -> "v="+string ev, log |> Log.prop "expectedVersion" ev let outcome, log = - let reqMetric = Log.metric container.TableName stream t (calfBytes + tipBytes) (appended.Length + unfolds.Length) rc + let reqMetric = Log.metric table.Name stream t (calfBytes + tipBytes) (appended.Length + unfolds.Length) rc match result with | Res.Written etag' -> "OK", log |> Log.event ((if calfBytes = 0 then Log.Metric.SyncAppend else Log.Metric.SyncCalve) reqMetric) |> Log.prop "nextPos" n' @@ -608,7 +608,7 @@ module internal Sync = | Written of etag: string * predecessorBytes: int * events: Event[] * unfolds: Unfold[] | ConflictUnknown - let handle log (maxEvents, maxBytes, maxEventBytes) (container, stream) + let handle log (maxEvents, maxBytes, maxEventBytes) (table, stream) (pos, exp, n', events: IEventData[], unfolds: IEventData[], ct) = task { let baseIndex = int n' - events.Length let events: Event[] = events |> Array.mapi (fun i e -> @@ -625,7 +625,7 @@ module internal Sync = && (not << Array.isEmpty) cur.events then // even if a rule says we should calve, we don't want to produce empty ones Req.Calve (cur.events, events), cur.calvedBytes + Event.arrayBytes cur.events, events else Req.Append (Array.isEmpty cur.events, events), cur.calvedBytes, Array.append cur.events events - match! transactLogged (container, stream) (cur.baseBytes, cur.events.Length, req, unfolds, exp pos, predecessorBytes', n', ct) log with + match! transactLogged (table, stream) (cur.baseBytes, cur.events.Length, req, unfolds, exp pos, predecessorBytes', n', ct) log with | Res.Written etag' -> return Result.Written (etag', predecessorBytes', tipEvents', unfolds) | Res.ConflictUnknown -> return Result.ConflictUnknown } @@ -636,15 +636,15 @@ module internal Tip = | Found of 'T | NotFound | NotModified - let private get (container: Container, stream: string) consistentRead (maybePos: Position option) ct = task { - match! container.TryGetTip(stream, consistentRead, ct) with + let private get (table: StoreTable, stream: string) consistentRead (maybePos: Position option) ct = task { + match! 
table.TryGetTip(stream, consistentRead, ct) with | Some { etag = fe }, rc when fe = Position.toEtag maybePos -> return rc, Res.NotModified | Some t, rc -> return rc, Res.Found t | None, rc -> return rc, Res.NotFound } - let private loggedGet (get: Container * string -> bool -> Position option -> CancellationToken -> Task<_>) (container, stream) consistentRead (maybePos: Position option) (log: ILogger) ct = task { + let private loggedGet (get: StoreTable * string -> bool -> Position option -> CancellationToken -> Task<_>) (table, stream) consistentRead (maybePos: Position option) (log: ILogger) ct = task { let log = log |> Log.prop "stream" stream - let! t, ({ total = ru } as rc, res: Res<_>) = get (container, stream) consistentRead maybePos |> Stopwatch.time ct - let logMetric bytes count (f: Log.Measurement -> _) = log |> Log.event (f (Log.metric container.TableName stream t bytes count rc)) + let! t, ({ total = ru } as rc, res: Res<_>) = get (table, stream) consistentRead maybePos |> Stopwatch.time ct + let logMetric bytes count (f: Log.Measurement -> _) = log |> Log.event (f (Log.metric table.Name stream t bytes count rc)) match res with | Res.NotModified -> (logMetric 0 0 Log.Metric.TipNotModified).Information("EqxDynamo {action:l} {res} {stream:l} {ms:f1}ms {ru}RU", @@ -665,8 +665,8 @@ module internal Tip = |> Seq.sortBy (fun x -> x.Index, x.IsUnfold) |> Array.ofSeq /// `pos` being Some implies that the caller holds a cached value and hence is ready to deal with Result.NotModified - let tryLoad (log: ILogger) containerStream consistentRead (maybePos: Position option, maxIndex) ct: Task[]>> = task { - let! _rc, res = loggedGet get containerStream consistentRead maybePos log ct + let tryLoad (log: ILogger) tableStream consistentRead (maybePos: Position option, maxIndex) ct: Task[]>> = task { + let! 
_rc, res = loggedGet get tableStream consistentRead maybePos log ct match res with | Res.NotModified -> return Res.NotModified | Res.NotFound -> return Res.NotFound @@ -676,14 +676,14 @@ module internal Tip = module internal Query = - let private mkQuery (log: ILogger) (container: Container, stream: string) consistentRead maxItems (direction: Direction, minIndex, maxIndex) ct = + let private mkQuery (log: ILogger) (table: StoreTable, stream: string) consistentRead maxItems (direction: Direction, minIndex, maxIndex) ct = let minN, maxI = minIndex, maxIndex log.Debug("EqxDynamo Query {stream}; n>{minIndex} i<{maxIndex}", stream, Option.toNullable minIndex, Option.toNullable maxIndex) - container.QueryBatches(stream, consistentRead, minN, maxI, (direction = Direction.Backward), maxItems, ct) + table.QueryBatches(stream, consistentRead, minN, maxI, (direction = Direction.Backward), maxItems, ct) // Unrolls the Batches in a response // NOTE when reading backwards, the events are emitted in reverse Index order to suit the takeWhile consumption - let private mapPage direction (container: Container, stream: string) (minIndex, maxIndex, maxItems) (maxRequests: int option) + let private mapPage direction (table: StoreTable, stream: string) (minIndex, maxIndex, maxItems) (maxRequests: int option) (log: ILogger) (i, t, batches: Batch[], rc) : Event[] * Position option * RequestConsumption = match maxRequests with @@ -696,15 +696,15 @@ module internal Query = let usedEventsCount, usedBytes, totalBytes = events.Length, Event.arrayBytes events, Batch.bytesTotal batches let baseIndex = if usedEventsCount = 0 then Nullable () else Nullable (Seq.map Batch.baseIndex batches |> Seq.min) let minI, maxI = match events with [||] -> Nullable (), Nullable () | xs -> Nullable events[0].i, Nullable events[xs.Length - 1].i - (log|> Log.event (Log.Metric.QueryResponse (direction, Log.metric container.TableName stream t totalBytes usedEventsCount rc))) + (log|> Log.event (Log.Metric.QueryResponse (direction, Log.metric table.Name stream t totalBytes usedEventsCount rc))) .Information("EqxDynamo {action:l} {page} {minIndex}-{maxIndex} {ms:f1}ms {ru}RU {batches}/{batchSize}@{index} {count}e {bytes}/{totalBytes}b {direction:l}", "Page", i, minI, maxI, t.ElapsedMilliseconds, rc.total, batches.Length, maxItems, baseIndex, usedEventsCount, usedBytes, totalBytes, direction) let maybePosition = batches |> Array.tryPick Position.tryFromBatch events, maybePosition, rc - let private logQuery (direction, minIndex, maxIndex) (container: Container, stream) interval (responsesCount, events: Event[]) n (rc: RequestConsumption) (log: ILogger) = + let private logQuery (direction, minIndex, maxIndex) (table: StoreTable, stream) interval (responsesCount, events: Event[]) n (rc: RequestConsumption) (log: ILogger) = let count, bytes = events.Length, Event.arrayBytes events - let reqMetric = Log.metric container.TableName stream interval bytes count rc + let reqMetric = Log.metric table.Name stream interval bytes count rc let evt = Log.Metric.Query (direction, responsesCount, reqMetric) let action = match direction with Direction.Forward -> "QueryF" | Direction.Backward -> "QueryB" (log|> Log.event evt).Information( @@ -736,7 +736,7 @@ module internal Query = let f, e = xs |> Seq.tryFindBack isOrigin' |> Option.isSome, items.ToArray() { found = f; maybeTipPos = Some pos; minIndex = i; next = pos.index + 1L; events = e } - let scan<'event> (log: ILogger) (container, stream) consistentRead maxItems maxRequests direction + let scan<'event> 
(log: ILogger) (table, stream) consistentRead maxItems maxRequests direction (tryDecode: ITimelineEvent -> 'event voption, isOrigin: 'event -> bool) (minIndex, maxIndex, ct) : Task option> = task { @@ -768,8 +768,8 @@ module internal Query = let log = log |> Log.prop "batchSize" maxItems |> Log.prop "stream" stream let readLog = log |> Log.prop "direction" direction let batches ct: taskSeq = - mkQuery readLog (container, stream) consistentRead maxItems (direction, minIndex, maxIndex) ct - |> TaskSeq.map (mapPage direction (container, stream) (minIndex, maxIndex, maxItems) maxRequests readLog) + mkQuery readLog (table, stream) consistentRead maxItems (direction, minIndex, maxIndex) ct + |> TaskSeq.map (mapPage direction (table, stream) (minIndex, maxIndex, maxItems) maxRequests readLog) let! t, (events, maybeTipPos, ru) = batches >> mergeBatches log |> Stopwatch.time ct let raws = Array.map ValueTuple.fst events let decoded = if direction = Direction.Forward then Array.chooseV ValueTuple.snd events else let xs = Array.chooseV ValueTuple.snd events in Array.Reverse xs; xs @@ -779,19 +779,19 @@ module internal Query = | Some { index = max }, _ | _, Some (_, max) -> max + 1L | None, None -> 0L - log |> logQuery (direction, minIndex, maxIndex) (container, stream) t (responseCount, raws) version ru + log |> logQuery (direction, minIndex, maxIndex) (table, stream) t (responseCount, raws) version ru match minMax, maybeTipPos with | Some (i, m), _ -> return Some ({ found = found; minIndex = i; next = m + 1L; maybeTipPos = maybeTipPos; events = decoded }: ScanResult<_>) | None, Some { index = tipI } -> return Some { found = found; minIndex = tipI; next = tipI; maybeTipPos = maybeTipPos; events = [||] } | None, _ -> return None } - let walkLazy<'event> (log: ILogger) (container, stream) maxItems maxRequests + let walkLazy<'event> (log: ILogger) (table, stream) maxItems maxRequests (tryDecode: ITimelineEvent -> 'event option, isOrigin: 'event -> bool) (direction, minIndex, maxIndex) ct : taskSeq<'event[]> = taskSeq { - let query = mkQuery log (container, stream) (*consistentRead*)false maxItems (direction, minIndex, maxIndex) + let query = mkQuery log (table, stream) (*consistentRead*)false maxItems (direction, minIndex, maxIndex) - let readPage = mapPage direction (container, stream) (minIndex, maxIndex, maxItems) maxRequests + let readPage = mapPage direction (table, stream) (minIndex, maxIndex, maxItems) maxRequests let log = log |> Log.prop "batchSize" maxItems |> Log.prop "stream" stream let readLog = log |> Log.prop "direction" direction let startTicks = System.Diagnostics.Stopwatch.GetTimestamp() @@ -830,7 +830,7 @@ module internal Query = finally let endTicks = System.Diagnostics.Stopwatch.GetTimestamp() let t = StopwatchInterval(startTicks, endTicks) - log |> logQuery (direction, minIndex, maxIndex) (container, stream) t (i, allEvents.ToArray()) -1L { total = ru } } + log |> logQuery (direction, minIndex, maxIndex) (table, stream) t (i, allEvents.ToArray()) -1L { total = ru } } type [] LoadRes = Pos of Position | Empty | Next of int64 let toPosition = function LoadRes.Pos p -> Some p | LoadRes.Next _ | LoadRes.Empty -> None /// Manages coalescing of spans of events obtained from various sources: @@ -892,16 +892,16 @@ module internal Query = // NOTE: module is public so BatchIndices can be deserialized into module internal Prune = - let until (log: ILogger) (container: Container, stream: string) maxItems indexInclusive ct: Task = task { + let until (log: ILogger) (table: StoreTable, stream: 
string) maxItems indexInclusive ct: Task = task { let log = log |> Log.prop "stream2" stream let deleteItem i count: Task = task { - let! t, rc = (fun ct -> container.DeleteItem(stream, i, ct)) |> Stopwatch.time ct - let reqMetric = Log.metric container.TableName stream t -1 count rc + let! t, rc = (fun ct -> table.DeleteItem(stream, i, ct)) |> Stopwatch.time ct + let reqMetric = Log.metric table.Name stream t -1 count rc let log = let evt = Log.Metric.Delete reqMetric in log |> Log.event evt log.Information("EqxDynamo {action:l} {i} {ms:f1}ms {ru}RU", "Delete", i, t.ElapsedMilliseconds, rc) return rc } let trimTip expectedN count = task { - match! container.TryGetTip(stream, (*consistentRead = *)false, ct) with + match! table.TryGetTip(stream, (*consistentRead = *)false, ct) with | None, _rc -> return failwith "unexpected NotFound" | Some tip, _rc when tip.n <> expectedN -> return failwith $"Concurrent write detected; Expected n=%d{expectedN} actual=%d{tip.n}" | Some tip, tipRc -> @@ -911,18 +911,18 @@ module internal Prune = let updEtag = let g = Guid.NewGuid() in g.ToString "N" let condExpr: Quotations.Expr bool> = <@ fun t -> t.etag = Some tip.etag @> let updateExpr: Quotations.Expr _> = <@ fun t -> { t with etag = Some updEtag; c = tC'; e = tE' } @> - let! t, (_updated, updRc) = (fun ct -> container.TryUpdateTip(stream, updateExpr, ct, condExpr)) |> Stopwatch.time ct + let! t, (_updated, updRc) = (fun ct -> table.TryUpdateTip(stream, updateExpr, ct, condExpr)) |> Stopwatch.time ct let rc = { total = tipRc.total + updRc.total } - let reqMetric = Log.metric container.TableName stream t -1 count rc + let reqMetric = Log.metric table.Name stream t -1 count rc let log = let evt = Log.Metric.Trim reqMetric in log |> Log.event evt log.Information("EqxDynamo {action:l} {count} {ms:f1}ms {ru}RU", "Trim", count, t.ElapsedMilliseconds, rc.total) return rc } let log = log |> Log.prop "index" indexInclusive // need to sort by n to guarantee we don't ever leave an observable gap in the sequence - let query ct = container.QueryIAndNOrderByNAscending(stream, maxItems, ct) + let query ct = table.QueryIAndNOrderByNAscending(stream, maxItems, ct) let mapPage (i, t: StopwatchInterval, batches: BatchIndices[], rc) = let next = Array.tryLast batches |> Option.map (fun x -> x.n) - let reqMetric = Log.metric container.TableName stream t -1 batches.Length rc + let reqMetric = Log.metric table.Name stream t -1 batches.Length rc let log = let evt = Log.Metric.PruneResponse reqMetric in log |> Log.prop "batchIndex" i |> Log.event evt log.Information("EqxDynamo {action:l} {batches} {ms:f1}ms n={next} {ru}RU", "PruneResponse", batches.Length, t.ElapsedMilliseconds, Option.toNullable next, rc.total) @@ -972,7 +972,7 @@ module internal Prune = eventsDeleted <- eventsDeleted + eDel eventsDeferred <- eventsDeferred + eDef outcomes |> Array.iter accumulate - let reqMetric = Log.metric container.TableName stream pt eventsDeleted batches { total = queryCharges } + let reqMetric = Log.metric table.Name stream pt eventsDeleted batches { total = queryCharges } let log = let evt = Log.Metric.Prune (responses, reqMetric) in log |> Log.event evt let lwm = lwm |> Option.defaultValue 0L // If we've seen no batches at all, then the write position is 0L log.Information("EqxDynamo {action:l} {events}/{batches} lwm={lwm} {ms:f1}ms queryRu={queryRu} deleteRu={deleteRu} trimRu={trimRu}", @@ -1056,9 +1056,9 @@ type TipOptions /// Maximum serialized size of events to accumulate in Tip before a Calve operation is forced (independent of 
capacity consumed by unfolds). Default: 32K. member val MaxEventBytes = defaultArg maxBytes defaultTipMaxBytes -type internal StoreClient(container: Container, fallback: Container option, query: QueryOptions, tip: TipOptions) = +type internal StoreClient(table: StoreTable, fallback: StoreTable option, query: QueryOptions, tip: TipOptions) = - let loadTip log stream consistentRead pos = Tip.tryLoad log (container, stream) consistentRead (pos, None) + let loadTip log stream consistentRead pos = Tip.tryLoad log (table, stream) consistentRead (pos, None) // Always yields events forward, regardless of direction member _.Read(log, stream, consistentRead, direction, (tryDecode, isOrigin), ct, ?minIndex, ?maxIndex, ?tip): Task = task { @@ -1067,17 +1067,17 @@ type internal StoreClient(container: Container, fallback: Container option, quer | Some _ as mi -> mi | None when Option.isSome tip -> Some Batch.tipMagicI | None -> None - let walk log container = Query.scan log (container, stream) consistentRead query.MaxItems query.MaxRequests direction (tryDecode, isOrigin) + let walk log table = Query.scan log (table, stream) consistentRead query.MaxItems query.MaxRequests direction (tryDecode, isOrigin) let walkFallback = match fallback with | None -> Choice1Of2 query.IgnoreMissingEvents | Some f -> Choice2Of2 (walk (log |> Log.prop "fallback" true) f) let log = log |> Log.prop "stream" stream - let! pos, events = Query.load log (minIndex, maxIndex) tip (walk log container) walkFallback ct + let! pos, events = Query.load log (minIndex, maxIndex) tip (walk log table) walkFallback ct return Token.create_ pos, events } member _.ReadLazy(log, batching: QueryOptions, stream, direction, (tryDecode, isOrigin), ct, ?minIndex, ?maxIndex): taskSeq<'event[]> = - Query.walkLazy log (container, stream) batching.MaxItems batching.MaxRequests (tryDecode, isOrigin) (direction, minIndex, maxIndex) ct + Query.walkLazy log (table, stream) batching.MaxItems batching.MaxRequests (tryDecode, isOrigin) (direction, minIndex, maxIndex) ct member store.Load(log, (stream, maybePos), consistentRead, (tryDecode, isOrigin), checkUnfolds: bool, ct): Task = if not checkUnfolds then store.Read(log, stream, consistentRead, Direction.Backward, (tryDecode, isOrigin), ct) @@ -1104,15 +1104,15 @@ type internal StoreClient(container: Container, fallback: Container option, quer | Tip.Res.Found (pos, i, xs) -> return! read (pos, i, xs) } member _.Sync(log, stream, pos, exp, n': int64, eventsEncoded, unfoldsEncoded, ct): Task = task { - match! Sync.handle log (tip.MaxEvents, tip.MaxBytes, tip.MaxEventBytes) (container, stream) (pos, exp, n', eventsEncoded, unfoldsEncoded, ct) with + match! 
Sync.handle log (tip.MaxEvents, tip.MaxBytes, tip.MaxEventBytes) (table, stream) (pos, exp, n', eventsEncoded, unfoldsEncoded, ct) with | Sync.Result.Written (etag', b', events, unfolds) -> return InternalSyncResult.Written (Token.create (Position.fromElements (stream, b', n', events, unfolds, etag'))) | Sync.Result.ConflictUnknown -> return InternalSyncResult.ConflictUnknown } member _.Prune(log, stream, index, ct) = - Prune.until log (container, stream) query.MaxItems index ct + Prune.until log (table, stream) query.MaxItems index ct -type internal Category<'event, 'state, 'context> +type internal StoreCategory<'event, 'state, 'context> ( store: StoreClient, codec: IEventCodec<'event, EncodedBody, 'context>, fold: 'state -> 'event[] -> 'state, initial: 'state, isOrigin: 'event -> bool, checkUnfolds, mapUnfolds: Choice 'state -> 'event[], 'event[] -> 'state -> 'event[] * 'event[]>) = @@ -1155,7 +1155,7 @@ type DynamoStoreConnector(clientConfig: Amazon.DynamoDBv2.AmazonDynamoDBConfig, /// Connect explicitly with a triplet of serviceUrl, accessKey, secretKey. No fallback behaviors are applied. /// timeout: Required; AWS SDK Default: 100s /// maxRetries: Required; AWS SDK Default: 10 - new (serviceUrl, accessKey, secretKey, timeout: TimeSpan, retries) = + new(serviceUrl, accessKey, secretKey, timeout: TimeSpan, retries) = let m = Amazon.Runtime.RequestRetryMode.Standard let clientConfig = Amazon.DynamoDBv2.AmazonDynamoDBConfig(ServiceURL = serviceUrl, RetryMode = m, MaxErrorRetry = retries, Timeout = timeout) DynamoStoreConnector(clientConfig, Amazon.Runtime.BasicAWSCredentials(accessKey, secretKey)) @@ -1164,13 +1164,13 @@ type DynamoStoreConnector(clientConfig: Amazon.DynamoDBv2.AmazonDynamoDBConfig, /// systemName: Amazon SystemName, e.g. "us-west-1" /// timeout: Required; AWS SDK Default: 100s /// maxRetries: Required; AWS SDK Default: 10 - new (systemName, timeout: TimeSpan, retries) = + new(systemName, timeout: TimeSpan, retries) = let regionEndpoint = Amazon.RegionEndpoint.GetBySystemName(systemName) let m = Amazon.Runtime.RequestRetryMode.Standard let clientConfig = Amazon.DynamoDBv2.AmazonDynamoDBConfig(RegionEndpoint = regionEndpoint, RetryMode = m, MaxErrorRetry = retries, Timeout = timeout) DynamoStoreConnector(clientConfig) - member _.Options = clientConfig + member val Options = clientConfig member x.Retries = x.Options.MaxErrorRetry member x.Timeout = let t = x.Options.Timeout in t.Value member x.Endpoint = @@ -1178,7 +1178,7 @@ type DynamoStoreConnector(clientConfig: Amazon.DynamoDBv2.AmazonDynamoDBConfig, | null -> ConnectionMode.AwsEnvironment x.Options.RegionEndpoint.SystemName | x -> ConnectionMode.AwsKeyCredentials x - member _.CreateClient() = + member _.CreateDynamoDbClient() = match credentials with | None -> new Amazon.DynamoDBv2.AmazonDynamoDBClient(clientConfig) // this uses credentials=FallbackCredentialsFactory.GetCredentials() | Some credentials -> new Amazon.DynamoDBv2.AmazonDynamoDBClient(credentials, clientConfig) @@ -1201,46 +1201,17 @@ module internal ConnectMode = | Verify -> Initialization.verify client tableName | CreateIfNotExists throughput -> Initialization.createIfNotExists client tableName (throughput, Initialization.StreamingMode.New) -/// Holds all relevant state for a Store. There should be a single one of these per process. 
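On the connector, `CreateClient` becomes `CreateDynamoDbClient` and `Options` is captured as a stored property; both construction overloads remain. A sketch of the two paths shown above, with endpoint, region, credentials and the 60s/10-retry settings as placeholders:

```fsharp
open System
open Equinox.DynamoStore

// explicit serviceUrl + key pair, e.g. for dynamodb-local
let local = DynamoStoreConnector("http://localhost:8000", "AN_ACCESS_KEY", "A_SECRET_KEY", TimeSpan.FromSeconds 60., 10)

// region-based; credentials are resolved from the AWS environment
let cloud = DynamoStoreConnector("us-east-1", TimeSpan.FromSeconds 60., 10)

let ddb = local.CreateDynamoDbClient()   // Amazon.DynamoDBv2.AmazonDynamoDBClient
```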
-type DynamoStoreClient - ( tableName, - // Facilitates custom mapping of Stream Category Name to underlying Table and Stream names - categoryAndStreamIdToTableAndStreamNames: string * string -> string * string, - createContainer: string -> Container, - createFallbackContainer: string -> Container option, - [] ?archiveTableName: string, - [] ?primaryTableToArchive: string -> string) = - let primaryTableToSecondary = defaultArg primaryTableToArchive id - member val TableName = tableName - member val ArchiveTableName = archiveTableName - new( client: Amazon.DynamoDBv2.IAmazonDynamoDB, tableName: string, - // Table name to use for archive store. Default: (if archiveClient specified) use same tableName but via archiveClient. - [] ?archiveTableName, - // Client to use for archive store. Default: (if archiveTableName specified) use same archiveTableName but via client. - // Events that have been archived and purged (and hence are missing from the primary) are retrieved from this Table - [] ?archiveClient: Amazon.DynamoDBv2.IAmazonDynamoDB) = - let genStreamName (categoryName, streamId) = if categoryName = null then streamId else StreamName.render categoryName streamId - let catAndStreamToTableStream (categoryName, streamId) = tableName, genStreamName (categoryName, streamId) - let primaryContainer t = Container.Create(client, t) - let fallbackContainer = - if Option.isNone archiveClient && Option.isNone archiveTableName then fun _ -> None - else fun primaryContainerName -> Some (Container.Create(defaultArg archiveClient client, defaultArg archiveTableName primaryContainerName)) - DynamoStoreClient(tableName, catAndStreamToTableStream, primaryContainer, fallbackContainer, ?archiveTableName = archiveTableName) - member internal _.ResolveContainerFallbackAndStreamName(categoryName, streamId): Container * Container option * string = - let tableName, streamName = categoryAndStreamIdToTableAndStreamNames (categoryName, streamId) - let fallbackTableName = primaryTableToSecondary tableName - createContainer tableName, createFallbackContainer fallbackTableName, streamName - - /// Verifies or Creates the underlying Tables comprising the Store before creating a `DynamoStoreClient` - static member Establish(client, tableName: string, [] ?archiveTableName, [] ?mode: ConnectMode): Async = async { - let init t = ConnectMode.apply client t (defaultArg mode ConnectMode.Verify) - do! init tableName - match archiveTableName with None -> () | Some archiveTable-> do! init archiveTable - return DynamoStoreClient(client, tableName, ?archiveTableName = archiveTableName) } - -/// Defines a set of related access policies for a given Table, together with a Containers map defining mappings from (category, streamId) to (tableName, streamName) -type DynamoStoreContext(storeClient: DynamoStoreClient, tipOptions, queryOptions) = - new(storeClient: DynamoStoreClient, +/// Holds the DynamoDB Client(s). There should not need to be more than a single instance per process +type DynamoStoreClient(client: Amazon.DynamoDBv2.IAmazonDynamoDB, + // Client to use for fallback tables. + // Events that have been archived and purged (and hence are missing from the primary) are retrieved from this Table + [] ?fallbackClient: Amazon.DynamoDBv2.IAmazonDynamoDB) = + member val internal Primary = client + member val internal Fallback = defaultArg fallbackClient client + +/// Defines the policies for accessing a given Table (And optional fallback Table for retrieval of archived data). 
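The replacement `DynamoStoreClient` above holds only the DynamoDB client(s); table naming moves down into the context. A sketch of the two shapes its constructor supports:

```fsharp
open Amazon.DynamoDBv2
open Equinox.DynamoStore

// primary and fallback reads share a single IAmazonDynamoDB
let single (ddb: IAmazonDynamoDB) = DynamoStoreClient(ddb)

// archived events are read via a dedicated client, e.g. targeting another account or region
let split (primary: IAmazonDynamoDB) (archive: IAmazonDynamoDB) =
    DynamoStoreClient(primary, fallbackClient = archive)
```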
+type DynamoStoreContext(client: DynamoStoreClient, tableName, tipOptions, queryOptions, ?archiveTableName) = + new(client: DynamoStoreClient, tableName, // Maximum serialized event size to permit to accumulate in Tip before they get moved out to a standalone Batch. Default: 32K. [] ?maxBytes, // Maximum number of events permitted in Tip. When this is exceeded, events are moved out to a standalone Batch. Default: limited by maxBytes @@ -1250,16 +1221,26 @@ type DynamoStoreContext(storeClient: DynamoStoreClient, tipOptions, queryOptions // Maximum number of trips to permit when slicing the work into multiple responses limited by `queryMaxItems`. Default: unlimited. [] ?queryMaxRequests, // Inhibit throwing when events are missing, but no Archive Table has been supplied as a fallback - [] ?ignoreMissingEvents) = + [] ?ignoreMissingEvents, + [] ?archiveTableName) = let tipOptions = TipOptions(?maxBytes = maxBytes, ?maxEvents = tipMaxEvents) let queryOptions = QueryOptions(?maxItems = queryMaxItems, ?maxRequests = queryMaxRequests, ?ignoreMissingEvents = ignoreMissingEvents) - DynamoStoreContext(storeClient, tipOptions, queryOptions) - member val StoreClient = storeClient - member val QueryOptions = queryOptions + DynamoStoreContext(client, tableName, tipOptions, queryOptions, ?archiveTableName = archiveTableName) + member val TableName = tableName member val TipOptions = tipOptions - member internal x.ResolveContainerClientAndStreamName(categoryName, streamId) = - let container, fallback, streamName = storeClient.ResolveContainerFallbackAndStreamName(categoryName, streamId) - struct (StoreClient(container, fallback, x.QueryOptions, x.TipOptions), streamName) + member val QueryOptions = queryOptions + member val ArchiveTableName = archiveTableName + member val internal StoreClient = + let primary = StoreTable.Create(client.Primary, tableName) + let fallback = archiveTableName |> Option.map(fun t -> StoreTable.Create(client.Fallback, t)) + StoreClient(primary, fallback, queryOptions, tipOptions) + + /// Verifies or Creates the underlying Tables comprising the Store before creating a `DynamoStoreContext` + static member Establish(client: DynamoStoreClient, tableName: string, [] ?archiveTableName, [] ?mode: ConnectMode): Async = async { + let init ddb t = ConnectMode.apply ddb t (defaultArg mode ConnectMode.Verify) + do! init client.Primary tableName + match archiveTableName with None -> () | Some archiveTable-> do! init client.Fallback archiveTable + return DynamoStoreContext(client, tableName, ?archiveTableName = archiveTableName) } [] type AccessStrategy<'event, 'state> = @@ -1293,8 +1274,8 @@ type AccessStrategy<'event, 'state> = /// | Custom of isOrigin: ('event -> bool) * transmute: ('event[] -> 'state -> 'event[] * 'event[]) -type DynamoStoreCategory<'event, 'state, 'context>(name, resolveStream) = - inherit Equinox.Category<'event, 'state, 'context>(name, resolveStream = resolveStream) +type DynamoStoreCategory<'event, 'state, 'context> = + inherit Equinox.Category<'event, 'state, 'context> new(context: DynamoStoreContext, name, codec, fold, initial, access, // For DynamoDB, caching is typically a central aspect of managing RU consumption to maintain performance and capacity. 
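Table verification/creation correspondingly moves to `DynamoStoreContext.Establish`, which checks the main table (and the archive table, if named) before yielding the context; `mode` defaults to verification. A sketch with placeholder table names:

```fsharp
open Equinox.DynamoStore

let establish (storeClient: DynamoStoreClient) = async {
    // verifies both tables exist (or creates them, given an explicit ConnectMode), then builds the context
    return! DynamoStoreContext.Establish(storeClient, "equinox-events", archiveTableName = "equinox-events-archive") }
```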
// Omitting can make sense in specific cases; if streams are short, or there's always a usable snapshot in the Tip @@ -1313,16 +1294,9 @@ type DynamoStoreCategory<'event, 'state, 'context>(name, resolveStream) = | AccessStrategy.MultiSnapshot (isOrigin, unfold) -> isOrigin, true, Choice2Of3 (fun _ (state: 'state) -> unfold state) | AccessStrategy.RollingState toSnapshot -> (fun _ -> true), true, Choice3Of3 (fun _ state -> Array.empty, toSnapshot state |> Array.singleton) | AccessStrategy.Custom (isOrigin, transmute) -> isOrigin, true, Choice3Of3 transmute - let categories = System.Collections.Concurrent.ConcurrentDictionary>() - let resolveInner (categoryName, container) = - let createCategory _name: ICategory<_, _, 'context> = - Category<'event, 'state, 'context>(container, codec, fold, initial, isOrigin, checkUnfolds, mapUnfolds) - |> Caching.apply Token.isStale caching - categories.GetOrAdd(categoryName, createCategory) - let resolveStream streamId = - let struct (container, streamName) = context.ResolveContainerClientAndStreamName(name, streamId) - struct (resolveInner (name, container), streamName) - DynamoStoreCategory(name, resolveStream) + { inherit Equinox.Category<'event, 'state, 'context>(name, + StoreCategory<'event, 'state, 'context>(context.StoreClient, codec, fold, initial, isOrigin, checkUnfolds, mapUnfolds) + |> Caching.apply Token.isStale caching) } module Exceptions = @@ -1342,11 +1316,12 @@ type AppendResult<'t> = | ConflictUnknown /// Encapsulates the core facilities Equinox.DynamoStore offers for operating directly on Events in Streams. -type EventsContext internal - ( context: Equinox.DynamoStore.DynamoStoreContext, store: StoreClient, +type EventsContext + ( context: Equinox.DynamoStore.DynamoStoreContext, // Logger to write to - see https://github.com/serilog/serilog/wiki/Provided-Sinks for how to wire to your logger log: Serilog.ILogger) = do if log = null then nullArg "log" + let resolve streamName = context.StoreClient, StreamName.toString streamName let maxCountPredicate count = let acc = ref (max (count-1) 0) fun _ -> @@ -1354,15 +1329,10 @@ type EventsContext internal acc.Value <- acc.Value - 1 false - new (context: Equinox.DynamoStore.DynamoStoreContext, log) = - let storeClient = context.ResolveContainerClientAndStreamName(null, null) |> ValueTuple.fst - EventsContext(context, storeClient, log) - - member _.StreamName(rawStreamName) = context.ResolveContainerClientAndStreamName(null, rawStreamName) |> ValueTuple.snd |> StreamName.parse - - member internal _.GetLazy(stream, ct, ?queryMaxItems, ?direction, ?minIndex, ?maxIndex): taskSeq[]> = + member internal _.GetLazy(streamName, ct, ?queryMaxItems, ?direction, ?minIndex, ?maxIndex): taskSeq[]> = let direction = defaultArg direction Direction.Forward let batching = match queryMaxItems with Some qmi -> QueryOptions(qmi) | _ -> context.QueryOptions + let store, stream = resolve streamName store.ReadLazy(log, batching, stream, direction, (Some, fun _ -> false), ct, ?minIndex = minIndex, ?maxIndex = maxIndex) member internal _.GetInternal(streamName, ct, ?minIndex, ?maxIndex, ?maxCount, ?direction) = task { @@ -1376,21 +1346,23 @@ type EventsContext internal match maxCount with | Some limit -> maxCountPredicate limit | None -> fun _ -> false - let! token, events = store.Read(log, StreamName.toString streamName, (*consistentRead*)false, direction, (ValueSome, isOrigin), ct, ?minIndex = minIndex, ?maxIndex = maxIndex) + let store, stream = resolve streamName + let! 
token, events = store.Read(log, stream, (*consistentRead*)false, direction, (ValueSome, isOrigin), ct, ?minIndex = minIndex, ?maxIndex = maxIndex) if direction = Direction.Backward then System.Array.Reverse events return token, events } /// Establishes the current position of the stream in as efficient a manner as possible /// (The ideal situation is that the preceding token is supplied as input in order to avail of efficient validation of an unchanged state) member _.Sync(streamName, ct, [] ?position: Position): Task = task { - let! Token.Unpack pos' = store.GetPosition(log, StreamName.toString streamName, ct, ?pos = position) + let store, stream = resolve streamName + let! Token.Unpack pos' = store.GetPosition(log, stream, ct, ?pos = position) return Position.flatten pos' } /// Query (with MaxItems set to `queryMaxItems`) from the specified `Position`, allowing the reader to efficiently walk away from a running query /// ... NB as long as they Dispose! member x.Walk(streamName, queryMaxItems, ct, [] ?minIndex, [] ?maxIndex, [] ?direction) : taskSeq[]> = - x.GetLazy(StreamName.toString streamName, queryMaxItems, ct, ?direction = direction, ?minIndex = minIndex, ?maxIndex = maxIndex) + x.GetLazy(streamName, queryMaxItems, ct, ?direction = direction, ?minIndex = minIndex, ?maxIndex = maxIndex) /// Reads all Events from a `Position` in a given `direction` member x.Read(streamName, ct, [] ?minIndex, [] ?maxIndex, [] ?maxCount, [] ?direction) @@ -1401,14 +1373,16 @@ type EventsContext internal /// Appends the supplied batch of events, subject to a consistency check based on the `position` /// Callers should implement appropriate idempotent handling, or use Equinox.Decider for that purpose - member x.Sync(stream, position, events: IEventData<_>[]): Async> = async { + member x.Sync(streamName, position, events: IEventData<_>[]): Async> = async { + let store, stream = resolve streamName match! store.Sync(log, stream, Some position, Position.toIndex >> Sync.Exp.Version, position.index, events, Seq.empty) with | InternalSyncResult.Written (Token.Unpack pos) -> return AppendResult.Ok (Position.flatten pos) | InternalSyncResult.ConflictUnknown -> return AppendResult.ConflictUnknown } #endif member _.Prune(streamName, index, ct): Task = - store.Prune(log, StreamName.toString streamName, index, ct) + let store, stream = resolve streamName + store.Prune(log, stream, index, ct) /// Provides mechanisms for building `EventData` records to be supplied to the `Events` API type EventData() = @@ -1432,46 +1406,46 @@ module Events = /// reading in batches of the specified size. /// Returns an empty sequence if the stream is empty or if the sequence number is larger than the largest /// sequence number in the stream. - let getAll (ctx: EventsContext) (streamName: string) (index: int64) (batchSize: int) ct: taskSeq[]> = - ctx.Walk(ctx.StreamName streamName, ct, batchSize, minIndex = index) + let getAll (ctx: EventsContext) (streamName: StreamName) (index: int64) (batchSize: int) ct: taskSeq[]> = + ctx.Walk(streamName, ct, batchSize, minIndex = index) /// Returns an async array of events in the stream starting at the specified sequence number, /// number of events to read is specified by batchSize /// Returns an empty sequence if the stream is empty or if the sequence number is larger than the largest /// sequence number in the stream. 
- let get (ctx: EventsContext) (streamName: string) (index: int64) (maxCount: int): Async[]> = Async.call <| fun ct -> - ctx.Read(ctx.StreamName streamName, ct, ?minIndex = (if index = 0 then None else Some index), maxCount = maxCount) + let get (ctx: EventsContext) (streamName: StreamName) (index: int64) (maxCount: int): Async[]> = Async.call <| fun ct -> + ctx.Read(streamName, ct, ?minIndex = (if index = 0 then None else Some index), maxCount = maxCount) #if APPEND_SUPPORT /// Appends a batch of events to a stream at the specified expected sequence number. /// If the specified expected sequence number does not match the stream, the events are not appended /// and a failure is returned. - let append (ctx: EventsContext) (streamName: string) (index: int64) (events: IEventData<_>[]): Async> = - ctx.Sync(ctx.StreamId streamName, Sync.Exp.Version index, events) |> stripSyncResult + let append (ctx: EventsContext) (streamName: StreamName) (index: int64) (events: IEventData<_>[]): Async> = + ctx.Sync(streamName, Sync.Exp.Version index, events) |> stripSyncResult #endif /// Requests deletion of events up and including the specified index. /// Due to the need to preserve ordering of data in the stream, only complete Batches will be removed. /// If the index is within the Tip, events are removed via an etag-checked update. Does not alter the unfolds held in the Tip, or remove the Tip itself. /// Returns count of events deleted this time, events that could not be deleted due to partial batches, and the stream's lowest remaining sequence number. - let pruneUntil (ctx: EventsContext) (streamName: string) (index: int64): Async = Async.call <| fun ct -> - ctx.Prune(ctx.StreamName streamName, index, ct) + let pruneUntil (ctx: EventsContext) (streamName: StreamName) (index: int64): Async = Async.call <| fun ct -> + ctx.Prune(streamName, index, ct) /// Returns an async sequence of events in the stream backwards starting from the specified sequence number, /// reading in batches of the specified size. /// Returns an empty sequence if the stream is empty or if the sequence number is smaller than the smallest /// sequence number in the stream. - let getAllBackwards (ctx: EventsContext) (streamName: string) (index: int64) (batchSize: int) ct: taskSeq[]> = - ctx.Walk(ctx.StreamName streamName, ct, batchSize, maxIndex = index, direction = Direction.Backward) + let getAllBackwards (ctx: EventsContext) (streamName: StreamName) (index: int64) (batchSize: int) ct: taskSeq[]> = + ctx.Walk(streamName, ct, batchSize, maxIndex = index, direction = Direction.Backward) /// Returns an async array of events in the stream backwards starting from the specified sequence number, /// number of events to read is specified by batchSize /// Returns an empty sequence if the stream is empty or if the sequence number is smaller than the smallest /// sequence number in the stream. 
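An illustrative read (not part of the patch) against the revised `StreamName`-based helpers; the `Equinox.DynamoStore.Core` qualifier and the stream name are assumptions/placeholders rather than anything this hunk confirms.

```fsharp
// Hedged sketch: read the first ten raw events of a stream via the Events module above.
let readFirstTen (context: Equinox.DynamoStore.DynamoStoreContext) (log: Serilog.ILogger) = async {
    let eventsCtx = Equinox.DynamoStore.Core.EventsContext(context, log)
    // Callers now supply a parsed FsCodec.StreamName rather than a raw string
    let streamName = FsCodec.StreamName.parse "Favorites-clientA"
    let! events = Equinox.DynamoStore.Core.Events.get eventsCtx streamName 0L 10
    return [ for e in events -> e.Index, e.EventType ] }
```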
- let getBackwards (ctx: EventsContext) (streamName: string) (index: int64) (maxCount: int): Async[]> = Async.call <| fun ct -> - ctx.Read(ctx.StreamName streamName, ct, ?maxIndex = (match index with int64.MaxValue -> None | i -> Some (i + 1L)), maxCount = maxCount, direction = Direction.Backward) + let getBackwards (ctx: EventsContext) (streamName: StreamName) (index: int64) (maxCount: int): Async[]> = Async.call <| fun ct -> + ctx.Read(streamName, ct, ?maxIndex = (match index with int64.MaxValue -> None | i -> Some (i + 1L)), maxCount = maxCount, direction = Direction.Backward) /// Obtains the `index` from the current write Position - let getNextIndex (ctx: EventsContext) (streamName: string): Async = - Async.call (fun ct -> ctx.Sync(ctx.StreamName streamName, ct)) + let getNextIndex (ctx: EventsContext) (streamName: StreamName): Async = + Async.call (fun ct -> ctx.Sync(streamName, ct)) |> stripPosition diff --git a/src/Equinox.DynamoStore/Equinox.DynamoStore.fsproj b/src/Equinox.DynamoStore/Equinox.DynamoStore.fsproj index 817423b92..653427bad 100644 --- a/src/Equinox.DynamoStore/Equinox.DynamoStore.fsproj +++ b/src/Equinox.DynamoStore/Equinox.DynamoStore.fsproj @@ -15,7 +15,6 @@ - diff --git a/src/Equinox.EventStore/Equinox.EventStore.fsproj b/src/Equinox.EventStore/Equinox.EventStore.fsproj index 0dfd77e38..2c7dc89af 100644 --- a/src/Equinox.EventStore/Equinox.EventStore.fsproj +++ b/src/Equinox.EventStore/Equinox.EventStore.fsproj @@ -20,7 +20,6 @@ - diff --git a/src/Equinox.EventStore/EventStore.fs b/src/Equinox.EventStore/EventStore.fs index 9f426b771..9ac963ede 100755 --- a/src/Equinox.EventStore/EventStore.fs +++ b/src/Equinox.EventStore/EventStore.fs @@ -350,7 +350,7 @@ type EventStoreConnection(readConnection, [] ?writeConnection, [, []?batchCountLimit) = - new (batchSize) = BatchOptions(fun () -> batchSize) + new(batchSize) = BatchOptions(fun () -> batchSize) member _.BatchSize = getBatchSize.Invoke() member _.MaxBatches = batchCountLimit @@ -361,9 +361,9 @@ type EventStoreContext(connection: EventStoreConnection, batchOptions: BatchOpti let isResolvedEventEventType (tryDecode, predicate) (x: ResolvedEvent) = predicate (tryDecode x.Event.Data) let tryIsResolvedEventEventType predicateOption = predicateOption |> Option.map isResolvedEventEventType let conn requireLeader = if requireLeader then connection.WriteConnection else connection.ReadConnection - new ( connection: EventStoreConnection, - // Max number of Events to retrieve in a single batch. Also affects frequency of RollingSnapshots. Default: 500. - [] ?batchSize) = + new(connection: EventStoreConnection, + // Max number of Events to retrieve in a single batch. Also affects frequency of RollingSnapshots. Default: 500. 
+ [] ?batchSize) = EventStoreContext(connection, BatchOptions(batchSize = defaultArg batchSize 500)) member val BatchOptions = batchOptions @@ -438,7 +438,7 @@ type private CompactionContext(eventsLen: int, capacityBeforeCompaction: int) = /// Determines whether writing a Compaction event is warranted (based on the existing state and the current accumulated changes) member _.IsCompactionDue = eventsLen > capacityBeforeCompaction -type private Category<'event, 'state, 'context>(context: EventStoreContext, codec: FsCodec.IEventCodec<_, _, 'context>, fold, initial, access) = +type private StoreCategory<'event, 'state, 'context>(context: EventStoreContext, codec: FsCodec.IEventCodec<_, _, 'context>, fold, initial, access) = let tryDecode (e: ResolvedEvent) = e |> UnionEncoderAdapters.encodedEventOfResolvedEvent |> codec.TryDecode let isOrigin = match access with @@ -474,8 +474,8 @@ type private Category<'event, 'state, 'context>(context: EventStoreContext, code | GatewaySyncResult.ConflictUnknown _ -> return SyncResult.Conflict (fun _ct -> reload (log, streamName, true, streamToken, state)) } interface Caching.IReloadable<'state> with member _.Reload(log, sn, leader, token, state, _ct) = reload (log, sn, leader, token, state) -type EventStoreCategory<'event, 'state, 'context> internal (name, inner) = - inherit Equinox.Category<'event, 'state, 'context>(name, inner = inner) +type EventStoreCategory<'event, 'state, 'context> = + inherit Equinox.Category<'event, 'state, 'context> new(context: EventStoreContext, name, codec: FsCodec.IEventCodec<_, _, 'context>, fold, initial, access, // Caching can be overkill for EventStore esp considering the degree to which its intrinsic caching is a first class feature // e.g., a key benefit is that reads of streams more than a few pages long get completed in constant time after the initial load @@ -485,8 +485,8 @@ type EventStoreCategory<'event, 'state, 'context> internal (name, inner) = | AccessStrategy.LatestKnownEvent, _ -> invalidOp "Equinox.EventStore does not support mixing AccessStrategy.LatestKnownEvent with Caching at present." 
| _ -> () - let cat = Category<'event, 'state, 'context>(context, codec, fold, initial, access) |> Caching.apply Token.isStale caching - EventStoreCategory(name, inner = cat) + let cat = StoreCategory<'event, 'state, 'context>(context, codec, fold, initial, access) |> Caching.apply Token.isStale caching + { inherit Equinox.Category<'event, 'state, 'context>(name, cat) } type private SerilogAdapter(log: ILogger) = interface EventStore.ClientAPI.ILogger with diff --git a/src/Equinox.EventStoreDb/Equinox.EventStoreDb.fsproj b/src/Equinox.EventStoreDb/Equinox.EventStoreDb.fsproj index f535611ff..59618faf5 100644 --- a/src/Equinox.EventStoreDb/Equinox.EventStoreDb.fsproj +++ b/src/Equinox.EventStoreDb/Equinox.EventStoreDb.fsproj @@ -19,7 +19,6 @@ contentfiles - diff --git a/src/Equinox.EventStoreDb/EventStoreDb.fs b/src/Equinox.EventStoreDb/EventStoreDb.fs index 264773beb..cccac663e 100644 --- a/src/Equinox.EventStoreDb/EventStoreDb.fs +++ b/src/Equinox.EventStoreDb/EventStoreDb.fs @@ -292,7 +292,7 @@ type EventStoreConnection(readConnection, [] ?writeConnection, [) = - new (batchSize) = BatchOptions(fun () -> batchSize) + new(batchSize) = BatchOptions(fun () -> batchSize) member _.BatchSize = getBatchSize.Invoke() [] @@ -303,9 +303,9 @@ type EventStoreContext(connection: EventStoreConnection, batchOptions: BatchOpti let isResolvedEventEventType (tryDecode, predicate) (x: ResolvedEvent) = predicate (tryDecode x.Event.Data) let tryIsResolvedEventEventType predicateOption = predicateOption |> Option.map isResolvedEventEventType let conn requireLeader = if requireLeader then connection.WriteConnection else connection.ReadConnection - new ( connection: EventStoreConnection, - // Max number of Events to retrieve in a single batch. Also affects frequency of RollingSnapshots. Default: 500. - [] ?batchSize) = + new(connection: EventStoreConnection, + // Max number of Events to retrieve in a single batch. Also affects frequency of RollingSnapshots. Default: 500. 
+ [] ?batchSize) = EventStoreContext(connection, BatchOptions(batchSize = defaultArg batchSize 500)) member val BatchOptions = batchOptions @@ -380,7 +380,7 @@ type private CompactionContext(eventsLen: int, capacityBeforeCompaction: int) = /// Determines whether writing a Compaction event is warranted (based on the existing state and the current accumulated changes) member _.IsCompactionDue = eventsLen > capacityBeforeCompaction -type private Category<'event, 'state, 'context>(context: EventStoreContext, codec: FsCodec.IEventCodec<_, _, 'context>, fold, initial, access) = +type private StoreCategory<'event, 'state, 'context>(context: EventStoreContext, codec: FsCodec.IEventCodec<_, _, 'context>, fold, initial, access) = let tryDecode (e: ResolvedEvent) = e.Event |> ClientCodec.timelineEvent |> codec.TryDecode let isOrigin = match access with @@ -417,8 +417,8 @@ type private Category<'event, 'state, 'context>(context: EventStoreContext, code | GatewaySyncResult.Written token' -> return SyncResult.Written (token', fold state events) | GatewaySyncResult.ConflictUnknown _ -> return SyncResult.Conflict (reload (log, streamName, (*requireLeader*)true, streamToken, state)) } -type EventStoreCategory<'event, 'state, 'context> internal (name, inner) = - inherit Equinox.Category<'event, 'state, 'context>(name, inner = inner) +type EventStoreCategory<'event, 'state, 'context> = + inherit Equinox.Category<'event, 'state, 'context> new(context: EventStoreContext, name, codec: FsCodec.IEventCodec<_, _, 'context>, fold, initial, access, // Caching can be overkill for EventStore esp considering the degree to which its intrinsic caching is a first class feature // e.g., a key benefit is that reads of streams more than a few pages long get completed in constant time after the initial load @@ -428,8 +428,9 @@ type EventStoreCategory<'event, 'state, 'context> internal (name, inner) = | AccessStrategy.LatestKnownEvent, _ -> invalidOp "Equinox.EventStoreDb does not support mixing AccessStrategy.LatestKnownEvent with Caching at present." 
| _ -> () - let cat = Category<'event, 'state, 'context>(context, codec, fold, initial, access) |> Caching.apply Token.isStale caching - EventStoreCategory(name, inner = cat) + { inherit Equinox.Category<'event, 'state, 'context>(name, + StoreCategory<'event, 'state, 'context>(context, codec, fold, initial, access) + |> Caching.apply Token.isStale caching) } [] type Discovery = diff --git a/src/Equinox.MemoryStore/Equinox.MemoryStore.fsproj b/src/Equinox.MemoryStore/Equinox.MemoryStore.fsproj index 1af1eceaa..51457c91d 100644 --- a/src/Equinox.MemoryStore/Equinox.MemoryStore.fsproj +++ b/src/Equinox.MemoryStore/Equinox.MemoryStore.fsproj @@ -10,15 +10,13 @@ - + - - diff --git a/src/Equinox.MemoryStore/MemoryStore.fs b/src/Equinox.MemoryStore/MemoryStore.fs index 9416a8a8f..1f21317e5 100644 --- a/src/Equinox.MemoryStore/MemoryStore.fs +++ b/src/Equinox.MemoryStore/MemoryStore.fs @@ -45,41 +45,26 @@ type VolatileStore<'Format>() = // NOTE the lock could be more granular, the guarantee of notification ordering is/needs to be at stream level only lock streams <| fun () -> let struct (succeeded, _) as outcome = trySync streamName expectedCount events - if succeeded then committed.Trigger(FSharp.UMX.UMX.tag streamName, events) + if succeeded then committed.Trigger(FsCodec.StreamName.Internal.trust streamName, events) outcome -type Token = int - -/// Internal implementation detail of MemoryStore -module private Token = - - let private streamTokenOfEventCount (eventCount: int): StreamToken = - // TOCONSIDER Could implement streamBytes tracking based on a supplied event size function (store is agnostic to format) - { value = box eventCount; version = int64 eventCount; streamBytes = -1 } - let (|Unpack|) (token: StreamToken): int = unbox token.value - /// Represent a stream known to be empty - let ofEmpty = streamTokenOfEventCount 0 - let ofValue (value: 'event[]) = streamTokenOfEventCount value.Length - -/// Represents the state of a set of streams in a style consistent withe the concrete Store types - no constraints on memory consumption (but also no persistence!). 
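A hedged test-wiring sketch (not part of the patch) using the `MemoryStoreCategory` constructor retained at the end of this file; `codec`, `fold` and `initial` stand in for whatever the aggregate under test defines.

```fsharp
// Sketch: an in-memory Category for tests, backed by a fresh VolatileStore per test fixture.
let createTestCategory name (codec: FsCodec.IEventCodec<'event, 'Format, unit>) fold initial =
    let store = Equinox.MemoryStore.VolatileStore<'Format>()
    Equinox.MemoryStore.MemoryStoreCategory(store, name, codec, fold, initial)
```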
-type private Category<'event, 'state, 'context, 'Format>(store: VolatileStore<'Format>, codec: FsCodec.IEventCodec<'event, 'Format, 'context>, fold, initial) = +type private StoreCategory<'event, 'state, 'context, 'Format>(store: VolatileStore<'Format>, codec, fold, initial) = + let res version state events = struct ({ value = null; version = version; streamBytes = -1 }, fold state events) + let decode events = Array.chooseV (codec : FsCodec.IEventCodec<'event, 'Format, 'context>).TryDecode events interface ICategory<'event, 'state, 'context> with - member _.Empty = Token.ofEmpty, initial + member _.Empty = res 0 initial Array.empty member _.Load(_log, _categoryName, _streamId, streamName, _maxAge, _requireLeader, _ct) = task { match store.Load(streamName) with - | null -> return (Token.ofEmpty, initial) - | xs -> return (Token.ofValue xs, fold initial (Array.chooseV codec.TryDecode xs)) } - member _.Sync(_log, categoryName, streamId, streamName, context, Token.Unpack eventCount, state, events, _ct) = task { - let inline map i (e: FsCodec.IEventData<'Format>) = FsCodec.Core.TimelineEvent.Create(int64 i, e) - let encoded = Array.ofSeq events |> Array.mapi (fun i e -> map (eventCount + i) (codec.Encode(context, e))) - match store.TrySync(streamName, categoryName, streamId, eventCount, encoded) with + | null -> return res 0 initial Array.empty + | xs -> return res xs.Length initial (decode xs) } + member _.Sync(_log, categoryName, streamId, streamName, context, token, state, events, _ct) = task { + let encoded = events |> Array.mapi (fun i e -> FsCodec.Core.TimelineEvent.Create(token.version + int64 i, codec.Encode(context, e))) + match store.TrySync(streamName, categoryName, streamId, int token.version, encoded) with | true, streamEvents' -> - return SyncResult.Written (Token.ofValue streamEvents', fold state events) + return SyncResult.Written (res streamEvents'.Length state events) | false, conflictingEvents -> - let resync _ct = task { - let token' = Token.ofValue conflictingEvents - return struct (token', fold state (conflictingEvents |> Seq.skip eventCount |> Array.chooseV codec.TryDecode)) } - return SyncResult.Conflict resync } + let eventsSinceExpectedVersion = conflictingEvents |> Seq.skip (int token.version) |> decode + return SyncResult.Conflict (fun _ct -> task { return res conflictingEvents.Length state eventsSinceExpectedVersion }) } type MemoryStoreCategory<'event, 'state, 'Format, 'context>(store: VolatileStore<'Format>, name: string, codec, fold, initial) = - inherit Equinox.Category<'event, 'state, 'context>(name, inner = Category(store, codec, fold, initial)) + inherit Equinox.Category<'event, 'state, 'context>(name, StoreCategory(store, codec, fold, initial)) diff --git a/src/Equinox.MessageDb/Equinox.MessageDb.fsproj b/src/Equinox.MessageDb/Equinox.MessageDb.fsproj index eaaf3325a..df46ca555 100644 --- a/src/Equinox.MessageDb/Equinox.MessageDb.fsproj +++ b/src/Equinox.MessageDb/Equinox.MessageDb.fsproj @@ -24,8 +24,7 @@ - - + diff --git a/src/Equinox.MessageDb/MessageDb.fs b/src/Equinox.MessageDb/MessageDb.fs index 627693c34..d1864f4d2 100644 --- a/src/Equinox.MessageDb/MessageDb.fs +++ b/src/Equinox.MessageDb/MessageDb.fs @@ -277,7 +277,7 @@ module private Token = module private Snapshot = let inline snapshotCategory original = original + ":snapshot" - let inline streamName category (streamId: string) = Equinox.Core.StreamName.render (snapshotCategory category) streamId + let inline streamName category (streamId: string) = StreamName.create (snapshotCategory category) 
(StreamId.Elements.trust streamId) |> StreamName.toString type Meta = {| streamVersion: int64 |} // STJ doesn't want to serialize it unless its anonymous let private streamVersion (evt: ITimelineEvent) = let meta = evt.Meta // avoid defensive copy @@ -306,7 +306,7 @@ type MessageDbClient internal (reader, writer, ?readRetryPolicy, ?writeRetryPoli MessageDbClient(reader, writer, ?readRetryPolicy = readRetryPolicy, ?writeRetryPolicy = writeRetryPolicy) type BatchOptions(getBatchSize: Func, []?batchCountLimit) = - new (batchSize) = BatchOptions(fun () -> batchSize) + new(batchSize) = BatchOptions(fun () -> batchSize) member _.BatchSize = getBatchSize.Invoke() member val MaxBatches = batchCountLimit @@ -373,7 +373,7 @@ type AccessStrategy<'event, 'state> = /// | AdjacentSnapshots of snapshotEventCaseName: string * toSnapshot: ('state -> 'event) -type private Category<'event, 'state, 'context>(context: MessageDbContext, codec: IEventCodec<_, _, 'context>, fold, initial, access) = +type private StoreCategory<'event, 'state, 'context>(context: MessageDbContext, codec: IEventCodec<_, _, 'context>, fold, initial, access) = let loadAlgorithm log category streamId streamName requireLeader ct = match access with | AccessStrategy.Unoptimized -> context.LoadBatched(log, streamName, requireLeader, codec.TryDecode, fold, initial, ct) @@ -419,4 +419,4 @@ type MessageDbCategory<'event, 'state, 'context>(context: MessageDbContext, name // e.g. if streams are always short, events are always small, you are absolutely certain there will be no cache hits // (and you have a cheerful but bored DBA) caching) = - inherit Equinox.Category<'event, 'state, 'context>(name, Category(context, codec, fold, initial, access) |> Caching.apply Token.isStale caching) + inherit Equinox.Category<'event, 'state, 'context>(name, StoreCategory(context, codec, fold, initial, access) |> Caching.apply Token.isStale caching) diff --git a/src/Equinox.SqlStreamStore/Equinox.SqlStreamStore.fsproj b/src/Equinox.SqlStreamStore/Equinox.SqlStreamStore.fsproj index 1357d42e9..0fdcbf4ad 100644 --- a/src/Equinox.SqlStreamStore/Equinox.SqlStreamStore.fsproj +++ b/src/Equinox.SqlStreamStore/Equinox.SqlStreamStore.fsproj @@ -19,7 +19,6 @@ - diff --git a/src/Equinox.SqlStreamStore/SqlStreamStore.fs b/src/Equinox.SqlStreamStore/SqlStreamStore.fs index be95c42ea..399b3c164 100644 --- a/src/Equinox.SqlStreamStore/SqlStreamStore.fs +++ b/src/Equinox.SqlStreamStore/SqlStreamStore.fs @@ -336,7 +336,7 @@ type SqlStreamStoreConnection(readConnection, []?writeConnection, [< member _.WriteRetryPolicy = writeRetryPolicy type BatchOptions(getBatchSize: Func, []?batchCountLimit) = - new (batchSize) = BatchOptions(fun () -> batchSize) + new(batchSize) = BatchOptions(fun () -> batchSize) member _.BatchSize = getBatchSize.Invoke() member _.MaxBatches = batchCountLimit @@ -349,9 +349,9 @@ type SqlStreamStoreContext(connection: SqlStreamStoreConnection, batchOptions: B let tryIsResolvedEventEventType predicateOption = predicateOption |> Option.map isResolvedEventEventType let conn requireLeader = if requireLeader then connection.WriteConnection else connection.ReadConnection - new ( connection: SqlStreamStoreConnection, - // Max number of Events to retrieve in a single batch. Also affects frequency of RollingSnapshots. Default: 500. - [] ?batchSize) = + new(connection: SqlStreamStoreConnection, + // Max number of Events to retrieve in a single batch. Also affects frequency of RollingSnapshots. Default: 500. 
+ [] ?batchSize) = SqlStreamStoreContext(connection, BatchOptions(batchSize = defaultArg batchSize 500)) member val BatchOptions = batchOptions @@ -414,7 +414,7 @@ type private CompactionContext(eventsLen: int, capacityBeforeCompaction: int) = /// Determines whether writing a Compaction event is warranted (based on the existing state and the current accumulated changes) member _.IsCompactionDue = eventsLen > capacityBeforeCompaction -type private Category<'event, 'state, 'context>(context: SqlStreamStoreContext, codec: FsCodec.IEventCodec<_, _, 'context>, fold, initial, access) = +type private StoreCategory<'event, 'state, 'context>(context: SqlStreamStoreContext, codec: FsCodec.IEventCodec<_, _, 'context>, fold, initial, access) = let tryDecode (e: ResolvedEvent) = e |> UnionEncoderAdapters.encodedEventOfResolvedEvent |> codec.TryDecode let isOrigin = match access with @@ -450,8 +450,8 @@ type private Category<'event, 'state, 'context>(context: SqlStreamStoreContext, | GatewaySyncResult.ConflictUnknown -> return SyncResult.Conflict (reload (log, streamName, (*requireLeader*)true, streamToken, state)) } interface Caching.IReloadable<'state> with member _.Reload(log, sn, leader, token, state, ct) = reload (log, sn, leader, token, state) ct -type SqlStreamStoreCategory<'event, 'state, 'context> internal (name, inner) = - inherit Equinox.Category<'event, 'state, 'context>(name, inner = inner) +type SqlStreamStoreCategory<'event, 'state, 'context> = + inherit Equinox.Category<'event, 'state, 'context> new(context: SqlStreamStoreContext, name, codec: FsCodec.IEventCodec<_, _, 'context>, fold, initial, access, // For SqlStreamStore, caching is less critical than it is for e.g. CosmosDB // As such, it can sometimes be omitted, particularly if streams are short, or events are small and/or database latency aligns with request latency requirements @@ -461,8 +461,9 @@ type SqlStreamStoreCategory<'event, 'state, 'context> internal (name, inner) = | AccessStrategy.LatestKnownEvent, _ -> invalidOp "Equinox.SqlStreamStore does not support mixing AccessStrategy.LatestKnownEvent with Caching at present." 
| _ -> () - let cat = Category<'event, 'state, 'context>(context, codec, fold, initial, access) |> Caching.apply Token.isStale caching - SqlStreamStoreCategory(name, cat) + { inherit Equinox.Category<'event, 'state, 'context>(name, + StoreCategory<'event, 'state, 'context>(context, codec, fold, initial, access) + |> Caching.apply Token.isStale caching) } [] type ConnectorBase([]?readRetryPolicy, []?writeRetryPolicy) = diff --git a/src/Equinox.Core/Category.fs b/src/Equinox/Category.fs similarity index 72% rename from src/Equinox.Core/Category.fs rename to src/Equinox/Category.fs index 1655176cb..04b799bfb 100755 --- a/src/Equinox.Core/Category.fs +++ b/src/Equinox/Category.fs @@ -1,43 +1,36 @@ -namespace Equinox.Core - -open Serilog +// Defines base Contract with Stores; Decider talks to an IStream, which is implemented in full within this file +namespace Equinox.Core /// Store-agnostic interface representing interactions a Decider can have with a set of streams with a given pair of Event and State types type ICategory<'event, 'state, 'context> = /// Obtain a Null state for optimistic processing abstract Empty: struct (StreamToken * 'state) /// Obtain the state from the target stream - abstract Load: log: ILogger * categoryName: string * streamId: string * streamName: string + abstract Load: log: Serilog.ILogger * categoryName: string * streamId: string * streamName: string * maxAge: System.TimeSpan * requireLeader: bool * ct: CancellationToken -> Task - /// Given the supplied `token`, attempt to sync to the proposed updated `state'` by appending the supplied `events` to the underlying stream, yielding: /// - Written: signifies synchronization has succeeded, implying the included StreamState should now be assumed to be the state of the stream /// - Conflict: signifies the sync failed, and the proposed decision hence needs to be reconsidered in light of the supplied conflicting Stream State /// NB the central precondition upon which the sync is predicated is that the stream has not diverged from the `originState` represented by `token` /// where the precondition is not met, the SyncResult.Conflict bears a [lazy] async result (in a specific manner optimal for the store) - abstract Sync: log: ILogger * categoryName: string * streamId: string * streamName: string * 'context + abstract Sync: log: Serilog.ILogger * categoryName: string * streamId: string * streamName: string * 'context * originToken: StreamToken * originState: 'state * events: 'event[] * CancellationToken -> Task> -// Low level stream operations skeleton; base type for Store-specific Category types namespace Equinox open Equinox.Core.Tracing /// Store-agnostic baseline functionality for Load and Syncing a Category of 'event representations that fold to a given 'state +/// Provides access to the low level store operations used for Loading and/or Syncing updates via the Decider +/// (Normal usage is via the adjacent `module Decider` / `Stream.Resolve` helpers) [] -type Category<'event, 'state, 'context>(categoryName, resolveStream) = - - /// Stores without custom routing for categoryName/streamId to Table/Container etc use this default impl - new(categoryName, inner) = Category(categoryName, fun streamId -> struct (inner, Core.StreamName.render categoryName streamId)) - - /// Provides access to the low level store operations used for Loading and/or Syncing updates via the Decider - /// (Normal usage is via the adjacent `module Decider` / `Stream.Resolve` helpers) - member _.Stream(log: Serilog.ILogger, context: 'context, streamId: 
string) = - let struct (inner: Core.ICategory<'event, 'state, 'context>, streamName) = resolveStream streamId +type Category<'event, 'state, 'context>(categoryName, inner: Core.ICategory<'event, 'state, 'context>) = + member _.Stream(log: Serilog.ILogger, context: 'context, streamId: FsCodec.StreamId) = + let streamName = FsCodec.StreamName.create categoryName streamId |> FsCodec.StreamName.toString + let streamId = FsCodec.StreamId.toString streamId { new Core.IStream<'event, 'state> with - member _.Name = streamName member _.LoadEmpty() = inner.Empty member _.Load(maxAge, requireLeader, ct) = task { use act = source.StartActivity("Load", System.Diagnostics.ActivityKind.Client) @@ -52,10 +45,10 @@ type Category<'event, 'state, 'context>(categoryName, resolveStream) = [] type Stream private () = [] - static member Resolve(cat: Category<'event, 'state, 'context>, log, context): System.Func> = - System.Func<_, _>(fun sid -> cat.Stream(log, context, Core.StreamId.toString sid) |> DeciderCore<'event, 'state>) + static member Resolve(cat: Category<'event, 'state, 'context>, log, context): System.Func> = + System.Func<_, _>(fun sid -> cat.Stream(log, context, sid) |> DeciderCore<'event, 'state>) [] - static member Resolve(cat: Category<'event, 'state, unit>, log): System.Func> = + static member Resolve(cat: Category<'event, 'state, unit>, log): System.Func> = Stream.Resolve(cat, log, ()) module Decider = diff --git a/src/Equinox/Core.fs b/src/Equinox/Core.fs deleted file mode 100755 index a580a3136..000000000 --- a/src/Equinox/Core.fs +++ /dev/null @@ -1,31 +0,0 @@ -// Internal data structures/impl. While these are intended to be legible, understanding the abstractions involved is only necessary if you are implementing a Store or a decorator thereof. -// i.e., if you're seeking to understand the main usage flows of the Equinox library, that's in Decider.fs, not here -namespace Equinox.Core - -/// Store-agnostic interface representing interactions a Flow can have with the state of a given event stream. Not intended for direct use by consumer code. 
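A hedged sketch (not part of the patch) of resolving a `DeciderCore` via the `Stream.Resolve` helper above, composing the stream id with `FsCodec.StreamId.gen`; the Guid-based id rendering is a placeholder.

```fsharp
// Sketch: Stream.Resolve yields a Func<FsCodec.StreamId, DeciderCore<_, _>> suited to C# callers.
let resolveDecider (category: Equinox.Category<'event, 'state, unit>) (log: Serilog.ILogger) (clientId: System.Guid) =
    let resolve = Equinox.Stream.Resolve(category, log)
    // Placeholder id rendering: a Guid without dashes, as in the test fixtures later in this diff
    resolve.Invoke(FsCodec.StreamId.gen (fun (g: System.Guid) -> g.ToString "N") clientId)
```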
-type IStream<'event, 'state> = - - /// The StreamName, derived from the Name of the Category, and the StreamId supplied to Category.Stream - abstract Name: string - - /// Generate a stream token that represents a stream one believes to be empty to use as a Null Object when optimizing out the initial load roundtrip - abstract LoadEmpty: unit -> struct (StreamToken * 'state) - - /// Obtain the state from the target stream - abstract Load: maxAge: System.TimeSpan * requireLeader: bool * ct: CancellationToken -> Task - - /// Given the supplied `token` [and related `originState`], attempt to move to state `state'` by appending the supplied `events` to the underlying stream - /// SyncResult.Written: implies the state is now the value represented by the Result's value - /// SyncResult.Conflict: implies the `events` were not synced; if desired the consumer can use the included resync workflow in order to retry - abstract Sync: attempt: int * token: StreamToken * state: 'state * events: 'event[] * CancellationToken -> Task> - -/// Internal type used to represent the outcome of a Sync -and [] SyncResult<'state> = - /// The write succeeded (the supplied token and state can be used to efficiently continue the processing if, and only if, desired) - | Written of struct (StreamToken * 'state) - /// The set of changes supplied Sync conflict with the present state of the underlying stream based on the configured policy for that store - /// The inner is Async as some stores (and/or states) are such that determining the conflicting state (if, and only if, required) needs an extra trip to obtain - | Conflict of (CancellationToken -> Task) - -/// Store-specific opaque token to be used for synchronization purposes -and [] StreamToken = { value: obj; version: int64; streamBytes: int64 } diff --git a/src/Equinox/Decider.fs b/src/Equinox/Decider.fs index bfc648a5b..319d3fdae 100755 --- a/src/Equinox/Decider.fs +++ b/src/Equinox/Decider.fs @@ -1,36 +1,13 @@ -namespace Equinox +// Application-facing APIs. Domain services should only need to use or reference a Decider. +// Application Composition roots are responsible for wiring a Decider to a concrete store, and configuring its Access Strategies etc +namespace Equinox open Equinox.Core open System -module private Impl = - - let private run (decide: struct (StreamToken * 's) -> Task) sync fromConflict mapResult = - let rec loop attempt tokenAndState: Task<'v> = task { // don't try to inline or it won't compile as an efficient state machine - match! decide tokenAndState with - | result, [||] -> return mapResult result tokenAndState - | result, events -> - match! sync attempt tokenAndState events with - | SyncResult.Written tokenAndState' -> return mapResult result tokenAndState' - | SyncResult.Conflict loadConflictingTokenAndState -> - let! tokenAndState = fromConflict attempt loadConflictingTokenAndState - return! loop (attempt + 1) tokenAndState } - loop 1 - - let transact (stream: IStream<'e, 's>, fetch, decide, validateResync, mapResult: Func<'r, struct (StreamToken * 's), 'v>, ct): Task<'v> = task { - let! originTokenAndState = fetch stream ct - let decide tokenAndState = decide tokenAndState ct - let sync attempt struct (token, state) events = stream.Sync(attempt, token, state, events, ct) - let resyncFromConflict attempt resync = task { validateResync attempt; return! resync ct } - let mapResult result tokenAndState = mapResult.Invoke(result, tokenAndState) - return! 
run decide sync resyncFromConflict mapResult originTokenAndState } - - let query (stream, fetch, render: Func, ct): Task<'v> = task { - let! tokenAndState = fetch stream ct - return render.Invoke tokenAndState } - /// Central Application-facing API for F#. Wraps the handling of decision or query flows in a manner that is store agnostic /// NOTE: For C#, direct usage of DeciderCore is recommended +[] type Decider<'event, 'state>(inner: DeciderCore<'event, 'state>) = /// Provides access to lower-level APIs to enable building custom Transact / Query variations @@ -120,7 +97,8 @@ type Decider<'event, 'state>(inner: DeciderCore<'event, 'state>) = /// Central Application-facing API. Wraps the handling of decision or query flows in a manner that is store agnostic /// For F#, the async and FSharpFunc signatures in Decider tend to work better, but the API set is equivalent -and DeciderCore<'event, 'state>(stream: IStream<'event, 'state>) = +and [] + DeciderCore<'event, 'state>(stream: IStream<'event, 'state>) = let (|Context|) = SyncContext<'state>.Map @@ -131,7 +109,7 @@ and DeciderCore<'event, 'state>(stream: IStream<'event, 'state>) = [] ?load, [] ?attempts, []?ct): Task = let inline decide struct (_t: StreamToken, state) _ct = Task.FromResult struct ((), interpret.Invoke state) let inline mapRes () struct (_t: StreamToken, _s: 'state) = () - Impl.transact (stream, LoadPolicy.Fetch load, decide, AttemptsPolicy.Validate attempts, mapRes, defaultArg ct CancellationToken.None) + Stream.transact (stream, LoadPolicy.Fetch load, decide, AttemptsPolicy.Validate attempts, mapRes, defaultArg ct CancellationToken.None) /// 1. Invoke the supplied interpret function with the present state /// 2. (if events yielded) Attempt to sync the yielded events to the stream. @@ -141,7 +119,7 @@ and DeciderCore<'event, 'state>(stream: IStream<'event, 'state>) = [] ?load, [] ?attempts, []?ct): Task<'view> = let inline decide struct (_token, state) _ct = Task.FromResult struct ((), interpret.Invoke state) let inline mapRes () struct (_token, state) = render.Invoke state - Impl.transact (stream, LoadPolicy.Fetch load, decide, AttemptsPolicy.Validate attempts, mapRes, defaultArg ct CancellationToken.None) + Stream.transact (stream, LoadPolicy.Fetch load, decide, AttemptsPolicy.Validate attempts, mapRes, defaultArg ct CancellationToken.None) /// 1. Invoke the supplied decide function with the present state, holding the 'result /// 2. (if events yielded) Attempt to sync the yielded events to the stream. @@ -151,7 +129,7 @@ and DeciderCore<'event, 'state>(stream: IStream<'event, 'state>) = [] ?load, [] ?attempts, [] ?ct): Task<'result> = let inline decide struct (_token, state) _ct = decide.Invoke state |> Task.FromResult let inline mapRes r _ = r - Impl.transact (stream, LoadPolicy.Fetch load, decide, AttemptsPolicy.Validate attempts, mapRes, defaultArg ct CancellationToken.None) + Stream.transact (stream, LoadPolicy.Fetch load, decide, AttemptsPolicy.Validate attempts, mapRes, defaultArg ct CancellationToken.None) /// 1. Invoke the supplied decide function with the present state, holding the 'result /// 2. (if events yielded) Attempt to sync the yielded events to the stream. 
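A sketch (not part of the patch) of tuning loading and retry behaviour per call; it assumes the F# `Decider` wrapper exposes a `Transact` overload mirroring the `DeciderCore` ones above, and uses the `LoadOption` and `Attempts` cases defined later in this file.

```fsharp
// Hedged sketch: per-call load/attempts tuning; the decide function and its types are placeholders.
let decideWithLeaderRead (decider: Equinox.Decider<'event, 'state>) decide =
    decider.Transact(
        decide,                                    // 'state -> 'result * 'event[]
        load = Equinox.LoadOption.RequireLeader,   // leader/quorum read before deciding
        attempts = Equinox.Attempts.Max 5)         // up to 5 resyncs on conflict (default is 3)
```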
@@ -161,7 +139,7 @@ and DeciderCore<'event, 'state>(stream: IStream<'event, 'state>) = [] ?load, [] ?attempts, [] ?ct): Task<'view> = let inline decide struct (_token, state) _ct = decide.Invoke state |> Task.FromResult let inline mapRes r struct (_, s) = mapResult.Invoke(r, s) - Impl.transact (stream, LoadPolicy.Fetch load, decide, AttemptsPolicy.Validate attempts, mapRes, defaultArg ct CancellationToken.None) + Stream.transact (stream, LoadPolicy.Fetch load, decide, AttemptsPolicy.Validate attempts, mapRes, defaultArg ct CancellationToken.None) /// 1. Invoke the supplied decide function with the current complete context, holding the 'result /// 2. (if events yielded) Attempt to sync the yielded events to the stream. @@ -171,7 +149,7 @@ and DeciderCore<'event, 'state>(stream: IStream<'event, 'state>) = [] ?load, [] ?attempts, [] ?ct): Task<'result> = let inline decide' (Context c) _ct = decide.Invoke(c) |> Task.FromResult let inline mapRes r _ = r - Impl.transact (stream, LoadPolicy.Fetch load, decide', AttemptsPolicy.Validate attempts, mapRes, defaultArg ct CancellationToken.None) + Stream.transact (stream, LoadPolicy.Fetch load, decide', AttemptsPolicy.Validate attempts, mapRes, defaultArg ct CancellationToken.None) /// 1. Invoke the supplied decide function with the current complete context, holding the 'result /// 2. (if events yielded) Attempt to sync the yielded events to the stream. @@ -181,17 +159,17 @@ and DeciderCore<'event, 'state>(stream: IStream<'event, 'state>) = [] ?load, [] ?attempts, [] ?ct): Task<'view> = let inline decide (Context c) _ct = c |> decide.Invoke |> Task.FromResult let inline mapRes r (Context c) = mapResult.Invoke(r, c) - Impl.transact (stream, LoadPolicy.Fetch load, decide, AttemptsPolicy.Validate attempts, mapRes, defaultArg ct CancellationToken.None) + Stream.transact (stream, LoadPolicy.Fetch load, decide, AttemptsPolicy.Validate attempts, mapRes, defaultArg ct CancellationToken.None) /// Project from the folded 'state, but without executing a decision flow as Transact does member _.Query(render: Func<'state, 'view>, [] ?load, [] ?ct): Task<'view> = let render struct (_token, state) = render.Invoke(state) - Impl.query (stream, LoadPolicy.Fetch load, render, defaultArg ct CancellationToken.None) + Stream.query (stream, LoadPolicy.Fetch load, render, defaultArg ct CancellationToken.None) /// Project from the stream's complete context, but without executing a decision flow as TransactEx does member _.QueryEx(render: Func, 'view>, [] ?load, [] ?ct): Task<'view> = let render (Context c) = render.Invoke(c) - Impl.query (stream, LoadPolicy.Fetch load, render, defaultArg ct CancellationToken.None) + Stream.query (stream, LoadPolicy.Fetch load, render, defaultArg ct CancellationToken.None) /// 1. Invoke the supplied Async interpret function with the present state /// 2. (if events yielded) Attempt to sync the yielded events to the stream. @@ -201,7 +179,7 @@ and DeciderCore<'event, 'state>(stream: IStream<'event, 'state>) = [] ?load, [] ?attempts, [] ?ct): Task<'view> = let inline decide struct (_token, state) ct = task { let! es = interpret.Invoke(state, ct) in return struct ((), es) } let inline mapRes () struct (_token, state) = render.Invoke state - Impl.transact (stream, LoadPolicy.Fetch load, decide, AttemptsPolicy.Validate attempts, mapRes, defaultArg ct CancellationToken.None) + Stream.transact (stream, LoadPolicy.Fetch load, decide, AttemptsPolicy.Validate attempts, mapRes, defaultArg ct CancellationToken.None) /// 1. 
Invoke the supplied Async decide function with the present state, holding the 'result /// 2. (if events yielded) Attempt to sync the yielded events to the stream. @@ -211,7 +189,7 @@ and DeciderCore<'event, 'state>(stream: IStream<'event, 'state>) = [] ?load, [] ?attempts, [] ?ct): Task<'result> = let inline decide struct (_token, state) ct = decide.Invoke(state, ct) let inline mapRes r _ = r - Impl.transact (stream, LoadPolicy.Fetch load, decide, AttemptsPolicy.Validate attempts, mapRes, defaultArg ct CancellationToken.None) + Stream.transact (stream, LoadPolicy.Fetch load, decide, AttemptsPolicy.Validate attempts, mapRes, defaultArg ct CancellationToken.None) /// 1. Invoke the supplied Async decide function with the current complete context, holding the 'result /// 2. (if events yielded) Attempt to sync the yielded events to the stream. @@ -221,7 +199,7 @@ and DeciderCore<'event, 'state>(stream: IStream<'event, 'state>) = [] ?load, [] ?attempts, [] ?ct): Task<'result> = let inline decide (Context c) ct = decide.Invoke(c, ct) let inline mapRes r _ = r - Impl.transact (stream, LoadPolicy.Fetch load, decide, AttemptsPolicy.Validate attempts, mapRes, defaultArg ct CancellationToken.None) + Stream.transact (stream, LoadPolicy.Fetch load, decide, AttemptsPolicy.Validate attempts, mapRes, defaultArg ct CancellationToken.None) /// 1. Invoke the supplied Async decide function with the current complete context, holding the 'result /// 2. (if events yielded) Attempt to sync the yielded events to the stream. @@ -231,12 +209,13 @@ and DeciderCore<'event, 'state>(stream: IStream<'event, 'state>) = [] ?load, [] ?attempts, [] ?ct): Task<'view> = let inline decide (Context c) ct = decide.Invoke(c, ct) let inline mapRes r (Context c) = mapResult.Invoke(r, c) - Impl.transact (stream, LoadPolicy.Fetch load, decide, AttemptsPolicy.Validate attempts, mapRes, defaultArg ct CancellationToken.None) + Stream.transact (stream, LoadPolicy.Fetch load, decide, AttemptsPolicy.Validate attempts, mapRes, defaultArg ct CancellationToken.None) (* Options to tune loading policy - default is RequireLoad *) /// Store-agnostic Loading Options -and [] LoadOption<'state> = +and [] + LoadOption<'state> = /// Default policy; Obtain latest state from store based on consistency level configured | RequireLoad /// Request that data be read with a quorum read / from a Leader connection @@ -251,21 +230,21 @@ and [] LoadOption<'state> = | AllowStale of maxAge: TimeSpan /// Inhibit load from database based on the fact that the stream is likely not to have been initialized yet, and we will be generating events | AssumeEmpty -and internal LoadPolicy() = - static member Fetch<'state, 'event>(x: LoadOption<'state> option) - : IStream<'event, 'state> -> CancellationToken -> Task = +and [] internal LoadPolicy private () = + static member Fetch<'state, 'event>(x: LoadOption<'state> option) : IStream<'event, 'state> -> CancellationToken -> Task = match x with - | None | Some RequireLoad -> fun stream ct -> stream.Load(maxAge = TimeSpan.Zero, requireLeader = false, ct = ct) - | Some RequireLeader -> fun stream ct -> stream.Load(maxAge = TimeSpan.Zero, requireLeader = true, ct = ct) - | Some AnyCachedValue -> fun stream ct -> stream.Load(maxAge = TimeSpan.MaxValue, requireLeader = false, ct = ct) - | Some (AllowStale maxAge) -> fun stream ct -> stream.Load(maxAge = maxAge, requireLeader = false, ct = ct) - | Some AssumeEmpty -> fun stream _ct -> Task.FromResult(stream.LoadEmpty()) + | None | Some LoadOption.RequireLoad -> fun stream ct -> 
stream.Load(maxAge = TimeSpan.Zero, requireLeader = false, ct = ct) + | Some LoadOption.RequireLeader -> fun stream ct -> stream.Load(maxAge = TimeSpan.Zero, requireLeader = true, ct = ct) + | Some LoadOption.AnyCachedValue -> fun stream ct -> stream.Load(maxAge = TimeSpan.MaxValue, requireLeader = false, ct = ct) + | Some (LoadOption.AllowStale maxAge) -> fun stream ct -> stream.Load(maxAge = maxAge, requireLeader = false, ct = ct) + | Some LoadOption.AssumeEmpty -> fun stream _ct -> Task.FromResult(stream.LoadEmpty()) (* Retry / Attempts policy used to define policy for retrying based on the conflicting state when there's an Append conflict (default 3 retries) *) -and [] Attempts = +and [] + Attempts = | Max of count: int -and internal AttemptsPolicy() = +and [] internal AttemptsPolicy private () = static member Validate(opt: Attempts option) = let maxAttempts = match opt with Some (Attempts.Max n) -> n | None -> 3 if maxAttempts < 1 then raise (ArgumentOutOfRangeException(nameof opt, maxAttempts, "should be >= 1")) @@ -273,7 +252,7 @@ and internal AttemptsPolicy() = /// Exception yielded by Decider.Transact after `count` attempts have yielded conflicts at the point of syncing with the Store and MaxResyncsExhaustedException(count) = - inherit exn(sprintf "Concurrency violation; aborting after %i attempts." count) + inherit exn $"Concurrency violation; aborting after %i{count} attempts." /// Exposed by TransactEx / QueryEx, providing access to extended state information for cases where that's required and [] @@ -292,8 +271,7 @@ and [] /// The present State of the stream within the context of this Flow abstract member State: 'state -and internal SyncContext<'state> = - +and [] internal SyncContext<'state> private () = static member Map(struct (token: StreamToken, state: 'state)) = { new ISyncContext<'state> with member _.State = state diff --git a/src/Equinox/Equinox.fsproj b/src/Equinox/Equinox.fsproj index 9a9a2d288..d980c1b73 100644 --- a/src/Equinox/Equinox.fsproj +++ b/src/Equinox/Equinox.fsproj @@ -6,9 +6,10 @@ - + + - + @@ -18,7 +19,9 @@ contentfiles + + + - diff --git a/src/Equinox/Stream.fs b/src/Equinox/Stream.fs new file mode 100755 index 000000000..006dbb29d --- /dev/null +++ b/src/Equinox/Stream.fs @@ -0,0 +1,54 @@ +// Defines base IStream interface that a Decider uses to load and append to a Stream. Stores have Category implementations fulfilling this contract. +// (IStream could be marked `internal`, but has been left public in order to facilitate experimenting with custom `Decider` re-implementations within an app) +namespace Equinox.Core + +/// Store-agnostic interface implemented by Category, representing interactions a Transact/Query can have with the state of a given event stream. 
+type IStream<'event, 'state> = + + /// Generate a stream token representing a stream one believes to be empty for use as a Null Object when optimizing out the initial load roundtrip + abstract LoadEmpty: unit -> struct (StreamToken * 'state) + + /// Obtain the state from the target stream + abstract Load: maxAge: System.TimeSpan * requireLeader: bool * ct: CancellationToken -> Task + + /// Given the supplied `token` [and related `originState`], attempt to move to state `state'` by appending the supplied `events` to the underlying stream + /// SyncResult.Written: implies the state is now the value represented by the Result's value + /// SyncResult.Conflict: implies the `events` were not synced; if desired the consumer can use the included resync workflow in order to retry + abstract Sync: attempt: int * token: StreamToken * state: 'state * events: 'event[] * CancellationToken -> Task> + +/// Internal type used to represent the outcome of a Sync +and [] SyncResult<'state> = + /// The write succeeded (the supplied token and state can be used to efficiently continue the processing if, and only if, desired) + | Written of struct (StreamToken * 'state) + /// The set of changes supplied Sync conflict with the present state of the underlying stream based on the configured policy for that store + /// The inner is Async as some stores (and/or states) are such that determining the conflicting state (if, and only if, required) needs an extra trip to obtain + | Conflict of (CancellationToken -> Task) + +/// Store-specific opaque token to be used for synchronization purposes +and [] StreamToken = { value: obj; version: int64; streamBytes: int64 } + +module internal Stream = + + let private run (decide: struct (StreamToken * 's) -> Task) sync fromConflict mapResult = + let rec loop attempt tokenAndState: Task<'v> = task { // don't try to inline or it won't compile as an efficient state machine + match! decide tokenAndState with + | result, [||] -> return mapResult result tokenAndState + | result, events -> + match! sync attempt tokenAndState events with + | SyncResult.Written tokenAndState' -> return mapResult result tokenAndState' + | SyncResult.Conflict loadConflictingTokenAndState -> + let! tokenAndState = fromConflict attempt loadConflictingTokenAndState + return! loop (attempt + 1) tokenAndState } + loop 1 + + let transact (stream: IStream<'e, 's>, fetch, decide, validateResync, mapResult: System.Func<'r, struct (StreamToken * 's), 'v>, ct): Task<'v> = task { + let! originTokenAndState = fetch stream ct + let decide tokenAndState = decide tokenAndState ct + let sync attempt struct (token, state) events = stream.Sync(attempt, token, state, events, ct) + let resyncFromConflict attempt resync = task { validateResync attempt; return! resync ct } + let mapResult result tokenAndState = mapResult.Invoke(result, tokenAndState) + return! run decide sync resyncFromConflict mapResult originTokenAndState } + + let query (stream, fetch, render: System.Func, ct): Task<'v> = task { + let! 
tokenAndState = fetch stream ct + return render.Invoke tokenAndState } diff --git a/src/Equinox/StreamId.fs b/src/Equinox/StreamId.fs deleted file mode 100644 index aaaba723d..000000000 --- a/src/Equinox/StreamId.fs +++ /dev/null @@ -1,67 +0,0 @@ -namespace Equinox.Core - -open FSharp.UMX -type private String = System.String - -module StreamName = - - /// Throws if a candidate categoryName includes a '-', is null, or is empty - let inline validateCategoryName (rawCategory: string) = - if rawCategory |> String.IsNullOrEmpty then invalidArg (nameof rawCategory) "may not be null or empty" - if rawCategory.IndexOf '-' <> -1 then invalidArg (nameof rawCategory) "may not contain embedded '-' symbols" - /// Render in canonical {categoryName}-{streamId} format. Throws if categoryName contains embedded `-` symbols - let render categoryName streamId = - validateCategoryName categoryName - String.Concat(categoryName, '-', streamId) - -/// Represents the second half of a canonical StreamName, i.e., the streamId in "{categoryName}-{streamId}" -type StreamId = string -and [] streamId -/// Low-level helpers for composing and rendering StreamId values; prefer the ones in the Equinox namespace -module StreamId = - - /// Throws if a candidate streamId fragment includes a '_', is null, or is empty - let inline validateStreamIdFragment (rawFragment: string) = - if rawFragment |> String.IsNullOrEmpty then invalidArg (nameof rawFragment) "may not contain null or empty fragments" - if rawFragment.IndexOf '_' <> -1 then invalidArg (nameof rawFragment) "may not contain embedded '_' symbols" - /// Create a StreamId, trusting the input to be well-formed (see the gen* functions for composing with validation) - let ofRaw (raw: string): StreamId = UMX.tag raw - /// Validates and generates a StreamId from an application level fragment. Throws if any of the fragments embed a `_`, are `null`, or are empty - let ofFragment (fragment: string): StreamId = - // arguably rejection of `_` chars is a step too far, but this accommodates for more easily dealing with namespacing dictated by unforeseen needs - validateStreamIdFragment fragment - ofRaw fragment - /// Combines streamId fragments. Throws if any of the fragments embed a `_`, are `null`, or are empty - let ofFragments (fragments: string[]): StreamId = - fragments |> Array.iter validateStreamIdFragment - String.Join("_", fragments) |> ofRaw - /// Render as a string for external use - let toString: StreamId -> string = UMX.untag - /// Render as a canonical "{categoryName}-{streamId}" StreamName. Throws if the categoryName embeds `-` chars. 
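An illustrative replacement for the composers being deleted below (not part of the patch), assuming `FsCodec.StreamId` supplies equivalent `gen`/`gen2` helpers, as the `FsCodec.StreamId.gen` usages elsewhere in this diff suggest.

```fsharp
// Hedged sketch: composing StreamIds from application-level ids after the move to FsCodec.StreamId.
type TenantId = TenantId of string
type CartId = CartId of System.Guid

let tenantToString (TenantId t) = t
let cartToString (CartId c) = c.ToString "N"

// Single-element id: "{cartId}"
let cartStreamId: CartId -> FsCodec.StreamId = FsCodec.StreamId.gen cartToString
// Two-element id, joined with '_' as the deleted ofFragments helper did: "{tenant}_{cartId}"
let tenantCartStreamId: TenantId * CartId -> FsCodec.StreamId = FsCodec.StreamId.gen2 tenantToString cartToString
```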
- let renderStreamName categoryName (x: StreamId): string = toString x |> StreamName.render categoryName - -namespace Equinox - -[<AbstractClass; Sealed>] -type StreamId private () = - - /// Generate a StreamId from a single application-level id, given a rendering function that maps to a non empty fragment without embedded `_` chars - static member Map(f: 'a -> string) = System.Func<'a, Core.StreamId>(fun id -> Core.StreamId.ofFragment (f id)) - /// Generate a StreamId from a tuple of application-level ids, given 2 rendering functions that map to a non empty fragment without embedded `_` chars - static member Map(f, f2) = System.Func<_, _, _>(fun id1 id2 -> Core.StreamId.ofFragments ([| f id1; f2 id2 |])) - /// Generate a StreamId from a triple of application-level ids, given 3 rendering functions that map to a non empty fragment without embedded `_` chars - static member Map(f1, f2, f3) = System.Func<_, _, _, _>(fun id1 id2 id3 -> Core.StreamId.ofFragments ([| f1 id1; f2 id2; f3 id3 |])) - /// Generate a StreamId from a 4-tuple of application-level ids, given 4 rendering functions that map to a non empty fragment without embedded `_` chars - static member Map(f1, f2, f3, f4) = System.Func<_, _, _, _, _>(fun id1 id2 id3 id4 -> Core.StreamId.ofFragments ([| f1 id1; f2 id2; f3 id3; f4 id4 |])) - -/// Helpers for composing and rendering StreamId values -module StreamId = - - /// Generate a StreamId from a single application-level id, given a rendering function that maps to a non empty fragment without embedded `_` chars - let gen (f: 'a -> string): 'a -> Core.StreamId = StreamId.Map(f).Invoke - /// Generate a StreamId from a tuple of application-level ids, given two rendering functions that map to a non empty fragment without embedded `_` chars - let gen2 f1 f2: 'a * 'b -> Core.StreamId = StreamId.Map(f1, f2).Invoke - /// Generate a StreamId from a triple of application-level ids, given three rendering functions that map to a non empty fragment without embedded `_` chars - let gen3 f1 f2 f3: 'a * 'b * 'c -> Core.StreamId = StreamId.Map(f1, f2, f3).Invoke - /// Generate a StreamId from a 4-tuple of application-level ids, given four rendering functions that map to a non empty fragment without embedded `_` chars - let gen4 f1 f2 f3 f4: 'a * 'b * 'c * 'd -> Core.StreamId = StreamId.Map(f1, f2, f3, f4).Invoke diff --git a/src/Equinox.Core/Tracing.fs b/src/Equinox/Tracing.fs similarity index 100% rename from src/Equinox.Core/Tracing.fs rename to src/Equinox/Tracing.fs diff --git a/tests/Equinox.CosmosStore.Integration/AccessStrategies.fs b/tests/Equinox.CosmosStore.Integration/AccessStrategies.fs index 5c6e0bfa9..14bd63b63 100644 --- a/tests/Equinox.CosmosStore.Integration/AccessStrategies.fs +++ b/tests/Equinox.CosmosStore.Integration/AccessStrategies.fs @@ -33,8 +33,9 @@ module WiringHelpers = /// This is especially relevant when events are spread between a Tip page and preceding pages as the Tip reading logic is special cased compared to querying module SequenceCheck = - let [<Literal>] Category = "_SequenceCheck" - let streamId = Equinox.StreamId.gen (fun (g : Guid) -> g.ToString "N") + module Stream = + let [<Literal>] Category = "_SequenceCheck" + let id = FsCodec.StreamId.gen (fun (g : Guid) -> g.ToString "N") module Events = @@ -71,14 +72,14 @@ module SequenceCheck = decider.Transact(decide (value, count), id) let private create resolve = - Service(streamId >> resolve) + Service(Stream.id >> resolve) module Config = let createUncached log context = - createCategoryUnoptimizedUncached Category Events.codec Fold.initial Fold.fold
context |> Equinox.Decider.forStream log |> create + createCategoryUnoptimizedUncached Stream.Category Events.codec Fold.initial Fold.fold context |> Equinox.Decider.forStream log |> create let create log (context, cache) = - createCategoryUnoptimized Category Events.codec Fold.initial Fold.fold (context, cache) |> Equinox.Decider.forStream log |> create + createCategoryUnoptimized Stream.Category Events.codec Fold.initial Fold.fold (context, cache) |> Equinox.Decider.forStream log |> create module Props = open FsCheck diff --git a/tests/Equinox.CosmosStore.Integration/CosmosCoreIntegration.fs b/tests/Equinox.CosmosStore.Integration/CosmosCoreIntegration.fs index 71fca2baa..68877beb7 100644 --- a/tests/Equinox.CosmosStore.Integration/CosmosCoreIntegration.fs +++ b/tests/Equinox.CosmosStore.Integration/CosmosCoreIntegration.fs @@ -25,7 +25,7 @@ type Tests(testOutputHelper) = let mutable testIterations = 0 let (|TestStream|) (name: Guid) = testIterations <- testIterations + 1 - $"events-{name}-%i{testIterations}" + StreamName.parse $"events-{name}-%i{testIterations}" let verifyRequestChargesMax rus = let tripRequestCharges = [ for e, c in capture.RequestCharges -> $"%A{e}", c ] @@ -142,25 +142,23 @@ type Tests(testOutputHelper) = capture.Clear() pos =! res - // Demonstrate benefit/mechanism for using the Position-based API to avail of the etag tracking - let stream = ctx.StreamId streamName + (* Demonstrate benefit/mechanism for using the Position-based API to avail of the etag tracking *) let extrasCount = match extras with x when x > 50 -> 5000 | x when x < 1 -> 1 | x -> x*100 - let! _pos = Async.call (fun ct -> ctx.NonIdempotentAppend(stream, TestEvents.Create (int pos,extrasCount), ct)) + let! _pos = Async.call (fun ct -> ctx.NonIdempotentAppend(streamName, TestEvents.Create (int pos,extrasCount), ct)) test <@ [EqxAct.Append] = capture.ExternalCalls @> if eventsInTip then verifyRequestChargesMax 451 // 450.03 else verifyRequestChargesMax 448 // 447.5 // 463.01 observed capture.Clear() - let! pos = Async.call (fun ct -> ctx.Sync(stream, ct, ?position = None)) + let! pos = Async.call (fun ct -> ctx.Sync(streamName, ct, ?position = None)) test <@ [EqxAct.Tip] = capture.ExternalCalls @> verifyRequestChargesMax 5 // 41 observed // for a 200, you'll pay a lot (we omitted to include the position that NonIdempotentAppend yielded) capture.Clear() - let! _pos = Async.call (fun ct -> ctx.Sync(stream, ct, pos)) + let! 
_pos = Async.call (fun ct -> ctx.Sync(streamName, ct, pos)) test <@ [EqxAct.TipNotModified] = capture.ExternalCalls @> - verifyRequestChargesMax 1 // for a 304 by definition - when an etag IfNotMatch is honored, you only pay one RU - } + verifyRequestChargesMax 1 } // for a 304 by definition - when an etag IfNotMatch is honored, you only pay one RU [] let ``append - fails on non-matching`` (eventsInTip, TestStream streamName) = async { diff --git a/tests/Equinox.CosmosStore.Integration/CosmosFixtures.fs b/tests/Equinox.CosmosStore.Integration/CosmosFixtures.fs index 36459ee59..73a456afc 100644 --- a/tests/Equinox.CosmosStore.Integration/CosmosFixtures.fs +++ b/tests/Equinox.CosmosStore.Integration/CosmosFixtures.fs @@ -4,10 +4,9 @@ module Equinox.DynamoStore.Integration.CosmosFixtures open Amazon.DynamoDBv2 open Equinox.DynamoStore -open System // docker compose up dynamodb-local will stand up a simulator instance that this wiring can connect to -let private tryRead env = Environment.GetEnvironmentVariable env |> Option.ofObj +let private tryRead env = System.Environment.GetEnvironmentVariable env |> Option.ofObj let private tableName = tryRead "EQUINOX_DYNAMO_TABLE" |> Option.defaultValue "equinox-test" let private archiveTableName = tryRead "EQUINOX_DYNAMO_TABLE_ARCHIVE" |> Option.defaultValue "equinox-test-archive" @@ -16,7 +15,7 @@ let discoverConnection () = match tryRead "EQUINOX_DYNAMO_CONNECTION" with | None -> "dynamodb-local", "http://localhost:8000" | Some connectionString -> "EQUINOX_DYNAMO_CONNECTION", connectionString // e.g "https://dynamodb.eu-west-1.amazonaws.com" -let isSimulatorServiceUrl url = Uri(url).IsLoopback +let isSimulatorServiceUrl url = System.Uri(url).IsLoopback let createClient (log : Serilog.ILogger) name serviceUrl = // See https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DynamoDBLocal.DownloadingAndRunning.html#docker for details of how to deploy a simulator instance @@ -25,27 +24,16 @@ let createClient (log : Serilog.ILogger) name serviceUrl = if isSimulatorServiceUrl serviceUrl then // Credentials are not validated if connecting to local instance so anything will do (this avoids it looking for profiles to be configured) let credentials = Amazon.Runtime.BasicAWSCredentials("A", "A") - new AmazonDynamoDBClient(credentials, clientConfig) :> IAmazonDynamoDB + new AmazonDynamoDBClient(credentials, clientConfig) |> DynamoStoreClient else // omitting credentials to ctor in order to trigger use of keychain configured access - new AmazonDynamoDBClient(clientConfig) :> IAmazonDynamoDB - -let connectPrimary log = - let name, serviceUrl = discoverConnection () - let client = createClient log name serviceUrl - DynamoStoreClient(client, tableName) + new AmazonDynamoDBClient(clientConfig) |> DynamoStoreClient -let connectArchive log = +let connect log = let name, serviceUrl = discoverConnection () - let client = createClient log name serviceUrl - DynamoStoreClient(client, archiveTableName) + createClient log name serviceUrl -let connectWithFallback log = - let name, serviceUrl = discoverConnection () - let client = createClient log name serviceUrl - DynamoStoreClient(client, tableName, archiveTableName = archiveTableName) - -// Prepares the two required tables that the tests use via connectPrimary/Archive/WithFallback +// Prepares the two required tables that the tests use via connect + tableName/archiveTableName type DynamoTablesFixture() = interface Xunit.IAsyncLifetime with @@ -54,17 +42,29 @@ type DynamoTablesFixture() = let client = createClient 
Serilog.Log.Logger name serviceUrl let throughput = ProvisionedThroughput (100L, 100L) let throughput = Throughput.Provisioned throughput - DynamoStoreClient.Establish(client, tableName, archiveTableName = archiveTableName, mode = CreateIfNotExists throughput) + DynamoStoreContext.Establish(client, tableName, archiveTableName = archiveTableName, mode = CreateIfNotExists throughput) + |> Async.Ignore |> Async.StartImmediateAsTask - :> System.Threading.Tasks.Task + |> FSharp.Control.Task.ignore member _.DisposeAsync() = task { () } [] type DocStoreCollection() = interface Xunit.ICollectionFixture +let createPrimaryContextIgnoreMissing client tableName queryMaxItems tipMaxEvents ignoreMissing = + DynamoStoreContext(client, tableName, tipMaxEvents = tipMaxEvents, queryMaxItems = queryMaxItems, ignoreMissingEvents = ignoreMissing) +let defaultTipMaxEvents = 10 +let createArchiveContext log queryMaxItems = + let client = connect log + DynamoStoreContext(client, archiveTableName, defaultTipMaxEvents, queryMaxItems = queryMaxItems) +let createFallbackContext log queryMaxItems = + let client = connect log + DynamoStoreContext(client, tableName, defaultTipMaxEvents, queryMaxItems = queryMaxItems, archiveTableName = archiveTableName) + type StoreContext = DynamoStoreContext type StoreCategory<'E, 'S> = DynamoStoreCategory<'E, 'S, unit> +let primaryTarget = tableName #else [] module Equinox.CosmosStore.Integration.CosmosFixtures @@ -88,53 +88,43 @@ let discoverConnection () = | Some connectionString -> "EQUINOX_COSMOS_CONNECTION", Discovery.ConnectionString connectionString let createClient (log : Serilog.ILogger) name (discovery : Discovery) = - let connector = CosmosStoreConnector(discovery, requestTimeout=TimeSpan.FromSeconds 3., maxRetryAttemptsOnRateLimitedRequests=2, maxRetryWaitTimeOnRateLimitedRequests=TimeSpan.FromMinutes 1.) - log.Information("CosmosDB Connecting {name} to {endpoint}", name, discovery.Endpoint) + let connector = CosmosStoreConnector(discovery, requestTimeout = TimeSpan.FromSeconds 3., + maxRetryAttemptsOnRateLimitedRequests = 2, maxRetryWaitTimeOnRateLimitedRequests = TimeSpan.FromMinutes 1.) 
+ log.Information("CosmosStore {name} {endpoint}", name, discovery.Endpoint) connector.CreateUninitialized() -let connectPrimary log = - let name, discovery = discoverConnection () - let client = createClient log name discovery - CosmosStoreClient(client, databaseId, containerId) - -let connectArchive log = - let name, discovery = discoverConnection () - let client = createClient log name discovery - CosmosStoreClient(client, databaseId, archiveContainerId) - -let connectWithFallback log = +let connect log = let name, discovery = discoverConnection () let client = createClient log name discovery - CosmosStoreClient(client, databaseId, containerId, archiveContainerId = archiveContainerId) + CosmosStoreClient(client) [] type DocStoreCollection() = do () +let createPrimaryContextIgnoreMissing client containerId queryMaxItems tipMaxEvents ignoreMissing = + CosmosStoreContext(client, databaseId, containerId, tipMaxEvents = tipMaxEvents, queryMaxItems = queryMaxItems, ignoreMissingEvents = ignoreMissing) + +let defaultTipMaxEvents = 10 +let createArchiveContext log queryMaxItems = + let client = connect log + CosmosStoreContext(client, databaseId, containerId, defaultTipMaxEvents, queryMaxItems = queryMaxItems) +let createFallbackContext log queryMaxItems = + let client = connect log + CosmosStoreContext(client, databaseId, containerId, defaultTipMaxEvents, queryMaxItems = queryMaxItems, archiveContainerId = archiveContainerId) + type StoreContext = CosmosStoreContext type StoreCategory<'E, 'S> = CosmosStoreCategory<'E, 'S, unit> +let primaryTarget = containerId #endif -let createPrimaryContextIgnoreMissing client queryMaxItems tipMaxEvents ignoreMissing = - StoreContext(client, tipMaxEvents = tipMaxEvents, queryMaxItems = queryMaxItems, ignoreMissingEvents = ignoreMissing) - let createPrimaryContextEx log queryMaxItems tipMaxEvents = - let connection = connectPrimary log - createPrimaryContextIgnoreMissing connection queryMaxItems tipMaxEvents false - -let defaultTipMaxEvents = 10 + let client = connect log + createPrimaryContextIgnoreMissing client primaryTarget queryMaxItems tipMaxEvents false let createPrimaryContext log queryMaxItems = createPrimaryContextEx log queryMaxItems defaultTipMaxEvents -let createArchiveContext log queryMaxItems = - let connection = connectArchive log - StoreContext(connection, defaultTipMaxEvents, queryMaxItems = queryMaxItems) - -let createFallbackContext log queryMaxItems = - let connection = connectWithFallback log - StoreContext(connection, defaultTipMaxEvents, queryMaxItems = queryMaxItems) - let defaultQueryMaxItems = 10 let createPrimaryEventsContext log queryMaxItems tipMaxItems = @@ -142,9 +132,9 @@ let createPrimaryEventsContext log queryMaxItems tipMaxItems = Core.EventsContext(context, log) let createPrimaryEventsContextWithUnsafe log queryMaxItems tipMaxItems = - let connection = connectPrimary log + let client = connect log let create ignoreMissing = - let context = createPrimaryContextIgnoreMissing connection queryMaxItems tipMaxItems ignoreMissing + let context = createPrimaryContextIgnoreMissing client primaryTarget queryMaxItems tipMaxItems ignoreMissing Core.EventsContext(context, log) create false, create true diff --git a/tests/Equinox.CosmosStore.Integration/DocumentStoreIntegration.fs b/tests/Equinox.CosmosStore.Integration/DocumentStoreIntegration.fs index dda05cbda..4986d6c02 100644 --- a/tests/Equinox.CosmosStore.Integration/DocumentStoreIntegration.fs +++ b/tests/Equinox.CosmosStore.Integration/DocumentStoreIntegration.fs @@ -22,30 
+22,29 @@ module Cart = let codec = Cart.Events.codecJe #endif let createServiceWithoutOptimization log context = - StoreCategory(context, Cart.Category, codec, fold, initial, AccessStrategy.Unoptimized, Equinox.CachingStrategy.NoCaching) + StoreCategory(context, Cart.Stream.Category, codec, fold, initial, AccessStrategy.Unoptimized, Equinox.CachingStrategy.NoCaching) |> Equinox.Decider.forStream log |> Cart.create /// Trigger looking in Tip (we want those calls to occur, but without leaning on snapshots, which would reduce the paths covered) let createServiceWithEmptyUnfolds log context = let unfArgs = Cart.Fold.Snapshot.isOrigin, fun _ -> Array.empty - StoreCategory(context, Cart.Category, codec, fold, initial, AccessStrategy.MultiSnapshot unfArgs, Equinox.CachingStrategy.NoCaching) + StoreCategory(context, Cart.Stream.Category, codec, fold, initial, AccessStrategy.MultiSnapshot unfArgs, Equinox.CachingStrategy.NoCaching) |> Equinox.Decider.forStream log |> Cart.create let createServiceWithSnapshotStrategy log context = - StoreCategory(context, Cart.Category, codec, fold, initial, AccessStrategy.Snapshot Cart.Fold.Snapshot.config, Equinox.CachingStrategy.NoCaching) + StoreCategory(context, Cart.Stream.Category, codec, fold, initial, AccessStrategy.Snapshot Cart.Fold.Snapshot.config, Equinox.CachingStrategy.NoCaching) |> Equinox.Decider.forStream log |> Cart.create let createServiceWithSnapshotStrategyAndCaching log context cache = let sliding20m = Equinox.CachingStrategy.SlidingWindow (cache, TimeSpan.FromMinutes 20.) - StoreCategory(context, Cart.Category, codec, fold, initial, AccessStrategy.Snapshot Cart.Fold.Snapshot.config, sliding20m) + StoreCategory(context, Cart.Stream.Category, codec, fold, initial, AccessStrategy.Snapshot Cart.Fold.Snapshot.config, sliding20m) |> Equinox.Decider.forStream log |> Cart.create let createServiceWithRollingState log context = let access = AccessStrategy.RollingState Cart.Fold.Snapshot.generate - StoreCategory(context, Cart.Category, codec, fold, initial, access, Equinox.CachingStrategy.NoCaching) + StoreCategory(context, Cart.Stream.Category, codec, fold, initial, access, Equinox.CachingStrategy.NoCaching) |> Equinox.Decider.forStream log |> Cart.create - let streamName = Cart.streamId >> StreamName.render Cart.Category module ContactPreferences = let fold, initial = ContactPreferences.Fold.fold, ContactPreferences.Fold.initial @@ -55,7 +54,7 @@ module ContactPreferences = let codec = ContactPreferences.Events.codecJe #endif let private createServiceWithLatestKnownEvent context log cachingStrategy = - StoreCategory(context, ContactPreferences.Category, codec, fold, initial, AccessStrategy.LatestKnownEvent, cachingStrategy) + StoreCategory(context, ContactPreferences.Stream.Category, codec, fold, initial, AccessStrategy.LatestKnownEvent, cachingStrategy) |> Equinox.Decider.forStream log |> ContactPreferences.create let createServiceWithoutCaching log context = @@ -63,7 +62,6 @@ module ContactPreferences = let createServiceWithCaching log context cache = let sliding20m = Equinox.CachingStrategy.SlidingWindow (cache, TimeSpan.FromMinutes 20.) createServiceWithLatestKnownEvent context log sliding20m - let streamName = ContactPreferences.streamId >> StreamName.render ContactPreferences.Category [] type Tests(testOutputHelper) = @@ -174,9 +172,9 @@ type Tests(testOutputHelper) = let! 
ct = Async.CancellationToken let eventsContext = Equinox.DynamoStore.Core.EventsContext(context, log) - let streamName = Cart.streamName cartId + let streamName = Cart.Stream.name cartId let countToTry = max addRemoveCount countToTry - let! events = eventsContext.Read(FsCodec.StreamName.parse streamName, ct, 1L, maxCount = countToTry) |> Async.AwaitTask + let! events = eventsContext.Read(streamName, ct, 1L, maxCount = countToTry) |> Async.AwaitTask [| 1..1+countToTry-1 |] =! [| for e in events -> int e.Index |] } #endif @@ -292,7 +290,7 @@ type Tests(testOutputHelper) = // Needs to share the same context (with inner CosmosClient) for the session token to be threaded through // If we run on an independent context, we won't see (and hence prune) the full set of events let ctx = Core.EventsContext(context, log) - let streamName = ContactPreferences.streamName id + let streamName = ContactPreferences.Stream.name id // Prune all the events let! deleted, deferred, trimmedPos = Core.Events.pruneUntil ctx streamName 14L @@ -456,7 +454,7 @@ type Tests(testOutputHelper) = let ctx = Core.EventsContext(context, log) // Prune all the events - let streamName = Cart.streamName cartId + let streamName = Cart.Stream.name cartId let! deleted, deferred, trimmedPos = Core.Events.pruneUntil ctx streamName 11L test <@ deleted = 12 && deferred = 0 && trimmedPos = 12L @> @@ -519,7 +517,7 @@ type Tests(testOutputHelper) = (* Verify pruning does not affect snapshots, and does not touch the Tip *) let ctx = Core.EventsContext(context, log) - let streamName = Cart.streamName cartId + let streamName = Cart.Stream.name cartId // Prune all the events let! deleted, deferred, trimmedPos = Core.Events.pruneUntil ctx streamName 12L test <@ deleted = 13 && deferred = 0 && trimmedPos = 13L @> diff --git a/tests/Equinox.EventStoreDb.Integration/StoreIntegration.fs b/tests/Equinox.EventStoreDb.Integration/StoreIntegration.fs index 2837712f5..5aa47145b 100644 --- a/tests/Equinox.EventStoreDb.Integration/StoreIntegration.fs +++ b/tests/Equinox.EventStoreDb.Integration/StoreIntegration.fs @@ -101,7 +101,7 @@ module SimplestThing = let fold = Array.fold evolve let initial = StuffHappened let [] CategoryName = "SimplestThing" - let streamId = Equinox.StreamId.gen Guid.toStringN + let streamId = FsCodec.StreamId.gen Guid.toStringN let decider log context id = let cat = Category(context, CategoryName, codec, fold, initial, AccessStrategy.Unoptimized, Equinox.CachingStrategy.NoCaching) Equinox.Decider.forStream log cat (streamId id) @@ -110,40 +110,40 @@ module Cart = let fold, initial = Cart.Fold.fold, Cart.Fold.initial let codec = Cart.Events.codec let createServiceWithoutOptimization log context = - Category(context, Cart.Category, codec, fold, initial, AccessStrategy.Unoptimized, Equinox.CachingStrategy.NoCaching) + Category(context, Cart.Stream.Category, codec, fold, initial, AccessStrategy.Unoptimized, Equinox.CachingStrategy.NoCaching) |> Equinox.Decider.forStream log |> Cart.create #if STORE_MESSAGEDB let snapshot = Cart.Fold.Snapshot.eventCaseName, Cart.Fold.Snapshot.generate let createServiceWithAdjacentSnapshotting log context = - Category(context, Cart.Category, codec, fold, initial, AccessStrategy.AdjacentSnapshots snapshot, Equinox.CachingStrategy.NoCaching) + Category(context, Cart.Stream.Category, codec, fold, initial, AccessStrategy.AdjacentSnapshots snapshot, Equinox.CachingStrategy.NoCaching) |> Equinox.Decider.forStream log |> Cart.create #else let snapshot = Cart.Fold.Snapshot.config let createServiceWithCompaction 
log context = - Category(context, Cart.Category, codec, fold, initial, AccessStrategy.RollingSnapshots snapshot, Equinox.CachingStrategy.NoCaching) + Category(context, Cart.Stream.Category, codec, fold, initial, AccessStrategy.RollingSnapshots snapshot, Equinox.CachingStrategy.NoCaching) |> Equinox.Decider.forStream log |> Cart.create #endif let createServiceWithCaching log context cache = let sliding20m = Equinox.CachingStrategy.SlidingWindow (cache, TimeSpan.FromMinutes 20.) - Category(context, Cart.Category, codec, fold, initial, AccessStrategy.Unoptimized, caching = sliding20m) + Category(context, Cart.Stream.Category, codec, fold, initial, AccessStrategy.Unoptimized, caching = sliding20m) |> Equinox.Decider.forStream log |> Cart.create #if STORE_MESSAGEDB let createServiceWithSnapshottingAndCaching log context cache = let sliding20m = Equinox.CachingStrategy.SlidingWindow (cache, TimeSpan.FromMinutes 20.) - Category(context, Cart.Category, codec, fold, initial, AccessStrategy.AdjacentSnapshots snapshot, sliding20m) + Category(context, Cart.Stream.Category, codec, fold, initial, AccessStrategy.AdjacentSnapshots snapshot, sliding20m) |> Equinox.Decider.forStream log |> Cart.create #else let createServiceWithCompactionAndCaching log context cache = let sliding20m = Equinox.CachingStrategy.SlidingWindow (cache, TimeSpan.FromMinutes 20.) - Category(context, Cart.Category, codec, fold, initial, AccessStrategy.RollingSnapshots snapshot, sliding20m) + Category(context, Cart.Stream.Category, codec, fold, initial, AccessStrategy.RollingSnapshots snapshot, sliding20m) |> Equinox.Decider.forStream log |> Cart.create #endif @@ -153,12 +153,12 @@ module ContactPreferences = let codec = ContactPreferences.Events.codec let createServiceWithoutOptimization log connection = let context = createContext connection defaultBatchSize - Category(context, ContactPreferences.Category, codec, fold, initial, AccessStrategy.Unoptimized, Equinox.CachingStrategy.NoCaching) + Category(context, ContactPreferences.Stream.Category, codec, fold, initial, AccessStrategy.Unoptimized, Equinox.CachingStrategy.NoCaching) |> Equinox.Decider.forStream log |> ContactPreferences.create let createService log connection = - Category(createContext connection 1, ContactPreferences.Category, codec, fold, initial, AccessStrategy.LatestKnownEvent, Equinox.CachingStrategy.NoCaching) + Category(createContext connection 1, ContactPreferences.Stream.Category, codec, fold, initial, AccessStrategy.LatestKnownEvent, Equinox.CachingStrategy.NoCaching) |> Equinox.Decider.forStream log |> ContactPreferences.create diff --git a/tests/Equinox.MemoryStore.Integration/MemoryStoreIntegration.fs b/tests/Equinox.MemoryStore.Integration/MemoryStoreIntegration.fs index 9c5a5ee06..302000591 100644 --- a/tests/Equinox.MemoryStore.Integration/MemoryStoreIntegration.fs +++ b/tests/Equinox.MemoryStore.Integration/MemoryStoreIntegration.fs @@ -17,7 +17,7 @@ type AutoDataAttribute() = let createMemoryStore () = VolatileStore<_>() let createServiceMemory log store = - let cat = MemoryStoreCategory(store, Cart.Category, FsCodec.Box.Codec.Create(), Cart.Fold.fold, Cart.Fold.initial) + let cat = MemoryStoreCategory(store, Cart.Stream.Category, FsCodec.Box.Codec.Create(), Cart.Fold.fold, Cart.Fold.initial) cat |> Equinox.Decider.forStream log |> Cart.create type Tests(testOutputHelper) = @@ -58,7 +58,7 @@ type Tests(testOutputHelper) = } let createFavoritesServiceMemory store log : Favorites.Service = - let cat = MemoryStoreCategory(store, Favorites.Category, 
FsCodec.Box.Codec.Create(), Favorites.Fold.fold, Favorites.Fold.initial) + let cat = MemoryStoreCategory(store, Favorites.Stream.Category, FsCodec.Box.Codec.Create(), Favorites.Fold.fold, Favorites.Fold.initial) cat |> Equinox.Decider.forStream log |> Favorites.create type ChangeFeed(testOutputHelper) = @@ -74,7 +74,7 @@ type ChangeFeed(testOutputHelper) = List.ofArray xs use _ = store.Committed.Subscribe(fun struct (sn, xs) -> events.Add(FsCodec.StreamName.toString sn, List.ofArray xs)) let service = createFavoritesServiceMemory store log - let expectedStream = Favorites.streamId clientId |> Equinox.Core.StreamId.renderStreamName Favorites.Category + let expectedStream = Favorites.Stream.name clientId |> FsCodec.StreamName.toString do! service.Favorite(clientId, [sku]) let written = takeCaptured () diff --git a/tools/Equinox.Tool/Program.fs b/tools/Equinox.Tool/Program.fs index 20d8ff619..a2ff76a53 100644 --- a/tools/Equinox.Tool/Program.fs +++ b/tools/Equinox.Tool/Program.fs @@ -411,7 +411,7 @@ module DynamoInit = | Throughput.OnDemand -> log.Information("DynamoStore Provisioning Table {table} with On-Demand capacity management; streaming {streaming}", sa.Table, a.StreamingMode) - let client = sa.Connector.CreateClient() + let client = sa.Connector.CreateDynamoDbClient() let! t = Core.Initialization.provision client sa.Table (t, a.StreamingMode) log.Information("DynamoStore DynamoDB Streams ARN {streamArn}", Core.Initialization.tryGetActiveStreamsArn t) | x -> return Store.missingArg $"unexpected subcommand %A{x}" } @@ -460,7 +460,7 @@ module CosmosStats = | StatsParameters.Dynamo sp -> async { let sa = Store.Dynamo.Arguments sp sa.Connector.LogConfiguration(log) - let client = sa.Connector.CreateClient() + let client = sa.Connector.CreateDynamoDbClient() let! t = Equinox.DynamoStore.Core.Initialization.describe client sa.Table match t.BillingModeSummary, t.ProvisionedThroughput, Equinox.DynamoStore.Core.Initialization.tryGetActiveStreamsArn t with | null, p, streamsArn when p <> null -> @@ -505,10 +505,10 @@ module Dump = with e -> log.ForContext("str", s).Warning(e, "JSON Parse failure - use --JsonSkip option to inhibit"); reraise() else $"(%d{s.Length} chars)" with e -> log.Warning(e, "UTF-8 Parse failure - use --Blobs option to inhibit"); reraise() - let dumpEvents (streamName : FsCodec.StreamName) = async { - let struct (categoryName, sid) = FsCodec.StreamName.splitCategoryAndStreamId streamName + let dumpEvents (streamName: FsCodec.StreamName) = async { + let struct (categoryName, sid) = FsCodec.StreamName.split streamName let cat = store.Category(categoryName, idCodec, fold, initial, isOriginAndSnapshot) - let decider = Equinox.Decider.forStream storeLog cat (UMX.tag sid) + let decider = Equinox.Decider.forStream storeLog cat sid let! 
streamBytes, events = decider.QueryEx(fun c -> c.StreamEventBytes, c.State) let mutable prevTs = None for x in events |> Seq.filter (fun e -> (e.IsUnfold && doU) || (not e.IsUnfold && doE)) do diff --git a/tools/Equinox.Tools.TestHarness/HttpHelpers.fs b/tools/Equinox.Tools.TestHarness/HttpHelpers.fs index 86a9ae336..da1b95c0f 100644 --- a/tools/Equinox.Tools.TestHarness/HttpHelpers.fs +++ b/tools/Equinox.Tools.TestHarness/HttpHelpers.fs @@ -33,7 +33,7 @@ type InvalidHttpResponseException = member x.RequestMethod = HttpMethod(x.requestMethod) - private new (userMessage: string, requestMethod: HttpMethod, requestUri: Uri, requestBody: string, + private new(userMessage: string, requestMethod: HttpMethod, requestUri: Uri, requestBody: string, statusCode: HttpStatusCode, reasonPhrase: string, responseBody: string, ?innerException: exn) = { @@ -56,7 +56,7 @@ type InvalidHttpResponseException = add "requestUri" e.RequestUri ; add "requestMethod" e.requestMethod ; add "requestBody" e.RequestBody add "statusCode" e.StatusCode ; add "reasonPhrase" e.ReasonPhrase ; add "responseBody" e.ResponseBody - new (si: SerializationInfo, sc: StreamingContext) = + new(si: SerializationInfo, sc: StreamingContext) = let get name = si.GetValue(name, typeof<'a>) :?> 'a { inherit Exception(si, sc) ; userMessage = get "userMessage" ;
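
The test edits in this patch repeatedly replace per-aggregate `Category`/`streamId` bindings with a nested `Stream` module exposing `Category`, `id` and, where a full stream name is needed, `name`. The following is a minimal sketch of that convention, not code from the patch: the module name and the `Guid`-based id function are illustrative (they mirror the `SequenceCheck` wiring above), and `FsCodec.StreamName.create` is assumed to be the FsCodec 3.x helper that pairs a category name with a `StreamId`.

```fsharp
module ExampleAggregate =

    module Stream =
        let [<Literal>] Category = "ExampleAggregate"
        // compose/validate the id half of the "{category}-{id}" stream name
        let id = FsCodec.StreamId.gen (fun (g: System.Guid) -> g.ToString "N")
        // render a full FsCodec.StreamName where one is required (e.g. EventsContext.Read, pruning)
        let name = id >> FsCodec.StreamName.create Category
```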
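`Stream.transact` (near the top of this section) drives an optimistic-concurrency loop over the `IStream` contract: decide against the loaded state, attempt `Sync`, and on `SyncResult.Conflict` re-decide from the conflicting state. The sketch below restates only that control flow with simplified, synchronous types; all names are illustrative, and the real code is `task`-based, threads a `CancellationToken`, and maps the final result over the resulting token and state via a caller-supplied function.

```fsharp
// Simplified restatement of the retry loop, for illustration only:
// 'decide' yields a result plus pending events; 'trySync' either accepts them or
// hands back a callback that loads the conflicting (newer) state for a retry.
type SyncOutcome<'state> =
    | Accepted of 'state
    | ConflictResync of (unit -> 'state)

let transactSketch maxAttempts (decide: 'state -> 'result * 'event[]) trySync initialState =
    let rec loop attempt state =
        match decide state with
        | result, [||] -> result                   // no events to write: done, no extra round trip
        | result, events ->
            match trySync state events with
            | Accepted _ -> result                 // write accepted against the state we decided on
            | ConflictResync resync when attempt < maxAttempts ->
                loop (attempt + 1) (resync ())     // load the conflicting state and re-decide
            | ConflictResync _ -> failwith "Exceeded maximum resync attempts"
    loop 1 initialState
```

Note that, as in the real `SyncResult.Conflict`, the conflicting state is obtained via a callback, so a store only pays for that extra read when a retry is actually going to happen.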
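The `eqx dump` change above switches from `splitCategoryAndStreamId` to `FsCodec.StreamName.split`, whose `StreamId` half can be handed straight to a resolved category without re-tagging. A small sketch of that parse/split step follows: `parse` and `split` are used exactly as in the tool code above, while `describeStream` and the printing are illustrative and `FsCodec.StreamId.toString` is assumed to be the untag helper mirroring the `toString` in the deleted `Equinox.Core.StreamId` module.

```fsharp
// Parse a raw "{category}-{id}" string and split it into its two halves,
// as the revised Dump module does before resolving a Decider for the stream
let describeStream (raw: string) =
    let streamName = FsCodec.StreamName.parse raw   // validates the "{category}-{id}" shape
    let struct (categoryName, streamId) = FsCodec.StreamName.split streamName
    printfn $"category: %s{categoryName}, id: %s{FsCodec.StreamId.toString streamId}"
```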