diff --git a/CLAUDE.md b/CLAUDE.md index 0c5973977..70d4baa60 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -97,7 +97,7 @@ samples/ Sample apps (esdb, postgres, kurrentdb, banking) ## Documentation Site -The `docs/` directory is a Docusaurus v3 site (https://eventuous.dev). Requires Node >=18.19.0 and pnpm. +The `docs/` directory is an Astro + Starlight site (https://eventuous.dev). Requires Node >=20.0.0 and pnpm. ```bash cd docs @@ -106,19 +106,16 @@ cd docs pnpm install # Local dev server with hot reload -pnpm start +pnpm dev -# Production build (output to docs/build/) +# Production build (output to docs/dist/) pnpm build # Serve the production build locally -pnpm serve - -# TypeScript validation -pnpm typecheck +pnpm preview ``` -Docs content lives in `docs/docs/` as `.md` and `.mdx` files organized by topic: `domain/`, `persistence/`, `application/`, `subscriptions/`, `read-models/`, `producers/`, `gateway/`, `diagnostics/`, and `infra/` (per-provider: esdb, postgres, mongodb, mssql, kafka, rabbitmq, pubsub, elastic). MDX files can embed React components. Mermaid diagrams are supported in markdown code blocks. Versioned docs are in `versioned_docs/` (current version: 0.15). The build enforces no broken links. +Docs content lives in `docs/src/content/docs/` as `.md` and `.mdx` files organized by topic: `domain/`, `persistence/`, `application/`, `subscriptions/`, `read-models/`, `producers/`, `gateway/`, `diagnostics/`, and `infra/` (per-provider: esdb, postgres, mongodb, mssql, sqlite, kafka, rabbitmq, pubsub, elastic). MDX files can use Astro components. Mermaid diagrams are supported via `starlight-client-mermaid` plugin. Versioned docs (0.15) are managed by the `starlight-versions` plugin in `docs/src/content/docs/0.15/`. Sidebar is configured in `astro.config.mjs`. Frontmatter uses Starlight format (`sidebar.order` for ordering, not `sidebar_position`). 
## Code Style diff --git a/docs/.gitignore b/docs/.gitignore new file mode 100644 index 000000000..ddce69b68 --- /dev/null +++ b/docs/.gitignore @@ -0,0 +1,3 @@ +node_modules/ +dist/ +.astro/ diff --git a/docs/README.md b/docs/README.md deleted file mode 100644 index 0c6c2c27b..000000000 --- a/docs/README.md +++ /dev/null @@ -1,41 +0,0 @@ -# Website - -This website is built using [Docusaurus](https://docusaurus.io/), a modern static website generator. - -### Installation - -``` -$ yarn -``` - -### Local Development - -``` -$ yarn start -``` - -This command starts a local development server and opens up a browser window. Most changes are reflected live without having to restart the server. - -### Build - -``` -$ yarn build -``` - -This command generates static content into the `build` directory and can be served using any static contents hosting service. - -### Deployment - -Using SSH: - -``` -$ USE_SSH=true yarn deploy -``` - -Not using SSH: - -``` -$ GIT_USER= yarn deploy -``` - -If you are using GitHub pages for hosting, this command is a convenient way to build the website and push to the `gh-pages` branch. 
diff --git a/docs/astro.config.mjs b/docs/astro.config.mjs new file mode 100644 index 000000000..3ff708870 --- /dev/null +++ b/docs/astro.config.mjs @@ -0,0 +1,70 @@ +import { defineConfig } from 'astro/config'; +import starlight from '@astrojs/starlight'; +import starlightVersions from 'starlight-versions'; +import starlightMermaid from '@pasqal-io/starlight-client-mermaid'; + +export default defineConfig({ + site: 'https://eventuous.dev', + integrations: [ + starlight({ + title: 'Eventuous', + logo: { + src: './src/assets/logo.png', + }, + social: [ + { icon: 'github', label: 'GitHub', href: 'https://github.com/eventuous/eventuous' }, + { icon: 'discord', label: 'Discord', href: 'https://discord.gg/ZrqM6vnnmf' }, + ], + customCss: ['./src/styles/custom.css'], + plugins: [ + starlightVersions({ + current: { label: 'v0.15 (Stable)' }, + versions: [{ slug: 'next', label: 'Preview' }], + }), + starlightMermaid(), + ], + sidebar: [ + { label: 'Introduction', slug: 'intro' }, + { label: "What's New", slug: 'whats-new' }, + { + label: 'Concepts', + collapsed: true, + items: [ + { label: 'Prologue', autogenerate: { directory: 'prologue' } }, + { label: 'Domain', autogenerate: { directory: 'domain' } }, + { label: 'Persistence', autogenerate: { directory: 'persistence' } }, + ], + }, + { + label: 'Building Apps', + collapsed: true, + items: [ + { label: 'Application', autogenerate: { directory: 'application' } }, + { label: 'Subscriptions', autogenerate: { directory: 'subscriptions' } }, + { label: 'Read Models', autogenerate: { directory: 'read-models' } }, + { label: 'Producers', autogenerate: { directory: 'producers' } }, + { label: 'Gateway', autogenerate: { directory: 'gateway' } }, + ], + }, + { + label: 'Operations', + collapsed: true, + items: [ + { label: 'Diagnostics', autogenerate: { directory: 'diagnostics' } }, + { label: 'Infrastructure', autogenerate: { directory: 'infra' } }, + { label: 'FAQ', autogenerate: { directory: 'faq' } }, + ], + }, + ], + head: [ 
+ { + tag: 'link', + attrs: { rel: 'icon', href: '/favicon.ico', sizes: '32x32' }, + }, + ], + editLink: { + baseUrl: 'https://github.com/eventuous/eventuous/edit/dev/docs/', + }, + }), + ], +}); diff --git a/docs/babel.config.js b/docs/babel.config.js deleted file mode 100644 index e00595dae..000000000 --- a/docs/babel.config.js +++ /dev/null @@ -1,3 +0,0 @@ -module.exports = { - presets: [require.resolve('@docusaurus/core/lib/babel/preset')], -}; diff --git a/docs/docs/application/command-api.md b/docs/docs/application/command-api.md deleted file mode 100644 index 1acd53fce..000000000 --- a/docs/docs/application/command-api.md +++ /dev/null @@ -1,230 +0,0 @@ ---- -title: "Command API" -description: "Auto-generated HTTP API for command handling" -sidebar_position: 3 ---- - -## Controller base - -When using a command service from an HTTP controller, you'd usually inject the service as a dependency, and call it's `Handle` method using the request body: - -```csharp title="Api/BookingCommandApi.cs" -[Route("/booking")] -public class CommandApi : ControllerBase { - ICommandService _service; - - public CommandApi(ICommandService service) => _service = service; - - [HttpPost] - [Route("book")] - public async Task> BookRoom( - [FromBody] BookRoom cmd, - CancellationToken cancellationToken - ) { - var result = await _service.Handle(cmd, cancellationToken); - result Ok(result); - } -} -``` - -The issue here is there's no way to know if the command was successful or not. As the command service won't throw an exception if the command fails, we can't return an error via the HTTP response, unless we parse the [result](app-service.md#result) and return a meaningful HTTP response. - -Eventuous allows you to simplify the command handling in the API controller by providing a `CommandHttpApiBase` abstract class, which implements the `ControllerBase` and contains the `Handle` method. The class takes `ICommandService` as a dependency. 
The `Handle` method will call the command service, and also convert the handling result to `ActionResult`. Here are the rules for exception handling: - -| Result exception | HTTP response | -|----------------------------------|---------------| -| `OptimisticConcurrencyException` | `Conflict` | -| `AggregateNotFoundException` | `NotFound` | -| Any other exception | `BadRequest` | - -Here is an example of a command API controller: - -```csharp -[Route("/booking")] -public class CommandApi : CommandHttpApiBase { - public CommandApi(ICommandService service) : base(service) { } - - [HttpPost] - [Route("book")] - public Task> BookRoom( - [FromBody] BookRoom cmd, - CancellationToken cancellationToken - ) => Handle(cmd, cancellationToken); -} -``` - -We recommend using the `CommandHttpApiBase` class when you want to handle commands using the HTTP API. - -When using [functional services](./func-service.md) you can use the `CommandHttpApiBaseFunc` base class, which works exactly the same way: - -```csharp -[Route("/booking")] -public class CommandApi : CommandHttpApiBaseFunc { - public CommandApi(IFuncCommandService service) : base(service) { } - - [HttpPost] - [Route("book")] - public Task> BookRoom( - [FromBody] BookRoom cmd, - CancellationToken cancellationToken - ) => Handle(cmd, cancellationToken); -} -``` - -## Generated command API - -Eventuous can use your command service to generate a command API. Such an API will accept JSON models matching the application service command contracts, and pass those commands as-is to the application service. This feature removes the need to create API endpoints manually using controllers or .NET minimal API. - -To use generated APIs, you need to add `Eventuous.AspNetCore.Web` package. - -All the auto-generated API endpoints will use the `POST` HTTP method. 
- -### Annotating commands - -For Eventuous to understand what commands need to be exposed as API endpoints and on what routes, those commands need to be annotated by the `HttpCommand` attribute: - -```csharp -[HttpCommand(Route = "payment")] -public record ProcessPayment(string BookingId, float PaidAmount); -``` - -You can skip the `Route` property, in that case Eventuous will use the command class name. For the example above the generated route would be `processPayment`. We recommend specifying the route explicitly as you might refactor the command class and give it a different name, and it will break your API if the route is auto-generated. - -If your application has a single command service working with a single aggregate type, you don't need to specify the aggregate type, and then use a different command registration method (described below). - -Another way to specify the aggregate type for a group of commands is to annotate the parent class (command container): - -```csharp -[AggregateCommands()] -public static class BookingCommands { - [HttpCommand(Route = "payment")] - public record ProcessPayment(string BookingId, float PaidAmount); -} -``` - -In such case, Eventuous will treat all the commands defined inside the `BookingCommands` static class as commands operating on the `Booking` aggregate. - -Also, you don't need to specify the aggregate type in the command annotation if you use the `MapAggregateCommands` registration (see below). - -Finally, you don't need to annotate the command at all if you use the explicit command registration with the route parameter. - -### Registering commands - -There are several extensions for `IEndpointRouteBuilder` that allow you to register HTTP endpoints for one or more commands. 
- -#### Single command - -The simplest way to register a single command is to make it explicitly in the bootstrap code: - -```csharp -var builder = WebApplication.CreateBuilder(); - -// Register the app service -builder.Services.AddCommandService(); - -var app = builder.Build(); - -// Map the command to an API endpoint -app.MapCommand("payment"); - -app.Run(); - -record ProcessPayment(string BookingId, float PaidAmount); -``` - -If you annotate the command with the `HttpCommand` attribute, and specify the route, you can avoid providing the route when registering the command: - -```csharp -app.MapCommand(); - -... - -[HttpCommand(Route = "payment")] -public record ProcessPayment(string BookingId, float PaidAmount); -``` - -#### Multiple commands for an aggregate - -You can also register multiple commands for the same aggregate type, without a need to provide the aggregate type in the command annotation. To do that, use the extension that will create an `CommandServiceRouteBuilder`, then register commands using that builder: - -```csharp -app - .MapAggregateCommands() - .MapCommand() - .MapCommand("discount"); - -... - -// route specified in the annotation -[HttpCommand(Route = "payment")] -public record ProcessPayment(string BookingId, float PaidAmount); - -// No annotation needed -public record ApplyDiscount(string BookingId, float Discount); -``` - -#### Discover commands - -There are two extensions that are able to scan your application for annotated commands, and register them automatically. - -First, the `MapDiscoveredCommand`, which assumes your application only serves commands for a single aggregate type: - -```csharp -app.MapDiscoveredCommands(); - -... - -[HttpCommand(Route = "payment")] -record ProcessPayment(string BookingId, float PaidAmount); -``` - -For it to work, all the commands must be annotated and have the route defined in the annotation. 
- -The second extension will discover all the annotated commands, which need to have an association with the aggregate type by using the `Aggregate` argument of the attribute, or by using the `AggregateCommands` attribute on the container class (described above): - -```csharp -app.MapDiscoveredCommands(); - -... - -[HttpCommand(Route = "bookings/payment")] -record ProcessPayment(string BookingId, float PaidAmount); - -[AggregateCommands] -class V1.PaymentCommands { - [HttpCommand(Route = "payments/register")] - public record RegisterPayment(string PaymentId, string Provider, float Amount); - - [HttpCommand(Route = "payments/refund")] - public record RefundPayment(string PaymentId); -} -``` - -Both extensions will scan the current assembly by default, but you can also provide a list of assemblies to scan as an argument: - -```csharp -app.MapDiscoveredCommands(typeof(V1.PaymentCommands).Assembly); -``` - -### Using HttpContext data - -Commands processed by the command service might include properties that aren't provided by the API client, but are available in the `HttpContext` object. For example, you can think about the user that is making the request. The details about the user, and the user claims, are available in `HttpContext`. - -You can instruct Eventuous to enrich the command before it gets sent to the command service, using the `HttpContext` data. In that case, you also might want to hide the command property from being exposed to the client in the OpenAPI spec. 
- -To hide a property from being exposed to the client, use the `JsonIgnore` attribute: - -```csharp -[HttpCommand(Route = "book")] -public record BookRoom(string RoomId, string BookingId, [property: JsonIgnore] string UserId); -``` - -Then, you can use the `HttpContext` data in your command: - -```csharp -app - .MapAggregateCommands() - .MapCommand((cmd, ctx) => cmd with { UserId = ctx.User.Identity.Name }); -``` - -When the command is mapped to the API endpoint like that, and the property is ignored, the OpenAPI specification won't include the ignored property, and the command service will get the command populated with the user id from `HttpContext`. diff --git a/docs/docs/application/func-service.md b/docs/docs/application/func-service.md deleted file mode 100644 index eaef45124..000000000 --- a/docs/docs/application/func-service.md +++ /dev/null @@ -1,141 +0,0 @@ ---- -title: "Functional service" -description: "Functional command service and unit of work without aggregates" -sidebar_position: 3 ---- - -## Concept - -The functional command service is an alternative way to handle commands. There, you don't use aggregates for the domain model. Instead, you define a set of stateless functions that receive the restored state instance and the collection of previously stored events, and produces new events. The service performs the following operations when handling one command: -1. Extract the stream name from the command, if necessary. -2. Instantiate all the necessary value objects. This could effectively reject the command if value objects cannot be constructed. The command service could also load some other streams, or any other information, which is needed to execute the command but won't change state. -3. If the command expects to operate on an existing stream, the stream events get loaded from the [Event Store](../persistence/event-store). -4. Restore state from the loaded events. -5. 
Execute an operation on the loaded (or new) state and events, using values from the command, and the constructed value objects. -6. The function either performs the operation and produces new events, or rejects the operation. It can also do nothing. -7. If the operation was successful, the service persists new events to the store. Otherwise, it returns a failure to the edge. - -```mermaid -sequenceDiagram - participant Client - participant API Endpoint - participant Command Service - participant Domain Module - participant Event Store - - Client->>+API Endpoint: Request - API Endpoint->>API Endpoint: Deserialize request - API Endpoint->>+Command Service: Command - Command Service->>+Event Store: Load stream - Event Store-->>-Command Service: Events - Command Service->>+Domain Module: Execute - Domain Module-->>-Command Service: New events - Command Service->>+Event Store: Append events - Event Store-->>-Command Service: Return result - Command Service-->>-API Endpoint: Return result - API Endpoint-->>-Client: Return result -``` - -:::caution Handling failures -The last point above translates to: the command service **does not throw exceptions**. It [returns](./app-service.md#result) an instance of `ErrorResult` instead. It is your responsibility to handle the error. -::: - -## Implementation - -Eventuous provides a base class for you to build functional command services. It is a generic abstract class, which is typed to the state type. You should create your own implementation of a service for each state type. As command execution is transactional, it can only operate on a single stream, and, logically, only one state type. However, there is no strong link between the state type and the stream name. You can use the same state type for multiple streams, or use different state types for the same stream. 
- -### Handling commands - -The base class has three methods, which you call in your class constructor to register the command handlers: - -| Function | What's it for | -|--------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `OnNew` | Registers the handler, which expects that the stream doesn't exist. It will get a new state object instance. The operation will fail when it will try storing events due to version mismatch. | -| `OnExisting` | Registers the handler, which expect an existing stream where it will load events from. You need to provide a function to extract the stream name from the command. The handler will get the events loaded from the store, and will throw if there's no stream to load. | -| `OnAny` | Used for handlers, which can operate both on new and existing streams. The command service will _try_ to load events from the given stream, but won't throw if the load fails, and will pass a new state instance instead. | - -Here is an example of a functional command service form our test project: - -```csharp title="BookingFuncService.cs" -public class BookingFuncService : FunctionalCommandService { - public BookingFuncService(IEventStore store, TypeMapper? 
typeMap = null) : base(store, typeMap) { - // Register command handlers - OnNew(cmd => GetStream(cmd.BookingId), BookRoom); - OnExisting(cmd => GetStream(cmd.BookingId), RecordPayment); - - // Helper function to get the stream name from the command - static StreamName GetStream(string id) => new StreamName($"Booking-{id}"); - - // When there's no stream to load, the function only receives the command - static IEnumerable BookRoom(BookRoom cmd) { - yield return new RoomBooked(cmd.RoomId, cmd.CheckIn, cmd.CheckOut, cmd.Price); - } - - // For an existing stream, the function receives the state and the events - static IEnumerable RecordPayment( - BookingState state, - object[] originalEvents, - RecordPayment cmd - ) { - if (state.HasPayment(cmd.PaymentId)) yield break; - - var registered = new BookingPaymentRegistered(cmd.PaymentId, cmd.Amount.Amount); - - yield return registered; - - // Apply the payment to the state - var newState = state.When(registered); - if (newState.IsFullyPaid()) - yield return new BookingFullyPaid(cmd.PaidAt); - if (newState.IsOverpaid()) - yield return new BookingOverpaid((state.AmountPaid - state.Price).Amount); - } - } -} -``` - -The service uses the same `BookingState` record as described on the [State](../domain/state) page. - -### Usage - -Because the functional service base class implements the same `ICommandService` interface, it can be used the same way as any other command service, by calling the `Handle` method from the API controller. 
You can, therefore, use it in API controllers similar to the command service: - -```csharp title="Api/Bookings.cs" -[Route("/booking")] -public class CommandApi : ControllerBase { - IFuncCommandService _service; - - public CommandApi(IFuncCommandService service) - => _service = service; - - [HttpPost] - [Route("book")] - public async Task> BookRoom( - [FromBody] BookRoom cmd, - CancellationToken cancellationToken - ) { - var result = await _service.Handle(cmd, cancellationToken); - result Ok(result); - } -} -``` - -:::caution -Mapping commands to HTTP endpoints won't work with functional services. These features are coming in upcoming releases. -::: - -You can also use the `CommandHttpApiBaseFunc` base class as described on the [command API](./command-api.md) page. - -### Registration - -You can add a functional service to the DI container using the `AddFunctionalService` extensions methods: - -```csharp title="Program.cs" -// Any dependency will be injected -builder.Services.AddFunctionalService(); - -// Specify dependencies explicitly -builder.Services.AddFunctionalService(sp => - new AnotherFuncService(sp.GetRequiredService(), customTypeMap) -); -``` \ No newline at end of file diff --git a/docs/docs/diagnostics/_category_.json b/docs/docs/diagnostics/_category_.json deleted file mode 100644 index 0343033f1..000000000 --- a/docs/docs/diagnostics/_category_.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "label": "Diagnostics", - "position": 9, - "link": { - "type": "generated-index", - "description": "Observability of systems powered by Eventuous." 
- } -} diff --git a/docs/docs/domain/domain-events.md b/docs/docs/domain/domain-events.md deleted file mode 100644 index 32d782139..000000000 --- a/docs/docs/domain/domain-events.md +++ /dev/null @@ -1,63 +0,0 @@ ---- -title: "Domain events" -description: "Domain events: persisted behaviour" -sidebar_position: 3 ---- - -## Concept - -If you ever read the [Blue Book](https://www.domainlanguage.com/ddd/blue-book/), you'd notice that the `Domain Event` concept is not mentioned there. Still, years after the book was published, events have become popular, and domain events in particular. - -Eric Evans, the author of the Blue Book, has added the definition to his [Domain-Design Reference](https://www.domainlanguage.com/ddd/reference/). Let us start with Eric's definition: - -> Model information about activity in the domain as a series of discrete events. Represent each event as a domain object. [...] -> A domain event is a full-fledged part of the domain model, a representation of something that happened in the domain. Ignore irrelevant domain activity while making explicit the events that the domain experts want to track or be notified of, or which are associated with state changes in the other model objects. - -When talking about Event Sourcing, we focus on the last bit: "making explicit the events [...], which are associated with state changes." Event Sourcing takes this definition further, and suggests: - -> Persist the domain objects state as series of domain events. Each domain event represents an explicit state transition. Applying previously recorded events to a domain object allows us to recover the current state of the object itself. - -We can also cite an [article](https://suzdalnitski.medium.com/oop-will-make-you-suffer-846d072b4dce) from Medium (a bit controversial one): - -> In the past, the goto statement was widely used in programming languages, before the advent of procedures/functions. 
The goto statement simply allowed the program to jump to any part of the code during execution. This made it really hard for the developers to answer the question “how did I get to this point of execution?”. And yes, this has caused a large number of bugs. -> A very similar problem is happening nowadays. Only this time the difficult question is **“how did I get to this state”** instead of “how did I get to this point of execution”. - -Event Sourcing effectively answers this question by giving you a history of all the state transitions for your domain objects, represented as domain events. - -So, what is this page about? It doesn't look like a conventional documentation page, does it? Nevertheless, let's see what domain events look like when you build a system with Eventuous. - -```csharp title="BookingEvents.cs" -public static class BookingEvents { - public record RoomBooked( - string RoomId, - LocalDate CheckIn, - LocalDate CheckOut, - decimal Price - ); - - public record BookingPaid( - decimal AmountPaid, - bool PaidInFull - ); - - public record BookingCancelled(string Reason); - - public record BookingImported( - string RoomId, - LocalDate CheckIn, - LocalDate CheckOut - ); -} -``` - -Oh, that's it? A record? Yes, a record. Domain events are property bags. Their only purpose is to convey the state transition using the language of your domain. Technically, a domain event should just be an object, which can be serialised and deserialized for the purpose of persistence. - -Eventuous do's and dont's: -- **Do** make sure your domain events can be serialised to a commonly understood format, like JSON. -- **Do** make domain events immutable. -- **Do** implement equality by value for domain events. -- **Don't** apply things like marker interfaces (or any interfaces) to domain events. -- **Don't** use constructor logic, which can prevent domain events from deserializing. -- **Don't** use value objects in your domain events. 
- -The last point might require some elaboration. The `Value Object` pattern in DDD doesn't only require for those objects to be immutable and implement equality by value. The main attribute of a value object is that it must be _correct_. It means that you can try instantiating a value object with invalid arguments, but it will deny them. This characteristic along forbids value objects from being used in domain events, as events must be _unconditionally deserializable_. No matter what logic your current domain model has, events from the past are equally valid today. By bringing value objects to domain events you make them prone to failure when their validity rules change, which might prevent them from being deserialized. As a result, your aggregates won't be able to restore their state from previously persistent events and nothing will work. diff --git a/docs/docs/faq/_category_.json b/docs/docs/faq/_category_.json deleted file mode 100644 index e5b69bd1a..000000000 --- a/docs/docs/faq/_category_.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "label": "FAQ", - "position": 100, - "link": { - "type": "generated-index", - "description": "Find answers to most common questions here." - } -} diff --git a/docs/docs/gateway/index.mdx b/docs/docs/gateway/index.mdx deleted file mode 100644 index ffd26d728..000000000 --- a/docs/docs/gateway/index.mdx +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: "Gateway" -description: "Event gateway copies events between databases and brokers" -sidebar_position: 80 ---- -import DocCardList from '@theme/DocCardList'; - -An event gateway is an engine to bridge Event Sourcing with Event-Driven Architecture (EDA). When you store events to an [event store](../persistence/event-store.md), you can use an event gateway to receive stored events, transform them, and distribute downstream using different transport. 
- -Scenarios where an event gateway is useful: -* Publish transformed domain events as integration events using a broker -* Scale out projections using a partitioned, event-based broker, such as Kafka, Amazon Kinesis, Google PubSub or Azure Event Hub -* Backup or archive domain events in another event store or time-series database -* Send events to an analytics store or service - -## How a gateway works - -A gateway needs three components that form a gateway event pipeline: -* [Subscription](../subscriptions) to the source event store -* [Transformation](implementation/#transformation) function that can also be used as a filter -* [Producer](../producers) to a broker, another event store, or a database - - diff --git a/docs/docs/infra/_category_.json b/docs/docs/infra/_category_.json deleted file mode 100644 index 2a9411fd8..000000000 --- a/docs/docs/infra/_category_.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "label": "Infrastructure", - "position": 10, - "link": { - "type": "generated-index", - "description": "Supported infrastructure." - } -} diff --git a/docs/docs/persistence/aggregate-stream.md b/docs/docs/persistence/aggregate-stream.md deleted file mode 100644 index 88401a19c..000000000 --- a/docs/docs/persistence/aggregate-stream.md +++ /dev/null @@ -1,104 +0,0 @@ ---- -title: "Aggregate stream" -description: "Aggregate as a stream of events" -sidebar_position: 1 ---- - -## Concept - -So far, we figured out that an [Aggregate](../domain/aggregate.md) is the transaction and consistency boundary within the domain model. - -The [aggregate store](aggregate-store) (application-level abstraction) uses an [event store](event-store.md) (infrastructure) to store events in streams. Each aggregate instance has its own stream, so the event store needs to be capable to read and write events from/to the correct stream. - -When appending events to a stream, the append operation for a single stream must be transactional to ensure that the stream is consistent. 
Eventuous handles commands using the [command service](../application/app-service.md), and one command handler is the unit of work. All the events generated by the aggregate instance during the unit of work are appended to the stream as the final step in the command handling process. - -## Stream name - -By default, Eventuous uses the `AggregateType.Name` combined with the aggregate id as the stream name. For example, the `Booking` aggregate with id `1` has a stream name `Booking-1`. That's what `StreamName.For(1)` returns. - -However, you might want to have more fine-grained control over the stream name. For example, you might want to include the tenant id in the stream name. It's possible to override the default convention by configuring the stream name mapping. The stream map contains a mapping between the aggregate identity type (derived from `AggregateId`) and the stream name generation function. Therefore, any additional property of the aggregate identity type can be used to generate the stream name. - -For example, the following code registers a stream name mapping for the `Booking` aggregate: - -```csharp title="BookingId.cs" -public record BookingId : AggregateId { - public BookingId(string id, string tenantId) : base(id) { - TenantId = tenantId; - } - - public string TenantId { get; } -} -``` - -Create a `StreamNameMap` and register in the container: - -```csharp title="Program.cs" -var streamNameMap = new StreamNameMap(); -streamNameMap.Register( - id => new StreamName($"Booking-{id.TenantId}:{id.Value}") // Split in example with : if you use a Guid as identifier. 
-); -builder.Services.AddSingleton(streamNameMap); -builder.Services.AddCommandService(); -``` - -Then, use the registered `StreamNameMap` in the `CommandService`: - -```csharp title="BookingService.cs" -public class BookingService : CommandService { - public BookingService(IAggregateStore store, StreamNameMap streamNameMap) - : base(store, streamNameMap: streamNameMap) { - // command handlers registered here - } -} -``` - -In your projections you can retrieve the `Id` and `TenantId` from the `StreamName` in the `IMessageConsumeContext`: - -```csharp title="BookingStateProjection.cs" -static UpdateDefinition HandleRoomBooked( - IMessageConsumeContext ctx, - UpdateDefinitionBuilder update -) { - var evt = ctx.Message; - - // Get Id and TenantId - var (id, tenantId) = ctx.Stream.ExtractMultiTenantIds(); - - return update - .SetOnInsert(x => x.Id, id) - .SetOnInsert(x => x.TenantId, tenantId) - .Set(x => x.GuestId, evt.GuestId) - .Set(x => x.RoomId, evt.RoomId) - .Set(x => x.CheckInDate, evt.CheckInDate) - .Set(x => x.CheckOutDate, evt.CheckOutDate) - .Set(x => x.BookingPrice, evt.BookingPrice) - .Set(x => x.Outstanding, evt.OutstandingAmount); -} -``` - -The snippet above uses the following extension method to extract the `Id` and `TenantId` from the `StreamName`: - -```csharp title="StreamNameExtensions.cs" -public static class StreamNameExtensions -{ - /// - /// Split the StreamName into multiple parts for multi tenant stream id. - /// - /// The streamname - /// The seperator for splitting. Default is ':'. - /// A tuple with TenantId and Id property. - /// When stream id can't be split in 2 sections. 
- public static (string TenantId, string Id) ExtractMultiTenantIds(this StreamName stream, char separator = ':') - { - string streamId = stream.GetId(); - var streamIdParts = streamId.Split(separator); - - if (streamIdParts.Length != 2) - { - throw new InvalidStreamName(streamId); - } - - return (streamIdParts[0], streamIdParts[1]); - } -} -``` diff --git a/docs/docs/whats-new.mdx b/docs/docs/whats-new.mdx deleted file mode 100644 index 4fb2112fb..000000000 --- a/docs/docs/whats-new.mdx +++ /dev/null @@ -1,5 +0,0 @@ ---- -sidebar_position: 2 ---- - -# What's new in vNext diff --git a/docs/docusaurus.config.ts b/docs/docusaurus.config.ts deleted file mode 100644 index 257fc3204..000000000 --- a/docs/docusaurus.config.ts +++ /dev/null @@ -1,157 +0,0 @@ -import type {Config} from "@docusaurus/types"; -import type * as Preset from "@docusaurus/preset-classic"; -import {themes} from "prism-react-renderer"; -import versions from "./versions.json"; - -const config: Config = { - title: 'Eventuous', - tagline: 'Event Sourcing for .NET', - favicon: 'img/favicon.ico', - - url: 'https://eventuous.dev', - baseUrl: '/', - - organizationName: 'eventuous', - projectName: 'eventuous', - - onBrokenLinks: 'throw', - - markdown: { - mermaid: true, - hooks: { - onBrokenMarkdownLinks: 'warn', - }, - }, - themes: ['@docusaurus/theme-mermaid'], - - i18n: { - defaultLocale: 'en', - locales: ['en'], - }, - - presets: [[ - 'classic', - { - docs: { - sidebarPath: require.resolve('./sidebars.js'), - editUrl: "https://github.com/eventuous/eventuous/docs/edit/master", - includeCurrentVersion: true, - lastVersion: '0.15', - versions: { - current: { - label: 'Latest', - path: 'next', - banner: 'unreleased', - }, - }, - }, - blog: { - showReadingTime: true, - }, - theme: { - customCss: require.resolve('./src/css/custom.css'), - }, - sitemap: { - lastmod: 'date', - changefreq: 'weekly', - } - } satisfies Preset.Options, - ]], - - themeConfig: { - metadata: [{name: 'keywords', content: 'event 
sourcing, eventsourcing, dotnet, .NET, .NET Core'}], - image: 'img/social-card.png', - algolia: { - appId: 'YQSSKN21VQ', - apiKey: 'd62759f3b1948de19fea5476182dbd66', - indexName: 'eventuous', - }, - colorMode: { - defaultMode: "dark", - respectPrefersColorScheme: true, - }, - navbar: { - title: 'Eventuous', - logo: { - alt: 'Eventuous logo', - src: 'img/logo.png', - }, - items: [ - { - type: 'doc', - docId: 'intro', - position: 'left', - label: 'Documentation', - }, - { - type: 'docsVersionDropdown', - position: 'right', - }, - { - href: 'https://github.com/sponsors/Eventuous', - position: 'right', - label: "Sponsor" - }, - { - href: 'https://blog.eventuous.dev', - position: 'right', - label: "Blog" - }, - { - href: 'https://github.com/eventuous/eventuous', - position: 'right', - className: 'header-github-link', - 'aria-label': 'GitHub repository', - }, - ], - }, - footer: { - style: 'dark', - links: [ - { - title: 'Docs', - items: [ - { - label: 'Documentation', - to: '/docs/intro', - }, - { - label: 'Connector', - href: "https://connect.eventuous.dev" - }, - ], - }, - { - title: 'Community', - items: [ - { - label: 'Discord', - href: 'https://discord.gg/ZrqM6vnnmf', - }, - ], - }, - { - title: 'More', - items: [ - { - label: 'Blog', - href: 'https://blog.eventuous.dev', - }, - { - label: 'GitHub', - href: 'https://github.com/eventuous/eventuous', - }, - ], - }, - ], - copyright: `Copyright © ${new Date().getFullYear()} Eventuous HQ OÜ. 
Built with Docusaurus.`, - }, - prism: { - theme: themes.vsLight, - darkTheme: themes.vsDark, - additionalLanguages: ['csharp'], - }, - } satisfies Preset.ThemeConfig, -}; - -module.exports = config; diff --git a/docs/package.json b/docs/package.json index 0802c816b..e577d4a28 100644 --- a/docs/package.json +++ b/docs/package.json @@ -3,49 +3,21 @@ "version": "0.0.0", "private": true, "scripts": { - "docusaurus": "docusaurus", - "start": "docusaurus start", - "build": "docusaurus build", - "swizzle": "docusaurus swizzle", - "deploy": "docusaurus deploy", - "clear": "docusaurus clear", - "serve": "docusaurus serve", - "write-translations": "docusaurus write-translations", - "write-heading-ids": "docusaurus write-heading-ids", - "typecheck": "tsc" + "dev": "astro dev", + "build": "astro build", + "preview": "astro preview", + "astro": "astro" }, - "dependencies": { - "@docusaurus/core": "^3.9.2", - "@docusaurus/preset-classic": "^3.9.2", - "@docusaurus/theme-common": "^3.9.2", - "@docusaurus/theme-mermaid": "^3.9.2", - "@mdx-js/react": "^3.1.1", - "@types/react": "^18.3.28", - "clsx": "^2.1.1", - "prism-react-renderer": "^2.4.1", - "react": "^18.3.1", - "react-dom": "^18.3.1" - }, - "devDependencies": { - "@docusaurus/module-type-aliases": "^3.9.2", - "@docusaurus/types": "^3.9.2", - "@tsconfig/docusaurus": "^2.0.9", - "typescript": "^5.9.3" - }, - "browserslist": { - "production": [ - ">0.5%", - "not dead", - "not op_mini all" - ], - "development": [ - "last 1 chrome version", - "last 1 firefox version", - "last 1 safari version" + "pnpm": { + "onlyBuiltDependencies": [ + "esbuild", + "sharp" ] }, - "engines": { - "node": ">=20.0.0" - }, - "packageManager": "pnpm@10.23.0" + "dependencies": { + "@astrojs/starlight": "^0.37.6", + "@pasqal-io/starlight-client-mermaid": "^0.1.0", + "astro": "^5.18.0", + "starlight-versions": "^0.7.1" + } } diff --git a/docs/plans/2026-03-03-docusaurus-to-starlight-design.md b/docs/plans/2026-03-03-docusaurus-to-starlight-design.md new 
file mode 100644 index 000000000..8edc4d78a --- /dev/null +++ b/docs/plans/2026-03-03-docusaurus-to-starlight-design.md @@ -0,0 +1,125 @@ +# Docusaurus to Astro/Starlight Migration Design + +**Date**: 2026-03-03 +**Status**: Approved + +## Context + +Migrate the Eventuous documentation site (eventuous.dev) from Docusaurus v3 to Astro + Starlight. Motivations: flexibility and future-proofing. + +## Decisions + +- **Approach**: In-place migration — replace `docs/` directory contents +- **Framework**: Astro + Starlight + `starlight-versions` plugin +- **Versions**: 0.15 (archived via starlight-versions) + current/next. Version 0.14 dropped. +- **Homepage**: Docs-first — `intro.mdx` is the landing page, no custom homepage +- **Design**: Fresh look — start with Starlight defaults, iterate on colors later +- **Deployment**: Cloudflare Pages (already in use), `_redirects` carried over in `public/` + +## Project Structure + +``` +docs/ +├── astro.config.mjs +├── package.json +├── tsconfig.json +├── src/ +│ ├── assets/ (logos, themed images) +│ ├── components/ (ThemedImage, Highlight Astro components) +│ ├── content/ +│ │ └── docs/ (current "next" version) +│ │ ├── index.mdx +│ │ ├── whats-new.mdx +│ │ ├── prologue/ +│ │ ├── domain/ +│ │ ├── persistence/ +│ │ ├── application/ +│ │ ├── subscriptions/ +│ │ ├── read-models/ +│ │ ├── producers/ +│ │ ├── gateway/ +│ │ ├── diagnostics/ +│ │ ├── infra/ +│ │ └── faq/ +│ ├── content.config.ts (uses docsVersionsLoader from starlight-versions) +│ └── styles/ +│ └── custom.css +├── public/ +│ ├── favicon.ico (+ other favicons) +│ ├── social-card.png +│ ├── site.webmanifest +│ └── _redirects +``` + +## Content Migration + +### Frontmatter conversion + +```yaml +# Docusaurus # Starlight +--- --- +id: aggregate # removed (slug from path) +title: Aggregate title: Aggregate +sidebar_label: Aggregate sidebar: +sidebar_position: 1 label: Aggregate +description: "..." order: 1 +--- description: "..." 
+ --- +``` + +### MDX component replacements + +| Docusaurus | Starlight | +|---|---| +| `DocCardList` from `@theme/DocCardList` | Removed — Starlight auto-generates index pages via `autogenerate` sidebar config | +| `ThemedImage` from `@theme/ThemedImage` | Custom Astro `` component using `` + `prefers-color-scheme` | +| `Highlight` from `@site/src/components/highlight` | Custom Astro `` component — same inline span logic | +| `:::tip` / `:::note` / `:::caution` admonitions | Same syntax — Starlight supports natively | +| Mermaid code blocks | `starlight-mermaid` plugin — same ` ```mermaid ` syntax | +| YouTube iframe | Standard HTML ` - -Check out [The Right Way](prologue/the-right-way) to understand how things go wrong sometimes. - -## Next steps - -If you are convinced that Eventuous is the right tool for you, here are the next steps: -- Read the [Prologue](prologue/introduction) to learn what Eventuous provides -- Learn more about the basic building blocks: - - [Domain model](domain) components - - [Persistence](persistence) for events, and how it differs from the traditional approach - - [Application service](application) components - - [Subscriptions](subscriptions) for event processing (like [read models](read-models)) -- Check sample applications to see how Eventuous is used in practice: - - [EventStoreDB](https://github.com/eventuous/dotnet-sample) sample - - [PostgreSQL](https://github.com/eventuous/dotnet-sample-postgres) sample - -## Community - -Eventuous is actively maintained. You can report issues in the [GitHub repository](https://github.com/eventuous/eventuous/issues). - -Ensure you follow the [Code of Conduct](https://github.com/Eventuous/eventuous/blob/da47a6918626b26428063be0e115ff75c539602b/CODE_OF_CONDUCT.md) when interacting with the community. When contributing, please follow the [Contributing Guidelines](https://github.com/Eventuous/eventuous/blob/da47a6918626b26428063be0e115ff75c539602b/CONTRIBUTING.md). 
- -Eventuous is open source and licensed under the Apache 2.0 licence. - -Support the project by providing [sponsorships](https://github.com/sponsors/Eventuous). Eventuous has sponsor plans for both individuals and companies. You can get paid support for your projects, or just show your appreciation for the project. \ No newline at end of file diff --git a/docs/versioned_docs/version-0.15/persistence/images/reading-dark.png b/docs/versioned_docs/version-0.15/persistence/images/reading-dark.png deleted file mode 100644 index c6f850ceb..000000000 Binary files a/docs/versioned_docs/version-0.15/persistence/images/reading-dark.png and /dev/null differ diff --git a/docs/versioned_docs/version-0.15/persistence/images/reading.png b/docs/versioned_docs/version-0.15/persistence/images/reading.png deleted file mode 100644 index dfda40fd2..000000000 Binary files a/docs/versioned_docs/version-0.15/persistence/images/reading.png and /dev/null differ diff --git a/docs/versioned_docs/version-0.15/persistence/images/replication-dark.png b/docs/versioned_docs/version-0.15/persistence/images/replication-dark.png deleted file mode 100644 index 19f6afc54..000000000 Binary files a/docs/versioned_docs/version-0.15/persistence/images/replication-dark.png and /dev/null differ diff --git a/docs/versioned_docs/version-0.15/persistence/images/replication.png b/docs/versioned_docs/version-0.15/persistence/images/replication.png deleted file mode 100644 index bc5e84a22..000000000 Binary files a/docs/versioned_docs/version-0.15/persistence/images/replication.png and /dev/null differ diff --git a/docs/versioned_docs/version-0.15/persistence/serialisation.md b/docs/versioned_docs/version-0.15/persistence/serialisation.md deleted file mode 100644 index e626ef611..000000000 --- a/docs/versioned_docs/version-0.15/persistence/serialisation.md +++ /dev/null @@ -1,127 +0,0 @@ ---- -title: "Serialization" -description: "How events are serialized and deserialized" ---- - -As described on the [Domain 
events](../domain/domain-events.md) page, events must be (de)serializable. Eventuous doesn't care about the serialization format, but requires you to provide a serializer instance, which implements the `IEventSerializer` interface. - -The serializer interface is simple: - -```csharp title="IEventSerializer.cs" -public interface IEventSerializer { - DeserializationResult DeserializeEvent(ReadOnlySpan data, string eventType, string contentType); - - SerializationResult SerializeEvent(object evt); -} -``` - -The serialization result contains not only the serialized object as bytes, but also the event type as string (see below), and the content type: - -```csharp -public record SerializationResult(string EventType, string ContentType, byte[] Payload); -``` - -### Type map - -For deserialization, the serializer will get the binary payload and the event type as string. Event store is unaware of your event types, it just stores the payload in a binary format to the database, along with the event type as string. It is up to you how your strong event types map to the event type string. - -:::caution -We do not advise using fully-qualified type names as event types. It will block your ability to refactor the domain model code. -::: - -Therefore, we need to have a way to map strong types of the events to strings, which are used to identify those types in the database and for serialization. For that purpose, Eventuous uses the `TypeMap`. It is a singleton, which is available globally. When you add new events to your domain model, remember to also add a mapping for those events. The mapping is static, so you can implement it anywhere in the application. The only requirement is that the mapping code must execute when the application starts. 
- -For example, if you have a place where domain events are defined, you can put the mapping code there, as a static member: - -```csharp title="BookingEvents.cs" -static class BookingEvents { - // events are defined here - - public static void MapBookingEvents() { - TypeMap.AddType("RoomBooked"); - TypeMap.AddType("BookingPaid"); - TypeMap.AddType("BookingCancelled"); - TypeMap.AddType("BookingImported"); - } -} -``` - -Then, you can call this code in your bootstrap code: - -```csharp title="Program.cs" -BookingEvents.MapBookingEvents(); -``` - -### Auto-registration of types - -For convenience purposes, you can avoid manual mapping between type names and types by using the `EventType` attribute. - -Annotate your events with it like this: - -```csharp -[EventType("V1.FullyPaid")] -public record BookingFullyPaid(string BookingId, DateTimeOffset FullyPaidAt); -``` - -Then, use the registration code in the bootstrap code: - -```csharp -TypeMap.RegisterKnownEventTypes(); -``` - -The registration won't work if event classes are defined in another assembly, which hasn't been loaded yet. You can work around this limitation by specifying one or more assemblies explicitly: - -```csharp -TypeMap.RegisterKnownEventTypes(typeof(BookingFullyPaid).Assembly); -``` - -If you use the .NET version that supports module initializers, you can register event types in the module. 
For example, if the domain event classes are located in a separate project, add the file `DomainModule.cs` to that project with the following code: - -```csharp title="DomainModule.cs" -using System.Diagnostics.CodeAnalysis; -using System.Runtime.CompilerServices; -using Eventuous; - -namespace Bookings.Domain; - -static class DomainModule { - [ModuleInitializer] - [SuppressMessage("Usage", "CA2255", MessageId = "The \'ModuleInitializer\' attribute should not be used in libraries")] - internal static void InitializeDomainModule() => TypeMap.RegisterKnownEventTypes(); -} -``` - -Then, you won't need to call the `TypeMap` registration in the application code at all. - -### Default serializer - -Eventuous provides a default serializer implementation, which uses `System.Text.Json`. You just need to register it in the `Startup` to make it available for the infrastructure components, like [aggregate store](aggregate-store) and [subscriptions](../subscriptions). - -Normally, you don't need to register or provide the serializer instance to any of the Eventuous classes that perform serialization and deserialization work. It's because they will use the default serializer instance instead. - -However, you can register the default serializer with different options, or a custom serializer instead: - -```csharp title="Program.cs" -builder.Services.AddSingleton( - new DefaultEventSerializer( - new JsonSerializerOptions(JsonSerializerDefaults.Default) - ) -); -``` - -You might want to avoid registering the serializer and override the one that Eventuous uses as the default instance: - -```csharp title="Program.cs" -var defaultSerializer = new DefaultEventSerializer( - new JsonSerializerOptions(JsonSerializerDefaults.Default) -); -DefaultEventSerializer.SetDefaultSerializer(defaultSerializer); -``` - -### Metadata serializer - -In many cases you might want to store event metadata in addition to the event payload.
Normally, you'd use the same way to serialize both the event payload and its metadata, but it's not always the case. For example, you might store your events in Protobuf, but keep metadata as JSON. - -Eventuous only uses the metadata serializer when the event store implementation, or a producer can store metadata as a byte array. For example, EventStoreDB supports that, but Google PubSub doesn't. Therefore, the event store and producer that use EventStoreDB will use the metadata serializer, but the Google PubSub producer will add metadata to events as headers, and won't use the metadata serializer. - -For the metadata serializer the same principles apply as for the event serializer. Eventuous has a separate interface `IMetadataSerializer`, which has a default instance created on startup implicitly. You can register a custom metadata serializer as a singleton or override the default one by calling the `DefaultMetadataSerializer.SetDefaultSerializer` function. diff --git a/docs/versioned_docs/version-0.15/prologue/quick-start.md deleted file mode 100644 index c5c38fdb5..000000000 --- a/docs/versioned_docs/version-0.15/prologue/quick-start.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: "Quick start" -description: "Sample application" -sidebar_position: 2 ---- - -You can find a bookings sample application here: -- [.NET EventStoreDB write => MongoDB read](https://github.com/Eventuous/eventuous/tree/dev/samples/esdb) -- [.NET PostgreSQL write => MongoDB read](https://github.com/Eventuous/eventuous/tree/dev/samples/postgres) - -Samples are being updated with the latest features and improvements.
diff --git a/docs/versioned_docs/version-0.15/prologue/the-right-way/images/flaming-bus.jpg b/docs/versioned_docs/version-0.15/prologue/the-right-way/images/flaming-bus.jpg deleted file mode 100644 index 219596e6a..000000000 Binary files a/docs/versioned_docs/version-0.15/prologue/the-right-way/images/flaming-bus.jpg and /dev/null differ diff --git a/docs/versioned_docs/version-0.15/prologue/the-right-way/images/the-right-way-dark.png b/docs/versioned_docs/version-0.15/prologue/the-right-way/images/the-right-way-dark.png deleted file mode 100644 index b3f4ae193..000000000 Binary files a/docs/versioned_docs/version-0.15/prologue/the-right-way/images/the-right-way-dark.png and /dev/null differ diff --git a/docs/versioned_docs/version-0.15/prologue/the-right-way/images/the-right-way.png b/docs/versioned_docs/version-0.15/prologue/the-right-way/images/the-right-way.png deleted file mode 100644 index 2c217b70d..000000000 Binary files a/docs/versioned_docs/version-0.15/prologue/the-right-way/images/the-right-way.png and /dev/null differ diff --git a/docs/versioned_docs/version-0.15/read-models/supported-projectors.md b/docs/versioned_docs/version-0.15/read-models/supported-projectors.md deleted file mode 100644 index 4ebc02562..000000000 --- a/docs/versioned_docs/version-0.15/read-models/supported-projectors.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Supported projectors -description: Built-in projectors supported by Eventuous. ---- - -Eventuous supports the following projection targets: - -- [MongoDB projections](/docs/infra/mongodb) -- [PostgreSQL projections](/docs/infra/postgres/#projections) -- [Microsoft SQL Server projections](/docs/infra/mssql/#projections) - -You can project to any other database using a custom projector, which can be built as a [custom event handler](/docs/subscriptions/eventhandler/#custom-handlers). 
\ No newline at end of file diff --git a/docs/versioned_docs/version-0.15/subscriptions/checkpoint/images/commit-handler-dark.png b/docs/versioned_docs/version-0.15/subscriptions/checkpoint/images/commit-handler-dark.png deleted file mode 100644 index 7e207a483..000000000 Binary files a/docs/versioned_docs/version-0.15/subscriptions/checkpoint/images/commit-handler-dark.png and /dev/null differ diff --git a/docs/versioned_docs/version-0.15/subscriptions/checkpoint/images/commit-handler.png b/docs/versioned_docs/version-0.15/subscriptions/checkpoint/images/commit-handler.png deleted file mode 100644 index 9d2dce1fd..000000000 Binary files a/docs/versioned_docs/version-0.15/subscriptions/checkpoint/images/commit-handler.png and /dev/null differ diff --git a/docs/versioned_docs/version-0.15/subscriptions/checkpoint/index.mdx b/docs/versioned_docs/version-0.15/subscriptions/checkpoint/index.mdx deleted file mode 100644 index 7eb0a945c..000000000 --- a/docs/versioned_docs/version-0.15/subscriptions/checkpoint/index.mdx +++ /dev/null @@ -1,129 +0,0 @@ ---- -title: "Checkpoints" -description: "What's a checkpoint and why you need to store it" ---- -import ThemedImage from '@theme/ThemedImage'; -import darkUrl from "./images/commit-handler-dark.png"; -import lightUrl from "./images/commit-handler.png"; - -When subscribing to an event store, it is important to consider which events you wish to receive. An effective event store should allow you to subscribe to individual event streams or to a global stream known as the "All stream", which contains all events in the store, organized in the order they were recorded. Many event-driven brokers that persist events as ordered logs also support subscriptions, which are often referred to as "consumers". - -The subscription you choose will determine at what point in the stream you begin to receive events. If you want to process all historical events, it is necessary to subscribe from the beginning of the stream. 
However, if you only wish to receive real-time events, it is necessary to subscribe from the current point in time. - -## What's the checkpoint? - -As the subscription receives and processes events, it progresses along the subscribed stream. Each event that is received and processed has a unique position within the stream, which serves as a checkpoint for the subscription. If the application hosting the subscription shuts down, it is necessary to resume processing events from the last recorded checkpoint, which is the position of the last processed event plus one. This ensures that each event is handled exactly once. As a result, the subscription must keep track of its checkpoint, either by storing it in a dedicated checkpoint store or by using the event store's built-in functionality. - -In some log-based brokers, the concept of a checkpoint is referred to as an "offset". Some event-driven brokers manage subscriptions on the server-side, eliminating the need for client-side checkpoint storage. For example, persistent subscriptions in EventStoreDB and subscriptions in Google PubSub do not require a client-side checkpoint store. Other subscriptions, such as those managed by RabbitMQ, do not have the concept of a checkpoint as RabbitMQ does not retain consumed messages, whether they have been acknowledged or not. - -## Checkpoint store - -Eventuous offers an abstraction layer that enables subscriptions to store checkpoints securely and reliably. You can choose to store the checkpoint in a file or database and determine the frequency at which you wish to store the checkpoint, whether after processing each event or periodically. Although periodic checkpoint storage reduces the stress on the infrastructure supporting the checkpoint store, it requires the subscription to be idempotent. This can be challenging, especially in integration scenarios where it is often difficult or impossible to determine if an event has been published to the broker or not. 
However, this approach may work for read model projections. - -:::caution Keep the checkpoint safe -It is important to keep the checkpoint safe, as its loss will result in the subscription receiving all events. This may be intentional when creating a new [read model](../../read-models), but it can also have unintended consequences in other scenarios. -::: - -On top of the abstraction Eventuous provides a few implementations of the checkpoint store, which you can use out of the box. You can also implement your own checkpoint store if you need to store the checkpoint in a custom location. - -### Abstraction - -The checkpoint store interface is simple, it only has two functions: - -```csharp title="ICheckpointStore.cs" -interface ICheckpointStore { - ValueTask GetLastCheckpoint( - string checkpointId, - CancellationToken cancellationToken - ); - - ValueTask StoreCheckpoint( - Checkpoint checkpoint, - CancellationToken cancellationToken - ); -} -``` - -The `Checkpoint` record is a simple record, which aims to represent a stream position in any kind of event store: - -```csharp -record Checkpoint(string Id, ulong? Position); -``` - -### Available stores - -If a supported projection type in an Eventuous package for projections requires a checkpoint store, you can find its implementation in that package. For example, the `Eventuous.MongoDB` package has a checkpoint store implementation for MongoDB. - -If you register subscriptions in the DI container, you also need to register the checkpoint store: - -```csharp title="Program.cs" -builder.Services.AddSingleton(Mongo.ConfigureMongo()); -builder.Services.AddCheckpointStore(); -``` - -In case you have multiple subscriptions in one service, and you project to different databases (for example, MongoDB and PostgreSQL), you need to specify the checkpoint store for each subscription. 
In this case, you don't need to register the checkpoint store globally in the DI container, but use the `UseCheckpointStore` method when building your subscription: - -```csharp title="Program.cs" -builder.Services.AddSubscription( - "BookingsProjections", - builder => builder - .Configure(cfg => cfg.ConcurrencyLimit = 2) - .UseCheckpointStore() - .AddEventHandler() - .AddEventHandler() - .WithPartitioningByStream(2) -); -``` - -#### MongoDB - -The MongoDB checkpoint store will create a collection called `checkpoint` where it will keep one document per subscription. - -Each checkpoint document contains the checkpoint id, which is the subscription id. Therefore, you only get one `checkpoint` collection per database. - -#### Elasticsearch - -The Elasticsearch checkpoint store will create and use the `checkpoint` index, and the document id there would be the subscription id. - -#### PostgreSQL - -The Postgres checkpoint store will create and use the `checkpoint` table, and the row id there would be the subscription id. Here is the script used to create that table: - -```sql -create table if not exists __schema__.checkpoints ( - id varchar primary key, - position bigint null -); -``` - -#### Other stores - -In addition to that, Eventuous has two implementations in the core subscriptions package: -- `MeasuredCheckpointStore`: creates a trace for all the IO operations, wraps an existing store -- `NoOpCheckpointStore`: does nothing, used in Eventuous tests - -The measured store is used by default if Eventuous diagnostics aren't disabled, and you use the `AddCheckpointStore` container registration extension. - -## Checkpoint commit handler - -In addition to checkpoint store, Eventuous has a more advanced way to work with checkpoints. It doesn't load or store checkpoints by itself, for that purpose it uses the provided checkpoint store. 
However, the commit handler is able to receive a stream of unordered checkpoints, reorder them, detect possible gaps, and only store the checkpoint that is the latest before the gap. - -For subscriptions that support delayed consume (see [Partitioning filter](../pipes/#partitioning-filter)) and require a checkpoint store, you must use the commit handler. All such subscription types provided by Eventuous use the checkpoint commit handler. - -Unless you create your own subscription with such requirements, you don't need to know the internals of the commit handler. However, you would benefit to know the consequences of delayed event processing with supported subscriptions. - -When events get partitioned by the filter, several consumer instances process events in parallel. As a result, each partition will get checkpoints with gaps. When partitioned consumers process events, they run at different speed. Each event inside `DelayedConsumeContext` is explicitly acknowledged, and when it happens, the checkpoint gets to the commit handler queue. The commit handler then is able to accumulate checkpoints, detect gaps in the sequence, and only store the latest checkpoint in a gap-less sequence. - - - -:::note -On the illustration above, the commit queue has a gap, and event **95** is still in-flight. As soon as the event **95** is processed, its position will get to the queue, the commit handler will detect a gap-less sequence, and commit the checkpoint **97**. -::: - -As we talk about gaps, you might face a situation when the commit handler has a list of uncommitted checkpoints with gaps, and the application stops. When this happens, some events were already processed, whilst checkpoints for those events remain in-flight. When the application restarts, it loads the checkpoint that points to some position in the stream that is _earlier_ than positions of already processed events. Because of that, some events will be processed by event handlers _again_. 
Therefore, you need to make sure that your event handlers are _idempotent_, so when the same events are processed again, the result of the processing won't create any undesired side effects. diff --git a/docs/versioned_docs/version-0.15/subscriptions/eventhandler/index.md b/docs/versioned_docs/version-0.15/subscriptions/eventhandler/index.md deleted file mode 100644 index db6da4c7f..000000000 --- a/docs/versioned_docs/version-0.15/subscriptions/eventhandler/index.md +++ /dev/null @@ -1,96 +0,0 @@ ---- -title: "Event handlers" -description: "The last bit of the subscription process" ---- - -Event handlers are the final step in the subscription event processing [pipeline](../pipes). Each subscription has a single consumer that holds a collection of event handlers added to the subscription. The consumer calls all the event handlers simultaneously, collects the results, and then acknowledges the event to the subscription. - -One common example of an event handler is a [read model](../../read-models) projector. Eventuous currently supports projecting events to MongoDB, but you can use any other database or file system. - -## Abstractions - -The default consumer holds classes that implement the basic interface of an event handler, defined as: - -```csharp title="IEventHandler.cs" -public interface IEventHandler { - string DiagnosticName { get; } - - ValueTask HandleEvent(IMessageConsumeContext context); -} -``` - -The `DiagnosticName` property provides information that is used in log messages when the handler processes or fails to process the event. The `HandleEvent` function is called for each event received by the consumer and contains the actual event processing code. It should return a result of type `EventHandlingResult`. - -The `BaseEventHandler` abstract class is commonly used as the base class for all event handlers, including custom ones, instead of implementing the interface directly. 
This class sets the DiagnosticName property to the type name of the event handler class. - -Higher-level event handlers in Eventuous, such as `MongoProjection` and `GatewayHandler`, inherit from the `BaseEventHandler`. - -## Handler results - -A handler typically returns `Success` if the event was handled successfully, `Error` if the event handling failed, or `Ignored` if the handler has no code to process the event. The consumer determines the combined result based on the results returned by the handlers: - -- Ignored events are considered processed successfully -- If all events are processed successfully, the consumer acknowledges the event -- If one or more handlers return an error, the consumer considers it an error and explicitly NACKs the event. -- -The outcome of events that were not acknowledged by the consumer depends on the subscription type and its configuration. - -## Custom handlers - -If you need to implement a custom handler, such as a projector to a relational database, you typically use the `EventHandler` abstraction provided by Eventuous. This abstraction allows you to register typed handlers for specific event types in a map, and the HandleEvent function is already implemented in the interface, which will call the registered handler or return Ignored if no handler is registered for a given event type. - -The `EventHandler` base class takes a [`TypeMapper`](../../persistence/serialisation.md#type-map) instance as a constructor argument. If a constructor argument is not provided, the default type mapper instance will be used. The `On` function uses the type mapper to check if the event type `TEvent` is registered in the type map, thus proactively causing the program to crash during startup if a handler is defined for an unregistered event type. - -As an example, consider a simple handler that prints *$$$ MONEY! 
You got USD 100!* to the console when it receives the `PaymentRegistered` event, where the event's paid amount property is 100 and its currency is USD. - -```csharp title="MoneyHandler.cs" -class MoneyHandler : EventHandler { - public MoneyHandler(TypeMapper? typeMap = null) : base(typeMap) { - On( - async context => { - await Console.Out.WriteLineAsync( - $"$$$ MONEY! You got {context.Message.Currency} {context.Message.AmountPaid}" - ); - } - ); - } -} -``` - -Another example would be a base class for a projector, which would use the handlers map and allow adding extended handlers for projecting events to a query model. Below is an example of a base class for a Postgres projector: - -```csharp title="PostgresProjector.cs" -public abstract class PostgresProjector : EventHandler { - readonly GetPostgresConnection _getConnection; - - protected PostgresProjector( - GetPostgresConnection getConnection, - TypeMapper? mapper = null) : base(mapper) { - _getConnection = getConnection; - } - - protected void On(ProjectToPostgres handler) where T : class { - base.On(async ctx => await Handle(ctx).NoContext()); - - async Task Handle(MessageConsumeContext context) { - await using var connection = _getConnection(); - await connection.OpenAsync(context.CancellationToken).ConfigureAwait(false); - var cmd = await handler(connection, context).ConfigureAwait(false); - await cmd.ExecuteNonQueryAsync(context.CancellationToken).ConfigureAwait(false); - } - } -} - -public delegate Task ProjectToPostgres( - NpgsqlConnection connection, - MessageConsumeContext consumeContext) - where T : class; -``` - -## Registering handlers - -For an event handler to work, it needs to be added to a subscription. The `AddHandler` function on the subscription registration builder takes an instance of the `IEventHandler` interface as an argument. The `AddHandler` function is overloaded to accept a handler instance or a factory function that returns a handler instance. 
- -You can find examples of adding handlers to subscriptions in the [subscription documentation](../sub-base/#registration). - -Built-in projectors are event handlers, and they are added to the subscription in the same way as custom handlers. diff --git a/docs/versioned_docs/version-0.15/subscriptions/pipes/images/concurrent-filter-dark.png b/docs/versioned_docs/version-0.15/subscriptions/pipes/images/concurrent-filter-dark.png deleted file mode 100644 index 692216b2e..000000000 Binary files a/docs/versioned_docs/version-0.15/subscriptions/pipes/images/concurrent-filter-dark.png and /dev/null differ diff --git a/docs/versioned_docs/version-0.15/subscriptions/pipes/images/concurrent-filter.png b/docs/versioned_docs/version-0.15/subscriptions/pipes/images/concurrent-filter.png deleted file mode 100644 index eddf9d72b..000000000 Binary files a/docs/versioned_docs/version-0.15/subscriptions/pipes/images/concurrent-filter.png and /dev/null differ diff --git a/docs/versioned_docs/version-0.15/subscriptions/pipes/images/default-pipe-dark.png b/docs/versioned_docs/version-0.15/subscriptions/pipes/images/default-pipe-dark.png deleted file mode 100644 index 3b1158783..000000000 Binary files a/docs/versioned_docs/version-0.15/subscriptions/pipes/images/default-pipe-dark.png and /dev/null differ diff --git a/docs/versioned_docs/version-0.15/subscriptions/pipes/images/default-pipe.png b/docs/versioned_docs/version-0.15/subscriptions/pipes/images/default-pipe.png deleted file mode 100644 index c62791d37..000000000 Binary files a/docs/versioned_docs/version-0.15/subscriptions/pipes/images/default-pipe.png and /dev/null differ diff --git a/docs/versioned_docs/version-0.15/subscriptions/pipes/images/partitioning-filter-dark.png b/docs/versioned_docs/version-0.15/subscriptions/pipes/images/partitioning-filter-dark.png deleted file mode 100644 index 13eafbd87..000000000 Binary files a/docs/versioned_docs/version-0.15/subscriptions/pipes/images/partitioning-filter-dark.png and 
/dev/null differ diff --git a/docs/versioned_docs/version-0.15/subscriptions/pipes/images/partitioning-filter.png b/docs/versioned_docs/version-0.15/subscriptions/pipes/images/partitioning-filter.png deleted file mode 100644 index feac40e59..000000000 Binary files a/docs/versioned_docs/version-0.15/subscriptions/pipes/images/partitioning-filter.png and /dev/null differ diff --git a/docs/versioned_docs/version-0.15/subscriptions/subs-concept/images/subscriptions-dark.png b/docs/versioned_docs/version-0.15/subscriptions/subs-concept/images/subscriptions-dark.png deleted file mode 100644 index 155d09b36..000000000 Binary files a/docs/versioned_docs/version-0.15/subscriptions/subs-concept/images/subscriptions-dark.png and /dev/null differ diff --git a/docs/versioned_docs/version-0.15/subscriptions/subs-concept/images/subscriptions.png b/docs/versioned_docs/version-0.15/subscriptions/subs-concept/images/subscriptions.png deleted file mode 100644 index b0321e607..000000000 Binary files a/docs/versioned_docs/version-0.15/subscriptions/subs-concept/images/subscriptions.png and /dev/null differ diff --git a/docs/versioned_docs/version-0.15/subscriptions/subs-diagnostics/images/sub-trace.png b/docs/versioned_docs/version-0.15/subscriptions/subs-diagnostics/images/sub-trace.png deleted file mode 100644 index d98c5a8fa..000000000 Binary files a/docs/versioned_docs/version-0.15/subscriptions/subs-diagnostics/images/sub-trace.png and /dev/null differ diff --git a/docs/versioned_sidebars/version-0.14-sidebars.json b/docs/versioned_sidebars/version-0.14-sidebars.json deleted file mode 100644 index 0b29b77ed..000000000 --- a/docs/versioned_sidebars/version-0.14-sidebars.json +++ /dev/null @@ -1,161 +0,0 @@ -{ - "docsSidebar": [ - "intro", - { - "type": "category", - "label": "Prologue", - "link": { - "type": "generated-index", - "description": "Introduction to Eventuous, how Eventuous embraces the original idea of Event Sourcing as formulated by Greg Young.", - "slug": "prologue" - 
}, - "items": [ - { - "type": "autogenerated", - "dirName": "prologue" - } - ] - }, - { - "type": "category", - "label": "Domain", - "link": { - "type": "generated-index", - "description": "Building blocks for your domain model.", - "slug": "domain" - }, - "items": [ - { - "type": "autogenerated", - "dirName": "domain" - } - ] - }, - { - "type": "category", - "label": "Persistence", - "link": { - "type": "doc", - "id": "persistence/index" - }, - "items": [ - "persistence/aggregate-stream", - "persistence/event-store", - "persistence/aggregate-store/index", - "persistence/serialisation" - ] - }, - { - "type": "category", - "label": "Application", - "link": { - "type": "doc", - "id": "application/index" - }, - "items": [ - "application/app-service", - "application/func-service", - "application/command-api", - "application/command-map" - ] - }, - { - "type": "category", - "label": "Subscriptions", - "link": { - "type": "generated-index", - "description": "Real-time event processing using subscriptions.", - "slug": "subscriptions" - }, - "items": [ - { - "type": "autogenerated", - "dirName": "subscriptions" - } - ] - }, - { - "type": "category", - "label": "Read models", - "link": { - "type": "generated-index", - "description": "Read (query, reporting) models.", - "slug": "read-models" - }, - "items": [ - { - "type": "autogenerated", - "dirName": "read-models" - } - ] - }, - { - "type": "category", - "label": "Producers", - "link": { - "type": "doc", - "id": "producers/index" - }, - "items": [ - "producers/implementation", - "producers/providers" - ] - }, - { - "type": "category", - "label": "Gateway", - "link": { - "type": "doc", - "id": "gateway/index" - }, - "items": [ - "gateway/implementation/index" - ] - }, - { - "type": "category", - "label": "Diagnostics", - "link": { - "type": "generated-index", - "description": "Observability of applications build with Eventuous. 
WIP.", - "slug": "diagnostics" - }, - "items": [ - { - "type": "autogenerated", - "dirName": "diagnostics" - } - ] - }, - { - "type": "category", - "label": "Infrastructure", - "link": { - "type": "generated-index", - "description": "Supported infrastructure.", - "slug": "infra" - }, - "items": [ - { - "type": "autogenerated", - "dirName": "infra" - } - ] - }, - { - "type": "category", - "label": "FAQ", - "link": { - "type": "generated-index", - "description": "Frequently asked questions.", - "slug": "faq" - }, - "items": [ - { - "type": "autogenerated", - "dirName": "faq" - } - ] - } - ] -} diff --git a/docs/versioned_sidebars/version-0.15-sidebars.json b/docs/versioned_sidebars/version-0.15-sidebars.json deleted file mode 100644 index 83d17fb68..000000000 --- a/docs/versioned_sidebars/version-0.15-sidebars.json +++ /dev/null @@ -1,160 +0,0 @@ -{ - "docsSidebar": [ - "intro", - "whats-new", - { - "type": "category", - "label": "Prologue", - "link": { - "type": "generated-index", - "description": "Introduction to Eventuous, how Eventuous embraces the original idea of Event Sourcing as formulated by Greg Young.", - "slug": "prologue" - }, - "items": [ - { - "type": "autogenerated", - "dirName": "prologue" - } - ] - }, - { - "type": "category", - "label": "Domain", - "link": { - "type": "generated-index", - "description": "Building blocks for your domain model.", - "slug": "domain" - }, - "items": [ - { - "type": "autogenerated", - "dirName": "domain" - } - ] - }, - { - "type": "category", - "label": "Persistence", - "link": { - "type": "doc", - "id": "persistence/index" - }, - "items": [ - "persistence/aggregate-stream", - "persistence/event-store", - "persistence/aggregate-store", - "persistence/serialisation" - ] - }, - { - "type": "category", - "label": "Application", - "link": { - "type": "doc", - "id": "application/index" - }, - "items": [ - "application/app-service", - "application/func-service", - "application/command-api" - ] - }, - { - "type": "category", - 
"label": "Subscriptions", - "link": { - "type": "generated-index", - "description": "Real-time event processing using subscriptions.", - "slug": "subscriptions" - }, - "items": [ - { - "type": "autogenerated", - "dirName": "subscriptions" - } - ] - }, - { - "type": "category", - "label": "Read models", - "link": { - "type": "generated-index", - "description": "Read (query, reporting) models.", - "slug": "read-models" - }, - "items": [ - { - "type": "autogenerated", - "dirName": "read-models" - } - ] - }, - { - "type": "category", - "label": "Producers", - "link": { - "type": "doc", - "id": "producers/index" - }, - "items": [ - "producers/implementation", - "producers/providers" - ] - }, - { - "type": "category", - "label": "Gateway", - "link": { - "type": "doc", - "id": "gateway/index" - }, - "items": [ - "gateway/implementation/index" - ] - }, - { - "type": "category", - "label": "Observability", - "link": { - "type": "doc", - "id": "diagnostics/index" - }, - "items": [ - "diagnostics/metrics", - "diagnostics/traces", - "diagnostics/logs", - "diagnostics/opentelemetry" - ] - }, - { - "type": "category", - "label": "Infrastructure", - "link": { - "type": "generated-index", - "description": "Supported infrastructure.", - "slug": "infra" - }, - "items": [ - { - "type": "autogenerated", - "dirName": "infra" - } - ] - }, - { - "type": "category", - "label": "FAQ", - "link": { - "type": "generated-index", - "description": "Frequently asked questions.", - "slug": "faq" - }, - "items": [ - { - "type": "autogenerated", - "dirName": "faq" - } - ] - } - ] -} diff --git a/docs/versions.json b/docs/versions.json deleted file mode 100644 index 3cd6f25c2..000000000 --- a/docs/versions.json +++ /dev/null @@ -1,4 +0,0 @@ -[ - "0.15", - "0.14" -]