diff --git a/.changeset/thick-bottles-act.md b/.changeset/thick-bottles-act.md new file mode 100644 index 0000000000..77aa9b61a2 --- /dev/null +++ b/.changeset/thick-bottles-act.md @@ -0,0 +1,6 @@ +--- +"@electric-sql/client": patch +"@core/sync-service": patch +--- + +Add experimental SSE support. diff --git a/package.json b/package.json index aff3bfbc53..e7012119ce 100644 --- a/package.json +++ b/package.json @@ -20,5 +20,10 @@ }, "devDependencies": { "glob": "^10.3.10" + }, + "pnpm": { + "patchedDependencies": { + "@microsoft/fetch-event-source": "patches/@microsoft__fetch-event-source.patch" + } } } diff --git a/packages/sync-service/lib/electric/plug/serve_shape_plug.ex b/packages/sync-service/lib/electric/plug/serve_shape_plug.ex index 6e1531a1a9..702ac42da2 100644 --- a/packages/sync-service/lib/electric/plug/serve_shape_plug.ex +++ b/packages/sync-service/lib/electric/plug/serve_shape_plug.ex @@ -31,6 +31,7 @@ defmodule Electric.Plug.ServeShapePlug do all_params = Map.merge(conn.query_params, conn.path_params) |> Map.update("live", "false", &(&1 != "false")) + |> Map.update("experimental_live_sse", "false", &(&1 != "false")) case Api.validate(api, all_params) do {:ok, request} -> diff --git a/packages/sync-service/lib/electric/shapes.ex b/packages/sync-service/lib/electric/shapes.ex index 5f361daee8..feb158ddca 100644 --- a/packages/sync-service/lib/electric/shapes.ex +++ b/packages/sync-service/lib/electric/shapes.ex @@ -23,7 +23,7 @@ defmodule Electric.Shapes do else # If we have a shape handle, but no shape, it means the shape was deleted. Send a 409 # and expect the client to retry - if the state of the world allows, it'll get a new handle. - {:error, Electric.Shapes.Api.Error.must_refetch()} + {:error, Electric.Shapes.Api.Error.must_refetch(opts)} end end diff --git a/packages/sync-service/lib/electric/shapes/api.ex b/packages/sync-service/lib/electric/shapes/api.ex index c4db87c015..97b852a346 100644 --- a/packages/sync-service/lib/electric/shapes/api.ex +++ b/packages/sync-service/lib/electric/shapes/api.ex @@ -9,6 +9,7 @@ defmodule Electric.Shapes.Api do alias __MODULE__ alias __MODULE__.Request alias __MODULE__.Response + alias __MODULE__.SseState import Electric.Replication.LogOffset, only: [is_log_offset_lt: 2] @@ -27,7 +28,9 @@ defmodule Electric.Shapes.Api do required: true ], allow_shape_deletion: [type: :boolean], + keepalive_interval: [type: :integer], long_poll_timeout: [type: :integer], + sse_timeout: [type: :integer], max_age: [type: :integer], stack_ready_timeout: [type: :integer], stale_age: [type: :integer], @@ -50,12 +53,15 @@ defmodule Electric.Shapes.Api do :stack_id, :storage, allow_shape_deletion: false, + keepalive_interval: 21_000, long_poll_timeout: 20_000, + sse_timeout: 60_000, max_age: 60, stack_ready_timeout: 5_000, stale_age: 300, send_cache_headers?: true, encoder: Electric.Shapes.Api.Encoder.JSON, + sse_encoder: Electric.Shapes.Api.Encoder.SSE, configured: false ] @@ -65,7 +71,6 @@ defmodule Electric.Shapes.Api do # Aliasing for pattern matching @before_all_offset LogOffset.before_all() @offset_out_of_bounds %{offset: ["out of bounds for this shape"]} - @must_refetch [%{headers: %{control: "must-refetch"}}] # Need to implement Access behaviour because we use that to extract config # when using shapes api @@ -320,10 +325,14 @@ defmodule Electric.Shapes.Api do # TODO: discuss returning a 307 redirect rather than a 409, the client # will have to detect this and throw out old data + + %{params: %{experimental_live_sse: in_sse?}} = request + error = 
Api.Error.must_refetch(experimental_live_sse: in_sse?) + {:error, - Response.error(request, @must_refetch, + Response.error(request, error.message, handle: active_shape_handle, - status: 409 + status: error.status )} end @@ -489,17 +498,21 @@ defmodule Electric.Shapes.Api do handle: shape_handle, chunk_end_offset: chunk_end_offset, global_last_seen_lsn: global_last_seen_lsn, - params: %{offset: offset, live: live?}, + params: %{offset: offset, live: live?, experimental_live_sse: in_sse?}, api: api, response: response } = request - case Shapes.get_merged_log_stream(api, shape_handle, since: offset, up_to: chunk_end_offset) do + case Shapes.get_merged_log_stream(api, shape_handle, + since: offset, + up_to: chunk_end_offset, + experimental_live_sse: in_sse? + ) do {:ok, log} -> if live? && Enum.take(log, 1) == [] do request |> update_attrs(%{ot_is_immediate_response: false}) - |> hold_until_change() + |> handle_live_request() else up_to_date_lsn = if live? do @@ -512,9 +525,9 @@ defmodule Electric.Shapes.Api do max(global_last_seen_lsn, chunk_end_offset.tx_offset) end - body = Stream.concat([log, maybe_up_to_date(request, up_to_date_lsn)]) + log_stream = Stream.concat(log, maybe_up_to_date(request, up_to_date_lsn)) - %{response | chunked: true, body: encode_log(request, body)} + %{response | chunked: true, body: encode_log(request, log_stream)} end {:error, %Api.Error{} = error} -> @@ -523,10 +536,11 @@ defmodule Electric.Shapes.Api do {:error, :unknown} -> # the shape has been deleted between the request validation and the attempt # to return the log stream - Response.error(request, @must_refetch, status: 409) + error = Api.Error.must_refetch(experimental_live_sse: in_sse?) + Response.error(request, error.message, status: error.status) {:error, %SnapshotError{type: :schema_changed}} -> - error = Api.Error.must_refetch() + error = Api.Error.must_refetch(experimental_live_sse: in_sse?) Logger.warning("Schema changed while creating snapshot for #{shape_handle}") Response.error(request, error.message, status: error.status) @@ -563,12 +577,20 @@ defmodule Electric.Shapes.Api do end end + defp handle_live_request(%Request{params: %{experimental_live_sse: true}} = request) do + stream_sse_events(request) + end + + defp handle_live_request(%Request{} = request) do + hold_until_change(request) + end + defp hold_until_change(%Request{} = request) do %{ new_changes_ref: ref, last_offset: last_offset, handle: shape_handle, - params: %{shape_definition: shape_def}, + params: %{shape_definition: shape_def, experimental_live_sse: in_sse?}, api: %{long_poll_timeout: long_poll_timeout} = api } = request @@ -603,13 +625,16 @@ defmodule Electric.Shapes.Api do |> do_serve_shape_log() {^ref, :shape_rotation, new_handle} -> - Response.error(request, @must_refetch, + error = Api.Error.must_refetch(experimental_live_sse: in_sse?) + + Response.error(request, error.message, handle: new_handle, - status: 409 + status: error.status ) {^ref, :shape_rotation} -> - Response.error(request, @must_refetch, status: 409) + error = Api.Error.must_refetch(experimental_live_sse: in_sse?) 
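+          # In SSE mode `error.message` is a single event map; in long
+          # polling mode it is a one-element list, matching each format.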
+ Response.error(request, error.message, status: error.status) after # If we timeout, return an up-to-date message long_poll_timeout -> @@ -628,6 +653,160 @@ defmodule Electric.Shapes.Api do end end + defp stream_sse_events(%Request{} = request) do + %{ + new_changes_ref: ref, + handle: shape_handle, + api: %{keepalive_interval: keepalive_interval, sse_timeout: sse_timeout}, + params: %{offset: since_offset} + } = request + + Logger.debug( + "Client #{inspect(self())} is streaming SSE for changes to #{shape_handle} since #{inspect(since_offset)}" + ) + + # Set up timer for SSE comment as keep-alive + keepalive_ref = Process.send_after(self(), {:sse_keepalive, ref}, keepalive_interval) + + # Set up timer for SSE timeout + timeout_ref = Process.send_after(self(), {:sse_timeout, ref}, sse_timeout) + + # Stream changes as SSE events for the duration of the timer. + sse_event_stream = + Stream.resource( + fn -> + %SseState{ + mode: :receive, + request: request, + stream: nil, + since_offset: since_offset, + last_message_time: System.monotonic_time(:millisecond), + keepalive_ref: keepalive_ref + } + end, + &next_sse_event/1, + fn %SseState{keepalive_ref: latest_keepalive_ref} -> + Process.cancel_timer(latest_keepalive_ref) + Process.cancel_timer(timeout_ref) + end + ) + + response = %{request.response | chunked: true, body: sse_event_stream} + + %{response | trace_attrs: Map.put(response.trace_attrs || %{}, :ot_is_sse_response, true)} + end + + defp next_sse_event(%SseState{mode: :receive} = state) do + %{ + keepalive_ref: keepalive_ref, + last_message_time: last_message_time, + request: + %{ + api: %{ + keepalive_interval: keepalive_interval + }, + handle: shape_handle, + new_changes_ref: ref + } = request, + since_offset: since_offset + } = state + + receive do + {^ref, :new_changes, latest_log_offset} -> + updated_request = + %{request | last_offset: latest_log_offset} + |> determine_global_last_seen_lsn() + |> determine_log_chunk_offset() + |> determine_up_to_date() + + # This is usually but not always the `latest_log_offset` + # as per `determine_log_chunk_offset/1`. + end_offset = updated_request.chunk_end_offset + + in_sse? = true + + case Shapes.get_merged_log_stream( + updated_request.api, + shape_handle, + since: since_offset, + up_to: end_offset, + experimental_live_sse: in_sse? 
+ ) do + {:ok, log} -> + Process.cancel_timer(keepalive_ref) + + control_messages = maybe_up_to_date(updated_request, end_offset.tx_offset) + message_stream = Stream.concat(log, control_messages) + encoded_stream = encode_log(updated_request, message_stream) + + current_time = System.monotonic_time(:millisecond) + + new_keepalive_ref = + Process.send_after(self(), {:sse_keepalive, ref}, keepalive_interval) + + {[], + %{ + state + | mode: :emit, + stream: encoded_stream, + since_offset: end_offset, + last_message_time: current_time, + keepalive_ref: new_keepalive_ref + }} + + {:error, _error} -> + {[], state} + end + + {^ref, :shape_rotation} -> + must_refetch = %{headers: %{control: "must-refetch"}} + message = encode_message(request, must_refetch) + + {message, %{state | mode: :done}} + + {:sse_keepalive, ^ref} -> + current_time = System.monotonic_time(:millisecond) + time_since_last_message = current_time - last_message_time + + if time_since_last_message >= keepalive_interval do + new_keepalive_ref = + Process.send_after(self(), {:sse_keepalive, ref}, keepalive_interval) + + {[": keep-alive\n\n"], + %{state | last_message_time: current_time, keepalive_ref: new_keepalive_ref}} + else + # Not time to send a keep-alive yet, schedule for the remaining time + remaining_time = keepalive_interval - time_since_last_message + new_keepalive_ref = Process.send_after(self(), {:sse_keepalive, ref}, remaining_time) + + {[], %{state | keepalive_ref: new_keepalive_ref}} + end + + {:sse_timeout, ^ref} -> + {[], %{state | mode: :done}} + end + end + + defp next_sse_event(%SseState{mode: :emit} = state) do + %{stream: stream} = state + + # Can change the number taken to adjust the grouping. Currently three + # because there's typically 3 elements per SSE -- the actual message + # and the "data: " and "\n\n" delimiters around it. + # + # The JSON encoder groups stream elements by 500. So perhaps this + # could be a larger number for more efficiency? + case StreamSplit.take_and_drop(stream, 3) do + {[], _tail} -> + {[], %{state | mode: :receive, stream: nil}} + + {head, tail} -> + {head, %{state | stream: tail}} + end + end + + defp next_sse_event(%SseState{mode: :done} = state), do: {:halt, state} + defp no_change_response(%Request{} = request) do %{response: response, global_last_seen_lsn: global_last_seen_lsn} = update_attrs(request, %{ot_is_empty_response: true}) @@ -671,16 +850,35 @@ defmodule Electric.Shapes.Api do def stack_id(%Api{stack_id: stack_id}), do: stack_id def stack_id(%{api: %{stack_id: stack_id}}), do: stack_id + defp encode_log(%Request{api: api, params: %{live: true, experimental_live_sse: true}}, stream) do + encode_sse(api, :log, stream) + end + defp encode_log(%Request{api: api}, stream) do encode(api, :log, stream) end - @spec encode_message(Api.t() | Request.t(), term()) :: Enum.t() - def encode_message(%Request{api: api}, message) do + # Error messages are encoded normally, even when using SSE + # because they are returned on the original fetch request + # with a status code that is not 2xx. 
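+  # For example, a 409 must-refetch body is returned as plain JSON rather
+  # than wrapped in an SSE `data: ...` frame.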
+  @spec encode_error_message(Api.t() | Request.t(), term()) :: Enum.t()
+  def encode_error_message(%Api{} = api, message) do
+    encode(api, :message, message)
+  end
+
+  def encode_error_message(%Request{api: api}, message) do
     encode(api, :message, message)
   end
 
-  def encode_message(%Api{} = api, message) do
+  @spec encode_message(Request.t(), term()) :: Enum.t()
+  def encode_message(
+        %Request{api: api, params: %{live: true, experimental_live_sse: true}},
+        message
+      ) do
+    encode_sse(api, :message, message)
+  end
+
+  def encode_message(%Request{api: api}, message) do
     encode(api, :message, message)
   end
 
@@ -689,6 +887,10 @@
     apply(encoder, type, [message])
   end
 
+  defp encode_sse(%Api{sse_encoder: sse_encoder}, type, message) when type in [:message, :log] do
+    apply(sse_encoder, type, [message])
+  end
+
   def schema(%Response{
         api: %Api{inspector: inspector},
         shape_definition: %Shapes.Shape{} = shape
diff --git a/packages/sync-service/lib/electric/shapes/api/encoder.ex b/packages/sync-service/lib/electric/shapes/api/encoder.ex
index f6f84fdaa9..babb6c2b30 100644
--- a/packages/sync-service/lib/electric/shapes/api/encoder.ex
+++ b/packages/sync-service/lib/electric/shapes/api/encoder.ex
@@ -56,6 +56,36 @@ defmodule Electric.Shapes.Api.Encoder.JSON do
   end
 end
 
+defmodule Electric.Shapes.Api.Encoder.SSE do
+  @behaviour Electric.Shapes.Api.Encoder
+
+  @impl Electric.Shapes.Api.Encoder
+  def log(item_stream) do
+    # Note that, unlike the JSON log encoder, this doesn't currently use
+    # `Stream.chunk_every/2`.
+    #
+    # This is because it's only handling live events and is usually used
+    # for small updates (the point of enabling SSE mode is to avoid request
+    # overhead when consuming small changes).
+
+    item_stream
+    |> Stream.flat_map(&message/1)
+  end
+
+  @impl Electric.Shapes.Api.Encoder
+  def message(message) do
+    ["data: ", ensure_json(message), "\n\n"]
+  end
+
+  defp ensure_json(json) when is_binary(json) do
+    json
+  end
+
+  defp ensure_json(term) do
+    Jason.encode_to_iodata!(term)
+  end
+end
+
 defmodule Electric.Shapes.Api.Encoder.Term do
   @behaviour Electric.Shapes.Api.Encoder
diff --git a/packages/sync-service/lib/electric/shapes/api/error.ex b/packages/sync-service/lib/electric/shapes/api/error.ex
index 4498c330b5..d24e0a5e7d 100644
--- a/packages/sync-service/lib/electric/shapes/api/error.ex
+++ b/packages/sync-service/lib/electric/shapes/api/error.ex
@@ -1,10 +1,22 @@
 defmodule Electric.Shapes.Api.Error do
   defstruct [:message, :status]
 
-  def must_refetch() do
-    %__MODULE__{
-      message: [%{headers: %{control: "must-refetch"}}],
-      status: 409
-    }
+  @must_refetch %{headers: %{control: "must-refetch"}}
+
+  @doc """
+  When responding to client HTTP requests, the value of the `experimental_live_sse` option
+  passed to `must_refetch/1` (based on whether the fetch request is using SSE mode or not)
+  determines the formatting of the response body: SSE clients expect single events but long
+  polling clients expect an array of messages.
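+
+  For illustration, the two shapes of the returned `message`:
+
+      # experimental_live_sse: true -> a single event
+      %{headers: %{control: "must-refetch"}}
+
+      # default (long polling) -> an array of messages
+      [%{headers: %{control: "must-refetch"}}]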
+ """ + def must_refetch(opts) do + message = + if Keyword.get(opts, :experimental_sse_mode, false) do + @must_refetch + else + [@must_refetch] + end + + %__MODULE__{message: message, status: 409} end end diff --git a/packages/sync-service/lib/electric/shapes/api/params.ex b/packages/sync-service/lib/electric/shapes/api/params.ex index 0f85d9b0ad..6c556a111c 100644 --- a/packages/sync-service/lib/electric/shapes/api/params.ex +++ b/packages/sync-service/lib/electric/shapes/api/params.ex @@ -8,6 +8,7 @@ defmodule Electric.Shapes.Api.Params do import Ecto.Changeset @tmp_compaction_flag :experimental_compaction + @tmp_sse_flag :experimental_live_sse @primary_key false defmodule ColumnList do @@ -50,6 +51,7 @@ defmodule Electric.Shapes.Api.Params do field(:replica, Ecto.Enum, values: [:default, :full], default: :default) field(:params, {:map, :string}, default: %{}) field(@tmp_compaction_flag, :boolean, default: false) + field(@tmp_sse_flag, :boolean, default: false) end @type t() :: %__MODULE__{} @@ -61,6 +63,7 @@ defmodule Electric.Shapes.Api.Params do |> cast_offset() |> validate_handle_with_offset() |> validate_live_with_offset() + |> validate_live_sse() |> cast_root_table(api) |> apply_action(:validate) |> convert_error(api) @@ -150,6 +153,20 @@ defmodule Electric.Shapes.Api.Params do end end + def validate_live_sse(%Ecto.Changeset{valid?: false} = changeset), do: changeset + + def validate_live_sse(%Ecto.Changeset{} = changeset) do + live = get_field(changeset, :live) + + if live do + changeset + else + validate_exclusion(changeset, @tmp_sse_flag, [true], + message: "can't be true unless live is also true" + ) + end + end + def cast_root_table(%Ecto.Changeset{valid?: false} = changeset, _), do: changeset def cast_root_table(%Ecto.Changeset{} = changeset, %Api{shape: nil} = api) do diff --git a/packages/sync-service/lib/electric/shapes/api/response.ex b/packages/sync-service/lib/electric/shapes/api/response.ex index 18af9dbfb0..1c204ae81e 100644 --- a/packages/sync-service/lib/electric/shapes/api/response.ex +++ b/packages/sync-service/lib/electric/shapes/api/response.ex @@ -120,7 +120,7 @@ defmodule Electric.Shapes.Api.Response do message end - Api.encode_message(api_or_request, body) + Api.encode_error_message(api_or_request, body) end @spec send(Plug.Conn.t(), t()) :: Plug.Conn.t() @@ -215,6 +215,7 @@ defmodule Electric.Shapes.Api.Response do |> put_up_to_date_header(response) |> put_offset_header(response) |> put_known_error_header(response) + |> put_sse_headers(response) end defp put_shape_handle_header(conn, %__MODULE__{handle: nil}) do @@ -275,6 +276,20 @@ defmodule Electric.Shapes.Api.Response do ) end + # For live SSE requests we want to cache for just under the + # sse_timeout, in order to enable request collapsing. 
+ defp put_cache_headers(conn, %__MODULE__{ + params: %{live: true, experimental_live_sse: true}, + api: api + }) do + conn + |> put_cache_header( + "cache-control", + "public, max-age=#{max(1, div(api.sse_timeout, 1000) - 1)}", + api + ) + end + # For live requests we want short cache lifetimes and to update the live cursor defp put_cache_headers(conn, %__MODULE__{params: %{live: true}, api: api}) do conn @@ -354,6 +369,16 @@ defmodule Electric.Shapes.Api.Response do :ok end + defp put_sse_headers(conn, %__MODULE__{params: %{live: true, experimental_live_sse: true}}) do + conn + |> Plug.Conn.put_resp_header("content-type", "text/event-stream") + |> Plug.Conn.put_resp_header("connection", "keep-alive") + end + + defp put_sse_headers(conn, _response) do + conn + end + defp send_stream(%Plug.Conn{} = conn, %__MODULE__{status: status} = response) do validate_response_finalized!(response) diff --git a/packages/sync-service/lib/electric/shapes/api/sse_state.ex b/packages/sync-service/lib/electric/shapes/api/sse_state.ex new file mode 100644 index 0000000000..f35deed29b --- /dev/null +++ b/packages/sync-service/lib/electric/shapes/api/sse_state.ex @@ -0,0 +1,22 @@ +defmodule Electric.Shapes.Api.SseState do + alias Electric.Shapes.Api + alias Electric.Replication.LogOffset + + defstruct [ + :mode, + :request, + :stream, + :since_offset, + :last_message_time, + :keepalive_ref + ] + + @type t() :: %__MODULE__{ + mode: :receive | :emit | :done, + request: Api.Request.t(), + stream: Enumerable.t() | nil, + since_offset: LogOffset.t(), + last_message_time: pos_integer(), + keepalive_ref: reference() + } +end diff --git a/packages/sync-service/lib/electric/shapes/consumer.ex b/packages/sync-service/lib/electric/shapes/consumer.ex index 814b0b40b2..6698197af0 100644 --- a/packages/sync-service/lib/electric/shapes/consumer.ex +++ b/packages/sync-service/lib/electric/shapes/consumer.ex @@ -141,7 +141,7 @@ defmodule Electric.Shapes.Consumer do state = state - |> reply_to_snapshot_waiters({:error, Api.Error.must_refetch()}) + |> reply_to_snapshot_waiters({:error, Api.Error.must_refetch([])}) |> terminate_safely() {:reply, :ok, state} diff --git a/packages/sync-service/mix.exs b/packages/sync-service/mix.exs index 5d629c8ed0..359a3e8387 100644 --- a/packages/sync-service/mix.exs +++ b/packages/sync-service/mix.exs @@ -110,6 +110,7 @@ defmodule Electric.MixProject do {:retry, "~> 0.19"}, {:remote_ip, "~> 1.2"}, {:req, "~> 0.5"}, + {:stream_split, "~> 0.1"}, {:telemetry_poller, "~> 1.2"}, # tls_certificate_check is required by otel_exporter_otlp {:tls_certificate_check, "~> 1.27"}, diff --git a/packages/sync-service/mix.lock b/packages/sync-service/mix.lock index 905cd16c82..c0f518f60e 100644 --- a/packages/sync-service/mix.lock +++ b/packages/sync-service/mix.lock @@ -53,6 +53,7 @@ "sentry": {:hex, :sentry, "10.10.0", "d058b635f3796947545c8057a42996f6dbefd12152da947209b56d16af41b161", [:mix], [{:hackney, "~> 1.8", [hex: :hackney, repo: "hexpm", optional: true]}, {:jason, "~> 1.1", [hex: :jason, repo: "hexpm", optional: true]}, {:nimble_options, "~> 1.0", [hex: :nimble_options, repo: "hexpm", optional: false]}, {:nimble_ownership, "~> 0.3.0 or ~> 1.0", [hex: :nimble_ownership, repo: "hexpm", optional: false]}, {:phoenix, "~> 1.6", [hex: :phoenix, repo: "hexpm", optional: true]}, {:phoenix_live_view, "~> 0.20 or ~> 1.0", [hex: :phoenix_live_view, repo: "hexpm", optional: true]}, {:plug, "~> 1.6", [hex: :plug, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", 
optional: true]}], "hexpm", "7c7ddd3cfdd63fcee53b1e28f9a653037e6927b2b1dbd300b7aeee9687c7a8f6"}, "ssl_verify_fun": {:hex, :ssl_verify_fun, "1.1.7", "354c321cf377240c7b8716899e182ce4890c5938111a1296add3ec74cf1715df", [:make, :mix, :rebar3], [], "hexpm", "fe4c190e8f37401d30167c8c405eda19469f34577987c76dde613e838bbc67f8"}, "stream_data": {:hex, :stream_data, "1.2.0", "58dd3f9e88afe27dc38bef26fce0c84a9e7a96772b2925c7b32cd2435697a52b", [:mix], [], "hexpm", "eb5c546ee3466920314643edf68943a5b14b32d1da9fe01698dc92b73f89a9ed"}, + "stream_split": {:hex, :stream_split, "0.1.7", "2d3fd1fd21697da7f91926768d65f79409086052c9ec7ae593987388f52425f8", [:mix], [], "hexpm", "1dc072ff507a64404a0ad7af90df97096183fee8eeac7b300320cea7c4679147"}, "telemetry": {:hex, :telemetry, "1.3.0", "fedebbae410d715cf8e7062c96a1ef32ec22e764197f70cda73d82778d61e7a2", [:rebar3], [], "hexpm", "7015fc8919dbe63764f4b4b87a95b7c0996bd539e0d499be6ec9d7f3875b79e6"}, "telemetry_metrics": {:hex, :telemetry_metrics, "1.1.0", "5bd5f3b5637e0abea0426b947e3ce5dd304f8b3bc6617039e2b5a008adc02f8f", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "e7b79e8ddfde70adb6db8a6623d1778ec66401f366e9a8f5dd0955c56bc8ce67"}, "telemetry_metrics_prometheus_core": {:hex, :telemetry_metrics_prometheus_core, "1.2.1", "c9755987d7b959b557084e6990990cb96a50d6482c683fb9622a63837f3cd3d8", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}, {:telemetry_metrics, "~> 0.6 or ~> 1.0", [hex: :telemetry_metrics, repo: "hexpm", optional: false]}], "hexpm", "5e2c599da4983c4f88a33e9571f1458bf98b0cf6ba930f1dc3a6e8cf45d5afb6"}, diff --git a/packages/sync-service/test/electric/plug/serve_shape_plug_test.exs b/packages/sync-service/test/electric/plug/serve_shape_plug_test.exs index d513709670..4433096296 100644 --- a/packages/sync-service/test/electric/plug/serve_shape_plug_test.exs +++ b/packages/sync-service/test/electric/plug/serve_shape_plug_test.exs @@ -69,6 +69,7 @@ defmodule Electric.Plug.ServeShapePlugTest do inspector: @inspector, registry: @registry, long_poll_timeout: long_poll_timeout(ctx), + sse_timeout: sse_timeout(ctx), max_age: max_age(ctx), stale_age: stale_age(ctx), persistent_kv: ctx.persistent_kv @@ -799,6 +800,122 @@ defmodule Electric.Plug.ServeShapePlugTest do end end + describe "serving shapes with sse mode" do + setup :with_lsn_tracker + + setup ctx do + {:via, _, {registry_name, registry_key}} = + Electric.Replication.Supervisor.name(ctx) + + {:ok, _} = Registry.register(registry_name, registry_key, nil) + set_status_to_active(ctx) + :ok + end + + test "returns proper SSE format response when experimental_live_sse=true and live=true", + ctx do + Mock.ShapeCache + |> expect(:get_shape, fn @test_shape, _opts -> + {@test_shape_handle, @test_offset} + end) + |> stub(:has_shape?, fn @test_shape_handle, _opts -> true end) + |> stub(:await_snapshot_start, fn @test_shape_handle, _ -> :started end) + + # Remove unused variable + # Set up storage mocks + Mock.Storage + |> stub(:for_shape, fn @test_shape_handle, _opts -> @test_opts end) + |> expect(:get_chunk_end_log_offset, fn @test_offset, _ -> nil end) + |> expect(:get_log_stream, fn @test_offset, @test_offset, @test_opts -> [] end) + + # Use a short SSE timeout for the test + ctx = Map.put(ctx, :sse_timeout, 100) + + conn = + ctx + |> conn( + :get, + %{"table" => "public.users"}, + "?offset=#{@test_offset}&handle=#{@test_shape_handle}&live=true&experimental_live_sse=true" + ) + |> call_serve_shape_plug(ctx) + + # 
+      # Validate response headers for SSE
+      assert {"content-type", "text/event-stream"} in conn.resp_headers
+      assert {"connection", "keep-alive"} in conn.resp_headers
+
+      # Verify cache control header for request collapsing
+      cache_control =
+        Enum.find_value(conn.resp_headers, fn
+          {"cache-control", value} -> value
+          _ -> nil
+        end)
+
+      assert cache_control =~ "public"
+      assert cache_control =~ "max-age="
+
+      # Verify status and chunked transfer encoding (the response body is
+      # streamed as SSE events)
+      assert conn.status == 200
+      assert conn.state == :chunked
+    end
+
+    test "returns 400 when experimental_live_sse=true but live=false", ctx do
+      conn =
+        ctx
+        |> conn(
+          :get,
+          %{"table" => "public.users"},
+          "?offset=#{@test_offset}&handle=#{@test_shape_handle}&experimental_live_sse=true"
+        )
+        |> call_serve_shape_plug(ctx)
+
+      assert conn.status == 400
+
+      assert Jason.decode!(conn.resp_body) == %{
+               "message" => "Invalid request",
+               "errors" => %{
+                 "experimental_live_sse" => ["can't be true unless live is also true"]
+               }
+             }
+    end
+
+    test "sends properly formatted SSE events", ctx do
+      next_offset = LogOffset.increment(@test_offset)
+      test_content = %{key: "test-key", value: "test-value", headers: %{}, offset: next_offset}
+
+      # Set up mocks
+      Mock.ShapeCache
+      |> expect(:get_shape, fn @test_shape, _opts ->
+        {@test_shape_handle, @test_offset}
+      end)
+      |> stub(:has_shape?, fn @test_shape_handle, _opts -> true end)
+      |> stub(:await_snapshot_start, fn @test_shape_handle, _ -> :started end)
+
+      Mock.Storage
+      |> stub(:for_shape, fn @test_shape_handle, _opts -> @test_opts end)
+      |> expect(:get_chunk_end_log_offset, fn @test_offset, _ -> next_offset end)
+      |> expect(:get_log_stream, fn @test_offset, _, @test_opts -> [test_content] end)
+
+      # Make the request
+      %{resp_body: body} =
+        ctx
+        |> conn(
+          :get,
+          %{"table" => "public.users"},
+          "?offset=#{@test_offset}&handle=#{@test_shape_handle}&live=true&experimental_live_sse=true"
+        )
+        |> call_serve_shape_plug(ctx)
+
+      # Verify the SSE formatted body
+      assert body =~ "data:"
+      assert body =~ "test-key"
+      assert body =~ "test-value"
+    end
+  end
+
   describe "stack not ready" do
     test "returns 503", ctx do
       conn =
@@ -847,4 +964,5 @@ defmodule Electric.Plug.ServeShapePlugTest do
   defp max_age(ctx), do: Access.get(ctx, :max_age, 60)
   defp stale_age(ctx), do: Access.get(ctx, :stale_age, 300)
   defp long_poll_timeout(ctx), do: Access.get(ctx, :long_poll_timeout, 20_000)
+  defp sse_timeout(ctx), do: Access.get(ctx, :sse_timeout, 60_000)
 end
diff --git a/packages/typescript-client/package.json b/packages/typescript-client/package.json
index a7e90caecd..8f504bcfca 100644
--- a/packages/typescript-client/package.json
+++ b/packages/typescript-client/package.json
@@ -6,6 +6,9 @@
   "bugs": {
     "url": "https://github.com/electric-sql/electric/issues"
   },
+  "dependencies": {
+    "@microsoft/fetch-event-source": "^2.0.1"
+  },
   "devDependencies": {
     "@types/pg": "^8.11.6",
     "@types/uuid": "^10.0.0",
@@ -24,7 +27,7 @@
     "tsup": "^8.0.1",
     "typescript": "^5.5.2",
     "uuid": "^10.0.0",
-    "vitest": "^2.0.2"
+    "vitest": "^3.0.0"
   },
   "type": "module",
   "exports": {
diff --git a/packages/typescript-client/src/client.ts b/packages/typescript-client/src/client.ts
index 5c7f61b84e..5e028702fe 100644
--- a/packages/typescript-client/src/client.ts
+++ b/packages/typescript-client/src/client.ts
@@ -7,7 +7,7 @@ import {
   GetExtensions,
 } from './types'
 import { MessageParser, Parser } from './parser'
-import { isUpToDateMessage } from './helpers'
+import { getOffset,
isUpToDateMessage } from './helpers' import { FetchError, FetchBackoffAbortError, @@ -40,7 +40,12 @@ import { REPLICA_PARAM, FORCE_DISCONNECT_AND_REFRESH, PAUSE_STREAM, + EXPERIMENTAL_LIVE_SSE_QUERY_PARAM, } from './constants' +import { + EventSourceMessage, + fetchEventSource, +} from '@microsoft/fetch-event-source' const RESERVED_PARAMS: Set = new Set([ LIVE_CACHE_BUSTER_QUERY_PARAM, @@ -245,6 +250,11 @@ export interface ShapeStreamOptions { */ subscribe?: boolean + /** + * Experimental support for Server-Sent Events (SSE) for live updates. + */ + experimentalLiveSse?: boolean + signal?: AbortSignal fetchClient?: typeof fetch backoffOptions?: BackoffOptions @@ -284,8 +294,9 @@ export interface ShapeStreamInterface = Row> { } /** - * Reads updates to a shape from Electric using HTTP requests and long polling. Notifies subscribers - * when new messages come in. Doesn't maintain any history of the + * Reads updates to a shape from Electric using HTTP requests and long polling or + * Server-Sent Events (SSE). + * Notifies subscribers when new messages come in. Doesn't maintain any history of the * log but does keep track of the offset position and is the best way * to consume the HTTP `GET /v1/shape` api. * @@ -300,6 +311,14 @@ export interface ShapeStreamInterface = Row> { * }) * ``` * + * To use Server-Sent Events (SSE) for real-time updates: + * ``` + * const stream = new ShapeStream({ + * url: `http://localhost:3000/v1/shape`, + * experimentalLiveSse: true + * }) + * ``` + * * To abort the stream, abort the `signal` * passed in via the `ShapeStreamOptions`. * ``` @@ -326,6 +345,7 @@ export class ShapeStream = Row> #error: unknown = null readonly #fetchClient: typeof fetch + readonly #sseFetchClient: typeof fetch readonly #messageParser: MessageParser readonly #subscribers = new Map< @@ -351,6 +371,7 @@ export class ShapeStream = Row> #tickPromise?: Promise #tickPromiseResolver?: () => void #tickPromiseRejecter?: (reason?: unknown) => void + #messageChain = Promise.resolve([]) // promise chain for incoming messages constructor(options: ShapeStreamOptions>) { this.options = { subscribe: true, ...options } @@ -365,13 +386,17 @@ export class ShapeStream = Row> options.fetchClient ?? ((...args: Parameters) => fetch(...args)) - const fetchWithBackoffClient = createFetchWithBackoff(baseFetchClient, { + const backOffOpts = { ...(options.backoffOptions ?? BackoffDefaults), onFailedAttempt: () => { this.#connected = false options.backoffOptions?.onFailedAttempt?.() }, - }) + } + const fetchWithBackoffClient = createFetchWithBackoff( + baseFetchClient, + backOffOpts + ) this.#fetchClient = createFetchWithConsumedMessages( createFetchWithResponseHeadersCheck( @@ -379,6 +404,16 @@ export class ShapeStream = Row> ) ) + const sseFetchWithBackoffClient = createFetchWithBackoff( + baseFetchClient, + backOffOpts, + true + ) + + this.#sseFetchClient = createFetchWithResponseHeadersCheck( + createFetchWithChunkBuffer(sseFetchWithBackoffClient) + ) + this.#subscribeToVisibilityChanges() } @@ -436,6 +471,7 @@ export class ShapeStream = Row> async #requestShape(): Promise { if (this.#state === `pause-requested`) { this.#state = `paused` + return } @@ -450,7 +486,70 @@ export class ShapeStream = Row> this.#state = `active` const { url, signal } = this.options + const { fetchUrl, requestHeaders } = await this.#constructUrl( + url, + resumingFromPause + ) + const abortListener = await this.#createAbortListener(signal) + const requestAbortController = this.#requestAbortController! 
// we know that it is not undefined because it is set by `this.#createAbortListener`
+
+    try {
+      await this.#fetchShape({
+        fetchUrl,
+        requestAbortController,
+        headers: requestHeaders,
+        resumingFromPause,
+      })
+    } catch (e) {
+      // Handle abort error triggered by refresh
+      if (
+        (e instanceof FetchError || e instanceof FetchBackoffAbortError) &&
+        requestAbortController.signal.aborted &&
+        requestAbortController.signal.reason === FORCE_DISCONNECT_AND_REFRESH
+      ) {
+        // Start a new request
+        return this.#requestShape()
+      }
+
+      if (e instanceof FetchBackoffAbortError) {
+        if (
+          requestAbortController.signal.aborted &&
+          requestAbortController.signal.reason === PAUSE_STREAM
+        ) {
+          this.#state = `paused`
+        }
+        return // interrupted
+      }
+      if (!(e instanceof FetchError)) throw e // should never happen
+
+      if (e.status == 409) {
+        // Upon receiving a 409, we should start from scratch
+        // with the newly provided shape handle
+        const newShapeHandle = e.headers[SHAPE_HANDLE_HEADER]
+        this.#reset(newShapeHandle)
+        await this.#publish(e.json as Message<T>[])
+        return this.#requestShape()
+      } else {
+        // Notify subscribers
+        this.#sendErrorToSubscribers(e)
+
+        // errors that have reached this point are not actionable without
+        // additional user input, such as 400s or failures to read the
+        // body of a response, so we exit the loop
+        throw e
+      }
+    } finally {
+      if (abortListener && signal) {
+        signal.removeEventListener(`abort`, abortListener)
+      }
+      this.#requestAbortController = undefined
+    }
+
+    this.#tickPromiseResolver?.()
+    return this.#requestShape()
+  }
+
+  async #constructUrl(url: string, resumingFromPause: boolean) {
     // Resolve headers and params in parallel
     const [requestHeaders, params] = await Promise.all([
       resolveHeaders(this.options.headers),
@@ -513,75 +612,34 @@ export class ShapeStream<T extends Row<unknown> = Row>
     // sort query params in-place for stable URLs and improved cache hits
     fetchUrl.searchParams.sort()
 
+    return {
+      fetchUrl,
+      requestHeaders,
+    }
+  }
+
+  async #createAbortListener(signal?: AbortSignal) {
     // Create a new AbortController for this request
     this.#requestAbortController = new AbortController()
 
     // If user provided a signal, listen to it and pass on the reason for the abort
-    let abortListener: (() => void) | undefined
     if (signal) {
-      abortListener = () => {
+      const abortListener = () => {
        this.#requestAbortController?.abort(signal.reason)
      }
+
      signal.addEventListener(`abort`, abortListener, { once: true })
+
      if (signal.aborted) {
        // If the signal is already aborted, abort the request immediately
        this.#requestAbortController?.abort(signal.reason)
      }
-    }
 
-    let response!: Response
-    try {
-      response = await this.#fetchClient(fetchUrl.toString(), {
-        signal: this.#requestAbortController.signal,
-        headers: requestHeaders,
-      })
-      this.#connected = true
-    } catch (e) {
-      // Handle abort error triggered by refresh
-      if (
-        (e instanceof FetchError || e instanceof FetchBackoffAbortError) &&
-        this.#requestAbortController.signal.aborted &&
-        this.#requestAbortController.signal.reason ===
-          FORCE_DISCONNECT_AND_REFRESH
-      ) {
-        // Loop back to the top of the while loop to start a new request
-        return this.#requestShape()
-      }
-
-      if (e instanceof FetchBackoffAbortError) {
-        if (
-          this.#requestAbortController.signal.aborted &&
-          this.#requestAbortController.signal.reason === PAUSE_STREAM
-        ) {
-          this.#state = `paused`
-        }
-        return // interrupted
-      }
-      if (!(e instanceof FetchError)) throw e // should never happen
-
-      if (e.status == 409) {
-        // Upon receiving a 409, we should start from scratch
-        // with the newly provided shape handle
-        const newShapeHandle = e.headers[SHAPE_HANDLE_HEADER]
-        this.#reset(newShapeHandle)
-        await this.#publish(e.json as Message<T>[])
-        return this.#requestShape()
-      } else {
-        // Notify subscribers
-        this.#sendErrorToSubscribers(e)
-
-        // errors that have reached this point are not actionable without
-        // additional user input, such as 400s or failures to read the
-        // body of a response, so we exit the loop
-        throw e
-      }
-    } finally {
-      if (abortListener && signal) {
-        signal.removeEventListener(`abort`, abortListener)
-      }
-      this.#requestAbortController = undefined
+      return abortListener
     }
+  }
 
+  async #onInitialResponse(response: Response) {
     const { headers, status } = response
     const shapeHandle = headers.get(SHAPE_HANDLE_HEADER)
     if (shapeHandle) {
@@ -611,23 +669,122 @@
       // There's no content so we are live and up to date
       this.#lastSyncedAt = Date.now()
     }
+  }
 
-    const messages = (await response.text()) || `[]`
-    const batch = this.#messageParser.parse(messages, this.#schema)
+  async #onMessages(messages: string, schema: Schema, isSseMessage = false) {
+    const batch = this.#messageParser.parse(messages, schema)
 
     // Update isUpToDate
     if (batch.length > 0) {
       const lastMessage = batch[batch.length - 1]
       if (isUpToDateMessage(lastMessage)) {
+        if (isSseMessage) {
+          // Only use the offset from the up-to-date message if this was an SSE message.
+          // If we would use this offset from a regular fetch, then it will be wrong
+          // and we will get an "offset is out of bounds for this shape" error
+          const offset = getOffset(lastMessage)
+          if (offset) {
+            this.#lastOffset = offset
+          }
+        }
         this.#lastSyncedAt = Date.now()
         this.#isUpToDate = true
       }
 
       await this.#publish(batch)
     }
+  }
+
+  /**
+   * Fetches the shape from the server using either long polling or SSE.
+   * Upon receiving a successful response, the #onInitialResponse method is called.
+   * Afterwards, the #onMessages method is called for all the incoming updates.
+   * @param opts - The options for the request.
+   * @returns A promise that resolves when the request is complete (i.e. the long poll receives a response or the SSE connection is closed).
+   */
+  async #fetchShape(opts: {
+    fetchUrl: URL
+    requestAbortController: AbortController
+    headers: Record<string, string>
+    resumingFromPause?: boolean
+  }): Promise<void> {
+    if (
+      this.#isUpToDate &&
+      this.options.experimentalLiveSse &&
+      !this.#isRefreshing &&
+      !opts.resumingFromPause
+    ) {
+      opts.fetchUrl.searchParams.set(EXPERIMENTAL_LIVE_SSE_QUERY_PARAM, `true`)
+      return this.#requestShapeSSE(opts)
+    }
 
-    this.#tickPromiseResolver?.()
-    return this.#requestShape()
+    return this.#requestShapeLongPoll(opts)
+  }
+
+  async #requestShapeLongPoll(opts: {
+    fetchUrl: URL
+    requestAbortController: AbortController
+    headers: Record<string, string>
+  }): Promise<void> {
+    const { fetchUrl, requestAbortController, headers } = opts
+    const response = await this.#fetchClient(fetchUrl.toString(), {
+      signal: requestAbortController.signal,
+      headers,
+    })
+
+    this.#connected = true
+    await this.#onInitialResponse(response)
+
+    const schema = this.#schema! // we know that it is not undefined because it is set by `this.#onInitialResponse`
+    const res = await response.text()
+    const messages = res || `[]`
+
+    await this.#onMessages(messages, schema)
+  }
+
+  async #requestShapeSSE(opts: {
+    fetchUrl: URL
+    requestAbortController: AbortController
+    headers: Record<string, string>
+  }): Promise<void> {
+    const { fetchUrl, requestAbortController, headers } = opts
+    const fetch = this.#sseFetchClient
+    try {
+      await fetchEventSource(fetchUrl.toString(), {
+        headers,
+        fetch,
+        onopen: async (response: Response) => {
+          this.#connected = true
+          await this.#onInitialResponse(response)
+        },
+        onmessage: (event: EventSourceMessage) => {
+          if (event.data) {
+            // Process the SSE message
+            // The event.data is a single JSON object, so we wrap it in an array
+            const messages = `[${event.data}]`
+            const schema = this.#schema! // we know that it is not undefined because it is set in onopen when we call this.#onInitialResponse
+            this.#onMessages(messages, schema, true)
+          }
+        },
+        onerror: (error: Error) => {
+          // rethrow to close the SSE connection
+          throw error
+        },
+        signal: requestAbortController.signal,
+      })
+    } catch (error) {
+      if (requestAbortController.signal.aborted) {
+        // During an SSE request, the fetch might have succeeded
+        // and we are parsing the incoming stream.
+        // If the abort happens while we're parsing the stream,
+        // then it won't be caught by our `createFetchWithBackoff` wrapper
+        // and instead we will get a raw AbortError here
+        // which we need to turn into a `FetchBackoffAbortError`
+        // such that #start handles it correctly.
+        throw new FetchBackoffAbortError()
+      }
+      throw error
+    }
+  }
 
   #pause() {
@@ -724,18 +881,26 @@
     this.#isRefreshing = false
   }
 
-  async #publish(messages: Message<T>[]): Promise<void> {
-    await Promise.all(
-      Array.from(this.#subscribers.values()).map(async ([callback, __]) => {
-        try {
-          await callback(messages)
-        } catch (err) {
-          queueMicrotask(() => {
-            throw err
-          })
-        }
-      })
+  async #publish(messages: Message<T>[]): Promise<void> {
+    // We process messages asynchronously
+    // but SSE's `onmessage` handler is synchronous.
+    // We use a promise chain to ensure that the handlers
+    // execute sequentially in the order the messages were received.
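+    // For example, if #publish is called with batch A and then batch B
+    // before A's subscriber callbacks settle, B's callbacks only run
+    // once A's have completed.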
+ this.#messageChain = this.#messageChain.then(() => + Promise.all( + Array.from(this.#subscribers.values()).map(async ([callback, __]) => { + try { + await callback(messages) + } catch (err) { + queueMicrotask(() => { + throw err + }) + } + }) + ) ) + + return this.#messageChain } #sendErrorToSubscribers(error: Error) { diff --git a/packages/typescript-client/src/constants.ts b/packages/typescript-client/src/constants.ts index bdf6210b30..d71008556a 100644 --- a/packages/typescript-client/src/constants.ts +++ b/packages/typescript-client/src/constants.ts @@ -12,5 +12,6 @@ export const TABLE_QUERY_PARAM = `table` export const WHERE_QUERY_PARAM = `where` export const REPLICA_PARAM = `replica` export const WHERE_PARAMS_PARAM = `params` +export const EXPERIMENTAL_LIVE_SSE_QUERY_PARAM = `experimental_live_sse` export const FORCE_DISCONNECT_AND_REFRESH = `force-disconnect-and-refresh` export const PAUSE_STREAM = `pause-stream` diff --git a/packages/typescript-client/src/fetch.ts b/packages/typescript-client/src/fetch.ts index 4818a97a52..9bd1bb211a 100644 --- a/packages/typescript-client/src/fetch.ts +++ b/packages/typescript-client/src/fetch.ts @@ -38,7 +38,8 @@ export const BackoffDefaults = { export function createFetchWithBackoff( fetchClient: typeof fetch, - backoffOptions: BackoffOptions = BackoffDefaults + backoffOptions: BackoffOptions = BackoffDefaults, + sseMode: boolean = false ): typeof fetch { const { initialDelay, @@ -64,7 +65,16 @@ export function createFetchWithBackoff( try { const result = await fetchClient(...args) if (result.ok) return result - else throw await FetchError.fromResponse(result, url.toString()) + + const err = await FetchError.fromResponse(result, url.toString()) + if (err.status === 409 && sseMode) { + // The json body is [ { headers: { control: 'must-refetch' } } ] in normal mode + // and is { headers: { control: 'must-refetch' } } in SSE mode + // So in SSE mode we need to wrap it in an array + err.json = [err.json] + } + + throw err } catch (e) { onFailedAttempt?.() if (options?.signal?.aborted) { diff --git a/packages/typescript-client/src/helpers.ts b/packages/typescript-client/src/helpers.ts index c30ed129f0..1c1aa21f6a 100644 --- a/packages/typescript-client/src/helpers.ts +++ b/packages/typescript-client/src/helpers.ts @@ -1,4 +1,4 @@ -import { ChangeMessage, ControlMessage, Message, Row } from './types' +import { ChangeMessage, ControlMessage, Message, Offset, Row } from './types' /** * Type guard for checking {@link Message} is {@link ChangeMessage}. @@ -51,3 +51,15 @@ export function isUpToDateMessage = Row>( ): message is ControlMessage & { up_to_date: true } { return isControlMessage(message) && message.headers.control === `up-to-date` } + +/** + * Parses the LSN from the up-to-date message and turns it into an offset. + * The LSN is only present in the up-to-date control message when in SSE mode. + * If we are not in SSE mode this function will return undefined. 
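+ *
+ * For illustration (hypothetical messages):
+ *
+ *   getOffset({ headers: { control: `up-to-date`, global_last_seen_lsn: `123` } })
+ *   // => `123_0`
+ *   getOffset({ headers: { control: `up-to-date` } })
+ *   // => undefined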
+ */ +export function getOffset(message: ControlMessage): Offset | undefined { + const lsn = Number(message.headers.global_last_seen_lsn) + if (lsn && !isNaN(lsn)) { + return `${lsn}_0` + } +} diff --git a/packages/typescript-client/src/types.ts b/packages/typescript-client/src/types.ts index 35eefc7a34..ce476c1af6 100644 --- a/packages/typescript-client/src/types.ts +++ b/packages/typescript-client/src/types.ts @@ -26,7 +26,10 @@ interface Header { export type Operation = `insert` | `update` | `delete` export type ControlMessage = { - headers: Header & { control: `up-to-date` | `must-refetch` } + headers: Header & { + control: `up-to-date` | `must-refetch` + global_last_seen_lsn?: string + } } export type ChangeMessage = Row> = { diff --git a/packages/typescript-client/test/__snapshots__/client.test.ts.snap b/packages/typescript-client/test/__snapshots__/client.test.ts.snap index 7c8bb2bc64..8ec47c9336 100644 --- a/packages/typescript-client/test/__snapshots__/client.test.ts.snap +++ b/packages/typescript-client/test/__snapshots__/client.test.ts.snap @@ -1,3 +1,5 @@ // Vitest Snapshot v1, https://vitest.dev/guide/snapshot.html -exports[`Shape > should throw on a reserved parameter 1`] = `[ReservedParamError: Cannot use reserved Electric parameter names in custom params: live]`; +exports[`Shape (liveSSE=false) > should throw on a reserved parameter 1`] = `[ReservedParamError: Cannot use reserved Electric parameter names in custom params: live]`; + +exports[`Shape (liveSSE=true) > should throw on a reserved parameter 1`] = `[ReservedParamError: Cannot use reserved Electric parameter names in custom params: live]`; diff --git a/packages/typescript-client/test/cache.test.ts b/packages/typescript-client/test/cache.test.ts index 89fd17166b..9e3add02d3 100644 --- a/packages/typescript-client/test/cache.test.ts +++ b/packages/typescript-client/test/cache.test.ts @@ -66,7 +66,7 @@ const it = testWithIssuesTable.extend<{ }, }) -describe(`HTTP Proxy Cache`, { timeout: 30000 }, () => { +describe(`HTTP Proxy Cache`, () => { it(`should get a short max-age cache-conrol header in live mode`, async ({ insertIssues, proxyCacheBaseUrl, @@ -269,10 +269,10 @@ describe(`HTTP Proxy Cache`, { timeout: 30000 }, () => { expect(staleRes.status).toBe(200) expect(getCacheStatus(staleRes)).toBe(CacheStatus.REVALIDATED) - }) + }, 10_000) }) -describe(`HTTP Initial Data Caching`, { timeout: 30000 }, () => { +describe(`HTTP Initial Data Caching`, () => { it(`tells client to resync when shape is out of scope`, async ({ proxyCacheBaseUrl, issuesTableUrl, diff --git a/packages/typescript-client/test/client.test.ts b/packages/typescript-client/test/client.test.ts index de09fd0d3e..039702803c 100644 --- a/packages/typescript-client/test/client.test.ts +++ b/packages/typescript-client/test/client.test.ts @@ -9,6 +9,11 @@ import { resolveValue } from '../src' const BASE_URL = inject(`baseUrl`) +const fetchAndSse = [ + { experimentalLiveSse: false }, + { experimentalLiveSse: true }, +] + /** * Mocks the browser's visibility API * and returns `pause` and `resume` functions @@ -42,1020 +47,1057 @@ function mockVisibilityApi() { } } -describe(`Shape`, () => { - it(`should sync an empty shape`, async ({ issuesTableUrl, aborter }) => { - const start = Date.now() - const shapeStream = new ShapeStream({ - url: `${BASE_URL}/v1/shape`, - params: { - table: issuesTableUrl, - }, - signal: aborter.signal, +describe.for(fetchAndSse)( + `Shape (liveSSE=$experimentalLiveSse)`, + ({ experimentalLiveSse }) => { + it(`should sync an empty shape`, 
async ({ issuesTableUrl, aborter }) => { + const start = Date.now() + const shapeStream = new ShapeStream({ + url: `${BASE_URL}/v1/shape`, + params: { + table: issuesTableUrl, + }, + signal: aborter.signal, + experimentalLiveSse, + }) + const shape = new Shape(shapeStream) + + expect(await shape.value).toEqual(new Map()) + expect(await shape.rows).toEqual([]) + expect(shape.lastSyncedAt()).toBeGreaterThanOrEqual(start) + expect(shape.lastSyncedAt()).toBeLessThanOrEqual(Date.now()) + expect(shape.lastSynced()).toBeLessThanOrEqual(Date.now() - start) }) - const shape = new Shape(shapeStream) - expect(await shape.value).toEqual(new Map()) - expect(await shape.rows).toEqual([]) - expect(shape.lastSyncedAt()).toBeGreaterThanOrEqual(start) - expect(shape.lastSyncedAt()).toBeLessThanOrEqual(Date.now()) - expect(shape.lastSynced()).toBeLessThanOrEqual(Date.now() - start) - }) + it(`should throw on a reserved parameter`, async ({ aborter }) => { + expect(() => { + const shapeStream = new ShapeStream({ + url: `${BASE_URL}/v1/shape`, + params: { + table: `foo`, + // @ts-expect-error should not allow reserved parameters + live: `false`, + }, + experimentalLiveSse, + signal: aborter.signal, + }) + new Shape(shapeStream) + }).toThrowErrorMatchingSnapshot() + }) + + it(`should notify with the initial value`, async ({ + issuesTableUrl, + insertIssues, + aborter, + }) => { + const [id] = await insertIssues({ title: `test title` }) - it(`should throw on a reserved parameter`, async ({ aborter }) => { - expect(() => { + const start = Date.now() const shapeStream = new ShapeStream({ url: `${BASE_URL}/v1/shape`, params: { - table: `foo`, - // @ts-expect-error should not allow reserved parameters - live: `false`, + table: issuesTableUrl, }, signal: aborter.signal, + experimentalLiveSse, }) - new Shape(shapeStream) - }).toThrowErrorMatchingSnapshot() - }) - - it(`should notify with the initial value`, async ({ - issuesTableUrl, - insertIssues, - aborter, - }) => { - const [id] = await insertIssues({ title: `test title` }) - - const start = Date.now() - const shapeStream = new ShapeStream({ - url: `${BASE_URL}/v1/shape`, - params: { - table: issuesTableUrl, - }, - signal: aborter.signal, - }) - const shape = new Shape(shapeStream) + const shape = new Shape(shapeStream) - const rows = await new Promise((resolve) => { - shape.subscribe(({ rows }) => resolve(rows)) - }) + const rows = await new Promise((resolve) => { + shape.subscribe(({ rows }) => resolve(rows)) + }) - expect(rows).toEqual([{ id: id, title: `test title`, priority: 10 }]) - expect(shape.lastSyncedAt()).toBeGreaterThanOrEqual(start) - expect(shape.lastSyncedAt()).toBeLessThanOrEqual(Date.now()) - expect(shape.lastSynced()).toBeLessThanOrEqual(Date.now() - start) - }) - - it(`should continually sync a shape/table`, async ({ - issuesTableUrl, - insertIssues, - deleteIssue, - updateIssue, - waitForIssues, - aborter, - }) => { - const [id] = await insertIssues({ title: `test title` }) - - const expectedValue1 = [ - { - id: id, - title: `test title`, - priority: 10, - }, - ] - - const start = Date.now() - const shapeStream = new ShapeStream({ - url: `${BASE_URL}/v1/shape`, - params: { - table: issuesTableUrl, - }, - signal: aborter.signal, + expect(rows).toEqual([{ id: id, title: `test title`, priority: 10 }]) + expect(shape.lastSyncedAt()).toBeGreaterThanOrEqual(start) + expect(shape.lastSyncedAt()).toBeLessThanOrEqual(Date.now()) + expect(shape.lastSynced()).toBeLessThanOrEqual(Date.now() - start) }) - const shape = new Shape(shapeStream) - const rows = 
await shape.rows - expect(rows).toEqual(expectedValue1) - expect(shape.lastSyncedAt()).toBeGreaterThanOrEqual(start) - expect(shape.lastSyncedAt()).toBeLessThanOrEqual(Date.now()) - expect(shape.lastSynced()).toBeLessThanOrEqual(Date.now() - start) + it(`should continually sync a shape/table`, async ({ + issuesTableUrl, + insertIssues, + deleteIssue, + updateIssue, + waitForIssues, + aborter, + }) => { + const [id] = await insertIssues({ title: `test title` }) + + const expectedValue1 = [ + { + id: id, + title: `test title`, + priority: 10, + }, + ] - await sleep(105) - expect(shape.lastSynced()).toBeGreaterThanOrEqual(100) + const start = Date.now() + const shapeStream = new ShapeStream({ + url: `${BASE_URL}/v1/shape`, + params: { + table: issuesTableUrl, + }, + signal: aborter.signal, + experimentalLiveSse, + }) + const shape = new Shape(shapeStream) + const rows = await shape.rows - // FIXME: might get notified before all changes are submitted - const intermediate = Date.now() - const hasNotified = new Promise((resolve) => { - shape.subscribe(resolve) - }) - const [id2] = await insertIssues({ title: `other title` }) - const [id3] = await insertIssues({ title: `other title2` }) - await deleteIssue({ id: id3, title: `other title2` }) - // Test an update too because we're sending patches that should be correctly merged in - await updateIssue({ id: id2, title: `new title` }) - await waitForIssues({ numChangesExpected: 5 }) - await vi.waitUntil(() => hasNotified) - - const expectedValue2 = [ - ...expectedValue1, - { - id: id2, - title: `new title`, - priority: 10, - }, - ] - - await vi.waitFor(() => expect(shape.currentRows).toEqual(expectedValue2)) - expect(shape.lastSyncedAt()).toBeGreaterThanOrEqual(intermediate) - expect(shape.lastSyncedAt()).toBeLessThanOrEqual(Date.now()) - expect(shape.lastSynced()).toBeLessThanOrEqual(Date.now() - intermediate) - - shape.unsubscribeAll() - }) - - it(`should resync from scratch on a shape rotation`, async ({ - issuesTableUrl, - insertIssues, - deleteIssue, - waitForIssues, - clearIssuesShape, - aborter, - }) => { - const id1 = uuidv4() - const id2 = uuidv4() - await insertIssues({ id: id1, title: `foo1` }) - - const expectedValue1 = [ - { - id: id1, - title: `foo1`, - priority: 10, - }, - ] - - const expectedValue2 = [ - { - id: id2, - title: `foo2`, - priority: 10, - }, - ] - - const start = Date.now() - let rotationTime: number = Infinity - let fetchPausePromise = Promise.resolve() - const fetchWrapper = async (...args: Parameters) => { - await fetchPausePromise - return await fetch(...args) - } - - const shapeStream = new ShapeStream({ - url: `${BASE_URL}/v1/shape`, - params: { - table: issuesTableUrl, - }, - signal: aborter.signal, - fetchClient: fetchWrapper, - }) - const shape = new Shape(shapeStream) - let dataUpdateCount = 0 - await new Promise((resolve, reject) => { - setTimeout(() => reject(`Timed out waiting for data changes`), 1000) - shape.subscribe(async ({ rows }) => { - dataUpdateCount++ - if (dataUpdateCount === 1) { - expect(rows).toEqual(expectedValue1) - expect(shape.lastSynced()).toBeLessThanOrEqual(Date.now() - start) - - // clear the shape and modify the data after the initial request - fetchPausePromise = Promise.resolve().then(async () => { - await deleteIssue({ id: id1, title: `foo1` }) - await insertIssues({ id: id2, title: `foo2` }) - await waitForIssues({ numChangesExpected: 3 }) - await clearIssuesShape(shapeStream.shapeHandle) - }) + expect(rows).toEqual(expectedValue1) + 
expect(shape.lastSyncedAt()).toBeGreaterThanOrEqual(start) + expect(shape.lastSyncedAt()).toBeLessThanOrEqual(Date.now()) + expect(shape.lastSynced()).toBeLessThanOrEqual(Date.now() - start) - rotationTime = Date.now() - return - } else if (dataUpdateCount === 2) { - expect(rows).toEqual(expectedValue2) - expect(shape.lastSynced()).toBeLessThanOrEqual( - Date.now() - rotationTime - ) - return resolve() - } - throw new Error(`Received more data updates than expected`) + await sleep(105) + expect(shape.lastSynced()).toBeGreaterThanOrEqual(100) + + // FIXME: might get notified before all changes are submitted + const intermediate = Date.now() + const hasNotified = new Promise((resolve) => { + shape.subscribe(resolve) }) - }) - }) - - it(`should notify subscribers when the value changes`, async ({ - issuesTableUrl, - insertIssues, - aborter, - }) => { - const [id] = await insertIssues({ title: `test title` }) - - const start = Date.now() - const shapeStream = new ShapeStream({ - url: `${BASE_URL}/v1/shape`, - params: { - table: issuesTableUrl, - }, - signal: aborter.signal, - }) - const shape = new Shape(shapeStream) + const [id2] = await insertIssues({ title: `other title` }) + const [id3] = await insertIssues({ title: `other title2` }) + await deleteIssue({ id: id3, title: `other title2` }) + // Test an update too because we're sending patches that should be correctly merged in + await updateIssue({ id: id2, title: `new title` }) + await waitForIssues({ numChangesExpected: 5 }) + await vi.waitUntil(() => hasNotified) + + const expectedValue2 = [ + ...expectedValue1, + { + id: id2, + title: `new title`, + priority: 10, + }, + ] - const hasNotified = new Promise((resolve) => { - shape.subscribe(({ rows }) => resolve(rows)) - }) + await vi.waitFor(() => expect(shape.currentRows).toEqual(expectedValue2)) + expect(shape.lastSyncedAt()).toBeGreaterThanOrEqual(intermediate) + expect(shape.lastSyncedAt()).toBeLessThanOrEqual(Date.now()) + expect(shape.lastSynced()).toBeLessThanOrEqual(Date.now() - intermediate) - const [id2] = await insertIssues({ title: `other title` }) - - const value = await hasNotified - const expectedValue = [ - { - id: id, - title: `test title`, - priority: 10, - }, - { - id: id2, - title: `other title`, - priority: 10, - }, - ] - expect(value).toEqual(expectedValue) - expect(shape.lastSyncedAt()).toBeGreaterThanOrEqual(start) - expect(shape.lastSyncedAt()).toBeLessThanOrEqual(Date.now()) - expect(shape.lastSynced()).toBeLessThanOrEqual(Date.now() - start) - - shape.unsubscribeAll() - }) - - it(`should support unsubscribe`, async ({ issuesTableUrl, aborter }) => { - const shapeStream = new ShapeStream({ - url: `${BASE_URL}/v1/shape`, - params: { - table: issuesTableUrl, - }, - signal: aborter.signal, - }) - await waitForFetch(shapeStream) - const shape = new Shape(shapeStream) - - const subFn = vi.fn((_) => void 0) - - const unsubscribeFn = shape.subscribe(subFn) - unsubscribeFn() - - expect(shape.numSubscribers).toBe(0) - expect(subFn).not.toHaveBeenCalled() - }) - - it(`should expose connection status`, async ({ issuesTableUrl }) => { - const aborter = new AbortController() - const shapeStream = new ShapeStream({ - url: `${BASE_URL}/v1/shape`, - params: { - table: issuesTableUrl, - }, - signal: aborter.signal, + shape.unsubscribeAll() }) - // give some time for the initial fetch to complete - await waitForFetch(shapeStream) - expect(shapeStream.isConnected()).true - - const shape = new Shape(shapeStream) - await shape.rows - - expect(shapeStream.isConnected()).true - - // 
Abort the shape stream and check connectivity status - aborter.abort() - await vi.waitFor(() => expect(shapeStream.isConnected()).false) - }) - - it(`should set isConnected to false on fetch error and back on true when fetch succeeds again`, async ({ - issuesTableUrl, - aborter, - }) => { - let fetchShouldFail = false - const shapeStream = new ShapeStream({ - url: `${BASE_URL}/v1/shape`, - params: { - table: issuesTableUrl, - }, - signal: aborter.signal, - fetchClient: async (_input, _init) => { - if (fetchShouldFail) - throw new FetchError( - 500, - `Artifical fetch error.`, - undefined, - {}, - ``, - undefined - ) - await sleep(50) - return new Response( - JSON.stringify([{ headers: { control: `up-to-date` } }]), - { - status: 200, - headers: new Headers({ - [`electric-offset`]: `0_0`, - [`electric-handle`]: `foo`, - [`electric-schema`]: ``, - [`electric-cursor`]: `123`, - }), + it(`should resync from scratch on a shape rotation`, async ({ + issuesTableUrl, + insertIssues, + deleteIssue, + waitForIssues, + clearIssuesShape, + aborter, + }) => { + const id1 = uuidv4() + const id2 = uuidv4() + await insertIssues({ id: id1, title: `foo1` }) + + const expectedValue1 = [ + { + id: id1, + title: `foo1`, + priority: 10, + }, + ] + + const expectedValue2 = [ + { + id: id2, + title: `foo2`, + priority: 10, + }, + ] + + const start = Date.now() + let rotationTime: number = Infinity + let fetchPausePromise = Promise.resolve() + const fetchWrapper = async (...args: Parameters) => { + await fetchPausePromise + return await fetch(...args) + } + + const shapeStream = new ShapeStream({ + url: `${BASE_URL}/v1/shape`, + params: { + table: issuesTableUrl, + }, + signal: aborter.signal, + fetchClient: fetchWrapper, + experimentalLiveSse, + }) + const shape = new Shape(shapeStream) + let dataUpdateCount = 0 + await new Promise((resolve, reject) => { + setTimeout(() => reject(`Timed out waiting for data changes`), 1000) + shape.subscribe(async ({ rows }) => { + dataUpdateCount++ + if (dataUpdateCount === 1) { + expect(rows).toEqual(expectedValue1) + expect(shape.lastSynced()).toBeLessThanOrEqual(Date.now() - start) + + // clear the shape and modify the data after the initial request + fetchPausePromise = Promise.resolve().then(async () => { + await deleteIssue({ id: id1, title: `foo1` }) + await insertIssues({ id: id2, title: `foo2` }) + await waitForIssues({ numChangesExpected: 3 }) + await clearIssuesShape(shapeStream.shapeHandle) + }) + + rotationTime = Date.now() + return + } else if (dataUpdateCount === 2) { + expect(rows).toEqual(expectedValue2) + expect(shape.lastSynced()).toBeLessThanOrEqual( + Date.now() - rotationTime + ) + return resolve() } - ) - }, + throw new Error(`Received more data updates than expected`) + }) + }) }) - const unsubscribe = shapeStream.subscribe(() => unsubscribe()) + it(`should notify subscribers when the value changes`, async ({ + issuesTableUrl, + insertIssues, + aborter, + }) => { + const [id] = await insertIssues({ title: `test title` }) - await vi.waitFor(() => expect(shapeStream.isConnected()).true) + const start = Date.now() + const shapeStream = new ShapeStream({ + url: `${BASE_URL}/v1/shape`, + params: { + table: issuesTableUrl, + }, + signal: aborter.signal, + experimentalLiveSse, + }) + const shape = new Shape(shapeStream) - // Now make fetch fail and check the status - fetchShouldFail = true - await vi.waitFor(() => expect(shapeStream.isConnected()).false) + const hasNotified = new Promise((resolve) => { + shape.subscribe(({ rows }) => resolve(rows)) + }) - 
fetchShouldFail = false - await vi.waitFor(() => expect(shapeStream.isConnected()).true) - }) + const [id2] = await insertIssues({ title: `other title` }) - it(`should set isConnected to false when the stream is paused an back on true when the fetch succeeds again`, async ({ - issuesTableUrl, - aborter, - }) => { - const { pause, resume } = mockVisibilityApi() + const value = await hasNotified + const expectedValue = [ + { + id: id, + title: `test title`, + priority: 10, + }, + { + id: id2, + title: `other title`, + priority: 10, + }, + ] + expect(value).toEqual(expectedValue) + expect(shape.lastSyncedAt()).toBeGreaterThanOrEqual(start) + expect(shape.lastSyncedAt()).toBeLessThanOrEqual(Date.now()) + expect(shape.lastSynced()).toBeLessThanOrEqual(Date.now() - start) - const shapeStream = new ShapeStream({ - url: `${BASE_URL}/v1/shape`, - params: { - table: issuesTableUrl, - }, - signal: aborter.signal, + shape.unsubscribeAll() }) - const unsubscribe = shapeStream.subscribe(() => unsubscribe()) - - await vi.waitFor(() => expect(shapeStream.isConnected()).true) - - pause() - await vi.waitFor(() => expect(shapeStream.isConnected()).false) - - resume() - await vi.waitFor(() => expect(shapeStream.isConnected()).true) - }) - - it(`should support pausing the stream and resuming it`, async ({ - issuesTableUrl, - insertIssues, - aborter, - }) => { - const { pause, resume } = mockVisibilityApi() - const shapeStream = new ShapeStream({ - url: `${BASE_URL}/v1/shape`, - params: { - table: issuesTableUrl, - }, - signal: aborter.signal, + it(`should support unsubscribe`, async ({ issuesTableUrl, aborter }) => { + const shapeStream = new ShapeStream({ + url: `${BASE_URL}/v1/shape`, + params: { + table: issuesTableUrl, + }, + experimentalLiveSse, + signal: aborter.signal, + }) + await waitForFetch(shapeStream) + const shape = new Shape(shapeStream) + + const subFn = vi.fn((_) => void 0) + + const unsubscribeFn = shape.subscribe(subFn) + unsubscribeFn() + + expect(shape.numSubscribers).toBe(0) + expect(subFn).not.toHaveBeenCalled() }) - const shape = new Shape(shapeStream) - function makePromise<T>() { - let resolve: (value: T) => void = () => {} + it(`should expose connection status`, async ({ issuesTableUrl }) => { + const aborter = new AbortController() + const shapeStream = new ShapeStream({ + url: `${BASE_URL}/v1/shape`, + params: { + table: issuesTableUrl, + }, + signal: aborter.signal, + experimentalLiveSse, + }) + + // give some time for the initial fetch to complete + await waitForFetch(shapeStream) + expect(shapeStream.isConnected()).true + + const shape = new Shape(shapeStream) + await shape.rows + + expect(shapeStream.isConnected()).true - const promise = new Promise<T>((res) => { - resolve = res + // Abort the shape stream and check connectivity status + aborter.abort() + await vi.waitFor(() => expect(shapeStream.isConnected()).false) + }) + + it(`should set isConnected to false on fetch error and back on true when fetch succeeds again`, async ({ + issuesTableUrl, + aborter, + }) => { + let fetchShouldFail = false + const shapeStream = new ShapeStream({ + url: `${BASE_URL}/v1/shape`, + params: { + table: issuesTableUrl, + }, + signal: aborter.signal, + fetchClient: async (_input, _init) => { + if (fetchShouldFail) + throw new FetchError( + 500, + `Artificial fetch error.`, + undefined, + {}, + ``, + undefined + ) + await sleep(50) + return new Response( + JSON.stringify([{ headers: { control: `up-to-date` } }]), + { + status: 200, + headers: new Headers({ + [`electric-offset`]: `0_0`, + 
[`electric-handle`]: `foo`, + [`electric-schema`]: ``, + [`electric-cursor`]: `123`, + }), + } + ) + }, + experimentalLiveSse, }) - return { - promise, - resolve, - } - } + const unsubscribe = shapeStream.subscribe(() => unsubscribe()) - const promises = [makePromise(), makePromise()] - let i = 0 + await vi.waitFor(() => expect(shapeStream.isConnected()).true) - shape.subscribe(({ rows }) => { - const prom = promises[i] - if (prom) { - prom.resolve(rows) - } - i++ + // Now make fetch fail and check the status + fetchShouldFail = true + await vi.waitFor(() => expect(shapeStream.isConnected()).false) + + fetchShouldFail = false + await vi.waitFor(() => expect(shapeStream.isConnected()).true) }) - // Insert an issue - const [id] = await insertIssues({ title: `test title` }) + it(`should set isConnected to false when the stream is paused and back on true when the fetch succeeds again`, async ({ + issuesTableUrl, + aborter, + }) => { + const { pause, resume } = mockVisibilityApi() - const expectedValue = [ - { - id: id, - title: `test title`, - priority: 10, - }, - ] + const shapeStream = new ShapeStream({ + url: `${BASE_URL}/v1/shape`, + params: { + table: issuesTableUrl, + }, + signal: aborter.signal, + experimentalLiveSse, + }) - // Wait for the update to arrive - const value = await promises[0].promise + const unsubscribe = shapeStream.subscribe(() => unsubscribe()) - expect(value).toEqual(expectedValue) + await vi.waitFor(() => expect(shapeStream.isConnected()).true) - pause() - await vi.waitFor(() => expect(shapeStream.isConnected()).false) + pause() + await vi.waitFor(() => expect(shapeStream.isConnected()).false) - // Now that the stream is paused, insert another issue - const [id2] = await insertIssues({ title: `other title` }) + resume() + await vi.waitFor(() => expect(shapeStream.isConnected()).true) + }) - // The update should not arrive while paused - const timeout = new Promise((resolve) => - setTimeout(() => resolve(`timeout`), 100) - ) - await expect(Promise.race([promises[1].promise, timeout])).resolves.toBe( - `timeout` - ) + it(`should support pausing the stream and resuming it`, async ({ + issuesTableUrl, + insertIssues, + aborter, + }) => { + const { pause, resume } = mockVisibilityApi() + const shapeStream = new ShapeStream({ + url: `${BASE_URL}/v1/shape`, + params: { + table: issuesTableUrl, + }, + signal: aborter.signal, + experimentalLiveSse, + }) + const shape = new Shape(shapeStream) + + function makePromise<T>() { + let resolve: (value: T) => void = () => {} - // Resume the stream - resume() - - // Now the update should arrive - const value2 = await promises[1].promise - expect(value2).toEqual([ - ...expectedValue, - { - id: id2, - title: `other title`, - priority: 10, - }, - ]) - }) - - it(`should not throw error if an error handler is provided`, async ({ - issuesTableUrl, - aborter, - }) => { - const mockErrorHandler = vi.fn() - const shapeStream = new ShapeStream({ - url: `${BASE_URL}/v1/shape`, - params: { - table: issuesTableUrl, - }, - signal: aborter.signal, - fetchClient: async (_input, _init) => { - return new Response(undefined, { - status: 401, + const promise = new Promise<T>((res) => { + resolve = res }) - }, - onError: mockErrorHandler, - }) - await waitForFetch(shapeStream) - expect(mockErrorHandler.mock.calls.length).toBe(1) - expect(mockErrorHandler.mock.calls[0][0]).toBeInstanceOf(FetchError) - }) - - it(`should retry on error if error handler returns modified params`, async ({ - issuesTableUrl, - aborter, - }) => { - // This test creates a shapestream but 
provides wrong query params - // the fetch client therefore returns a 401 status code - // the custom error handler handles it by correcting the query param - // after which the fetch succeeds - - const mockErrorHandler = vi.fn().mockImplementation((error) => { - if (error instanceof FetchError && error.status === 401) { return { - params: { - todo: `pass`, - }, + promise, + resolve, } } + + const promises = [makePromise(), makePromise()] + let i = 0 + + shape.subscribe(({ rows }) => { + const prom = promises[i] + if (prom) { + prom.resolve(rows) + } + i++ + }) + + // Insert an issue + const [id] = await insertIssues({ title: `test title` }) + + const expectedValue = [ + { + id: id, + title: `test title`, + priority: 10, + }, + ] + + // Wait for the update to arrive + const value = await promises[0].promise + + expect(value).toEqual(expectedValue) + + pause() + await vi.waitFor(() => expect(shapeStream.isConnected()).false) + + // Now that the stream is paused, insert another issue + const [id2] = await insertIssues({ title: `other title` }) + + // The update should not arrive while paused + const timeout = new Promise((resolve) => + setTimeout(() => resolve(`timeout`), 100) + ) + await expect(Promise.race([promises[1].promise, timeout])).resolves.toBe( + `timeout` + ) + + // Resume the stream + resume() + + // Now the update should arrive + const value2 = await promises[1].promise + expect(value2).toEqual([ + ...expectedValue, + { + id: id2, + title: `other title`, + priority: 10, + }, + ]) }) - const shapeStream = new ShapeStream({ - url: `${BASE_URL}/v1/shape`, - params: { - table: issuesTableUrl, - todo: `fail`, - }, - signal: aborter.signal, - fetchClient: async (input, _init) => { - const url = new URL(input as string | URL) - if (url.searchParams.get(`todo`) === `fail`) { + it(`should not throw error if an error handler is provided`, async ({ + issuesTableUrl, + aborter, + }) => { + const mockErrorHandler = vi.fn() + const shapeStream = new ShapeStream({ + url: `${BASE_URL}/v1/shape`, + params: { + table: issuesTableUrl, + }, + signal: aborter.signal, + fetchClient: async (_input, _init) => { return new Response(undefined, { status: 401, }) - } + }, + experimentalLiveSse, + onError: mockErrorHandler, + }) - return new Response( - JSON.stringify([{ headers: { control: `up-to-date` } }]), - { status: 200 } - ) - }, - onError: mockErrorHandler, + await waitForFetch(shapeStream) + expect(mockErrorHandler.mock.calls.length).toBe(1) + expect(mockErrorHandler.mock.calls[0][0]).toBeInstanceOf(FetchError) }) - await waitForFetch(shapeStream) - expect(mockErrorHandler.mock.calls.length).toBe(1) - expect(mockErrorHandler.mock.calls[0][0]).toBeInstanceOf(FetchError) - }) - - it(`should retry on error if error handler returns modified headers`, async ({ - issuesTableUrl, - aborter, - }) => { - // This test creates a shapestream but provides invalid auth credentials - // the fetch client therefore returns a 401 status code - // the custom error handler handles it by replacing the credentials with valid credentials - // after which the fetch succeeds - - const mockErrorHandler = vi.fn().mockImplementation((error) => { - if (error instanceof FetchError && error.status === 401) { - return { - headers: { - Authorization: `valid credentials`, - }, + it(`should retry on error if error handler returns modified params`, async ({ + issuesTableUrl, + aborter, + }) => { + // This test creates a shapestream but provides wrong query params + // the fetch client therefore returns a 401 status code + // the 
custom error handler handles it by correcting the query param + // after which the fetch succeeds + + const mockErrorHandler = vi.fn().mockImplementation((error) => { + if (error instanceof FetchError && error.status === 401) { + return { + params: { + todo: `pass`, + }, + } } - } - }) + }) - const shapeStream = new ShapeStream({ - url: `${BASE_URL}/v1/shape`, - params: { - table: issuesTableUrl, - }, - headers: { - Authorization: `invalid credentials`, - }, - signal: aborter.signal, - fetchClient: async (input, init) => { - const headers = init?.headers as Record - if (headers && headers.Authorization === `valid credentials`) { - return fetch(input, init) - } + const shapeStream = new ShapeStream({ + url: `${BASE_URL}/v1/shape`, + params: { + table: issuesTableUrl, + todo: `fail`, + }, + signal: aborter.signal, + fetchClient: async (input, _init) => { + const url = new URL(input instanceof Request ? input.url : input) + if (url.searchParams.get(`todo`) === `fail`) { + return new Response(undefined, { + status: 401, + }) + } - return new Response(undefined, { - status: 401, - }) - }, - onError: mockErrorHandler, - }) + return new Response( + JSON.stringify([{ headers: { control: `up-to-date` } }]), + { status: 200 } + ) + }, + experimentalLiveSse, + onError: mockErrorHandler, + }) - await waitForFetch(shapeStream) - expect(mockErrorHandler.mock.calls.length).toBe(1) - expect(mockErrorHandler.mock.calls[0][0]).toBeInstanceOf(FetchError) - }) - - it(`should support async error handler`, async ({ - issuesTableUrl, - aborter, - }) => { - let authChanged: () => void - const authChangePromise = new Promise((res) => { - authChanged = res + await waitForFetch(shapeStream) + expect(mockErrorHandler.mock.calls.length).toBe(1) + expect(mockErrorHandler.mock.calls[0][0]).toBeInstanceOf(FetchError) }) - const mockErrorHandler = vi.fn().mockImplementation(async (error) => { - if (error instanceof FetchError && error.status === 401) { - authChanged() - return { - headers: { - Authorization: `valid credentials`, - }, + + it(`should retry on error if error handler returns modified headers`, async ({ + issuesTableUrl, + aborter, + }) => { + // This test creates a shapestream but provides invalid auth credentials + // the fetch client therefore returns a 401 status code + // the custom error handler handles it by replacing the credentials with valid credentials + // after which the fetch succeeds + + const mockErrorHandler = vi.fn().mockImplementation((error) => { + if (error instanceof FetchError && error.status === 401) { + return { + headers: { + Authorization: `valid credentials`, + }, + } } - } + }) + + const shapeStream = new ShapeStream({ + url: `${BASE_URL}/v1/shape`, + params: { + table: issuesTableUrl, + }, + headers: { + Authorization: `invalid credentials`, + }, + signal: aborter.signal, + fetchClient: async (input, init) => { + const headers = init?.headers as Record + if (headers && headers.Authorization === `valid credentials`) { + return fetch(input, init) + } + + return new Response(undefined, { + status: 401, + }) + }, + experimentalLiveSse, + onError: mockErrorHandler, + }) + + await waitForFetch(shapeStream) + expect(mockErrorHandler.mock.calls.length).toBe(1) + expect(mockErrorHandler.mock.calls[0][0]).toBeInstanceOf(FetchError) }) - const shapeStream = new ShapeStream({ - url: `${BASE_URL}/v1/shape`, - params: { - table: issuesTableUrl, - }, - headers: { - Authorization: `invalid credentials`, - }, - signal: aborter.signal, - fetchClient: async (input, init) => { - const headers = 
init?.headers as Record - if (headers && headers.Authorization === `valid credentials`) { - return fetch(input, init) + it(`should support async error handler`, async ({ + issuesTableUrl, + aborter, + }) => { + let authChanged: () => void + const authChangePromise = new Promise((res) => { + authChanged = res + }) + const mockErrorHandler = vi.fn().mockImplementation(async (error) => { + if (error instanceof FetchError && error.status === 401) { + authChanged() + return { + headers: { + Authorization: `valid credentials`, + }, + } } + }) - return new Response(undefined, { - status: 401, - }) - }, - onError: mockErrorHandler, - }) + const shapeStream = new ShapeStream({ + url: `${BASE_URL}/v1/shape`, + params: { + table: issuesTableUrl, + }, + headers: { + Authorization: `invalid credentials`, + }, + signal: aborter.signal, + fetchClient: async (input, init) => { + const headers = init?.headers as Record + if (headers && headers.Authorization === `valid credentials`) { + return fetch(input, init) + } - await waitForFetch(shapeStream) - expect(mockErrorHandler.mock.calls.length).toBe(1) - expect(mockErrorHandler.mock.calls[0][0]).toBeInstanceOf(FetchError) - expect(shapeStream.isConnected()).toBe(false) - - await authChangePromise - // give some time for the error handler to modify the authorization header - await vi.waitFor(() => expect(shapeStream.isConnected()).true) - }) - - it(`should stop fetching and report an error if response is missing required headers`, async ({ - issuesTableUrl, - aborter, - }) => { - let url: string = `` - let error1: Error, error2: Error - - const shapeStream = new ShapeStream({ - url: `${BASE_URL}/v1/shape`, - params: { - table: issuesTableUrl, - }, - signal: aborter.signal, - fetchClient: async (input, _init) => { - url = input.toString() - const headers = new Headers() - headers.set(`electric-offset`, `0_0`) - return new Response(``, { status: 200, headers }) - }, - onError: (err) => { - error1 = err - }, - }) + return new Response(undefined, { + status: 401, + }) + }, + experimentalLiveSse, + onError: mockErrorHandler, + }) - const unsub = shapeStream.subscribe(() => unsub()) - expect(shapeStream.isConnected()).false + await waitForFetch(shapeStream) + expect(mockErrorHandler.mock.calls.length).toBe(1) + expect(mockErrorHandler.mock.calls[0][0]).toBeInstanceOf(FetchError) + expect(shapeStream.isConnected()).toBe(false) - await vi.waitFor(() => { - const expectedErrorMessage = new MissingHeadersError(url, [ - `electric-handle`, - `electric-schema`, - ]).message - expect(error1!.message).equals(expectedErrorMessage) - expect((shapeStream.error as Error).message).equals(expectedErrorMessage) + await authChangePromise + // give some time for the error handler to modify the authorization header + await vi.waitFor(() => expect(shapeStream.isConnected()).true) }) - expect(shapeStream.isConnected()).false - - // Also check that electric-cursor is a required header for responses to live queries - const shapeStreamLive = new ShapeStream({ - url: `${BASE_URL}/v1/shape?live=true`, - params: { - table: issuesTableUrl, - }, - signal: aborter.signal, - fetchClient: async (input, _init) => { - url = input.toString() - const headers = new Headers() - headers.set(`electric-offset`, `0_0`) - return new Response(undefined, { status: 200, headers }) - }, - onError: (err) => { - error2 = err - }, - }) + it(`should stop fetching and report an error if response is missing required headers`, async ({ + issuesTableUrl, + aborter, + }) => { + let url: string = `` + let error1: Error, 
error2: Error - const unsubLive = shapeStreamLive.subscribe(() => unsubLive()) - expect(shapeStreamLive.isConnected()).false - - await vi.waitFor(() => { - const expectedErrorMessageLive = new MissingHeadersError(url, [ - `electric-handle`, - `electric-cursor`, - ]).message - expect(error2!.message).equals(expectedErrorMessageLive) - expect((shapeStreamLive.error as Error).message).equals( - expectedErrorMessageLive - ) - }) - expect(shapeStreamLive.isConnected()).false - }) - - it(`should set isConnected to false after fetch if not subscribed`, async ({ - issuesTableUrl, - aborter, - }) => { - const shapeStream = new ShapeStream({ - url: `${BASE_URL}/v1/shape`, - params: { - table: issuesTableUrl, - }, - subscribe: false, - signal: aborter.signal, + const shapeStream = new ShapeStream({ + url: `${BASE_URL}/v1/shape`, + params: { + table: issuesTableUrl, + }, + signal: aborter.signal, + fetchClient: async (input, _init) => { + url = input.toString() + const headers = new Headers() + headers.set(`electric-offset`, `0_0`) + return new Response(``, { status: 200, headers }) + }, + onError: (err) => { + error1 = err + }, + experimentalLiveSse, + }) + + const unsub = shapeStream.subscribe(() => unsub()) + expect(shapeStream.isConnected()).false + + await vi.waitFor(() => { + const expectedErrorMessage = new MissingHeadersError(url, [ + `electric-handle`, + `electric-schema`, + ]).message + expect(error1!.message).equals(expectedErrorMessage) + expect((shapeStream.error as Error).message).equals( + expectedErrorMessage + ) + }) + + expect(shapeStream.isConnected()).false + + // Also check that electric-cursor is a required header for responses to live queries + const shapeStreamLive = new ShapeStream({ + url: `${BASE_URL}/v1/shape?live=true`, + params: { + table: issuesTableUrl, + }, + signal: aborter.signal, + fetchClient: async (input, _init) => { + url = input.toString() + const headers = new Headers() + headers.set(`electric-offset`, `0_0`) + return new Response(undefined, { status: 200, headers }) + }, + onError: (err) => { + error2 = err + }, + experimentalLiveSse, + }) + + const unsubLive = shapeStreamLive.subscribe(() => unsubLive()) + expect(shapeStreamLive.isConnected()).false + + await vi.waitFor(() => { + const expectedErrorMessageLive = new MissingHeadersError(url, [ + `electric-handle`, + `electric-cursor`, + ]).message + expect(error2!.message).equals(expectedErrorMessageLive) + expect((shapeStreamLive.error as Error).message).equals( + expectedErrorMessageLive + ) + }) + expect(shapeStreamLive.isConnected()).false }) - await waitForFetch(shapeStream) - - // We should no longer be connected because - // the initial fetch finished and we've not subscribed to changes - await vi.waitFor(() => expect(shapeStream.isConnected()).false) - }) - - it(`should expose isLoading status`, async ({ issuesTableUrl, aborter }) => { - const shapeStream = new ShapeStream({ - url: `${BASE_URL}/v1/shape`, - params: { - table: issuesTableUrl, - }, - signal: aborter.signal, - fetchClient: async (input, init) => { - await sleep(20) - return fetch(input, init) - }, + it(`should set isConnected to false after fetch if not subscribed`, async ({ + issuesTableUrl, + aborter, + }) => { + const shapeStream = new ShapeStream({ + url: `${BASE_URL}/v1/shape`, + params: { + table: issuesTableUrl, + }, + subscribe: false, + experimentalLiveSse, + signal: aborter.signal, + }) + + await waitForFetch(shapeStream) + + // We should no longer be connected because + // the initial fetch finished and we've not subscribed to 
changes + await vi.waitFor(() => expect(shapeStream.isConnected()).false) }) - expect(shapeStream.isLoading()).true + it(`should expose isLoading status`, async ({ + issuesTableUrl, + aborter, + }) => { + const shapeStream = new ShapeStream({ + url: `${BASE_URL}/v1/shape`, + params: { + table: issuesTableUrl, + }, + signal: aborter.signal, + fetchClient: async (input, init) => { + await sleep(20) + return fetch(input, init) + }, + experimentalLiveSse, + }) - await waitForFetch(shapeStream) + expect(shapeStream.isLoading()).true - expect(shapeStream.isLoading()).false - }) + await waitForFetch(shapeStream) - it(`should expose lastOffset`, async ({ issuesTableUrl, aborter }) => { - const shapeStream = new ShapeStream({ - url: `${BASE_URL}/v1/shape`, - params: { - table: issuesTableUrl, - }, - signal: aborter.signal, - fetchClient: async (input, init) => { - await sleep(20) - return fetch(input, init) - }, - }) - const shape = new Shape(shapeStream) - - expect(shapeStream.lastOffset).toBe(`-1`) - expect(shape.lastOffset).toBe(shapeStream.lastOffset) - await waitForFetch(shapeStream) - - shape.unsubscribeAll() - }) - - it(`should honour replica: full`, async ({ - issuesTableUrl, - insertIssues, - updateIssue, - clearIssuesShape, - aborter, - }) => { - const [id] = await insertIssues({ title: `first title` }) - - const shapeStream = new ShapeStream({ - url: `${BASE_URL}/v1/shape`, - params: { - table: issuesTableUrl, - replica: `full`, - }, - signal: aborter.signal, + expect(shapeStream.isLoading()).false }) - let unsub: () => void = () => {} - try { - const lastMsgs: Message[] = [] - unsub = shapeStream.subscribe((msgs) => { - lastMsgs.push(...msgs) + it(`should expose lastOffset`, async ({ issuesTableUrl, aborter }) => { + const shapeStream = new ShapeStream({ + url: `${BASE_URL}/v1/shape`, + params: { + table: issuesTableUrl, + }, + signal: aborter.signal, + fetchClient: async (input, init) => { + await sleep(20) + return fetch(input, init) + }, + experimentalLiveSse, }) + const shape = new Shape(shapeStream) - await vi.waitFor(() => { - const msg = lastMsgs.shift() - expect(msg?.headers.control).toEqual(`up-to-date`) + expect(shapeStream.lastOffset).toBe(`-1`) + expect(shape.lastOffset).toBe(shapeStream.lastOffset) + await waitForFetch(shapeStream) + + shape.unsubscribeAll() + }) + + it(`should honour replica: full`, async ({ + issuesTableUrl, + insertIssues, + updateIssue, + clearIssuesShape, + aborter, + }) => { + const [id] = await insertIssues({ title: `first title` }) + + const shapeStream = new ShapeStream({ + url: `${BASE_URL}/v1/shape`, + params: { + table: issuesTableUrl, + replica: `full`, + }, + experimentalLiveSse, + signal: aborter.signal, }) - const expectedValue = { - id: id, - title: `updated title`, - // because we're sending the full row, the update will include the - // unchanged `priority` column - priority: 10, - } - await updateIssue({ id: id, title: `updated title` }) + let unsub: () => void = () => {} + try { + const lastMsgs: Message[] = [] + unsub = shapeStream.subscribe((msgs) => { + lastMsgs.push(...msgs) + }) - await vi.waitFor( - () => { + await vi.waitFor(() => { const msg = lastMsgs.shift() - if (!msg) throw new Error(`Update message not yet received`) - const changeMsg: ChangeMessage = msg as ChangeMessage - expect(changeMsg.headers.operation).toEqual(`update`) - expect(changeMsg.value).toEqual(expectedValue) - }, - { timeout: 2000 } - ) - } finally { - unsub() - // the normal cleanup doesn't work because our shape definition is - // changed by the updates: 
'full' param - await clearIssuesShape(shapeStream.shapeHandle) - } - }) - - it(`should support function-based params and headers`, async ({ - issuesTableUrl, - aborter, - }) => { - const mockParamFn = vi.fn().mockReturnValue(`test-value`) - const mockAsyncParamFn = vi.fn().mockResolvedValue(`test-value`) - const mockHeaderFn = vi.fn().mockReturnValue(`test-value`) - const mockAsyncHeaderFn = vi.fn().mockResolvedValue(`test-value`) - - // Test with synchronous functions - const shapeStream1 = new ShapeStream({ - url: `${BASE_URL}/v1/shape`, - params: { - table: issuesTableUrl, - customParam: mockParamFn, - }, - headers: { - 'X-Custom-Header': mockHeaderFn, - }, - signal: aborter.signal, - }) - const shape1 = new Shape(shapeStream1) - await shape1.value - - expect(mockParamFn).toHaveBeenCalled() - expect(mockHeaderFn).toHaveBeenCalled() - - // Test with async functions - const shapeStream2 = new ShapeStream({ - url: `${BASE_URL}/v1/shape`, - params: { - table: issuesTableUrl, - customParam: mockAsyncParamFn, - }, - headers: { - 'X-Custom-Header': mockAsyncHeaderFn, - }, - signal: aborter.signal, - }) - const shape2 = new Shape(shapeStream2) - await shape2.value - - expect(mockAsyncParamFn).toHaveBeenCalled() - expect(mockAsyncHeaderFn).toHaveBeenCalled() - - // Verify the resolved values - expect(await resolveValue(mockParamFn())).toBe(`test-value`) - expect(await resolveValue(mockAsyncParamFn())).toBe(`test-value`) - }) - - it(`should support forceDisconnectAndRefresh() to force a sync`, async ({ - issuesTableUrl, - insertIssues, - updateIssue, - waitForIssues, - aborter, - }) => { - // Create initial data - const [id] = await insertIssues({ title: `initial title` }) - await waitForIssues({ numChangesExpected: 1 }) - - // Track fetch requests - const pendingRequests: Array< - [string | URL | Request, () => Promise] - > = [] - - const resolveRequests = async () => { - await Promise.all(pendingRequests.map(([_, doFetch]) => doFetch())) - pendingRequests.length = 0 // Clear the array - } - - const fetchClient = async ( - input: string | URL | Request, - init?: RequestInit - ) => { - const signal = init?.signal - return new Promise((resolve, reject) => { - signal?.addEventListener( - `abort`, + expect(msg?.headers.control).toEqual(`up-to-date`) + }) + + const expectedValue = { + id: id, + title: `updated title`, + // because we're sending the full row, the update will include the + // unchanged `priority` column + priority: 10, + } + await updateIssue({ id: id, title: `updated title` }) + + await vi.waitFor( () => { - reject(new Error(`AbortError`)) + const msg = lastMsgs.shift() + if (!msg) throw new Error(`Update message not yet received`) + const changeMsg: ChangeMessage = msg as ChangeMessage + expect(changeMsg.headers.operation).toEqual(`update`) + expect(changeMsg.value).toEqual(expectedValue) }, - { once: true } + { timeout: 2000 } ) - console.log(input) - pendingRequests.push([ - input, - async () => { - try { - const response = await fetch(input, init) - resolve(response) - } catch (e) { - reject(e) - } - }, - ]) + } finally { + unsub() + // the normal cleanup doesn't work because our shape definition is + // changed by the updates: 'full' param + await clearIssuesShape(shapeStream.shapeHandle) + } + }) + + it(`should support function-based params and headers`, async ({ + issuesTableUrl, + aborter, + }) => { + const mockParamFn = vi.fn().mockReturnValue(`test-value`) + const mockAsyncParamFn = vi.fn().mockResolvedValue(`test-value`) + const mockHeaderFn = 
vi.fn().mockReturnValue(`test-value`) + const mockAsyncHeaderFn = vi.fn().mockResolvedValue(`test-value`) + + // Test with synchronous functions + const shapeStream1 = new ShapeStream({ + url: `${BASE_URL}/v1/shape`, + params: { + table: issuesTableUrl, + customParam: mockParamFn, + }, + headers: { + 'X-Custom-Header': mockHeaderFn, + }, + experimentalLiveSse, + signal: aborter.signal, + }) + const shape1 = new Shape(shapeStream1) + await shape1.value + + expect(mockParamFn).toHaveBeenCalled() + expect(mockHeaderFn).toHaveBeenCalled() + + // Test with async functions + const shapeStream2 = new ShapeStream({ + url: `${BASE_URL}/v1/shape`, + params: { + table: issuesTableUrl, + customParam: mockAsyncParamFn, + }, + headers: { + 'X-Custom-Header': mockAsyncHeaderFn, + }, + experimentalLiveSse, + signal: aborter.signal, }) - } - - const shapeStream = new ShapeStream({ - url: `${BASE_URL}/v1/shape`, - params: { - table: issuesTableUrl, - }, - signal: aborter.signal, - fetchClient, + const shape2 = new Shape(shapeStream2) + await shape2.value + + expect(mockAsyncParamFn).toHaveBeenCalled() + expect(mockAsyncHeaderFn).toHaveBeenCalled() + + // Verify the resolved values + expect(await resolveValue(mockParamFn())).toBe(`test-value`) + expect(await resolveValue(mockAsyncParamFn())).toBe(`test-value`) }) - // Subscribe to start the stream - const shape = new Shape(shapeStream) + it(`should support forceDisconnectAndRefresh() to force a sync`, async ({ + issuesTableUrl, + insertIssues, + updateIssue, + waitForIssues, + aborter, + }) => { + // Create initial data + const [id] = await insertIssues({ title: `initial title` }) + await waitForIssues({ numChangesExpected: 1 }) + + // Track fetch requests + let pendingRequests: Array< + [string | URL | Request, () => Promise] + > = [] + + const resolveRequests = async () => { + for (const [_, doFetch] of pendingRequests) { + await doFetch() + } + pendingRequests = [] // clear the array + } - // Wait for initial fetch to start: offset: -1 - await vi.waitFor(() => expect(pendingRequests.length).toBe(1)) - expect(pendingRequests[0][0].toString()).toContain(`offset=-1`) + const fetchClient = async ( + input: string | URL | Request, + init?: RequestInit + ) => { + const signal = init?.signal + return new Promise((resolve, reject) => { + signal?.addEventListener( + `abort`, + () => { + reject(new Error(`AbortError`)) + }, + { once: true } + ) + pendingRequests.push([ + input, + async () => { + try { + const response = await fetch(input, init) + resolve(response) + } catch (e) { + reject(e) + } + }, + ]) + }) + } - // Complete initial fetch - await resolveRequests() + const shapeStream = new ShapeStream({ + url: `${BASE_URL}/v1/shape`, + params: { + table: issuesTableUrl, + }, + signal: aborter.signal, + fetchClient, + experimentalLiveSse, + }) - // Wait for second fetch to start: offset: 0_0 - await vi.waitFor(() => expect(pendingRequests.length).toBe(1)) - expect(pendingRequests[0][0].toString()).toContain(`offset=0_0`) + // Subscribe to start the stream + const shape = new Shape(shapeStream) - // Complete second fetch - await resolveRequests() + // Wait for initial fetch to start: offset: -1 + await vi.waitFor(() => expect(pendingRequests.length).toBe(1)) + expect(pendingRequests[0][0].toString()).toContain(`offset=-1`) - // We should be in live mode - await vi.waitFor(() => expect(pendingRequests.length).toBe(1)) - expect(pendingRequests[0][0].toString()).toContain(`live=true`) + // Complete initial fetch + await resolveRequests() - // Update data while stream 
is long polling and ensure it has been processed - await updateIssue({ id, title: `updated title` }) - await waitForIssues({ - numChangesExpected: 1, - shapeStreamOptions: { - offset: shapeStream.lastOffset, - handle: shapeStream.shapeHandle, - }, - }) + // Wait for second fetch to start: offset: 0_0 + await vi.waitFor(() => expect(pendingRequests.length).toBe(1)) + expect(pendingRequests[0][0].toString()).toContain(`offset=0_0`) - // Start refresh - const refreshPromise = shapeStream.forceDisconnectAndRefresh() - - // Verify the long polling request was aborted and a new request started - await vi.waitFor(() => expect(pendingRequests.length).toBe(2)) - expect(pendingRequests.length).toBe(2) // Aborted long poll + refresh request - expect(pendingRequests[0][0].toString()).toContain(`live=true`) // The aborted long poll - expect(pendingRequests[1][0].toString()).not.toContain(`live=true`) // The refresh request - - // Complete refresh request - // This will abort the long poll and start a new one - await resolveRequests() - - // Wait for the refresh to complete, this resolves once the next request - // after calling forceDisconnectAndRefresh() has completed - await refreshPromise - - // Verify we got the updated data - expect(shape.currentRows).toEqual([ - { - id, - title: `updated title`, - priority: 10, - }, - ]) - - // Verify we return to normal processing (long polling) - await vi.waitFor(() => expect(pendingRequests.length).toBe(1)) // New long poll - expect(pendingRequests[0][0].toString()).toContain(`live=true`) - }) -}) - -describe(`Shape - backwards compatible`, () => { - it(`should set isConnected to false on fetch error and back on true when fetch succeeds again`, async ({ - issuesTableUrl, - aborter, - }) => { - const shapeStream = new ShapeStream({ - url: `${BASE_URL}/v1/shape`, - params: { - table: issuesTableUrl, - }, - signal: aborter.signal, - fetchClient: async (_input, _init) => { - await sleep(20) - return new Response(null, { - status: 204, - headers: new Headers({ - [`electric-offset`]: `0_0`, - [`electric-handle`]: `foo`, - [`electric-schema`]: ``, - [`electric-cursor`]: `123`, - }), - }) - }, + // Complete second fetch + await resolveRequests() + + // We should be in live mode + await vi.waitFor(() => expect(pendingRequests.length).toBe(1)) + expect(pendingRequests[0][0].toString()).toContain(`live=true`) + + // Update data while stream is long polling and ensure it has been processed + await updateIssue({ id, title: `updated title` }) + await waitForIssues({ + numChangesExpected: 1, + shapeStreamOptions: { + offset: shapeStream.lastOffset, + handle: shapeStream.shapeHandle, + }, + }) + + // Start refresh + const refreshPromise = shapeStream.forceDisconnectAndRefresh() + + // Verify the long polling request was aborted and a new request started + await vi.waitFor(() => expect(pendingRequests.length).toBe(2)) + expect(pendingRequests.length).toBe(2) // Aborted long poll + refresh request + expect(pendingRequests[0][0].toString()).toContain(`live=true`) // The aborted long poll + expect(pendingRequests[1][0].toString()).not.toContain(`live=true`) // The refresh request + + // Complete refresh request + // This will abort the long poll and start a new one + await resolveRequests() + + // Wait for the refresh to complete, this resolves once the next request + // after calling forceDisconnectAndRefresh() has completed + await refreshPromise + + // Verify we got the updated data + expect(shape.currentRows).toEqual([ + { + id, + title: `updated title`, + priority: 10, + }, 
+ ]) + + // Verify we return to normal processing (long polling) + await vi.waitFor(() => expect(pendingRequests.length).toBe(1)) // New long poll + expect(pendingRequests[0][0].toString()).toContain(`live=true`) }) + } +) + +describe.for(fetchAndSse)( + `Shape - backwards compatible (liveSSE=$experimentalLiveSse)`, + ({ experimentalLiveSse }) => { + it(`should set isConnected to false on fetch error and back on true when fetch succeeds again`, async ({ + issuesTableUrl, + aborter, + }) => { + const shapeStream = new ShapeStream({ + url: `${BASE_URL}/v1/shape`, + params: { + table: issuesTableUrl, + }, + signal: aborter.signal, + fetchClient: async (_input, _init) => { + await sleep(20) + return new Response(null, { + status: 204, + headers: new Headers({ + [`electric-offset`]: `0_0`, + [`electric-handle`]: `foo`, + [`electric-schema`]: ``, + [`electric-cursor`]: `123`, + }), + }) + }, + experimentalLiveSse, + }) - const unsubscribe = shapeStream.subscribe(() => unsubscribe()) + const unsubscribe = shapeStream.subscribe(() => unsubscribe()) - await vi.waitFor(() => expect(shapeStream.isConnected()).true) - expect(shapeStream.lastSyncedAt()).closeTo(Date.now(), 200) + await vi.waitFor(() => expect(shapeStream.isConnected()).true) + expect(shapeStream.lastSyncedAt()).closeTo(Date.now(), 200) - await sleep(400) + await sleep(400) - expect(shapeStream.lastSyncedAt()).closeTo(Date.now(), 200) - }) -}) + expect(shapeStream.lastSyncedAt()).closeTo(Date.now(), 200) + }) + } +) function waitForFetch(stream: ShapeStream): Promise { let unsub = () => {} diff --git a/packages/typescript-client/test/integration.test.ts b/packages/typescript-client/test/integration.test.ts index ea7560fa61..34611e7fc0 100644 --- a/packages/typescript-client/test/integration.test.ts +++ b/packages/typescript-client/test/integration.test.ts @@ -18,6 +18,11 @@ import * as h from './support/test-helpers' const BASE_URL = inject(`baseUrl`) +const fetchAndSse = [ + { experimentalLiveSse: false }, + { experimentalLiveSse: true }, +] + it(`sanity check`, async ({ dbClient, issuesTableSql }) => { const result = await dbClient.query(`SELECT * FROM ${issuesTableSql}`) @@ -25,102 +30,123 @@ it(`sanity check`, async ({ dbClient, issuesTableSql }) => { }) describe(`HTTP Sync`, () => { - it(`should work with empty shape/table`, async ({ - issuesTableUrl, - aborter, - }) => { - // Get initial data - const shapeData = new Map() - const issueStream = new ShapeStream({ - url: `${BASE_URL}/v1/shape`, - params: { - table: issuesTableUrl, - }, - subscribe: false, - signal: aborter.signal, - }) - - await new Promise((resolve, reject) => { - issueStream.subscribe((messages) => { - messages.forEach((message) => { - if (isChangeMessage(message)) { - shapeData.set(message.key, message.value) - } - if (isUpToDateMessage(message)) { - aborter.abort() - return resolve() - } - }) - }, reject) - }) - const values = [...shapeData.values()] + it.for(fetchAndSse)( + `should work with empty shape/table (liveSSE=$experimentalLiveSse)`, + async ({ experimentalLiveSse }, { issuesTableUrl, aborter }) => { + // Get initial data + const shapeData = new Map() + const issueStream = new ShapeStream({ + url: `${BASE_URL}/v1/shape`, + params: { + table: issuesTableUrl, + }, + subscribe: false, + signal: aborter.signal, + experimentalLiveSse, + }) - expect(values).toHaveLength(0) - }) + await new Promise((resolve, reject) => { + issueStream.subscribe((messages) => { + messages.forEach((message) => { + if (isChangeMessage(message)) { + shapeData.set(message.key, 
message.value) + } + if (isUpToDateMessage(message)) { + aborter.abort() + return resolve() + } + }) + }, reject) + }) + const values = [...shapeData.values()] - it(`should wait properly for updates on an empty shape/table`, async ({ - issuesTableUrl, - aborter, - }) => { - const urlsRequested: URL[] = [] - const fetchWrapper = (...args: Parameters<typeof fetch>) => { - const url = new URL(args[0] as string | URL) - urlsRequested.push(url) - return fetch(...args) + expect(values).toHaveLength(0) } + ) - // Get initial data - const shapeData = new Map() - const issueStream = new ShapeStream({ - url: `${BASE_URL}/v1/shape`, - params: { - table: issuesTableUrl, - }, - signal: aborter.signal, - fetchClient: fetchWrapper, - }) + it.for(fetchAndSse)( + `should wait properly for updates on an empty shape/table (liveSSE=$experimentalLiveSse)`, + async ({ experimentalLiveSse }, { issuesTableUrl, aborter }) => { + const urlsRequested: URL[] = [] + const fetchWrapper = async (...args: Parameters<typeof fetch>) => { + const url = new URL(args[0] instanceof Request ? args[0].url : args[0]) + urlsRequested.push(url) + const res = await fetch(...args) + return res + } - let upToDateMessageCount = 0 + // Get initial data + const shapeData = new Map() + const issueStream = new ShapeStream({ + url: `${BASE_URL}/v1/shape`, + params: { + table: issuesTableUrl, + }, + signal: aborter.signal, + fetchClient: fetchWrapper, + experimentalLiveSse, + }) - await new Promise<void>((resolve, reject) => { - issueStream.subscribe((messages) => { - messages.forEach((message) => { - if (isChangeMessage(message)) { - shapeData.set(message.key, message.value) - } - if (isUpToDateMessage(message)) { - upToDateMessageCount += 1 - } - }) - }, reject) - - // count updates received over 1 second - proper long polling - // should wait for far longer than this time period - setTimeout(() => { - aborter.abort() - resolve() - }, 1000) - }) + let upToDateMessageCount = 0 + + // TODO: this test fails in SSE mode because we don't use the provided fetchWrapper; + // SSE uses the built-in fetch. 
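One plausible way to address the TODO above, sketched purely as an assumption rather than as the actual client implementation: assuming the SSE path is built on `@microsoft/fetch-event-source` (the library patched elsewhere in this PR), that library accepts a custom `fetch` implementation in its init object, so the SSE code path could forward the user-supplied `fetchClient` instead of falling back to the global `fetch`:

```typescript
import { fetchEventSource } from '@microsoft/fetch-event-source'

// Hypothetical wiring, for illustration only: forward the user-supplied
// fetchClient into the SSE request so wrappers like this test's fetchWrapper
// can observe and intercept SSE requests too.
async function streamShapeOverSse(
  url: string,
  fetchClient: typeof fetch,
  signal: AbortSignal,
  onMessage: (data: string) => void
): Promise<void> {
  await fetchEventSource(url, {
    signal,
    fetch: fetchClient, // fetchEventSource supports swapping the transport
    onmessage: (event) => onMessage(event.data),
  })
}
```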
+ // Should fix that + + await new Promise<void>((resolve, reject) => { + issueStream.subscribe((messages) => { + messages.forEach((message) => { + if (isChangeMessage(message)) { + shapeData.set(message.key, message.value) + } + if (isUpToDateMessage(message)) { + upToDateMessageCount += 1 + } + }) + }, reject) + + // count updates received over 1 second - proper long polling + // should wait for far longer than this time period + setTimeout(() => { + aborter.abort() + resolve() + }, 1000) + }) - // first request was -1, last requests should be live ones - const numRequests = urlsRequested.length - expect(numRequests).toBeGreaterThan(2) - expect(urlsRequested[0].searchParams.get(`offset`)).toBe(`-1`) - expect(urlsRequested[0].searchParams.has(`live`)).false - expect(urlsRequested[numRequests - 1].searchParams.get(`offset`)).not.toBe( - `-1` - ) - expect(urlsRequested[numRequests - 1].searchParams.has(`live`)).true - expect(urlsRequested[numRequests - 1].searchParams.has(`cursor`)).true + // first request was -1, last requests should be live ones + const numRequests = urlsRequested.length - // first request comes back immediately and is up to date, second one - // should hang while waiting for updates - expect(upToDateMessageCount).toBe(1) + if (experimentalLiveSse) { + // We expect 3 requests: 2 requests for the initial fetch and the live request (which is 1 request streaming all updates) + expect(numRequests).toBe(3) + } else { + // We expect more than 2 requests: the initial fetch + 1 request per live update + expect(numRequests).toBeGreaterThan(2) + } - // data should be 0 - const values = [...shapeData.values()] - expect(values).toHaveLength(0) - }) + expect(urlsRequested[0].searchParams.get(`offset`)).toBe(`-1`) + expect(urlsRequested[0].searchParams.has(`live`)).false + expect( + urlsRequested[numRequests - 1].searchParams.get(`offset`) + ).not.toBe(`-1`) + expect(urlsRequested[numRequests - 1].searchParams.has(`live`)).true + expect(urlsRequested[numRequests - 1].searchParams.has(`cursor`)).true + + // first request comes back immediately and is up to date, second one + // should hang while waiting for updates + expect(upToDateMessageCount).toBe(1) + + // data should be 0 + const values = [...shapeData.values()] + expect(values).toHaveLength(0) + } + ) it(`returns a header with the server shape handle`, async ({ issuesTableUrl, @@ -144,46 +170,52 @@ expect(lastOffset).to.exist }) - it(`should get initial data`, async ({ - insertIssues, - issuesTableUrl, - aborter, - }) => { - // Add an initial row. - const uuid = uuidv4() - await insertIssues({ id: uuid, title: `foo + ${uuid}` }) - - // Get initial data - const shapeData = new Map() - const issueStream = new ShapeStream({ - url: `${BASE_URL}/v1/shape`, - params: { - table: issuesTableUrl, - }, - signal: aborter.signal, - }) + it.for(fetchAndSse)( + `should get initial data (liveSSE=$experimentalLiveSse)`, + async ( + { experimentalLiveSse }, + { insertIssues, issuesTableUrl, aborter } + ) => { + // Add an initial row. 
+ const uuid = uuidv4() + await insertIssues({ id: uuid, title: `foo + ${uuid}` }) - await new Promise((resolve) => { - issueStream.subscribe((messages) => { - messages.forEach((message) => { - if (isChangeMessage(message)) { - shapeData.set(message.key, message.value) - } - if (isUpToDateMessage(message)) { - aborter.abort() - return resolve() - } + // Get initial data + const shapeData = new Map() + const issueStream = new ShapeStream({ + url: `${BASE_URL}/v1/shape`, + params: { + table: issuesTableUrl, + }, + signal: aborter.signal, + experimentalLiveSse, + }) + + await new Promise((resolve) => { + issueStream.subscribe((messages) => { + messages.forEach((message) => { + if (isChangeMessage(message)) { + shapeData.set(message.key, message.value) + } + if (isUpToDateMessage(message)) { + aborter.abort() + return resolve() + } + }) }) }) - }) - const values = [...shapeData.values()] + const values = [...shapeData.values()] - expect(values).toMatchObject([{ title: `foo + ${uuid}` }]) - }) + expect(values).toMatchObject([{ title: `foo + ${uuid}` }]) + } + ) - mit( - `should parse incoming data`, - async ({ dbClient, aborter, tableSql, tableUrl }) => { + mit.for(fetchAndSse)( + `should parse incoming data (liveSSE=$experimentalLiveSse)`, + async ( + { experimentalLiveSse }, + { dbClient, aborter, tableSql, tableUrl } + ) => { // Create a table with data we want to be parsed await dbClient.query( ` @@ -239,6 +271,7 @@ describe(`HTTP Sync`, () => { table: tableUrl, }, signal: aborter.signal, + experimentalLiveSse, }) const client = new Shape(issueStream) const rows = await client.rows @@ -354,207 +387,229 @@ describe(`HTTP Sync`, () => { } ) - it(`should get initial data and then receive updates`, async ({ - aborter, - issuesTableUrl, - issuesTableKey, - updateIssue, - insertIssues, - waitForIssues, - }) => { - // With initial data - const rowId = uuidv4() - await insertIssues({ id: rowId, title: `original insert` }) - await waitForIssues({ numChangesExpected: 1 }) - - const shapeData = new Map() - const issueStream = new ShapeStream({ - url: `${BASE_URL}/v1/shape`, - params: { - table: issuesTableUrl, - }, - signal: aborter.signal, - }) - let secondRowId = `` - await h.forEachMessage(issueStream, aborter, async (res, msg, nth) => { - if (!isChangeMessage(msg)) return - shapeData.set(msg.key, msg.value) - - if (nth === 0) { - updateIssue({ id: rowId, title: `foo1` }) - } else if (nth === 1) { - ;[secondRowId] = await insertIssues({ title: `foo2` }) - } else if (nth === 2) { - res() + it.for(fetchAndSse)( + `should get initial data and then receive updates (liveSSE=$experimentalLiveSse)`, + async ( + { experimentalLiveSse }, + { + aborter, + issuesTableUrl, + issuesTableKey, + updateIssue, + insertIssues, + waitForIssues, } - }) + ) => { + // With initial data + const rowId = uuidv4() + await insertIssues({ id: rowId, title: `original insert` }) + await waitForIssues({ numChangesExpected: 1 }) - // Only initial insert has the full row, the update contains only PK & changed columns. - // This test doesn't merge in updates, so we don't have `priority` on the row. 
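For readers following the comment above: a consumer that did want fully materialised rows would merge each partial `update` into its local state instead of overwriting it. A minimal sketch, assuming only the client package's public helpers and the message shape these tests already rely on (`key`, `value`, `headers.operation`):

```typescript
import { isChangeMessage, type Message, type Row } from '@electric-sql/client'

// Illustrative merge loop: inserts replace the row, updates are merged so
// columns omitted from the patch (e.g. `priority`) survive, deletes remove it.
function applyMessages(rows: Map<string, Row>, messages: Message[]): void {
  for (const msg of messages) {
    if (!isChangeMessage(msg)) continue
    switch (msg.headers.operation) {
      case `insert`:
        rows.set(msg.key, msg.value)
        break
      case `update`:
        rows.set(msg.key, { ...rows.get(msg.key), ...msg.value })
        break
      case `delete`:
        rows.delete(msg.key)
        break
    }
  }
}
```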
- expect(shapeData).toEqual( - new Map([ - [`${issuesTableKey}/"${rowId}"`, { id: rowId, title: `foo1` }], - [ - `${issuesTableKey}/"${secondRowId}"`, - { id: secondRowId, title: `foo2`, priority: 10 }, - ], - ]) - ) - }) - - it(`should wait for processing before advancing stream`, async ({ - aborter, - issuesTableUrl, - insertIssues, - waitForIssues, - }) => { - // With initial data - await insertIssues({ id: uuidv4(), title: `original insert` }) + const shapeData = new Map() + const issueStream = new ShapeStream({ + url: `${BASE_URL}/v1/shape`, + params: { + table: issuesTableUrl, + }, + signal: aborter.signal, + experimentalLiveSse, + }) + let secondRowId = `` + await h.forEachMessage(issueStream, aborter, async (res, msg, nth) => { + if (!isChangeMessage(msg)) return + shapeData.set(msg.key, msg.value) - const fetchWrapper = vi - .fn() - .mockImplementation((...args: Parameters<typeof fetch>) => { - return fetch(...args) + if (nth === 0) { + await updateIssue({ id: rowId, title: `foo1` }) + } else if (nth === 1) { + ;[secondRowId] = await insertIssues({ title: `foo2` }) + } else if (nth === 2) { + res() + } }) - const shapeData = new Map() - const issueStream = new ShapeStream({ - url: `${BASE_URL}/v1/shape`, - params: { - table: issuesTableUrl, - }, - signal: aborter.signal, - fetchClient: fetchWrapper, - }) + // Only initial insert has the full row, the update contains only PK & changed columns. + // This test doesn't merge in updates, so we don't have `priority` on the row. + expect(shapeData).toEqual( + new Map([ + [`${issuesTableKey}/"${rowId}"`, { id: rowId, title: `foo1` }], + [ + `${issuesTableKey}/"${secondRowId}"`, + { id: secondRowId, title: `foo2`, priority: 10 }, + ], + ]) + ) + } + ) - let numFetchCalls = 0 + it.for(fetchAndSse)( + `should wait for processing before advancing stream (liveSSE=$experimentalLiveSse)`, + async ( + { experimentalLiveSse }, + { aborter, issuesTableUrl, insertIssues, waitForIssues } + ) => { + // With initial data + await insertIssues({ id: uuidv4(), title: `original insert` }) + + const fetchWrapper = vi + .fn() + .mockImplementation((...args: Parameters<typeof fetch>) => { + return fetch(...args) + }) - await h.forEachMessage(issueStream, aborter, async (res, msg, nth) => { - if (!isChangeMessage(msg)) return - shapeData.set(msg.key, msg.value) + const shapeData = new Map() + const issueStream = new ShapeStream({ + url: `${BASE_URL}/v1/shape`, + params: { + table: issuesTableUrl, + }, + signal: aborter.signal, + fetchClient: fetchWrapper, + experimentalLiveSse, + }) - if (nth === 0) { - await sleep(100) - numFetchCalls = fetchWrapper.mock.calls.length + let numFetchCalls = 0 - // ensure fetch has not been called again while - // waiting for processing - await insertIssues({ title: `foo1` }) + await h.forEachMessage(issueStream, aborter, async (res, msg, nth) => { + if (!isChangeMessage(msg)) return + shapeData.set(msg.key, msg.value) - // independent stream should be able to see this item, - // but the stream we have is waiting - await waitForIssues({ numChangesExpected: 1 }) - expect(fetchWrapper).toHaveBeenCalledTimes(numFetchCalls) - } else if (nth === 1) { - expect(fetchWrapper.mock.calls.length).greaterThan(numFetchCalls) - res() - } - }) - }) + if (nth === 0) { + await sleep(100) + numFetchCalls = fetchWrapper.mock.calls.length + + // ensure fetch has not been called again while + // waiting for processing + await insertIssues({ title: `foo1` }) + + // independent stream should be able to see this item, + // but the stream we have is waiting + await waitForIssues({ numChangesExpected: 1 }) + expect(fetchWrapper).toHaveBeenCalledTimes(numFetchCalls) + } else if (nth === 1) { + expect(fetchWrapper.mock.calls.length).greaterThan(numFetchCalls) + res() + } + }) + } + ) - it(`multiple clients can get the same data in parallel`, async ({ - issuesTableUrl, - updateIssue, - insertIssues, - }) 
=> { - const rowId = uuidv4(), - rowId2 = uuidv4() - await insertIssues( - { id: rowId, title: `first original insert` }, - { id: rowId2, title: `second original insert` } - ) + // ensure fetch has not been called again while + // waiting for processing + await insertIssues({ title: `foo1` }) - const shapeData1 = new Map() - const aborter1 = new AbortController() - const issueStream1 = new ShapeStream({ - url: `${BASE_URL}/v1/shape`, - params: { - table: issuesTableUrl, - }, - signal: aborter1.signal, - }) + // independent stream should be able to see this item, + // but the stream we have is waiting + await waitForIssues({ numChangesExpected: 1 }) + expect(fetchWrapper).toHaveBeenCalledTimes(numFetchCalls) + } else if (nth === 1) { + expect(fetchWrapper.mock.calls.length).greaterThan(numFetchCalls) + res() + } + }) + } + ) - const shapeData2 = new Map() - const aborter2 = new AbortController() - const issueStream2 = new ShapeStream({ - url: `${BASE_URL}/v1/shape`, - params: { - table: issuesTableUrl, - }, - signal: aborter2.signal, - }) + it.for(fetchAndSse)( + `multiple clients can get the same data in parallel (liveSSE=$experimentalLiveSse)`, + async ( + { experimentalLiveSse }, + { issuesTableUrl, updateIssue, insertIssues } + ) => { + const rowId = uuidv4(), + rowId2 = uuidv4() + await insertIssues( + { id: rowId, title: `first original insert` }, + { id: rowId2, title: `second original insert` } + ) - const p1 = h.forEachMessage(issueStream1, aborter1, (res, msg, nth) => { - if (!isChangeMessage(msg)) return - shapeData1.set(msg.key, msg.value) + const shapeData1 = new Map() + const aborter1 = new AbortController() + const issueStream1 = new ShapeStream({ + url: `${BASE_URL}/v1/shape`, + params: { + table: issuesTableUrl, + }, + signal: aborter1.signal, + experimentalLiveSse, + }) - if (nth === 1) { - setTimeout(() => updateIssue({ id: rowId, title: `foo3` }), 50) - } else if (nth === 2) { - return res() - } - }) + const shapeData2 = new Map() + const aborter2 = new AbortController() + const issueStream2 = new ShapeStream({ + url: `${BASE_URL}/v1/shape`, + params: { + table: issuesTableUrl, + }, + signal: aborter2.signal, + experimentalLiveSse, + }) - const p2 = h.forEachMessage(issueStream2, aborter2, (res, msg, nth) => { - if (!isChangeMessage(msg)) return - shapeData2.set(msg.key, msg.value) + const p1 = h.forEachMessage(issueStream1, aborter1, (res, msg, nth) => { + if (!isChangeMessage(msg)) return + shapeData1.set(msg.key, msg.value) - if (nth === 2) { - return res() - } - }) + if (nth === 1) { + setTimeout(() => updateIssue({ id: rowId, title: `foo3` }), 50) + } else if (nth === 2) { + return res() + } + }) - await Promise.all([p1, p2]) + const p2 = h.forEachMessage(issueStream2, aborter2, (res, msg, nth) => { + if (!isChangeMessage(msg)) return + shapeData2.set(msg.key, msg.value) - expect(shapeData1).toEqual(shapeData2) - }) + if (nth === 2) { + return res() + } + }) - it(`can go offline and then catchup`, async ({ - aborter, - issuesTableUrl, - insertIssues, - waitForIssues, - }) => { - // initialize storage for the cases where persisted shape streams are tested - await insertIssues({ title: `foo1` }, { title: `foo2` }, { title: `foo3` }) + await Promise.all([p1, p2]) - const streamState = await waitForIssues({ numChangesExpected: 3 }) + expect(shapeData1).toEqual(shapeData2) + } + ) - const numIssuesToAdd = 9 - await insertIssues( - ...Array.from({ length: numIssuesToAdd }, (_, i) => ({ - title: `foo${i + 5}`, - })) - ) + it.for(fetchAndSse)( + `can go offline and then 
+    async (
+      { experimentalLiveSse },
+      { aborter, issuesTableUrl, insertIssues, waitForIssues }
+    ) => {
+      // initialize storage for the cases where persisted shape streams are tested
+      await insertIssues(
+        { title: `foo1` },
+        { title: `foo2` },
+        { title: `foo3` }
+      )
-    // And wait until it's definitely seen
-    await waitForIssues({
-      shapeStreamOptions: streamState,
-      numChangesExpected: numIssuesToAdd,
-    })
+      const streamState = await waitForIssues({ numChangesExpected: 3 })
-    let catchupOpsCount = 0
-    const newIssueStream = new ShapeStream({
-      url: `${BASE_URL}/v1/shape`,
-      params: {
-        table: issuesTableUrl,
-      },
-      subscribe: true,
-      signal: aborter.signal,
-      offset: streamState.offset,
-      handle: streamState.handle,
-    })
+      const numIssuesToAdd = 9
+      await insertIssues(
+        ...Array.from({ length: numIssuesToAdd }, (_, i) => ({
+          title: `foo${i + 5}`,
+        }))
+      )
-    await h.forEachMessage(newIssueStream, aborter, (res, msg, nth) => {
-      if (isUpToDateMessage(msg)) {
-        res()
-      } else {
-        catchupOpsCount = nth + 1
-      }
-    })
+      // And wait until it's definitely seen
+      await waitForIssues({
+        shapeStreamOptions: streamState,
+        numChangesExpected: numIssuesToAdd,
+      })
-    expect(catchupOpsCount).toBe(9)
-  })
+      let catchupOpsCount = 0
+      const newIssueStream = new ShapeStream({
+        url: `${BASE_URL}/v1/shape`,
+        params: {
+          table: issuesTableUrl,
+        },
+        subscribe: true,
+        signal: aborter.signal,
+        offset: streamState.offset,
+        handle: streamState.handle,
+        experimentalLiveSse,
+      })
+
+      await h.forEachMessage(newIssueStream, aborter, (res, msg, nth) => {
+        if (isUpToDateMessage(msg)) {
+          res()
+        } else {
+          catchupOpsCount = nth + 1
+        }
+      })
+
+      expect(catchupOpsCount).toBe(9)
+    }
+  )

   it(`should return correct caching headers`, async ({
     issuesTableUrl,
@@ -675,53 +730,63 @@ describe(`HTTP Sync`, () => {
     expect(catchupStatus).toEqual(304)
   })

-  it(`should correctly use a where clause for initial sync and updates`, async ({
-    insertIssues,
-    updateIssue,
-    issuesTableUrl,
-    issuesTableKey,
-    clearShape,
-    aborter,
-  }) => {
-    // Add an initial rows
-    const id1 = uuidv4()
-    const id2 = uuidv4()
-
-    await insertIssues({ id: id1, title: `foo` }, { id: id2, title: `bar` })
-
-    // Get initial data
-    const shapeData = new Map()
-    const issueStream = new ShapeStream({
-      url: `${BASE_URL}/v1/shape`,
-      params: {
-        table: issuesTableUrl,
-        where: `title LIKE 'foo%'`,
-      },
-      signal: aborter.signal,
-    })
+  it.for(fetchAndSse)(
+    `should correctly use a where clause for initial sync and updates (liveSSE=$experimentalLiveSse)`,
+    async (
+      { experimentalLiveSse },
+      {
+        insertIssues,
+        updateIssue,
+        issuesTableUrl,
+        issuesTableKey,
+        clearShape,
+        aborter,
+      }
+    ) => {
+      // Add initial rows
+      const id1 = uuidv4()
+      const id2 = uuidv4()
-    await h.forEachMessage(issueStream, aborter, async (res, msg, nth) => {
-      if (!isChangeMessage(msg)) return
-      shapeData.set(msg.key, msg.value)
+      await insertIssues({ id: id1, title: `foo` }, { id: id2, title: `bar` })
-      if (nth === 0) {
-        updateIssue({ id: id1, title: `foo1` })
-        updateIssue({ id: id2, title: `bar1` })
-      } else if (nth === 1) {
-        res()
-      }
-    })
+      // Get initial data
+      const shapeData = new Map()
+      const issueStream = new ShapeStream({
+        url: `${BASE_URL}/v1/shape`,
+        params: {
+          table: issuesTableUrl,
+          where: `title LIKE 'foo%'`,
+        },
+        signal: aborter.signal,
+        experimentalLiveSse,
+      })
+
+      await h.forEachMessage(issueStream, aborter, async (res, msg, nth) => {
+        if (!isChangeMessage(msg)) return
+        shapeData.set(msg.key, msg.value)
-    await clearShape(issuesTableUrl, { handle: issueStream.shapeHandle! })
+        if (nth === 0) {
+          updateIssue({ id: id1, title: `foo1` })
+          updateIssue({ id: id2, title: `bar1` })
+        } else if (nth === 1) {
+          res()
+        }
+      })
-    expect(shapeData).toEqual(
-      new Map([[`${issuesTableKey}/"${id1}"`, { id: id1, title: `foo1` }]])
-    )
-  })
+      await clearShape(issuesTableUrl, { handle: issueStream.shapeHandle! })
-  mit(
-    `should correctly select columns for initial sync and updates`,
-    async ({ dbClient, aborter, tableSql, tableUrl }) => {
+      expect(shapeData).toEqual(
+        new Map([[`${issuesTableKey}/"${id1}"`, { id: id1, title: `foo1` }]])
+      )
+    }
+  )
+
+  mit.for(fetchAndSse)(
+    `should correctly select columns for initial sync and updates (liveSSE=$experimentalLiveSse)`,
+    async (
+      { experimentalLiveSse },
+      { dbClient, aborter, tableSql, tableUrl }
+    ) => {
       await dbClient.query(
         `INSERT INTO ${tableSql} (txt, i2, i4, i8) VALUES ($1, $2, $3, $4)`,
         [`test1`, 1, 10, 100]
       )
@@ -736,6 +801,7 @@ describe(`HTTP Sync`, () => {
           columns: [`txt`, `i2`, `i4`],
         },
         signal: aborter.signal,
+        experimentalLiveSse,
       })
       await h.forEachMessage(issueStream, aborter, async (res, msg, nth) => {
         if (!isChangeMessage(msg)) return
@@ -830,189 +896,192 @@ describe(`HTTP Sync`, () => {
     }
   })

-  it(`should handle invalid requests by terminating stream`, async ({
-    expect,
-    issuesTableUrl,
-    aborter,
-    waitForIssues,
-  }) => {
-    const streamState = await waitForIssues({ numChangesExpected: 0 })
-
-    let error: Error
-    const invalidIssueStream = new ShapeStream({
-      url: `${BASE_URL}/v1/shape`,
-      params: {
-        table: issuesTableUrl,
-        where: `1 x 1`, // invalid SQL
-      },
-      signal: aborter.signal,
-      handle: streamState.handle,
-      onError: (err) => {
-        error = err
-      },
-    })
+  it.for(fetchAndSse)(
+    `should handle invalid requests by terminating stream (liveSSE=$experimentalLiveSse)`,
+    async (
+      { experimentalLiveSse },
+      { expect, issuesTableUrl, aborter, waitForIssues }
+    ) => {
+      const streamState = await waitForIssues({ numChangesExpected: 0 })
-    const errorSubscriberPromise = new Promise((_, reject) =>
-      invalidIssueStream.subscribe(() => {}, reject)
-    )
+      let error: Error
+      const invalidIssueStream = new ShapeStream({
+        url: `${BASE_URL}/v1/shape`,
+        params: {
+          table: issuesTableUrl,
+          where: `1 x 1`, // invalid SQL
+        },
+        signal: aborter.signal,
+        handle: streamState.handle,
+        onError: (err) => {
+          error = err
+        },
+        experimentalLiveSse,
+      })
-    await expect(errorSubscriberPromise).rejects.toThrow(FetchError)
-    expect(invalidIssueStream.error).instanceOf(FetchError)
-    expect((invalidIssueStream.error! as FetchError).status).toBe(400)
-    expect(invalidIssueStream.isConnected()).false
-    expect((error! as FetchError).json).toStrictEqual({
-      message: `Invalid request`,
-      errors: {
-        where: [`At location 17: syntax error at or near "x"`],
-      },
-    })
-  })
+      const errorSubscriberPromise = new Promise((_, reject) =>
+        invalidIssueStream.subscribe(() => {}, reject)
+      )
-  it(`should handle invalid requests by terminating stream`, async ({
-    expect,
-    issuesTableUrl,
-    aborter,
-  }) => {
-    let error: Error
-    const invalidIssueStream = new ShapeStream({
-      url: `${BASE_URL}/v1/shape`,
-      params: {
-        table: issuesTableUrl,
-        where: `1=1`,
-      },
-      signal: aborter.signal,
-      // handle: streamState.handle,
-      onError: (err) => {
-        error = err
-      },
-      fetchClient: async (...args) => {
-        const res = await fetch(...args)
-        await res.text()
-        return res
-      },
-    })
+      await expect(errorSubscriberPromise).rejects.toThrow(FetchError)
+      expect(invalidIssueStream.error).instanceOf(FetchError)
+      expect((invalidIssueStream.error! as FetchError).status).toBe(400)
+      expect(invalidIssueStream.isConnected()).false
+      expect((error! as FetchError).json).toStrictEqual({
+        message: `Invalid request`,
+        errors: {
+          where: [`At location 17: syntax error at or near "x"`],
+        },
+      })
+    }
+  )
-    const errorSubscriberPromise = new Promise((_, reject) =>
-      invalidIssueStream.subscribe(() => {}, reject)
-    )
+  it.for(fetchAndSse)(
+    `should terminate stream if the fetch client consumes the response body (liveSSE=$experimentalLiveSse)`,
+    async ({ experimentalLiveSse }, { expect, issuesTableUrl, aborter }) => {
+      let error: Error
+      const invalidIssueStream = new ShapeStream({
+        url: `${BASE_URL}/v1/shape`,
+        params: {
+          table: issuesTableUrl,
+          where: `1=1`,
+        },
+        signal: aborter.signal,
+        onError: (err) => {
+          error = err
+        },
+        fetchClient: async (...args) => {
+          const res = await fetch(...args)
+          await res.text()
+          return res
+        },
+        experimentalLiveSse,
+      })
-    await expect(errorSubscriberPromise).rejects.toThrow(FetchError)
-    expect(invalidIssueStream.error).instanceOf(FetchError)
-    expect(invalidIssueStream.isConnected()).false
-    expect(error!.message).contains(
-      `Body is unusable: Body has already been read`
-    )
-  })
+      const errorSubscriberPromise = new Promise((_, reject) =>
+        invalidIssueStream.subscribe(() => {}, reject)
       )
-  it(`should detect shape deprecation and restart syncing`, async ({
-    expect,
-    insertIssues,
-    issuesTableUrl,
-    aborter,
-    clearIssuesShape,
-  }) => {
-    // With initial data
-    const rowId = uuidv4(),
-      rowId2 = uuidv4()
-    await insertIssues({ id: rowId, title: `foo1` })
+      await expect(errorSubscriberPromise).rejects.toThrow(FetchError)
+      expect(invalidIssueStream.error).instanceOf(FetchError)
+      expect(invalidIssueStream.isConnected()).false
+      expect(error!.message).contains(
+        `Body is unusable: Body has already been read`
+      )
+    }
   )
-    const statusCodesReceived: number[] = []
-    let numRequests = 0
+  it.for(fetchAndSse)(
+    `should detect shape deprecation and restart syncing (liveSSE=$experimentalLiveSse)`,
+    async (
+      { experimentalLiveSse },
+      { expect, insertIssues, issuesTableUrl, aborter, clearIssuesShape }
+    ) => {
+      // With initial data
+      const rowId = uuidv4(),
+        rowId2 = uuidv4()
+      await insertIssues({ id: rowId, title: `foo1` })
+
+      const statusCodesReceived: number[] = []
+      let numRequests = 0
+
+      const fetchWrapper = async (...args: Parameters<typeof fetch>) => {
+        // before any subsequent requests after the initial one, ensure
+        // that the existing shape is deleted and some more data is inserted
+        if (numRequests === 2) {
+          await insertIssues({ id: rowId2, title: `foo2` })
+          await clearIssuesShape(issueStream.shapeHandle)
+        }
-    const fetchWrapper = async (...args: Parameters<typeof fetch>) => {
-      // before any subsequent requests after the initial one, ensure
-      // that the existing shape is deleted and some more data is inserted
-      if (numRequests === 2) {
-        await insertIssues({ id: rowId2, title: `foo2` })
-        await clearIssuesShape(issueStream.shapeHandle)
-      }
+        numRequests++
+        const response = await fetch(...args)
-      numRequests++
-      const response = await fetch(...args)
+        if (response.status < 500) {
+          statusCodesReceived.push(response.status)
+        }
-      if (response.status < 500) {
-        statusCodesReceived.push(response.status)
+        return response
+      }
-      return response
-    }
+      const issueStream = new ShapeStream({
+        url: `${BASE_URL}/v1/shape`,
+        params: {
+          table: issuesTableUrl,
+        },
+        subscribe: true,
+        signal: aborter.signal,
+        fetchClient: fetchWrapper,
+        experimentalLiveSse,
+      })
-    const issueStream = new ShapeStream({
-      url: `${BASE_URL}/v1/shape`,
-      params: {
-        table: issuesTableUrl,
-      },
-      subscribe: true,
-      signal: aborter.signal,
-      fetchClient: fetchWrapper,
-    })
+      expect.assertions(12)
-    expect.assertions(12)
-
-    let originalShapeHandle: string | undefined
-    let upToDateReachedCount = 0
-    await h.forEachMessage(issueStream, aborter, async (res, msg, nth) => {
-      // shapeData.set(msg.key, msg.value)
-      if (isUpToDateMessage(msg)) {
-        upToDateReachedCount++
-        if (upToDateReachedCount === 1) {
-          // upon reaching up to date initially, we have one
-          // response with the initial data
-          expect(statusCodesReceived).toHaveLength(2)
-          expect(statusCodesReceived[0]).toBe(200)
-          expect(statusCodesReceived[1]).toBe(200)
-        } else if (upToDateReachedCount === 2) {
-          // the next up to date message should have had
-          // a 409 interleaved before it that instructed the
-          // client to go and fetch data from scratch
-          expect(statusCodesReceived.length).greaterThanOrEqual(5)
-          expect(statusCodesReceived[2]).toBe(409)
-          expect(statusCodesReceived[3]).toBe(200)
-          return res()
+      let originalShapeHandle: string | undefined
+      let upToDateReachedCount = 0
+      await h.forEachMessage(issueStream, aborter, async (res, msg, nth) => {
+        if (isUpToDateMessage(msg)) {
+          upToDateReachedCount++
+          if (upToDateReachedCount === 1) {
+            // upon reaching up to date initially, we have two responses:
+            // the initial data plus the request that brought us up to date
+            expect(statusCodesReceived).toHaveLength(2)
+            expect(statusCodesReceived[0]).toBe(200)
+            expect(statusCodesReceived[1]).toBe(200)
+          } else if (upToDateReachedCount === 2) {
+            // the next up to date message should have had
+            // a 409 interleaved before it that instructed the
+            // client to go and fetch data from scratch
+            expect(statusCodesReceived.length).greaterThanOrEqual(5)
+            expect(statusCodesReceived[2]).toBe(409)
+            expect(statusCodesReceived[3]).toBe(200)
+            return res()
+          }
+          return
         }
-        return
-      }
-      if (!isChangeMessage(msg)) return
+        if (!isChangeMessage(msg)) return
-      switch (nth) {
-        case 0:
-          // first message is the initial row
-          expect(msg.value).toEqual({
-            id: rowId,
-            title: `foo1`,
-            priority: 10,
-          })
-          expect(issueStream.shapeHandle).to.exist
-          originalShapeHandle = issueStream.shapeHandle
-          break
-        case 1:
-        case 2:
-          // Second snapshot queries PG without `ORDER BY`, so check that it's generally correct.
-          // We're checking that both messages arrive by using `expect.assertions(N)` above.
- - if (msg.value.id == rowId) { - // message is the initial row again as it is a new shape - // with different shape handle + switch (nth) { + case 0: + // first message is the initial row expect(msg.value).toEqual({ id: rowId, title: `foo1`, priority: 10, }) - expect(issueStream.shapeHandle).not.toBe(originalShapeHandle) - } else { - // should get the second row as well with the new shape handle - expect(msg.value).toEqual({ - id: rowId2, - title: `foo2`, - priority: 10, - }) - expect(issueStream.shapeHandle).not.toBe(originalShapeHandle) - } - break - default: - expect.unreachable(`Received more messages than expected`) - } - }) - }) + expect(issueStream.shapeHandle).to.exist + originalShapeHandle = issueStream.shapeHandle + break + case 1: + case 2: + // Second snapshot queries PG without `ORDER BY`, so check that it's generally correct. + // We're checking that both messages arrive by using `expect.assertions(N)` above. + + if (msg.value.id == rowId) { + // message is the initial row again as it is a new shape + // with different shape handle + expect(msg.value).toEqual({ + id: rowId, + title: `foo1`, + priority: 10, + }) + expect(issueStream.shapeHandle).not.toBe(originalShapeHandle) + } else { + // should get the second row as well with the new shape handle + expect(msg.value).toEqual({ + id: rowId2, + title: `foo2`, + priority: 10, + }) + expect(issueStream.shapeHandle).not.toBe(originalShapeHandle) + } + break + default: + expect.unreachable(`Received more messages than expected`) + } + }) + } + ) }) diff --git a/patches/@microsoft__fetch-event-source.patch b/patches/@microsoft__fetch-event-source.patch new file mode 100644 index 0000000000..2eae7c4855 --- /dev/null +++ b/patches/@microsoft__fetch-event-source.patch @@ -0,0 +1,124 @@ +diff --git a/lib/cjs/fetch.js b/lib/cjs/fetch.js +index ab40f1eeff0ec0a30043e45478f81cd1dc845adb..47be859185ab0276954300f06b1c74eafae62ba7 100644 +--- a/lib/cjs/fetch.js ++++ b/lib/cjs/fetch.js +@@ -26,31 +26,33 @@ function fetchEventSource(input, _a) { + let curRequestController; + function onVisibilityChange() { + curRequestController.abort(); +- if (!document.hidden) { ++ if (typeof document !== 'undefined' && !document.hidden) { + create(); + } + } +- if (!openWhenHidden) { ++ if (typeof document !== 'undefined' && !openWhenHidden) { + document.addEventListener('visibilitychange', onVisibilityChange); + } + let retryInterval = DefaultRetryInterval; + let retryTimer = 0; + function dispose() { +- document.removeEventListener('visibilitychange', onVisibilityChange); +- window.clearTimeout(retryTimer); ++ if (typeof document !== 'undefined') { ++ document.removeEventListener('visibilitychange', onVisibilityChange); ++ } ++ clearTimeout(retryTimer); + curRequestController.abort(); + } + inputSignal === null || inputSignal === void 0 ? void 0 : inputSignal.addEventListener('abort', () => { + dispose(); +- resolve(); + }); + const fetch = inputFetch !== null && inputFetch !== void 0 ? inputFetch : window.fetch; + const onopen = inputOnOpen !== null && inputOnOpen !== void 0 ? inputOnOpen : defaultOnOpen; + async function create() { + var _a; + curRequestController = new AbortController(); ++ const sig = inputSignal.aborted ? 
inputSignal : curRequestController.signal + try { +- const response = await fetch(input, Object.assign(Object.assign({}, rest), { headers, signal: curRequestController.signal })); ++ const response = await fetch(input, Object.assign(Object.assign({}, rest), { headers, signal: sig })); + await onopen(response); + await parse_1.getBytes(response.body, parse_1.getLines(parse_1.getMessages(id => { + if (id) { +@@ -67,11 +69,14 @@ function fetchEventSource(input, _a) { + resolve(); + } + catch (err) { +- if (!curRequestController.signal.aborted) { ++ if (sig.aborted) { ++ dispose(); ++ reject(err); ++ } else if (!curRequestController.signal.aborted) { + try { + const interval = (_a = onerror === null || onerror === void 0 ? void 0 : onerror(err)) !== null && _a !== void 0 ? _a : retryInterval; +- window.clearTimeout(retryTimer); +- retryTimer = window.setTimeout(create, interval); ++ clearTimeout(retryTimer); ++ retryTimer = setTimeout(create, interval); + } + catch (innerErr) { + dispose(); +diff --git a/lib/esm/fetch.js b/lib/esm/fetch.js +index 31eb2278da53ba79d9fc78ea32d69f2d15f325ff..5c56bd9909f89156e4176b14f07c4e35edf91220 100644 +--- a/lib/esm/fetch.js ++++ b/lib/esm/fetch.js +@@ -23,31 +23,33 @@ export function fetchEventSource(input, _a) { + let curRequestController; + function onVisibilityChange() { + curRequestController.abort(); +- if (!document.hidden) { ++ if (typeof document !== 'undefined' && !document.hidden) { + create(); + } + } +- if (!openWhenHidden) { ++ if (typeof document !== 'undefined' && !openWhenHidden) { + document.addEventListener('visibilitychange', onVisibilityChange); + } + let retryInterval = DefaultRetryInterval; + let retryTimer = 0; + function dispose() { +- document.removeEventListener('visibilitychange', onVisibilityChange); +- window.clearTimeout(retryTimer); ++ if (typeof document !== 'undefined') { ++ document.removeEventListener('visibilitychange', onVisibilityChange); ++ } ++ clearTimeout(retryTimer); + curRequestController.abort(); + } + inputSignal === null || inputSignal === void 0 ? void 0 : inputSignal.addEventListener('abort', () => { + dispose(); +- resolve(); + }); + const fetch = inputFetch !== null && inputFetch !== void 0 ? inputFetch : window.fetch; + const onopen = inputOnOpen !== null && inputOnOpen !== void 0 ? inputOnOpen : defaultOnOpen; + async function create() { + var _a; + curRequestController = new AbortController(); ++ const sig = inputSignal.aborted ? inputSignal : curRequestController.signal + try { +- const response = await fetch(input, Object.assign(Object.assign({}, rest), { headers, signal: curRequestController.signal })); ++ const response = await fetch(input, Object.assign(Object.assign({}, rest), { headers, signal: sig })); + await onopen(response); + await getBytes(response.body, getLines(getMessages(id => { + if (id) { +@@ -64,11 +66,14 @@ export function fetchEventSource(input, _a) { + resolve(); + } + catch (err) { +- if (!curRequestController.signal.aborted) { ++ if (sig.aborted) { ++ dispose(); ++ reject(err); ++ } else if (!curRequestController.signal.aborted) { + try { + const interval = (_a = onerror === null || onerror === void 0 ? void 0 : onerror(err)) !== null && _a !== void 0 ? 
_a : retryInterval; +- window.clearTimeout(retryTimer); +- retryTimer = window.setTimeout(create, interval); ++ clearTimeout(retryTimer); ++ retryTimer = setTimeout(create, interval); + } + catch (innerErr) { + dispose(); diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 715927469e..9562efc145 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -4,6 +4,11 @@ settings: autoInstallPeers: true excludeLinksFromLockfile: false +patchedDependencies: + '@microsoft/fetch-event-source': + hash: lgwcujj3mimdfutlwueisfm32u + path: patches/@microsoft__fetch-event-source.patch + importers: .: @@ -1387,6 +1392,10 @@ importers: packages/sync-service: {} packages/typescript-client: + dependencies: + '@microsoft/fetch-event-source': + specifier: ^2.0.1 + version: 2.0.1(patch_hash=lgwcujj3mimdfutlwueisfm32u) optionalDependencies: '@rollup/rollup-darwin-arm64': specifier: ^4.18.1 @@ -1406,7 +1415,7 @@ importers: version: 7.18.0(eslint@8.57.1)(typescript@5.6.3) '@vitest/coverage-istanbul': specifier: 2.1.4 - version: 2.1.4(vitest@2.1.4(@types/node@20.17.6)(jsdom@25.0.1)(terser@5.36.0)) + version: 2.1.4(vitest@3.2.4(@types/debug@4.1.12)(@types/node@20.17.6)(jsdom@25.0.1)(terser@5.36.0)) cache-control-parser: specifier: ^2.0.6 version: 2.0.6 @@ -1444,8 +1453,8 @@ importers: specifier: ^10.0.0 version: 10.0.0 vitest: - specifier: ^2.0.2 - version: 2.1.4(@types/node@20.17.6)(jsdom@25.0.1)(terser@5.36.0) + specifier: ^3.0.0 + version: 3.2.4(@types/debug@4.1.12)(@types/node@20.17.6)(jsdom@25.0.1)(terser@5.36.0) packages/y-electric: dependencies: @@ -3256,6 +3265,9 @@ packages: '@mdx-js/mdx@2.3.0': resolution: {integrity: sha512-jLuwRlz8DQfQNiUCJR50Y09CGPq3fLtmtUQfVrj79E0JWu3dvsVcxVIcfhR5h0iXu+/z++zDrYeiJqifRynJkA==} + '@microsoft/fetch-event-source@2.0.1': + resolution: {integrity: sha512-W6CLUJ2eBMw3Rec70qrsEW0jOm/3twwJv21mrmj2yORiaVmVYGS4sSS5yUwvQc1ZlDLYGPnClVWmUUMagKNsfA==} + '@next/env@14.2.17': resolution: {integrity: sha512-MCgO7VHxXo8sYR/0z+sk9fGyJJU636JyRmkjc7ZJY8Hurl8df35qG5hoAh5KMs75FLjhlEo9bb2LGe89Y/scDA==} @@ -4635,6 +4647,9 @@ packages: '@types/body-parser@1.19.5': resolution: {integrity: sha512-fB3Zu92ucau0iQ0JMCFQE7b/dv8Ot07NI3KaZIkIUNXq82k4eBAqUaneXfleGY9JWskeS9y+u0nXMyspcuQrCg==} + '@types/chai@5.2.2': + resolution: {integrity: sha512-8kB30R7Hwqf40JPiKhVzodJs2Qc1ZJ5zuT3uzw5Hq/dhNCl3G3l83jfpdI1e20BP348+fV7VIL/+FxaXkqBmWg==} + '@types/connect@3.4.38': resolution: {integrity: sha512-K6uROf1LD88uDQqJCktA4yzL1YYAK6NgfsI0v/mTgyPKWsX1CnJ0XPSDhViejru1GcRkLWb8RlzFYJRqGUbaug==} @@ -4644,6 +4659,9 @@ packages: '@types/debug@4.1.12': resolution: {integrity: sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ==} + '@types/deep-eql@4.0.2': + resolution: {integrity: sha512-c9h9dVVMigMPc4bwTvC5dxqtqJZwQPePsWjPlpSOnojbor6pGqdk541lfA7AqFQr5pB1BRdq0juY9db81BwyFw==} + '@types/estree-jsx@1.0.5': resolution: {integrity: sha512-52CcUVNFyfb1A2ALocQw/Dd1BQFNmSdkuC3BkZ6iqhdMfQz7JWOFRuJFloOzjk+6WijU56m9oKXFAXc7o3Towg==} @@ -5029,6 +5047,9 @@ packages: '@vitest/expect@2.1.4': resolution: {integrity: sha512-DOETT0Oh1avie/D/o2sgMHGrzYUFFo3zqESB2Hn70z6QB1HrS2IQ9z5DfyTqU8sg4Bpu13zZe9V4+UTNQlUeQA==} + '@vitest/expect@3.2.4': + resolution: {integrity: sha512-Io0yyORnB6sikFlt8QW5K7slY4OjqNX9jmJQ02QDda8lyM6B5oNgVWoSoKPac8/kgnCUzuHQKrSLtu/uOqqrig==} + '@vitest/mocker@2.1.4': resolution: {integrity: sha512-Ky/O1Lc0QBbutJdW0rqLeFNbuLEyS+mIPiNdlVlp2/yhJ0SbyYqObS5IHdhferJud8MbbwMnexg4jordE5cCoQ==} peerDependencies: @@ -5040,21 +5061,47 @@ packages: vite: optional: true + '@vitest/mocker@3.2.4': + 
resolution: {integrity: sha512-46ryTE9RZO/rfDd7pEqFl7etuyzekzEhUbTW3BvmeO/BcCMEgq59BKhek3dXDWgAj4oMK6OZi+vRr1wPW6qjEQ==} + peerDependencies: + msw: ^2.4.9 + vite: ^5.0.0 || ^6.0.0 || ^7.0.0-0 + peerDependenciesMeta: + msw: + optional: true + vite: + optional: true + '@vitest/pretty-format@2.1.4': resolution: {integrity: sha512-L95zIAkEuTDbUX1IsjRl+vyBSLh3PwLLgKpghl37aCK9Jvw0iP+wKwIFhfjdUtA2myLgjrG6VU6JCFLv8q/3Ww==} + '@vitest/pretty-format@3.2.4': + resolution: {integrity: sha512-IVNZik8IVRJRTr9fxlitMKeJeXFFFN0JaB9PHPGQ8NKQbGpfjlTx9zO4RefN8gp7eqjNy8nyK3NZmBzOPeIxtA==} + '@vitest/runner@2.1.4': resolution: {integrity: sha512-sKRautINI9XICAMl2bjxQM8VfCMTB0EbsBc/EDFA57V6UQevEKY/TOPOF5nzcvCALltiLfXWbq4MaAwWx/YxIA==} + '@vitest/runner@3.2.4': + resolution: {integrity: sha512-oukfKT9Mk41LreEW09vt45f8wx7DordoWUZMYdY/cyAk7w5TWkTRCNZYF7sX7n2wB7jyGAl74OxgwhPgKaqDMQ==} + '@vitest/snapshot@2.1.4': resolution: {integrity: sha512-3Kab14fn/5QZRog5BPj6Rs8dc4B+mim27XaKWFWHWA87R56AKjHTGcBFKpvZKDzC4u5Wd0w/qKsUIio3KzWW4Q==} + '@vitest/snapshot@3.2.4': + resolution: {integrity: sha512-dEYtS7qQP2CjU27QBC5oUOxLE/v5eLkGqPE0ZKEIDGMs4vKWe7IjgLOeauHsR0D5YuuycGRO5oSRXnwnmA78fQ==} + '@vitest/spy@2.1.4': resolution: {integrity: sha512-4JOxa+UAizJgpZfaCPKK2smq9d8mmjZVPMt2kOsg/R8QkoRzydHH1qHxIYNvr1zlEaFj4SXiaaJWxq/LPLKaLg==} + '@vitest/spy@3.2.4': + resolution: {integrity: sha512-vAfasCOe6AIK70iP5UD11Ac4siNUNJ9i/9PZ3NKx07sG6sUxeag1LWdNrMWeKKYBLlzuK+Gn65Yd5nyL6ds+nw==} + '@vitest/utils@2.1.4': resolution: {integrity: sha512-MXDnZn0Awl2S86PSNIim5PWXgIAx8CIkzu35mBdSApUip6RFOGXBCf3YFyeEu8n1IHk4bWD46DeYFu9mQlFIRg==} + '@vitest/utils@3.2.4': + resolution: {integrity: sha512-fB2V0JFrQSMsCo9HiSq3Ezpdv4iYaXRG1Sx8edX3MwxfyNn83mKiGzOcH+Fkxt4MHxr3y42fQi1oeAInqgX2QA==} + '@vue/compiler-core@3.5.12': resolution: {integrity: sha512-ISyBTRMmMYagUxhcpyEH0hpXRd/KqDU4ymofPgl2XAkY9ZhQ+h0ovEZJIiPop13UmR/54oA2cgMDjgroRelaEw==} @@ -5449,6 +5496,10 @@ packages: resolution: {integrity: sha512-aGtmf24DW6MLHHG5gCx4zaI3uBq3KRtxeVs0DjFH6Z0rDNbsvTxFASFvdj79pxjxZ8/5u3PIiN3IwEIQkiiuPw==} engines: {node: '>=12'} + chai@5.2.0: + resolution: {integrity: sha512-mCuXncKXk5iCLhfhwTc0izo0gtEmpz5CtG2y8GiOINBlMVS6v8TMRc5TaLWKS6692m9+dVVfzgeVxR5UxWHTYw==} + engines: {node: '>=12'} + chalk@4.1.2: resolution: {integrity: sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==} engines: {node: '>=10'} @@ -5784,6 +5835,15 @@ packages: supports-color: optional: true + debug@4.4.1: + resolution: {integrity: sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ==} + engines: {node: '>=6.0'} + peerDependencies: + supports-color: '*' + peerDependenciesMeta: + supports-color: + optional: true + decimal.js@10.4.3: resolution: {integrity: sha512-VBBaLc1MgL5XpzgIP7ny5Z6Nx3UrRkIViUkPUdtl9aya5amy3De1gsUUSB1g3+3sExYNjCAsAznmukyxCb1GRA==} @@ -6004,6 +6064,9 @@ packages: es-module-lexer@1.5.4: resolution: {integrity: sha512-MVNK56NiMrOwitFB7cqDwq0CQutbw+0BvLshJSse0MUNU+y1FC3bUS/AQg7oUng+/wKrrki7JfmwtVHkVfPLlw==} + es-module-lexer@1.7.0: + resolution: {integrity: sha512-jEQoCwk8hyb2AZziIOLhDqpm5+2ww5uIE6lkO/6jcOCusfk6LhMHpXXfBLXTZ7Ydyt0j4VoUQv6uGNYbdW+kBA==} + es-object-atoms@1.0.0: resolution: {integrity: sha512-MZ4iQ6JwHOBQjahnjwaC1ZtIBH+2ohjamzAO3oaHcXYup7qxjF2fixyH+Q71voWHeOkI2q/TnJao/KfXYIZWbw==} engines: {node: '>= 0.4'} @@ -6395,6 +6458,10 @@ packages: resolution: {integrity: sha512-bFi65yM+xZgk+u/KRIpekdSYkTB5W1pEf0Lt8Q8Msh7b+eQ7LXVtIB1Bkm4fvclDEL1b2CZkMhv2mOeF8tMdkA==} engines: {node: 
'>=12.0.0'} + expect-type@1.2.1: + resolution: {integrity: sha512-/kP8CAwxzLVEeFrMm4kMmy4CCDlpipyA7MYLVrdJIkV0fYF0UaigQHRsxHiuY/GEea+bh4KSv3TIlgr+2UL6bw==} + engines: {node: '>=12.0.0'} + expect@29.7.0: resolution: {integrity: sha512-2Zks0hf1VLFYI1kbh0I5jP3KHHyCHpkfyHBzsSXRFgl/Bg9mWYfMW8oD+PdMPlEwy5HNsR9JutYy6pMeOh61nw==} engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} @@ -6449,6 +6516,14 @@ packages: picomatch: optional: true + fdir@6.4.6: + resolution: {integrity: sha512-hiFoqpyZcfNm1yc4u8oWCf9A2c4D3QjCrks3zmoVKVxpQRzmPNar1hUJcBG2RQHvEVGDN+Jm81ZheVLAQMK6+w==} + peerDependencies: + picomatch: ^3 || ^4 + peerDependenciesMeta: + picomatch: + optional: true + fetch-blob@3.2.0: resolution: {integrity: sha512-7yAQpD2UMJzLi1Dqv7qFYnPbaPx7ZfFK6PiIxQ4PfkGPyNyl2Ugx+a/umUonmKqjhM4DnfbMvdX6otXq83soQQ==} engines: {node: ^12.20 || >= 14.13} @@ -7149,6 +7224,9 @@ packages: js-tokens@4.0.0: resolution: {integrity: sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==} + js-tokens@9.0.1: + resolution: {integrity: sha512-mxa9E9ITFOt0ban3j6L5MpjwegGz6lBQmM1IJkWeBZGcMxto50+eWdjC/52xDbS2vy0k7vIMK0Fe2wfL9OQSpQ==} + js-yaml@3.14.1: resolution: {integrity: sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==} hasBin: true @@ -7345,6 +7423,9 @@ packages: loupe@3.1.2: resolution: {integrity: sha512-23I4pFZHmAemUnz8WZXbYRSKYj801VDaNv9ETuMh7IrMc7VuVVSo+Z9iLE3ni30+U48iDWfi30d3twAXBYmnCg==} + loupe@3.1.4: + resolution: {integrity: sha512-wJzkKwJrheKtknCOKNEtDK4iqg/MxmZheEMtSTYvnzRdEYaZzmgH976nenp8WdJRdx5Vc1X/9MO0Oszl6ezeXg==} + lower-case@2.0.2: resolution: {integrity: sha512-7fm3l3NAF9WfN6W3JOmf5drwpVqX78JtoGJ3A6W0a6ZnldM41w2fV5D490psKFTpMds8TJse/eHLFFsNHHjHgg==} @@ -7375,6 +7456,9 @@ packages: magic-string@0.30.12: resolution: {integrity: sha512-Ea8I3sQMVXr8JhN4z+H/d8zwo+tYDgHE9+5G4Wnrwhs0gaK9fXTKx0Tw5Xwsd/bCPTTZNRAdpyzvoeORe9LYpw==} + magic-string@0.30.17: + resolution: {integrity: sha512-sNPKHvyjVf7gyjwS4xGTaW/mCnF8wnjtifKBEhxfZ7E/S8tQ0rssrwGNn6q8JH/ohItJfSQp9mBtQYuTlH5QnA==} + magicast@0.3.5: resolution: {integrity: sha512-L0WhttDl+2BOsybvEOLK7fW3UA0OQ0IQ2d6Zl2x/a6vVRs3bAY0ECOSHHeL5jD+SbOpOCUEi0y1DgHEn9Qn1AQ==} @@ -8078,6 +8162,9 @@ packages: pathe@1.1.2: resolution: {integrity: sha512-whLdWMYL2TwI08hn8/ZqAbrVemu0LNaNNJZX73O6qaIdCTfXutsLhMkjdENX0qhsQ9uIimo4/aQOmXkoon2nDQ==} + pathe@2.0.3: + resolution: {integrity: sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w==} + pathval@2.0.0: resolution: {integrity: sha512-vE7JKRyES09KiunauX7nd2Q9/L7lhok4smP9RZTDeD4MVs72Dp2qNFVz39Nz5a0FVEW0BJR6C0DYrq6unoziZA==} engines: {node: '>= 14.16'} @@ -9201,6 +9288,9 @@ packages: std-env@3.7.0: resolution: {integrity: sha512-JPbdCEQLj1w5GilpiHAx3qJvFndqybBysA3qUOnznweH4QbNYUsW/ea8QzSrnh0vNsezMMw5bcVool8lM0gwzg==} + std-env@3.9.0: + resolution: {integrity: sha512-UGvjygr6F6tpH7o2qyqR6QYpwraIjKSdtzyBdyytFOHmPZY917kwdwLG0RbOjWOnKmnm3PeHjaoLLMie7kPLQw==} + stickyfill@1.1.1: resolution: {integrity: sha512-GCp7vHAfpao+Qh/3Flh9DXEJ/qSi0KJwJw6zYlZOtRYXWUIpMM6mC2rIep/dK8RQqwW0KxGJIllmjPIBOGN8AA==} @@ -9284,6 +9374,9 @@ packages: resolution: {integrity: sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==} engines: {node: '>=8'} + strip-literal@3.0.0: + resolution: {integrity: sha512-TcccoMhJOM3OebGhSBEmp3UZ2SfDMZUEBdRA/9ynfLi8yYajyWX3JiXArcJt4Umh4vISpspkQIY8ZZoCqjbviA==} + style-mod@4.1.2: resolution: {integrity: 
sha512-wnD1HyVqpJUI2+eKZ+eo1UwghftP6yuFheBqqe+bWCotBjC2K1YnteJILRMs3SM4V/0dLEW1SC27MWP5y+mwmw==} @@ -9428,22 +9521,41 @@ packages: tinyexec@0.3.1: resolution: {integrity: sha512-WiCJLEECkO18gwqIp6+hJg0//p23HXp4S+gGtAKu3mI2F2/sXC4FvHvXvB0zJVVaTPhx1/tOwdbRsa1sOBIKqQ==} + tinyexec@0.3.2: + resolution: {integrity: sha512-KQQR9yN7R5+OSwaK0XQoj22pwHoTlgYqmUscPYoknOoWCWfj/5/ABTMRi69FrKU5ffPVh5QcFikpWJI/P1ocHA==} + tinyglobby@0.2.10: resolution: {integrity: sha512-Zc+8eJlFMvgatPZTl6A9L/yht8QqdmUNtURHaKZLmKBE12hNPSrqNkUp2cs3M/UKmNVVAMFQYSjYIVHDjW5zew==} engines: {node: '>=12.0.0'} + tinyglobby@0.2.14: + resolution: {integrity: sha512-tX5e7OM1HnYr2+a2C/4V0htOcSQcoSTH9KgJnVvNm5zm/cyEWKJ7j7YutsH9CxMdtOkkLFy2AHrMci9IM8IPZQ==} + engines: {node: '>=12.0.0'} + tinypool@1.0.1: resolution: {integrity: sha512-URZYihUbRPcGv95En+sz6MfghfIc2OJ1sv/RmhWZLouPY0/8Vo80viwPvg3dlaS9fuq7fQMEfgRRK7BBZThBEA==} engines: {node: ^18.0.0 || >=20.0.0} + tinypool@1.1.1: + resolution: {integrity: sha512-Zba82s87IFq9A9XmjiX5uZA/ARWDrB03OHlq+Vw1fSdt0I+4/Kutwy8BP4Y/y/aORMo61FQ0vIb5j44vSo5Pkg==} + engines: {node: ^18.0.0 || >=20.0.0} + tinyrainbow@1.2.0: resolution: {integrity: sha512-weEDEq7Z5eTHPDh4xjX789+fHfF+P8boiFB+0vbWzpbnbsEr/GRaohi/uMKxg8RZMXnl1ItAi/IUHWMsjDV7kQ==} engines: {node: '>=14.0.0'} + tinyrainbow@2.0.0: + resolution: {integrity: sha512-op4nsTR47R6p0vMUUoYl/a+ljLFVtlfaXkLQmqfLR1qHma1h/ysYk4hEXZ880bf2CYgTskvTa/e196Vd5dDQXw==} + engines: {node: '>=14.0.0'} + tinyspy@3.0.2: resolution: {integrity: sha512-n1cw8k1k0x4pgA2+9XrOkFydTerNcJ1zWCO5Nn9scWHTD+5tp8dghT2x1uduQePZTZgd3Tupf+x9BxJjeJi77Q==} engines: {node: '>=14.0.0'} + tinyspy@4.0.3: + resolution: {integrity: sha512-t2T/WLB2WRgZ9EpE4jgPJ9w+i66UZfDc8wHh0xrwiRNN+UwH98GIJkTeZqX9rg0i0ptwzqW+uYeIF0T4F8LR7A==} + engines: {node: '>=14.0.0'} + tippy.js@6.3.7: resolution: {integrity: sha512-E1d3oP2emgJ9dRQZdf3Kkn0qJgI6ZLpyS5z6ZkY1DF3kaQaBsGZsndEpHwx+eC+tYM41HaSNvNtLx8tU57FzTQ==} @@ -9834,6 +9946,11 @@ packages: engines: {node: ^18.0.0 || >=20.0.0} hasBin: true + vite-node@3.2.4: + resolution: {integrity: sha512-EbKSKh+bh1E1IFxeO0pg1n4dvoOTt0UDiXMd/qn++r98+jPO1xtJilvXldeuQ8giIB5IkpjCgMleHMNEsGH6pg==} + engines: {node: ^18.0.0 || ^20.0.0 || >=22.0.0} + hasBin: true + vite-plugin-pwa@0.21.0: resolution: {integrity: sha512-gnDE5sN2hdxA4vTl0pe6PCTPXqChk175jH8dZVVTBjFhWarZZoXaAdoTIKCIa8Zbx94sC0CnCOyERBWpxvry+g==} engines: {node: '>=16.0.0'} @@ -9969,6 +10086,34 @@ packages: jsdom: optional: true + vitest@3.2.4: + resolution: {integrity: sha512-LUCP5ev3GURDysTWiP47wRRUpLKMOfPh+yKTx3kVIEiu5KOMeqzpnYNsKyOoVrULivR8tLcks4+lga33Whn90A==} + engines: {node: ^18.0.0 || ^20.0.0 || >=22.0.0} + hasBin: true + peerDependencies: + '@edge-runtime/vm': '*' + '@types/debug': ^4.1.12 + '@types/node': ^18.0.0 || ^20.0.0 || >=22.0.0 + '@vitest/browser': 3.2.4 + '@vitest/ui': 3.2.4 + happy-dom: '*' + jsdom: '*' + peerDependenciesMeta: + '@edge-runtime/vm': + optional: true + '@types/debug': + optional: true + '@types/node': + optional: true + '@vitest/browser': + optional: true + '@vitest/ui': + optional: true + happy-dom: + optional: true + jsdom: + optional: true + vue-demi@0.14.10: resolution: {integrity: sha512-nMZBOwuzabUO0nLgIcc6rycZEebF6eeUfaiQx9+WSk8e29IbLvPU9feI6tqW4kTo3hvoYAJkMh8n8D0fuISphg==} engines: {node: '>=12'} @@ -12202,6 +12347,8 @@ snapshots: transitivePeerDependencies: - supports-color + '@microsoft/fetch-event-source@2.0.1(patch_hash=lgwcujj3mimdfutlwueisfm32u)': {} + '@next/env@14.2.17': {} '@next/swc-darwin-arm64@14.2.17': @@ -13745,6 +13892,10 @@ snapshots: '@types/connect': 
3.4.38 '@types/node': 20.17.6 + '@types/chai@5.2.2': + dependencies: + '@types/deep-eql': 4.0.2 + '@types/connect@3.4.38': dependencies: '@types/node': 20.17.6 @@ -13755,6 +13906,8 @@ snapshots: dependencies: '@types/ms': 0.7.34 + '@types/deep-eql@4.0.2': {} + '@types/estree-jsx@1.0.5': dependencies: '@types/estree': 1.0.6 @@ -14328,6 +14481,22 @@ snapshots: transitivePeerDependencies: - supports-color + '@vitest/coverage-istanbul@2.1.4(vitest@3.2.4(@types/debug@4.1.12)(@types/node@20.17.6)(jsdom@25.0.1)(terser@5.36.0))': + dependencies: + '@istanbuljs/schema': 0.1.3 + debug: 4.4.0 + istanbul-lib-coverage: 3.2.2 + istanbul-lib-instrument: 6.0.3 + istanbul-lib-report: 3.0.1 + istanbul-lib-source-maps: 5.0.6 + istanbul-reports: 3.1.7 + magicast: 0.3.5 + test-exclude: 7.0.1 + tinyrainbow: 1.2.0 + vitest: 3.2.4(@types/debug@4.1.12)(@types/node@20.17.6)(jsdom@25.0.1)(terser@5.36.0) + transitivePeerDependencies: + - supports-color + '@vitest/expect@2.1.4': dependencies: '@vitest/spy': 2.1.4 @@ -14335,6 +14504,14 @@ snapshots: chai: 5.1.2 tinyrainbow: 1.2.0 + '@vitest/expect@3.2.4': + dependencies: + '@types/chai': 5.2.2 + '@vitest/spy': 3.2.4 + '@vitest/utils': 3.2.4 + chai: 5.2.0 + tinyrainbow: 2.0.0 + '@vitest/mocker@2.1.4(vite@5.4.10(@types/node@18.19.99)(terser@5.36.0))': dependencies: '@vitest/spy': 2.1.4 @@ -14351,31 +14528,65 @@ snapshots: optionalDependencies: vite: 5.4.10(@types/node@20.17.6)(terser@5.36.0) + '@vitest/mocker@3.2.4(vite@5.4.10(@types/node@20.17.6)(terser@5.36.0))': + dependencies: + '@vitest/spy': 3.2.4 + estree-walker: 3.0.3 + magic-string: 0.30.17 + optionalDependencies: + vite: 5.4.10(@types/node@20.17.6)(terser@5.36.0) + '@vitest/pretty-format@2.1.4': dependencies: tinyrainbow: 1.2.0 + '@vitest/pretty-format@3.2.4': + dependencies: + tinyrainbow: 2.0.0 + '@vitest/runner@2.1.4': dependencies: '@vitest/utils': 2.1.4 pathe: 1.1.2 + '@vitest/runner@3.2.4': + dependencies: + '@vitest/utils': 3.2.4 + pathe: 2.0.3 + strip-literal: 3.0.0 + '@vitest/snapshot@2.1.4': dependencies: '@vitest/pretty-format': 2.1.4 magic-string: 0.30.12 pathe: 1.1.2 + '@vitest/snapshot@3.2.4': + dependencies: + '@vitest/pretty-format': 3.2.4 + magic-string: 0.30.17 + pathe: 2.0.3 + '@vitest/spy@2.1.4': dependencies: tinyspy: 3.0.2 + '@vitest/spy@3.2.4': + dependencies: + tinyspy: 4.0.3 + '@vitest/utils@2.1.4': dependencies: '@vitest/pretty-format': 2.1.4 loupe: 3.1.2 tinyrainbow: 1.2.0 + '@vitest/utils@3.2.4': + dependencies: + '@vitest/pretty-format': 3.2.4 + loupe: 3.1.4 + tinyrainbow: 2.0.0 + '@vue/compiler-core@3.5.12': dependencies: '@babel/parser': 7.26.2 @@ -14828,6 +15039,14 @@ snapshots: loupe: 3.1.2 pathval: 2.0.0 + chai@5.2.0: + dependencies: + assertion-error: 2.0.1 + check-error: 2.1.1 + deep-eql: 5.0.2 + loupe: 3.1.2 + pathval: 2.0.0 + chalk@4.1.2: dependencies: ansi-styles: 4.3.0 @@ -15162,6 +15381,10 @@ snapshots: dependencies: ms: 2.1.3 + debug@4.4.1: + dependencies: + ms: 2.1.3 + decimal.js@10.4.3: {} decko@1.2.0: {} @@ -15449,6 +15672,8 @@ snapshots: es-module-lexer@1.5.4: {} + es-module-lexer@1.7.0: {} + es-object-atoms@1.0.0: dependencies: es-errors: 1.3.0 @@ -15995,6 +16220,8 @@ snapshots: expect-type@1.1.0: {} + expect-type@1.2.1: {} + expect@29.7.0: dependencies: '@jest/expect-utils': 29.7.0 @@ -16081,6 +16308,10 @@ snapshots: optionalDependencies: picomatch: 4.0.2 + fdir@6.4.6(picomatch@4.0.2): + optionalDependencies: + picomatch: 4.0.2 + fetch-blob@3.2.0: dependencies: node-domexception: 1.0.0 @@ -16819,6 +17050,8 @@ snapshots: js-tokens@4.0.0: {} + js-tokens@9.0.1: {} + 
js-yaml@3.14.1: dependencies: argparse: 1.0.10 @@ -17010,6 +17243,8 @@ snapshots: loupe@3.1.2: {} + loupe@3.1.4: {} + lower-case@2.0.2: dependencies: tslib: 2.8.1 @@ -17038,6 +17273,10 @@ snapshots: dependencies: '@jridgewell/sourcemap-codec': 1.5.0 + magic-string@0.30.17: + dependencies: + '@jridgewell/sourcemap-codec': 1.5.0 + magicast@0.3.5: dependencies: '@babel/parser': 7.26.2 @@ -17937,6 +18176,8 @@ snapshots: pathe@1.1.2: {} + pathe@2.0.3: {} + pathval@2.0.0: {} peek-stream@1.1.3: @@ -19153,6 +19394,8 @@ snapshots: std-env@3.7.0: {} + std-env@3.9.0: {} + stickyfill@1.1.1: {} stop-iteration-iterator@1.0.0: @@ -19250,6 +19493,10 @@ snapshots: strip-json-comments@3.1.1: {} + strip-literal@3.0.0: + dependencies: + js-tokens: 9.0.1 + style-mod@4.1.2: {} style-to-object@0.4.4: @@ -19456,17 +19703,30 @@ snapshots: tinyexec@0.3.1: {} + tinyexec@0.3.2: {} + tinyglobby@0.2.10: dependencies: fdir: 6.4.2(picomatch@4.0.2) picomatch: 4.0.2 + tinyglobby@0.2.14: + dependencies: + fdir: 6.4.6(picomatch@4.0.2) + picomatch: 4.0.2 + tinypool@1.0.1: {} + tinypool@1.1.1: {} + tinyrainbow@1.2.0: {} + tinyrainbow@2.0.0: {} + tinyspy@3.0.2: {} + tinyspy@4.0.3: {} + tippy.js@6.3.7: dependencies: '@popperjs/core': 2.11.8 @@ -19908,6 +20168,24 @@ snapshots: - supports-color - terser + vite-node@3.2.4(@types/node@20.17.6)(terser@5.36.0): + dependencies: + cac: 6.7.14 + debug: 4.4.1 + es-module-lexer: 1.7.0 + pathe: 2.0.3 + vite: 5.4.10(@types/node@20.17.6)(terser@5.36.0) + transitivePeerDependencies: + - '@types/node' + - less + - lightningcss + - sass + - sass-embedded + - stylus + - sugarss + - supports-color + - terser + vite-plugin-pwa@0.21.0(vite@5.4.10(@types/node@20.17.6)(terser@5.36.0))(workbox-build@7.3.0(@types/babel__core@7.20.5))(workbox-window@7.3.0): dependencies: debug: 4.3.7(supports-color@5.5.0) @@ -20107,6 +20385,46 @@ snapshots: - supports-color - terser + vitest@3.2.4(@types/debug@4.1.12)(@types/node@20.17.6)(jsdom@25.0.1)(terser@5.36.0): + dependencies: + '@types/chai': 5.2.2 + '@vitest/expect': 3.2.4 + '@vitest/mocker': 3.2.4(vite@5.4.10(@types/node@20.17.6)(terser@5.36.0)) + '@vitest/pretty-format': 3.2.4 + '@vitest/runner': 3.2.4 + '@vitest/snapshot': 3.2.4 + '@vitest/spy': 3.2.4 + '@vitest/utils': 3.2.4 + chai: 5.2.0 + debug: 4.4.1 + expect-type: 1.2.1 + magic-string: 0.30.17 + pathe: 2.0.3 + picomatch: 4.0.2 + std-env: 3.9.0 + tinybench: 2.9.0 + tinyexec: 0.3.2 + tinyglobby: 0.2.14 + tinypool: 1.1.1 + tinyrainbow: 2.0.0 + vite: 5.4.10(@types/node@20.17.6)(terser@5.36.0) + vite-node: 3.2.4(@types/node@20.17.6)(terser@5.36.0) + why-is-node-running: 2.3.0 + optionalDependencies: + '@types/debug': 4.1.12 + '@types/node': 20.17.6 + jsdom: 25.0.1 + transitivePeerDependencies: + - less + - lightningcss + - msw + - sass + - sass-embedded + - stylus + - sugarss + - supports-color + - terser + vue-demi@0.14.10(vue@3.5.12(typescript@5.7.2)): dependencies: vue: 3.5.12(typescript@5.7.2)