diff --git a/.github/workflows/elixir_tests.yml b/.github/workflows/elixir_tests.yml index 00645f2..7e24b2d 100644 --- a/.github/workflows/elixir_tests.yml +++ b/.github/workflows/elixir_tests.yml @@ -24,14 +24,14 @@ jobs: image: "postgres:17-alpine" env: POSTGRES_PASSWORD: password - POSTGRES_DB: electric + POSTGRES_DB: phoenix_sync options: >- --health-cmd pg_isready --health-interval 10s --health-timeout 5s --health-retries 5 ports: - - 54321:5432 + - 55555:5432 steps: - uses: actions/checkout@v4 @@ -60,7 +60,7 @@ jobs: with: path: | _build/*/lib - !_build/*/lib/electric_phoenix + !_build/*/lib/phoenix_sync key: ${{ runner.os }}-build-${{ env.MIX_ENV }}-[${{ github.ref_name }}]-${{ github.sha }} restore-keys: | ${{ runner.os }}-build-${{ env.MIX_ENV }}-[${{ github.ref_name }}]-${{ github.sha }} @@ -77,9 +77,6 @@ jobs: - name: Run tests run: mix test --trace - - name: Test installation as a dependency - run: mix test.as_a_dep - test-as-dep: name: Test installation as a dependency runs-on: ubuntu-latest @@ -99,6 +96,38 @@ jobs: - name: Test installation as a dependency run: mix test.as_a_dep + test-apps: + name: Test integration apps + runs-on: ubuntu-latest + env: + MIX_ENV: test + services: + postgres: + image: "postgres:17-alpine" + env: + POSTGRES_PASSWORD: password + POSTGRES_DB: phoenix_sync + options: >- + --health-cmd pg_isready + --health-interval 10s + --health-timeout 5s + --health-retries 5 + ports: + - 55555:5432 + steps: + - uses: actions/checkout@v4 + + - uses: erlef/setup-beam@v1 + with: + version-type: strict + version-file: ".tool-versions" + + - name: Install dependencies + run: mix deps.get + + - name: Run tests on apps + run: mix test.apps + formatting: name: mix format --check-formatted runs-on: ubuntu-latest diff --git a/README.md b/README.md index 9eb60dd..e28e423 100644 --- a/README.md +++ b/README.md @@ -349,6 +349,10 @@ children = [ ] ``` +## Testing + +See `Phoenix.Sync.Sandbox` for details on how to test sync endpoints (liveview, controllers and routers) in your Phoenix/Ecto application. + ## Notes ### ElectricSQL diff --git a/apps/phoenix_sync_example/.formatter.exs b/apps/phoenix_sync_example/.formatter.exs new file mode 100644 index 0000000..ef8840c --- /dev/null +++ b/apps/phoenix_sync_example/.formatter.exs @@ -0,0 +1,6 @@ +[ + import_deps: [:ecto, :ecto_sql, :phoenix], + subdirectories: ["priv/*/migrations"], + plugins: [Phoenix.LiveView.HTMLFormatter], + inputs: ["*.{heex,ex,exs}", "{config,lib,test}/**/*.{heex,ex,exs}", "priv/*/seeds.exs"] +] diff --git a/apps/phoenix_sync_example/.gitignore b/apps/phoenix_sync_example/.gitignore new file mode 100644 index 0000000..798e9e3 --- /dev/null +++ b/apps/phoenix_sync_example/.gitignore @@ -0,0 +1,27 @@ +# The directory Mix will write compiled artifacts to. +_build/ + +# If you run "mix test --cover", coverage assets end up here. +cover/ + +# The directory Mix downloads your dependencies sources to. +deps/ + +# Where 3rd-party dependencies like ExDoc output generated docs. +doc/ + +# Ignore .fetch files in case you like to edit your project deps locally. +.fetch + +# If the VM crashes, it generates a dump, let's ignore it too. +erl_crash.dump + +# Also ignore archive artifacts (built via "mix archive.build"). +*.ez + +# Temporary files, for example, from tests. +tmp/ + +# Ignore package tarball (built via "mix hex.build"). 
+phoenix_sync_example-*.tar
+
diff --git a/apps/phoenix_sync_example/README.md b/apps/phoenix_sync_example/README.md
new file mode 100644
index 0000000..20bf439
--- /dev/null
+++ b/apps/phoenix_sync_example/README.md
@@ -0,0 +1,3 @@
+A small Phoenix app for testing behavior of the Phoenix Sync library in-situ.
+
+**Not an official example** -- see the [examples folder for those](../../examples).
diff --git a/apps/phoenix_sync_example/config/config.exs b/apps/phoenix_sync_example/config/config.exs
new file mode 100644
index 0000000..b797c4a
--- /dev/null
+++ b/apps/phoenix_sync_example/config/config.exs
@@ -0,0 +1,35 @@
+# This file is responsible for configuring your application
+# and its dependencies with the aid of the Config module.
+#
+# This configuration file is loaded before any dependency and
+# is restricted to this project.
+
+# General application configuration
+import Config
+
+config :phoenix_sync_example,
+  ecto_repos: [PhoenixSyncExample.Repo],
+  generators: [timestamp_type: :utc_datetime, binary_id: true]
+
+# Configures the endpoint
+config :phoenix_sync_example, PhoenixSyncExampleWeb.Endpoint,
+  url: [host: "localhost"],
+  adapter: Bandit.PhoenixAdapter,
+  render_errors: [
+    formats: [html: PhoenixSyncExampleWeb.ErrorHTML, json: PhoenixSyncExampleWeb.ErrorJSON],
+    layout: false
+  ],
+  pubsub_server: PhoenixSyncExample.PubSub,
+  live_view: [signing_salt: "tLgDbjrX"]
+
+# Configures Elixir's Logger
+config :logger, :console,
+  format: "$time $metadata[$level] $message\n",
+  metadata: [:request_id]
+
+# Use Jason for JSON parsing in Phoenix
+config :phoenix, :json_library, Jason
+
+# Import environment specific config. This must remain at the bottom
+# of this file so it overrides the configuration defined above.
+import_config "#{config_env()}.exs"
diff --git a/apps/phoenix_sync_example/config/dev.exs b/apps/phoenix_sync_example/config/dev.exs
new file mode 100644
index 0000000..4049a23
--- /dev/null
+++ b/apps/phoenix_sync_example/config/dev.exs
@@ -0,0 +1,84 @@
+import Config
+
+# Configure your database
+config :phoenix_sync_example, PhoenixSyncExample.Repo,
+  username: "postgres",
+  password: "password",
+  hostname: "localhost",
+  database: "phoenix_sync_example_dev",
+  port: 55555,
+  stacktrace: true,
+  show_sensitive_data_on_connection_error: true,
+  pool_size: 10
+
+# For development, we disable any cache and enable
+# debugging and code reloading.
+#
+# The watchers configuration can be used to run external
+# watchers to your application. For example, we can use it
+# to bundle .js and .css sources.
+# Binding to loopback ipv4 address prevents access from other machines.
+config :phoenix_sync_example, PhoenixSyncExampleWeb.Endpoint,
+  # Change to `ip: {0, 0, 0, 0}` to allow access from other machines.
+  http: [ip: {127, 0, 0, 1}, port: 4000],
+  check_origin: false,
+  code_reloader: true,
+  debug_errors: true,
+  secret_key_base: "uBmq7NHSNLSxZJ+aqrrggr+2tIiPQh39Xe5c0G13UPk5HWbDF6e7A2EN0BlGL/qd",
+  watchers: []
+
+# ## SSL Support
+#
+# In order to use HTTPS in development, a self-signed
+# certificate can be generated by running the following
+# Mix task:
+#
+#     mix phx.gen.cert
+#
+# Run `mix help phx.gen.cert` for more information.
+#
+# The `http:` config above can be replaced with:
+#
+#     https: [
+#       port: 4001,
+#       cipher_suite: :strong,
+#       keyfile: "priv/cert/selfsigned_key.pem",
+#       certfile: "priv/cert/selfsigned.pem"
+#     ],
+#
+# If desired, both `http:` and `https:` keys can be
+# configured to run both http and https servers on
+# different ports.
+ +# Watch static and templates for browser reloading. +config :phoenix_sync_example, PhoenixSyncExampleWeb.Endpoint, + live_reload: [ + patterns: [ + ~r"priv/static/(?!uploads/).*(js|css|png|jpeg|jpg|gif|svg)$", + ~r"lib/phoenix_sync_example_web/(controllers|live|components)/.*(ex|heex)$" + ] + ] + +# Enable dev routes for dashboard and mailbox +config :phoenix_sync_example, dev_routes: true + +# Do not include metadata nor timestamps in development logs +config :logger, :console, format: "[$level] $message\n" + +# Set a higher stacktrace during development. Avoid configuring such +# in production as building large stacktraces may be expensive. +config :phoenix, :stacktrace_depth, 20 + +# Initialize plugs at runtime for faster development compilation +config :phoenix, :plug_init_mode, :runtime + +config :phoenix_live_view, + # Include HEEx debug annotations as HTML comments in rendered markup + debug_heex_annotations: true, + # Enable helpful, but potentially expensive runtime checks + enable_expensive_runtime_checks: true + +config :phoenix_sync, + env: config_env(), + repo: PhoenixSyncExample.Repo, + mode: :embedded diff --git a/apps/phoenix_sync_example/config/prod.exs b/apps/phoenix_sync_example/config/prod.exs new file mode 100644 index 0000000..1fe2d9e --- /dev/null +++ b/apps/phoenix_sync_example/config/prod.exs @@ -0,0 +1,7 @@ +import Config + +# Do not print debug messages in production +config :logger, level: :info + +# Runtime production configuration, including reading +# of environment variables, is done on config/runtime.exs. diff --git a/apps/phoenix_sync_example/config/runtime.exs b/apps/phoenix_sync_example/config/runtime.exs new file mode 100644 index 0000000..a272415 --- /dev/null +++ b/apps/phoenix_sync_example/config/runtime.exs @@ -0,0 +1,99 @@ +import Config + +# config/runtime.exs is executed for all environments, including +# during releases. It is executed after compilation and before the +# system starts, so it is typically used to load production configuration +# and secrets from environment variables or elsewhere. Do not define +# any compile-time configuration in here, as it won't be applied. +# The block below contains prod specific runtime configuration. + +# ## Using releases +# +# If you use `mix release`, you need to explicitly enable the server +# by passing the PHX_SERVER=true when you start it: +# +# PHX_SERVER=true bin/phoenix_sync_example start +# +# Alternatively, you can use `mix phx.gen.release` to generate a `bin/server` +# script that automatically sets the env var above. +if System.get_env("PHX_SERVER") do + config :phoenix_sync_example, PhoenixSyncExampleWeb.Endpoint, server: true +end + +if config_env() == :prod do + database_url = + System.get_env("DATABASE_URL") || + raise """ + environment variable DATABASE_URL is missing. + For example: ecto://USER:PASS@HOST/DATABASE + """ + + maybe_ipv6 = if System.get_env("ECTO_IPV6") in ~w(true 1), do: [:inet6], else: [] + + config :phoenix_sync_example, PhoenixSyncExample.Repo, + # ssl: true, + url: database_url, + pool_size: String.to_integer(System.get_env("POOL_SIZE") || "10"), + socket_options: maybe_ipv6 + + # The secret key base is used to sign/encrypt cookies and other secrets. + # A default value is used in config/dev.exs and config/test.exs but you + # want to use a different value for prod and you most likely don't want + # to check this value into version control, so we use an environment + # variable instead. 
+ secret_key_base = + System.get_env("SECRET_KEY_BASE") || + raise """ + environment variable SECRET_KEY_BASE is missing. + You can generate one by calling: mix phx.gen.secret + """ + + host = System.get_env("PHX_HOST") || "example.com" + port = String.to_integer(System.get_env("PORT") || "4000") + + config :phoenix_sync_example, :dns_cluster_query, System.get_env("DNS_CLUSTER_QUERY") + + config :phoenix_sync_example, PhoenixSyncExampleWeb.Endpoint, + url: [host: host, port: 443, scheme: "https"], + http: [ + # Enable IPv6 and bind on all interfaces. + # Set it to {0, 0, 0, 0, 0, 0, 0, 1} for local network only access. + # See the documentation on https://hexdocs.pm/bandit/Bandit.html#t:options/0 + # for details about using IPv6 vs IPv4 and loopback vs public addresses. + ip: {0, 0, 0, 0, 0, 0, 0, 0}, + port: port + ], + secret_key_base: secret_key_base + + # ## SSL Support + # + # To get SSL working, you will need to add the `https` key + # to your endpoint configuration: + # + # config :phoenix_sync_example, PhoenixSyncExampleWeb.Endpoint, + # https: [ + # ..., + # port: 443, + # cipher_suite: :strong, + # keyfile: System.get_env("SOME_APP_SSL_KEY_PATH"), + # certfile: System.get_env("SOME_APP_SSL_CERT_PATH") + # ] + # + # The `cipher_suite` is set to `:strong` to support only the + # latest and more secure SSL ciphers. This means old browsers + # and clients may not be supported. You can set it to + # `:compatible` for wider support. + # + # `:keyfile` and `:certfile` expect an absolute path to the key + # and cert in disk or a relative path inside priv, for example + # "priv/ssl/server.key". For all supported SSL configuration + # options, see https://hexdocs.pm/plug/Plug.SSL.html#configure/1 + # + # We also recommend setting `force_ssl` in your config/prod.exs, + # ensuring no data is ever sent via http, always redirecting to https: + # + # config :phoenix_sync_example, PhoenixSyncExampleWeb.Endpoint, + # force_ssl: [hsts: true] + # + # Check `Plug.SSL` for all available options in `force_ssl`. +end diff --git a/apps/phoenix_sync_example/config/test.exs b/apps/phoenix_sync_example/config/test.exs new file mode 100644 index 0000000..4aa53ae --- /dev/null +++ b/apps/phoenix_sync_example/config/test.exs @@ -0,0 +1,36 @@ +import Config + +# Configure your database +# +# The MIX_TEST_PARTITION environment variable can be used +# to provide built-in test partitioning in CI environment. +# Run `mix help test` for more information. +config :phoenix_sync_example, PhoenixSyncExample.Repo, + username: "postgres", + password: "password", + hostname: "localhost", + database: "phoenix_sync_example_test#{System.get_env("MIX_TEST_PARTITION")}", + port: 55555, + pool: Ecto.Adapters.SQL.Sandbox, + pool_size: System.schedulers_online() * 2 + +# We don't run a server during test. If one is required, +# you can enable the server option below. 
+config :phoenix_sync_example, PhoenixSyncExampleWeb.Endpoint, + http: [ip: {127, 0, 0, 1}, port: 4002], + secret_key_base: "JxgUv3Dd0aEmBGFLgfyPpQPTidmD3BaW9NYtWDCnk0Yo3EaPDQgUNyLOXkm3+//h", + server: false + +# Print only warnings and errors during test +config :logger, level: :warning + +# Initialize plugs at runtime for faster test compilation +config :phoenix, :plug_init_mode, :runtime + +# Enable helpful, but potentially expensive runtime checks +config :phoenix_live_view, + enable_expensive_runtime_checks: true + +config :phoenix_sync, + env: config_env(), + mode: :sandbox diff --git a/apps/phoenix_sync_example/lib/phoenix_sync_example.ex b/apps/phoenix_sync_example/lib/phoenix_sync_example.ex new file mode 100644 index 0000000..5524da1 --- /dev/null +++ b/apps/phoenix_sync_example/lib/phoenix_sync_example.ex @@ -0,0 +1,9 @@ +defmodule PhoenixSyncExample do + @moduledoc """ + PhoenixSyncExample keeps the contexts that define your domain + and business logic. + + Contexts are also responsible for managing your data, regardless + if it comes from the database, an external API or others. + """ +end diff --git a/apps/phoenix_sync_example/lib/phoenix_sync_example/application.ex b/apps/phoenix_sync_example/lib/phoenix_sync_example/application.ex new file mode 100644 index 0000000..316dfa7 --- /dev/null +++ b/apps/phoenix_sync_example/lib/phoenix_sync_example/application.ex @@ -0,0 +1,36 @@ +defmodule PhoenixSyncExample.Application do + # See https://hexdocs.pm/elixir/Application.html + # for more information on OTP Applications + @moduledoc false + + use Application + + @impl true + def start(_type, _args) do + children = [ + PhoenixSyncExampleWeb.Telemetry, + PhoenixSyncExample.Repo, + {DNSCluster, + query: Application.get_env(:phoenix_sync_example, :dns_cluster_query) || :ignore}, + {Phoenix.PubSub, name: PhoenixSyncExample.PubSub}, + # Start a worker by calling: PhoenixSyncExample.Worker.start_link(arg) + # {PhoenixSyncExample.Worker, arg}, + # Start to serve requests, typically the last entry + PhoenixSyncExampleWeb.Endpoint + # PhoenixSyncExample.Sync + ] + + # See https://hexdocs.pm/elixir/Supervisor.html + # for other strategies and supported options + opts = [strategy: :one_for_one, name: PhoenixSyncExample.Supervisor] + Supervisor.start_link(children, opts) + end + + # Tell Phoenix to update the endpoint configuration + # whenever the application is updated. + @impl true + def config_change(changed, _new, removed) do + PhoenixSyncExampleWeb.Endpoint.config_change(changed, removed) + :ok + end +end diff --git a/apps/phoenix_sync_example/lib/phoenix_sync_example/cars.ex b/apps/phoenix_sync_example/lib/phoenix_sync_example/cars.ex new file mode 100644 index 0000000..963294c --- /dev/null +++ b/apps/phoenix_sync_example/lib/phoenix_sync_example/cars.ex @@ -0,0 +1,111 @@ +defmodule PhoenixSyncExample.Cars do + @moduledoc """ + The Cars context. + """ + + import Ecto.Query, warn: false + alias PhoenixSyncExample.Repo + + alias PhoenixSyncExample.Cars.Make + alias PhoenixSyncExample.Cars.Model + + @doc """ + Returns the list of cars. + + ## Examples + + iex> list_cars() + [%Model{}, ...] + + """ + def list_cars do + Repo.all(Model) + end + + @doc """ + Gets a single car. + + Raises `Ecto.NoResultsError` if the Model does not exist. + + ## Examples + + iex> get_car!(123) + %Model{} + + iex> get_car!(456) + ** (Ecto.NoResultsError) + + """ + def get_car!(id), do: Repo.get!(Model, id) + + @doc """ + Creates a car. 
+ + ## Examples + + iex> create_car(%{field: value}) + {:ok, %Model{}} + + iex> create_car(%{field: bad_value}) + {:error, %Ecto.Changeset{}} + + """ + def create_car(attrs \\ %{}) do + %Model{} + |> Model.changeset(attrs) + |> Repo.insert() + end + + def create_make(attrs \\ %{}) do + %Make{} + |> Make.changeset(attrs) + |> Repo.insert() + end + + @doc """ + Updates a car. + + ## Examples + + iex> update_car(car, %{field: new_value}) + {:ok, %Model{}} + + iex> update_car(car, %{field: bad_value}) + {:error, %Ecto.Changeset{}} + + """ + def update_car(%Model{} = car, attrs) do + car + |> Model.changeset(attrs) + |> Repo.update() + end + + @doc """ + Deletes a car. + + ## Examples + + iex> delete_car(car) + {:ok, %Model{}} + + iex> delete_car(car) + {:error, %Ecto.Changeset{}} + + """ + def delete_car(%Model{} = car) do + Repo.delete(car) + end + + @doc """ + Returns an `%Ecto.Changeset{}` for tracking car changes. + + ## Examples + + iex> change_car(car) + %Ecto.Changeset{data: %Model{}} + + """ + def change_car(%Model{} = car, attrs \\ %{}) do + Model.changeset(car, attrs) + end +end diff --git a/apps/phoenix_sync_example/lib/phoenix_sync_example/cars/make.ex b/apps/phoenix_sync_example/lib/phoenix_sync_example/cars/make.ex new file mode 100644 index 0000000..c0ae0aa --- /dev/null +++ b/apps/phoenix_sync_example/lib/phoenix_sync_example/cars/make.ex @@ -0,0 +1,21 @@ +defmodule PhoenixSyncExample.Cars.Make do + use Ecto.Schema + import Ecto.Changeset + + @primary_key {:id, :binary_id, autogenerate: true} + @foreign_key_type :binary_id + schema "makes" do + field :name, :string + + has_many :models, PhoenixSyncExample.Cars.Model, on_delete: :delete_all + + timestamps(type: :utc_datetime) + end + + @doc false + def changeset(make, attrs) do + make + |> cast(attrs, [:name]) + |> validate_required([:name]) + end +end diff --git a/apps/phoenix_sync_example/lib/phoenix_sync_example/cars/model.ex b/apps/phoenix_sync_example/lib/phoenix_sync_example/cars/model.ex new file mode 100644 index 0000000..c641029 --- /dev/null +++ b/apps/phoenix_sync_example/lib/phoenix_sync_example/cars/model.ex @@ -0,0 +1,22 @@ +defmodule PhoenixSyncExample.Cars.Model do + use Ecto.Schema + import Ecto.Changeset + + @primary_key {:id, :binary_id, autogenerate: true} + @foreign_key_type :binary_id + schema "models" do + field :name, :string + field :cost, :integer + + belongs_to :make, PhoenixSyncExample.Cars.Make, type: :binary_id + + timestamps(type: :utc_datetime) + end + + @doc false + def changeset(model, attrs) do + model + |> cast(attrs, [:name, :cost, :make_id]) + |> validate_required([:name, :cost, :make_id]) + end +end diff --git a/apps/phoenix_sync_example/lib/phoenix_sync_example/repo.ex b/apps/phoenix_sync_example/lib/phoenix_sync_example/repo.ex new file mode 100644 index 0000000..29d6cc5 --- /dev/null +++ b/apps/phoenix_sync_example/lib/phoenix_sync_example/repo.ex @@ -0,0 +1,7 @@ +defmodule PhoenixSyncExample.Repo do + use Phoenix.Sync.Sandbox.Postgres + + use Ecto.Repo, + otp_app: :phoenix_sync_example, + adapter: Phoenix.Sync.Sandbox.Postgres.adapter() +end diff --git a/apps/phoenix_sync_example/lib/phoenix_sync_example/sync.ex b/apps/phoenix_sync_example/lib/phoenix_sync_example/sync.ex new file mode 100644 index 0000000..8baeb85 --- /dev/null +++ b/apps/phoenix_sync_example/lib/phoenix_sync_example/sync.ex @@ -0,0 +1,44 @@ +defmodule PhoenixSyncExample.Sync do + use GenServer + + def start_link(_opts) do + GenServer.start_link(__MODULE__, :ok, name: __MODULE__) + end + + def subscribe(pid \\ self()) 
do
+    GenServer.call(__MODULE__, {:subscribe, pid})
+  end
+
+  def init(_args) do
+    client = Phoenix.Sync.client!()
+    parent = self()
+
+    {:ok, sync_task} =
+      Task.start_link(fn ->
+        for msg <- Electric.Client.stream(client, PhoenixSyncExample.Cars.Model, replica: :full) do
+          GenServer.cast(parent, {:sync_message, msg})
+        end
+      end)
+
+    {:ok, %{task: sync_task, subscribers: MapSet.new()}}
+  end
+
+  def handle_call({:subscribe, pid}, _from, state) do
+    Process.monitor(pid)
+    new_subscribers = MapSet.put(state.subscribers, pid)
+    {:reply, :ok, %{state | subscribers: new_subscribers}}
+  end
+
+  def handle_cast({:sync_message, msg}, state) do
+    for pid <- MapSet.to_list(state.subscribers) do
+      send(pid, {:sync_message, msg})
+    end
+
+    {:noreply, state}
+  end
+
+  def handle_info({:DOWN, _ref, :process, pid, _reason}, state) do
+    new_subscribers = MapSet.delete(state.subscribers, pid)
+    {:noreply, %{state | subscribers: new_subscribers}}
+  end
+end
diff --git a/apps/phoenix_sync_example/lib/phoenix_sync_example_web.ex b/apps/phoenix_sync_example/lib/phoenix_sync_example_web.ex
new file mode 100644
index 0000000..af36682
--- /dev/null
+++ b/apps/phoenix_sync_example/lib/phoenix_sync_example_web.ex
@@ -0,0 +1,111 @@
+defmodule PhoenixSyncExampleWeb do
+  @moduledoc """
+  The entrypoint for defining your web interface, such
+  as controllers, components, channels, and so on.
+
+  This can be used in your application as:
+
+      use PhoenixSyncExampleWeb, :controller
+      use PhoenixSyncExampleWeb, :html
+
+  The definitions below will be executed for every controller,
+  component, etc, so keep them short and clean, focused
+  on imports, uses and aliases.
+
+  Do NOT define functions inside the quoted expressions
+  below. Instead, define additional modules and import
+  those modules here.
+  """
+
+  def static_paths, do: ~w(assets fonts images favicon.ico robots.txt)
+
+  def router do
+    quote do
+      use Phoenix.Router, helpers: false
+
+      # Import common connection and controller functions to use in pipelines
+      import Plug.Conn
+      import Phoenix.Controller
+      import Phoenix.LiveView.Router
+    end
+  end
+
+  def channel do
+    quote do
+      use Phoenix.Channel
+    end
+  end
+
+  def controller do
+    quote do
+      use Phoenix.Controller,
+        formats: [:html, :json],
+        layouts: [html: PhoenixSyncExampleWeb.Layouts]
+
+      import Plug.Conn
+
+      unquote(verified_routes())
+    end
+  end
+
+  def live_view do
+    quote do
+      use Phoenix.LiveView,
+        layout: {PhoenixSyncExampleWeb.Layouts, :app}
+
+      unquote(html_helpers())
+    end
+  end
+
+  def live_component do
+    quote do
+      use Phoenix.LiveComponent
+
+      unquote(html_helpers())
+    end
+  end
+
+  def html do
+    quote do
+      use Phoenix.Component
+
+      # Import convenience functions from controllers
+      import Phoenix.Controller,
+        only: [get_csrf_token: 0, view_module: 1, view_template: 1]
+
+      # Include general helpers for rendering HTML
+      unquote(html_helpers())
+    end
+  end
+
+  defp html_helpers do
+    quote do
+      # HTML escaping functionality
+      import Phoenix.HTML
+      # Core UI components
+      import PhoenixSyncExampleWeb.CoreComponents
+
+      # Shortcut for generating JS commands
+      alias Phoenix.LiveView.JS
+
+      # Routes generation with the ~p sigil
+      unquote(verified_routes())
+    end
+  end
+
+  def verified_routes do
+    quote do
+      use Phoenix.VerifiedRoutes,
+        endpoint: PhoenixSyncExampleWeb.Endpoint,
+        router: PhoenixSyncExampleWeb.Router,
+        statics: PhoenixSyncExampleWeb.static_paths()
+    end
+  end
+
+  @doc """
+  When used, dispatch to the appropriate controller/live_view/etc.
+ """ + defmacro __using__(which) when is_atom(which) do + apply(__MODULE__, which, []) + end +end diff --git a/apps/phoenix_sync_example/lib/phoenix_sync_example_web/components/core_components.ex b/apps/phoenix_sync_example/lib/phoenix_sync_example_web/components/core_components.ex new file mode 100644 index 0000000..cbb6fb7 --- /dev/null +++ b/apps/phoenix_sync_example/lib/phoenix_sync_example_web/components/core_components.ex @@ -0,0 +1,672 @@ +defmodule PhoenixSyncExampleWeb.CoreComponents do + @moduledoc """ + Provides core UI components. + + At first glance, this module may seem daunting, but its goal is to provide + core building blocks for your application, such as modals, tables, and + forms. The components consist mostly of markup and are well-documented + with doc strings and declarative assigns. You may customize and style + them in any way you want, based on your application growth and needs. + + The default components use Tailwind CSS, a utility-first CSS framework. + See the [Tailwind CSS documentation](https://tailwindcss.com) to learn + how to customize them or feel free to swap in another framework altogether. + + Icons are provided by [heroicons](https://heroicons.com). See `icon/1` for usage. + """ + use Phoenix.Component + + alias Phoenix.LiveView.JS + + @doc """ + Renders a modal. + + ## Examples + + <.modal id="confirm-modal"> + This is a modal. + + + JS commands may be passed to the `:on_cancel` to configure + the closing/cancel event, for example: + + <.modal id="confirm" on_cancel={JS.navigate(~p"/posts")}> + This is another modal. + + + """ + attr :id, :string, required: true + attr :show, :boolean, default: false + attr :on_cancel, JS, default: %JS{} + slot :inner_block, required: true + + def modal(assigns) do + ~H""" + + """ + end + + def input(%{type: "select"} = assigns) do + ~H""" +
+ <.label for={@id}>{@label} + + <.error :for={msg <- @errors}>{msg} +
+ """ + end + + def input(%{type: "textarea"} = assigns) do + ~H""" +
+ <.label for={@id}>{@label} + + <.error :for={msg <- @errors}>{msg} +
+ """ + end + + # All other inputs text, datetime-local, url, password, etc. are handled here... + def input(assigns) do + ~H""" +
+ <.label for={@id}>{@label} + + <.error :for={msg <- @errors}>{msg} +
+ """ + end + + @doc """ + Renders a label. + """ + attr :for, :string, default: nil + slot :inner_block, required: true + + def label(assigns) do + ~H""" + + """ + end + + @doc """ + Generates a generic error message. + """ + slot :inner_block, required: true + + def error(assigns) do + ~H""" +

+ <.icon name="hero-exclamation-circle-mini" class="mt-0.5 h-5 w-5 flex-none" /> + {render_slot(@inner_block)} +

+ """ + end + + @doc """ + Renders a header with title. + """ + attr :class, :string, default: nil + + slot :inner_block, required: true + slot :subtitle + slot :actions + + def header(assigns) do + ~H""" +
+
+

+ {render_slot(@inner_block)} +

+

+ {render_slot(@subtitle)} +

+
+
{render_slot(@actions)}
+
+ """ + end + + @doc ~S""" + Renders a table with generic styling. + + ## Examples + + <.table id="users" rows={@users}> + <:col :let={user} label="id">{user.id} + <:col :let={user} label="username">{user.username} + + """ + attr :id, :string, required: true + attr :rows, :list, required: true + attr :row_id, :any, default: nil, doc: "the function for generating the row id" + attr :row_click, :any, default: nil, doc: "the function for handling phx-click on each row" + + attr :row_item, :any, + default: &Function.identity/1, + doc: "the function for mapping each row before calling the :col and :action slots" + + slot :col, required: true do + attr :label, :string + end + + slot :action, doc: "the slot for showing user actions in the last table column" + + def table(assigns) do + assigns = + with %{rows: %Phoenix.LiveView.LiveStream{}} <- assigns do + assign(assigns, row_id: assigns.row_id || fn {id, _item} -> id end) + end + + ~H""" +
+ + + + + + + + + + + + + +
{col[:label]} + Actions +
+
+ + + {render_slot(col, @row_item.(row))} + +
+
+
+ + + {render_slot(action, @row_item.(row))} + +
+
+
+ """ + end + + @doc """ + Renders a data list. + + ## Examples + + <.list> + <:item title="Title">{@post.title} + <:item title="Views">{@post.views} + + """ + slot :item, required: true do + attr :title, :string, required: true + end + + def list(assigns) do + ~H""" +
+
+
+
{item.title}
+
{render_slot(item)}
+
+
+
+ """ + end + + @doc """ + Renders a back navigation link. + + ## Examples + + <.back navigate={~p"/posts"}>Back to posts + """ + attr :navigate, :any, required: true + slot :inner_block, required: true + + def back(assigns) do + ~H""" +
+ <.link + navigate={@navigate} + class="text-sm font-semibold leading-6 text-zinc-900 hover:text-zinc-700" + > + <.icon name="hero-arrow-left-solid" class="h-3 w-3" /> + {render_slot(@inner_block)} + +
+ """ + end + + @doc """ + Renders a [Heroicon](https://heroicons.com). + + Heroicons come in three styles – outline, solid, and mini. + By default, the outline style is used, but solid and mini may + be applied by using the `-solid` and `-mini` suffix. + + You can customize the size and colors of the icons by setting + width, height, and background color classes. + + Icons are extracted from the `deps/heroicons` directory and bundled within + your compiled app.css by the plugin in your `assets/tailwind.config.js`. + + ## Examples + + <.icon name="hero-x-mark-solid" /> + <.icon name="hero-arrow-path" class="ml-1 w-3 h-3 animate-spin" /> + """ + attr :name, :string, required: true + attr :class, :string, default: nil + + def icon(%{name: "hero-" <> _} = assigns) do + ~H""" + + """ + end + + ## JS Commands + + def show(js \\ %JS{}, selector) do + JS.show(js, + to: selector, + time: 300, + transition: + {"transition-all transform ease-out duration-300", + "opacity-0 translate-y-4 sm:translate-y-0 sm:scale-95", + "opacity-100 translate-y-0 sm:scale-100"} + ) + end + + def hide(js \\ %JS{}, selector) do + JS.hide(js, + to: selector, + time: 200, + transition: + {"transition-all transform ease-in duration-200", + "opacity-100 translate-y-0 sm:scale-100", + "opacity-0 translate-y-4 sm:translate-y-0 sm:scale-95"} + ) + end + + def show_modal(js \\ %JS{}, id) when is_binary(id) do + js + |> JS.show(to: "##{id}") + |> JS.show( + to: "##{id}-bg", + time: 300, + transition: {"transition-all transform ease-out duration-300", "opacity-0", "opacity-100"} + ) + |> show("##{id}-container") + |> JS.add_class("overflow-hidden", to: "body") + |> JS.focus_first(to: "##{id}-content") + end + + def hide_modal(js \\ %JS{}, id) do + js + |> JS.hide( + to: "##{id}-bg", + transition: {"transition-all transform ease-in duration-200", "opacity-100", "opacity-0"} + ) + |> hide("##{id}-container") + |> JS.hide(to: "##{id}", transition: {"block", "block", "hidden"}) + |> JS.remove_class("overflow-hidden", to: "body") + |> JS.pop_focus() + end + + @doc """ + Translates an error message using gettext. + """ + def translate_error({msg, opts}) do + # You can make use of gettext to translate error messages by + # uncommenting and adjusting the following code: + + # if count = opts[:count] do + # Gettext.dngettext(PhoenixSyncExampleWeb.Gettext, "errors", msg, msg, count, opts) + # else + # Gettext.dgettext(PhoenixSyncExampleWeb.Gettext, "errors", msg, opts) + # end + + Enum.reduce(opts, msg, fn {key, value}, acc -> + String.replace(acc, "%{#{key}}", fn _ -> to_string(value) end) + end) + end + + @doc """ + Translates the errors for a field from a keyword list of errors. + """ + def translate_errors(errors, field) when is_list(errors) do + for {^field, {msg, opts}} <- errors, do: translate_error({msg, opts}) + end +end diff --git a/apps/phoenix_sync_example/lib/phoenix_sync_example_web/components/layouts.ex b/apps/phoenix_sync_example/lib/phoenix_sync_example_web/components/layouts.ex new file mode 100644 index 0000000..d7dbca1 --- /dev/null +++ b/apps/phoenix_sync_example/lib/phoenix_sync_example_web/components/layouts.ex @@ -0,0 +1,14 @@ +defmodule PhoenixSyncExampleWeb.Layouts do + @moduledoc """ + This module holds different layouts used by your application. + + See the `layouts` directory for all templates available. + The "root" layout is a skeleton rendered as part of the + application router. 
The "app" layout is set as the default + layout on both `use PhoenixSyncExampleWeb, :controller` and + `use PhoenixSyncExampleWeb, :live_view`. + """ + use PhoenixSyncExampleWeb, :html + + embed_templates "layouts/*" +end diff --git a/apps/phoenix_sync_example/lib/phoenix_sync_example_web/components/layouts/app.html.heex b/apps/phoenix_sync_example/lib/phoenix_sync_example_web/components/layouts/app.html.heex new file mode 100644 index 0000000..3b3b607 --- /dev/null +++ b/apps/phoenix_sync_example/lib/phoenix_sync_example_web/components/layouts/app.html.heex @@ -0,0 +1,32 @@ +
+
+
+ + + +

+ v{Application.spec(:phoenix, :vsn)} +

+
+ +
+
+
+
+ <.flash_group flash={@flash} /> + {@inner_content} +
+
diff --git a/apps/phoenix_sync_example/lib/phoenix_sync_example_web/components/layouts/root.html.heex b/apps/phoenix_sync_example/lib/phoenix_sync_example_web/components/layouts/root.html.heex new file mode 100644 index 0000000..fdbd68c --- /dev/null +++ b/apps/phoenix_sync_example/lib/phoenix_sync_example_web/components/layouts/root.html.heex @@ -0,0 +1,17 @@ + + + + + + + <.live_title default="PhoenixSyncExample" suffix=" · Phoenix Framework"> + {assigns[:page_title]} + + + + + + {@inner_content} + + diff --git a/apps/phoenix_sync_example/lib/phoenix_sync_example_web/controllers/car_controller.ex b/apps/phoenix_sync_example/lib/phoenix_sync_example_web/controllers/car_controller.ex new file mode 100644 index 0000000..fd0f5e8 --- /dev/null +++ b/apps/phoenix_sync_example/lib/phoenix_sync_example_web/controllers/car_controller.ex @@ -0,0 +1,43 @@ +defmodule PhoenixSyncExampleWeb.CarController do + use PhoenixSyncExampleWeb, :controller + + alias PhoenixSyncExample.Cars + alias PhoenixSyncExample.Cars.Model + + action_fallback PhoenixSyncExampleWeb.FallbackController + + def index(conn, _params) do + cars = Cars.list_cars() + render(conn, :index, cars: cars) + end + + def create(conn, %{"car" => car_params}) do + with {:ok, %Model{} = car} <- Cars.create_car(car_params) do + conn + |> put_status(:created) + |> put_resp_header("location", ~p"/api/cars/#{car}") + |> render(:show, car: car) + end + end + + def show(conn, %{"id" => id}) do + car = Cars.get_car!(id) + render(conn, :show, car: car) + end + + def update(conn, %{"id" => id, "car" => car_params}) do + car = Cars.get_car!(id) + + with {:ok, %Model{} = car} <- Cars.update_car(car, car_params) do + render(conn, :show, car: car) + end + end + + def delete(conn, %{"id" => id}) do + car = Cars.get_car!(id) + + with {:ok, %Model{}} <- Cars.delete_car(car) do + send_resp(conn, :no_content, "") + end + end +end diff --git a/apps/phoenix_sync_example/lib/phoenix_sync_example_web/controllers/car_json.ex b/apps/phoenix_sync_example/lib/phoenix_sync_example_web/controllers/car_json.ex new file mode 100644 index 0000000..ceb46bd --- /dev/null +++ b/apps/phoenix_sync_example/lib/phoenix_sync_example_web/controllers/car_json.ex @@ -0,0 +1,25 @@ +defmodule PhoenixSyncExampleWeb.CarJSON do + alias PhoenixSyncExample.Cars.Model + + @doc """ + Renders a list of cars. + """ + def index(%{cars: cars}) do + %{data: for(car <- cars, do: data(car))} + end + + @doc """ + Renders a single car. + """ + def show(%{car: car}) do + %{data: data(car)} + end + + defp data(%Model{} = car) do + %{ + id: car.id, + name: car.name, + cost: car.cost + } + end +end diff --git a/apps/phoenix_sync_example/lib/phoenix_sync_example_web/controllers/changeset_json.ex b/apps/phoenix_sync_example/lib/phoenix_sync_example_web/controllers/changeset_json.ex new file mode 100644 index 0000000..e395233 --- /dev/null +++ b/apps/phoenix_sync_example/lib/phoenix_sync_example_web/controllers/changeset_json.ex @@ -0,0 +1,25 @@ +defmodule PhoenixSyncExampleWeb.ChangesetJSON do + @doc """ + Renders changeset errors. + """ + def error(%{changeset: changeset}) do + # When encoded, the changeset returns its errors + # as a JSON object. So we just pass it forward. 
+ %{errors: Ecto.Changeset.traverse_errors(changeset, &translate_error/1)} + end + + defp translate_error({msg, opts}) do + # You can make use of gettext to translate error messages by + # uncommenting and adjusting the following code: + + # if count = opts[:count] do + # Gettext.dngettext(PhoenixSyncExampleWeb.Gettext, "errors", msg, msg, count, opts) + # else + # Gettext.dgettext(PhoenixSyncExampleWeb.Gettext, "errors", msg, opts) + # end + + Enum.reduce(opts, msg, fn {key, value}, acc -> + String.replace(acc, "%{#{key}}", fn _ -> to_string(value) end) + end) + end +end diff --git a/apps/phoenix_sync_example/lib/phoenix_sync_example_web/controllers/error_html.ex b/apps/phoenix_sync_example/lib/phoenix_sync_example_web/controllers/error_html.ex new file mode 100644 index 0000000..e382476 --- /dev/null +++ b/apps/phoenix_sync_example/lib/phoenix_sync_example_web/controllers/error_html.ex @@ -0,0 +1,24 @@ +defmodule PhoenixSyncExampleWeb.ErrorHTML do + @moduledoc """ + This module is invoked by your endpoint in case of errors on HTML requests. + + See config/config.exs. + """ + use PhoenixSyncExampleWeb, :html + + # If you want to customize your error pages, + # uncomment the embed_templates/1 call below + # and add pages to the error directory: + # + # * lib/phoenix_sync_example_web/controllers/error_html/404.html.heex + # * lib/phoenix_sync_example_web/controllers/error_html/500.html.heex + # + # embed_templates "error_html/*" + + # The default is to render a plain text page based on + # the template name. For example, "404.html" becomes + # "Not Found". + def render(template, _assigns) do + Phoenix.Controller.status_message_from_template(template) + end +end diff --git a/apps/phoenix_sync_example/lib/phoenix_sync_example_web/controllers/error_json.ex b/apps/phoenix_sync_example/lib/phoenix_sync_example_web/controllers/error_json.ex new file mode 100644 index 0000000..c1735b4 --- /dev/null +++ b/apps/phoenix_sync_example/lib/phoenix_sync_example_web/controllers/error_json.ex @@ -0,0 +1,21 @@ +defmodule PhoenixSyncExampleWeb.ErrorJSON do + @moduledoc """ + This module is invoked by your endpoint in case of errors on JSON requests. + + See config/config.exs. + """ + + # If you want to customize a particular status code, + # you may add your own clauses, such as: + # + # def render("500.json", _assigns) do + # %{errors: %{detail: "Internal Server Error"}} + # end + + # By default, Phoenix returns the status message from + # the template name. For example, "404.json" becomes + # "Not Found". + def render(template, _assigns) do + %{errors: %{detail: Phoenix.Controller.status_message_from_template(template)}} + end +end diff --git a/apps/phoenix_sync_example/lib/phoenix_sync_example_web/controllers/fallback_controller.ex b/apps/phoenix_sync_example/lib/phoenix_sync_example_web/controllers/fallback_controller.ex new file mode 100644 index 0000000..67c1167 --- /dev/null +++ b/apps/phoenix_sync_example/lib/phoenix_sync_example_web/controllers/fallback_controller.ex @@ -0,0 +1,24 @@ +defmodule PhoenixSyncExampleWeb.FallbackController do + @moduledoc """ + Translates controller action results into valid `Plug.Conn` responses. + + See `Phoenix.Controller.action_fallback/1` for more details. + """ + use PhoenixSyncExampleWeb, :controller + + # This clause handles errors returned by Ecto's insert/update/delete. 
+ def call(conn, {:error, %Ecto.Changeset{} = changeset}) do + conn + |> put_status(:unprocessable_entity) + |> put_view(json: PhoenixSyncExampleWeb.ChangesetJSON) + |> render(:error, changeset: changeset) + end + + # This clause is an example of how to handle resources that cannot be found. + def call(conn, {:error, :not_found}) do + conn + |> put_status(:not_found) + |> put_view(html: PhoenixSyncExampleWeb.ErrorHTML, json: PhoenixSyncExampleWeb.ErrorJSON) + |> render(:"404") + end +end diff --git a/apps/phoenix_sync_example/lib/phoenix_sync_example_web/controllers/page_controller.ex b/apps/phoenix_sync_example/lib/phoenix_sync_example_web/controllers/page_controller.ex new file mode 100644 index 0000000..7063e06 --- /dev/null +++ b/apps/phoenix_sync_example/lib/phoenix_sync_example_web/controllers/page_controller.ex @@ -0,0 +1,9 @@ +defmodule PhoenixSyncExampleWeb.PageController do + use PhoenixSyncExampleWeb, :controller + + def home(conn, _params) do + # The home page is often custom made, + # so skip the default app layout. + render(conn, :home, layout: false) + end +end diff --git a/apps/phoenix_sync_example/lib/phoenix_sync_example_web/controllers/page_html.ex b/apps/phoenix_sync_example/lib/phoenix_sync_example_web/controllers/page_html.ex new file mode 100644 index 0000000..7b45bee --- /dev/null +++ b/apps/phoenix_sync_example/lib/phoenix_sync_example_web/controllers/page_html.ex @@ -0,0 +1,10 @@ +defmodule PhoenixSyncExampleWeb.PageHTML do + @moduledoc """ + This module contains pages rendered by PageController. + + See the `page_html` directory for all templates available. + """ + use PhoenixSyncExampleWeb, :html + + embed_templates "page_html/*" +end diff --git a/apps/phoenix_sync_example/lib/phoenix_sync_example_web/controllers/page_html/home.html.heex b/apps/phoenix_sync_example/lib/phoenix_sync_example_web/controllers/page_html/home.html.heex new file mode 100644 index 0000000..798d8f7 --- /dev/null +++ b/apps/phoenix_sync_example/lib/phoenix_sync_example_web/controllers/page_html/home.html.heex @@ -0,0 +1,223 @@ + +<.flash_group flash={@flash} /> + +
+
+ +

+ Phoenix Framework + + v{Application.spec(:phoenix, :vsn)} + +

+

+ Peace of mind from prototype to production. +

+

+ Build rich, interactive web applications quickly, with less code and fewer moving parts. Join our growing community of developers using Phoenix to craft APIs, HTML5 apps and more, for fun or at scale. +

+ +
+
diff --git a/apps/phoenix_sync_example/lib/phoenix_sync_example_web/endpoint.ex b/apps/phoenix_sync_example/lib/phoenix_sync_example_web/endpoint.ex new file mode 100644 index 0000000..f0d6be0 --- /dev/null +++ b/apps/phoenix_sync_example/lib/phoenix_sync_example_web/endpoint.ex @@ -0,0 +1,49 @@ +defmodule PhoenixSyncExampleWeb.Endpoint do + use Phoenix.Endpoint, otp_app: :phoenix_sync_example + + # The session will be stored in the cookie and signed, + # this means its contents can be read but not tampered with. + # Set :encryption_salt if you would also like to encrypt it. + @session_options [ + store: :cookie, + key: "_phoenix_sync_example_key", + signing_salt: "UrryUPvm", + same_site: "Lax" + ] + + socket "/live", Phoenix.LiveView.Socket, + websocket: [connect_info: [session: @session_options]], + longpoll: [connect_info: [session: @session_options]] + + # Serve at "/" the static files from "priv/static" directory. + # + # You should set gzip to true if you are running phx.digest + # when deploying your static files in production. + plug Plug.Static, + at: "/", + from: :phoenix_sync_example, + gzip: false, + only: PhoenixSyncExampleWeb.static_paths() + + # Code reloading can be explicitly enabled under the + # :code_reloader configuration of your endpoint. + if code_reloading? do + socket "/phoenix/live_reload/socket", Phoenix.LiveReloader.Socket + plug Phoenix.LiveReloader + plug Phoenix.CodeReloader + plug Phoenix.Ecto.CheckRepoStatus, otp_app: :phoenix_sync_example + end + + plug Plug.RequestId + plug Plug.Telemetry, event_prefix: [:phoenix, :endpoint] + + plug Plug.Parsers, + parsers: [:urlencoded, :multipart, :json], + pass: ["*/*"], + json_decoder: Phoenix.json_library() + + plug Plug.MethodOverride + plug Plug.Head + plug Plug.Session, @session_options + plug PhoenixSyncExampleWeb.Router +end diff --git a/apps/phoenix_sync_example/lib/phoenix_sync_example_web/router.ex b/apps/phoenix_sync_example/lib/phoenix_sync_example_web/router.ex new file mode 100644 index 0000000..9fa0895 --- /dev/null +++ b/apps/phoenix_sync_example/lib/phoenix_sync_example_web/router.ex @@ -0,0 +1,29 @@ +defmodule PhoenixSyncExampleWeb.Router do + use PhoenixSyncExampleWeb, :router + + pipeline :browser do + plug :accepts, ["html"] + plug :fetch_session + plug :fetch_live_flash + plug :put_root_layout, html: {PhoenixSyncExampleWeb.Layouts, :root} + plug :protect_from_forgery + plug :put_secure_browser_headers + end + + pipeline :api do + plug :accepts, ["json"] + end + + scope "/", PhoenixSyncExampleWeb do + pipe_through :browser + + get "/", PageController, :home + end + + # Other scopes may use custom stacks. + scope "/api", PhoenixSyncExampleWeb do + pipe_through :api + + resources "/cars", CarController + end +end diff --git a/apps/phoenix_sync_example/lib/phoenix_sync_example_web/telemetry.ex b/apps/phoenix_sync_example/lib/phoenix_sync_example_web/telemetry.ex new file mode 100644 index 0000000..24ad8ac --- /dev/null +++ b/apps/phoenix_sync_example/lib/phoenix_sync_example_web/telemetry.ex @@ -0,0 +1,92 @@ +defmodule PhoenixSyncExampleWeb.Telemetry do + use Supervisor + import Telemetry.Metrics + + def start_link(arg) do + Supervisor.start_link(__MODULE__, arg, name: __MODULE__) + end + + @impl true + def init(_arg) do + children = [ + # Telemetry poller will execute the given period measurements + # every 10_000ms. 
Learn more here: https://hexdocs.pm/telemetry_metrics + {:telemetry_poller, measurements: periodic_measurements(), period: 10_000} + # Add reporters as children of your supervision tree. + # {Telemetry.Metrics.ConsoleReporter, metrics: metrics()} + ] + + Supervisor.init(children, strategy: :one_for_one) + end + + def metrics do + [ + # Phoenix Metrics + summary("phoenix.endpoint.start.system_time", + unit: {:native, :millisecond} + ), + summary("phoenix.endpoint.stop.duration", + unit: {:native, :millisecond} + ), + summary("phoenix.router_dispatch.start.system_time", + tags: [:route], + unit: {:native, :millisecond} + ), + summary("phoenix.router_dispatch.exception.duration", + tags: [:route], + unit: {:native, :millisecond} + ), + summary("phoenix.router_dispatch.stop.duration", + tags: [:route], + unit: {:native, :millisecond} + ), + summary("phoenix.socket_connected.duration", + unit: {:native, :millisecond} + ), + summary("phoenix.channel_joined.duration", + unit: {:native, :millisecond} + ), + summary("phoenix.channel_handled_in.duration", + tags: [:event], + unit: {:native, :millisecond} + ), + + # Database Metrics + summary("phoenix_sync_example.repo.query.total_time", + unit: {:native, :millisecond}, + description: "The sum of the other measurements" + ), + summary("phoenix_sync_example.repo.query.decode_time", + unit: {:native, :millisecond}, + description: "The time spent decoding the data received from the database" + ), + summary("phoenix_sync_example.repo.query.query_time", + unit: {:native, :millisecond}, + description: "The time spent executing the query" + ), + summary("phoenix_sync_example.repo.query.queue_time", + unit: {:native, :millisecond}, + description: "The time spent waiting for a database connection" + ), + summary("phoenix_sync_example.repo.query.idle_time", + unit: {:native, :millisecond}, + description: + "The time the connection spent waiting before being checked out for the query" + ), + + # VM Metrics + summary("vm.memory.total", unit: {:byte, :kilobyte}), + summary("vm.total_run_queue_lengths.total"), + summary("vm.total_run_queue_lengths.cpu"), + summary("vm.total_run_queue_lengths.io") + ] + end + + defp periodic_measurements do + [ + # A module, function and arguments to be invoked periodically. + # This function must call :telemetry.execute/3 and a metric must be added above. + # {PhoenixSyncExampleWeb, :count_users, []} + ] + end +end diff --git a/apps/phoenix_sync_example/mix.exs b/apps/phoenix_sync_example/mix.exs new file mode 100644 index 0000000..4932ce1 --- /dev/null +++ b/apps/phoenix_sync_example/mix.exs @@ -0,0 +1,77 @@ +defmodule PhoenixSyncExample.MixProject do + use Mix.Project + + def project do + [ + app: :phoenix_sync_example, + version: "0.1.0", + elixir: "~> 1.14", + elixirc_paths: elixirc_paths(Mix.env()), + start_permanent: Mix.env() == :prod, + aliases: aliases(), + deps: deps() + ] + end + + # Configuration for the OTP application. + # + # Type `mix help compile.app` for more information. + def application do + [ + mod: {PhoenixSyncExample.Application, []}, + extra_applications: [:logger, :runtime_tools] + ] + end + + def cli do + [preferred_envs: [test: :test]] + end + + # Specifies which paths to compile per environment. + defp elixirc_paths(:test), do: ["lib", "test/support"] + defp elixirc_paths(_), do: ["lib"] + + # Specifies your project dependencies. + # + # Type `mix help deps` for examples and options. 
+ defp deps do + [ + {:phoenix, "~> 1.7.19"}, + {:phoenix_ecto, "~> 4.5"}, + {:ecto_sql, "~> 3.10"}, + {:postgrex, ">= 0.0.0"}, + {:phoenix_html, "~> 4.1"}, + {:phoenix_live_reload, "~> 1.2", only: :dev}, + {:phoenix_live_view, "~> 1.0.0"}, + {:floki, ">= 0.30.0", only: :test}, + {:telemetry_metrics, "~> 1.0"}, + {:telemetry_poller, "~> 1.0"}, + {:jason, "~> 1.2"}, + {:dns_cluster, "~> 0.1.1"}, + {:bandit, "~> 1.5"}, + {:phoenix_sync, path: "../.."}, + {:electric, "~> 1.0.24"} + ] + end + + # Aliases are shortcuts or tasks specific to the current project. + # For example, to install project dependencies and perform other setup tasks, run: + # + # $ mix setup + # + # See the documentation for `Mix` for more info on aliases. + defp aliases do + [ + setup: ["deps.get", "ecto.setup"], + "ecto.setup": ["ecto.create", "ecto.migrate", "run priv/repo/seeds.exs"], + "ecto.setup.quiet": [ + "ecto.create --quiet", + "ecto.migrate --quiet", + "run priv/repo/seeds.exs" + ], + "ecto.reset": ["ecto.drop", "ecto.setup"], + "ecto.reset.quiet": ["ecto.drop --quiet", "ecto.setup.quiet"], + test: ["ecto.reset.quiet", "test"] + ] + end +end diff --git a/apps/phoenix_sync_example/mix.lock b/apps/phoenix_sync_example/mix.lock new file mode 100644 index 0000000..e07b849 --- /dev/null +++ b/apps/phoenix_sync_example/mix.lock @@ -0,0 +1,53 @@ +%{ + "backoff": {:hex, :backoff, "1.1.6", "83b72ed2108ba1ee8f7d1c22e0b4a00cfe3593a67dbc792799e8cce9f42f796b", [:rebar3], [], "hexpm", "cf0cfff8995fb20562f822e5cc47d8ccf664c5ecdc26a684cbe85c225f9d7c39"}, + "bandit": {:hex, :bandit, "1.7.0", "d1564f30553c97d3e25f9623144bb8df11f3787a26733f00b21699a128105c0c", [:mix], [{:hpax, "~> 1.0", [hex: :hpax, repo: "hexpm", optional: false]}, {:plug, "~> 1.18", [hex: :plug, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}, {:thousand_island, "~> 1.0", [hex: :thousand_island, repo: "hexpm", optional: false]}, {:websock, "~> 0.5", [hex: :websock, repo: "hexpm", optional: false]}], "hexpm", "3e2f7a98c7a11f48d9d8c037f7177cd39778e74d55c7af06fe6227c742a8168a"}, + "castore": {:hex, :castore, "1.0.14", "4582dd7d630b48cf5e1ca8d3d42494db51e406b7ba704e81fbd401866366896a", [:mix], [], "hexpm", "7bc1b65249d31701393edaaac18ec8398d8974d52c647b7904d01b964137b9f4"}, + "combine": {:hex, :combine, "0.10.0", "eff8224eeb56498a2af13011d142c5e7997a80c8f5b97c499f84c841032e429f", [:mix], [], "hexpm", "1b1dbc1790073076580d0d1d64e42eae2366583e7aecd455d1215b0d16f2451b"}, + "db_connection": {:hex, :db_connection, "2.8.0", "64fd82cfa6d8e25ec6660cea73e92a4cbc6a18b31343910427b702838c4b33b2", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "008399dae5eee1bf5caa6e86d204dcb44242c82b1ed5e22c881f2c34da201b15"}, + "decimal": {:hex, :decimal, "2.3.0", "3ad6255aa77b4a3c4f818171b12d237500e63525c2fd056699967a3e7ea20f62", [:mix], [], "hexpm", "a4d66355cb29cb47c3cf30e71329e58361cfcb37c34235ef3bf1d7bf3773aeac"}, + "dns_cluster": {:hex, :dns_cluster, "0.1.3", "0bc20a2c88ed6cc494f2964075c359f8c2d00e1bf25518a6a6c7fd277c9b0c66", [:mix], [], "hexpm", "46cb7c4a1b3e52c7ad4cbe33ca5079fbde4840dedeafca2baf77996c2da1bc33"}, + "dotenvy": {:hex, :dotenvy, "1.1.0", "316aee89c11a4ec8be3d74a69d17d17ea2e21e633e0cac9f155cf420e237ccb4", [:mix], [], "hexpm", "0519bda67fdfa1c22279c2654b2f292485f0caae7360fe29205f74f28a93df18"}, + "ecto": {:hex, :ecto, "3.13.2", "7d0c0863f3fc8d71d17fc3ad3b9424beae13f02712ad84191a826c7169484f01", [:mix], [{:decimal, "~> 2.0", [hex: :decimal, repo: 
"hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "669d9291370513ff56e7b7e7081b7af3283d02e046cf3d403053c557894a0b3e"}, + "ecto_sql": {:hex, :ecto_sql, "3.13.2", "a07d2461d84107b3d037097c822ffdd36ed69d1cf7c0f70e12a3d1decf04e2e1", [:mix], [{:db_connection, "~> 2.4.1 or ~> 2.5", [hex: :db_connection, repo: "hexpm", optional: false]}, {:ecto, "~> 3.13.0", [hex: :ecto, repo: "hexpm", optional: false]}, {:myxql, "~> 0.7", [hex: :myxql, repo: "hexpm", optional: true]}, {:postgrex, "~> 0.19 or ~> 1.0", [hex: :postgrex, repo: "hexpm", optional: true]}, {:tds, "~> 2.1.1 or ~> 2.2", [hex: :tds, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4.0 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "539274ab0ecf1a0078a6a72ef3465629e4d6018a3028095dc90f60a19c371717"}, + "electric": {:hex, :electric, "1.0.24", "f17ee7971390cf710a731a349456f6da43750fbc6582d62793c8702c636ab203", [:mix], [{:backoff, "~> 1.1", [hex: :backoff, repo: "hexpm", optional: false]}, {:bandit, "~> 1.6", [hex: :bandit, repo: "hexpm", optional: false]}, {:dotenvy, "~> 1.1", [hex: :dotenvy, repo: "hexpm", optional: false]}, {:ecto, "~> 3.12", [hex: :ecto, repo: "hexpm", optional: false]}, {:electric_cubdb, "~> 2.0", [hex: :electric_cubdb, repo: "hexpm", optional: false]}, {:jason, "~> 1.4", [hex: :jason, repo: "hexpm", optional: false]}, {:nimble_options, "~> 1.1", [hex: :nimble_options, repo: "hexpm", optional: false]}, {:opentelemetry, "~> 1.5", [hex: :opentelemetry, repo: "hexpm", optional: true]}, {:opentelemetry_exporter, "~> 1.8", [hex: :opentelemetry_exporter, repo: "hexpm", optional: true]}, {:opentelemetry_semantic_conventions, "~> 1.27", [hex: :opentelemetry_semantic_conventions, repo: "hexpm", optional: false]}, {:opentelemetry_telemetry, "~> 1.1", [hex: :opentelemetry_telemetry, repo: "hexpm", optional: false]}, {:otel_metric_exporter, "~> 0.3.9", [hex: :otel_metric_exporter, repo: "hexpm", optional: true]}, {:pg_query_ex, "0.7.0", [hex: :pg_query_ex, repo: "hexpm", optional: false]}, {:plug, "~> 1.17", [hex: :plug, repo: "hexpm", optional: false]}, {:postgrex, "~> 0.20", [hex: :postgrex, repo: "hexpm", optional: false]}, {:remote_ip, "~> 1.2", [hex: :remote_ip, repo: "hexpm", optional: false]}, {:req, "~> 0.5", [hex: :req, repo: "hexpm", optional: false]}, {:retry, "~> 0.19", [hex: :retry, repo: "hexpm", optional: false]}, {:sentry, "~> 10.9", [hex: :sentry, repo: "hexpm", optional: true]}, {:stream_split, "~> 0.1", [hex: :stream_split, repo: "hexpm", optional: false]}, {:telemetry_metrics_prometheus_core, "~> 1.1", [hex: :telemetry_metrics_prometheus_core, repo: "hexpm", optional: true]}, {:telemetry_metrics_statsd, "~> 0.7", [hex: :telemetry_metrics_statsd, repo: "hexpm", optional: true]}, {:telemetry_poller, "~> 1.2", [hex: :telemetry_poller, repo: "hexpm", optional: false]}, {:tls_certificate_check, "~> 1.27", [hex: :tls_certificate_check, repo: "hexpm", optional: false]}, {:tz, "~> 0.28", [hex: :tz, repo: "hexpm", optional: false]}], "hexpm", "91e3a8b957c1e02d07da3a0e1e902420f32e1d7d5da25814475175517698fb61"}, + "electric_client": {:hex, :electric_client, "0.6.4", "a582b5df5aa6c94296e4d11c98431114f21136766c7f336f14cb1dabd44800d5", [:mix], [{:ecto_sql, "~> 3.12", [hex: :ecto_sql, repo: "hexpm", optional: true]}, {:electric, "~> 1.0.6", [hex: :electric, repo: "hexpm", optional: true]}, {:gen_stage, "~> 1.2", [hex: :gen_stage, repo: "hexpm", optional: true]}, 
{:jason, "~> 1.4", [hex: :jason, repo: "hexpm", optional: false]}, {:nimble_options, "~> 1.1", [hex: :nimble_options, repo: "hexpm", optional: false]}, {:req, "~> 0.5", [hex: :req, repo: "hexpm", optional: false]}], "hexpm", "911768e0740cbe738e815c0ab9e378682ea4f800aa87442424f841d0b2c06fe9"}, + "electric_cubdb": {:hex, :electric_cubdb, "2.0.2", "36f86e3c52dc26f4e077a49fbef813b1a38d3897421cece851f149190b34c16c", [:mix], [], "hexpm", "0c0e24b31fb76ad1b33c5de2ab35c41a4ff9da153f5c1f9b15e2de78575acaf2"}, + "elixir_make": {:hex, :elixir_make, "0.9.0", "6484b3cd8c0cee58f09f05ecaf1a140a8c97670671a6a0e7ab4dc326c3109726", [:mix], [], "hexpm", "db23d4fd8b757462ad02f8aa73431a426fe6671c80b200d9710caf3d1dd0ffdb"}, + "file_system": {:hex, :file_system, "1.1.0", "08d232062284546c6c34426997dd7ef6ec9f8bbd090eb91780283c9016840e8f", [:mix], [], "hexpm", "bfcf81244f416871f2a2e15c1b515287faa5db9c6bcf290222206d120b3d43f6"}, + "finch": {:hex, :finch, "0.20.0", "5330aefb6b010f424dcbbc4615d914e9e3deae40095e73ab0c1bb0968933cadf", [:mix], [{:mime, "~> 1.0 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:mint, "~> 1.6.2 or ~> 1.7", [hex: :mint, repo: "hexpm", optional: false]}, {:nimble_options, "~> 0.4 or ~> 1.0", [hex: :nimble_options, repo: "hexpm", optional: false]}, {:nimble_pool, "~> 1.1", [hex: :nimble_pool, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "2658131a74d051aabfcba936093c903b8e89da9a1b63e430bee62045fa9b2ee2"}, + "floki": {:hex, :floki, "0.38.0", "62b642386fa3f2f90713f6e231da0fa3256e41ef1089f83b6ceac7a3fd3abf33", [:mix], [], "hexpm", "a5943ee91e93fb2d635b612caf5508e36d37548e84928463ef9dd986f0d1abd9"}, + "hpax": {:hex, :hpax, "1.0.3", "ed67ef51ad4df91e75cc6a1494f851850c0bd98ebc0be6e81b026e765ee535aa", [:mix], [], "hexpm", "8eab6e1cfa8d5918c2ce4ba43588e894af35dbd8e91e6e55c817bca5847df34a"}, + "jason": {:hex, :jason, "1.4.4", "b9226785a9aa77b6857ca22832cffa5d5011a667207eb2a0ad56adb5db443b8a", [:mix], [{:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm", "c5eb0cab91f094599f94d55bc63409236a8ec69a21a67814529e8d5f6cc90b3b"}, + "mime": {:hex, :mime, "2.0.7", "b8d739037be7cd402aee1ba0306edfdef982687ee7e9859bee6198c1e7e2f128", [:mix], [], "hexpm", "6171188e399ee16023ffc5b76ce445eb6d9672e2e241d2df6050f3c771e80ccd"}, + "mint": {:hex, :mint, "1.7.1", "113fdb2b2f3b59e47c7955971854641c61f378549d73e829e1768de90fc1abf1", [:mix], [{:castore, "~> 0.1.0 or ~> 1.0", [hex: :castore, repo: "hexpm", optional: true]}, {:hpax, "~> 0.1.1 or ~> 0.2.0 or ~> 1.0", [hex: :hpax, repo: "hexpm", optional: false]}], "hexpm", "fceba0a4d0f24301ddee3024ae116df1c3f4bb7a563a731f45fdfeb9d39a231b"}, + "nimble_options": {:hex, :nimble_options, "1.1.1", "e3a492d54d85fc3fd7c5baf411d9d2852922f66e69476317787a7b2bb000a61b", [:mix], [], "hexpm", "821b2470ca9442c4b6984882fe9bb0389371b8ddec4d45a9504f00a66f650b44"}, + "nimble_pool": {:hex, :nimble_pool, "1.1.0", "bf9c29fbdcba3564a8b800d1eeb5a3c58f36e1e11d7b7fb2e084a643f645f06b", [:mix], [], "hexpm", "af2e4e6b34197db81f7aad230c1118eac993acc0dae6bc83bac0126d4ae0813a"}, + "opentelemetry_api": {:hex, :opentelemetry_api, "1.4.0", "63ca1742f92f00059298f478048dfb826f4b20d49534493d6919a0db39b6db04", [:mix, :rebar3], [], "hexpm", "3dfbbfaa2c2ed3121c5c483162836c4f9027def469c41578af5ef32589fcfc58"}, + "opentelemetry_semantic_conventions": {:hex, :opentelemetry_semantic_conventions, "1.27.0", "acd0194a94a1e57d63da982ee9f4a9f88834ae0b31b0bd850815fe9be4bbb45f", [:mix, :rebar3], [], 
"hexpm", "9681ccaa24fd3d810b4461581717661fd85ff7019b082c2dff89c7d5b1fc2864"}, + "opentelemetry_telemetry": {:hex, :opentelemetry_telemetry, "1.1.2", "410ab4d76b0921f42dbccbe5a7c831b8125282850be649ee1f70050d3961118a", [:mix, :rebar3], [{:opentelemetry_api, "~> 1.3", [hex: :opentelemetry_api, repo: "hexpm", optional: false]}, {:telemetry, "~> 1.1", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "641ab469deb181957ac6d59bce6e1321d5fe2a56df444fc9c19afcad623ab253"}, + "pg_query_ex": {:hex, :pg_query_ex, "0.7.0", "189f0c0d2b6fce78def670f3cba411baa9311a099bcd0cdb0501adcfede37677", [:make, :mix], [{:elixir_make, "~> 0.4", [hex: :elixir_make, repo: "hexpm", optional: false]}, {:protox, "~> 1.7", [hex: :protox, repo: "hexpm", optional: false]}], "hexpm", "c39cb58690fa8f19cdd1939c41c5906b65f1e70351ea4a45a9da680ca3ad8c66"}, + "phoenix": {:hex, :phoenix, "1.7.21", "14ca4f1071a5f65121217d6b57ac5712d1857e40a0833aff7a691b7870fc9a3b", [:mix], [{:castore, ">= 0.0.0", [hex: :castore, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:phoenix_pubsub, "~> 2.1", [hex: :phoenix_pubsub, repo: "hexpm", optional: false]}, {:phoenix_template, "~> 1.0", [hex: :phoenix_template, repo: "hexpm", optional: false]}, {:phoenix_view, "~> 2.0", [hex: :phoenix_view, repo: "hexpm", optional: true]}, {:plug, "~> 1.14", [hex: :plug, repo: "hexpm", optional: false]}, {:plug_cowboy, "~> 2.7", [hex: :plug_cowboy, repo: "hexpm", optional: true]}, {:plug_crypto, "~> 1.2 or ~> 2.0", [hex: :plug_crypto, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}, {:websock_adapter, "~> 0.5.3", [hex: :websock_adapter, repo: "hexpm", optional: false]}], "hexpm", "336dce4f86cba56fed312a7d280bf2282c720abb6074bdb1b61ec8095bdd0bc9"}, + "phoenix_ecto": {:hex, :phoenix_ecto, "4.6.5", "c4ef322acd15a574a8b1a08eff0ee0a85e73096b53ce1403b6563709f15e1cea", [:mix], [{:ecto, "~> 3.5", [hex: :ecto, repo: "hexpm", optional: false]}, {:phoenix_html, "~> 2.14.2 or ~> 3.0 or ~> 4.1", [hex: :phoenix_html, repo: "hexpm", optional: true]}, {:plug, "~> 1.9", [hex: :plug, repo: "hexpm", optional: false]}, {:postgrex, "~> 0.16 or ~> 1.0", [hex: :postgrex, repo: "hexpm", optional: true]}], "hexpm", "26ec3208eef407f31b748cadd044045c6fd485fbff168e35963d2f9dfff28d4b"}, + "phoenix_html": {:hex, :phoenix_html, "4.2.1", "35279e2a39140068fc03f8874408d58eef734e488fc142153f055c5454fd1c08", [:mix], [], "hexpm", "cff108100ae2715dd959ae8f2a8cef8e20b593f8dfd031c9cba92702cf23e053"}, + "phoenix_live_reload": {:hex, :phoenix_live_reload, "1.6.0", "2791fac0e2776b640192308cc90c0dbcf67843ad51387ed4ecae2038263d708d", [:mix], [{:file_system, "~> 0.2.10 or ~> 1.0", [hex: :file_system, repo: "hexpm", optional: false]}, {:phoenix, "~> 1.4", [hex: :phoenix, repo: "hexpm", optional: false]}], "hexpm", "b3a1fa036d7eb2f956774eda7a7638cf5123f8f2175aca6d6420a7f95e598e1c"}, + "phoenix_live_view": {:hex, :phoenix_live_view, "1.0.17", "beeb16d83a7d3760f7ad463df94e83b087577665d2acc0bf2987cd7d9778068f", [:mix], [{:floki, "~> 0.36", [hex: :floki, repo: "hexpm", optional: true]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:phoenix, "~> 1.6.15 or ~> 1.7.0 or ~> 1.8.0-rc", [hex: :phoenix, repo: "hexpm", optional: false]}, {:phoenix_html, "~> 3.3 or ~> 4.0", [hex: :phoenix_html, repo: "hexpm", optional: false]}, {:phoenix_template, "~> 1.0", [hex: :phoenix_template, repo: "hexpm", optional: false]}, {:phoenix_view, "~> 2.0", [hex: :phoenix_view, repo: 
"hexpm", optional: true]}, {:plug, "~> 1.15", [hex: :plug, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4.2 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "a4ca05c1eb6922c4d07a508a75bfa12c45e5f4d8f77ae83283465f02c53741e1"}, + "phoenix_pubsub": {:hex, :phoenix_pubsub, "2.1.3", "3168d78ba41835aecad272d5e8cd51aa87a7ac9eb836eabc42f6e57538e3731d", [:mix], [], "hexpm", "bba06bc1dcfd8cb086759f0edc94a8ba2bc8896d5331a1e2c2902bf8e36ee502"}, + "phoenix_template": {:hex, :phoenix_template, "1.0.4", "e2092c132f3b5e5b2d49c96695342eb36d0ed514c5b252a77048d5969330d639", [:mix], [{:phoenix_html, "~> 2.14.2 or ~> 3.0 or ~> 4.0", [hex: :phoenix_html, repo: "hexpm", optional: true]}], "hexpm", "2c0c81f0e5c6753faf5cca2f229c9709919aba34fab866d3bc05060c9c444206"}, + "plug": {:hex, :plug, "1.18.1", "5067f26f7745b7e31bc3368bc1a2b818b9779faa959b49c934c17730efc911cf", [:mix], [{:mime, "~> 1.0 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:plug_crypto, "~> 1.1.1 or ~> 1.2 or ~> 2.0", [hex: :plug_crypto, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4.3 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "57a57db70df2b422b564437d2d33cf8d33cd16339c1edb190cd11b1a3a546cc2"}, + "plug_crypto": {:hex, :plug_crypto, "2.1.1", "19bda8184399cb24afa10be734f84a16ea0a2bc65054e23a62bb10f06bc89491", [:mix], [], "hexpm", "6470bce6ffe41c8bd497612ffde1a7e4af67f36a15eea5f921af71cf3e11247c"}, + "postgrex": {:hex, :postgrex, "0.21.1", "2c5cc830ec11e7a0067dd4d623c049b3ef807e9507a424985b8dcf921224cd88", [:mix], [{:db_connection, "~> 2.1", [hex: :db_connection, repo: "hexpm", optional: false]}, {:decimal, "~> 1.5 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:table, "~> 0.1.0", [hex: :table, repo: "hexpm", optional: true]}], "hexpm", "27d8d21c103c3cc68851b533ff99eef353e6a0ff98dc444ea751de43eb48bdac"}, + "protox": {:hex, :protox, "1.7.8", "ccae41afec6e63cf061bee874d7d042ed585d501df1cd004661ffac0e5628686", [:mix], [{:decimal, "~> 1.9 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.2", [hex: :jason, repo: "hexpm", optional: true]}, {:poison, "~> 4.0 or ~> 5.0 or ~> 6.0", [hex: :poison, repo: "hexpm", optional: true]}], "hexpm", "f6702c9deb9fb7cd2eadd73d3dbc0303c506dc87635e509228c61309f7062933"}, + "remote_ip": {:hex, :remote_ip, "1.2.0", "fb078e12a44414f4cef5a75963c33008fe169b806572ccd17257c208a7bc760f", [:mix], [{:combine, "~> 0.10", [hex: :combine, repo: "hexpm", optional: false]}, {:plug, "~> 1.14", [hex: :plug, repo: "hexpm", optional: false]}], "hexpm", "2ff91de19c48149ce19ed230a81d377186e4412552a597d6a5137373e5877cb7"}, + "req": {:hex, :req, "0.5.15", "662020efb6ea60b9f0e0fac9be88cd7558b53fe51155a2d9899de594f9906ba9", [:mix], [{:brotli, "~> 0.3.1", [hex: :brotli, repo: "hexpm", optional: true]}, {:ezstd, "~> 1.0", [hex: :ezstd, repo: "hexpm", optional: true]}, {:finch, "~> 0.17", [hex: :finch, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}, {:mime, "~> 2.0.6 or ~> 2.1", [hex: :mime, repo: "hexpm", optional: false]}, {:nimble_csv, "~> 1.0", [hex: :nimble_csv, repo: "hexpm", optional: true]}, {:plug, "~> 1.0", [hex: :plug, repo: "hexpm", optional: true]}], "hexpm", "a6513a35fad65467893ced9785457e91693352c70b58bbc045b47e5eb2ef0c53"}, + "retry": {:hex, :retry, "0.19.0", "aeb326d87f62295d950f41e1255fe6f43280a1b390d36e280b7c9b00601ccbc2", [:mix], [], "hexpm", 
"85ef376aa60007e7bff565c366310966ec1bd38078765a0e7f20ec8a220d02ca"}, + "ssl_verify_fun": {:hex, :ssl_verify_fun, "1.1.7", "354c321cf377240c7b8716899e182ce4890c5938111a1296add3ec74cf1715df", [:make, :mix, :rebar3], [], "hexpm", "fe4c190e8f37401d30167c8c405eda19469f34577987c76dde613e838bbc67f8"}, + "stream_split": {:hex, :stream_split, "0.1.7", "2d3fd1fd21697da7f91926768d65f79409086052c9ec7ae593987388f52425f8", [:mix], [], "hexpm", "1dc072ff507a64404a0ad7af90df97096183fee8eeac7b300320cea7c4679147"}, + "telemetry": {:hex, :telemetry, "1.3.0", "fedebbae410d715cf8e7062c96a1ef32ec22e764197f70cda73d82778d61e7a2", [:rebar3], [], "hexpm", "7015fc8919dbe63764f4b4b87a95b7c0996bd539e0d499be6ec9d7f3875b79e6"}, + "telemetry_metrics": {:hex, :telemetry_metrics, "1.1.0", "5bd5f3b5637e0abea0426b947e3ce5dd304f8b3bc6617039e2b5a008adc02f8f", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "e7b79e8ddfde70adb6db8a6623d1778ec66401f366e9a8f5dd0955c56bc8ce67"}, + "telemetry_poller": {:hex, :telemetry_poller, "1.3.0", "d5c46420126b5ac2d72bc6580fb4f537d35e851cc0f8dbd571acf6d6e10f5ec7", [:rebar3], [{:telemetry, "~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "51f18bed7128544a50f75897db9974436ea9bfba560420b646af27a9a9b35211"}, + "thousand_island": {:hex, :thousand_island, "1.3.14", "ad45ebed2577b5437582bcc79c5eccd1e2a8c326abf6a3464ab6c06e2055a34a", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "d0d24a929d31cdd1d7903a4fe7f2409afeedff092d277be604966cd6aa4307ef"}, + "tls_certificate_check": {:hex, :tls_certificate_check, "1.28.0", "c39bf21f67c2d124ae905454fad00f27e625917e8ab1009146e916e1df6ab275", [:rebar3], [{:ssl_verify_fun, "~> 1.1", [hex: :ssl_verify_fun, repo: "hexpm", optional: false]}], "hexpm", "3ab058c3f9457fffca916729587415f0ddc822048a0e5b5e2694918556d92df1"}, + "tz": {:hex, :tz, "0.28.1", "717f5ffddfd1e475e2a233e221dc0b4b76c35c4b3650b060c8e3ba29dd6632e9", [:mix], [{:castore, "~> 0.1 or ~> 1.0", [hex: :castore, repo: "hexpm", optional: true]}, {:mint, "~> 1.6", [hex: :mint, repo: "hexpm", optional: true]}], "hexpm", "bfdca1aa1902643c6c43b77c1fb0cb3d744fd2f09a8a98405468afdee0848c8a"}, + "websock": {:hex, :websock, "0.5.3", "2f69a6ebe810328555b6fe5c831a851f485e303a7c8ce6c5f675abeb20ebdadc", [:mix], [], "hexpm", "6105453d7fac22c712ad66fab1d45abdf049868f253cf719b625151460b8b453"}, + "websock_adapter": {:hex, :websock_adapter, "0.5.8", "3b97dc94e407e2d1fc666b2fb9acf6be81a1798a2602294aac000260a7c4a47d", [:mix], [{:bandit, ">= 0.6.0", [hex: :bandit, repo: "hexpm", optional: true]}, {:plug, "~> 1.14", [hex: :plug, repo: "hexpm", optional: false]}, {:plug_cowboy, "~> 2.6", [hex: :plug_cowboy, repo: "hexpm", optional: true]}, {:websock, "~> 0.5", [hex: :websock, repo: "hexpm", optional: false]}], "hexpm", "315b9a1865552212b5f35140ad194e67ce31af45bcee443d4ecb96b5fd3f3782"}, +} diff --git a/apps/phoenix_sync_example/priv/repo/migrations/.formatter.exs b/apps/phoenix_sync_example/priv/repo/migrations/.formatter.exs new file mode 100644 index 0000000..49f9151 --- /dev/null +++ b/apps/phoenix_sync_example/priv/repo/migrations/.formatter.exs @@ -0,0 +1,4 @@ +[ + import_deps: [:ecto_sql], + inputs: ["*.exs"] +] diff --git a/apps/phoenix_sync_example/priv/repo/migrations/20250804095639_create_cars.exs b/apps/phoenix_sync_example/priv/repo/migrations/20250804095639_create_cars.exs new file mode 100644 index 0000000..4f0f88e --- /dev/null +++ 
b/apps/phoenix_sync_example/priv/repo/migrations/20250804095639_create_cars.exs @@ -0,0 +1,22 @@ +defmodule PhoenixSyncExample.Repo.Migrations.CreateCars do + use Ecto.Migration + + def change do + create table(:makes, primary_key: false) do + add(:id, :binary_id, primary_key: true) + add(:name, :string) + + timestamps(type: :utc_datetime) + end + + create table(:models, primary_key: false) do + add(:id, :binary_id, primary_key: true) + add(:name, :string) + add(:cost, :integer) + + add(:make_id, references(:makes, type: :binary_id, on_delete: :delete_all), null: false) + + timestamps(type: :utc_datetime) + end + end +end diff --git a/apps/phoenix_sync_example/priv/repo/seeds.exs b/apps/phoenix_sync_example/priv/repo/seeds.exs new file mode 100644 index 0000000..75101ef --- /dev/null +++ b/apps/phoenix_sync_example/priv/repo/seeds.exs @@ -0,0 +1,16 @@ +# Script for populating the database. You can run it as: +# +# mix run priv/repo/seeds.exs +# +# Inside the script, you can read and write to any of your +# repositories directly: +# +# PhoenixSyncExample.Repo.insert!(%PhoenixSyncExample.SomeSchema{}) +# +# We recommend using the bang functions (`insert!`, `update!` +# and so on) as they will fail if something goes wrong. + +alias PhoenixSyncExample.Cars.Make +alias PhoenixSyncExample.Repo + +_ford = Repo.insert!(%Make{id: "52e372d3-cb45-401c-8d2a-6e898e99cea3", name: "Ford"}) diff --git a/apps/phoenix_sync_example/priv/static/assets/app.css b/apps/phoenix_sync_example/priv/static/assets/app.css new file mode 100644 index 0000000..35d6918 --- /dev/null +++ b/apps/phoenix_sync_example/priv/static/assets/app.css @@ -0,0 +1 @@ +/* This file is for your main application CSS */ diff --git a/apps/phoenix_sync_example/priv/static/assets/app.js b/apps/phoenix_sync_example/priv/static/assets/app.js new file mode 100644 index 0000000..4d5caaa --- /dev/null +++ b/apps/phoenix_sync_example/priv/static/assets/app.js @@ -0,0 +1,11 @@ +// For Phoenix.HTML support, including form and button helpers +// copy the following scripts into your javascript bundle: +// * deps/phoenix_html/priv/static/phoenix_html.js + +// For Phoenix.Channels support, copy the following scripts +// into your javascript bundle: +// * deps/phoenix/priv/static/phoenix.js + +// For Phoenix.LiveView support, copy the following scripts +// into your javascript bundle: +// * deps/phoenix_live_view/priv/static/phoenix_live_view.js diff --git a/apps/phoenix_sync_example/priv/static/assets/home.css b/apps/phoenix_sync_example/priv/static/assets/home.css new file mode 100644 index 0000000..7109c21 --- /dev/null +++ b/apps/phoenix_sync_example/priv/static/assets/home.css @@ -0,0 +1,1054 @@ +/* Default styling for the home page, this file can be deleted safely */ + +/* +! tailwindcss v3.2.4 | MIT License | https://tailwindcss.com +*/ + +/* +1. Prevent padding and border from affecting element width. (https://github.com/mozdevs/cssremedy/issues/4) +2. Allow adding a border to an element by just adding a border-width. (https://github.com/tailwindcss/tailwindcss/pull/116) +*/ + +*, +::before, +::after { + box-sizing: border-box; + /* 1 */ + border-width: 0; + /* 2 */ + border-style: solid; + /* 2 */ + border-color: #e5e7eb; + /* 2 */ +} + +::before, +::after { + --tw-content: ''; +} + +/* +1. Use a consistent sensible line-height in all browsers. +2. Prevent adjustments of font size after orientation changes in iOS. +3. Use a more readable tab size. +4. Use the user's configured `sans` font-family by default. +5. 
Use the user's configured `sans` font-feature-settings by default. +*/ + +html { + line-height: 1.5; + /* 1 */ + -webkit-text-size-adjust: 100%; + /* 2 */ + -moz-tab-size: 4; + /* 3 */ + -o-tab-size: 4; + tab-size: 4; + /* 3 */ + font-family: ui-sans-serif, system-ui, -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, "Noto Sans", sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol", "Noto Color Emoji"; + /* 4 */ + font-feature-settings: normal; + /* 5 */ +} + +/* +1. Remove the margin in all browsers. +2. Inherit line-height from `html` so users can set them as a class directly on the `html` element. +*/ + +body { + margin: 0; + /* 1 */ + line-height: inherit; + /* 2 */ +} + +/* +1. Add the correct height in Firefox. +2. Correct the inheritance of border color in Firefox. (https://bugzilla.mozilla.org/show_bug.cgi?id=190655) +3. Ensure horizontal rules are visible by default. +*/ + +hr { + height: 0; + /* 1 */ + color: inherit; + /* 2 */ + border-top-width: 1px; + /* 3 */ +} + +/* +Add the correct text decoration in Chrome, Edge, and Safari. +*/ + +abbr:where([title]) { + -webkit-text-decoration: underline dotted; + text-decoration: underline dotted; +} + +/* +Remove the default font size and weight for headings. +*/ + +h1, +h2, +h3, +h4, +h5, +h6 { + font-size: inherit; + font-weight: inherit; +} + +/* +Reset links to optimize for opt-in styling instead of opt-out. +*/ + +a { + color: inherit; + text-decoration: inherit; +} + +/* +Add the correct font weight in Edge and Safari. +*/ + +b, +strong { + font-weight: bolder; +} + +/* +1. Use the user's configured `mono` font family by default. +2. Correct the odd `em` font sizing in all browsers. +*/ + +code, +kbd, +samp, +pre { + font-family: ui-monospace, SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace; + /* 1 */ + font-size: 1em; + /* 2 */ +} + +/* +Add the correct font size in all browsers. +*/ + +small { + font-size: 80%; +} + +/* +Prevent `sub` and `sup` elements from affecting the line height in all browsers. +*/ + +sub, +sup { + font-size: 75%; + line-height: 0; + position: relative; + vertical-align: baseline; +} + +sub { + bottom: -0.25em; +} + +sup { + top: -0.5em; +} + +/* +1. Remove text indentation from table contents in Chrome and Safari. (https://bugs.chromium.org/p/chromium/issues/detail?id=999088, https://bugs.webkit.org/show_bug.cgi?id=201297) +2. Correct table border color inheritance in all Chrome and Safari. (https://bugs.chromium.org/p/chromium/issues/detail?id=935729, https://bugs.webkit.org/show_bug.cgi?id=195016) +3. Remove gaps between table borders by default. +*/ + +table { + text-indent: 0; + /* 1 */ + border-color: inherit; + /* 2 */ + border-collapse: collapse; + /* 3 */ +} + +/* +1. Change the font styles in all browsers. +2. Remove the margin in Firefox and Safari. +3. Remove default padding in all browsers. +*/ + +button, +input, +optgroup, +select, +textarea { + font-family: inherit; + /* 1 */ + font-size: 100%; + /* 1 */ + font-weight: inherit; + /* 1 */ + line-height: inherit; + /* 1 */ + color: inherit; + /* 1 */ + margin: 0; + /* 2 */ + padding: 0; + /* 3 */ +} + +/* +Remove the inheritance of text transform in Edge and Firefox. +*/ + +button, +select { + text-transform: none; +} + +/* +1. Correct the inability to style clickable types in iOS and Safari. +2. Remove default button styles. 
+*/ + +button, +[type='button'], +[type='reset'], +[type='submit'] { + -webkit-appearance: button; + /* 1 */ + background-color: transparent; + /* 2 */ + background-image: none; + /* 2 */ +} + +/* +Use the modern Firefox focus style for all focusable elements. +*/ + +:-moz-focusring { + outline: auto; +} + +/* +Remove the additional `:invalid` styles in Firefox. (https://github.com/mozilla/gecko-dev/blob/2f9eacd9d3d995c937b4251a5557d95d494c9be1/layout/style/res/forms.css#L728-L737) +*/ + +:-moz-ui-invalid { + box-shadow: none; +} + +/* +Add the correct vertical alignment in Chrome and Firefox. +*/ + +progress { + vertical-align: baseline; +} + +/* +Correct the cursor style of increment and decrement buttons in Safari. +*/ + +::-webkit-inner-spin-button, +::-webkit-outer-spin-button { + height: auto; +} + +/* +1. Correct the odd appearance in Chrome and Safari. +2. Correct the outline style in Safari. +*/ + +[type='search'] { + -webkit-appearance: textfield; + /* 1 */ + outline-offset: -2px; + /* 2 */ +} + +/* +Remove the inner padding in Chrome and Safari on macOS. +*/ + +::-webkit-search-decoration { + -webkit-appearance: none; +} + +/* +1. Correct the inability to style clickable types in iOS and Safari. +2. Change font properties to `inherit` in Safari. +*/ + +::-webkit-file-upload-button { + -webkit-appearance: button; + /* 1 */ + font: inherit; + /* 2 */ +} + +/* +Add the correct display in Chrome and Safari. +*/ + +summary { + display: list-item; +} + +/* +Removes the default spacing and border for appropriate elements. +*/ + +blockquote, +dl, +dd, +h1, +h2, +h3, +h4, +h5, +h6, +hr, +figure, +p, +pre { + margin: 0; +} + +fieldset { + margin: 0; + padding: 0; +} + +legend { + padding: 0; +} + +ol, +ul, +menu { + list-style: none; + margin: 0; + padding: 0; +} + +/* +Prevent resizing textareas horizontally by default. +*/ + +textarea { + resize: vertical; +} + +/* +1. Reset the default placeholder opacity in Firefox. (https://github.com/tailwindlabs/tailwindcss/issues/3300) +2. Set the default placeholder color to the user's configured gray 400 color. +*/ + +input::-moz-placeholder, textarea::-moz-placeholder { + opacity: 1; + /* 1 */ + color: #9ca3af; + /* 2 */ +} + +input::placeholder, +textarea::placeholder { + opacity: 1; + /* 1 */ + color: #9ca3af; + /* 2 */ +} + +/* +Set the default cursor for buttons. +*/ + +button, +[role="button"] { + cursor: pointer; +} + +/* +Make sure disabled buttons don't get the pointer cursor. +*/ + +:disabled { + cursor: default; +} + +/* +1. Make replaced elements `display: block` by default. (https://github.com/mozdevs/cssremedy/issues/14) +2. Add `vertical-align: middle` to align replaced elements more sensibly by default. (https://github.com/jensimmons/cssremedy/issues/14#issuecomment-634934210) + This can trigger a poorly considered lint error in some tools but is included by design. +*/ + +img, +svg, +video, +canvas, +audio, +iframe, +embed, +object { + display: block; + /* 1 */ + vertical-align: middle; + /* 2 */ +} + +/* +Constrain images and videos to the parent width and preserve their intrinsic aspect ratio. 
(https://github.com/mozdevs/cssremedy/issues/14) +*/ + +img, +video { + max-width: 100%; + height: auto; +} + +/* Make elements with the HTML hidden attribute stay hidden by default */ + +[hidden] { + display: none; +} + +[type='text'],[type='email'],[type='url'],[type='password'],[type='number'],[type='date'],[type='datetime-local'],[type='month'],[type='search'],[type='tel'],[type='time'],[type='week'],[multiple],textarea,select { + -webkit-appearance: none; + -moz-appearance: none; + appearance: none; + background-color: #fff; + border-color: #6b7280; + border-width: 1px; + border-radius: 0px; + padding-top: 0.5rem; + padding-right: 0.75rem; + padding-bottom: 0.5rem; + padding-left: 0.75rem; + font-size: 1rem; + line-height: 1.5rem; + --tw-shadow: 0 0 #0000; +} + +[type='text']:focus, [type='email']:focus, [type='url']:focus, [type='password']:focus, [type='number']:focus, [type='date']:focus, [type='datetime-local']:focus, [type='month']:focus, [type='search']:focus, [type='tel']:focus, [type='time']:focus, [type='week']:focus, [multiple]:focus, textarea:focus, select:focus { + outline: 2px solid transparent; + outline-offset: 2px; + --tw-ring-inset: var(--tw-empty,/*!*/ /*!*/); + --tw-ring-offset-width: 0px; + --tw-ring-offset-color: #fff; + --tw-ring-color: #2563eb; + --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color); + --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(1px + var(--tw-ring-offset-width)) var(--tw-ring-color); + box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow); + border-color: #2563eb; +} + +input::-moz-placeholder, textarea::-moz-placeholder { + color: #6b7280; + opacity: 1; +} + +input::placeholder,textarea::placeholder { + color: #6b7280; + opacity: 1; +} + +::-webkit-datetime-edit-fields-wrapper { + padding: 0; +} + +::-webkit-date-and-time-value { + min-height: 1.5em; +} + +::-webkit-datetime-edit,::-webkit-datetime-edit-year-field,::-webkit-datetime-edit-month-field,::-webkit-datetime-edit-day-field,::-webkit-datetime-edit-hour-field,::-webkit-datetime-edit-minute-field,::-webkit-datetime-edit-second-field,::-webkit-datetime-edit-millisecond-field,::-webkit-datetime-edit-meridiem-field { + padding-top: 0; + padding-bottom: 0; +} + +select { + background-image: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' fill='none' viewBox='0 0 20 20'%3e%3cpath stroke='%236b7280' stroke-linecap='round' stroke-linejoin='round' stroke-width='1.5' d='M6 8l4 4 4-4'/%3e%3c/svg%3e"); + background-position: right 0.5rem center; + background-repeat: no-repeat; + background-size: 1.5em 1.5em; + padding-right: 2.5rem; + -webkit-print-color-adjust: exact; + print-color-adjust: exact; +} + +[multiple] { + background-image: initial; + background-position: initial; + background-repeat: unset; + background-size: initial; + padding-right: 0.75rem; + -webkit-print-color-adjust: unset; + print-color-adjust: unset; +} + +[type='checkbox'],[type='radio'] { + -webkit-appearance: none; + -moz-appearance: none; + appearance: none; + padding: 0; + -webkit-print-color-adjust: exact; + print-color-adjust: exact; + display: inline-block; + vertical-align: middle; + background-origin: border-box; + -webkit-user-select: none; + -moz-user-select: none; + user-select: none; + flex-shrink: 0; + height: 1rem; + width: 1rem; + color: #2563eb; + background-color: #fff; + border-color: #6b7280; + border-width: 1px; + --tw-shadow: 0 0 #0000; +} + +[type='checkbox'] { + border-radius: 0px; +} + 
+[type='radio'] { + border-radius: 100%; +} + +[type='checkbox']:focus,[type='radio']:focus { + outline: 2px solid transparent; + outline-offset: 2px; + --tw-ring-inset: var(--tw-empty,/*!*/ /*!*/); + --tw-ring-offset-width: 2px; + --tw-ring-offset-color: #fff; + --tw-ring-color: #2563eb; + --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color); + --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(2px + var(--tw-ring-offset-width)) var(--tw-ring-color); + box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow); +} + +[type='checkbox']:checked,[type='radio']:checked { + border-color: transparent; + background-color: currentColor; + background-size: 100% 100%; + background-position: center; + background-repeat: no-repeat; +} + +[type='checkbox']:checked { + background-image: url("data:image/svg+xml,%3csvg viewBox='0 0 16 16' fill='white' xmlns='http://www.w3.org/2000/svg'%3e%3cpath d='M12.207 4.793a1 1 0 010 1.414l-5 5a1 1 0 01-1.414 0l-2-2a1 1 0 011.414-1.414L6.5 9.086l4.293-4.293a1 1 0 011.414 0z'/%3e%3c/svg%3e"); +} + +[type='radio']:checked { + background-image: url("data:image/svg+xml,%3csvg viewBox='0 0 16 16' fill='white' xmlns='http://www.w3.org/2000/svg'%3e%3ccircle cx='8' cy='8' r='3'/%3e%3c/svg%3e"); +} + +[type='checkbox']:checked:hover,[type='checkbox']:checked:focus,[type='radio']:checked:hover,[type='radio']:checked:focus { + border-color: transparent; + background-color: currentColor; +} + +[type='checkbox']:indeterminate { + background-image: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' fill='none' viewBox='0 0 16 16'%3e%3cpath stroke='white' stroke-linecap='round' stroke-linejoin='round' stroke-width='2' d='M4 8h8'/%3e%3c/svg%3e"); + border-color: transparent; + background-color: currentColor; + background-size: 100% 100%; + background-position: center; + background-repeat: no-repeat; +} + +[type='checkbox']:indeterminate:hover,[type='checkbox']:indeterminate:focus { + border-color: transparent; + background-color: currentColor; +} + +[type='file'] { + background: unset; + border-color: inherit; + border-width: 0; + border-radius: 0; + padding: 0; + font-size: unset; + line-height: inherit; +} + +[type='file']:focus { + outline: 1px solid ButtonText; + outline: 1px auto -webkit-focus-ring-color; +} + +*, ::before, ::after { + --tw-border-spacing-x: 0; + --tw-border-spacing-y: 0; + --tw-translate-x: 0; + --tw-translate-y: 0; + --tw-rotate: 0; + --tw-skew-x: 0; + --tw-skew-y: 0; + --tw-scale-x: 1; + --tw-scale-y: 1; + --tw-scroll-snap-strictness: proximity; + --tw-ring-offset-width: 0px; + --tw-ring-offset-color: #fff; + --tw-ring-color: rgb(59 130 246 / 0.5); + --tw-ring-offset-shadow: 0 0 #0000; + --tw-ring-shadow: 0 0 #0000; + --tw-shadow: 0 0 #0000; + --tw-shadow-colored: 0 0 #0000; +} + +::backdrop { + --tw-border-spacing-x: 0; + --tw-border-spacing-y: 0; + --tw-translate-x: 0; + --tw-translate-y: 0; + --tw-rotate: 0; + --tw-skew-x: 0; + --tw-skew-y: 0; + --tw-scale-x: 1; + --tw-scale-y: 1; + --tw-scroll-snap-strictness: proximity; + --tw-ring-offset-width: 0px; + --tw-ring-offset-color: #fff; + --tw-ring-color: rgb(59 130 246 / 0.5); + --tw-ring-offset-shadow: 0 0 #0000; + --tw-ring-shadow: 0 0 #0000; + --tw-shadow: 0 0 #0000; + --tw-shadow-colored: 0 0 #0000; +} + +.static { + position: static; +} + +.fixed { + position: fixed; +} + +.absolute { + position: absolute; +} + +.relative { + position: relative; +} + +.inset-0 { + top: 0px; + right: 0px; + bottom: 0px; + left: 
0px; +} + +.inset-y-0 { + top: 0px; + bottom: 0px; +} + +.right-0 { + right: 0px; +} + +.left-\[40rem\] { + left: 40rem; +} + +.z-0 { + z-index: 0; +} + +.mx-auto { + margin-left: auto; + margin-right: auto; +} + +.-mx-2 { + margin-left: -0.5rem; + margin-right: -0.5rem; +} + +.-my-0\.5 { + margin-top: -0.125rem; + margin-bottom: -0.125rem; +} + +.-my-0 { + margin-top: -0px; + margin-bottom: -0px; +} + +.mt-10 { + margin-top: 2.5rem; +} + +.ml-3 { + margin-left: 0.75rem; +} + +.mt-4 { + margin-top: 1rem; +} + +.flex { + display: flex; +} + +.inline-flex { + display: inline-flex; +} + +.grid { + display: grid; +} + +.contents { + display: contents; +} + +.hidden { + display: none; +} + +.h-6 { + height: 1.5rem; +} + +.h-full { + height: 100%; +} + +.h-12 { + height: 3rem; +} + +.h-4 { + height: 1rem; +} + +.w-full { + width: 100%; +} + +.w-6 { + width: 1.5rem; +} + +.w-4 { + width: 1rem; +} + +.max-w-2xl { + max-width: 42rem; +} + +.max-w-xl { + max-width: 36rem; +} + +.grid-cols-1 { + grid-template-columns: repeat(1, minmax(0, 1fr)); +} + +.items-center { + align-items: center; +} + +.justify-between { + justify-content: space-between; +} + +.gap-4 { + gap: 1rem; +} + +.gap-3 { + gap: 0.75rem; +} + +.gap-x-6 { + -moz-column-gap: 1.5rem; + column-gap: 1.5rem; +} + +.gap-y-4 { + row-gap: 1rem; +} + +.rounded-full { + border-radius: 9999px; +} + +.rounded-lg { + border-radius: 0.5rem; +} + +.rounded-2xl { + border-radius: 1rem; +} + +.border-b { + border-bottom-width: 1px; +} + +.border-zinc-100 { + --tw-border-opacity: 1; + border-color: rgb(244 244 245 / var(--tw-border-opacity)); +} + +.bg-brand\/5 { + background-color: rgb(253 79 0 / 0.05); +} + +.bg-zinc-100 { + --tw-bg-opacity: 1; + background-color: rgb(244 244 245 / var(--tw-bg-opacity)); +} + +.bg-white { + --tw-bg-opacity: 1; + background-color: rgb(255 255 255 / var(--tw-bg-opacity)); +} + +.bg-zinc-50 { + --tw-bg-opacity: 1; + background-color: rgb(250 250 250 / var(--tw-bg-opacity)); +} + +.fill-zinc-400 { + fill: #a1a1aa; +} + +.px-4 { + padding-left: 1rem; + padding-right: 1rem; +} + +.py-3 { + padding-top: 0.75rem; + padding-bottom: 0.75rem; +} + +.px-2 { + padding-left: 0.5rem; + padding-right: 0.5rem; +} + +.py-1 { + padding-top: 0.25rem; + padding-bottom: 0.25rem; +} + +.py-20 { + padding-top: 5rem; + padding-bottom: 5rem; +} + +.py-10 { + padding-top: 2.5rem; + padding-bottom: 2.5rem; +} + +.px-6 { + padding-left: 1.5rem; + padding-right: 1.5rem; +} + +.py-4 { + padding-top: 1rem; + padding-bottom: 1rem; +} + +.py-0\.5 { + padding-top: 0.125rem; + padding-bottom: 0.125rem; +} + +.py-0 { + padding-top: 0px; + padding-bottom: 0px; +} + +.text-\[0\.8125rem\] { + font-size: 0.8125rem; +} + +.text-sm { + font-size: 0.875rem; + line-height: 1.25rem; +} + +.text-\[2rem\] { + font-size: 2rem; +} + +.text-base { + font-size: 1rem; + line-height: 1.5rem; +} + +.font-medium { + font-weight: 500; +} + +.font-semibold { + font-weight: 600; +} + +.leading-6 { + line-height: 1.5rem; +} + +.leading-10 { + line-height: 2.5rem; +} + +.leading-7 { + line-height: 1.75rem; +} + +.tracking-tighter { + letter-spacing: -0.05em; +} + +.text-brand { + --tw-text-opacity: 1; + color: rgb(253 79 0 / var(--tw-text-opacity)); +} + +.text-zinc-900 { + --tw-text-opacity: 1; + color: rgb(24 24 27 / var(--tw-text-opacity)); +} + +.text-zinc-600 { + --tw-text-opacity: 1; + color: rgb(82 82 91 / var(--tw-text-opacity)); +} + +.text-zinc-700 { + --tw-text-opacity: 1; + color: rgb(63 63 70 / var(--tw-text-opacity)); +} + +.transition { + transition-property: 
color, background-color, border-color, text-decoration-color, fill, stroke, opacity, box-shadow, transform, filter, -webkit-backdrop-filter; + transition-property: color, background-color, border-color, text-decoration-color, fill, stroke, opacity, box-shadow, transform, filter, backdrop-filter; + transition-property: color, background-color, border-color, text-decoration-color, fill, stroke, opacity, box-shadow, transform, filter, backdrop-filter, -webkit-backdrop-filter; + transition-timing-function: cubic-bezier(0.4, 0, 0.2, 1); + transition-duration: 150ms; +} + +/* This file is for your main application CSS */ + +.hover\:bg-zinc-200\/80:hover { + background-color: rgb(228 228 231 / 0.8); +} + +.hover\:bg-zinc-50:hover { + --tw-bg-opacity: 1; + background-color: rgb(250 250 250 / var(--tw-bg-opacity)); +} + +.hover\:text-zinc-700:hover { + --tw-text-opacity: 1; + color: rgb(63 63 70 / var(--tw-text-opacity)); +} + +.hover\:text-zinc-900:hover { + --tw-text-opacity: 1; + color: rgb(24 24 27 / var(--tw-text-opacity)); +} + +.active\:text-zinc-900\/70:active { + color: rgb(24 24 27 / 0.7); +} + +.group:hover .group-hover\:bg-zinc-100 { + --tw-bg-opacity: 1; + background-color: rgb(244 244 245 / var(--tw-bg-opacity)); +} + +.group:hover .group-hover\:fill-zinc-600 { + fill: #52525b; +} + +@media (min-width: 640px) { + .sm\:w-auto { + width: auto; + } + + .sm\:grid-cols-3 { + grid-template-columns: repeat(3, minmax(0, 1fr)); + } + + .sm\:grid-cols-2 { + grid-template-columns: repeat(2, minmax(0, 1fr)); + } + + .sm\:flex-col { + flex-direction: column; + } + + .sm\:px-6 { + padding-left: 1.5rem; + padding-right: 1.5rem; + } + + .sm\:py-28 { + padding-top: 7rem; + padding-bottom: 7rem; + } + + .sm\:py-6 { + padding-top: 1.5rem; + padding-bottom: 1.5rem; + } + + .group:hover .sm\:group-hover\:scale-105 { + --tw-scale-x: 1.05; + --tw-scale-y: 1.05; + transform: translate(var(--tw-translate-x), var(--tw-translate-y)) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y)); + } +} + +@media (min-width: 1024px) { + .lg\:mx-0 { + margin-left: 0px; + margin-right: 0px; + } + + .lg\:block { + display: block; + } + + .lg\:px-8 { + padding-left: 2rem; + padding-right: 2rem; + } +} + +@media (min-width: 1280px) { + .xl\:left-\[50rem\] { + left: 50rem; + } + + .xl\:py-32 { + padding-top: 8rem; + padding-bottom: 8rem; + } + + .xl\:px-28 { + padding-left: 7rem; + padding-right: 7rem; + } +} diff --git a/apps/phoenix_sync_example/priv/static/favicon.ico b/apps/phoenix_sync_example/priv/static/favicon.ico new file mode 100644 index 0000000..7f372bf Binary files /dev/null and b/apps/phoenix_sync_example/priv/static/favicon.ico differ diff --git a/apps/phoenix_sync_example/priv/static/images/logo.svg b/apps/phoenix_sync_example/priv/static/images/logo.svg new file mode 100644 index 0000000..9f26bab --- /dev/null +++ b/apps/phoenix_sync_example/priv/static/images/logo.svg @@ -0,0 +1,6 @@ + diff --git a/apps/phoenix_sync_example/priv/static/robots.txt b/apps/phoenix_sync_example/priv/static/robots.txt new file mode 100644 index 0000000..26e06b5 --- /dev/null +++ b/apps/phoenix_sync_example/priv/static/robots.txt @@ -0,0 +1,5 @@ +# See https://www.robotstxt.org/robotstxt.html for documentation on how to use the robots.txt file +# +# To ban all spiders from the entire site uncomment the next two lines: +# User-agent: * +# Disallow: / diff --git a/apps/phoenix_sync_example/test/phoenix_sync_example/cars_test.exs 
b/apps/phoenix_sync_example/test/phoenix_sync_example/cars_test.exs new file mode 100644 index 0000000..b821eb2 --- /dev/null +++ b/apps/phoenix_sync_example/test/phoenix_sync_example/cars_test.exs @@ -0,0 +1,85 @@ +defmodule PhoenixSyncExample.CarsTest do + use PhoenixSyncExample.DataCase + + alias PhoenixSyncExample.Cars + alias PhoenixSyncExample.Cars.Model + + import PhoenixSyncExample.CarsFixtures + + describe "cars" do + @invalid_attrs %{name: nil, cost: nil} + + test "list_cars/0 returns all cars" do + car = car_fixture() + assert Cars.list_cars() == [car] + end + + test "get_car!/1 returns the car with given id" do + car = car_fixture() + assert Cars.get_car!(car.id) == car + end + + test "create_car/1 with valid data creates a car" do + make = make_fixture() + valid_attrs = %{name: "some name", cost: 42, make_id: make.id} + + assert {:ok, %Model{} = car} = Cars.create_car(valid_attrs) + assert car.name == "some name" + assert car.cost == 42 + end + + test "create_car/1 with invalid data returns error changeset" do + assert {:error, %Ecto.Changeset{}} = Cars.create_car(@invalid_attrs) + end + + test "update_car/2 with valid data updates the car" do + car = car_fixture() + update_attrs = %{name: "some updated name", cost: 43} + + assert {:ok, %Model{} = car} = Cars.update_car(car, update_attrs) + assert car.name == "some updated name" + assert car.cost == 43 + end + + test "update_car/2 with invalid data returns error changeset" do + car = car_fixture() + assert {:error, %Ecto.Changeset{}} = Cars.update_car(car, @invalid_attrs) + assert car == Cars.get_car!(car.id) + end + + test "delete_car/1 deletes the car" do + car = car_fixture() + assert {:ok, %Model{}} = Cars.delete_car(car) + assert_raise Ecto.NoResultsError, fn -> Cars.get_car!(car.id) end + end + + test "change_car/1 returns a car changeset" do + car = car_fixture() + assert %Ecto.Changeset{} = Cars.change_car(car) + end + end + + describe "sync" do + test "works with the sandbox" do + parent = self() + client = Phoenix.Sync.client!() + + start_supervised!( + {Task, + fn -> + for msg <- Electric.Client.stream(client, Model, replica: :full), + do: send(parent, {:change, msg}) + end} + ) + + %{id: id} = car_fixture(%{name: "Bunny"}) + + assert_receive {:change, + %Electric.Client.Message.ChangeMessage{ + value: %Model{id: ^id, name: "Bunny"}, + headers: %{operation: :insert} + }}, + 1000 + end + end +end diff --git a/apps/phoenix_sync_example/test/phoenix_sync_example_web/controllers/car_controller_test.exs b/apps/phoenix_sync_example/test/phoenix_sync_example_web/controllers/car_controller_test.exs new file mode 100644 index 0000000..deb1d7e --- /dev/null +++ b/apps/phoenix_sync_example/test/phoenix_sync_example_web/controllers/car_controller_test.exs @@ -0,0 +1,90 @@ +defmodule PhoenixSyncExampleWeb.CarControllerTest do + use PhoenixSyncExampleWeb.ConnCase + + import PhoenixSyncExample.CarsFixtures + + alias PhoenixSyncExample.Cars.Model + + @create_attrs %{ + name: "some name", + cost: 42, + # see priv/repo/seeds.exs for the make_id + make_id: "52e372d3-cb45-401c-8d2a-6e898e99cea3" + } + @update_attrs %{ + name: "some updated name", + cost: 43 + } + @invalid_attrs %{name: nil, cost: nil} + + setup %{conn: conn} do + {:ok, conn: put_req_header(conn, "accept", "application/json")} + end + + describe "index" do + test "lists all cars", %{conn: conn} do + conn = get(conn, ~p"/api/cars") + assert json_response(conn, 200)["data"] == [] + end + end + + describe "create car" do + test "renders car when data is valid", %{conn: 
conn} do + conn = post(conn, ~p"/api/cars", car: @create_attrs) + assert %{"id" => id} = json_response(conn, 201)["data"] + + conn = get(conn, ~p"/api/cars/#{id}") + + assert %{ + "id" => ^id, + "cost" => 42, + "name" => "some name" + } = json_response(conn, 200)["data"] + end + + test "renders errors when data is invalid", %{conn: conn} do + conn = post(conn, ~p"/api/cars", car: @invalid_attrs) + assert json_response(conn, 422)["errors"] != %{} + end + end + + describe "update car" do + setup [:create_car] + + test "renders car when data is valid", %{conn: conn, car: %Model{id: id} = car} do + conn = put(conn, ~p"/api/cars/#{car}", car: @update_attrs) + assert %{"id" => ^id} = json_response(conn, 200)["data"] + + conn = get(conn, ~p"/api/cars/#{id}") + + assert %{ + "id" => ^id, + "cost" => 43, + "name" => "some updated name" + } = json_response(conn, 200)["data"] + end + + test "renders errors when data is invalid", %{conn: conn, car: car} do + conn = put(conn, ~p"/api/cars/#{car}", car: @invalid_attrs) + assert json_response(conn, 422)["errors"] != %{} + end + end + + describe "delete car" do + setup [:create_car] + + test "deletes chosen car", %{conn: conn, car: car} do + conn = delete(conn, ~p"/api/cars/#{car}") + assert response(conn, 204) + + assert_error_sent 404, fn -> + get(conn, ~p"/api/cars/#{car}") + end + end + end + + defp create_car(_) do + car = car_fixture() + %{car: car} + end +end diff --git a/apps/phoenix_sync_example/test/phoenix_sync_example_web/controllers/error_html_test.exs b/apps/phoenix_sync_example/test/phoenix_sync_example_web/controllers/error_html_test.exs new file mode 100644 index 0000000..8a8e445 --- /dev/null +++ b/apps/phoenix_sync_example/test/phoenix_sync_example_web/controllers/error_html_test.exs @@ -0,0 +1,14 @@ +defmodule PhoenixSyncExampleWeb.ErrorHTMLTest do + use PhoenixSyncExampleWeb.ConnCase, async: true + + # Bring render_to_string/4 for testing custom views + import Phoenix.Template + + test "renders 404.html" do + assert render_to_string(PhoenixSyncExampleWeb.ErrorHTML, "404", "html", []) == "Not Found" + end + + test "renders 500.html" do + assert render_to_string(PhoenixSyncExampleWeb.ErrorHTML, "500", "html", []) == "Internal Server Error" + end +end diff --git a/apps/phoenix_sync_example/test/phoenix_sync_example_web/controllers/error_json_test.exs b/apps/phoenix_sync_example/test/phoenix_sync_example_web/controllers/error_json_test.exs new file mode 100644 index 0000000..97067c8 --- /dev/null +++ b/apps/phoenix_sync_example/test/phoenix_sync_example_web/controllers/error_json_test.exs @@ -0,0 +1,12 @@ +defmodule PhoenixSyncExampleWeb.ErrorJSONTest do + use PhoenixSyncExampleWeb.ConnCase, async: true + + test "renders 404" do + assert PhoenixSyncExampleWeb.ErrorJSON.render("404.json", %{}) == %{errors: %{detail: "Not Found"}} + end + + test "renders 500" do + assert PhoenixSyncExampleWeb.ErrorJSON.render("500.json", %{}) == + %{errors: %{detail: "Internal Server Error"}} + end +end diff --git a/apps/phoenix_sync_example/test/phoenix_sync_example_web/controllers/page_controller_test.exs b/apps/phoenix_sync_example/test/phoenix_sync_example_web/controllers/page_controller_test.exs new file mode 100644 index 0000000..0cb0aa5 --- /dev/null +++ b/apps/phoenix_sync_example/test/phoenix_sync_example_web/controllers/page_controller_test.exs @@ -0,0 +1,8 @@ +defmodule PhoenixSyncExampleWeb.PageControllerTest do + use PhoenixSyncExampleWeb.ConnCase + + test "GET /", %{conn: conn} do + conn = get(conn, ~p"/") + assert html_response(conn, 200) =~ 
"Peace of mind from prototype to production" + end +end diff --git a/apps/phoenix_sync_example/test/support/conn_case.ex b/apps/phoenix_sync_example/test/support/conn_case.ex new file mode 100644 index 0000000..bb2b1be --- /dev/null +++ b/apps/phoenix_sync_example/test/support/conn_case.ex @@ -0,0 +1,38 @@ +defmodule PhoenixSyncExampleWeb.ConnCase do + @moduledoc """ + This module defines the test case to be used by + tests that require setting up a connection. + + Such tests rely on `Phoenix.ConnTest` and also + import other functionality to make it easier + to build common data structures and query the data layer. + + Finally, if the test case interacts with the database, + we enable the SQL sandbox, so changes done to the database + are reverted at the end of every test. If you are using + PostgreSQL, you can even run database tests asynchronously + by setting `use PhoenixSyncExampleWeb.ConnCase, async: true`, although + this option is not recommended for other databases. + """ + + use ExUnit.CaseTemplate + + using do + quote do + # The default endpoint for testing + @endpoint PhoenixSyncExampleWeb.Endpoint + + use PhoenixSyncExampleWeb, :verified_routes + + # Import conveniences for testing with connections + import Plug.Conn + import Phoenix.ConnTest + import PhoenixSyncExampleWeb.ConnCase + end + end + + setup tags do + PhoenixSyncExample.DataCase.setup_sandbox(tags) + {:ok, conn: Phoenix.ConnTest.build_conn()} + end +end diff --git a/apps/phoenix_sync_example/test/support/data_case.ex b/apps/phoenix_sync_example/test/support/data_case.ex new file mode 100644 index 0000000..b0aa0c5 --- /dev/null +++ b/apps/phoenix_sync_example/test/support/data_case.ex @@ -0,0 +1,61 @@ +defmodule PhoenixSyncExample.DataCase do + @moduledoc """ + This module defines the setup for tests requiring + access to the application's data layer. + + You may define functions here to be used as helpers in + your tests. + + Finally, if the test case interacts with the database, + we enable the SQL sandbox, so changes done to the database + are reverted at the end of every test. If you are using + PostgreSQL, you can even run database tests asynchronously + by setting `use PhoenixSyncExample.DataCase, async: true`, although + this option is not recommended for other databases. + """ + + use ExUnit.CaseTemplate + + using do + quote do + alias PhoenixSyncExample.Repo + + import Ecto + import Ecto.Changeset + import Ecto.Query + import PhoenixSyncExample.DataCase + end + end + + setup tags do + PhoenixSyncExample.DataCase.setup_sandbox(tags) + :ok + end + + @doc """ + Sets up the sandbox based on the test tags. + """ + def setup_sandbox(tags) do + pid = + Ecto.Adapters.SQL.Sandbox.start_owner!(PhoenixSyncExample.Repo, shared: not tags[:async]) + + on_exit(fn -> Ecto.Adapters.SQL.Sandbox.stop_owner(pid) end) + Phoenix.Sync.Sandbox.start!(PhoenixSyncExample.Repo, pid, shared: not tags[:async]) + end + + @doc """ + A helper that transforms changeset errors into a map of messages. 
+ + assert {:error, changeset} = Accounts.create_user(%{password: "short"}) + assert "password is too short" in errors_on(changeset).password + assert %{password: ["password is too short"]} = errors_on(changeset) + + """ + def errors_on(changeset) do + Ecto.Changeset.traverse_errors(changeset, fn {message, opts} -> + Regex.replace(~r"%{(\w+)}", message, fn _, key -> + opts |> Keyword.get(String.to_existing_atom(key), key) |> to_string() + end) + end) + end +end diff --git a/apps/phoenix_sync_example/test/support/fixtures/cars_fixtures.ex b/apps/phoenix_sync_example/test/support/fixtures/cars_fixtures.ex new file mode 100644 index 0000000..a8decdb --- /dev/null +++ b/apps/phoenix_sync_example/test/support/fixtures/cars_fixtures.ex @@ -0,0 +1,29 @@ +defmodule PhoenixSyncExample.CarsFixtures do + @moduledoc """ + This module defines test helpers for creating + entities via the `PhoenixSyncExample.Cars` context. + """ + + def make_fixture(attrs \\ %{}) do + {:ok, make} = + attrs + |> Enum.into(%{name: "Blue"}) + |> PhoenixSyncExample.Cars.create_make() + + make + end + + @doc """ + Generate a car. + """ + def car_fixture(attrs \\ %{}) do + make = Map.get_lazy(attrs, :make, &make_fixture/0) + + {:ok, car} = + attrs + |> Enum.into(%{cost: 42, name: "Phantom", make_id: make.id}) + |> PhoenixSyncExample.Cars.create_car() + + car + end +end diff --git a/apps/phoenix_sync_example/test/test_helper.exs b/apps/phoenix_sync_example/test/test_helper.exs new file mode 100644 index 0000000..df57574 --- /dev/null +++ b/apps/phoenix_sync_example/test/test_helper.exs @@ -0,0 +1,2 @@ +ExUnit.start() +Ecto.Adapters.SQL.Sandbox.mode(PhoenixSyncExample.Repo, :manual) diff --git a/config/runtime.exs b/config/runtime.exs index 275b059..4ee1216 100644 --- a/config/runtime.exs +++ b/config/runtime.exs @@ -2,7 +2,9 @@ import Config if config_env() == :test do # port = 3333 - default_database_url = "postgresql://postgres:password@localhost:54321/electric?sslmode=disable" + default_database_url = + "postgresql://postgres:password@localhost:55555/phoenix_sync?sslmode=disable" + database_url = System.get_env("DATABASE_URL", default_database_url) config :electric, diff --git a/config/test.exs b/config/test.exs index b0a3aeb..9f84a90 100644 --- a/config/test.exs +++ b/config/test.exs @@ -1,32 +1,59 @@ import Config -config :logger, level: :warning +config :logger, + level: :warning, + compile_time_purge_matching: [ + [application: :electric, level_lower_than: :error] + ] -config :phoenix_sync, mode: :disabled +config :logger, :default_formatter, + format: "[$level] $message $metadata\n", + metadata: [:application, :file, :line] config :phoenix_sync, Phoenix.Sync.LiveViewTest.Endpoint, [] -config :phoenix_sync, Electric.Client, base_url: "http://localhost:3000" - -# configure the support repo with random options so we can validate them in Phoenix.Sync.ConfigTest -config :phoenix_sync, Support.Repo, - username: "postgres", - password: "password", - hostname: "localhost", - database: "electric", - port: 54321, - stacktrace: true, - show_sensitive_data_on_connection_error: true, - pool_size: 10, - pool: Ecto.Adapters.SQL.Sandbox - -config :phoenix_sync, Support.ConfigTestRepo, +db_config = [ username: "postgres", password: "password", hostname: "localhost", - database: "electric", - # phoenix_sync should fill in default port - # port: 54321, - stacktrace: true, - show_sensitive_data_on_connection_error: true, - pool_size: 10 + database: "phoenix_sync", + port: 55555 +] + +# configure the support repo with random options so we 
can validate them in Phoenix.Sync.ConfigTest +config :phoenix_sync, + Support.Repo, + db_config ++ + [ + stacktrace: true, + show_sensitive_data_on_connection_error: true, + pool_size: 10, + pool: Ecto.Adapters.SQL.Sandbox + ] + +config :phoenix_sync, + Support.ConfigTestRepo, + Keyword.delete(db_config, :port) ++ + [ + stacktrace: true, + show_sensitive_data_on_connection_error: true, + pool_size: 10 + ] + +config :phoenix_sync, + Support.SandboxRepo, + db_config ++ + [ + stacktrace: true, + show_sensitive_data_on_connection_error: true, + pool: Ecto.Adapters.SQL.Sandbox, + pool_size: 10, + ownership_log: :warning + ] + +config :phoenix_sync, env: :test, mode: :sandbox + +config :phoenix_sync, + Phoenix.Sync.SandboxTest.Endpoint, + secret_key_base: "GEp12Mvwia8mEwCJ", + live_view: [signing_salt: "GEp12Mvwia8mEwCJ"] diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..9a1258e --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,18 @@ +services: + postgres: + image: postgres:17-alpine + environment: + POSTGRES_DB: phoenix_sync + POSTGRES_USER: postgres + POSTGRES_PASSWORD: password + ports: + - "55555:5432" + volumes: + - ./postgres.conf:/etc/postgresql.conf:ro + tmpfs: + - /var/lib/postgresql/data + - /tmp + entrypoint: + - docker-entrypoint.sh + - -c + - config_file=/etc/postgresql.conf diff --git a/lib/phoenix/sync/electric.ex b/lib/phoenix/sync/electric.ex index df7143f..bc72c49 100644 --- a/lib/phoenix/sync/electric.ex +++ b/lib/phoenix/sync/electric.ex @@ -91,7 +91,7 @@ defmodule Phoenix.Sync.Electric do @behaviour Phoenix.Sync.Adapter @behaviour Plug - @valid_modes [:http, :embedded, :disabled] + @valid_modes [:http, :embedded, :sandbox, :disabled] @client_valid_modes @valid_modes -- [:disabled] @electric_available? Code.ensure_loaded?(Electric.Application) @@ -149,6 +149,9 @@ defmodule Phoenix.Sync.Electric do :disabled -> {:ok, []} + :sandbox -> + {:ok, [Phoenix.Sync.Sandbox]} + mode when mode in @valid_modes -> embedded_children(env, mode, electric_opts) @@ -187,9 +190,7 @@ defmodule Phoenix.Sync.Electric do @impl Phoenix.Sync.Adapter def client(env, opts) do {mode, electric_opts} = - opts - |> set_environment_defaults(env) - |> electric_opts(env) + opts |> set_environment_defaults(env) |> electric_opts(env) case mode do mode when mode in @client_valid_modes -> @@ -287,12 +288,30 @@ defmodule Phoenix.Sync.Electric do |> Electric.Application.api_plug_opts() |> Keyword.fetch!(:api) end + + if Code.ensure_loaded?(Ecto.Adapters.SQL.Sandbox) do + defp plug_opts(_env, :sandbox, _electric_opts) do + %Phoenix.Sync.Sandbox.APIAdapter{} + end + else + defp plug_opts(_env, :sandbox, _electric_opts) do + raise ArgumentError, + message: + "phoenix_sync configured in `mode: :sandbox` but Ecto.SQL not installed. Please add `:ecto_sql` to your dependencies or use `:http` mode." + end + end else defp plug_opts(_env, :embedded, _electric_opts) do raise ArgumentError, message: "phoenix_sync configured in `mode: :embedded` but electric not installed. Please add `:electric` to your dependencies or use `:http` mode." end + + defp plug_opts(_env, :sandbox, _electric_opts) do + raise ArgumentError, + message: + "phoenix_sync configured in `mode: :sandbox` but electric not installed. Please add `:electric` to your dependencies or use `:http` mode." 
+ end end defp embedded_children(_env, :disabled, _opts) do @@ -327,7 +346,7 @@ defmodule Phoenix.Sync.Electric do http_server = case mode do :http -> electric_api_server(electric_config) - :embedded -> [] + _ -> [] end {:ok, @@ -513,10 +532,28 @@ defmodule Phoenix.Sync.Electric do defp configure_client(opts, :embedded) do Electric.Client.embedded(opts) end + + # with a sandbox config, we're basically abandoning the idea of connecting + # to a real instance -- I think that's reasonable. The overhead of a real + # electric consuming a real replication stream is way too high for a simple + # consumer of streams + if Code.ensure_loaded?(Ecto.Adapters.SQL.Sandbox) do + defp configure_client(_opts, :sandbox) do + Phoenix.Sync.Sandbox.client() + end + else + defp configure_client(_opts, :sandbox) do + {:error, "sandbox mode is only available if Ecto.SQL is installed"} + end + end else defp configure_client(_opts, :embedded) do {:error, "electric not installed, unable to create embedded client"} end + + defp configure_client(_opts, :sandbox) do + {:error, "electric not installed, unable to create sandbox client"} + end end defp configure_client(electric_config, :http) do diff --git a/lib/phoenix/sync/live_view.ex b/lib/phoenix/sync/live_view.ex index 7d82cb6..a4270a7 100644 --- a/lib/phoenix/sync/live_view.ex +++ b/lib/phoenix/sync/live_view.ex @@ -207,7 +207,11 @@ if Code.ensure_loaded?(Phoenix.Component) do end if Phoenix.LiveView.connected?(socket) do - client = Keyword.get_lazy(electric_opts, :client, &Phoenix.Sync.client!/0) + client = + Keyword.get_lazy(electric_opts, :client, fn -> + get_in(socket.private[:connect_info].private[:electric_client]) || + Phoenix.Sync.client!() + end) Phoenix.LiveView.stream( socket, diff --git a/lib/phoenix/sync/sandbox.ex b/lib/phoenix/sync/sandbox.ex new file mode 100644 index 0000000..dd3c7b3 --- /dev/null +++ b/lib/phoenix/sync/sandbox.ex @@ -0,0 +1,462 @@ +if Code.ensure_loaded?(Ecto.Adapters.SQL.Sandbox) do + defmodule Phoenix.Sync.Sandbox do + @moduledoc """ + Integration between `Ecto.Adapters.SQL.Sandbox` and `Electric` that produces + replication events from Ecto operations within a sandboxed connection. + + In normal operation `Electric` creates and consumes a logical replication + slot on your Postgres database. This makes testing difficult because this + replication stream is stateful and will not emit events when testing using + `Ecto.Adapters.SQL.Sandbox` (which aborts all operations before they could + appear in the replication stream). + + `Phoenix.Sync.Sandbox` uses a custom `Ecto.Adapter` that intercepts writes + within a sandboxed connection and emits change events to a per-test + replication stack. + + ## Integration + + ### Step 1 + + In your `config/test.exs` file, set the `mode` to `:sandbox`: + + config :phoenix_sync, + env: config_env(), + mode: :sandbox + + ### Step 2 + + Replace your `Ecto.Repo`'s adapter with our sandbox adapter macro: + + + # before + defmodule MyApp.Repo do + use Ecto.Repo, + otp_app: :my_app, + adapter: Ecto.Adapters.Postgres + end + + + # after + defmodule MyApp.Repo do + use Phoenix.Sync.Sandbox.Postgres + + use Ecto.Repo, + otp_app: :my_app, + adapter: Phoenix.Sync.Sandbox.Postgres.adapter() + end + + This macro will configure the repo with `Ecto.Adapters.Postgres` in `dev` + and `prod` environments but enable intercepting db writes in tests.
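+
+    Once the repo is using the sandbox adapter and a sandbox stack has been
+    started for the test (see Step 3 below), sandboxed writes show up on sync
+    streams. Here is a minimal sketch of that end-to-end flow -- adapted from
+    the integration tests in `apps/phoenix_sync_example` and assuming a
+    `MyApp.Task` schema that is exposed as a shape:
+
+        test "sandboxed writes appear on the sync stream" do
+          parent = self()
+          client = Phoenix.Sync.client!()
+
+          # consume the shape in a supervised task, forwarding changes to the test
+          start_supervised!(
+            {Task,
+             fn ->
+               for msg <- Electric.Client.stream(client, MyApp.Task, replica: :full),
+                   do: send(parent, {:change, msg})
+             end}
+          )
+
+          Repo.insert!(%MyApp.Task{title: "Test Task"})
+
+          assert_receive {:change,
+                          %Electric.Client.Message.ChangeMessage{
+                            headers: %{operation: :insert}
+                          }},
+                         1000
+        end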
+ + ### Step 3 + + In your test file, after your `Ecto.Adapters.SQL.Sandbox.checkout(Repo)` + setup call, start a sandbox stack for your repo: + + + # before + setup do + :ok = Ecto.Adapters.SQL.Sandbox.checkout(Repo) + end + + # after + setup do + :ok = Ecto.Adapters.SQL.Sandbox.checkout(Repo) + # start our sandbox replication stack + Phoenix.Sync.Sandbox.start!(Repo) + end + + Or if you're using `Ecto.Adapters.SQL.Sandbox.start_owner!/2`: + + # before + setup do + pid = Ecto.Adapters.SQL.Sandbox.start_owner!(Repo) + on_exit(fn -> Ecto.Adapters.SQL.Sandbox.stop_owner(pid) end) + end + + # after + setup do + pid = Ecto.Adapters.SQL.Sandbox.start_owner!(Repo) + on_exit(fn -> Ecto.Adapters.SQL.Sandbox.stop_owner(pid) end) + # start our sandbox replication stack + Phoenix.Sync.Sandbox.start!(Repo, pid) + end + + Now in your tests, inserting via the configured repo will emit change + messages for sandboxed writes, exactly as if you were reading from the + Postgres replication stream. + + ## Collaborating processes + + ### Allowances + + The `Phoenix.Sync.Sandbox` uses the same ownership model as + `Ecto.Adapters.SQL.Sandbox` -- and processes that automatically inherit + access to the sandboxed connection will also register themselves to the + test electric stack. + + test "tasks have access" do + start_supervised!({Task, fn -> + # this will succeed and broadcast a change event on the test's + # sandbox replication stream + Repo.insert!(%MyApp.Task{title: "Test Task"}) + end}) + end + + However, processes started outside of the test process tree that need to be + [explicitly granted access to the sandboxed + connection](https://hexdocs.pm/ecto_sql/Ecto.Adapters.SQL.Sandbox.html#module-allowances) + will also need to be explicitly registered to the current test's + replication stream using `#{inspect(__MODULE__)}.allow/3`. This calls + `Ecto.Adapters.SQL.Sandbox.allow/3` and also registers the `allow` pid + against the current process's replication stack. + + So where you would normally need to call + `Ecto.Adapters.SQL.Sandbox.allow/3` simply call + `#{inspect(__MODULE__)}.allow/3` instead: + + test "calls worker that runs a query" do + allow = Process.whereis(MyApp.Worker) + #{inspect(__MODULE__)}.allow(Repo, self(), allow) + GenServer.call(MyApp.Worker, :run_query) + end + + ### Shared mode + + If you're configuring your Ecto sandbox in shared mode, you also need to configure + the `Phoenix.Sync.Sandbox` to use shared mode by passing `shared: true` to `Phoenix.Sync.Sandbox.start!/3`. + + # before + setup(tags) do + pid = Ecto.Adapters.SQL.Sandbox.start_owner!(Repo, shared: not tags[:async]) + on_exit(fn -> Ecto.Adapters.SQL.Sandbox.stop_owner(pid) end) + end + + # after + setup(tags) do + pid = Ecto.Adapters.SQL.Sandbox.start_owner!(Repo, shared: not tags[:async]) + on_exit(fn -> Ecto.Adapters.SQL.Sandbox.stop_owner(pid) end) + Phoenix.Sync.Sandbox.start!(Repo, pid, shared: not tags[:async]) + end + + ## Integrations + + ### Oban + + Because `Phoenix.Sync.Sandbox.Postgres.adapter/0` configures your + `Ecto.Repo` to use a non-standard adapter module, it causes problems + with Oban's migration system. + + If you see an error like: + + ``` txt + ** (KeyError) key :migrator not found in: [ + # your repo config... + ] + ``` + + then the solution is to force Oban to use its Postgres migrator in your `config/test.exs`: + + config :my_app, MyApp.Repo, + # base config...
+ migrator: Oban.Migrations.Postgres + + ## Limitations + + The sandbox adapter will intercept the following functions: + + - `c:Ecto.Repo.delete_all/2` + - `c:Ecto.Repo.update_all/3` + - `c:Ecto.Repo.delete/2` + - `c:Ecto.Repo.delete!/2` + - `c:Ecto.Repo.insert/2` + - `c:Ecto.Repo.insert!/2` + - `c:Ecto.Repo.insert_all/3` + - `c:Ecto.Repo.update/2` + - `c:Ecto.Repo.update!/2` + + It does this by potentially re-writing the SQL for the query. Because of this, + some more complex queries may fail. Please [raise an + issue](https://github.com/electric-sql/phoenix_sync/issues/new) + if you hit problems. + """ + + @doc false + use Supervisor + + alias __MODULE__ + + @type start_opts() :: [{:shared, boolean()}] + + @registry __MODULE__.Registry + + @doc false + def start_link(_args \\ []) do + Supervisor.start_link(__MODULE__, [], name: __MODULE__) + end + + @doc """ + See `start!/3`. + """ + @spec start!(Ecto.Repo.t()) :: :ok | no_return() + def start!(repo) when is_atom(repo) do + start!(repo, self(), []) + end + + @doc """ + See `start!/3`. + """ + @spec start!(Ecto.Repo.t(), pid() | start_opts()) :: :ok | no_return() + def start!(repo, opts_or_owner) + + def start!(repo, opts) when is_atom(repo) and is_list(opts) do + start!(repo, self(), opts) + end + + def start!(repo, owner) when is_atom(repo) and is_pid(owner) do + start!(repo, owner, []) + end + + @doc """ + Start a sandbox instance for `repo` linked to the given `owner` process. + + Call this after your `Ecto.Adapters.SQL.Sandbox.start_owner!/2` or + `Ecto.Adapters.SQL.Sandbox.checkout/2` call. + + setup do + :ok = Ecto.Adapters.SQL.Sandbox.checkout(Repo) + Phoenix.Sync.Sandbox.start!(Repo) + end + + ### Options + + - `shared` (default: `false`) - start the sandbox in shared mode. Only + enable shared mode if your repo sandbox is also in shared mode. + - `tags` - pass the test tags in order to generate a stack identifier based + on the current test context + """ + @spec start!(Ecto.Repo.t(), pid(), start_opts()) :: :ok | no_return() + def start!(repo, owner, opts) when is_atom(repo) and is_pid(owner) and is_list(opts) do + test_pid = self() + validate_sandbox_repo!(repo) + + case stack_id() do + nil -> + stack_id = generate_stack_id(opts) + + {:ok, _pid} = + ExUnit.Callbacks.start_supervised( + {__MODULE__.Stack, stack_id: stack_id, repo: repo, owner: owner} + ) + + :ok = maybe_set_shared_mode(owner, stack_id, opts) + + # give the inspector access to the sandboxed connection + Ecto.Adapters.SQL.Sandbox.allow(repo, owner, Sandbox.Inspector.name(stack_id)) + + # mark the stack as ready + Electric.StatusMonitor.mark_pg_lock_acquired(stack_id, owner) + Electric.StatusMonitor.mark_replication_client_ready(stack_id, owner) + Electric.StatusMonitor.mark_connection_pool_ready(stack_id, owner) + + api_config = Sandbox.Stack.config(stack_id, repo) + api = Electric.Application.api(api_config) + + {:ok, client} = + Phoenix.Sync.Electric.client(:test, Keyword.put(api_config, :mode, :embedded)) + + client = Map.put(client, :pool, {Phoenix.Sync.Sandbox.Fetch, stack_id: stack_id}) + + # we link the sandbox to the current (test) process not the connection + # owner because that's the ownership route that works. The owner + # is a convenience to link the repo connection to a process whose lifetime + # is explicitly managed rather than a mechanism for linking the test + # to the sandbox txn.
+ Sandbox.StackRegistry.register(test_pid, stack_id) + + # register the electric api and configured client to the stack + Sandbox.StackRegistry.configure(stack_id, api, client) + + :ok + + stack_id -> + case GenServer.whereis(__MODULE__.Stack.name(stack_id)) do + nil -> raise RuntimeError, message: "no stack found for #{inspect(stack_id)}" + _pid -> :ok + end + end + end + + defp generate_stack_id(opts) do + tags = Keyword.get(opts, :tags, %{}) + # with parameterised tests the same file:line can be running simultaneously + uid = System.unique_integer() |> to_string() + + suffix = + case Map.fetch(tags, :line) do + {:ok, line} -> ":#{line}" + :error -> "" + end + + prefix = + case Map.fetch(tags, :file) do + {:ok, file} -> "-" <> Path.relative_to(file, Path.dirname(Mix.Project.project_file())) + :error -> "" + end + + "#{inspect(__MODULE__.Stack)}#{uid}#{prefix}#{suffix}" + end + + defp maybe_set_shared_mode(owner, stack_id, opts) do + if opts[:shared] do + Sandbox.StackRegistry.shared_mode(owner, stack_id) + else + :ok + end + end + + @doc false + def name(id) do + {:via, Registry, {@registry, id}} + end + + @doc false + def retrieve_api!() do + stack_id = stack_id!() + {:ok, api} = Sandbox.StackRegistry.get_api(stack_id) + api + end + + @doc false + def stack_id! do + stack_id() || raise "No stack_id found. Did you call Phoenix.Sync.Sandbox.start!/1?" + end + + @doc false + def stack_id() do + [self() | Process.get(:"$callers") || []] + |> lookup_stack_id() + end + + defp lookup_stack_id(pids) when is_list(pids) do + # things can be inserted into a repo before the Phoenix.Sync application + # has even started + case GenServer.whereis(Sandbox.StackRegistry) do + nil -> + raise """ + Phoenix.Sync.Sandbox is not running. Have you set the mode to `:sandbox` in `config/test.exs`? + + # config/test.exs + config :phoenix_sync, + env: config_env(), + mode: :sandbox + """ + + registry_pid when is_pid(registry_pid) -> + pids + |> Stream.map(fn pid -> + Sandbox.StackRegistry.lookup(registry_pid, pid) + end) + |> Enum.find(&(!is_nil(&1))) + end + end + + @doc """ + Allows the process `name_or_pid` to access the sandboxed transaction. + + This is a wrapper around `Ecto.Adapters.SQL.Sandbox.allow/3` that also + connects the given process to the active sync instance. + + You should use it instead of `Ecto.Adapters.SQL.Sandbox.allow/3` for tests + that are using the sandbox replication stack. + + `opts` is passed to `Ecto.Adapters.SQL.Sandbox.allow/3`. + """ + @spec allow(Ecto.Repo.t(), pid(), pid() | GenServer.name(), keyword()) :: :ok | no_return() + def allow(repo, parent, name_or_pid, opts \\ []) do + with :ok <- Ecto.Adapters.SQL.Sandbox.allow(repo, parent, name_or_pid, opts) do + [parent] + |> lookup_stack_id() + |> case do + nil -> + raise RuntimeError, + "No stack_id found for process #{inspect(parent)}. Did you call Phoenix.Sync.Sandbox.start!/1?" 
+ + stack_id -> + case GenServer.whereis(name_or_pid) do + pid when is_pid(pid) -> + Sandbox.StackRegistry.register(pid, stack_id) + + other -> + raise "`allow/4` expects a PID or a locally registered process name but lookup returned: #{inspect(other)}" + end + end + end + end + + @doc false + @spec init_test_session(Plug.Conn.t(), Ecto.Repo.t(), map()) :: Plug.Conn.t() + def init_test_session(conn, _repo, session \\ %{}) do + client = client!() + + conn + |> Plug.Test.init_test_session(session) + |> Plug.Conn.put_private(:electric_client, client) + end + + @doc false + def plug_opts do + Phoenix.Sync.Application.plug_opts(mode: :sandbox) + end + + @doc """ + Retrieve a client configured for the current sandbox stack. + + Example: + + {:ok, client} = Phoenix.Sync.Sandbox.client() + Electric.Client.stream(client, Todo, replica: :full) + + """ + @spec client() :: {:ok, Electric.Client.t()} | {:error, String.t()} + def client do + if stack_id = stack_id() do + Sandbox.StackRegistry.get_client(stack_id) + else + {:error, "No stack_id found. Did you call Phoenix.Sync.Sandbox.start!/1?"} + end + end + + @doc """ + As per `client/0` but raises if there is no stack configured for the + current process. + """ + @spec client!() :: Electric.Client.t() | no_return() + def client!() do + {:ok, client} = Sandbox.StackRegistry.get_client(stack_id!()) + + client + end + + @impl true + @doc false + def init(_) do + children = [ + {Registry, keys: :unique, name: @registry}, + Sandbox.StackRegistry + ] + + Supervisor.init(children, strategy: :one_for_one) + end + + defp validate_sandbox_repo!(repo) do + if Code.ensure_loaded?(repo) && function_exported?(repo, :__adapter__, 0) do + if repo.__adapter__() != Sandbox.Postgres.Adapter do + raise RuntimeError, + "Repo #{inspect(repo)} is not using the Phoenix.Sync.Sandbox.Postgres adapter. Please ensure you have configured your repo to use the sandbox adapter."
+ end + end + end + end +end diff --git a/lib/phoenix/sync/sandbox/api_adapter.ex b/lib/phoenix/sync/sandbox/api_adapter.ex new file mode 100644 index 0000000..49176ed --- /dev/null +++ b/lib/phoenix/sync/sandbox/api_adapter.ex @@ -0,0 +1,31 @@ +if Code.ensure_loaded?(Ecto.Adapters.SQL.Sandbox) do + defmodule Phoenix.Sync.Sandbox.APIAdapter do + @moduledoc false + + defstruct [:shape] + + alias Phoenix.Sync.Adapter.PlugApi + + defimpl Phoenix.Sync.Adapter.PlugApi do + def predefined_shape(adapter, shape) do + {:ok, %{adapter | shape: shape}} + end + + def call(%{shape: nil} = _adapter, conn, params) do + shape_api = lookup_api!() + PlugApi.call(shape_api, conn, params) + end + + def call(%{shape: shape} = _adapter, conn, params) do + shape_api = lookup_api!() + {:ok, shape_api} = PlugApi.predefined_shape(shape_api, shape) + + PlugApi.call(shape_api, conn, params) + end + + defp lookup_api!() do + Phoenix.Sync.Sandbox.retrieve_api!() + end + end + end +end diff --git a/lib/phoenix/sync/sandbox/fetch.ex b/lib/phoenix/sync/sandbox/fetch.ex new file mode 100644 index 0000000..36467b5 --- /dev/null +++ b/lib/phoenix/sync/sandbox/fetch.ex @@ -0,0 +1,66 @@ +defmodule Phoenix.Sync.Sandbox.Fetch do + @moduledoc false + + alias Electric.Client + alias Electric.Client.Fetch + + require Logger + + @callback request(Client.t(), Fetch.Request.t(), opts :: Keyword.t()) :: + Fetch.Response.t() | {:error, Fetch.Response.t() | term()} + + @behaviour Electric.Client.Fetch.Pool + + def name(stack_id) do + Phoenix.Sync.Sandbox.name({__MODULE__, stack_id}) + end + + @impl Electric.Client.Fetch.Pool + def request(%Client{} = client, %Fetch.Request{} = request, opts) do + {:ok, stack_id} = Keyword.fetch(opts, :stack_id) + + request_id = request_id(client, request, stack_id) + + # The monitor process is unique to the request and launches the actual + # request as a linked process. + # + # This coalesces requests, so no matter how many simultaneous + # clients we have, we only ever make one request to the backend. + {:ok, monitor_pid} = start_monitor(stack_id, request_id, request, client) + + try do + ref = Fetch.Monitor.register(monitor_pid, self()) + + Fetch.Monitor.wait(ref) + catch + :exit, {reason, _} -> + Logger.debug(fn -> + "Request process ended with reason #{inspect(reason)} before we could register. Re-attempting." 
+ end) + + request(client, request, opts) + end + end + + defp start_monitor(stack_id, request_id, request, client) do + DynamicSupervisor.start_child( + name(stack_id), + {Electric.Client.Fetch.Monitor, {request_id, request, client}} + ) + |> return_existing() + end + + defp return_existing({:ok, pid}), do: {:ok, pid} + defp return_existing({:error, {:already_started, pid}}), do: {:ok, pid} + defp return_existing(error), do: error + + defp request_id(%Client{fetch: {fetch_impl, _}}, %Fetch.Request{} = request, stack_id) do + { + fetch_impl, + stack_id, + URI.to_string(request.endpoint), + request.headers, + Fetch.Request.params(request) + } + end +end diff --git a/lib/phoenix/sync/sandbox/inspector.ex b/lib/phoenix/sync/sandbox/inspector.ex new file mode 100644 index 0000000..22d332f --- /dev/null +++ b/lib/phoenix/sync/sandbox/inspector.ex @@ -0,0 +1,120 @@ +if Code.ensure_loaded?(Ecto.Adapters.SQL.Sandbox) do + defmodule Phoenix.Sync.Sandbox.Inspector do + @moduledoc false + + use GenServer + + @behaviour Electric.Postgres.Inspector + + @impl Electric.Postgres.Inspector + + def load_relation_oid(relation, stack_id) do + with {:ok, pid} <- validate_stack_alive(stack_id) do + GenServer.call(pid, {:load_relation_oid, relation}) + end + end + + @impl Electric.Postgres.Inspector + def load_relation_info(relation, stack_id) do + with {:ok, pid} <- validate_stack_alive(stack_id) do + GenServer.call(pid, {:load_relation_info, relation}) + end + end + + @impl Electric.Postgres.Inspector + def load_column_info(relation, stack_id) do + with {:ok, pid} <- validate_stack_alive(stack_id) do + GenServer.call(pid, {:load_column_info, relation}) + end + end + + @impl Electric.Postgres.Inspector + def clean(_, _), do: true + + @impl Electric.Postgres.Inspector + def list_relations_with_stale_cache(_), do: {:ok, []} + + def start_link(args) do + GenServer.start_link(__MODULE__, args, name: name(args[:stack_id])) + end + + def name(stack_id) do + Phoenix.Sync.Sandbox.name({__MODULE__, stack_id}) + end + + defp validate_stack_alive(stack_id) do + case GenServer.whereis(name(stack_id)) do + nil -> + {:error, "stack #{inspect(stack_id)} is not available"} + + pid -> + {:ok, pid} + end + end + + @impl GenServer + def init(args) do + {:ok, stack_id} = Keyword.fetch(args, :stack_id) + {:ok, repo} = Keyword.fetch(args, :repo) + + {:ok, %{repo: repo, stack_id: stack_id, relations: %{}, columns: %{}, oids: %{}}} + end + + @impl GenServer + def handle_call({:load_relation_oid, relation}, _from, state) do + {result, state} = + fetch_lazy(state, :oids, relation, fn -> + Electric.Postgres.Inspector.DirectInspector.load_relation_oid(relation, pool(state)) + end) + + {:reply, result, state} + end + + def handle_call({:load_relation_info, relation}, _from, state) do + {result, state} = + fetch_lazy(state, :relations, relation, fn -> + Electric.Postgres.Inspector.DirectInspector.load_relation_info(relation, pool(state)) + end) + + {:reply, result, state} + end + + def handle_call({:load_column_info, relation}, _from, state) do + {result, state} = + fetch_lazy(state, :columns, relation, fn -> + Electric.Postgres.Inspector.DirectInspector.load_column_info(relation, pool(state)) + end) + + {:reply, result, state} + end + + defp pool(state) do + %{pid: pool} = Ecto.Adapter.lookup_meta(state.repo.get_dynamic_repo()) + pool + end + + defp fetch_lazy(state, cache, key, fun) do + case Map.fetch(Map.fetch!(state, cache), key) do + {:ok, value} -> + {{:ok, value}, state} + + :error -> + case handling_exits(fun, state) do + {:ok, 
value} -> + {{:ok, value}, Map.update!(state, cache, &Map.put(&1, key, value))} + + {:error, _reason} = error -> + {error, state} + end + end + end + + defp handling_exits(fun, %{stack_id: stack_id}) do + try do + fun.() + catch + :exit, _reason -> {:error, "Stack #{inspect(stack_id)} down"} + end + end + end +end diff --git a/lib/phoenix/sync/sandbox/postgres.ex b/lib/phoenix/sync/sandbox/postgres.ex new file mode 100644 index 0000000..3b4a795 --- /dev/null +++ b/lib/phoenix/sync/sandbox/postgres.ex @@ -0,0 +1,52 @@ +defmodule Phoenix.Sync.Sandbox.Postgres do + defmacro __using__(_opts) do + quote do + require Phoenix.Sync.Sandbox.Postgres + end + end + + @doc """ + Replace hard-coded references to `Ecto.Adapters.Postgres` with this macro to + enable the `Phoenix.Sync.Sandbox` in test mode. + + In development or production environments the repo will use the standard + Postgres adapter directly; only if `Mix.env() == :test` (see below) will it + use the sandbox adapter shim. + + Example: + + defmodule MyApp.Repo do + use Phoenix.Sync.Sandbox.Postgres + + use Ecto.Repo, + otp_app: :my_app, + adapter: Phoenix.Sync.Sandbox.Postgres.adapter() + end + + ### Custom environments + + If you want to enable the sandbox adapter in different environments, you can + use your own evaluation logic: + + defmodule MyApp.Repo do + use Phoenix.Sync.Sandbox.Postgres + + use Ecto.Repo, + otp_app: :my_app, + adapter: Phoenix.Sync.Sandbox.Postgres.adapter(Mix.env() in [:test, :special]) + end + + > #### Warning {: .warning} + > + > The expression passed to `adapter/1` will be evaluated at **compile time**, + > not run-time, so only use expressions that can be evaluated at that point (this + > includes the various `Mix` functions). + """ + defmacro adapter(expr \\ Mix.env() == :test) do + {enable_sandbox?, _binding} = Code.eval_quoted(expr, binding()) + + if enable_sandbox?, + do: Phoenix.Sync.Sandbox.Postgres.Adapter, + else: Ecto.Adapters.Postgres + end +end diff --git a/lib/phoenix/sync/sandbox/postgres/adapter.ex b/lib/phoenix/sync/sandbox/postgres/adapter.ex new file mode 100644 index 0000000..fed7ca5 --- /dev/null +++ b/lib/phoenix/sync/sandbox/postgres/adapter.ex @@ -0,0 +1,515 @@ +if Code.ensure_loaded?(Ecto.Adapters.Postgres) do + defmodule Phoenix.Sync.Sandbox.Postgres.Adapter do + @moduledoc false + + @behaviour Ecto.Adapter + @behaviour Ecto.Adapter.Migration + @behaviour Ecto.Adapter.Queryable + @behaviour Ecto.Adapter.Schema + @behaviour Ecto.Adapter.Transaction + @behaviour Ecto.Adapter.Storage + + @adapter Ecto.Adapters.Postgres + @driver :postgrex + + @impl true + defmacro __before_compile__(env) do + Ecto.Adapters.SQL.__before_compile__(@driver, env) + end + + @impl true + defdelegate ensure_all_started(config, type), to: @adapter + + @impl true + defdelegate init(config), to: @adapter + + @impl true + defdelegate checkout(meta, opts, fun), to: @adapter + + @impl true + defdelegate checked_out?(meta), to: @adapter + + @impl true + defdelegate loaders(primitive_type, ecto_type), to: @adapter + + @impl true + defdelegate dumpers(primitive_type, ecto_type), to: @adapter + + ## Query + + # prepare(atom :: :all | :update_all | :delete_all, query :: Ecto.Query.t()) + @impl true + def prepare(:update_all, query) do + if stack_id = Phoenix.Sync.Sandbox.stack_id() do + {table, schema, source_prefix} = elem(query.sources, 0) + + [cte, prefix, fields, join, where | returning] = + Ecto.Adapters.Postgres.Connection.update_all(query) + + [_, source_alias] = + Regex.run(~r/"#{table}" AS ([a-z0-9]+)/,
IO.iodata_to_binary(prefix)) + + field_names = schema.__schema__(:fields) + + column_names = + Enum.map(field_names, &quote_name(to_string(schema.__schema__(:field_source, &1)))) + + old_join = [ + "(SELECT ", + Enum.intersperse(column_names, ", "), + " FROM ", + quote_name(source_prefix, table), + " FOR UPDATE) AS old" + ] + + join = + case join do + [] -> [" FROM " | old_join] + join -> [join | [", " | old_join]] + end + + on = + Enum.map_intersperse(schema.__schema__(:primary_key), " AND ", fn pk -> + ["old.", quote_name(pk), " = ", source_alias, ".", quote_name(pk)] + end) + + where = + case where do + [] -> [" WHERE ", on] + where -> [where | [" AND ", on]] + end + + return_old = Enum.map_intersperse(column_names, ", ", &["old.", &1]) + return_new = Enum.map_intersperse(column_names, ", ", &[source_alias, ".", &1]) + + {returning, original_count} = + case returning do + [] -> + {[" RETURNING ", return_old, ", ", return_new], 0} + + returning -> + count = length(query.select.fields) + {[returning | [",", return_old, ",", return_new]], count} + end + + meta = %{ + returning: + {original_count, + Enum.map(field_names, &{:old, &1}) ++ Enum.map(field_names, &{:new, &1})}, + schema_meta: %{schema: schema, source: table, prefix: source_prefix}, + stack_id: stack_id + } + + {:nocache, {{:update_all_sync, meta}, [cte, prefix, fields, join, where, returning]}} + else + # disable caching for update_all queries. otherwise any matching query with + # the sandbox disabled will override the sandboxed version + with {:cache, prepared} <- @adapter.prepare(:update_all, query) do + {:nocache, prepared} + end + end + end + + def prepare(:delete_all, query) do + if stack_id = Phoenix.Sync.Sandbox.stack_id() do + {table, schema, source_prefix} = elem(query.sources, 0) + + [cte, prefix, from, as, source_alias, join, where | returning] = + Ecto.Adapters.Postgres.Connection.delete_all(query) + + field_names = schema.__schema__(:fields) + column_names = Enum.map(field_names, &to_string(schema.__schema__(:field_source, &1))) + + return_old = + Enum.map_intersperse(column_names, ", ", &[source_alias, ".", quote_name(&1)]) + + {returning, original_count} = + case returning do + [] -> + {[" RETURNING ", return_old], 0} + + returning -> + count = length(query.select.fields) + {[returning | [",", return_old]], count} + end + + meta = %{ + returning: {original_count, Enum.map(field_names, &{:old, &1})}, + schema_meta: %{schema: schema, source: table, prefix: source_prefix}, + stack_id: stack_id + } + + {:nocache, + {{:delete_all_sync, meta}, + [cte, prefix, from, as, source_alias, join, where | returning]}} + else + # disable query caching for same reasons as update_all + with {:cache, prepared} <- @adapter.prepare(:delete_all, query) do + {:nocache, prepared} + end + end + end + + def prepare(type, query) do + @adapter.prepare(type, query) + end + + @impl true + def execute( + adapter_meta, + query_meta, + {:nocache, {{:update_all_sync, meta}, _sql}} = query, + params, + opts + ) do + with {n, rows} <- @adapter.execute(adapter_meta, query_meta, query, params, opts) do + {keep_count, columns} = meta.returning + + {return, changes} = + Enum.reduce(rows, {[], []}, fn row, {return, emit} -> + {return_cols, emit_cols} = Enum.split(row, keep_count) + + {new_row, old_row} = + columns + |> Enum.zip(emit_cols) + |> Enum.reduce({[], []}, fn + {{:new, col}, value}, {new_acc, old_acc} -> {[{col, value} | new_acc], old_acc} + {{:old, col}, value}, {new_acc, old_acc} -> {new_acc, [{col, value} | old_acc]} + end) + + {[return_cols |
return], [{:update, meta.schema_meta, old_row, new_row} | emit]} + end) + + Phoenix.Sync.Sandbox.Producer.emit_changes(meta.stack_id, Enum.reverse(changes)) + + {n, if(keep_count > 0, do: Enum.reverse(return), else: nil)} + end + end + + def execute( + adapter_meta, + query_meta, + {:nocache, {{:delete_all_sync, meta}, _sql}} = query, + params, + opts + ) do + with {n, rows} <- @adapter.execute(adapter_meta, query_meta, query, params, opts) do + {keep_count, columns} = meta.returning + + {return, changes} = + Enum.reduce(rows, {[], []}, fn row, {return, emit} -> + {return_cols, emit_cols} = Enum.split(row, keep_count) + + old_row = + columns + |> Enum.zip(emit_cols) + |> Enum.reduce([], fn + {{:old, col}, value}, old_acc -> [{col, value} | old_acc] + end) + + {[return_cols | return], [{:delete, meta.schema_meta, old_row} | emit]} + end) + + Phoenix.Sync.Sandbox.Producer.emit_changes(meta.stack_id, Enum.reverse(changes)) + + {n, if(keep_count > 0, do: Enum.reverse(return), else: nil)} + end + end + + def execute(adapter_meta, query_meta, query, params, opts) do + @adapter.execute(adapter_meta, query_meta, query, params, opts) + end + + @impl true + defdelegate stream(adapter_meta, query_meta, query, params, opts), to: @adapter + + ## Schema + + @impl true + defdelegate autogenerate(type), to: @adapter + + @impl true + def insert_all( + adapter_meta, + schema_meta, + header, + rows, + on_conflict, + returning, + placeholders, + opts + ) do + if stack_id = Phoenix.Sync.Sandbox.stack_id() do + all_columns = schema_meta.schema.__schema__(:fields) + + with {count, rows} <- + @adapter.insert_all( + adapter_meta, + schema_meta, + header, + rows, + on_conflict, + all_columns, + placeholders, + opts + ) do + inserted = + Enum.map(rows, fn row -> {:insert, schema_meta, Enum.zip(all_columns, row)} end) + + Phoenix.Sync.Sandbox.Producer.emit_changes(stack_id, inserted) + + return_rows = + case returning do + [] -> + nil + + columns -> + # need to keep returning column order + Enum.map(inserted, fn {:insert, _, insert} -> + Enum.map(columns, &Keyword.fetch!(insert, &1)) + end) + end + + {count, return_rows} + end + else + @adapter.insert_all( + adapter_meta, + schema_meta, + header, + rows, + on_conflict, + returning, + placeholders, + opts + ) + end + end + + @impl true + def insert( + adapter_meta, + %{source: "schema_migrations"} = schema_meta, + params, + on_conflict, + returning, + opts + ) do + @adapter.insert(adapter_meta, schema_meta, params, on_conflict, returning, opts) + end + + def insert(adapter_meta, schema_meta, params, on_conflict, returning, opts) do + if stack_id = Phoenix.Sync.Sandbox.stack_id() do + all_columns = schema_meta.schema.__schema__(:fields) + + with {:ok, inserted} <- + @adapter.insert(adapter_meta, schema_meta, params, on_conflict, all_columns, opts) do + Phoenix.Sync.Sandbox.Producer.emit_changes(stack_id, [ + {:insert, schema_meta, inserted} + ]) + + {:ok, take(inserted, returning)} + end + else + @adapter.insert( + adapter_meta, + schema_meta, + params, + on_conflict, + returning, + opts + ) + end + end + + defp quote_names(names) do + Enum.map_intersperse(names, ?,, &quote_name/1) + end + + defp quote_name(nil, name), do: quote_name(name) + + defp quote_name(prefix, name), do: [quote_name(prefix), ?., quote_name(name)] + + defp quote_name({prefix, name}) do + quote_name(prefix, name) + end + + defp quote_name(name) when is_atom(name) do + quote_name(Atom.to_string(name)) + end + + defp quote_name(name) when is_binary(name) do + if String.contains?(name, "\"") do + raise
ArgumentError, + "bad literal/field/index/table name #{inspect(name)} (\" is not permitted)" + end + + [?", name, ?"] + end + + defp intersperse_reduce(list, separator, user_acc, reducer, acc \\ []) + + defp intersperse_reduce([], _separator, user_acc, _reducer, acc), + do: {acc, user_acc} + + defp intersperse_reduce([elem], _separator, user_acc, reducer, acc) do + {elem, user_acc} = reducer.(elem, user_acc) + {[acc | elem], user_acc} + end + + defp intersperse_reduce([elem | rest], separator, user_acc, reducer, acc) do + {elem, user_acc} = reducer.(elem, user_acc) + intersperse_reduce(rest, separator, user_acc, reducer, [acc, elem, separator]) + end + + defp returning([]), + do: [] + + defp returning(returning), + do: [" RETURNING " | quote_names(returning)] + + @impl true + def update(adapter_meta, schema_meta, fields, params, returning, opts) do + if stack_id = Phoenix.Sync.Sandbox.stack_id() do + %{source: source, prefix: prefix} = schema_meta + + # This is adapted from this function: + # sql = Ecto.Adapters.Postgres.Connection.update(prefix, source, fields, params, []) + + {fields, field_values} = :lists.unzip(fields) + filter_values = Keyword.values(params) + + {fields, count} = + intersperse_reduce(fields, ", ", 1, fn field, acc -> + {[quote_name(field), " = $" | Integer.to_string(acc)], acc + 1} + end) + + {filters_new, _count} = update_filters(params, count, &quote_name("new", &1)) + {filters_old, _count} = update_filters(params, count, &quote_name/1) + + all_columns = schema_meta.schema.__schema__(:fields) + cols = Enum.map_intersperse(all_columns, ?,, &quote_name/1) + return_all = Enum.map(all_columns, &{:old, &1}) ++ Enum.map(all_columns, &{:new, &1}) + + # https://stackoverflow.com/a/7927957 + # return the old and new values from an update by acquiring an exclusive lock + # on the row and using a sub-query.
+ # UPDATE table new + # SET value = $1 + # FROM (select id, value FROM table WHERE id = $2 FOR UPDATE) old + # WHERE new.id = $2 + # RETURNING old.id, old.value, new.id, new.value; + + sql = [ + "UPDATE ", + quote_name(prefix, source), + " new SET ", + fields, + " FROM (SELECT ", + cols, + " FROM ", + quote_name(prefix, source), + " WHERE ", + filters_old, + " FOR UPDATE) old", + " WHERE ", + filters_new | returning(return_all) + ] + + with {:ok, updated} <- + Ecto.Adapters.SQL.struct( + adapter_meta, + Ecto.Adapters.Postgres.Connection, + sql, + :update, + source, + params, + field_values ++ filter_values, + :raise, + return_all, + opts + ) do + {new, old} = + Enum.reduce(updated, {[], []}, fn + {{:new, k}, v}, {new, old} -> {[{k, v} | new], old} + {{:old, k}, v}, {new, old} -> {new, [{k, v} | old]} + end) + + Phoenix.Sync.Sandbox.Producer.emit_changes(stack_id, [ + {:update, schema_meta, old, new} + ]) + + {:ok, take(new, returning)} + end + else + @adapter.update(adapter_meta, schema_meta, fields, params, returning, opts) + end + end + + defp update_filters(params, count, quote_fun) do + intersperse_reduce(params, " AND ", count, fn + {field, nil}, acc -> + {[quote_fun.(field), " IS NULL"], acc} + + {field, _value}, acc -> + {[quote_fun.(field), " = $" | Integer.to_string(acc)], acc + 1} + end) + end + + @impl true + def delete(adapter_meta, schema_meta, params, returning, opts) do + if stack_id = Phoenix.Sync.Sandbox.stack_id() do + all_columns = schema_meta.schema.__schema__(:fields) + + with {:ok, deleted} <- + @adapter.delete(adapter_meta, schema_meta, params, all_columns, opts) do + Phoenix.Sync.Sandbox.Producer.emit_changes(stack_id, [ + {:delete, schema_meta, deleted} + ]) + + {:ok, take(deleted, returning)} + end + else + @adapter.delete(adapter_meta, schema_meta, params, returning, opts) + end + end + + # basically Keyword.take/2 but preserves the order of the columns + defp take(row, columns) do + Enum.map(columns, &{&1, Keyword.fetch!(row, &1)}) + end + + ## Transaction + + @impl true + defdelegate transaction(meta, opts, fun), to: @adapter + + @impl true + defdelegate in_transaction?(meta), to: @adapter + + @impl true + defdelegate rollback(meta, value), to: @adapter + + ## Migration + + @impl true + defdelegate execute_ddl(meta, definition, opts), to: @adapter + + @impl true + defdelegate supports_ddl_transaction?(), to: @adapter + + @impl true + defdelegate lock_for_migrations(meta, opts, fun), to: @adapter + + ## Storage + + @impl true + defdelegate storage_up(opts), to: @adapter + + @impl true + defdelegate storage_down(opts), to: @adapter + + @impl true + defdelegate storage_status(opts), to: @adapter + end +end diff --git a/lib/phoenix/sync/sandbox/producer.ex b/lib/phoenix/sync/sandbox/producer.ex new file mode 100644 index 0000000..71ac0f6 --- /dev/null +++ b/lib/phoenix/sync/sandbox/producer.ex @@ -0,0 +1,240 @@ +if Code.ensure_loaded?(Ecto.Adapters.SQL.Sandbox) do + defmodule Phoenix.Sync.Sandbox.Producer do + @moduledoc false + + alias Electric.Replication.Changes.{ + Transaction, + NewRecord, + UpdatedRecord, + DeletedRecord + } + + alias Electric.Replication.LogOffset + alias Electric.Replication.ShapeLogCollector + + def child_spec(opts) do + {:ok, stack_id} = Keyword.fetch(opts, :stack_id) + + %{ + id: {__MODULE__, stack_id}, + start: {__MODULE__, :start_link, [stack_id]}, + type: :worker, + restart: :transient + } + end + + def emit_changes(stack_id \\ Phoenix.Sync.Sandbox.stack_id(), changes) + + def emit_changes(nil, _changes) do + raise RuntimeError, 
"Process #{inspect(self())} is not registered to a sandbox" + end + + def emit_changes(stack_id, changes) when is_binary(stack_id) do + GenServer.cast(name(stack_id), {:emit_changes, changes}) + end + + def name(stack_id) do + Phoenix.Sync.Sandbox.name({__MODULE__, stack_id}) + end + + def start_link(stack_id) do + GenServer.start_link(__MODULE__, stack_id, name: name(stack_id)) + end + + def init(stack_id) do + state = %{txid: 10000, stack_id: stack_id} + {:ok, state} + end + + def handle_cast({:emit_changes, changes}, %{txid: txid, stack_id: stack_id} = state) do + {msgs, next_txid} = + changes + |> Enum.with_index(0) + |> Enum.map_reduce(txid, &msg_from_change(&1, &2, txid)) + + :ok = + txid + |> transaction(msgs) + |> ShapeLogCollector.store_transaction(ShapeLogCollector.name(stack_id)) + + {:noreply, %{state | txid: next_txid}} + end + + defp transaction(txid, changes) do + %Transaction{ + xid: txid, + lsn: Electric.Postgres.Lsn.from_integer(txid), + last_log_offset: Enum.at(changes, -1) |> Map.fetch!(:log_offset), + changes: changes, + num_changes: length(changes), + commit_timestamp: DateTime.utc_now(), + affected_relations: Enum.into(changes, MapSet.new(), & &1.relation) + } + end + + defp msg_from_change({{:insert, schema_meta, values}, i}, lsn, txid) do + { + %NewRecord{ + relation: relation(schema_meta), + record: record(values, schema_meta), + log_offset: log_offset(txid, i) + }, + lsn + 100 + } + end + + defp msg_from_change({{:update, schema_meta, old, new}, i}, lsn, txid) do + { + UpdatedRecord.new( + relation: relation(schema_meta), + old_record: record(old, schema_meta), + record: record(new, schema_meta), + log_offset: log_offset(txid, i) + ), + lsn + 100 + } + end + + defp msg_from_change({{:delete, schema_meta, old}, i}, lsn, txid) do + { + %DeletedRecord{ + relation: relation(schema_meta), + old_record: record(old, schema_meta), + log_offset: log_offset(txid, i) + }, + lsn + 100 + } + end + + defp relation(%{source: source, prefix: prefix}) do + {namespace(prefix), source} + end + + defp namespace(nil), do: "public" + defp namespace(ns) when is_binary(ns), do: ns + + defp record(values, %{schema: schema}) do + Map.new(values, &load_value(&1, schema)) + end + + defp load_value({field, raw_value}, schema) do + type = schema.__schema__(:type, field) + + {:ok, value} = + Ecto.Type.adapter_load( + Ecto.Adapters.Postgres, + type, + raw_value + ) + + # Converts to lower level postgrex type which depends on the type's + # `type/0` value. Postgrex does the actual serialization of maps in real + # usage so this converts embed structs to plain maps. 
+ # + # Postgrex also encodes date & time types, lists and decimals itself (so + # ecto just leaves these as-is) so the `dump/1` function needs to do the + # work normally done by postgrex + {:ok, value} = Ecto.Type.dump(type, value) + + {to_string(field), dump(value, type)} + end + + defp dump(%Decimal{} = decimal, _type) do + Decimal.to_string(decimal) + end + + defp dump(%type{} = datetime, _type) + when type in [NaiveDateTime, DateTime, Time, Date] do + type.to_iso8601(datetime) + end + + defp dump(map, _type) when is_map(map), do: JSON.encode!(map) + + defp dump(list, type) when is_list(list) do + if encode_list_json?(type) do + JSON.encode!(list) + else + encode_array(list, type) + end + end + + defp dump(nil, _type), do: nil + defp dump(value, _type), do: to_string(value) + + defp log_offset(txid, index) do + LogOffset.new(txid, index) + end + + defp encode_list_json?(type) do + case type do + {:array, _inner_type} -> + false + + {t, _} when t in [:map, :json, :jsonb] -> + true + + {:parameterized, {module, params}} -> + encode_list_json?(module.type(params)) + + t -> + if function_exported?(t, :type, 0) do + encode_list_json?(t.type()) + else + false + end + end + end + + @doc ~S""" + ## Examples + + iex> encode_array([1, 2, 3], {:array, :integer}) + "{1,2,3}" + + iex> encode_array(["a", "b", "c"], {:array, :string}) + ~s|{"a","b","c"}| + + iex> encode_array(["\"a\"", "b", "c"], {:array, :string}) + ~S|{"\"a\"","b","c"}| + + iex> encode_array([], {:array, :string}) + "{}" + + iex> encode_array([1, nil, 3], {:array, :integer}) + "{1,NULL,3}" + + iex> encode_array([[1, [2]], [3, 4]], {:array, :integer}) + "{{1,{2}},{3,4}}" + + iex> encode_array([%{value: 1}, %{value: 2}], {:array, :jsonb}) + ~S|{"{\"value\":1}","{\"value\":2}"}| + """ + def encode_array(array, type) when is_list(array) do + encode_array_inner(array, type) |> IO.iodata_to_binary() + end + + defp encode_array_inner(array, type) do + [?{, Enum.map_intersperse(array, ",", &encode_value(&1, type)), ?}] + end + + defp encode_value(list, type) when is_list(list) do + encode_array_inner(list, type) + end + + defp encode_value(nil, _type) do + "NULL" + end + + defp encode_value(value, _type) when is_binary(value) do + [?", String.replace(value, "\"", "\\\""), ?"] + end + + defp encode_value(int, _type) when is_integer(int) do + to_string(int) + end + + defp encode_value(value, {:array, value_type} = type) do + value |> dump(value_type) |> encode_value(type) + end + end +end diff --git a/lib/phoenix/sync/sandbox/publication_manager.ex b/lib/phoenix/sync/sandbox/publication_manager.ex new file mode 100644 index 0000000..8536305 --- /dev/null +++ b/lib/phoenix/sync/sandbox/publication_manager.ex @@ -0,0 +1,60 @@ +if Code.ensure_loaded?(Ecto.Adapters.SQL.Sandbox) do + defmodule Phoenix.Sync.Sandbox.PublicationManager do + @moduledoc false + + use GenServer + + @behaviour Electric.Replication.PublicationManager + + def start_link(_) do + :ignore + end + + def init(_arg) do + :ignore + end + + def name(stack_id) when is_binary(stack_id) do + Phoenix.Sync.Sandbox.name({__MODULE__, stack_id}) + end + + def name(opts) when is_list(opts) do + opts + |> Keyword.fetch!(:stack_id) + |> name() + end + + def recover_shape(_shape_handle, _shape, _opts) do + :ok + end + + def recover_shape(_shape, _opts) do + :ok + end + + def add_shape(_shape_handle, _shape, opts) do + snapshotter = self() + {:ok, owner} = Keyword.fetch(opts, :owner) + {:ok, repo} = Keyword.fetch(opts, :repo) + + Ecto.Adapters.SQL.Sandbox.allow(repo, owner, snapshotter) + 
:ok + end + + def add_shape(_shape, _opts) do + :ok + end + + def remove_shape(_shape_handle, _shape, _opts) do + :ok + end + + def remove_shape(_shape, _opts) do + :ok + end + + def refresh_publication(_opts) do + :ok + end + end +end diff --git a/lib/phoenix/sync/sandbox/schema_reconciler.ex b/lib/phoenix/sync/sandbox/schema_reconciler.ex new file mode 100644 index 0000000..9051158 --- /dev/null +++ b/lib/phoenix/sync/sandbox/schema_reconciler.ex @@ -0,0 +1,9 @@ +defmodule Phoenix.Sync.Sandbox.SchemaReconciler do + @moduledoc false + + use GenServer + + def start_link(_args), do: :ignore + def init(_arg), do: :ignore + def reconcile_now(_name_or_pid), do: :ok +end diff --git a/lib/phoenix/sync/sandbox/stack.ex b/lib/phoenix/sync/sandbox/stack.ex new file mode 100644 index 0000000..eef6e9c --- /dev/null +++ b/lib/phoenix/sync/sandbox/stack.ex @@ -0,0 +1,151 @@ +if Code.ensure_loaded?(Ecto.Adapters.SQL.Sandbox) do + defmodule Phoenix.Sync.Sandbox.Stack do + @moduledoc false + + use Supervisor, restart: :transient + + alias Phoenix.Sync.Sandbox + + def child_spec(opts) do + {:ok, stack_id} = Keyword.fetch(opts, :stack_id) + {:ok, repo} = Keyword.fetch(opts, :repo) + {:ok, owner} = Keyword.fetch(opts, :owner) + + %{ + id: {__MODULE__, stack_id}, + start: {__MODULE__, :start_link, [stack_id, repo, owner]}, + type: :supervisor, + restart: :transient + } + end + + def name(stack_id) do + Phoenix.Sync.Sandbox.name({__MODULE__, stack_id}) + end + + def start_link(stack_id, repo, owner) do + Supervisor.start_link(__MODULE__, {stack_id, repo, owner}, name: name(stack_id)) + end + + alias Electric.Shapes.Querying + alias Electric.ShapeCache.Storage + + def snapshot_query( + parent, + shape_handle, + shape, + db_pool, + storage, + stack_id, + chunk_bytes_threshold + ) do + Postgrex.transaction( + db_pool, + fn conn -> + GenServer.cast(parent, {:pg_snapshot_known, shape_handle, {1000, 1100, []}}) + + # Enforce display settings *before* querying initial data to maintain consistent + # formatting between snapshot and live log entries. + Enum.each(Electric.Postgres.display_settings(), &Postgrex.query!(conn, &1, [])) + + stream = + Querying.stream_initial_data(conn, stack_id, shape, chunk_bytes_threshold) + |> Stream.transform( + fn -> false end, + fn item, acc -> + if not acc, do: GenServer.cast(parent, {:snapshot_started, shape_handle}) + {[item], true} + end, + fn acc -> + if not acc, do: GenServer.cast(parent, {:snapshot_started, shape_handle}) + acc + end + ) + + # could pass the shape and then make_new_snapshot! 
can pass it to row_to_snapshot_item + # that way it has the relation, but it is still missing the pk_cols + Storage.make_new_snapshot!(stream, storage) + end, + timeout: :infinity + ) + end + + def config(stack_id, repo, owner \\ nil) do + publication_manager_spec = + {Sandbox.PublicationManager, stack_id: stack_id, owner: owner, repo: repo} + + # persistent_kv = Electric.PersistentKV.Memory.new!() + inspector = {Sandbox.Inspector, stack_id} + + %{pid: pool} = Ecto.Adapter.lookup_meta(repo.get_dynamic_repo()) + + registry = :"#{__MODULE__}.Registry-#{stack_id}" + + [ + purge_all_shapes?: false, + stack_id: stack_id, + storage: { + Electric.ShapeCache.InMemoryStorage, + %{stack_id: stack_id, table_base_name: :"#{stack_id}"} + }, + inspector: inspector, + publication_manager: publication_manager_spec, + chunk_bytes_threshold: 10_485_760, + db_pool: pool, + create_snapshot_fn: &snapshot_query/7, + log_producer: Electric.Replication.ShapeLogCollector.name(stack_id), + consumer_supervisor: Electric.Shapes.DynamicConsumerSupervisor.name(stack_id), + registry: registry, + max_shapes: nil + ] + end + + def init({stack_id, repo, owner}) do + # shape_cache_spec = {Electric.ShapeCache, shape_cache_opts} + + config = config(stack_id, repo, owner) + shape_cache_spec = {Electric.ShapeCache, config} + persistent_kv = Electric.PersistentKV.Memory.new!() + + shape_status_spec = + {Electric.ShapeCache.ShapeStatus, + %Electric.ShapeCache.ShapeStatus{ + shape_meta_table: Electric.ShapeCache.get_shape_meta_table(stack_id: stack_id) + }} + + children = [ + {Registry, keys: :duplicate, name: config[:registry]}, + {Electric.ProcessRegistry, stack_id: stack_id}, + {Electric.StatusMonitor, stack_id}, + {Electric.Shapes.Monitor, + stack_id: stack_id, + storage: config[:storage], + shape_status: shape_status_spec, + publication_manager: config[:publication_manager]}, + # TODO: start an electric stack, decoupled from the db connection + # with in memory storage, a mock publication_manager and inspector + Supervisor.child_spec( + { + Electric.Replication.Supervisor, + stack_id: stack_id, + shape_cache: shape_cache_spec, + publication_manager: config[:publication_manager], + log_collector: { + Electric.Replication.ShapeLogCollector, + stack_id: stack_id, inspector: config[:inspector], persistent_kv: persistent_kv + }, + schema_reconciler: {Phoenix.Sync.Sandbox.SchemaReconciler, stack_id}, + stack_events_registry: config[:registry] + }, + restart: :temporary + ), + {Sandbox.Inspector, stack_id: stack_id, repo: repo}, + {Sandbox.Producer, stack_id: stack_id}, + {DynamicSupervisor, + name: Phoenix.Sync.Sandbox.Fetch.name(stack_id), strategy: :one_for_one} + ] + + Supervisor.init(children, strategy: :one_for_one) + end + end +end diff --git a/lib/phoenix/sync/sandbox/stack_registry.ex b/lib/phoenix/sync/sandbox/stack_registry.ex new file mode 100644 index 0000000..3cb3aca --- /dev/null +++ b/lib/phoenix/sync/sandbox/stack_registry.ex @@ -0,0 +1,120 @@ +if Code.ensure_loaded?(Ecto.Adapters.SQL.Sandbox) do + defmodule Phoenix.Sync.Sandbox.StackRegistry do + @moduledoc false + + use GenServer + + def start_link(_) do + GenServer.start_link(__MODULE__, [], name: __MODULE__) + end + + def register(registry \\ __MODULE__, pid, stack_id) do + GenServer.call(registry, {:register, pid, stack_id}) + end + + def lookup(registry \\ __MODULE__, pid) do + GenServer.call(registry, {:lookup, pid}) + end + + def configure(registry \\ __MODULE__, stack_id, api, client) do + GenServer.call(registry, {:configure, self(), stack_id, api, 
client}) + end + + # api should be removed when test pid exits + # registered pid should be removed when it exits + + def get_api(registry \\ __MODULE__, stack_id) do + GenServer.call(registry, {:get, :api, stack_id}) + end + + def get_client(registry \\ __MODULE__, stack_id) do + GenServer.call(registry, {:get, :client, stack_id}) + end + + def shared_mode(registry \\ __MODULE__, owner, stack_id) do + GenServer.call(registry, {:shared_mode, owner, stack_id}) + end + + ## callbacks + + def init(_) do + {:ok, %{stack_pids: %{}, stacks: %{}, shared: nil}} + end + + def handle_call({:register, pid, stack_id}, _from, state) do + _ref = Process.monitor(pid, tag: {:down, :register}) + + state = Map.update!(state, :stack_pids, &Map.put(&1, pid, stack_id)) + + {:reply, :ok, state} + end + + def handle_call({:lookup, _pid}, _from, %{shared: {_owner, stack_id}} = state) do + {:reply, stack_id, state} + end + + def handle_call({:lookup, pid}, _from, state) do + {:reply, Map.get(state.stack_pids, pid), state} + end + + def handle_call({:configure, pid, stack_id, api, client}, _from, state) do + _ref = Process.monitor(pid, tag: {:down, :stack, stack_id}) + + state = Map.update!(state, :stacks, &Map.put(&1, stack_id, %{api: api, client: client})) + + {:reply, :ok, state} + end + + def handle_call({:get, key, stack_id}, _from, state) when key in [:api, :client] do + {:reply, get_config(state, stack_id, key), state} + end + + # shared mode only works for non-async tests, which means that only + # one test is running at any time, so we can set the shared pid globally + def handle_call({:shared_mode, owner, stack_id}, _from, %{shared: nil} = state) + when is_pid(owner) do + _ref = Process.monitor(owner, tag: {:down, :shared, stack_id}) + {:reply, :ok, Map.put(state, :shared, {owner, stack_id})} + end + + def handle_call({:shared_mode, owner, stack_id}, _from, %{shared: {owner, stack_id}} = state) do + {:reply, :ok, state} + end + + def handle_call( + {:shared_mode, _owner, _stack_id}, + _from, + %{shared: {owner, stack_id}} = state + ) do + {:reply, + {:error, + "Shared mode already registered to pid #{inspect(owner)} for stack #{inspect(stack_id)}"}, + state} + end + + def handle_info({{:down, :stack, stack_id}, _ref, :process, _pid, _reason}, state) do + state = Map.update!(state, :stacks, &Map.delete(&1, stack_id)) + + {:noreply, state} + end + + def handle_info( + {{:down, :shared, stack_id}, _ref, :process, pid, _reason}, + %{shared: {pid, stack_id}} = state + ) do + {:noreply, Map.put(state, :shared, nil)} + end + + def handle_info({{:down, :register}, _ref, :process, pid, _reason}, state) do + state = Map.update!(state, :stack_pids, &Map.delete(&1, pid)) + + {:noreply, state} + end + + defp get_config(state, stack_id, key) do + with {:ok, config} <- Map.fetch(state.stacks, stack_id) do + Map.fetch(config, key) + end + end + end +end diff --git a/mix.exs b/mix.exs index f7ebe41..92ba8e6 100644 --- a/mix.exs +++ b/mix.exs @@ -19,11 +19,8 @@ defmodule Phoenix.Sync.MixProject do description: description(), source_url: "https://github.com/electric-sql/phoenix_sync", homepage_url: "https://hexdocs.pm/phoenix_sync", - aliases: [ - "test.all": ["test", "test.as_a_dep"], - "test.as_a_dep": &test_as_a_dep/1 - ], - preferred_cli_env: ["test.all": :test] + aliases: aliases(), + preferred_cli_env: ["test.all": :test, "test.apps": :test] ] end @@ -34,6 +31,10 @@ defmodule Phoenix.Sync.MixProject do ] end + def cli do + [preferred_envs: ["test.all": :test, "test.apps": :test]] + end + defp deps do [ 
{:nimble_options, "~> 1.1"}, @@ -41,17 +42,19 @@ defmodule Phoenix.Sync.MixProject do {:plug, "~> 1.0"}, {:jason, "~> 1.0"}, {:ecto_sql, "~> 3.10", optional: true}, - {:electric, "~> 1.0.21", optional: true}, - {:electric_client, "~> 0.6.3"} + {:electric, "~> 1.0.24", optional: true}, + # 0.6.5 has the decoding fix + {:electric_client, "> 0.6.4"} ] ++ deps_for_env(Mix.env()) end defp deps_for_env(:test) do [ - {:floki, "~> 0.36", only: [:test]}, {:bandit, "~> 1.5", only: [:test], override: true}, - {:uuid, "~> 1.1", only: [:test]}, - {:mox, "~> 1.1", only: [:test]} + {:floki, "~> 0.36", only: [:test]}, + {:lazy_html, ">= 0.1.0", only: :test}, + {:mox, "~> 1.1", only: [:test]}, + {:uuid, "~> 1.1", only: [:test]} ] end @@ -66,6 +69,16 @@ defmodule Phoenix.Sync.MixProject do [] end + defp aliases do + [ + "test.all": ["test", "test.as_a_dep", "test.apps"], + "test.as_a_dep": &test_as_a_dep/1, + "test.apps": &test_apps/1, + start_dev: "cmd docker compose up -d", + stop_dev: "cmd docker compose down -v" + ] + end + defp docs do [ main: "readme", @@ -124,7 +137,30 @@ defmodule Phoenix.Sync.MixProject do end """) - mix_cmd_with_status_check(["do", "deps.get,", "compile", "--force" | args]) + mix_cmd_with_status_check([ + "do", + "deps.get,", + "compile", + "--force", + "--warnings-as-errors" | args + ]) + end) + end + + defp test_apps(args) do + IO.puts("==> Running tests in Phoenix Sync example apps") + + Path.wildcard("apps/*") + |> Enum.each(fn app -> + File.cd!(app, fn -> + mix_cmd_with_status_check(["deps.get"]) + + mix_cmd_with_status_check([ + "test", + "--force", + "--warnings-as-errors" | args + ]) + end) end) end diff --git a/mix.lock b/mix.lock index a516a67..f5a6d00 100644 --- a/mix.lock +++ b/mix.lock @@ -3,6 +3,7 @@ "backoff": {:hex, :backoff, "1.1.6", "83b72ed2108ba1ee8f7d1c22e0b4a00cfe3593a67dbc792799e8cce9f42f796b", [:rebar3], [], "hexpm", "cf0cfff8995fb20562f822e5cc47d8ccf664c5ecdc26a684cbe85c225f9d7c39"}, "bandit": {:hex, :bandit, "1.7.0", "d1564f30553c97d3e25f9623144bb8df11f3787a26733f00b21699a128105c0c", [:mix], [{:hpax, "~> 1.0", [hex: :hpax, repo: "hexpm", optional: false]}, {:plug, "~> 1.18", [hex: :plug, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}, {:thousand_island, "~> 1.0", [hex: :thousand_island, repo: "hexpm", optional: false]}, {:websock, "~> 0.5", [hex: :websock, repo: "hexpm", optional: false]}], "hexpm", "3e2f7a98c7a11f48d9d8c037f7177cd39778e74d55c7af06fe6227c742a8168a"}, "castore": {:hex, :castore, "1.0.14", "4582dd7d630b48cf5e1ca8d3d42494db51e406b7ba704e81fbd401866366896a", [:mix], [], "hexpm", "7bc1b65249d31701393edaaac18ec8398d8974d52c647b7904d01b964137b9f4"}, + "cc_precompiler": {:hex, :cc_precompiler, "0.1.10", "47c9c08d8869cf09b41da36538f62bc1abd3e19e41701c2cea2675b53c704258", [:mix], [{:elixir_make, "~> 0.7", [hex: :elixir_make, repo: "hexpm", optional: false]}], "hexpm", "f6e046254e53cd6b41c6bacd70ae728011aa82b2742a80d6e2214855c6e06b22"}, "chatterbox": {:hex, :ts_chatterbox, "0.15.1", "5cac4d15dd7ad61fc3c4415ce4826fc563d4643dee897a558ec4ea0b1c835c9c", [:rebar3], [{:hpack, "~> 0.3.0", [hex: :hpack_erl, repo: "hexpm", optional: false]}], "hexpm", "4f75b91451338bc0da5f52f3480fa6ef6e3a2aeecfc33686d6b3d0a0948f31aa"}, "combine": {:hex, :combine, "0.10.0", "eff8224eeb56498a2af13011d142c5e7997a80c8f5b97c499f84c841032e429f", [:mix], [], "hexpm", "1b1dbc1790073076580d0d1d64e42eae2366583e7aecd455d1215b0d16f2451b"}, "ctx": {:hex, :ctx, "0.6.0", 
"8ff88b70e6400c4df90142e7f130625b82086077a45364a78d208ed3ed53c7fe", [:rebar3], [], "hexpm", "a14ed2d1b67723dbebbe423b28d7615eb0bdcba6ff28f2d1f1b0a7e1d4aa5fc2"}, @@ -13,20 +14,22 @@ "earmark_parser": {:hex, :earmark_parser, "1.4.44", "f20830dd6b5c77afe2b063777ddbbff09f9759396500cdbe7523efd58d7a339c", [:mix], [], "hexpm", "4778ac752b4701a5599215f7030989c989ffdc4f6df457c5f36938cc2d2a2750"}, "ecto": {:hex, :ecto, "3.13.2", "7d0c0863f3fc8d71d17fc3ad3b9424beae13f02712ad84191a826c7169484f01", [:mix], [{:decimal, "~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "669d9291370513ff56e7b7e7081b7af3283d02e046cf3d403053c557894a0b3e"}, "ecto_sql": {:hex, :ecto_sql, "3.13.2", "a07d2461d84107b3d037097c822ffdd36ed69d1cf7c0f70e12a3d1decf04e2e1", [:mix], [{:db_connection, "~> 2.4.1 or ~> 2.5", [hex: :db_connection, repo: "hexpm", optional: false]}, {:ecto, "~> 3.13.0", [hex: :ecto, repo: "hexpm", optional: false]}, {:myxql, "~> 0.7", [hex: :myxql, repo: "hexpm", optional: true]}, {:postgrex, "~> 0.19 or ~> 1.0", [hex: :postgrex, repo: "hexpm", optional: true]}, {:tds, "~> 2.1.1 or ~> 2.2", [hex: :tds, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4.0 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "539274ab0ecf1a0078a6a72ef3465629e4d6018a3028095dc90f60a19c371717"}, - "electric": {:hex, :electric, "1.0.21", "b2e281b82c075d1056dc404df854e3904cb9c74b3d02628cf46857a2658968c1", [:mix], [{:backoff, "~> 1.1", [hex: :backoff, repo: "hexpm", optional: false]}, {:bandit, "~> 1.6", [hex: :bandit, repo: "hexpm", optional: false]}, {:dotenvy, "~> 1.1", [hex: :dotenvy, repo: "hexpm", optional: false]}, {:ecto, "~> 3.12", [hex: :ecto, repo: "hexpm", optional: false]}, {:electric_cubdb, "~> 2.0", [hex: :electric_cubdb, repo: "hexpm", optional: false]}, {:jason, "~> 1.4", [hex: :jason, repo: "hexpm", optional: false]}, {:nimble_options, "~> 1.1", [hex: :nimble_options, repo: "hexpm", optional: false]}, {:opentelemetry, "~> 1.5", [hex: :opentelemetry, repo: "hexpm", optional: true]}, {:opentelemetry_exporter, "~> 1.8", [hex: :opentelemetry_exporter, repo: "hexpm", optional: true]}, {:opentelemetry_semantic_conventions, "~> 1.27", [hex: :opentelemetry_semantic_conventions, repo: "hexpm", optional: false]}, {:opentelemetry_telemetry, "~> 1.1", [hex: :opentelemetry_telemetry, repo: "hexpm", optional: false]}, {:otel_metric_exporter, "~> 0.3.9", [hex: :otel_metric_exporter, repo: "hexpm", optional: true]}, {:pg_query_ex, "0.7.0", [hex: :pg_query_ex, repo: "hexpm", optional: false]}, {:plug, "~> 1.17", [hex: :plug, repo: "hexpm", optional: false]}, {:postgrex, "~> 0.20", [hex: :postgrex, repo: "hexpm", optional: false]}, {:remote_ip, "~> 1.2", [hex: :remote_ip, repo: "hexpm", optional: false]}, {:req, "~> 0.5", [hex: :req, repo: "hexpm", optional: false]}, {:retry, "~> 0.19", [hex: :retry, repo: "hexpm", optional: false]}, {:sentry, "~> 10.9", [hex: :sentry, repo: "hexpm", optional: true]}, {:stream_split, "~> 0.1", [hex: :stream_split, repo: "hexpm", optional: false]}, {:telemetry_metrics_prometheus_core, "~> 1.1", [hex: :telemetry_metrics_prometheus_core, repo: "hexpm", optional: true]}, {:telemetry_metrics_statsd, "~> 0.7", [hex: :telemetry_metrics_statsd, repo: "hexpm", optional: true]}, {:telemetry_poller, "~> 1.2", [hex: :telemetry_poller, repo: "hexpm", optional: false]}, {:tls_certificate_check, "~> 1.27", [hex: 
:tls_certificate_check, repo: "hexpm", optional: false]}, {:tz, "~> 0.28", [hex: :tz, repo: "hexpm", optional: false]}], "hexpm", "f0063f0fc383e319072fea8c0cba083cd105dcf794050d7473807c73b589e52d"}, - "electric_client": {:hex, :electric_client, "0.6.3", "17d7aebaebcc293feff97ce8b683502ca61ffc03cf30429b3f9f5e1113bf6c27", [:mix], [{:ecto_sql, "~> 3.12", [hex: :ecto_sql, repo: "hexpm", optional: true]}, {:electric, "~> 1.0.0", [hex: :electric, repo: "hexpm", optional: true]}, {:gen_stage, "~> 1.2", [hex: :gen_stage, repo: "hexpm", optional: true]}, {:jason, "~> 1.4", [hex: :jason, repo: "hexpm", optional: false]}, {:nimble_options, "~> 1.1", [hex: :nimble_options, repo: "hexpm", optional: false]}, {:req, "~> 0.5", [hex: :req, repo: "hexpm", optional: false]}], "hexpm", "da7e9e7cff19dc22d0e7f1017aa675dbeaa60ae0953f908e5442e32b43c6e808"}, + "electric": {:hex, :electric, "1.0.24", "f17ee7971390cf710a731a349456f6da43750fbc6582d62793c8702c636ab203", [:mix], [{:backoff, "~> 1.1", [hex: :backoff, repo: "hexpm", optional: false]}, {:bandit, "~> 1.6", [hex: :bandit, repo: "hexpm", optional: false]}, {:dotenvy, "~> 1.1", [hex: :dotenvy, repo: "hexpm", optional: false]}, {:ecto, "~> 3.12", [hex: :ecto, repo: "hexpm", optional: false]}, {:electric_cubdb, "~> 2.0", [hex: :electric_cubdb, repo: "hexpm", optional: false]}, {:jason, "~> 1.4", [hex: :jason, repo: "hexpm", optional: false]}, {:nimble_options, "~> 1.1", [hex: :nimble_options, repo: "hexpm", optional: false]}, {:opentelemetry, "~> 1.5", [hex: :opentelemetry, repo: "hexpm", optional: true]}, {:opentelemetry_exporter, "~> 1.8", [hex: :opentelemetry_exporter, repo: "hexpm", optional: true]}, {:opentelemetry_semantic_conventions, "~> 1.27", [hex: :opentelemetry_semantic_conventions, repo: "hexpm", optional: false]}, {:opentelemetry_telemetry, "~> 1.1", [hex: :opentelemetry_telemetry, repo: "hexpm", optional: false]}, {:otel_metric_exporter, "~> 0.3.9", [hex: :otel_metric_exporter, repo: "hexpm", optional: true]}, {:pg_query_ex, "0.7.0", [hex: :pg_query_ex, repo: "hexpm", optional: false]}, {:plug, "~> 1.17", [hex: :plug, repo: "hexpm", optional: false]}, {:postgrex, "~> 0.20", [hex: :postgrex, repo: "hexpm", optional: false]}, {:remote_ip, "~> 1.2", [hex: :remote_ip, repo: "hexpm", optional: false]}, {:req, "~> 0.5", [hex: :req, repo: "hexpm", optional: false]}, {:retry, "~> 0.19", [hex: :retry, repo: "hexpm", optional: false]}, {:sentry, "~> 10.9", [hex: :sentry, repo: "hexpm", optional: true]}, {:stream_split, "~> 0.1", [hex: :stream_split, repo: "hexpm", optional: false]}, {:telemetry_metrics_prometheus_core, "~> 1.1", [hex: :telemetry_metrics_prometheus_core, repo: "hexpm", optional: true]}, {:telemetry_metrics_statsd, "~> 0.7", [hex: :telemetry_metrics_statsd, repo: "hexpm", optional: true]}, {:telemetry_poller, "~> 1.2", [hex: :telemetry_poller, repo: "hexpm", optional: false]}, {:tls_certificate_check, "~> 1.27", [hex: :tls_certificate_check, repo: "hexpm", optional: false]}, {:tz, "~> 0.28", [hex: :tz, repo: "hexpm", optional: false]}], "hexpm", "91e3a8b957c1e02d07da3a0e1e902420f32e1d7d5da25814475175517698fb61"}, + "electric_client": {:hex, :electric_client, "0.6.5-beta-1", "7e7153c28b30f1a4b54b84f3b08d20ca7827ecbbabce26e6429b76a732ba6e1f", [:mix], [{:ecto_sql, "~> 3.12", [hex: :ecto_sql, repo: "hexpm", optional: true]}, {:electric, "~> 1.0.6", [hex: :electric, repo: "hexpm", optional: true]}, {:gen_stage, "~> 1.2", [hex: :gen_stage, repo: "hexpm", optional: true]}, {:jason, "~> 1.4", [hex: :jason, repo: "hexpm", optional: false]}, 
{:nimble_options, "~> 1.1", [hex: :nimble_options, repo: "hexpm", optional: false]}, {:req, "~> 0.5", [hex: :req, repo: "hexpm", optional: false]}], "hexpm", "444cd8002fee9473bfa2030a17c721603ad565d1991588650c36dd5e077c43b2"}, "electric_cubdb": {:hex, :electric_cubdb, "2.0.2", "36f86e3c52dc26f4e077a49fbef813b1a38d3897421cece851f149190b34c16c", [:mix], [], "hexpm", "0c0e24b31fb76ad1b33c5de2ab35c41a4ff9da153f5c1f9b15e2de78575acaf2"}, "elixir_make": {:hex, :elixir_make, "0.9.0", "6484b3cd8c0cee58f09f05ecaf1a140a8c97670671a6a0e7ab4dc326c3109726", [:mix], [], "hexpm", "db23d4fd8b757462ad02f8aa73431a426fe6671c80b200d9710caf3d1dd0ffdb"}, "erlex": {:hex, :erlex, "0.2.7", "810e8725f96ab74d17aac676e748627a07bc87eb950d2b83acd29dc047a30595", [:mix], [], "hexpm", "3ed95f79d1a844c3f6bf0cea61e0d5612a42ce56da9c03f01df538685365efb0"}, "ex_doc": {:hex, :ex_doc, "0.37.3", "f7816881a443cd77872b7d6118e8a55f547f49903aef8747dbcb345a75b462f9", [:mix], [{:earmark_parser, "~> 1.4.42", [hex: :earmark_parser, repo: "hexpm", optional: false]}, {:makeup_c, ">= 0.1.0", [hex: :makeup_c, repo: "hexpm", optional: true]}, {:makeup_elixir, "~> 0.14 or ~> 1.0", [hex: :makeup_elixir, repo: "hexpm", optional: false]}, {:makeup_erlang, "~> 0.1 or ~> 1.0", [hex: :makeup_erlang, repo: "hexpm", optional: false]}, {:makeup_html, ">= 0.1.0", [hex: :makeup_html, repo: "hexpm", optional: true]}], "hexpm", "e6aebca7156e7c29b5da4daa17f6361205b2ae5f26e5c7d8ca0d3f7e18972233"}, - "finch": {:hex, :finch, "0.19.0", "c644641491ea854fc5c1bbaef36bfc764e3f08e7185e1f084e35e0672241b76d", [:mix], [{:mime, "~> 1.0 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:mint, "~> 1.6.2 or ~> 1.7", [hex: :mint, repo: "hexpm", optional: false]}, {:nimble_options, "~> 0.4 or ~> 1.0", [hex: :nimble_options, repo: "hexpm", optional: false]}, {:nimble_pool, "~> 1.1", [hex: :nimble_pool, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "fc5324ce209125d1e2fa0fcd2634601c52a787aff1cd33ee833664a5af4ea2b6"}, - "floki": {:hex, :floki, "0.37.0", "b83e0280bbc6372f2a403b2848013650b16640cd2470aea6701f0632223d719e", [:mix], [], "hexpm", "516a0c15a69f78c47dc8e0b9b3724b29608aa6619379f91b1ffa47109b5d0dd3"}, + "finch": {:hex, :finch, "0.20.0", "5330aefb6b010f424dcbbc4615d914e9e3deae40095e73ab0c1bb0968933cadf", [:mix], [{:mime, "~> 1.0 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:mint, "~> 1.6.2 or ~> 1.7", [hex: :mint, repo: "hexpm", optional: false]}, {:nimble_options, "~> 0.4 or ~> 1.0", [hex: :nimble_options, repo: "hexpm", optional: false]}, {:nimble_pool, "~> 1.1", [hex: :nimble_pool, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "2658131a74d051aabfcba936093c903b8e89da9a1b63e430bee62045fa9b2ee2"}, + "fine": {:hex, :fine, "0.1.2", "85cf7dd190c7c6c54c2840754ae977c9acc0417316255b674fad9f2678e4ecc7", [:mix], [], "hexpm", "9113531982c2b60dbea6c7233917ddf16806947cd7104b5d03011bf436ca3072"}, + "floki": {:hex, :floki, "0.38.0", "62b642386fa3f2f90713f6e231da0fa3256e41ef1089f83b6ceac7a3fd3abf33", [:mix], [], "hexpm", "a5943ee91e93fb2d635b612caf5508e36d37548e84928463ef9dd986f0d1abd9"}, "gen_stage": {:hex, :gen_stage, "1.2.1", "19d8b5e9a5996d813b8245338a28246307fd8b9c99d1237de199d21efc4c76a1", [:mix], [], "hexpm", "83e8be657fa05b992ffa6ac1e3af6d57aa50aace8f691fcf696ff02f8335b001"}, "gproc": {:hex, :gproc, "0.9.1", "f1df0364423539cf0b80e8201c8b1839e229e5f9b3ccb944c5834626998f5b8c", [:rebar3], [], 
"hexpm", "905088e32e72127ed9466f0bac0d8e65704ca5e73ee5a62cb073c3117916d507"}, "grpcbox": {:hex, :grpcbox, "0.17.1", "6e040ab3ef16fe699ffb513b0ef8e2e896da7b18931a1ef817143037c454bcce", [:rebar3], [{:acceptor_pool, "~> 1.0.0", [hex: :acceptor_pool, repo: "hexpm", optional: false]}, {:chatterbox, "~> 0.15.1", [hex: :ts_chatterbox, repo: "hexpm", optional: false]}, {:ctx, "~> 0.6.0", [hex: :ctx, repo: "hexpm", optional: false]}, {:gproc, "~> 0.9.1", [hex: :gproc, repo: "hexpm", optional: false]}], "hexpm", "4a3b5d7111daabc569dc9cbd9b202a3237d81c80bf97212fbc676832cb0ceb17"}, "hpack": {:hex, :hpack_erl, "0.3.0", "2461899cc4ab6a0ef8e970c1661c5fc6a52d3c25580bc6dd204f84ce94669926", [:rebar3], [], "hexpm", "d6137d7079169d8c485c6962dfe261af5b9ef60fbc557344511c1e65e3d95fb0"}, "hpax": {:hex, :hpax, "1.0.3", "ed67ef51ad4df91e75cc6a1494f851850c0bd98ebc0be6e81b026e765ee535aa", [:mix], [], "hexpm", "8eab6e1cfa8d5918c2ce4ba43588e894af35dbd8e91e6e55c817bca5847df34a"}, "jason": {:hex, :jason, "1.4.4", "b9226785a9aa77b6857ca22832cffa5d5011a667207eb2a0ad56adb5db443b8a", [:mix], [{:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm", "c5eb0cab91f094599f94d55bc63409236a8ec69a21a67814529e8d5f6cc90b3b"}, + "lazy_html": {:hex, :lazy_html, "0.1.3", "8b9c8c135e95f7bc483de6195c4e1c0b2c913a5e2c57353ef4e82703b7ac8bd1", [:make, :mix], [{:cc_precompiler, "~> 0.1", [hex: :cc_precompiler, repo: "hexpm", optional: false]}, {:elixir_make, "~> 0.9.0", [hex: :elixir_make, repo: "hexpm", optional: false]}, {:fine, "~> 0.1.0", [hex: :fine, repo: "hexpm", optional: false]}], "hexpm", "5f96f29587dcfed8a22281e8c44c6607e958ba821d90b9dfc003d1ef610f7d07"}, "makeup": {:hex, :makeup, "1.2.1", "e90ac1c65589ef354378def3ba19d401e739ee7ee06fb47f94c687016e3713d1", [:mix], [{:nimble_parsec, "~> 1.4", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "d36484867b0bae0fea568d10131197a4c2e47056a6fbe84922bf6ba71c8d17ce"}, "makeup_elixir": {:hex, :makeup_elixir, "1.0.1", "e928a4f984e795e41e3abd27bfc09f51db16ab8ba1aebdba2b3a575437efafc2", [:mix], [{:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}, {:nimble_parsec, "~> 1.2.3 or ~> 1.3", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "7284900d412a3e5cfd97fdaed4f5ed389b8f2b4cb49efc0eb3bd10e2febf9507"}, "makeup_erlang": {:hex, :makeup_erlang, "1.0.2", "03e1804074b3aa64d5fad7aa64601ed0fb395337b982d9bcf04029d68d51b6a7", [:mix], [{:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}], "hexpm", "af33ff7ef368d5893e4a267933e7744e46ce3cf1f61e2dccf53a111ed3aa3727"}, @@ -47,16 +50,16 @@ "pg_query_ex": {:hex, :pg_query_ex, "0.7.0", "189f0c0d2b6fce78def670f3cba411baa9311a099bcd0cdb0501adcfede37677", [:make, :mix], [{:elixir_make, "~> 0.4", [hex: :elixir_make, repo: "hexpm", optional: false]}, {:protox, "~> 1.7", [hex: :protox, repo: "hexpm", optional: false]}], "hexpm", "c39cb58690fa8f19cdd1939c41c5906b65f1e70351ea4a45a9da680ca3ad8c66"}, "phoenix": {:hex, :phoenix, "1.7.21", "14ca4f1071a5f65121217d6b57ac5712d1857e40a0833aff7a691b7870fc9a3b", [:mix], [{:castore, ">= 0.0.0", [hex: :castore, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:phoenix_pubsub, "~> 2.1", [hex: :phoenix_pubsub, repo: "hexpm", optional: false]}, {:phoenix_template, "~> 1.0", [hex: :phoenix_template, repo: "hexpm", optional: false]}, {:phoenix_view, "~> 2.0", [hex: :phoenix_view, repo: "hexpm", optional: true]}, {:plug, "~> 1.14", [hex: :plug, repo: "hexpm", optional: false]}, {:plug_cowboy, 
"~> 2.7", [hex: :plug_cowboy, repo: "hexpm", optional: true]}, {:plug_crypto, "~> 1.2 or ~> 2.0", [hex: :plug_crypto, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}, {:websock_adapter, "~> 0.5.3", [hex: :websock_adapter, repo: "hexpm", optional: false]}], "hexpm", "336dce4f86cba56fed312a7d280bf2282c720abb6074bdb1b61ec8095bdd0bc9"}, "phoenix_html": {:hex, :phoenix_html, "4.2.1", "35279e2a39140068fc03f8874408d58eef734e488fc142153f055c5454fd1c08", [:mix], [], "hexpm", "cff108100ae2715dd959ae8f2a8cef8e20b593f8dfd031c9cba92702cf23e053"}, - "phoenix_live_view": {:hex, :phoenix_live_view, "1.0.17", "beeb16d83a7d3760f7ad463df94e83b087577665d2acc0bf2987cd7d9778068f", [:mix], [{:floki, "~> 0.36", [hex: :floki, repo: "hexpm", optional: true]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:phoenix, "~> 1.6.15 or ~> 1.7.0 or ~> 1.8.0-rc", [hex: :phoenix, repo: "hexpm", optional: false]}, {:phoenix_html, "~> 3.3 or ~> 4.0", [hex: :phoenix_html, repo: "hexpm", optional: false]}, {:phoenix_template, "~> 1.0", [hex: :phoenix_template, repo: "hexpm", optional: false]}, {:phoenix_view, "~> 2.0", [hex: :phoenix_view, repo: "hexpm", optional: true]}, {:plug, "~> 1.15", [hex: :plug, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4.2 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "a4ca05c1eb6922c4d07a508a75bfa12c45e5f4d8f77ae83283465f02c53741e1"}, + "phoenix_live_view": {:hex, :phoenix_live_view, "1.1.3", "0473936730cc76f9b02e52f131e081c63e5e5c3851003878dd3cbe12124fb39f", [:mix], [{:igniter, ">= 0.6.16 and < 1.0.0-0", [hex: :igniter, repo: "hexpm", optional: true]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:lazy_html, "~> 0.1.0", [hex: :lazy_html, repo: "hexpm", optional: true]}, {:phoenix, "~> 1.6.15 or ~> 1.7.0 or ~> 1.8.0-rc", [hex: :phoenix, repo: "hexpm", optional: false]}, {:phoenix_html, "~> 3.3 or ~> 4.0", [hex: :phoenix_html, repo: "hexpm", optional: false]}, {:phoenix_template, "~> 1.0", [hex: :phoenix_template, repo: "hexpm", optional: false]}, {:phoenix_view, "~> 2.0", [hex: :phoenix_view, repo: "hexpm", optional: true]}, {:plug, "~> 1.15", [hex: :plug, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4.2 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "942967524e8d256ce6847ca3143d94425fa5125b53563790a609c78740cfb6c9"}, "phoenix_pubsub": {:hex, :phoenix_pubsub, "2.1.3", "3168d78ba41835aecad272d5e8cd51aa87a7ac9eb836eabc42f6e57538e3731d", [:mix], [], "hexpm", "bba06bc1dcfd8cb086759f0edc94a8ba2bc8896d5331a1e2c2902bf8e36ee502"}, "phoenix_template": {:hex, :phoenix_template, "1.0.4", "e2092c132f3b5e5b2d49c96695342eb36d0ed514c5b252a77048d5969330d639", [:mix], [{:phoenix_html, "~> 2.14.2 or ~> 3.0 or ~> 4.0", [hex: :phoenix_html, repo: "hexpm", optional: true]}], "hexpm", "2c0c81f0e5c6753faf5cca2f229c9709919aba34fab866d3bc05060c9c444206"}, - "plug": {:hex, :plug, "1.18.0", "d78df36c41f7e798f2edf1f33e1727eae438e9dd5d809a9997c463a108244042", [:mix], [{:mime, "~> 1.0 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:plug_crypto, "~> 1.1.1 or ~> 1.2 or ~> 2.0", [hex: :plug_crypto, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4.3 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "819f9e176d51e44dc38132e132fe0accaf6767eab7f0303431e404da8476cfa2"}, + "plug": {:hex, :plug, "1.18.1", "5067f26f7745b7e31bc3368bc1a2b818b9779faa959b49c934c17730efc911cf", [:mix], [{:mime, "~> 1.0 or ~> 2.0", [hex: 
:mime, repo: "hexpm", optional: false]}, {:plug_crypto, "~> 1.1.1 or ~> 1.2 or ~> 2.0", [hex: :plug_crypto, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4.3 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "57a57db70df2b422b564437d2d33cf8d33cd16339c1edb190cd11b1a3a546cc2"}, "plug_crypto": {:hex, :plug_crypto, "2.1.1", "19bda8184399cb24afa10be734f84a16ea0a2bc65054e23a62bb10f06bc89491", [:mix], [], "hexpm", "6470bce6ffe41c8bd497612ffde1a7e4af67f36a15eea5f921af71cf3e11247c"}, - "postgrex": {:hex, :postgrex, "0.20.0", "363ed03ab4757f6bc47942eff7720640795eb557e1935951c1626f0d303a3aed", [:mix], [{:db_connection, "~> 2.1", [hex: :db_connection, repo: "hexpm", optional: false]}, {:decimal, "~> 1.5 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:table, "~> 0.1.0", [hex: :table, repo: "hexpm", optional: true]}], "hexpm", "d36ef8b36f323d29505314f704e21a1a038e2dc387c6409ee0cd24144e187c0f"}, + "postgrex": {:hex, :postgrex, "0.21.1", "2c5cc830ec11e7a0067dd4d623c049b3ef807e9507a424985b8dcf921224cd88", [:mix], [{:db_connection, "~> 2.1", [hex: :db_connection, repo: "hexpm", optional: false]}, {:decimal, "~> 1.5 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:table, "~> 0.1.0", [hex: :table, repo: "hexpm", optional: true]}], "hexpm", "27d8d21c103c3cc68851b533ff99eef353e6a0ff98dc444ea751de43eb48bdac"}, "protobuf": {:hex, :protobuf, "0.13.0", "7a9d9aeb039f68a81717eb2efd6928fdf44f03d2c0dfdcedc7b560f5f5aae93d", [:mix], [{:jason, "~> 1.2", [hex: :jason, repo: "hexpm", optional: true]}], "hexpm", "21092a223e3c6c144c1a291ab082a7ead32821ba77073b72c68515aa51fef570"}, "protox": {:hex, :protox, "1.7.8", "ccae41afec6e63cf061bee874d7d042ed585d501df1cd004661ffac0e5628686", [:mix], [{:decimal, "~> 1.9 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.2", [hex: :jason, repo: "hexpm", optional: true]}, {:poison, "~> 4.0 or ~> 5.0 or ~> 6.0", [hex: :poison, repo: "hexpm", optional: true]}], "hexpm", "f6702c9deb9fb7cd2eadd73d3dbc0303c506dc87635e509228c61309f7062933"}, "remote_ip": {:hex, :remote_ip, "1.2.0", "fb078e12a44414f4cef5a75963c33008fe169b806572ccd17257c208a7bc760f", [:mix], [{:combine, "~> 0.10", [hex: :combine, repo: "hexpm", optional: false]}, {:plug, "~> 1.14", [hex: :plug, repo: "hexpm", optional: false]}], "hexpm", "2ff91de19c48149ce19ed230a81d377186e4412552a597d6a5137373e5877cb7"}, - "req": {:hex, :req, "0.5.12", "7ce85835867a114c28b6cfc2d8a412f86660290907315ceb173a00e587b853d2", [:mix], [{:brotli, "~> 0.3.1", [hex: :brotli, repo: "hexpm", optional: true]}, {:ezstd, "~> 1.0", [hex: :ezstd, repo: "hexpm", optional: true]}, {:finch, "~> 0.17", [hex: :finch, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}, {:mime, "~> 2.0.6 or ~> 2.1", [hex: :mime, repo: "hexpm", optional: false]}, {:nimble_csv, "~> 1.0", [hex: :nimble_csv, repo: "hexpm", optional: true]}, {:plug, "~> 1.0", [hex: :plug, repo: "hexpm", optional: true]}], "hexpm", "d65f3d0e7032eb245706554cb5240dbe7a07493154e2dd34e7bb65001aa6ef32"}, + "req": {:hex, :req, "0.5.15", "662020efb6ea60b9f0e0fac9be88cd7558b53fe51155a2d9899de594f9906ba9", [:mix], [{:brotli, "~> 0.3.1", [hex: :brotli, repo: "hexpm", optional: true]}, {:ezstd, "~> 1.0", [hex: :ezstd, repo: "hexpm", optional: true]}, {:finch, "~> 0.17", [hex: :finch, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: 
"hexpm", optional: false]}, {:mime, "~> 2.0.6 or ~> 2.1", [hex: :mime, repo: "hexpm", optional: false]}, {:nimble_csv, "~> 1.0", [hex: :nimble_csv, repo: "hexpm", optional: true]}, {:plug, "~> 1.0", [hex: :plug, repo: "hexpm", optional: true]}], "hexpm", "a6513a35fad65467893ced9785457e91693352c70b58bbc045b47e5eb2ef0c53"}, "retry": {:hex, :retry, "0.19.0", "aeb326d87f62295d950f41e1255fe6f43280a1b390d36e280b7c9b00601ccbc2", [:mix], [], "hexpm", "85ef376aa60007e7bff565c366310966ec1bd38078765a0e7f20ec8a220d02ca"}, "sentry": {:hex, :sentry, "10.8.1", "aa45309785e1521416225adb16e0b4d8b957578804527f3c7babb6fefbc5e456", [:mix], [{:hackney, "~> 1.8", [hex: :hackney, repo: "hexpm", optional: true]}, {:jason, "~> 1.1", [hex: :jason, repo: "hexpm", optional: true]}, {:nimble_options, "~> 1.0", [hex: :nimble_options, repo: "hexpm", optional: false]}, {:nimble_ownership, "~> 0.3.0 or ~> 1.0", [hex: :nimble_ownership, repo: "hexpm", optional: false]}, {:phoenix, "~> 1.6", [hex: :phoenix, repo: "hexpm", optional: true]}, {:phoenix_live_view, "~> 0.20 or ~> 1.0", [hex: :phoenix_live_view, repo: "hexpm", optional: true]}, {:plug, "~> 1.6", [hex: :plug, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: true]}], "hexpm", "495b3cdadad90ba72eef973aa3dec39b3b8b2a362fe87e2f4ef32133ac3b4097"}, "ssl_verify_fun": {:hex, :ssl_verify_fun, "1.1.7", "354c321cf377240c7b8716899e182ce4890c5938111a1296add3ec74cf1715df", [:make, :mix, :rebar3], [], "hexpm", "fe4c190e8f37401d30167c8c405eda19469f34577987c76dde613e838bbc67f8"}, @@ -65,7 +68,7 @@ "telemetry_metrics": {:hex, :telemetry_metrics, "1.1.0", "5bd5f3b5637e0abea0426b947e3ce5dd304f8b3bc6617039e2b5a008adc02f8f", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "e7b79e8ddfde70adb6db8a6623d1778ec66401f366e9a8f5dd0955c56bc8ce67"}, "telemetry_metrics_prometheus_core": {:hex, :telemetry_metrics_prometheus_core, "1.2.1", "c9755987d7b959b557084e6990990cb96a50d6482c683fb9622a63837f3cd3d8", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}, {:telemetry_metrics, "~> 0.6 or ~> 1.0", [hex: :telemetry_metrics, repo: "hexpm", optional: false]}], "hexpm", "5e2c599da4983c4f88a33e9571f1458bf98b0cf6ba930f1dc3a6e8cf45d5afb6"}, "telemetry_metrics_statsd": {:hex, :telemetry_metrics_statsd, "0.7.1", "3502235bb5b35ce50d608bf0f34369ef76eb92a4dbc8708c7e8780ca0da2d53e", [:mix], [{:nimble_options, "~> 0.4 or ~> 1.0", [hex: :nimble_options, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}, {:telemetry_metrics, "~> 0.6 or ~> 1.0", [hex: :telemetry_metrics, repo: "hexpm", optional: false]}], "hexpm", "06338d9dc3b4a202f11a6e706fd3feba4c46100d0aca23688dea0b8f801c361f"}, - "telemetry_poller": {:hex, :telemetry_poller, "1.2.0", "ba82e333215aed9dd2096f93bd1d13ae89d249f82760fcada0850ba33bac154b", [:rebar3], [{:telemetry, "~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "7216e21a6c326eb9aa44328028c34e9fd348fb53667ca837be59d0aa2a0156e8"}, + "telemetry_poller": {:hex, :telemetry_poller, "1.3.0", "d5c46420126b5ac2d72bc6580fb4f537d35e851cc0f8dbd571acf6d6e10f5ec7", [:rebar3], [{:telemetry, "~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "51f18bed7128544a50f75897db9974436ea9bfba560420b646af27a9a9b35211"}, "thousand_island": {:hex, :thousand_island, "1.3.14", "ad45ebed2577b5437582bcc79c5eccd1e2a8c326abf6a3464ab6c06e2055a34a", [:mix], [{:telemetry, 
"~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "d0d24a929d31cdd1d7903a4fe7f2409afeedff092d277be604966cd6aa4307ef"}, "tls_certificate_check": {:hex, :tls_certificate_check, "1.28.0", "c39bf21f67c2d124ae905454fad00f27e625917e8ab1009146e916e1df6ab275", [:rebar3], [{:ssl_verify_fun, "~> 1.1", [hex: :ssl_verify_fun, repo: "hexpm", optional: false]}], "hexpm", "3ab058c3f9457fffca916729587415f0ddc822048a0e5b5e2694918556d92df1"}, "tz": {:hex, :tz, "0.28.1", "717f5ffddfd1e475e2a233e221dc0b4b76c35c4b3650b060c8e3ba29dd6632e9", [:mix], [{:castore, "~> 0.1 or ~> 1.0", [hex: :castore, repo: "hexpm", optional: true]}, {:mint, "~> 1.6", [hex: :mint, repo: "hexpm", optional: true]}], "hexpm", "bfdca1aa1902643c6c43b77c1fb0cb3d744fd2f09a8a98405468afdee0848c8a"}, diff --git a/postgres.conf b/postgres.conf new file mode 100644 index 0000000..e18a913 --- /dev/null +++ b/postgres.conf @@ -0,0 +1,3 @@ +listen_addresses = '*' +wal_level = logical # minimal, replica, or logical +max_replication_slots = 100 diff --git a/test/phoenix/sync/application_test.exs b/test/phoenix/sync/application_test.exs index e99a1e1..169b231 100644 --- a/test/phoenix/sync/application_test.exs +++ b/test/phoenix/sync/application_test.exs @@ -18,7 +18,7 @@ defmodule Phoenix.Sync.ApplicationTest do base_opts = [ username: "postgres", hostname: "localhost", - database: "electric", + database: "phoenix_sync", port: 5432, sslmode: :disable ] @@ -96,6 +96,15 @@ defmodule Phoenix.Sync.ApplicationTest do end) =~ ~r/No `env` specified for :phoenix_sync: defaulting to `:prod`/ end + test "sandbox mode" do + config = [ + mode: :sandbox, + repo: Support.ConfigTestRepo + ] + + assert {:ok, [Phoenix.Sync.Sandbox]} = App.children(config) + end + test "embedded mode dev env" do tmp_dir = System.tmp_dir!() @@ -212,13 +221,13 @@ defmodule Phoenix.Sync.ApplicationTest do replication_connection_opts: [ username: "postgres", hostname: "localhost", - database: "electric", + database: "phoenix_sync", password: "password" ], query_connection_opts: [ username: "postgres", hostname: "localhost-pooled", - database: "electric", + database: "phoenix_sync", password: "password" ], storage_dir: storage_dir @@ -232,7 +241,7 @@ defmodule Phoenix.Sync.ApplicationTest do assert connection_opts == [ username: "postgres", hostname: "localhost", - database: "electric" + database: "phoenix_sync" ] assert {"password", connection_opts} = @@ -241,7 +250,7 @@ defmodule Phoenix.Sync.ApplicationTest do assert connection_opts == [ username: "postgres", hostname: "localhost-pooled", - database: "electric" + database: "phoenix_sync" ] assert %{ @@ -259,7 +268,7 @@ defmodule Phoenix.Sync.ApplicationTest do connection_opts: [ username: "postgres", hostname: "localhost", - database: "electric", + database: "phoenix_sync", password: "password" ], storage_dir: storage_dir @@ -272,7 +281,7 @@ defmodule Phoenix.Sync.ApplicationTest do assert connection_opts == [ username: "postgres", hostname: "localhost", - database: "electric" + database: "phoenix_sync" ] assert %{ @@ -388,5 +397,16 @@ defmodule Phoenix.Sync.ApplicationTest do } } = api end + + test "sandbox mode" do + config = [ + mode: :sandbox, + repo: Support.ConfigTestRepo + ] + + api = App.plug_opts(config) + + assert %Phoenix.Sync.Sandbox.APIAdapter{} = api + end end end diff --git a/test/phoenix/sync/live_view_test.exs b/test/phoenix/sync/live_view_test.exs index 8fcf763..d5e2def 100644 --- a/test/phoenix/sync/live_view_test.exs +++ b/test/phoenix/sync/live_view_test.exs @@ -14,6 +14,8 @@ 
defmodule Phoenix.Sync.LiveViewTest do Code.ensure_loaded(Support.User) + @moduletag :capture_log + setup do {:ok, conn: Plug.Test.init_test_session(build_conn(), %{})} end diff --git a/test/phoenix/sync/sandbox/postgres_adapter_test.exs b/test/phoenix/sync/sandbox/postgres_adapter_test.exs new file mode 100644 index 0000000..e45b3c5 --- /dev/null +++ b/test/phoenix/sync/sandbox/postgres_adapter_test.exs @@ -0,0 +1,25 @@ +defmodule Phoenix.Sync.Sandbox.PostgresAdapterTest do + use ExUnit.Case, async: true + + describe "adapter/1" do + defmodule Adapter do + import Phoenix.Sync.Sandbox.Postgres, only: [adapter: 0, adapter: 1] + + def default, do: adapter() + def prod, do: adapter(Mix.env() == :prod) + def always, do: adapter(true) + end + + test "defaults to testing for :test env" do + assert Adapter.default() == Phoenix.Sync.Sandbox.Postgres.Adapter + end + + test "allows for custom test conditions" do + assert Adapter.prod() == Ecto.Adapters.Postgres + end + + test "accepts hard-coded bools" do + assert Adapter.always() == Phoenix.Sync.Sandbox.Postgres.Adapter + end + end +end diff --git a/test/phoenix/sync/sandbox/producer_test.exs b/test/phoenix/sync/sandbox/producer_test.exs new file mode 100644 index 0000000..80ff833 --- /dev/null +++ b/test/phoenix/sync/sandbox/producer_test.exs @@ -0,0 +1,5 @@ +defmodule Phoenix.Sync.Sandbox.ProducerTest do + use ExUnit.Case, async: true + + doctest Phoenix.Sync.Sandbox.Producer, import: true +end diff --git a/test/phoenix/sync/sandbox/sandbox_adapter_test.exs b/test/phoenix/sync/sandbox/sandbox_adapter_test.exs new file mode 100644 index 0000000..bf1e7ca --- /dev/null +++ b/test/phoenix/sync/sandbox/sandbox_adapter_test.exs @@ -0,0 +1,115 @@ +defmodule Phoenix.Sync.Sandbox.SandboxAdapterTest do + use ExUnit.Case, async: true + + Code.ensure_loaded!(Support.SandboxRepo) + Code.ensure_loaded!(Support.Todo) + + alias Support.SandboxRepo, as: Repo + alias Support.Todo + + import Ecto.Query, only: [from: 2] + + @moduletag sandbox: true + @moduletag table: { + "todos", + [ + "id int8 not null primary key generated always as identity", + "title text", + "completed boolean default false" + ] + } + + setup(_ctx) do + Ecto.Adapters.SQL.Sandbox.mode(Support.SandboxRepo, :manual) + + owner = Ecto.Adapters.SQL.Sandbox.start_owner!(Repo) + on_exit(fn -> Ecto.Adapters.SQL.Sandbox.stop_owner(owner) end) + end + + setup [ + :with_repo_table + ] + + describe "no active sandbox" do + # the sandbox adapter should just pass on queries if no sandbox is active + # otherwise you **have** to start a sandbox for all db tests + test "insert" do + Repo.transaction(fn -> + assert {:ok, %Todo{}} = Repo.insert(%Todo{title: "aberrant"}) + end) + end + + test "update_all" do + Repo.transaction(fn -> + assert {:ok, %Todo{}} = Repo.insert(%Todo{title: "aberrant"}) + + from(t in Todo, + update: [ + set: [completed: true, title: fragment("upper(?)", t.title)] + ] + ) + |> Repo.update_all([]) + end) + end + + test "insert_all" do + Repo.transaction(fn -> + todos = [ + %{title: "one"}, + %{title: "two"}, + %{title: "three"} + ] + + assert {3, _} = Repo.insert_all(Todo, todos) + end) + end + + test "delete_all" do + Repo.transaction(fn -> + todos = [ + %{title: "one"}, + %{title: "two"}, + %{title: "three"} + ] + + assert {3, _} = Repo.insert_all(Todo, todos) + assert {3, _} = Repo.delete_all(Todo) + end) + end + + test "delete" do + Repo.transaction(fn -> + assert {:ok, %Todo{} = todo} = Repo.insert(%Todo{title: "aberrant"}) + assert {:ok, %Todo{}} = Repo.delete(todo) + end) + end + + 
test "update" do + Repo.transaction(fn -> + assert {:ok, %Todo{} = todo} = Repo.insert(%Todo{title: "aberrant"}) + assert {:ok, %Todo{}} = Repo.update(Ecto.Changeset.change(todo, title: "changed")) + end) + end + end + + defp with_repo_table(ctx) do + case ctx do + %{table: {name, columns}} -> + sql = + """ + CREATE TABLE #{Support.DbSetup.inspect_relation(name)} ( + #{Enum.join(columns, ",\n")} + ) + """ + + Repo.query!(sql, []) + + :ok + + _ -> + :ok + end + + :ok + end +end diff --git a/test/phoenix/sync/sandbox/sandbox_repo_test.exs b/test/phoenix/sync/sandbox/sandbox_repo_test.exs new file mode 100644 index 0000000..16b3277 --- /dev/null +++ b/test/phoenix/sync/sandbox/sandbox_repo_test.exs @@ -0,0 +1,761 @@ +defmodule Phoenix.Sync.Sandbox.RepoTest do + use ExUnit.Case, + async: true, + parameterize: [ + %{ownership_model: :checkout}, + %{ownership_model: :owner} + ] + + @moduletag :sandbox + + @todos [ + table: { + "todos", + [ + "id int8 not null primary key generated always as identity", + "title text", + "completed boolean default false" + ] + }, + data: { + Support.Todo, + ["title", "completed"], + [["one", false], ["two", false], ["three", true]] + } + ] + + Code.ensure_loaded!(Support.SandboxRepo) + Code.ensure_loaded!(Support.Todo) + + alias Support.SandboxRepo, as: Repo + alias Support.Todo + + import Ecto.Query, only: [from: 2] + + defp with_repo_table(ctx) do + case ctx do + %{table: {name, columns}} -> + sql = + """ + CREATE TABLE #{Support.DbSetup.inspect_relation(name)} ( + #{Enum.join(columns, ",\n")} + ) + """ + + Repo.query!(sql, []) + + :ok + + _ -> + :ok + end + + :ok + end + + defp with_repo_data(ctx) do + case Map.get(ctx, :data, nil) do + {schema, columns, values} -> + Enum.each(values, fn row_values -> + todo = + struct( + schema, + Enum.zip(columns, row_values) |> Enum.map(fn {c, v} -> {String.to_atom(c), v} end) + ) + + Repo.insert(todo) + end) + + :ok + + nil -> + :ok + end + end + + setup(ctx) do + Ecto.Adapters.SQL.Sandbox.mode(Support.SandboxRepo, :manual) + + case ctx.ownership_model do + :checkout -> + :ok = Ecto.Adapters.SQL.Sandbox.checkout(Repo) + Phoenix.Sync.Sandbox.start!(Repo, tags: ctx) + + :owner -> + owner = Ecto.Adapters.SQL.Sandbox.start_owner!(Repo) + on_exit(fn -> Ecto.Adapters.SQL.Sandbox.stop_owner(owner) end) + Phoenix.Sync.Sandbox.start!(Repo, owner, tags: ctx) + end + end + + setup [ + :with_repo_table, + :with_repo_data + ] + + describe "update_all" do + @describetag @todos + + test "simple" do + receive_sandbox_updates(fn -> + assert {3, nil} = + from(t in Todo, + update: [ + set: [completed: true, title: fragment("upper(?)", t.title)] + ] + ) + |> Repo.update_all([]) + end) + + assert_receive {:change, + %Electric.Client.Message.ChangeMessage{ + value: %Support.Todo{id: 1, title: "ONE", completed: true}, + old_value: %Support.Todo{title: "one", completed: false}, + headers: %{operation: :update, relation: ["public", "todos"]} + }}, + 1000 + + assert_receive {:change, + %Electric.Client.Message.ChangeMessage{ + value: %Support.Todo{id: 2, title: "TWO", completed: true}, + old_value: %Support.Todo{title: "two", completed: false}, + headers: %{operation: :update, relation: ["public", "todos"]} + }} + + assert_receive {:change, + %Electric.Client.Message.ChangeMessage{ + value: %Support.Todo{id: 3, title: "THREE", completed: true}, + old_value: %Support.Todo{title: "three"}, + headers: %{operation: :update, relation: ["public", "todos"]} + }} + end + + test "with returning" do + receive_sandbox_updates(fn -> + assert {3, [%Todo{id: 1}, 
%Todo{id: 2}, %Todo{id: 3}]} = + from(t in Todo, + update: [ + set: [completed: true, title: fragment("upper(?)", t.title)] + ], + select: [:id, :completed] + ) + |> Repo.update_all([]) + end) + + assert_receive {:change, + %Electric.Client.Message.ChangeMessage{ + value: %Support.Todo{id: 1, title: "ONE", completed: true}, + old_value: %Support.Todo{title: "one", completed: false}, + headers: %{operation: :update, relation: ["public", "todos"]} + }}, + 1000 + + assert_receive {:change, + %Electric.Client.Message.ChangeMessage{ + value: %Support.Todo{id: 2, title: "TWO", completed: true}, + old_value: %Support.Todo{title: "two", completed: false}, + headers: %{operation: :update, relation: ["public", "todos"]} + }} + + assert_receive {:change, + %Electric.Client.Message.ChangeMessage{ + value: %Support.Todo{id: 3, title: "THREE", completed: true}, + old_value: %Support.Todo{ + title: "three" + }, + headers: %{operation: :update, relation: ["public", "todos"]} + }} + end + + test "with joins" do + receive_sandbox_updates(fn -> + assert {3, nil} = + from(t in Todo, + join: o in Todo, + on: o.id == t.id, + update: [ + set: [completed: true, title: fragment("upper(?)", o.title)] + ] + ) + |> Repo.update_all([]) + end) + + assert_receive {:change, + %Electric.Client.Message.ChangeMessage{ + value: %Support.Todo{id: 1, title: "ONE", completed: true}, + old_value: %Support.Todo{title: "one", completed: false}, + headers: %{operation: :update, relation: ["public", "todos"]} + }}, + 1000 + + assert_receive {:change, + %Electric.Client.Message.ChangeMessage{ + value: %Support.Todo{id: 2, title: "TWO", completed: true}, + old_value: %Support.Todo{title: "two", completed: false}, + headers: %{operation: :update, relation: ["public", "todos"]} + }} + + assert_receive {:change, + %Electric.Client.Message.ChangeMessage{ + value: %Support.Todo{id: 3, title: "THREE", completed: true}, + old_value: %Support.Todo{ + title: "three" + }, + headers: %{operation: :update, relation: ["public", "todos"]} + }} + end + end + + describe "insert_all" do + @describetag @todos + + test "simple" do + receive_sandbox_updates(fn -> + assert {2, nil} = + Repo.insert_all(Todo, [%{title: "more"}, %{title: "even more"}]) + end) + + assert_receive {:change, + %Electric.Client.Message.ChangeMessage{ + value: %Support.Todo{id: 4, title: "more", completed: false}, + headers: %{operation: :insert, relation: ["public", "todos"]} + }}, + 1000 + + assert_receive {:change, + %Electric.Client.Message.ChangeMessage{ + value: %Support.Todo{id: 5, title: "even more", completed: false}, + headers: %{operation: :insert, relation: ["public", "todos"]} + }} + end + + test "returning" do + receive_sandbox_updates(fn -> + assert {2, [%Todo{title: "more"}, %Todo{title: "even more"}]} = + Repo.insert_all(Todo, [%{title: "more"}, %{title: "even more"}], + returning: [:title, :completed] + ) + end) + + assert_receive {:change, + %Electric.Client.Message.ChangeMessage{ + value: %Support.Todo{id: 4, title: "more", completed: false}, + headers: %{operation: :insert, relation: ["public", "todos"]} + }}, + 1000 + + assert_receive {:change, + %Electric.Client.Message.ChangeMessage{ + value: %Support.Todo{id: 5, title: "even more", completed: false}, + headers: %{operation: :insert, relation: ["public", "todos"]} + }} + end + end + + describe "delete_all" do + @describetag @todos + + test "simple" do + receive_sandbox_updates(fn -> + assert {2, nil} = + from(t in Todo, where: t.completed == false) + |> Repo.delete_all() + end) + + assert_receive {:change, 
+ %Electric.Client.Message.ChangeMessage{ + value: %Support.Todo{id: 1, title: "one", completed: false}, + headers: %{operation: :delete, relation: ["public", "todos"]} + }}, + 1000 + + assert_receive {:change, + %Electric.Client.Message.ChangeMessage{ + value: %Support.Todo{id: 2, title: "two", completed: false}, + headers: %{operation: :delete, relation: ["public", "todos"]} + }} + end + + test "returning" do + receive_sandbox_updates(fn -> + assert {2, [%Todo{title: "one"}, %Todo{title: "two"}]} = + from(t in Todo, where: t.completed == false, select: [:id, :title]) + |> Repo.delete_all() + end) + + assert_receive {:change, + %Electric.Client.Message.ChangeMessage{ + value: %Support.Todo{id: 1, title: "one", completed: false}, + headers: %{operation: :delete, relation: ["public", "todos"]} + }}, + 1000 + + assert_receive {:change, + %Electric.Client.Message.ChangeMessage{ + value: %Support.Todo{id: 2, title: "two", completed: false}, + headers: %{operation: :delete, relation: ["public", "todos"]} + }} + end + end + + describe "delete" do + @describetag @todos + + test "simple" do + receive_sandbox_updates(fn -> + assert {:ok, %Todo{}} = Repo.delete(%Todo{id: 1}) + end) + + assert_receive {:change, + %Electric.Client.Message.ChangeMessage{ + value: %Support.Todo{id: 1, title: "one", completed: false}, + headers: %{operation: :delete, relation: ["public", "todos"]} + }}, + 1000 + end + + test "returning" do + receive_sandbox_updates(fn -> + assert {:ok, %Todo{id: 1}} = Repo.delete(%Todo{id: 1}, returning: [:id, :title]) + end) + + assert_receive {:change, + %Electric.Client.Message.ChangeMessage{ + value: %Support.Todo{id: 1, title: "one", completed: false}, + headers: %{operation: :delete, relation: ["public", "todos"]} + }}, + 1000 + end + end + + describe "update" do + @describetag @todos + + test "simple" do + receive_sandbox_updates(fn -> + assert {:ok, %Todo{}} = Repo.update(Ecto.Changeset.change(%Todo{id: 1}, title: "changed")) + end) + + assert_receive {:change, + %Electric.Client.Message.ChangeMessage{ + value: %Support.Todo{id: 1, title: "changed", completed: false}, + old_value: %Support.Todo{title: "one"}, + headers: %{operation: :update, relation: ["public", "todos"]} + }}, + 1000 + end + + test "returning" do + receive_sandbox_updates(fn -> + assert {:ok, %Todo{}} = + Repo.update(Ecto.Changeset.change(%Todo{id: 1}, title: "changed"), + returning: [:id, :title] + ) + end) + + assert_receive {:change, + %Electric.Client.Message.ChangeMessage{ + value: %Support.Todo{id: 1, title: "changed", completed: false}, + old_value: %Support.Todo{title: "one"}, + headers: %{operation: :update, relation: ["public", "todos"]} + }}, + 1000 + end + end + + describe "insert" do + @describetag @todos + + test "simple" do + receive_sandbox_updates(fn -> + assert {:ok, %Todo{}} = Repo.insert(%Todo{title: "ticked", completed: true}) + end) + + assert_receive {:change, + %Electric.Client.Message.ChangeMessage{ + value: %Support.Todo{id: _, title: "ticked", completed: true}, + headers: %{operation: :insert, relation: ["public", "todos"]} + }}, + 1000 + end + + test "returning" do + receive_sandbox_updates(fn -> + assert {:ok, %Todo{}} = + Repo.insert(%Todo{title: "ticked", completed: true}, + returning: [:id, :completed] + ) + end) + + assert_receive {:change, + %Electric.Client.Message.ChangeMessage{ + value: %Support.Todo{id: _, title: "ticked", completed: true}, + headers: %{operation: :insert, relation: ["public", "todos"]} + }}, + 1000 + end + end + + describe "allow" do + @describetag @todos + 
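+    # `Phoenix.Sync.Sandbox.allow/3` connects a process the test did not start
+    # (here a Task running under a DynamicSupervisor) to the test's sandbox
+    # stack, so writes made from that process are captured by the sync stream.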
+ test "connects any process to a stack" do + parent = self() + + {:ok, supervisor} = + start_supervised( + {DynamicSupervisor, name: __MODULE__.DynamicSupervisor, strategy: :one_for_one} + ) + + receive_sandbox_updates(fn -> + {:ok, pid} = + DynamicSupervisor.start_child( + supervisor, + {Task, + fn -> + receive do + :insert -> + assert %Todo{} = Repo.insert!(%Todo{title: "distant"}) + end + end} + ) + + :ok = Phoenix.Sync.Sandbox.allow(Repo, parent, pid) + + send(pid, :insert) + end) + + # prove that the insert succeeded + assert Repo.all(Todo) |> Enum.map(& &1.title) |> Enum.find(&(&1 == "distant")) + + assert_receive {:change, + %Electric.Client.Message.ChangeMessage{ + value: %Support.Todo{id: _, title: "distant"}, + headers: %{operation: :insert, relation: ["public", "todos"]} + }}, + 1000 + end + + test "allows for named processes" do + parent = self() + + {:ok, supervisor} = + start_supervised( + {DynamicSupervisor, name: __MODULE__.DynamicSupervisor, strategy: :one_for_one} + ) + + receive_sandbox_updates(fn -> + task = self() + + {:ok, pid} = + DynamicSupervisor.start_child( + supervisor, + {Task, + fn -> + Process.register(self(), :sandbox_test_process) + send(task, :registered) + + receive do + :insert -> + assert %Todo{} = Repo.insert!(%Todo{title: "distant"}) + end + end} + ) + + assert_receive :registered, 100 + + :ok = Phoenix.Sync.Sandbox.allow(Repo, parent, :sandbox_test_process) + + send(pid, :insert) + end) + + # prove that the insert succeeded + assert Repo.all(Todo) |> Enum.map(& &1.title) |> Enum.find(&(&1 == "distant")) + + assert_receive {:change, + %Electric.Client.Message.ChangeMessage{ + value: %Support.Todo{id: _, title: "distant"}, + headers: %{operation: :insert, relation: ["public", "todos"]} + }}, + 1000 + end + end + + describe "type and embed mapping" do + defmodule BinaryId do + use Ecto.Schema + + @primary_key {:id, :binary_id, autogenerate: false} + + schema "binary_ids" do + field :the_time, :time + field :the_date, :date + field :the_price, :decimal + field :the_array, {:array, :integer} + field :the_json, :map + + embeds_many :things, Thing do + field :name, :string + field :timestamp, :utc_datetime + end + + timestamps(type: :utc_datetime) + end + end + + @tag table: { + "binary_ids", + [ + "id uuid not null primary key", + "things jsonb", + "inserted_at timestamp with time zone", + "updated_at timestamp with time zone", + "the_time time", + "the_date date", + "the_price decimal(10, 2)", + "the_array integer[]", + "the_json jsonb" + ] + }, + data: { + BinaryId, + [ + "id", + "things", + "inserted_at", + "updated_at", + "the_time", + "the_date", + "the_price", + "the_array", + "the_json" + ], + [ + [ + "6a939b05-f467-442b-ad30-de81df681b3e", + [ + %{ + __struct__: BinaryId.Thing, + id: "c65ae689-3cbe-4e41-8da6-7b212d26b587", + name: "thing 1", + timestamp: ~U[2025-01-01T12:25:17Z] + }, + %{ + __struct__: BinaryId.Thing, + id: "21bdbe9b-0b51-4dbd-b326-5ad381092b56", + name: "thing 2", + timestamp: ~U[2025-01-01T12:25:18Z] + } + ], + ~U[2025-08-12T16:34:04Z], + ~U[2025-08-12T16:34:05Z], + ~T[13:24:00], + ~D[2025-01-02], + Decimal.new("6.99"), + [1, 2, 1], + %{a: 1, b: 2} + ], + [ + "74828fe4-1339-420a-8c37-f474900d62d5", + [ + %{ + __struct__: BinaryId.Thing, + id: "788f7861-0116-4da7-b218-b36e97c6d478", + name: "thing 3", + timestamp: ~U[2025-02-02T12:25:17Z] + }, + %{ + __struct__: BinaryId.Thing, + id: "26995e04-665d-4dd6-a8f2-0954b12d8555", + name: "thing 4", + timestamp: ~U[2025-02-02T12:25:18Z] + } + ], + ~U[2025-08-13T17:44:04Z], + 
~U[2025-08-13T17:44:05Z], + ~T[13:25:01], + ~D[2025-02-02], + Decimal.new("9.99"), + [2, 3, 2], + %{c: 1, d: 2} + ] + ] + } + @tag encoding: true + test "uuids" do + parent = self() + + {:ok, client} = Phoenix.Sync.Sandbox.client() + + start_supervised!( + {Task, + fn -> + for msg <- Electric.Client.stream(client, BinaryId, replica: :full), + do: send(parent, {:change, msg}) + end} + ) + + assert_receive {:change, + %Electric.Client.Message.ChangeMessage{ + value: %BinaryId{ + id: "6a939b05-f467-442b-ad30-de81df681b3e", + things: [ + %BinaryId.Thing{ + id: "c65ae689-3cbe-4e41-8da6-7b212d26b587", + name: "thing 1", + timestamp: ~U[2025-01-01T12:25:17Z] + }, + %BinaryId.Thing{ + id: "21bdbe9b-0b51-4dbd-b326-5ad381092b56", + name: "thing 2", + timestamp: ~U[2025-01-01T12:25:18Z] + } + ], + inserted_at: ~U[2025-08-12T16:34:04Z], + updated_at: ~U[2025-08-12T16:34:05Z], + the_time: ~T[13:24:00], + the_date: ~D[2025-01-02], + the_price: %Decimal{exp: -2, sign: 1, coef: 699}, + the_array: [1, 2, 1], + the_json: %{"a" => 1, "b" => 2} + }, + headers: %{operation: :insert} + }}, + 1000 + + assert_receive {:change, + %Electric.Client.Message.ChangeMessage{ + value: %BinaryId{ + id: "74828fe4-1339-420a-8c37-f474900d62d5", + things: [ + %BinaryId.Thing{ + id: "788f7861-0116-4da7-b218-b36e97c6d478", + name: "thing 3", + timestamp: ~U[2025-02-02T12:25:17Z] + }, + %BinaryId.Thing{ + id: "26995e04-665d-4dd6-a8f2-0954b12d8555", + name: "thing 4", + timestamp: ~U[2025-02-02T12:25:18Z] + } + ], + inserted_at: ~U[2025-08-13T17:44:04Z], + updated_at: ~U[2025-08-13T17:44:05Z], + the_time: ~T[13:25:01], + the_date: ~D[2025-02-02], + the_price: %Decimal{exp: -2, sign: 1, coef: 999}, + the_array: [2, 3, 2], + the_json: %{"c" => 1, "d" => 2} + }, + headers: %{operation: :insert} + }} + + assert_receive {:change, + %Electric.Client.Message.ControlMessage{ + control: :up_to_date + }} + + Repo.transaction(fn -> + Repo.insert!(%BinaryId{ + id: "778247a6-2dcb-4278-b696-8e1b974cf073", + things: [ + %BinaryId.Thing{ + id: "b266d1aa-cd5a-4bbe-8766-d7f7c041ffb3", + name: "thing 5", + timestamp: ~U[2025-03-03T12:25:17Z] + }, + %BinaryId.Thing{ + id: "77f2929f-5ad7-4c06-8d60-6efcd9dcb19c", + name: "thing 6", + timestamp: ~U[2025-03-03T12:25:18Z] + } + ], + inserted_at: ~U[2025-08-13T18:44:04Z], + updated_at: ~U[2025-08-13T18:44:05Z], + the_time: ~T[14:34:00], + the_date: ~D[2025-03-02], + the_price: Decimal.new("16.51"), + the_array: [1, 2, 3], + the_json: %{"e" => 1, "f" => 2} + }) + end) + + assert_receive {:change, + %Electric.Client.Message.ChangeMessage{ + value: %BinaryId{ + id: "778247a6-2dcb-4278-b696-8e1b974cf073", + things: [ + %BinaryId.Thing{ + id: "b266d1aa-cd5a-4bbe-8766-d7f7c041ffb3", + name: "thing 5", + timestamp: ~U[2025-03-03T12:25:17Z] + }, + %BinaryId.Thing{ + id: "77f2929f-5ad7-4c06-8d60-6efcd9dcb19c", + name: "thing 6", + timestamp: ~U[2025-03-03T12:25:18Z] + } + ], + inserted_at: ~U[2025-08-13T18:44:04Z], + updated_at: ~U[2025-08-13T18:44:05Z], + the_time: ~T[14:34:00], + the_array: [1, 2, 3], + the_json: %{"e" => 1, "f" => 2} + }, + headers: %{operation: :insert} + }} + end + end + + defp receive_sandbox_updates(write_fun) do + parent = self() + ref = make_ref() + + {:ok, task_supervisor} = start_supervised(Task.Supervisor) + + task = + Task.Supervisor.async(task_supervisor, fn -> + receive do + {:ready, ^ref} -> Repo.transaction(write_fun) + end + end) + + {:ok, client} = Phoenix.Sync.Sandbox.client() + + start_supervised!( + {Task, + fn -> + for msg <- Electric.Client.stream(client, Todo, replica: 
:full), + do: send(parent, {:change, msg}) + end} + ) + + assert_receive {:change, + %Electric.Client.Message.ChangeMessage{ + value: %Support.Todo{id: 1, title: "one", completed: false}, + headers: %{operation: :insert} + }}, + 1000 + + assert_receive {:change, + %Electric.Client.Message.ChangeMessage{ + value: %Support.Todo{id: 2, title: "two", completed: false}, + headers: %{operation: :insert} + }} + + assert_receive {:change, + %Electric.Client.Message.ChangeMessage{ + value: %Support.Todo{id: 3, title: "three", completed: true}, + headers: %{operation: :insert} + }} + + assert_receive {:change, + %Electric.Client.Message.ControlMessage{ + control: :up_to_date + }} + + send(task.pid, {:ready, ref}) + + Task.await(task) + end +end diff --git a/test/phoenix/sync/sandbox/sandbox_shared_test.exs b/test/phoenix/sync/sandbox/sandbox_shared_test.exs new file mode 100644 index 0000000..5b8e689 --- /dev/null +++ b/test/phoenix/sync/sandbox/sandbox_shared_test.exs @@ -0,0 +1,119 @@ +defmodule Phoenix.Sync.Sandbox.SandboxSharedTest do + use ExUnit.Case, async: false + + Code.ensure_loaded!(Support.SandboxRepo) + Code.ensure_loaded!(Support.Todo) + Code.ensure_loaded!(Support.User) + + alias Support.SandboxRepo, as: Repo + alias Support.Todo + + @moduletag :sandbox + @moduletag table: { + "todos", + [ + "id int8 not null primary key generated always as identity", + "title text", + "completed boolean default false" + ] + } + + setup(ctx) do + owner = Ecto.Adapters.SQL.Sandbox.start_owner!(Repo, shared: true) + on_exit(fn -> Ecto.Adapters.SQL.Sandbox.stop_owner(owner) end) + Phoenix.Sync.Sandbox.start!(Repo, owner, shared: true, tags: ctx) + end + + setup [ + :with_repo_table + ] + + describe "shared sandbox" do + test "allows any process to access the test stack" do + parent = self() + + {:ok, supervisor} = + start_supervised( + {DynamicSupervisor, name: __MODULE__.DynamicSupervisor, strategy: :one_for_one} + ) + + receive_sandbox_updates(fn -> + {:ok, pid} = + DynamicSupervisor.start_child( + supervisor, + {Task, + fn -> + receive do + :insert -> + Repo.transaction(fn -> + assert {:ok, %Todo{}} = Repo.insert(%Todo{title: "fragrant"}) + send(parent, :inserted) + end) + end + end} + ) + + send(pid, :insert) + end) + + assert_receive :inserted, 500 + + # prove that the insert succeeded + assert Repo.all(Todo) |> Enum.map(& &1.title) |> Enum.find(&(&1 == "fragrant")) + + assert_receive {:change, + %Electric.Client.Message.ChangeMessage{ + value: %Todo{id: _, title: "fragrant"}, + headers: %{operation: :insert, relation: ["public", "todos"]} + }}, + 1000 + end + end + + defp receive_sandbox_updates(write_fun) do + parent = self() + ref = make_ref() + + task = + Task.async(fn -> + receive do + {:ready, ^ref} -> write_fun.() + end + end) + + {:ok, client} = Phoenix.Sync.Sandbox.client() + + start_supervised!( + {Task, + fn -> + for msg <- Electric.Client.stream(client, Todo, replica: :full), + do: send(parent, {:change, msg}) + end} + ) + + send(task.pid, {:ready, ref}) + + Task.await(task) + end + + defp with_repo_table(ctx) do + case ctx do + %{table: {name, columns}} -> + sql = + """ + CREATE TABLE #{Support.DbSetup.inspect_relation(name)} ( + #{Enum.join(columns, ",\n")} + ) + """ + + Repo.query!(sql, []) + + :ok + + _ -> + :ok + end + + :ok + end +end diff --git a/test/phoenix/sync/sandbox_test.exs b/test/phoenix/sync/sandbox_test.exs new file mode 100644 index 0000000..a989212 --- /dev/null +++ b/test/phoenix/sync/sandbox_test.exs @@ -0,0 +1,383 @@ +defmodule Phoenix.Sync.SandboxTest do + use 
ExUnit.Case, async: true + use Support.ElectricHelpers, endpoint: __MODULE__.Endpoint + + @moduletag :sandbox + + Code.ensure_loaded!(Support.SandboxRepo) + Code.ensure_loaded!(Support.Todo) + Code.ensure_loaded!(Support.User) + + defmodule Controller do + use Phoenix.Controller, formats: [:html, :json] + + import Plug.Conn + import Phoenix.Sync.Controller + + def all(conn, params) do + sync_render(conn, params, table: "todos") + end + end + + defmodule Router do + use Phoenix.Router + + import Phoenix.Sync.Router + require Phoenix.LiveView.Router + + scope "/sync" do + # by default we take the table name from the path + # note that this does not handle weird table names that need quoting + # or namespaces + sync "/todos", Support.Todo + end + + scope "/stream" do + Phoenix.LiveView.Router.live("/sandbox", Phoenix.Sync.LiveViewTest.StreamSandbox) + end + + scope "/todos" do + get "/all", Controller, :all + end + end + + defmodule Endpoint do + use Phoenix.Endpoint, otp_app: :phoenix_sync + + plug Router + end + + alias Support.SandboxRepo, as: Repo + alias Support.Todo + + import Phoenix.ConnTest + import Phoenix.LiveViewTest + import Plug.Conn + + defp with_repo_table(ctx) do + case ctx do + %{table: {name, columns}} -> + sql = + """ + CREATE TABLE #{Support.DbSetup.inspect_relation(name)} ( + #{Enum.join(columns, ",\n")} + ) + """ + + Repo.query!(sql, []) + + :ok + + _ -> + :ok + end + + :ok + end + + defp with_repo_data(ctx) do + case Map.get(ctx, :data, nil) do + {schema, columns, values} -> + Enum.each(values, fn row_values -> + todo = + struct( + schema, + Enum.zip(columns, row_values) |> Enum.map(fn {c, v} -> {String.to_atom(c), v} end) + ) + + Repo.insert(todo) + end) + + :ok + + nil -> + :ok + end + end + + setup(ctx) do + Ecto.Adapters.SQL.Sandbox.mode(Repo, :manual) + + owner = Ecto.Adapters.SQL.Sandbox.start_owner!(Repo) + on_exit(fn -> Ecto.Adapters.SQL.Sandbox.stop_owner(owner) end) + Phoenix.Sync.Sandbox.start!(Repo, owner, tags: ctx) + end + + setup [ + :define_endpoint, + :with_repo_table, + :with_repo_data + ] + + setup(ctx) do + endpoint = Map.get(ctx, :endpoint, @endpoint) + + Phoenix.Config.put(endpoint, :phoenix_sync, Phoenix.Sync.plug_opts()) + + [endpoint: endpoint] + end + + @moduletag table: { + "todos", + [ + "id int8 not null primary key generated always as identity", + "title text", + "completed boolean default false" + ] + } + @moduletag data: { + Support.Todo, + ["title", "completed"], + [["one", false], ["two", false], ["three", true]] + } + + test "live view", _ctx do + {:ok, lv, html} = + build_conn() + |> put_private(:test_pid, self()) + |> live("/stream/sandbox") + + assert_receive {:sync, {:todos, :loaded}} + assert_receive {:sync, {:todos, :live}} + + for todo <- Repo.all(Todo) do + assert html =~ todo.title + end + + Repo.insert!(%Todo{title: "fourth", completed: false}) + + assert_receive {:sync, _event} + + assert render(lv) =~ "fourth" + end + + test "Client.stream" do + parent = self() + ref = make_ref() + + task = + Task.async(fn -> + receive do + {:ready, ^ref} -> + Repo.transaction(fn -> + Repo.insert!(%Todo{title: "super"}) + end) + end + end) + + start_supervised!( + {Task, + fn -> + for msg <- Phoenix.Sync.Client.stream(Todo, replica: :full), + do: send(parent, {:change, msg}) + end} + ) + + assert_receive {:change, + %Electric.Client.Message.ChangeMessage{ + value: %Support.Todo{id: 1, title: "one", completed: false}, + headers: %{operation: :insert} + }}, + 1000 + + assert_receive {:change, + %Electric.Client.Message.ChangeMessage{ + 
value: %Support.Todo{id: 2, title: "two", completed: false}, + headers: %{operation: :insert} + }} + + assert_receive {:change, + %Electric.Client.Message.ChangeMessage{ + value: %Support.Todo{id: 3, title: "three", completed: true}, + headers: %{operation: :insert} + }} + + assert_receive {:change, + %Electric.Client.Message.ControlMessage{ + control: :up_to_date + }} + + send(task.pid, {:ready, ref}) + + assert_receive {:change, + %Electric.Client.Message.ChangeMessage{ + value: %Support.Todo{id: _, title: "super"}, + headers: %{operation: :insert} + }} + end + + describe "Phoenix.Sync.Router sandbox integration" do + @describetag :router + + test "basic sync" do + resp = + Phoenix.ConnTest.build_conn() + |> Phoenix.ConnTest.get("/sync/todos", %{offset: "-1"}) + + assert resp.status == 200 + assert Plug.Conn.get_resp_header(resp, "electric-offset") == ["0_0"] + + assert [ + %{"headers" => %{"operation" => "insert"}, "value" => %{"title" => "one"}}, + %{"headers" => %{"operation" => "insert"}, "value" => %{"title" => "two"}}, + %{"headers" => %{"operation" => "insert"}, "value" => %{"title" => "three"}} + ] = Jason.decode!(resp.resp_body) + end + + test "live changes" do + parent = self() + + task1 = + Task.async(fn -> + resp = + Phoenix.ConnTest.build_conn() + |> Phoenix.ConnTest.get("/sync/todos", %{offset: "-1"}) + + assert resp.status == 200 + assert Plug.Conn.get_resp_header(resp, "electric-offset") == ["0_0"] + [offset] = Plug.Conn.get_resp_header(resp, "electric-offset") + [handle] = Plug.Conn.get_resp_header(resp, "electric-handle") + + snapshot = Jason.decode!(resp.resp_body) + + resp = + Phoenix.ConnTest.build_conn() + |> Phoenix.ConnTest.get("/sync/todos", %{offset: offset, handle: handle}) + + assert [%{"headers" => %{"control" => "up-to-date", "global_last_seen_lsn" => "0"}}] = + Jason.decode!(resp.resp_body) + + [offset] = Plug.Conn.get_resp_header(resp, "electric-offset") + [handle] = Plug.Conn.get_resp_header(resp, "electric-handle") + + send(parent, {:snapshot, snapshot}) + receive(do: (:request -> :ok)) + + resp = + Phoenix.ConnTest.build_conn() + |> Phoenix.ConnTest.get("/sync/todos?live=true", %{offset: offset, handle: handle}) + + assert resp.status == 200 + + send(parent, {:live, Jason.decode!(resp.resp_body)}) + end) + + task2 = + Task.async(fn -> + receive do + :insert -> + Repo.transaction(fn -> + Repo.insert!(%Todo{title: "wild"}) + end) + end + end) + + snapshot = receive(do: ({:snapshot, response} -> response)) + + assert [ + %{"headers" => %{"operation" => "insert"}, "value" => %{"title" => "one"}}, + %{"headers" => %{"operation" => "insert"}, "value" => %{"title" => "two"}}, + %{"headers" => %{"operation" => "insert"}, "value" => %{"title" => "three"}} + ] = snapshot + + send(task1.pid, :request) + send(task2.pid, :insert) + live = receive(do: ({:live, response} -> response)) + + assert [ + %{"headers" => %{"operation" => "insert"}, "value" => %{"title" => "wild"}}, + %{"headers" => %{"control" => "up-to-date"}} + ] = live + + Task.await_many([task1, task2]) + end + end + + describe "Phoenix.Sync.Controller sandbox integration" do + @describetag :controller + + test "returns the shape data", _ctx do + resp = + Phoenix.ConnTest.build_conn() + |> Phoenix.ConnTest.get("/todos/all", %{offset: "-1"}) + + assert resp.status == 200 + assert Plug.Conn.get_resp_header(resp, "electric-offset") == ["0_0"] + + assert [ + %{"headers" => %{"operation" => "insert"}, "value" => %{"title" => "one"}}, + %{"headers" => %{"operation" => "insert"}, "value" => %{"title" => 
"two"}}, + %{"headers" => %{"operation" => "insert"}, "value" => %{"title" => "three"}} + ] = Jason.decode!(resp.resp_body) + end + + test "live changes" do + parent = self() + path = "/todos/all" + + task1 = + Task.async(fn -> + resp = + Phoenix.ConnTest.build_conn() + |> Phoenix.ConnTest.get(path, %{offset: "-1"}) + + assert resp.status == 200 + assert Plug.Conn.get_resp_header(resp, "electric-offset") == ["0_0"] + [offset] = Plug.Conn.get_resp_header(resp, "electric-offset") + [handle] = Plug.Conn.get_resp_header(resp, "electric-handle") + + snapshot = Jason.decode!(resp.resp_body) + + resp = + Phoenix.ConnTest.build_conn() + |> Phoenix.ConnTest.get(path, %{offset: offset, handle: handle}) + + assert [%{"headers" => %{"control" => "up-to-date", "global_last_seen_lsn" => "0"}}] = + Jason.decode!(resp.resp_body) + + [offset] = Plug.Conn.get_resp_header(resp, "electric-offset") + [handle] = Plug.Conn.get_resp_header(resp, "electric-handle") + + send(parent, {:snapshot, snapshot}) + receive(do: (:request -> :ok)) + + resp = + Phoenix.ConnTest.build_conn() + |> Phoenix.ConnTest.get(path, %{live: true, offset: offset, handle: handle}) + + assert resp.status == 200 + + send(parent, {:live, Jason.decode!(resp.resp_body)}) + end) + + task2 = + Task.async(fn -> + receive do + :insert -> + Repo.transaction(fn -> + Repo.insert!(%Todo{title: "wild"}) + end) + end + end) + + snapshot = receive(do: ({:snapshot, response} -> response)) + + assert [ + %{"headers" => %{"operation" => "insert"}, "value" => %{"title" => "one"}}, + %{"headers" => %{"operation" => "insert"}, "value" => %{"title" => "two"}}, + %{"headers" => %{"operation" => "insert"}, "value" => %{"title" => "three"}} + ] = snapshot + + send(task1.pid, :request) + send(task2.pid, :insert) + live = receive(do: ({:live, response} -> response)) + + assert [ + %{"headers" => %{"operation" => "insert"}, "value" => %{"title" => "wild"}}, + %{"headers" => %{"control" => "up-to-date"}} + ] = live + + Task.await_many([task1, task2]) + end + end +end diff --git a/test/support/db_setup.ex b/test/support/db_setup.ex index 18dd300..6737539 100644 --- a/test/support/db_setup.ex +++ b/test/support/db_setup.ex @@ -10,7 +10,7 @@ defmodule Support.DbSetup do def with_unique_db(ctx) do base_config = Application.fetch_env!(:electric, :connection_opts) - {:ok, utility_pool} = start_db_pool(base_config) + {:ok, utility_pool} = start_db_pool(base_config, :linked) Process.unlink(utility_pool) full_db_name = to_string(ctx.test) @@ -44,7 +44,7 @@ defmodule Support.DbSetup do end) updated_config = Keyword.put(base_config, :database, db_name) - {:ok, pool} = start_db_pool(updated_config) + {:ok, pool} = start_db_pool(updated_config, :supervised) {:ok, %{utility_pool: utility_pool, db_config: updated_config, pool: pool, db_conn: pool}} end @@ -64,7 +64,7 @@ defmodule Support.DbSetup do def with_shared_db(_ctx) do config = Application.fetch_env!(:electric, :connection_opts) - {:ok, pool} = start_db_pool(config) + {:ok, pool} = start_db_pool(config, :supervised) {:ok, %{pool: pool, db_config: config, db_conn: pool}} end @@ -127,11 +127,20 @@ defmodule Support.DbSetup do |> Base.encode64() |> String.replace_trailing("==", "") - defp start_db_pool(connection_opts) do + defp start_db_pool(connection_opts, :linked) do start_opts = Electric.Utils.deobfuscate_password(connection_opts) ++ @postgrex_start_opts + Postgrex.start_link(start_opts) end + defp start_db_pool(connection_opts, :supervised) do + start_opts = Electric.Utils.deobfuscate_password(connection_opts) ++ 
@postgrex_start_opts + + ExUnit.Callbacks.start_supervised({Postgrex, start_opts}, + id: :"#{System.unique_integer([:positive])}_db_pool" + ) + end + def with_table(ctx) do case ctx do %{table: {name, columns}} -> diff --git a/test/support/electric_helpers.ex b/test/support/electric_helpers.ex index c67e1ee..85ebf4c 100644 --- a/test/support/electric_helpers.ex +++ b/test/support/electric_helpers.ex @@ -9,7 +9,7 @@ defmodule Support.ElectricHelpers do endpoint_module = opts[:endpoint] || @endpoint start_endpoint = - if endpoint_module != @endpoint do + if endpoint_module && endpoint_module != @endpoint do quote do setup_all do ExUnit.CaptureLog.capture_log(fn -> start_supervised!(unquote(endpoint_module)) end) diff --git a/test/support/error_view.ex b/test/support/error_view.ex new file mode 100644 index 0000000..2f38002 --- /dev/null +++ b/test/support/error_view.ex @@ -0,0 +1,5 @@ +defmodule Phoenix.ErrorView do + def render("500.html", _params) do + "" + end +end diff --git a/test/support/live_views/stream.ex b/test/support/live_views/stream.ex index 0639f7a..a3ed432 100644 --- a/test/support/live_views/stream.ex +++ b/test/support/live_views/stream.ex @@ -210,3 +210,38 @@ defmodule Phoenix.Sync.LiveViewTest.StreamLiveComponent do |> Phoenix.Sync.LiveView.sync_stream(:users, Support.User, client: assigns.client)} end end + +defmodule Phoenix.Sync.LiveViewTest.StreamSandbox do + use Phoenix.LiveView + + def run(lv, func) do + GenServer.call(lv.pid, {:run, func}) + end + + def render(assigns) do + ~H""" +
+    <ul id="todos" phx-update="stream">
+      <li :for={{id, todo} <- @streams.todos} id={id}>[<%= if(todo.completed, do: "X", else: " ") %>] <%= todo.title %></li>
+    </ul>
+ """ + end + + def mount(_params, _session, socket) do + parent = + get_in(socket.private.connect_info.private, [:test_pid]) || + raise "missing parent pid configuration" + + {:ok, + socket + |> assign(:test_pid, parent) + |> Phoenix.Sync.LiveView.sync_stream(:todos, Support.Todo)} + end + + def handle_info({:sync, event}, socket) do + # send messsage to test pid, just for sync + send(socket.assigns.test_pid, {:sync, event}) + {:noreply, Phoenix.Sync.LiveView.sync_stream_update(socket, event)} + end +end diff --git a/test/support/repo.ex b/test/support/repo.ex index ef80aec..56b089c 100644 --- a/test/support/repo.ex +++ b/test/support/repo.ex @@ -4,6 +4,14 @@ defmodule Support.Repo do adapter: Ecto.Adapters.Postgres end +defmodule Support.SandboxRepo do + use Phoenix.Sync.Sandbox.Postgres + + use Ecto.Repo, + otp_app: :phoenix_sync, + adapter: Phoenix.Sync.Sandbox.Postgres.adapter() +end + defmodule Support.ConfigTestRepo do use Ecto.Repo, otp_app: :phoenix_sync, diff --git a/test/support/router.ex b/test/support/router.ex index e878b3e..2a892b9 100644 --- a/test/support/router.ex +++ b/test/support/router.ex @@ -23,6 +23,7 @@ defmodule Phoenix.Sync.LiveViewTest.Router do live "/stream", StreamLive live "/stream/with-component", StreamLiveWithComponent + live "/stream/sandbox", StreamSandbox end scope "/" do diff --git a/test/test_helper.exs b/test/test_helper.exs index 79288c7..a9de407 100644 --- a/test/test_helper.exs +++ b/test/test_helper.exs @@ -1,19 +1,4 @@ -# Application.put_env(:phoenix_sync, Phoenix.Sync.LiveViewTest.Endpoint, -# http: [ip: {127, 0, 0, 1}, port: 4004], -# adapter: Bandit.PhoenixAdapter, -# server: true, -# live_view: [signing_salt: "aaaaaaaa"], -# secret_key_base: String.duplicate("a", 64), -# render_errors: [ -# formats: [ -# html: Phoenix.LiveViewTest.E2E.ErrorHTML -# ], -# layout: false -# ], -# pubsub_server: Phoenix.LiveViewTest.E2E.PubSub, -# debug_errors: false -# ) - +{:ok, _} = Support.SandboxRepo.start_link() {:ok, _} = Phoenix.Sync.LiveViewTest.Endpoint.start_link() ExUnit.start(capture_log: true)