diff --git a/.github/workflows/elixir.yml b/.github/workflows/elixir.yml index 7c9c7352..e900a00e 100644 --- a/.github/workflows/elixir.yml +++ b/.github/workflows/elixir.yml @@ -3,6 +3,7 @@ on: pull_request: branches: - main + - v2 concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} @@ -35,10 +36,10 @@ jobs: restore-keys: | ${{ runner.os }}-mix- - name: Install dependencies - run: mix deps.get + run: mix deps.get --check-locked - compile: - name: Compile project in test env + format: + name: Formatting checks runs-on: u22-arm-runner needs: [deps] @@ -49,39 +50,17 @@ jobs: uses: erlef/setup-beam@v1 with: otp-version: '25.3.2.7' - elixir-version: '1.14.5' + elixir-version: '1.17' - name: Cache Mix uses: actions/cache@v4 with: path: deps key: ${{ runner.os }}-mix-${{ hashFiles(format('{0}{1}', github.workspace, '/mix.lock')) }} - restore-keys: | - ${{ runner.os }}-mix- - - name: Cache Build - uses: actions/cache@v4 - with: - path: | - _build/${{ env.MIX_ENV }} - key: ${{ runner.os }}-build-${{ env.MIX_ENV }}-${{ steps.beam.outputs.otp-version }}-${{ steps.beam.outputs.elixir-version }}-${{ hashFiles(format('{0}{1}', github.workspace, '/mix.lock')) }}-${{ github.sha }} - restore-keys: | - ${{ runner.os }}-build-${{ env.MIX_ENV }}- - - name: Cache native - uses: actions/cache@v4 - id: native-cache - with: - path: | - priv/native - key: ${{ runner.os }}-build-native-${{ steps.beam.outputs.otp-version }}-${{ steps.beam.outputs.elixir-version }}-${{ hashFiles(format('{0}{1}', github.workspace, '/native/**/*')) }} - - name: Set up Rust - uses: dtolnay/rust-toolchain@v1 - if: steps.native-cache.outputs.cache-hit != 'true' || steps.elixir-cache.output.cache-hit != 'true' - with: - toolchain: stable - - name: Compile - run: mix compile + - name: Run format check + run: mix format --check-formatted - format: - name: Formatting checks + credo: + name: Code style runs-on: u22-arm-runner needs: [deps] @@ -92,21 +71,21 @@ jobs: uses: 
erlef/setup-beam@v1 with: otp-version: '25.3.2.7' - elixir-version: '1.17' + elixir-version: '1.14.5' - name: Cache Mix uses: actions/cache@v4 with: path: deps key: ${{ runner.os }}-mix-${{ hashFiles(format('{0}{1}', github.workspace, '/mix.lock')) }} - restore-keys: | - ${{ runner.os }}-mix- - - name: Run format check - run: mix format --check-formatted + - name: Compile deps + run: mix deps.compile + - name: Credo checks + run: mix credo --strict --all --mute-exit-status - credo: - name: Code style + tests: + name: Run tests runs-on: u22-arm-runner - needs: [compile] + needs: [deps] steps: - uses: actions/checkout@v4 @@ -116,28 +95,42 @@ jobs: with: otp-version: '25.3.2.7' elixir-version: '1.14.5' + - name: Set up Rust + uses: dtolnay/rust-toolchain@v1 + with: + toolchain: stable - name: Cache Mix uses: actions/cache@v4 with: path: deps key: ${{ runner.os }}-mix-${{ hashFiles(format('{0}{1}', github.workspace, '/mix.lock')) }} - restore-keys: | - ${{ runner.os }}-mix- - - name: Cache Build + - name: Cache native uses: actions/cache@v4 with: path: | - _build/${{ env.MIX_ENV }} - key: ${{ runner.os }}-build-${{ env.MIX_ENV }}-${{ steps.beam.outputs.otp-version }}-${{ steps.beam.outputs.elixir-version }}-${{ hashFiles(format('{0}{1}', github.workspace, '/mix.lock')) }}-${{ github.sha }} + _build/${{ env.MIX_ENV }}/lib/supavisor/native + ~/.cargo/bin/ + ~/.cargo/registry/index/ + ~/.cargo/registry/cache/ + ~/.cargo/git/db/ + key: ${{ runner.os }}-build-native-${{ hashFiles(format('{0}{1}', github.workspace, '/native/**/Cargo.lock')) }} restore-keys: | - ${{ runner.os }}-build-${{ env.MIX_ENV }}- - - name: Credo checks - run: mix credo --strict --mute-exit-status + ${{ runner.os }}-build-native- + - name: Compile deps + run: mix deps.compile + - name: Compile + run: mix compile + - name: Set up Postgres + run: docker-compose -f ./docker-compose.db.yml up -d + - name: Start epmd + run: epmd -daemon + - name: Run tests + run: mix test - tests: - name: Run tests + 
integration: + name: Run integration tests runs-on: u22-arm-runner - needs: [compile] + needs: [deps] steps: - uses: actions/checkout@v4 @@ -147,9 +140,11 @@ jobs: with: otp-version: '25.3.2.7' elixir-version: '1.14.5' + - uses: actions/setup-node@v4 + with: + node-version: 'lts/*' - name: Set up Rust uses: dtolnay/rust-toolchain@v1 - if: steps.native-cache.outputs.cache-hit != 'true' || steps.elixir-cache.output.cache-hit != 'true' with: toolchain: stable - name: Cache Mix @@ -157,33 +152,33 @@ jobs: with: path: deps key: ${{ runner.os }}-mix-${{ hashFiles(format('{0}{1}', github.workspace, '/mix.lock')) }} - restore-keys: | - ${{ runner.os }}-mix- - - name: Cache Build - uses: actions/cache@v4 - with: - path: | - _build/${{ env.MIX_ENV }} - key: ${{ runner.os }}-build-${{ env.MIX_ENV }}-${{ steps.beam.outputs.otp-version }}-${{ steps.beam.outputs.elixir-version }}-${{ hashFiles(format('{0}{1}', github.workspace, '/mix.lock')) }}-${{ github.sha }} - restore-keys: | - ${{ runner.os }}-build-${{ env.MIX_ENV }}- - name: Cache native uses: actions/cache@v4 with: path: | - priv/native - key: ${{ runner.os }}-build-native-${{ steps.beam.outputs.otp-version }}-${{ steps.beam.outputs.elixir-version }}-${{ hashFiles(format('{0}{1}', github.workspace, '/native/**/*')) }} + _build/${{ env.MIX_ENV }}/lib/supavisor/native + ~/.cargo/bin/ + ~/.cargo/registry/index/ + ~/.cargo/registry/cache/ + ~/.cargo/git/db/ + key: ${{ runner.os }}-build-native-${{ hashFiles(format('{0}{1}', github.workspace, '/native/**/Cargo.lock')) }} + restore-keys: | + ${{ runner.os }}-build-native- + - name: Compile deps + run: mix deps.compile + - name: Compile + run: mix compile - name: Set up Postgres run: docker-compose -f ./docker-compose.db.yml up -d - name: Start epmd run: epmd -daemon - name: Run tests - run: mix test + run: mix test --only integration --trace dialyzer: name: Dialyze runs-on: u22-arm-runner - needs: [compile] + needs: [deps] steps: - uses: actions/checkout@v4 @@ -195,7 +190,6 @@ 
jobs: elixir-version: '1.14.5' - name: Set up Rust uses: dtolnay/rust-toolchain@v1 - if: steps.native-cache.outputs.cache-hit != 'true' || steps.elixir-cache.output.cache-hit != 'true' with: toolchain: stable - name: Cache Mix @@ -203,28 +197,31 @@ jobs: with: path: deps key: ${{ runner.os }}-mix-${{ hashFiles(format('{0}{1}', github.workspace, '/mix.lock')) }} - restore-keys: | - ${{ runner.os }}-mix- - - name: Cache Build - uses: actions/cache@v4 - with: - path: | - _build/${{ env.MIX_ENV }} - key: ${{ runner.os }}-build-${{ env.MIX_ENV }}-${{ steps.beam.outputs.otp-version }}-${{ steps.beam.outputs.elixir-version }}-${{ hashFiles(format('{0}{1}', github.workspace, '/mix.lock')) }}-${{ github.sha }} - restore-keys: | - ${{ runner.os }}-build-${{ env.MIX_ENV }}- - name: Cache native uses: actions/cache@v4 with: path: | - priv/native - key: ${{ runner.os }}-build-native-${{ steps.beam.outputs.otp-version }}-${{ steps.beam.outputs.elixir-version }}-${{ hashFiles(format('{0}{1}', github.workspace, '/native/**/*')) }} + _build/${{ env.MIX_ENV }}/lib/supavisor/native + ~/.cargo/bin/ + ~/.cargo/registry/index/ + ~/.cargo/registry/cache/ + ~/.cargo/git/db/ + key: ${{ runner.os }}-build-native-${{ hashFiles(format('{0}{1}', github.workspace, '/native/**/Cargo.lock')) }} + restore-keys: | + ${{ runner.os }}-build-native- + - name: Compile deps + run: mix deps.compile + - name: Compile + run: mix compile - name: Retrieve PLT Cache uses: actions/cache@v4 id: plt-cache with: path: _build/${{ env.MIX_ENV }}/*.plt key: ${{ runner.os }}-${{ steps.beam.outputs.otp-version }}-${{ steps.beam.outputs.elixir-version }}-plts-${{ hashFiles(format('{0}{1}', github.workspace, '/mix.lock')) }} + restore-keys: | + ${{ runner.os }}-${{ steps.beam.outputs.otp-version }}-${{ steps.beam.outputs.elixir-version }}-plts- + - name: Create PLTs if: steps.plt-cache.outputs.cache-hit != 'true' run: | diff --git a/.gitignore b/.gitignore index c396724a..a9065a53 100644 --- a/.gitignore +++ 
b/.gitignore @@ -37,3 +37,5 @@ burrito_out/* supavisor-*.tar.gz priv/native/* +*.bggg +/.pre-commit-config.yaml diff --git a/.mailmap b/.mailmap index c3543e56..4bc24c8f 100644 --- a/.mailmap +++ b/.mailmap @@ -1,5 +1,6 @@ Stanislav Muzhyk + Joel Lee Joel Lee -Dimitris Zorbas +Łukasz Jan Niemier diff --git a/Makefile b/Makefile index 7efcc144..21a4a7e4 100644 --- a/Makefile +++ b/Makefile @@ -12,6 +12,8 @@ dev: SECRET_KEY_BASE="dev" \ CLUSTER_POSTGRES="true" \ DB_POOL_SIZE="5" \ + METRICS_DISABLED="false" \ + AVAILABILITY_ZONE="ap-southeast-1b" \ ERL_AFLAGS="-kernel shell_history enabled +zdbbl 2097151" \ iex --name node1@127.0.0.1 --cookie cookie -S mix run --no-halt @@ -26,7 +28,9 @@ dev.node2: CLUSTER_POSTGRES="true" \ PROXY_PORT_SESSION="5442" \ PROXY_PORT_TRANSACTION="6553" \ - PARTISAN_PEER_PORT="10201" \ + PROXY_PORT="5402" \ + NODE_IP=localhost \ + AVAILABILITY_ZONE="ap-southeast-1c" \ ERL_AFLAGS="-kernel shell_history enabled" \ iex --name node2@127.0.0.1 --cookie cookie -S mix phx.server @@ -41,15 +45,8 @@ dev.node3: CLUSTER_POSTGRES="true" \ PROXY_PORT_SESSION="5443" \ PROXY_PORT_TRANSACTION="6554" \ - PARTISAN_PEER_PORT="10202" \ ERL_AFLAGS="-kernel shell_history enabled" \ - iex --name node3@127.0.0.1 --cookie cookie -S mix phx.server - -dev_bin: - MIX_ENV=dev mix release supavisor_bin && ls -l burrito_out - -bin: - MIX_ENV=prod mix release supavisor_bin && ls -l burrito_out + iex --name node3@127.0.0.1 --cookie cookie -S mix phx.server db_migrate: mix ecto.migrate --prefix _supavisor --log-migrator-sql @@ -65,15 +62,29 @@ db_rebuild: docker-compose -f ./docker-compose.db.yml build make db_start +PGBENCH_USER ?= postgres.sys +PGBENCH_PORT ?= 6543 +PGBENCH_RATE ?= 5000 +PGBENCH_DURATION ?= 60 +PGBENCH_CLIENTS ?= 1000 + pgbench_init: PGPASSWORD=postgres pgbench -i -h 127.0.0.1 -p 6432 -U postgres -d postgres pgbench_short: - PGPASSWORD=postgres pgbench -M extended --transactions 5 --jobs 4 --client 1 -h localhost -p 7654 -U transaction.localhost postgres 
+ PGPASSWORD=postgres pgbench -M extended --transactions 5 --jobs 4 --client 1 -h localhost -p 6543 -U postgres.sys postgres pgbench_long: PGPASSWORD=postgres pgbench -M extended --transactions 100 --jobs 10 --client 60 -h localhost -p 7654 -U transaction.localhost postgres +pgbench: + PGPASSWORD="postgres" pgbench \ + postgres://${PGBENCH_USER}@localhost:${PGBENCH_PORT}/postgres?sslmode=disable \ + -Srn -T ${PGBENCH_DURATION} \ + -j 8 -c ${PGBENCH_CLIENTS} \ + -P 10 -M extended \ + --rate ${PGBENCH_RATE} + clean: rm -rf _build && rm -rf deps @@ -94,5 +105,37 @@ dev_start_rel: FLY_ALLOC_ID=111e4567-e89b-12d3-a456-426614174000 \ SECRET_KEY_BASE="dev" \ CLUSTER_POSTGRES="true" \ - ERL_AFLAGS="-kernel shell_history enabled" \ - ./_build/dev/rel/supavisor/bin/supavisor start_iex + DB_POOL_SIZE="5" \ + _build/prod/rel/supavisor/bin/supavisor start_iex + +prod_rel: + MIX_ENV=prod METRICS_DISABLED=true mix compile && \ + MIX_ENV=prod METRICS_DISABLED=true mix release supavisor + +prod_start_rel: + MIX_ENV=prod \ + NODE_NAME="localhost" \ + VAULT_ENC_KEY="aHD8DZRdk2emnkdktFZRh3E9RNg4aOY7" \ + API_JWT_SECRET=dev \ + METRICS_JWT_SECRET=dev \ + REGION=eu \ + FLY_ALLOC_ID=111e4567-e89b-12d3-a456-426614174000 \ + SECRET_KEY_BASE="dev" \ + CLUSTER_POSTGRES="true" \ + DB_POOL_SIZE="5" \ + _build/prod/rel/supavisor/bin/supavisor start_iex + +prod_start_rel2: + MIX_ENV=prod \ + NODE_NAME=node2 \ + PORT=4001 \ + VAULT_ENC_KEY="aHD8DZRdk2emnkdktFZRh3E9RNg4aOY7" \ + API_JWT_SECRET=dev \ + METRICS_JWT_SECRET=dev \ + REGION=eu \ + SECRET_KEY_BASE="dev" \ + CLUSTER_POSTGRES="true" \ + PROXY_PORT_SESSION="5442" \ + PROXY_PORT_TRANSACTION="6553" \ + NODE_IP=localhost \ + _build/prod/rel/supavisor/bin/supavisor start_iex diff --git a/VERSION b/VERSION index 470abefa..f1547e6d 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.1.67 +2.0.7 diff --git a/config/config.exs b/config/config.exs index 7255b2ec..46291625 100644 --- a/config/config.exs +++ b/config/config.exs @@ -10,7 +10,13 @@ import 
Config config :supavisor, ecto_repos: [Supavisor.Repo], version: Mix.Project.config()[:version], - env: Mix.env() + env: Mix.env(), + metrics_disabled: System.get_env("METRICS_DISABLED") == "true", + switch_active_count: System.get_env("SWITCH_ACTIVE_COUNT", "100") |> String.to_integer(), + reconnect_retries: System.get_env("RECONNECT_RETRIES", "5") |> String.to_integer(), + subscribe_retries: System.get_env("SUBSCRIBE_RETRIES", "20") |> String.to_integer() + +config :prom_ex, storage_adapter: PromEx.Storage.Peep # Configures the endpoint config :supavisor, SupavisorWeb.Endpoint, @@ -23,7 +29,18 @@ config :supavisor, SupavisorWeb.Endpoint, # Configures Elixir's Logger config :logger, :console, format: "$time $metadata[$level] $message\n", - metadata: [:request_id, :project, :user, :region, :instance_id, :mode, :type] + metadata: [ + :request_id, + :project, + :user, + :region, + :instance_id, + :mode, + :type, + :app_name, + :peer_ip, + :local + ] # Use Jason for JSON parsing in Phoenix config :phoenix, :json_library, Jason diff --git a/config/dev.exs b/config/dev.exs index 843b9f47..eefe173f 100644 --- a/config/dev.exs +++ b/config/dev.exs @@ -62,8 +62,20 @@ config :supavisor, SupavisorWeb.Endpoint, config :logger, :console, format: "$time [$level] $message $metadata\n", level: :debug, - # level: :error, - metadata: [:error_code, :file, :line, :pid, :project, :user, :mode, :type] + # level: :notice, + metadata: [ + :error_code, + :file, + :line, + :pid, + :project, + :user, + :mode, + :type, + :app_name, + :peer_ip, + :local + ] # Set a higher stacktrace during development. Avoid configuring such # in production as building large stacktraces may be expensive. 
diff --git a/config/runtime.exs b/config/runtime.exs index d5909ca5..4bcec40b 100644 --- a/config/runtime.exs +++ b/config/runtime.exs @@ -1,7 +1,7 @@ import Config require Logger -alias Supavisor.Helpers, as: H +alias Supavisor.Helpers secret_key_base = if config_env() in [:dev, :test] do @@ -85,8 +85,7 @@ topologies = config: [ url: System.get_env("DATABASE_URL", "ecto://postgres:postgres@localhost:6432/postgres"), heartbeat_interval: 5_000, - channel_name: "supavisor_#{region}_#{maj}_#{min}", - channel_name_partisan: "supavisor_partisan_#{region}_#{maj}_#{min}" + channel_name: "supavisor_#{region}_#{maj}_#{min}" ] ] @@ -102,7 +101,7 @@ config :libcluster, upstream_ca = if path = System.get_env("GLOBAL_UPSTREAM_CA_PATH") do File.read!(path) - |> H.cert_to_bin() + |> Helpers.cert_to_bin() |> case do {:ok, bin} -> Logger.info("Loaded upstream CA from $GLOBAL_UPSTREAM_CA_PATH", @@ -152,13 +151,17 @@ if config_env() != :test do proxy_port_transaction: System.get_env("PROXY_PORT_TRANSACTION", "6543") |> String.to_integer(), proxy_port_session: System.get_env("PROXY_PORT_SESSION", "5432") |> String.to_integer(), + proxy_port: System.get_env("PROXY_PORT", "5412") |> String.to_integer(), prom_poll_rate: System.get_env("PROM_POLL_RATE", "15000") |> String.to_integer(), global_upstream_ca: upstream_ca, global_downstream_cert: downstream_cert, global_downstream_key: downstream_key, reconnect_on_db_close: System.get_env("RECONNECT_ON_DB_CLOSE") == "true", api_blocklist: System.get_env("API_TOKEN_BLOCKLIST", "") |> String.split(","), - metrics_blocklist: System.get_env("METRICS_TOKEN_BLOCKLIST", "") |> String.split(",") + metrics_blocklist: System.get_env("METRICS_TOKEN_BLOCKLIST", "") |> String.split(","), + node_host: System.get_env("NODE_IP", "127.0.0.1"), + availability_zone: System.get_env("AVAILABILITY_ZONE"), + local_proxy_multiplier: System.get_env("LOCAL_PROXY_MULTIPLIER", "20") |> String.to_integer() config :supavisor, Supavisor.Repo, url: 
System.get_env("DATABASE_URL", "ecto://postgres:postgres@localhost:6432/postgres"), @@ -174,23 +177,6 @@ if config_env() != :test do tag: "AES.GCM.V1", key: System.get_env("VAULT_ENC_KEY") } ] - - config :partisan, - # Which overlay to use - peer_service_manager: :partisan_pluggable_peer_service_manager, - listen_addrs: [ - { - System.get_env("PARTISAN_PEER_IP", "127.0.0.1"), - String.to_integer(System.get_env("PARTISAN_PEER_PORT", "20100")) - } - ], - channels: [ - data: %{parallelism: System.get_env("PARTISAN_PARALLELISM", "5") |> String.to_integer()} - ], - # Encoding for pid(), reference() and names - pid_encoding: false, - ref_encoding: false, - remote_ref_format: :improper_list end if System.get_env("LOGS_ENGINE") == "logflare" do diff --git a/config/test.exs b/config/test.exs index dcd59c57..0afa2fab 100644 --- a/config/test.exs +++ b/config/test.exs @@ -8,13 +8,20 @@ config :supavisor, jwt_claim_validators: %{}, proxy_port_session: System.get_env("PROXY_PORT_SESSION", "7653") |> String.to_integer(), proxy_port_transaction: System.get_env("PROXY_PORT_TRANSACTION", "7654") |> String.to_integer(), + proxy_port: System.get_env("PROXY_PORT", "5412") |> String.to_integer(), secondary_proxy_port: 7655, secondary_http: 4003, prom_poll_rate: 500, api_blocklist: [ "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJvbGUiOiJibG9ja2VkIiwiaWF0IjoxNjQ1MTkyODI0LCJleHAiOjE5NjA3Njg4MjR9.y-V3D1N2e8UTXc5PJzmV9cqMteq0ph2wl0yt42akQgA" ], - metrics_blocklist: [] + metrics_blocklist: [], + node_host: System.get_env("NODE_IP", "127.0.0.1"), + availability_zone: System.get_env("AVAILABILITY_ZONE"), + max_pools: 5, + reconnect_retries: System.get_env("RECONNECT_RETRIES", "5") |> String.to_integer(), + subscribe_retries: System.get_env("SUBSCRIBE_RETRIES", "5") |> String.to_integer(), + local_proxy_multiplier: System.get_env("LOCAL_PROXY_MULTIPLIER", "20") |> String.to_integer() config :supavisor, Supavisor.Repo, username: "postgres", @@ -25,17 +32,6 @@ config 
:supavisor, Supavisor.Repo, pool_size: 10, port: 6432 -config :partisan, - # Which overlay to use - peer_service_manager: :partisan_pluggable_peer_service_manager, - # The listening port for Partisan TCP/IP connections - peer_port: 10200, - channels: [data: %{parallelism: 1}], - # Encoding for pid(), reference() and names - pid_encoding: false, - ref_encoding: false, - remote_ref_format: :improper_list - # We don't run a server during test. If one is required, # you can enable the server option below. config :supavisor, SupavisorWeb.Endpoint, @@ -52,24 +48,8 @@ config :supavisor, Supavisor.Vault, # Print only warnings and errors during test config :logger, :console, - level: :info, - format: "$time [$level] $message $metadata\n", + level: :error, metadata: [:error_code, :file, :line, :pid, :project, :user, :mode] # Initialize plugs at runtime for faster test compilation config :phoenix, :plug_init_mode, :runtime - -config :partisan, - peer_service_manager: :partisan_pluggable_peer_service_manager, - listen_addrs: [ - { - System.get_env("PARTISAN_PEER_IP", "127.0.0.1"), - String.to_integer(System.get_env("PARTISAN_PEER_PORT", "10200")) - } - ], - channels: [ - data: %{parallelism: System.get_env("PARTISAN_PARALLELISM", "5") |> String.to_integer()} - ], - pid_encoding: false, - ref_encoding: false, - remote_ref_format: :improper_list diff --git a/deploy/service/supavisor.service b/deploy/service/supavisor.service index 4a3f362e..25dc545d 100644 --- a/deploy/service/supavisor.service +++ b/deploy/service/supavisor.service @@ -19,3 +19,5 @@ WantedBy=multi-user.target [Service] TasksMax=infinity + +# vi: ft=systemd diff --git a/docker-compose.db.yml b/docker-compose.db.yml index 5d7e9150..00a8f5de 100644 --- a/docker-compose.db.yml +++ b/docker-compose.db.yml @@ -8,11 +8,11 @@ services: - "6432:5432" volumes: - ./dev/postgres:/docker-entrypoint-initdb.d/ - # Uncomment to set MD5 authentication method on unitialized databases + # Uncomment to set MD5 authentication 
method on uninitialized databases # - ./dev/postgres/md5/etc/postgresql/pg_hba.conf:/etc/postgresql/pg_hba.conf - command: postgres -c config_file=/etc/postgresql/postgresql.conf + command: postgres -c config_file=/etc/postgresql/postgresql.conf -c max_prepared_transactions=2000 environment: POSTGRES_HOST: /var/run/postgresql POSTGRES_PASSWORD: postgres - # Uncomment to set MD5 authentication method on unitialized databases + # Uncomment to set MD5 authentication method on uninitialized databases # POSTGRES_INITDB_ARGS: --auth-host=md5 diff --git a/docker-compose.yml b/docker-compose.yml index cd12697a..b09159f4 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -8,13 +8,13 @@ services: - "6432:5432" volumes: - ./dev/postgres:/docker-entrypoint-initdb.d/ - # Uncomment to set MD5 authentication method on unitialized databases + # Uncomment to set MD5 authentication method on uninitialized databases # - ./dev/postgres/md5/etc/postgresql/pg_hba.conf:/etc/postgresql/pg_hba.conf - command: postgres -c config_file=/etc/postgresql/postgresql.conf + command: postgres -c config_file=/etc/postgresql/postgresql.conf -c max_prepared_transactions=2000 environment: POSTGRES_HOST: /var/run/postgresql POSTGRES_PASSWORD: postgres - # Uncomment to set MD5 authentication method on unitialized databases + # Uncomment to set MD5 authentication method on uninitialized databases # POSTGRES_INITDB_ARGS: --auth-host=md5 supavisor: build: . 
diff --git a/docs/configuration/tenants.md b/docs/configuration/tenants.md index 595650cc..4e1560b2 100644 --- a/docs/configuration/tenants.md +++ b/docs/configuration/tenants.md @@ -33,7 +33,7 @@ server `require_user` - require client connection credentials to match `user` credentials in the metadata database -`auth_query` - the query to use when matching credential agains a client +`auth_query` - the query to use when matching credential against a client connection `default_pool_size` - the default size of the database pool diff --git a/docs/connecting/authentication.md b/docs/connecting/authentication.md index 24c1cb0f..f199e48f 100644 --- a/docs/connecting/authentication.md +++ b/docs/connecting/authentication.md @@ -1,7 +1,7 @@ When a client connection is established Supavisor needs to verify the credentials of the connection. -Credential verificiation is done either via `user` records or an `auth_query`. +Credential verification is done either via `user` records or an `auth_query`. ## Tenant User Record diff --git a/docs/development/profiling.md b/docs/development/profiling.md new file mode 100644 index 00000000..35e926e2 --- /dev/null +++ b/docs/development/profiling.md @@ -0,0 +1,34 @@ +Profiling of Supavisor can be done using the [eFlambé][eflambe] project. + +Example profiling session looks like: + +- Start application within IEx session (for example by using `make dev`) +- Within given session you can specify which function you want to trace, by + calling `:eflambe.capture({mod, func, arity}, no_of_captures)`, however it is + useful to have some separate directory to store all traces, for that one can use + quick snippet + + ```elixir + dir = "./tmp/capture-#{DateTime.utc_now()}"; File.mkdir_p!(dir); :eflambe.capture({Supavisor.ClientHandler, :handle_event, 4}, 0, [output_directory: dir]) + ``` + + Which provides a separate directory for each tracing session. +- Generated traces can be viewed in [Speedoscope][] for visual navigation. 
+ +![Speedoscope session example](/docs/images/trace-example.png) + +### Problems to be resolved + +- Currently you can monitor only function calls. Sometimes it would be handy to + monitor a whole process instead, so it would provide a better view into process work. + [Stratus3D/eflambe#47](https://github.com/Stratus3D/eflambe/issues/47) +- Currently if there are fewer than `no_of_captures` calls, then eFlambé will try + to wait for more calls indefinitely. There is no way to listen only for some + period and then just stop. [Stratus3D/eflambe#48](https://github.com/Stratus3D/eflambe/issues/48) +- You will not see arguments of called functions in traces, which means that if you + want to trace long running processes that have a lot of calls to similarly + named functions (like a `gen_statem` process) you will need some manual work to + find which clause matched given trace. [Stratus3D/eflambe#46](https://github.com/Stratus3D/eflambe/issues/46) + +[eflambe]: https://github.com/Stratus3D/eflambe +[Speedoscope]: https://www.speedscope.app/ diff --git a/docs/faq.md b/docs/faq.md index 720b39e4..31a6096b 100644 --- a/docs/faq.md +++ b/docs/faq.md @@ -36,5 +36,5 @@ the tenant. Also running N pools on N nodes for N clients will not scale horizontally as well because all nodes will be doing all the same work of issuing database connections to clients. While not a lot of overhead, at some point this won't -scale and we'd have to run multiple independant clusters and route tenants to +scale and we'd have to run multiple independent clusters and route tenants to clusters to scale horizontally. 
diff --git a/docs/images/trace-example.png b/docs/images/trace-example.png new file mode 100644 index 00000000..2abd5dcd Binary files /dev/null and b/docs/images/trace-example.png differ diff --git a/docs/migrating/pgbouncer.md b/docs/migrating/pgbouncer.md index 02fe0af7..66dbae54 100644 --- a/docs/migrating/pgbouncer.md +++ b/docs/migrating/pgbouncer.md @@ -29,7 +29,7 @@ select count(*) from pg_stat_activity; ## Change Postgres `max_connections` Based on the responses above configure the `default_pool_size` accordingly or -increase your `max_connections` limit on Postgres to accomadate two connection +increase your `max_connections` limit on Postgres to accommodate two connection poolers. e.g if you're using 30 connections out of 100 and you set your diff --git a/docs/monitoring/logs.md b/docs/monitoring/logs.md new file mode 100644 index 00000000..1858a04f --- /dev/null +++ b/docs/monitoring/logs.md @@ -0,0 +1,9 @@ +Supavisor will emit various logs during operation. + +Use these error codes to debug a running Supavisor cluster. + +## Error Codes + +| Code | Description | +| ----------------------- | -------------------------------------------------------------------- | +| MaxClientsInSessionMode | When in Session mode client connections are limited by the pool_size | diff --git a/docs/orms/prisma.md b/docs/orms/prisma.md index c15e887e..9396f535 100644 --- a/docs/orms/prisma.md +++ b/docs/orms/prisma.md @@ -1,6 +1,6 @@ Connecting to a Postgres database with Prisma is easy. -## PgBouncer Compatability +## PgBouncer Compatibility Supavisor pool modes behave the same way as PgBouncer. You should be able to connect to Supavisor with the exact same connection string as you use for @@ -13,7 +13,7 @@ Prisma will use named prepared statements to query Postgres by default. To turn off named prepared statements use `pgbouncer=true` in your connection string with Prisma. -The `pgbouncer=true` connection string parameter is compatable with Supavisor. 
+The `pgbouncer=true` connection string parameter is compatible with Supavisor. ## Prisma Connection Management diff --git a/flake.lock b/flake.lock new file mode 100644 index 00000000..d676e8df --- /dev/null +++ b/flake.lock @@ -0,0 +1,455 @@ +{ + "nodes": { + "cachix": { + "inputs": { + "devenv": "devenv_2", + "flake-compat": [ + "devenv", + "flake-compat" + ], + "nixpkgs": [ + "devenv", + "nixpkgs" + ], + "pre-commit-hooks": [ + "devenv", + "pre-commit-hooks" + ] + }, + "locked": { + "lastModified": 1712055811, + "narHash": "sha256-7FcfMm5A/f02yyzuavJe06zLa9hcMHsagE28ADcmQvk=", + "owner": "cachix", + "repo": "cachix", + "rev": "02e38da89851ec7fec3356a5c04bc8349cae0e30", + "type": "github" + }, + "original": { + "owner": "cachix", + "repo": "cachix", + "type": "github" + } + }, + "devenv": { + "inputs": { + "cachix": "cachix", + "flake-compat": "flake-compat_2", + "nix": "nix_2", + "nixpkgs": [ + "nixpkgs" + ], + "pre-commit-hooks": "pre-commit-hooks" + }, + "locked": { + "lastModified": 1723487333, + "narHash": "sha256-jqi/hVQL6S9lj/HkWaPPZQW/BfP0D0Veb45cpSvfRVE=", + "owner": "cachix", + "repo": "devenv", + "rev": "b285601679c7686f623791ad93a8e0debc322633", + "type": "github" + }, + "original": { + "owner": "cachix", + "repo": "devenv", + "type": "github" + } + }, + "devenv_2": { + "inputs": { + "flake-compat": [ + "devenv", + "cachix", + "flake-compat" + ], + "nix": "nix", + "nixpkgs": "nixpkgs", + "poetry2nix": "poetry2nix", + "pre-commit-hooks": [ + "devenv", + "cachix", + "pre-commit-hooks" + ] + }, + "locked": { + "lastModified": 1708704632, + "narHash": "sha256-w+dOIW60FKMaHI1q5714CSibk99JfYxm0CzTinYWr+Q=", + "owner": "cachix", + "repo": "devenv", + "rev": "2ee4450b0f4b95a1b90f2eb5ffea98b90e48c196", + "type": "github" + }, + "original": { + "owner": "cachix", + "ref": "python-rewrite", + "repo": "devenv", + "type": "github" + } + }, + "flake-compat": { + "flake": false, + "locked": { + "lastModified": 1673956053, + "narHash": 
"sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=", + "owner": "edolstra", + "repo": "flake-compat", + "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9", + "type": "github" + }, + "original": { + "owner": "edolstra", + "repo": "flake-compat", + "type": "github" + } + }, + "flake-compat_2": { + "flake": false, + "locked": { + "lastModified": 1696426674, + "narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=", + "owner": "edolstra", + "repo": "flake-compat", + "rev": "0f9255e01c2351cc7d116c072cb317785dd33b33", + "type": "github" + }, + "original": { + "owner": "edolstra", + "repo": "flake-compat", + "type": "github" + } + }, + "flake-parts": { + "inputs": { + "nixpkgs-lib": "nixpkgs-lib" + }, + "locked": { + "lastModified": 1722555600, + "narHash": "sha256-XOQkdLafnb/p9ij77byFQjDf5m5QYl9b2REiVClC+x4=", + "owner": "hercules-ci", + "repo": "flake-parts", + "rev": "8471fe90ad337a8074e957b69ca4d0089218391d", + "type": "github" + }, + "original": { + "owner": "hercules-ci", + "repo": "flake-parts", + "type": "github" + } + }, + "flake-utils": { + "inputs": { + "systems": "systems" + }, + "locked": { + "lastModified": 1689068808, + "narHash": "sha256-6ixXo3wt24N/melDWjq70UuHQLxGV8jZvooRanIHXw0=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "919d646de7be200f3bf08cb76ae1f09402b6f9b4", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + "flake-utils_2": { + "inputs": { + "systems": "systems_2" + }, + "locked": { + "lastModified": 1710146030, + "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + "gitignore": { + "inputs": { + "nixpkgs": [ + "devenv", + "pre-commit-hooks", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1709087332, + "narHash": 
"sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=", + "owner": "hercules-ci", + "repo": "gitignore.nix", + "rev": "637db329424fd7e46cf4185293b9cc8c88c95394", + "type": "github" + }, + "original": { + "owner": "hercules-ci", + "repo": "gitignore.nix", + "type": "github" + } + }, + "nix": { + "inputs": { + "flake-compat": "flake-compat", + "nixpkgs": [ + "devenv", + "cachix", + "devenv", + "nixpkgs" + ], + "nixpkgs-regression": "nixpkgs-regression" + }, + "locked": { + "lastModified": 1712911606, + "narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=", + "owner": "domenkozar", + "repo": "nix", + "rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12", + "type": "github" + }, + "original": { + "owner": "domenkozar", + "ref": "devenv-2.21", + "repo": "nix", + "type": "github" + } + }, + "nix-github-actions": { + "inputs": { + "nixpkgs": [ + "devenv", + "cachix", + "devenv", + "poetry2nix", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1688870561, + "narHash": "sha256-4UYkifnPEw1nAzqqPOTL2MvWtm3sNGw1UTYTalkTcGY=", + "owner": "nix-community", + "repo": "nix-github-actions", + "rev": "165b1650b753316aa7f1787f3005a8d2da0f5301", + "type": "github" + }, + "original": { + "owner": "nix-community", + "repo": "nix-github-actions", + "type": "github" + } + }, + "nix_2": { + "inputs": { + "flake-compat": [ + "devenv", + "flake-compat" + ], + "nixpkgs": [ + "devenv", + "nixpkgs" + ], + "nixpkgs-regression": "nixpkgs-regression_2" + }, + "locked": { + "lastModified": 1712911606, + "narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=", + "owner": "domenkozar", + "repo": "nix", + "rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12", + "type": "github" + }, + "original": { + "owner": "domenkozar", + "ref": "devenv-2.21", + "repo": "nix", + "type": "github" + } + }, + "nixpkgs": { + "locked": { + "lastModified": 1692808169, + "narHash": "sha256-x9Opq06rIiwdwGeK2Ykj69dNc2IvUH1fY55Wm7atwrE=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": 
"9201b5ff357e781bf014d0330d18555695df7ba8", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixpkgs-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs-lib": { + "locked": { + "lastModified": 1722555339, + "narHash": "sha256-uFf2QeW7eAHlYXuDktm9c25OxOyCoUOQmh5SZ9amE5Q=", + "type": "tarball", + "url": "https://github.com/NixOS/nixpkgs/archive/a5d394176e64ab29c852d03346c1fc9b0b7d33eb.tar.gz" + }, + "original": { + "type": "tarball", + "url": "https://github.com/NixOS/nixpkgs/archive/a5d394176e64ab29c852d03346c1fc9b0b7d33eb.tar.gz" + } + }, + "nixpkgs-regression": { + "locked": { + "lastModified": 1643052045, + "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", + "type": "github" + }, + "original": { + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", + "type": "github" + } + }, + "nixpkgs-regression_2": { + "locked": { + "lastModified": 1643052045, + "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", + "type": "github" + }, + "original": { + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", + "type": "github" + } + }, + "nixpkgs-stable": { + "locked": { + "lastModified": 1710695816, + "narHash": "sha256-3Eh7fhEID17pv9ZxrPwCLfqXnYP006RKzSs0JptsN84=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "614b4613980a522ba49f0d194531beddbb7220d3", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixos-23.11", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs_2": { + "locked": { + "lastModified": 1723603349, + "narHash": "sha256-VMg6N7MryOuvSJ8Sj6YydarnUCkL7cvMdrMcnsJnJCE=", + "path": "/nix/store/qp204s0cpzbhj9yd5vpy7cpa9wxca0f9-source", + "rev": "daf7bb95821b789db24fc1ac21f613db0c1bf2cb", + "type": "path" 
+ }, + "original": { + "id": "nixpkgs", + "type": "indirect" + } + }, + "poetry2nix": { + "inputs": { + "flake-utils": "flake-utils", + "nix-github-actions": "nix-github-actions", + "nixpkgs": [ + "devenv", + "cachix", + "devenv", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1692876271, + "narHash": "sha256-IXfZEkI0Mal5y1jr6IRWMqK8GW2/f28xJenZIPQqkY0=", + "owner": "nix-community", + "repo": "poetry2nix", + "rev": "d5006be9c2c2417dafb2e2e5034d83fabd207ee3", + "type": "github" + }, + "original": { + "owner": "nix-community", + "repo": "poetry2nix", + "type": "github" + } + }, + "pre-commit-hooks": { + "inputs": { + "flake-compat": [ + "devenv", + "flake-compat" + ], + "flake-utils": "flake-utils_2", + "gitignore": "gitignore", + "nixpkgs": [ + "devenv", + "nixpkgs" + ], + "nixpkgs-stable": "nixpkgs-stable" + }, + "locked": { + "lastModified": 1713775815, + "narHash": "sha256-Wu9cdYTnGQQwtT20QQMg7jzkANKQjwBD9iccfGKkfls=", + "owner": "cachix", + "repo": "pre-commit-hooks.nix", + "rev": "2ac4dcbf55ed43f3be0bae15e181f08a57af24a4", + "type": "github" + }, + "original": { + "owner": "cachix", + "repo": "pre-commit-hooks.nix", + "type": "github" + } + }, + "root": { + "inputs": { + "devenv": "devenv", + "flake-parts": "flake-parts", + "nixpkgs": "nixpkgs_2" + } + }, + "systems": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } + }, + "systems_2": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } + } + }, + "root": "root", + 
"version": 7 +} diff --git a/flake.nix b/flake.nix new file mode 100644 index 00000000..6822cc68 --- /dev/null +++ b/flake.nix @@ -0,0 +1,147 @@ +{ + description = "Elixir's application"; + + inputs.nixpkgs.url = "flake:nixpkgs"; + inputs.flake-parts.url = "github:hercules-ci/flake-parts"; + + inputs.devenv = { + url = "github:cachix/devenv"; + inputs.nixpkgs.follows = "nixpkgs"; + }; + + outputs = { + self, + flake-parts, + devenv, + ... + } @ inputs: + flake-parts.lib.mkFlake {inherit inputs;} { + flake = {}; + + systems = [ + "x86_64-linux" + "x86_64-darwin" + "aarch64-linux" + "aarch64-darwin" + ]; + + perSystem = { + self', + inputs', + pkgs, + lib, + ... + }: { + formatter = pkgs.alejandra; + + packages = { + # Expose Devenv supervisor + devenv-up = self'.devShells.default.config.procfileScript; + + supavisor = let + erl = pkgs.beam_nox.packages.erlang_27; + in + erl.callPackage ./nix/package.nix {}; + + default = self'.packages.supavisor; + }; + + devShells.default = devenv.lib.mkShell { + inherit inputs pkgs; + + modules = [ + { + pre-commit.hooks = { + alejandra.enable = true; + typos = { + enable = true; + excludes = [ + "test/integration/" + ]; + }; + }; + } + { + languages.elixir = { + enable = true; + package = pkgs.beam.packages.erlang_27.elixir_1_17; + }; + packages = [ + pkgs.lexical + ]; + + pre-commit.hooks = { + # credo.enable = true; + }; + + # env.DYLD_INSERT_LIBRARIES = "${pkgs.mimalloc}/lib/libmimalloc.dylib"; + } + { + packages = [ + pkgs.pgbouncer + ]; + + services.postgres = { + enable = true; + package = pkgs.postgresql_15; + initialScript = '' + ${builtins.readFile ./dev/postgres/00-setup.sql} + + CREATE USER postgres SUPERUSER PASSWORD 'postgres'; + ''; + listen_addresses = "127.0.0.1"; + port = 6432; + settings = { + max_prepared_transactions = 262143; + }; + }; + + process.implementation = "honcho"; + + # Force connection through TCP instead of Unix socket + env.PGHOST = lib.mkForce ""; + } + { + languages.javascript = { + enable = 
true; + bun.enable = true; + yarn.enable = true; + }; + } + ({ + pkgs, + lib, + config, + ... + }: { + languages.rust.enable = true; + languages.cplusplus.enable = true; + + packages = + [ + pkgs.protobuf + pkgs.cargo-outdated + ] + ++ lib.optionals pkgs.stdenv.isDarwin (with pkgs.darwin.apple_sdk; [ + frameworks.System + frameworks.CoreFoundation + frameworks.CoreServices + frameworks.DiskArbitration + frameworks.IOKit + frameworks.CFNetwork + frameworks.Security + libs.libDER + ]); + + # Workaround for https://github.com/rust-lang/cargo/issues/5376 + env.RUSTFLAGS = lib.mkForce (lib.optionals pkgs.stdenv.isDarwin [ + "-L framework=${config.devenv.profile}/Library/Frameworks" + "-C link-arg=-undefined" + "-C link-arg=dynamic_lookup" + ]); + }) + ]; + }; + }; + }; +} diff --git a/lib/cluster/strategy/postgres.ex b/lib/cluster/strategy/postgres.ex index 89493841..08d8484e 100644 --- a/lib/cluster/strategy/postgres.ex +++ b/lib/cluster/strategy/postgres.ex @@ -18,8 +18,8 @@ defmodule Cluster.Strategy.Postgres do @vsn "1.1.49" - alias Cluster.Strategy alias Cluster.Logger + alias Cluster.Strategy alias Postgrex, as: P def start_link(args), do: GenServer.start_link(__MODULE__, args) @@ -38,7 +38,6 @@ defmodule Cluster.Strategy.Postgres do state.config |> Keyword.put_new(:heartbeat_interval, 5_000) |> Keyword.put_new(:channel_name, "cluster") - |> Keyword.put_new(:channel_name_partisan, "cluster_partisan") |> Keyword.delete(:url) meta = %{ @@ -54,8 +53,7 @@ defmodule Cluster.Strategy.Postgres do def handle_continue(:connect, state) do with {:ok, conn} <- P.start_link(state.meta.opts.()), {:ok, conn_notif} <- P.Notifications.start_link(state.meta.opts.()), - {_, _} <- P.Notifications.listen(conn_notif, state.config[:channel_name]), - {_, _} <- P.Notifications.listen(conn_notif, state.config[:channel_name_partisan]) do + {_, _} <- P.Notifications.listen(conn_notif, state.config[:channel_name]) do Logger.info(state.topology, "Connected to Postgres database") meta = %{ @@ 
-76,24 +74,15 @@ defmodule Cluster.Strategy.Postgres do def handle_info(:heartbeat, state) do Process.cancel_timer(state.meta.heartbeat_ref) P.query(state.meta.conn, "NOTIFY #{state.config[:channel_name]}, '#{node()}'", []) - - P.query( - state.meta.conn, - "NOTIFY #{state.config[:channel_name_partisan]}, '#{partisan_peer_spec_enc()}'", - [] - ) - ref = heartbeat(state.config[:heartbeat_interval]) {:noreply, put_in(state.meta.heartbeat_ref, ref)} end def handle_info({:notification, _, _, channel, msg}, state) do disterl = state.config[:channel_name] - partisan = state.config[:channel_name_partisan] case channel do ^disterl -> handle_channels(:disterl, msg, state) - ^partisan -> handle_channels(:partisan, msg, state) other -> Logger.error(state.topology, "Unknown channel: #{other}") end @@ -105,20 +94,6 @@ defmodule Cluster.Strategy.Postgres do {:noreply, state} end - def code_change("1.1.48", state, _) do - Logger.info(state.topology, "Update state from 1.1.48") - - partisan_channel = - Application.get_env(:libcluster, :topologies) - |> get_in([:postgres, :config, :channel_name_partisan]) - - new_config = - state.config - |> Keyword.put(:channel_name_partisan, partisan_channel) - - {:ok, %{state | config: new_config}} - end - def code_change(_, state, _), do: {:ok, state} ### Internal functions @@ -127,7 +102,7 @@ defmodule Cluster.Strategy.Postgres do Process.send_after(self(), :heartbeat, interval) end - @spec handle_channels(:disterl | :partisan, String.t(), map()) :: any() + @spec handle_channels(:disterl, String.t(), map()) :: any() def handle_channels(:disterl, msg, state) do node = String.to_atom(msg) @@ -144,39 +119,4 @@ defmodule Cluster.Strategy.Postgres do end end end - - def handle_channels(:partisan, msg, state) do - spec = partisan_peer_spec_dec(msg) - - if spec.name not in [:partisan.node() | :partisan.nodes()] do - spec = partisan_peer_spec_dec(msg) - topology = state.topology - - Logger.debug( - topology, - "Trying to connect to partisan node: 
#{inspect(spec, pretty: true)}" - ) - - case :partisan_peer_service.join(spec) do - :ok -> - Logger.debug(topology, "Connected to node: #{inspect(spec, pretty: true)}") - - other -> - Logger.error(topology, "Failed to connect to partisan node: #{other}") - end - end - end - - @spec partisan_peer_spec_enc() :: String.t() - def partisan_peer_spec_enc() do - :partisan.node_spec() - |> :erlang.term_to_binary() - |> Base.encode64() - end - - @spec partisan_peer_spec_dec(String.t()) :: term() - def partisan_peer_spec_dec(spec) do - Base.decode64!(spec) - |> :erlang.binary_to_term() - end end diff --git a/lib/supavisor.ex b/lib/supavisor.ex index 0745b658..5507ccad 100644 --- a/lib/supavisor.ex +++ b/lib/supavisor.ex @@ -1,38 +1,45 @@ defmodule Supavisor do @moduledoc false + require Logger - import Cachex.Spec - alias Supavisor.Helpers, as: H - alias Supavisor.Tenants, as: T - alias Supavisor.Manager + + alias Supavisor.{ + Helpers, + Manager, + Tenants + } @type sock :: tcp_sock() | ssl_sock() @type ssl_sock :: {:ssl, :ssl.sslsocket()} @type tcp_sock :: {:gen_tcp, :gen_tcp.socket()} @type workers :: %{manager: pid, pool: pid} @type secrets :: {:password | :auth_query, fun()} - @type mode :: :transaction | :session | :native - @type id :: {{:single | :cluster, String.t()}, String.t(), mode, String.t()} + @type mode :: :transaction | :session | :native | :proxy + @type id :: {{:single | :cluster, String.t()}, String.t(), mode, String.t(), String.t() | nil} @type subscribe_opts :: %{workers: workers, ps: list, idle_timeout: integer} @registry Supavisor.Registry.Tenants + @max_pools Application.compile_env(:supavisor, :max_pools, 20) @spec start_dist(id, secrets, keyword()) :: {:ok, pid()} | {:error, any()} def start_dist(id, secrets, options \\ []) do - options = Keyword.validate!(options, log_level: nil, force_node: false) + options = + Keyword.validate!(options, log_level: nil, force_node: false, availability_zone: nil) + log_level = Keyword.fetch!(options, :log_level) 
force_node = Keyword.fetch!(options, :force_node) + availability_zone = Keyword.fetch!(options, :availability_zone) case get_global_sup(id) do nil -> - node = if force_node, do: force_node, else: determine_node(id) + node = if force_node, do: force_node, else: determine_node(id, availability_zone) if node == node() do Logger.debug("Starting local pool for #{inspect(id)}") - start_local_pool(id, secrets, log_level) + try_start_local_pool(id, secrets, log_level) else Logger.debug("Starting remote pool for #{inspect(id)}") - H.rpc(node, __MODULE__, :start_local_pool, [id, secrets, log_level]) + Helpers.rpc(node, __MODULE__, :try_start_local_pool, [id, secrets, log_level]) end pid -> @@ -44,7 +51,7 @@ defmodule Supavisor do def start(id, secrets) do case get_global_sup(id) do nil -> - start_local_pool(id, secrets) + try_start_local_pool(id, secrets, nil) pid -> {:ok, pid} @@ -75,13 +82,10 @@ defmodule Supavisor do end @spec subscribe_local(pid, id) :: {:ok, subscribe_opts} | {:error, any()} - def(subscribe_local(pid, id)) do + def subscribe_local(pid, id) do with {:ok, workers} <- get_local_workers(id), {:ok, ps, idle_timeout} <- Manager.subscribe(workers.manager, pid) do {:ok, %{workers: workers, ps: ps, idle_timeout: idle_timeout}} - else - error -> - error end end @@ -92,7 +96,7 @@ defmodule Supavisor do if node() == dest_node do subscribe_local(pid, id) else - H.rpc(dest_node, __MODULE__, :subscribe_local, [pid, id], 15_000) + Helpers.rpc(dest_node, __MODULE__, :subscribe_local, [pid, id], 15_000) end end @@ -148,6 +152,7 @@ defmodule Supavisor do {:secrets, ^tenant, ^user} = key, acc -> del.(key, acc) {:user_cache, _, ^user, ^tenant, _} = key, acc -> del.(key, acc) {:tenant_cache, ^tenant, _} = key, acc -> del.(key, acc) + {:pool_config_cache, ^tenant, ^user} = key, acc -> del.(key, acc) _, acc -> acc end) end @@ -161,17 +166,25 @@ defmodule Supavisor do [%{inspect(key) => inspect(result)} | acc] end - Supavisor.Cache - |> Cachex.stream!() - |> Enum.reduce([], fn 
entry(key: key), acc -> - case key do - {:metrics, ^tenant} -> del.(key, acc) - {:secrets, ^tenant, _} -> del.(key, acc) - {:user_cache, _, _, ^tenant, _} -> del.(key, acc) - {:tenant_cache, ^tenant, _} -> del.(key, acc) - _ -> acc - end - end) + :ets.foldl( + fn + {:entry, key, _, _, _result}, acc -> + case key do + {:metrics, ^tenant} -> del.(key, acc) + {:secrets, ^tenant, _} -> del.(key, acc) + {:user_cache, _, _, ^tenant, _} -> del.(key, acc) + {:tenant_cache, ^tenant, _} -> del.(key, acc) + {:pool_config_cache, ^tenant, _} -> del.(key, acc) + _ -> acc + end + + other, acc -> + Logger.error("Unknown key: #{inspect(other)}") + acc + end, + [], + Supavisor.Cache + ) end @spec del_all_cache_dist(String.t(), pos_integer()) :: [map()] @@ -210,8 +223,9 @@ defmodule Supavisor do end end - @spec id({:single | :cluster, String.t()}, String.t(), mode, mode, String.t()) :: id - def id(tenant, user, port_mode, user_mode, db_name) do + @spec id({:single | :cluster, String.t()}, String.t(), mode, mode, String.t(), String.t() | nil) :: + id + def id(tenant, user, port_mode, user_mode, db_name, search_path) do # temporary hack mode = if port_mode == :transaction do @@ -220,42 +234,72 @@ defmodule Supavisor do port_mode end - {tenant, user, mode, db_name} + {tenant, user, mode, db_name, search_path} end @spec tenant(id) :: String.t() - def tenant({{_, tenant}, _, _, _}), do: tenant + def tenant({{_, tenant}, _, _, _, _}), do: tenant @spec mode(id) :: atom() - def mode({_, _, mode, _}), do: mode + def mode({_, _, mode, _, _}), do: mode - @spec determine_node(id) :: Node.t() - def determine_node(id) do + @spec search_path(id) :: String.t() | nil + def search_path({_, _, _, _, search_path}), do: search_path + + @spec determine_node(id, String.t() | nil) :: Node.t() + def determine_node(id, availability_zone) do tenant_id = tenant(id) - nodes = [node() | Node.list()] |> Enum.sort() + + # If the AWS zone group is empty, we will use all nodes. 
+ # If the AWS zone group exists with the same zone, we will use nodes from this group. + # :syn.members(:availability_zone, "1c") + # [{#PID<0.381.0>, [node: :"node1@127.0.0.1"]}] + nodes = + with zone when is_binary(zone) <- availability_zone, + zone_nodes when zone_nodes != [] <- :syn.members(:availability_zone, zone) do + zone_nodes + |> Enum.map(fn {_, [node: node]} -> node end) + else + _ -> [node() | Node.list()] + end + index = :erlang.phash2(tenant_id, length(nodes)) - Enum.at(nodes, index) + + nodes + |> Enum.sort() + |> Enum.at(index) + end + + @spec try_start_local_pool(id, secrets, atom()) :: {:ok, pid} | {:error, any} + def try_start_local_pool(id, secrets, log_level) do + if count_pools(tenant(id)) < @max_pools, + do: start_local_pool(id, secrets, log_level), + else: {:error, :max_pools_reached} end @spec start_local_pool(id, secrets, atom()) :: {:ok, pid} | {:error, any} - def start_local_pool({{type, tenant}, _user, _mode, _db_name} = id, secrets, log_level \\ nil) do - Logger.debug("Starting pool(s) for #{inspect(id)}") + def start_local_pool( + {{type, tenant}, _user, _mode, _db_name, _search_path} = id, + secrets, + log_level \\ nil + ) do + Logger.info("Starting pool(s) for #{inspect(id)}") user = elem(secrets, 1).().alias case type do - :single -> T.get_pool_config(tenant, user) - :cluster -> T.get_cluster_config(tenant, user) + :single -> Tenants.get_pool_config_cache(tenant, user) + :cluster -> Tenants.get_cluster_config(tenant, user) end |> case do [_ | _] = replicas -> opts = Enum.map(replicas, fn replica -> case replica do - %T.ClusterTenants{tenant: tenant, type: type} -> + %Tenants.ClusterTenants{tenant: tenant, type: type} -> Map.put(tenant, :replica_type, type) - %T.Tenant{} = tenant -> + %Tenants.Tenant{} = tenant -> Map.put(tenant, :replica_type, :write) end |> supervisor_args(id, secrets, log_level) @@ -281,7 +325,7 @@ defmodule Supavisor do defp supervisor_args( tenant_record, - {tenant, user, mode, db_name} = id, + {tenant, user, 
mode, db_name, _search_path} = id, {method, secrets}, log_level ) do @@ -289,17 +333,20 @@ defmodule Supavisor do db_host: db_host, db_port: db_port, db_database: db_database, + auth_query: auth_query, default_parameter_status: ps, ip_version: ip_ver, default_pool_size: def_pool_size, default_max_clients: def_max_clients, client_idle_timeout: client_idle_timeout, replica_type: replica_type, + sni_hostname: sni_hostname, users: [ %{ db_user: db_user, db_password: db_pass, pool_size: pool_size, + db_user_alias: alias, # mode_type: mode_type, max_clients: max_clients } @@ -315,15 +362,18 @@ defmodule Supavisor do auth = %{ host: String.to_charlist(db_host), + sni_hostname: if(sni_hostname != nil, do: to_charlist(sni_hostname)), port: db_port, user: db_user, + alias: alias, + auth_query: auth_query, database: if(db_name != nil, do: db_name, else: db_database), password: fn -> db_pass end, application_name: "Supavisor", - ip_version: H.ip_version(ip_ver, db_host), + ip_version: Helpers.ip_version(ip_ver, db_host), upstream_ssl: tenant_record.upstream_ssl, upstream_verify: tenant_record.upstream_verify, - upstream_tls_ca: H.upstream_cert(tenant_record.upstream_tls_ca), + upstream_tls_ca: Helpers.upstream_cert(tenant_record.upstream_tls_ca), require_user: tenant_record.require_user, method: method, secrets: secrets @@ -352,4 +402,39 @@ defmodule Supavisor do pid -> Manager.set_parameter_status(pid, ps) end end + + @spec get_pool_ranch(id) :: {:ok, map()} | {:error, :not_found} + def get_pool_ranch(id) do + case :syn.lookup(:tenants, id) do + {_sup_pid, %{port: _port, host: _host} = meta} -> {:ok, meta} + _ -> {:error, :not_found} + end + end + + @spec start_local_server(map()) :: {:ok, map()} | {:error, any()} + def start_local_server(%{max_clients: max_clients} = args) do + # max_clients=-1 is used for testing the maximum allowed clients in ProxyTest + {acceptors, max_clients} = + if max_clients > 0, + do: {ceil(max_clients / 100), max_clients}, + else: {1, 100} + + opts 
= %{ + max_connections: max_clients * Application.get_env(:supavisor, :local_proxy_multiplier), + num_acceptors: max(acceptors, 10), + socket_opts: [port: 0, keepalive: true] + } + + handler = Supavisor.ClientHandler + args = Map.put(args, :local, true) + + with {:ok, pid} <- :ranch.start_listener(args.id, :ranch_tcp, opts, handler, args) do + host = Application.get_env(:supavisor, :node_host) + {:ok, %{listener: pid, host: host, port: :ranch.get_port(args.id)}} + end + end + + @spec count_pools(String.t()) :: non_neg_integer() + def count_pools(tenant), + do: Registry.count_match(Supavisor.Registry.TenantSups, tenant, :_) end diff --git a/lib/supavisor/application.ex b/lib/supavisor/application.ex index 0a90c5e5..2b7321b1 100644 --- a/lib/supavisor/application.ex +++ b/lib/supavisor/application.ex @@ -4,9 +4,13 @@ defmodule Supavisor.Application do @moduledoc false use Application + require Logger + alias Supavisor.Monitoring.PromEx + @metrics_disabled Application.compile_env(:supavisor, :metrics_disabled, false) + @impl true def start(_type, _args) do primary_config = :logger.get_primary_config() @@ -15,7 +19,10 @@ defmodule Supavisor.Application do :logger.set_primary_config( :metadata, Enum.into( - [region: System.get_env("REGION"), instance_id: System.get_env("INSTANCE_ID")], + [ + region: System.get_env("AVAILABILITY_ZONE") || System.get_env("REGION"), + instance_id: System.get_env("INSTANCE_ID") + ], primary_config.metadata ) ) @@ -29,41 +36,51 @@ defmodule Supavisor.Application do proxy_ports = [ {:pg_proxy_transaction, Application.get_env(:supavisor, :proxy_port_transaction), - :transaction}, - {:pg_proxy_session, Application.get_env(:supavisor, :proxy_port_session), :session} + :transaction, Supavisor.ClientHandler}, + {:pg_proxy_session, Application.get_env(:supavisor, :proxy_port_session), :session, + Supavisor.ClientHandler}, + {:pg_proxy, Application.get_env(:supavisor, :proxy_port), :proxy, Supavisor.ClientHandler} ] - for {key, port, mode} <- 
proxy_ports do - :ranch.start_listener( - key, - :ranch_tcp, - %{ - max_connections: String.to_integer(System.get_env("MAX_CONNECTIONS") || "25000"), - num_acceptors: String.to_integer(System.get_env("NUM_ACCEPTORS") || "100"), - socket_opts: [port: port, keepalive: true] - }, - Supavisor.ClientHandler, - %{mode: mode} - ) - |> then(&"Proxy started #{mode} on port #{port}, result: #{inspect(&1)}") - |> Logger.warning() + for {key, port, mode, handler} <- proxy_ports do + case :ranch.start_listener( + key, + :ranch_tcp, + %{ + max_connections: String.to_integer(System.get_env("MAX_CONNECTIONS") || "75000"), + num_acceptors: String.to_integer(System.get_env("NUM_ACCEPTORS") || "100"), + socket_opts: [port: port, keepalive: true] + }, + handler, + %{mode: mode} + ) do + {:ok, _pid} -> + Logger.notice("Proxy started #{mode} on port #{port}") + + error -> + Logger.error("Proxy on #{port} not started because of #{inspect(error)}") + end end :syn.set_event_handler(Supavisor.SynHandler) - :syn.add_node_to_scopes([:tenants]) + :syn.add_node_to_scopes([:tenants, :availability_zone]) - PromEx.set_metrics_tags() + :syn.join(:availability_zone, Application.get_env(:supavisor, :availability_zone), self(), + node: node() + ) topologies = Application.get_env(:libcluster, :topologies) || [] children = [ Supavisor.ErlSysMon, - PromEx, {Registry, keys: :unique, name: Supavisor.Registry.Tenants}, {Registry, keys: :unique, name: Supavisor.Registry.ManagerTables}, {Registry, keys: :unique, name: Supavisor.Registry.PoolPids}, {Registry, keys: :duplicate, name: Supavisor.Registry.TenantSups}, - {Registry, keys: :duplicate, name: Supavisor.Registry.TenantClients}, + {Registry, + keys: :duplicate, + name: Supavisor.Registry.TenantClients, + partitions: System.schedulers_online()}, {Cluster.Supervisor, [topologies, [name: Supavisor.ClusterSupervisor]]}, Supavisor.Repo, # Start the Telemetry supervisor @@ -75,11 +92,21 @@ defmodule Supavisor.Application do child_spec: DynamicSupervisor, 
strategy: :one_for_one, name: Supavisor.DynamicSupervisor }, Supavisor.Vault, - Supavisor.TenantsMetrics, + # Start the Endpoint (http/https) SupavisorWeb.Endpoint ] + Logger.warning("metrics_disabled is #{inspect(@metrics_disabled)}") + + children = + if @metrics_disabled do + children + else + PromEx.set_metrics_tags() + children ++ [PromEx, Supavisor.TenantsMetrics, Supavisor.MetricsCleaner] + end + # start Cachex only if the node uses names, this is necessary for test setup children = if node() != :nonode@nohost do diff --git a/lib/supavisor/client_handler.ex b/lib/supavisor/client_handler.ex index 4c46f5fe..f4864395 100644 --- a/lib/supavisor/client_handler.ex +++ b/lib/supavisor/client_handler.ex @@ -1,7 +1,7 @@ defmodule Supavisor.ClientHandler do @moduledoc """ This module is responsible for handling incoming connections to the Supavisor server. It is - implemented as a Ranch protocol behavior and a partisan_gen_statem behavior. It handles SSL negotiation, + implemented as a Ranch protocol behavior and a gen_statem behavior. It handles SSL negotiation, user authentication, tenant subscription, and dispatching of messages to the appropriate tenant supervisor. Each client connection is assigned to a specific tenant supervisor. 
""" @@ -9,16 +9,25 @@ defmodule Supavisor.ClientHandler do require Logger @behaviour :ranch_protocol - @behaviour :partisan_gen_statem - - alias Supavisor, as: S - alias Supavisor.DbHandler, as: Db - alias Supavisor.Helpers, as: H - alias Supavisor.HandlerHelpers, as: HH - alias Supavisor.{Tenants, Monitoring.Telem, Protocol.Client, Protocol.Server} + @behaviour :gen_statem + @proto [:tcp, :ssl] + @cancel_query_msg <<16::32, 1234::16, 5678::16>> + @switch_active_count Application.compile_env(:supavisor, :switch_active_count) + @subscribe_retries Application.compile_env(:supavisor, :subscribe_retries) + @timeout_subscribe 500 + + alias Supavisor.{ + DbHandler, + HandlerHelpers, + Helpers, + Monitoring.Telem, + Protocol.Client, + Protocol.Server, + Tenants + } @impl true - def start_link(ref, _sock, transport, opts) do + def start_link(ref, transport, opts) do pid = :proc_lib.spawn_link(__MODULE__, :init, [ref, transport, opts]) {:ok, pid} end @@ -26,23 +35,32 @@ defmodule Supavisor.ClientHandler do @impl true def callback_mode, do: [:handle_event_function] - def client_cast(pid, bin, status) do - :partisan_gen_statem.cast(pid, {:client_cast, bin, status}) - end - - @spec client_call(pid, iodata(), atom()) :: :ok | {:error, term()} - def client_call(pid, bin, status), - do: :partisan_gen_statem.call(pid, {:client_call, bin, status}, 30_000) + @spec db_status(pid(), :ready_for_query | :read_sql_error, binary()) :: :ok + def db_status(pid, status, bin), do: :gen_statem.cast(pid, {:db_status, status, bin}) @impl true def init(_), do: :ignore def init(ref, trans, opts) do Process.flag(:trap_exit, true) - H.set_max_heap_size(150) + Helpers.set_max_heap_size(90) {:ok, sock} = :ranch.handshake(ref) - :ok = trans.setopts(sock, active: true) + + :ok = + trans.setopts(sock, + # mode: :binary, + # packet: :raw, + # recbuf: 8192, + # sndbuf: 8192, + # # backlog: 2048, + # send_timeout: 120, + # keepalive: true, + # nodelay: true, + # nopush: true, + active: true + ) + 
Logger.debug("ClientHandler is: #{inspect(self())}") data = %{ @@ -67,17 +85,24 @@ defmodule Supavisor.ClientHandler do last_query: nil, heartbeat_interval: 0, connection_start: System.monotonic_time(), - log_level: nil + log_level: nil, + auth: %{}, + tenant_availability_zone: nil, + local: opts[:local] || false, + active_count: 0, + peer_ip: Helpers.peer_ip(sock), + app_name: nil, + subscribe_retries: 0 } - :partisan_gen_statem.enter_loop(__MODULE__, [hibernate_after: 5_000], :exchange, data) + :gen_statem.enter_loop(__MODULE__, [hibernate_after: 5_000], :exchange, data) end @impl true def handle_event(:info, {_proto, _, <<"GET", _::binary>>}, :exchange, data) do Logger.debug("ClientHandler: Client is trying to request HTTP") - HH.sock_send( + HandlerHelpers.sock_send( data.sock, "HTTP/1.1 204 OK\r\nx-app-version: #{Application.spec(:supavisor, :vsn)}\r\n\r\n" ) @@ -86,9 +111,9 @@ defmodule Supavisor.ClientHandler do end # cancel request - def handle_event(:info, {_, _, <<16::32, 1234::16, 5678::16, pid::32, key::32>>}, _, _) do + def handle_event(:info, {_, _, <<@cancel_query_msg, pid::32, key::32>>}, _, _) do Logger.debug("ClientHandler: Got cancel query for #{inspect({pid, key})}") - :ok = HH.send_cancel_query(pid, key) + :ok = HandlerHelpers.send_cancel_query(pid, key) {:stop, {:shutdown, :cancel_query}} end @@ -96,11 +121,11 @@ defmodule Supavisor.ClientHandler do def handle_event(:info, :cancel_query, :busy, data) do key = {data.tenant, data.db_pid} Logger.debug("ClientHandler: Cancel query for #{inspect(key)}") - {_pool, db_pid} = data.db_pid + {_pool, db_pid, _db_sock} = data.db_pid case db_pid_meta(key) do [{^db_pid, meta}] -> - :ok = HH.cancel_query(meta.host, meta.port, meta.ip_ver, meta.pid, meta.key) + :ok = HandlerHelpers.cancel_query(meta.host, meta.port, meta.ip_ver, meta.pid, meta.key) error -> Logger.error( @@ -114,13 +139,13 @@ defmodule Supavisor.ClientHandler do def handle_event(:info, {:tcp, _, <<_::64>>}, :exchange, %{sock: sock} = data) do 
Logger.debug("ClientHandler: Client is trying to connect with SSL") - downstream_cert = H.downstream_cert() - downstream_key = H.downstream_key() + downstream_cert = Helpers.downstream_cert() + downstream_key = Helpers.downstream_key() # SSL negotiation, S/N/Error if !!downstream_cert and !!downstream_key do - :ok = HH.setopts(sock, active: false) - :ok = HH.sock_send(sock, "S") + :ok = HandlerHelpers.setopts(sock, active: false) + :ok = HandlerHelpers.sock_send(sock, "S") opts = [ certfile: downstream_cert, @@ -130,7 +155,7 @@ defmodule Supavisor.ClientHandler do case :ssl.handshake(elem(sock, 1), opts) do {:ok, ssl_sock} -> socket = {:ssl, ssl_sock} - :ok = HH.setopts(socket, active: true) + :ok = HandlerHelpers.setopts(socket, active: true) {:keep_state, %{data | sock: socket, ssl: true}} error -> @@ -143,36 +168,42 @@ defmodule Supavisor.ClientHandler do "ClientHandler: User requested SSL connection but no downstream cert/key found" ) - :ok = HH.sock_send(data.sock, "N") + :ok = HandlerHelpers.sock_send(data.sock, "N") :keep_state_and_data end end + def handle_event(:info, {_, _, bin}, :exchange, _) when byte_size(bin) > 1024 do + Logger.error("ClientHandler: Startup packet too large #{byte_size(bin)}") + {:stop, {:shutdown, :startup_packet_too_large}} + end + def handle_event(:info, {_, _, bin}, :exchange, data) do case Server.decode_startup_packet(bin) do {:ok, hello} -> Logger.debug("ClientHandler: Client startup message: #{inspect(hello)}") - {type, {user, tenant_or_alias, db_name}} = HH.parse_user_info(hello.payload) + {type, {user, tenant_or_alias, db_name}} = HandlerHelpers.parse_user_info(hello.payload) - not_allowed = ["\"", "\\"] + if Helpers.validate_name(user) and Helpers.validate_name(db_name) do + log_level = maybe_change_log(hello) + search_path = hello.payload["options"]["--search_path"] + event = {:hello, {type, {user, tenant_or_alias, db_name, search_path}}} + app_name = app_name(hello.payload["application_name"]) - if String.contains?(user, 
not_allowed) or String.contains?(db_name, not_allowed) do - reason = "Invalid characters in user or db_name" - Logger.error("ClientHandler: #{inspect(reason)}") - Telem.client_join(:fail, data.id) - HH.send_error(data.sock, "XX000", "Authentication error, reason: #{inspect(reason)}") - {:stop, {:shutdown, :invalid_characters}} + {:keep_state, %{data | log_level: log_level, app_name: app_name}, + {:next_event, :internal, event}} else - log_level = - case hello.payload["options"]["log_level"] do - nil -> nil - level -> String.to_existing_atom(level) - end - - H.set_log_level(log_level) + reason = "Invalid format for user or db_name" + Logger.error("ClientHandler: #{inspect(reason)} #{inspect({user, db_name})}") + Telem.client_join(:fail, tenant_or_alias) + + HandlerHelpers.send_error( + data.sock, + "XX000", + "Authentication error, reason: #{inspect(reason)}" + ) - {:keep_state, %{data | log_level: log_level}, - {:next_event, :internal, {:hello, {type, {user, tenant_or_alias, db_name}}}}} + {:stop, {:shutdown, :invalid_format}} end {:error, error} -> @@ -184,11 +215,11 @@ defmodule Supavisor.ClientHandler do def handle_event( :internal, - {:hello, {type, {user, tenant_or_alias, db_name}}}, + {:hello, {type, {user, tenant_or_alias, db_name, search_path}}}, :exchange, %{sock: sock} = data ) do - sni_hostname = HH.try_get_sni(sock) + sni_hostname = HandlerHelpers.try_get_sni(sock) case Tenants.get_user_cache(type, user, tenant_or_alias, sni_hostname) do {:ok, info} -> @@ -200,37 +231,41 @@ defmodule Supavisor.ClientHandler do user, data.mode, info.user.mode_type, - db_name + db_name, + search_path ) - mode = S.mode(id) + mode = Supavisor.mode(id) Logger.metadata( project: tenant_or_alias, user: user, mode: mode, type: type, - db_name: db_name + db_name: db_name, + app_name: data.app_name, + peer_ip: data.peer_ip, + local: data.local ) Registry.register(Supavisor.Registry.TenantClients, id, []) - {:ok, addr} = HH.addr_from_sock(sock) + {:ok, addr} = 
HandlerHelpers.addr_from_sock(sock) cond do - info.tenant.enforce_ssl and !data.ssl -> + !data.local and info.tenant.enforce_ssl and !data.ssl -> Logger.error( "ClientHandler: Tenant is not allowed to connect without SSL, user #{user}" ) - :ok = HH.send_error(sock, "XX000", "SSL connection is required") + :ok = HandlerHelpers.send_error(sock, "XX000", "SSL connection is required") Telem.client_join(:fail, id) {:stop, {:shutdown, :ssl_required}} - HH.filter_cidrs(info.tenant.allow_list, addr) == [] -> + HandlerHelpers.filter_cidrs(info.tenant.allow_list, addr) == [] -> message = "Address not in tenant allow_list: " <> inspect(addr) Logger.error("ClientHandler: #{message}") - :ok = HH.send_error(sock, "XX000", message) + :ok = HandlerHelpers.send_error(sock, "XX000", message) Telem.client_join(:fail, id) {:stop, {:shutdown, :address_not_allowed}} @@ -252,7 +287,11 @@ defmodule Supavisor.ClientHandler do ) :ok = - HH.send_error(sock, "XX000", "Authentication error, reason: #{inspect(reason)}") + HandlerHelpers.send_error( + sock, + "XX000", + "Authentication error, reason: #{inspect(reason)}" + ) Telem.client_join(:fail, id) {:stop, {:shutdown, :auth_secrets_error}} @@ -264,7 +303,7 @@ defmodule Supavisor.ClientHandler do "ClientHandler: User not found: #{inspect(reason)} #{inspect({type, user, tenant_or_alias})}" ) - :ok = HH.send_error(sock, "XX000", "Tenant or user not found") + :ok = HandlerHelpers.send_error(sock, "XX000", "Tenant or user not found") Telem.client_join(:fail, data.id) {:stop, {:shutdown, :user_not_found}} end @@ -285,11 +324,9 @@ defmodule Supavisor.ClientHandler do ) msg = - if method == :auth_query_md5 do - Server.error_message("XX000", reason) - else - Server.exchange_message(:final, "e=#{reason}") - end + if method == :auth_query_md5, + do: Server.error_message("XX000", reason), + else: Server.exchange_message(:final, "e=#{reason}") key = {:secrets_check, data.tenant, data.user} @@ -297,7 +334,7 @@ defmodule Supavisor.ClientHandler do 
Cachex.get(Supavisor.Cache, key) == {:ok, nil} do case auth_secrets(info, data.user, key, 15_000) do {:ok, {method2, secrets2}} = value -> - if method != method2 || Map.delete(secrets.(), :client_key) != secrets2.() do + if method != method2 or Map.delete(secrets.(), :client_key) != secrets2.() do Logger.warning("ClientHandler: Update secrets and terminate pool") Cachex.update( @@ -318,26 +355,29 @@ defmodule Supavisor.ClientHandler do Logger.debug("ClientHandler: Cache hit for #{inspect(key)}") end - HH.sock_send(sock, msg) + HandlerHelpers.sock_send(sock, msg) Telem.client_join(:fail, data.id) {:stop, {:shutdown, :exchange_error}} {:ok, client_key} -> secrets = - if client_key do - fn -> - Map.put(secrets.(), :client_key, client_key) - end - else - secrets - end + if client_key, + do: fn -> Map.put(secrets.(), :client_key, client_key) end, + else: secrets Logger.debug("ClientHandler: Exchange success") - :ok = HH.sock_send(sock, Server.authentication_ok()) + :ok = HandlerHelpers.sock_send(sock, Server.authentication_ok()) Telem.client_join(:ok, data.id) - {:keep_state, %{data | auth_secrets: {method, secrets}}, - {:next_event, :internal, :subscribe}} + auth = Map.merge(data.auth, %{secrets: secrets, method: method}) + + conn_type = + if data.mode == :proxy, + do: :connect_db, + else: :subscribe + + {:keep_state, %{data | auth_secrets: {method, secrets}, auth: auth}, + {:next_event, :internal, conn_type}} end end @@ -345,7 +385,15 @@ defmodule Supavisor.ClientHandler do Logger.debug("ClientHandler: Subscribe to tenant #{inspect(data.id)}") with {:ok, sup} <- - Supavisor.start_dist(data.id, data.auth_secrets, log_level: data.log_level), + Supavisor.start_dist(data.id, data.auth_secrets, + log_level: data.log_level, + availability_zone: data.tenant_availability_zone + ), + true <- + if(node(sup) != node() and data.mode in [:transaction, :session], + do: :proxy, + else: true + ), {:ok, opts} <- Supavisor.subscribe(sup, data.id) do 
Process.monitor(opts.workers.manager) data = Map.merge(data, opts.workers) @@ -353,39 +401,84 @@ defmodule Supavisor.ClientHandler do data = %{data | db_pid: db_pid, idle_timeout: opts.idle_timeout} next = - if opts.ps == [] do - {:timeout, 10_000, :wait_ps} - else - {:next_event, :internal, {:greetings, opts.ps}} - end + if opts.ps == [], + do: {:timeout, 10_000, :wait_ps}, + else: {:next_event, :internal, {:greetings, opts.ps}} {:keep_state, data, next} else {:error, :max_clients_reached} -> msg = "Max client connections reached" Logger.error("ClientHandler: #{msg}") - :ok = HH.send_error(data.sock, "XX000", msg) + :ok = HandlerHelpers.send_error(data.sock, "XX000", msg) Telem.client_join(:fail, data.id) {:stop, {:shutdown, :max_clients_reached}} + {:error, :max_pools_reached} -> + msg = "Max pools count reached" + Logger.error("ClientHandler: #{msg}") + :ok = HandlerHelpers.send_error(data.sock, "XX000", msg) + Telem.client_join(:fail, data.id) + {:stop, {:shutdown, :max_pools_reached}} + + :proxy -> + case Supavisor.get_pool_ranch(data.id) do + {:ok, %{port: port, host: host}} -> + auth = + Map.merge(data.auth, %{ + port: port, + host: to_charlist(host), + ip_version: :inet, + upstream_ssl: false, + upstream_tls_ca: nil, + upstream_verify: nil + }) + + {:keep_state, %{data | auth: auth}, {:next_event, :internal, :connect_db}} + + other -> + Logger.error("ClientHandler: Subscribe proxy error: #{inspect(other)}") + timeout_subscribe_or_terminate(data) + end + error -> Logger.error("ClientHandler: Subscribe error: #{inspect(error)}") - {:keep_state_and_data, {:timeout, 1000, :subscribe}} + timeout_subscribe_or_terminate(data) end end + def handle_event(:internal, :connect_db, _, data) do + Logger.debug("ClientHandler: Trying to connect to DB") + + args = %{ + id: data.id, + auth: data.auth, + user: data.user, + tenant: {:single, data.tenant}, + replica_type: :write, + mode: :proxy, + proxy: true, + log_level: data.log_level, + caller: self(), + client_sock: 
data.sock + } + + {:ok, db_pid} = DbHandler.start_link(args) + db_sock = :gen_statem.call(db_pid, {:checkout, data.sock, self()}) + {:keep_state, %{data | db_pid: {nil, db_pid, db_sock}, mode: :proxy}} + end + def handle_event(:internal, {:greetings, ps}, _, %{sock: sock} = data) do {header, <> = payload} = Server.backend_key_data() msg = [ps, [header, payload], Server.ready_for_query()] - :ok = HH.listen_cancel_query(pid, key) - :ok = HH.sock_send(sock, msg) + :ok = HandlerHelpers.listen_cancel_query(pid, key) + :ok = HandlerHelpers.sock_send(sock, msg) Telem.client_connection_time(data.connection_start, data.id) {:next_state, :idle, data, handle_actions(data)} end - def handle_event(:timeout, :subscribe, _, _) do - {:keep_state_and_data, {:next_event, :internal, :subscribe}} - end + def handle_event(:timeout, :subscribe, _, _), + do: {:keep_state_and_data, {:next_event, :internal, :subscribe}} def handle_event(:timeout, :wait_ps, _, data) do Logger.error( @@ -403,49 +496,60 @@ defmodule Supavisor.ClientHandler do def handle_event(:timeout, :heartbeat_check, _, data) do Logger.debug("ClientHandler: Send heartbeat to client") - HH.sock_send(data.sock, Server.application_name()) + HandlerHelpers.sock_send(data.sock, Server.application_name()) {:keep_state_and_data, {:timeout, data.heartbeat_interval, :heartbeat_check}} end # handle Terminate message def handle_event(:info, {proto, _, <>}, :idle, _) - when proto in [:tcp, :ssl] do - Logger.debug("ClientHandler: Terminate received from client") + when proto in @proto do + Logger.info("ClientHandler: Terminate received from client") {:stop, {:shutdown, :terminate_received}} end # handle Sync message - def handle_event(:info, {proto, _, <>}, :idle, data) - when proto in [:tcp, :ssl] do + def handle_event(:info, {proto, _, <> = msg}, :idle, data) + when proto in @proto do Logger.debug("ClientHandler: Receive sync") - :ok = HH.sock_send(data.sock, Server.ready_for_query()) - {:keep_state_and_data, handle_actions(data)} + 
+ # db_pid can be nil in transaction mode, so we will send ready_for_query + # without checking out a direct connection. If there is a linked db_pid, + # we will forward the message to it + if data.db_pid != nil, + do: :ok = sock_send_maybe_active_once(msg, data), + else: :ok = HandlerHelpers.sock_send(data.sock, Server.ready_for_query()) + + {:keep_state, %{data | active_count: reset_active_count(data)}, handle_actions(data)} end def handle_event(:info, {proto, _, <> = msg}, _, data) - when proto in [:tcp, :ssl] do + when proto in @proto do Logger.debug("ClientHandler: Receive sync while not idle") - {_, db_pid} = data.db_pid - Db.cast(db_pid, self(), msg) - :keep_state_and_data + :ok = sock_send_maybe_active_once(msg, data) + {:keep_state, %{data | active_count: reset_active_count(data)}, handle_actions(data)} end + # handle Flush message def handle_event(:info, {proto, _, <> = msg}, _, data) - when proto in [:tcp, :ssl] do + when proto in @proto do Logger.debug("ClientHandler: Receive flush while not idle") - {_, db_pid} = data.db_pid - Db.cast(db_pid, self(), msg) - :keep_state_and_data + :ok = sock_send_maybe_active_once(msg, data) + {:keep_state, %{data | active_count: reset_active_count(data)}, handle_actions(data)} end # incoming query with a single pool def handle_event(:info, {proto, _, bin}, :idle, %{pool: pid} = data) when is_binary(bin) and is_pid(pid) do - ts = System.monotonic_time() + Logger.debug("ClientHandler: Receive query #{inspect(bin)}") db_pid = db_checkout(:both, :on_query, data) handle_prepared_statements(db_pid, bin, data) - {:next_state, :busy, %{data | db_pid: db_pid, query_start: ts}, + {:next_state, :busy, %{data | db_pid: db_pid, query_start: System.monotonic_time()}, + {:next_event, :internal, {proto, nil, bin}}} + end + + def handle_event(:info, {proto, _, bin}, _, %{mode: :proxy} = data) do + {:next_state, :busy, %{data | query_start: System.monotonic_time()}, {:next_event, :internal, {proto, nil, bin}}} end @@ -477,33 +581,22 @@ 
defmodule Supavisor.ClientHandler do end # forward query to db - def handle_event(_, {proto, _, bin}, :busy, data) - when proto in [:tcp, :ssl] do - {_, db_pid} = data.db_pid + def handle_event(_, {proto, _, bin}, :busy, data) when proto in @proto do + Logger.debug("ClientHandler: Forward query to db #{inspect(bin)} #{inspect(data.db_pid)}") - case Db.call(db_pid, self(), bin) do + case sock_send_maybe_active_once(bin, data) do :ok -> - Logger.debug("ClientHandler: DbHandler call success") - :keep_state_and_data + {:keep_state, %{data | active_count: data.active_count + 1}} - {:buffering, size} -> - Logger.debug("ClientHandler: DbHandler call buffering #{size}") + error -> + Logger.error("ClientHandler: error while sending query: #{inspect(error)}") - if size > 1_000_000 do - msg = "DbHandler buffer size is too big: #{size}" - Logger.error("ClientHandler: #{msg}") - HH.sock_send(data.sock, Server.error_message("XX000", msg)) - {:stop, {:shutdown, :buffer_size}} - else - Logger.debug("ClientHandler: DbHandler call buffering") - :keep_state_and_data - end + HandlerHelpers.sock_send( + data.sock, + Server.error_message("XX000", "Error while sending query") + ) - {:error, reason} -> - msg = "DbHandler error: #{inspect(reason)}" - Logger.error("ClientHandler: #{msg}") - HH.sock_send(data.sock, Server.error_message("XX000", msg)) - {:stop, {:shutdown, :db_handler_error}} + {:stop, {:shutdown, :send_query_error}} end end @@ -512,9 +605,8 @@ defmodule Supavisor.ClientHandler do {:stop, {:shutdown, :parameter_status_updated}} end - def handle_event(:info, {:parameter_status, ps}, :exchange, _) do - {:keep_state_and_data, {:next_event, :internal, {:greetings, ps}}} - end + def handle_event(:info, {:parameter_status, ps}, :exchange, _), + do: {:keep_state_and_data, {:next_event, :internal, {:greetings, ps}}} # client closed connection def handle_event(_, {closed, _}, _, data) @@ -526,7 +618,7 @@ defmodule Supavisor.ClientHandler do # linked DbHandler went down def 
handle_event(:info, {:EXIT, db_pid, reason}, _, data) do Logger.error("ClientHandler: DbHandler #{inspect(db_pid)} exited #{inspect(reason)}") - HH.sock_send(data.sock, Server.error_message("XX000", "DbHandler exited")) + HandlerHelpers.sock_send(data.sock, Server.error_message("XX000", "DbHandler exited")) {:stop, {:shutdown, :db_handler_exit}} end @@ -537,14 +629,9 @@ defmodule Supavisor.ClientHandler do ) case {state, reason} do - {_, :shutdown} -> - {:stop, {:shutdown, :manager_shutdown}} - - {:idle, _} -> - {:keep_state_and_data, {:next_event, :internal, :subscribe}} - - {:busy, _} -> - {:stop, {:shutdown, :manager_down}} + {_, :shutdown} -> {:stop, {:shutdown, :manager_shutdown}} + {:idle, _} -> {:keep_state_and_data, {:next_event, :internal, :subscribe}} + {:busy, _} -> {:stop, {:shutdown, :manager_down}} end end @@ -554,29 +641,27 @@ defmodule Supavisor.ClientHandler do end # emulate handle_cast - def handle_event(:cast, {:client_cast, bin, status}, _, data) do - Logger.debug("ClientHandler: --> --> bin #{inspect(byte_size(bin))} bytes") - + def handle_event(:cast, {:db_status, status, bin}, :busy, data) do case status do :ready_for_query -> Logger.debug("ClientHandler: Client is ready") + :ok = HandlerHelpers.sock_send(data.sock, bin) + db_pid = handle_db_pid(data.mode, data.pool, data.db_pid) {_, stats} = Telem.network_usage(:client, data.sock, data.id, data.stats) Telem.client_query_time(data.query_start, data.id) - :ok = HH.sock_send(data.sock, bin) - actions = handle_actions(data) - {:next_state, :idle, %{data | db_pid: db_pid, stats: stats}, actions} - :continue -> - Logger.debug("ClientHandler: Client is not ready") - :ok = HH.sock_send(data.sock, bin) - :keep_state_and_data + {:next_state, :idle, + %{data | db_pid: db_pid, stats: stats, active_count: reset_active_count(data)}, + handle_actions(data)} :read_sql_error -> - Logger.error("ClientHandler: read only sql transaction, reruning the query to write pool") + Logger.error( + "ClientHandler: read 
only sql transaction, rerunning the query to write pool" + ) # release the read pool _ = handle_db_pid(data.mode, data.pool, data.db_pid) @@ -589,12 +674,6 @@ defmodule Supavisor.ClientHandler do end end - # emulate handle_call - def handle_event({:call, from}, {:client_call, bin, _}, _, data) do - Logger.debug("ClientHandler: --> --> bin call #{inspect(byte_size(bin))} bytes") - {:keep_state_and_data, {:reply, from, HH.sock_send(data.sock, bin)}} - end - def handle_event(type, content, state, data) do msg = [ {"type", type}, @@ -617,29 +696,25 @@ defmodule Supavisor.ClientHandler do msg = case data.mode do :session -> - "Max client connections reached" + "MaxClientsInSessionMode: max clients reached - in Session mode max clients are limited to pool_size" :transaction -> "Unable to check out process from the pool due to timeout" end Logger.error("ClientHandler: #{msg}") - HH.sock_send(data.sock, Server.error_message("XX000", msg)) + HandlerHelpers.sock_send(data.sock, Server.error_message("XX000", msg)) :ok end - def terminate(reason, _state, %{db_pid: {_, pid}}) do + def terminate(reason, _state, %{db_pid: {_, pid, _}}) do db_info = - case Db.get_state_and_mode(pid) do - {:ok, {state, mode} = resp} -> - if state == :busy || mode == :session, do: Db.stop(pid) - resp - - error -> - error + with {:ok, {state, mode} = resp} <- DbHandler.get_state_and_mode(pid) do + if state == :busy or mode == :session, do: DbHandler.stop(pid) + resp end - Logger.warning( + Logger.debug( "ClientHandler: socket closed with reason #{inspect(reason)}, DbHandler #{inspect({pid, db_info})}" ) @@ -647,16 +722,17 @@ defmodule Supavisor.ClientHandler do end def terminate(reason, _state, _data) do - Logger.warning("ClientHandler: socket closed with reason #{inspect(reason)}") + Logger.debug("ClientHandler: socket closed with reason #{inspect(reason)}") :ok end ## Internal functions - @spec handle_exchange(S.sock(), {atom(), fun()}) :: {:ok, binary() | nil} | {:error, String.t()} + @spec 
handle_exchange(Supavisor.sock(), {atom(), fun()}) :: + {:ok, binary() | nil} | {:error, String.t()} def handle_exchange({_, socket} = sock, {:auth_query_md5 = method, secrets}) do salt = :crypto.strong_rand_bytes(4) - :ok = HH.sock_send(sock, Server.md5_request(salt)) + :ok = HandlerHelpers.sock_send(sock, Server.md5_request(salt)) with {:ok, %{ @@ -672,7 +748,7 @@ defmodule Supavisor.ClientHandler do end def handle_exchange({_, socket} = sock, {method, secrets}) do - :ok = HH.sock_send(sock, Server.scram_request()) + :ok = HandlerHelpers.sock_send(sock, Server.scram_request()) with {:ok, %{ @@ -697,7 +773,7 @@ defmodule Supavisor.ClientHandler do ), {:ok, key} <- authenticate_exchange(method, secrets, signatures, p) do message = "v=#{Base.encode64(signatures.server)}" - :ok = HH.sock_send(sock, Server.exchange_message(:final, message)) + :ok = HandlerHelpers.sock_send(sock, Server.exchange_message(:final, message)) {:ok, key} else {:error, message} -> {:error, message} @@ -719,22 +795,20 @@ defmodule Supavisor.ClientHandler do defp reply_first_exchange(sock, method, secrets, channel, nonce, user) do {message, signatures} = exchange_first(method, secrets, nonce, user, channel) - :ok = HH.sock_send(sock, Server.exchange_message(:first, message)) + :ok = HandlerHelpers.sock_send(sock, Server.exchange_message(:first, message)) {:ok, signatures} end defp authenticate_exchange(:password, _secrets, signatures, p) do - if p == signatures.client do - {:ok, nil} - else - {:error, "Wrong password"} - end + if p == signatures.client, + do: {:ok, nil}, + else: {:error, "Wrong password"} end defp authenticate_exchange(:auth_query, secrets, signatures, p) do client_key = :crypto.exor(Base.decode64!(p), signatures.client) - if H.hash(client_key) == secrets.().stored_key do + if Helpers.hash(client_key) == secrets.().stored_key do {:ok, client_key} else {:error, "Wrong password"} @@ -742,17 +816,16 @@ defmodule Supavisor.ClientHandler do end defp 
authenticate_exchange(:auth_query_md5, client_hash, server_hash, salt) do - if "md5" <> H.md5([server_hash, salt]) == client_hash do - {:ok, nil} - else - {:error, "Wrong password"} - end + if "md5" <> Helpers.md5([server_hash, salt]) == client_hash, + do: {:ok, nil}, + else: {:error, "Wrong password"} end - @spec db_checkout(:write | :read | :both, :on_connect | :on_query, map) :: {pid, pid} | nil - defp db_checkout(_, _, %{mode: :session, db_pid: {pool, db_pid}}) - when is_pid(pool) and is_pid(db_pid) do - {pool, db_pid} + @spec db_checkout(:write | :read | :both, :on_connect | :on_query, map) :: + {pid, pid, Supavisor.sock()} | nil + defp db_checkout(_, _, %{mode: mode, db_pid: {pool, db_pid, db_sock}}) + when is_pid(db_pid) and mode in [:session, :proxy] do + {pool, db_pid, db_sock} end defp db_checkout(_, :on_connect, %{mode: :transaction}), do: nil @@ -770,32 +843,49 @@ defmodule Supavisor.ClientHandler do end defp db_checkout(_, _, data) do - {time, db_pid} = :timer.tc(:poolboy, :checkout, [data.pool, true, data.timeout]) + start = System.monotonic_time(:microsecond) + db_pid = :poolboy.checkout(data.pool, true, data.timeout) Process.link(db_pid) + db_sock = DbHandler.checkout(db_pid, data.sock, self()) same_box = if node(db_pid) == node(), do: :local, else: :remote - Telem.pool_checkout_time(time, data.id, same_box) - {data.pool, db_pid} + Telem.pool_checkout_time(System.monotonic_time(:microsecond) - start, data.id, same_box) + {data.pool, db_pid, db_sock} end @spec handle_db_pid(:transaction, pid(), pid() | nil) :: nil @spec handle_db_pid(:session, pid(), pid()) :: pid() + @spec handle_db_pid(:proxy, pid(), pid()) :: pid() defp handle_db_pid(:transaction, _pool, nil), do: nil - defp handle_db_pid(:transaction, _pool, {pool, db_pid}) do + defp handle_db_pid(:transaction, pool, {_, db_pid, _}) do Process.unlink(db_pid) :poolboy.checkin(pool, db_pid) nil end defp handle_db_pid(:session, _, db_pid), do: db_pid + defp handle_db_pid(:proxy, _, db_pid), do: 
db_pid defp update_user_data(data, info, user, id, db_name, mode) do proxy_type = - if info.tenant.require_user do - :password - else - :auth_query - end + if info.tenant.require_user, + do: :password, + else: :auth_query + + auth = %{ + application_name: data[:app_name] || "Supavisor", + database: info.tenant.db_database, + host: to_charlist(info.tenant.db_host), + sni_hostname: + if(info.tenant.sni_hostname != nil, do: to_charlist(info.tenant.sni_hostname)), + port: info.tenant.db_port, + user: user, + password: info.user.db_password, + require_user: info.tenant.require_user, + upstream_ssl: info.tenant.upstream_ssl, + upstream_tls_ca: info.tenant.upstream_tls_ca, + upstream_verify: info.tenant.upstream_verify + } %{ data @@ -807,12 +897,14 @@ defmodule Supavisor.ClientHandler do id: id, heartbeat_interval: info.tenant.client_heartbeat_interval * 1000, db_name: db_name, - mode: mode + mode: mode, + auth: auth, + tenant_availability_zone: info.tenant.availability_zone } end @spec auth_secrets(map, String.t(), term(), non_neg_integer()) :: - {:ok, S.secrets()} | {:error, term()} + {:ok, Supavisor.secrets()} | {:error, term()} ## password secrets def auth_secrets(%{user: user, tenant: %{require_user: true}}, _, _, _) do secrets = %{db_user: user.db_user, password: user.db_password, alias: user.db_user_alias} @@ -842,7 +934,7 @@ defmodule Supavisor.ClientHandler do if tenant.upstream_ssl and tenant.upstream_verify == "peer" do [ {:verify, :verify_peer}, - {:cacerts, [H.upstream_cert(tenant.upstream_tls_ca)]}, + {:cacerts, [Helpers.upstream_cert(tenant.upstream_tls_ca)]}, {:server_name_indication, String.to_charlist(tenant.db_host)}, {:customize_hostname_check, [{:match_fun, fn _, _ -> true end}]} ] @@ -858,35 +950,43 @@ defmodule Supavisor.ClientHandler do parameters: [application_name: "Supavisor auth_query"], ssl: tenant.upstream_ssl, socket_options: [ - H.ip_version(tenant.ip_version, tenant.db_host) + Helpers.ip_version(tenant.ip_version, tenant.db_host) ], 
queue_target: 1_000, queue_interval: 5_000, ssl_opts: ssl_opts || [] ) - resp = - case H.get_user_secret(conn, tenant.auth_query, db_user) do - {:ok, secret} -> + try do + Logger.debug( + "ClientHandler: Connected to db #{tenant.db_host} #{tenant.db_port} #{tenant.db_database} #{user.db_user}" + ) + + resp = + with {:ok, secret} <- Helpers.get_user_secret(conn, tenant.auth_query, db_user) do t = if secret.digest == :md5, do: :auth_query_md5, else: :auth_query {:ok, {t, fn -> Map.put(secret, :alias, user.db_user_alias) end}} + end - {:error, reason} -> - {:error, reason} - end - - GenServer.stop(conn, :normal) - resp + Logger.info("ClientHandler: Get secrets finished") + resp + rescue + exception -> + Logger.error("ClientHandler: Couldn't fetch user secrets from #{tenant.db_host}") + reraise exception, __STACKTRACE__ + after + GenServer.stop(conn, :normal, 5_000) + end end @spec exchange_first(:password | :auth_query, fun(), binary(), binary(), binary()) :: {binary(), map()} defp exchange_first(:password, secret, nonce, user, channel) do message = Server.exchange_first_message(nonce) - server_first_parts = H.parse_server_first(message, nonce) + server_first_parts = Helpers.parse_server_first(message, nonce) {client_final_message, server_proof} = - H.get_client_final( + Helpers.get_client_final( :password, secret.().password, server_first_parts, @@ -906,10 +1006,10 @@ defmodule Supavisor.ClientHandler do defp exchange_first(:auth_query, secret, nonce, user, channel) do secret = secret.() message = Server.exchange_first_message(nonce, secret.salt) - server_first_parts = H.parse_server_first(message, nonce) + server_first_parts = Helpers.parse_server_first(message, nonce) sings = - H.signatures( + Helpers.signatures( secret.stored_key, secret.server_key, server_first_parts, @@ -921,7 +1021,7 @@ defmodule Supavisor.ClientHandler do {message, sings} end - @spec try_get_sni(S.sock()) :: String.t() | nil + @spec try_get_sni(Supavisor.sock()) :: String.t() | nil def 
try_get_sni({:ssl, sock}) do case :ssl.connection_information(sock, [:sni_hostname]) do {:ok, [sni_hostname: sni]} -> List.to_string(sni) @@ -931,12 +1031,7 @@ defmodule Supavisor.ClientHandler do def try_get_sni(_), do: nil - @spec timeout_check(atom, non_neg_integer) :: {:timeout, non_neg_integer, atom} - defp timeout_check(key, timeout) do - {:timeout, timeout, key} - end - - defp db_pid_meta({_, {_, pid}} = _key) do + defp db_pid_meta({_, {_, pid, _}} = _key) do rkey = Supavisor.Registry.PoolPids fnode = node(pid) @@ -947,11 +1042,11 @@ defmodule Supavisor.ClientHandler do end end - @spec handle_prepared_statements({pid, pid}, binary, map) :: :ok | nil - defp handle_prepared_statements({_, pid}, bin, %{mode: :transaction} = data) do + @spec handle_prepared_statements({pid, pid, Supavisor.sock()}, binary, map) :: :ok | nil + defp handle_prepared_statements({_, pid, _}, bin, %{mode: :transaction} = data) do with {:ok, payload} <- Client.get_payload(bin), - {:ok, statamets} <- Supavisor.PgParser.statements(payload), - true <- Enum.member?([["PrepareStmt"], ["DeallocateStmt"]], statamets) do + {:ok, statements} <- Supavisor.PgParser.statements(payload), + true <- statements in [["PrepareStmt"], ["DeallocateStmt"]] do Logger.info("ClientHandler: Handle prepared statement #{inspect(payload)}") GenServer.call(data.pool, :get_all_workers) @@ -976,18 +1071,66 @@ defmodule Supavisor.ClientHandler do defp handle_prepared_statements(_, _, _), do: nil @spec handle_actions(map) :: [{:timeout, non_neg_integer, atom}] - defp handle_actions(data) do - Enum.flat_map(data, fn - {:heartbeat_interval, v} = t when v > 0 -> - Logger.debug("ClientHandler: Call timeout #{inspect(t)}") - [timeout_check(:heartbeat_check, v)] - - {:idle_timeout, v} = t when v > 0 -> - Logger.debug("ClientHandler: Call timeout #{inspect(t)}") - [timeout_check(:idle_terminate, v)] - - _ -> - [] - end) + defp handle_actions(%{} = data) do + heartbeat = + if data.heartbeat_interval > 0, + do: [{:timeout, 
data.heartbeat_interval, :heartbeat_check}], + else: [] + + idle = if data.idle_timeout > 0, do: [{:timeout, data.idle_timeout, :idle_timeout}], else: [] + + idle ++ heartbeat + end + + @spec app_name(any()) :: String.t() + def app_name(name) when is_binary(name), do: name + + def app_name(name) do + Logger.debug("ClientHandler: Invalid application name #{inspect(name)}") + "Supavisor" + end + + @spec maybe_change_log(map()) :: atom() | nil + def maybe_change_log(%{"payload" => %{"options" => options}}) do + level = options["log_level"] && String.to_existing_atom(options["log_level"]) + + if level in [:debug, :info, :notice, :warning, :error] do + Helpers.set_log_level(level) + level + end + end + + def maybe_change_log(_), do: :ok + + @spec sock_send_maybe_active_once(binary(), map()) :: :ok | {:error, term()} + def sock_send_maybe_active_once(bin, data) do + Logger.debug("ClientHandler: Send maybe active once") + active_count = data.active_count + + if active_count > @switch_active_count do + Logger.debug("ClientHandler: Activate socket #{inspect(active_count)}") + HandlerHelpers.active_once(data.sock) + end + + HandlerHelpers.sock_send(elem(data.db_pid, 2), bin) + end + + @spec timeout_subscribe_or_terminate(map()) :: :gen_statem.handle_event_result() + def timeout_subscribe_or_terminate(%{subscribe_retries: subscribe_retries} = data) do + if subscribe_retries < @subscribe_retries do + Logger.warning("ClientHandler: Retry subscribe #{inspect(subscribe_retries)}") + + {:keep_state, %{data | subscribe_retries: subscribe_retries + 1}, + {:timeout, @timeout_subscribe, :subscribe}} + else + Logger.error("ClientHandler: Terminate after retries") + {:stop, {:shutdown, :subscribe_retries}} + end + end + + @spec reset_active_count(map()) :: 0 + def reset_active_count(data) do + HandlerHelpers.activate(data.sock) + 0 end end diff --git a/lib/supavisor/db_handler.ex b/lib/supavisor/db_handler.ex index e14e59d6..1d7ff475 100644 --- a/lib/supavisor/db_handler.ex +++ 
b/lib/supavisor/db_handler.ex @@ -6,71 +6,81 @@ defmodule Supavisor.DbHandler do require Logger - @behaviour :partisan_gen_statem + @behaviour :gen_statem - alias Supavisor, as: S - alias Supavisor.ClientHandler, as: Client - alias Supavisor.Helpers, as: H - alias Supavisor.HandlerHelpers, as: HH - alias Supavisor.{Monitoring.Telem, Protocol.Server} + alias Supavisor.{ + ClientHandler, + HandlerHelpers, + Helpers, + Monitoring.Telem, + Protocol.Server + } @type state :: :connect | :authentication | :idle | :busy @reconnect_timeout 2_500 + @reconnect_timeout_proxy 500 @sock_closed [:tcp_closed, :ssl_closed] @proto [:tcp, :ssl] - @async_send_limit 1_000 + @switch_active_count Application.compile_env(:supavisor, :switch_active_count) + @reconnect_retries Application.compile_env(:supavisor, :reconnect_retries) - def start_link(config) do - :partisan_gen_statem.start_link(__MODULE__, config, hibernate_after: 5_000) - end + def start_link(config), + do: :gen_statem.start_link(__MODULE__, config, hibernate_after: 5_000) - @spec call(pid(), pid(), binary()) :: :ok | {:error, any()} | {:buffering, non_neg_integer()} - def call(pid, caller, msg), do: :partisan_gen_statem.call(pid, {:db_call, caller, msg}, 15_000) + def checkout(pid, sock, caller, timeout \\ 15_000), + do: :gen_statem.call(pid, {:checkout, sock, caller}, timeout) - @spec cast(pid(), pid(), binary()) :: :ok | {:error, any()} | {:buffering, non_neg_integer()} - def cast(pid, caller, msg), do: :partisan_gen_statem.cast(pid, {:db_cast, caller, msg}) + @spec checkin(pid()) :: :ok + def checkin(pid), do: :gen_statem.cast(pid, :checkin) @spec get_state_and_mode(pid()) :: {:ok, {state, Supavisor.mode()}} | {:error, term()} def get_state_and_mode(pid) do - try do - {:ok, :partisan_gen_statem.call(pid, :get_state_and_mode, 5_000)} - catch - error, reason -> {:error, {error, reason}} - end + {:ok, :gen_statem.call(pid, :get_state_and_mode, 5_000)} + catch + error, reason -> {:error, {error, reason}} end @spec 
stop(pid()) :: :ok - def stop(pid), do: :partisan_gen_statem.stop(pid, :client_termination, 5_000) + def stop(pid) do + Logger.debug("DbHandler: Stop pid #{inspect(pid)}") + :gen_statem.stop(pid, {:shutdown, :client_termination}, 5_000) + end @impl true def init(args) do Process.flag(:trap_exit, true) - H.set_log_level(args.log_level) - H.set_max_heap_size(150) + Helpers.set_log_level(args.log_level) + Helpers.set_max_heap_size(90) {_, tenant} = args.tenant Logger.metadata(project: tenant, user: args.user, mode: args.mode) - data = %{ - id: args.id, - sock: nil, - caller: nil, - sent: false, - auth: args.auth, - user: args.user, - tenant: args.tenant, - buffer: [], - anon_buffer: [], - db_state: nil, - parameter_status: %{}, - nonce: nil, - messages: "", - server_proof: nil, - stats: %{}, - mode: args.mode, - replica_type: args.replica_type - } + data = + %{ + id: args.id, + sock: nil, + sent: false, + auth: args.auth, + user: args.user, + tenant: args.tenant, + buffer: [], + anon_buffer: [], + db_state: nil, + parameter_status: %{}, + nonce: nil, + messages: "", + server_proof: nil, + stats: %{}, + mode: args.mode, + replica_type: args.replica_type, + reply: nil, + caller: args[:caller] || nil, + client_sock: args[:client_sock] || nil, + proxy: args[:proxy] || false, + active_count: 0, + reconnect_retries: 0 + } Telem.handler_action(:db_handler, :started, args.id) {:ok, :connect, data, {:next_event, :internal, :connect}} @@ -83,15 +93,26 @@ defmodule Supavisor.DbHandler do def handle_event(:internal, _, :connect, %{auth: auth} = data) do Logger.debug("DbHandler: Try to connect to DB") - sock_opts = [ - :binary, - {:packet, :raw}, - {:active, false}, - {:nodelay, true}, - auth.ip_version - ] - - reconnect_callback = {:keep_state_and_data, {:state_timeout, @reconnect_timeout, :connect}} + sock_opts = + [ + auth.ip_version, + mode: :binary, + packet: :raw, + # recbuf: 8192, + # sndbuf: 8192, + # backlog: 2048, + # send_timeout: 120, + # keepalive: true, + # nopush: 
true, + nodelay: true, + active: false + ] + + maybe_reconnect_callback = fn reason -> + if data.reconnect_retries > @reconnect_retries and data.client_sock != nil, + do: {:stop, {:failed_to_connect, reason}}, + else: {:keep_state_and_data, {:state_timeout, reconnect_timeout(data), :connect}} + end Telem.handler_action(:db_handler, :db_connection, data.id) @@ -101,19 +122,22 @@ defmodule Supavisor.DbHandler do case try_ssl_handshake({:gen_tcp, sock}, auth) do {:ok, sock} -> - case send_startup(sock, auth) do + tenant = if data.proxy, do: Supavisor.tenant(data.id) + search_path = Supavisor.search_path(data.id) + + case send_startup(sock, auth, tenant, search_path) do :ok -> :ok = activate(sock) {:next_state, :authentication, %{data | sock: sock}} {:error, reason} -> Logger.error("DbHandler: Send startup error #{inspect(reason)}") - reconnect_callback + maybe_reconnect_callback.(reason) end - {:error, error} -> - Logger.error("DbHandler: Handshake error #{inspect(error)}") - reconnect_callback + {:error, reason} -> + Logger.error("DbHandler: Handshake error #{inspect(reason)}") + maybe_reconnect_callback.(reason) end other -> @@ -121,165 +145,66 @@ defmodule Supavisor.DbHandler do "DbHandler: Connection failed #{inspect(other)} to #{inspect(auth.host)}:#{inspect(auth.port)}" ) - reconnect_callback + maybe_reconnect_callback.(other) end end - def handle_event(:state_timeout, :connect, _state, _) do - Logger.warning("DbHandler: Reconnect") - {:keep_state_and_data, {:next_event, :internal, :connect}} + def handle_event(:state_timeout, :connect, _state, data) do + retry = data.reconnect_retries + Logger.warning("DbHandler: Reconnect #{retry} to DB") + + {:keep_state, %{data | reconnect_retries: retry + 1}, {:next_event, :internal, :connect}} end def handle_event(:info, {proto, _, bin}, :authentication, data) when proto in @proto do dec_pkt = Server.decode(bin) Logger.debug("DbHandler: dec_pkt, #{inspect(dec_pkt, pretty: true)}") - resp = - Enum.reduce(dec_pkt, {%{}, 
nil}, fn - %{tag: :parameter_status, payload: {k, v}}, {ps, db_state} -> - {Map.put(ps, k, v), db_state} - - %{tag: :ready_for_query, payload: db_state}, {ps, _} -> - {:ready_for_query, ps, db_state} - - %{tag: :backend_key_data, payload: payload}, acc -> - key = self() - conn = %{host: data.auth.host, port: data.auth.port, ip_ver: data.auth.ip_version} - Registry.register(Supavisor.Registry.PoolPids, key, Map.merge(payload, conn)) - Logger.debug("DbHandler: Backend #{inspect(key)} data: #{inspect(payload)}") - acc - - %{payload: {:authentication_sasl_password, methods_b}}, {ps, _} -> - nonce = - case Server.decode_string(methods_b) do - {:ok, req_method, _} -> - Logger.debug("DbHandler: SASL method #{inspect(req_method)}") - nonce = :pgo_scram.get_nonce(16) - user = get_user(data.auth) - client_first = :pgo_scram.get_client_first(user, nonce) - client_first_size = IO.iodata_length(client_first) - - sasl_initial_response = [ - "SCRAM-SHA-256", - 0, - <>, - client_first - ] - - bin = :pgo_protocol.encode_scram_response_message(sasl_initial_response) - :ok = sock_send(data.sock, bin) - nonce - - other -> - Logger.error("DbHandler: Undefined sasl method #{inspect(other)}") - nil - end - - {ps, :authentication_sasl, nonce} - - %{payload: {:authentication_server_first_message, server_first}}, {ps, _} - when data.auth.require_user == false -> - nonce = data.nonce - server_first_parts = H.parse_server_first(server_first, nonce) - - {client_final_message, server_proof} = - H.get_client_final( - :auth_query, - data.auth.secrets.(), - server_first_parts, - nonce, - data.auth.secrets.().user, - "biws" - ) - - bin = :pgo_protocol.encode_scram_response_message(client_final_message) - :ok = sock_send(data.sock, bin) - - {ps, :authentication_server_first_message, server_proof} - - %{payload: {:authentication_server_first_message, server_first}}, {ps, _} -> - nonce = data.nonce - server_first_parts = :pgo_scram.parse_server_first(server_first, nonce) - - {client_final_message, 
server_proof} = - :pgo_scram.get_client_final( - server_first_parts, - nonce, - data.auth.user, - data.auth.password.() - ) - - bin = :pgo_protocol.encode_scram_response_message(client_final_message) - :ok = sock_send(data.sock, bin) - - {ps, :authentication_server_first_message, server_proof} - - %{payload: {:authentication_server_final_message, _server_final}}, acc -> - acc - - %{payload: {:authentication_md5_password, salt}}, {ps, _} -> - Logger.debug("DbHandler: dec_pkt, #{inspect(dec_pkt, pretty: true)}") - - digest = - if data.auth.method == :password do - H.md5([data.auth.password.(), data.auth.user]) - else - data.auth.secrets.().secret - end - - payload = ["md5", H.md5([digest, salt]), 0] - bin = [?p, <>, payload] - :ok = sock_send(data.sock, bin) - {ps, :authentication_md5} - - %{tag: :error_response, payload: error}, _ -> - {:error_response, error} - - _e, acc -> - acc - end) + resp = Enum.reduce(dec_pkt, %{}, &handle_auth_pkts(&1, &2, data)) case resp do - {_, :authentication_sasl, nonce} -> + {:authentication_sasl, nonce} -> {:keep_state, %{data | nonce: nonce}} - {_, :authentication_server_first_message, server_proof} -> + {:authentication_server_first_message, server_proof} -> {:keep_state, %{data | server_proof: server_proof}} - {_, :authentication_md5} -> + %{authentication_server_final_message: _server_final} -> + :keep_state_and_data + + %{authentication_ok: true} -> + :keep_state_and_data + + :authentication -> + :keep_state_and_data + + :authentication_md5 -> {:keep_state, data} {:error_response, ["SFATAL", "VFATAL", "C28P01", reason, _, _, _]} -> - tenant = Supavisor.tenant(data.id) - - for node <- [node() | Node.list()] do - :erpc.cast(node, fn -> - Cachex.del(Supavisor.Cache, {:secrets, tenant, data.user}) - Cachex.del(Supavisor.Cache, {:secrets_check, tenant, data.user}) - - Registry.dispatch(Supavisor.Registry.TenantClients, data.id, fn entries -> - for {client_handler, _meta} <- entries, - do: send(client_handler, {:disconnect, reason}) - 
end) - end) - end - - Supavisor.stop(data.id) + handle_authentication_error(data, reason) Logger.error("DbHandler: Auth error #{inspect(reason)}") {:stop, :invalid_password, data} {:error_response, error} -> Logger.error("DbHandler: Error auth response #{inspect(error)}") - {:keep_state, data} + {:stop, {:encode_and_forward, error}} + + {:ready_for_query, acc} -> + ps = acc.ps - {:ready_for_query, ps, db_state} -> Logger.debug( - "DbHandler: DB ready_for_query: #{inspect(db_state)} #{inspect(ps, pretty: true)}" + "DbHandler: DB ready_for_query: #{inspect(acc.db_state)} #{inspect(ps, pretty: true)}" ) - Supavisor.set_parameter_status(data.id, ps) + if data.proxy do + bin_ps = Server.encode_parameter_status(ps) + send(data.caller, {:parameter_status, bin_ps}) + else + Supavisor.set_parameter_status(data.id, ps) + end - {:next_state, :idle, %{data | parameter_status: ps}, + {:next_state, :idle, %{data | parameter_status: ps, reconnect_retries: 0}, {:next_event, :internal, :check_buffer}} other -> @@ -288,6 +213,11 @@ defmodule Supavisor.DbHandler do end end + def handle_event(:internal, :check_buffer, :idle, %{reply: from} = data) when from != nil do + Logger.debug("DbHandler: Check buffer") + {:next_state, :busy, %{data | reply: nil}, {:reply, from, data.sock}} + end + def handle_event(:internal, :check_buffer, :idle, %{buffer: buff, caller: caller} = data) when is_pid(caller) do if buff != [] do @@ -301,6 +231,8 @@ defmodule Supavisor.DbHandler do # check if it needs to apply queries from the anon buffer def handle_event(:internal, :check_anon_buffer, _, %{anon_buffer: buff, caller: nil} = data) do + Logger.debug("DbHandler: Check anon buffer") + if buff != [] do Logger.debug( "DbHandler: Anon buffer is not empty, try to send #{IO.iodata_length(buff)} bytes" @@ -313,6 +245,11 @@ defmodule Supavisor.DbHandler do {:keep_state, %{data | anon_buffer: []}} end + def handle_event(:internal, :check_anon_buffer, _, _) do + Logger.debug("DbHandler: Anon buffer is empty") + 
:keep_state_and_data + end + # the process received message from db without linked caller def handle_event(:info, {proto, _, bin}, _, %{caller: nil}) when proto in @proto do Logger.debug("DbHandler: Got db response #{inspect(bin)} when caller was nil") @@ -343,9 +280,8 @@ defmodule Supavisor.DbHandler do :continue end - :ok = Client.client_cast(data.caller, bin, resp) - if resp != :continue do + :ok = ClientHandler.db_status(data.caller, resp, bin) {_, stats} = Telem.network_usage(:db, data.sock, data.id, data.stats) {:keep_state, %{data | stats: stats, caller: handler_caller(data)}} else @@ -353,34 +289,34 @@ defmodule Supavisor.DbHandler do end end - def handle_event(:info, {proto, _, bin}, _, %{caller: caller} = data) + # forward the message to the client + def handle_event(:info, {proto, _, bin}, _, %{caller: caller, reply: nil} = data) when is_pid(caller) and proto in @proto do - Logger.debug("DbHandler: Got write replica message #{inspect(bin)}") - HH.setopts(data.sock, active: :once) - # check if the response ends with "ready for query" - ready = check_ready(bin) - sent = data.sent || 0 - - {send_via, progress} = - case ready do - {:ready_for_query, :idle} -> {:client_cast, :ready_for_query} - {:ready_for_query, _} -> {:client_cast, :continue} - _ when sent < @async_send_limit -> {:client_cast, :continue} - _ -> {:client_call, :continue} - end + Logger.debug("DbHandler: Got write replica message #{inspect(bin)}") + + if String.ends_with?(bin, Server.ready_for_query()) do + HandlerHelpers.activate(data.sock) - :ok = apply(Client, send_via, [data.caller, bin, progress]) + {_, stats} = Telem.network_usage(:db, data.sock, data.id, data.stats) - case progress do - :ready_for_query -> - {_, stats} = Telem.network_usage(:db, data.sock, data.id, data.stats) - HH.setopts(data.sock, active: true) + # in transaction mode, we need to notify the client when the transaction is finished, + # after which it will unlink the direct db connection process from itself. 
+ data = + if data.mode == :transaction do + ClientHandler.db_status(data.caller, :ready_for_query, bin) + %{data | stats: stats, caller: nil, client_sock: nil, active_count: 0} + else + HandlerHelpers.sock_send(data.client_sock, bin) + %{data | stats: stats, active_count: 0} + end - {:next_state, :idle, %{data | stats: stats, caller: handler_caller(data), sent: false}, - {:next_event, :internal, :check_anon_buffer}} + {:next_state, :idle, data, {:next_event, :internal, :check_anon_buffer}} + else + if data.active_count > @switch_active_count, + do: HandlerHelpers.active_once(data.sock) - :continue -> - {:keep_state, %{data | sent: sent + 1}} + HandlerHelpers.sock_send(data.client_sock, bin) + {:keep_state, %{data | active_count: data.active_count + 1}} end end @@ -391,56 +327,30 @@ defmodule Supavisor.DbHandler do {:next_event, :internal, :check_anon_buffer}} end - def handle_event({:call, from}, {:db_call, caller, bin}, :idle, %{sock: sock} = data) do - reply = {:reply, from, sock_send(sock, bin)} - {:next_state, :busy, %{data | caller: caller}, reply} - end - - def handle_event({:call, from}, {:db_call, caller, bin}, :busy, %{sock: sock} = data) do - reply = {:reply, from, sock_send(sock, bin)} - {:keep_state, %{data | caller: caller}, reply} - end - - def handle_event({:call, from}, {:db_call, caller, bin}, state, %{buffer: buff} = data) do - Logger.debug( - "DbHandler: state #{state} <-- <-- bin #{inspect(byte_size(bin))} bytes, caller: #{inspect(caller)}" - ) + def handle_event({:call, from}, {:checkout, sock, caller}, state, data) do + Logger.debug("DbHandler: checkout call when state was #{state}") - new_buff = [bin | buff] - reply = {:reply, from, {:buffering, IO.iodata_length(new_buff)}} - {:keep_state, %{data | caller: caller, buffer: new_buff}, reply} + # store the reply ref and send it when the state is idle + if state in [:idle, :busy], + do: {:keep_state, %{data | client_sock: sock, caller: caller}, {:reply, from, data.sock}}, + else: {:keep_state, 
%{data | client_sock: sock, caller: caller, reply: from}} end - # emulate handle_cast - def handle_event(:cast, {:db_cast, caller, bin}, state, %{sock: sock}) - when state in [:idle, :busy] do - Logger.debug( - "DbHandler: state #{state} <-- <-- bin #{inspect(byte_size(bin))} bytes, cast caller: #{inspect(caller)}" - ) - - sock_send(sock, bin) - :keep_state_and_data - end - - def handle_event(:cast, {:db_cast, caller, bin}, state, %{buffer: buff} = data) do - Logger.debug( - "DbHandler: state #{state} <-- <-- bin #{inspect(byte_size(bin))} bytes, cast caller: #{inspect(caller)}" - ) - - new_buff = [bin | buff] - {:keep_state, %{data | caller: caller, buffer: new_buff}} + def handle_event({:call, from}, :ps, _, data) do + Logger.debug("DbHandler: get parameter status") + {:keep_state_and_data, {:reply, from, data.parameter_status}} end def handle_event(_, {closed, _}, :busy, data) when closed in @sock_closed do - {:stop, :db_termination, data} + {:stop, {:shutdown, :db_termination}, data} end def handle_event(_, {closed, _}, state, data) when closed in @sock_closed do Logger.error("DbHandler: Connection closed when state was #{state}") if Application.get_env(:supavisor, :reconnect_on_db_close), - do: {:next_state, :connect, data, {:state_timeout, @reconnect_timeout, :connect}}, - else: {:stop, :db_termination, data} + do: {:next_state, :connect, data, {:state_timeout, reconnect_timeout(data), :connect}}, + else: {:stop, {:shutdown, :db_termination}, data} end # linked client_handler went down @@ -451,8 +361,8 @@ defmodule Supavisor.DbHandler do ) end - if state == :busy || data.mode == :session do - :ok = sock_send(data.sock, <>) + if state == :busy or data.mode == :session do + sock_send(data.sock, Server.terminate_message()) :gen_tcp.close(elem(data.sock, 1)) {:stop, {:client_handler_down, data.mode}} else @@ -486,12 +396,22 @@ defmodule Supavisor.DbHandler do def terminate(reason, state, data) do Telem.handler_action(:db_handler, :stopped, data.id) + if 
data.client_sock != nil do + message = + case reason do + {:encode_and_forward, msg} -> Server.encode_error_message(msg) + _ -> Server.error_message("XX000", inspect(reason)) + end + + HandlerHelpers.sock_send(data.client_sock, message) + end + Logger.error( "DbHandler: Terminating with reason #{inspect(reason)} when state was #{inspect(state)}" ) end - @spec try_ssl_handshake(S.tcp_sock(), map) :: {:ok, S.sock()} | {:error, term()} + @spec try_ssl_handshake(Supavisor.tcp_sock(), map) :: {:ok, Supavisor.sock()} | {:error, term()} defp try_ssl_handshake(sock, %{upstream_ssl: true} = auth) do case sock_send(sock, Server.ssl_request()) do :ok -> ssl_recv(sock, auth) @@ -501,21 +421,17 @@ defmodule Supavisor.DbHandler do defp try_ssl_handshake(sock, _), do: {:ok, sock} - @spec ssl_recv(S.tcp_sock(), map) :: {:ok, S.ssl_sock()} | {:error, term} + @spec ssl_recv(Supavisor.tcp_sock(), map) :: {:ok, Supavisor.ssl_sock()} | {:error, term} defp ssl_recv({:gen_tcp, sock} = s, auth) do case :gen_tcp.recv(sock, 1, 15_000) do - {:ok, <>} -> - ssl_connect(s, auth) - - {:ok, <>} -> - {:error, :ssl_not_available} - - {:error, _} = error -> - error + {:ok, <>} -> ssl_connect(s, auth) + {:ok, <>} -> {:error, :ssl_not_available} + {:error, _} = error -> error end end - @spec ssl_connect(S.tcp_sock(), map, pos_integer) :: {:ok, S.ssl_sock()} | {:error, term} + @spec ssl_connect(Supavisor.tcp_sock(), map, pos_integer) :: + {:ok, Supavisor.ssl_sock()} | {:error, term} defp ssl_connect({:gen_tcp, sock}, auth, timeout \\ 5000) do opts = case auth.upstream_verify do @@ -523,7 +439,8 @@ defmodule Supavisor.DbHandler do [ verify: :verify_peer, cacerts: [auth.upstream_tls_ca], - server_name_indication: auth.host, + # unclear behavior on pg14 + server_name_indication: auth.sni_hostname || auth.host, customize_hostname_check: [{:match_fun, fn _, _ -> true end}] ] @@ -540,26 +457,30 @@ defmodule Supavisor.DbHandler do end end - @spec send_startup(S.sock(), map()) :: :ok | {:error, term} - defp 
send_startup(sock, auth) do - user = get_user(auth) + @spec send_startup(Supavisor.sock(), map(), String.t() | nil, String.t() | nil) :: + :ok | {:error, term} + def send_startup(sock, auth, tenant, search_path) do + user = + if is_nil(tenant), do: get_user(auth), else: "#{get_user(auth)}.#{tenant}" msg = - :pgo_protocol.encode_startup_message([ - {"user", user}, - {"database", auth.database}, - {"application_name", auth.application_name} - ]) + :pgo_protocol.encode_startup_message( + [ + {"user", user}, + {"database", auth.database}, + {"application_name", auth.application_name} + ] ++ if(search_path, do: [{"options", "--search_path=#{search_path}"}], else: []) + ) sock_send(sock, msg) end - @spec sock_send(S.sock(), iodata) :: :ok | {:error, term} + @spec sock_send(Supavisor.sock(), iodata) :: :ok | {:error, term} defp sock_send({mod, sock}, data) do mod.send(sock, data) end - @spec activate(S.sock()) :: :ok | {:error, term} + @spec activate(Supavisor.sock()) :: :ok | {:error, term} defp activate({:gen_tcp, sock}) do :inet.setopts(sock, active: true) end @@ -577,7 +498,7 @@ defmodule Supavisor.DbHandler do end @spec receive_ready_for_query() :: :ok | :timeout_error - defp receive_ready_for_query() do + defp receive_ready_for_query do receive do {_proto, _socket, <>} -> :ok @@ -611,4 +532,158 @@ defmodule Supavisor.DbHandler do :continue end end + + @spec handle_auth_pkts(map(), map(), map()) :: any() + defp handle_auth_pkts(%{tag: :parameter_status, payload: {k, v}}, acc, _), + do: update_in(acc, [:ps], fn ps -> Map.put(ps || %{}, k, v) end) + + defp handle_auth_pkts(%{tag: :ready_for_query, payload: db_state}, acc, _), + do: {:ready_for_query, Map.put(acc, :db_state, db_state)} + + defp handle_auth_pkts(%{tag: :backend_key_data, payload: payload}, acc, data) do + key = self() + conn = %{host: data.auth.host, port: data.auth.port, ip_ver: data.auth.ip_version} + Registry.register(Supavisor.Registry.PoolPids, key, Map.merge(payload, conn)) + 
Logger.debug("DbHandler: Backend #{inspect(key)} data: #{inspect(payload)}") + Map.put(acc, :backend_key_data, payload) + end + + defp handle_auth_pkts(%{payload: {:authentication_sasl_password, methods_b}}, _, data) do + nonce = + case Server.decode_string(methods_b) do + {:ok, req_method, _} -> + Logger.debug("DbHandler: SASL method #{inspect(req_method)}") + nonce = :pgo_scram.get_nonce(16) + user = get_user(data.auth) + client_first = :pgo_scram.get_client_first(user, nonce) + client_first_size = IO.iodata_length(client_first) + + sasl_initial_response = [ + "SCRAM-SHA-256", + 0, + <>, + client_first + ] + + bin = :pgo_protocol.encode_scram_response_message(sasl_initial_response) + :ok = HandlerHelpers.sock_send(data.sock, bin) + nonce + + other -> + Logger.error("DbHandler: Undefined sasl method #{inspect(other)}") + nil + end + + {:authentication_sasl, nonce} + end + + defp handle_auth_pkts( + %{payload: {:authentication_server_first_message, server_first}}, + _, + data + ) + when data.auth.require_user == false do + nonce = data.nonce + server_first_parts = Helpers.parse_server_first(server_first, nonce) + + {client_final_message, server_proof} = + Helpers.get_client_final( + :auth_query, + data.auth.secrets.(), + server_first_parts, + nonce, + data.auth.secrets.().user, + "biws" + ) + + bin = :pgo_protocol.encode_scram_response_message(client_final_message) + :ok = HandlerHelpers.sock_send(data.sock, bin) + + {:authentication_server_first_message, server_proof} + end + + defp handle_auth_pkts( + %{payload: {:authentication_server_first_message, server_first}}, + _, + data + ) do + nonce = data.nonce + server_first_parts = :pgo_scram.parse_server_first(server_first, nonce) + + {client_final_message, server_proof} = + :pgo_scram.get_client_final( + server_first_parts, + nonce, + data.auth.user, + data.auth.secrets.().password + ) + + bin = :pgo_protocol.encode_scram_response_message(client_final_message) + :ok = HandlerHelpers.sock_send(data.sock, bin) + + 
{:authentication_server_first_message, server_proof} + end + + defp handle_auth_pkts( + %{payload: {:authentication_server_final_message, server_final}}, + acc, + _data + ), + do: Map.put(acc, :authentication_server_final_message, server_final) + + defp handle_auth_pkts( + %{payload: :authentication_ok}, + acc, + _data + ), + do: Map.put(acc, :authentication_ok, true) + + defp handle_auth_pkts(%{payload: {:authentication_md5_password, salt}} = dec_pkt, _, data) do + Logger.debug("DbHandler: dec_pkt, #{inspect(dec_pkt, pretty: true)}") + + digest = + if data.auth.method == :password do + Helpers.md5([data.auth.password.(), data.auth.user]) + else + data.auth.secrets.().secret + end + + payload = ["md5", Helpers.md5([digest, salt]), 0] + bin = [?p, <>, payload] + :ok = HandlerHelpers.sock_send(data.sock, bin) + :authentication_md5 + end + + defp handle_auth_pkts(%{tag: :error_response, payload: error}, _acc, _data), + do: {:error_response, error} + + defp handle_auth_pkts(_e, acc, _data), do: acc + + @spec handle_authentication_error(map(), String.t()) :: any() + defp handle_authentication_error(%{proxy: false} = data, reason) do + tenant = Supavisor.tenant(data.id) + + for node <- [node() | Node.list()] do + :erpc.cast(node, fn -> + Cachex.del(Supavisor.Cache, {:secrets, tenant, data.user}) + Cachex.del(Supavisor.Cache, {:secrets_check, tenant, data.user}) + + Registry.dispatch(Supavisor.Registry.TenantClients, data.id, fn entries -> + for {client_handler, _meta} <- entries, + do: send(client_handler, {:disconnect, reason}) + end) + end) + end + + Supavisor.stop(data.id) + end + + defp handle_authentication_error(%{proxy: true}, _reason), do: :ok + + @spec reconnect_timeout(map()) :: pos_integer() + def reconnect_timeout(%{proxy: true}), + do: @reconnect_timeout_proxy + + def reconnect_timeout(_), + do: @reconnect_timeout end diff --git a/lib/supavisor/handler_helpers.ex b/lib/supavisor/handler_helpers.ex index 8dd9ec27..91f8055e 100644 --- 
a/lib/supavisor/handler_helpers.ex +++ b/lib/supavisor/handler_helpers.ex @@ -2,38 +2,33 @@ defmodule Supavisor.HandlerHelpers do @moduledoc false alias Phoenix.PubSub - alias Supavisor, as: S alias Supavisor.Protocol.Server - @spec sock_send(S.sock(), iodata()) :: :ok | {:error, term()} + @spec sock_send(Supavisor.sock(), iodata()) :: :ok | {:error, term()} def sock_send({mod, sock}, data) do mod.send(sock, data) end - @spec sock_close(nil | S.sock()) :: :ok | {:error, term()} + @spec sock_close(Supavisor.sock() | nil | {any(), nil}) :: :ok | {:error, term()} def sock_close(nil), do: :ok + def sock_close({_, nil}), do: :ok - def sock_close({mod, sock}) do - mod.close(sock) - end + def sock_close({mod, sock}), do: mod.close(sock) - @spec setopts(S.sock(), term()) :: :ok | {:error, term()} + @spec setopts(Supavisor.sock(), term()) :: :ok | {:error, term()} def setopts({mod, sock}, opts) do mod = if mod == :gen_tcp, do: :inet, else: mod mod.setopts(sock, opts) end - @spec activate(S.sock()) :: :ok | {:error, term} - def activate({:gen_tcp, sock}) do - :inet.setopts(sock, active: true) - end + @spec active_once(Supavisor.sock()) :: :ok | {:error, term} + def active_once(sock), do: setopts(sock, active: :once) - def activate({:ssl, sock}) do - :ssl.setopts(sock, active: true) - end + @spec activate(Supavisor.sock()) :: :ok | {:error, term} + def activate(sock), do: setopts(sock, active: true) - @spec try_ssl_handshake(S.tcp_sock(), boolean) :: - {:ok, S.sock()} | {:error, term()} + @spec try_ssl_handshake(Supavisor.tcp_sock(), boolean) :: + {:ok, Supavisor.sock()} | {:error, term()} def try_ssl_handshake(sock, true) do case sock_send(sock, Server.ssl_request()) do :ok -> ssl_recv(sock) @@ -43,7 +38,7 @@ defmodule Supavisor.HandlerHelpers do def try_ssl_handshake(sock, false), do: {:ok, sock} - @spec ssl_recv(S.tcp_sock()) :: {:ok, S.ssl_sock()} | {:error, term} + @spec ssl_recv(Supavisor.tcp_sock()) :: {:ok, Supavisor.ssl_sock()} | {:error, term} def 
ssl_recv({:gen_tcp, sock} = s) do case :gen_tcp.recv(sock, 1, 15_000) do {:ok, <>} -> ssl_connect(s) @@ -52,8 +47,8 @@ defmodule Supavisor.HandlerHelpers do end end - @spec ssl_connect(S.tcp_sock(), pos_integer) :: - {:ok, S.ssl_sock()} | {:error, term} + @spec ssl_connect(Supavisor.tcp_sock(), pos_integer) :: + {:ok, Supavisor.ssl_sock()} | {:error, term} def ssl_connect({:gen_tcp, sock}, timeout \\ 5000) do opts = [verify: :verify_none] @@ -63,13 +58,13 @@ defmodule Supavisor.HandlerHelpers do end end - @spec send_error(S.sock(), String.t(), String.t()) :: :ok | {:error, term()} + @spec send_error(Supavisor.sock(), String.t(), String.t()) :: :ok | {:error, term()} def send_error(sock, code, message) do data = Server.error_message(code, message) sock_send(sock, data) end - @spec try_get_sni(S.sock()) :: String.t() | nil + @spec try_get_sni(Supavisor.sock()) :: String.t() | nil def try_get_sni({:ssl, sock}) do case :ssl.connection_information(sock, [:sni_hostname]) do {:ok, [sni_hostname: sni]} -> List.to_string(sni) @@ -106,12 +101,12 @@ defmodule Supavisor.HandlerHelpers do end end - @spec send_cancel_query(non_neg_integer, non_neg_integer) :: :ok | {:errr, term} - def send_cancel_query(pid, key) do + @spec send_cancel_query(non_neg_integer, non_neg_integer, term) :: :ok | {:errr, term} + def send_cancel_query(pid, key, msg \\ :cancel_query) do PubSub.broadcast( Supavisor.PubSub, "cancel_req:#{pid}_#{key}", - :cancel_query + msg ) end @@ -155,7 +150,7 @@ defmodule Supavisor.HandlerHelpers do @spec filter_cidrs(list(), :inet.ip_address() | any()) :: list() def filter_cidrs(allow_list, addr) when is_list(allow_list) and is_tuple(addr) do for range <- allow_list, - range |> InetCidr.parse() |> InetCidr.contains?(addr) do + range |> InetCidr.parse_cidr!() |> InetCidr.contains?(addr) do range end end @@ -164,7 +159,7 @@ defmodule Supavisor.HandlerHelpers do [] end - @spec addr_from_sock(S.sock()) :: {:ok, :inet.ip_address()} | :error + @spec 
addr_from_sock(Supavisor.sock()) :: {:ok, :inet.ip_address()} | :error def addr_from_sock({:gen_tcp, port}) do case :inet.peername(port) do {:ok, {:local, _}} -> diff --git a/lib/supavisor/helpers.ex b/lib/supavisor/helpers.ex index 895a2c88..7cc4364f 100644 --- a/lib/supavisor/helpers.ex +++ b/lib/supavisor/helpers.ex @@ -57,7 +57,9 @@ defmodule Supavisor.Helpers do Postgrex.query(conn, "select version()", []) |> case do {:ok, %{rows: [[version]]}} -> - if !params["require_user"] do + if params["require_user"] do + {:cont, {:ok, version}} + else case get_user_secret(conn, params["auth_query"], user["db_user"]) do {:ok, _} -> {:halt, {:ok, version}} @@ -65,8 +67,6 @@ defmodule Supavisor.Helpers do {:error, reason} -> {:halt, {:error, reason}} end - else - {:cont, {:ok, version}} end {:error, reason} -> @@ -101,9 +101,9 @@ defmodule Supavisor.Helpers do {:error, "There is no user '#{user}' in the database. Please create it or change the user in the config"} - %{columns: colums} -> + %{columns: columns} -> {:error, - "Authentification query returned wrong format. Should be two columns: user and secret, but got: #{inspect(colums)}"} + "Authentication query returned wrong format. 
Should be two columns: user and secret, but got: #{inspect(columns)}"} {:error, reason} -> {:error, reason} @@ -201,6 +201,7 @@ defmodule Supavisor.Helpers do """ @spec detect_ip_version(String.t()) :: :inet | :inet6 def detect_ip_version(host) when is_binary(host) do + Logger.info("Detecting IP version for #{host}") host = String.to_charlist(host) case :inet.gethostbyname(host) do @@ -231,12 +232,12 @@ defmodule Supavisor.Helpers do end @spec downstream_cert() :: Path.t() | nil - def downstream_cert() do + def downstream_cert do Application.get_env(:supavisor, :global_downstream_cert) end @spec downstream_key() :: Path.t() | nil - def downstream_key() do + def downstream_key do Application.get_env(:supavisor, :global_downstream_key) end @@ -327,14 +328,12 @@ defmodule Supavisor.Helpers do @spec rpc(Node.t(), module(), atom(), [any()], non_neg_integer()) :: {:error, any()} | any() def rpc(node, module, function, args, timeout \\ 15_000) do - try do - :erpc.call(node, module, function, args, timeout) - catch - kind, reason -> {:error, {:badrpc, {kind, reason}}} - else - {:EXIT, _} = badrpc -> {:error, {:badrpc, badrpc}} - result -> result - end + :erpc.call(node, module, function, args, timeout) + catch + kind, reason -> {:error, {:badrpc, {kind, reason}}} + else + {:EXIT, _} = badrpc -> {:error, {:badrpc, badrpc}} + result -> result end @doc """ @@ -350,11 +349,31 @@ defmodule Supavisor.Helpers do Process.flag(:max_heap_size, %{size: max_heap_words}) end - @spec set_log_level(atom()) :: :ok - def set_log_level(nil), do: :ok - - def set_log_level(level) when is_atom(level) do + @spec set_log_level(atom()) :: :ok | nil + def set_log_level(level) when level in [:debug, :info, :notice, :warning, :error] do Logger.notice("Setting log level to #{inspect(level)}") Logger.put_process_level(self(), level) end + + def set_log_level(_), do: nil + + @spec peer_ip(:gen_tcp.socket()) :: String.t() + def peer_ip(socket) do + case :inet.peername(socket) do + {:ok, {ip, _port}} -> 
List.to_string(:inet.ntoa(ip)) + _error -> "undefined" + end + end + + @spec controlling_process(Supavisor.sock(), pid) :: :ok | {:error, any()} + def controlling_process({mod, socket}, pid), + do: mod.controlling_process(socket, pid) + + @spec validate_name(String.t()) :: boolean() + def validate_name(name) do + # 1-63 characters, starting with a lowercase letter or underscore, and containing only alphanumeric characters, underscores, and dollar signs. Names with spaces or uppercase letters must be enclosed in double quotes. + String.length(name) <= 63 and + name =~ ~r/^(?:[a-z_][a-z0-9_$ ]*|"[a-zA-Z0-9_$ ]+")$/ and + name != ~s/""/ + end end diff --git a/lib/supavisor/hot_upgrade.ex b/lib/supavisor/hot_upgrade.ex index 682f56c5..7e2f296f 100644 --- a/lib/supavisor/hot_upgrade.ex +++ b/lib/supavisor/hot_upgrade.ex @@ -91,12 +91,12 @@ defmodule Supavisor.HotUpgrade do end end - def reint_funs() do + def reint_funs do reinit_pool_args() reinit_auth_query() end - def reinit_pool_args() do + def reinit_pool_args do for [_tenant, pid, _meta] <- Registry.select(Supavisor.Registry.TenantSups, [ {{:"$1", :"$2", :"$3"}, [], [[:"$1", :"$2", :"$3"]]} @@ -131,7 +131,7 @@ defmodule Supavisor.HotUpgrade do end end - def reinit_auth_query() do + def reinit_auth_query do Supavisor.Cache |> Cachex.stream!() |> Enum.each(fn entry(key: key, value: value) -> @@ -159,12 +159,10 @@ defmodule Supavisor.HotUpgrade do def do_enc(val), do: fn -> val end def get_state(pid) do - try do - {:ok, :sys.get_state(pid)} - catch - type, exception -> - IO.write("Error getting state: #{inspect(exception)}") - {:error, {type, exception}} - end + {:ok, :sys.get_state(pid)} + catch + type, exception -> + IO.write("Error getting state: #{inspect(exception)}") + {:error, {type, exception}} end end diff --git a/lib/supavisor/manager.ex b/lib/supavisor/manager.ex index 1c7c63d8..079e4898 100644 --- a/lib/supavisor/manager.ex +++ b/lib/supavisor/manager.ex @@ -5,7 +5,7 @@ defmodule Supavisor.Manager do alias 
Supavisor.Protocol.Server alias Supavisor.Tenants - alias Supavisor.Helpers, as: H + alias Supavisor.Helpers @check_timeout 120_000 @@ -34,12 +34,12 @@ defmodule Supavisor.Manager do @impl true def init(args) do - H.set_log_level(args.log_level) + Helpers.set_log_level(args.log_level) tid = :ets.new(__MODULE__, [:protected]) [args | _] = Enum.filter(args.replicas, fn e -> e.replica_type == :write end) - {{type, tenant}, user, _mode, db_name} = args.id + {{type, tenant}, user, _mode, db_name, _search_path} = args.id state = %{ id: args.id, @@ -65,7 +65,7 @@ defmodule Supavisor.Manager do # don't limit if max_clients is null {reply, new_state} = - if :ets.info(state.tid, :size) < state.max_clients do + if :ets.info(state.tid, :size) < state.max_clients or Supavisor.mode(state.id) == :session do :ets.insert(state.tid, {Process.monitor(pid), pid, now()}) case state.parameter_status do @@ -135,7 +135,7 @@ defmodule Supavisor.Manager do ## Internal functions - defp check_subscribers() do + defp check_subscribers do Process.send_after( self(), :check_subscribers, @@ -143,15 +143,19 @@ defmodule Supavisor.Manager do ) end - defp now() do + defp now do System.system_time(:second) end @spec check_parameter_status(map, map) :: :ok | {:error, String.t()} defp check_parameter_status(ps, def_ps) do - Enum.find_value(ps, :ok, fn {key, value} -> - if def_ps[key] && def_ps[key] != value do - {:error, "Parameter #{key} changed from #{def_ps[key]} to #{value}"} + Enum.find_value(ps, :ok, fn {key, new_value} -> + case def_ps do + %{^key => old_value} when old_value != new_value -> + {:error, "Parameter #{key} changed from #{old_value} to #{new_value}"} + + _ -> + nil end end) end diff --git a/lib/supavisor/metrics_cleaner.ex b/lib/supavisor/metrics_cleaner.ex new file mode 100644 index 00000000..9e76da9a --- /dev/null +++ b/lib/supavisor/metrics_cleaner.ex @@ -0,0 +1,66 @@ +defmodule Supavisor.MetricsCleaner do + @moduledoc false + + use GenServer + require Logger + + @interval 
:timer.minutes(30) + + def start_link(args), + do: GenServer.start_link(__MODULE__, args, name: __MODULE__) + + def init(_args) do + Logger.info("Starting MetricsCleaner") + {:ok, %{check_ref: check()}} + end + + def handle_info(:check, state) do + Process.cancel_timer(state.check_ref) + + start = System.monotonic_time(:millisecond) + loop_and_cleanup_metrics_table() + exec_time = System.monotonic_time(:millisecond) - start + + if exec_time > :timer.seconds(5), + do: Logger.warning("Metrics check took: #{exec_time} ms") + + {:noreply, %{state | check_ref: check()}} + end + + def handle_info(msg, state) do + Logger.error("Unexpected message: #{inspect(msg)}") + {:noreply, state} + end + + def check, do: Process.send_after(self(), :check, @interval) + + def loop_and_cleanup_metrics_table do + metrics_table = Supavisor.Monitoring.PromEx.Metrics + tenant_registry_table = :syn_registry_by_name_tenants + + fn + {{_, + %{ + type: type, + mode: mode, + user: user, + tenant: tenant, + db_name: db, + search_path: search_path + }} = key, _}, + _ -> + case :ets.lookup(tenant_registry_table, {{type, tenant}, user, mode, db, search_path}) do + [] -> + Logger.warning("Found orphaned metric: #{inspect(key)}") + :ets.delete(metrics_table, key) + + _ -> + nil + end + + _, acc -> + acc + end + |> :ets.foldl(nil, metrics_table) + end +end diff --git a/lib/supavisor/monitoring/osmon.ex b/lib/supavisor/monitoring/osmon.ex index 75226a34..5a7cb4a4 100644 --- a/lib/supavisor/monitoring/osmon.ex +++ b/lib/supavisor/monitoring/osmon.ex @@ -61,7 +61,7 @@ defmodule Supavisor.PromEx.Plugins.OsMon do ) end - def execute_metrics() do + def execute_metrics do execute_metrics(@event_ram_usage, %{ram: ram_usage()}) execute_metrics(@event_cpu_util, %{cpu: cpu_util()}) execute_metrics(@event_cpu_la, cpu_la()) @@ -72,13 +72,13 @@ defmodule Supavisor.PromEx.Plugins.OsMon do end @spec ram_usage() :: float() - def ram_usage() do + def ram_usage do mem = :memsup.get_system_memory_data() 100 - 
mem[:free_memory] / mem[:total_memory] * 100 end @spec cpu_la() :: %{avg1: float(), avg5: float(), avg15: float()} - def cpu_la() do + def cpu_la do %{ avg1: :cpu_sup.avg1() / 256, avg5: :cpu_sup.avg5() / 256, @@ -87,7 +87,7 @@ defmodule Supavisor.PromEx.Plugins.OsMon do end @spec cpu_util() :: float() | {:error, term()} - def cpu_util() do + def cpu_util do :cpu_sup.util() end end diff --git a/lib/supavisor/monitoring/prom_ex.ex b/lib/supavisor/monitoring/prom_ex.ex index 96a0eff3..1d0faa72 100644 --- a/lib/supavisor/monitoring/prom_ex.ex +++ b/lib/supavisor/monitoring/prom_ex.ex @@ -30,15 +30,27 @@ defmodule Supavisor.Monitoring.PromEx do end @spec remove_metrics(S.id()) :: non_neg_integer - def remove_metrics({{type, tenant}, user, mode, db_name}) do - meta = %{tenant: tenant, user: user, mode: mode, type: type, db_name: db_name} + def remove_metrics({{type, tenant}, user, mode, db_name, search_path} = id) do + Logger.debug("Removing metrics for #{inspect(id)}") + + meta = %{ + tenant: tenant, + user: user, + mode: mode, + type: type, + db_name: db_name, + search_path: search_path + } Supavisor.Monitoring.PromEx.Metrics - |> :ets.select_delete([{{{:_, meta}, :_}, [], [true]}]) + |> :ets.select_delete([ + {{{:_, meta}, :_}, [], [true]}, + {{{:_, meta, :_}, :_}, [], [true]} + ]) end @spec set_metrics_tags() :: map() - def set_metrics_tags() do + def set_metrics_tags do [_, host] = node() |> Atom.to_string() |> String.split("@") metrics_tags = %{ @@ -57,7 +69,7 @@ defmodule Supavisor.Monitoring.PromEx do end @spec short_node_id() :: String.t() | nil - def short_node_id() do + def short_node_id do with {:ok, fly_alloc_id} when is_binary(fly_alloc_id) <- Application.fetch_env(:supavisor, :fly_alloc_id), [short_alloc_id, _] <- String.split(fly_alloc_id, "-", parts: 2) do @@ -68,7 +80,7 @@ defmodule Supavisor.Monitoring.PromEx do end @spec get_metrics() :: String.t() - def get_metrics() do + def get_metrics do metrics_tags = case Application.fetch_env(:supavisor, 
:metrics_tags) do :error -> set_metrics_tags() @@ -89,7 +101,7 @@ defmodule Supavisor.Monitoring.PromEx do end @spec do_cache_tenants_metrics() :: list - def do_cache_tenants_metrics() do + def do_cache_tenants_metrics do metrics = get_metrics() |> String.split("\n") pools = @@ -97,7 +109,7 @@ defmodule Supavisor.Monitoring.PromEx do |> Enum.uniq() _ = - Enum.reduce(pools, metrics, fn {{_type, tenant}, _, _, _}, acc -> + Enum.reduce(pools, metrics, fn {{_type, tenant}, _, _, _, _}, acc -> {matched, rest} = Enum.split_with(acc, &String.contains?(&1, "tenant=\"#{tenant}\"")) if matched != [] do @@ -151,7 +163,7 @@ defmodule Supavisor.Monitoring.PromEx do |> String.trim() if value != cleaned do - Logger.error("Tag validation: #{inspect(value)} / #{inspect(cleaned)}") + Logger.warning("Tag validation: #{inspect(value)} / #{inspect(cleaned)}") end "=\"#{cleaned}\"" diff --git a/lib/supavisor/monitoring/telem.ex b/lib/supavisor/monitoring/telem.ex index c983925a..9c814511 100644 --- a/lib/supavisor/monitoring/telem.ex +++ b/lib/supavisor/monitoring/telem.ex @@ -3,66 +3,124 @@ defmodule Supavisor.Monitoring.Telem do require Logger - alias Supavisor, as: S + @metrics_disabled Application.compile_env(:supavisor, :metrics_disabled, false) - @spec network_usage(:client | :db, S.sock(), S.id(), map()) :: {:ok | :error, map()} - def network_usage(type, {mod, socket}, id, stats) do - mod = if mod == :ssl, do: :ssl, else: :inet + defmacro telemetry_execute(event_name, measurements, metadata) do + if not @metrics_disabled do + quote do + :telemetry.execute(unquote(event_name), unquote(measurements), unquote(metadata)) + end + end + end + + defmacro network_usage_disable(do: block) do + if @metrics_disabled do + quote do + {:ok, %{recv_oct: 0, send_oct: 0}} + end + else + block + end + end + + @spec network_usage(:client | :db, Supavisor.sock(), Supavisor.id(), map()) :: + {:ok | :error, map()} + def network_usage(type, {mod, socket}, id, _stats) do + network_usage_disable do + mod 
= if mod == :ssl, do: :ssl, else: :inet - case mod.getstat(socket) do - {:ok, values} -> - values = Map.new(values) - diff = Map.merge(values, stats, fn _, v1, v2 -> v1 - v2 end) + case mod.getstat(socket, [:recv_oct, :send_oct]) do + {:ok, [{:recv_oct, recv_oct}, {:send_oct, send_oct}]} -> + stats = %{ + send_oct: send_oct, + recv_oct: recv_oct + } - {{ptype, tenant}, user, mode, db_name} = id + {{ptype, tenant}, user, mode, db_name, search_path} = id - :telemetry.execute( - [:supavisor, type, :network, :stat], - diff, - %{tenant: tenant, user: user, mode: mode, type: ptype, db_name: db_name} - ) + :telemetry.execute( + [:supavisor, type, :network, :stat], + stats, + %{ + tenant: tenant, + user: user, + mode: mode, + type: ptype, + db_name: db_name, + search_path: search_path + } + ) - {:ok, values} + {:ok, %{}} - {:error, reason} -> - Logger.error("Failed to get socket stats: #{inspect(reason)}") - {:error, stats} + {:error, reason} -> + Logger.error("Failed to get socket stats: #{inspect(reason)}") + {:error, %{}} + end end end - @spec pool_checkout_time(integer(), S.id(), :local | :remote) :: :ok - def pool_checkout_time(time, {{type, tenant}, user, mode, db_name}, same_box) do - :telemetry.execute( + @spec pool_checkout_time(integer(), Supavisor.id(), :local | :remote) :: :ok | nil + def pool_checkout_time(time, {{type, tenant}, user, mode, db_name, search_path}, same_box) do + telemetry_execute( [:supavisor, :pool, :checkout, :stop, same_box], %{duration: time}, - %{tenant: tenant, user: user, mode: mode, type: type, db_name: db_name} + %{ + tenant: tenant, + user: user, + mode: mode, + type: type, + db_name: db_name, + search_path: search_path + } ) end - @spec client_query_time(integer(), S.id()) :: :ok - def client_query_time(start, {{type, tenant}, user, mode, db_name}) do - :telemetry.execute( + @spec client_query_time(integer(), Supavisor.id()) :: :ok | nil + def client_query_time(start, {{type, tenant}, user, mode, db_name, search_path}) do + 
telemetry_execute( [:supavisor, :client, :query, :stop], %{duration: System.monotonic_time() - start}, - %{tenant: tenant, user: user, mode: mode, type: type, db_name: db_name} + %{ + tenant: tenant, + user: user, + mode: mode, + type: type, + db_name: db_name, + search_path: search_path + } ) end - @spec client_connection_time(integer(), S.id()) :: :ok - def client_connection_time(start, {{type, tenant}, user, mode, db_name}) do - :telemetry.execute( + @spec client_connection_time(integer(), Supavisor.id()) :: :ok | nil + def client_connection_time(start, {{type, tenant}, user, mode, db_name, search_path}) do + telemetry_execute( [:supavisor, :client, :connection, :stop], %{duration: System.monotonic_time() - start}, - %{tenant: tenant, user: user, mode: mode, type: type, db_name: db_name} + %{ + tenant: tenant, + user: user, + mode: mode, + type: type, + db_name: db_name, + search_path: search_path + } ) end - @spec client_join(:ok | :fail, S.id() | any()) :: :ok - def client_join(status, {{type, tenant}, user, mode, db_name}) do - :telemetry.execute( + @spec client_join(:ok | :fail, Supavisor.id() | any()) :: :ok | nil + def client_join(status, {{type, tenant}, user, mode, db_name, search_path}) do + telemetry_execute( [:supavisor, :client, :joins, status], %{}, - %{tenant: tenant, user: user, mode: mode, type: type, db_name: db_name} + %{ + tenant: tenant, + user: user, + mode: mode, + type: type, + db_name: db_name, + search_path: search_path + } ) end @@ -73,13 +131,20 @@ defmodule Supavisor.Monitoring.Telem do @spec handler_action( :client_handler | :db_handler, :started | :stopped | :db_connection, - S.id() - ) :: :ok - def handler_action(handler, action, {{type, tenant}, user, mode, db_name}) do - :telemetry.execute( + Supavisor.id() + ) :: :ok | nil + def handler_action(handler, action, {{type, tenant}, user, mode, db_name, search_path}) do + telemetry_execute( [:supavisor, handler, action, :all], %{}, - %{tenant: tenant, user: user, mode: mode, type: 
type, db_name: db_name} + %{ + tenant: tenant, + user: user, + mode: mode, + type: type, + db_name: db_name, + search_path: search_path + } ) end diff --git a/lib/supavisor/monitoring/tenant.ex b/lib/supavisor/monitoring/tenant.ex index ff2309bc..d129b343 100644 --- a/lib/supavisor/monitoring/tenant.ex +++ b/lib/supavisor/monitoring/tenant.ex @@ -6,7 +6,7 @@ defmodule Supavisor.PromEx.Plugins.Tenant do alias Supavisor, as: S - @tags [:tenant, :user, :mode, :type, :db_name] + @tags [:tenant, :user, :mode, :type, :db_name, :search_path] @impl true def polling_metrics(opts) do @@ -26,7 +26,13 @@ defmodule Supavisor.PromEx.Plugins.Tenant do ] end - def client_metrics() do + defmodule Buckets do + @moduledoc false + use Peep.Buckets.Custom, + buckets: [1, 5, 10, 100, 1_000, 5_000, 10_000] + end + + defp client_metrics do Event.build( :supavisor_tenant_client_event_metrics, [ @@ -36,9 +42,9 @@ defmodule Supavisor.PromEx.Plugins.Tenant do measurement: :duration, description: "Duration of the checkout local process in the tenant db pool.", tags: @tags, - unit: {:microsecond, :millisecond}, + unit: {:native, :millisecond}, reporter_options: [ - buckets: [1, 5, 10, 100, 1_000, 5_000, 10_000] + peep_bucket_calculator: Buckets ] ), distribution( @@ -47,9 +53,9 @@ defmodule Supavisor.PromEx.Plugins.Tenant do measurement: :duration, description: "Duration of the checkout remote process in the tenant db pool.", tags: @tags, - unit: {:microsecond, :millisecond}, + unit: {:native, :millisecond}, reporter_options: [ - buckets: [1, 5, 10, 100, 1_000, 5_000, 10_000] + peep_bucket_calculator: Buckets ] ), distribution( @@ -60,7 +66,7 @@ defmodule Supavisor.PromEx.Plugins.Tenant do tags: @tags, unit: {:native, :millisecond}, reporter_options: [ - buckets: [1, 5, 10, 100, 1_000, 5_000, 10_000] + peep_bucket_calculator: Buckets ] ), distribution( @@ -71,22 +77,28 @@ defmodule Supavisor.PromEx.Plugins.Tenant do tags: @tags, unit: {:native, :millisecond}, reporter_options: [ - buckets: [1, 
5, 10, 100, 1_000, 5_000, 10_000] + peep_bucket_calculator: Buckets ] ), - sum( + last_value( [:supavisor, :client, :network, :recv], event_name: [:supavisor, :client, :network, :stat], measurement: :recv_oct, description: "The total number of bytes received by clients.", - tags: @tags + tags: @tags, + reporter_options: [ + prometheus_type: :sum + ] ), - sum( + last_value( [:supavisor, :client, :network, :send], event_name: [:supavisor, :client, :network, :stat], measurement: :send_oct, description: "The total number of bytes sent by clients.", - tags: @tags + tags: @tags, + reporter_options: [ + prometheus_type: :sum + ] ), counter( [:supavisor, :client, :queries, :count], @@ -117,52 +129,58 @@ defmodule Supavisor.PromEx.Plugins.Tenant do event_name: [:supavisor, :client_handler, :stopped, :all], description: "The total number of stopped client_handler.", tags: @tags - ), - counter( - [:supavisor, :db_handler, :started, :count], - event_name: [:supavisor, :db_handler, :started, :all], - description: "The total number of created db_handler.", - tags: @tags - ), - counter( - [:supavisor, :db_handler, :stopped, :count], - event_name: [:supavisor, :db_handler, :stopped, :all], - description: "The total number of stopped db_handler.", - tags: @tags - ), - counter( - [:supavisor, :db_handler, :db_connection, :count], - event_name: [:supavisor, :db_handler, :db_connection, :all], - description: "The total number of database connections by db_handler.", - tags: @tags ) ] ) end - def db_metrics() do + defp db_metrics do Event.build( :supavisor_tenant_db_event_metrics, [ - sum( + last_value( [:supavisor, :db, :network, :recv], event_name: [:supavisor, :db, :network, :stat], measurement: :recv_oct, description: "The total number of bytes received by db process", - tags: @tags + tags: @tags, + reporter_options: [ + prometheus_type: :sum + ] ), - sum( + last_value( [:supavisor, :db, :network, :send], event_name: [:supavisor, :db, :network, :stat], measurement: :send_oct, 
description: "The total number of bytes sent by db process", + tags: @tags, + reporter_options: [ + prometheus_type: :sum + ] + ), + counter( + [:supavisor, :db_handler, :started, :count], + event_name: [:supavisor, :db_handler, :started, :all], + description: "The total number of created db_handler.", + tags: @tags + ), + counter( + [:supavisor, :db_handler, :stopped, :count], + event_name: [:supavisor, :db_handler, :stopped, :all], + description: "The total number of stopped db_handler.", + tags: @tags + ), + counter( + [:supavisor, :db_handler, :db_connection, :count], + event_name: [:supavisor, :db_handler, :db_connection, :all], + description: "The total number of database connections by db_handler.", tags: @tags ) ] ) end - def concurrent_connections(poll_rate) do + defp concurrent_connections(poll_rate) do Polling.build( :supavisor_concurrent_connections, poll_rate, @@ -179,22 +197,29 @@ defmodule Supavisor.PromEx.Plugins.Tenant do ) end - def execute_tenant_metrics() do + def execute_tenant_metrics do Registry.select(Supavisor.Registry.TenantClients, [{{:"$1", :_, :_}, [], [:"$1"]}]) |> Enum.frequencies() |> Enum.each(&emit_telemetry_for_tenant/1) end @spec emit_telemetry_for_tenant({S.id(), non_neg_integer()}) :: :ok - def emit_telemetry_for_tenant({{{type, tenant}, user, mode, db_name}, count}) do + def emit_telemetry_for_tenant({{{type, tenant}, user, mode, db_name, search_path}, count}) do :telemetry.execute( [:supavisor, :connections], %{active: count}, - %{tenant: tenant, user: user, mode: mode, type: type, db_name: db_name} + %{ + tenant: tenant, + user: user, + mode: mode, + type: type, + db_name: db_name, + search_path: search_path + } ) end - def concurrent_tenants(poll_rate) do + defp concurrent_tenants(poll_rate) do Polling.build( :supavisor_concurrent_tenants, poll_rate, @@ -210,7 +235,7 @@ defmodule Supavisor.PromEx.Plugins.Tenant do ) end - def execute_conn_tenants_metrics() do + def execute_conn_tenants_metrics do num = 
Registry.select(Supavisor.Registry.TenantSups, [{{:"$1", :_, :_}, [], [:"$1"]}]) |> Enum.uniq() diff --git a/lib/supavisor/native_handler.ex b/lib/supavisor/native_handler.ex index a02255ef..d9a13da5 100644 --- a/lib/supavisor/native_handler.ex +++ b/lib/supavisor/native_handler.ex @@ -1,16 +1,19 @@ defmodule Supavisor.NativeHandler do @moduledoc false + use GenServer + @behaviour :ranch_protocol require Logger + alias Supavisor, as: S - alias Supavisor.Helpers, as: H alias Supavisor.HandlerHelpers, as: HH + alias Supavisor.Helpers, as: H alias Supavisor.{Protocol.Server, Tenants} @impl true - def start_link(ref, _sock, transport, opts) do + def start_link(ref, transport, opts) do pid = :proc_lib.spawn_link(__MODULE__, :init, [ref, transport, opts]) {:ok, pid} end @@ -145,11 +148,11 @@ defmodule Supavisor.NativeHandler do db_name: db_name ) - id = Supavisor.id(ext_id, user, :native, :native, db_name) + id = Supavisor.id(ext_id, user, :native, :native, db_name, nil) Registry.register(Supavisor.Registry.TenantClients, id, []) payload = - if !!hello.payload["user"] do + if hello.payload["user"] do %{hello.payload | "user" => user} else hello.payload @@ -161,7 +164,12 @@ defmodule Supavisor.NativeHandler do {:ok, addr} = HH.addr_from_sock(sock) - unless HH.filter_cidrs(tenant.allow_list, addr) == [] do + if HH.filter_cidrs(tenant.allow_list, addr) == [] do + message = "Address not in tenant allow_list: " <> inspect(addr) + Logger.error(message) + :ok = HH.send_error(sock, "XX000", message) + {:stop, :normal, state} + else case connect_local(host, port, payload, ip_ver, state.ssl) do {:ok, db_sock} -> auth = %{host: host, port: port, ip_ver: ip_ver} @@ -171,11 +179,6 @@ defmodule Supavisor.NativeHandler do Logger.error("Error connecting to tenant db: #{inspect(reason)}") {:stop, :normal, state} end - else - message = "Address not in tenant allow_list: " <> inspect(addr) - Logger.error(message) - :ok = HH.send_error(sock, "XX000", message) - {:stop, :normal, state} end _ 
-> diff --git a/lib/supavisor/pg_parser.ex b/lib/supavisor/pg_parser.ex index 961482a9..87640090 100644 --- a/lib/supavisor/pg_parser.ex +++ b/lib/supavisor/pg_parser.ex @@ -1,4 +1,6 @@ defmodule Supavisor.PgParser do + @moduledoc false + use Rustler, otp_app: :supavisor, crate: "pgparser" # When your NIF is loaded, it will override this function. diff --git a/lib/supavisor/protocol/client.ex b/lib/supavisor/protocol/client.ex index b9a3c966..c13bd882 100644 --- a/lib/supavisor/protocol/client.ex +++ b/lib/supavisor/protocol/client.ex @@ -1,9 +1,12 @@ defmodule Supavisor.Protocol.Client do + @moduledoc false + require Logger @pkt_header_size 5 defmodule Pkt do + @moduledoc false defstruct([:tag, :len, :payload, :bin]) @type t :: %Pkt{ @@ -103,22 +106,18 @@ defmodule Supavisor.Protocol.Client do end def decode_payload(:simple_query, payload) do - case String.split(payload, <<0>>) do + case :binary.split(payload, <<0>>) do [query, ""] -> query _ -> :undefined end end + def decode_payload(:parse_message, <<0>>), do: :undefined + def decode_payload(:parse_message, payload) do - case String.split(payload, <<0>>) do - [""] -> - :undefined - - other -> - case Enum.filter(other, &(&1 != "")) do - [sql] -> sql - message -> message - end + case :binary.split(payload, <<0>>, [:global, :trim_all]) do + [sql] -> sql + message -> message end end @@ -164,7 +163,7 @@ defmodule Supavisor.Protocol.Client do :undef end - def parse_msg_sel_1() do + def parse_msg_sel_1 do <<80, 0, 0, 0, 16, 0, 115, 101, 108, 101, 99, 116, 32, 49, 0, 0, 0, 66, 0, 0, 0, 12, 0, 0, 0, 0, 0, 0, 0, 0, 68, 0, 0, 0, 6, 80, 0, 69, 0, 0, 0, 9, 0, 0, 0, 0, 200, 83, 0, 0, 0, 4>> end diff --git a/lib/supavisor/protocol/server.ex b/lib/supavisor/protocol/server.ex index ac96d571..9a865134 100644 --- a/lib/supavisor/protocol/server.ex +++ b/lib/supavisor/protocol/server.ex @@ -15,6 +15,7 @@ defmodule Supavisor.Protocol.Server do @scram_request <> @msg_cancel_header <<16::32, 1234::16, 5678::16>> @application_name <> 
+ @terminate_message <> defmodule Pkt do @moduledoc "Representing a packet structure with tag, length, and payload fields." @@ -172,7 +173,7 @@ defmodule Supavisor.Protocol.Server do # https://www.postgresql.org/docs/current/protocol-error-fields.html def decode_payload(:error_response, payload) do - String.split(payload, <<0>>, trim: true) + :binary.split(payload, <<0>>, [:global, :trim_all]) end def decode_payload( @@ -195,7 +196,7 @@ defmodule Supavisor.Protocol.Server do end def decode_payload(:password_message, "md5" <> _ = bin) do - case String.split(bin, <<0>>) do + case :binary.split(bin, <<0>>) do [digest, ""] -> {:md5, digest} _ -> :undefined end @@ -275,7 +276,7 @@ defmodule Supavisor.Protocol.Server do end @spec scram_request() :: iodata - def scram_request() do + def scram_request do @scram_request end @@ -317,17 +318,23 @@ defmodule Supavisor.Protocol.Server do [<>, message] end + @spec encode_error_message(list()) :: iodata() + def encode_error_message(message) when is_list(message) do + message = Enum.join(message, <<0>>) <> <<0, 0>> + [<>, message] + end + def decode_parameter_description("", acc), do: Enum.reverse(acc) def decode_parameter_description(<>, acc) do decode_parameter_description(rest, [oid | acc]) end - def flush() do + def flush do <> end - def sync() do + def sync do <> end @@ -337,7 +344,7 @@ defmodule Supavisor.Protocol.Server do [<>, payload] end - def test_extended_query() do + def test_extended_query do [ encode("select * from todos where id = 40;"), [<<68, 0, 0, 0, 6, 83>>, [], <<0>>], @@ -345,13 +352,13 @@ defmodule Supavisor.Protocol.Server do ] end - def select_1_response() do + def select_1_response do <<84, 0, 0, 0, 33, 0, 1, 63, 99, 111, 108, 117, 109, 110, 63, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 0, 4, 255, 255, 255, 255, 0, 0, 68, 0, 0, 0, 11, 0, 1, 0, 0, 0, 1, 49, 67, 0, 0, 0, 13, 83, 69, 76, 69, 67, 84, 32, 49, 0, 90, 0, 0, 0, 5, 73>> end - def authentication_ok() do + def authentication_ok do @authentication_ok end @@ 
-370,7 +377,7 @@ defmodule Supavisor.Protocol.Server do end @spec backend_key_data() :: {iodata(), binary} - def backend_key_data() do + def backend_key_data do pid = System.unique_integer([:positive, :monotonic]) key = :crypto.strong_rand_bytes(4) payload = <> @@ -379,13 +386,13 @@ defmodule Supavisor.Protocol.Server do end @spec ready_for_query() :: binary() - def ready_for_query() do + def ready_for_query do @ready_for_query end # SSLRequest message @spec ssl_request() :: binary() - def ssl_request() do + def ssl_request do @ssl_request end @@ -463,5 +470,8 @@ defmodule Supavisor.Protocol.Server do end @spec application_name() :: binary - def application_name(), do: @application_name + def application_name, do: @application_name + + @spec terminate_message() :: binary + def terminate_message(), do: @terminate_message end diff --git a/lib/supavisor/secret_checker.ex b/lib/supavisor/secret_checker.ex new file mode 100644 index 00000000..78e9e607 --- /dev/null +++ b/lib/supavisor/secret_checker.ex @@ -0,0 +1,114 @@ +defmodule Supavisor.SecretChecker do + @moduledoc false + + use GenServer + require Logger + + alias Supavisor.Helpers + + @interval :timer.seconds(15) + + def start_link(args) do + name = {:via, Registry, {Supavisor.Registry.Tenants, {:secret_checker, args.id}}} + + GenServer.start_link(__MODULE__, args, name: name) + end + + def init(args) do + Logger.debug("SecretChecker: Starting secret checker") + tenant = Supavisor.tenant(args.id) + + %{auth: auth, user: user} = Enum.find(args.replicas, fn e -> e.replica_type == :write end) + + state = %{ + tenant: tenant, + auth: auth, + user: user, + key: {:secrets, tenant, user}, + ttl: args[:ttl] || :timer.hours(24), + conn: nil, + check_ref: check() + } + + Logger.metadata(project: tenant, user: user) + {:ok, state, {:continue, :init_conn}} + end + + def handle_continue(:init_conn, %{auth: auth} = state) do + ssl_opts = + if auth.upstream_ssl and auth.upstream_verify == "peer" do + [ + {:verify, 
:verify_peer}, + {:cacerts, [Helpers.upstream_cert(auth.upstream_tls_ca)]}, + {:server_name_indication, auth.host}, + {:customize_hostname_check, [{:match_fun, fn _, _ -> true end}]} + ] + end + + {:ok, conn} = + Postgrex.start_link( + hostname: auth.host, + port: auth.port, + database: auth.database, + password: auth.password.(), + username: auth.user, + parameters: [application_name: "Supavisor auth_query"], + ssl: auth.upstream_ssl, + socket_options: [ + auth.ip_version + ], + queue_target: 1_000, + queue_interval: 5_000, + ssl_opts: ssl_opts || [] + ) + + # kill the postgrex connection if the current process exits unexpectedly + Process.link(conn) + {:noreply, %{state | conn: conn}} + end + + def handle_info(:check, state) do + check_secrets(state) + {:noreply, %{state | check_ref: check()}} + end + + def handle_info(msg, state) do + Logger.error("Unexpected message: #{inspect(msg)}") + {:noreply, state} + end + + def terminate(_, state) do + :gen_statem.stop(state.conn) + :ok + end + + def check(interval \\ @interval), + do: Process.send_after(self(), :check, interval) + + def check_secrets(%{auth: auth, user: user, conn: conn} = state) do + case Helpers.get_user_secret(conn, auth.auth_query, user) do + {:ok, secret} -> + method = if secret.digest == :md5, do: :auth_query_md5, else: :auth_query + secrets = Map.put(secret, :alias, auth.alias) + + update_cache = + case Cachex.get(Supavisor.Cache, state.key) do + {:ok, {:cached, {_, {old_method, old_secrets}}}} -> + method != old_method or secrets != old_secrets.() + + other -> + Logger.error("Failed to get cache: #{inspect(other)}") + true + end + + if update_cache do + Logger.info("Secrets changed or not present, updating cache") + value = {:ok, {method, fn -> secrets end}} + Cachex.put(Supavisor.Cache, state.key, {:cached, value}, expire: :timer.hours(24)) + end + + other -> + Logger.error("Failed to get secret: #{inspect(other)}") + end + end +end diff --git a/lib/supavisor/syn_handler.ex 
b/lib/supavisor/syn_handler.ex index 9ab5cf75..d4d2986b 100644 --- a/lib/supavisor/syn_handler.ex +++ b/lib/supavisor/syn_handler.ex @@ -2,22 +2,45 @@ defmodule Supavisor.SynHandler do @moduledoc """ Custom defined Syn's callbacks """ + + @behaviour :syn_event_handler + require Logger + alias Supavisor.Monitoring.PromEx + @impl true def on_process_unregistered( :tenants, - {{_type, _tenant}, _user, _mode, _db_name} = id, + {{_type, _tenant}, _user, _mode, _db_name, _search_path} = id, _pid, - _meta, + meta, reason ) do Logger.debug("Process unregistered: #{inspect(id)} #{inspect(reason)}") + case meta do + %{port: port, listener: listener} -> + try do + :ranch.stop_listener(id) + + Logger.notice( + "Stopped listener #{inspect(id)} on port #{inspect(port)} listener #{inspect(listener)}" + ) + rescue + exception -> + Logger.error("Failed to stop listener #{inspect(id)} #{Exception.message(exception)}") + end + + _ -> + nil + end + # remove all Prometheus metrics for the specified tenant PromEx.remove_metrics(id) end + @impl true def resolve_registry_conflict( :tenants, id, diff --git a/lib/supavisor/tenant_supervisor.ex b/lib/supavisor/tenant_supervisor.ex index c3b8c3a9..9a8739aa 100644 --- a/lib/supavisor/tenant_supervisor.ex +++ b/lib/supavisor/tenant_supervisor.ex @@ -2,7 +2,17 @@ defmodule Supavisor.TenantSupervisor do @moduledoc false use Supervisor + require Logger alias Supavisor.Manager + alias Supavisor.SecretChecker + + def start_link(%{replicas: [%{mode: mode} = single]} = args) + when mode in [:transaction, :session] do + {:ok, meta} = Supavisor.start_local_server(single) + Logger.info("Starting ranch instance #{inspect(meta)} for #{inspect(args.id)}") + name = {:via, :syn, {:tenants, args.id, meta}} + Supervisor.start_link(__MODULE__, args, name: name) + end def start_link(args) do name = {:via, :syn, {:tenants, args.id}} @@ -24,10 +34,10 @@ defmodule Supavisor.TenantSupervisor do } end) - children = [{Manager, args} | pools] + children = [{Manager, 
args}, {SecretChecker, args} | pools] - {{type, tenant}, user, mode, db_name} = args.id - map_id = %{user: user, mode: mode, type: type, db_name: db_name} + {{type, tenant}, user, mode, db_name, search_path} = args.id + map_id = %{user: user, mode: mode, type: type, db_name: db_name, search_path: search_path} Registry.register(Supavisor.Registry.TenantSups, tenant, map_id) Supervisor.init(children, @@ -57,6 +67,7 @@ defmodule Supavisor.TenantSupervisor do # end {size, overflow} = {1, args.pool_size} + # {size, overflow} = {args.pool_size, 0} [ name: {:via, Registry, {Supavisor.Registry.Tenants, id, args.replica_type}}, diff --git a/lib/supavisor/tenants.ex b/lib/supavisor/tenants.ex index 26095c67..f8a8e05c 100644 --- a/lib/supavisor/tenants.ex +++ b/lib/supavisor/tenants.ex @@ -6,10 +6,10 @@ defmodule Supavisor.Tenants do import Ecto.Query, warn: false alias Supavisor.Repo - alias Supavisor.Tenants.Tenant - alias Supavisor.Tenants.User alias Supavisor.Tenants.Cluster alias Supavisor.Tenants.ClusterTenants + alias Supavisor.Tenants.Tenant + alias Supavisor.Tenants.User @doc """ Returns the list of tenants. 
@@ -141,6 +141,18 @@ defmodule Supavisor.Tenants do ) end + def get_pool_config_cache(external_id, user, ttl \\ :timer.hours(24)) do + ttl = if is_nil(ttl), do: :timer.hours(24), else: ttl + cache_key = {:pool_config_cache, external_id, user} + + case Cachex.fetch(Supavisor.Cache, cache_key, fn _key -> + {:commit, {:cached, get_pool_config(external_id, user)}, ttl: ttl} + end) do + {_, {:cached, value}} -> value + {_, {:cached, value}, _} -> value + end + end + @spec get_cluster_config(String.t(), String.t()) :: [ClusterTenants.t()] | {:error, any()} def get_cluster_config(external_id, user) do case Repo.all(ClusterTenants, cluster_alias: external_id) do diff --git a/lib/supavisor/tenants/cluster.ex b/lib/supavisor/tenants/cluster.ex index eb7e5de6..b4ad69f2 100644 --- a/lib/supavisor/tenants/cluster.ex +++ b/lib/supavisor/tenants/cluster.ex @@ -1,4 +1,6 @@ defmodule Supavisor.Tenants.Cluster do + @moduledoc false + use Ecto.Schema import Ecto.Changeset alias Supavisor.Tenants.ClusterTenants diff --git a/lib/supavisor/tenants/cluster_tenants.ex b/lib/supavisor/tenants/cluster_tenants.ex index 10c57759..0abfe9ca 100644 --- a/lib/supavisor/tenants/cluster_tenants.ex +++ b/lib/supavisor/tenants/cluster_tenants.ex @@ -1,8 +1,12 @@ defmodule Supavisor.Tenants.ClusterTenants do + @moduledoc false + use Ecto.Schema + import Ecto.Changeset - alias Supavisor.Tenants.Tenant + alias Supavisor.Tenants.Cluster + alias Supavisor.Tenants.Tenant @type t :: %__MODULE__{} diff --git a/lib/supavisor/tenants/tenant.ex b/lib/supavisor/tenants/tenant.ex index cf1941e6..a95d79d0 100644 --- a/lib/supavisor/tenants/tenant.ex +++ b/lib/supavisor/tenants/tenant.ex @@ -31,6 +31,7 @@ defmodule Supavisor.Tenants.Tenant do field(:client_idle_timeout, :integer, default: 0) field(:client_heartbeat_interval, :integer, default: 60) field(:allow_list, {:array, :string}, default: ["0.0.0.0/0", "::/0"]) + field(:availability_zone, :string) has_many(:users, User, foreign_key: :tenant_external_id, @@ 
-63,7 +64,8 @@ defmodule Supavisor.Tenants.Tenant do :default_max_clients, :client_idle_timeout, :client_heartbeat_interval, - :allow_list + :allow_list, + :availability_zone ]) |> check_constraint(:upstream_ssl, name: :upstream_constraints, prefix: "_supavisor") |> check_constraint(:upstream_verify, name: :upstream_constraints, prefix: "_supavisor") @@ -121,12 +123,6 @@ defmodule Supavisor.Tenants.Tenant do end defp valid_range?(range) do - try do - InetCidr.parse(range) - true - rescue - _e -> - false - end + match?({:ok, _}, InetCidr.parse_cidr(range)) end end diff --git a/lib/supavisor/tenants_metrics.ex b/lib/supavisor/tenants_metrics.ex index 6c2605e6..a3cf43ad 100644 --- a/lib/supavisor/tenants_metrics.ex +++ b/lib/supavisor/tenants_metrics.ex @@ -8,7 +8,12 @@ defmodule Supavisor.TenantsMetrics do @check_timeout 10_000 def start_link(args) do - GenServer.start_link(__MODULE__, args, name: __MODULE__) + GenServer.start_link(__MODULE__, args, + name: __MODULE__, + spawn_opt: [ + priority: :low + ] + ) end ## Callbacks @@ -26,7 +31,7 @@ defmodule Supavisor.TenantsMetrics do active_pools = PromEx.do_cache_tenants_metrics() |> MapSet.new() MapSet.difference(state.pools, active_pools) - |> Enum.each(fn {{_type, tenant}, _, _, _} = pool -> + |> Enum.each(fn {{_type, tenant}, _, _, _, _} = pool -> Logger.debug("Removing cached metrics for #{inspect(pool)}") Cachex.del(Supavisor.Cache, {:metrics, tenant}) end) @@ -36,7 +41,7 @@ defmodule Supavisor.TenantsMetrics do ## Internal functions - defp check_metrics() do + defp check_metrics do Process.send_after( self(), :check_metrics, diff --git a/lib/supavisor_web.ex b/lib/supavisor_web.ex index 3db43269..66000550 100644 --- a/lib/supavisor_web.ex +++ b/lib/supavisor_web.ex @@ -32,37 +32,10 @@ defmodule SupavisorWeb do root: "lib/supavisor_web/templates", namespace: SupavisorWeb - # Import convenience functions from controllers - import Phoenix.Controller, - only: [get_flash: 1, get_flash: 2, view_module: 1, 
view_template: 1] - - # Include shared imports and aliases for views - unquote(view_helpers()) - end - end - - def live_view do - quote do - use Phoenix.LiveView, - layout: {SupavisorWeb.LayoutView, "live.html"} - - unquote(view_helpers()) - end - end - - def live_component do - quote do - use Phoenix.LiveComponent - - unquote(view_helpers()) - end - end - - def component do - quote do - use Phoenix.Component + # Import basic rendering functionality (render, render_layout, etc) + import Phoenix.View - unquote(view_helpers()) + import SupavisorWeb.ErrorHelpers end end @@ -82,22 +55,6 @@ defmodule SupavisorWeb do end end - defp view_helpers do - quote do - # Use all HTML functionality (forms, tags, etc) - use Phoenix.HTML - - # Import LiveView and .heex helpers (live_render, live_patch, <.form>, etc) - import Phoenix.Component - - # Import basic rendering functionality (render, render_layout, etc) - import Phoenix.View - - import SupavisorWeb.ErrorHelpers - alias SupavisorWeb.Router.Helpers, as: Routes - end - end - @doc """ When used, dispatch to the appropriate controller/view/etc. 
""" diff --git a/lib/supavisor_web/api_spec.ex b/lib/supavisor_web/api_spec.ex index a86a089f..948fa947 100644 --- a/lib/supavisor_web/api_spec.ex +++ b/lib/supavisor_web/api_spec.ex @@ -4,8 +4,8 @@ defmodule SupavisorWeb.ApiSpec do alias OpenApiSpex.Info alias OpenApiSpex.OpenApi alias OpenApiSpex.Paths - alias OpenApiSpex.Server alias OpenApiSpex.SecurityScheme + alias OpenApiSpex.Server alias SupavisorWeb.Endpoint alias SupavisorWeb.Router diff --git a/lib/supavisor_web/controllers/metrics_controller.ex b/lib/supavisor_web/controllers/metrics_controller.ex index def7b538..cd34d67a 100644 --- a/lib/supavisor_web/controllers/metrics_controller.ex +++ b/lib/supavisor_web/controllers/metrics_controller.ex @@ -27,7 +27,7 @@ defmodule SupavisorWeb.MetricsController do end @spec fetch_cluster_metrics() :: String.t() - def fetch_cluster_metrics() do + def fetch_cluster_metrics do Node.list() |> Task.async_stream(&fetch_node_metrics/1, timeout: :infinity) |> Enum.reduce(PromEx.get_metrics(), &merge_node_metrics/2) diff --git a/lib/supavisor_web/controllers/tenant_controller.ex b/lib/supavisor_web/controllers/tenant_controller.ex index 6c7c82e7..4cdd14e6 100644 --- a/lib/supavisor_web/controllers/tenant_controller.ex +++ b/lib/supavisor_web/controllers/tenant_controller.ex @@ -4,17 +4,21 @@ defmodule SupavisorWeb.TenantController do require Logger - alias Supavisor.Helpers, as: H - alias Supavisor.{Tenants, Repo} + alias Supavisor.{ + Helpers, + Repo, + Tenants + } + alias Tenants.Tenant, as: TenantModel alias SupavisorWeb.OpenApiSchemas.{ + Created, + Empty, + NotFound, Tenant, - TenantList, TenantCreate, - NotFound, - Created, - Empty + TenantList } action_fallback(SupavisorWeb.FallbackController) @@ -89,12 +93,12 @@ defmodule SupavisorWeb.TenantController do } ) - # conver cert to pem format + # convert cert to pem format def update(conn, %{ "external_id" => id, "tenant" => %{"upstream_tls_ca" => "-----BEGIN" <> _ = upstream_tls_ca} = tenant_params }) do - case 
H.cert_to_bin(upstream_tls_ca) do + case Helpers.cert_to_bin(upstream_tls_ca) do {:ok, bin} -> update(conn, %{ "external_id" => id, @@ -112,7 +116,7 @@ defmodule SupavisorWeb.TenantController do def update(conn, %{"external_id" => id, "tenant" => params}) do Logger.info("Delete cache dist #{id}: #{inspect(Supavisor.del_all_cache_dist(id))}") - cert = H.upstream_cert(params["upstream_tls_ca"]) + cert = Helpers.upstream_cert(params["upstream_tls_ca"]) if params["upstream_ssl"] && params["upstream_verify"] == "peer" && !cert do conn @@ -123,7 +127,7 @@ defmodule SupavisorWeb.TenantController do else case Tenants.get_tenant_by_external_id(id) do nil -> - case H.check_creds_get_ver(params) do + case Helpers.check_creds_get_ver(params) do {:error, reason} -> conn |> put_status(400) diff --git a/lib/supavisor_web/endpoint.ex b/lib/supavisor_web/endpoint.ex index 2f2d2b30..92f8efc6 100644 --- a/lib/supavisor_web/endpoint.ex +++ b/lib/supavisor_web/endpoint.ex @@ -12,24 +12,6 @@ defmodule SupavisorWeb.Endpoint do socket "/live", Phoenix.LiveView.Socket, websocket: [connect_info: [session: @session_options]] - # Serve at "/" the static files from "priv/static" directory. - # - # You should set gzip to true if you are running phx.digest - # when deploying your static files in production. - plug Plug.Static, - at: "/", - from: :supavisor, - gzip: false, - only: ~w(assets fonts images favicon-32x32.png robots.txt) - - # Code reloading can be explicitly enabled under the - # :code_reloader configuration of your endpoint. - if code_reloading? 
do - socket "/phoenix/live_reload/socket", Phoenix.LiveReloader.Socket - plug Phoenix.LiveReloader - plug Phoenix.CodeReloader - end - plug Phoenix.LiveDashboard.RequestLogger, param_key: "request_logger", cookie_key: "request_logger" diff --git a/lib/supavisor_web/open_api_schemas.ex b/lib/supavisor_web/open_api_schemas.ex index 3899c82e..23798f5c 100644 --- a/lib/supavisor_web/open_api_schemas.ex +++ b/lib/supavisor_web/open_api_schemas.ex @@ -51,7 +51,7 @@ defmodule SupavisorWeb.OpenApiSchemas do } }) - def response(), do: {"User Response", "application/json", __MODULE__} + def response, do: {"User Response", "application/json", __MODULE__} end defmodule Tenant do @@ -114,7 +114,7 @@ defmodule SupavisorWeb.OpenApiSchemas do } }) - def response(), do: {"Tenant Response", "application/json", __MODULE__} + def response, do: {"Tenant Response", "application/json", __MODULE__} end defmodule TenantList do @@ -122,7 +122,7 @@ defmodule SupavisorWeb.OpenApiSchemas do require OpenApiSpex OpenApiSpex.schema(%{type: :array, items: Tenant}) - def response(), do: {"Tenant List Response", "application/json", __MODULE__} + def response, do: {"Tenant List Response", "application/json", __MODULE__} end defmodule TenantCreate do @@ -190,7 +190,7 @@ defmodule SupavisorWeb.OpenApiSchemas do required: [:tenant] }) - def params(), do: {"Tenant Create Params", "application/json", __MODULE__} + def params, do: {"Tenant Create Params", "application/json", __MODULE__} end defmodule Created do @@ -203,7 +203,7 @@ defmodule SupavisorWeb.OpenApiSchemas do require OpenApiSpex OpenApiSpex.schema(%{}) - def response(), do: {"", "text/plain", __MODULE__} + def response, do: {"", "application/json", __MODULE__} end defmodule NotFound do @@ -211,6 +211,6 @@ defmodule SupavisorWeb.OpenApiSchemas do require OpenApiSpex OpenApiSpex.schema(%{}) - def response(), do: {"Not found", "text/plain", __MODULE__} + def response, do: {"Not found", "application/json", __MODULE__} end end diff --git 
a/lib/supavisor_web/templates/layout/app.html.heex b/lib/supavisor_web/templates/layout/app.html.heex deleted file mode 100644 index 169aed95..00000000 --- a/lib/supavisor_web/templates/layout/app.html.heex +++ /dev/null @@ -1,5 +0,0 @@ -
- - - <%= @inner_content %> -
diff --git a/lib/supavisor_web/templates/layout/live.html.heex b/lib/supavisor_web/templates/layout/live.html.heex deleted file mode 100644 index a29d6044..00000000 --- a/lib/supavisor_web/templates/layout/live.html.heex +++ /dev/null @@ -1,11 +0,0 @@ -
- - - - - <%= @inner_content %> -
diff --git a/lib/supavisor_web/templates/layout/root.html.heex b/lib/supavisor_web/templates/layout/root.html.heex deleted file mode 100644 index cd476343..00000000 --- a/lib/supavisor_web/templates/layout/root.html.heex +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - - - Supavisor - - - <%= @inner_content %> - - diff --git a/lib/supavisor_web/views/cluster_view.ex b/lib/supavisor_web/views/cluster_view.ex index 8478edd9..6073c635 100644 --- a/lib/supavisor_web/views/cluster_view.ex +++ b/lib/supavisor_web/views/cluster_view.ex @@ -1,7 +1,8 @@ defmodule SupavisorWeb.ClusterView do use SupavisorWeb, :view - alias SupavisorWeb.ClusterView + alias SupavisorWeb.ClusterTenantsView + alias SupavisorWeb.ClusterView def render("index.json", %{clusters: clusters}) do %{data: render_many(clusters, ClusterView, "cluster.json")} diff --git a/lib/supavisor_web/views/error_helpers.ex b/lib/supavisor_web/views/error_helpers.ex index f6109bea..52951c76 100644 --- a/lib/supavisor_web/views/error_helpers.ex +++ b/lib/supavisor_web/views/error_helpers.ex @@ -3,20 +3,6 @@ defmodule SupavisorWeb.ErrorHelpers do Conveniences for translating and building error messages. """ - use Phoenix.HTML - - @doc """ - Generates tag for inlined form input errors. - """ - def error_tag(form, field) do - Enum.map(Keyword.get_values(form.errors, field), fn error -> - content_tag(:span, translate_error(error), - class: "invalid-feedback", - phx_feedback_for: input_name(form, field) - ) - end) - end - @doc """ Translates an error message using gettext. """ diff --git a/lib/supavisor_web/views/layout_view.ex b/lib/supavisor_web/views/layout_view.ex deleted file mode 100644 index 55eb75d3..00000000 --- a/lib/supavisor_web/views/layout_view.ex +++ /dev/null @@ -1,7 +0,0 @@ -defmodule SupavisorWeb.LayoutView do - use SupavisorWeb, :view - - # Phoenix LiveDashboard is available only in development by default, - # so we instruct Elixir to not warn if the dashboard route is missing. 
- @compile {:no_warn_undefined, {Routes, :live_dashboard_path, 2}} -end diff --git a/lib/supavisor_web/ws_proxy.ex b/lib/supavisor_web/ws_proxy.ex index ca5ba995..46a57377 100644 --- a/lib/supavisor_web/ws_proxy.ex +++ b/lib/supavisor_web/ws_proxy.ex @@ -59,7 +59,7 @@ defmodule SupavisorWeb.WsProxy do def filter_pass_pkt(bin), do: bin @spec connect_local() :: {:ok, port()} | {:error, term()} - defp connect_local() do + defp connect_local do proxy_port = Application.fetch_env!(:supavisor, :proxy_port_transaction) :gen_tcp.connect(~c"localhost", proxy_port, [:binary, packet: :raw, active: true]) end diff --git a/mix.exs b/mix.exs index 111b3ab1..b50573a0 100644 --- a/mix.exs +++ b/mix.exs @@ -22,11 +22,11 @@ defmodule Supavisor.MixProject do [ mod: {Supavisor.Application, []}, extra_applications: - [:logger, :runtime_tools, :os_mon, :ssl, :partisan] ++ extra_applications(Mix.env()) + [:logger, :runtime_tools, :os_mon, :ssl] ++ extra_applications(Mix.env()) ] end - defp extra_applications(:test), do: [:common_test] + defp extra_applications(:dev), do: [:wx, :observer] defp extra_applications(_), do: [] # Specifies which paths to compile per environment. 
@@ -42,39 +42,36 @@ defmodule Supavisor.MixProject do {:phoenix_ecto, "~> 4.4"}, {:ecto_sql, "~> 3.10"}, {:postgrex, ">= 0.0.0"}, - {:phoenix_html, "~> 3.0"}, {:phoenix_view, "~> 2.0.2"}, - {:phoenix_live_reload, "~> 1.2", only: :dev}, - {:phoenix_live_view, "~> 0.18.18"}, + {:phoenix_live_view, "~> 0.20.0"}, {:phoenix_live_dashboard, "~> 0.7"}, - {:telemetry_metrics, "~> 0.6"}, {:telemetry_poller, "~> 1.0"}, + {:peep, "~> 3.1"}, {:jason, "~> 1.2"}, {:plug_cowboy, "~> 2.5"}, - {:joken, "~> 2.5.0"}, - {:cloak_ecto, "~> 1.2.0"}, - {:meck, "~> 0.9.2", only: :test}, + {:joken, "~> 2.6.0"}, + {:cloak_ecto, "~> 1.3.0"}, + {:meck, "~> 0.9.2", only: [:dev, :test]}, {:credo, "~> 1.7", only: [:dev, :test], runtime: false}, {:dialyxir, "~> 1.4", only: [:dev, :test], runtime: false}, - {:benchee, "~> 1.1.0", only: :dev}, - {:prom_ex, "~> 1.8.0"}, + {:benchee, "~> 1.3", only: :dev}, + {:prom_ex, "~> 1.10"}, {:open_api_spex, "~> 3.16"}, - {:burrito, github: "burrito-elixir/burrito"}, {:libcluster, "~> 3.3.1"}, {:logflare_logger_backend, github: "Logflare/logflare_logger_backend", tag: "v0.11.4"}, {:distillery, "~> 2.1"}, {:cachex, "~> 3.6"}, {:inet_cidr, "~> 1.0.0"}, {:observer_cli, "~> 1.7"}, + {:eflambe, "~> 0.3.1", only: [:dev]}, # pooller # {:poolboy, "~> 1.5.2"}, {:poolboy, git: "https://github.com/abc3/poolboy.git", tag: "v0.0.2"}, - {:partisan, git: "https://github.com/lasp-lang/partisan.git", tag: "v5.0.0-rc.12"}, {:syn, "~> 3.3"}, {:pgo, "~> 0.13"}, - {:rustler, "~> 0.29.1"} - # TODO: add ranch deps + {:rustler, "~> 0.34.0"}, + {:ranch, "~> 2.0", override: true} ] end @@ -84,17 +81,6 @@ defmodule Supavisor.MixProject do steps: [:assemble, &upgrade/1, :tar], include_erts: System.get_env("INCLUDE_ERTS", "true") == "true", cookie: System.get_env("RELEASE_COOKIE", Base.url_encode64(:crypto.strong_rand_bytes(30))) - ], - supavisor_bin: [ - steps: [:assemble, &Burrito.wrap/1], - burrito: [ - targets: [ - macos_aarch64: [os: :darwin, cpu: :aarch64], - macos_x86_64: [os: 
:darwin, cpu: :x86_64], - linux_x86_64: [os: :linux, cpu: :x86_64], - linux_aarch64: [os: :linux, cpu: :aarch64] - ] - ] ] ] end diff --git a/mix.lock b/mix.lock index 6bc3bbe7..cb9bf545 100644 --- a/mix.lock +++ b/mix.lock @@ -1,86 +1,81 @@ %{ - "acceptor_pool": {:hex, :acceptor_pool, "1.0.0", "43c20d2acae35f0c2bcd64f9d2bde267e459f0f3fd23dab26485bf518c281b21", [:rebar3], [], "hexpm", "0cbcd83fdc8b9ad2eee2067ef8b91a14858a5883cb7cd800e6fcd5803e158788"}, "artificery": {:hex, :artificery, "0.4.3", "0bc4260f988dcb9dda4b23f9fc3c6c8b99a6220a331534fdf5bf2fd0d4333b02", [:mix], [], "hexpm", "12e95333a30e20884e937abdbefa3e7f5e05609c2ba8cf37b33f000b9ffc0504"}, "backoff": {:hex, :backoff, "1.1.6", "83b72ed2108ba1ee8f7d1c22e0b4a00cfe3593a67dbc792799e8cce9f42f796b", [:rebar3], [], "hexpm", "cf0cfff8995fb20562f822e5cc47d8ccf664c5ecdc26a684cbe85c225f9d7c39"}, - "benchee": {:hex, :benchee, "1.1.0", "f3a43817209a92a1fade36ef36b86e1052627fd8934a8b937ac9ab3a76c43062", [:mix], [{:deep_merge, "~> 1.0", [hex: :deep_merge, repo: "hexpm", optional: false]}, {:statistex, "~> 1.0", [hex: :statistex, repo: "hexpm", optional: false]}], "hexpm", "7da57d545003165a012b587077f6ba90b89210fd88074ce3c60ce239eb5e6d93"}, + "benchee": {:hex, :benchee, "1.3.1", "c786e6a76321121a44229dde3988fc772bca73ea75170a73fd5f4ddf1af95ccf", [:mix], [{:deep_merge, "~> 1.0", [hex: :deep_merge, repo: "hexpm", optional: false]}, {:statistex, "~> 1.0", [hex: :statistex, repo: "hexpm", optional: false]}, {:table, "~> 0.1.0", [hex: :table, repo: "hexpm", optional: true]}], "hexpm", "76224c58ea1d0391c8309a8ecbfe27d71062878f59bd41a390266bf4ac1cc56d"}, "bertex": {:hex, :bertex, "1.3.0", "0ad0df9159b5110d9d2b6654f72fbf42a54884ef43b6b651e6224c0af30ba3cb", [:mix], [], "hexpm", "0a5d5e478bb5764b7b7bae37cae1ca491200e58b089df121a2fe1c223d8ee57a"}, "bunt": {:hex, :bunt, "1.0.0", "081c2c665f086849e6d57900292b3a161727ab40431219529f13c4ddcf3e7a44", [:mix], [], "hexpm", "dc5f86aa08a5f6fa6b8096f0735c4e76d54ae5c9fa2c143e5a1fc7c1cd9bb6b5"}, 
- "burrito": {:git, "https://github.com/burrito-elixir/burrito.git", "a60c6ab21156fc4c788907d33bfd0c546a022272", []}, "cachex": {:hex, :cachex, "3.6.0", "14a1bfbeee060dd9bec25a5b6f4e4691e3670ebda28c8ba2884b12fe30b36bf8", [:mix], [{:eternal, "~> 1.2", [hex: :eternal, repo: "hexpm", optional: false]}, {:jumper, "~> 1.0", [hex: :jumper, repo: "hexpm", optional: false]}, {:sleeplocks, "~> 1.1", [hex: :sleeplocks, repo: "hexpm", optional: false]}, {:unsafe, "~> 1.0", [hex: :unsafe, repo: "hexpm", optional: false]}], "hexpm", "ebf24e373883bc8e0c8d894a63bbe102ae13d918f790121f5cfe6e485cc8e2e2"}, - "castore": {:hex, :castore, "1.0.4", "ff4d0fb2e6411c0479b1d965a814ea6d00e51eb2f58697446e9c41a97d940b28", [:mix], [], "hexpm", "9418c1b8144e11656f0be99943db4caf04612e3eaecefb5dae9a2a87565584f8"}, - "cloak": {:hex, :cloak, "1.1.2", "7e0006c2b0b98d976d4f559080fabefd81f0e0a50a3c4b621f85ceeb563e80bb", [:mix], [{:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}], "hexpm", "940d5ac4fcd51b252930fd112e319ea5ae6ab540b722f3ca60a85666759b9585"}, - "cloak_ecto": {:hex, :cloak_ecto, "1.2.0", "e86a3df3bf0dc8980f70406bcb0af2858bac247d55494d40bc58a152590bd402", [:mix], [{:cloak, "~> 1.1.1", [hex: :cloak, repo: "hexpm", optional: false]}, {:ecto, "~> 3.0", [hex: :ecto, repo: "hexpm", optional: false]}], "hexpm", "8bcc677185c813fe64b786618bd6689b1707b35cd95acaae0834557b15a0c62f"}, - "cowboy": {:hex, :cowboy, "2.10.0", "ff9ffeff91dae4ae270dd975642997afe2a1179d94b1887863e43f681a203e26", [:make, :rebar3], [{:cowlib, "2.12.1", [hex: :cowlib, repo: "hexpm", optional: false]}, {:ranch, "1.8.0", [hex: :ranch, repo: "hexpm", optional: false]}], "hexpm", "3afdccb7183cc6f143cb14d3cf51fa00e53db9ec80cdcd525482f5e99bc41d6b"}, + "castore": {:hex, :castore, "1.0.9", "5cc77474afadf02c7c017823f460a17daa7908e991b0cc917febc90e466a375c", [:mix], [], "hexpm", "5ea956504f1ba6f2b4eb707061d8e17870de2bee95fb59d512872c2ef06925e7"}, + "cloak": {:hex, :cloak, "1.1.4", 
"aba387b22ea4d80d92d38ab1890cc528b06e0e7ef2a4581d71c3fdad59e997e7", [:mix], [{:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}], "hexpm", "92b20527b9aba3d939fab0dd32ce592ff86361547cfdc87d74edce6f980eb3d7"}, + "cloak_ecto": {:hex, :cloak_ecto, "1.3.0", "0de127c857d7452ba3c3367f53fb814b0410ff9c680a8d20fbe8b9a3c57a1118", [:mix], [{:cloak, "~> 1.1.1", [hex: :cloak, repo: "hexpm", optional: false]}, {:ecto, "~> 3.0", [hex: :ecto, repo: "hexpm", optional: false]}], "hexpm", "314beb0c123b8a800418ca1d51065b27ba3b15f085977e65c0f7b2adab2de1cc"}, + "cowboy": {:hex, :cowboy, "2.12.0", "f276d521a1ff88b2b9b4c54d0e753da6c66dd7be6c9fca3d9418b561828a3731", [:make, :rebar3], [{:cowlib, "2.13.0", [hex: :cowlib, repo: "hexpm", optional: false]}, {:ranch, "1.8.0", [hex: :ranch, repo: "hexpm", optional: false]}], "hexpm", "8a7abe6d183372ceb21caa2709bec928ab2b72e18a3911aa1771639bef82651e"}, "cowboy_telemetry": {:hex, :cowboy_telemetry, "0.4.0", "f239f68b588efa7707abce16a84d0d2acf3a0f50571f8bb7f56a15865aae820c", [:rebar3], [{:cowboy, "~> 2.7", [hex: :cowboy, repo: "hexpm", optional: false]}, {:telemetry, "~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "7d98bac1ee4565d31b62d59f8823dfd8356a169e7fcbb83831b8a5397404c9de"}, - "cowlib": {:hex, :cowlib, "2.12.1", "a9fa9a625f1d2025fe6b462cb865881329b5caff8f1854d1cbc9f9533f00e1e1", [:make, :rebar3], [], "hexpm", "163b73f6367a7341b33c794c4e88e7dbfe6498ac42dcd69ef44c5bc5507c8db0"}, - "credo": {:hex, :credo, "1.7.7", "771445037228f763f9b2afd612b6aa2fd8e28432a95dbbc60d8e03ce71ba4446", [:mix], [{:bunt, "~> 0.2.1 or ~> 1.0", [hex: :bunt, repo: "hexpm", optional: false]}, {:file_system, "~> 0.2 or ~> 1.0", [hex: :file_system, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "8bc87496c9aaacdc3f90f01b7b0582467b69b4bd2441fe8aae3109d843cc2f2e"}, - "db_connection": {:hex, :db_connection, "2.6.0", 
"77d835c472b5b67fc4f29556dee74bf511bbafecdcaf98c27d27fa5918152086", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "c2f992d15725e721ec7fbc1189d4ecdb8afef76648c746a8e1cad35e3b8a35f3"}, + "cowlib": {:hex, :cowlib, "2.13.0", "db8f7505d8332d98ef50a3ef34b34c1afddec7506e4ee4dd4a3a266285d282ca", [:make, :rebar3], [], "hexpm", "e1e1284dc3fc030a64b1ad0d8382ae7e99da46c3246b815318a4b848873800a4"}, + "credo": {:hex, :credo, "1.7.10", "6e64fe59be8da5e30a1b96273b247b5cf1cc9e336b5fd66302a64b25749ad44d", [:mix], [{:bunt, "~> 0.2.1 or ~> 1.0", [hex: :bunt, repo: "hexpm", optional: false]}, {:file_system, "~> 0.2 or ~> 1.0", [hex: :file_system, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "71fbc9a6b8be21d993deca85bf151df023a3097b01e09a2809d460348561d8cd"}, + "db_connection": {:hex, :db_connection, "2.7.0", "b99faa9291bb09892c7da373bb82cba59aefa9b36300f6145c5f201c7adf48ec", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "dcf08f31b2701f857dfc787fbad78223d61a32204f217f15e881dd93e4bdd3ff"}, "decimal": {:hex, :decimal, "2.1.1", "5611dca5d4b2c3dd497dec8f68751f1f1a54755e8ed2a966c2633cf885973ad6", [:mix], [], "hexpm", "53cfe5f497ed0e7771ae1a475575603d77425099ba5faef9394932b35020ffcc"}, "deep_merge": {:hex, :deep_merge, "1.0.0", "b4aa1a0d1acac393bdf38b2291af38cb1d4a52806cf7a4906f718e1feb5ee961", [:mix], [], "hexpm", "ce708e5f094b9cd4e8f2be4f00d2f4250c4095be93f8cd6d018c753894885430"}, - "dialyxir": {:hex, :dialyxir, "1.4.3", "edd0124f358f0b9e95bfe53a9fcf806d615d8f838e2202a9f430d59566b6b53b", [:mix], [{:erlex, ">= 0.2.6", [hex: :erlex, repo: "hexpm", optional: false]}], "hexpm", "bf2cfb75cd5c5006bec30141b131663299c661a864ec7fbbc72dfa557487a986"}, + "dialyxir": {:hex, :dialyxir, "1.4.4", "fb3ce8741edeaea59c9ae84d5cec75da00fa89fe401c72d6e047d11a61f65f70", [:mix], [{:erlex, ">= 0.2.7", [hex: :erlex, repo: "hexpm", 
optional: false]}], "hexpm", "cd6111e8017ccd563e65621a4d9a4a1c5cd333df30cebc7face8029cacb4eff6"}, "distillery": {:hex, :distillery, "2.1.1", "f9332afc2eec8a1a2b86f22429e068ef35f84a93ea1718265e740d90dd367814", [:mix], [{:artificery, "~> 0.2", [hex: :artificery, repo: "hexpm", optional: false]}], "hexpm", "bbc7008b0161a6f130d8d903b5b3232351fccc9c31a991f8fcbf2a12ace22995"}, - "ecto": {:hex, :ecto, "3.10.3", "eb2ae2eecd210b4eb8bece1217b297ad4ff824b4384c0e3fdd28aaf96edd6135", [:mix], [{:decimal, "~> 1.6 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "44bec74e2364d491d70f7e42cd0d690922659d329f6465e89feb8a34e8cd3433"}, - "ecto_sql": {:hex, :ecto_sql, "3.10.2", "6b98b46534b5c2f8b8b5f03f126e75e2a73c64f3c071149d32987a5378b0fdbd", [:mix], [{:db_connection, "~> 2.4.1 or ~> 2.5", [hex: :db_connection, repo: "hexpm", optional: false]}, {:ecto, "~> 3.10.0", [hex: :ecto, repo: "hexpm", optional: false]}, {:myxql, "~> 0.6.0", [hex: :myxql, repo: "hexpm", optional: true]}, {:postgrex, "~> 0.16.0 or ~> 0.17.0 or ~> 1.0", [hex: :postgrex, repo: "hexpm", optional: true]}, {:tds, "~> 2.1.1 or ~> 2.2", [hex: :tds, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4.0 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "68c018debca57cb9235e3889affdaec7a10616a4e3a80c99fa1d01fdafaa9007"}, + "ecto": {:hex, :ecto, "3.12.4", "267c94d9f2969e6acc4dd5e3e3af5b05cdae89a4d549925f3008b2b7eb0b93c3", [:mix], [{:decimal, "~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "ef04e4101688a67d061e1b10d7bc1fbf00d1d13c17eef08b71d070ff9188f747"}, + "ecto_sql": {:hex, :ecto_sql, "3.12.1", 
"c0d0d60e85d9ff4631f12bafa454bc392ce8b9ec83531a412c12a0d415a3a4d0", [:mix], [{:db_connection, "~> 2.4.1 or ~> 2.5", [hex: :db_connection, repo: "hexpm", optional: false]}, {:ecto, "~> 3.12", [hex: :ecto, repo: "hexpm", optional: false]}, {:myxql, "~> 0.7", [hex: :myxql, repo: "hexpm", optional: true]}, {:postgrex, "~> 0.19 or ~> 1.0", [hex: :postgrex, repo: "hexpm", optional: true]}, {:tds, "~> 2.1.1 or ~> 2.2", [hex: :tds, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4.0 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "aff5b958a899762c5f09028c847569f7dfb9cc9d63bdb8133bff8a5546de6bf5"}, + "eflambe": {:hex, :eflambe, "0.3.1", "ef0a35084fad1f50744496730a9662782c0a9ebf449d3e03143e23295c5926ea", [:rebar3], [{:meck, "0.9.2", [hex: :meck, repo: "hexpm", optional: false]}], "hexpm", "58d5997be606d4e269e9e9705338e055281fdf3e4935cc902c8908e9e4516c5f"}, "erlex": {:hex, :erlex, "0.2.7", "810e8725f96ab74d17aac676e748627a07bc87eb950d2b83acd29dc047a30595", [:mix], [], "hexpm", "3ed95f79d1a844c3f6bf0cea61e0d5612a42ce56da9c03f01df538685365efb0"}, "eternal": {:hex, :eternal, "1.2.2", "d1641c86368de99375b98d183042dd6c2b234262b8d08dfd72b9eeaafc2a1abd", [:mix], [], "hexpm", "2c9fe32b9c3726703ba5e1d43a1d255a4f3f2d8f8f9bc19f094c7cb1a7a9e782"}, - "file_system": {:hex, :file_system, "0.2.10", "fb082005a9cd1711c05b5248710f8826b02d7d1784e7c3451f9c1231d4fc162d", [:mix], [], "hexpm", "41195edbfb562a593726eda3b3e8b103a309b733ad25f3d642ba49696bf715dc"}, - "finch": {:hex, :finch, "0.16.0", "40733f02c89f94a112518071c0a91fe86069560f5dbdb39f9150042f44dcfb1a", [:mix], [{:castore, "~> 0.1 or ~> 1.0", [hex: :castore, repo: "hexpm", optional: false]}, {:mime, "~> 1.0 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:mint, "~> 1.3", [hex: :mint, repo: "hexpm", optional: false]}, {:nimble_options, "~> 0.4 or ~> 1.0", [hex: :nimble_options, repo: "hexpm", optional: false]}, {:nimble_pool, "~> 0.2.6 or ~> 1.0", [hex: :nimble_pool, repo: "hexpm", optional: 
false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "f660174c4d519e5fec629016054d60edd822cdfe2b7270836739ac2f97735ec5"}, - "hpax": {:hex, :hpax, "0.1.2", "09a75600d9d8bbd064cdd741f21fc06fc1f4cf3d0fcc335e5aa19be1a7235c84", [:mix], [], "hexpm", "2c87843d5a23f5f16748ebe77969880e29809580efdaccd615cd3bed628a8c13"}, - "inet_cidr": {:hex, :inet_cidr, "1.0.4", "a05744ab7c221ca8e395c926c3919a821eb512e8f36547c062f62c4ca0cf3d6e", [:mix], [], "hexpm", "64a2d30189704ae41ca7dbdd587f5291db5d1dda1414e0774c29ffc81088c1bc"}, - "jason": {:hex, :jason, "1.4.1", "af1504e35f629ddcdd6addb3513c3853991f694921b1b9368b0bd32beb9f1b63", [:mix], [{:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm", "fbb01ecdfd565b56261302f7e1fcc27c4fb8f32d56eab74db621fc154604a7a1"}, - "joken": {:hex, :joken, "2.5.0", "09be497d804b8115eb6f07615cef2e60c2a1008fb89dc0aef0d4c4b4609b99aa", [:mix], [{:jose, "~> 1.11.2", [hex: :jose, repo: "hexpm", optional: false]}], "hexpm", "22b25c89617c5ed8ca7b31026340a25ea0f9ca7160f9706b79be9ed81fdf74e7"}, - "jose": {:hex, :jose, "1.11.6", "613fda82552128aa6fb804682e3a616f4bc15565a048dabd05b1ebd5827ed965", [:mix, :rebar3], [], "hexpm", "6275cb75504f9c1e60eeacb771adfeee4905a9e182103aa59b53fed651ff9738"}, + "file_system": {:hex, :file_system, "1.0.1", "79e8ceaddb0416f8b8cd02a0127bdbababe7bf4a23d2a395b983c1f8b3f73edd", [:mix], [], "hexpm", "4414d1f38863ddf9120720cd976fce5bdde8e91d8283353f0e31850fa89feb9e"}, + "finch": {:hex, :finch, "0.19.0", "c644641491ea854fc5c1bbaef36bfc764e3f08e7185e1f084e35e0672241b76d", [:mix], [{:mime, "~> 1.0 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:mint, "~> 1.6.2 or ~> 1.7", [hex: :mint, repo: "hexpm", optional: false]}, {:nimble_options, "~> 0.4 or ~> 1.0", [hex: :nimble_options, repo: "hexpm", optional: false]}, {:nimble_pool, "~> 1.1", [hex: :nimble_pool, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: 
"hexpm", optional: false]}], "hexpm", "fc5324ce209125d1e2fa0fcd2634601c52a787aff1cd33ee833664a5af4ea2b6"}, + "hpax": {:hex, :hpax, "1.0.0", "28dcf54509fe2152a3d040e4e3df5b265dcb6cb532029ecbacf4ce52caea3fd2", [:mix], [], "hexpm", "7f1314731d711e2ca5fdc7fd361296593fc2542570b3105595bb0bc6d0fad601"}, + "inet_cidr": {:hex, :inet_cidr, "1.0.8", "d26bb7bdbdf21ae401ead2092bf2bb4bf57fe44a62f5eaa5025280720ace8a40", [:mix], [], "hexpm", "d5b26da66603bb56c933c65214c72152f0de9a6ea53618b56d63302a68f6a90e"}, + "jason": {:hex, :jason, "1.4.4", "b9226785a9aa77b6857ca22832cffa5d5011a667207eb2a0ad56adb5db443b8a", [:mix], [{:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm", "c5eb0cab91f094599f94d55bc63409236a8ec69a21a67814529e8d5f6cc90b3b"}, + "joken": {:hex, :joken, "2.6.2", "5daaf82259ca603af4f0b065475099ada1b2b849ff140ccd37f4b6828ca6892a", [:mix], [{:jose, "~> 1.11.10", [hex: :jose, repo: "hexpm", optional: false]}], "hexpm", "5134b5b0a6e37494e46dbf9e4dad53808e5e787904b7c73972651b51cce3d72b"}, + "jose": {:hex, :jose, "1.11.10", "a903f5227417bd2a08c8a00a0cbcc458118be84480955e8d251297a425723f83", [:mix, :rebar3], [], "hexpm", "0d6cd36ff8ba174db29148fc112b5842186b68a90ce9fc2b3ec3afe76593e614"}, "jumper": {:hex, :jumper, "1.0.2", "68cdcd84472a00ac596b4e6459a41b3062d4427cbd4f1e8c8793c5b54f1406a7", [:mix], [], "hexpm", "9b7782409021e01ab3c08270e26f36eb62976a38c1aa64b2eaf6348422f165e1"}, "libcluster": {:hex, :libcluster, "3.3.3", "a4f17721a19004cfc4467268e17cff8b1f951befe428975dd4f6f7b84d927fe0", [:mix], [{:jason, "~> 1.1", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "7c0a2275a0bb83c07acd17dab3c3bfb4897b145106750eeccc62d302e3bdfee5"}, "logflare_api_client": {:hex, :logflare_api_client, "0.3.5", "c427ebf65a8402d68b056d4a5ef3e1eb3b90c0ad1d0de97d1fe23807e0c1b113", [:mix], [{:bertex, "~> 1.3", [hex: :bertex, repo: "hexpm", optional: false]}, {:finch, "~> 0.10", [hex: :finch, repo: "hexpm", optional: false]}, {:jason, ">= 1.0.0", [hex: 
:jason, repo: "hexpm", optional: false]}, {:tesla, "~> 1.0", [hex: :tesla, repo: "hexpm", optional: false]}], "hexpm", "16d29abcb80c4f72745cdf943379da02a201504813c3aa12b4d4acb0302b7723"}, "logflare_etso": {:hex, :logflare_etso, "1.1.2", "040bd3e482aaf0ed20080743b7562242ec5079fd88a6f9c8ce5d8298818292e9", [:mix], [{:ecto, "~> 3.8", [hex: :ecto, repo: "hexpm", optional: false]}], "hexpm", "ab96be42900730a49b132891f43a9be1d52e4ad3ee9ed9cb92565c5f87345117"}, "logflare_logger_backend": {:git, "https://github.com/Logflare/logflare_logger_backend.git", "7fcc9f32ec48f466ddc1738709d7dc646cfc1e3a", [tag: "v0.11.4"]}, "meck": {:hex, :meck, "0.9.2", "85ccbab053f1db86c7ca240e9fc718170ee5bda03810a6292b5306bf31bae5f5", [:rebar3], [], "hexpm", "81344f561357dc40a8344afa53767c32669153355b626ea9fcbc8da6b3045826"}, - "mime": {:hex, :mime, "2.0.5", "dc34c8efd439abe6ae0343edbb8556f4d63f178594894720607772a041b04b02", [:mix], [], "hexpm", "da0d64a365c45bc9935cc5c8a7fc5e49a0e0f9932a761c55d6c52b142780a05c"}, - "mint": {:hex, :mint, "1.5.1", "8db5239e56738552d85af398798c80648db0e90f343c8469f6c6d8898944fb6f", [:mix], [{:castore, "~> 0.1.0 or ~> 1.0", [hex: :castore, repo: "hexpm", optional: true]}, {:hpax, "~> 0.1.1", [hex: :hpax, repo: "hexpm", optional: false]}], "hexpm", "4a63e1e76a7c3956abd2c72f370a0d0aecddc3976dea5c27eccbecfa5e7d5b1e"}, - "nimble_options": {:hex, :nimble_options, "1.0.2", "92098a74df0072ff37d0c12ace58574d26880e522c22801437151a159392270e", [:mix], [], "hexpm", "fd12a8db2021036ce12a309f26f564ec367373265b53e25403f0ee697380f1b8"}, - "nimble_pool": {:hex, :nimble_pool, "1.0.0", "5eb82705d138f4dd4423f69ceb19ac667b3b492ae570c9f5c900bb3d2f50a847", [:mix], [], "hexpm", "80be3b882d2d351882256087078e1b1952a28bf98d0a287be87e4a24a710b67a"}, - "observer_cli": {:hex, :observer_cli, "1.7.4", "3c1bfb6d91bf68f6a3d15f46ae20da0f7740d363ee5bc041191ce8722a6c4fae", [:mix, :rebar3], [{:recon, "~> 2.5.1", [hex: :recon, repo: "hexpm", optional: false]}], "hexpm", 
"50de6d95d814f447458bd5d72666a74624eddb0ef98bdcee61a0153aae0865ff"}, - "octo_fetch": {:hex, :octo_fetch, "0.3.0", "89ff501d2ac0448556ff1931634a538fe6d6cd358ba827ce1747e6a42a46efbf", [:mix], [{:castore, "~> 0.1 or ~> 1.0", [hex: :castore, repo: "hexpm", optional: false]}], "hexpm", "c07e44f2214ab153743b7b3182f380798d0b294b1f283811c1e30cff64096d3d"}, - "open_api_spex": {:hex, :open_api_spex, "3.18.0", "f9952b6bc8a1bf14168f3754981b7c8d72d015112bfedf2588471dd602e1e715", [:mix], [{:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:plug, "~> 1.7", [hex: :plug, repo: "hexpm", optional: false]}, {:poison, "~> 3.0 or ~> 4.0 or ~> 5.0", [hex: :poison, repo: "hexpm", optional: true]}, {:ymlr, "~> 2.0 or ~> 3.0 or ~> 4.0", [hex: :ymlr, repo: "hexpm", optional: true]}], "hexpm", "37849887ab67efab052376401fac28c0974b273ffaecd98f4532455ca0886464"}, - "opentelemetry_api": {:hex, :opentelemetry_api, "1.2.1", "7b69ed4f40025c005de0b74fce8c0549625d59cb4df12d15c32fe6dc5076ff42", [:mix, :rebar3], [{:opentelemetry_semantic_conventions, "~> 0.2", [hex: :opentelemetry_semantic_conventions, repo: "hexpm", optional: false]}], "hexpm", "6d7a27b7cad2ad69a09cabf6670514cafcec717c8441beb5c96322bac3d05350"}, - "opentelemetry_semantic_conventions": {:hex, :opentelemetry_semantic_conventions, "0.2.0", "b67fe459c2938fcab341cb0951c44860c62347c005ace1b50f8402576f241435", [:mix, :rebar3], [], "hexpm", "d61fa1f5639ee8668d74b527e6806e0503efc55a42db7b5f39939d84c07d6895"}, - "partisan": {:git, "https://github.com/lasp-lang/partisan.git", "a6a59b3ed406037099773861dbaad4e97b5b9142", [tag: "v5.0.0-rc.12"]}, + "mime": {:hex, :mime, "2.0.6", "8f18486773d9b15f95f4f4f1e39b710045fa1de891fada4516559967276e4dc2", [:mix], [], "hexpm", "c9945363a6b26d747389aac3643f8e0e09d30499a138ad64fe8fd1d13d9b153e"}, + "mint": {:hex, :mint, "1.6.2", "af6d97a4051eee4f05b5500671d47c3a67dac7386045d87a904126fd4bbcea2e", [:mix], [{:castore, "~> 0.1.0 or ~> 1.0", [hex: :castore, repo: "hexpm", optional: true]}, {:hpax, "~> 
0.1.1 or ~> 0.2.0 or ~> 1.0", [hex: :hpax, repo: "hexpm", optional: false]}], "hexpm", "5ee441dffc1892f1ae59127f74afe8fd82fda6587794278d924e4d90ea3d63f9"}, + "nimble_options": {:hex, :nimble_options, "1.1.1", "e3a492d54d85fc3fd7c5baf411d9d2852922f66e69476317787a7b2bb000a61b", [:mix], [], "hexpm", "821b2470ca9442c4b6984882fe9bb0389371b8ddec4d45a9504f00a66f650b44"}, + "nimble_pool": {:hex, :nimble_pool, "1.1.0", "bf9c29fbdcba3564a8b800d1eeb5a3c58f36e1e11d7b7fb2e084a643f645f06b", [:mix], [], "hexpm", "af2e4e6b34197db81f7aad230c1118eac993acc0dae6bc83bac0126d4ae0813a"}, + "observer_cli": {:hex, :observer_cli, "1.8.0", "1359409c4b25b11360db56bc3103cfb51f3a4b3aea76ec58c7b8595feb5d6019", [:mix, :rebar3], [{:recon, "~> 2.5.6", [hex: :recon, repo: "hexpm", optional: false]}], "hexpm", "9842759b11360819dd0e6e60173c39c1e6aaef4b20fa6fe9b4700e3e02911b83"}, + "octo_fetch": {:hex, :octo_fetch, "0.4.0", "074b5ecbc08be10b05b27e9db08bc20a3060142769436242702931c418695b19", [:mix], [{:castore, "~> 0.1 or ~> 1.0", [hex: :castore, repo: "hexpm", optional: false]}, {:ssl_verify_fun, "~> 1.1", [hex: :ssl_verify_fun, repo: "hexpm", optional: false]}], "hexpm", "cf8be6f40cd519d7000bb4e84adcf661c32e59369ca2827c4e20042eda7a7fc6"}, + "open_api_spex": {:hex, :open_api_spex, "3.21.2", "6a704f3777761feeb5657340250d6d7332c545755116ca98f33d4b875777e1e5", [:mix], [{:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:plug, "~> 1.7", [hex: :plug, repo: "hexpm", optional: false]}, {:poison, "~> 3.0 or ~> 4.0 or ~> 5.0 or ~> 6.0", [hex: :poison, repo: "hexpm", optional: true]}, {:ymlr, "~> 2.0 or ~> 3.0 or ~> 4.0 or ~> 5.0", [hex: :ymlr, repo: "hexpm", optional: true]}], "hexpm", "f42ae6ed668b895ebba3e02773cfb4b41050df26f803f2ef634c72a7687dc387"}, + "opentelemetry_api": {:hex, :opentelemetry_api, "1.4.0", "63ca1742f92f00059298f478048dfb826f4b20d49534493d6919a0db39b6db04", [:mix, :rebar3], [], "hexpm", 
"3dfbbfaa2c2ed3121c5c483162836c4f9027def469c41578af5ef32589fcfc58"}, + "peep": {:hex, :peep, "3.3.0", "ece8c38f0e3cfeecf8739d377c228c7e2b34d947f6b4817a183be37c88b94ebe", [:mix], [{:nimble_options, "~> 1.1", [hex: :nimble_options, repo: "hexpm", optional: false]}, {:plug, "~> 1.16", [hex: :plug, repo: "hexpm", optional: true]}, {:telemetry_metrics, "~> 1.0", [hex: :telemetry_metrics, repo: "hexpm", optional: false]}], "hexpm", "19c84bf78c4eee97cb0df33d4ea628e93b6ab148ee19c289c499d1d62b3e78cb"}, "pg_types": {:hex, :pg_types, "0.4.0", "3ce365c92903c5bb59c0d56382d842c8c610c1b6f165e20c4b652c96fa7e9c14", [:rebar3], [], "hexpm", "b02efa785caececf9702c681c80a9ca12a39f9161a846ce17b01fb20aeeed7eb"}, "pgo": {:hex, :pgo, "0.14.0", "f53711d103d7565db6fc6061fcf4ff1007ab39892439be1bb02d9f686d7e6663", [:rebar3], [{:backoff, "~> 1.1.6", [hex: :backoff, repo: "hexpm", optional: false]}, {:opentelemetry_api, "~> 1.0", [hex: :opentelemetry_api, repo: "hexpm", optional: false]}, {:pg_types, "~> 0.4.0", [hex: :pg_types, repo: "hexpm", optional: false]}], "hexpm", "71016c22599936e042dc0012ee4589d24c71427d266292f775ebf201d97df9c9"}, - "phoenix": {:hex, :phoenix, "1.7.10", "02189140a61b2ce85bb633a9b6fd02dff705a5f1596869547aeb2b2b95edd729", [:mix], [{:castore, ">= 0.0.0", [hex: :castore, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:phoenix_pubsub, "~> 2.1", [hex: :phoenix_pubsub, repo: "hexpm", optional: false]}, {:phoenix_template, "~> 1.0", [hex: :phoenix_template, repo: "hexpm", optional: false]}, {:phoenix_view, "~> 2.0", [hex: :phoenix_view, repo: "hexpm", optional: true]}, {:plug, "~> 1.14", [hex: :plug, repo: "hexpm", optional: false]}, {:plug_cowboy, "~> 2.6", [hex: :plug_cowboy, repo: "hexpm", optional: true]}, {:plug_crypto, "~> 1.2 or ~> 2.0", [hex: :plug_crypto, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}, {:websock_adapter, "~> 0.5.3", [hex: 
:websock_adapter, repo: "hexpm", optional: false]}], "hexpm", "cf784932e010fd736d656d7fead6a584a4498efefe5b8227e9f383bf15bb79d0"}, - "phoenix_ecto": {:hex, :phoenix_ecto, "4.4.3", "86e9878f833829c3f66da03d75254c155d91d72a201eb56ae83482328dc7ca93", [:mix], [{:ecto, "~> 3.5", [hex: :ecto, repo: "hexpm", optional: false]}, {:phoenix_html, "~> 2.14.2 or ~> 3.0 or ~> 4.0", [hex: :phoenix_html, repo: "hexpm", optional: true]}, {:plug, "~> 1.9", [hex: :plug, repo: "hexpm", optional: false]}], "hexpm", "d36c401206f3011fefd63d04e8ef626ec8791975d9d107f9a0817d426f61ac07"}, - "phoenix_html": {:hex, :phoenix_html, "3.3.3", "380b8fb45912b5638d2f1d925a3771b4516b9a78587249cabe394e0a5d579dc9", [:mix], [{:plug, "~> 1.5", [hex: :plug, repo: "hexpm", optional: true]}], "hexpm", "923ebe6fec6e2e3b3e569dfbdc6560de932cd54b000ada0208b5f45024bdd76c"}, - "phoenix_live_dashboard": {:hex, :phoenix_live_dashboard, "0.7.2", "97cc4ff2dba1ebe504db72cb45098cb8e91f11160528b980bd282cc45c73b29c", [:mix], [{:ecto, "~> 3.6.2 or ~> 3.7", [hex: :ecto, repo: "hexpm", optional: true]}, {:ecto_mysql_extras, "~> 0.5", [hex: :ecto_mysql_extras, repo: "hexpm", optional: true]}, {:ecto_psql_extras, "~> 0.7", [hex: :ecto_psql_extras, repo: "hexpm", optional: true]}, {:mime, "~> 1.6 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:phoenix_live_view, "~> 0.18.3", [hex: :phoenix_live_view, repo: "hexpm", optional: false]}, {:telemetry_metrics, "~> 0.6 or ~> 1.0", [hex: :telemetry_metrics, repo: "hexpm", optional: false]}], "hexpm", "0e5fdf063c7a3b620c566a30fcf68b7ee02e5e46fe48ee46a6ec3ba382dc05b7"}, - "phoenix_live_reload": {:hex, :phoenix_live_reload, "1.4.1", "2aff698f5e47369decde4357ba91fc9c37c6487a512b41732818f2204a8ef1d3", [:mix], [{:file_system, "~> 0.2.1 or ~> 0.3", [hex: :file_system, repo: "hexpm", optional: false]}, {:phoenix, "~> 1.4", [hex: :phoenix, repo: "hexpm", optional: false]}], "hexpm", "9bffb834e7ddf08467fe54ae58b5785507aaba6255568ae22b4d46e2bb3615ab"}, - "phoenix_live_view": {:hex, 
:phoenix_live_view, "0.18.18", "1f38fbd7c363723f19aad1a04b5490ff3a178e37daaf6999594d5f34796c47fc", [:mix], [{:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:phoenix, "~> 1.6.15 or ~> 1.7.0", [hex: :phoenix, repo: "hexpm", optional: false]}, {:phoenix_html, "~> 3.3", [hex: :phoenix_html, repo: "hexpm", optional: false]}, {:phoenix_template, "~> 1.0", [hex: :phoenix_template, repo: "hexpm", optional: false]}, {:phoenix_view, "~> 2.0", [hex: :phoenix_view, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4.2 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "a5810d0472f3189ede6d2a95bda7f31c6113156b91784a3426cb0ab6a6d85214"}, + "phoenix": {:hex, :phoenix, "1.7.14", "a7d0b3f1bc95987044ddada111e77bd7f75646a08518942c72a8440278ae7825", [:mix], [{:castore, ">= 0.0.0", [hex: :castore, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:phoenix_pubsub, "~> 2.1", [hex: :phoenix_pubsub, repo: "hexpm", optional: false]}, {:phoenix_template, "~> 1.0", [hex: :phoenix_template, repo: "hexpm", optional: false]}, {:phoenix_view, "~> 2.0", [hex: :phoenix_view, repo: "hexpm", optional: true]}, {:plug, "~> 1.14", [hex: :plug, repo: "hexpm", optional: false]}, {:plug_cowboy, "~> 2.7", [hex: :plug_cowboy, repo: "hexpm", optional: true]}, {:plug_crypto, "~> 1.2 or ~> 2.0", [hex: :plug_crypto, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}, {:websock_adapter, "~> 0.5.3", [hex: :websock_adapter, repo: "hexpm", optional: false]}], "hexpm", "c7859bc56cc5dfef19ecfc240775dae358cbaa530231118a9e014df392ace61a"}, + "phoenix_ecto": {:hex, :phoenix_ecto, "4.6.3", "f686701b0499a07f2e3b122d84d52ff8a31f5def386e03706c916f6feddf69ef", [:mix], [{:ecto, "~> 3.5", [hex: :ecto, repo: "hexpm", optional: false]}, {:phoenix_html, "~> 2.14.2 or ~> 3.0 or ~> 4.1", [hex: :phoenix_html, repo: "hexpm", optional: true]}, {:plug, "~> 1.9", [hex: :plug, 
repo: "hexpm", optional: false]}, {:postgrex, "~> 0.16 or ~> 1.0", [hex: :postgrex, repo: "hexpm", optional: true]}], "hexpm", "909502956916a657a197f94cc1206d9a65247538de8a5e186f7537c895d95764"}, + "phoenix_html": {:hex, :phoenix_html, "4.1.1", "4c064fd3873d12ebb1388425a8f2a19348cef56e7289e1998e2d2fa758aa982e", [:mix], [], "hexpm", "f2f2df5a72bc9a2f510b21497fd7d2b86d932ec0598f0210fed4114adc546c6f"}, + "phoenix_live_dashboard": {:hex, :phoenix_live_dashboard, "0.8.4", "4508e481f791ce62ec6a096e13b061387158cbeefacca68c6c1928e1305e23ed", [:mix], [{:ecto, "~> 3.6.2 or ~> 3.7", [hex: :ecto, repo: "hexpm", optional: true]}, {:ecto_mysql_extras, "~> 0.5", [hex: :ecto_mysql_extras, repo: "hexpm", optional: true]}, {:ecto_psql_extras, "~> 0.7", [hex: :ecto_psql_extras, repo: "hexpm", optional: true]}, {:ecto_sqlite3_extras, "~> 1.1.7 or ~> 1.2.0", [hex: :ecto_sqlite3_extras, repo: "hexpm", optional: true]}, {:mime, "~> 1.6 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:phoenix_live_view, "~> 0.19 or ~> 1.0", [hex: :phoenix_live_view, repo: "hexpm", optional: false]}, {:telemetry_metrics, "~> 0.6 or ~> 1.0", [hex: :telemetry_metrics, repo: "hexpm", optional: false]}], "hexpm", "2984aae96994fbc5c61795a73b8fb58153b41ff934019cfb522343d2d3817d59"}, + "phoenix_live_view": {:hex, :phoenix_live_view, "0.20.17", "f396bbdaf4ba227b82251eb75ac0afa6b3da5e509bc0d030206374237dfc9450", [:mix], [{:floki, "~> 0.36", [hex: :floki, repo: "hexpm", optional: true]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:phoenix, "~> 1.6.15 or ~> 1.7.0", [hex: :phoenix, repo: "hexpm", optional: false]}, {:phoenix_html, "~> 3.3 or ~> 4.0", [hex: :phoenix_html, repo: "hexpm", optional: false]}, {:phoenix_template, "~> 1.0", [hex: :phoenix_template, repo: "hexpm", optional: false]}, {:phoenix_view, "~> 2.0", [hex: :phoenix_view, repo: "hexpm", optional: true]}, {:plug, "~> 1.15", [hex: :plug, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4.2 or ~> 1.0", [hex: 
:telemetry, repo: "hexpm", optional: false]}], "hexpm", "a61d741ffb78c85fdbca0de084da6a48f8ceb5261a79165b5a0b59e5f65ce98b"}, "phoenix_pubsub": {:hex, :phoenix_pubsub, "2.1.3", "3168d78ba41835aecad272d5e8cd51aa87a7ac9eb836eabc42f6e57538e3731d", [:mix], [], "hexpm", "bba06bc1dcfd8cb086759f0edc94a8ba2bc8896d5331a1e2c2902bf8e36ee502"}, - "phoenix_template": {:hex, :phoenix_template, "1.0.3", "32de561eefcefa951aead30a1f94f1b5f0379bc9e340bb5c667f65f1edfa4326", [:mix], [{:phoenix_html, "~> 2.14.2 or ~> 3.0", [hex: :phoenix_html, repo: "hexpm", optional: true]}], "hexpm", "16f4b6588a4152f3cc057b9d0c0ba7e82ee23afa65543da535313ad8d25d8e2c"}, - "phoenix_view": {:hex, :phoenix_view, "2.0.3", "4d32c4817fce933693741deeb99ef1392619f942633dde834a5163124813aad3", [:mix], [{:phoenix_html, "~> 2.14.2 or ~> 3.0 or ~> 4.0", [hex: :phoenix_html, repo: "hexpm", optional: true]}, {:phoenix_template, "~> 1.0", [hex: :phoenix_template, repo: "hexpm", optional: false]}], "hexpm", "cd34049af41be2c627df99cd4eaa71fc52a328c0c3d8e7d4aa28f880c30e7f64"}, - "plug": {:hex, :plug, "1.15.1", "b7efd81c1a1286f13efb3f769de343236bd8b7d23b4a9f40d3002fc39ad8f74c", [:mix], [{:mime, "~> 1.0 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:plug_crypto, "~> 1.1.1 or ~> 1.2 or ~> 2.0", [hex: :plug_crypto, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4.3 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "459497bd94d041d98d948054ec6c0b76feacd28eec38b219ca04c0de13c79d30"}, - "plug_cowboy": {:hex, :plug_cowboy, "2.6.1", "9a3bbfceeb65eff5f39dab529e5cd79137ac36e913c02067dba3963a26efe9b2", [:mix], [{:cowboy, "~> 2.7", [hex: :cowboy, repo: "hexpm", optional: false]}, {:cowboy_telemetry, "~> 0.3", [hex: :cowboy_telemetry, repo: "hexpm", optional: false]}, {:plug, "~> 1.14", [hex: :plug, repo: "hexpm", optional: false]}], "hexpm", "de36e1a21f451a18b790f37765db198075c25875c64834bcc82d90b309eb6613"}, - "plug_crypto": {:hex, :plug_crypto, "2.0.0", 
"77515cc10af06645abbfb5e6ad7a3e9714f805ae118fa1a70205f80d2d70fe73", [:mix], [], "hexpm", "53695bae57cc4e54566d993eb01074e4d894b65a3766f1c43e2c61a1b0f45ea9"}, + "phoenix_template": {:hex, :phoenix_template, "1.0.4", "e2092c132f3b5e5b2d49c96695342eb36d0ed514c5b252a77048d5969330d639", [:mix], [{:phoenix_html, "~> 2.14.2 or ~> 3.0 or ~> 4.0", [hex: :phoenix_html, repo: "hexpm", optional: true]}], "hexpm", "2c0c81f0e5c6753faf5cca2f229c9709919aba34fab866d3bc05060c9c444206"}, + "phoenix_view": {:hex, :phoenix_view, "2.0.4", "b45c9d9cf15b3a1af5fb555c674b525391b6a1fe975f040fb4d913397b31abf4", [:mix], [{:phoenix_html, "~> 2.14.2 or ~> 3.0 or ~> 4.0", [hex: :phoenix_html, repo: "hexpm", optional: true]}, {:phoenix_template, "~> 1.0", [hex: :phoenix_template, repo: "hexpm", optional: false]}], "hexpm", "4e992022ce14f31fe57335db27a28154afcc94e9983266835bb3040243eb620b"}, + "plug": {:hex, :plug, "1.16.1", "40c74619c12f82736d2214557dedec2e9762029b2438d6d175c5074c933edc9d", [:mix], [{:mime, "~> 1.0 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:plug_crypto, "~> 1.1.1 or ~> 1.2 or ~> 2.0", [hex: :plug_crypto, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4.3 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "a13ff6b9006b03d7e33874945b2755253841b238c34071ed85b0e86057f8cddc"}, + "plug_cowboy": {:hex, :plug_cowboy, "2.7.2", "fdadb973799ae691bf9ecad99125b16625b1c6039999da5fe544d99218e662e4", [:mix], [{:cowboy, "~> 2.7", [hex: :cowboy, repo: "hexpm", optional: false]}, {:cowboy_telemetry, "~> 0.3", [hex: :cowboy_telemetry, repo: "hexpm", optional: false]}, {:plug, "~> 1.14", [hex: :plug, repo: "hexpm", optional: false]}], "hexpm", "245d8a11ee2306094840c000e8816f0cbed69a23fc0ac2bcf8d7835ae019bb2f"}, + "plug_crypto": {:hex, :plug_crypto, "2.1.0", "f44309c2b06d249c27c8d3f65cfe08158ade08418cf540fd4f72d4d6863abb7b", [:mix], [], "hexpm", "131216a4b030b8f8ce0f26038bc4421ae60e4bb95c5cf5395e1421437824c4fa"}, "poolboy": {:git, 
"https://github.com/abc3/poolboy.git", "999ec7f5c7282d515020bb058b4832029d6d07bc", [tag: "v0.0.2"]}, - "postgrex": {:hex, :postgrex, "0.17.3", "c92cda8de2033a7585dae8c61b1d420a1a1322421df84da9a82a6764580c503d", [:mix], [{:db_connection, "~> 2.1", [hex: :db_connection, repo: "hexpm", optional: false]}, {:decimal, "~> 1.5 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:table, "~> 0.1.0", [hex: :table, repo: "hexpm", optional: true]}], "hexpm", "946cf46935a4fdca7a81448be76ba3503cff082df42c6ec1ff16a4bdfbfb098d"}, - "prom_ex": {:hex, :prom_ex, "1.8.0", "662615e1d2f2ab3e0dc13a51c92ad0ccfcab24336a90cb9b114ee1bce9ef88aa", [:mix], [{:absinthe, ">= 1.6.0", [hex: :absinthe, repo: "hexpm", optional: true]}, {:broadway, ">= 1.0.2", [hex: :broadway, repo: "hexpm", optional: true]}, {:ecto, ">= 3.5.0", [hex: :ecto, repo: "hexpm", optional: true]}, {:finch, "~> 0.15", [hex: :finch, repo: "hexpm", optional: false]}, {:jason, "~> 1.2", [hex: :jason, repo: "hexpm", optional: false]}, {:oban, ">= 2.4.0", [hex: :oban, repo: "hexpm", optional: true]}, {:octo_fetch, "~> 0.3", [hex: :octo_fetch, repo: "hexpm", optional: false]}, {:phoenix, ">= 1.5.0", [hex: :phoenix, repo: "hexpm", optional: true]}, {:phoenix_live_view, ">= 0.14.0", [hex: :phoenix_live_view, repo: "hexpm", optional: true]}, {:plug, ">= 1.12.1", [hex: :plug, repo: "hexpm", optional: true]}, {:plug_cowboy, "~> 2.5", [hex: :plug_cowboy, repo: "hexpm", optional: false]}, {:telemetry, ">= 1.0.0", [hex: :telemetry, repo: "hexpm", optional: false]}, {:telemetry_metrics, "~> 0.6", [hex: :telemetry_metrics, repo: "hexpm", optional: false]}, {:telemetry_metrics_prometheus_core, "~> 1.0", [hex: :telemetry_metrics_prometheus_core, repo: "hexpm", optional: false]}, {:telemetry_poller, "~> 1.0", [hex: :telemetry_poller, repo: "hexpm", optional: false]}], "hexpm", "3eea763dfa941e25de50decbf17a6a94dbd2270e7b32f88279aa6e9bbb8e23e7"}, - "quickrand": {:hex, 
:quickrand, "2.0.7", "d2bd76676a446e6a058d678444b7fda1387b813710d1af6d6e29bb92186c8820", [:rebar3], [], "hexpm", "b8acbf89a224bc217c3070ca8bebc6eb236dbe7f9767993b274084ea044d35f0"}, - "ranch": {:hex, :ranch, "1.8.0", "8c7a100a139fd57f17327b6413e4167ac559fbc04ca7448e9be9057311597a1d", [:make, :rebar3], [], "hexpm", "49fbcfd3682fab1f5d109351b61257676da1a2fdbe295904176d5e521a2ddfe5"}, - "recon": {:hex, :recon, "2.5.4", "05dd52a119ee4059fa9daa1ab7ce81bc7a8161a2f12e9d42e9d551ffd2ba901c", [:mix, :rebar3], [], "hexpm", "e9ab01ac7fc8572e41eb59385efeb3fb0ff5bf02103816535bacaedf327d0263"}, - "req": {:hex, :req, "0.3.12", "f84c2f9e7cc71c81d7cbeacf7c61e763e53ab5f3065703792a4ab264b4f22672", [:mix], [{:brotli, "~> 0.3.1", [hex: :brotli, repo: "hexpm", optional: true]}, {:ezstd, "~> 1.0", [hex: :ezstd, repo: "hexpm", optional: true]}, {:finch, "~> 0.9", [hex: :finch, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}, {:mime, "~> 1.6 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:nimble_csv, "~> 1.0", [hex: :nimble_csv, repo: "hexpm", optional: true]}, {:plug, "~> 1.0", [hex: :plug, repo: "hexpm", optional: true]}], "hexpm", "c91103d4d1c8edeba90c84e0ba223a59865b673eaab217bfd17da3aa54ab136c"}, - "rustler": {:hex, :rustler, "0.29.1", "880f20ae3027bd7945def6cea767f5257bc926f33ff50c0d5d5a5315883c084d", [:mix], [{:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}, {:toml, "~> 0.6", [hex: :toml, repo: "hexpm", optional: false]}], "hexpm", "109497d701861bfcd26eb8f5801fe327a8eef304f56a5b63ef61151ff44ac9b6"}, - "sleeplocks": {:hex, :sleeplocks, "1.1.2", "d45aa1c5513da48c888715e3381211c859af34bee9b8290490e10c90bb6ff0ca", [:rebar3], [], "hexpm", "9fe5d048c5b781d6305c1a3a0f40bb3dfc06f49bf40571f3d2d0c57eaa7f59a5"}, + "postgrex": {:hex, :postgrex, "0.19.3", "a0bda6e3bc75ec07fca5b0a89bffd242ca209a4822a9533e7d3e84ee80707e19", [:mix], [{:db_connection, "~> 2.1", [hex: :db_connection, repo: "hexpm", optional: false]}, 
{:decimal, "~> 1.5 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:table, "~> 0.1.0", [hex: :table, repo: "hexpm", optional: true]}], "hexpm", "d31c28053655b78f47f948c85bb1cf86a9c1f8ead346ba1aa0d0df017fa05b61"}, + "prom_ex": {:hex, :prom_ex, "1.11.0", "1f6d67f2dead92224cb4f59beb3e4d319257c5728d9638b4a5e8ceb51a4f9c7e", [:mix], [{:absinthe, ">= 1.7.0", [hex: :absinthe, repo: "hexpm", optional: true]}, {:broadway, ">= 1.1.0", [hex: :broadway, repo: "hexpm", optional: true]}, {:ecto, ">= 3.11.0", [hex: :ecto, repo: "hexpm", optional: true]}, {:finch, "~> 0.18", [hex: :finch, repo: "hexpm", optional: false]}, {:jason, "~> 1.4", [hex: :jason, repo: "hexpm", optional: false]}, {:oban, ">= 2.10.0", [hex: :oban, repo: "hexpm", optional: true]}, {:octo_fetch, "~> 0.4", [hex: :octo_fetch, repo: "hexpm", optional: false]}, {:peep, "~> 3.0", [hex: :peep, repo: "hexpm", optional: false]}, {:phoenix, ">= 1.7.0", [hex: :phoenix, repo: "hexpm", optional: true]}, {:phoenix_live_view, ">= 0.20.0", [hex: :phoenix_live_view, repo: "hexpm", optional: true]}, {:plug, ">= 1.16.0", [hex: :plug, repo: "hexpm", optional: true]}, {:plug_cowboy, ">= 2.6.0", [hex: :plug_cowboy, repo: "hexpm", optional: true]}, {:telemetry, ">= 1.0.0", [hex: :telemetry, repo: "hexpm", optional: false]}, {:telemetry_metrics, "~> 1.0", [hex: :telemetry_metrics, repo: "hexpm", optional: false]}, {:telemetry_metrics_prometheus_core, "~> 1.2", [hex: :telemetry_metrics_prometheus_core, repo: "hexpm", optional: false]}, {:telemetry_poller, "~> 1.1", [hex: :telemetry_poller, repo: "hexpm", optional: false]}], "hexpm", "76b074bc3730f0802978a7eb5c7091a65473eaaf07e99ec9e933138dcc327805"}, + "ranch": {:hex, :ranch, "2.1.0", "2261f9ed9574dcfcc444106b9f6da155e6e540b2f82ba3d42b339b93673b72a3", [:make, :rebar3], [], "hexpm", "244ee3fa2a6175270d8e1fc59024fd9dbc76294a321057de8f803b1479e76916"}, + "recon": {:hex, :recon, "2.5.6", 
"9052588e83bfedfd9b72e1034532aee2a5369d9d9343b61aeb7fbce761010741", [:mix, :rebar3], [], "hexpm", "96c6799792d735cc0f0fd0f86267e9d351e63339cbe03df9d162010cefc26bb0"}, + "req": {:hex, :req, "0.5.7", "b722680e03d531a2947282adff474362a48a02aa54b131196fbf7acaff5e4cee", [:mix], [{:brotli, "~> 0.3.1", [hex: :brotli, repo: "hexpm", optional: true]}, {:ezstd, "~> 1.0", [hex: :ezstd, repo: "hexpm", optional: true]}, {:finch, "~> 0.17", [hex: :finch, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}, {:mime, "~> 2.0.6 or ~> 2.1", [hex: :mime, repo: "hexpm", optional: false]}, {:nimble_csv, "~> 1.0", [hex: :nimble_csv, repo: "hexpm", optional: true]}, {:plug, "~> 1.0", [hex: :plug, repo: "hexpm", optional: true]}], "hexpm", "c6035374615120a8923e8089d0c21a3496cf9eda2d287b806081b8f323ceee29"}, + "rustler": {:hex, :rustler, "0.34.0", "e9a73ee419fc296a10e49b415a2eb87a88c9217aa0275ec9f383d37eed290c1c", [:mix], [{:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}, {:req, "~> 0.5", [hex: :req, repo: "hexpm", optional: false]}, {:toml, "~> 0.6", [hex: :toml, repo: "hexpm", optional: false]}], "hexpm", "1d0c7449482b459513003230c0e2422b0252245776fe6fd6e41cb2b11bd8e628"}, + "sleeplocks": {:hex, :sleeplocks, "1.1.3", "96a86460cc33b435c7310dbd27ec82ca2c1f24ae38e34f8edde97f756503441a", [:rebar3], [], "hexpm", "d3b3958552e6eb16f463921e70ae7c767519ef8f5be46d7696cc1ed649421321"}, + "ssl_verify_fun": {:hex, :ssl_verify_fun, "1.1.7", "354c321cf377240c7b8716899e182ce4890c5938111a1296add3ec74cf1715df", [:make, :mix, :rebar3], [], "hexpm", "fe4c190e8f37401d30167c8c405eda19469f34577987c76dde613e838bbc67f8"}, "statistex": {:hex, :statistex, "1.0.0", "f3dc93f3c0c6c92e5f291704cf62b99b553253d7969e9a5fa713e5481cd858a5", [:mix], [], "hexpm", "ff9d8bee7035028ab4742ff52fc80a2aa35cece833cf5319009b52f1b5a86c27"}, "syn": {:hex, :syn, "3.3.0", "4684a909efdfea35ce75a9662fc523e4a8a4e8169a3df275e4de4fa63f99c486", [:rebar3], [], "hexpm", 
"e58ee447bc1094bdd21bf0acc102b1fbf99541a508cd48060bf783c245eaf7d6"}, - "telemetry": {:hex, :telemetry, "1.2.1", "68fdfe8d8f05a8428483a97d7aab2f268aaff24b49e0f599faa091f1d4e7f61c", [:rebar3], [], "hexpm", "dad9ce9d8effc621708f99eac538ef1cbe05d6a874dd741de2e689c47feafed5"}, - "telemetry_metrics": {:hex, :telemetry_metrics, "0.6.1", "315d9163a1d4660aedc3fee73f33f1d355dcc76c5c3ab3d59e76e3edf80eef1f", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "7be9e0871c41732c233be71e4be11b96e56177bf15dde64a8ac9ce72ac9834c6"}, - "telemetry_metrics_prometheus_core": {:hex, :telemetry_metrics_prometheus_core, "1.1.0", "4e15f6d7dbedb3a4e3aed2262b7e1407f166fcb9c30ca3f96635dfbbef99965c", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}, {:telemetry_metrics, "~> 0.6", [hex: :telemetry_metrics, repo: "hexpm", optional: false]}], "hexpm", "0dd10e7fe8070095df063798f82709b0a1224c31b8baf6278b423898d591a069"}, - "telemetry_poller": {:hex, :telemetry_poller, "1.0.0", "db91bb424e07f2bb6e73926fcafbfcbcb295f0193e0a00e825e589a0a47e8453", [:rebar3], [{:telemetry, "~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "b3a24eafd66c3f42da30fc3ca7dda1e9d546c12250a2d60d7b81d264fbec4f6e"}, - "tesla": {:hex, :tesla, "1.8.0", "d511a4f5c5e42538d97eef7c40ec4f3e44effdc5068206f42ed859e09e51d1fd", [:mix], [{:castore, "~> 0.1 or ~> 1.0", [hex: :castore, repo: "hexpm", optional: true]}, {:exjsx, ">= 3.0.0", [hex: :exjsx, repo: "hexpm", optional: true]}, {:finch, "~> 0.13", [hex: :finch, repo: "hexpm", optional: true]}, {:fuse, "~> 2.4", [hex: :fuse, repo: "hexpm", optional: true]}, {:gun, ">= 1.0.0", [hex: :gun, repo: "hexpm", optional: true]}, {:hackney, "~> 1.6", [hex: :hackney, repo: "hexpm", optional: true]}, {:ibrowse, "4.4.2", [hex: :ibrowse, repo: "hexpm", optional: true]}, {:jason, ">= 1.0.0", [hex: :jason, repo: "hexpm", optional: true]}, {:mime, "~> 1.0 or ~> 2.0", [hex: :mime, repo: 
"hexpm", optional: false]}, {:mint, "~> 1.0", [hex: :mint, repo: "hexpm", optional: true]}, {:msgpax, "~> 2.3", [hex: :msgpax, repo: "hexpm", optional: true]}, {:poison, ">= 1.0.0", [hex: :poison, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: true]}], "hexpm", "10501f360cd926a309501287470372af1a6e1cbed0f43949203a4c13300bc79f"}, + "telemetry": {:hex, :telemetry, "1.3.0", "fedebbae410d715cf8e7062c96a1ef32ec22e764197f70cda73d82778d61e7a2", [:rebar3], [], "hexpm", "7015fc8919dbe63764f4b4b87a95b7c0996bd539e0d499be6ec9d7f3875b79e6"}, + "telemetry_metrics": {:hex, :telemetry_metrics, "1.0.0", "29f5f84991ca98b8eb02fc208b2e6de7c95f8bb2294ef244a176675adc7775df", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "f23713b3847286a534e005126d4c959ebcca68ae9582118ce436b521d1d47d5d"}, + "telemetry_metrics_prometheus_core": {:hex, :telemetry_metrics_prometheus_core, "1.2.1", "c9755987d7b959b557084e6990990cb96a50d6482c683fb9622a63837f3cd3d8", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}, {:telemetry_metrics, "~> 0.6 or ~> 1.0", [hex: :telemetry_metrics, repo: "hexpm", optional: false]}], "hexpm", "5e2c599da4983c4f88a33e9571f1458bf98b0cf6ba930f1dc3a6e8cf45d5afb6"}, + "telemetry_poller": {:hex, :telemetry_poller, "1.1.0", "58fa7c216257291caaf8d05678c8d01bd45f4bdbc1286838a28c4bb62ef32999", [:rebar3], [{:telemetry, "~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "9eb9d9cbfd81cbd7cdd24682f8711b6e2b691289a0de6826e58452f28c103c8f"}, + "tesla": {:hex, :tesla, "1.13.2", "85afa342eb2ac0fee830cf649dbd19179b6b359bec4710d02a3d5d587f016910", [:mix], [{:castore, "~> 0.1 or ~> 1.0", [hex: :castore, repo: "hexpm", optional: true]}, {:exjsx, ">= 3.0.0", [hex: :exjsx, repo: "hexpm", optional: true]}, {:finch, "~> 0.13", [hex: :finch, repo: "hexpm", optional: true]}, {:fuse, "~> 2.4", [hex: :fuse, repo: "hexpm", 
optional: true]}, {:gun, ">= 1.0.0", [hex: :gun, repo: "hexpm", optional: true]}, {:hackney, "~> 1.6", [hex: :hackney, repo: "hexpm", optional: true]}, {:ibrowse, "4.4.2", [hex: :ibrowse, repo: "hexpm", optional: true]}, {:jason, ">= 1.0.0", [hex: :jason, repo: "hexpm", optional: true]}, {:mime, "~> 1.0 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:mint, "~> 1.0", [hex: :mint, repo: "hexpm", optional: true]}, {:mox, "~> 1.0", [hex: :mox, repo: "hexpm", optional: true]}, {:msgpax, "~> 2.3", [hex: :msgpax, repo: "hexpm", optional: true]}, {:poison, ">= 1.0.0", [hex: :poison, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: true]}], "hexpm", "960609848f1ef654c3cdfad68453cd84a5febecb6ed9fed9416e36cd9cd724f9"}, "toml": {:hex, :toml, "0.7.0", "fbcd773caa937d0c7a02c301a1feea25612720ac3fa1ccb8bfd9d30d822911de", [:mix], [], "hexpm", "0690246a2478c1defd100b0c9b89b4ea280a22be9a7b313a8a058a2408a2fa70"}, "typed_struct": {:hex, :typed_struct, "0.3.0", "939789e3c1dca39d7170c87f729127469d1315dcf99fee8e152bb774b17e7ff7", [:mix], [], "hexpm", "c50bd5c3a61fe4e198a8504f939be3d3c85903b382bde4865579bc23111d1b6d"}, - "types": {:hex, :types, "0.1.8", "5782b67231e8c174fe2835395e71e669fe0121076779d2a09f1c0d58ee0e2f13", [:rebar3], [], "hexpm", "04285239f4954c5ede56f78ed7778ede24e3f2e997f7b16402a167af0cc2658a"}, "unsafe": {:hex, :unsafe, "1.0.2", "23c6be12f6c1605364801f4b47007c0c159497d0446ad378b5cf05f1855c0581", [:mix], [], "hexpm", "b485231683c3ab01a9cd44cb4a79f152c6f3bb87358439c6f68791b85c2df675"}, - "uuid": {:hex, :uuid_erl, "2.0.5", "60faeeb7edfd40847ed13cb0dd1044baabe4e79a00c0ca9c4d13a073914b1016", [:rebar3], [{:quickrand, ">= 2.0.5", [hex: :quickrand, repo: "hexpm", optional: false]}], "hexpm", "e54373262ca88401689277947c54b95e9ecbc977bd5c57c9dd44ad9da278e360"}, "websock": {:hex, :websock, "0.5.3", "2f69a6ebe810328555b6fe5c831a851f485e303a7c8ce6c5f675abeb20ebdadc", [:mix], [], "hexpm", 
"6105453d7fac22c712ad66fab1d45abdf049868f253cf719b625151460b8b453"}, - "websock_adapter": {:hex, :websock_adapter, "0.5.5", "9dfeee8269b27e958a65b3e235b7e447769f66b5b5925385f5a569269164a210", [:mix], [{:bandit, ">= 0.6.0", [hex: :bandit, repo: "hexpm", optional: true]}, {:plug, "~> 1.14", [hex: :plug, repo: "hexpm", optional: false]}, {:plug_cowboy, "~> 2.6", [hex: :plug_cowboy, repo: "hexpm", optional: true]}, {:websock, "~> 0.5", [hex: :websock, repo: "hexpm", optional: false]}], "hexpm", "4b977ba4a01918acbf77045ff88de7f6972c2a009213c515a445c48f224ffce9"}, + "websock_adapter": {:hex, :websock_adapter, "0.5.7", "65fa74042530064ef0570b75b43f5c49bb8b235d6515671b3d250022cb8a1f9e", [:mix], [{:bandit, ">= 0.6.0", [hex: :bandit, repo: "hexpm", optional: true]}, {:plug, "~> 1.14", [hex: :plug, repo: "hexpm", optional: false]}, {:plug_cowboy, "~> 2.6", [hex: :plug_cowboy, repo: "hexpm", optional: true]}, {:websock, "~> 0.5", [hex: :websock, repo: "hexpm", optional: false]}], "hexpm", "d0f478ee64deddfec64b800673fd6e0c8888b079d9f3444dd96d2a98383bdbd1"}, } diff --git a/mkdocs.yaml b/mkdocs.yaml index 72369950..436b4be5 100644 --- a/mkdocs.yaml +++ b/mkdocs.yaml @@ -6,51 +6,52 @@ repo_name: supabase/supavisor repo_url: https://github.com/supabase/supavisor nav: - - Welcome: 'index.md' - - FAQ: 'faq.md' - - Development: - - Installation: 'development/installation.md' - - Setup: 'development/setup.md' - - Docs: 'development/docs.md' - - Deployment: - - Deploy with Fly.io: 'deployment/fly.md' - - Connecting: - - Overview: 'connecting/overview.md' - - Authentication: 'connecting/authentication.md' - - Configuration: - - Tenants: 'configuration/tenants.md' - - Users: 'configuration/users.md' - - Pool Modes: 'configuration/pool_modes.md' - - Migrating: - - from PgBouncer: 'migrating/pgbouncer.md' - - Monitoring: - - Metrics: 'monitoring/metrics.md' - - ORMs: - - Prisma: 'orms/prisma.md' + - Welcome: "index.md" + - FAQ: "faq.md" + - Development: + - Installation: 
"development/installation.md" + - Setup: "development/setup.md" + - Docs: "development/docs.md" + - Deployment: + - Deploy with Fly.io: "deployment/fly.md" + - Connecting: + - Overview: "connecting/overview.md" + - Authentication: "connecting/authentication.md" + - Configuration: + - Tenants: "configuration/tenants.md" + - Users: "configuration/users.md" + - Pool Modes: "configuration/pool_modes.md" + - Migrating: + - from PgBouncer: "migrating/pgbouncer.md" + - Monitoring: + - Metrics: "monitoring/metrics.md" + - Logs: "monitoring/logs.md" + - ORMs: + - Prisma: "orms/prisma.md" theme: - name: 'material' - favicon: 'images/favicon.ico' - logo: 'images/favicon.ico' - homepage: https://supabase.github.io/supavisor - features: - - navigation.expand - palette: - primary: black - accent: light green + name: "material" + favicon: "images/favicon.ico" + logo: "images/favicon.ico" + homepage: https://supabase.github.io/supavisor + features: + - navigation.expand + palette: + primary: black + accent: light green markdown_extensions: - - pymdownx.highlight: - linenums: true - guess_lang: false - use_pygments: true - pygments_style: default - - pymdownx.superfences - - pymdownx.tabbed: - alternate_style: true - - pymdownx.snippets - - pymdownx.tasklist - - admonition - - pymdownx.emoji: - emoji_index: !!python/name:materialx.emoji.twemoji - emoji_generator: !!python/name:materialx.emoji.to_svg \ No newline at end of file + - pymdownx.highlight: + linenums: true + guess_lang: false + use_pygments: true + pygments_style: default + - pymdownx.superfences + - pymdownx.tabbed: + alternate_style: true + - pymdownx.snippets + - pymdownx.tasklist + - admonition + - pymdownx.emoji: + emoji_index: !!python/name:materialx.emoji.twemoji + emoji_generator: !!python/name:materialx.emoji.to_svg diff --git a/native/.gitignore b/native/.gitignore new file mode 100644 index 00000000..eb5a316c --- /dev/null +++ b/native/.gitignore @@ -0,0 +1 @@ +target diff --git a/native/pgparser/Cargo.lock 
b/native/Cargo.lock similarity index 64% rename from native/pgparser/Cargo.lock rename to native/Cargo.lock index 7233ff8a..1d1f73e2 100644 --- a/native/pgparser/Cargo.lock +++ b/native/Cargo.lock @@ -4,18 +4,18 @@ version = 3 [[package]] name = "aho-corasick" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" dependencies = [ "memchr", ] [[package]] name = "anyhow" -version = "1.0.79" +version = "1.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "080e9890a082662b09c1ad45f567faeeb47f22b5fb23895fbe1e651e718e25ca" +checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da" [[package]] name = "bindgen" @@ -23,7 +23,7 @@ version = "0.66.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f2b84e06fc203107bfbad243f4aba2af864eb7db3b1cf46ea0a023b0b433d2a7" dependencies = [ - "bitflags 2.4.1", + "bitflags", "cexpr", "clang-sys", "lazy_static", @@ -36,36 +36,27 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.48", + "syn 2.0.72", "which", ] [[package]] name = "bitflags" -version = "1.3.2" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" - -[[package]] -name = "bitflags" -version = "2.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" +checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" [[package]] name = "bytes" -version = "1.5.0" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" +checksum = 
"8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" [[package]] name = "cc" -version = "1.0.83" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" -dependencies = [ - "libc", -] +checksum = "26a5c3fd7bfa1ce3897a3a3501d362b2d87b7f2583ebcb4a949ec25911025cbc" [[package]] name = "cexpr" @@ -84,9 +75,9 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "clang-sys" -version = "1.7.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67523a3b4be3ce1989d607a828d036249522dd9c1c8de7f4dd2dae43a37369d1" +checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" dependencies = [ "glob", "libc", @@ -104,9 +95,9 @@ dependencies = [ [[package]] name = "either" -version = "1.9.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" +checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" [[package]] name = "equivalent" @@ -116,19 +107,19 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "errno" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" +checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" dependencies = [ "libc", - "windows-sys 0.52.0", + "windows-sys", ] [[package]] name = "fastrand" -version = "2.0.1" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" +checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a" [[package]] name = "fixedbitset" @@ -150,9 +141,9 @@ checksum 
= "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "hashbrown" -version = "0.14.3" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" [[package]] name = "heck" @@ -160,25 +151,37 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + [[package]] name = "home" version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5" dependencies = [ - "windows-sys 0.52.0", + "windows-sys", ] [[package]] name = "indexmap" -version = "2.1.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d530e1a18b1cb4c484e6e34556a0d948706958449fca0cab753d649f2bce3d1f" +checksum = "de3fc2e30ba82dd1b3911c8de1ffc143c74a914a14e99514d7637e3099df5ea0" dependencies = [ "equivalent", "hashbrown", ] +[[package]] +name = "inventory" +version = "0.3.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f958d3d68f4167080a18141e10381e7634563984a537f2a49a30fd8e53ac5767" + [[package]] name = "itertools" version = "0.10.5" @@ -190,15 +193,15 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c" +checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" [[package]] name = "lazy_static" -version = "1.4.0" +version = "1.5.0" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] name = "lazycell" @@ -208,37 +211,37 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.152" +version = "0.2.155" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13e3bf6590cbc649f4d1a3eefc9d5d6eb746f5200ffb04e5e142700b8faa56e7" +checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" [[package]] name = "libloading" -version = "0.8.1" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c571b676ddfc9a8c12f1f3d3085a7b163966a8fd8098a90640953ce5f6170161" +checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" dependencies = [ "cfg-if", - "windows-sys 0.48.0", + "windows-targets", ] [[package]] name = "linux-raw-sys" -version = "0.4.12" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4cd1a83af159aa67994778be9070f0ae1bd732942279cabb14f86f986a21456" +checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" [[package]] name = "log" -version = "0.4.20" +version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" +checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" [[package]] name = "memchr" -version = "2.7.1" +version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149" +checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" [[package]] name = "minimal-lexical" @@ -276,9 +279,9 @@ checksum = 
"19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" [[package]] name = "petgraph" -version = "0.6.4" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" +checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" dependencies = [ "fixedbitset", "indexmap", @@ -312,19 +315,19 @@ dependencies = [ [[package]] name = "prettyplease" -version = "0.2.16" +version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a41cf62165e97c7f814d2221421dbb9afcbcdb0a88068e5ea206e19951c2cbb5" +checksum = "5f12335488a2f3b0a83b14edad48dca9879ce89b2edd10e80237e4e852dd645e" dependencies = [ "proc-macro2", - "syn 2.0.48", + "syn 2.0.72", ] [[package]] name = "proc-macro2" -version = "1.0.76" +version = "1.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95fc56cda0b5c3325f5fbbd7ff9fda9e02bb00bb3dac51252d2f1bfa1cb8cc8c" +checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" dependencies = [ "unicode-ident", ] @@ -348,7 +351,7 @@ dependencies = [ "bytes", "cfg-if", "cmake", - "heck", + "heck 0.4.1", "itertools", "lazy_static", "log", @@ -386,27 +389,18 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.35" +version = "1.0.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" +checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" dependencies = [ "proc-macro2", ] -[[package]] -name = "redox_syscall" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" -dependencies = [ - "bitflags 1.3.2", -] - [[package]] name = "regex" -version = "1.10.2" +version = "1.10.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "380b951a9c5e80ddfd6136919eef32310721aa4aacd4889a8d39124b026ab343" +checksum = "b91213439dad192326a0d7c6ee3955910425f441d7038e0d6933b0aec5c4517f" dependencies = [ "aho-corasick", "memchr", @@ -416,9 +410,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.3" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f804c7828047e88b2d32e2d7fe5a105da8ee3264f01902f796c8e067dc2483f" +checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df" dependencies = [ "aho-corasick", "memchr", @@ -427,9 +421,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.8.2" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" +checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b" [[package]] name = "rustc-hash" @@ -439,45 +433,46 @@ checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] name = "rustix" -version = "0.38.30" +version = "0.38.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "322394588aaf33c24007e8bb3238ee3e4c5c09c084ab32bc73890b99ff326bca" +checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" dependencies = [ - "bitflags 2.4.1", + "bitflags", "errno", "libc", "linux-raw-sys", - "windows-sys 0.52.0", + "windows-sys", ] [[package]] name = "rustler" -version = "0.30.0" +version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4b4fea69e23de68c42c06769d6624d2d018da550c17244dd4b691f90ced4a7e" +checksum = "e94bdfa68c0388cbd725f1ca54e975956482c262599e5cced04a903eec918b7f" dependencies = [ - "lazy_static", + "inventory", "rustler_codegen", "rustler_sys", ] [[package]] name = "rustler_codegen" -version = "0.30.0" +version = "0.34.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "406061bd07aaf052c344257afed4988c5ec8efe4d2352b4c2cf27ea7c8575b12" +checksum = "996dc019acb78b91b4e0c1bd6fa2cd509a835d309de762dc15213b97eac399da" dependencies = [ - "heck", + "heck 0.5.0", + "inventory", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.72", ] [[package]] name = "rustler_sys" -version = "2.3.1" +version = "2.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a7c0740e5322b64e2b952d8f0edce5f90fcf6f6fe74cca3f6e78eb3de5ea858" +checksum = "3914a75a147934353c3772a77b774c79fdf80ba84e8347f52a50df0c164aaff2" dependencies = [ "regex", "unreachable", @@ -485,37 +480,38 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.16" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f98d2aa92eebf49b69786be48e4477826b256916e84a57ff2a4f21923b48eb4c" +checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" [[package]] name = "serde" -version = "1.0.195" +version = "1.0.204" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63261df402c67811e9ac6def069e4786148c4563f4b50fd4bf30aa370d626b02" +checksum = "bc76f558e0cbb2a839d37354c575f1dc3fdc6546b5be373ba43d95f231bf7c12" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.195" +version = "1.0.204" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46fe8f8603d81ba86327b23a2e9cdf49e1255fb94a4c5f297f6ee0547178ea2c" +checksum = "e0cd7e117be63d3c3678776753929474f3b04a43a080c744d6b0ae2a8c28e222" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.72", ] [[package]] name = "serde_json" -version = "1.0.111" +version = "1.0.122" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "176e46fa42316f18edd598015a5166857fc835ec732f5215eac6b7bdbf0a84f4" +checksum = "784b6203951c57ff748476b126ccb5e8e2959a5c19e5c617ab1956be3dbc68da" 
dependencies = [ "itoa", + "memchr", "ryu", "serde", ] @@ -539,9 +535,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.48" +version = "2.0.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f3531638e407dfc0814761abb7c00a5b54992b849452a0646b7f65c9f770f3f" +checksum = "dc4b9b9bf2add8093d3f2c0204471e951b2285580335de42f9d2534f3ae7a8af" dependencies = [ "proc-macro2", "quote", @@ -550,35 +546,34 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.9.0" +version = "3.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01ce4141aa927a6d1bd34a041795abd0db1cccba5d5f24b009f694bdf3a1f3fa" +checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" dependencies = [ "cfg-if", "fastrand", - "redox_syscall", "rustix", - "windows-sys 0.52.0", + "windows-sys", ] [[package]] name = "thiserror" -version = "1.0.56" +version = "1.0.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54378c645627613241d077a3a79db965db602882668f9136ac42af9ecb730ad" +checksum = "c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.56" +version = "1.0.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa0faa943b50f3db30a20aa7e265dbc66076993efed8463e8de414e5d06d3471" +checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.72", ] [[package]] @@ -614,134 +609,75 @@ dependencies = [ "rustix", ] -[[package]] -name = "windows-sys" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" -dependencies = [ - "windows-targets 0.48.5", -] - [[package]] name = "windows-sys" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.0", + "windows-targets", ] [[package]] name = "windows-targets" -version = "0.48.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ - "windows_aarch64_gnullvm 0.48.5", - "windows_aarch64_msvc 0.48.5", - "windows_i686_gnu 0.48.5", - "windows_i686_msvc 0.48.5", - "windows_x86_64_gnu 0.48.5", - "windows_x86_64_gnullvm 0.48.5", - "windows_x86_64_msvc 0.48.5", -] - -[[package]] -name = "windows-targets" -version = "0.52.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" -dependencies = [ - "windows_aarch64_gnullvm 0.52.0", - "windows_aarch64_msvc 0.52.0", - "windows_i686_gnu 0.52.0", - "windows_i686_msvc 0.52.0", - "windows_x86_64_gnu 0.52.0", - "windows_x86_64_gnullvm 0.52.0", - "windows_x86_64_msvc 0.52.0", + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_gnullvm", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", ] [[package]] name = "windows_aarch64_gnullvm" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" - -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.52.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.48.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" [[package]] name = "windows_aarch64_msvc" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" [[package]] name = "windows_i686_gnu" -version = "0.48.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" [[package]] -name = "windows_i686_gnu" -version = "0.52.0" +name = "windows_i686_gnullvm" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] name = "windows_i686_msvc" -version = "0.48.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" - -[[package]] -name = "windows_i686_msvc" -version = "0.52.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] name = "windows_x86_64_gnu" -version = "0.48.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.52.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] name = "windows_x86_64_gnullvm" -version = "0.48.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" - -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.52.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" [[package]] name = "windows_x86_64_msvc" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" diff --git a/native/Cargo.toml b/native/Cargo.toml new file mode 100644 index 00000000..43a9c690 --- /dev/null +++ b/native/Cargo.toml @@ -0,0 +1,5 @@ +[workspace] +resolver = "2" +members = [ + "pgparser" +] diff --git a/native/pgparser/.gitignore b/native/pgparser/.gitignore deleted file mode 100644 index ea8c4bf7..00000000 --- a/native/pgparser/.gitignore +++ /dev/null @@ -1 +0,0 @@ -/target diff --git a/native/pgparser/Cargo.toml b/native/pgparser/Cargo.toml index a0e0c61e..f4d5923c 100644 --- a/native/pgparser/Cargo.toml +++ b/native/pgparser/Cargo.toml @@ -9,5 +9,5 @@ path = "src/lib.rs" crate-type = ["cdylib"] [dependencies] -rustler = "0.30.0" +rustler = "0.34.0" pg_query = "5.1.0" diff --git a/native/pgparser/src/lib.rs b/native/pgparser/src/lib.rs index 83f2d795..18c3aee2 100644 --- 
a/native/pgparser/src/lib.rs +++ b/native/pgparser/src/lib.rs @@ -1,28 +1,13 @@ -use rustler::{Atom, Error as RustlerError, NifTuple}; - -mod atoms { - rustler::atoms! { - ok, - error, - } -} - -#[derive(NifTuple)] -struct Response { - status: Atom, - message: Vec -} - #[rustler::nif] -fn statement_types(query: &str) -> Result { - let result = pg_query::parse(&query); - - if let Ok(result) = result { - let message = result.statement_types().into_iter().map(|s| s.to_string()).collect(); - return Ok(Response{status: atoms::ok(), message}); - } else { - return Err(RustlerError::Term(Box::new("Error parsing query"))); - } +fn statement_types(query: &str) -> Result, String> { + let result = pg_query::parse(query).map_err(|_| "Error parsing query")?; + + let message = result + .statement_types() + .into_iter() + .map(Into::into) + .collect(); + Ok(message) } -rustler::init!("Elixir.Supavisor.PgParser", [statement_types]); +rustler::init!("Elixir.Supavisor.PgParser"); diff --git a/nix/package.nix b/nix/package.nix new file mode 100644 index 00000000..3bc4ae00 --- /dev/null +++ b/nix/package.nix @@ -0,0 +1,51 @@ +{ + fetchMixDeps, + mixRelease, + cargo, + rustPlatform, + lib, + stdenv, + darwin, + protobuf, + libiconv, +}: let + pname = "supavisor"; + version = "0.0.1"; + src = ./..; + + mixFodDeps = fetchMixDeps { + pname = "mix-deps-${pname}"; + inherit src version; + hash = "sha256-vTBDNIZ6Pp23u70f8oTe3nbpReCEDPf6VuWNLdkWwq4="; + }; + + cargoDeps = rustPlatform.importCargoLock { + lockFile = ../native/pgparser/Cargo.lock; + }; +in + mixRelease { + inherit pname version src mixFodDeps; + + nativeBuildInputs = [cargo protobuf]; + + buildInputs = lib.optionals stdenv.isDarwin (with darwin.apple_sdk; [ + libiconv + frameworks.System + frameworks.CoreFoundation + frameworks.CoreServices + frameworks.DiskArbitration + frameworks.IOKit + frameworks.CFNetwork + frameworks.Security + libs.libDER + ]); + + preConfigure = '' + cat ${cargoDeps}/.cargo/config >> 
native/pgparser/.cargo/config.toml + ln -s ${cargoDeps} native/pgparser/cargo-vendor-dir + ''; + + meta = { + mainProgram = "supavisor"; + }; + } diff --git a/priv/repo/migrations/20240822132419_add_availability_zone.exs b/priv/repo/migrations/20240822132419_add_availability_zone.exs new file mode 100644 index 00000000..bda994a2 --- /dev/null +++ b/priv/repo/migrations/20240822132419_add_availability_zone.exs @@ -0,0 +1,9 @@ +defmodule Supavisor.Repo.Migrations.AddAwsZone do + use Ecto.Migration + + def change do + alter table("tenants", prefix: "_supavisor") do + add(:availability_zone, :string) + end + end +end diff --git a/priv/repo/seeds_after_migration.exs b/priv/repo/seeds_after_migration.exs index 3a99bfe1..424382b0 100644 --- a/priv/repo/seeds_after_migration.exs +++ b/priv/repo/seeds_after_migration.exs @@ -39,7 +39,7 @@ if !Tenants.get_tenant_by_external_id("is_manager") do |> Tenants.create_tenant() end -["proxy_tenant1", "syn_tenant", "prom_tenant"] +["proxy_tenant1", "syn_tenant", "prom_tenant", "max_pool_tenant"] |> Enum.each(fn tenant -> if !Tenants.get_tenant_by_external_id(tenant) do %{ @@ -54,6 +54,7 @@ end "db_user" => db_conf[:username], "db_password" => db_conf[:password], "pool_size" => 9, + "max_clients" => 100, "mode_type" => "transaction" }, %{ @@ -61,6 +62,7 @@ end "db_user" => db_conf[:username], "db_password" => db_conf[:password], "pool_size" => 3, + "max_clients" => 100, "mode_type" => "transaction" }, %{ @@ -69,13 +71,14 @@ end "db_password" => db_conf[:password], "pool_size" => 1, "mode_type" => "session", + "max_clients" => 100, "pool_checkout_timeout" => 500 }, %{ "db_user_alias" => "max_clients", "db_user" => db_conf[:username], "db_password" => db_conf[:password], - "pool_size" => 2, + "pool_size" => 1, "max_clients" => -1, "mode_type" => "transaction", "pool_checkout_timeout" => 500 diff --git a/test/integration/external_test.exs b/test/integration/external_test.exs new file mode 100644 index 00000000..5601acca --- /dev/null +++ 
b/test/integration/external_test.exs @@ -0,0 +1,125 @@ +defmodule Supavisor.Integration.ExternalTest do + use ExUnit.Case, async: false + + @moduletag integration: true + + setup_all do + npm = + get_tool("yarn") || get_tool("npm") || get_tool("bun") || + raise "Cannot find neither Yarn nor NPM" + + assert {_, 0} = System.cmd(npm, ~w[install], cd: suite("js")) + + {:ok, npm: npm} + end + + setup :external_id + + setup ctx do + if get_tool(ctx.runtime) do + :ok + else + raise "Runtime not available" + end + end + + describe "Postgres.js" do + @describetag library: "postgres.js", suite: "js" + + @tag runtime: "node", mode: "session" + test "Node session", ctx do + assert_run(ctx, ~w[postgres/index.js]) + end + + @tag runtime: "node", mode: "transaction" + test "Node transaction", ctx do + assert_run(ctx, ~w[postgres/index.js]) + end + + # These currently do not pass + # @tag runtime: "bun", mode: "session" + # test "Bun session", ctx do + # assert_run ctx, ~w[postgres/index.js], suite: "js" + # end + # + # @tag runtime: "bun", mode: "transaction" + # test "Bun transaction", ctx do + # assert_run ctx, ~w[postgres/index.js], suite: "js" + # end + # + # @tag runtime: "deno", mode: "session" + # test "Deno session", ctx do + # assert_run ctx, ~w[run --allow-all postgres/index.js], suite: "js" + # end + # + # @tag runtime: "deno", mode: "transaction" + # test "Deno transaction", ctx do + # assert_run ctx, ~w[run --allow-all postgres/index.js], suite: "js" + # end + end + + defp assert_run(ctx, args, opts \\ []) do + suite = suite(ctx.suite) + + env = + [ + {"PGMODE", ctx.mode}, + {"PGDATABASE", ctx.db}, + {"PGHOST", "localhost"}, + {"PGPORT", to_string(port(ctx.mode))}, + {"PGUSER", ctx.user}, + {"PGPASS", "postgres"} + ] ++ (opts[:env] || []) + + assert {output, code} = + System.cmd(ctx.runtime, args, + env: env, + cd: suite, + stderr_to_stdout: true + ) + + assert code == 0, output + end + + ## UTILS + + defp suite(name), do: Path.join(__DIR__, name) + + defp 
get_tool(name), do: System.find_executable(name) + + defp port("session"), do: Application.fetch_env!(:supavisor, :proxy_port_session) + defp port("transaction"), do: Application.fetch_env!(:supavisor, :proxy_port_transaction) + + defp external_id(ctx) do + external_id = + [ctx.runtime, ctx.library, ctx.mode] + |> Enum.map_join("_", &String.replace(&1, ~r/\W/, "")) + + # Ensure that there are no leftovers + _ = Supavisor.Tenants.delete_tenant_by_external_id(external_id) + + _ = Supavisor.Repo.query("DROP DATABASE IF EXISTS #{external_id}") + assert {:ok, _} = Supavisor.Repo.query("CREATE DATABASE #{external_id}") + + assert {:ok, tenant} = + Supavisor.Tenants.create_tenant(%{ + default_parameter_status: %{}, + db_host: "localhost", + db_port: 6432, + db_database: external_id, + auth_query: "SELECT rolname, rolpassword FROM pg_authid WHERE rolname=$1;", + external_id: external_id, + users: [ + %{ + "pool_size" => 15, + "db_user" => "postgres", + "db_password" => "postgres", + "is_manager" => true, + "mode_type" => "session" + } + ] + }) + + {:ok, user: "postgres.#{external_id}", db: tenant.db_database, external_id: external_id} + end +end diff --git a/test/integration/js/.gitignore b/test/integration/js/.gitignore new file mode 100644 index 00000000..07e6e472 --- /dev/null +++ b/test/integration/js/.gitignore @@ -0,0 +1 @@ +/node_modules diff --git a/test/integration/js/package.json b/test/integration/js/package.json new file mode 100644 index 00000000..4a83eaaa --- /dev/null +++ b/test/integration/js/package.json @@ -0,0 +1,13 @@ +{ + "name": "supavisor-integration", + "version": "1.0.0", + "main": "index.js", + "type": "module", + "license": "MIT", + "scripts": { + "test:postgres": "node ./postgres/index.js" + }, + "dependencies": { + "postgres": "^3.4.5" + } +} diff --git a/test/integration/js/postgres/copy.csv b/test/integration/js/postgres/copy.csv new file mode 100644 index 00000000..6622044e --- /dev/null +++ b/test/integration/js/postgres/copy.csv @@ -0,0 
+1,2 @@ +1 2 3 +4 5 6 diff --git a/test/integration/js/postgres/index.js b/test/integration/js/postgres/index.js new file mode 100644 index 00000000..4d56013c --- /dev/null +++ b/test/integration/js/postgres/index.js @@ -0,0 +1,2612 @@ +import { t, nt, ot } from './test.js' // eslint-disable-line +import net from 'node:net' +import fs from 'node:fs' +import crypto from 'node:crypto' + +import postgres from 'postgres' +const delay = ms => new Promise(r => setTimeout(r, ms)) + +const rel = x => new URL(x, import.meta.url) +const idle_timeout = t.timeout + +const login = { + user: process.env.PGUSER, + pass: process.env.PGPASS, +} + +//const login_md5 = { +// user: 'postgres_js_test_md5', +// pass: 'postgres_js_test_md5' +//} +// +//const login_scram = { +// user: 'postgres_js_test_scram', +// pass: 'postgres_js_test_scram' +//} + +const options = { + host: process.env.PGHOST, + port: process.env.PGPORT, + db: process.env.PGDATABASE, + prepare: (process.env.PGMODE != 'transaction'), + user: login.user, + pass: login.pass, + idle_timeout, + connect_timeout: t.timeout, + max: 1 +} + +const sql = postgres(options) + +await sql`DROP TABLE IF EXISTS test`; + +//t('Connects with no options', async() => { +// const sql = postgres({ max: 1 }) +// +// const result = (await sql`select 1 as x`)[0].x +// await sql.end() +// +// return [1, result] +//}) + +//t('Uses default database without slash', async() => { +// const sql = postgres('postgres://localhost') +// return [sql.options.user, sql.options.database] +//}) +// +//t('Uses default database with slash', async() => { +// const sql = postgres('postgres://localhost/') +// return [sql.options.user, sql.options.database] +//}) + +t('Result is array', async() => + [true, Array.isArray(await sql`select 1`)] +) + +t('Result has count', async() => + [1, (await sql`select 1`).count] +) + +t('Result has command', async() => + ['SELECT', (await sql`select 1`).command] +) + +t('Create table', async() => + ['CREATE TABLE', (await 
sql`create table test(int int)`).command, await sql`drop table test`] +) + +t('Drop table', { timeout: t.timeout * 2 }, async() => { + await sql`create table test(int int)` + return ['DROP TABLE', (await sql`drop table test`).command] +}) + +t('null', async() => + [null, (await sql`select ${ null } as x`)[0].x] +) + +t('Integer', async() => + ['1', (await sql`select ${ 1 } as x`)[0].x] +) + +t('String', async() => + ['hello', (await sql`select ${ 'hello' } as x`)[0].x] +) + +t('Boolean false', async() => + [false, (await sql`select ${ false } as x`)[0].x] +) + +t('Boolean true', async() => + [true, (await sql`select ${ true } as x`)[0].x] +) + +t('Date', async() => { + const now = new Date() + return [0, now - (await sql`select ${ now } as x`)[0].x] +}) + +t('Json', async() => { + const x = (await sql`select ${ sql.json({ a: 'hello', b: 42 }) } as x`)[0].x + return ['hello,42', [x.a, x.b].join()] +}) + +t('implicit json', async() => { + const x = (await sql`select ${ { a: 'hello', b: 42 } }::json as x`)[0].x + return ['hello,42', [x.a, x.b].join()] +}) + +t('implicit jsonb', async() => { + const x = (await sql`select ${ { a: 'hello', b: 42 } }::jsonb as x`)[0].x + return ['hello,42', [x.a, x.b].join()] +}) + +t('Empty array', async() => + [true, Array.isArray((await sql`select ${ sql.array([], 1009) } as x`)[0].x)] +) + +t('String array', async() => + ['123', (await sql`select ${ '{1,2,3}' }::int[] as x`)[0].x.join('')] +) + +t('Array of Integer', async() => + ['3', (await sql`select ${ sql.array([1, 2, 3]) } as x`)[0].x[2]] +) + +t('Array of String', async() => + ['c', (await sql`select ${ sql.array(['a', 'b', 'c']) } as x`)[0].x[2]] +) + +t('Array of Date', async() => { + const now = new Date() + return [now.getTime(), (await sql`select ${ sql.array([now, now, now]) } as x`)[0].x[2].getTime()] +}) + +t('Array of Box', async() => [ + '(3,4),(1,2);(6,7),(4,5)', + (await sql`select ${ '{(1,2),(3,4);(4,5),(6,7)}' }::box[] as x`)[0].x.join(';') +]) + +t('Nested array 
n2', async() => + ['4', (await sql`select ${ sql.array([[1, 2], [3, 4]]) } as x`)[0].x[1][1]] +) + +t('Nested array n3', async() => + ['6', (await sql`select ${ sql.array([[[1, 2]], [[3, 4]], [[5, 6]]]) } as x`)[0].x[2][0][1]] +) + +t('Escape in arrays', async() => + ['Hello "you",c:\\windows', (await sql`select ${ sql.array(['Hello "you"', 'c:\\windows']) } as x`)[0].x.join(',')] +) + +t('Escapes', async() => { + return ['hej"hej', Object.keys((await sql`select 1 as ${ sql('hej"hej') }`)[0])[0]] +}) + +t('null for int', async() => { + await sql`create table test (x int)` + return [1, (await sql`insert into test values(${ null })`).count, await sql`drop table test`] +}) + +t('Throws on illegal transactions', async() => { + const sql = postgres({ ...options, max: 2, fetch_types: false }) + const error = await sql`begin`.catch(e => e) + return [ + error.code, + 'UNSAFE_TRANSACTION' + ] +}) + +t('Transaction throws', async() => { + await sql`create table test (a int)` + return ['22P02', await sql.begin(async sql => { + await sql`insert into test values(1)` + await sql`insert into test values('hej')` + }).catch(x => x.code), await sql`drop table test`] +}) + +t('Transaction rolls back', async() => { + await sql`create table test (a int)` + await sql.begin(async sql => { + await sql`insert into test values(1)` + await sql`insert into test values('hej')` + }).catch(() => { /* ignore */ }) + return [0, (await sql`select a from test`).count, await sql`drop table test`] +}) + +t('Transaction throws on uncaught savepoint', async() => { + await sql`create table test (a int)` + + return ['fail', (await sql.begin(async sql => { + await sql`insert into test values(1)` + await sql.savepoint(async sql => { + await sql`insert into test values(2)` + throw new Error('fail') + }) + }).catch((err) => err.message)), await sql`drop table test`] +}) + +t('Transaction throws on uncaught named savepoint', async() => { + await sql`create table test (a int)` + + return ['fail', (await 
sql.begin(async sql => { + await sql`insert into test values(1)` + await sql.savepoit('watpoint', async sql => { + await sql`insert into test values(2)` + throw new Error('fail') + }) + }).catch(() => 'fail')), await sql`drop table test`] +}) + +t('Transaction succeeds on caught savepoint', async() => { + await sql`create table test (a int)` + await sql.begin(async sql => { + await sql`insert into test values(1)` + await sql.savepoint(async sql => { + await sql`insert into test values(2)` + throw new Error('please rollback') + }).catch(() => { /* ignore */ }) + await sql`insert into test values(3)` + }) + + return ['2', (await sql`select count(1) from test`)[0].count, await sql`drop table test`] +}) + +t('Savepoint returns Result', async() => { + let result + await sql.begin(async sql => { + result = await sql.savepoint(sql => + sql`select 1 as x` + ) + }) + + return [1, result[0].x] +}) + +// Reason: disabled because of security reasons +// +//t('Prepared transaction', async() => { +// await sql`create table test (a int)` +// +// await sql.begin(async sql => { +// await sql`insert into test values(1)` +// await sql.prepare('tx1') +// }) +// +// await sql`commit prepared 'tx1'` +// +// return ['1', (await sql`select count(1) from test`)[0].count, await sql`drop table test`] +//}) + +t('Transaction requests are executed implicitly', async() => { + const sql = postgres({ ...options, debug: true, idle_timeout: 1, fetch_types: false }) + return [ + 'testing', + (await sql.begin(sql => [ + sql`select set_config('postgres_js.test', 'testing', true)`, + sql`select current_setting('postgres_js.test') as x` + ]))[1][0].x + ] +}) + +t('Uncaught transaction request errors bubbles to transaction', async() => [ + '42703', + (await sql.begin(sql => [ + sql`select wat`, + sql`select current_setting('postgres_js.test') as x, ${ 1 } as a` + ]).catch(e => e.code)) +]) + +t('Fragments in transactions', async() => [ + true, + (await sql.begin(sql => sql`select true as x where ${ 
sql`1=1` }`))[0].x +]) + +t('Transaction rejects with rethrown error', async() => [ + 'WAT', + await sql.begin(async sql => { + try { + await sql`select exception` + } catch (ex) { + throw new Error('WAT') + } + }).catch(e => e.message) +]) + +t('Parallel transactions', async() => { + await sql`create table test (a int)` + return ['11', (await Promise.all([ + sql.begin(sql => sql`select 1`), + sql.begin(sql => sql`select 1`) + ])).map(x => x.count).join(''), await sql`drop table test`] +}) + +t('Many transactions at beginning of connection', async() => { + const sql = postgres(options) + const xs = await Promise.all(Array.from({ length: 100 }, () => sql.begin(sql => sql`select 1`))) + return [100, xs.length] +}) + +t('Transactions array', async() => { + await sql`create table test (a int)` + + return ['11', (await sql.begin(sql => [ + sql`select 1`.then(x => x), + sql`select 1` + ])).map(x => x.count).join(''), await sql`drop table test`] +}) + +t('Transaction waits', async() => { + await sql`create table test (a int)` + await sql.begin(async sql => { + await sql`insert into test values(1)` + await sql.savepoint(async sql => { + await sql`insert into test values(2)` + throw new Error('please rollback') + }).catch(() => { /* ignore */ }) + await sql`insert into test values(3)` + }) + + return ['11', (await Promise.all([ + sql.begin(sql => sql`select 1`), + sql.begin(sql => sql`select 1`) + ])).map(x => x.count).join(''), await sql`drop table test`] +}) + +t('Helpers in Transaction', async() => { + return ['1', (await sql.begin(async sql => + await sql`select ${ sql({ x: 1 }) }` + ))[0].x] +}) + +t('Undefined values throws', async() => { + let error + + await sql` + select ${ undefined } as x + `.catch(x => error = x.code) + + return ['UNDEFINED_VALUE', error] +}) + +t('Transform undefined', async() => { + const sql = postgres({ ...options, transform: { undefined: null } }) + return [null, (await sql`select ${ undefined } as x`)[0].x] +}) + +t('Transform undefined in 
array', async() => { + const sql = postgres({ ...options, transform: { undefined: null } }) + return [null, (await sql`select * from (values ${ sql([undefined, undefined]) }) as x(x, y)`)[0].y] +}) + +t('Null sets to null', async() => + [null, (await sql`select ${ null } as x`)[0].x] +) + +t('Throw syntax error', async() => + ['42601', (await sql`wat 1`.catch(x => x)).code] +) + +t('Connect using uri', async() => + [true, await new Promise((resolve, reject) => { + const sql = postgres(`postgres://${login.user}:${login.pass}@${options.host}:${options.port}/${options.db}`, { + idle_timeout + }) + sql`select 1`.then(() => resolve(true), reject) + })] +) + +t('Options from uri with special characters in user and pass', async() => { + const opt = postgres({ user: 'öla', pass: 'pass^word' }).options + return [[opt.user, opt.pass].toString(), 'öla,pass^word'] +}) + +t('Fail with proper error on no host', async() => + ['ECONNREFUSED', (await new Promise((resolve, reject) => { + const sql = postgres('postgres://localhost:33333/' + options.db, { + idle_timeout + }) + sql`select 1`.then(reject, resolve) + })).code] +) + +// REASON: No SSL in local testing (so far) +//t('Connect using SSL', async() => +// [true, (await new Promise((resolve, reject) => { +// postgres({ +// ssl: { rejectUnauthorized: false }, +// idle_timeout +// })`select 1`.then(() => resolve(true), reject) +// }))] +//) +// +//t('Connect using SSL require', async() => +// [true, (await new Promise((resolve, reject) => { +// postgres({ +// ssl: 'require', +// idle_timeout +// })`select 1`.then(() => resolve(true), reject) +// }))] +//) +// +//t('Connect using SSL prefer', async() => { +// await exec('psql', ['-c', 'alter system set ssl=off']) +// await exec('psql', ['-c', 'select pg_reload_conf()']) +// +// const sql = postgres({ +// ssl: 'prefer', +// idle_timeout +// }) +// +// return [ +// 1, (await sql`select 1 as x`)[0].x, +// await exec('psql', ['-c', 'alter system set ssl=on']), +// await exec('psql', 
['-c', 'select pg_reload_conf()']) +// ] +//}) +// +//t('Reconnect using SSL', { timeout: 2 }, async() => { +// const sql = postgres({ +// ssl: 'require', +// idle_timeout: 0.1 +// }) +// +// await sql`select 1` +// await delay(200) +// +// return [1, (await sql`select 1 as x`)[0].x] +//}) +// +//t('Login without password', async() => { +// return [true, (await postgres({ ...options, ...login })`select true as x`)[0].x] +//}) + +// Reason: No MD5 +//t('Login using MD5', async() => { +// return [true, (await postgres({ ...options, ...login_md5 })`select true as x`)[0].x] +//}) +// +//t('Login using scram-sha-256', async() => { +// return [true, (await postgres({ ...options, ...login_scram })`select true as x`)[0].x] +//}) + +// Reason: No tests for SCRAM (for now) +//t('Parallel connections using scram-sha-256', { +// timeout: 2 +//}, async() => { +// const sql = postgres({ ...options, ...login_scram }) +// return [true, (await Promise.all([ +// sql`select true as x, pg_sleep(0.01)`, +// sql`select true as x, pg_sleep(0.01)`, +// sql`select true as x, pg_sleep(0.01)` +// ]))[0][0].x] +//}) +// +//t('Support dynamic password function', async() => { +// return [true, (await postgres({ +// ...options, +// ...login_scram, +// pass: () => 'postgres_js_test_scram' +// })`select true as x`)[0].x] +//}) +// +//t('Support dynamic async password function', async() => { +// return [true, (await postgres({ +// ...options, +// ...login_scram, +// pass: () => Promise.resolve('postgres_js_test_scram') +// })`select true as x`)[0].x] +//}) + +t('Point type', async() => { + const sql = postgres({ + ...options, + types: { + point: { + to: 600, + from: [600], + serialize: ([x, y]) => '(' + x + ',' + y + ')', + parse: (x) => x.slice(1, -1).split(',').map(x => +x) + } + } + }) + + await sql`create table test (x point)` + await sql`insert into test (x) values (${ sql.types.point([10, 20]) })` + return [20, (await sql`select x from test`)[0].x[1], await sql`drop table test`] +}) + 
+t('Point type array', async() => { + const sql = postgres({ + ...options, + types: { + point: { + to: 600, + from: [600], + serialize: ([x, y]) => '(' + x + ',' + y + ')', + parse: (x) => x.slice(1, -1).split(',').map(x => +x) + } + } + }) + + await sql`create table test (x point[])` + await sql`insert into test (x) values (${ sql.array([sql.types.point([10, 20]), sql.types.point([20, 30])]) })` + return [30, (await sql`select x from test`)[0].x[1][1], await sql`drop table test`] +}) + +t('sql file', async() => + [1, (await sql.file(rel('select.sql')))[0].x] +) + +t('sql file has forEach', async() => { + let result + await sql + .file(rel('select.sql'), { cache: false }) + .forEach(({ x }) => result = x) + + return [1, result] +}) + +t('sql file throws', async() => + ['ENOENT', (await sql.file(rel('selectomondo.sql')).catch(x => x.code))] +) + +t('sql file cached', async() => { + await sql.file(rel('select.sql')) + await delay(20) + + return [1, (await sql.file(rel('select.sql')))[0].x] +}) + +t('Parameters in file', async() => { + const result = await sql.file( + rel('select-param.sql'), + ['hello'] + ) + return ['hello', result[0].x] +}) + +t('Connection ended promise', async() => { + const sql = postgres(options) + + await sql.end() + + return [undefined, await sql.end()] +}) + +t('Connection ended timeout', async() => { + const sql = postgres(options) + + await sql.end({ timeout: 10 }) + + return [undefined, await sql.end()] +}) + +t('Connection ended error', async() => { + const sql = postgres(options) + await sql.end() + return ['CONNECTION_ENDED', (await sql``.catch(x => x.code))] +}) + +t('Connection end does not cancel query', async() => { + const sql = postgres(options) + + const promise = sql`select 1 as x`.execute() + + await sql.end() + + return [1, (await promise)[0].x] +}) + +t('Connection destroyed', async() => { + const sql = postgres(options) + process.nextTick(() => sql.end({ timeout: 0 })) + return ['CONNECTION_DESTROYED', await sql``.catch(x 
=> x.code)] +}) + +t('Connection destroyed with query before', async() => { + const sql = postgres(options) + , error = sql`select pg_sleep(0.2)`.catch(err => err.code) + + sql.end({ timeout: 0 }) + return ['CONNECTION_DESTROYED', await error] +}) + +t('transform column', async() => { + const sql = postgres({ + ...options, + transform: { column: x => x.split('').reverse().join('') } + }) + + await sql`create table test (hello_world int)` + await sql`insert into test values (1)` + return ['dlrow_olleh', Object.keys((await sql`select * from test`)[0])[0], await sql`drop table test`] +}) + +t('column toPascal', async() => { + const sql = postgres({ + ...options, + transform: { column: postgres.toPascal } + }) + + await sql`create table test (hello_world int)` + await sql`insert into test values (1)` + return ['HelloWorld', Object.keys((await sql`select * from test`)[0])[0], await sql`drop table test`] +}) + +t('column toCamel', async() => { + const sql = postgres({ + ...options, + transform: { column: postgres.toCamel } + }) + + await sql`create table test (hello_world int)` + await sql`insert into test values (1)` + return ['helloWorld', Object.keys((await sql`select * from test`)[0])[0], await sql`drop table test`] +}) + +t('column toKebab', async() => { + const sql = postgres({ + ...options, + transform: { column: postgres.toKebab } + }) + + await sql`create table test (hello_world int)` + await sql`insert into test values (1)` + return ['hello-world', Object.keys((await sql`select * from test`)[0])[0], await sql`drop table test`] +}) + +t('Transform nested json in arrays', async() => { + const sql = postgres({ + ...options, + transform: postgres.camel + }) + return ['aBcD', (await sql`select '[{"a_b":1},{"c_d":2}]'::jsonb as x`)[0].x.map(Object.keys).join('')] +}) + +t('Transform deeply nested json object in arrays', async() => { + const sql = postgres({ + ...options, + transform: postgres.camel + }) + return [ + 'childObj_deeplyNestedObj_grandchildObj', + (await 
sql` + select '[{"nested_obj": {"child_obj": 2, "deeply_nested_obj": {"grandchild_obj": 3}}}]'::jsonb as x + `)[0].x.map(x => { + let result + for (const key in x) + result = [...Object.keys(x[key]), ...Object.keys(x[key].deeplyNestedObj)] + return result + })[0] + .join('_') + ] +}) + +t('Transform deeply nested json array in arrays', async() => { + const sql = postgres({ + ...options, + transform: postgres.camel + }) + return [ + 'childArray_deeplyNestedArray_grandchildArray', + (await sql` + select '[{"nested_array": [{"child_array": 2, "deeply_nested_array": [{"grandchild_array":3}]}]}]'::jsonb AS x + `)[0].x.map((x) => { + let result + for (const key in x) + result = [...Object.keys(x[key][0]), ...Object.keys(x[key][0].deeplyNestedArray[0])] + return result + })[0] + .join('_') + ] +}) + +t('Bypass transform for json primitive', async() => { + const sql = postgres({ + ...options, + transform: postgres.camel + }) + + const x = ( + await sql`select 'null'::json as a, 'false'::json as b, '"a"'::json as c, '1'::json as d` + )[0] + + return [ + JSON.stringify({ a: null, b: false, c: 'a', d: 1 }), + JSON.stringify(x) + ] +}) + +t('Bypass transform for jsonb primitive', async() => { + const sql = postgres({ + ...options, + transform: postgres.camel + }) + + const x = ( + await sql`select 'null'::jsonb as a, 'false'::jsonb as b, '"a"'::jsonb as c, '1'::jsonb as d` + )[0] + + return [ + JSON.stringify({ a: null, b: false, c: 'a', d: 1 }), + JSON.stringify(x) + ] +}) + +t('unsafe', async() => { + await sql`create table test (x int)` + return [1, (await sql.unsafe('insert into test values ($1) returning *', [1]))[0].x, await sql`drop table test`] +}) + +t('unsafe simple', async() => { + return [1, (await sql.unsafe('select 1 as x'))[0].x] +}) + +t('unsafe simple includes columns', async() => { + return ['x', (await sql.unsafe('select 1 as x').values()).columns[0].name] +}) + +t('unsafe describe', async() => { + const q = 'insert into test values (1)' + await sql`create 
table test(a int unique)` + await sql.unsafe(q).describe() + const x = await sql.unsafe(q).describe() + return [ + q, + x.string, + await sql`drop table test` + ] +}) + +t('simple query using unsafe with multiple statements', async() => { + return [ + '1,2', + (await sql.unsafe('select 1 as x;select 2 as x')).map(x => x[0].x).join() + ] +}) + +t('simple query using simple() with multiple statements', async() => { + return [ + '1,2', + (await sql`select 1 as x;select 2 as x`.simple()).map(x => x[0].x).join() + ] +}) + +if (options.prepare) { + t('listen and notify', async() => { + const sql = postgres(options) + const channel = 'hello' + const result = await new Promise(async r => { + await sql.listen(channel, r) + sql.notify(channel, 'works') + }) + + return [ + 'works', + result, + sql.end() + ] + }) + + t('double listen', async() => { + const sql = postgres(options) + , channel = 'hello' + + let count = 0 + + await new Promise((resolve, reject) => + sql.listen(channel, resolve) + .then(() => sql.notify(channel, 'world')) + .catch(reject) + ).then(() => count++) + + await new Promise((resolve, reject) => + sql.listen(channel, resolve) + .then(() => sql.notify(channel, 'world')) + .catch(reject) + ).then(() => count++) + + // for coverage + sql.listen('weee', () => { /* noop */ }).then(sql.end) + + return [2, count] + }) + +// Reason: No LISTEN/NOTIFY +//t('multiple listeners work after a reconnect', async() => { +// const sql = postgres(options) +// , xs = [] +// +// const s1 = await sql.listen('test', x => xs.push('1', x)) +// await sql.listen('test', x => xs.push('2', x)) +// await sql.notify('test', 'a') +// await delay(50) +// await sql`select pg_terminate_backend(${ s1.state.pid })` +// await delay(200) +// await sql.notify('test', 'b') +// await delay(50) +// sql.end() +// +// return ['1a2a1b2b', xs.join('')] +//}) +// +//t('listen and notify with weird name', async() => { +// const sql = postgres(options) +// const channel = 'wat-;.ø.§' +// const result = 
await new Promise(async r => { +// const { unlisten } = await sql.listen(channel, r) +// sql.notify(channel, 'works') +// await delay(50) +// await unlisten() +// }) +// +// return [ +// 'works', +// result, +// sql.end() +// ] +//}) +// +//t('listen and notify with upper case', async() => { +// const sql = postgres(options) +// const channel = 'withUpperChar' +// const result = await new Promise(async r => { +// await sql.listen(channel, r) +// sql.notify(channel, 'works') +// }) +// +// return [ +// 'works', +// result, +// sql.end() +// ] +//}) +// +//t('listen reconnects', { timeout: 2 }, async() => { +// const sql = postgres(options) +// , resolvers = {} +// , a = new Promise(r => resolvers.a = r) +// , b = new Promise(r => resolvers.b = r) +// +// let connects = 0 +// +// const { state: { pid } } = await sql.listen( +// 'test', +// x => x in resolvers && resolvers[x](), +// () => connects++ +// ) +// await sql.notify('test', 'a') +// await a +// await sql`select pg_terminate_backend(${ pid })` +// await delay(100) +// await sql.notify('test', 'b') +// await b +// sql.end() +// return [connects, 2] +//}) +// +//t('listen result reports correct connection state after reconnection', async() => { +// const sql = postgres(options) +// , xs = [] +// +// const result = await sql.listen('test', x => xs.push(x)) +// const initialPid = result.state.pid +// await sql.notify('test', 'a') +// await sql`select pg_terminate_backend(${ initialPid })` +// await delay(50) +// sql.end() +// +// return [result.state.pid !== initialPid, true] +//}) +// +//t('unlisten removes subscription', async() => { +// const sql = postgres(options) +// , xs = [] +// +// const { unlisten } = await sql.listen('test', x => xs.push(x)) +// await sql.notify('test', 'a') +// await delay(50) +// await unlisten() +// await sql.notify('test', 'b') +// await delay(50) +// sql.end() +// +// return ['a', xs.join('')] +//}) +// +//t('listen after unlisten', async() => { +// const sql = postgres(options) 
+// , xs = [] +// +// const { unlisten } = await sql.listen('test', x => xs.push(x)) +// await sql.notify('test', 'a') +// await delay(50) +// await unlisten() +// await sql.notify('test', 'b') +// await delay(50) +// await sql.listen('test', x => xs.push(x)) +// await sql.notify('test', 'c') +// await delay(50) +// sql.end() +// +// return ['ac', xs.join('')] +//}) +// +//t('multiple listeners and unlisten one', async() => { +// const sql = postgres(options) +// , xs = [] +// +// await sql.listen('test', x => xs.push('1', x)) +// const s2 = await sql.listen('test', x => xs.push('2', x)) +// await sql.notify('test', 'a') +// await delay(50) +// await s2.unlisten() +// await sql.notify('test', 'b') +// await delay(50) +// sql.end() +// +// return ['1a2a1b', xs.join('')] +//}) +} + +// Reason: We alter these parameters for PSQL, so it will not work as expected +//t('responds with server parameters (application_name)', async() => +// ['postgres.js', await new Promise((resolve, reject) => postgres({ +// ...options, +// onparameter: (k, v) => k === 'application_name' && resolve(v) +// })`select 1`.catch(reject))] +//) +// +//t('has server parameters', async() => { +// return ['postgres.js', (await sql`select 1`.then(() => sql.parameters.application_name))] +//}) + +t('big query body', { timeout: t.timeout * 2 }, async() => { + const size = 50000 + await sql`create table test (x int)` + return [size, (await sql`insert into test ${ + sql([...Array(size).keys()].map(x => ({ x }))) + }`).count, await sql`drop table test`] +}) + +// Reason: This tests checks internalt of the library, not the DB stuff +//ot('Throws if more than 65534 parameters', {timeout: t.timeout * 2}, async() => { +// await sql`create table test (x int) -- barfoo ` +// return ['MAX_PARAMETERS_EXCEEDED', (await sql`insert into test ${ +// sql([...Array(65535).keys()].map(x => ({ x }))) +// }`.catch(e => (console.debug(e.code), e.code)))] //, await sql`drop table test -- foobar`] +//}) + +t('let postgres do 
implicit cast of unknown types', async() => { + await sql`create table test (x timestamp with time zone)` + const [{ x }] = await sql`insert into test values (${ new Date().toISOString() }) returning *` + return [true, x instanceof Date, await sql`drop table test`] +}) + +t('only allows one statement', async() => + ['42601', await sql`select 1; select 2`.catch(e => e.code)] +) + +t('await sql() throws not tagged error', async() => { + let error + try { + await sql('select 1') + } catch (e) { + error = e.code + } + return ['NOT_TAGGED_CALL', error] +}) + +t('sql().then throws not tagged error', async() => { + let error + try { + sql('select 1').then(() => { /* noop */ }) + } catch (e) { + error = e.code + } + return ['NOT_TAGGED_CALL', error] +}) + +t('sql().catch throws not tagged error', async() => { + let error + try { + await sql('select 1') + } catch (e) { + error = e.code + } + return ['NOT_TAGGED_CALL', error] +}) + +t('sql().finally throws not tagged error', async() => { + let error + try { + sql('select 1').finally(() => { /* noop */ }) + } catch (e) { + error = e.code + } + return ['NOT_TAGGED_CALL', error] +}) + +t('little bobby tables', async() => { + const name = 'Robert\'); DROP TABLE students;--' + + await sql`create table students (name text, age int)` + await sql`insert into students (name) values (${ name })` + + return [ + name, (await sql`select name from students`)[0].name, + await sql`drop table students` + ] +}) + +t('Connection errors are caught using begin()', { + timeout: t.timeout * 2 +}, async() => { + let error + try { + const sql = postgres({ host: 'localhost', port: 1 }) + + await sql.begin(async(sql) => { + await sql`insert into test (label, value) values (${1}, ${2})` + }) + } catch (err) { + error = err + } + + return [ + true, + error.code === 'ECONNREFUSED' || + error.message === 'Connection refused (os error 61)' + ] +}) + +t('dynamic table name', async() => { + await sql`create table test(a int)` + return [ + 0, (await 
sql`select * from ${ sql('test') }`).count, + await sql`drop table test` + ] +}) + +t('dynamic schema name', async() => { + await sql`create table test(a int)` + return [ + 0, (await sql`select * from ${ sql('public') }.test`).count, + await sql`drop table test` + ] +}) + +t('dynamic schema and table name', async() => { + await sql`create table test(a int)` + return [ + 0, (await sql`select * from ${ sql('public.test') }`).count, + await sql`drop table test` + ] +}) + +t('dynamic column name', async() => { + return ['!not_valid', Object.keys((await sql`select 1 as ${ sql('!not_valid') }`)[0])[0]] +}) + +t('dynamic select as', async() => { + return ['2', (await sql`select ${ sql({ a: 1, b: 2 }) }`)[0].b] +}) + +t('dynamic select as pluck', async() => { + return [undefined, (await sql`select ${ sql({ a: 1, b: 2 }, 'a') }`)[0].b] +}) + +t('dynamic insert', async() => { + await sql`create table test (a int, b text)` + const x = { a: 42, b: 'the answer' } + + return ['the answer', (await sql`insert into test ${ sql(x) } returning *`)[0].b, await sql`drop table test`] +}) + +t('dynamic insert pluck', async() => { + await sql`create table test (a int, b text)` + const x = { a: 42, b: 'the answer' } + + return [null, (await sql`insert into test ${ sql(x, 'a') } returning *`)[0].b, await sql`drop table test`] +}) + +t('dynamic in with empty array', async() => { + await sql`create table test (a int)` + await sql`insert into test values (1)` + return [ + (await sql`select * from test where null in ${ sql([]) }`).count, + 0, + await sql`drop table test` + ] +}) + +t('dynamic in after insert', async() => { + await sql`create table test (a int, b text)` + const [{ x }] = await sql` + with x as ( + insert into test values (1, 'hej') + returning * + ) + select 1 in ${ sql([1, 2, 3]) } as x from x + ` + return [ + true, x, + await sql`drop table test` + ] +}) + +t('array insert', async() => { + await sql`create table test (a int, b int)` + return [2, (await sql`insert into test (a, 
b) values ${ sql([1, 2]) } returning *`)[0].b, await sql`drop table test`] +}) + +t('where parameters in()', async() => { + await sql`create table test (x text)` + await sql`insert into test values ('a')` + return [ + (await sql`select * from test where x in ${ sql(['a', 'b', 'c']) }`)[0].x, + 'a', + await sql`drop table test` + ] +}) + +t('where parameters in() values before', async() => { + return [2, (await sql` + with rows as ( + select * from (values (1), (2), (3), (4)) as x(a) + ) + select * from rows where a in ${ sql([3, 4]) } + `).count] +}) + +t('dynamic multi row insert', async() => { + await sql`create table test (a int, b text)` + const x = { a: 42, b: 'the answer' } + + return [ + 'the answer', + (await sql`insert into test ${ sql([x, x]) } returning *`)[1].b, await sql`drop table test` + ] +}) + +t('dynamic update', async() => { + await sql`create table test (a int, b text)` + await sql`insert into test (a, b) values (17, 'wrong')` + + return [ + 'the answer', + (await sql`update test set ${ sql({ a: 42, b: 'the answer' }) } returning *`)[0].b, await sql`drop table test` + ] +}) + +t('dynamic update pluck', async() => { + await sql`create table test (a int, b text)` + await sql`insert into test (a, b) values (17, 'wrong')` + + return [ + 'wrong', + (await sql`update test set ${ sql({ a: 42, b: 'the answer' }, 'a') } returning *`)[0].b, await sql`drop table test` + ] +}) + +t('dynamic select array', async() => { + await sql`create table test (a int, b text)` + await sql`insert into test (a, b) values (42, 'yay')` + return ['yay', (await sql`select ${ sql(['a', 'b']) } from test`)[0].b, await sql`drop table test`] +}) + +t('dynamic returning array', async() => { + await sql`create table test (a int, b text)` + return [ + 'yay', + (await sql`insert into test (a, b) values (42, 'yay') returning ${ sql(['a', 'b']) }`)[0].b, + await sql`drop table test` + ] +}) + +t('dynamic select args', async() => { + await sql`create table test (a int, b text)` + await 
sql`insert into test (a, b) values (42, 'yay')` + return ['yay', (await sql`select ${ sql('a', 'b') } from test`)[0].b, await sql`drop table test`] +}) + +t('dynamic values single row', async() => { + const [{ b }] = await sql` + select * from (values ${ sql(['a', 'b', 'c']) }) as x(a, b, c) + ` + + return ['b', b] +}) + +t('dynamic values multi row', async() => { + const [, { b }] = await sql` + select * from (values ${ sql([['a', 'b', 'c'], ['a', 'b', 'c']]) }) as x(a, b, c) + ` + + return ['b', b] +}) + +// Reason: we do not support custom connection parameters +//t('connection parameters', async() => { +// const sql = postgres({ +// ...options, +// connection: { +// 'some.var': 'yay' +// } +// }) +// +// return ['yay', (await sql`select current_setting('some.var') as x`)[0].x] +//}) + +t('Multiple queries', async() => { + const sql = postgres({ ...options, max: 5 }) + return [4, (await Promise.all([ + sql`select 1`, + sql`select 2`, + sql`select 3`, + sql`select 4` + ])).length] +}) + +t('Multiple statements', async() => + [2, await sql.unsafe(` + select 1 as x; + select 2 as a; + `).then(([, [x]]) => x.a)] +) + +// Reason: We return different error, so that test will not work +//t('throws correct error when authentication fails', async() => { +// const sql = postgres({ +// ...options, +// pass: 'wrong' +// }) +// return ['28P01', await sql`select 1`.catch(e => e.code)] +//}) + +t('notice', async() => { + let notice + const log = console.log // eslint-disable-line + console.log = function(x) { // eslint-disable-line + notice = x + } + + await sql`create table if not exists users()` + await sql`create table if not exists users()` + + console.log = log // eslint-disable-line + + return ['NOTICE', notice.severity] +}) + +t('notice hook', async() => { + let notice + const sql = postgres({ + ...options, + onnotice: x => notice = x + }) + + await sql`create table if not exists users()` + await sql`create table if not exists users()` + + return ['NOTICE', 
notice.severity] +}) + +t('bytea serializes and parses', async() => { + const buf = Buffer.from('wat') + + await sql`create table test (x bytea)` + await sql`insert into test values (${ buf })` + + return [ + buf.toString(), + (await sql`select x from test`)[0].x.toString(), + await sql`drop table test` + ] +}) + +t('forEach', async() => { + let result + await sql`select 1 as x`.forEach(({ x }) => result = x) + return [1, result] +}) + +t('forEach returns empty array', async() => { + return [0, (await sql`select 1 as x`.forEach(() => { /* noop */ })).length] +}) + +if (process.env.PGMODE != 'transaction') { + t('Cursor', async() => { + const order = [] + await sql`select 1 as x union select 2 as x`.cursor(async([x]) => { + order.push(x.x + 'a') + await delay(100) + order.push(x.x + 'b') + }) + return ['1a1b2a2b', order.join('')] + }) + + t('Unsafe cursor', async() => { + const order = [] + await sql.unsafe('select 1 as x union select 2 as x').cursor(async([x]) => { + order.push(x.x + 'a') + await delay(100) + order.push(x.x + 'b') + }) + return ['1a1b2a2b', order.join('')] + }) + + t('Cursor custom n', async() => { + const order = [] + await sql`select * from generate_series(1,20)`.cursor(10, async(x) => { + order.push(x.length) + }) + return ['10,10', order.join(',')] + }) + + t('Cursor custom with rest n', async() => { + const order = [] + await sql`select * from generate_series(1,20)`.cursor(11, async(x) => { + order.push(x.length) + }) + return ['11,9', order.join(',')] + }) + + t('Cursor custom with less results than batch size', async() => { + const order = [] + await sql`select * from generate_series(1,20)`.cursor(21, async(x) => { + order.push(x.length) + }) + return ['20', order.join(',')] + }) + + t('Cursor cancel', async() => { + let result + await sql`select * from generate_series(1,10) as x`.cursor(async([{ x }]) => { + result = x + return sql.CLOSE + }) + return [1, result] + }) + + t('Cursor throw', async() => { + const order = [] + await sql`select 
1 as x union select 2 as x`.cursor(async([x]) => { + order.push(x.x + 'a') + await delay(100) + throw new Error('watty') + }).catch(() => order.push('err')) + return ['1aerr', order.join('')] + }) + + t('Cursor error', async() => [ + '42601', + await sql`wat`.cursor(() => { /* noop */ }).catch((err) => err.code) + ]) + + t('Multiple Cursors', { timeout: t.timeout * 2 }, async() => { + const result = [] + await sql.begin(async sql => [ + await sql`select 1 as cursor, x from generate_series(1,4) as x`.cursor(async([row]) => { + result.push(row.x) + await new Promise(r => setTimeout(r, 20)) + }), + await sql`select 2 as cursor, x from generate_series(101,104) as x`.cursor(async([row]) => { + result.push(row.x) + await new Promise(r => setTimeout(r, 10)) + }) + ]) + + return ['1,2,3,4,101,102,103,104', result.join(',')] + }) + + t('Cursor as async iterator', async() => { + const order = [] + for await (const [x] of sql`select generate_series(1,2) as x;`.cursor()) { + order.push(x.x + 'a') + await delay(10) + order.push(x.x + 'b') + } + + return ['1a1b2a2b', order.join('')] + }) + + t('Cursor as async iterator with break', async() => { + const order = [] + for await (const xs of sql`select generate_series(1,2) as x;`.cursor()) { + order.push(xs[0].x + 'a') + await delay(10) + order.push(xs[0].x + 'b') + break + } + + return ['1a1b', order.join('')] + }) + + t('Async Iterator Unsafe cursor', async() => { + const order = [] + for await (const [x] of sql.unsafe('select 1 as x union select 2 as x').cursor()) { + order.push(x.x + 'a') + await delay(10) + order.push(x.x + 'b') + } + return ['1a1b2a2b', order.join('')] + }) + + t('Async Iterator Cursor custom n', async() => { + const order = [] + for await (const x of sql`select * from generate_series(1,20)`.cursor(10)) + order.push(x.length) + + return ['10,10', order.join(',')] + }) + + t('Async Iterator Cursor custom with rest n', async() => { + const order = [] + for await (const x of sql`select * from 
generate_series(1,20)`.cursor(11)) + order.push(x.length) + + return ['11,9', order.join(',')] + }) + + t('Async Iterator Cursor custom with less results than batch size', async() => { + const order = [] + for await (const x of sql`select * from generate_series(1,20)`.cursor(21)) + order.push(x.length) + return ['20', order.join(',')] + }) +} + +t('Transform row', async() => { + const sql = postgres({ + ...options, + transform: { row: () => 1 } + }) + + return [1, (await sql`select 'wat'`)[0]] +}) + +t('Transform row forEach', async() => { + let result + const sql = postgres({ + ...options, + transform: { row: () => 1 } + }) + + await sql`select 1`.forEach(x => result = x) + + return [1, result] +}) + +t('Transform value', async() => { + const sql = postgres({ + ...options, + transform: { value: () => 1 } + }) + + return [1, (await sql`select 'wat' as x`)[0].x] +}) + +t('Transform columns from', async() => { + const sql = postgres({ + ...options, + transform: postgres.fromCamel + }) + await sql`create table test (a_test int, b_test text)` + await sql`insert into test ${ sql([{ aTest: 1, bTest: 1 }]) }` + await sql`update test set ${ sql({ aTest: 2, bTest: 2 }) }` + return [ + 2, + (await sql`select ${ sql('aTest', 'bTest') } from test`)[0].a_test, + await sql`drop table test` + ] +}) + +t('Transform columns to', async() => { + const sql = postgres({ + ...options, + transform: postgres.toCamel + }) + await sql`create table test (a_test int, b_test text)` + await sql`insert into test ${ sql([{ a_test: 1, b_test: 1 }]) }` + await sql`update test set ${ sql({ a_test: 2, b_test: 2 }) }` + return [ + 2, + (await sql`select a_test, b_test from test`)[0].aTest, + await sql`drop table test` + ] +}) + +t('Transform columns from and to', async() => { + const sql = postgres({ + ...options, + transform: postgres.camel + }) + await sql`create table test (a_test int, b_test text)` + await sql`insert into test ${ sql([{ aTest: 1, bTest: 1 }]) }` + await sql`update test set ${ 
sql({ aTest: 2, bTest: 2 }) }` + return [ + 2, + (await sql`select ${ sql('aTest', 'bTest') } from test`)[0].aTest, + await sql`drop table test` + ] +}) + +t('Transform columns from and to (legacy)', async() => { + const sql = postgres({ + ...options, + transform: { + column: { + to: postgres.fromCamel, + from: postgres.toCamel + } + } + }) + await sql`create table test (a_test int, b_test text)` + await sql`insert into test ${ sql([{ aTest: 1, bTest: 1 }]) }` + await sql`update test set ${ sql({ aTest: 2, bTest: 2 }) }` + return [ + 2, + (await sql`select ${ sql('aTest', 'bTest') } from test`)[0].aTest, + await sql`drop table test` + ] +}) + +// Reason: we do not support Unix sockets +//t('Unix socket', async() => { +// const sql = postgres({ +// ...options, +// host: process.env.PGSOCKET || '/tmp' // eslint-disable-line +// }) +// +// return [1, (await sql`select 1 as x`)[0].x] +//}) + +t('Big result', async() => { + return [100000, (await sql`select * from generate_series(1, 100000)`).count] +}) + +t('Debug', async() => { + let result + const sql = postgres({ + ...options, + debug: (connection_id, str) => result = str + }) + + await sql`select 1` + + return ['select 1', result] +}) + +t('bigint is returned as String', async() => [ + 'string', + typeof (await sql`select 9223372036854777 as x`)[0].x +]) + +t('int is returned as Number', async() => [ + 'number', + typeof (await sql`select 123 as x`)[0].x +]) + +t('numeric is returned as string', async() => [ + 'string', + typeof (await sql`select 1.2 as x`)[0].x +]) + +t('Async stack trace', async() => { + const sql = postgres({ ...options, debug: false }) + return [ + parseInt(new Error().stack.split('\n')[1].match(':([0-9]+):')[1]) + 1, + parseInt(await sql`error`.catch(x => x.stack.split('\n').pop().match(':([0-9]+):')[1])) + ] +}) + +t('Debug has long async stack trace', async() => { + const sql = postgres({ ...options, debug: true }) + + return [ + 'watyo', + await yo().catch(x => 
x.stack.match(/wat|yo/g).join('')) + ] + + function yo() { + return wat() + } + + function wat() { + return sql`error` + } +}) + +t('Error contains query string', async() => [ + 'selec 1', + (await sql`selec 1`.catch(err => err.query)) +]) + +t('Error contains query serialized parameters', async() => [ + 1, + (await sql`selec ${ 1 }`.catch(err => err.parameters[0])) +]) + +t('Error contains query raw parameters', async() => [ + 1, + (await sql`selec ${ 1 }`.catch(err => err.args[0])) +]) + +t('Query and parameters on errorare not enumerable if debug is not set', async() => { + const sql = postgres({ ...options, debug: false }) + + return [ + false, + (await sql`selec ${ 1 }`.catch(err => err.propertyIsEnumerable('parameters') || err.propertyIsEnumerable('query'))) + ] +}) + +t('Query and parameters are enumerable if debug is set', async() => { + const sql = postgres({ ...options, debug: true }) + + return [ + true, + (await sql`selec ${ 1 }`.catch(err => err.propertyIsEnumerable('parameters') && err.propertyIsEnumerable('query'))) + ] +}) + +t('connect_timeout', { timeout: t.timeout * 20 }, async() => { + const connect_timeout = 0.2 + const server = net.createServer() + server.listen() + const sql = postgres({ port: server.address().port, host: '127.0.0.1', connect_timeout }) + const start = Date.now() + let end + await sql`select 1`.catch((e) => { + if (e.code !== 'CONNECT_TIMEOUT') + throw e + end = Date.now() + }) + server.close() + return [connect_timeout, Math.floor((end - start) / 100) / 10] +}) + +t('connect_timeout throws proper error', async() => [ + 'CONNECT_TIMEOUT', + await postgres({ + ...options, + connect_timeout: 0.001 + })`select 1`.catch(e => e.code) +]) + +t('connect_timeout error message includes host:port', { timeout: t.timeout * 20 }, async() => { + const connect_timeout = 0.2 + const server = net.createServer() + server.listen() + const sql = postgres({ port: server.address().port, host: '127.0.0.1', connect_timeout }) + const port = 
server.address().port + let err + await sql`select 1`.catch((e) => { + if (e.code !== 'CONNECT_TIMEOUT') + throw e + err = e.message + }) + server.close() + return [['write CONNECT_TIMEOUT 127.0.0.1:', port].join(''), err] +}) + +t('requests works after single connect_timeout', async() => { + let first = true + + const sql = postgres({ + ...options, + connect_timeout: { valueOf() { return first ? (first = false, 0.0001) : 1 } } + }) + + return [ + 'CONNECT_TIMEOUT,,1', + [ + await sql`select 1 as x`.then(() => 'success', x => x.code), + await delay(10), + (await sql`select 1 as x`)[0].x + ].join(',') + ] +}) + +t('Postgres errors are of type PostgresError', async() => + [true, (await sql`bad keyword`.catch(e => e)) instanceof sql.PostgresError] +) + +t('Result has columns spec', async() => + ['x', (await sql`select 1 as x`).columns[0].name] +) + +t('forEach has result as second argument', async() => { + let x + await sql`select 1 as x`.forEach((_, result) => x = result) + return ['x', x.columns[0].name] +}) + +t('Result as arrays', async() => { + const sql = postgres({ + ...options, + transform: { + row: x => Object.values(x) + } + }) + + return ['1,2', (await sql`select 1 as a, 2 as b`)[0].join(',')] +}) + +t('Insert empty array', async() => { + await sql`create table tester (ints int[])` + return [ + Array.isArray((await sql`insert into tester (ints) values (${ sql.array([]) }) returning *`)[0].ints), + true, + await sql`drop table tester` + ] +}) + +t('Insert array in sql()', async() => { + await sql`create table tester (ints int[])` + return [ + Array.isArray((await sql`insert into tester ${ sql({ ints: sql.array([]) }) } returning *`)[0].ints), + true, + await sql`drop table tester` + ] +}) + +if (options.prepare) { + t('Automatically creates prepared statements', async() => { + const result = await sql`select * from pg_prepared_statements` + return [true, result.some(x => x.name = result.statement.name)] + }) + + t('no_prepare: true disables prepared 
statements (deprecated)', async() => { + const sql = postgres({ ...options, no_prepare: true }) + const result = await sql`select * from pg_prepared_statements` + return [false, result.some(x => x.name = result.statement.name)] + }) + + t('prepare: false disables prepared statements', async() => { + const sql = postgres({ ...options, prepare: false }) + const result = await sql`select * from pg_prepared_statements` + return [false, result.some(x => x.name = result.statement.name)] + }) + + t('prepare: true enables prepared statements', async() => { + const sql = postgres({ ...options, prepare: true }) + const result = await sql`select * from pg_prepared_statements` + return [true, result.some(x => x.name = result.statement.name)] + }) + + t('prepares unsafe query when "prepare" option is true', async() => { + const sql = postgres({ ...options, prepare: true }) + const result = await sql.unsafe('select * from pg_prepared_statements where name <> $1', ['bla'], { prepare: true }) + return [true, result.some(x => x.name = result.statement.name)] + }) + + t('does not prepare unsafe query by default', async() => { + const sql = postgres({ ...options, prepare: true }) + const result = await sql.unsafe('select * from pg_prepared_statements where name <> $1', ['bla']) + return [false, result.some(x => x.name = result.statement.name)] + }) + + t('Recreate prepared statements on transformAssignedExpr error', async() => { + const insert = () => sql`insert into test (name) values (${ '1' }) returning name` + await sql`create table test (name text)` + await insert() + await sql`alter table test alter column name type int using name::integer` + return [ + 1, + (await insert())[0].name, + await sql`drop table test` + ] + }) +} + +t('Throws correct error when retrying in transactions', async() => { + await sql`create table test(x int)` + const error = await sql.begin(sql => sql`insert into test (x) values (${ false })`).catch(e => e) + return [ + error.code, + '42804', + sql`drop 
table test` + ] +}) + +t('Recreate prepared statements on RevalidateCachedQuery error', async() => { + const select = () => sql`select name from test` + await sql`create table test (name text)` + await sql`insert into test values ('1')` + await select() + await sql`alter table test alter column name type int using name::integer` + return [ + 1, + (await select())[0].name, + await sql`drop table test` + ] +}) + +t('Properly throws routine error on not prepared statements', async() => { + await sql`create table x (x text[])` + const { routine } = await sql.unsafe(` + insert into x(x) values (('a', 'b')) + `).catch(e => e) + + return ['transformAssignedExpr', routine, await sql`drop table x`] +}) + +t('Properly throws routine error on not prepared statements in transaction', async() => { + const { routine } = await sql.begin(sql => [ + sql`create table x (x text[])`, + sql`insert into x(x) values (('a', 'b'))` + ]).catch(e => e) + + return ['transformAssignedExpr', routine] +}) + +t('Properly throws routine error on not prepared statements using file', async() => { + const { routine } = await sql.unsafe(` + create table x (x text[]); + insert into x(x) values (('a', 'b')); + `, { prepare: true }).catch(e => e) + + return ['transformAssignedExpr', routine] +}) + +t('Catches connection config errors', async() => { + const sql = postgres({ ...options, user: { toString: () => { throw new Error('wat') } }, database: 'prut' }) + + return [ + 'wat', + await sql`select 1`.catch((e) => e.message) + ] +}) + +t('Catches connection config errors with end', async() => { + const sql = postgres({ ...options, user: { toString: () => { throw new Error('wat') } }, database: 'prut' }) + + return [ + 'wat', + await sql`select 1`.catch((e) => e.message), + await sql.end() + ] +}) + +// Reason: It tests internals of the library, not DB connection +//nt('Catches query format errors', async() => [ +// 'wat', +// await sql.unsafe({ toString: () => { throw new Error('wat') } }).catch((e) => 
e.message) +//]) + +// Reason: single host only +//t('Multiple hosts', { +// timeout: 1 +//}, async() => { +// const s1 = postgres({ idle_timeout }) +// , s2 = postgres({ idle_timeout, port: 5433 }) +// , sql = postgres('postgres://localhost:5432,localhost:5433', { idle_timeout, max: 1 }) +// , result = [] +// +// const id1 = (await s1`select system_identifier as x from pg_control_system()`)[0].x +// const id2 = (await s2`select system_identifier as x from pg_control_system()`)[0].x +// +// const x1 = await sql`select 1` +// result.push((await sql`select system_identifier as x from pg_control_system()`)[0].x) +// await s1`select pg_terminate_backend(${ x1.state.pid }::int)` +// await delay(50) +// +// const x2 = await sql`select 1` +// result.push((await sql`select system_identifier as x from pg_control_system()`)[0].x) +// await s2`select pg_terminate_backend(${ x2.state.pid }::int)` +// await delay(50) +// +// result.push((await sql`select system_identifier as x from pg_control_system()`)[0].x) +// +// return [[id1, id2, id1].join(','), result.join(',')] +//}) + +t('Escaping supports schemas and tables', async() => { + await sql`create schema a` + await sql`create table a.b (c int)` + await sql`insert into a.b (c) values (1)` + return [ + 1, + (await sql`select ${ sql('a.b.c') } from a.b`)[0].c, + await sql`drop table a.b`, + await sql`drop schema a` + ] +}) + +t('Raw method returns rows as arrays', async() => { + const [x] = await sql`select 1`.raw() + return [ + Array.isArray(x), + true + ] +}) + +t('Raw method returns values unparsed as Buffer', async() => { + const [[x]] = await sql`select 1`.raw() + return [ + x instanceof Uint8Array, + true + ] +}) + +t('Array returns rows as arrays of columns', async() => { + return [(await sql`select 1`.values())[0][0], 1] +}) + +t('Copy read', async() => { + const result = [] + + await sql`create table test (x int)` + await sql`insert into test select * from generate_series(1,10)` + const readable = await sql`copy test 
to stdout`.readable() + readable.on('data', x => result.push(x)) + await new Promise(r => readable.on('end', r)) + + return [ + result.length, + 10, + await sql`drop table test` + ] +}) + +t('Copy write', { timeout: t.timeout * 2 }, async() => { + await sql`create table test (x int)` + const writable = await sql`copy test from stdin`.writable() + + writable.write('1\n') + writable.write('1\n') + writable.end() + + await new Promise(r => writable.on('finish', r)) + + return [ + (await sql`select 1 from test`).length, + 2, + await sql`drop table test` + ] +}) + +t('Copy write as first', async() => { + await sql`create table test (x int)` + const first = postgres(options) + const writable = await first`COPY test FROM STDIN WITH(FORMAT csv, HEADER false, DELIMITER ',')`.writable() + writable.write('1\n') + writable.write('1\n') + writable.end() + + await new Promise(r => writable.on('finish', r)) + + return [ + (await sql`select 1 from test`).length, + 2, + await sql`drop table test` + ] +}) + +t('Copy from file', async() => { + await sql`create table test (x int, y int, z int)` + await new Promise(async r => fs + .createReadStream(rel('copy.csv')) + .pipe(await sql`copy test from stdin`.writable()) + .on('finish', r) + ) + + return [ + JSON.stringify(await sql`select * from test`), + '[{"x":1,"y":2,"z":3},{"x":4,"y":5,"z":6}]', + await sql`drop table test` + ] +}) + +t('Copy from works in transaction', async() => { + await sql`create table test(x int)` + const xs = await sql.begin(async sql => { + (await sql`copy test from stdin`.writable()).end('1\n2') + await delay(20) + return sql`select 1 from test` + }) + + return [ + xs.length, + 2, + await sql`drop table test` + ] +}) + +t('Copy from abort', async() => { + const sql = postgres(options) + const readable = fs.createReadStream(rel('copy.csv')) + + await sql`create table test (x int, y int, z int)` + await sql`TRUNCATE TABLE test` + + const writable = await sql`COPY test FROM STDIN`.writable() + + let aborted + + 
readable + .pipe(writable) + .on('error', (err) => aborted = err) + + writable.destroy(new Error('abort')) + await sql.end() + + return [ + 'abort', + aborted.message, + await postgres(options)`drop table test` + ] +}) + +t('multiple queries before connect', async() => { + const sql = postgres({ ...options, max: 2 }) + const xs = await Promise.all([ + sql`select 1 as x`, + sql`select 2 as x`, + sql`select 3 as x`, + sql`select 4 as x` + ]) + + return [ + '1,2,3,4', + xs.map(x => x[0].x).join() + ] +}) + +// Reason: No subscriptions, use Supabase Realtime +//t('subscribe', { timeout: 2 }, async() => { +// const sql = postgres({ +// database: 'postgres', +// publications: 'alltables' +// }) +// +// await sql.unsafe('create publication alltables for all tables') +// +// const result = [] +// +// const { unsubscribe } = await sql.subscribe('*', (row, { command, old }) => { +// result.push(command, row.name, row.id, old && old.name, old && old.id) +// }) +// +// await sql` +// create table test ( +// id serial primary key, +// name text +// ) +// ` +// +// await sql`alter table test replica identity default` +// await sql`insert into test (name) values ('Murray')` +// await sql`update test set name = 'Rothbard'` +// await sql`update test set id = 2` +// await sql`delete from test` +// await sql`alter table test replica identity full` +// await sql`insert into test (name) values ('Murray')` +// await sql`update test set name = 'Rothbard'` +// await sql`delete from test` +// await delay(10) +// await unsubscribe() +// await sql`insert into test (name) values ('Oh noes')` +// await delay(10) +// return [ +// 'insert,Murray,1,,,update,Rothbard,1,,,update,Rothbard,2,,1,delete,,2,,,insert,Murray,2,,,update,Rothbard,2,Murray,2,delete,Rothbard,2,,', // eslint-disable-line +// result.join(','), +// await sql`drop table test`, +// await sql`drop publication alltables`, +// await sql.end() +// ] +//}) +// +//t('subscribe with transform', { timeout: 2 }, async() => { +// const sql 
= postgres({ +// transform: { +// column: { +// from: postgres.toCamel, +// to: postgres.fromCamel +// } +// }, +// database: 'postgres', +// publications: 'alltables' +// }) +// +// await sql.unsafe('create publication alltables for all tables') +// +// const result = [] +// +// const { unsubscribe } = await sql.subscribe('*', (row, { command, old }) => +// result.push(command, row.nameInCamel || row.id, old && old.nameInCamel) +// ) +// +// await sql` +// create table test ( +// id serial primary key, +// name_in_camel text +// ) +// ` +// +// await sql`insert into test (name_in_camel) values ('Murray')` +// await sql`update test set name_in_camel = 'Rothbard'` +// await sql`delete from test` +// await sql`alter table test replica identity full` +// await sql`insert into test (name_in_camel) values ('Murray')` +// await sql`update test set name_in_camel = 'Rothbard'` +// await sql`delete from test` +// await delay(10) +// await unsubscribe() +// await sql`insert into test (name_in_camel) values ('Oh noes')` +// await delay(10) +// return [ +// 'insert,Murray,,update,Rothbard,,delete,1,,insert,Murray,,update,Rothbard,Murray,delete,Rothbard,', +// result.join(','), +// await sql`drop table test`, +// await sql`drop publication alltables`, +// await sql.end() +// ] +//}) +// +//t('subscribe reconnects and calls onsubscribe', { timeout: 4 }, async() => { +// const sql = postgres({ +// database: 'postgres', +// publications: 'alltables', +// fetch_types: false +// }) +// +// await sql.unsafe('create publication alltables for all tables') +// +// const result = [] +// let onsubscribes = 0 +// +// const { unsubscribe, sql: subscribeSql } = await sql.subscribe( +// '*', +// (row, { command, old }) => result.push(command, row.name || row.id, old && old.name), +// () => onsubscribes++ +// ) +// +// await sql` +// create table test ( +// id serial primary key, +// name text +// ) +// ` +// +// await sql`insert into test (name) values ('Murray')` +// await delay(10) +// 
await subscribeSql.close() +// await delay(500) +// await sql`delete from test` +// await delay(100) +// await unsubscribe() +// return [ +// '2insert,Murray,,delete,1,', +// onsubscribes + result.join(','), +// await sql`drop table test`, +// await sql`drop publication alltables`, +// await sql.end() +// ] +//}) + +t('Execute', async() => { + const result = await new Promise((resolve) => { + const sql = postgres({ ...options, fetch_types: false, debug:(id, query) => resolve(query) }) + sql`select 1`.execute() + }) + + return [result, 'select 1'] +}) + +t('Cancel running query', async() => { + const query = sql`select pg_sleep(2)` + setTimeout(() => query.cancel(), 500) + const error = await query.catch(x => x) + return ['57014', error.code] +}) + +t('Cancel piped query', { timeout: t.timeout * 2 }, async() => { + const sql = postgres({...options, max: 2}) + await sql`select 1` + const last = sql`select pg_sleep(1)`.execute() + const query = sql`select pg_sleep(2) as dig` + setTimeout(() => query.cancel(), 500) + const error = await query.catch(x => x) + await last + return ['57014', error.code] +}) + +t('Cancel queued query', async() => { + const query = sql`select pg_sleep(2) as nej` + const tx = sql.begin(sql => ( + query.cancel(), + sql`select pg_sleep(0.5) as hej, 'hejsa'` + )) + const error = await query.catch(x => x) + await tx + return ['57014', error.code] +}) + +t('Fragments', async() => [ + 1, + (await sql` + ${ sql`select` } 1 as x + `)[0].x +]) + +t('Result becomes array', async() => [ + true, + (await sql`select 1`).slice() instanceof Array +]) + +t('Describe', async() => { + const type = (await sql`select ${ 1 }::int as x`.describe()).types[0] + return [23, type] +}) + +t('Describe a statement', async() => { + await sql`create table tester (name text, age int)` + const r = await sql`select name, age from tester where name like $1 and age > $2`.describe() + return [ + '25,23/name:25,age:23', + `${ r.types.join(',') }/${ r.columns.map(c => 
`${c.name}:${c.type}`).join(',') }`, + await sql`drop table tester` + ] +}) + +t('Include table oid and column number in column details', async() => { + await sql`create table tester (name text, age int)` + const r = await sql`select name, age from tester where name like $1 and age > $2`.describe() + const [{ oid }] = await sql`select oid from pg_class where relname = 'tester'` + + return [ + `table:${oid},number:1|table:${oid},number:2`, + `${ r.columns.map(c => `table:${c.table},number:${c.number}`).join('|') }`, + await sql`drop table tester` + ] +}) + +t('Describe a statement without parameters', async() => { + await sql`create table tester (name text, age int)` + const r = await sql`select name, age from tester`.describe() + return [ + '0,2', + `${ r.types.length },${ r.columns.length }`, + await sql`drop table tester` + ] +}) + +t('Describe a statement without columns', async() => { + await sql`create table tester (name text, age int)` + const r = await sql`insert into tester (name, age) values ($1, $2)`.describe() + return [ + '2,0', + `${ r.types.length },${ r.columns.length }`, + await sql`drop table tester` + ] +}) + +t('Large object', async() => { + const file = rel('index.js') + , md5 = crypto.createHash('md5').update(fs.readFileSync(file)).digest('hex') + + const lo = await sql.largeObject() + await new Promise(async r => fs.createReadStream(file).pipe(await lo.writable()).on('finish', r)) + await lo.seek(0) + + const out = crypto.createHash('md5') + await new Promise(r => lo.readable().then(x => x.on('data', x => out.update(x)).on('end', r))) + + return [ + md5, + out.digest('hex'), + await lo.close() + ] +}) + +t('Catches type serialize errors', async() => { + const sql = postgres({ + ...options, + idle_timeout, + types: { + text: { + from: 25, + to: 25, + parse: x => x, + serialize: () => { throw new Error('watSerialize') } + } + } + }) + + return [ + 'watSerialize', + (await sql`select ${ 'wat' }`.catch(e => e.message)) + ] +}) + +t('Catches type 
parse errors', async() => { + const sql = postgres({ + ...options, + idle_timeout, + types: { + text: { + from: 25, + to: 25, + parse: () => { throw new Error('watParse') }, + serialize: x => x + } + } + }) + + return [ + 'watParse', + (await sql`select 'wat'`.catch(e => e.message)) + ] +}) + +t('Catches type serialize errors in transactions', async() => { + const sql = postgres({ + ...options, + idle_timeout, + types: { + text: { + from: 25, + to: 25, + parse: x => x, + serialize: () => { throw new Error('watSerialize') } + } + } + }) + + return [ + 'watSerialize', + (await sql.begin(sql => ( + sql`select 1`, + sql`select ${ 'wat' }` + )).catch(e => e.message)) + ] +}) + +t('Catches type parse errors in transactions', async() => { + const sql = postgres({ + ...options, + idle_timeout, + types: { + text: { + from: 25, + to: 25, + parse: () => { throw new Error('watParse') }, + serialize: x => x + } + } + }) + + return [ + 'watParse', + (await sql.begin(sql => ( + sql`select 1`, + sql`select 'wat'` + )).catch(e => e.message)) + ] +}) + +t('Prevent premature end of connection in transaction', async() => { + const sql = postgres({...options, max_lifetime: 0.01, idle_timeout }) + const result = await sql.begin(async sql => { + await sql`select 1` + await delay(20) + await sql`select 1` + return 'yay' + }) + + + return [ + 'yay', + result + ] +}) + +t('Ensure reconnect after max_lifetime with transactions', { timeout: t.timeout * 5 }, async() => { + const sql = postgres({ + ...options, + max_lifetime: 0.01, + idle_timeout, + max: 1 + }) + + let x = 0 + while (x++ < 10) await sql.begin(sql => sql`select 1 as x`) + + return [true, true] +}) + + +t('Ensure transactions throw if connection is closed while there is no query', async() => { + const sql = postgres(options) + const x = await sql.begin(async() => { + setTimeout(() => sql.end({ timeout: 0 }), 10) + await new Promise(r => setTimeout(r, 200)) + return sql`select 1` + }).catch(x => x) + return ['CONNECTION_CLOSED', 
x.code] +}) + +// Reason: Irrelevant to us, if user wants to use custom socket, it is up to +// them to make it work. +// +//t('Custom socket', {}, async() => { +// let result +// const sql = postgres({ +// ...options, +// socket: () => new Promise((resolve, reject) => { +// const socket = new net.Socket() +// socket.connect(options.port) +// socket.once('data', x => result = x[0]) +// socket.on('error', reject) +// socket.on('connect', () => resolve(socket)) +// }), +// idle_timeout +// }) +// +// await sql`select 1` +// +// return [ +// result, +// 82 +// ] +//}) + +t('Ensure drain only dequeues if ready', async() => { + const sql = postgres(options) + + const res = await Promise.all([ + sql.unsafe('SELECT 0+$1 --' + '.'.repeat(100000), [1]), + sql.unsafe('SELECT 0+$1+$2+$3', [1, 2, 3]) + ]) + + return [res.length, 2] +}) + +t('Supports fragments as dynamic parameters', async() => { + await sql`create table test (a int, b bool)` + await sql`insert into test values(1, true)` + await sql`insert into test ${ + sql({ + a: 2, + b: sql`exists(select 1 from test where b = ${ true })` + }) + }` + + return [ + '1,t2,t', + (await sql`select * from test`.raw()).join(''), + await sql`drop table test` + ] +}) + +t('Supports nested fragments with parameters', async() => { + await sql`create table test ${ + sql`(${ sql('a') } ${ sql`int` })` + }` + await sql`insert into test values(1)` + return [ + 1, + (await sql`select a from test`)[0].a, + await sql`drop table test` + ] +}) + +t('Supports multiple nested fragments with parameters', async() => { + const [{ b }] = await sql`select * ${ + sql`from ${ + sql`(values (2, ${ 1 }::int)) as x(${ sql(['a', 'b']) })` + }` + }` + return [ + 1, + b + ] +}) + +t('Supports arrays of fragments', async() => { + const [{ x }] = await sql` + ${ [sql`select`, sql`1`, sql`as`, sql`x`] } + ` + + return [ + 1, + x + ] +}) + +t('Does not try rollback when commit errors', async() => { + let notice = null + const sql = postgres({ ...options, 
onnotice: x => notice = x }) + await sql`create table test(x int constraint test_constraint unique deferrable initially deferred)` + + await sql.begin('isolation level serializable', async sql => { + await sql`insert into test values(1)` + await sql`insert into test values(1)` + }).catch(e => e) + + return [ + notice, + null, + await sql`drop table test` + ] +}) + +t('Last keyword used even with duplicate keywords', async() => { + await sql`create table test (x int)` + await sql`insert into test values(1)` + const [{ x }] = await sql` + select + 1 in (1) as x + from test + where x in ${ sql([1, 2]) } + ` + + return [x, true, await sql`drop table test`] +}) + +t('Insert array with null', async() => { + await sql`create table test (x int[])` + await sql`insert into test ${ sql({ x: [1, null, 3] }) }` + return [ + 1, + (await sql`select x from test`)[0].x[0], + await sql`drop table test` + ] +}) + +t('Insert array with undefined throws', async() => { + await sql`create table test (x int[])` + return [ + 'UNDEFINED_VALUE', + await sql`insert into test ${ sql({ x: [1, undefined, 3] }) }`.catch(e => e.code), + await sql`drop table test` + ] +}) + +t('Insert array with undefined transform', async() => { + const sql = postgres({ ...options, transform: { undefined: null } }) + await sql`create table test (x int[])` + await sql`insert into test ${ sql({ x: [1, undefined, 3] }) }` + return [ + 1, + (await sql`select x from test`)[0].x[0], + await sql`drop table test` + ] +}) + +t('concurrent cursors', async() => { + const xs = [] + + await Promise.all([...Array(7)].map((x, i) => [ + sql`select ${ i }::int as a, generate_series(1, 2) as x`.cursor(([x]) => xs.push(x.a + x.x)) + ]).flat()) + + return ['12233445566778', xs.join('')] +}) + +t('concurrent cursors multiple connections', async() => { + const sql = postgres({ ...options, max: 2 }) + const xs = [] + + await Promise.all([...Array(7)].map((x, i) => [ + sql`select ${ i }::int as a, generate_series(1, 2) as x`.cursor(([x]) 
=> xs.push(x.a + x.x)) + ]).flat()) + + return ['12233445566778', xs.sort().join('')] +}) + +if (process.env.PGMODE != 'transaction') { + t('reserve connection', async() => { + const reserved = await sql.reserve() + + setTimeout(() => reserved.release(), 510) + + const xs = await Promise.all([ + reserved`select 1 as x`.then(([{ x }]) => ({ time: Date.now(), x })), + sql`select 2 as x`.then(([{ x }]) => ({ time: Date.now(), x })), + reserved`select 3 as x`.then(([{ x }]) => ({ time: Date.now(), x })) + ]) + + if (xs[1].time - xs[2].time < 500) + throw new Error('Wrong time') + + return [ + '123', + xs.map(x => x.x).join('') + ] + }) +} + +t('arrays in reserved connection', async() => { + const reserved = await sql.reserve() + const [{ x }] = await reserved`select array[1, 2, 3] as x` + reserved.release() + + return [ + '123', + x.join('') + ] +}) diff --git a/test/integration/js/postgres/select-param.sql b/test/integration/js/postgres/select-param.sql new file mode 100644 index 00000000..d4de2440 --- /dev/null +++ b/test/integration/js/postgres/select-param.sql @@ -0,0 +1 @@ +select $1 as x diff --git a/test/integration/js/postgres/select.sql b/test/integration/js/postgres/select.sql new file mode 100644 index 00000000..f951e920 --- /dev/null +++ b/test/integration/js/postgres/select.sql @@ -0,0 +1 @@ +select 1 as x diff --git a/test/integration/js/postgres/test.js b/test/integration/js/postgres/test.js new file mode 100644 index 00000000..08786ee4 --- /dev/null +++ b/test/integration/js/postgres/test.js @@ -0,0 +1,86 @@ +/* eslint no-console: 0 */ + +import util from 'node:util' + +let done = 0 +let only = false +let ignored = 0 +let failed = false +let promise = Promise.resolve() +const tests = {} + , ignore = Symbol('ignore') + +const failFast = !!process.env.FAIL_FAST + +export const nt = () => ignored++ +export const ot = (...rest) => (only = true, test(true, ...rest)) +export const t = (...rest) => test(false, ...rest) +t.timeout = (process.env.TIMEOUT || 5) | 
0 + +async function test(o, name, options, fn) { + typeof options !== 'object' && (fn = options, options = {}) + const line = new Error().stack.split('\n')[3].match(':([0-9]+):')[1] + + await 1 + + if (only && !o) + return + + tests[line] = { fn, line, name } + promise = promise.then(() => Promise.race([ + new Promise((resolve, reject) => + fn.timer = setTimeout(() => reject('Timed out'), (options.timeout || t.timeout) * 1000) + ), + (failed && failFast) ? (ignored++, ignore) : (function() { + process.stdout.write(`${name}: `) + return fn() + })() + ])) + .then(async x => { + clearTimeout(fn.timer) + if (x === ignore) + return + + if (!Array.isArray(x)) + throw new Error('Test should return result array') + + const [expected, got] = await Promise.all(x) + if (expected !== got) { + failed = true + throw new Error(util.inspect(expected) + ' != ' + util.inspect(got)) + } + + tests[line].succeeded = true + process.stdout.write('✅\n') + }) + .catch(err => { + process.stdout.write('⛔️') + tests[line].failed = failed = true + tests[line].error = err instanceof Error ? err : new Error(util.inspect(err)) + console.error(name + ' at line', line, 'failed\n', util.inspect(err)) + }) + .then(() => { + ++done === Object.keys(tests).length && exit() + }) +} + +function exit() { + let success = true + Object.values(tests).every((x) => { + if (x.succeeded) + return true + + success = false + }) + + only + ? console.error('⚠️', 'Not all tests were run') + : ignored + ? console.error('⚠️', ignored, 'ignored test' + (ignored === 1 ? '' : 's'), '\n') + : success + ? console.log('🎉') + : console.error('⚠️', 'Not good') + + !process.exitCode && (!success || only || ignored) && (process.exitCode = 1) +} + diff --git a/test/integration/js/yarn.lock b/test/integration/js/yarn.lock new file mode 100644 index 00000000..187e2cfc --- /dev/null +++ b/test/integration/js/yarn.lock @@ -0,0 +1,8 @@ +# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY. 
+# yarn lockfile v1 + + +postgres@^3.4.5: + version "3.4.5" + resolved "https://registry.yarnpkg.com/postgres/-/postgres-3.4.5.tgz#1ef99e51b0ba9b53cbda8a215dd406725f7d15f9" + integrity sha512-cDWgoah1Gez9rN3H4165peY9qfpEo+SA61oQv65O3cRUE1pOEoJWwddwcqKE8XZYjbblOJlYDlLV4h67HrEVDg== diff --git a/test/integration/proxy_test.exs b/test/integration/proxy_test.exs index a3720f2b..68f15bb1 100644 --- a/test/integration/proxy_test.exs +++ b/test/integration/proxy_test.exs @@ -1,13 +1,14 @@ defmodule Supavisor.Integration.ProxyTest do - use Supavisor.DataCase, async: true + use Supavisor.DataCase, async: false require Logger alias Postgrex, as: P + alias Supavisor.Support.Cluster @tenant "proxy_tenant1" - setup_all do + setup do db_conf = Application.get_env(:supavisor, Repo) {:ok, proxy} = @@ -60,11 +61,9 @@ defmodule Supavisor.Integration.ProxyTest do "postgresql://#{db_conf[:username] <> "." <> @tenant}:no_pass@#{db_conf[:hostname]}:#{Application.get_env(:supavisor, :proxy_port_transaction)}/postgres" assert {:error, - {_, - {:stop, - %Postgrex.Error{ - message: "error received in SCRAM server final message: \"Wrong password\"" - }, _}}} = parse_uri(url) |> single_connection() + %Postgrex.Error{ + message: "error received in SCRAM server final message: \"Wrong password\"" + }} = parse_uri(url) |> single_connection() end test "insert", %{proxy: proxy, origin: origin} do @@ -75,6 +74,8 @@ defmodule Supavisor.Integration.ProxyTest do end test "query via another node", %{proxy: proxy, user: user} do + {:ok, _pid, node2} = Cluster.start_node() + sup = Enum.reduce_while(1..30, nil, fn _, acc -> case Supavisor.get_global_sup({@tenant, user, :transaction}) do @@ -89,7 +90,7 @@ defmodule Supavisor.Integration.ProxyTest do assert sup == :erpc.call( - :"secondary@127.0.0.1", + node2, Supavisor, :get_global_sup, [{@tenant, user, :transaction}], @@ -114,7 +115,7 @@ defmodule Supavisor.Integration.ProxyTest do assert sup == :erpc.call( - :"secondary@127.0.0.1", + node2, Supavisor, 
:get_global_sup, [{@tenant, user, :transaction}], @@ -154,19 +155,36 @@ defmodule Supavisor.Integration.ProxyTest do P.query!(origin, "select * from public.test where details = 'test_delete'", []) end - # test "too many clients in session mode" do - # db_conf = Application.get_env(:supavisor, Repo) + test "too many clients in session mode" do + Process.flag(:trap_exit, true) + db_conf = Application.get_env(:supavisor, Repo) + port = Application.get_env(:supavisor, :proxy_port_session) - # url = - # "postgresql://session.#{@tenant}:#{db_conf[:password]}@#{db_conf[:hostname]}:#{Application.get_env(:supavisor, :proxy_port)}/postgres" + connection_opts = + Keyword.merge(db_conf, + username: "max_clients.proxy_tenant1", + port: port + ) - # spawn(fn -> System.cmd("psql", [url], stderr_to_stdout: true) end) + {:ok, pid1} = single_connection(connection_opts) + {:ok, pid2} = single_connection(connection_opts) - # :timer.sleep(500) + :timer.sleep(1000) - # {result, _} = System.cmd("psql", [url], stderr_to_stdout: true) - # assert result =~ "FATAL: Too many clients already" - # end + assert {:error, + %Postgrex.Error{ + postgres: %{ + code: :internal_error, + message: + "MaxClientsInSessionMode: max clients reached - in Session mode max clients are limited to pool_size", + unknown: "FATAL", + severity: "FATAL", + pg_code: "XX000" + } + }} = single_connection(connection_opts) + + for pid <- [pid1, pid2], do: :gen_statem.stop(pid) + end test "http to proxy server returns 200 OK" do assert :httpc.request( @@ -186,12 +204,15 @@ defmodule Supavisor.Integration.ProxyTest do {:ok, pid} = parse_uri(url) |> single_connection() [{_, client_pid, _}] = - Supavisor.get_local_manager({{:single, @tenant}, "transaction", :transaction, "postgres"}) + Supavisor.get_local_manager( + {{:single, @tenant}, "transaction", :transaction, "postgres", nil} + ) |> :sys.get_state() - |> then(& &1[:tid]) + |> Access.get(:tid) |> :ets.tab2list() - {state, %{db_pid: db_pid}} = :sys.get_state(client_pid) + 
assert {state, map} = :sys.get_state(client_pid) + assert %{db_pid: db_pid} = map assert {:idle, nil} = {state, db_pid} :gen_statem.stop(pid) @@ -201,21 +222,24 @@ defmodule Supavisor.Integration.ProxyTest do Process.flag(:trap_exit, true) db_conf = Application.get_env(:supavisor, Repo) - url = - "postgresql://max_clients.#{@tenant}:#{db_conf[:password]}@#{db_conf[:hostname]}:#{Application.get_env(:supavisor, :proxy_port_transaction)}/postgres?sslmode=disable" + connection_opts = [ + hostname: db_conf[:hostname], + port: Application.get_env(:supavisor, :proxy_port_transaction), + username: "max_clients.prom_tenant", + database: "postgres", + password: db_conf[:password] + ] assert {:error, - {_, - {:stop, - %Postgrex.Error{ - postgres: %{ - code: :internal_error, - message: "Max client connections reached", - pg_code: "XX000", - severity: "FATAL", - unknown: "FATAL" - } - }, _}}} = parse_uri(url) |> single_connection() + %Postgrex.Error{ + postgres: %{ + code: :internal_error, + message: "Max client connections reached", + pg_code: "XX000", + severity: "FATAL", + unknown: "FATAL" + } + }} = single_connection(connection_opts) end test "change role password", %{origin: origin} do @@ -232,21 +256,20 @@ defmodule Supavisor.Integration.ProxyTest do assert {:ok, pid} = parse_uri(first_pass) |> single_connection() assert [%Postgrex.Result{rows: [["1"]]}] = P.SimpleConnection.call(pid, {:query, "select 1;"}) - + :gen_statem.stop(pid) P.query(origin, "alter user dev_postgres with password 'postgres_new';", []) Supavisor.stop({{:single, "is_manager"}, "dev_postgres", :transaction, "postgres"}) :timer.sleep(1000) assert {:error, - {_, - {:stop, - %Postgrex.Error{ - message: "error received in SCRAM server final message: \"Wrong password\"" - }, _}}} = parse_uri(new_pass) |> single_connection() + %Postgrex.Error{ + message: "error received in SCRAM server final message: \"Wrong password\"" + }} = parse_uri(new_pass) |> single_connection() {:ok, pid} = parse_uri(new_pass) |> 
single_connection() assert [%Postgrex.Result{rows: [["1"]]}] = P.SimpleConnection.call(pid, {:query, "select 1;"}) + :gen_statem.stop(pid) end test "invalid characters in user or db_name" do @@ -257,18 +280,100 @@ defmodule Supavisor.Integration.ProxyTest do "postgresql://user\"user.#{@tenant}:#{db_conf[:password]}@#{db_conf[:hostname]}:#{Application.get_env(:supavisor, :proxy_port_transaction)}/postgres\\\\\\\\\"\\" assert {:error, - {_, - {:stop, - %Postgrex.Error{ - postgres: %{ - code: :internal_error, - message: - "Authentication error, reason: \"Invalid characters in user or db_name\"", - pg_code: "XX000", - severity: "FATAL", - unknown: "FATAL" - } - }, _}}} = parse_uri(url) |> single_connection() + %Postgrex.Error{ + postgres: %{ + code: :internal_error, + message: "Authentication error, reason: \"Invalid format for user or db_name\"", + pg_code: "XX000", + severity: "FATAL", + unknown: "FATAL" + } + }} = parse_uri(url) |> single_connection() + end + + # test "max_pools limit" do + # Process.flag(:trap_exit, true) + # db_conf = Application.get_env(:supavisor, Repo) + # port = Application.get_env(:supavisor, :proxy_port_transaction) + + # tenant = "max_pool_tenant" + + # {:ok, pid1} = + # Keyword.merge(db_conf, + # username: "postgres.#{tenant}", + # port: port + # ) + # |> single_connection() + + # assert Supavisor.count_pools(tenant) == 1 + + # {:ok, pid2} = + # Keyword.merge(db_conf, + # username: "session.#{tenant}", + # port: port + # ) + # |> single_connection() + + # assert Supavisor.count_pools(tenant) == 2 + + # {:ok, pid3} = + # Keyword.merge(db_conf, + # username: "transaction.#{tenant}", + # port: port + # ) + # |> single_connection() + + # assert Supavisor.count_pools(tenant) == 3 + + # connection_opts = + # Keyword.merge(db_conf, + # username: "max_clients.#{tenant}", + # port: port + # ) + + # assert {:error, + # %Postgrex.Error{ + # postgres: %{ + # code: :internal_error, + # message: "Max pools count reached", + # unknown: "FATAL", + # 
severity: "FATAL", + # pg_code: "XX000" + # } + # }} = single_connection(connection_opts) + + # for pid <- [pid1, pid2, pid3], do: :gen_statem.stop(pid) + # end + + test "active_count doesn't block" do + Process.flag(:trap_exit, true) + db_conf = Application.get_env(:supavisor, Repo) + port = Application.get_env(:supavisor, :proxy_port_session) + + connection_opts = + Keyword.merge(db_conf, + username: db_conf[:username] <> "." <> @tenant, + port: port + ) + + assert {:ok, pid} = single_connection(connection_opts) + + id = {{:single, @tenant}, db_conf[:username], :session, db_conf[:database], nil} + [{client_pid, _}] = Registry.lookup(Supavisor.Registry.TenantClients, id) + + P.SimpleConnection.call(pid, {:query, "select 1;"}) + {_, %{active_count: active_count}} = :sys.get_state(client_pid) + assert active_count >= 1 + + Enum.each(0..200, fn _ -> + P.SimpleConnection.call(pid, {:query, "select 1;"}) + end) + + assert [ + %Postgrex.Result{ + command: :select + } + ] = P.SimpleConnection.call(pid, {:query, "select 1;"}) end defp single_connection(db_conf, c_port \\ nil) when is_list(db_conf) do diff --git a/test/supavisor/client_handler_test.exs b/test/supavisor/client_handler_test.exs index 86314d18..37de47db 100644 --- a/test/supavisor/client_handler_test.exs +++ b/test/supavisor/client_handler_test.exs @@ -1,50 +1,3 @@ defmodule Supavisor.ClientHandlerTest do use ExUnit.Case, async: true - - alias Supavisor.HandlerHelpers, as: HH - - describe "parse_user_info/1" do - test "extracts the external_id from the username" do - payload = %{"user" => "test.user.external_id"} - {:single, {name, external_id, nil}} = HH.parse_user_info(payload) - assert name == "test.user" - assert external_id == "external_id" - end - - test "username consists only of username" do - username = "username" - payload = %{"user" => username} - {:single, {user, nil, nil}} = HH.parse_user_info(payload) - assert username == user - end - - test "consist cluster" do - username = 
"some.user.cluster.alias" - {t, {u, a, nil}} = HH.parse_user_info(%{"user" => username}) - assert {t, {u, a, nil}} == {:cluster, {"some.user", "alias", nil}} - end - - test "external_id in options" do - user = "test.user" - external_id = "external_id" - payload = %{"options" => %{"reference" => external_id}, "user" => user} - {:single, {user1, external_id1, nil}} = HH.parse_user_info(payload) - assert user1 == user - assert external_id1 == external_id - end - - test "unicode in username" do - payload = %{"user" => "тестовe.імʼя.external_id"} - {:single, {name, external_id, nil}} = HH.parse_user_info(payload) - assert name == "тестовe.імʼя" - assert external_id == "external_id" - end - - test "extracts db_name" do - payload = %{"user" => "user", "database" => "postgres_test"} - {:single, {name, nil, db_name}} = HH.parse_user_info(payload) - assert name == "user" - assert db_name == "postgres_test" - end - end end diff --git a/test/supavisor/db_handler_test.exs b/test/supavisor/db_handler_test.exs index e2610de6..ff53cbb5 100644 --- a/test/supavisor/db_handler_test.exs +++ b/test/supavisor/db_handler_test.exs @@ -3,18 +3,20 @@ defmodule Supavisor.DbHandlerTest do alias Supavisor.DbHandler, as: Db alias Supavisor.Protocol.Server # import Mock + @id {{:single, "tenant"}, "user", :transaction, "postgres", nil} describe "init/1" do test "starts with correct state" do args = %{ - id: {"a", "b"}, + id: @id, auth: %{}, tenant: {:single, "test_tenant"}, user_alias: "test_user_alias", user: "user", mode: :transaction, replica_type: :single, - log_level: nil + log_level: nil, + reconnect_retries: 5 } {:ok, :connect, data, {_, next_event, _}} = Db.init(args) @@ -33,7 +35,7 @@ defmodule Supavisor.DbHandlerTest do end describe "handle_event/4" do - test "db is avaible" do + test "db is available" do :meck.new(:gen_tcp, [:unstick, :passthrough]) :meck.new(:inet, [:unstick, :passthrough]) :meck.expect(:gen_tcp, :connect, fn _host, _port, _sock_opts -> {:ok, :sock} end) @@ -57,7 
+59,8 @@ defmodule Supavisor.DbHandlerTest do Db.handle_event(:internal, nil, :connect, %{ auth: auth, sock: {:gen_tcp, nil}, - id: {"a", "b"} + id: @id, + proxy: false }) assert state == @@ -74,19 +77,20 @@ defmodule Supavisor.DbHandlerTest do secrets: secrets }, sock: {:gen_tcp, :sock}, - id: {"a", "b"} + id: @id, + proxy: false }} :meck.unload(:gen_tcp) end - test "db is not avaible" do + test "db is not available" do :meck.new(:gen_tcp, [:unstick, :passthrough]) :meck.expect(:gen_tcp, :connect, fn _host, _port, _sock_opts -> {:error, "some error"} end) auth = %{ - id: {"a", "b"}, + id: @id, host: "host", port: 0, user: "some user", @@ -95,41 +99,19 @@ defmodule Supavisor.DbHandlerTest do ip_version: :inet } - state = Db.handle_event(:internal, nil, :connect, %{auth: auth, sock: nil, id: {"a", "b"}}) + state = + Db.handle_event(:internal, nil, :connect, %{ + auth: auth, + sock: nil, + id: {"a", "b"}, + reconnect_retries: 5 + }) assert state == {:keep_state_and_data, {:state_timeout, 2_500, :connect}} :meck.unload(:gen_tcp) end end - test "handle_event/4 with idle state" do - {:ok, sock} = :gen_tcp.listen(0, []) - data = %{sock: {:gen_tcp, sock}, caller: nil, buffer: []} - from = {self(), :test_ref} - event = {:call, from} - payload = {:db_call, self(), "test_data"} - - {:next_state, :busy, new_data, reply} = Db.handle_event(event, payload, :idle, data) - - # check if the message arrived in gen_tcp.send - assert {:reply, ^from, {:error, :enotconn}} = reply - assert new_data.caller == self() - end - - test "handle_event/4 with non-idle state" do - data = %{sock: nil, caller: self(), buffer: []} - from = {self(), :test_ref} - event = {:call, from} - payload = {:db_call, self(), "test_data"} - state = :non_idle - - {:keep_state, new_data, reply} = Db.handle_event(event, payload, state, data) - - assert {:reply, ^from, {:buffering, 9}} = reply - assert new_data.caller == self() - assert new_data.buffer == ["test_data"] - end - describe "handle_event/4 info tcp 
authentication authentication_md5_password payload events" do test "keeps state while sending the digested md5" do # `82` is `?R`, which identifies the payload tag as `:authentication` @@ -177,7 +159,7 @@ defmodule Supavisor.DbHandlerTest do caller_pid = self() data = %{ - id: {{:single, "tenant"}, "user", :session, "postgres"}, + id: @id, caller: caller_pid, sock: {:gen_tcp, nil}, stats: %{}, @@ -192,46 +174,13 @@ defmodule Supavisor.DbHandlerTest do :meck.new(:inet, [:unstick, :passthrough]) :meck.expect(:prim_inet, :getstat, fn _, _ -> - {:ok, %{}} - end) - - :meck.expect(:inet, :setopts, fn _, _ -> :ok end) - - {:next_state, :idle, new_data, _} = Db.handle_event(:info, event, state, data) - - assert new_data.caller == caller_pid - :meck.unload(:prim_inet) - :meck.unload(:inet) - end - - test "does not update caller in data for non-session mode" do - proto = :tcp - bin = "response_data" <> Server.ready_for_query() - caller_pid = self() - - data = %{ - id: {{:single, "tenant"}, "user", :session, "postgres"}, - caller: caller_pid, - sock: {:gen_tcp, nil}, - stats: %{}, - mode: :transaction, - sent: false - } - - state = :some_state - event = {proto, :dummy_value, bin} - :meck.new(:prim_inet, [:unstick, :passthrough]) - :meck.new(:inet, [:unstick, :passthrough]) - - :meck.expect(:prim_inet, :getstat, fn _, _ -> - {:ok, %{}} + {:ok, [{:recv_oct, 21}, {:send_oct, 37}]} end) :meck.expect(:inet, :setopts, fn _, _ -> :ok end) - {:next_state, :idle, new_data, _} = Db.handle_event(:info, event, state, data) + :keep_state_and_data = Db.handle_event(:info, event, state, data) - assert new_data.caller == nil :meck.unload(:prim_inet) :meck.unload(:inet) end diff --git a/test/supavisor/handler_helpers_test.exs b/test/supavisor/handler_helpers_test.exs index 28a3512b..e1a1e090 100644 --- a/test/supavisor/handler_helpers_test.exs +++ b/test/supavisor/handler_helpers_test.exs @@ -1,4 +1,52 @@ defmodule Supavisor.HandlerHelpersTest do - use ExUnit.Case - doctest 
Supavisor.HandlerHelpers + use ExUnit.Case, async: true + + @subject Supavisor.HandlerHelpers + + doctest @subject + + describe "parse_user_info/1" do + test "extracts the external_id from the username" do + payload = %{"user" => "test.user.external_id"} + {:single, {name, external_id, nil}} = @subject.parse_user_info(payload) + assert name == "test.user" + assert external_id == "external_id" + end + + test "username consists only of username" do + username = "username" + payload = %{"user" => username} + {:single, {user, nil, nil}} = @subject.parse_user_info(payload) + assert username == user + end + + test "consist cluster" do + username = "some.user.cluster.alias" + {t, {u, a, nil}} = @subject.parse_user_info(%{"user" => username}) + assert {t, {u, a, nil}} == {:cluster, {"some.user", "alias", nil}} + end + + test "external_id in options" do + user = "test.user" + external_id = "external_id" + payload = %{"options" => %{"reference" => external_id}, "user" => user} + {:single, {user1, external_id1, nil}} = @subject.parse_user_info(payload) + assert user1 == user + assert external_id1 == external_id + end + + test "unicode in username" do + payload = %{"user" => "тестовe.імʼя.external_id"} + {:single, {name, external_id, nil}} = @subject.parse_user_info(payload) + assert name == "тестовe.імʼя" + assert external_id == "external_id" + end + + test "extracts db_name" do + payload = %{"user" => "user", "database" => "postgres_test"} + {:single, {name, nil, db_name}} = @subject.parse_user_info(payload) + assert name == "user" + assert db_name == "postgres_test" + end + end end diff --git a/test/supavisor/helpers_test.exs b/test/supavisor/helpers_test.exs index 3966d1ad..4d2fb5a3 100644 --- a/test/supavisor/helpers_test.exs +++ b/test/supavisor/helpers_test.exs @@ -35,4 +35,52 @@ defmodule Supavisor.HelpersTest do {:error, "Unsupported or invalid secret format"} end end + + describe "validate_name/1" do + test "allows valid unquoted names" do + assert 
Helpers.validate_name("valid_name") + # Minimum length + assert Helpers.validate_name("a") + assert Helpers.validate_name("valid_name_123") + assert Helpers.validate_name("name$123") + end + + test "rejects invalid unquoted names" do + # Empty name + refute Helpers.validate_name("") + # Starts with a number + refute Helpers.validate_name("0invalid") + # Contains uppercase letters + refute Helpers.validate_name("InvalidName") + # Contains hyphen + refute Helpers.validate_name("invalid-name") + # Contains period + refute Helpers.validate_name("invalid.name") + # Over 63 chars + refute Helpers.validate_name( + "this_name_is_way_toooooo_long_and_exceeds_sixty_three_characters" + ) + end + + test "allows valid quoted names" do + # Contains space + assert Helpers.validate_name("\"Valid Name\"") + # Contains uppercase letters + assert Helpers.validate_name("\"ValidName123\"") + # Same as unquoted but quoted + assert Helpers.validate_name("\"valid_name\"") + # Contains dollar sign + assert Helpers.validate_name("\"Name with $\"") + assert Helpers.validate_name("\"name with multiple spaces\"") + end + + test "rejects invalid quoted names" do + # Contains hyphen + refute Helpers.validate_name("\"invalid-name\"") + # Contains period + refute Helpers.validate_name("\"invalid.name\"") + # Empty name + refute Helpers.validate_name("\"\"") + end + end end diff --git a/test/supavisor/pg_parser_test.exs b/test/supavisor/pg_parser_test.exs index 9209cb8d..54502312 100644 --- a/test/supavisor/pg_parser_test.exs +++ b/test/supavisor/pg_parser_test.exs @@ -1,4 +1,7 @@ defmodule Supavisor.PgParserTest do use ExUnit.Case, async: true - doctest Supavisor.PgParser + + @subject Supavisor.PgParser + + doctest @subject end diff --git a/test/supavisor/prom_ex_test.exs b/test/supavisor/prom_ex_test.exs index d29dc23c..27edfc0c 100644 --- a/test/supavisor/prom_ex_test.exs +++ b/test/supavisor/prom_ex_test.exs @@ -2,11 +2,19 @@ defmodule Supavisor.PromExTest do use ExUnit.Case, async: true use 
Supavisor.DataCase + import Supavisor.Asserts + alias Supavisor.Monitoring.PromEx @tenant "prom_tenant" - setup_all do + # These tests are known to be flaky, and while these do not affect users + # directly we can run them independently when needed. In future we probably + # should make them pass "regularly", but for now that is easier to filter them + # out. + @moduletag flaky: true + + setup do db_conf = Application.get_env(:supavisor, Repo) {:ok, proxy} = @@ -28,12 +36,10 @@ defmodule Supavisor.PromExTest do test "remove tenant tag upon termination", %{proxy: proxy, user: user, db_name: db_name} do assert PromEx.get_metrics() =~ "tenant=\"#{@tenant}\"" - GenServer.stop(proxy) - Supavisor.stop({{:single, @tenant}, user, :transaction, db_name}) - - Process.sleep(500) + :ok = GenServer.stop(proxy) + :ok = Supavisor.stop({{:single, @tenant}, user, :transaction, db_name, nil}) - refute PromEx.get_metrics() =~ "tenant=\"#{@tenant}\"" + refute_eventually(10, fn -> PromEx.get_metrics() =~ "tenant=\"#{@tenant}\"" end) end test "clean_string/1 removes extra spaces from metric string" do diff --git a/test/supavisor/syn_handler_test.exs b/test/supavisor/syn_handler_test.exs index edfa4f5a..dbb65243 100644 --- a/test/supavisor/syn_handler_test.exs +++ b/test/supavisor/syn_handler_test.exs @@ -3,11 +3,12 @@ defmodule Supavisor.SynHandlerTest do import ExUnit.CaptureLog require Logger alias Ecto.Adapters.SQL.Sandbox + alias Supavisor.Support.Cluster - @id {{:single, "syn_tenant"}, "postgres", :session, "postgres"} + @id {{:single, "syn_tenant"}, "postgres", :session, "postgres", nil} test "resolving conflict" do - node2 = :"secondary@127.0.0.1" + {:ok, _pid, node2} = Cluster.start_node() secret = %{alias: "postgres"} auth_secret = {:password, fn -> secret end} @@ -16,7 +17,7 @@ defmodule Supavisor.SynHandlerTest do assert pid2 == Supavisor.get_global_sup(@id) assert node(pid2) == node2 true = Node.disconnect(node2) - Process.sleep(500) + Process.sleep(1000) assert nil == 
Supavisor.get_global_sup(@id) {:ok, pid1} = Supavisor.start(@id, auth_secret) @@ -28,7 +29,7 @@ defmodule Supavisor.SynHandlerTest do msg = "Resolving syn_tenant conflict, stop local pid" - assert capture_log(fn -> Logger.warn(msg) end) =~ + assert capture_log(fn -> Logger.warning(msg) end) =~ msg assert pid2 == Supavisor.get_global_sup(@id) diff --git a/test/supavisor_web/controllers/metrics_controller_test.exs b/test/supavisor_web/controllers/metrics_controller_test.exs index 5bbd3407..06f7a978 100644 --- a/test/supavisor_web/controllers/metrics_controller_test.exs +++ b/test/supavisor_web/controllers/metrics_controller_test.exs @@ -1,5 +1,6 @@ defmodule SupavisorWeb.MetricsControllerTest do use SupavisorWeb.ConnCase + alias Supavisor.Support.Cluster setup %{conn: conn} do new_conn = @@ -13,6 +14,10 @@ defmodule SupavisorWeb.MetricsControllerTest do end test "exporting metrics", %{conn: conn} do + {:ok, _pid, node2} = Cluster.start_node() + + Node.connect(node2) + :meck.expect(Supavisor.Jwt, :authorize, fn _token, _secret -> {:ok, %{}} end) conn = get(conn, Routes.metrics_path(conn, :index)) assert conn.status == 200 diff --git a/test/supavisor_web/views/layout_view_test.exs b/test/supavisor_web/views/layout_view_test.exs deleted file mode 100644 index 5584a842..00000000 --- a/test/supavisor_web/views/layout_view_test.exs +++ /dev/null @@ -1,8 +0,0 @@ -defmodule SupavisorWeb.LayoutViewTest do - use SupavisorWeb.ConnCase, async: true - - # When testing helpers, you may want to import Phoenix.HTML and - # use functions such as safe_to_string() to convert the helper - # result into an HTML string. - # import Phoenix.HTML -end diff --git a/test/support/asserts.ex b/test/support/asserts.ex new file mode 100644 index 00000000..d0e720b9 --- /dev/null +++ b/test/support/asserts.ex @@ -0,0 +1,41 @@ +defmodule Supavisor.Asserts do + @doc """ + Asserts that `function` will eventually success. Fails otherwise. 
+ + It performs `repeats` checks with `delay` milliseconds between each check. + """ + def assert_eventually(repeats \\ 5, delay \\ 1000, function) + + def assert_eventually(0, _, _) do + raise ExUnit.AssertionError, message: "Expected function to return truthy value" + end + + def assert_eventually(n, delay, func) do + if func.() do + :ok + else + Process.sleep(delay) + assert_eventually(n - 1, delay, func) + end + end + + @doc """ + Asserts that `function` will eventually fail. Fails otherwise. + + It performs `repeats` checks with `delay` milliseconds between each check. + """ + def refute_eventually(repeats \\ 5, delay \\ 1000, function) + + def refute_eventually(0, _, _) do + raise ExUnit.AssertionError, message: "Expected function to return falsey value" + end + + def refute_eventually(n, delay, func) do + if func.() do + Process.sleep(delay) + refute_eventually(n - 1, delay, func) + else + :ok + end + end +end diff --git a/test/support/cluster.ex b/test/support/cluster.ex index 9243eaf6..f2add46a 100644 --- a/test/support/cluster.ex +++ b/test/support/cluster.ex @@ -3,7 +3,24 @@ defmodule Supavisor.Support.Cluster do This module provides functionality to help handle distributive mode for testing. 
""" - def apply_config(node) do + def start_node(name \\ :peer.random_name()) do + {:ok, pid, node} = + :peer.start_link(%{ + name: name, + host: ~c"127.0.0.1", + longnames: true, + connection: :standard_io + }) + + :peer.call(pid, :logger, :set_primary_config, [:level, :none]) + true = :peer.call(pid, :code, :set_path, [:code.get_path()]) + apply_config(pid) + :peer.call(pid, Application, :ensure_all_started, [:supavisor]) + + {:ok, pid, node} + end + + defp apply_config(pid) do for {app_name, _, _} <- Application.loaded_applications() do for {key, val} <- Application.get_all_env(app_name) do val = @@ -20,16 +37,17 @@ defmodule Supavisor.Support.Cluster do {:supavisor, :region} -> "usa" - {:partisan, :listen_addrs} -> - [{"127.0.0.1", 10201}] + {:supavisor, :availability_zone} -> + "ap-southeast-1c" _ -> val end - :rpc.call(node, Application, :put_env, [app_name, key, val, [persistent: true]]) - :rpc.call(node, Supavisor.Monitoring.PromEx, :set_metrics_tags, []) + :peer.call(pid, Application, :put_env, [app_name, key, val]) end end + + :peer.call(pid, Supavisor.Monitoring.PromEx, :set_metrics_tags, []) end end diff --git a/test/support/fixtures/single_connection.ex b/test/support/fixtures/single_connection.ex index 1b416c50..2f3d89b5 100644 --- a/test/support/fixtures/single_connection.ex +++ b/test/support/fixtures/single_connection.ex @@ -1,5 +1,8 @@ defmodule SingleConnection do + @moduledoc false + alias Postgrex, as: P + @behaviour P.SimpleConnection def connect(conf) do diff --git a/test/test_helper.exs b/test/test_helper.exs index a3a81c8b..b4467cac 100644 --- a/test/test_helper.exs +++ b/test/test_helper.exs @@ -1,13 +1,13 @@ {:ok, _} = Node.start(:"primary@127.0.0.1", :longnames) -node2 = :"secondary@127.0.0.1" -:ct_slave.start(node2) -true = :erpc.call(node2, :code, :set_path, [:code.get_path()]) - -Supavisor.Support.Cluster.apply_config(node2) - -{:ok, _} = :erpc.call(node2, :application, :ensure_all_started, [:supavisor]) Cachex.start_link(name: 
Supavisor.Cache) -ExUnit.start() +ExUnit.start( + capture_log: true, + exclude: [ + flaky: true, + integration: true + ] +) + Ecto.Adapters.SQL.Sandbox.mode(Supavisor.Repo, :auto) diff --git a/typos.toml b/typos.toml new file mode 100644 index 00000000..ec32404b --- /dev/null +++ b/typos.toml @@ -0,0 +1,11 @@ +[files] +extend-exclude = [ + # Ignore integration tests, as these can be copied as-is + "test/integration/**" +] + +[default] +extend-ignore-re = [ + "\\bey[A-Za-z0-9_-]{20,}\\.ey[A-Za-z0-9_-]{20,}\\.[A-Za-z0-9_-]{20,}\\b", + "\\bfly-local-[a-z0-9]+\\b" +]