From 5ceacca5e31d2068677e7fd702e6d3d27f430855 Mon Sep 17 00:00:00 2001 From: Sam Rose Date: Wed, 21 Jan 2026 23:40:43 -0500 Subject: [PATCH 1/7] tests: refactor oriole pg_regress and add Docker regress coverage --- Dockerfile-orioledb-17 | 5 +- nix/checks.nix | 42 +- nix/docs/docker-testing.md | 160 +++++++ .../z_orioledb-17_docs-cascades-deletes.out | 373 +++++++++++++++ .../z_orioledb-17_docs-full-text-search.out | 298 ++++++++++++ .../expected/z_orioledb-17_docs-functions.out | 230 ++++++++++ .../expected/z_orioledb-17_docs-indexes.out | 24 + .../z_orioledb-17_docs-partitioning.out | 110 +++++ .../expected/z_orioledb-17_ext_interface.out | 71 ++- .../z_orioledb-17_extensions_schema.out | 23 + .../expected/z_orioledb-17_index_advisor.out | 16 + nix/tests/expected/z_orioledb-17_pgroonga.out | 78 ++++ .../z_orioledb-17_verify_orioledb.out | 34 ++ .../z_orioledb-17_docs-cascades-deletes.sql | 262 +++++++++++ .../z_orioledb-17_docs-full-text-search.sql | 197 ++++++++ .../sql/z_orioledb-17_docs-functions.sql | 225 ++++++++++ nix/tests/sql/z_orioledb-17_docs-indexes.sql | 26 ++ .../sql/z_orioledb-17_docs-partitioning.sql | 89 ++++ .../sql/z_orioledb-17_extensions_schema.sql | 15 + nix/tests/sql/z_orioledb-17_index_advisor.sql | 13 + nix/tests/sql/z_orioledb-17_pgroonga.sql | 48 ++ .../sql/z_orioledb-17_verify_orioledb.sql | 24 + test-docker-image.sh | 424 ++++++++++++++++++ 23 files changed, 2778 insertions(+), 9 deletions(-) create mode 100644 nix/docs/docker-testing.md create mode 100644 nix/tests/expected/z_orioledb-17_docs-cascades-deletes.out create mode 100644 nix/tests/expected/z_orioledb-17_docs-full-text-search.out create mode 100644 nix/tests/expected/z_orioledb-17_docs-functions.out create mode 100644 nix/tests/expected/z_orioledb-17_docs-indexes.out create mode 100644 nix/tests/expected/z_orioledb-17_docs-partitioning.out create mode 100644 nix/tests/expected/z_orioledb-17_extensions_schema.out create mode 100644 
nix/tests/expected/z_orioledb-17_index_advisor.out create mode 100644 nix/tests/expected/z_orioledb-17_pgroonga.out create mode 100644 nix/tests/expected/z_orioledb-17_verify_orioledb.out create mode 100644 nix/tests/sql/z_orioledb-17_docs-cascades-deletes.sql create mode 100644 nix/tests/sql/z_orioledb-17_docs-full-text-search.sql create mode 100644 nix/tests/sql/z_orioledb-17_docs-functions.sql create mode 100644 nix/tests/sql/z_orioledb-17_docs-indexes.sql create mode 100644 nix/tests/sql/z_orioledb-17_docs-partitioning.sql create mode 100644 nix/tests/sql/z_orioledb-17_extensions_schema.sql create mode 100644 nix/tests/sql/z_orioledb-17_index_advisor.sql create mode 100644 nix/tests/sql/z_orioledb-17_pgroonga.sql create mode 100644 nix/tests/sql/z_orioledb-17_verify_orioledb.sql create mode 100755 test-docker-image.sh diff --git a/Dockerfile-orioledb-17 b/Dockerfile-orioledb-17 index ebcaa38e7..c343f1cd1 100644 --- a/Dockerfile-orioledb-17 +++ b/Dockerfile-orioledb-17 @@ -231,8 +231,9 @@ RUN sed -i \ chown -R postgres:postgres /etc/postgresql-custom # Remove items from postgresql.conf -RUN sed -i 's/ timescaledb,//g;' "/etc/postgresql/postgresql.conf" - #as of pg 16.4 + this db_user_namespace totally deprecated and will break the server if setting is present +RUN sed -i 's/ timescaledb,//g;' "/etc/postgresql/postgresql.conf" + # As of pg 16.4+ db_user_namespace is totally deprecated and will break the server if setting is present +RUN sed -i 's/db_user_namespace = off/#db_user_namespace = off/g;' "/etc/postgresql/postgresql.conf" RUN sed -i 's/ timescaledb,//g; s/ plv8,//g; s/ postgis,//g; s/ pgrouting,//g' "/etc/postgresql-custom/supautils.conf" RUN sed -i 's/\(shared_preload_libraries.*\)'\''\(.*\)$/\1, orioledb'\''\2/' "/etc/postgresql/postgresql.conf" RUN echo "default_table_access_method = 'orioledb'" >> "/etc/postgresql/postgresql.conf" diff --git a/nix/checks.nix b/nix/checks.nix index 7370aee9b..61701ca04 100644 --- a/nix/checks.nix +++ b/nix/checks.nix 
@@ -113,12 +113,13 @@ let name = pkg.version; in - if builtins.match "15.*" name != null then + # Check orioledb first since "17_15" would match "17.*" pattern + if builtins.match "17_[0-9]+" name != null then + "orioledb-17" + else if builtins.match "15.*" name != null then "15" else if builtins.match "17.*" name != null then "17" - else if builtins.match "orioledb-17.*" name != null then - "orioledb-17" else throw "Unsupported PostgreSQL version: ${name}"; @@ -127,10 +128,18 @@ version: dir: let files = builtins.readDir dir; + # Get list of OrioleDB-specific test basenames (without z_orioledb-17_ prefix) + orioledbVariants = pkgs.lib.pipe files [ + builtins.attrNames + (builtins.filter (n: builtins.match "z_orioledb-17_.*\\.sql" n != null)) + (map (n: builtins.substring 14 (pkgs.lib.stringLength n - 18) n)) # Remove "z_orioledb-17_" prefix (14 chars) and ".sql" suffix (4 chars) + ]; + hasOrioledbVariant = basename: builtins.elem basename orioledbVariants; isValidFile = name: let isVersionSpecific = builtins.match "z_.*" name != null; + basename = builtins.substring 0 (pkgs.lib.stringLength name - 4) name; # Remove .sql matchesVersion = if isVersionSpecific then if version == "orioledb-17" then @@ -139,6 +148,10 @@ builtins.match "z_17_.*" name != null else builtins.match "z_15_.*" name != null + else + # For common tests: exclude if OrioleDB variant exists and we're running OrioleDB + if version == "orioledb-17" && hasOrioledbVariant basename then + false else true; in @@ -275,7 +288,14 @@ if [[ "${pgpkg.version}" == *"17"* ]]; then perl -pi -e 's/ timescaledb,//g' "$PGTAP_CLUSTER/postgresql.conf" fi - #NOTE in the future we may also need to add the orioledb extension to the cluster when cluster is oriole + # Configure OrioleDB if running orioledb-17 check + #shellcheck disable=SC2193 + if [[ "${pgpkg.version}" == *"_"* ]]; then + log info "Configuring OrioleDB..." 
+ # Add orioledb to shared_preload_libraries + perl -pi -e "s/(shared_preload_libraries = ')/\$1orioledb, /" "$PGTAP_CLUSTER/postgresql.conf" + log info "OrioleDB added to shared_preload_libraries" + fi # Check if postgresql.conf exists if [ ! -f "$PGTAP_CLUSTER/postgresql.conf" ]; then @@ -302,6 +322,13 @@ log info "Creating test database" log_cmd createdb -p ${pgPort} -h localhost --username=supabase_admin testing + # Create orioledb extension if running orioledb-17 check (before prime.sql) + #shellcheck disable=SC2193 + if [[ "${pgpkg.version}" == *"_"* ]]; then + log info "Creating orioledb extension..." + log_cmd psql -p ${pgPort} -h localhost --username=supabase_admin -d testing -c "CREATE EXTENSION IF NOT EXISTS orioledb;" + fi + log info "Loading prime SQL file" if ! log_cmd psql -p ${pgPort} -h localhost --username=supabase_admin -d testing -v ON_ERROR_STOP=1 -Xf ${./tests/prime.sql}; then log error "Error executing SQL file. PostgreSQL log content:" @@ -341,6 +368,13 @@ check_postgres_ready + # Create orioledb extension if running orioledb-17 check (before prime.sql) + #shellcheck disable=SC2193 + if [[ "${pgpkg.version}" == *"_"* ]]; then + log info "Creating orioledb extension for pg_regress tests..." + log_cmd psql -p ${pgPort} -h localhost --no-password --username=supabase_admin -d postgres -c "CREATE EXTENSION IF NOT EXISTS orioledb;" + fi + log info "Loading prime SQL file" if ! log_cmd psql -p ${pgPort} -h localhost --no-password --username=supabase_admin -d postgres -v ON_ERROR_STOP=1 -Xf ${./tests/prime.sql} 2>&1; then log error "Error executing SQL file" diff --git a/nix/docs/docker-testing.md b/nix/docs/docker-testing.md new file mode 100644 index 000000000..efffb4875 --- /dev/null +++ b/nix/docs/docker-testing.md @@ -0,0 +1,160 @@ +# Docker Image Testing + +This document describes how to test PostgreSQL Docker images against the pg_regress test suite. 
+ +## Overview + +The `test-docker-image.sh` script builds a Docker image from one of the project's Dockerfiles and runs the existing `nix/tests/` test suite against it. This validates that Docker images work correctly before deployment. + +## Quick Start + +```bash +# Test PostgreSQL 17 image +./test-docker-image.sh Dockerfile-17 + +# Test PostgreSQL 15 image +./test-docker-image.sh Dockerfile-15 + +# Test OrioleDB 17 image +./test-docker-image.sh Dockerfile-orioledb-17 +``` + +## Requirements + +- Docker +- Nix (provides psql and pg_regress from the flake) + +## Usage + +``` +Usage: ./test-docker-image.sh [OPTIONS] DOCKERFILE + +Test a PostgreSQL Docker image against the pg_regress test suite. + +Arguments: + DOCKERFILE The Dockerfile to build and test (e.g., Dockerfile-17) + +Options: + -h, --help Show this help message + --no-build Skip building the image (use existing) + --keep Keep the container running after tests (for debugging) +``` + +### Examples + +```bash +# Build and test +./test-docker-image.sh Dockerfile-17 + +# Test without rebuilding (faster iteration) +./test-docker-image.sh --no-build Dockerfile-17 + +# Keep container running for debugging +./test-docker-image.sh --keep Dockerfile-17 +# Then connect with: +# psql -h localhost -p 5435 -U supabase_admin postgres +``` + +## How It Works + +1. **Build** - Builds Docker image from the specified Dockerfile +2. **Start** - Runs container with PostgreSQL exposed on a test port +3. **Wait** - Waits for PostgreSQL to be ready (pg_isready) +4. **HTTP Mock** - Starts the HTTP mock server inside the container for `http` extension tests +5. **Setup** - Runs `prime.sql` to enable all extensions, creates `test_config` table +6. **Patch** - Copies test files to temp dir, patches expected outputs for Docker-specific differences +7. **Test** - Runs pg_regress with version-filtered test files +8. **Compare** - Checks output against patched expected files +9. 
**Report** - Shows pass/fail, prints diffs on failure +10. **Cleanup** - Removes container (unless `--keep`) + +## Version Mapping + +| Dockerfile | Version | Test Port | Test Filter | Tests | +|------------|---------|-----------|-------------|-------| +| Dockerfile-15 | 15 | 5436 | `z_15_*.sql` + common | 53 | +| Dockerfile-17 | 17 | 5435 | `z_17_*.sql` + common | 49 | +| Dockerfile-orioledb-17 | orioledb-17 | 5437 | `z_orioledb-17_*.sql` + common | 47 | + +## Test Filtering + +Tests in `nix/tests/sql/` are filtered by PostgreSQL version: + +- Files without `z_` prefix run for **all versions** +- Files starting with `z_15_` run only for PostgreSQL 15 +- Files starting with `z_17_` run only for PostgreSQL 17 +- Files starting with `z_orioledb-17_` run only for OrioleDB 17 + +## CI Integration + +### GitHub Actions + +```yaml +jobs: + test-docker: + runs-on: ubuntu-latest + strategy: + matrix: + dockerfile: + - Dockerfile-15 + - Dockerfile-17 + - Dockerfile-orioledb-17 + steps: + - uses: actions/checkout@v4 + - uses: cachix/install-nix-action@v24 + - name: Test Docker image + run: ./test-docker-image.sh ${{ matrix.dockerfile }} +``` + +## Debugging Failed Tests + +When tests fail, the script outputs `regression.diffs` showing the differences between expected and actual output. + +To investigate further: + +```bash +# Run with --keep to preserve container +./test-docker-image.sh --keep Dockerfile-17 + +# Connect to the running database +psql -h localhost -p 5435 -U supabase_admin postgres + +# Run individual test manually +psql -h localhost -p 5435 -U supabase_admin postgres -f nix/tests/sql/pgroonga.sql +``` + +## Relationship to Nix Checks + +This script complements `nix flake check` which tests the Nix-built PostgreSQL packages directly. The Docker tests validate that: + +1. Docker image builds correctly +2. Extensions are properly installed in the container +3. Configuration files are correctly applied +4. 
The containerized PostgreSQL behaves the same as the Nix-built version + +## Known Differences (Auto-Patched) + +The script automatically patches expected outputs at runtime to handle Docker-specific differences: + +| Difference | Affected Tests | Cause | +|------------|----------------|-------| +| `$user` → `\$user` in search_path | pgmq, vault, roles | Docker image configuration escapes `$` in search_path | + +These patches are applied to a temporary copy of the test files - the original files are never modified. + +### OrioleDB-Specific Test Files + +For tests that produce different output under OrioleDB (due to the orioledb extension being loaded or different storage behavior), create OrioleDB-specific versions: + +- `nix/tests/sql/z_orioledb-17_<testname>.sql` - OrioleDB version of the test +- `nix/tests/expected/z_orioledb-17_<testname>.out` - Expected output for OrioleDB + +When an OrioleDB variant exists, the common test is automatically skipped for OrioleDB runs. This approach is used by both the Docker test script and Nix flake checks. + +## Adding New Tests + +1. Add SQL file to `nix/tests/sql/` +2. Add expected output to `nix/tests/expected/` +3. For version-specific tests, prefix with `z_15_`, `z_17_`, or `z_orioledb-17_` + +See existing tests for examples. 
diff --git a/nix/tests/expected/z_orioledb-17_docs-cascades-deletes.out b/nix/tests/expected/z_orioledb-17_docs-cascades-deletes.out new file mode 100644 index 000000000..e196992d7 --- /dev/null +++ b/nix/tests/expected/z_orioledb-17_docs-cascades-deletes.out @@ -0,0 +1,373 @@ +-- testing sql found in https://supabase.com/docs/guides/database/postgres/cascades-deletes +-- all of the errors produced by this file are expected +create table grandparent ( + id serial primary key, + name text +); +create table parent ( + id serial primary key, + name text, + parent_id integer references grandparent (id) + on delete cascade +); +create table child ( + id serial primary key, + name text, + father integer references parent (id) + on delete restrict +); +insert into grandparent + (id, name) +values + (1, 'Elizabeth'); +insert into parent + (id, name, parent_id) +values + (1, 'Charles', 1); +insert into parent + (id, name, parent_id) +values + (2, 'Diana', 1); +insert into child + (id, name, father) +values + (1, 'William', 1); +select count(*) from grandparent; + count +------- + 1 +(1 row) + +select count(*) from parent; + count +------- + 2 +(1 row) + +select count(*) from child; + count +------- + 1 +(1 row) + +delete from grandparent; +ERROR: update or delete on table "parent" violates foreign key constraint "child_father_fkey" on table "child" +DETAIL: Key (id)=(1) is still referenced from table "child". +select count(*) from grandparent; + count +------- + 1 +(1 row) + +select count(*) from parent; + count +------- + 2 +(1 row) + +select count(*) from child; + count +------- + 1 +(1 row) + +insert into grandparent + (id, name) +values + (1, 'Elizabeth'); +ERROR: duplicate key value violates unique constraint "grandparent_pkey" +DETAIL: Key (id)=('1') already exists. +insert into parent + (id, name, parent_id) +values + (1, 'Charles', 1); +ERROR: duplicate key value violates unique constraint "parent_pkey" +DETAIL: Key (id)=('1') already exists. 
+insert into parent + (id, name, parent_id) +values + (2, 'Diana', 1); +ERROR: duplicate key value violates unique constraint "parent_pkey" +DETAIL: Key (id)=('2') already exists. +insert into child + (id, name, father) +values + (1, 'William', 1); +ERROR: duplicate key value violates unique constraint "child_pkey" +DETAIL: Key (id)=('1') already exists. +alter table child +drop constraint child_father_fkey; +alter table child +add constraint child_father_fkey foreign key (father) references parent (id) + on delete no action; +delete from grandparent; +ERROR: update or delete on table "parent" violates foreign key constraint "child_father_fkey" on table "child" +DETAIL: Key (id)=(1) is still referenced from table "child". +select count(*) from grandparent; + count +------- + 1 +(1 row) + +select count(*) from parent; + count +------- + 2 +(1 row) + +select count(*) from child; + count +------- + 1 +(1 row) + +insert into grandparent + (id, name) +values + (1, 'Elizabeth'); +ERROR: duplicate key value violates unique constraint "grandparent_pkey" +DETAIL: Key (id)=('1') already exists. +insert into parent + (id, name, parent_id) +values + (1, 'Charles', 1); +ERROR: duplicate key value violates unique constraint "parent_pkey" +DETAIL: Key (id)=('1') already exists. +insert into parent + (id, name, parent_id) +values + (2, 'Diana', 1); +ERROR: duplicate key value violates unique constraint "parent_pkey" +DETAIL: Key (id)=('2') already exists. +insert into child + (id, name, father) +values + (1, 'William', 1); +ERROR: duplicate key value violates unique constraint "child_pkey" +DETAIL: Key (id)=('1') already exists. 
+alter table child +drop constraint child_father_fkey; +alter table child +add constraint child_father_fkey foreign key (father) references parent (id) + on delete no action initially deferred; +delete from grandparent; +ERROR: update or delete on table "parent" violates foreign key constraint "child_father_fkey" on table "child" +DETAIL: Key (id)=(1) is still referenced from table "child". +select count(*) from grandparent; + count +------- + 1 +(1 row) + +select count(*) from parent; + count +------- + 2 +(1 row) + +select count(*) from child; + count +------- + 1 +(1 row) + +insert into grandparent + (id, name) +values + (1, 'Elizabeth'); +ERROR: duplicate key value violates unique constraint "grandparent_pkey" +DETAIL: Key (id)=('1') already exists. +insert into parent + (id, name, parent_id) +values + (1, 'Charles', 1); +ERROR: duplicate key value violates unique constraint "parent_pkey" +DETAIL: Key (id)=('1') already exists. +insert into parent + (id, name, parent_id) +values + (2, 'Diana', 1); +ERROR: duplicate key value violates unique constraint "parent_pkey" +DETAIL: Key (id)=('2') already exists. +insert into child + (id, name, father) +values + (1, 'William', 1); +ERROR: duplicate key value violates unique constraint "child_pkey" +DETAIL: Key (id)=('1') already exists. 
+alter table child +add column mother integer references parent (id) + on delete cascade; +update child +set mother = 2 +where id = 1; +delete from grandparent; +select count(*) from grandparent; + count +------- + 0 +(1 row) + +select count(*) from parent; + count +------- + 0 +(1 row) + +select count(*) from child; + count +------- + 0 +(1 row) + +create table test_cascade ( + id serial primary key, + name text +); +create table test_cascade_child ( + id serial primary key, + parent_id integer references test_cascade (id) on delete cascade, + name text +); +insert into test_cascade (name) values ('Parent'); +insert into test_cascade_child (parent_id, name) values (1, 'Child'); +delete from test_cascade; +select count(*) from test_cascade; + count +------- + 0 +(1 row) + +select count(*) from test_cascade_child; + count +------- + 0 +(1 row) + +create table test_restrict ( + id serial primary key, + name text +); +create table test_restrict_child ( + id serial primary key, + parent_id integer references test_restrict (id) on delete restrict, + name text +); +insert into test_restrict (name) values ('Parent'); +insert into test_restrict_child (parent_id, name) values (1, 'Child'); +delete from test_restrict; +ERROR: update or delete on table "test_restrict" violates foreign key constraint "test_restrict_child_parent_id_fkey" on table "test_restrict_child" +DETAIL: Key (id)=(1) is still referenced from table "test_restrict_child". 
+select count(*) from test_restrict; + count +------- + 1 +(1 row) + +select count(*) from test_restrict_child; + count +------- + 1 +(1 row) + +create table test_set_null ( + id serial primary key, + name text +); +create table test_set_null_child ( + id serial primary key, + parent_id integer references test_set_null (id) on delete set null, + name text +); +insert into test_set_null (name) values ('Parent'); +insert into test_set_null_child (parent_id, name) values (1, 'Child'); +delete from test_set_null; +select count(*) from test_set_null; + count +------- + 0 +(1 row) + +select count(*) from test_set_null_child; + count +------- + 1 +(1 row) + +select parent_id from test_set_null_child; + parent_id +----------- + +(1 row) + +create table test_set_default ( + id serial primary key, + name text +); +create table test_set_default_child ( + id serial primary key, + parent_id integer default 999 references test_set_default (id) on delete set default, + name text +); +insert into test_set_default (name) values ('Parent'); +insert into test_set_default_child (parent_id, name) values (1, 'Child'); +delete from test_set_default; +ERROR: insert or update on table "test_set_default_child" violates foreign key constraint "test_set_default_child_parent_id_fkey" +DETAIL: Key (parent_id)=(999) is not present in table "test_set_default". 
+select count(*) from test_set_default; + count +------- + 1 +(1 row) + +select count(*) from test_set_default_child; + count +------- + 1 +(1 row) + +select parent_id from test_set_default_child; + parent_id +----------- + 1 +(1 row) + +create table test_no_action ( + id serial primary key, + name text +); +create table test_no_action_child ( + id serial primary key, + parent_id integer references test_no_action (id) on delete no action, + name text +); +insert into test_no_action (name) values ('Parent'); +insert into test_no_action_child (parent_id, name) values (1, 'Child'); +delete from test_no_action; +ERROR: update or delete on table "test_no_action" violates foreign key constraint "test_no_action_child_parent_id_fkey" on table "test_no_action_child" +DETAIL: Key (id)=(1) is still referenced from table "test_no_action_child". +select count(*) from test_no_action; + count +------- + 1 +(1 row) + +select count(*) from test_no_action_child; + count +------- + 1 +(1 row) + +drop table if exists test_cascade_child; +drop table if exists test_cascade; +drop table if exists test_restrict_child; +drop table if exists test_restrict; +drop table if exists test_set_null_child; +drop table if exists test_set_null; +drop table if exists test_set_default_child; +drop table if exists test_set_default; +drop table if exists test_no_action_child; +drop table if exists test_no_action; +drop table if exists child; +drop table if exists parent; +drop table if exists grandparent; diff --git a/nix/tests/expected/z_orioledb-17_docs-full-text-search.out b/nix/tests/expected/z_orioledb-17_docs-full-text-search.out new file mode 100644 index 000000000..329f808f6 --- /dev/null +++ b/nix/tests/expected/z_orioledb-17_docs-full-text-search.out @@ -0,0 +1,298 @@ +-- testing sql found in https://supabase.com/docs/guides/database/full-text-search +create table books ( + id serial primary key, + title text, + author text, + description text +); +insert into books + (title, author, 
description) +values + ( + 'The Poky Little Puppy', + 'Janette Sebring Lowrey', + 'Puppy is slower than other, bigger animals.' + ), + ('The Tale of Peter Rabbit', 'Beatrix Potter', 'Rabbit eats some vegetables.'), + ('Tootle', 'Gertrude Crampton', 'Little toy train has big dreams.'), + ( + 'Green Eggs and Ham', + 'Dr. Seuss', + 'Sam has changing food preferences and eats unusually colored food.' + ), + ( + 'Harry Potter and the Goblet of Fire', + 'J.K. Rowling', + 'Fourth year of school starts, big drama ensues.' + ); +select to_tsvector('green eggs and ham'); + to_tsvector +--------------------------- + 'egg':2 'green':1 'ham':4 +(1 row) + +select to_tsvector('english', 'green eggs and ham'); + to_tsvector +--------------------------- + 'egg':2 'green':1 'ham':4 +(1 row) + +select * +from books +where title = 'Harry'; + id | title | author | description +----+-------+--------+------------- +(0 rows) + +select * +from books +where to_tsvector(title) @@ to_tsquery('Harry'); + id | title | author | description +----+-------------------------------------+--------------+------------------------------------------------- + 5 | Harry Potter and the Goblet of Fire | J.K. Rowling | Fourth year of school starts, big drama ensues. +(1 row) + +select + * +from + books +where + to_tsvector(description) + @@ to_tsquery('big'); + id | title | author | description +----+-------------------------------------+-------------------+------------------------------------------------- + 3 | Tootle | Gertrude Crampton | Little toy train has big dreams. + 5 | Harry Potter and the Goblet of Fire | J.K. Rowling | Fourth year of school starts, big drama ensues. 
+(2 rows) + +select + * +from + books +where + to_tsvector(description || ' ' || title) + @@ to_tsquery('little'); + id | title | author | description +----+-----------------------+------------------------+--------------------------------------------- + 1 | The Poky Little Puppy | Janette Sebring Lowrey | Puppy is slower than other, bigger animals. + 3 | Tootle | Gertrude Crampton | Little toy train has big dreams. +(2 rows) + +create function title_description(books) returns text as $$ + select $1.title || ' ' || $1.description; +$$ language sql immutable; +select + * +from + books +where + to_tsvector(title_description(books.*)) + @@ to_tsquery('little'); + id | title | author | description +----+-----------------------+------------------------+--------------------------------------------- + 1 | The Poky Little Puppy | Janette Sebring Lowrey | Puppy is slower than other, bigger animals. + 3 | Tootle | Gertrude Crampton | Little toy train has big dreams. +(2 rows) + +select + * +from + books +where + to_tsvector(description) + @@ to_tsquery('little & big'); + id | title | author | description +----+--------+-------------------+---------------------------------- + 3 | Tootle | Gertrude Crampton | Little toy train has big dreams. +(1 row) + +select + * +from + books +where + to_tsvector(description) + @@ to_tsquery('little | big'); + id | title | author | description +----+-------------------------------------+-------------------+------------------------------------------------- + 3 | Tootle | Gertrude Crampton | Little toy train has big dreams. + 5 | Harry Potter and the Goblet of Fire | J.K. Rowling | Fourth year of school starts, big drama ensues. 
+(2 rows) + +select title from books where to_tsvector(title) @@ to_tsquery('Lit:*'); + title +----------------------- + The Poky Little Puppy +(1 row) + +create or replace function search_books_by_title_prefix(prefix text) +returns setof books AS $$ +begin + return query + select * from books where to_tsvector('english', title) @@ to_tsquery(prefix || ':*'); +end; +$$ language plpgsql; +select * from search_books_by_title_prefix('Lit'); + id | title | author | description +----+-----------------------+------------------------+--------------------------------------------- + 1 | The Poky Little Puppy | Janette Sebring Lowrey | Puppy is slower than other, bigger animals. +(1 row) + +select * from search_books_by_title_prefix('Little+Puppy'); + id | title | author | description +----+-----------------------+------------------------+--------------------------------------------- + 1 | The Poky Little Puppy | Janette Sebring Lowrey | Puppy is slower than other, bigger animals. +(1 row) + +alter table + books +add column + fts tsvector generated always as (to_tsvector('english', description || ' ' || title)) stored; +create index books_fts on books using gin (fts); +NOTICE: index bridging is enabled for orioledb table 'books' +DETAIL: index access method 'gin' is supported only via index bridging for OrioleDB table +select id, fts +from books; + id | fts +----+----------------------------------------------------------------------------------------------------------------- + 1 | 'anim':7 'bigger':6 'littl':10 'poki':9 'puppi':1,11 'slower':3 + 2 | 'eat':2 'peter':8 'rabbit':1,9 'tale':6 'veget':4 + 3 | 'big':5 'dream':6 'littl':1 'tootl':7 'toy':2 'train':3 + 4 | 'chang':3 'color':9 'eat':7 'egg':12 'food':4,10 'green':11 'ham':14 'prefer':5 'sam':1 'unusu':8 + 5 | 'big':6 'drama':7 'ensu':8 'fire':15 'fourth':1 'goblet':13 'harri':9 'potter':10 'school':4 'start':5 'year':2 +(5 rows) + +select + * +from + books +where + fts @@ to_tsquery('little & big'); + id | title | 
author | description | fts +----+--------+-------------------+----------------------------------+--------------------------------------------------------- + 3 | Tootle | Gertrude Crampton | Little toy train has big dreams. | 'big':5 'dream':6 'littl':1 'tootl':7 'toy':2 'train':3 +(1 row) + +select + * +from + books +where + to_tsvector(description) @@ to_tsquery('big <-> dreams'); + id | title | author | description | fts +----+--------+-------------------+----------------------------------+--------------------------------------------------------- + 3 | Tootle | Gertrude Crampton | Little toy train has big dreams. | 'big':5 'dream':6 'littl':1 'tootl':7 'toy':2 'train':3 +(1 row) + +select + * +from + books +where + to_tsvector(description) @@ to_tsquery('year <2> school'); + id | title | author | description | fts +----+-------------------------------------+--------------+-------------------------------------------------+----------------------------------------------------------------------------------------------------------------- + 5 | Harry Potter and the Goblet of Fire | J.K. Rowling | Fourth year of school starts, big drama ensues. | 'big':6 'drama':7 'ensu':8 'fire':15 'fourth':1 'goblet':13 'harri':9 'potter':10 'school':4 'start':5 'year':2 +(1 row) + +select + * +from + books +where + to_tsvector(description) @@ to_tsquery('big & !little'); + id | title | author | description | fts +----+-------------------------------------+--------------+-------------------------------------------------+----------------------------------------------------------------------------------------------------------------- + 5 | Harry Potter and the Goblet of Fire | J.K. Rowling | Fourth year of school starts, big drama ensues. 
| 'big':6 'drama':7 'ensu':8 'fire':15 'fourth':1 'goblet':13 'harri':9 'potter':10 'school':4 'start':5 'year':2 +(1 row) + +select + * +from + books +where + to_tsvector(title) @@ to_tsquery('harry & potter'); + id | title | author | description | fts +----+-------------------------------------+--------------+-------------------------------------------------+----------------------------------------------------------------------------------------------------------------- + 5 | Harry Potter and the Goblet of Fire | J.K. Rowling | Fourth year of school starts, big drama ensues. | 'big':6 'drama':7 'ensu':8 'fire':15 'fourth':1 'goblet':13 'harri':9 'potter':10 'school':4 'start':5 'year':2 +(1 row) + +select + * +from + books +where + to_tsvector(description) @@ to_tsquery('food & !egg'); + id | title | author | description | fts +----+--------------------+-----------+--------------------------------------------------------------------+--------------------------------------------------------------------------------------------------- + 4 | Green Eggs and Ham | Dr. Seuss | Sam has changing food preferences and eats unusually colored food. | 'chang':3 'color':9 'eat':7 'egg':12 'food':4,10 'green':11 'ham':14 'prefer':5 'sam':1 'unusu':8 +(1 row) + +select + * +from + books +where + to_tsvector(title || ' ' || description) @@ to_tsquery('train & toy'); + id | title | author | description | fts +----+--------+-------------------+----------------------------------+--------------------------------------------------------- + 3 | Tootle | Gertrude Crampton | Little toy train has big dreams. 
| 'big':5 'dream':6 'littl':1 'tootl':7 'toy':2 'train':3 +(1 row) + +select + * +from + books +where + fts @@ to_tsquery('puppy & slow'); + id | title | author | description | fts +----+-------+--------+-------------+----- +(0 rows) + +select + * +from + books +where + fts @@ to_tsquery('rabbit | peter'); + id | title | author | description | fts +----+--------------------------+----------------+------------------------------+--------------------------------------------------- + 2 | The Tale of Peter Rabbit | Beatrix Potter | Rabbit eats some vegetables. | 'eat':2 'peter':8 'rabbit':1,9 'tale':6 'veget':4 +(1 row) + +select + * +from + books +where + fts @@ to_tsquery('harry <-> potter'); + id | title | author | description | fts +----+-------------------------------------+--------------+-------------------------------------------------+----------------------------------------------------------------------------------------------------------------- + 5 | Harry Potter and the Goblet of Fire | J.K. Rowling | Fourth year of school starts, big drama ensues. | 'big':6 'drama':7 'ensu':8 'fire':15 'fourth':1 'goblet':13 'harri':9 'potter':10 'school':4 'start':5 'year':2 +(1 row) + +select + * +from + books +where + fts @@ to_tsquery('fourth <3> year'); + id | title | author | description | fts +----+-------+--------+-------------+----- +(0 rows) + +select + * +from + books +where + fts @@ to_tsquery('big & !drama'); + id | title | author | description | fts +----+--------+-------------------+----------------------------------+--------------------------------------------------------- + 3 | Tootle | Gertrude Crampton | Little toy train has big dreams. 
| 'big':5 'dream':6 'littl':1 'tootl':7 'toy':2 'train':3 +(1 row) + +drop function search_books_by_title_prefix(text); +drop function title_description(books); +drop table books; + diff --git a/nix/tests/expected/z_orioledb-17_docs-functions.out b/nix/tests/expected/z_orioledb-17_docs-functions.out new file mode 100644 index 000000000..72af403b8 --- /dev/null +++ b/nix/tests/expected/z_orioledb-17_docs-functions.out @@ -0,0 +1,230 @@ +-- testing sql found in https://supabase.com/docs/guides/database/functions +create or replace function hello_world() +returns text +language sql +as $$ + select 'hello world'; +$$; +select hello_world(); + hello_world +------------- + hello world +(1 row) + +create table planets ( + id serial primary key, + name text +); +insert into planets + (id, name) +values + (1, 'Tattoine'), + (2, 'Alderaan'), + (3, 'Kashyyyk'); +create table people ( + id serial primary key, + name text, + planet_id bigint references planets +); +insert into people + (id, name, planet_id) +values + (1, 'Anakin Skywalker', 1), + (2, 'Luke Skywalker', 1), + (3, 'Princess Leia', 2), + (4, 'Chewbacca', 3); +create or replace function get_planets() +returns setof planets +language sql +as $$ + select * from planets; +$$; +select * +from get_planets() +where id = 1; + id | name +----+---------- + 1 | Tattoine +(1 row) + +create or replace function add_planet(name text) +returns bigint +language plpgsql +as $$ +declare + new_row bigint; +begin + insert into planets(name) + values (add_planet.name) + returning id into new_row; + + return new_row; +end; +$$; +select * from add_planet('Jakku'); +ERROR: duplicate key value violates unique constraint "planets_pkey" +DETAIL: Key (id)=('1') already exists. 
+CONTEXT: SQL statement "insert into planets(name) + values (add_planet.name) + returning id" +PL/pgSQL function add_planet(text) line 5 at SQL statement +create function hello_world_definer() +returns text +language plpgsql +security definer set search_path = '' +as $$ +begin + select 'hello world'; +end; +$$; +select hello_world_definer(); +ERROR: query has no destination for result data +HINT: If you want to discard the results of a SELECT, use PERFORM instead. +CONTEXT: PL/pgSQL function public.hello_world_definer() line 3 at SQL statement +revoke execute on function public.hello_world from public; +revoke execute on function public.hello_world from anon; +grant execute on function public.hello_world to authenticated; +revoke execute on all functions in schema public from public; +revoke execute on all functions in schema public from anon, authenticated; +alter default privileges in schema public revoke execute on functions from public; +alter default privileges in schema public revoke execute on functions from anon, authenticated; +grant execute on function public.hello_world to authenticated; +create function logging_example( + log_message text, + warning_message text, + error_message text +) +returns void +language plpgsql +as $$ +begin + raise log 'logging message: %', log_message; + raise warning 'logging warning: %', warning_message; + raise exception 'logging error: %', error_message; +end; +$$; +select logging_example('LOGGED MESSAGE', 'WARNING MESSAGE', 'ERROR MESSAGE'); +WARNING: logging warning: WARNING MESSAGE +ERROR: logging error: ERROR MESSAGE +CONTEXT: PL/pgSQL function logging_example(text,text,text) line 5 at RAISE +create or replace function error_if_null(some_val text) +returns text +language plpgsql +as $$ +begin + if some_val is null then + raise exception 'some_val should not be NULL'; + end if; + return some_val; +end; +$$; +select error_if_null('not null'); + error_if_null +--------------- + not null +(1 row) + +create table 
attendance_table ( + id uuid primary key, + student text +); +insert into attendance_table (id, student) values ('123e4567-e89b-12d3-a456-426614174000', 'Harry Potter'); +create function assert_example(name text) +returns uuid +language plpgsql +as $$ +declare + student_id uuid; +begin + select + id into student_id + from attendance_table + where student = name; + + assert student_id is not null, 'assert_example() ERROR: student not found'; + + return student_id; +end; +$$; +select assert_example('Harry Potter'); + assert_example +-------------------------------------- + 123e4567-e89b-12d3-a456-426614174000 +(1 row) + +create function error_example() +returns void +language plpgsql +as $$ +begin + select * from table_that_does_not_exist; + + exception + when others then + raise exception 'An error occurred in function : %', sqlerrm; +end; +$$; +select error_example(); +ERROR: An error occurred in function : relation "table_that_does_not_exist" does not exist +CONTEXT: PL/pgSQL function error_example() line 7 at RAISE +create table some_table ( + col_1 int, + col_2 text +); +insert into some_table (col_1, col_2) values (42, 'test value'); +create or replace function advanced_example(num int default 10) +returns text +language plpgsql +as $$ +declare + var1 int := 20; + var2 text; +begin + raise log 'logging start of function call: (%)', (select now()); + + select + col_1 into var1 + from some_table + limit 1; + raise log 'logging a variable (%)', var1; + + raise log 'logging a query with a single return value(%)', (select col_1 from some_table limit 1); + + raise log 'logging an entire row as JSON (%)', (select to_jsonb(some_table.*) from some_table limit 1); + + insert into some_table (col_2) + values ('new val') + returning col_2 into var2; + + raise log 'logging a value from an INSERT (%)', var2; + + return var1 || ',' || var2; +exception + when others then + raise exception 'An error occurred in function : %', sqlerrm; +end; +$$; +select advanced_example(); + 
advanced_example +------------------ + 42,new val +(1 row) + +drop function advanced_example(int); +drop function error_example(); +drop function assert_example(text); +drop function error_if_null(text); +drop function logging_example(text, text, text); +drop function hello_world_definer(); +drop function add_planet(text); +drop function get_planets(); +drop function hello_world(); +drop table people; +drop table planets; +drop table attendance_table; +drop table some_table; +grant execute on all functions in schema public to public; +grant execute on all functions in schema public to anon, authenticated; +alter default privileges in schema public grant execute on functions to public; +alter default privileges in schema public grant execute on functions to anon, authenticated; + diff --git a/nix/tests/expected/z_orioledb-17_docs-indexes.out b/nix/tests/expected/z_orioledb-17_docs-indexes.out new file mode 100644 index 000000000..5feec8208 --- /dev/null +++ b/nix/tests/expected/z_orioledb-17_docs-indexes.out @@ -0,0 +1,24 @@ +-- testing sql found in https://supabase.com/docs/guides/database/indexes +create table persons ( + id bigint generated by default as identity primary key, + age int, + height int, + weight int, + name text, + deceased boolean +); +insert into persons (age, height, weight, name, deceased) values (32, 180, 70, 'John Doe', false); +select name from persons where age = 32; + name +---------- + John Doe +(1 row) + +create index idx_persons_age on persons (age); +create index idx_living_persons_age on persons (age) where deceased is false; +create index idx_persons_age_desc on persons (age desc nulls last); +reindex index concurrently idx_persons_age; +WARNING: REINDEX CONCURRENTLY is not supported for orioledb tables yet, using a plain REINDEX instead +reindex table concurrently persons; +WARNING: REINDEX CONCURRENTLY is not supported for orioledb tables yet, using a plain REINDEX instead +drop table persons cascade; diff --git 
a/nix/tests/expected/z_orioledb-17_docs-partitioning.out b/nix/tests/expected/z_orioledb-17_docs-partitioning.out new file mode 100644 index 000000000..4ae2c4ba8 --- /dev/null +++ b/nix/tests/expected/z_orioledb-17_docs-partitioning.out @@ -0,0 +1,110 @@ +-- testing sql found in https://supabase.com/docs/guides/database/partitioning +create table sales ( + id bigint generated by default as identity, + order_date date not null, + customer_id bigint, + amount bigint, + primary key (order_date, id) +) +partition by range (order_date); +create table sales_2000_01 + partition of sales + for values from ('2000-01-01') to ('2000-02-01'); +create table sales_2000_02 + partition of sales + for values from ('2000-02-01') to ('2000-03-01'); +insert into sales (order_date, customer_id, amount) values + ('2000-01-15', 1, 100), + ('2000-01-20', 2, 200), + ('2000-02-10', 3, 150), + ('2000-02-25', 4, 300); +select * from sales where order_date >= '2000-01-01' and order_date < '2000-03-01'; + id | order_date | customer_id | amount +----+------------+-------------+-------- + 1 | 01-15-2000 | 1 | 100 + 2 | 01-20-2000 | 2 | 200 + 3 | 02-10-2000 | 3 | 150 + 4 | 02-25-2000 | 4 | 300 +(4 rows) + +select * from sales_2000_02; + id | order_date | customer_id | amount +----+------------+-------------+-------- + 3 | 02-10-2000 | 3 | 150 + 4 | 02-25-2000 | 4 | 300 +(2 rows) + +drop table sales cascade; +create table customers ( + id bigint generated by default as identity, + name text, + country text, + primary key (country, id) +) +partition by list(country); +create table customers_americas + partition of customers + for values in ('US', 'CANADA'); +create table customers_asia + partition of customers + for values in ('INDIA', 'CHINA', 'JAPAN'); +insert into customers (name, country) values + ('John Doe', 'US'), + ('Jane Smith', 'CANADA'), + ('Li Wei', 'CHINA'), + ('Priya Patel', 'INDIA'), + ('Yuki Tanaka', 'JAPAN'); +select * from customers where country in ('US', 'CANADA'); + id | name | 
country +----+------------+--------- + 2 | Jane Smith | CANADA + 1 | John Doe | US +(2 rows) + +select * from customers_asia; + id | name | country +----+-------------+--------- + 3 | Li Wei | CHINA + 4 | Priya Patel | INDIA + 5 | Yuki Tanaka | JAPAN +(3 rows) + +drop table customers cascade; +create table products ( + id bigint generated by default as identity, + name text, + category text, + price bigint +) +partition by hash (id); +create table products_one + partition of products + for values with (modulus 2, remainder 1); +create table products_two + partition of products + for values with (modulus 2, remainder 0); +insert into products (name, category, price) values + ('Laptop', 'Electronics', 999), + ('Phone', 'Electronics', 599), + ('Book', 'Education', 29), + ('Chair', 'Furniture', 199); +select * from products where category = 'Electronics'; + id | name | category | price +----+--------+-------------+------- + 1 | Laptop | Electronics | 999 + 2 | Phone | Electronics | 599 +(2 rows) + +select count(*) from products_one; + count +------- + 2 +(1 row) + +select count(*) from products_two; + count +------- + 2 +(1 row) + +drop table products cascade; diff --git a/nix/tests/expected/z_orioledb-17_ext_interface.out b/nix/tests/expected/z_orioledb-17_ext_interface.out index 1b09ed17e..4dafb1c87 100644 --- a/nix/tests/expected/z_orioledb-17_ext_interface.out +++ b/nix/tests/expected/z_orioledb-17_ext_interface.out @@ -70,6 +70,7 @@ order by lo | t ltree | t moddatetime | t + orioledb | t pageinspect | t pg_buffercache | t pg_freespacemap | t @@ -117,7 +118,7 @@ order by vector | t wrappers | t xml2 | f -(73 rows) +(74 rows) /* @@ -1078,6 +1079,50 @@ order by ltree | public | subpath | ltree, integer, integer | ltree ltree | public | text2ltree | text | ltree moddatetime | public | moddatetime | | trigger + orioledb | extensions | orioledb_commit_hash | | text + orioledb | extensions | orioledb_compression_max_level | | bigint + orioledb | extensions | 
orioledb_evict_pages | relid oid, maxlevel integer | void + orioledb | extensions | orioledb_get_complete_oxid | | bigint + orioledb | extensions | orioledb_get_complete_xid | | integer + orioledb | extensions | orioledb_get_current_oxid | | bigint + orioledb | extensions | orioledb_get_evicted_trees | OUT datoid oid, OUT relnode oid, OUT root_downlink bigint, OUT file_length bigint | SETOF record + orioledb | extensions | orioledb_get_index_descrs | OUT datoid oid, OUT reloid oid, OUT relnode oid, OUT refcnt oid | SETOF record + orioledb | extensions | orioledb_get_rewind_evicted_length | | bigint + orioledb | extensions | orioledb_get_rewind_queue_length | | bigint + orioledb | extensions | orioledb_get_table_descrs | OUT datoid oid, OUT reloid oid, OUT relnode oid, OUT refcnt oid | SETOF record + orioledb | extensions | orioledb_has_retained_undo | | boolean + orioledb | extensions | orioledb_idx_structure | relid oid, tree_name text, options character varying, depth integer | text + orioledb | extensions | orioledb_index_description | datoid oid, relid oid, relnode oid, index_type text, OUT name text, OUT description text | record + orioledb | extensions | orioledb_index_oids | OUT datoid oid, OUT table_reloid oid, OUT table_relnode oid, OUT index_reloid oid, OUT index_relnode oid, OUT index_type text | SETOF record + orioledb | extensions | orioledb_index_rows | relid oid, OUT total integer, OUT dead integer | record + orioledb | extensions | orioledb_page_stats | OUT pool_name text, OUT busy_pages bigint, OUT free_pages bigint, OUT dirty_pages bigint, OUT all_pages bigint | SETOF record + orioledb | extensions | orioledb_recovery_synchronized | | boolean + orioledb | extensions | orioledb_relation_size | relid oid | bigint + orioledb | extensions | orioledb_rewind_by_time | rewind_time integer | void + orioledb | extensions | orioledb_rewind_to_timestamp | rewind_timestamp timestamp with time zone | void + orioledb | extensions | 
orioledb_rewind_to_transaction | xid integer, oxid bigint | void + orioledb | extensions | orioledb_sys_tree_check | num integer, force_map_check boolean | boolean + orioledb | extensions | orioledb_sys_tree_rows | num integer | SETOF jsonb + orioledb | extensions | orioledb_sys_tree_structure | num integer, options character varying, depth integer | text + orioledb | extensions | orioledb_table_description | relid oid | text + orioledb | extensions | orioledb_table_description | datoid oid, relid oid, relnode oid | text + orioledb | extensions | orioledb_table_oids | OUT datoid oid, OUT reloid oid, OUT relnode oid | SETOF record + orioledb | extensions | orioledb_table_pages | relid oid, OUT blkno bigint, OUT level integer, OUT rightlink bigint, OUT hikey jsonb | SETOF record + orioledb | extensions | orioledb_tableam_handler | internal | table_am_handler + orioledb | extensions | orioledb_tbl_are_indices_equal | idx_oid1 regclass, idx_oid2 regclass | boolean + orioledb | extensions | orioledb_tbl_bin_structure | relid oid, print_bytes boolean, depth integer | text + orioledb | extensions | orioledb_tbl_check | relid oid, force_map_check boolean | boolean + orioledb | extensions | orioledb_tbl_compression_check | level bigint, relid oid, ranges integer[] | text + orioledb | extensions | orioledb_tbl_indices | relid oid | text + orioledb | extensions | orioledb_tbl_indices | relid oid, internal boolean, oids boolean | text + orioledb | extensions | orioledb_tbl_structure | relid oid, options character varying, depth integer | text + orioledb | extensions | orioledb_tree_stat | relid regclass, OUT level integer, OUT count bigint, OUT avgoccupied double precision, OUT avgvacated double precision | SETOF record + orioledb | extensions | orioledb_ucm_check | | boolean + orioledb | extensions | orioledb_version | | text + orioledb | extensions | orioledb_write_pages | relid oid | void + orioledb | extensions | pg_stopevent_reset | eventname text | boolean + orioledb | 
extensions | pg_stopevent_set | eventname text, condition jsonpath | void + orioledb | extensions | pg_stopevents | OUT stopevent text, OUT condition jsonpath, OUT waiter_pids integer[] | SETOF record pageinspect | public | brin_metapage_info | page bytea, OUT magic text, OUT version integer, OUT pagesperrange integer, OUT lastrevmappage bigint | record pageinspect | public | brin_page_type | page bytea | text pageinspect | public | brin_revmap_data | page bytea, OUT pages tid | SETOF tid @@ -4937,7 +4982,7 @@ order by xml2 | public | xpath_table | text, text, text, text, text | SETOF record xml2 | public | xslt_process | text, text | text xml2 | public | xslt_process | text, text, text | text -(4778 rows) +(4822 rows) /* @@ -4997,6 +5042,26 @@ order by hypopg | public | hypopg_list_indexes | indexrelid hypopg | public | hypopg_list_indexes | schema_name hypopg | public | hypopg_list_indexes | table_name + orioledb | extensions | orioledb_index | datoid + orioledb | extensions | orioledb_index | description + orioledb | extensions | orioledb_index | index_relnode + orioledb | extensions | orioledb_index | index_reloid + orioledb | extensions | orioledb_index | index_type + orioledb | extensions | orioledb_index | name + orioledb | extensions | orioledb_index | table_relnode + orioledb | extensions | orioledb_index | table_reloid + orioledb | extensions | orioledb_index_descr | datoid + orioledb | extensions | orioledb_index_descr | refcnt + orioledb | extensions | orioledb_index_descr | relnode + orioledb | extensions | orioledb_index_descr | reloid + orioledb | extensions | orioledb_table | datoid + orioledb | extensions | orioledb_table | description + orioledb | extensions | orioledb_table | relnode + orioledb | extensions | orioledb_table | reloid + orioledb | extensions | orioledb_table_descr | datoid + orioledb | extensions | orioledb_table_descr | refcnt + orioledb | extensions | orioledb_table_descr | relnode + orioledb | extensions | orioledb_table_descr | 
reloid pg_buffercache | public | pg_buffercache | bufferid pg_buffercache | public | pg_buffercache | isdirty pg_buffercache | public | pg_buffercache | pinning_backends @@ -5420,5 +5485,5 @@ order by wrappers | public | wrappers_fdw_stats | rows_in wrappers | public | wrappers_fdw_stats | rows_out wrappers | public | wrappers_fdw_stats | updated_at -(449 rows) +(469 rows) diff --git a/nix/tests/expected/z_orioledb-17_extensions_schema.out b/nix/tests/expected/z_orioledb-17_extensions_schema.out new file mode 100644 index 000000000..2eafa8119 --- /dev/null +++ b/nix/tests/expected/z_orioledb-17_extensions_schema.out @@ -0,0 +1,23 @@ +-- all default extensions are installed in a schema "extensions" +-- we don't include the version as that will break often, we only care about +-- ensuring these extensions are present +select + e.extname as extension_name, + n.nspname as schema_name, + e.extowner::regrole as extension_owner +from + pg_extension e +join + pg_namespace n on e.extnamespace = n.oid +where + n.nspname = 'extensions' and e.extname != 'pgjwt' +order by + e.extname; + extension_name | schema_name | extension_owner +--------------------+-------------+----------------- + orioledb | extensions | supabase_admin + pg_stat_statements | extensions | supabase_admin + pgcrypto | extensions | supabase_admin + uuid-ossp | extensions | supabase_admin +(4 rows) + diff --git a/nix/tests/expected/z_orioledb-17_index_advisor.out b/nix/tests/expected/z_orioledb-17_index_advisor.out new file mode 100644 index 000000000..3fc2e4fff --- /dev/null +++ b/nix/tests/expected/z_orioledb-17_index_advisor.out @@ -0,0 +1,16 @@ +create schema v; +create table v.book( + id int primary key, + title text not null +); +select + index_statements, errors +from + index_advisor('select id from v.book where title = $1'); + index_statements | errors +------------------+-------------------------------------------- + {} | {"could not open relation with OID 14007"} +(1 row) + +drop schema v cascade; 
+NOTICE: drop cascades to table v.book diff --git a/nix/tests/expected/z_orioledb-17_pgroonga.out b/nix/tests/expected/z_orioledb-17_pgroonga.out new file mode 100644 index 000000000..f338b728d --- /dev/null +++ b/nix/tests/expected/z_orioledb-17_pgroonga.out @@ -0,0 +1,78 @@ +create schema v; +create table v.roon( + id serial primary key, + content text +); +with tokenizers as ( + select + x + from + jsonb_array_elements( + (select pgroonga_command('tokenizer_list'))::jsonb + ) x(val) + limit + 1 + offset + 1 -- first record is unrelated and not stable +) +select + t.x::jsonb ->> 'name' +from + jsonb_array_elements((select * from tokenizers)) t(x) +order by + t.x::jsonb ->> 'name'; + ?column? +--------------------------------------------- + TokenBigram + TokenBigramIgnoreBlank + TokenBigramIgnoreBlankSplitSymbol + TokenBigramIgnoreBlankSplitSymbolAlpha + TokenBigramIgnoreBlankSplitSymbolAlphaDigit + TokenBigramSplitSymbol + TokenBigramSplitSymbolAlpha + TokenBigramSplitSymbolAlphaDigit + TokenDelimit + TokenDelimitNull + TokenDocumentVectorBM25 + TokenDocumentVectorTFIDF + TokenMecab + TokenNgram + TokenPattern + TokenRegexp + TokenTable + TokenTrigram + TokenUnigram +(19 rows) + +insert into v.roon (content) +values + ('Hello World'), + ('PostgreSQL with PGroonga is a thing'), + ('This is a full-text search test'), + ('PGroonga supports various languages'); +-- Create default index +create index pgroonga_index on v.roon using pgroonga (content); +NOTICE: index bridging is enabled for orioledb table 'roon' +DETAIL: index access method 'pgroonga' is supported only via index bridging for OrioleDB table +-- Create mecab tokenizer index since we had a bug with this one once +create index pgroonga_index_mecab on v.roon using pgroonga (content) with (tokenizer='TokenMecab'); +-- Run some queries to test the index +select * from v.roon where content &@~ 'Hello'; + id | content +----+------------- + 1 | Hello World +(1 row) + +select * from v.roon where content &@~ 
'powerful'; + id | content +----+--------- +(0 rows) + +select * from v.roon where content &@~ 'supports'; + id | content +----+------------------------------------- + 4 | PGroonga supports various languages +(1 row) + +drop schema v cascade; +NOTICE: drop cascades to table v.roon diff --git a/nix/tests/expected/z_orioledb-17_verify_orioledb.out b/nix/tests/expected/z_orioledb-17_verify_orioledb.out new file mode 100644 index 000000000..ed2dfd42e --- /dev/null +++ b/nix/tests/expected/z_orioledb-17_verify_orioledb.out @@ -0,0 +1,34 @@ +-- Verify OrioleDB is actually loaded and working +-- This test will FAIL if orioledb is not properly configured +-- Check orioledb access method exists +SELECT amname FROM pg_am WHERE amname = 'orioledb'; + amname +---------- + orioledb +(1 row) + +-- Check orioledb is in shared_preload_libraries +SELECT setting LIKE '%orioledb%' AS orioledb_preloaded +FROM pg_settings WHERE name = 'shared_preload_libraries'; + orioledb_preloaded +-------------------- + t +(1 row) + +-- Create a table using orioledb access method +CREATE TABLE test_orioledb_verify ( + id int PRIMARY KEY, + data text +) USING orioledb; +-- Verify it's actually using orioledb storage +SELECT relname, amname +FROM pg_class c +JOIN pg_am a ON c.relam = a.oid +WHERE relname = 'test_orioledb_verify'; + relname | amname +----------------------+---------- + test_orioledb_verify | orioledb +(1 row) + +-- Cleanup +DROP TABLE test_orioledb_verify; diff --git a/nix/tests/sql/z_orioledb-17_docs-cascades-deletes.sql b/nix/tests/sql/z_orioledb-17_docs-cascades-deletes.sql new file mode 100644 index 000000000..5a3f75c73 --- /dev/null +++ b/nix/tests/sql/z_orioledb-17_docs-cascades-deletes.sql @@ -0,0 +1,262 @@ +-- testing sql found in https://supabase.com/docs/guides/database/postgres/cascades-deletes +-- all of the errors produced by this file are expected + +create table grandparent ( + id serial primary key, + name text +); + +create table parent ( + id serial primary key, + 
name text, + parent_id integer references grandparent (id) + on delete cascade +); + +create table child ( + id serial primary key, + name text, + father integer references parent (id) + on delete restrict +); + +insert into grandparent + (id, name) +values + (1, 'Elizabeth'); + +insert into parent + (id, name, parent_id) +values + (1, 'Charles', 1); + +insert into parent + (id, name, parent_id) +values + (2, 'Diana', 1); + +insert into child + (id, name, father) +values + (1, 'William', 1); + +select count(*) from grandparent; +select count(*) from parent; +select count(*) from child; + +delete from grandparent; + +select count(*) from grandparent; +select count(*) from parent; +select count(*) from child; + +insert into grandparent + (id, name) +values + (1, 'Elizabeth'); + +insert into parent + (id, name, parent_id) +values + (1, 'Charles', 1); + +insert into parent + (id, name, parent_id) +values + (2, 'Diana', 1); + +insert into child + (id, name, father) +values + (1, 'William', 1); + +alter table child +drop constraint child_father_fkey; + +alter table child +add constraint child_father_fkey foreign key (father) references parent (id) + on delete no action; + +delete from grandparent; + +select count(*) from grandparent; +select count(*) from parent; +select count(*) from child; + +insert into grandparent + (id, name) +values + (1, 'Elizabeth'); + +insert into parent + (id, name, parent_id) +values + (1, 'Charles', 1); + +insert into parent + (id, name, parent_id) +values + (2, 'Diana', 1); + +insert into child + (id, name, father) +values + (1, 'William', 1); + +alter table child +drop constraint child_father_fkey; + +alter table child +add constraint child_father_fkey foreign key (father) references parent (id) + on delete no action initially deferred; + +delete from grandparent; + +select count(*) from grandparent; +select count(*) from parent; +select count(*) from child; + +insert into grandparent + (id, name) +values + (1, 'Elizabeth'); + +insert into 
parent + (id, name, parent_id) +values + (1, 'Charles', 1); + +insert into parent + (id, name, parent_id) +values + (2, 'Diana', 1); + +insert into child + (id, name, father) +values + (1, 'William', 1); + +alter table child +add column mother integer references parent (id) + on delete cascade; + +update child +set mother = 2 +where id = 1; + +delete from grandparent; + +select count(*) from grandparent; +select count(*) from parent; +select count(*) from child; + +create table test_cascade ( + id serial primary key, + name text +); + +create table test_cascade_child ( + id serial primary key, + parent_id integer references test_cascade (id) on delete cascade, + name text +); + +insert into test_cascade (name) values ('Parent'); +insert into test_cascade_child (parent_id, name) values (1, 'Child'); + +delete from test_cascade; + +select count(*) from test_cascade; +select count(*) from test_cascade_child; + +create table test_restrict ( + id serial primary key, + name text +); + +create table test_restrict_child ( + id serial primary key, + parent_id integer references test_restrict (id) on delete restrict, + name text +); + +insert into test_restrict (name) values ('Parent'); +insert into test_restrict_child (parent_id, name) values (1, 'Child'); + +delete from test_restrict; + +select count(*) from test_restrict; +select count(*) from test_restrict_child; + +create table test_set_null ( + id serial primary key, + name text +); + +create table test_set_null_child ( + id serial primary key, + parent_id integer references test_set_null (id) on delete set null, + name text +); + +insert into test_set_null (name) values ('Parent'); +insert into test_set_null_child (parent_id, name) values (1, 'Child'); + +delete from test_set_null; + +select count(*) from test_set_null; +select count(*) from test_set_null_child; +select parent_id from test_set_null_child; + +create table test_set_default ( + id serial primary key, + name text +); + +create table test_set_default_child 
( + id serial primary key, + parent_id integer default 999 references test_set_default (id) on delete set default, + name text +); + +insert into test_set_default (name) values ('Parent'); +insert into test_set_default_child (parent_id, name) values (1, 'Child'); + +delete from test_set_default; + +select count(*) from test_set_default; +select count(*) from test_set_default_child; +select parent_id from test_set_default_child; + +create table test_no_action ( + id serial primary key, + name text +); + +create table test_no_action_child ( + id serial primary key, + parent_id integer references test_no_action (id) on delete no action, + name text +); + +insert into test_no_action (name) values ('Parent'); +insert into test_no_action_child (parent_id, name) values (1, 'Child'); + +delete from test_no_action; + +select count(*) from test_no_action; +select count(*) from test_no_action_child; + +drop table if exists test_cascade_child; +drop table if exists test_cascade; +drop table if exists test_restrict_child; +drop table if exists test_restrict; +drop table if exists test_set_null_child; +drop table if exists test_set_null; +drop table if exists test_set_default_child; +drop table if exists test_set_default; +drop table if exists test_no_action_child; +drop table if exists test_no_action; +drop table if exists child; +drop table if exists parent; +drop table if exists grandparent; diff --git a/nix/tests/sql/z_orioledb-17_docs-full-text-search.sql b/nix/tests/sql/z_orioledb-17_docs-full-text-search.sql new file mode 100644 index 000000000..d62521e15 --- /dev/null +++ b/nix/tests/sql/z_orioledb-17_docs-full-text-search.sql @@ -0,0 +1,197 @@ +-- testing sql found in https://supabase.com/docs/guides/database/full-text-search +create table books ( + id serial primary key, + title text, + author text, + description text +); + +insert into books + (title, author, description) +values + ( + 'The Poky Little Puppy', + 'Janette Sebring Lowrey', + 'Puppy is slower than other, 
bigger animals.' + ), + ('The Tale of Peter Rabbit', 'Beatrix Potter', 'Rabbit eats some vegetables.'), + ('Tootle', 'Gertrude Crampton', 'Little toy train has big dreams.'), + ( + 'Green Eggs and Ham', + 'Dr. Seuss', + 'Sam has changing food preferences and eats unusually colored food.' + ), + ( + 'Harry Potter and the Goblet of Fire', + 'J.K. Rowling', + 'Fourth year of school starts, big drama ensues.' + ); + +select to_tsvector('green eggs and ham'); + +select to_tsvector('english', 'green eggs and ham'); + +select * +from books +where title = 'Harry'; + +select * +from books +where to_tsvector(title) @@ to_tsquery('Harry'); + +select + * +from + books +where + to_tsvector(description) + @@ to_tsquery('big'); + +select + * +from + books +where + to_tsvector(description || ' ' || title) + @@ to_tsquery('little'); + +create function title_description(books) returns text as $$ + select $1.title || ' ' || $1.description; +$$ language sql immutable; + +select + * +from + books +where + to_tsvector(title_description(books.*)) + @@ to_tsquery('little'); + +select + * +from + books +where + to_tsvector(description) + @@ to_tsquery('little & big'); + +select + * +from + books +where + to_tsvector(description) + @@ to_tsquery('little | big'); + +select title from books where to_tsvector(title) @@ to_tsquery('Lit:*'); + +create or replace function search_books_by_title_prefix(prefix text) +returns setof books AS $$ +begin + return query + select * from books where to_tsvector('english', title) @@ to_tsquery(prefix || ':*'); +end; +$$ language plpgsql; + +select * from search_books_by_title_prefix('Lit'); + +select * from search_books_by_title_prefix('Little+Puppy'); + +alter table + books +add column + fts tsvector generated always as (to_tsvector('english', description || ' ' || title)) stored; + +create index books_fts on books using gin (fts); + +select id, fts +from books; + +select + * +from + books +where + fts @@ to_tsquery('little & big'); + +select + * +from + 
books +where + to_tsvector(description) @@ to_tsquery('big <-> dreams'); + +select + * +from + books +where + to_tsvector(description) @@ to_tsquery('year <2> school'); + +select + * +from + books +where + to_tsvector(description) @@ to_tsquery('big & !little'); + +select + * +from + books +where + to_tsvector(title) @@ to_tsquery('harry & potter'); + +select + * +from + books +where + to_tsvector(description) @@ to_tsquery('food & !egg'); + +select + * +from + books +where + to_tsvector(title || ' ' || description) @@ to_tsquery('train & toy'); + +select + * +from + books +where + fts @@ to_tsquery('puppy & slow'); + +select + * +from + books +where + fts @@ to_tsquery('rabbit | peter'); + +select + * +from + books +where + fts @@ to_tsquery('harry <-> potter'); + +select + * +from + books +where + fts @@ to_tsquery('fourth <3> year'); + +select + * +from + books +where + fts @@ to_tsquery('big & !drama'); + +drop function search_books_by_title_prefix(text); +drop function title_description(books); +drop table books; + \ No newline at end of file diff --git a/nix/tests/sql/z_orioledb-17_docs-functions.sql b/nix/tests/sql/z_orioledb-17_docs-functions.sql new file mode 100644 index 000000000..f91f17261 --- /dev/null +++ b/nix/tests/sql/z_orioledb-17_docs-functions.sql @@ -0,0 +1,225 @@ +-- testing sql found in https://supabase.com/docs/guides/database/functions + +create or replace function hello_world() +returns text +language sql +as $$ + select 'hello world'; +$$; + +select hello_world(); + +create table planets ( + id serial primary key, + name text +); + +insert into planets + (id, name) +values + (1, 'Tattoine'), + (2, 'Alderaan'), + (3, 'Kashyyyk'); + +create table people ( + id serial primary key, + name text, + planet_id bigint references planets +); + +insert into people + (id, name, planet_id) +values + (1, 'Anakin Skywalker', 1), + (2, 'Luke Skywalker', 1), + (3, 'Princess Leia', 2), + (4, 'Chewbacca', 3); + +create or replace function get_planets() 
+returns setof planets +language sql +as $$ + select * from planets; +$$; + +select * +from get_planets() +where id = 1; + +create or replace function add_planet(name text) +returns bigint +language plpgsql +as $$ +declare + new_row bigint; +begin + insert into planets(name) + values (add_planet.name) + returning id into new_row; + + return new_row; +end; +$$; + +select * from add_planet('Jakku'); + +create function hello_world_definer() +returns text +language plpgsql +security definer set search_path = '' +as $$ +begin + select 'hello world'; +end; +$$; + +select hello_world_definer(); + +revoke execute on function public.hello_world from public; +revoke execute on function public.hello_world from anon; + +grant execute on function public.hello_world to authenticated; + +revoke execute on all functions in schema public from public; +revoke execute on all functions in schema public from anon, authenticated; + +alter default privileges in schema public revoke execute on functions from public; +alter default privileges in schema public revoke execute on functions from anon, authenticated; + +grant execute on function public.hello_world to authenticated; + +create function logging_example( + log_message text, + warning_message text, + error_message text +) +returns void +language plpgsql +as $$ +begin + raise log 'logging message: %', log_message; + raise warning 'logging warning: %', warning_message; + raise exception 'logging error: %', error_message; +end; +$$; + +select logging_example('LOGGED MESSAGE', 'WARNING MESSAGE', 'ERROR MESSAGE'); + +create or replace function error_if_null(some_val text) +returns text +language plpgsql +as $$ +begin + if some_val is null then + raise exception 'some_val should not be NULL'; + end if; + return some_val; +end; +$$; + +select error_if_null('not null'); + +create table attendance_table ( + id uuid primary key, + student text +); + +insert into attendance_table (id, student) values ('123e4567-e89b-12d3-a456-426614174000', 
'Harry Potter'); + +create function assert_example(name text) +returns uuid +language plpgsql +as $$ +declare + student_id uuid; +begin + select + id into student_id + from attendance_table + where student = name; + + assert student_id is not null, 'assert_example() ERROR: student not found'; + + return student_id; +end; +$$; + +select assert_example('Harry Potter'); + +create function error_example() +returns void +language plpgsql +as $$ +begin + select * from table_that_does_not_exist; + + exception + when others then + raise exception 'An error occurred in function : %', sqlerrm; +end; +$$; + +select error_example(); + +create table some_table ( + col_1 int, + col_2 text +); + +insert into some_table (col_1, col_2) values (42, 'test value'); + +create or replace function advanced_example(num int default 10) +returns text +language plpgsql +as $$ +declare + var1 int := 20; + var2 text; +begin + raise log 'logging start of function call: (%)', (select now()); + + select + col_1 into var1 + from some_table + limit 1; + raise log 'logging a variable (%)', var1; + + raise log 'logging a query with a single return value(%)', (select col_1 from some_table limit 1); + + raise log 'logging an entire row as JSON (%)', (select to_jsonb(some_table.*) from some_table limit 1); + + insert into some_table (col_2) + values ('new val') + returning col_2 into var2; + + raise log 'logging a value from an INSERT (%)', var2; + + return var1 || ',' || var2; +exception + when others then + raise exception 'An error occurred in function : %', sqlerrm; +end; +$$; + +select advanced_example(); + +drop function advanced_example(int); +drop function error_example(); +drop function assert_example(text); +drop function error_if_null(text); +drop function logging_example(text, text, text); +drop function hello_world_definer(); +drop function add_planet(text); +drop function get_planets(); +drop function hello_world(); +drop table people; +drop table planets; +drop table attendance_table; 
+drop table some_table; + +grant execute on all functions in schema public to public; +grant execute on all functions in schema public to anon, authenticated; + +alter default privileges in schema public grant execute on functions to public; +alter default privileges in schema public grant execute on functions to anon, authenticated; + \ No newline at end of file diff --git a/nix/tests/sql/z_orioledb-17_docs-indexes.sql b/nix/tests/sql/z_orioledb-17_docs-indexes.sql new file mode 100644 index 000000000..9d874adb0 --- /dev/null +++ b/nix/tests/sql/z_orioledb-17_docs-indexes.sql @@ -0,0 +1,26 @@ +-- testing sql found in https://supabase.com/docs/guides/database/indexes + +create table persons ( + id bigint generated by default as identity primary key, + age int, + height int, + weight int, + name text, + deceased boolean +); + +insert into persons (age, height, weight, name, deceased) values (32, 180, 70, 'John Doe', false); + +select name from persons where age = 32; + +create index idx_persons_age on persons (age); + +create index idx_living_persons_age on persons (age) where deceased is false; + +create index idx_persons_age_desc on persons (age desc nulls last); + +reindex index concurrently idx_persons_age; + +reindex table concurrently persons; + +drop table persons cascade; diff --git a/nix/tests/sql/z_orioledb-17_docs-partitioning.sql b/nix/tests/sql/z_orioledb-17_docs-partitioning.sql new file mode 100644 index 000000000..6037d526a --- /dev/null +++ b/nix/tests/sql/z_orioledb-17_docs-partitioning.sql @@ -0,0 +1,89 @@ +-- testing sql found in https://supabase.com/docs/guides/database/partitioning + +create table sales ( + id bigint generated by default as identity, + order_date date not null, + customer_id bigint, + amount bigint, + primary key (order_date, id) +) +partition by range (order_date); + +create table sales_2000_01 + partition of sales + for values from ('2000-01-01') to ('2000-02-01'); + +create table sales_2000_02 + partition of sales + for 
values from ('2000-02-01') to ('2000-03-01'); + +insert into sales (order_date, customer_id, amount) values + ('2000-01-15', 1, 100), + ('2000-01-20', 2, 200), + ('2000-02-10', 3, 150), + ('2000-02-25', 4, 300); + +select * from sales where order_date >= '2000-01-01' and order_date < '2000-03-01'; + +select * from sales_2000_02; + +drop table sales cascade; + +create table customers ( + id bigint generated by default as identity, + name text, + country text, + primary key (country, id) +) +partition by list(country); + +create table customers_americas + partition of customers + for values in ('US', 'CANADA'); + +create table customers_asia + partition of customers + for values in ('INDIA', 'CHINA', 'JAPAN'); + +insert into customers (name, country) values + ('John Doe', 'US'), + ('Jane Smith', 'CANADA'), + ('Li Wei', 'CHINA'), + ('Priya Patel', 'INDIA'), + ('Yuki Tanaka', 'JAPAN'); + +select * from customers where country in ('US', 'CANADA'); + +select * from customers_asia; + +drop table customers cascade; + +create table products ( + id bigint generated by default as identity, + name text, + category text, + price bigint +) +partition by hash (id); + +create table products_one + partition of products + for values with (modulus 2, remainder 1); + +create table products_two + partition of products + for values with (modulus 2, remainder 0); + +insert into products (name, category, price) values + ('Laptop', 'Electronics', 999), + ('Phone', 'Electronics', 599), + ('Book', 'Education', 29), + ('Chair', 'Furniture', 199); + +select * from products where category = 'Electronics'; + +select count(*) from products_one; + +select count(*) from products_two; + +drop table products cascade; diff --git a/nix/tests/sql/z_orioledb-17_extensions_schema.sql b/nix/tests/sql/z_orioledb-17_extensions_schema.sql new file mode 100644 index 000000000..f3d0b3528 --- /dev/null +++ b/nix/tests/sql/z_orioledb-17_extensions_schema.sql @@ -0,0 +1,15 @@ +-- all default extensions are 
installed in a schema "extensions" +-- we don't include the version as that will break often, we only care about +-- ensuring these extensions are present +select + e.extname as extension_name, + n.nspname as schema_name, + e.extowner::regrole as extension_owner +from + pg_extension e +join + pg_namespace n on e.extnamespace = n.oid +where + n.nspname = 'extensions' and e.extname != 'pgjwt' +order by + e.extname; diff --git a/nix/tests/sql/z_orioledb-17_index_advisor.sql b/nix/tests/sql/z_orioledb-17_index_advisor.sql new file mode 100644 index 000000000..3911d6eb5 --- /dev/null +++ b/nix/tests/sql/z_orioledb-17_index_advisor.sql @@ -0,0 +1,13 @@ +create schema v; + +create table v.book( + id int primary key, + title text not null +); + +select + index_statements, errors +from + index_advisor('select id from v.book where title = $1'); + +drop schema v cascade; diff --git a/nix/tests/sql/z_orioledb-17_pgroonga.sql b/nix/tests/sql/z_orioledb-17_pgroonga.sql new file mode 100644 index 000000000..503f2665c --- /dev/null +++ b/nix/tests/sql/z_orioledb-17_pgroonga.sql @@ -0,0 +1,48 @@ +create schema v; + +create table v.roon( + id serial primary key, + content text +); + + +with tokenizers as ( + select + x + from + jsonb_array_elements( + (select pgroonga_command('tokenizer_list'))::jsonb + ) x(val) + limit + 1 + offset + 1 -- first record is unrelated and not stable +) +select + t.x::jsonb ->> 'name' +from + jsonb_array_elements((select * from tokenizers)) t(x) +order by + t.x::jsonb ->> 'name'; + + +insert into v.roon (content) +values + ('Hello World'), + ('PostgreSQL with PGroonga is a thing'), + ('This is a full-text search test'), + ('PGroonga supports various languages'); + +-- Create default index +create index pgroonga_index on v.roon using pgroonga (content); + +-- Create mecab tokenizer index since we had a bug with this one once +create index pgroonga_index_mecab on v.roon using pgroonga (content) with (tokenizer='TokenMecab'); + +-- Run some queries to test 
the index +select * from v.roon where content &@~ 'Hello'; +select * from v.roon where content &@~ 'powerful'; +select * from v.roon where content &@~ 'supports'; + + +drop schema v cascade; diff --git a/nix/tests/sql/z_orioledb-17_verify_orioledb.sql b/nix/tests/sql/z_orioledb-17_verify_orioledb.sql new file mode 100644 index 000000000..168ddfc74 --- /dev/null +++ b/nix/tests/sql/z_orioledb-17_verify_orioledb.sql @@ -0,0 +1,24 @@ +-- Verify OrioleDB is actually loaded and working +-- This test will FAIL if orioledb is not properly configured + +-- Check orioledb access method exists +SELECT amname FROM pg_am WHERE amname = 'orioledb'; + +-- Check orioledb is in shared_preload_libraries +SELECT setting LIKE '%orioledb%' AS orioledb_preloaded +FROM pg_settings WHERE name = 'shared_preload_libraries'; + +-- Create a table using orioledb access method +CREATE TABLE test_orioledb_verify ( + id int PRIMARY KEY, + data text +) USING orioledb; + +-- Verify it's actually using orioledb storage +SELECT relname, amname +FROM pg_class c +JOIN pg_am a ON c.relam = a.oid +WHERE relname = 'test_orioledb_verify'; + +-- Cleanup +DROP TABLE test_orioledb_verify; diff --git a/test-docker-image.sh b/test-docker-image.sh new file mode 100755 index 000000000..ef74b04ae --- /dev/null +++ b/test-docker-image.sh @@ -0,0 +1,424 @@ +#!/usr/bin/env bash +# Test a PostgreSQL Docker image against the pg_regress test suite +# +# Usage: +# ./test-docker-image.sh Dockerfile-17 +# ./test-docker-image.sh Dockerfile-15 +# ./test-docker-image.sh Dockerfile-orioledb-17 +# +# Dependencies: +# - Docker +# - Nix (for psql and pg_regress) + +set -euo pipefail + +# Configuration +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +TESTS_DIR="$SCRIPT_DIR/nix/tests" +TESTS_SQL_DIR="$TESTS_DIR/sql" +HTTP_MOCK_SERVER="$TESTS_DIR/http-mock-server.py" +CONTAINER_NAME="" +IMAGE_TAG="" +POSTGRES_USER="supabase_admin" +POSTGRES_DB="postgres" +POSTGRES_PASSWORD="postgres" +OUTPUT_DIR="" +HTTP_MOCK_PORT="" + 
+# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +NC='\033[0m' # No Color + +log_info() { + echo -e "${GREEN}[INFO]${NC} $1" +} + +log_warn() { + echo -e "${YELLOW}[WARN]${NC} $1" +} + +log_error() { + echo -e "${RED}[ERROR]${NC} $1" +} + +print_help() { + cat << 'EOF' +Usage: ./test-docker-image.sh [OPTIONS] DOCKERFILE + +Test a PostgreSQL Docker image against the pg_regress test suite. + +Arguments: + DOCKERFILE The Dockerfile to build and test (e.g., Dockerfile-17) + +Options: + -h, --help Show this help message + --no-build Skip building the image (use existing) + --keep Keep the container running after tests (for debugging) + +Examples: + ./test-docker-image.sh Dockerfile-17 + ./test-docker-image.sh Dockerfile-15 + ./test-docker-image.sh Dockerfile-orioledb-17 + ./test-docker-image.sh --no-build Dockerfile-17 +EOF +} + +# Map Dockerfile to version info +get_version_info() { + local dockerfile="$1" + case "$dockerfile" in + Dockerfile-15) + echo "15 5436" + ;; + Dockerfile-17) + echo "17 5435" + ;; + Dockerfile-orioledb-17) + echo "orioledb-17 5437" + ;; + *) + log_error "Unknown Dockerfile: $dockerfile" + log_error "Supported: Dockerfile-15, Dockerfile-17, Dockerfile-orioledb-17" + exit 1 + ;; + esac +} + +# Filter test files based on version +get_test_list() { + local version="$1" + local tests=() + + # Build list of OrioleDB-specific test basenames (tests that have z_orioledb-17_ variants) + local orioledb_variants=() + for f in "$TESTS_SQL_DIR"/z_orioledb-17_*.sql; do + if [[ -f "$f" ]]; then + local variant_name + variant_name=$(basename "$f" .sql) + # Extract the base test name (remove z_orioledb-17_ prefix) + local base_name="${variant_name#z_orioledb-17_}" + orioledb_variants+=("$base_name") + fi + done + + for f in "$TESTS_SQL_DIR"/*.sql; do + local basename + basename=$(basename "$f" .sql) + + # Check if it's a version-specific test (starts with z_) + if [[ "$basename" == z_* ]]; then + # Only include if it matches our 
version + case "$version" in + 15) + [[ "$basename" == z_15_* ]] && tests+=("$basename") + ;; + 17) + [[ "$basename" == z_17_* ]] && tests+=("$basename") + ;; + orioledb-17) + [[ "$basename" == z_orioledb-17_* ]] && tests+=("$basename") + ;; + esac + else + # Non-version-specific tests: check if OrioleDB variant exists + if [[ "$version" == "orioledb-17" ]]; then + # Skip common test if OrioleDB-specific variant exists + local has_variant=false + for variant in "${orioledb_variants[@]}"; do + if [[ "$basename" == "$variant" ]]; then + has_variant=true + break + fi + done + if [[ "$has_variant" == "false" ]]; then + tests+=("$basename") + fi + else + tests+=("$basename") + fi + fi + done + + # Sort the tests + printf '%s\n' "${tests[@]}" | sort +} + +# Cleanup function +cleanup() { + local exit_code=$? + + if [[ -n "$CONTAINER_NAME" ]] && [[ "$KEEP_CONTAINER" != "true" ]]; then + log_info "Cleaning up container $CONTAINER_NAME..." + docker rm -f "$CONTAINER_NAME" 2>/dev/null || true + fi + + if [[ -n "$OUTPUT_DIR" ]] && [[ -d "$OUTPUT_DIR" ]]; then + if [[ $exit_code -ne 0 ]]; then + log_info "Test output preserved at: $OUTPUT_DIR" + else + rm -rf "$OUTPUT_DIR" + fi + fi + + exit $exit_code +} + +trap cleanup EXIT + +# Wait for postgres to be ready +wait_for_postgres() { + local host="$1" + local port="$2" + local max_attempts=60 + local attempt=1 + + log_info "Waiting for PostgreSQL to be ready..." 
+ + while [[ $attempt -le $max_attempts ]]; do + if pg_isready -h "$host" -p "$port" -U "$POSTGRES_USER" -q 2>/dev/null; then + log_info "PostgreSQL is ready" + return 0 + fi + sleep 1 + ((attempt++)) + done + + log_error "PostgreSQL failed to start after ${max_attempts}s" + return 1 +} + +# Main +main() { + local dockerfile="" + local skip_build=false + KEEP_CONTAINER=false + + # Parse arguments + while [[ $# -gt 0 ]]; do + case "$1" in + -h|--help) + print_help + exit 0 + ;; + --no-build) + skip_build=true + shift + ;; + --keep) + KEEP_CONTAINER=true + shift + ;; + -*) + log_error "Unknown option: $1" + print_help + exit 1 + ;; + *) + dockerfile="$1" + shift + ;; + esac + done + + if [[ -z "$dockerfile" ]]; then + log_error "Dockerfile argument required" + print_help + exit 1 + fi + + # Check dockerfile exists + if [[ ! -f "$SCRIPT_DIR/$dockerfile" ]]; then + log_error "Dockerfile not found: $SCRIPT_DIR/$dockerfile" + exit 1 + fi + + # Get version info + read -r VERSION PORT <<< "$(get_version_info "$dockerfile")" + + IMAGE_TAG="pg-docker-test:${VERSION}" + CONTAINER_NAME="pg-test-${VERSION}-$$" + OUTPUT_DIR=$(mktemp -d) + + log_info "Testing $dockerfile (version: $VERSION, port: $PORT)" + + # Build image + if [[ "$skip_build" != "true" ]]; then + log_info "Building image from $dockerfile..." + if ! docker build -f "$SCRIPT_DIR/$dockerfile" -t "$IMAGE_TAG" "$SCRIPT_DIR"; then + log_error "Failed to build image" + exit 1 + fi + else + log_info "Skipping build (--no-build)" + if ! docker image inspect "$IMAGE_TAG" &>/dev/null; then + log_error "Image $IMAGE_TAG not found. Run without --no-build first." + exit 1 + fi + fi + + # Start container + log_info "Starting container $CONTAINER_NAME..." + docker run -d \ + --name "$CONTAINER_NAME" \ + -e POSTGRES_PASSWORD="$POSTGRES_PASSWORD" \ + -p "$PORT:5432" \ + "$IMAGE_TAG" + + # Wait for postgres + if ! 
wait_for_postgres "localhost" "$PORT"; then + log_error "Container logs:" + docker logs "$CONTAINER_NAME" + exit 1 + fi + + # Get psql and pg_regress from Nix + log_info "Setting up Nix environment..." + + # Determine psql binary path based on version + local nix_psql_attr + case "$VERSION" in + 15) nix_psql_attr="psql_15/bin" ;; + 17) nix_psql_attr="psql_17/bin" ;; + orioledb-17) nix_psql_attr="psql_orioledb-17/bin" ;; + esac + + # Build the required Nix packages + PSQL_PATH=$(nix build --no-link --print-out-paths ".#${nix_psql_attr}")/bin/psql + PG_REGRESS_PATH=$(nix build --no-link --print-out-paths ".#pg_regress")/bin/pg_regress + + if [[ ! -x "$PSQL_PATH" ]]; then + log_error "Failed to get psql from Nix" + exit 1 + fi + + if [[ ! -x "$PG_REGRESS_PATH" ]]; then + log_error "Failed to get pg_regress from Nix" + exit 1 + fi + + log_info "Using psql: $PSQL_PATH" + log_info "Using pg_regress: $PG_REGRESS_PATH" + + # Start HTTP mock server inside the container + log_info "Starting HTTP mock server inside container..." + + # Copy mock server script into container + docker cp "$HTTP_MOCK_SERVER" "$CONTAINER_NAME:/tmp/http-mock-server.py" + + # Start mock server in container background + docker exec -d "$CONTAINER_NAME" python3 /tmp/http-mock-server.py 8880 + HTTP_MOCK_PORT=8880 + + # Wait for mock server to be ready + sleep 2 + log_info "HTTP mock server started on port $HTTP_MOCK_PORT (inside container)" + + # Run prime.sql to enable extensions + log_info "Running prime.sql to enable extensions..." + if ! PGPASSWORD="$POSTGRES_PASSWORD" "$PSQL_PATH" \ + -h localhost \ + -p "$PORT" \ + -U "$POSTGRES_USER" \ + -d "$POSTGRES_DB" \ + -v ON_ERROR_STOP=1 \ + -X \ + -f "$TESTS_DIR/prime.sql" 2>&1; then + log_error "Failed to run prime.sql" + exit 1 + fi + + # Create test_config table with HTTP mock port + log_info "Creating test_config table..." 
+ PGPASSWORD="$POSTGRES_PASSWORD" "$PSQL_PATH" \ + -h localhost \ + -p "$PORT" \ + -U "$POSTGRES_USER" \ + -d "$POSTGRES_DB" \ + -c "CREATE TABLE IF NOT EXISTS test_config (key TEXT PRIMARY KEY, value TEXT); + INSERT INTO test_config (key, value) VALUES ('http_mock_port', '$HTTP_MOCK_PORT') + ON CONFLICT (key) DO UPDATE SET value = EXCLUDED.value;" + + # Get filtered test list + log_info "Collecting tests for version $VERSION..." + TEST_LIST=() + while IFS= read -r line; do + TEST_LIST+=("$line") + done < <(get_test_list "$VERSION") + log_info "Running ${#TEST_LIST[@]} tests" + + # Create output directory structure + mkdir -p "$OUTPUT_DIR/regression_output" + + # Copy tests to temp dir and patch expected files for Docker escaping differences + log_info "Preparing test files..." + PATCHED_TESTS_DIR="$OUTPUT_DIR/tests" + cp -r "$TESTS_DIR" "$PATCHED_TESTS_DIR" + + # Patch expected files: Docker escapes $user as \$user in search_path + # Only patch files that have the $user escaping difference + for f in pgmq.out vault.out; do + if [[ -f "$PATCHED_TESTS_DIR/expected/$f" ]]; then + sed -i.bak \ + -e 's/ "\$user"/ "\\$user"/g' \ + -e 's/search_path $/search_path /' \ + -e 's/^-----------------------------------$/------------------------------------/' \ + "$PATCHED_TESTS_DIR/expected/$f" + rm -f "$PATCHED_TESTS_DIR/expected/$f.bak" + fi + done + # Patch roles.out separately (different escaping pattern in JSON) + if [[ -f "$PATCHED_TESTS_DIR/expected/roles.out" ]]; then + sed -i.bak \ + -e 's/\\"\$user\\"/\\"\\\\$user\\"/g' \ + "$PATCHED_TESTS_DIR/expected/roles.out" + rm -f "$PATCHED_TESTS_DIR/expected/roles.out.bak" + fi + + # Patch index_advisor OID: Docker and Nix have different catalog OIDs + # The OID in the error message is environment-specific + if [[ -f "$PATCHED_TESTS_DIR/expected/z_orioledb-17_index_advisor.out" ]]; then + sed -i.bak \ + -e 's/OID 14007/OID 13638/g' \ + "$PATCHED_TESTS_DIR/expected/z_orioledb-17_index_advisor.out" + rm -f 
"$PATCHED_TESTS_DIR/expected/z_orioledb-17_index_advisor.out.bak" + fi + + # Run pg_regress + log_info "Running pg_regress..." + local regress_exit=0 + + if ! PGPASSWORD="$POSTGRES_PASSWORD" "$PG_REGRESS_PATH" \ + --use-existing \ + --dbname="$POSTGRES_DB" \ + --inputdir="$PATCHED_TESTS_DIR" \ + --outputdir="$OUTPUT_DIR/regression_output" \ + --host=localhost \ + --port="$PORT" \ + --user="$POSTGRES_USER" \ + "${TEST_LIST[@]}" 2>&1; then + regress_exit=1 + fi + + # Report results + if [[ $regress_exit -eq 0 ]]; then + log_info "${GREEN}PASS: all ${#TEST_LIST[@]} tests passed${NC}" + else + log_error "FAIL: some tests failed" + if [[ -f "$OUTPUT_DIR/regression_output/regression.diffs" ]]; then + echo "" + echo "=== regression.diffs ===" + cat "$OUTPUT_DIR/regression_output/regression.diffs" + echo "========================" + fi + exit 1 + fi + + if [[ "$KEEP_CONTAINER" == "true" ]]; then + log_info "Container kept running: $CONTAINER_NAME (port $PORT)" + log_info "Connect with: psql -h localhost -p $PORT -U $POSTGRES_USER $POSTGRES_DB" + fi +} + +main "$@" From 6fe3cfcbee4e8950f7c3ceab30d4fa1db5e04e01 Mon Sep 17 00:00:00 2001 From: Sam Rose Date: Thu, 22 Jan 2026 00:13:22 -0500 Subject: [PATCH 2/7] tests: don't cover index_advisor for orioledb --- nix/checks.nix | 11 +++++++- .../expected/z_orioledb-17_index_advisor.out | 16 ----------- nix/tests/sql/z_orioledb-17_index_advisor.sql | 13 --------- test-docker-image.sh | 28 +++++++++++++------ 4 files changed, 29 insertions(+), 39 deletions(-) delete mode 100644 nix/tests/expected/z_orioledb-17_index_advisor.out delete mode 100644 nix/tests/sql/z_orioledb-17_index_advisor.sql diff --git a/nix/checks.nix b/nix/checks.nix index 61701ca04..9af5996a0 100644 --- a/nix/checks.nix +++ b/nix/checks.nix @@ -123,6 +123,11 @@ else throw "Unsupported PostgreSQL version: ${name}"; + # Tests to skip for OrioleDB (not compatible with OrioleDB storage) + orioledbSkipTests = [ + "index_advisor" # index_advisor doesn't support 
OrioleDB tables + ]; + # Helper function to filter SQL files based on version filterTestFiles = version: dir: @@ -140,8 +145,12 @@ let isVersionSpecific = builtins.match "z_.*" name != null; basename = builtins.substring 0 (pkgs.lib.stringLength name - 4) name; # Remove .sql + # Skip tests that don't work with OrioleDB + isSkippedForOrioledb = version == "orioledb-17" && builtins.elem basename orioledbSkipTests; matchesVersion = - if isVersionSpecific then + if isSkippedForOrioledb then + false + else if isVersionSpecific then if version == "orioledb-17" then builtins.match "z_orioledb-17_.*" name != null else if version == "17" then diff --git a/nix/tests/expected/z_orioledb-17_index_advisor.out b/nix/tests/expected/z_orioledb-17_index_advisor.out deleted file mode 100644 index 3fc2e4fff..000000000 --- a/nix/tests/expected/z_orioledb-17_index_advisor.out +++ /dev/null @@ -1,16 +0,0 @@ -create schema v; -create table v.book( - id int primary key, - title text not null -); -select - index_statements, errors -from - index_advisor('select id from v.book where title = $1'); - index_statements | errors -------------------+-------------------------------------------- - {} | {"could not open relation with OID 14007"} -(1 row) - -drop schema v cascade; -NOTICE: drop cascades to table v.book diff --git a/nix/tests/sql/z_orioledb-17_index_advisor.sql b/nix/tests/sql/z_orioledb-17_index_advisor.sql deleted file mode 100644 index 3911d6eb5..000000000 --- a/nix/tests/sql/z_orioledb-17_index_advisor.sql +++ /dev/null @@ -1,13 +0,0 @@ -create schema v; - -create table v.book( - id int primary key, - title text not null -); - -select - index_statements, errors -from - index_advisor('select id from v.book where title = $1'); - -drop schema v cascade; diff --git a/test-docker-image.sh b/test-docker-image.sh index ef74b04ae..e9e9886f4 100755 --- a/test-docker-image.sh +++ b/test-docker-image.sh @@ -86,6 +86,11 @@ get_version_info() { esac } +# Tests to skip for OrioleDB (not 
compatible with OrioleDB storage) +ORIOLEDB_SKIP_TESTS=( + "index_advisor" # index_advisor doesn't support OrioleDB tables +) + # Filter test files based on version get_test_list() { local version="$1" @@ -107,6 +112,20 @@ get_test_list() { local basename basename=$(basename "$f" .sql) + # Skip tests that don't work with OrioleDB + if [[ "$version" == "orioledb-17" ]]; then + local should_skip=false + for skip_test in "${ORIOLEDB_SKIP_TESTS[@]}"; do + if [[ "$basename" == "$skip_test" ]]; then + should_skip=true + break + fi + done + if [[ "$should_skip" == "true" ]]; then + continue + fi + fi + # Check if it's a version-specific test (starts with z_) if [[ "$basename" == z_* ]]; then # Only include if it matches our version @@ -376,15 +395,6 @@ main() { rm -f "$PATCHED_TESTS_DIR/expected/roles.out.bak" fi - # Patch index_advisor OID: Docker and Nix have different catalog OIDs - # The OID in the error message is environment-specific - if [[ -f "$PATCHED_TESTS_DIR/expected/z_orioledb-17_index_advisor.out" ]]; then - sed -i.bak \ - -e 's/OID 14007/OID 13638/g' \ - "$PATCHED_TESTS_DIR/expected/z_orioledb-17_index_advisor.out" - rm -f "$PATCHED_TESTS_DIR/expected/z_orioledb-17_index_advisor.out.bak" - fi - # Run pg_regress log_info "Running pg_regress..." 
local regress_exit=0 From e7ca5b9d3258d05699b819528d63357e12867440 Mon Sep 17 00:00:00 2001 From: Sam Rose Date: Thu, 22 Jan 2026 11:49:18 -0500 Subject: [PATCH 3/7] tests: align test with pr to correct docs --- nix/tests/expected/docs-functions.out | 10 ++++++---- nix/tests/expected/z_orioledb-17_docs-functions.out | 10 ++++++---- nix/tests/sql/docs-functions.sql | 2 +- nix/tests/sql/z_orioledb-17_docs-functions.sql | 2 +- 4 files changed, 14 insertions(+), 10 deletions(-) diff --git a/nix/tests/expected/docs-functions.out b/nix/tests/expected/docs-functions.out index 1a2c6085c..9acc45ffb 100644 --- a/nix/tests/expected/docs-functions.out +++ b/nix/tests/expected/docs-functions.out @@ -74,13 +74,15 @@ language plpgsql security definer set search_path = '' as $$ begin - select 'hello world'; + return 'hello world'; end; $$; select hello_world_definer(); -ERROR: query has no destination for result data -HINT: If you want to discard the results of a SELECT, use PERFORM instead. -CONTEXT: PL/pgSQL function public.hello_world_definer() line 3 at SQL statement + hello_world_definer +--------------------- + hello world +(1 row) + revoke execute on function public.hello_world from public; revoke execute on function public.hello_world from anon; grant execute on function public.hello_world to authenticated; diff --git a/nix/tests/expected/z_orioledb-17_docs-functions.out b/nix/tests/expected/z_orioledb-17_docs-functions.out index 72af403b8..e18694e02 100644 --- a/nix/tests/expected/z_orioledb-17_docs-functions.out +++ b/nix/tests/expected/z_orioledb-17_docs-functions.out @@ -74,13 +74,15 @@ language plpgsql security definer set search_path = '' as $$ begin - select 'hello world'; + return 'hello world'; end; $$; select hello_world_definer(); -ERROR: query has no destination for result data -HINT: If you want to discard the results of a SELECT, use PERFORM instead. 
-CONTEXT: PL/pgSQL function public.hello_world_definer() line 3 at SQL statement + hello_world_definer +--------------------- + hello world +(1 row) + revoke execute on function public.hello_world from public; revoke execute on function public.hello_world from anon; grant execute on function public.hello_world to authenticated; diff --git a/nix/tests/sql/docs-functions.sql b/nix/tests/sql/docs-functions.sql index f91f17261..d3b7afe06 100644 --- a/nix/tests/sql/docs-functions.sql +++ b/nix/tests/sql/docs-functions.sql @@ -69,7 +69,7 @@ language plpgsql security definer set search_path = '' as $$ begin - select 'hello world'; + return 'hello world'; end; $$; diff --git a/nix/tests/sql/z_orioledb-17_docs-functions.sql b/nix/tests/sql/z_orioledb-17_docs-functions.sql index f91f17261..d3b7afe06 100644 --- a/nix/tests/sql/z_orioledb-17_docs-functions.sql +++ b/nix/tests/sql/z_orioledb-17_docs-functions.sql @@ -69,7 +69,7 @@ language plpgsql security definer set search_path = '' as $$ begin - select 'hello world'; + return 'hello world'; end; $$; From ebc0ec6dc14cee19a7f62e5127c98d20d22ebd5d Mon Sep 17 00:00:00 2001 From: samrose Date: Thu, 22 Jan 2026 15:43:07 -0500 Subject: [PATCH 4/7] Update test-docker-image.sh Co-authored-by: Douglas J Hunley --- test-docker-image.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test-docker-image.sh b/test-docker-image.sh index e9e9886f4..288f95307 100755 --- a/test-docker-image.sh +++ b/test-docker-image.sh @@ -327,8 +327,8 @@ main() { docker cp "$HTTP_MOCK_SERVER" "$CONTAINER_NAME:/tmp/http-mock-server.py" # Start mock server in container background - docker exec -d "$CONTAINER_NAME" python3 /tmp/http-mock-server.py 8880 HTTP_MOCK_PORT=8880 + docker exec -d "$CONTAINER_NAME" python3 /tmp/http-mock-server.py $HTTP_MOCK_PORT # Wait for mock server to be ready sleep 2 From dc47b0fe37cccc97db9746893ef4e3c657cd3f30 Mon Sep 17 00:00:00 2001 From: samrose Date: Thu, 22 Jan 2026 15:43:39 -0500 Subject: [PATCH 5/7] 
Update test-docker-image.sh Co-authored-by: Douglas J Hunley --- test-docker-image.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/test-docker-image.sh b/test-docker-image.sh index 288f95307..c4bf2d9b2 100755 --- a/test-docker-image.sh +++ b/test-docker-image.sh @@ -166,6 +166,9 @@ get_test_list() { # Cleanup function cleanup() { + # since this function is set as the trap for EXIT + # store the return code of the last command that + # was executed before said EXIT local exit_code=$? if [[ -n "$CONTAINER_NAME" ]] && [[ "$KEEP_CONTAINER" != "true" ]]; then From f35e6a36c44bb1e9a14f8d0f992b251f161394e2 Mon Sep 17 00:00:00 2001 From: Sam Rose Date: Thu, 22 Jan 2026 15:47:23 -0500 Subject: [PATCH 6/7] fix: replace basename with _basename to avoid collision --- test-docker-image.sh | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/test-docker-image.sh b/test-docker-image.sh index c4bf2d9b2..5204d7931 100755 --- a/test-docker-image.sh +++ b/test-docker-image.sh @@ -109,14 +109,14 @@ get_test_list() { done for f in "$TESTS_SQL_DIR"/*.sql; do - local basename - basename=$(basename "$f" .sql) + local _basename + _basename=$(basename "$f" .sql) # Skip tests that don't work with OrioleDB if [[ "$version" == "orioledb-17" ]]; then local should_skip=false for skip_test in "${ORIOLEDB_SKIP_TESTS[@]}"; do - if [[ "$basename" == "$skip_test" ]]; then + if [[ "$_basename" == "$skip_test" ]]; then should_skip=true break fi @@ -127,17 +127,17 @@ get_test_list() { fi # Check if it's a version-specific test (starts with z_) - if [[ "$basename" == z_* ]]; then + if [[ "$_basename" == z_* ]]; then # Only include if it matches our version case "$version" in 15) - [[ "$basename" == z_15_* ]] && tests+=("$basename") + [[ "$_basename" == z_15_* ]] && tests+=("$_basename") ;; 17) - [[ "$basename" == z_17_* ]] && tests+=("$basename") + [[ "$_basename" == z_17_* ]] && tests+=("$_basename") ;; orioledb-17) - [[ "$basename" == z_orioledb-17_* ]] && 
tests+=("$basename") + [[ "$_basename" == z_orioledb-17_* ]] && tests+=("$_basename") ;; esac else @@ -146,16 +146,16 @@ get_test_list() { # Skip common test if OrioleDB-specific variant exists local has_variant=false for variant in "${orioledb_variants[@]}"; do - if [[ "$basename" == "$variant" ]]; then + if [[ "$_basename" == "$variant" ]]; then has_variant=true break fi done if [[ "$has_variant" == "false" ]]; then - tests+=("$basename") + tests+=("$_basename") fi else - tests+=("$basename") + tests+=("$_basename") fi fi done From 7793d1911a8a242082dba40fef4e25ad1e3abb41 Mon Sep 17 00:00:00 2001 From: samrose Date: Thu, 22 Jan 2026 15:49:02 -0500 Subject: [PATCH 7/7] Update nix/checks.nix Co-authored-by: Douglas J Hunley --- nix/checks.nix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nix/checks.nix b/nix/checks.nix index 9af5996a0..6e68c734e 100644 --- a/nix/checks.nix +++ b/nix/checks.nix @@ -133,7 +133,7 @@ version: dir: let files = builtins.readDir dir; - # Get list of OrioleDB-specific test basenames (without z_orioledb-17_ prefix) + # Get list of OrioleDB-specific test basenames , then strip the orioledb prefix from them orioledbVariants = pkgs.lib.pipe files [ builtins.attrNames (builtins.filter (n: builtins.match "z_orioledb-17_.*\\.sql" n != null))