reset
This commit is contained in:
parent
31ecf136f7
commit
dde8450e7e
5
.gitignore
vendored
5
.gitignore
vendored
@ -3,12 +3,15 @@
|
|||||||
.env.*
|
.env.*
|
||||||
!.env.example
|
!.env.example
|
||||||
|
|
||||||
|
.archive/
|
||||||
|
|
||||||
# Docker volume RUNTIME data (large binary/runtime files - not schema SQL)
|
# Docker volume RUNTIME data (large binary/runtime files - not schema SQL)
|
||||||
volumes/db/data/
|
volumes/db-data/
|
||||||
volumes/storage/
|
volumes/storage/
|
||||||
volumes/pooler/
|
volumes/pooler/
|
||||||
volumes/logs/
|
volumes/logs/
|
||||||
|
|
||||||
|
|
||||||
# Backup files
|
# Backup files
|
||||||
*.bak
|
*.bak
|
||||||
*.bak.*
|
*.bak.*
|
||||||
|
|||||||
@ -11,7 +11,7 @@ services:
|
|||||||
|
|
||||||
studio:
|
studio:
|
||||||
container_name: supabase-studio
|
container_name: supabase-studio
|
||||||
image: supabase/studio:2025.06.30-sha-6f5982d
|
image: supabase/studio:2026.02.16-sha-26c615c
|
||||||
restart: unless-stopped
|
restart: unless-stopped
|
||||||
ports:
|
ports:
|
||||||
- ${STUDIO_PORT}:3000
|
- ${STUDIO_PORT}:3000
|
||||||
@ -75,7 +75,7 @@ services:
|
|||||||
|
|
||||||
auth:
|
auth:
|
||||||
container_name: supabase-auth
|
container_name: supabase-auth
|
||||||
image: supabase/gotrue:v2.177.0
|
image: supabase/gotrue:v2.186.0
|
||||||
restart: unless-stopped
|
restart: unless-stopped
|
||||||
healthcheck:
|
healthcheck:
|
||||||
test: [ "CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:9999/health" ]
|
test: [ "CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:9999/health" ]
|
||||||
@ -150,7 +150,7 @@ services:
|
|||||||
|
|
||||||
rest:
|
rest:
|
||||||
container_name: supabase-rest
|
container_name: supabase-rest
|
||||||
image: postgrest/postgrest:v12.2.12
|
image: postgrest/postgrest:v14.5
|
||||||
restart: unless-stopped
|
restart: unless-stopped
|
||||||
depends_on:
|
depends_on:
|
||||||
db:
|
db:
|
||||||
@ -171,7 +171,7 @@ services:
|
|||||||
realtime:
|
realtime:
|
||||||
# This container name looks inconsistent but is correct because realtime constructs tenant id by parsing the subdomain
|
# This container name looks inconsistent but is correct because realtime constructs tenant id by parsing the subdomain
|
||||||
container_name: realtime-dev.supabase-realtime
|
container_name: realtime-dev.supabase-realtime
|
||||||
image: supabase/realtime:v2.34.47
|
image: supabase/realtime:v2.76.5
|
||||||
restart: unless-stopped
|
restart: unless-stopped
|
||||||
depends_on:
|
depends_on:
|
||||||
db:
|
db:
|
||||||
@ -205,7 +205,7 @@ services:
|
|||||||
# To use S3 backed storage: docker compose -f docker-compose.yml -f docker-compose.s3.yml up
|
# To use S3 backed storage: docker compose -f docker-compose.yml -f docker-compose.s3.yml up
|
||||||
storage:
|
storage:
|
||||||
container_name: supabase-storage
|
container_name: supabase-storage
|
||||||
image: supabase/storage-api:v1.25.7
|
image: supabase/storage-api:v1.37.8
|
||||||
restart: unless-stopped
|
restart: unless-stopped
|
||||||
volumes:
|
volumes:
|
||||||
- ./volumes/storage:/var/lib/storage:z
|
- ./volumes/storage:/var/lib/storage:z
|
||||||
@ -240,7 +240,7 @@ services:
|
|||||||
|
|
||||||
imgproxy:
|
imgproxy:
|
||||||
container_name: supabase-imgproxy
|
container_name: supabase-imgproxy
|
||||||
image: darthsim/imgproxy:v3.8.0
|
image: darthsim/imgproxy:v3.30.1
|
||||||
restart: unless-stopped
|
restart: unless-stopped
|
||||||
volumes:
|
volumes:
|
||||||
- ./volumes/storage:/var/lib/storage:z
|
- ./volumes/storage:/var/lib/storage:z
|
||||||
@ -257,7 +257,7 @@ services:
|
|||||||
|
|
||||||
meta:
|
meta:
|
||||||
container_name: supabase-meta
|
container_name: supabase-meta
|
||||||
image: supabase/postgres-meta:v0.91.0
|
image: supabase/postgres-meta:v0.95.2
|
||||||
restart: unless-stopped
|
restart: unless-stopped
|
||||||
depends_on:
|
depends_on:
|
||||||
db:
|
db:
|
||||||
@ -275,7 +275,7 @@ services:
|
|||||||
|
|
||||||
functions:
|
functions:
|
||||||
container_name: supabase-edge-functions
|
container_name: supabase-edge-functions
|
||||||
image: supabase/edge-runtime:v1.67.4
|
image: supabase/edge-runtime:v1.70.3
|
||||||
restart: unless-stopped
|
restart: unless-stopped
|
||||||
volumes:
|
volumes:
|
||||||
- ./volumes/functions:/home/deno/functions:Z
|
- ./volumes/functions:/home/deno/functions:Z
|
||||||
@ -294,7 +294,7 @@ services:
|
|||||||
|
|
||||||
analytics:
|
analytics:
|
||||||
container_name: supabase-analytics
|
container_name: supabase-analytics
|
||||||
image: supabase/logflare:1.14.2
|
image: supabase/logflare:1.31.2
|
||||||
restart: unless-stopped
|
restart: unless-stopped
|
||||||
ports:
|
ports:
|
||||||
- 4000:4000
|
- 4000:4000
|
||||||
@ -339,12 +339,26 @@ services:
|
|||||||
# Comment out everything below this point if you are using an external Postgres database
|
# Comment out everything below this point if you are using an external Postgres database
|
||||||
db:
|
db:
|
||||||
container_name: supabase-db
|
container_name: supabase-db
|
||||||
image: supabase/postgres:15.8.1.060
|
image: supabase/postgres:15.8.1.085
|
||||||
restart: unless-stopped
|
restart: unless-stopped
|
||||||
volumes:
|
volumes:
|
||||||
- ./volumes/db:/docker-entrypoint-initdb.d:Z
|
- ./volumes/db/realtime.sql:/docker-entrypoint-initdb.d/migrations/99-realtime.sql:Z
|
||||||
|
# Must be superuser to create event trigger
|
||||||
|
- ./volumes/db/webhooks.sql:/docker-entrypoint-initdb.d/init-scripts/98-webhooks.sql:Z
|
||||||
|
# Must be superuser to alter reserved role
|
||||||
|
- ./volumes/db/roles.sql:/docker-entrypoint-initdb.d/init-scripts/99-roles.sql:Z
|
||||||
|
# Initialize the database settings with JWT_SECRET and JWT_EXP
|
||||||
|
- ./volumes/db/jwt.sql:/docker-entrypoint-initdb.d/init-scripts/99-jwt.sql:Z
|
||||||
|
# Changes required for internal supabase data such as _analytics
|
||||||
|
- ./volumes/db/_supabase.sql:/docker-entrypoint-initdb.d/migrations/97-_supabase.sql:Z
|
||||||
|
# Changes required for Analytics support
|
||||||
|
- ./volumes/db/logs.sql:/docker-entrypoint-initdb.d/migrations/99-logs.sql:Z
|
||||||
|
# Changes required for Pooler support
|
||||||
|
- ./volumes/db/pooler.sql:/docker-entrypoint-initdb.d/migrations/99-pooler.sql:Z
|
||||||
# PGDATA directory - persists database files between restarts
|
# PGDATA directory - persists database files between restarts
|
||||||
- ./volumes/db-data:/var/lib/postgresql/data:Z
|
- ./volumes/db-data:/var/lib/postgresql/data:Z
|
||||||
|
# Use named volume to persist pgsodium decryption key between restarts
|
||||||
|
- db-config:/etc/postgresql-custom
|
||||||
healthcheck:
|
healthcheck:
|
||||||
test: [ "CMD", "pg_isready", "-U", "postgres", "-h", "localhost" ]
|
test: [ "CMD", "pg_isready", "-U", "postgres", "-h", "localhost" ]
|
||||||
interval: 5s
|
interval: 5s
|
||||||
@ -355,7 +369,6 @@ services:
|
|||||||
condition: service_healthy
|
condition: service_healthy
|
||||||
environment:
|
environment:
|
||||||
POSTGRES_HOST: /var/run/postgresql
|
POSTGRES_HOST: /var/run/postgresql
|
||||||
POSTGRES_USER: postgres
|
|
||||||
PGPORT: ${POSTGRES_PORT}
|
PGPORT: ${POSTGRES_PORT}
|
||||||
POSTGRES_PORT: ${POSTGRES_PORT}
|
POSTGRES_PORT: ${POSTGRES_PORT}
|
||||||
PGPASSWORD: ${POSTGRES_PASSWORD}
|
PGPASSWORD: ${POSTGRES_PASSWORD}
|
||||||
@ -377,7 +390,7 @@ services:
|
|||||||
|
|
||||||
vector:
|
vector:
|
||||||
container_name: supabase-vector
|
container_name: supabase-vector
|
||||||
image: timberio/vector:0.28.1-alpine
|
image: timberio/vector:0.53.0-alpine
|
||||||
restart: unless-stopped
|
restart: unless-stopped
|
||||||
volumes:
|
volumes:
|
||||||
- ./volumes/logs/vector.yml:/etc/vector/vector.yml:ro,z
|
- ./volumes/logs/vector.yml:/etc/vector/vector.yml:ro,z
|
||||||
@ -397,7 +410,7 @@ services:
|
|||||||
# Update the DATABASE_URL if you are using an external Postgres database
|
# Update the DATABASE_URL if you are using an external Postgres database
|
||||||
supavisor:
|
supavisor:
|
||||||
container_name: supabase-pooler
|
container_name: supabase-pooler
|
||||||
image: supabase/supavisor:2.5.7
|
image: supabase/supavisor:2.7.0
|
||||||
restart: unless-stopped
|
restart: unless-stopped
|
||||||
ports:
|
ports:
|
||||||
- ${POSTGRES_PORT}:5432
|
- ${POSTGRES_PORT}:5432
|
||||||
@ -464,3 +477,4 @@ services:
|
|||||||
|
|
||||||
volumes:
|
volumes:
|
||||||
db-config:
|
db-config:
|
||||||
|
deno-cache:
|
||||||
|
|||||||
@ -1,82 +0,0 @@
|
|||||||
-- ============================================================
|
|
||||||
-- Supabase Core Roles & Schemas Initialization
|
|
||||||
-- Runs first (50-) to set up all roles required by later scripts
|
|
||||||
-- ============================================================
|
|
||||||
|
|
||||||
-- Create supabase_admin role
|
|
||||||
DO
|
|
||||||
$$
|
|
||||||
BEGIN
|
|
||||||
IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = 'supabase_admin') THEN
|
|
||||||
CREATE ROLE supabase_admin WITH LOGIN CREATEROLE REPLICATION BYPASSRLS PASSWORD 'siqt3T9iHjWpjATtKdlBjJKOifiLf0Oe';
|
|
||||||
END IF;
|
|
||||||
END
|
|
||||||
$$;
|
|
||||||
|
|
||||||
-- Create ALL standard Supabase roles needed by subsequent init scripts
|
|
||||||
-- (56-roles.sql will ALTER these, so they must pre-exist)
|
|
||||||
DO
|
|
||||||
$$
|
|
||||||
BEGIN
|
|
||||||
IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = 'anon') THEN
|
|
||||||
CREATE ROLE anon NOLOGIN NOINHERIT;
|
|
||||||
END IF;
|
|
||||||
IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = 'authenticated') THEN
|
|
||||||
CREATE ROLE authenticated NOLOGIN NOINHERIT;
|
|
||||||
END IF;
|
|
||||||
IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = 'service_role') THEN
|
|
||||||
CREATE ROLE service_role NOLOGIN NOINHERIT BYPASSRLS;
|
|
||||||
END IF;
|
|
||||||
IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = 'authenticator') THEN
|
|
||||||
CREATE ROLE authenticator WITH NOINHERIT LOGIN PASSWORD 'siqt3T9iHjWpjATtKdlBjJKOifiLf0Oe';
|
|
||||||
END IF;
|
|
||||||
IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = 'pgbouncer') THEN
|
|
||||||
CREATE ROLE pgbouncer WITH LOGIN PASSWORD 'siqt3T9iHjWpjATtKdlBjJKOifiLf0Oe';
|
|
||||||
END IF;
|
|
||||||
IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = 'supabase_auth_admin') THEN
|
|
||||||
CREATE ROLE supabase_auth_admin WITH NOINHERIT CREATEROLE LOGIN PASSWORD 'siqt3T9iHjWpjATtKdlBjJKOifiLf0Oe';
|
|
||||||
END IF;
|
|
||||||
IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = 'supabase_storage_admin') THEN
|
|
||||||
CREATE ROLE supabase_storage_admin WITH NOINHERIT CREATEROLE LOGIN PASSWORD 'siqt3T9iHjWpjATtKdlBjJKOifiLf0Oe';
|
|
||||||
END IF;
|
|
||||||
IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = 'supabase_functions_admin') THEN
|
|
||||||
CREATE ROLE supabase_functions_admin WITH NOINHERIT CREATEROLE LOGIN PASSWORD 'siqt3T9iHjWpjATtKdlBjJKOifiLf0Oe';
|
|
||||||
END IF;
|
|
||||||
IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = 'supabase_replication_admin') THEN
|
|
||||||
CREATE ROLE supabase_replication_admin LOGIN REPLICATION;
|
|
||||||
END IF;
|
|
||||||
IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = 'supabase_read_only_user') THEN
|
|
||||||
CREATE ROLE supabase_read_only_user BYPASSRLS;
|
|
||||||
END IF;
|
|
||||||
END
|
|
||||||
$$;
|
|
||||||
|
|
||||||
-- Grant pg_read_server_files to supabase_admin (required by pg_net extension)
|
|
||||||
GRANT pg_read_server_files TO supabase_admin;
|
|
||||||
|
|
||||||
-- Core grants
|
|
||||||
GRANT ALL ON DATABASE postgres TO supabase_admin WITH GRANT OPTION;
|
|
||||||
GRANT anon TO authenticator;
|
|
||||||
GRANT authenticated TO authenticator;
|
|
||||||
GRANT service_role TO authenticator;
|
|
||||||
GRANT supabase_auth_admin TO supabase_admin;
|
|
||||||
GRANT supabase_storage_admin TO supabase_admin;
|
|
||||||
GRANT supabase_functions_admin TO supabase_admin;
|
|
||||||
|
|
||||||
-- Create _supabase database for internal Supabase services
|
|
||||||
CREATE DATABASE _supabase WITH OWNER supabase_admin;
|
|
||||||
|
|
||||||
-- Create required schemas in postgres database
|
|
||||||
CREATE SCHEMA IF NOT EXISTS _supabase AUTHORIZATION supabase_admin;
|
|
||||||
CREATE SCHEMA IF NOT EXISTS extensions AUTHORIZATION supabase_admin;
|
|
||||||
|
|
||||||
-- Stub schemas: auth/storage populated by GoTrue/Storage services at runtime
|
|
||||||
-- but must exist for 61-core-schema.sql to pass validation
|
|
||||||
CREATE SCHEMA IF NOT EXISTS auth;
|
|
||||||
CREATE SCHEMA IF NOT EXISTS storage;
|
|
||||||
GRANT USAGE ON SCHEMA auth TO supabase_admin, supabase_auth_admin;
|
|
||||||
GRANT USAGE ON SCHEMA storage TO supabase_admin, supabase_storage_admin;
|
|
||||||
|
|
||||||
-- Switch to _supabase database and create required schemas
|
|
||||||
\connect _supabase
|
|
||||||
CREATE SCHEMA IF NOT EXISTS _analytics AUTHORIZATION supabase_admin;
|
|
||||||
@ -1,123 +0,0 @@
|
|||||||
-- Create pg_net extension outside transaction (cannot run inside BEGIN/COMMIT)
|
|
||||||
CREATE EXTENSION IF NOT EXISTS pg_net SCHEMA extensions;
|
|
||||||
|
|
||||||
BEGIN;
|
|
||||||
-- Create pg_net extension
|
|
||||||
-- pg_net extension created above (outside transaction)
|
|
||||||
-- Create the supabase_functions schema
|
|
||||||
CREATE SCHEMA IF NOT EXISTS supabase_functions AUTHORIZATION supabase_admin;
|
|
||||||
GRANT USAGE ON SCHEMA supabase_functions TO postgres, anon, authenticated, service_role;
|
|
||||||
ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON TABLES TO postgres, anon, authenticated, service_role;
|
|
||||||
ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON FUNCTIONS TO postgres, anon, authenticated, service_role;
|
|
||||||
ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON SEQUENCES TO postgres, anon, authenticated, service_role;
|
|
||||||
-- supabase_functions.migrations definition
|
|
||||||
CREATE TABLE supabase_functions.migrations (
|
|
||||||
version text PRIMARY KEY,
|
|
||||||
inserted_at timestamptz NOT NULL DEFAULT NOW()
|
|
||||||
);
|
|
||||||
-- Initial supabase_functions migration
|
|
||||||
INSERT INTO supabase_functions.migrations (version) VALUES ('initial');
|
|
||||||
-- supabase_functions.hooks definition
|
|
||||||
CREATE TABLE supabase_functions.hooks (
|
|
||||||
id bigserial PRIMARY KEY,
|
|
||||||
hook_table_id integer NOT NULL,
|
|
||||||
hook_name text NOT NULL,
|
|
||||||
created_at timestamptz NOT NULL DEFAULT NOW(),
|
|
||||||
request_id bigint
|
|
||||||
);
|
|
||||||
CREATE INDEX supabase_functions_hooks_request_id_idx ON supabase_functions.hooks USING btree (request_id);
|
|
||||||
CREATE INDEX supabase_functions_hooks_h_table_id_h_name_idx ON supabase_functions.hooks USING btree (hook_table_id, hook_name);
|
|
||||||
COMMENT ON TABLE supabase_functions.hooks IS 'Webhook request logs stored temporarily while awaiting the request.';
|
|
||||||
CREATE FUNCTION supabase_functions.http_request()
|
|
||||||
RETURNS trigger
|
|
||||||
LANGUAGE plpgsql
|
|
||||||
AS $func$
|
|
||||||
DECLARE
|
|
||||||
request_id bigint;
|
|
||||||
payload jsonb;
|
|
||||||
url text := TG_ARGV[0]::text;
|
|
||||||
method text := TG_ARGV[1]::text;
|
|
||||||
headers jsonb DEFAULT '{}'::jsonb;
|
|
||||||
params jsonb DEFAULT '{}'::jsonb;
|
|
||||||
timeout_ms integer DEFAULT 1000;
|
|
||||||
BEGIN
|
|
||||||
IF url IS NULL OR url = 'null' THEN
|
|
||||||
RAISE EXCEPTION 'url argument is missing';
|
|
||||||
END IF;
|
|
||||||
|
|
||||||
IF method IS NULL OR method = 'null' THEN
|
|
||||||
RAISE EXCEPTION 'method argument is missing';
|
|
||||||
END IF;
|
|
||||||
|
|
||||||
IF TG_ARGV[2] IS NULL OR TG_ARGV[2] = 'null' THEN
|
|
||||||
headers = '{}'::jsonb;
|
|
||||||
ELSE
|
|
||||||
headers = TG_ARGV[2]::jsonb;
|
|
||||||
END IF;
|
|
||||||
|
|
||||||
IF TG_ARGV[3] IS NULL OR TG_ARGV[3] = 'null' THEN
|
|
||||||
params = '{}'::jsonb;
|
|
||||||
ELSE
|
|
||||||
params = TG_ARGV[3]::jsonb;
|
|
||||||
END IF;
|
|
||||||
|
|
||||||
IF TG_ARGV[4] IS NULL OR TG_ARGV[4] = 'null' THEN
|
|
||||||
timeout_ms = 1000;
|
|
||||||
ELSE
|
|
||||||
timeout_ms = TG_ARGV[4]::integer;
|
|
||||||
END IF;
|
|
||||||
|
|
||||||
CASE
|
|
||||||
WHEN method = 'GET' THEN
|
|
||||||
SELECT http_get INTO request_id FROM net.http_get(
|
|
||||||
url,
|
|
||||||
params,
|
|
||||||
headers,
|
|
||||||
timeout_ms
|
|
||||||
);
|
|
||||||
WHEN method = 'POST' THEN
|
|
||||||
payload = jsonb_build_object(
|
|
||||||
'old_record', OLD,
|
|
||||||
'record', NEW,
|
|
||||||
'type', TG_OP,
|
|
||||||
'table', TG_TABLE_NAME,
|
|
||||||
'schema', TG_TABLE_SCHEMA
|
|
||||||
);
|
|
||||||
|
|
||||||
SELECT http_post INTO request_id FROM net.http_post(
|
|
||||||
url,
|
|
||||||
payload,
|
|
||||||
headers,
|
|
||||||
timeout_ms
|
|
||||||
);
|
|
||||||
ELSE
|
|
||||||
RAISE EXCEPTION 'method argument % is invalid', method;
|
|
||||||
END CASE;
|
|
||||||
|
|
||||||
INSERT INTO supabase_functions.hooks
|
|
||||||
(hook_table_id, hook_name, request_id)
|
|
||||||
VALUES
|
|
||||||
(TG_RELID, TG_NAME, request_id);
|
|
||||||
|
|
||||||
RETURN NEW;
|
|
||||||
END
|
|
||||||
$func$;
|
|
||||||
-- Supabase super admin
|
|
||||||
DO
|
|
||||||
$$
|
|
||||||
BEGIN
|
|
||||||
IF NOT EXISTS (
|
|
||||||
SELECT FROM pg_catalog.pg_roles
|
|
||||||
WHERE rolname = 'supabase_functions_admin'
|
|
||||||
) THEN
|
|
||||||
CREATE ROLE supabase_functions_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION;
|
|
||||||
END IF;
|
|
||||||
END
|
|
||||||
$$;
|
|
||||||
GRANT ALL PRIVILEGES ON SCHEMA supabase_functions TO supabase_functions_admin;
|
|
||||||
GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA supabase_functions TO supabase_functions_admin;
|
|
||||||
GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA supabase_functions TO supabase_functions_admin;
|
|
||||||
ALTER function supabase_functions.http_request() OWNER TO supabase_functions_admin;
|
|
||||||
INSERT INTO supabase_functions.migrations (version) VALUES ('20210809183942_update_grants');
|
|
||||||
ALTER ROLE supabase_functions_admin SET search_path = supabase_functions;
|
|
||||||
COMMIT;
|
|
||||||
@ -1,3 +0,0 @@
|
|||||||
-- Create analytics/logs schema
|
|
||||||
CREATE SCHEMA IF NOT EXISTS _analytics;
|
|
||||||
ALTER SCHEMA _analytics OWNER TO supabase_admin;
|
|
||||||
@ -1,3 +0,0 @@
|
|||||||
-- create realtime schema for Realtime RLS (already exists but just in case)
|
|
||||||
CREATE SCHEMA IF NOT EXISTS _realtime;
|
|
||||||
ALTER SCHEMA _realtime OWNER TO supabase_admin;
|
|
||||||
@ -1,13 +0,0 @@
|
|||||||
-- pgBouncer auth function
|
|
||||||
CREATE OR REPLACE FUNCTION public.get_auth(p_usename TEXT) RETURNS TABLE(username TEXT, password TEXT) AS
|
|
||||||
$$
|
|
||||||
BEGIN
|
|
||||||
RAISE WARNING 'get_auth() called for user: %', p_usename;
|
|
||||||
RETURN QUERY
|
|
||||||
SELECT usename::TEXT, passwd::TEXT FROM pg_catalog.pg_shadow
|
|
||||||
WHERE usename = p_usename;
|
|
||||||
END;
|
|
||||||
$$ LANGUAGE plpgsql SECURITY DEFINER;
|
|
||||||
|
|
||||||
REVOKE ALL ON FUNCTION public.get_auth(p_usename TEXT) FROM PUBLIC;
|
|
||||||
GRANT EXECUTE ON FUNCTION public.get_auth(p_usename TEXT) TO pgbouncer;
|
|
||||||
@ -1,345 +0,0 @@
|
|||||||
--[ Database Schema Version ]--
|
|
||||||
-- Version: 1.0.0
|
|
||||||
-- Last Updated: 2024-02-24
|
|
||||||
-- Description: Core schema setup for ClassConcepts with neoFS filesystem integration
|
|
||||||
-- Dependencies: auth.users (Supabase Auth)
|
|
||||||
|
|
||||||
--[ 1. Extensions ]--
|
|
||||||
create extension if not exists "uuid-ossp";
|
|
||||||
|
|
||||||
-- Create rpc schema if it doesn't exist
|
|
||||||
create schema if not exists rpc;
|
|
||||||
grant usage on schema rpc to anon, authenticated;
|
|
||||||
|
|
||||||
-- Create exec_sql function for admin operations
|
|
||||||
create or replace function exec_sql(query text)
|
|
||||||
returns void as $$
|
|
||||||
begin
|
|
||||||
execute query;
|
|
||||||
end;
|
|
||||||
$$ language plpgsql security definer;
|
|
||||||
|
|
||||||
-- Create updated_at trigger function
|
|
||||||
create or replace function public.handle_updated_at()
|
|
||||||
returns trigger as $$
|
|
||||||
begin
|
|
||||||
new.updated_at = timezone('utc'::text, now());
|
|
||||||
return new;
|
|
||||||
end;
|
|
||||||
$$ language plpgsql security definer;
|
|
||||||
|
|
||||||
-- Create completed_at trigger function for document artefacts
|
|
||||||
create or replace function public.set_completed_at()
|
|
||||||
returns trigger as $$
|
|
||||||
begin
|
|
||||||
if NEW.status = 'completed' and OLD.status != 'completed' then
|
|
||||||
NEW.completed_at = now();
|
|
||||||
end if;
|
|
||||||
return NEW;
|
|
||||||
end;
|
|
||||||
$$ language plpgsql security definer;
|
|
||||||
|
|
||||||
--[ 5. Core Tables ]--
|
|
||||||
-- Base user profiles
|
|
||||||
create table if not exists public.profiles (
|
|
||||||
id uuid primary key references auth.users(id) on delete cascade,
|
|
||||||
email text not null unique,
|
|
||||||
user_type text not null check (
|
|
||||||
user_type in (
|
|
||||||
'teacher',
|
|
||||||
'student',
|
|
||||||
'email_teacher',
|
|
||||||
'email_student',
|
|
||||||
'developer',
|
|
||||||
'superadmin'
|
|
||||||
)
|
|
||||||
),
|
|
||||||
username text not null unique,
|
|
||||||
full_name text,
|
|
||||||
display_name text,
|
|
||||||
metadata jsonb default '{}'::jsonb,
|
|
||||||
user_db_name text,
|
|
||||||
school_db_name text,
|
|
||||||
neo4j_sync_status text default 'pending' check (neo4j_sync_status in ('pending', 'ready', 'failed')),
|
|
||||||
neo4j_synced_at timestamp with time zone,
|
|
||||||
last_login timestamp with time zone,
|
|
||||||
created_at timestamp with time zone default timezone('utc'::text, now()),
|
|
||||||
updated_at timestamp with time zone default timezone('utc'::text, now())
|
|
||||||
);
|
|
||||||
comment on table public.profiles is 'User profiles linked to Supabase auth.users';
|
|
||||||
comment on column public.profiles.user_type is 'Type of user: teacher or student';
|
|
||||||
|
|
||||||
-- Active institutes
|
|
||||||
create table if not exists public.institutes (
|
|
||||||
id uuid primary key default uuid_generate_v4(),
|
|
||||||
name text not null,
|
|
||||||
urn text unique,
|
|
||||||
status text not null default 'active' check (status in ('active', 'inactive', 'pending')),
|
|
||||||
address jsonb default '{}'::jsonb,
|
|
||||||
website text,
|
|
||||||
metadata jsonb default '{}'::jsonb,
|
|
||||||
geo_coordinates jsonb default '{}'::jsonb,
|
|
||||||
neo4j_uuid_string text,
|
|
||||||
neo4j_public_sync_status text default 'pending' check (neo4j_public_sync_status in ('pending', 'synced', 'failed')),
|
|
||||||
neo4j_public_sync_at timestamp with time zone,
|
|
||||||
neo4j_private_sync_status text default 'not_started' check (neo4j_private_sync_status in ('not_started', 'pending', 'synced', 'failed')),
|
|
||||||
neo4j_private_sync_at timestamp with time zone,
|
|
||||||
created_at timestamp with time zone default timezone('utc'::text, now()),
|
|
||||||
updated_at timestamp with time zone default timezone('utc'::text, now())
|
|
||||||
);
|
|
||||||
comment on table public.institutes is 'Active institutes in the system';
|
|
||||||
comment on column public.institutes.geo_coordinates is 'Geospatial coordinates from OSM search (latitude, longitude, boundingbox)';
|
|
||||||
|
|
||||||
--[ 6. neoFS Filesystem Tables ]--
|
|
||||||
-- File cabinets for organizing files
|
|
||||||
create table if not exists public.file_cabinets (
|
|
||||||
id uuid primary key default uuid_generate_v4(),
|
|
||||||
user_id uuid not null references public.profiles(id) on delete cascade,
|
|
||||||
name text not null,
|
|
||||||
created_at timestamp with time zone default timezone('utc'::text, now())
|
|
||||||
);
|
|
||||||
comment on table public.file_cabinets is 'User file cabinets for organizing documents and files';
|
|
||||||
|
|
||||||
-- Files stored in cabinets
|
|
||||||
create table if not exists public.files (
|
|
||||||
id uuid primary key default uuid_generate_v4(),
|
|
||||||
cabinet_id uuid not null references public.file_cabinets(id) on delete cascade,
|
|
||||||
name text not null,
|
|
||||||
path text not null,
|
|
||||||
bucket text default 'file-cabinets' not null,
|
|
||||||
created_at timestamp with time zone default timezone('utc'::text, now()),
|
|
||||||
mime_type text,
|
|
||||||
metadata jsonb default '{}'::jsonb,
|
|
||||||
size text,
|
|
||||||
category text generated always as (
|
|
||||||
case
|
|
||||||
when mime_type like 'image/%' then 'image'
|
|
||||||
when mime_type = 'application/pdf' then 'document'
|
|
||||||
when mime_type in ('application/msword', 'application/vnd.openxmlformats-officedocument.wordprocessingml.document') then 'document'
|
|
||||||
when mime_type in ('application/vnd.ms-excel', 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet') then 'spreadsheet'
|
|
||||||
when mime_type in ('application/vnd.ms-powerpoint', 'application/vnd.openxmlformats-officedocument.presentationml.presentation') then 'presentation'
|
|
||||||
when mime_type like 'audio/%' then 'audio'
|
|
||||||
when mime_type like 'video/%' then 'video'
|
|
||||||
else 'other'
|
|
||||||
end
|
|
||||||
) stored
|
|
||||||
);
|
|
||||||
comment on table public.files is 'Files stored in user cabinets with automatic categorization';
|
|
||||||
comment on column public.files.category is 'Automatically determined file category based on MIME type';
|
|
||||||
|
|
||||||
-- AI brains for processing files
|
|
||||||
create table if not exists public.brains (
|
|
||||||
id uuid primary key default uuid_generate_v4(),
|
|
||||||
user_id uuid not null references public.profiles(id) on delete cascade,
|
|
||||||
name text not null,
|
|
||||||
purpose text,
|
|
||||||
created_at timestamp with time zone default timezone('utc'::text, now())
|
|
||||||
);
|
|
||||||
comment on table public.brains is 'AI brains for processing and analyzing user files';
|
|
||||||
|
|
||||||
-- Brain-file associations
|
|
||||||
create table if not exists public.brain_files (
|
|
||||||
brain_id uuid not null references public.brains(id) on delete cascade,
|
|
||||||
file_id uuid not null references public.files(id) on delete cascade,
|
|
||||||
primary key (brain_id, file_id)
|
|
||||||
);
|
|
||||||
comment on table public.brain_files is 'Associations between AI brains and files for processing';
|
|
||||||
|
|
||||||
-- Document artefacts from file processing.
-- One row per artefact extracted from a file (page image, text chunk, ...),
-- with lifecycle columns tracking the extraction run.
create table if not exists public.document_artefacts (
    id uuid primary key default uuid_generate_v4(),
    file_id uuid references public.files(id) on delete cascade,
    page_number integer default 0 not null,
    type text not null,
    rel_path text not null,
    size_tag text,
    language text,
    chunk_index integer,
    extra jsonb,
    created_at timestamp with time zone default timezone('utc'::text, now()),
    -- extraction lifecycle
    status text default 'completed' not null check (status in ('pending', 'processing', 'completed', 'failed')),
    started_at timestamp with time zone default timezone('utc'::text, now()),
    completed_at timestamp with time zone,
    error_message text
);

comment on table public.document_artefacts is 'Extracted artefacts from document processing';
comment on column public.document_artefacts.status is 'Extraction status: pending, processing, completed, or failed';
comment on column public.document_artefacts.started_at is 'Timestamp when extraction process started';
comment on column public.document_artefacts.completed_at is 'Timestamp when extraction process completed (success or failure)';
comment on column public.document_artefacts.error_message is 'Error details if extraction failed';
|
|
||||||
-- Function execution logs
create table if not exists public.function_logs (
    id serial primary key,
    file_id uuid references public.files(id) on delete cascade,
    -- NOTE(review): "timestamp" shadows the SQL type name; it works unquoted,
    -- but a rename (e.g. logged_at) would be safer for tooling — confirm.
    timestamp timestamp with time zone default timezone('utc'::text, now()),
    step text,
    message text,
    data jsonb
);

comment on table public.function_logs is 'Logs of function executions and processing steps';
||||||
|
|
||||||
--[ 7. Relationship Tables ]--
-- Institute memberships: one row per (profile, institute) pair, enforced by
-- the unique constraint below.
create table if not exists public.institute_memberships (
    id uuid primary key default uuid_generate_v4(),
    profile_id uuid references public.profiles(id) on delete cascade,
    institute_id uuid references public.institutes(id) on delete cascade,
    role text not null check (role in ('teacher', 'student')),
    tldraw_preferences jsonb default '{}'::jsonb,
    metadata jsonb default '{}'::jsonb,
    created_at timestamp with time zone default timezone('utc'::text, now()),
    updated_at timestamp with time zone default timezone('utc'::text, now()),
    unique(profile_id, institute_id)
);

comment on table public.institute_memberships is 'Manages user roles and relationships with institutes';

-- Membership requests awaiting review
create table if not exists public.institute_membership_requests (
    id uuid primary key default uuid_generate_v4(),
    profile_id uuid references public.profiles(id) on delete cascade,
    institute_id uuid references public.institutes(id) on delete cascade,
    requested_role text check (requested_role in ('teacher', 'student')),
    status text default 'pending' check (status in ('pending', 'approved', 'rejected')),
    metadata jsonb default '{}'::jsonb,
    created_at timestamp with time zone default timezone('utc'::text, now()),
    updated_at timestamp with time zone default timezone('utc'::text, now())
);

comment on table public.institute_membership_requests is 'Tracks requests to join institutes';
||||||
|
|
||||||
--[ 8. Audit Tables ]--
-- System audit logs. profile_id is SET NULL (not CASCADE) so audit history
-- survives profile deletion.
create table if not exists public.audit_logs (
    id uuid primary key default uuid_generate_v4(),
    profile_id uuid references public.profiles(id) on delete set null,
    action_type text,
    table_name text,
    record_id uuid,
    changes jsonb,
    created_at timestamp with time zone default timezone('utc'::text, now())
);

comment on table public.audit_logs is 'System-wide audit trail for important operations';
||||||
|
|
||||||
--[ 9. Exam Specifications ]--
create table if not exists public.eb_specifications (
    id uuid primary key default uuid_generate_v4(),
    spec_code text unique,
    exam_board_code text,
    award_code text,
    subject_code text,
    first_teach text,
    spec_ver text,

    -- Document storage details
    storage_loc text,
    doc_type text check (doc_type in ('pdf', 'json', 'md', 'html', 'txt', 'doctags')),
    doc_details jsonb default '{}'::jsonb,   -- e.g. Tika extract
    docling_docs jsonb default '{}'::jsonb,  -- e.g. Docling extracts settings and storage locations

    created_at timestamp with time zone default timezone('utc'::text, now()),
    updated_at timestamp with time zone default timezone('utc'::text, now())
);

comment on table public.eb_specifications is 'Exam board specifications and their primary document';
comment on column public.eb_specifications.spec_code is 'Unique code for the specification, used for linking exams';
comment on column public.eb_specifications.doc_details is 'Tika extract of the specification document';
comment on column public.eb_specifications.docling_docs is 'Docling extracts settings and storage locations for the specification document';
||||||
|
|
||||||
--[ 10. Exam Papers / Entries ]--
create table if not exists public.eb_exams (
    id uuid primary key default uuid_generate_v4(),
    exam_code text unique,
    spec_code text references public.eb_specifications(spec_code) on delete cascade,
    paper_code text,
    tier text,
    session text,
    type_code text,

    -- Document storage details
    storage_loc text,
    doc_type text check (doc_type in ('pdf', 'json', 'md', 'html', 'txt', 'doctags')),
    doc_details jsonb default '{}'::jsonb,   -- e.g. Tika extract
    docling_docs jsonb default '{}'::jsonb,  -- e.g. Docling extracts settings and storage locations

    created_at timestamp with time zone default timezone('utc'::text, now()),
    updated_at timestamp with time zone default timezone('utc'::text, now())
);

comment on table public.eb_exams is 'Exam papers and related documents linked to specifications';
comment on column public.eb_exams.exam_code is 'Unique code for the exam paper, used for linking questions';
comment on column public.eb_exams.type_code is 'Type code for the exam document: Question Paper (QP), Mark Scheme (MS), Examiner Report (ER), Other (OT)';
comment on column public.eb_exams.doc_details is 'Tika extract of the exam paper document';
comment on column public.eb_exams.docling_docs is 'Docling extracts settings and storage locations for the exam paper document';
||||||
|
|
||||||
--[ 11. Indexes ]--

-- Geospatial / lookup indexes on institutes
create index if not exists idx_institutes_geo_coordinates on public.institutes using gin(geo_coordinates);
create index if not exists idx_institutes_urn on public.institutes(urn);

-- Document artefacts: composite indexes serve the common
-- "artefacts of file X by status/type" access paths
create index if not exists idx_document_artefacts_file_status on public.document_artefacts(file_id, status);
create index if not exists idx_document_artefacts_file_type on public.document_artefacts(file_id, type);
create index if not exists idx_document_artefacts_status on public.document_artefacts(status);

-- Files
create index if not exists idx_files_cabinet_id on public.files(cabinet_id);
create index if not exists idx_files_mime_type on public.files(mime_type);
create index if not exists idx_files_category on public.files(category);

-- Brains
create index if not exists idx_brains_user_id on public.brains(user_id);

-- Exam board lookups
create index if not exists idx_eb_exams_exam_code on public.eb_exams(exam_code);
create index if not exists idx_eb_exams_spec_code on public.eb_exams(spec_code);
create index if not exists idx_eb_exams_paper_code on public.eb_exams(paper_code);
create index if not exists idx_eb_exams_tier on public.eb_exams(tier);
create index if not exists idx_eb_exams_session on public.eb_exams(session);
create index if not exists idx_eb_exams_type_code on public.eb_exams(type_code);
create index if not exists idx_eb_specifications_spec_code on public.eb_specifications(spec_code);
create index if not exists idx_eb_specifications_exam_board_code on public.eb_specifications(exam_board_code);
create index if not exists idx_eb_specifications_award_code on public.eb_specifications(award_code);
create index if not exists idx_eb_specifications_subject_code on public.eb_specifications(subject_code);
||||||
|
|
||||||
--[ 12. Triggers ]--
-- Each trigger is dropped before (re)creation so this section is idempotent,
-- matching the drop-then-create convention used later in this file.

-- Set completed_at when document artefact status changes to completed
drop trigger if exists trigger_set_completed_at on public.document_artefacts;
create trigger trigger_set_completed_at
    before update on public.document_artefacts
    for each row
    execute function public.set_completed_at();

-- Set updated_at on profile updates
drop trigger if exists trigger_profiles_updated_at on public.profiles;
create trigger trigger_profiles_updated_at
    before update on public.profiles
    for each row
    execute function public.handle_updated_at();

-- Set updated_at on institute updates
drop trigger if exists trigger_institutes_updated_at on public.institutes;
create trigger trigger_institutes_updated_at
    before update on public.institutes
    for each row
    execute function public.handle_updated_at();

-- Set updated_at on institute_memberships updates
drop trigger if exists trigger_institute_memberships_updated_at on public.institute_memberships;
create trigger trigger_institute_memberships_updated_at
    before update on public.institute_memberships
    for each row
    execute function public.handle_updated_at();

-- Set updated_at on institute_membership_requests updates
-- BUG FIX: this trigger was previously created on public.institute_memberships,
-- so institute_membership_requests.updated_at was never maintained (and
-- handle_updated_at fired twice on institute_memberships instead).
-- Also drop the stray copy from earlier buggy deployments.
drop trigger if exists trigger_institute_membership_requests_updated_at on public.institute_memberships;
drop trigger if exists trigger_institute_membership_requests_updated_at on public.institute_membership_requests;
create trigger trigger_institute_membership_requests_updated_at
    before update on public.institute_membership_requests
    for each row
    execute function public.handle_updated_at();

-- Set updated_at on eb_specifications updates
drop trigger if exists trigger_eb_specifications_updated_at on public.eb_specifications;
create trigger trigger_eb_specifications_updated_at
    before update on public.eb_specifications
    for each row
    execute function public.handle_updated_at();

-- Set updated_at on eb_exams updates
drop trigger if exists trigger_eb_exams_updated_at on public.eb_exams;
create trigger trigger_eb_exams_updated_at
    before update on public.eb_exams
    for each row
    execute function public.handle_updated_at();
|
||||||
@ -1,191 +0,0 @@
|
|||||||
--[ 8. Auth Functions ]--
-- Create a secure function to check admin status.
-- Returns true iff the calling user's profile has user_type = 'admin'.
-- Declared STABLE (was implicitly VOLATILE): the function only reads the
-- database, and STABLE lets the planner cache the result within a statement —
-- important when this is evaluated per-row in RLS policies.
-- NOTE(review): security definer with no `set search_path` — consider pinning
-- the search path to harden against search-path hijacking.
create or replace function public.is_admin()
returns boolean
language sql
stable
security definer
as $$
    select coalesce(
        (select true
         from public.profiles
         where id = auth.uid()
           and user_type = 'admin'),
        false
    );
$$;
|
|
||||||
|
|
||||||
-- Create a secure function to check super admin status.
-- Declared STABLE for the same planner-caching reasons as is_admin().
-- NOTE(review): this is currently byte-for-byte the same test as is_admin()
-- (user_type = 'admin'); if a distinct super-admin tier exists it should check
-- that value instead — confirm intended semantics before changing.
create or replace function public.is_super_admin()
returns boolean
language sql
stable
security definer
as $$
    select coalesce(
        (select true
         from public.profiles
         where id = auth.uid()
           and user_type = 'admin'),
        false
    );
$$;
|
|
||||||
|
|
||||||
-- Create public wrapper functions
-- Note: These are now the main implementation functions, not wrappers
-- The original auth schema functions have been moved to public schema

-- Allow any logged-in user to evaluate the admin checks (the functions
-- themselves are SECURITY DEFINER, so RLS on profiles is not a concern here)
grant execute on function public.is_admin to authenticated;
grant execute on function public.is_super_admin to authenticated;
|
|
||||||
|
|
||||||
-- Initial admin setup function.
-- Promotes the profile matching admin_email to user_type 'admin', filling in
-- username/display_name defaults where absent. Callable only as service_role
-- or a superuser; returns the updated profile as JSON, or raises if no profile
-- matches the given email.
create or replace function public.setup_initial_admin(admin_email text)
returns json
language plpgsql
security definer
as $$
declare
    v_result json;
begin
    -- Only allow this to run as service role or superuser
    if not (
        current_user = 'service_role'
        or exists (
            select 1 from pg_roles
            where rolname = current_user
              and rolsuper
        )
    ) then
        raise exception 'Must be run as service_role or superuser';
    end if;

    -- Promote the matching profile; coalesce keeps any existing names
    update public.profiles
       set user_type = 'admin',
           username = coalesce(username, 'superadmin'),
           display_name = coalesce(display_name, 'Super Admin')
     where email = admin_email
    returning json_build_object(
        'id', id,
        'email', email,
        'user_type', user_type,
        'username', username,
        'display_name', display_name
    ) into v_result;

    -- No row updated means no profile with that email exists
    if v_result is null then
        raise exception 'Admin user with email % not found', admin_email;
    end if;

    return v_result;
end;
$$;

-- Lock down execution: remove the default PUBLIC grant, then allow only
-- authenticated users and the service role (the function re-checks the caller)
revoke execute on function public.setup_initial_admin from public;
grant execute on function public.setup_initial_admin to authenticated, service_role;
|
|
||||||
|
|
||||||
-- Create RPC wrapper for REST API access.
-- Thin pass-through so PostgREST clients can invoke the setup routine through
-- the rpc schema; all validation lives in public.setup_initial_admin.
create or replace function rpc.setup_initial_admin(admin_email text)
returns json
language plpgsql
security definer
as $$
begin
    return public.setup_initial_admin(admin_email);
end;
$$;

-- Grant execute permissions for RPC wrapper
grant execute on function rpc.setup_initial_admin to authenticated, service_role;
|
|
||||||
|
|
||||||
--[ 9. Utility Functions ]--
-- Check if database is ready: essential schemas present, auth.users exists,
-- and RLS is enabled on public.profiles. Returns false on the first failure.
create or replace function check_db_ready()
returns boolean
language plpgsql
security definer
as $$
begin
    -- All three essential schemas must exist.
    -- BUG FIX: the previous `if not exists (... where schema_name in (...))`
    -- check passed as soon as ANY one of the schemas existed; require all three.
    if (
        select count(distinct schema_name)
        from information_schema.schemata
        where schema_name in ('auth', 'storage', 'public')
    ) < 3 then
        return false;
    end if;

    -- Check if essential tables exist
    if not exists (
        select 1
        from information_schema.tables
        where table_schema = 'auth'
          and table_name = 'users'
    ) then
        return false;
    end if;

    -- Check if RLS is enabled on public.profiles
    if not exists (
        select 1
        from pg_tables
        where schemaname = 'public'
          and tablename = 'profiles'
          and rowsecurity = true
    ) then
        return false;
    end if;

    return true;
end;
$$;

-- Grant execute permission
grant execute on function check_db_ready to anon, authenticated, service_role;
|
|
||||||
|
|
||||||
-- Function to handle new user registration.
-- Creates a public.profiles row for each new auth.users row, taking user_type,
-- username and display_name from the signup metadata (raw_user_meta_data) when
-- present, otherwise falling back to defaults derived from the email address.
create or replace function public.handle_new_user()
returns trigger
language plpgsql
security definer set search_path = public
as $$
declare
    v_user_type text := 'email_student';
    v_username text;
begin
    -- Default username/display name: the part of the email before '@'
    v_username := split_part(new.email, '@', 1);

    insert into public.profiles (id, email, user_type, username, display_name)
    values (
        new.id,
        new.email,
        coalesce(new.raw_user_meta_data->>'user_type', v_user_type),
        coalesce(new.raw_user_meta_data->>'username', v_username),
        coalesce(new.raw_user_meta_data->>'display_name', v_username)
    );

    return new;
end;
$$;
|
|
||||||
|
|
||||||
-- Trigger for new user creation
drop trigger if exists on_auth_user_created on auth.users;
create trigger on_auth_user_created
    after insert on auth.users
    -- `execute function` replaces the deprecated `execute procedure` spelling,
    -- for consistency with every other trigger in this file
    for each row execute function public.handle_new_user();

--[ 11. Database Triggers ]--
-- NOTE(review): these handle_* triggers duplicate the trigger_* triggers
-- created in section 12 above, so handle_updated_at fires twice per update on
-- these tables. Harmless (it sets the same timestamp), but one of the two sets
-- should probably be removed — confirm which naming convention is canonical.
drop trigger if exists handle_profiles_updated_at on public.profiles;
create trigger handle_profiles_updated_at
    before update on public.profiles
    for each row execute function public.handle_updated_at();

drop trigger if exists handle_institute_memberships_updated_at on public.institute_memberships;
create trigger handle_institute_memberships_updated_at
    before update on public.institute_memberships
    for each row execute function public.handle_updated_at();

drop trigger if exists handle_membership_requests_updated_at on public.institute_membership_requests;
create trigger handle_membership_requests_updated_at
    before update on public.institute_membership_requests
    for each row execute function public.handle_updated_at();
|
|
||||||
@ -1,20 +0,0 @@
|
|||||||
-- Storage policies configuration for Supabase.
-- Note: Storage bucket policies are managed by Supabase internally;
-- this file provides guidance on what should be configured.

-- Storage bucket policies should be configured through:
-- 1. Supabase Dashboard > Storage > Policies
-- 2. Or via SQL with proper permissions (requires service_role or owner access)

-- Recommended policies for storage.buckets:
-- - Super admin has full access to buckets
-- - Users can create their own buckets
-- - Users can view their own buckets or public buckets

-- Recommended policies for storage.objects:
-- - Users can upload to buckets they own
-- - Users can view objects in public buckets
-- - Users can manage objects in buckets they own

-- Note: These policies require the service_role or appropriate permissions
-- to be applied to the storage schema tables.
|
|
||||||
@ -1,20 +0,0 @@
|
|||||||
-- Initial admin setup for ClassroomCopilot
-- This file handles basic database setup and permissions

-- Ensure uuid-ossp extension is enabled
create extension if not exists "uuid-ossp" schema extensions;

-- Grant broad permissions to authenticated users on the public schema.
-- Note: deliberately permissive — per-user scoping is enforced by the
-- row-level-security policies on the individual tables.
grant usage on schema public to authenticated;
grant all on all tables in schema public to authenticated;
grant all on all sequences in schema public to authenticated;
grant all on all functions in schema public to authenticated;

-- Default privileges so objects created later inherit the same grants
alter default privileges in schema public grant all on tables to authenticated;
alter default privileges in schema public grant all on sequences to authenticated;
alter default privileges in schema public grant all on functions to authenticated;

-- Note: The setup_initial_admin function is defined in 62-functions-triggers.sql
-- and should be called with an admin email parameter when needed
|
|
||||||
@ -1,95 +0,0 @@
|
|||||||
-- Files table augments and storage GC hooks

-- 1) Add columns to files if missing.
-- `add column if not exists` (PostgreSQL 9.6+) gives the same idempotent effect
-- as the original DO block without the per-column information_schema lookups.
alter table public.files
    add column if not exists uploaded_by uuid references public.profiles(id);
alter table public.files
    add column if not exists size_bytes bigint;
alter table public.files
    add column if not exists source text default 'uploader-web';

-- 2) Unique index for cabinet/path combo
create unique index if not exists uq_files_cabinet_path on public.files(cabinet_id, path);
|
|
||||||
|
|
||||||
-- 3) Storage GC helpers (ported from neoFS with storage schema).
-- Deletes the storage object at p_path plus everything under it (p_path
-- treated as a directory prefix). No-op when either argument is null.
create or replace function public._delete_storage_objects(p_bucket text, p_path text)
returns void
language plpgsql security definer
set search_path to 'public', 'storage'
as $$
begin
    if p_bucket is null or p_path is null then
        return;
    end if;
    -- the object itself, then any children under the path prefix
    -- NOTE(review): LIKE treats '%' and '_' in p_path as wildcards — confirm
    -- stored paths can never contain these, or escape them.
    delete from storage.objects where bucket_id = p_bucket and name = p_path;
    delete from storage.objects where bucket_id = p_bucket and name like p_path || '/%';
end
$$;
|
|
||||||
|
|
||||||
-- Garbage-collect storage objects when a files row is deleted or its
-- bucket/path changes. Runs AFTER the row change, so the return value is
-- ignored; null is returned by convention.
create or replace function public._storage_gc_sql()
returns trigger
language plpgsql security definer
set search_path to 'public', 'storage'
as $$
begin
    if tg_op = 'DELETE' then
        perform public._delete_storage_objects(old.bucket, old.path);
    elsif tg_op = 'UPDATE' then
        -- clean up only when the object actually moved
        if (old.bucket is distinct from new.bucket) or (old.path is distinct from new.path) then
            perform public._delete_storage_objects(old.bucket, old.path);
        end if;
    end if;
    return null;
end
$$;

-- 4) Attach GC trigger to files bucket/path changes
drop trigger if exists trg_files_gc on public.files;
create trigger trg_files_gc
    after delete or update of bucket, path on public.files
    for each row execute function public._storage_gc_sql();
|
|
||||||
|
|
||||||
-- 5) Document artefacts GC: remove artefact objects from storage when rows
-- change or are deleted. BEFORE trigger: must return old (delete) / new
-- (update) for the operation to proceed.
create or replace function public._artefact_gc_sql()
returns trigger
language plpgsql security definer
set search_path to 'public', 'storage'
as $$
declare
    v_bucket text;  -- bucket lives on the owning files row
begin
    if tg_op = 'DELETE' then
        select f.bucket into v_bucket from public.files f where f.id = old.file_id;
        perform public._delete_storage_objects(v_bucket, old.rel_path);
        return old;
    elsif tg_op = 'UPDATE' then
        -- clean up the old object only when it moved or changed owner file
        if (old.rel_path is distinct from new.rel_path) or (old.file_id is distinct from new.file_id) then
            select f.bucket into v_bucket from public.files f where f.id = old.file_id;
            perform public._delete_storage_objects(v_bucket, old.rel_path);
        end if;
        return new;
    end if;
    -- BUG FIX: previously control could fall off the end of the function
    -- (a runtime error in plpgsql) if the trigger ever fired for another
    -- operation; return null explicitly as a safe default.
    return null;
end
$$;

drop trigger if exists trg_document_artefacts_gc on public.document_artefacts;
create trigger trg_document_artefacts_gc
    before delete or update of file_id, rel_path on public.document_artefacts
    for each row execute function public._artefact_gc_sql();
|
|
||||||
|
|
||||||
|
|
||||||
@ -1,84 +0,0 @@
|
|||||||
-- Enable RLS and define policies for filesystem tables

-- 1) Enable RLS
alter table if exists public.file_cabinets enable row level security;
alter table if exists public.files enable row level security;
alter table if exists public.brain_files enable row level security;
alter table if exists public.document_artefacts enable row level security;

-- 2) Cabinets: owners get full access.
-- No FOR/TO clause, so this single policy covers every command and role.
drop policy if exists "User can access own cabinets" on public.file_cabinets;
create policy "User can access own cabinets" on public.file_cabinets
    using (user_id = auth.uid())
    with check (user_id = auth.uid());
|
|
||||||
|
|
||||||
-- 3) Files: access limited to files living in a cabinet the caller owns.
-- The first policy covers all commands; the per-command policies below are
-- permissive and OR together with it for authenticated users.
drop policy if exists "User can access files in own cabinet" on public.files;
create policy "User can access files in own cabinet" on public.files
    using (exists (
        select 1 from public.file_cabinets fc
        where fc.id = files.cabinet_id and fc.user_id = auth.uid()
    ))
    with check (exists (
        select 1 from public.file_cabinets fc
        where fc.id = files.cabinet_id and fc.user_id = auth.uid()
    ));

drop policy if exists "User can insert files into own cabinet" on public.files;
create policy "User can insert files into own cabinet" on public.files for insert to authenticated
    with check (exists (
        select 1 from public.file_cabinets fc
        where fc.id = files.cabinet_id and fc.user_id = auth.uid()
    ));

drop policy if exists "User can update files in own cabinet" on public.files;
create policy "User can update files in own cabinet" on public.files for update to authenticated
    using (exists (
        select 1 from public.file_cabinets fc
        where fc.id = files.cabinet_id and fc.user_id = auth.uid()
    ))
    with check (exists (
        select 1 from public.file_cabinets fc
        where fc.id = files.cabinet_id and fc.user_id = auth.uid()
    ));

drop policy if exists "User can delete files from own cabinet" on public.files;
create policy "User can delete files from own cabinet" on public.files for delete
    using (exists (
        select 1 from public.file_cabinets fc
        where fc.id = files.cabinet_id and fc.user_id = auth.uid()
    ));
|
|
||||||
|
|
||||||
-- 4) Brain-files: allow linking owned files to owned brains
drop policy if exists "User can link files they own to their brains" on public.brain_files;
create policy "User can link files they own to their brains" on public.brain_files
    using (
        exists (select 1 from public.brains b where b.id = brain_files.brain_id and b.user_id = auth.uid())
        and exists (
            select 1 from public.files f join public.file_cabinets c on f.cabinet_id = c.id
            where f.id = brain_files.file_id and c.user_id = auth.uid()
        )
    )
    -- SECURITY FIX: was `with check (true)`, which allowed any role to insert
    -- arbitrary brain/file links; inserts and updated rows must now satisfy the
    -- same ownership test as reads.
    with check (
        exists (select 1 from public.brains b where b.id = brain_files.brain_id and b.user_id = auth.uid())
        and exists (
            select 1 from public.files f join public.file_cabinets c on f.cabinet_id = c.id
            where f.id = brain_files.file_id and c.user_id = auth.uid()
        )
    );
|
|
||||||
|
|
||||||
-- 5) Document artefacts: owners (via the file's cabinet) may read and delete;
-- the service role gets unrestricted read/write for the processing pipeline.
drop policy if exists "artefacts_read_by_owner" on public.document_artefacts;
create policy "artefacts_read_by_owner" on public.document_artefacts for select to authenticated
    using (exists (
        select 1 from public.files f join public.file_cabinets c on f.cabinet_id = c.id
        where f.id = document_artefacts.file_id and c.user_id = auth.uid()
    ));

drop policy if exists "artefacts_rw_service" on public.document_artefacts;
create policy "artefacts_rw_service" on public.document_artefacts to service_role
    using (true) with check (true);

-- Allow owners to delete their artefacts (needed for cascades under RLS)
drop policy if exists "artefacts_delete_by_owner" on public.document_artefacts;
create policy "artefacts_delete_by_owner" on public.document_artefacts for delete to authenticated
    using (exists (
        select 1 from public.files f join public.file_cabinets c on f.cabinet_id = c.id
        where f.id = document_artefacts.file_id and c.user_id = auth.uid()
    ));

-- File vectors RLS and policies are defined in 67-vectors.sql after the table is created
|
|
||||||
|
|
||||||
|
|
||||||
@ -1,79 +0,0 @@
|
|||||||
-- Vectors: file_vectors table and similarity search function

-- 1) Ensure pgvector extension is available
create extension if not exists vector;

-- 2) File vectors table.
-- NOTE(review): embedding is dimensionless (public.vector with no size);
-- ANN indexing requires a fixed dimension — see section 3 below.
create table if not exists public.file_vectors (
    id bigint generated by default as identity primary key,
    created_at timestamp with time zone default now() not null,
    embedding public.vector,
    metadata jsonb,
    content text
);
|
|
||||||
|
|
||||||
-- 3) ANN index (skipped until embedding dimension is fixed)
|
|
||||||
-- To enable: set column type to public.vector(<dim>) and uncomment:
|
|
||||||
-- create index if not exists file_vectors_embedding_idx
|
|
||||||
-- on public.file_vectors using ivfflat (embedding public.vector_cosine_ops)
|
|
||||||
-- with (lists='100');
|
|
||||||
|
|
||||||
-- 3b) Enable RLS and set policies (moved here to avoid ordering issues)
alter table if exists public.file_vectors enable row level security;

-- Readable when the vector is not tied to a file, or when the caller owns the
-- cabinet containing that file.
-- FIX: dropped the redundant `coalesce(x, null)` wrapper around the file_id
-- null test — coalesce with a single trailing null is a no-op.
drop policy if exists "vectors_read_by_owner" on public.file_vectors;
create policy "vectors_read_by_owner" on public.file_vectors for select to authenticated
    using ((metadata->>'file_id') is null or exists (
        select 1 from public.files f join public.file_cabinets c on f.cabinet_id = c.id
        where f.id = (metadata->>'file_id')::uuid and c.user_id = auth.uid()
    ));

drop policy if exists "vectors_rw_service" on public.file_vectors;
create policy "vectors_rw_service" on public.file_vectors to service_role
    using (true) with check (true);
|
|
||||||
|
|
||||||
-- 4) Match function mirrored from neoFS (generic metadata mapping).
-- Cosine-similarity search over file_vectors with optional jsonb filters.
-- Each filter key is applied only when present in `filter`; the *_ilike keys
-- match case-insensitively. Returns the match_count nearest rows (default 10,
-- minimum 1), nearest first.
create or replace function public.match_file_vectors(
    filter jsonb,
    match_count integer,
    query_embedding public.vector
)
returns table (
    id bigint,
    file_id uuid,
    cabinet_id uuid,
    artefact_type text,
    artefact_is text,
    original_path_prefix text,
    original_filename text,
    content text,
    metadata jsonb,
    similarity double precision
)
language sql stable as $$
    select
        v.id,
        nullif(v.metadata->>'file_id','')::uuid as file_id,
        nullif(v.metadata->>'cabinet_id','')::uuid as cabinet_id,
        nullif(v.metadata->>'artefact_type','') as artefact_type,
        -- NOTE(review): this output column reads metadata key 'artefact_is'
        -- while the filter below uses 'artefact_id' — looks like a typo;
        -- confirm which key the ingest pipeline actually writes.
        nullif(v.metadata->>'artefact_is','') as artefact_is,
        nullif(v.metadata->>'original_path_prefix','') as original_path_prefix,
        nullif(v.metadata->>'original_filename','') as original_filename,
        v.content,
        v.metadata,
        -- cosine distance (<=>) mapped to similarity in [0, 1]-ish range
        1 - (v.embedding <=> query_embedding) as similarity
    from public.file_vectors v
    where
        (coalesce(filter ? 'file_id', false) = false or (v.metadata->>'file_id')::uuid = (filter->>'file_id')::uuid)
        and (coalesce(filter ? 'cabinet_id', false) = false or (v.metadata->>'cabinet_id')::uuid = (filter->>'cabinet_id')::uuid)
        and (coalesce(filter ? 'artefact_type', false) = false or (v.metadata->>'artefact_type') = (filter->>'artefact_type'))
        and (coalesce(filter ? 'artefact_id', false) = false or (v.metadata->>'artefact_id') = (filter->>'artefact_id'))
        and (coalesce(filter ? 'original_path_prefix', false) = false or (v.metadata->>'original_path_prefix') like (filter->>'original_path_prefix') || '%')
        and (coalesce(filter ? 'original_path_prefix_ilike', false) = false or (v.metadata->>'original_path_prefix') ilike (filter->>'original_path_prefix_ilike') || '%')
        and (coalesce(filter ? 'original_filename', false) = false or (v.metadata->>'original_filename') = (filter->>'original_filename'))
        and (coalesce(filter ? 'original_filename_ilike', false) = false or (v.metadata->>'original_filename') ilike (filter->>'original_filename_ilike'))
    order by v.embedding <=> query_embedding
    limit greatest(coalesce(match_count, 10), 1)
$$;
|
|
||||||
|
|
||||||
|
|
||||||
@ -1,73 +0,0 @@
|
|||||||
-- Cabinet memberships for sharing access
create table if not exists public.cabinet_memberships (
    id         uuid primary key default uuid_generate_v4(),
    cabinet_id uuid not null references public.file_cabinets(id) on delete cascade,
    profile_id uuid not null references public.profiles(id) on delete cascade,
    -- Access level granted within the cabinet.
    role       text not null check (role in ('owner','editor','viewer')),
    created_at timestamp with time zone default timezone('utc'::text, now()),
    updated_at timestamp with time zone default timezone('utc'::text, now()),
    -- A profile may hold at most one membership per cabinet.
    unique (cabinet_id, profile_id)
);
-- Lookup indexes for membership queries from either direction.
create index if not exists idx_cabinet_memberships_cabinet on public.cabinet_memberships (cabinet_id);
create index if not exists idx_cabinet_memberships_profile on public.cabinet_memberships (profile_id);

-- Keep updated_at current on every row change.
drop trigger if exists trg_cabinet_memberships_updated_at on public.cabinet_memberships;
create trigger trg_cabinet_memberships_updated_at
    before update on public.cabinet_memberships
    for each row
    execute function public.handle_updated_at();
-- RLS and policies
alter table if exists public.cabinet_memberships enable row level security;

-- SECURITY DEFINER helpers.  The ownership/membership checks below were
-- previously inline sub-selects; because policies on file_cabinets queried
-- cabinet_memberships while policies on cabinet_memberships queried
-- file_cabinets, Postgres raised "infinite recursion detected in policy"
-- once both policy sets were installed.  Definer functions run with the
-- owner's privileges, bypassing RLS on the tables they read and breaking
-- the cycle.
create or replace function public.is_cabinet_owner(p_cabinet_id uuid)
returns boolean
language sql stable security definer
set search_path to 'public'
as $$
  select exists (
    select 1 from public.file_cabinets c
    where c.id = p_cabinet_id and c.user_id = auth.uid()
  );
$$;

create or replace function public.is_cabinet_member(p_cabinet_id uuid)
returns boolean
language sql stable security definer
set search_path to 'public'
as $$
  select exists (
    select 1 from public.cabinet_memberships m
    where m.cabinet_id = p_cabinet_id and m.profile_id = auth.uid()
  );
$$;

-- Members can select their own memberships; cabinet owners can also see memberships
drop policy if exists cm_read_self_or_owner on public.cabinet_memberships;
create policy cm_read_self_or_owner on public.cabinet_memberships for select to authenticated
using (profile_id = auth.uid() or public.is_cabinet_owner(cabinet_id));

-- Cabinet owners can insert memberships
drop policy if exists cm_insert_by_owner on public.cabinet_memberships;
create policy cm_insert_by_owner on public.cabinet_memberships for insert to authenticated
with check (public.is_cabinet_owner(cabinet_id));

-- Cabinet owners can update memberships (e.g., role)
drop policy if exists cm_update_by_owner on public.cabinet_memberships;
create policy cm_update_by_owner on public.cabinet_memberships for update to authenticated
using (public.is_cabinet_owner(cabinet_id))
with check (public.is_cabinet_owner(cabinet_id));

-- Cabinet owners can delete memberships
drop policy if exists cm_delete_by_owner on public.cabinet_memberships;
create policy cm_delete_by_owner on public.cabinet_memberships for delete to authenticated
using (public.is_cabinet_owner(cabinet_id));

-- Extend access to cabinets/files for members (after table exists)
drop policy if exists "User can access cabinets via membership" on public.file_cabinets;
create policy "User can access cabinets via membership" on public.file_cabinets for select to authenticated
using (public.is_cabinet_member(id));

drop policy if exists "User can access files via membership" on public.files;
create policy "User can access files via membership" on public.files for select to authenticated
using (public.is_cabinet_member(cabinet_id));
@ -1,48 +0,0 @@
|
|||||||
-- Ensure storage objects for all artefacts are removed when a file is deleted
-- by deleting the entire "cabinet_id/file_id" directory prefix in Storage.

-- Helper to delete all objects under a prefix.
create or replace function public._delete_storage_prefix(p_bucket text, p_prefix text)
returns void
language plpgsql security definer
set search_path to 'public', 'storage'
as $$
begin
    -- Nothing to do without both a bucket and a prefix.
    if p_bucket is null or p_prefix is null then
        return;
    end if;

    -- Remove every object below the prefix, plus any object sitting
    -- exactly at the prefix itself (rare but safe).
    delete from storage.objects
    where bucket_id = p_bucket
      and (name like p_prefix || '/%' or name = p_prefix);
end
$$;
|
|
||||||
-- Update file-level GC to also delete the parent directory prefix (cabinet_id/file_id)
create or replace function public._storage_gc_sql()
returns trigger
language plpgsql security definer
set search_path to 'public', 'storage'
as $$
declare
  v_old_prefix text;
  v_new_prefix text;
begin
  -- Derive the directory prefix from the file path by removing the last
  -- segment (filename).
  -- Example: 'cabinet_id/file_id/filename.ext' -> 'cabinet_id/file_id'
  v_old_prefix := regexp_replace(old.path, '/[^/]+$', '');

  if tg_op = 'DELETE' then
    -- Delete the original object and any artefacts under the file's directory.
    perform public._delete_storage_objects(old.bucket, old.path);
    perform public._delete_storage_prefix(old.bucket, v_old_prefix);
  elsif tg_op = 'UPDATE' then
    if (old.bucket is distinct from new.bucket) or (old.path is distinct from new.path) then
      perform public._delete_storage_objects(old.bucket, old.path);
      -- Only sweep the old directory when the file actually moved out of it
      -- (different bucket or different prefix).  The previous version swept
      -- the prefix on every path change, so a simple rename within the same
      -- directory deleted sibling artefacts that still belong to the file.
      v_new_prefix := regexp_replace(new.path, '/[^/]+$', '');
      if (old.bucket is distinct from new.bucket)
         or (v_old_prefix is distinct from v_new_prefix) then
        perform public._delete_storage_prefix(old.bucket, v_old_prefix);
      end if;
    end if;
  end if;
  return null;
end
$$;
@ -1,41 +0,0 @@
|
|||||||
-- Add directory support to files table
-- Migration: Add directory support for folder uploads

-- New columns for directory/folder upload tracking (additive, idempotent).
alter table files
    add column if not exists is_directory boolean default false,
    add column if not exists parent_directory_id uuid references files(id) on delete cascade,
    add column if not exists relative_path text,
    add column if not exists directory_manifest jsonb,
    add column if not exists upload_session_id uuid,
    add column if not exists processing_status text default 'uploaded'
        check (processing_status in ('uploaded', 'processing', 'completed', 'failed', 'queued'));

-- Indexes for efficient directory queries.
create index if not exists idx_files_parent_directory on files (parent_directory_id);
create index if not exists idx_files_upload_session on files (upload_session_id);
create index if not exists idx_files_processing_status on files (processing_status);
create index if not exists idx_files_is_directory on files (is_directory);

-- Column documentation.
comment on column files.is_directory is 'True if this record represents a directory/folder';
comment on column files.parent_directory_id is 'ID of parent directory if this file is inside an uploaded folder';
comment on column files.relative_path is 'Relative path within the uploaded directory structure';
comment on column files.directory_manifest is 'JSON manifest of directory contents including file count, total size, structure';
comment on column files.upload_session_id is 'Groups files uploaded together in a single directory upload session';
comment on column files.processing_status is 'Simple status tracking without auto-processing';

-- Example directory_manifest structure:
-- {
--   "total_files": 15,
--   "total_size_bytes": 12345678,
--   "directory_structure": {
--     "documents/": {
--       "file1.pdf": {"size": 123456, "mime_type": "application/pdf"},
--       "subdirectory/": {
--         "file2.docx": {"size": 234567, "mime_type": "application/vnd.openxmlformats-officedocument.wordprocessingml.document"}
--       }
--     }
--   },
--   "upload_timestamp": "2024-09-23T12:00:00Z",
--   "upload_method": "directory_picker"
-- }
3
volumes/db/_supabase.sql
Normal file
3
volumes/db/_supabase.sql
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
-- Read the superuser name from the environment so the script works for
-- any POSTGRES_USER value.
\set pguser `echo "$POSTGRES_USER"`

-- Internal database used by Supabase services.
CREATE DATABASE _supabase WITH OWNER :pguser;
0
volumes/db/init/data.sql
Normal file
0
volumes/db/init/data.sql
Normal file
6
volumes/db/logs.sql
Normal file
6
volumes/db/logs.sql
Normal file
@ -0,0 +1,6 @@
|
|||||||
|
\set pguser `echo "$POSTGRES_USER"`

-- The analytics schema lives in the internal _supabase database.
\c _supabase
create schema if not exists _analytics;
alter schema _analytics owner to :pguser;
\c postgres
6
volumes/db/pooler.sql
Normal file
6
volumes/db/pooler.sql
Normal file
@ -0,0 +1,6 @@
|
|||||||
|
\set pguser `echo "$POSTGRES_USER"`

-- The connection-pooler (supavisor) schema lives in the internal
-- _supabase database.
\c _supabase
create schema if not exists _supavisor;
alter schema _supavisor owner to :pguser;
\c postgres
4
volumes/db/realtime.sql
Normal file
4
volumes/db/realtime.sql
Normal file
@ -0,0 +1,4 @@
|
|||||||
|
\set pguser `echo "$POSTGRES_USER"`

-- Schema for the realtime service, owned by the configured superuser.
create schema if not exists _realtime;
alter schema _realtime owner to :pguser;
@ -1,7 +1,6 @@
|
|||||||
-- NOTE: change to your own passwords for production environments
\set pgpass `echo "$POSTGRES_PASSWORD"`

-- Align every managed role's password with POSTGRES_PASSWORD.
ALTER USER supabase_admin WITH PASSWORD :'pgpass';
ALTER USER authenticator WITH PASSWORD :'pgpass';
ALTER USER pgbouncer WITH PASSWORD :'pgpass';
ALTER USER supabase_auth_admin WITH PASSWORD :'pgpass';
208
volumes/db/webhooks.sql
Normal file
208
volumes/db/webhooks.sql
Normal file
@ -0,0 +1,208 @@
|
|||||||
|
BEGIN;

-- Create pg_net extension
CREATE EXTENSION IF NOT EXISTS pg_net SCHEMA extensions;

-- Create supabase_functions schema
CREATE SCHEMA supabase_functions AUTHORIZATION supabase_admin;
GRANT USAGE ON SCHEMA supabase_functions TO postgres, anon, authenticated, service_role;

-- Objects created later in this schema are usable by the API roles by default.
ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON TABLES TO postgres, anon, authenticated, service_role;
ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON FUNCTIONS TO postgres, anon, authenticated, service_role;
ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON SEQUENCES TO postgres, anon, authenticated, service_role;
|
-- supabase_functions.migrations definition
CREATE TABLE supabase_functions.migrations (
  version text PRIMARY KEY,
  inserted_at timestamptz NOT NULL DEFAULT NOW()
);

-- Initial supabase_functions migration
INSERT INTO supabase_functions.migrations (version) VALUES ('initial');

-- supabase_functions.hooks definition
CREATE TABLE supabase_functions.hooks (
  id bigserial PRIMARY KEY,
  hook_table_id integer NOT NULL,
  hook_name text NOT NULL,
  created_at timestamptz NOT NULL DEFAULT NOW(),
  request_id bigint
);

-- Indexes for looking hooks up by request and by (table, name).
CREATE INDEX supabase_functions_hooks_request_id_idx ON supabase_functions.hooks USING btree (request_id);
CREATE INDEX supabase_functions_hooks_h_table_id_h_name_idx ON supabase_functions.hooks USING btree (hook_table_id, hook_name);

COMMENT ON TABLE supabase_functions.hooks IS 'Supabase Functions Hooks: Audit trail for triggered hooks.';
|
-- Trigger function that forwards the row change to a webhook URL via
-- pg_net and records the request in supabase_functions.hooks.
-- Trigger arguments: url, method, [headers jsonb], [params jsonb],
-- [timeout_ms].
CREATE FUNCTION supabase_functions.http_request()
  RETURNS trigger
  LANGUAGE plpgsql
AS $function$
  DECLARE
    request_id bigint;
    payload jsonb;
    url text := TG_ARGV[0]::text;
    method text := TG_ARGV[1]::text;
    headers jsonb DEFAULT '{}'::jsonb;
    params jsonb DEFAULT '{}'::jsonb;
    timeout_ms integer DEFAULT 1000;
  BEGIN
    -- The first two trigger arguments are mandatory.
    IF url IS NULL OR url = 'null' THEN
      RAISE EXCEPTION 'url argument is missing';
    END IF;

    IF method IS NULL OR method = 'null' THEN
      RAISE EXCEPTION 'method argument is missing';
    END IF;

    -- Optional arguments fall back to defaults; the literal string 'null'
    -- is treated the same as an absent argument.
    headers := COALESCE(NULLIF(TG_ARGV[2], 'null')::jsonb, '{"Content-Type": "application/json"}'::jsonb);
    params := COALESCE(NULLIF(TG_ARGV[3], 'null')::jsonb, '{}'::jsonb);
    timeout_ms := COALESCE(NULLIF(TG_ARGV[4], 'null')::integer, 1000);

    CASE
      WHEN method = 'GET' THEN
        SELECT http_get INTO request_id FROM net.http_get(
          url,
          params,
          headers,
          timeout_ms
        );
      WHEN method = 'POST' THEN
        -- Ship the full row change as the request body.
        payload = jsonb_build_object(
          'old_record', OLD,
          'record', NEW,
          'type', TG_OP,
          'table', TG_TABLE_NAME,
          'schema', TG_TABLE_SCHEMA
        );

        SELECT http_post INTO request_id FROM net.http_post(
          url,
          payload,
          params,
          headers,
          timeout_ms
        );
      ELSE
        RAISE EXCEPTION 'method argument % is invalid', method;
    END CASE;

    -- Audit-trail row linking this trigger firing to the async request.
    INSERT INTO supabase_functions.hooks
      (hook_table_id, hook_name, request_id)
    VALUES
      (TG_RELID, TG_NAME, request_id);

    RETURN NEW;
  END
$function$;
|
-- Supabase super admin
DO
$$
BEGIN
  -- Create the functions admin role once; re-runs are a no-op.
  IF NOT EXISTS (
    SELECT 1 FROM pg_roles WHERE rolname = 'supabase_functions_admin'
  )
  THEN
    CREATE USER supabase_functions_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION;
  END IF;
END
$$;

-- Hand the schema and its objects over to the functions admin.
GRANT ALL PRIVILEGES ON SCHEMA supabase_functions TO supabase_functions_admin;
GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA supabase_functions TO supabase_functions_admin;
GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA supabase_functions TO supabase_functions_admin;
ALTER USER supabase_functions_admin SET search_path = "supabase_functions";
ALTER table "supabase_functions".migrations OWNER TO supabase_functions_admin;
ALTER table "supabase_functions".hooks OWNER TO supabase_functions_admin;
ALTER function "supabase_functions".http_request() OWNER TO supabase_functions_admin;
GRANT supabase_functions_admin TO postgres;
|
-- Remove unused supabase_pg_net_admin role
DO
$$
BEGIN
  IF EXISTS (
    SELECT 1 FROM pg_roles WHERE rolname = 'supabase_pg_net_admin'
  )
  THEN
    -- Hand over everything it owns before dropping the role.
    REASSIGN OWNED BY supabase_pg_net_admin TO supabase_admin;
    DROP OWNED BY supabase_pg_net_admin;
    DROP ROLE supabase_pg_net_admin;
  END IF;
END
$$;
|
-- pg_net grants when extension is already enabled
DO
$$
BEGIN
  IF EXISTS (
    SELECT 1 FROM pg_extension WHERE extname = 'pg_net'
  )
  THEN
    GRANT USAGE ON SCHEMA net TO supabase_functions_admin, postgres, anon, authenticated, service_role;

    -- Run the HTTP helpers with definer rights and a pinned search_path,
    -- then restrict execution to the Supabase roles.
    ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
    ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
    ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
    ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
    REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
    REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
    GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
    GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
  END IF;
END
$$;
|
-- Event trigger for pg_net
-- Re-applies the pg_net grants whenever the extension is (re)created.
CREATE OR REPLACE FUNCTION extensions.grant_pg_net_access()
  RETURNS event_trigger
  LANGUAGE plpgsql
AS $$
BEGIN
  -- Only act when the DDL command that just ended created pg_net.
  IF EXISTS (
    SELECT 1
    FROM pg_event_trigger_ddl_commands() AS ev
    JOIN pg_extension AS ext ON ev.objid = ext.oid
    WHERE ext.extname = 'pg_net'
  )
  THEN
    GRANT USAGE ON SCHEMA net TO supabase_functions_admin, postgres, anon, authenticated, service_role;
    ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
    ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
    ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
    ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
    REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
    REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
    GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
    GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
  END IF;
END;
$$;

COMMENT ON FUNCTION extensions.grant_pg_net_access IS 'Grants access to pg_net';
|
DO
$$
BEGIN
  -- Install the event trigger once; re-runs are a no-op.
  IF NOT EXISTS (
    SELECT 1 FROM pg_event_trigger WHERE evtname = 'issue_pg_net_access'
  ) THEN
    CREATE EVENT TRIGGER issue_pg_net_access ON ddl_command_end
      WHEN TAG IN ('CREATE EXTENSION')
      EXECUTE PROCEDURE extensions.grant_pg_net_access();
  END IF;
END
$$;
|
INSERT INTO supabase_functions.migrations (version) VALUES ('20210809183423_update_grants');

-- Lock down http_request: definer rights, pinned search_path, and
-- execution limited to the Supabase roles.
ALTER function supabase_functions.http_request() SECURITY DEFINER;
ALTER function supabase_functions.http_request() SET search_path = supabase_functions;
REVOKE ALL ON FUNCTION supabase_functions.http_request() FROM PUBLIC;
GRANT EXECUTE ON FUNCTION supabase_functions.http_request() TO postgres, anon, authenticated, service_role;

COMMIT;
Loading…
x
Reference in New Issue
Block a user