Compare commits

..

10 Commits
master ... main

Author SHA1 Message Date
Classroom Copilot Dev
e0d2c5c619 Merge branch 'main' of https://git.kevlarai.com/ClassroomCopilot/supabase 2026-02-23 21:17:32 +00:00
Classroom Copilot Dev
5573b8fede chore: add .bak to gitignore and remove .env.local 2026-02-23 21:16:50 +00:00
942d1f59e9 Delete .env.local 2026-02-23 21:00:55 +00:00
c85f4b4484 fixed volume mount names in docker-compose.yml, fixed naming consistence in 70-add-directory-support.sql 2026-02-23 17:42:54 +00:00
3b24453bb0 cc changes back 2026-02-22 21:54:19 +00:00
dde8450e7e reset 2026-02-22 21:36:47 +00:00
31ecf136f7 feat: reorganize supabase config - flat db init structure, add edge functions, mcp, kong api config 2026-02-22 00:31:50 +00:00
95af17c02d feat: migrate to docker-compose with selfhosted-supabase-mcp
- Replace legacy directory structure (api/, db/, functions/, logs/, pooler/) with
  single docker-compose.yml based self-hosted setup
- Add selfhosted-supabase-mcp TypeScript MCP server for database management
- Add .dockerignore for Docker build context
- Update .gitignore to exclude .env files, volumes/, backups, logs
2026-02-21 19:32:57 +00:00
a7cca50f17 chore: update cli-latest and core schema migration 2026-02-21 16:42:06 +00:00
1941a2626d latest 2025-11-14 14:46:49 +00:00
125 changed files with 15005 additions and 1846 deletions

View File

@ -7,7 +7,7 @@ project_id = "ClassroomCopilot"
[api] [api]
enabled = true enabled = true
# Port to use for the API URL. # Port to use for the API URL.
port = "env(PORT_SUPABASE_KONG_HTTP)" port = 8000
# Schemas to expose in your API. Tables, views and stored procedures in this schema will get API # Schemas to expose in your API. Tables, views and stored procedures in this schema will get API
# endpoints. `public` and `graphql_public` schemas are included by default. # endpoints. `public` and `graphql_public` schemas are included by default.
schemas = ["public", "graphql_public"] schemas = ["public", "graphql_public"]
@ -23,7 +23,7 @@ enabled = false
[db] [db]
# Port to use for the local database URL. # Port to use for the local database URL.
port = "env(PORT_SUPABASE_POSTGRES)" port = 5432
# Port used by db diff command to initialize the shadow database. # Port used by db diff command to initialize the shadow database.
shadow_port = 54320 shadow_port = 54320
# The database major version to use. This has to be the same as your remote database's. Run `SHOW # The database major version to use. This has to be the same as your remote database's. Run `SHOW
@ -43,19 +43,28 @@ default_pool_size = 20
max_client_conn = 100 max_client_conn = 100
[db.vault] [db.vault]
secret_key = "env(VAULT_ENC_KEY)" #secret_key = "***REDACTED — SECURITY: a live vault encryption key (identical to the JWT_SECRET leaked in .env) was committed in plain text here; rotate the key and keep the env(VAULT_ENC_KEY) indirection***"
[db.migrations] [db.migrations]
# Specifies an ordered list of schema files that describe your database. # Specifies an ordered list of schema files that describe your database.
# Supports glob patterns relative to supabase directory: "./schemas/*.sql" # Supports glob patterns relative to supabase directory: "./schemas/*.sql"
schema_paths = ["./db/init-scripts/*.sql", "./db/migrations/*.sql"] schema_paths = [
"./db/init-scripts/*.sql",
"./db/migrations/supabase/*.sql",
"./db/migrations/core/*.sql"
]
[db.seed] [db.seed]
# If enabled, seeds the database after migrations during a db reset. # If enabled, seeds the database after migrations during a db reset.
enabled = true enabled = true
# Specifies an ordered list of seed files to load during db reset. # Specifies an ordered list of seed files to load during db reset.
# Supports glob patterns relative to supabase directory: "./seeds/*.sql" # Supports glob patterns relative to supabase directory: "./seeds/*.sql"
sql_paths = ["./db/init/seed.sql"] sql_paths = [
"./db/init-scripts/*.sql",
"./db/migrations/supabase/*.sql",
"./db/migrations/core/*.sql",
"./db/init/seed.sql"
]
[realtime] [realtime]
enabled = true enabled = true
@ -67,11 +76,11 @@ enabled = true
[studio] [studio]
enabled = true enabled = true
# Port to use for Supabase Studio. # Port to use for Supabase Studio.
port = "env(PORT_SUPABASE_STUDIO)" port = 3000
# External URL of the API server that frontend connects to. # External URL of the API server that frontend connects to.
api_url = "http://localhost" api_url = "http://localhost"
# OpenAI API Key to use for Supabase AI in the Supabase Studio. # OpenAI API Key to use for Supabase AI in the Supabase Studio.
openai_api_key = "env(OPENAI_API_KEY)" openai_api_key = "***REDACTED — SECURITY: a live sk-proj-… OpenAI API key was committed in plain text in this diff; revoke the key immediately and restore the env(OPENAI_API_KEY) indirection***"
# Email testing server. Emails sent with the local dev setup are not actually sent - rather, they # Email testing server. Emails sent with the local dev setup are not actually sent - rather, they
# are monitored, and you can view the emails that would have been sent from the web interface. # are monitored, and you can view the emails that would have been sent from the web interface.
@ -82,8 +91,8 @@ port = 54324
# Uncomment to expose additional ports for testing user applications that send emails. # Uncomment to expose additional ports for testing user applications that send emails.
# smtp_port = 54325 # smtp_port = 54325
# pop3_port = 54326 # pop3_port = 54326
admin_email = "env(SUPER_ADMIN_EMAIL)" admin_email = "admin@classroomcopilot.ai"
sender_name = "env(SUPER_ADMIN_NAME)" sender_name = "Super Admin"
[storage] [storage]
enabled = true enabled = true
@ -105,9 +114,9 @@ file_size_limit = "50MiB"
enabled = true enabled = true
# The base URL of your website. Used as an allow-list for redirects and for constructing URLs used # The base URL of your website. Used as an allow-list for redirects and for constructing URLs used
# in emails. # in emails.
site_url = "env(SITE_URL)" site_url = "http://localhost:8000"
# A list of *exact* URLs that auth providers are permitted to redirect to post authentication. # A list of *exact* URLs that auth providers are permitted to redirect to post authentication.
additional_redirect_urls = ["env(ADDITIONAL_REDIRECT_URLS)"] additional_redirect_urls = ["http://localhost", "http://127.0.0.1"]
# How long tokens are valid for, in seconds. Defaults to 3600 (1 hour), maximum 604,800 (1 week). # How long tokens are valid for, in seconds. Defaults to 3600 (1 hour), maximum 604,800 (1 week).
jwt_expiry = 3600 jwt_expiry = 3600
# If disabled, the refresh token will never expire. # If disabled, the refresh token will never expire.
@ -286,10 +295,10 @@ backend = "postgres"
# Configures Postgres storage engine to use OrioleDB (S3) # Configures Postgres storage engine to use OrioleDB (S3)
orioledb_version = "" orioledb_version = ""
# Configures S3 bucket URL, eg. <bucket_name>.s3-<region>.amazonaws.com # Configures S3 bucket URL, eg. <bucket_name>.s3-<region>.amazonaws.com
s3_host = "env(S3_HOST)" s3_host = ""
# Configures S3 bucket region, eg. us-east-1 # Configures S3 bucket region, eg. us-east-1
s3_region = "env(S3_REGION)" s3_region = ""
# Configures AWS_ACCESS_KEY_ID for S3 bucket # Configures AWS_ACCESS_KEY_ID for S3 bucket
s3_access_key = "env(S3_ACCESS_KEY)" s3_access_key = ""
# Configures AWS_SECRET_ACCESS_KEY for S3 bucket # Configures AWS_SECRET_ACCESS_KEY for S3 bucket
s3_secret_key = "env(S3_SECRET_KEY)" s3_secret_key = ""

View File

@ -0,0 +1,131 @@
-- Migration: bootstrap Supabase Database Webhooks support.
-- Installs pg_net, creates the supabase_functions schema with its
-- migrations/hooks bookkeeping tables, and defines the http_request()
-- trigger function that fires asynchronous HTTP calls on row events.
BEGIN;
-- Create pg_net extension
-- NOTE(review): pg_net exposes its callable objects in the `net` schema
-- (see the GRANTs at the bottom); the SCHEMA clause here only relocates
-- the extension's own membership — confirm against the installed version.
CREATE EXTENSION IF NOT EXISTS pg_net SCHEMA extensions;
-- Create supabase_functions schema
CREATE SCHEMA IF NOT EXISTS supabase_functions AUTHORIZATION postgres;
-- Grant basic permissions
-- Broad grants: anon/authenticated receive ALL on future tables, functions
-- and sequences in this schema — standard Supabase webhook plumbing, but
-- worth noting for security review.
GRANT USAGE ON SCHEMA supabase_functions TO postgres, anon, authenticated, service_role;
ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON TABLES TO postgres, anon, authenticated, service_role;
ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON FUNCTIONS TO postgres, anon, authenticated, service_role;
ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON SEQUENCES TO postgres, anon, authenticated, service_role;
-- supabase_functions.migrations definition
-- Tracks which versions of this bootstrap have been applied (see the
-- INSERT ... ON CONFLICT DO NOTHING statements below).
CREATE TABLE IF NOT EXISTS supabase_functions.migrations (
version text PRIMARY KEY,
inserted_at timestamptz NOT NULL DEFAULT NOW()
);
-- Initial supabase_functions migration
INSERT INTO supabase_functions.migrations (version) VALUES ('initial') ON CONFLICT DO NOTHING;
-- supabase_functions.hooks definition
-- One row per fired webhook; request_id correlates with pg_net's request queue.
CREATE TABLE IF NOT EXISTS supabase_functions.hooks (
id bigserial PRIMARY KEY,
hook_table_id integer NOT NULL,
hook_name text NOT NULL,
created_at timestamptz NOT NULL DEFAULT NOW(),
request_id bigint
);
-- Create indexes if they don't exist
CREATE INDEX IF NOT EXISTS supabase_functions_hooks_request_id_idx ON supabase_functions.hooks USING btree (request_id);
CREATE INDEX IF NOT EXISTS supabase_functions_hooks_h_table_id_h_name_idx ON supabase_functions.hooks USING btree (hook_table_id, hook_name);
COMMENT ON TABLE supabase_functions.hooks IS 'Supabase Functions Hooks: Audit trail for triggered hooks.';
-- Create the http_request function
-- Trigger arguments (all arrive as text via TG_ARGV; the literal string
-- 'null' marks an unset argument):
--   TG_ARGV[0] url      (required)
--   TG_ARGV[1] method   (required; 'GET' or 'POST', anything else raises)
--   TG_ARGV[2] headers  jsonb; defaults to {"Content-Type": "application/json"}
--   TG_ARGV[3] params   jsonb; defaults to {}
--   TG_ARGV[4] timeout  integer milliseconds; defaults to 1000
-- Raises an exception when url or method is missing/invalid.
CREATE OR REPLACE FUNCTION supabase_functions.http_request()
RETURNS trigger
LANGUAGE plpgsql
AS $function$
DECLARE
request_id bigint;
payload jsonb;
url text := TG_ARGV[0]::text;
method text := TG_ARGV[1]::text;
headers jsonb DEFAULT '{}'::jsonb;
params jsonb DEFAULT '{}'::jsonb;
timeout_ms integer DEFAULT 1000;
BEGIN
IF url IS NULL OR url = 'null' THEN
RAISE EXCEPTION 'url argument is missing';
END IF;
IF method IS NULL OR method = 'null' THEN
RAISE EXCEPTION 'method argument is missing';
END IF;
-- Default headers to JSON content type when the trigger did not supply any.
IF TG_ARGV[2] IS NULL OR TG_ARGV[2] = 'null' THEN
headers = '{"Content-Type": "application/json"}'::jsonb;
ELSE
headers = TG_ARGV[2]::jsonb;
END IF;
IF TG_ARGV[3] IS NULL OR TG_ARGV[3] = 'null' THEN
params = '{}'::jsonb;
ELSE
params = TG_ARGV[3]::jsonb;
END IF;
IF TG_ARGV[4] IS NULL OR TG_ARGV[4] = 'null' THEN
timeout_ms = 1000;
ELSE
timeout_ms = TG_ARGV[4]::integer;
END IF;
CASE
WHEN method = 'GET' THEN
-- net.http_get enqueues the request asynchronously; the selected column
-- (named after the function) is the pg_net request id.
SELECT http_get INTO request_id FROM net.http_get(
url,
params,
headers,
timeout_ms
);
WHEN method = 'POST' THEN
-- POST body mirrors Supabase's webhook payload shape: old/new row images
-- plus the operation type and the source table identity.
payload = jsonb_build_object(
'old_record', OLD,
'record', NEW,
'type', TG_OP,
'table', TG_TABLE_NAME,
'schema', TG_TABLE_SCHEMA
);
SELECT http_post INTO request_id FROM net.http_post(
url,
payload,
params,
headers,
timeout_ms
);
ELSE
RAISE EXCEPTION 'method argument % is invalid', method;
END CASE;
-- Record the fired hook so it can be audited and correlated with pg_net.
INSERT INTO supabase_functions.hooks
(hook_table_id, hook_name, request_id)
VALUES
(TG_RELID, TG_NAME, request_id);
RETURN NEW;
END
$function$;
-- Set function properties
-- SECURITY DEFINER + pinned search_path: the function runs with its owner's
-- privileges, and name resolution is locked to supabase_functions so a
-- malicious schema earlier on a caller's search_path cannot hijack it.
ALTER FUNCTION supabase_functions.http_request() SECURITY DEFINER;
ALTER FUNCTION supabase_functions.http_request() SET search_path = supabase_functions;
-- Grant execute permissions
REVOKE ALL ON FUNCTION supabase_functions.http_request() FROM PUBLIC;
GRANT EXECUTE ON FUNCTION supabase_functions.http_request() TO postgres, anon, authenticated, service_role;
-- Grant pg_net permissions
-- NOTE(review): these GRANTs assume this pg_net version's 4-argument
-- http_get and 5-argument http_post signatures — confirm against the
-- installed extension, as pg_net signatures have changed between releases.
GRANT USAGE ON SCHEMA net TO postgres, anon, authenticated, service_role;
GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO postgres, anon, authenticated, service_role;
GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO postgres, anon, authenticated, service_role;
-- Add migration record
INSERT INTO supabase_functions.migrations (version) VALUES ('20210809183423_update_grants') ON CONFLICT DO NOTHING;
COMMIT;

View File

@ -0,0 +1,6 @@
-- Set JWT configuration for the database
-- These settings will be configured through environment variables in the Supabase setup
-- Note: JWT configuration is handled by Supabase's internal configuration
-- This file is kept for reference but the actual JWT settings are managed
-- through the Supabase configuration and environment variables

View File

@ -0,0 +1,11 @@
-- NOTE: change to your own passwords for production environments
-- Password configuration is handled by Supabase's internal setup
-- This file is kept for reference but the actual password settings are managed
-- through the Supabase configuration and environment variables
-- The following users are created and configured by Supabase automatically:
-- - authenticator
-- - pgbouncer
-- - supabase_auth_admin
-- - supabase_functions_admin
-- - supabase_storage_admin

View File

@ -0,0 +1,7 @@
-- Create _supabase database for internal Supabase operations
-- This database is created automatically by Supabase's internal setup
-- This file is kept for reference but the actual database creation is managed
-- through the Supabase configuration and environment variables
-- Note: The _supabase database is created with the postgres user as owner
-- by default during Supabase initialization

View File

@ -0,0 +1,7 @@
-- Create _analytics schema for Supabase analytics
-- This schema is created automatically by Supabase's internal setup
-- This file is kept for reference but the actual schema creation is managed
-- through the Supabase configuration and environment variables
-- Note: The _analytics schema is created in the _supabase database
-- with appropriate ownership during Supabase initialization

View File

@ -0,0 +1,7 @@
-- Create _realtime schema for Supabase realtime functionality
-- This schema is created automatically by Supabase's internal setup
-- This file is kept for reference but the actual schema creation is managed
-- through the Supabase configuration and environment variables
-- Note: The _realtime schema is created with appropriate ownership
-- during Supabase initialization

View File

@ -0,0 +1,7 @@
-- Create _supavisor schema for Supabase connection pooling
-- This schema is created automatically by Supabase's internal setup
-- This file is kept for reference but the actual schema creation is managed
-- through the Supabase configuration and environment variables
-- Note: The _supavisor schema is created in the _supabase database
-- with appropriate ownership during Supabase initialization

View File

@ -1,491 +0,0 @@
--[ Database Schema Version ]--
-- Version: 1.0.0
-- Last Updated: 2024-02-24
-- Description: Initial schema setup for ClassConcepts
-- Dependencies: auth.users (Supabase Auth)
--[ Validation ]--
do $$
begin
-- Verify required extensions
if not exists (select 1 from pg_extension where extname = 'uuid-ossp') then
raise exception 'Required extension uuid-ossp is not installed';
end if;
-- Verify auth schema exists
if not exists (select 1 from information_schema.schemata where schema_name = 'auth') then
raise exception 'Required auth schema is not available';
end if;
-- Verify storage schema exists
if not exists (select 1 from information_schema.schemata where schema_name = 'storage') then
raise exception 'Required storage schema is not available';
end if;
end $$;
--[ 1. Extensions ]--
create extension if not exists "uuid-ossp";
-- Create rpc schema if it doesn't exist
create schema if not exists rpc;
grant usage on schema rpc to anon, authenticated;
-- Create exec_sql function for admin operations
create or replace function exec_sql(query text)
returns void as $$
begin
execute query;
end;
$$ language plpgsql security definer;
-- Create updated_at trigger function
create or replace function public.handle_updated_at()
returns trigger as $$
begin
new.updated_at = timezone('utc'::text, now());
return new;
end;
$$ language plpgsql security definer;
--[ 5. Core Tables ]--
-- Base user profiles
create table if not exists public.profiles (
id uuid primary key references auth.users(id) on delete cascade,
email text not null unique,
user_type text not null check (user_type in ('admin', 'email_teacher', 'email_student')),
username text not null unique,
display_name text,
metadata jsonb default '{}'::jsonb,
last_login timestamp with time zone,
created_at timestamp with time zone default timezone('utc'::text, now()),
updated_at timestamp with time zone default timezone('utc'::text, now())
);
comment on table public.profiles is 'User profiles linked to Supabase auth.users';
comment on column public.profiles.user_type is 'Type of user: admin, teacher, or student';
-- Institute import data
create table if not exists public.institute_imports (
id uuid primary key default uuid_generate_v4(),
urn text unique,
establishment_name text not null,
la_code text,
la_name text,
establishment_number text,
establishment_type text,
establishment_type_group text,
establishment_status text,
reason_establishment_opened text,
open_date date,
reason_establishment_closed text,
close_date date,
phase_of_education text,
statutory_low_age integer,
statutory_high_age integer,
boarders text,
nursery_provision text,
official_sixth_form text,
gender text,
religious_character text,
religious_ethos text,
diocese text,
admissions_policy text,
school_capacity integer,
special_classes text,
census_date date,
number_of_pupils integer,
number_of_boys integer,
number_of_girls integer,
percentage_fsm numeric(5,2),
trust_school_flag text,
trusts_name text,
school_sponsor_flag text,
school_sponsors_name text,
federation_flag text,
federations_name text,
ukprn text,
fehe_identifier text,
further_education_type text,
ofsted_last_inspection date,
last_changed_date date,
street text,
locality text,
address3 text,
town text,
county text,
postcode text,
school_website text,
telephone_num text,
head_title text,
head_first_name text,
head_last_name text,
head_preferred_job_title text,
gssla_code text,
parliamentary_constituency text,
urban_rural text,
rsc_region text,
country text,
uprn text,
sen_stat boolean,
sen_no_stat boolean,
sen_unit_on_roll integer,
sen_unit_capacity integer,
resourced_provision_on_roll integer,
resourced_provision_capacity integer,
metadata jsonb default '{}'::jsonb,
imported_at timestamp with time zone default timezone('utc'::text, now()),
updated_at timestamp with time zone default timezone('utc'::text, now())
);
comment on table public.institute_imports is 'Raw institute data imported from external sources';
-- Active institutes
create table if not exists public.institutes (
id uuid primary key default uuid_generate_v4(),
import_id uuid references public.institute_imports(id),
name text not null,
urn text unique,
status text not null default 'active' check (status in ('active', 'inactive', 'pending')),
address jsonb default '{}'::jsonb,
website text,
metadata jsonb default '{}'::jsonb,
neo4j_unique_id text,
neo4j_public_sync_status text default 'pending' check (neo4j_public_sync_status in ('pending', 'synced', 'failed')),
neo4j_public_sync_at timestamp with time zone,
neo4j_private_sync_status text default 'not_started' check (neo4j_private_sync_status in ('not_started', 'pending', 'synced', 'failed')),
neo4j_private_sync_at timestamp with time zone,
created_at timestamp with time zone default timezone('utc'::text, now()),
updated_at timestamp with time zone default timezone('utc'::text, now())
);
comment on table public.institutes is 'Active institutes in the system';
--[ 6. Relationship Tables ]--
-- Institute memberships
create table if not exists public.institute_memberships (
id uuid primary key default uuid_generate_v4(),
profile_id uuid references public.profiles(id) on delete cascade,
institute_id uuid references public.institutes(id) on delete cascade,
role text not null check (role in ('admin', 'teacher', 'student')),
tldraw_preferences jsonb default '{}'::jsonb,
metadata jsonb default '{}'::jsonb,
created_at timestamp with time zone default timezone('utc'::text, now()),
updated_at timestamp with time zone default timezone('utc'::text, now()),
unique(profile_id, institute_id)
);
comment on table public.institute_memberships is 'Manages user roles and relationships with institutes';
-- Membership requests
create table if not exists public.institute_membership_requests (
id uuid primary key default uuid_generate_v4(),
profile_id uuid references public.profiles(id) on delete cascade,
institute_id uuid references public.institutes(id) on delete cascade,
requested_role text check (requested_role in ('teacher', 'student')),
status text default 'pending' check (status in ('pending', 'approved', 'rejected')),
metadata jsonb default '{}'::jsonb,
created_at timestamp with time zone default timezone('utc'::text, now()),
updated_at timestamp with time zone default timezone('utc'::text, now())
);
comment on table public.institute_membership_requests is 'Tracks requests to join institutes';
--[ 7. Audit Tables ]--
-- System audit logs
create table if not exists public.audit_logs (
id uuid primary key default uuid_generate_v4(),
profile_id uuid references public.profiles(id) on delete set null,
action_type text,
table_name text,
record_id uuid,
changes jsonb,
created_at timestamp with time zone default timezone('utc'::text, now())
);
comment on table public.audit_logs is 'System-wide audit trail for important operations';
--[ 8. Auth Functions ]--
-- Create a secure function to check admin status
create or replace function auth.is_admin()
returns boolean as $$
select coalesce(
(select true
from public.profiles
where id = auth.uid()
and user_type = 'admin'),
false
);
$$ language sql security definer;
-- Create a secure function to check super admin status
create or replace function auth.is_super_admin()
returns boolean as $$
select coalesce(
(select role = 'supabase_admin'
from auth.users
where id = auth.uid()),
false
);
$$ language sql security definer;
-- Create public wrappers for the auth functions
create or replace function public.is_admin()
returns boolean as $$
select auth.is_admin();
$$ language sql security definer;
create or replace function public.is_super_admin()
returns boolean as $$
select auth.is_super_admin();
$$ language sql security definer;
-- Grant execute permissions
grant execute on function public.is_admin to authenticated;
grant execute on function public.is_super_admin to authenticated;
grant execute on function auth.is_admin to authenticated;
grant execute on function auth.is_super_admin to authenticated;
-- Initial admin setup function
create or replace function public.setup_initial_admin(admin_email text)
returns json
language plpgsql
security definer
as $$
declare
result json;
begin
-- Only allow this to run as service role or supabase_admin
if not (
current_user = 'service_role'
or exists (
select 1 from pg_roles
where rolname = current_user
and rolsuper
)
) then
raise exception 'Must be run as service_role or superuser';
end if;
-- Update user_type and username for admin
update public.profiles
set user_type = 'admin',
username = coalesce(username, 'superadmin'),
display_name = coalesce(display_name, 'Super Admin')
where email = admin_email
returning json_build_object(
'id', id,
'email', email,
'user_type', user_type,
'username', username,
'display_name', display_name
) into result;
if result is null then
raise exception 'Admin user with email % not found', admin_email;
end if;
return result;
end;
$$;
-- Grant execute permissions
revoke execute on function public.setup_initial_admin from public;
grant execute on function public.setup_initial_admin to authenticated, service_role, supabase_admin;
-- Create RPC wrapper for REST API access
create or replace function rpc.setup_initial_admin(admin_email text)
returns json
language plpgsql
security definer
as $$
begin
return public.setup_initial_admin(admin_email);
end;
$$;
-- Grant execute permissions for RPC wrapper
grant execute on function rpc.setup_initial_admin to authenticated, service_role, supabase_admin;
--[ 9. Utility Functions ]--
-- Check if database is ready
create or replace function check_db_ready()
returns boolean
language plpgsql
security definer
as $$
begin
-- Check if essential schemas exist
if not exists (
select 1
from information_schema.schemata
where schema_name in ('auth', 'storage', 'public')
) then
return false;
end if;
-- Check if essential tables exist
if not exists (
select 1
from information_schema.tables
where table_schema = 'auth'
and table_name = 'users'
) then
return false;
end if;
-- Check if RLS is enabled on public.profiles
if not exists (
select 1
from pg_tables
where schemaname = 'public'
and tablename = 'profiles'
and rowsecurity = true
) then
return false;
end if;
return true;
end;
$$;
-- Grant execute permission
grant execute on function check_db_ready to anon, authenticated, service_role;
-- Function to handle new user registration
create or replace function public.handle_new_user()
returns trigger
language plpgsql
security definer set search_path = public
as $$
declare
default_user_type text := 'email_student';
default_username text;
begin
-- Generate username from email
default_username := split_part(new.email, '@', 1);
insert into public.profiles (
id,
email,
user_type,
username,
display_name
)
values (
new.id,
new.email,
coalesce(new.raw_user_meta_data->>'user_type', default_user_type),
coalesce(new.raw_user_meta_data->>'username', default_username),
coalesce(new.raw_user_meta_data->>'display_name', default_username)
);
return new;
end;
$$;
-- Trigger for new user creation
drop trigger if exists on_auth_user_created on auth.users;
create trigger on_auth_user_created
after insert on auth.users
for each row execute procedure public.handle_new_user();
--[ 10. Security Setup ]--
-- Enable RLS
alter table if exists public.profiles enable row level security;
alter table if exists public.institute_imports enable row level security;
alter table if exists public.institutes enable row level security;
alter table if exists public.institute_memberships enable row level security;
alter table if exists public.institute_membership_requests enable row level security;
alter table if exists public.audit_logs enable row level security;
-- First, ensure proper schema access
grant usage on schema public to anon, authenticated;
-- First, drop existing policies
drop policy if exists "Users can read and update own profile" on public.profiles;
drop policy if exists "Users can update their profile during registration" on public.profiles;
-- Create updated policies
create policy "Users can read own profile"
on public.profiles for select
to authenticated
using (auth.uid() = id);
create policy "Users can update own profile"
on public.profiles for update
to authenticated
using (auth.uid() = id)
with check (auth.uid() = id);
create policy "Public can read basic profile info"
on public.profiles for select
to anon, authenticated
using (
user_type in ('email_teacher', 'email_student')
);
create policy "Super admins have full access"
on public.profiles for all
using (auth.is_super_admin());
create policy "Admins can read all profiles"
on public.profiles for select
using (auth.is_admin() or auth.is_super_admin());
-- Grant permissions
grant select, update on public.profiles to authenticated;
grant select (id, email, user_type, display_name) on public.profiles to anon;
-- Storage bucket policies
alter table if exists storage.buckets enable row level security;
-- Allow super admin full access to buckets
create policy "Super admin has full access to buckets"
on storage.buckets for all
using (current_user = 'service_role' or current_user = 'supabase_admin' or current_user = 'authenticated');
-- Allow authenticated users to create buckets if they are the owner
create policy "Users can create their own buckets"
on storage.buckets for insert
to authenticated
with check (true); -- We'll handle ownership in the application layer
-- Allow users to view buckets they own or public buckets
create policy "Users can view their own buckets"
on storage.buckets for select
to authenticated
using (
owner::text = auth.uid()::text
);
--[ 11. Database Triggers ]--
drop trigger if exists handle_profiles_updated_at on public.profiles;
create trigger handle_profiles_updated_at
before update on public.profiles
for each row execute function public.handle_updated_at();
drop trigger if exists handle_institute_memberships_updated_at on public.institute_memberships;
create trigger handle_institute_memberships_updated_at
before update on public.institute_memberships
for each row execute function public.handle_updated_at();
drop trigger if exists handle_membership_requests_updated_at on public.institute_membership_requests;
create trigger handle_membership_requests_updated_at
before update on public.institute_membership_requests
for each row execute function public.handle_updated_at();
--[ 12. Permissions ]--
-- Grant schema access
grant usage on schema public to postgres, anon, authenticated;
-- Grant table permissions
grant all privileges on all tables in schema public to postgres;
grant select, insert, update on all tables in schema public to authenticated;
--[ 13. Realtime Setup ]--
-- Drop existing publication if it exists
drop publication if exists supabase_realtime;
-- Create publication (without IF NOT EXISTS)
create publication supabase_realtime;
-- Add tables to publication (these are idempotent operations)
alter publication supabase_realtime add table profiles;
alter publication supabase_realtime add table institute_imports;
alter publication supabase_realtime add table institutes;
alter publication supabase_realtime add table institute_memberships;
alter publication supabase_realtime add table institute_membership_requests;
alter publication supabase_realtime add table audit_logs;

View File

@ -0,0 +1 @@
main

4
.dockerignore Normal file
View File

@ -0,0 +1,4 @@
volumes/db/data
volumes/storage
volumes/logs
.git

481
.env
View File

@ -1,481 +0,0 @@
HOST_IP=localhost
## App Information
APP_NAME=ClassroomCopilot
APP_VERSION=0.0.1
APP_DESCRIPTION="An AI copilot for learners and educators."
APP_AUTHOR=KevlarAI
APP_AUTHOR_EMAIL=kcar@kevlarai.com
APP_URL=classroomcopilot.ai
APP_URL_INTERNAL=classroomcopilot.ai
APP_PROTOCOL=https
APP_WS_PROTOCOL=wss
API_EXTERNAL_URL=${APP_PROTOCOL}://supa.${APP_URL}
# KevelarAI URLs
KEVLARAI_URL=kevlarai.ai
KEVLARAI_PROTOCOL=https
# Super Admin user
SUPER_ADMIN_EMAIL=admin@classroomcopilot.ai
SUPER_ADMIN_WORKER_EMAIL=kcar@kevlarai.com
SUPER_ADMIN_PASSWORD=password
SUPER_ADMIN_USERNAME=superadmin
SUPER_ADMIN_NAME="Super Admin"
SUPER_ADMIN_DISPLAY_NAME="CC Creator"
SUPER_ADMIN_CALENDAR_START_DATE=2025-01-01
SUPER_ADMIN_CALENDAR_END_DATE=2025-01-31
## Hosts
HOST_OLLAMA=${HOST_IP}
## Ports
PORT_SUPABASE_KONG_HTTP=8000
PORT_SUPABASE_KONG_HTTPS=8443
PORT_SUPABASE_STUDIO=3000
PORT_SUPABASE_POSTGRES=5432
#############################################################
## APP CONFIGURATION
#############################################################
## Supabase Basic URLs and Endpoints
SITE_URL=${APP_PROTOCOL}://${APP_URL}
SUPABASE_URL=${APP_PROTOCOL}://supa.${APP_URL}
SUPABASE_PUBLIC_URL=${APP_PROTOCOL}://supastudio.${APP_URL}
## App domains
APP_SITE_URL=${SITE_URL}
APP_SUPABASE_URL=${SUPABASE_URL}
APP_STUDIO_URL=${SUPABASE_PUBLIC_URL}
#############################################################
## SUPABASE CONFIGURATION
#############################################################
## Supabase Authentication Keys and Secrets
# JWT configuration
JWT_SECRET=mE9FCC2YvHyrFIyyloH27F3lw51Ij93a77ejMZY-NRc
JWT_EXPIRY=3600
SECRET_KEY_BASE=UpNVntn3cDxHJpq99YMc1T1AQgQpc8kfYTuRgBiYa15BLrx8etQoXz3gZv1/u2oq
VAULT_ENC_KEY=your-encryption-key-32-chars-min
# API Keys
ANON_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJhdXRoZW50aWNhdGVkIiwiaWF0IjoxNzM0OTg4MzkxLCJpc3MiOiJzdXBhYmFzZSIsImV4cCI6MTc2NjUyNDM5MSwicm9sZSI6ImFub24ifQ.utdDZzVlhYIc-cSXuC2kyZz7HN59YfyMH4eaOw1hRlk
SERVICE_ROLE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJhdXRoZW50aWNhdGVkIiwiaWF0IjoxNzM0OTg4MzkxLCJpc3MiOiJzdXBhYmFzZSIsImV4cCI6MTc2NjUyNDM5MSwicm9sZSI6InNlcnZpY2Vfcm9sZSJ9.y-HHZC_Rxr8OTOX2rmb8ZgMnwLkSJYAF_lIHjkVtAyc
## Supabase Database Configuration
POSTGRES_PASSWORD=your-super-secret-and-long-postgres-password
POSTGRES_HOST=db
POSTGRES_DB=postgres
POSTGRES_PORT=${PORT_SUPABASE_POSTGRES}
## Supabase Dashboard Configuration
DASHBOARD_USERNAME=supabase
DASHBOARD_PASSWORD=password
## Supabase Pooler Configuration (Database Connection Pooling)
POOLER_PROXY_PORT_TRANSACTION=6543
POOLER_DEFAULT_POOL_SIZE=20
POOLER_MAX_CLIENT_CONN=100
POOLER_TENANT_ID=your-tenant-id
## Supabase Kong API Gateway Configuration
KONG_HTTP_PORT=${PORT_SUPABASE_KONG_HTTP}
KONG_HTTPS_PORT=${PORT_SUPABASE_KONG_HTTPS}
## Supabase PostgREST Configuration
PGRST_DB_SCHEMAS=public,storage,graphql_public
## Supabase Auth Server Configuration
# General Auth Settings
ADDITIONAL_REDIRECT_URLS=""
AUTH_LOG_LEVEL=debug
DISABLE_SIGNUP=false
# Security Settings
# Uncomment these for enhanced security
# GOTRUE_SECURITY_REFRESH_TOKEN_ROTATION_ENABLED=true
# GOTRUE_SECURITY_REFRESH_TOKEN_REUSE_INTERVAL=30s
# GOTRUE_SECURITY_UPDATE_PASSWORD_REQUIRE_REAUTHENTICATION=true
# GOTRUE_PASSWORD_MIN_LENGTH=10
# GOTRUE_PASSWORD_REQUIRED_CHARACTERS=lowercase:uppercase:number:symbol
# Rate Limiting
# Uncomment these to enable rate limiting
# GOTRUE_RATE_LIMIT_HEADER=IP
# GOTRUE_RATE_LIMIT_EMAIL_SENT=4
## Supabase Email Configuration
# Mailer URL Paths
MAILER_URLPATHS_CONFIRMATION="/auth/v1/verify"
MAILER_URLPATHS_INVITE="/auth/v1/verify"
MAILER_URLPATHS_RECOVERY="/auth/v1/verify"
MAILER_URLPATHS_EMAIL_CHANGE="/auth/v1/verify"
MAILER_SECURE_EMAIL_CHANGE_ENABLED=true
GOTRUE_MAILER_EXTERNAL_HOSTS="localhost,supabase.localhost"
# Email Auth Settings
ENABLE_EMAIL_SIGNUP=true
ENABLE_EMAIL_AUTOCONFIRM=true
SMTP_ADMIN_EMAIL=${APP_AUTHOR_EMAIL}
SMTP_USER=fake_mail_user
SMTP_PASS=fake_mail_password
SMTP_SENDER_NAME=fake_sender
SMTP_HOST=smtp.zoho.eu
SMTP_PORT=587
SMTP_USER=admin@${APP_URL}
SMTP_PASS=&%Z040&%
SMTP_ADMIN_EMAIL=admin@${APP_URL}
SMTP_SENDER_NAME="Classroom Copilot"
## Supabase Phone Auth Configuration
ENABLE_PHONE_SIGNUP=true
ENABLE_PHONE_AUTOCONFIRM=true
## Supabase Anonymous Users
ENABLE_ANONYMOUS_USERS=false
## Supabase Studio Configuration
SUPABASE_PROJECT_ID=${APP_NAME}
STUDIO_DEFAULT_ORGANIZATION=${APP_AUTHOR}
STUDIO_DEFAULT_PROJECT=${APP_NAME}
STUDIO_PORT=${PORT_SUPABASE_STUDIO}
IMGPROXY_ENABLE_WEBP_DETECTION=true
## Supabase OAuth Providers
# Azure Auth
AZURE_ENABLED=false
AZURE_CLIENT_ID=c9a27d21-2012-44ce-9ebd-ffc868444383
AZURE_SECRET=.Nr8Q~kBXgDp_aX7~TlgCbzJHPledeTQwfTzja5y
AZURE_REDIRECT_URI=${APP_PROTOCOL}://${APP_URL}/web/auth/callback
AZURE_TENANT_ID=e637ec20-60ca-4dfc-a605-d2798f9e977b
## Supabase Functions Configuration
FUNCTIONS_VERIFY_JWT=false
## Supabase Logs Configuration
LOGFLARE_LOGGER_BACKEND_API_KEY=your-super-secret-and-long-logflare-key
LOGFLARE_API_KEY=your-super-secret-and-long-logflare-key
## App Information
APP_NAME=ClassroomCopilot
APP_VERSION=0.0.1
APP_DESCRIPTION="An AI copilot for learners and educators."
APP_AUTHOR=KevlarAI
APP_AUTHOR_EMAIL=kcar@kevlarai.com
APP_URL=classroomcopilot.ai
APP_URL_INTERNAL=classroomcopilot.internal
APP_PROTOCOL=https
APP_WS_PROTOCOL=wss
# KevelarAI URLs
KEVLARAI_URL=kevlarai.ai
KEVLARAI_PROTOCOL=https
# Super Admin user
SUPER_ADMIN_EMAIL=admin@classroomcopilot.ai
SUPER_ADMIN_WORKER_EMAIL=kcar@kevlarai.com
SUPER_ADMIN_PASSWORD=password
SUPER_ADMIN_USERNAME=superadmin
SUPER_ADMIN_NAME="Super Admin"
SUPER_ADMIN_DISPLAY_NAME="CC Creator"
SUPER_ADMIN_CALENDAR_START_DATE=2025-01-01
SUPER_ADMIN_CALENDAR_END_DATE=2025-01-31
## Runtime settings
PROJECT_DIR=/Users/kcar/dev/ClassroomCopilot
BUILD_OS=macos
NGINX_MODE=prod
DEV_MODE=false
HOST_IP=localhost
BACKEND_DEV_MODE=false
STRICT_MODE=false
SUPER_ADMIN_CHECK=true
INIT_SUPER_ADMIN=false
## Docker compose environment variables
COMPOSE_PROJECT_NAME=classroomcopilot-${NGINX_MODE:-dev}
## Hosts
HOST_OLLAMA=${HOST_IP}
## Ports
PORT_KEYCLOAK=8080
PORT_KEYCLOAK_MANAGEMENT=9000
PORT_KEYCLOAK_SSL=8444
PORT_CC_ADMIN=5173
PORT_CC_ADMIN_DEVTOOLS=5001
PORT_SUPABASE_KONG_HTTP=8000
PORT_SUPABASE_KONG_HTTPS=8443
PORT_SUPABASE_STUDIO=3000
PORT_SUPABASE_POSTGRES=5432
# PORT_SOLID_CSS=3006 # not used currently in docker on by localhost solid server
PORT_SOLID_PROXY=3007
PORT_SOLID_PROXY_SSL=3008
PORT_NEO4J_BOLT=7687
PORT_NEO4J_HTTP=7474
PORT_NEO4J_HTTPS=7473
PORT_FRONTEND=3003
PORT_FRONTEND_SSL=3033
PORT_MARKETING_SITE=3004
PORT_MARKETING_SITE_SSL=3044
PORT_BACKEND=8880
PORT_BACKEND_SSL=8088
PORT_TLDRAW_SYNC=5002
PORT_WHISPERLIVE=5050
PORT_WHISPERLIVE_SSL=5053
PORT_TEXT_GENERATION=7861
PORT_TEXT_GENERATION_API=5010
PORT_STABLE_DIFFUSION=7860
PORT_STABLE_DIFFUSION_API=5011
PORT_OLLAMA=11434
PORT_OPEN_WEBUI=3333
PORT_OPEN_WEBUI_SSL=3334
PORT_OPENWEBUI_PROXY_INTERNAL=3335
PORT_MORPHIC=3001
PORT_REDIS=6379
PORT_SEARXNG=8090
PORT_MAILHOG_SMTP=1025
PORT_MAILHOG_WEB=8025
# WhisperLive Frontend
PORT_WHISPERLIVE_FRONTEND=5054
PORT_WHISPERLIVE_FRONTEND_SSL=5055
#############################################################
## APP CONFIGURATION
#############################################################
## Supabase Basic URLs and Endpoints
SITE_URL=${APP_PROTOCOL}://${APP_URL}
SUPABASE_URL=${APP_PROTOCOL}://supa.${APP_URL}
API_EXTERNAL_URL=${APP_PROTOCOL}://supa.${APP_URL}
SUPABASE_PUBLIC_URL=${APP_PROTOCOL}://supastudio.${APP_URL}
## App domains
APP_SITE_URL=${SITE_URL}
APP_SUPABASE_URL=${SUPABASE_URL}
APP_STUDIO_URL=${SUPABASE_PUBLIC_URL}
APP_API_URL=${APP_PROTOCOL}://api.${APP_URL}
APP_GRAPH_URL=${APP_PROTOCOL}://graph.${APP_URL}
APP_BOLT_URL=bolt://neo4j:${PORT_NEO4J_BOLT}
CC_ADMIN_URL=${APP_PROTOCOL}://admin.${APP_URL}
APP_ADMIN_API_URL=${APP_PROTOCOL}://admin-api.${APP_URL}
## Vite environment variables
VITE_APP_URL=app.${APP_URL}
#############################################################
## OAUTH2 PROXY CONFIGURATION
#############################################################
KEYCLOAK_SECRET_OPENWEBUI=XbKriIGb1YRSKmALfoKodpyJaQQOtP4U
KEYCLOAK_SECRET_ADMIN=""
COOKIE_SECRET_OPENWEBUI=QAm4ImW8ieeEftQgRly5guVYqHzcU/m+to5k5sHqfF8=
COOKIE_SECRET_ADMIN=yDaNr1DwYqRykdoeW+mS/Ari5pWs8m4YPQJsjIt2xYQ=
#############################################################
## SUPABASE CONFIGURATION
#############################################################
## Supabase Authentication Keys and Secrets
# JWT configuration
JWT_SECRET=mE9FCC2YvHyrFIyyloH27F3lw51Ij93a77ejMZY-NRc
JWT_EXPIRY=3600
SECRET_KEY_BASE=UpNVntn3cDxHJpq99YMc1T1AQgQpc8kfYTuRgBiYa15BLrx8etQoXz3gZv1/u2oq
VAULT_ENC_KEY=your-encryption-key-32-chars-min
# API Keys
ANON_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJhdXRoZW50aWNhdGVkIiwiaWF0IjoxNzM0OTg4MzkxLCJpc3MiOiJzdXBhYmFzZSIsImV4cCI6MTc2NjUyNDM5MSwicm9sZSI6ImFub24ifQ.utdDZzVlhYIc-cSXuC2kyZz7HN59YfyMH4eaOw1hRlk
SERVICE_ROLE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJhdXRoZW50aWNhdGVkIiwiaWF0IjoxNzM0OTg4MzkxLCJpc3MiOiJzdXBhYmFzZSIsImV4cCI6MTc2NjUyNDM5MSwicm9sZSI6InNlcnZpY2Vfcm9sZSJ9.y-HHZC_Rxr8OTOX2rmb8ZgMnwLkSJYAF_lIHjkVtAyc
## Supabase Database Configuration
POSTGRES_PASSWORD=your-super-secret-and-long-postgres-password
POSTGRES_HOST=db
POSTGRES_DB=postgres
POSTGRES_PORT=${PORT_SUPABASE_POSTGRES}
## Supabase Dashboard Configuration
DASHBOARD_USERNAME=supabase
DASHBOARD_PASSWORD=password
## Supabase Pooler Configuration (Database Connection Pooling)
POOLER_PROXY_PORT_TRANSACTION=6543
POOLER_DEFAULT_POOL_SIZE=20
POOLER_MAX_CLIENT_CONN=100
POOLER_TENANT_ID=your-tenant-id
## Supabase Kong API Gateway Configuration
KONG_HTTP_PORT=${PORT_SUPABASE_KONG_HTTP}
KONG_HTTPS_PORT=${PORT_SUPABASE_KONG_HTTPS}
## Supabase PostgREST Configuration
PGRST_DB_SCHEMAS=public,storage,graphql_public
## Supabase Auth Server Configuration
# General Auth Settings
ADDITIONAL_REDIRECT_URLS=""
AUTH_LOG_LEVEL=debug
DISABLE_SIGNUP=false
# Security Settings
# Uncomment these for enhanced security
# GOTRUE_SECURITY_REFRESH_TOKEN_ROTATION_ENABLED=true
# GOTRUE_SECURITY_REFRESH_TOKEN_REUSE_INTERVAL=30s
# GOTRUE_SECURITY_UPDATE_PASSWORD_REQUIRE_REAUTHENTICATION=true
# GOTRUE_PASSWORD_MIN_LENGTH=10
# GOTRUE_PASSWORD_REQUIRED_CHARACTERS=lowercase:uppercase:number:symbol
# Rate Limiting
# Uncomment these to enable rate limiting
# GOTRUE_RATE_LIMIT_HEADER=IP
# GOTRUE_RATE_LIMIT_EMAIL_SENT=4
## Supabase Email Configuration
# Mailer URL Paths
MAILER_URLPATHS_CONFIRMATION="/auth/v1/verify"
MAILER_URLPATHS_INVITE="/auth/v1/verify"
MAILER_URLPATHS_RECOVERY="/auth/v1/verify"
MAILER_URLPATHS_EMAIL_CHANGE="/auth/v1/verify"
MAILER_SECURE_EMAIL_CHANGE_ENABLED=true
GOTRUE_MAILER_EXTERNAL_HOSTS="localhost,supabase.localhost"
# Email Auth Settings
ENABLE_EMAIL_SIGNUP=true
ENABLE_EMAIL_AUTOCONFIRM=true
SMTP_ADMIN_EMAIL=${APP_AUTHOR_EMAIL}
SMTP_USER=fake_mail_user
SMTP_PASS=fake_mail_password
SMTP_SENDER_NAME=fake_sender
SMTP_HOST=smtp.zoho.eu
SMTP_PORT=587
SMTP_USER=admin@${APP_URL}
SMTP_PASS=&%Z040&%
SMTP_ADMIN_EMAIL=admin@${APP_URL}
SMTP_SENDER_NAME="Classroom Copilot"
## Supabase Phone Auth Configuration
ENABLE_PHONE_SIGNUP=true
ENABLE_PHONE_AUTOCONFIRM=true
## Supabase Anonymous Users
ENABLE_ANONYMOUS_USERS=false
## Supabase OAuth Providers
# Azure Auth
AZURE_ENABLED=false
AZURE_CLIENT_ID=c9a27d21-2012-44ce-9ebd-ffc868444383
AZURE_SECRET=.Nr8Q~kBXgDp_aX7~TlgCbzJHPledeTQwfTzja5y
AZURE_REDIRECT_URI=${APP_PROTOCOL}://${APP_URL}/web/auth/callback
AZURE_TENANT_ID=e637ec20-60ca-4dfc-a605-d2798f9e977b
## Supabase Studio Configuration
SUPABASE_PROJECT_ID=${APP_NAME}
STUDIO_DEFAULT_ORGANIZATION=${APP_AUTHOR}
STUDIO_DEFAULT_PROJECT=${APP_NAME}
STUDIO_PORT=${PORT_SUPABASE_STUDIO}
IMGPROXY_ENABLE_WEBP_DETECTION=true
## Supabase Functions Configuration
FUNCTIONS_VERIFY_JWT=false
## Supabase Logs Configuration
LOGFLARE_LOGGER_BACKEND_API_KEY=your-super-secret-and-long-logflare-key
LOGFLARE_API_KEY=your-super-secret-and-long-logflare-key
## Supabase Analytics Configuration (Google Cloud)
GOOGLE_PROJECT_ID=GOOGLE_PROJECT_ID
GOOGLE_PROJECT_NUMBER=GOOGLE_PROJECT_NUMBER
#############################################################
## OTHER SERVICES CONFIGURATION
#############################################################
# Neo4j Settings
USER_NEO4J=neo4j
PASSWORD_NEO4J=password
NEO4J_AUTH=${USER_NEO4J}/${PASSWORD_NEO4J}
## Keycloak Configuration
KEYCLOAK_ADMIN=admin
KEYCLOAK_ADMIN_PASSWORD=admin
KEYCLOAK_DB_USER=keycloak
KEYCLOAK_DB_PASSWORD=keycloak
KEYCLOAK_DB_DATABASE=keycloak
KEYCLOAK_PORT=${PORT_KEYCLOAK}
KEYCLOAK_MANAGEMENT_PORT=${PORT_KEYCLOAK_MANAGEMENT}
KEYCLOAK_SSL_PORT=${PORT_KEYCLOAK_SSL}
KEYCLOAK_IMAGE=quay.io/keycloak/keycloak:24.0.1
KEYCLOAK_REALM=classroomcopilot
KEYCLOAK_CLIENT_ID=frontend-app
KEYCLOAK_CLIENT_SECRET=your-super-secret-and-long-keycloak-client-secret
KEYCLOAK_URL=${KEVLARAI_PROTOCOL}://keycloak.${KEVLARAI_URL}
KEYCLOAK_ADMIN_URL=${KEVLARAI_PROTOCOL}://keycloak-admin.${KEVLARAI_URL}
KEYCLOAK_INTERNAL_URL=http://keycloak:8080
## Backend
UVICORN_WORKERS=2
CORS_SITE_URL=${APP_URL}
NODE_FILESYSTEM_PATH=/node_filesystem
BACKEND_INIT_PATH=/init
LOG_PATH=/logs
# Log level must be lowercase for Node.js services using Pino logger (storage, functions)
# Valid values: trace, debug, info, warn, error, fatal
LOG_LEVEL=debug
# Whisper live settings
WHISPERLIVE_SSL=false
WHISPL_USE_CUSTOM_MODEL=false
FASTERWHISPER_MODEL=faster-whisper-large-v3
WHISPERLIVE_URL=${APP_WS_PROTOCOL}://whisperlive.${APP_URL}
## SearXNG Settings
SEARXNG_URL=${APP_PROTOCOL}://search.${APP_URL}
SEARXNG_SECRET="" # generate a secret key e.g. openssl rand -base64 32
SEARXNG_PORT=${PORT_SEARXNG} # default port
SEARXNG_BIND_ADDRESS=0.0.0.0 # default address
SEARXNG_IMAGE_PROXY=true # enable image proxy
SEARXNG_LIMITER=false # can be enabled to limit the number of requests per IP address
SEARXNG_DEFAULT_DEPTH=basic # Set to 'basic' or 'advanced', only affects SearXNG searches
SEARXNG_MAX_RESULTS=50 # Maximum number of results to return from SearXNG
SEARXNG_ENGINES=google,bing,duckduckgo,wikipedia # Search engines to use
SEARXNG_TIME_RANGE=None # Time range for search results: day, week, month, year, or None (for all time)
SEARXNG_SAFESEARCH=0 # Safe search setting: 0 (off), 1 (moderate), 2 (strict)
## Morphic Settings
NEXT_PUBLIC_BASE_URL=http://morphic:3001
USE_LOCAL_REDIS=true
LOCAL_REDIS_URL=redis://redis:6379
SEARXNG_API_URL=${APP_PROTOCOL}://search.${APP_URL}
SEARCH_API=searxng # use searxng, tavily or exa
## Notion settings
NOTION_CAPTAINS_LOG_SENDER_INTERNAL_INTEGRATION_SECRET=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
## API Keys
OPENAI_API_KEY=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
LANGCHAIN_API_KEY=ls__27405da61a724d18ba4833a0b79730e0
## Other Settings
LANGCHAIN_TRACING_V2=true
LANGCHAIN_PROJECT='LangChain Perpexity Clone with human in the loop for Classroom Copilot'
USER_AGENT='cc_user_agent'
# Google API Settings
YOUTUBE_API_KEY=AIzaSyDbpJInK6dsFUjY6oG60FlzYkj7JUJmUNs
GOOGLE_CLIENT_SECRETS_FILE=Users/kcar/ClassroomCopilot/backend/app/secrets/google_cloud_yt_credentials.json

23
.gitignore vendored Normal file
View File

@ -0,0 +1,23 @@
# Environment files (keep secrets out of the repository)
.env
.env.*
!.env.example
.archive/
# Docker volume RUNTIME data (large binary/runtime files - not schema SQL)
# NOTE(review): .dockerignore lists "volumes/db/data" while this ignores
# "volumes/db-data/" — confirm the actual volume directory name so both
# files agree.
volumes/db-data/
volumes/storage/
volumes/pooler/
volumes/logs/
# Backup files
*.bak
*.bak.*
backups/
# Logs
logs/
*.log

View File

@ -1,25 +0,0 @@
-- Initialize the Keycloak schema and role inside the shared Postgres cluster.
-- Create Keycloak schema if it doesn't exist
create schema if not exists keycloak;
-- Create Keycloak user if it doesn't exist.
-- SECURITY(review): the password is hard-coded as 'keycloak'; this should be
-- injected from the environment rather than committed.
do $$
begin
if not exists (select 1 from pg_roles where rolname = 'keycloak') then
create user keycloak with password 'keycloak';
end if;
end
$$;
-- Grant schema usage and ownership to Keycloak user
alter schema keycloak owner to keycloak;
grant usage on schema keycloak to keycloak;
-- Grant all privileges on all tables in keycloak schema to keycloak user
grant all privileges on all tables in schema keycloak to keycloak;
-- Grant all privileges on all sequences in keycloak schema to keycloak user
grant all privileges on all sequences in schema keycloak to keycloak;
-- Set default privileges for future tables and sequences
alter default privileges in schema keycloak grant all on tables to keycloak;
alter default privileges in schema keycloak grant all on sequences to keycloak;

View File

@ -1,200 +0,0 @@
--[ Database Schema Version ]--
-- Version: 1.0.0
-- Last Updated: 2024-02-24
-- Description: Core schema setup for ClassConcepts
-- Dependencies: auth.users (Supabase Auth)
--[ Validation ]--
-- Fail fast if the Supabase-provided prerequisites are missing, so the rest
-- of the script does not half-apply.
do $$
begin
-- Verify required extensions
if not exists (select 1 from pg_extension where extname = 'uuid-ossp') then
raise exception 'Required extension uuid-ossp is not installed';
end if;
-- Verify auth schema exists
if not exists (select 1 from information_schema.schemata where schema_name = 'auth') then
raise exception 'Required auth schema is not available';
end if;
-- Verify storage schema exists
if not exists (select 1 from information_schema.schemata where schema_name = 'storage') then
raise exception 'Required storage schema is not available';
end if;
end $$;
--[ 1. Extensions ]--
create extension if not exists "uuid-ossp";
-- Create rpc schema if it doesn't exist.
-- NOTE(review): the functions below are created WITHOUT a schema qualifier,
-- so they land on the current search_path (typically public), not in rpc —
-- confirm whether they were meant to live in rpc.
create schema if not exists rpc;
grant usage on schema rpc to anon, authenticated;
-- Create exec_sql function for admin operations.
-- SECURITY(review): this executes arbitrary SQL as the function owner
-- (SECURITY DEFINER). No GRANT is visible here, but if it is ever exposed
-- through the API it is a full database takeover — verify who can EXECUTE it.
create or replace function exec_sql(query text)
returns void as $$
begin
execute query;
end;
$$ language plpgsql security definer;
-- Create updated_at trigger function: stamps NEW.updated_at with the
-- current UTC time on every firing row.
create or replace function public.handle_updated_at()
returns trigger as $$
begin
new.updated_at = timezone('utc'::text, now());
return new;
end;
$$ language plpgsql security definer;
--[ 5. Core Tables ]--
-- NOTE(review): section numbering jumps from 1 to 5; sections 2-4 are not
-- present in this script — confirm nothing was lost.
-- Base user profiles, 1:1 with Supabase auth.users (cascade on user delete).
create table if not exists public.profiles (
id uuid primary key references auth.users(id) on delete cascade,
email text not null unique,
user_type text not null check (user_type in ('admin', 'email_teacher', 'email_student')),
username text not null unique,
full_name text,
display_name text,
metadata jsonb default '{}'::jsonb,
last_login timestamp with time zone,
created_at timestamp with time zone default timezone('utc'::text, now()),
updated_at timestamp with time zone default timezone('utc'::text, now())
);
comment on table public.profiles is 'User profiles linked to Supabase auth.users';
-- NOTE(review): the column comment below says 'admin, teacher, or student'
-- but the CHECK constraint accepts 'admin', 'email_teacher', 'email_student'
-- — the comment text should be updated to match the constraint.
comment on column public.profiles.user_type is 'Type of user: admin, teacher, or student';
-- Institute import data: one row per raw establishment record from the
-- external source, keyed by URN when available.
create table if not exists public.institute_imports (
id uuid primary key default uuid_generate_v4(),
urn text unique,
establishment_name text not null,
la_code text,
la_name text,
establishment_number text,
establishment_type text,
establishment_type_group text,
establishment_status text,
reason_establishment_opened text,
open_date date,
reason_establishment_closed text,
close_date date,
phase_of_education text,
statutory_low_age integer,
statutory_high_age integer,
boarders text,
nursery_provision text,
official_sixth_form text,
gender text,
religious_character text,
religious_ethos text,
diocese text,
admissions_policy text,
school_capacity integer,
special_classes text,
census_date date,
number_of_pupils integer,
number_of_boys integer,
number_of_girls integer,
percentage_fsm numeric(5,2),
trust_school_flag text,
trusts_name text,
school_sponsor_flag text,
school_sponsors_name text,
federation_flag text,
federations_name text,
ukprn text,
fehe_identifier text,
further_education_type text,
ofsted_last_inspection date,
last_changed_date date,
street text,
locality text,
address3 text,
town text,
county text,
postcode text,
school_website text,
telephone_num text,
head_title text,
head_first_name text,
head_last_name text,
head_preferred_job_title text,
gssla_code text,
parliamentary_constituency text,
urban_rural text,
rsc_region text,
country text,
uprn text,
sen_stat boolean,
sen_no_stat boolean,
sen_unit_on_roll integer,
sen_unit_capacity integer,
resourced_provision_on_roll integer,
resourced_provision_capacity integer,
metadata jsonb default '{}'::jsonb,
imported_at timestamp with time zone default timezone('utc'::text, now()),
updated_at timestamp with time zone default timezone('utc'::text, now())
);
comment on table public.institute_imports is 'Raw institute data imported from external sources';
-- Active institutes: curated records, optionally linked back to their raw
-- import row, with per-target Neo4j sync bookkeeping.
create table if not exists public.institutes (
id uuid primary key default uuid_generate_v4(),
import_id uuid references public.institute_imports(id),
name text not null,
urn text unique,
status text not null default 'active' check (status in ('active', 'inactive', 'pending')),
address jsonb default '{}'::jsonb,
website text,
metadata jsonb default '{}'::jsonb,
neo4j_unique_id text,
neo4j_public_sync_status text default 'pending' check (neo4j_public_sync_status in ('pending', 'synced', 'failed')),
neo4j_public_sync_at timestamp with time zone,
neo4j_private_sync_status text default 'not_started' check (neo4j_private_sync_status in ('not_started', 'pending', 'synced', 'failed')),
neo4j_private_sync_at timestamp with time zone,
created_at timestamp with time zone default timezone('utc'::text, now()),
updated_at timestamp with time zone default timezone('utc'::text, now())
);
comment on table public.institutes is 'Active institutes in the system';
--[ 6. Relationship Tables ]--
-- Institute memberships: join table between profiles and institutes; a
-- profile may hold at most one membership per institute (unique pair below).
create table if not exists public.institute_memberships (
id uuid primary key default uuid_generate_v4(),
profile_id uuid references public.profiles(id) on delete cascade,
institute_id uuid references public.institutes(id) on delete cascade,
role text not null check (role in ('admin', 'teacher', 'student')),
tldraw_preferences jsonb default '{}'::jsonb,
metadata jsonb default '{}'::jsonb,
created_at timestamp with time zone default timezone('utc'::text, now()),
updated_at timestamp with time zone default timezone('utc'::text, now()),
unique(profile_id, institute_id)
);
comment on table public.institute_memberships is 'Manages user roles and relationships with institutes';
-- Membership requests.
-- NOTE(review): unlike institute_memberships, there is no unique
-- (profile_id, institute_id) constraint here, so a user can accumulate
-- multiple pending requests for the same institute — confirm intended.
create table if not exists public.institute_membership_requests (
id uuid primary key default uuid_generate_v4(),
profile_id uuid references public.profiles(id) on delete cascade,
institute_id uuid references public.institutes(id) on delete cascade,
requested_role text check (requested_role in ('teacher', 'student')),
status text default 'pending' check (status in ('pending', 'approved', 'rejected')),
metadata jsonb default '{}'::jsonb,
created_at timestamp with time zone default timezone('utc'::text, now()),
updated_at timestamp with time zone default timezone('utc'::text, now())
);
comment on table public.institute_membership_requests is 'Tracks requests to join institutes';
--[ 7. Audit Tables ]--
-- System audit logs. The actor is kept nullable (ON DELETE SET NULL) so
-- audit history survives profile deletion.
create table if not exists public.audit_logs (
id uuid primary key default uuid_generate_v4(),
profile_id uuid references public.profiles(id) on delete set null,
action_type text,
table_name text,
record_id uuid,
changes jsonb,
created_at timestamp with time zone default timezone('utc'::text, now())
);
comment on table public.audit_logs is 'System-wide audit trail for important operations';

View File

@ -1,50 +0,0 @@
-- Row-level security setup for storage.buckets: admins get full access,
-- regular authenticated users may create and view only their own buckets.
-- Enable RLS on storage.buckets
alter table if exists storage.buckets enable row level security;
-- Drop existing policies if they exist so this script is re-runnable.
drop policy if exists "Super admin has full access to buckets" on storage.buckets;
drop policy if exists "Users can create their own buckets" on storage.buckets;
drop policy if exists "Users can view their own buckets" on storage.buckets;
-- Create new policies with proper permissions.
-- NOTE(review): this FOR ALL policy has no TO clause (applies to all roles)
-- and no WITH CHECK clause; per PostgreSQL semantics the USING expression
-- is then reused for write checks — confirm that is the intent.
create policy "Super admin has full access to buckets"
on storage.buckets for all
using (
current_user = 'service_role'
or current_user = 'supabase_admin'
or exists (
select 1 from public.profiles
where id = auth.uid()
and user_type = 'admin'
)
);
-- Allow authenticated users to create buckets they own (admins may create any).
create policy "Users can create their own buckets"
on storage.buckets for insert
to authenticated
with check (
owner::text = auth.uid()::text
or exists (
select 1 from public.profiles
where id = auth.uid()
and user_type = 'admin'
)
);
-- Allow users to view buckets they own (admins may view any).
create policy "Users can view their own buckets"
on storage.buckets for select
to authenticated
using (
owner::text = auth.uid()::text
or exists (
select 1 from public.profiles
where id = auth.uid()
and user_type = 'admin'
)
);
-- Grant necessary permissions.
-- NOTE(review): GRANT ALL to authenticated is broad; row access is still
-- limited by the policies above, but verify objects has its own policies
-- before relying on this.
grant all on storage.buckets to authenticated;
grant all on storage.objects to authenticated;

View File

@ -1,31 +0,0 @@
-- Ensure uuid-ossp extension is enabled (installed into the extensions
-- schema here, unlike the unqualified CREATE EXTENSION in the core script).
create extension if not exists "uuid-ossp" schema extensions;
-- One-shot bootstrap function.
-- NOTE(review): despite its name, this does NOT create an admin profile;
-- when no admin exists yet it only issues blanket grants to authenticated.
-- Those grants are broader than the SELECT/INSERT/UPDATE-only grants in the
-- core permissions section — confirm this widening is intended.
create or replace function public.setup_initial_admin()
returns void
language plpgsql
security definer
set search_path = public, extensions
as $$
begin
-- Check if admin already exists; if so, do nothing.
if exists (
select 1 from public.profiles
where user_type = 'admin'
) then
return;
end if;
-- Grant necessary permissions
grant all on all tables in schema public to authenticated;
grant all on all sequences in schema public to authenticated;
grant all on all functions in schema public to authenticated;
end;
$$;
-- Execute the function once at migration time...
select public.setup_initial_admin();
-- ...then drop it so it cannot be invoked again (e.g. via the API).
drop function public.setup_initial_admin();

View File

@ -1,23 +0,0 @@
-- Keycloak role/schema bootstrap.
-- NOTE(review): near-duplicate of the lowercase keycloak init script; this
-- variant adds GRANT CONNECT but omits ALTER SCHEMA ... OWNER TO keycloak —
-- only one of the two should survive, with a single agreed grant set.
-- Create Keycloak user if it doesn't exist.
-- SECURITY(review): password 'keycloak' is hard-coded; inject from env.
DO $$
BEGIN
IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'keycloak') THEN
CREATE USER keycloak WITH PASSWORD 'keycloak';
END IF;
END
$$;
-- Create Keycloak schema if it doesn't exist
CREATE SCHEMA IF NOT EXISTS keycloak;
-- Grant necessary permissions
GRANT USAGE ON SCHEMA keycloak TO keycloak;
GRANT ALL ON ALL TABLES IN SCHEMA keycloak TO keycloak;
GRANT ALL ON ALL SEQUENCES IN SCHEMA keycloak TO keycloak;
-- Set default privileges for future tables
ALTER DEFAULT PRIVILEGES IN SCHEMA keycloak GRANT ALL ON TABLES TO keycloak;
ALTER DEFAULT PRIVILEGES IN SCHEMA keycloak GRANT ALL ON SEQUENCES TO keycloak;
-- Grant connect permission to the database
GRANT CONNECT ON DATABASE postgres TO keycloak;

View File

@ -1,61 +1,67 @@
# Usage
# Start: docker compose up
# With helpers: docker compose -f docker-compose.yml -f ./dev/docker-compose.dev.yml up
# Stop: docker compose down
# Destroy: docker compose -f docker-compose.yml -f ./dev/docker-compose.dev.yml down -v --remove-orphans
# Reset everything: ./reset.sh
name: supabase
services: services:
# Supabase containers
studio: studio:
container_name: supabase-studio container_name: supabase-studio
image: supabase/studio:20250113-83c9420 image: supabase/studio:2026.02.16-sha-26c615c
restart: unless-stopped restart: unless-stopped
ports:
- ${STUDIO_PORT}:3000
healthcheck: healthcheck:
test: test: [ "CMD", "node", "-e", "fetch('http://studio:3000/api/platform/profile').then((r) => {if (r.status !== 200) throw new Error(r.status)})" ]
[
"CMD",
"node",
"-e",
"fetch('http://studio:3000/api/profile').then((r) => {if (r.status !== 200) throw new Error(r.status)})",
]
timeout: 10s timeout: 10s
interval: 5s interval: 5s
retries: 3 retries: 3
depends_on: depends_on:
analytics: analytics:
condition: service_healthy condition: service_healthy
ports:
- ${PORT_SUPABASE_STUDIO}:3000
env_file:
- .env
environment: environment:
STUDIO_PG_META_URL: http://meta:8080 STUDIO_PG_META_URL: http://meta:8080
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
DEFAULT_PROJECT_ID: "ClassroomCopilot"
DEFAULT_ORGANIZATION_NAME: ${STUDIO_DEFAULT_ORGANIZATION} DEFAULT_ORGANIZATION_NAME: ${STUDIO_DEFAULT_ORGANIZATION}
DEFAULT_PROJECT_NAME: ${STUDIO_DEFAULT_PROJECT} DEFAULT_PROJECT_NAME: ${STUDIO_DEFAULT_PROJECT}
OPENAI_API_KEY: ${OPENAI_API_KEY:-} OPENAI_API_KEY: ${OPENAI_API_KEY:-}
SUPABASE_URL: ${SUPABASE_URL}
SUPABASE_URL: http://kong:8000
SUPABASE_PUBLIC_URL: ${SUPABASE_PUBLIC_URL} SUPABASE_PUBLIC_URL: ${SUPABASE_PUBLIC_URL}
SUPABASE_ANON_KEY: ${ANON_KEY} SUPABASE_ANON_KEY: ${ANON_KEY}
SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY} SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY}
LOGFLARE_API_KEY: ${LOGFLARE_API_KEY} AUTH_JWT_SECRET: ${JWT_SECRET}
LOGFLARE_PRIVATE_ACCESS_TOKEN: ${LOGFLARE_PRIVATE_ACCESS_TOKEN}
LOGFLARE_URL: http://analytics:4000 LOGFLARE_URL: http://analytics:4000
NEXT_PUBLIC_ENABLE_LOGS: true NEXT_PUBLIC_ENABLE_LOGS: true
# Comment to use Big Query backend for analytics
NEXT_ANALYTICS_BACKEND_PROVIDER: postgres NEXT_ANALYTICS_BACKEND_PROVIDER: postgres
networks: # Uncomment to use Big Query backend for analytics
- kevlarai-network # NEXT_ANALYTICS_BACKEND_PROVIDER: bigquery
kong: kong:
container_name: supabase-kong container_name: supabase-kong
image: kong:2.8.1 image: kong:2.8.1
restart: unless-stopped restart: unless-stopped
entrypoint: bash -c 'eval "echo \"$$(cat ~/temp.yml)\"" > ~/kong.yml && /docker-entrypoint.sh kong docker-start'
ports: ports:
- ${KONG_HTTP_PORT}:8000/tcp - ${KONG_HTTP_PORT}:8000/tcp
- ${KONG_HTTPS_PORT}:8443/tcp - ${KONG_HTTPS_PORT}:8443/tcp
volumes:
# https://github.com/supabase/supabase/issues/12661
- ./volumes/api/kong.yml:/home/kong/temp.yml:ro,z
depends_on: depends_on:
analytics: analytics:
condition: service_healthy condition: service_healthy
env_file:
- .env
environment: environment:
KONG_DATABASE: "off" KONG_DATABASE: "off"
KONG_DECLARATIVE_CONFIG: /home/kong/kong.yml KONG_DECLARATIVE_CONFIG: /home/kong/kong.yml
# https://github.com/supabase/cli/issues/14
KONG_DNS_ORDER: LAST,A,CNAME KONG_DNS_ORDER: LAST,A,CNAME
KONG_PLUGINS: request-transformer,cors,key-auth,acl,basic-auth KONG_PLUGINS: request-transformer,cors,key-auth,acl,basic-auth
KONG_NGINX_PROXY_PROXY_BUFFER_SIZE: 160k KONG_NGINX_PROXY_PROXY_BUFFER_SIZE: 160k
@ -64,59 +70,51 @@ services:
SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY} SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY}
DASHBOARD_USERNAME: ${DASHBOARD_USERNAME} DASHBOARD_USERNAME: ${DASHBOARD_USERNAME}
DASHBOARD_PASSWORD: ${DASHBOARD_PASSWORD} DASHBOARD_PASSWORD: ${DASHBOARD_PASSWORD}
KONG_PROXY_ACCESS_LOG: "/dev/stdout" # https://unix.stackexchange.com/a/294837
KONG_ADMIN_ACCESS_LOG: "/dev/stdout" entrypoint: bash -c 'eval "echo \"$$(cat ~/temp.yml)\"" > ~/kong.yml && /docker-entrypoint.sh kong docker-start'
KONG_PROXY_ERROR_LOG: "/dev/stderr"
KONG_ADMIN_ERROR_LOG: "/dev/stderr"
KONG_CORS_ORIGINS: "*"
KONG_CORS_METHODS: "GET,HEAD,PUT,PATCH,POST,DELETE,OPTIONS"
KONG_CORS_HEADERS: "DNT,X-Auth-Token,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range,Authorization,apikey,x-client-info"
KONG_CORS_EXPOSED_HEADERS: "Content-Length,Content-Range"
KONG_CORS_MAX_AGE: 3600
volumes:
- ./api/kong.yml:/home/kong/temp.yml:ro
networks:
- kevlarai-network
auth: auth:
container_name: supabase-auth container_name: supabase-auth
image: supabase/gotrue:v2.167.0 image: supabase/gotrue:v2.186.0
depends_on: restart: unless-stopped
db:
condition: service_healthy
analytics:
condition: service_healthy
healthcheck: healthcheck:
test: test: [ "CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:9999/health" ]
[
"CMD",
"wget",
"--no-verbose",
"--tries=1",
"--spider",
"http://localhost:9999/health",
]
timeout: 5s timeout: 5s
interval: 5s interval: 5s
retries: 3 retries: 3
restart: unless-stopped depends_on:
env_file: db:
- .env # Disable this if you are using an external Postgres database
condition: service_healthy
analytics:
condition: service_healthy
environment: environment:
GOTRUE_API_HOST: 0.0.0.0 GOTRUE_API_HOST: 0.0.0.0
GOTRUE_API_PORT: 9999 GOTRUE_API_PORT: 9999
API_EXTERNAL_URL: ${API_EXTERNAL_URL} API_EXTERNAL_URL: ${API_EXTERNAL_URL}
GOTRUE_DB_DRIVER: postgres GOTRUE_DB_DRIVER: postgres
GOTRUE_DB_DATABASE_URL: postgres://supabase_auth_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB} GOTRUE_DB_DATABASE_URL: postgres://supabase_auth_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
GOTRUE_SITE_URL: ${SITE_URL} GOTRUE_SITE_URL: ${SITE_URL}
GOTRUE_URI_ALLOW_LIST: ${ADDITIONAL_REDIRECT_URLS} GOTRUE_URI_ALLOW_LIST: ${ADDITIONAL_REDIRECT_URLS}
GOTRUE_DISABLE_SIGNUP: ${DISABLE_SIGNUP} GOTRUE_DISABLE_SIGNUP: ${DISABLE_SIGNUP}
GOTRUE_JWT_ADMIN_ROLES: service_role GOTRUE_JWT_ADMIN_ROLES: service_role
GOTRUE_JWT_AUD: authenticated GOTRUE_JWT_AUD: authenticated
GOTRUE_JWT_DEFAULT_GROUP_NAME: authenticated GOTRUE_JWT_DEFAULT_GROUP_NAME: authenticated
GOTRUE_JWT_EXP: ${JWT_EXPIRY} GOTRUE_JWT_EXP: ${JWT_EXPIRY}
GOTRUE_JWT_SECRET: ${JWT_SECRET} GOTRUE_JWT_SECRET: ${JWT_SECRET}
GOTRUE_LOG_LEVEL: ${AUTH_LOG_LEVEL}
GOTRUE_EXTERNAL_EMAIL_ENABLED: ${ENABLE_EMAIL_SIGNUP}
GOTRUE_EXTERNAL_ANONYMOUS_USERS_ENABLED: ${ENABLE_ANONYMOUS_USERS}
GOTRUE_MAILER_AUTOCONFIRM: ${ENABLE_EMAIL_AUTOCONFIRM}
# Uncomment to bypass nonce check in ID Token flow. Commonly set to true when using Google Sign In on mobile.
# GOTRUE_EXTERNAL_SKIP_NONCE_CHECK: true
# GOTRUE_MAILER_SECURE_EMAIL_CHANGE_ENABLED: true
# GOTRUE_SMTP_MAX_FREQUENCY: 1s
GOTRUE_SMTP_ADMIN_EMAIL: ${SMTP_ADMIN_EMAIL} GOTRUE_SMTP_ADMIN_EMAIL: ${SMTP_ADMIN_EMAIL}
GOTRUE_SMTP_HOST: ${SMTP_HOST} GOTRUE_SMTP_HOST: ${SMTP_HOST}
GOTRUE_SMTP_PORT: ${SMTP_PORT} GOTRUE_SMTP_PORT: ${SMTP_PORT}
@ -127,32 +125,39 @@ services:
GOTRUE_MAILER_URLPATHS_CONFIRMATION: ${MAILER_URLPATHS_CONFIRMATION} GOTRUE_MAILER_URLPATHS_CONFIRMATION: ${MAILER_URLPATHS_CONFIRMATION}
GOTRUE_MAILER_URLPATHS_RECOVERY: ${MAILER_URLPATHS_RECOVERY} GOTRUE_MAILER_URLPATHS_RECOVERY: ${MAILER_URLPATHS_RECOVERY}
GOTRUE_MAILER_URLPATHS_EMAIL_CHANGE: ${MAILER_URLPATHS_EMAIL_CHANGE} GOTRUE_MAILER_URLPATHS_EMAIL_CHANGE: ${MAILER_URLPATHS_EMAIL_CHANGE}
GOTRUE_MAILER_AUTOCONFIRM: ${ENABLE_EMAIL_AUTOCONFIRM}
GOTRUE_MAILER_SECURE_EMAIL_CHANGE_ENABLED: ${MAILER_SECURE_EMAIL_CHANGE_ENABLED}
GOTRUE_MAILER_EXTERNAL_HOSTS: "localhost,admin.localhost,kong,supabase.classroomcopilot.ai,classroomcopilot.ai"
GOTRUE_MAILER_EXTERNAL_HOSTS_ALLOW_REGEX: ".*\\.classroomcopilot\\.ai$"
GOTRUE_SMS_AUTOCONFIRM: ${ENABLE_PHONE_AUTOCONFIRM}
GOTRUE_EXTERNAL_EMAIL_ENABLED: ${ENABLE_EMAIL_SIGNUP}
GOTRUE_EXTERNAL_ANONYMOUS_USERS_ENABLED: ${ENABLE_ANONYMOUS_USERS}
GOTRUE_EXTERNAL_PHONE_ENABLED: ${ENABLE_PHONE_SIGNUP} GOTRUE_EXTERNAL_PHONE_ENABLED: ${ENABLE_PHONE_SIGNUP}
GOTRUE_EXTERNAL_AZURE_ENABLED: ${AZURE_ENABLED} GOTRUE_SMS_AUTOCONFIRM: ${ENABLE_PHONE_AUTOCONFIRM}
GOTRUE_EXTERNAL_AZURE_CLIENT_ID: ${AZURE_CLIENT_ID} # Uncomment to enable custom access token hook. Please see: https://supabase.com/docs/guides/auth/auth-hooks for full list of hooks and additional details about custom_access_token_hook
GOTRUE_EXTERNAL_AZURE_SECRET: ${AZURE_SECRET}
GOTRUE_EXTERNAL_AZURE_REDIRECT_URI: ${AZURE_REDIRECT_URI} # GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_ENABLED: "true"
networks: # GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_URI: "pg-functions://postgres/public/custom_access_token_hook"
- kevlarai-network # GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_SECRETS: "<standard-base64-secret>"
# GOTRUE_HOOK_MFA_VERIFICATION_ATTEMPT_ENABLED: "true"
# GOTRUE_HOOK_MFA_VERIFICATION_ATTEMPT_URI: "pg-functions://postgres/public/mfa_verification_attempt"
# GOTRUE_HOOK_PASSWORD_VERIFICATION_ATTEMPT_ENABLED: "true"
# GOTRUE_HOOK_PASSWORD_VERIFICATION_ATTEMPT_URI: "pg-functions://postgres/public/password_verification_attempt"
# GOTRUE_HOOK_SEND_SMS_ENABLED: "false"
# GOTRUE_HOOK_SEND_SMS_URI: "pg-functions://postgres/public/custom_access_token_hook"
# GOTRUE_HOOK_SEND_SMS_SECRETS: "v1,whsec_VGhpcyBpcyBhbiBleGFtcGxlIG9mIGEgc2hvcnRlciBCYXNlNjQgc3RyaW5n"
# GOTRUE_HOOK_SEND_EMAIL_ENABLED: "false"
# GOTRUE_HOOK_SEND_EMAIL_URI: "http://host.docker.internal:54321/functions/v1/email_sender"
# GOTRUE_HOOK_SEND_EMAIL_SECRETS: "v1,whsec_VGhpcyBpcyBhbiBleGFtcGxlIG9mIGEgc2hvcnRlciBCYXNlNjQgc3RyaW5n"
rest: rest:
container_name: supabase-rest container_name: supabase-rest
image: postgrest/postgrest:v12.2.0 image: postgrest/postgrest:v14.5
restart: unless-stopped
depends_on: depends_on:
db: db:
# Disable this if you are using an external Postgres database
condition: service_healthy condition: service_healthy
analytics: analytics:
condition: service_healthy condition: service_healthy
restart: unless-stopped
env_file:
- .env
environment: environment:
PGRST_DB_URI: postgres://authenticator:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB} PGRST_DB_URI: postgres://authenticator:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
PGRST_DB_SCHEMAS: ${PGRST_DB_SCHEMAS} PGRST_DB_SCHEMAS: ${PGRST_DB_SCHEMAS}
@ -161,37 +166,24 @@ services:
PGRST_DB_USE_LEGACY_GUCS: "false" PGRST_DB_USE_LEGACY_GUCS: "false"
PGRST_APP_SETTINGS_JWT_SECRET: ${JWT_SECRET} PGRST_APP_SETTINGS_JWT_SECRET: ${JWT_SECRET}
PGRST_APP_SETTINGS_JWT_EXP: ${JWT_EXPIRY} PGRST_APP_SETTINGS_JWT_EXP: ${JWT_EXPIRY}
command: "postgrest" command: [ "postgrest" ]
networks:
- kevlarai-network
realtime: realtime:
container_name: supabase-realtime # This container name looks inconsistent but is correct because realtime constructs tenant id by parsing the subdomain
image: supabase/realtime:v2.34.7 container_name: realtime-dev.supabase-realtime
image: supabase/realtime:v2.76.5
restart: unless-stopped
depends_on: depends_on:
db: db:
# Disable this if you are using an external Postgres database
condition: service_healthy condition: service_healthy
analytics: analytics:
condition: service_healthy condition: service_healthy
healthcheck: healthcheck:
test: test: [ "CMD", "curl", "-sSfL", "--head", "-o", "/dev/null", "-H", "Authorization: Bearer ${ANON_KEY}", "http://localhost:4000/api/tenants/realtime-dev/health" ]
[
"CMD",
"curl",
"-sSfL",
"--head",
"-o",
"/dev/null",
"-H",
"Authorization: Bearer ${ANON_KEY}",
"http://localhost:4000/api/tenants/realtime-dev/health",
]
timeout: 5s timeout: 5s
interval: 5s interval: 5s
retries: 3 retries: 3
restart: unless-stopped
env_file:
- .env
environment: environment:
PORT: 4000 PORT: 4000
DB_HOST: ${POSTGRES_HOST} DB_HOST: ${POSTGRES_HOST}
@ -199,7 +191,7 @@ services:
DB_USER: supabase_admin DB_USER: supabase_admin
DB_PASSWORD: ${POSTGRES_PASSWORD} DB_PASSWORD: ${POSTGRES_PASSWORD}
DB_NAME: ${POSTGRES_DB} DB_NAME: ${POSTGRES_DB}
DB_AFTER_CONNECT_QUERY: "SET search_path TO _realtime" DB_AFTER_CONNECT_QUERY: 'SET search_path TO _realtime'
DB_ENC_KEY: supabaserealtime DB_ENC_KEY: supabaserealtime
API_JWT_SECRET: ${JWT_SECRET} API_JWT_SECRET: ${JWT_SECRET}
SECRET_KEY_BASE: ${SECRET_KEY_BASE} SECRET_KEY_BASE: ${SECRET_KEY_BASE}
@ -209,83 +201,70 @@ services:
APP_NAME: realtime APP_NAME: realtime
SEED_SELF_HOST: true SEED_SELF_HOST: true
RUN_JANITOR: true RUN_JANITOR: true
networks:
- kevlarai-network
# To use S3 backed storage: docker compose -f docker-compose.yml -f docker-compose.s3.yml up
storage: storage:
container_name: supabase-storage container_name: supabase-storage
image: supabase/storage-api:v1.14.5 image: supabase/storage-api:v1.37.8
restart: unless-stopped
volumes:
- ./volumes/storage:/var/lib/storage:z
healthcheck:
test: [ "CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://storage:5000/status" ]
timeout: 5s
interval: 5s
retries: 3
depends_on: depends_on:
db: db:
# Disable this if you are using an external Postgres database
condition: service_healthy condition: service_healthy
rest: rest:
condition: service_started condition: service_started
imgproxy: imgproxy:
condition: service_started condition: service_started
healthcheck:
test:
[
"CMD",
"wget",
"--no-verbose",
"--tries=1",
"--spider",
"http://storage:5000/status",
]
timeout: 5s
interval: 5s
retries: 3
restart: unless-stopped
env_file:
- .env
environment: environment:
ANON_KEY: ${ANON_KEY} ANON_KEY: ${ANON_KEY}
SERVICE_KEY: ${SERVICE_ROLE_KEY} SERVICE_KEY: ${SERVICE_ROLE_KEY}
POSTGREST_URL: http://rest:3000 POSTGREST_URL: http://rest:3000
PGRST_JWT_SECRET: ${JWT_SECRET} PGRST_JWT_SECRET: ${JWT_SECRET}
DATABASE_URL: postgres://supabase_storage_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB} DATABASE_URL: postgres://supabase_storage_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
FILE_SIZE_LIMIT: 52428800 FILE_SIZE_LIMIT: 2147483648
STORAGE_BACKEND: file STORAGE_BACKEND: file
FILE_STORAGE_BACKEND_PATH: /var/lib/storage FILE_STORAGE_BACKEND_PATH: /var/lib/storage
TENANT_ID: stub TENANT_ID: stub
# TODO: https://github.com/supabase/storage-api/issues/55
REGION: stub REGION: stub
GLOBAL_S3_BUCKET: stub GLOBAL_S3_BUCKET: stub
ENABLE_IMAGE_TRANSFORMATION: "true" ENABLE_IMAGE_TRANSFORMATION: "true"
IMGPROXY_URL: http://imgproxy:5001 IMGPROXY_URL: http://imgproxy:5001
volumes:
- ./storage:/var/lib/storage:z
networks:
- kevlarai-network
imgproxy: imgproxy:
container_name: supabase-imgproxy container_name: supabase-imgproxy
image: darthsim/imgproxy:v3.8.0 image: darthsim/imgproxy:v3.30.1
restart: unless-stopped
volumes:
- ./volumes/storage:/var/lib/storage:z
healthcheck: healthcheck:
test: ["CMD", "imgproxy", "health"] test: [ "CMD", "imgproxy", "health" ]
timeout: 10s timeout: 5s
interval: 5s interval: 5s
retries: 10 retries: 3
env_file:
- .env
environment: environment:
IMGPROXY_BIND: ":5001" IMGPROXY_BIND: ":5001"
IMGPROXY_LOCAL_FILESYSTEM_ROOT: / IMGPROXY_LOCAL_FILESYSTEM_ROOT: /
IMGPROXY_USE_ETAG: "true" IMGPROXY_USE_ETAG: "true"
IMGPROXY_ENABLE_WEBP_DETECTION: ${IMGPROXY_ENABLE_WEBP_DETECTION} IMGPROXY_ENABLE_WEBP_DETECTION: ${IMGPROXY_ENABLE_WEBP_DETECTION}
networks:
- kevlarai-network
meta: meta:
container_name: supabase-meta container_name: supabase-meta
image: supabase/postgres-meta:v0.84.2 image: supabase/postgres-meta:v0.95.2
restart: unless-stopped
depends_on: depends_on:
db: db:
# Disable this if you are using an external Postgres database
condition: service_healthy condition: service_healthy
analytics: analytics:
condition: service_healthy condition: service_healthy
restart: unless-stopped
env_file:
- .env
environment: environment:
PG_META_PORT: 8080 PG_META_PORT: 8080
PG_META_DB_HOST: ${POSTGRES_HOST} PG_META_DB_HOST: ${POSTGRES_HOST}
@ -293,48 +272,47 @@ services:
PG_META_DB_NAME: ${POSTGRES_DB} PG_META_DB_NAME: ${POSTGRES_DB}
PG_META_DB_USER: supabase_admin PG_META_DB_USER: supabase_admin
PG_META_DB_PASSWORD: ${POSTGRES_PASSWORD} PG_META_DB_PASSWORD: ${POSTGRES_PASSWORD}
networks:
- kevlarai-network
functions: functions:
container_name: supabase-edge-functions container_name: supabase-edge-functions
image: supabase/edge-runtime:v1.67.0 image: supabase/edge-runtime:v1.70.3
restart: unless-stopped restart: unless-stopped
volumes:
- ./volumes/functions:/home/deno/functions:Z
depends_on: depends_on:
analytics: analytics:
condition: service_healthy condition: service_healthy
env_file:
- .env
environment: environment:
JWT_SECRET: ${JWT_SECRET} JWT_SECRET: ${JWT_SECRET}
SUPABASE_URL: ${SUPABASE_URL} SUPABASE_URL: http://kong:8000
SUPABASE_ANON_KEY: ${ANON_KEY} SUPABASE_ANON_KEY: ${ANON_KEY}
SUPABASE_SERVICE_ROLE_KEY: ${SERVICE_ROLE_KEY} SUPABASE_SERVICE_ROLE_KEY: ${SERVICE_ROLE_KEY}
SUPABASE_DB_URL: postgresql://postgres:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB} SUPABASE_DB_URL: postgresql://postgres:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
# TODO: Allow configuring VERIFY_JWT per function. This PR might help: https://github.com/supabase/cli/pull/786
VERIFY_JWT: "${FUNCTIONS_VERIFY_JWT}" VERIFY_JWT: "${FUNCTIONS_VERIFY_JWT}"
volumes: command: [ "start", "--main-service", "/home/deno/functions/main" ]
- ./functions:/home/deno/functions:Z
command:
- start
- --main-service
- /home/deno/functions/main
networks:
- kevlarai-network
analytics: analytics:
container_name: supabase-analytics container_name: supabase-analytics
image: supabase/logflare:1.4.0 image: supabase/logflare:1.31.2
restart: unless-stopped
ports:
- 4000:4000
# Uncomment to use Big Query backend for analytics
# volumes:
# - type: bind
# source: ${PWD}/gcloud.json
# target: /opt/app/rel/logflare/bin/gcloud.json
# read_only: true
healthcheck: healthcheck:
test: ["CMD", "curl", "http://localhost:4000/health"] test: [ "CMD", "curl", "http://localhost:4000/health" ]
timeout: 10s timeout: 5s
interval: 5s interval: 5s
retries: 10 retries: 10
restart: unless-stopped
depends_on: depends_on:
db: db:
# Disable this if you are using an external Postgres database
condition: service_healthy condition: service_healthy
env_file:
- .env
environment: environment:
LOGFLARE_NODE_HOST: 127.0.0.1 LOGFLARE_NODE_HOST: 127.0.0.1
DB_USERNAME: supabase_admin DB_USERNAME: supabase_admin
@ -343,39 +321,63 @@ services:
DB_PORT: ${POSTGRES_PORT} DB_PORT: ${POSTGRES_PORT}
DB_PASSWORD: ${POSTGRES_PASSWORD} DB_PASSWORD: ${POSTGRES_PASSWORD}
DB_SCHEMA: _analytics DB_SCHEMA: _analytics
LOGFLARE_PUBLIC_ACCESS_TOKEN: ${LOGFLARE_PUBLIC_ACCESS_TOKEN}
LOGFLARE_API_KEY: ${LOGFLARE_API_KEY} LOGFLARE_API_KEY: ${LOGFLARE_API_KEY}
LOGFLARE_PRIVATE_ACCESS_TOKEN: ${LOGFLARE_PRIVATE_ACCESS_TOKEN}
LOGFLARE_SINGLE_TENANT: true LOGFLARE_SINGLE_TENANT: true
LOGFLARE_SUPABASE_MODE: true LOGFLARE_SUPABASE_MODE: true
LOGFLARE_MIN_CLUSTER_SIZE: 1 LOGFLARE_MIN_CLUSTER_SIZE: 1
# Comment variables to use Big Query backend for analytics
POSTGRES_BACKEND_URL: postgresql://supabase_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/_supabase POSTGRES_BACKEND_URL: postgresql://supabase_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/_supabase
POSTGRES_BACKEND_SCHEMA: _analytics POSTGRES_BACKEND_SCHEMA: _analytics
LOGFLARE_FEATURE_FLAG_OVERRIDE: multibackend=true LOGFLARE_FEATURE_FLAG_OVERRIDE: multibackend=true
ports: # Uncomment to use Big Query backend for analytics
- 4000:4000 # GOOGLE_PROJECT_ID: ${GOOGLE_PROJECT_ID}
networks: # GOOGLE_PROJECT_NUMBER: ${GOOGLE_PROJECT_NUMBER}
- kevlarai-network
# Comment out everything below this point if you are using an external Postgres database
db: db:
container_name: supabase-db container_name: supabase-db
image: supabase/postgres:15.8.1.020 image: supabase/postgres:15.8.1.085
restart: unless-stopped
volumes:
- ./volumes/db/realtime.sql:/docker-entrypoint-initdb.d/migrations/59-realtime.sql:Z
# Must be superuser to create event trigger
- ./volumes/db/webhooks.sql:/docker-entrypoint-initdb.d/init-scripts/58-webhooks.sql:Z
# Must be superuser to alter reserved role
- ./volumes/db/roles.sql:/docker-entrypoint-initdb.d/init-scripts/59-roles.sql:Z
# Initialize the database settings with JWT_SECRET and JWT_EXP
- ./volumes/db/jwt.sql:/docker-entrypoint-initdb.d/init-scripts/59-jwt.sql:Z
# Changes required for internal supabase data such as _analytics
- ./volumes/db/_supabase.sql:/docker-entrypoint-initdb.d/migrations/57-_supabase.sql:Z
# Changes required for Analytics support
- ./volumes/db/logs.sql:/docker-entrypoint-initdb.d/migrations/59-logs.sql:Z
# Changes required for Pooler support
- ./volumes/db/pooler.sql:/docker-entrypoint-initdb.d/migrations/59-pooler.sql:Z
# ClassroomCopilot changes
- ./volumes/db/cc/61-core-schema.sql:/docker-entrypoint-initdb.d/migrations/61-core-schema.sql:Z
- ./volumes/db/cc/62-functions-triggers.sql:/docker-entrypoint-initdb.d/migrations/62-functions-triggers.sql:Z
- ./volumes/db/cc/63-storage-policies.sql:/docker-entrypoint-initdb.d/migrations/63-storage-policies.sql:Z
- ./volumes/db/cc/64-initial-admin.sql:/docker-entrypoint-initdb.d/migrations/64-initial-admin.sql:Z
- ./volumes/db/cc/65-filesystem-augments.sql:/docker-entrypoint-initdb.d/migrations/65-filesystem-augments.sql:Z
- ./volumes/db/cc/66-rls-policies.sql:/docker-entrypoint-initdb.d/migrations/66-rls-policies.sql:Z
- ./volumes/db/cc/67-vectors.sql:/docker-entrypoint-initdb.d/migrations/67-vectors.sql:Z
- ./volumes/db/cc/68-cabinet-memberships.sql:/docker-entrypoint-initdb.d/migrations/68-cabinet-memberships.sql:Z
- ./volumes/db/cc/69-gc-prefix-cleanup.sql:/docker-entrypoint-initdb.d/migrations/69-gc-prefix-cleanup.sql:Z
- ./volumes/db/cc/70-add-directory-support.sql:/docker-entrypoint-initdb.d/migrations/70-add-directory-support.sql:Z
# PGDATA directory - persists database files between restarts
- ./volumes/db-data:/var/lib/postgresql/data:Z
# Use named volume to persist pgsodium decryption key between restarts
- db-config:/etc/postgresql-custom
healthcheck: healthcheck:
test: ["CMD-SHELL", "pg_isready -U postgres -h localhost || exit 1"] test: [ "CMD", "pg_isready", "-U", "postgres", "-h", "localhost" ]
interval: 10s interval: 5s
timeout: 5s timeout: 5s
retries: 20 retries: 10
start_period: 30s
depends_on: depends_on:
vector: vector:
condition: service_healthy condition: service_healthy
command:
- postgres
- -c
- config_file=/etc/postgresql/postgresql.conf
- -c
- log_min_messages=fatal
restart: unless-stopped
env_file:
- .env
environment: environment:
POSTGRES_HOST: /var/run/postgresql POSTGRES_HOST: /var/run/postgresql
PGPORT: ${POSTGRES_PORT} PGPORT: ${POSTGRES_PORT}
@ -386,104 +388,104 @@ services:
POSTGRES_DB: ${POSTGRES_DB} POSTGRES_DB: ${POSTGRES_DB}
JWT_SECRET: ${JWT_SECRET} JWT_SECRET: ${JWT_SECRET}
JWT_EXP: ${JWT_EXPIRY} JWT_EXP: ${JWT_EXPIRY}
volumes: command:
- ./db/migrations/supabase/50-_supabase.sql:/docker-entrypoint-initdb.d/migrations/50-_supabase.sql [
- ./db/migrations/supabase/52-realtime.sql:/docker-entrypoint-initdb.d/migrations/52-realtime.sql "postgres",
- ./db/migrations/supabase/52-pooler.sql:/docker-entrypoint-initdb.d/migrations/52-pooler.sql "-c",
- ./db/migrations/supabase/52-logs.sql:/docker-entrypoint-initdb.d/migrations/52-logs.sql "config_file=/etc/postgresql/postgresql.conf",
- ./db/init-scripts/51-webhooks.sql:/docker-entrypoint-initdb.d/init-scripts/51-webhooks.sql "-c",
- ./db/init-scripts/52-roles.sql:/docker-entrypoint-initdb.d/init-scripts/52-roles.sql "log_min_messages=fatal" # prevents Realtime polling queries from appearing in logs
- ./db/init-scripts/52-jwt.sql:/docker-entrypoint-initdb.d/init-scripts/52-jwt.sql ]
- ./db/migrations/core/60-create-databases.sql:/docker-entrypoint-initdb.d/migrations/60-create-databases.sql ports:
- ./db/migrations/core/61-core-schema.sql:/docker-entrypoint-initdb.d/migrations/61-core-schema.sql - ${PORT_SUPABASE_POSTGRES_TEST}:${POSTGRES_PORT}
- ./db/migrations/core/62-functions-triggers.sql:/docker-entrypoint-initdb.d/migrations/62-functions-triggers.sql
- ./db/migrations/core/63-storage-policies.sql:/docker-entrypoint-initdb.d/migrations/63-storage-policies.sql
- ./db/migrations/core/64-initial-admin.sql:/docker-entrypoint-initdb.d/migrations/64-initial-admin.sql
- ./db/migrations/core/65-keycloak-setup.sql:/docker-entrypoint-initdb.d/migrations/65-keycloak-setup.sql
- supabase-db-data:/var/lib/postgresql/data
- supabase-db-config:/etc/postgresql-custom
networks:
- kevlarai-network
vector: vector:
container_name: supabase-vector container_name: supabase-vector
image: timberio/vector:0.28.1-alpine image: timberio/vector:0.53.0-alpine
healthcheck: restart: unless-stopped
test:
[
"CMD",
"wget",
"--no-verbose",
"--tries=1",
"--spider",
"http://vector:9001/health",
]
timeout: 10s
interval: 10s
retries: 10
volumes: volumes:
- ./logs/vector.yml:/etc/vector/vector.yml:ro - ./volumes/logs/vector.yml:/etc/vector/vector.yml:ro,z
- /var/run/docker.sock:/var/run/docker.sock:ro - ${DOCKER_SOCKET_LOCATION}:/var/run/docker.sock:ro,z
env_file: healthcheck:
- .env test: [ "CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://vector:9001/health" ]
timeout: 5s
interval: 5s
retries: 3
environment: environment:
LOGFLARE_PUBLIC_ACCESS_TOKEN: ${LOGFLARE_PUBLIC_ACCESS_TOKEN}
LOGFLARE_API_KEY: ${LOGFLARE_API_KEY} LOGFLARE_API_KEY: ${LOGFLARE_API_KEY}
command: ["--config", "/etc/vector/vector.yml"] command: [ "--config", "/etc/vector/vector.yml" ]
networks: security_opt:
- kevlarai-network - "label=disable"
# Update the DATABASE_URL if you are using an external Postgres database
supavisor: supavisor:
container_name: supabase-pooler container_name: supabase-pooler
image: supabase/supavisor:1.1.56 image: supabase/supavisor:2.7.0
restart: unless-stopped
ports:
- ${POSTGRES_PORT}:5432
- ${POOLER_PROXY_PORT_TRANSACTION}:6543
volumes:
- ./volumes/pooler/pooler.exs:/etc/pooler/pooler.exs:ro,z
healthcheck: healthcheck:
test: curl -sSfL --head -o /dev/null "http://127.0.0.1:4000/api/health" test: [ "CMD", "curl", "-sSfL", "--head", "-o", "/dev/null", "http://127.0.0.1:4000/api/health" ]
interval: 10s interval: 10s
timeout: 10s timeout: 5s
retries: 10 retries: 5
depends_on: depends_on:
db: db:
condition: service_healthy condition: service_healthy
analytics: analytics:
condition: service_healthy condition: service_healthy
command:
- /bin/sh
- -c
- /app/bin/migrate && /app/bin/supavisor eval "$$(cat /etc/pooler/pooler.exs)" && /app/bin/server
restart: unless-stopped
ports:
- ${POSTGRES_PORT}:5432
- ${POOLER_PROXY_PORT_TRANSACTION}:6543
env_file:
- .env
environment: environment:
- PORT=4000 PORT: 4000
- POSTGRES_PORT=${POSTGRES_PORT} POSTGRES_PORT: ${POSTGRES_PORT}
- POSTGRES_DB=${POSTGRES_DB} POSTGRES_DB: ${POSTGRES_DB}
- POSTGRES_PASSWORD=${POSTGRES_PASSWORD} POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
- DATABASE_URL=ecto://supabase_admin:${POSTGRES_PASSWORD}@db:${POSTGRES_PORT}/_supabase DATABASE_URL: ecto://supabase_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/_supabase
- CLUSTER_POSTGRES=true CLUSTER_POSTGRES: true
- SECRET_KEY_BASE=${SECRET_KEY_BASE} SECRET_KEY_BASE: ${SECRET_KEY_BASE}
- VAULT_ENC_KEY=${VAULT_ENC_KEY} VAULT_ENC_KEY: ${VAULT_ENC_KEY}
- API_JWT_SECRET=${JWT_SECRET} API_JWT_SECRET: ${JWT_SECRET}
- METRICS_JWT_SECRET=${JWT_SECRET} METRICS_JWT_SECRET: ${JWT_SECRET}
- REGION=local REGION: local
- ERL_AFLAGS=-proto_dist inet_tcp ERL_AFLAGS: -proto_dist inet_tcp
- POOLER_TENANT_ID=${POOLER_TENANT_ID} POOLER_TENANT_ID: ${POOLER_TENANT_ID}
- POOLER_DEFAULT_POOL_SIZE=${POOLER_DEFAULT_POOL_SIZE} POOLER_DEFAULT_POOL_SIZE: ${POOLER_DEFAULT_POOL_SIZE}
- POOLER_MAX_CLIENT_CONN=${POOLER_MAX_CLIENT_CONN} POOLER_MAX_CLIENT_CONN: ${POOLER_MAX_CLIENT_CONN}
- POOLER_POOL_MODE=transaction POOLER_POOL_MODE: transaction
volumes: DB_POOL_SIZE: ${POOLER_DB_POOL_SIZE}
- ./pooler/pooler.exs:/etc/pooler/pooler.exs:ro command: [ "/bin/sh", "-c", "/app/bin/migrate && /app/bin/supavisor eval \"$$(cat /etc/pooler/pooler.exs)\" && /app/bin/server" ]
networks:
- kevlarai-network ## MCP Server - Model Context Protocol for AI integrations
## DISABLED BY DEFAULT - Add 'mcp' to COMPOSE_PROFILES to enable
mcp:
container_name: mcp
profiles:
- mcp
build:
context: .
dockerfile: ./volumes/mcp/Dockerfile
restart: unless-stopped
healthcheck:
test: [ "CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://127.0.0.1:3100/health" ]
timeout: 5s
interval: 10s
retries: 3
depends_on:
db:
condition: service_healthy
rest:
condition: service_started
environment:
SUPABASE_URL: http://kong:8000
SUPABASE_ANON_KEY: ${ANON_KEY}
SUPABASE_SERVICE_ROLE_KEY: ${SERVICE_ROLE_KEY}
SUPABASE_AUTH_JWT_SECRET: ${JWT_SECRET}
DATABASE_URL: postgresql://postgres:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
command: [ "bun", "run", "dist/index.js", "--transport", "http", "--port", "3100", "--host", "0.0.0.0", "--url", "http://kong:8000", "--anon-key", "${ANON_KEY}", "--service-key", "${SERVICE_ROLE_KEY}", "--jwt-secret", "${JWT_SECRET}", "--db-url", "postgresql://postgres:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}" ]
volumes: volumes:
supabase-db-config: db-config:
driver: local deno-cache:
supabase-db-data:
driver: local
networks:
kevlarai-network:
name: kevlarai-network
driver: bridge

View File

@ -1,232 +0,0 @@
api:
enabled: true
address: 0.0.0.0:9001
sources:
docker_host:
type: docker_logs
exclude_containers:
- supabase-vector
transforms:
project_logs:
type: remap
inputs:
- docker_host
source: |-
.project = "default"
.event_message = del(.message)
.appname = del(.container_name)
del(.container_created_at)
del(.container_id)
del(.source_type)
del(.stream)
del(.label)
del(.image)
del(.host)
del(.stream)
router:
type: route
inputs:
- project_logs
route:
kong: '.appname == "supabase-kong"'
auth: '.appname == "supabase-auth"'
rest: '.appname == "supabase-rest"'
realtime: '.appname == "supabase-realtime"'
storage: '.appname == "supabase-storage"'
functions: '.appname == "supabase-functions"'
db: '.appname == "supabase-db"'
# Ignores non nginx errors since they are related with kong booting up
kong_logs:
type: remap
inputs:
- router.kong
source: |-
req, err = parse_nginx_log(.event_message, "combined")
if err == null {
.timestamp = req.timestamp
.metadata.request.headers.referer = req.referer
.metadata.request.headers.user_agent = req.agent
.metadata.request.headers.cf_connecting_ip = req.client
.metadata.request.method = req.method
.metadata.request.path = req.path
.metadata.request.protocol = req.protocol
.metadata.response.status_code = req.status
}
if err != null {
abort
}
# Ignores non nginx errors since they are related with kong booting up
kong_err:
type: remap
inputs:
- router.kong
source: |-
.metadata.request.method = "GET"
.metadata.response.status_code = 200
parsed, err = parse_nginx_log(.event_message, "error")
if err == null {
.timestamp = parsed.timestamp
.severity = parsed.severity
.metadata.request.host = parsed.host
.metadata.request.headers.cf_connecting_ip = parsed.client
url, err = split(parsed.request, " ")
if err == null {
.metadata.request.method = url[0]
.metadata.request.path = url[1]
.metadata.request.protocol = url[2]
}
}
if err != null {
abort
}
# Gotrue logs are structured json strings which frontend parses directly. But we keep metadata for consistency.
auth_logs:
type: remap
inputs:
- router.auth
source: |-
parsed, err = parse_json(.event_message)
if err == null {
.metadata.timestamp = parsed.time
.metadata = merge!(.metadata, parsed)
}
# PostgREST logs are structured so we separate timestamp from message using regex
rest_logs:
type: remap
inputs:
- router.rest
source: |-
parsed, err = parse_regex(.event_message, r'^(?P<time>.*): (?P<msg>.*)$')
if err == null {
.event_message = parsed.msg
.timestamp = to_timestamp!(parsed.time)
.metadata.host = .project
}
# Realtime logs are structured so we parse the severity level using regex (ignore time because it has no date)
realtime_logs:
type: remap
inputs:
- router.realtime
source: |-
.metadata.project = del(.project)
.metadata.external_id = .metadata.project
parsed, err = parse_regex(.event_message, r'^(?P<time>\d+:\d+:\d+\.\d+) \[(?P<level>\w+)\] (?P<msg>.*)$')
if err == null {
.event_message = parsed.msg
.metadata.level = parsed.level
}
# Storage logs may contain json objects so we parse them for completeness
storage_logs:
type: remap
inputs:
- router.storage
source: |-
.metadata.project = del(.project)
.metadata.tenantId = .metadata.project
parsed, err = parse_json(.event_message)
if err == null {
.event_message = parsed.msg
.metadata.level = parsed.level
.metadata.timestamp = parsed.time
.metadata.context[0].host = parsed.hostname
.metadata.context[0].pid = parsed.pid
}
# Postgres logs some messages to stderr which we map to warning severity level
db_logs:
type: remap
inputs:
- router.db
source: |-
.metadata.host = "db-default"
.metadata.parsed.timestamp = .timestamp
parsed, err = parse_regex(.event_message, r'.*(?P<level>INFO|NOTICE|WARNING|ERROR|LOG|FATAL|PANIC?):.*', numeric_groups: true)
if err != null || parsed == null {
.metadata.parsed.error_severity = "info"
}
if parsed != null {
.metadata.parsed.error_severity = parsed.level
}
if .metadata.parsed.error_severity == "info" {
.metadata.parsed.error_severity = "log"
}
.metadata.parsed.error_severity = upcase!(.metadata.parsed.error_severity)
sinks:
logflare_auth:
type: 'http'
inputs:
- auth_logs
encoding:
codec: 'json'
method: 'post'
request:
retry_max_duration_secs: 10
uri: 'http://analytics:4000/api/logs?source_name=gotrue.logs.prod&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
logflare_realtime:
type: 'http'
inputs:
- realtime_logs
encoding:
codec: 'json'
method: 'post'
request:
retry_max_duration_secs: 10
uri: 'http://analytics:4000/api/logs?source_name=realtime.logs.prod&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
logflare_rest:
type: 'http'
inputs:
- rest_logs
encoding:
codec: 'json'
method: 'post'
request:
retry_max_duration_secs: 10
uri: 'http://analytics:4000/api/logs?source_name=postgREST.logs.prod&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
logflare_db:
type: 'http'
inputs:
- db_logs
encoding:
codec: 'json'
method: 'post'
request:
retry_max_duration_secs: 10
# We must route the sink through kong because ingesting logs before logflare is fully initialised will
# lead to broken queries from studio. This works by the assumption that containers are started in the
# following order: vector > db > logflare > kong
uri: 'http://kong:8000/analytics/v1/api/logs?source_name=postgres.logs&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
logflare_functions:
type: 'http'
inputs:
- router.functions
encoding:
codec: 'json'
method: 'post'
request:
retry_max_duration_secs: 10
uri: 'http://analytics:4000/api/logs?source_name=deno-relay-logs&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
logflare_storage:
type: 'http'
inputs:
- storage_logs
encoding:
codec: 'json'
method: 'post'
request:
retry_max_duration_secs: 10
uri: 'http://analytics:4000/api/logs?source_name=storage.logs.prod.2&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
logflare_kong:
type: 'http'
inputs:
- kong_logs
- kong_err
encoding:
codec: 'json'
method: 'post'
request:
retry_max_duration_secs: 10
uri: 'http://analytics:4000/api/logs?source_name=cloudflare.logs.prod&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'

View File

@ -1,30 +0,0 @@
{:ok, _} = Application.ensure_all_started(:supavisor)
{:ok, version} =
case Supavisor.Repo.query!("select version()") do
%{rows: [[ver]]} -> Supavisor.Helpers.parse_pg_version(ver)
_ -> nil
end
params = %{
"external_id" => System.get_env("POOLER_TENANT_ID"),
"db_host" => "db",
"db_port" => System.get_env("POSTGRES_PORT"),
"db_database" => System.get_env("POSTGRES_DB"),
"require_user" => false,
"auth_query" => "SELECT * FROM pgbouncer.get_auth($1)",
"default_max_clients" => System.get_env("POOLER_MAX_CLIENT_CONN"),
"default_pool_size" => System.get_env("POOLER_DEFAULT_POOL_SIZE"),
"default_parameter_status" => %{"server_version" => version},
"users" => [%{
"db_user" => "pgbouncer",
"db_password" => System.get_env("POSTGRES_PASSWORD"),
"mode_type" => System.get_env("POOLER_POOL_MODE"),
"pool_size" => System.get_env("POOLER_DEFAULT_POOL_SIZE"),
"is_manager" => true
}]
}
if !Supavisor.Tenants.get_tenant_by_external_id(params["external_id"]) do
{:ok, _} = Supavisor.Tenants.create_tenant(params)
end

View File

@ -0,0 +1,19 @@
# Codacy configuration
# See: https://docs.codacy.com/repositories-configure/codacy-configuration-file/
---
engines:
eslint:
enabled: true
exclude_paths:
# Test files - contain intentional test fixtures (hardcoded credentials, etc.)
- "src/__tests__/**"
- "**/*.test.ts"
- "**/*.spec.ts"
# Build output
- "dist/**"
- "node_modules/**"
# Config files
- "*.config.js"
- "*.config.ts"

33
selfhosted-supabase-mcp/.gitignore vendored Normal file
View File

@ -0,0 +1,33 @@
# Dependency directories
node_modules/
# Build output
dist/
.fastembed_cache
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
pnpm-debug.log*
lerna-debug.log*
# Environment variables
.env
.env.*
!.env.example
# OS generated files
.DS_Store
Thumbs.db
# IDE directories
.vscode/
.idea/
.cursor/
# Test coverage
coverage/

View File

@ -0,0 +1,48 @@
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [Unreleased]
### Added
- **HTTP Transport Mode**: Run MCP server in HTTP mode for Docker/Kong integration
- Express-based HTTP server with Streamable HTTP Transport
- Configurable CORS, rate limiting, and request timeouts
- Health check endpoint for container orchestration
- **JWT Authentication Middleware**: Validate Supabase JWTs in HTTP mode
- **Privilege-Based Access Control**: Role-based tool access (regular, privileged)
- `service_role`: Access to all tools
- `authenticated`/`anon`: Access to regular tools only
- **24 New Database Introspection Tools**:
- Schema: `list_table_columns`, `list_indexes`, `list_constraints`, `list_foreign_keys`, `list_triggers`, `list_database_functions`, `list_available_extensions`
- Security: `list_rls_policies`, `get_rls_status`, `get_advisors`
- Definitions: `get_function_definition`, `get_trigger_definition`
- Performance: `get_index_stats`, `get_vector_index_stats`, `explain_query`
- Extensions: `list_cron_jobs`, `get_cron_job_history`, `list_vector_indexes`
- Edge Functions: `list_edge_functions`, `get_edge_function_details`, `list_edge_function_logs`
- Storage: `get_storage_config`, `update_storage_config`
- Logs: `get_logs`
- **Bun Runtime**: Migrated from Node.js/npm to Bun for faster builds and execution
- **Comprehensive Test Suite**: 13 test files with 240+ passing tests
- **Docker Integration**: Dockerfile and Docker Compose configuration for self-hosted Supabase stacks
### Changed
- `execute_sql` now requires `service_role` JWT in HTTP mode (privileged tool)
- Replaced `package-lock.json` with `bun.lock`
### Removed
- **`get_anon_key` tool**: Removed to prevent exposure of sensitive API keys through MCP
- **`get_service_key` tool**: Removed to prevent exposure of sensitive API keys through MCP
### Security
- Removed tools that exposed API keys (`get_anon_key`, `get_service_key`)
- **Rationale**: MCP tools can be called by any connected client. Exposing API keys through MCP creates a security risk where keys could be extracted by malicious or compromised MCP clients. The anon key and service role key are already available to the server at startup via environment variables or CLI arguments - there's no legitimate use case for retrieving them via MCP during runtime.
- Added privilege-based access control to restrict sensitive operations to `service_role` only
- JWT authentication enforced for all HTTP mode requests

View File

@ -0,0 +1,18 @@
# Generated by https://smithery.ai. See: https://smithery.ai/docs/build/project-config
# Project migrated from Node.js/npm to Bun (package-lock.json was replaced with
# bun.lock), so the image must use the Bun runtime: the previous
# `COPY package.json package-lock.json` step fails because package-lock.json
# no longer exists in the repository.
FROM oven/bun:1.1-alpine
WORKDIR /app
# Install dependencies (including dev for build); fall back to a plain install
# if the lockfile is missing or out of date.
COPY package.json bun.lock ./
RUN bun install --frozen-lockfile || bun install
# Copy remaining source files
COPY . .
# Build the project (compiles TypeScript into dist/)
RUN bun run build
# Entrypoint for MCP server (stdio transport by default)
ENTRYPOINT ["bun", "run", "dist/index.js"]

View File

@ -0,0 +1,67 @@
# Self-Hosted Supabase MCP Server - Implementation Plan
This plan outlines the steps to build the minimal self-hosted Supabase MCP server based on `migration_notes.md`.
## Progress Tracking
- [x] Project Setup (package.json, tsconfig.json, dependencies, directories)
- [x] Bun Migration (replaced Node.js/npm with Bun runtime)
- [x] Define Core Types (`src/types/`)
- [x] Implement `SelfhostedSupabaseClient` (`src/client/`)
- [x] Basic connection (`@supabase/supabase-js`)
- [x] RPC `execute_sql` function call logic
- [x] RPC function existence check and creation logic (using service key)
- [x] Direct DB connection fallback/transactional method (`pg`)
- [x] Async initialization logic (`client.initialize()`)
- [x] Implement Server Entry Point (`src/index.ts`)
- [x] `commander` setup for args/env vars
- [x] `createSelfhostedSupabaseClient` factory usage
- [x] MCP SDK initialization (`stdio: true`)
- [x] Tool registration
- [x] Error handling
- [x] Implement Tools (`src/tools/`)
- [x] **Schema & Migrations**
- [x] `list_tables`
- [x] `list_extensions`
- [x] `list_migrations`
- [x] `apply_migration`
- [x] **Database Operations & Stats**
- [x] `execute_sql`
- [x] `get_database_connections`
- [x] `get_database_stats`
- [x] **Project Configuration**
- [x] `get_project_url`
- [x] `verify_jwt_secret`
- [-] `get_anon_key` (Removed - security risk)
- [-] `get_service_key` (Removed - security risk)
- [x] **Development & Extension Tools**
- [x] `generate_typescript_types`
- [x] `rebuild_hooks`
- [x] `get_logs` (Added - tries analytics stack, falls back to CSV logs)
- [x] **Auth User Management**
- [x] `list_auth_users`
- [x] `get_auth_user`
- [x] `create_auth_user`
- [x] `delete_auth_user`
- [x] `update_auth_user`
- [x] **Storage Insights**
- [x] `list_storage_buckets`
- [x] `list_storage_objects`
- [x] **Realtime Inspection**
- [x] `list_realtime_publications`
- [x] **Extension-Specific Tools**
- [x] `list_cron_jobs` (for pg_cron)
- [x] `list_vector_indexes` (for pgvector)
- [x] **Edge Function Management**
- [x] `list_edge_functions`
- [x] `get_edge_function_details`
- [-] `deploy_edge_function` (Skipped - requires filesystem access outside MCP scope)
- [x] **Additional Tools (from Official Supabase MCP)**
- [x] `get_advisors` (security/performance advisory notices via Splinter)
- [x] `get_storage_config` (storage bucket configuration)
- [x] `update_storage_config` (update storage bucket settings)
- [x] Add Basic README.md
- [x] Test Infrastructure
- [x] Bun test runner setup
- [x] Utils tests
- [x] Type definition tests

View File

@ -0,0 +1,619 @@
# Self-Hosted Supabase MCP Server
[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
[![smithery badge](https://smithery.ai/badge/@HenkDz/selfhosted-supabase-mcp)](https://smithery.ai/server/@HenkDz/selfhosted-supabase-mcp)
## Overview
This project provides a [Model Context Protocol (MCP)](https://github.com/modelcontextprotocol/specification) server designed specifically for interacting with **self-hosted Supabase instances**. It bridges the gap between MCP clients (like IDE extensions) and your local or privately hosted Supabase projects, enabling database introspection, management, and interaction directly from your development environment.
This server was built from scratch, drawing lessons from adapting the official Supabase cloud MCP server, to provide a minimal, focused implementation tailored for the self-hosted use case.
## Purpose
The primary goal of this server is to enable developers using self-hosted Supabase installations to leverage MCP-based tools for tasks such as:
* Querying database schemas and data.
* Managing database migrations.
* Inspecting database statistics and connections.
* Managing authentication users.
* Interacting with Supabase Storage.
* Generating type definitions.
It avoids the complexities of the official cloud server related to multi-project management and cloud-specific APIs, offering a streamlined experience for single-project, self-hosted environments.
## Features (Implemented Tools)
Tools are categorized by privilege level:
- **Regular** tools are accessible by any authenticated Supabase JWT (`authenticated` or `service_role` role).
- **Privileged** tools require a `service_role` JWT (HTTP mode) or direct database/service-key access (stdio mode).
### Schema & Migrations
| Tool | Description | Privilege |
|------|-------------|-----------|
| `list_tables` | Lists tables in the database schemas | Regular |
| `list_extensions` | Lists installed PostgreSQL extensions | Regular |
| `list_available_extensions` | Lists all available (installable) extensions | Regular |
| `list_migrations` | Lists applied migrations from `supabase_migrations.schema_migrations` | Regular |
| `apply_migration` | Applies a SQL migration and records it in `supabase_migrations.schema_migrations` | **Privileged** |
| `list_table_columns` | Lists columns for a specific table | Regular |
| `list_indexes` | Lists indexes for a specific table | Regular |
| `list_constraints` | Lists constraints for a specific table | Regular |
| `list_foreign_keys` | Lists foreign keys for a specific table | Regular |
| `list_triggers` | Lists triggers for a specific table | Regular |
| `list_database_functions` | Lists user-defined database functions | Regular |
| `get_function_definition` | Gets the source definition of a function | Regular |
| `get_trigger_definition` | Gets the source definition of a trigger | Regular |
### Database Operations & Stats
| Tool | Description | Privilege |
|------|-------------|-----------|
| `execute_sql` | Executes an arbitrary SQL query | **Privileged** |
| `explain_query` | Runs `EXPLAIN ANALYZE` on a query | **Privileged** |
| `get_database_connections` | Shows active connections (`pg_stat_activity`) | Regular |
| `get_database_stats` | Retrieves database statistics (`pg_stat_*`) | Regular |
| `get_index_stats` | Shows index usage statistics | Regular |
| `get_vector_index_stats` | Shows pgvector index statistics | Regular |
### Security & RLS
| Tool | Description | Privilege |
|------|-------------|-----------|
| `list_rls_policies` | Lists Row-Level Security policies for a table | Regular |
| `get_rls_status` | Shows RLS enabled/disabled status for tables | Regular |
| `get_advisors` | Retrieves security and performance advisory notices | Regular |
### Project Configuration
| Tool | Description | Privilege |
|------|-------------|-----------|
| `get_project_url` | Returns the configured Supabase URL | Regular |
| `verify_jwt_secret` | Checks if the JWT secret is configured | Regular |
### Development & Extension Tools
| Tool | Description | Privilege |
|------|-------------|-----------|
| `generate_typescript_types` | Generates TypeScript types from the database schema | Regular |
| `rebuild_hooks` | Restarts the `pg_net` worker (if used) | **Privileged** |
| `get_logs` | Retrieves recent log entries (analytics stack or CSV fallback) | Regular |
### Auth User Management
| Tool | Description | Privilege |
|------|-------------|-----------|
| `list_auth_users` | Lists users from `auth.users` | Regular |
| `get_auth_user` | Retrieves details for a specific user | Regular |
| `create_auth_user` | Creates a new user in `auth.users` (password bcrypt-hashed via pgcrypto) | **Privileged** |
| `update_auth_user` | Updates user details (password bcrypt-hashed if changed) | **Privileged** |
| `delete_auth_user` | Deletes a user from `auth.users` | **Privileged** |
### Storage
| Tool | Description | Privilege |
|------|-------------|-----------|
| `list_storage_buckets` | Lists all storage buckets | Regular |
| `list_storage_objects` | Lists objects within a specific bucket | Regular |
| `get_storage_config` | Retrieves storage bucket configuration | Regular |
| `update_storage_config` | Updates storage bucket settings | **Privileged** |
### Realtime Inspection
| Tool | Description | Privilege |
|------|-------------|-----------|
| `list_realtime_publications` | Lists PostgreSQL publications (e.g. `supabase_realtime`) | Regular |
### Extension-Specific Tools
| Tool | Description | Privilege |
|------|-------------|-----------|
| `list_cron_jobs` | Lists scheduled jobs (requires `pg_cron` extension) | Regular |
| `get_cron_job_history` | Shows recent execution history for a cron job | Regular |
| `list_vector_indexes` | Lists pgvector indexes (requires `pgvector` extension) | Regular |
### Edge Functions
| Tool | Description | Privilege |
|------|-------------|-----------|
| `list_edge_functions` | Lists deployed Edge Functions | Regular |
| `get_edge_function_details` | Gets details and metadata for an Edge Function | Regular |
| `list_edge_function_logs` | Retrieves recent logs for an Edge Function | Regular |
---
### About `supabase_migrations.schema_migrations`
The `list_migrations` and `apply_migration` tools rely on the `supabase_migrations.schema_migrations` table. This table is **created and managed by the Supabase CLI** — it is not part of the MCP server itself.
**How the table is created:**
The table is automatically created when you initialise or run migrations using the Supabase CLI:
```bash
supabase db push # pushes local migrations to a remote database
supabase migration up # applies pending local migration files
```
If you have never run the Supabase CLI against your database, the table will not exist and `list_migrations` will return an error. You can create it manually with:
```sql
CREATE SCHEMA IF NOT EXISTS supabase_migrations;
CREATE TABLE IF NOT EXISTS supabase_migrations.schema_migrations (
version text NOT NULL PRIMARY KEY,
name text NOT NULL DEFAULT '',
inserted_at timestamptz NOT NULL DEFAULT now()
);
```
**Schema difference vs. official Supabase:**
The Supabase cloud platform tracks additional columns (e.g. `statements`, `dirty`). This MCP server uses the minimal schema (version + name + inserted_at) that is compatible with the Supabase CLI's local-development workflow. If your existing table has extra columns they are simply ignored.
## Setup and Installation
### Installing via Smithery
To install Self-Hosted Supabase MCP Server for Claude Desktop automatically via [Smithery](https://smithery.ai/server/@HenkDz/selfhosted-supabase-mcp):
```bash
npx -y @smithery/cli install @HenkDz/selfhosted-supabase-mcp --client claude
```
### Prerequisites
* [Bun](https://bun.sh/) v1.1 or later (replaces Node.js/npm — used for runtime and builds)
* Access to your self-hosted Supabase instance (URL, keys, and optionally a direct PostgreSQL connection string).
### Steps
1. **Clone the repository:**
```bash
git clone <repository-url>
cd selfhosted-supabase-mcp
```
2. **Install dependencies:**
```bash
bun install
```
3. **Build the project:**
```bash
bun run build
```
This compiles the TypeScript source to JavaScript in the `dist` directory.
## Configuration
The server requires configuration details for your Supabase instance. These can be provided via command-line arguments or environment variables. CLI arguments take precedence.
**Required:**
* `--url <url>` or `SUPABASE_URL=<url>`: The main HTTP URL of your Supabase project (e.g., `http://localhost:8000`).
* `--anon-key <key>` or `SUPABASE_ANON_KEY=<key>`: Your Supabase project's anonymous key.
**Optional (but Recommended/Required for certain tools):**
* `--service-key <key>` or `SUPABASE_SERVICE_ROLE_KEY=<key>`: Your Supabase project's service role key. Required for privileged tools and for auto-creating the `execute_sql` helper function on startup.
* `--db-url <url>` or `DATABASE_URL=<url>`: The direct PostgreSQL connection string for your Supabase database (e.g., `postgresql://postgres:password@localhost:5432/postgres`). Required for tools needing direct database access (`apply_migration`, Auth tools, Storage tools, `pg_catalog` queries).
* `--jwt-secret <secret>` or `SUPABASE_AUTH_JWT_SECRET=<secret>`: Your Supabase project's JWT secret. Required when using `--transport http` and needed by the `verify_jwt_secret` tool.
* `--tools-config <path>`: Path to a JSON file specifying which tools to enable (whitelist). If omitted, all tools are enabled. Format: `{"enabledTools": ["tool_name_1", "tool_name_2"]}`.
**HTTP transport options (when using `--transport http`):**
* `--port <number>`: HTTP server port (default: `3000`).
* `--host <string>`: HTTP server host (default: `127.0.0.1`).
* `--cors-origins <origins>`: Comma-separated list of allowed CORS origins. Defaults to localhost only.
* `--rate-limit-window <ms>`: Rate limit window in milliseconds (default: `60000`).
* `--rate-limit-max <count>`: Max requests per rate limit window (default: `100`).
* `--request-timeout <ms>`: Request timeout in milliseconds (default: `30000`).
### Important Notes:
* **`execute_sql` Helper Function:** Many tools rely on a `public.execute_sql` function within your Supabase database for SQL execution via RPC. The server attempts to check for this function on startup. If it's missing *and* a `service-key` *and* `db-url` are provided, it will attempt to create the function automatically. If creation fails or keys aren't provided, tools relying solely on RPC may fail.
* **Direct Database Access:** Tools interacting directly with privileged schemas (`auth`, `storage`) or system catalogs (`pg_catalog`) generally require `DATABASE_URL` to be configured.
* **Coolify / reverse-proxy deployments:**
* The `DATABASE_URL` must use the internal hostname reachable from wherever the MCP server process runs, not the public-facing domain.
* An `ECONNRESET` error during startup means the `DATABASE_URL` cannot be reached from the server's network context.
* The server will still start successfully and all tools that don't require a direct DB connection will continue to work normally.
## Security
### HTTP transport (recommended for remote access)
When running with `--transport http`, the server enforces:
- **JWT authentication** on all `/mcp` endpoints using your `SUPABASE_AUTH_JWT_SECRET`.
- **Privilege-based access control (RBAC)** — the `role` claim in the JWT determines which tools are accessible:
- `service_role`: Full access (all tools including privileged ones).
- `authenticated`: Regular tools only.
- `anon`: No tool access.
- **Rate limiting** — configurable request rate limit per IP address.
- **CORS** — configurable allow-list of origins (defaults to localhost only).
- **Security headers** — `X-Content-Type-Options`, `X-Frame-Options`, `Strict-Transport-Security`, etc.
- **Request timeouts** — configurable timeout to prevent resource exhaustion.
### Stdio transport (local development)
Stdio mode has **no authentication** — all tools (including privileged ones) are accessible. It is intended for trusted local clients only (e.g., an IDE extension running on your local machine). A warning is printed on startup when this mode is used.
### Password handling for auth user tools
`create_auth_user` and `update_auth_user` accept a plain-text password from the MCP client, then immediately hash it with **bcrypt** (via PostgreSQL's `pgcrypto` extension: `crypt($password, gen_salt('bf'))`) before storing it in `auth.users`. The plain-text password is never stored. Passwords are passed as query parameters (not string-interpolated into SQL), preventing SQL injection.
> **Note:** The password travels over the MCP transport in plain text between the MCP client and server. This is inherent to the MCP protocol interface and unavoidable at this layer. Use the HTTP transport with TLS termination (e.g., behind Kong/nginx) for network protection.
### SQL execution security
All database operations in the MCP server use parameterized queries (`$1`, `$2`, ...) to prevent SQL injection. The `execute_sql` tool is an intentional exception — it executes arbitrary SQL by design (it is the tool's purpose). This tool is restricted to `service_role` privilege level to limit exposure.
## Usage
### Stdio mode (local MCP clients)
Run the server using Bun, providing the necessary configuration:
```bash
# Using CLI arguments (stdio mode — default)
bun run dist/index.js --url http://localhost:8000 --anon-key <your-anon-key> \
--db-url postgresql://postgres:password@localhost:5432/postgres \
--service-key <your-service-key>
# Example with tool whitelisting via config file
bun run dist/index.js --url http://localhost:8000 --anon-key <your-anon-key> \
--tools-config ./mcp-tools.json
# Or configure using environment variables and run:
# export SUPABASE_URL=http://localhost:8000
# export SUPABASE_ANON_KEY=<your-anon-key>
# export DATABASE_URL=postgresql://postgres:password@localhost:5432/postgres
# export SUPABASE_SERVICE_ROLE_KEY=<your-service-key>
bun run dist/index.js
```
### HTTP mode (Docker / remote access)
```bash
bun run dist/index.js \
--transport http \
--port 3100 \
--host 0.0.0.0 \
--url http://kong:8000 \
--anon-key <your-anon-key> \
--service-key <your-service-key> \
--jwt-secret <your-jwt-secret> \
--db-url postgresql://postgres:password@db:5432/postgres
```
HTTP mode requires `--jwt-secret`. All `/mcp` requests must include a valid Supabase JWT in the `Authorization: Bearer <token>` header.
The server communicates via stdio (default) or HTTP (Streamable HTTP Transport) and is designed to be invoked by an MCP client application (e.g., an IDE extension like Cursor). The client will connect to the server's stdio stream or HTTP endpoint to list and call the available tools.
## Client Configuration Examples
Below are examples of how to configure popular MCP clients to use this self-hosted server.
**Important:**
* Replace placeholders like `<your-supabase-url>`, `<your-anon-key>`, `<your-db-url>`, `<path-to-dist/index.js>` etc., with your actual values.
* Ensure the path to the compiled server file (`dist/index.js`) is correct for your system.
* Be cautious about storing sensitive keys directly in configuration files, especially if committed to version control. Consider using environment variables or more secure methods where supported by the client.
### Cursor
1. Create or open the file `.cursor/mcp.json` in your project root.
2. Add the following configuration:
```json
{
"mcpServers": {
"selfhosted-supabase": {
"command": "bun",
"args": [
"run",
"<path-to-dist/index.js>", // e.g., "/home/user/selfhosted-supabase-mcp/dist/index.js"
"--url",
"<your-supabase-url>", // e.g., "http://localhost:8000"
"--anon-key",
"<your-anon-key>",
// Optional - Add these if needed by the tools you use
"--service-key",
"<your-service-key>",
"--db-url",
"<your-db-url>", // e.g., "postgresql://postgres:password@host:port/postgres"
"--jwt-secret",
"<your-jwt-secret>",
// Optional - Whitelist specific tools
"--tools-config",
"<path-to-your-mcp-tools.json>" // e.g., "./mcp-tools.json"
]
}
}
}
```
### Visual Studio Code (Copilot)
VS Code Copilot allows using environment variables populated via prompted inputs, which is more secure for keys.
1. Create or open the file `.vscode/mcp.json` in your project root.
2. Add the following configuration:
```json
{
"inputs": [
{ "type": "promptString", "id": "sh-supabase-url", "description": "Self-Hosted Supabase URL", "default": "http://localhost:8000" },
{ "type": "promptString", "id": "sh-supabase-anon-key", "description": "Self-Hosted Supabase Anon Key", "password": true },
{ "type": "promptString", "id": "sh-supabase-service-key", "description": "Self-Hosted Supabase Service Key (Optional)", "password": true, "required": false },
{ "type": "promptString", "id": "sh-supabase-db-url", "description": "Self-Hosted Supabase DB URL (Optional)", "password": true, "required": false },
{ "type": "promptString", "id": "sh-supabase-jwt-secret", "description": "Self-Hosted Supabase JWT Secret (Optional)", "password": true, "required": false },
{ "type": "promptString", "id": "sh-supabase-server-path", "description": "Path to self-hosted-supabase-mcp/dist/index.js" },
{ "type": "promptString", "id": "sh-supabase-tools-config", "description": "Path to tools config JSON (Optional, e.g., ./mcp-tools.json)", "required": false }
],
"servers": {
"selfhosted-supabase": {
"command": "bun",
"args": [
"run",
"${input:sh-supabase-server-path}",
"--tools-config", "${input:sh-supabase-tools-config}"
],
"env": {
"SUPABASE_URL": "${input:sh-supabase-url}",
"SUPABASE_ANON_KEY": "${input:sh-supabase-anon-key}",
"SUPABASE_SERVICE_ROLE_KEY": "${input:sh-supabase-service-key}",
"DATABASE_URL": "${input:sh-supabase-db-url}",
"SUPABASE_AUTH_JWT_SECRET": "${input:sh-supabase-jwt-secret}"
}
}
}
}
```
3. When you use Copilot Chat in Agent mode (@workspace), it should detect the server. You will be prompted to enter the details (URL, keys, path) when the server is first invoked.
### Other Clients (Windsurf, Cline, Claude)
Adapt the configuration structure shown for Cursor or the official Supabase documentation, replacing the `command` and `args` with the `bun run` command and the arguments for this server, similar to the Cursor example:
```json
{
"mcpServers": {
"selfhosted-supabase": {
"command": "bun",
"args": [
"run",
"<path-to-dist/index.js>",
"--url", "<your-supabase-url>",
"--anon-key", "<your-anon-key>",
"--service-key", "<your-service-key>",
"--db-url", "<your-db-url>",
"--jwt-secret", "<your-jwt-secret>",
"--tools-config", "<path-to-your-mcp-tools.json>"
]
}
}
}
```
Consult the specific documentation for each client on where to place the `mcp.json` or equivalent configuration file.
## Docker Integration with Self-Hosted Supabase
This MCP server can be integrated directly into a self-hosted Supabase Docker Compose stack, making it available alongside other Supabase services via the Kong API gateway.
### Architecture Overview
When integrated with Docker:
- The MCP server runs in HTTP transport mode (not stdio)
- It's exposed through Kong at `/mcp/v1/*`
- JWT authentication is handled by the MCP server itself
- The server has direct access to the database and all Supabase keys
### Setup Steps
#### 1. Add the MCP Server as a Git Submodule
From your Supabase Docker directory:
```bash
git submodule add https://github.com/HenkDz/selfhosted-supabase-mcp.git selfhosted-supabase-mcp
```
#### 2. Create the Dockerfile
Create `volumes/mcp/Dockerfile`:
```dockerfile
# Dockerfile for selfhosted-supabase-mcp HTTP mode
# Multi-stage build using Bun runtime for self-hosted Supabase
FROM oven/bun:1.1-alpine AS builder
WORKDIR /app
# Copy package files from submodule
COPY selfhosted-supabase-mcp/package.json selfhosted-supabase-mcp/bun.lock* ./
# Install dependencies
RUN bun install --frozen-lockfile || bun install
# Copy source code
COPY selfhosted-supabase-mcp/src ./src
COPY selfhosted-supabase-mcp/tsconfig.json ./
# Build the application
RUN bun build src/index.ts --outdir dist --target bun
# Production stage
FROM oven/bun:1.1-alpine AS runner
WORKDIR /app
# Create non-root user for security
RUN addgroup --system --gid 1001 mcp && \
adduser --system --uid 1001 --ingroup mcp mcp
# Copy built application from builder
COPY --from=builder /app/dist ./dist
COPY --from=builder /app/node_modules ./node_modules
COPY --from=builder /app/package.json ./
# Set ownership
RUN chown -R mcp:mcp /app
USER mcp
# Default environment variables
ENV NODE_ENV=production
# Health check
HEALTHCHECK --interval=30s --timeout=5s --start-period=10s --retries=3 \
CMD wget --no-verbose --tries=1 --spider http://localhost:3100/health || exit 1
# Expose HTTP port
EXPOSE 3100
# Start the MCP server in HTTP mode
CMD ["bun", "run", "dist/index.js"]
```
#### 3. Add the MCP Service to docker-compose.yml
Add this service definition to your `docker-compose.yml`:
```yaml
## MCP Server - Model Context Protocol for AI integrations
## DISABLED BY DEFAULT - Add 'mcp' to COMPOSE_PROFILES to enable
mcp:
container_name: ${COMPOSE_PROJECT_NAME:-supabase}-mcp
profiles:
- mcp
build:
context: .
dockerfile: ./volumes/mcp/Dockerfile
restart: unless-stopped
healthcheck:
test:
[
"CMD",
"wget",
"--no-verbose",
"--tries=1",
"--spider",
"http://localhost:3100/health"
]
timeout: 5s
interval: 10s
retries: 3
depends_on:
db:
condition: service_healthy
rest:
condition: service_started
environment:
SUPABASE_URL: http://kong:8000
SUPABASE_ANON_KEY: ${ANON_KEY}
SUPABASE_SERVICE_ROLE_KEY: ${SERVICE_ROLE_KEY}
SUPABASE_AUTH_JWT_SECRET: ${JWT_SECRET}
DATABASE_URL: postgresql://postgres:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
command:
[
"bun",
"run",
"dist/index.js",
"--transport", "http",
"--port", "3100",
"--host", "0.0.0.0",
"--url", "http://kong:8000",
"--anon-key", "${ANON_KEY}",
"--service-key", "${SERVICE_ROLE_KEY}",
"--jwt-secret", "${JWT_SECRET}",
"--db-url", "postgresql://postgres:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}"
]
```
#### 4. Add Kong API Gateway Routes
Add the MCP routes to `volumes/api/kong.yml` in the `services` section:
```yaml
## MCP Server routes - Model Context Protocol for AI integrations
## Authentication is handled by the MCP server itself (JWT validation)
- name: mcp-v1
_comment: 'MCP Server: /mcp/v1/* -> http://mcp:3100/*'
url: http://mcp:3100/
routes:
- name: mcp-v1-all
strip_path: true
paths:
- /mcp/v1/
plugins:
- name: cors
config:
origins:
- "$SITE_URL_PATTERN"
- "http://localhost:3000"
- "http://127.0.0.1:3000"
methods:
- GET
- POST
- DELETE
- OPTIONS
headers:
- Accept
- Authorization
- Content-Type
- X-Client-Info
- apikey
- Mcp-Session-Id
exposed_headers:
- Mcp-Session-Id
credentials: true
max_age: 3600
```
#### 5. Enable the MCP Service
The MCP service uses Docker Compose profiles, so it's disabled by default. To enable it:
**Option A: Set in `.env` file:**
```bash
COMPOSE_PROFILES=mcp
```
**Option B: Enable at runtime:**
```bash
docker compose --profile mcp up -d
```
### Accessing the MCP Server
Once running, the MCP server is available at:
- **Internal (from other containers):** `http://mcp:3100`
- **External (via Kong):** `http://localhost:8000/mcp/v1/`
### Authentication
When running in HTTP mode, the MCP server validates JWTs using the configured `JWT_SECRET`. Clients must include a valid Supabase JWT in the `Authorization` header:
```
Authorization: Bearer <supabase-jwt>
```
The JWT's `role` claim determines access:
- `service_role`: Full access to all tools (regular + privileged)
- `authenticated`: Access to regular tools only
- `anon`: No tool access
### Health Check
The MCP server exposes a health endpoint:
```bash
curl http://localhost:8000/mcp/v1/health
```
### Security Considerations
When deploying via Docker:
1. The MCP server runs as a non-root user (`mcp:mcp`)
2. JWT authentication is enforced for all tool calls
3. Privileged tools (like `execute_sql`) require `service_role` JWT
4. CORS is configured via Kong - adjust origins for your deployment
## Development
* **Language:** TypeScript
* **Build:** `bun build` (via `bun run build`)
* **Runtime:** [Bun](https://bun.sh/) v1.1+
* **Test runner:** `bun test`
* **Dependencies:** Managed via `bun` (`bun.lock`)
* **Core Libraries:** `@supabase/supabase-js`, `pg` (node-postgres), `zod` (validation), `commander` (CLI args), `@modelcontextprotocol/sdk` (MCP server framework), `express`, `jsonwebtoken`.
## License
This project is licensed under the MIT License. See the LICENSE file for details.

View File

@ -0,0 +1,321 @@
{
"lockfileVersion": 1,
"configVersion": 1,
"workspaces": {
"": {
"name": "self-hosted-supabase-mcp",
"dependencies": {
"@modelcontextprotocol/sdk": "^1.25.2",
"@supabase/supabase-js": "^2.90.1",
"commander": "^14.0.2",
"express": "^5.2.1",
"jsonwebtoken": "^9.0.3",
"pg": "^8.17.1",
"zod": "^4.3.5",
},
"devDependencies": {
"@types/bun": "latest",
"@types/express": "^5.0.6",
"@types/jsonwebtoken": "^9.0.10",
"@types/node": "^22.19.7",
"@types/pg": "^8.16.0",
"typescript": "^5.9.3",
},
},
},
"packages": {
"@hono/node-server": ["@hono/node-server@1.19.9", "", { "peerDependencies": { "hono": "^4" } }, "sha512-vHL6w3ecZsky+8P5MD+eFfaGTyCeOHUIFYMGpQGbrBTSmNNoxv0if69rEZ5giu36weC5saFuznL411gRX7bJDw=="],
"@modelcontextprotocol/sdk": ["@modelcontextprotocol/sdk@1.25.2", "", { "dependencies": { "@hono/node-server": "^1.19.7", "ajv": "^8.17.1", "ajv-formats": "^3.0.1", "content-type": "^1.0.5", "cors": "^2.8.5", "cross-spawn": "^7.0.5", "eventsource": "^3.0.2", "eventsource-parser": "^3.0.0", "express": "^5.0.1", "express-rate-limit": "^7.5.0", "jose": "^6.1.1", "json-schema-typed": "^8.0.2", "pkce-challenge": "^5.0.0", "raw-body": "^3.0.0", "zod": "^3.25 || ^4.0", "zod-to-json-schema": "^3.25.0" }, "peerDependencies": { "@cfworker/json-schema": "^4.1.1" }, "optionalPeers": ["@cfworker/json-schema"] }, "sha512-LZFeo4F9M5qOhC/Uc1aQSrBHxMrvxett+9KLHt7OhcExtoiRN9DKgbZffMP/nxjutWDQpfMDfP3nkHI4X9ijww=="],
"@supabase/auth-js": ["@supabase/auth-js@2.90.1", "", { "dependencies": { "tslib": "2.8.1" } }, "sha512-vxb66dgo6h3yyPbR06735Ps+dK3hj0JwS8w9fdQPVZQmocSTlKUW5MfxSy99mN0XqCCuLMQ3jCEiIIUU23e9ng=="],
"@supabase/functions-js": ["@supabase/functions-js@2.90.1", "", { "dependencies": { "tslib": "2.8.1" } }, "sha512-x9mV9dF1Lam9qL3zlpP6mSM5C9iqMPtF5B/tU1Jj/F0ufX5mjDf9ghVBaErVxmrQJRL4+iMKWKY2GnODkpS8tw=="],
"@supabase/postgrest-js": ["@supabase/postgrest-js@2.90.1", "", { "dependencies": { "tslib": "2.8.1" } }, "sha512-jh6vqzaYzoFn3raaC0hcFt9h+Bt+uxNRBSdc7PfToQeRGk7PDPoweHsbdiPWREtDVTGKfu+PyPW9e2jbK+BCgQ=="],
"@supabase/realtime-js": ["@supabase/realtime-js@2.90.1", "", { "dependencies": { "@types/phoenix": "^1.6.6", "@types/ws": "^8.18.1", "tslib": "2.8.1", "ws": "^8.18.2" } }, "sha512-PWbnEMkcQRuor8jhObp4+Snufkq8C6fBp+MchVp2qBPY1NXk/c3Iv3YyiFYVzo0Dzuw4nAlT4+ahuPggy4r32w=="],
"@supabase/storage-js": ["@supabase/storage-js@2.90.1", "", { "dependencies": { "iceberg-js": "^0.8.1", "tslib": "2.8.1" } }, "sha512-GHY+Ps/K/RBfRj7kwx+iVf2HIdqOS43rM2iDOIDpapyUnGA9CCBFzFV/XvfzznGykd//z2dkGZhlZZprsVFqGg=="],
"@supabase/supabase-js": ["@supabase/supabase-js@2.90.1", "", { "dependencies": { "@supabase/auth-js": "2.90.1", "@supabase/functions-js": "2.90.1", "@supabase/postgrest-js": "2.90.1", "@supabase/realtime-js": "2.90.1", "@supabase/storage-js": "2.90.1" } }, "sha512-U8KaKGLUgTIFHtwEW1dgw1gK7XrdpvvYo7nzzqPx721GqPe8WZbAiLh/hmyKLGBYQ/mmQNr20vU9tWSDZpii3w=="],
"@types/body-parser": ["@types/body-parser@1.19.6", "", { "dependencies": { "@types/connect": "*", "@types/node": "*" } }, "sha512-HLFeCYgz89uk22N5Qg3dvGvsv46B8GLvKKo1zKG4NybA8U2DiEO3w9lqGg29t/tfLRJpJ6iQxnVw4OnB7MoM9g=="],
"@types/bun": ["@types/bun@1.3.6", "", { "dependencies": { "bun-types": "1.3.6" } }, "sha512-uWCv6FO/8LcpREhenN1d1b6fcspAB+cefwD7uti8C8VffIv0Um08TKMn98FynpTiU38+y2dUO55T11NgDt8VAA=="],
"@types/connect": ["@types/connect@3.4.38", "", { "dependencies": { "@types/node": "*" } }, "sha512-K6uROf1LD88uDQqJCktA4yzL1YYAK6NgfsI0v/mTgyPKWsX1CnJ0XPSDhViejru1GcRkLWb8RlzFYJRqGUbaug=="],
"@types/express": ["@types/express@5.0.6", "", { "dependencies": { "@types/body-parser": "*", "@types/express-serve-static-core": "^5.0.0", "@types/serve-static": "^2" } }, "sha512-sKYVuV7Sv9fbPIt/442koC7+IIwK5olP1KWeD88e/idgoJqDm3JV/YUiPwkoKK92ylff2MGxSz1CSjsXelx0YA=="],
"@types/express-serve-static-core": ["@types/express-serve-static-core@5.1.1", "", { "dependencies": { "@types/node": "*", "@types/qs": "*", "@types/range-parser": "*", "@types/send": "*" } }, "sha512-v4zIMr/cX7/d2BpAEX3KNKL/JrT1s43s96lLvvdTmza1oEvDudCqK9aF/djc/SWgy8Yh0h30TZx5VpzqFCxk5A=="],
"@types/http-errors": ["@types/http-errors@2.0.5", "", {}, "sha512-r8Tayk8HJnX0FztbZN7oVqGccWgw98T/0neJphO91KkmOzug1KkofZURD4UaD5uH8AqcFLfdPErnBod0u71/qg=="],
"@types/jsonwebtoken": ["@types/jsonwebtoken@9.0.10", "", { "dependencies": { "@types/ms": "*", "@types/node": "*" } }, "sha512-asx5hIG9Qmf/1oStypjanR7iKTv0gXQ1Ov/jfrX6kS/EO0OFni8orbmGCn0672NHR3kXHwpAwR+B368ZGN/2rA=="],
"@types/ms": ["@types/ms@2.1.0", "", {}, "sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA=="],
"@types/node": ["@types/node@22.19.7", "", { "dependencies": { "undici-types": "~6.21.0" } }, "sha512-MciR4AKGHWl7xwxkBa6xUGxQJ4VBOmPTF7sL+iGzuahOFaO0jHCsuEfS80pan1ef4gWId1oWOweIhrDEYLuaOw=="],
"@types/pg": ["@types/pg@8.16.0", "", { "dependencies": { "@types/node": "*", "pg-protocol": "*", "pg-types": "^2.2.0" } }, "sha512-RmhMd/wD+CF8Dfo+cVIy3RR5cl8CyfXQ0tGgW6XBL8L4LM/UTEbNXYRbLwU6w+CgrKBNbrQWt4FUtTfaU5jSYQ=="],
"@types/phoenix": ["@types/phoenix@1.6.7", "", {}, "sha512-oN9ive//QSBkf19rfDv45M7eZPi0eEXylht2OLEXicu5b4KoQ1OzXIw+xDSGWxSxe1JmepRR/ZH283vsu518/Q=="],
"@types/qs": ["@types/qs@6.14.0", "", {}, "sha512-eOunJqu0K1923aExK6y8p6fsihYEn/BYuQ4g0CxAAgFc4b/ZLN4CrsRZ55srTdqoiLzU2B2evC+apEIxprEzkQ=="],
"@types/range-parser": ["@types/range-parser@1.2.7", "", {}, "sha512-hKormJbkJqzQGhziax5PItDUTMAM9uE2XXQmM37dyd4hVM+5aVl7oVxMVUiVQn2oCQFN/LKCZdvSM0pFRqbSmQ=="],
"@types/send": ["@types/send@1.2.1", "", { "dependencies": { "@types/node": "*" } }, "sha512-arsCikDvlU99zl1g69TcAB3mzZPpxgw0UQnaHeC1Nwb015xp8bknZv5rIfri9xTOcMuaVgvabfIRA7PSZVuZIQ=="],
"@types/serve-static": ["@types/serve-static@2.2.0", "", { "dependencies": { "@types/http-errors": "*", "@types/node": "*" } }, "sha512-8mam4H1NHLtu7nmtalF7eyBH14QyOASmcxHhSfEoRyr0nP/YdoesEtU+uSRvMe96TW/HPTtkoKqQLl53N7UXMQ=="],
"@types/ws": ["@types/ws@8.18.1", "", { "dependencies": { "@types/node": "*" } }, "sha512-ThVF6DCVhA8kUGy+aazFQ4kXQ7E1Ty7A3ypFOe0IcJV8O/M511G99AW24irKrW56Wt44yG9+ij8FaqoBGkuBXg=="],
"accepts": ["accepts@2.0.0", "", { "dependencies": { "mime-types": "^3.0.0", "negotiator": "^1.0.0" } }, "sha512-5cvg6CtKwfgdmVqY1WIiXKc3Q1bkRqGLi+2W/6ao+6Y7gu/RCwRuAhGEzh5B4KlszSuTLgZYuqFqo5bImjNKng=="],
"ajv": ["ajv@8.17.1", "", { "dependencies": { "fast-deep-equal": "^3.1.3", "fast-uri": "^3.0.1", "json-schema-traverse": "^1.0.0", "require-from-string": "^2.0.2" } }, "sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g=="],
"ajv-formats": ["ajv-formats@3.0.1", "", { "dependencies": { "ajv": "^8.0.0" } }, "sha512-8iUql50EUR+uUcdRQ3HDqa6EVyo3docL8g5WJ3FNcWmu62IbkGUue/pEyLBW8VGKKucTPgqeks4fIU1DA4yowQ=="],
"body-parser": ["body-parser@2.2.2", "", { "dependencies": { "bytes": "^3.1.2", "content-type": "^1.0.5", "debug": "^4.4.3", "http-errors": "^2.0.0", "iconv-lite": "^0.7.0", "on-finished": "^2.4.1", "qs": "^6.14.1", "raw-body": "^3.0.1", "type-is": "^2.0.1" } }, "sha512-oP5VkATKlNwcgvxi0vM0p/D3n2C3EReYVX+DNYs5TjZFn/oQt2j+4sVJtSMr18pdRr8wjTcBl6LoV+FUwzPmNA=="],
"buffer-equal-constant-time": ["buffer-equal-constant-time@1.0.1", "", {}, "sha512-zRpUiDwd/xk6ADqPMATG8vc9VPrkck7T07OIx0gnjmJAnHnTVXNQG3vfvWNuiZIkwu9KrKdA1iJKfsfTVxE6NA=="],
"bun-types": ["bun-types@1.3.6", "", { "dependencies": { "@types/node": "*" } }, "sha512-OlFwHcnNV99r//9v5IIOgQ9Uk37gZqrNMCcqEaExdkVq3Avwqok1bJFmvGMCkCE0FqzdY8VMOZpfpR3lwI+CsQ=="],
"bytes": ["bytes@3.1.2", "", {}, "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg=="],
"call-bind-apply-helpers": ["call-bind-apply-helpers@1.0.2", "", { "dependencies": { "es-errors": "^1.3.0", "function-bind": "^1.1.2" } }, "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ=="],
"call-bound": ["call-bound@1.0.4", "", { "dependencies": { "call-bind-apply-helpers": "^1.0.2", "get-intrinsic": "^1.3.0" } }, "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg=="],
"commander": ["commander@14.0.2", "", {}, "sha512-TywoWNNRbhoD0BXs1P3ZEScW8W5iKrnbithIl0YH+uCmBd0QpPOA8yc82DS3BIE5Ma6FnBVUsJ7wVUDz4dvOWQ=="],
"content-disposition": ["content-disposition@1.0.1", "", {}, "sha512-oIXISMynqSqm241k6kcQ5UwttDILMK4BiurCfGEREw6+X9jkkpEe5T9FZaApyLGGOnFuyMWZpdolTXMtvEJ08Q=="],
"content-type": ["content-type@1.0.5", "", {}, "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA=="],
"cookie": ["cookie@0.7.2", "", {}, "sha512-yki5XnKuf750l50uGTllt6kKILY4nQ1eNIQatoXEByZ5dWgnKqbnqmTrBE5B4N7lrMJKQ2ytWMiTO2o0v6Ew/w=="],
"cookie-signature": ["cookie-signature@1.2.2", "", {}, "sha512-D76uU73ulSXrD1UXF4KE2TMxVVwhsnCgfAyTg9k8P6KGZjlXKrOLe4dJQKI3Bxi5wjesZoFXJWElNWBjPZMbhg=="],
"cors": ["cors@2.8.5", "", { "dependencies": { "object-assign": "^4", "vary": "^1" } }, "sha512-KIHbLJqu73RGr/hnbrO9uBeixNGuvSQjul/jdFvS/KFSIH1hWVd1ng7zOHx+YrEfInLG7q4n6GHQ9cDtxv/P6g=="],
"cross-spawn": ["cross-spawn@7.0.6", "", { "dependencies": { "path-key": "^3.1.0", "shebang-command": "^2.0.0", "which": "^2.0.1" } }, "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA=="],
"debug": ["debug@4.4.3", "", { "dependencies": { "ms": "^2.1.3" } }, "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA=="],
"depd": ["depd@2.0.0", "", {}, "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw=="],
"dunder-proto": ["dunder-proto@1.0.1", "", { "dependencies": { "call-bind-apply-helpers": "^1.0.1", "es-errors": "^1.3.0", "gopd": "^1.2.0" } }, "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A=="],
"ecdsa-sig-formatter": ["ecdsa-sig-formatter@1.0.11", "", { "dependencies": { "safe-buffer": "^5.0.1" } }, "sha512-nagl3RYrbNv6kQkeJIpt6NJZy8twLB/2vtz6yN9Z4vRKHN4/QZJIEbqohALSgwKdnksuY3k5Addp5lg8sVoVcQ=="],
"ee-first": ["ee-first@1.1.1", "", {}, "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow=="],
"encodeurl": ["encodeurl@2.0.0", "", {}, "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg=="],
"es-define-property": ["es-define-property@1.0.1", "", {}, "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g=="],
"es-errors": ["es-errors@1.3.0", "", {}, "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw=="],
"es-object-atoms": ["es-object-atoms@1.1.1", "", { "dependencies": { "es-errors": "^1.3.0" } }, "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA=="],
"escape-html": ["escape-html@1.0.3", "", {}, "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow=="],
"etag": ["etag@1.8.1", "", {}, "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg=="],
"eventsource": ["eventsource@3.0.7", "", { "dependencies": { "eventsource-parser": "^3.0.1" } }, "sha512-CRT1WTyuQoD771GW56XEZFQ/ZoSfWid1alKGDYMmkt2yl8UXrVR4pspqWNEcqKvVIzg6PAltWjxcSSPrboA4iA=="],
"eventsource-parser": ["eventsource-parser@3.0.6", "", {}, "sha512-Vo1ab+QXPzZ4tCa8SwIHJFaSzy4R6SHf7BY79rFBDf0idraZWAkYrDjDj8uWaSm3S2TK+hJ7/t1CEmZ7jXw+pg=="],
"express": ["express@5.2.1", "", { "dependencies": { "accepts": "^2.0.0", "body-parser": "^2.2.1", "content-disposition": "^1.0.0", "content-type": "^1.0.5", "cookie": "^0.7.1", "cookie-signature": "^1.2.1", "debug": "^4.4.0", "depd": "^2.0.0", "encodeurl": "^2.0.0", "escape-html": "^1.0.3", "etag": "^1.8.1", "finalhandler": "^2.1.0", "fresh": "^2.0.0", "http-errors": "^2.0.0", "merge-descriptors": "^2.0.0", "mime-types": "^3.0.0", "on-finished": "^2.4.1", "once": "^1.4.0", "parseurl": "^1.3.3", "proxy-addr": "^2.0.7", "qs": "^6.14.0", "range-parser": "^1.2.1", "router": "^2.2.0", "send": "^1.1.0", "serve-static": "^2.2.0", "statuses": "^2.0.1", "type-is": "^2.0.1", "vary": "^1.1.2" } }, "sha512-hIS4idWWai69NezIdRt2xFVofaF4j+6INOpJlVOLDO8zXGpUVEVzIYk12UUi2JzjEzWL3IOAxcTubgz9Po0yXw=="],
"express-rate-limit": ["express-rate-limit@7.5.1", "", { "peerDependencies": { "express": ">= 4.11" } }, "sha512-7iN8iPMDzOMHPUYllBEsQdWVB6fPDMPqwjBaFrgr4Jgr/+okjvzAy+UHlYYL/Vs0OsOrMkwS6PJDkFlJwoxUnw=="],
"fast-deep-equal": ["fast-deep-equal@3.1.3", "", {}, "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q=="],
"fast-uri": ["fast-uri@3.1.0", "", {}, "sha512-iPeeDKJSWf4IEOasVVrknXpaBV0IApz/gp7S2bb7Z4Lljbl2MGJRqInZiUrQwV16cpzw/D3S5j5Julj/gT52AA=="],
"finalhandler": ["finalhandler@2.1.1", "", { "dependencies": { "debug": "^4.4.0", "encodeurl": "^2.0.0", "escape-html": "^1.0.3", "on-finished": "^2.4.1", "parseurl": "^1.3.3", "statuses": "^2.0.1" } }, "sha512-S8KoZgRZN+a5rNwqTxlZZePjT/4cnm0ROV70LedRHZ0p8u9fRID0hJUZQpkKLzro8LfmC8sx23bY6tVNxv8pQA=="],
"forwarded": ["forwarded@0.2.0", "", {}, "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow=="],
"fresh": ["fresh@2.0.0", "", {}, "sha512-Rx/WycZ60HOaqLKAi6cHRKKI7zxWbJ31MhntmtwMoaTeF7XFH9hhBp8vITaMidfljRQ6eYWCKkaTK+ykVJHP2A=="],
"function-bind": ["function-bind@1.1.2", "", {}, "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA=="],
"get-intrinsic": ["get-intrinsic@1.3.0", "", { "dependencies": { "call-bind-apply-helpers": "^1.0.2", "es-define-property": "^1.0.1", "es-errors": "^1.3.0", "es-object-atoms": "^1.1.1", "function-bind": "^1.1.2", "get-proto": "^1.0.1", "gopd": "^1.2.0", "has-symbols": "^1.1.0", "hasown": "^2.0.2", "math-intrinsics": "^1.1.0" } }, "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ=="],
"get-proto": ["get-proto@1.0.1", "", { "dependencies": { "dunder-proto": "^1.0.1", "es-object-atoms": "^1.0.0" } }, "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g=="],
"gopd": ["gopd@1.2.0", "", {}, "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg=="],
"has-symbols": ["has-symbols@1.1.0", "", {}, "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ=="],
"hasown": ["hasown@2.0.2", "", { "dependencies": { "function-bind": "^1.1.2" } }, "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ=="],
"hono": ["hono@4.11.4", "", {}, "sha512-U7tt8JsyrxSRKspfhtLET79pU8K+tInj5QZXs1jSugO1Vq5dFj3kmZsRldo29mTBfcjDRVRXrEZ6LS63Cog9ZA=="],
"http-errors": ["http-errors@2.0.1", "", { "dependencies": { "depd": "~2.0.0", "inherits": "~2.0.4", "setprototypeof": "~1.2.0", "statuses": "~2.0.2", "toidentifier": "~1.0.1" } }, "sha512-4FbRdAX+bSdmo4AUFuS0WNiPz8NgFt+r8ThgNWmlrjQjt1Q7ZR9+zTlce2859x4KSXrwIsaeTqDoKQmtP8pLmQ=="],
"iceberg-js": ["iceberg-js@0.8.1", "", {}, "sha512-1dhVQZXhcHje7798IVM+xoo/1ZdVfzOMIc8/rgVSijRK38EDqOJoGula9N/8ZI5RD8QTxNQtK/Gozpr+qUqRRA=="],
"iconv-lite": ["iconv-lite@0.7.2", "", { "dependencies": { "safer-buffer": ">= 2.1.2 < 3.0.0" } }, "sha512-im9DjEDQ55s9fL4EYzOAv0yMqmMBSZp6G0VvFyTMPKWxiSBHUj9NW/qqLmXUwXrrM7AvqSlTCfvqRb0cM8yYqw=="],
"inherits": ["inherits@2.0.4", "", {}, "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ=="],
"ipaddr.js": ["ipaddr.js@1.9.1", "", {}, "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g=="],
"is-promise": ["is-promise@4.0.0", "", {}, "sha512-hvpoI6korhJMnej285dSg6nu1+e6uxs7zG3BYAm5byqDsgJNWwxzM6z6iZiAgQR4TJ30JmBTOwqZUw3WlyH3AQ=="],
"isexe": ["isexe@2.0.0", "", {}, "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw=="],
"jose": ["jose@6.1.3", "", {}, "sha512-0TpaTfihd4QMNwrz/ob2Bp7X04yuxJkjRGi4aKmOqwhov54i6u79oCv7T+C7lo70MKH6BesI3vscD1yb/yzKXQ=="],
"json-schema-traverse": ["json-schema-traverse@1.0.0", "", {}, "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug=="],
"json-schema-typed": ["json-schema-typed@8.0.2", "", {}, "sha512-fQhoXdcvc3V28x7C7BMs4P5+kNlgUURe2jmUT1T//oBRMDrqy1QPelJimwZGo7Hg9VPV3EQV5Bnq4hbFy2vetA=="],
"jsonwebtoken": ["jsonwebtoken@9.0.3", "", { "dependencies": { "jws": "^4.0.1", "lodash.includes": "^4.3.0", "lodash.isboolean": "^3.0.3", "lodash.isinteger": "^4.0.4", "lodash.isnumber": "^3.0.3", "lodash.isplainobject": "^4.0.6", "lodash.isstring": "^4.0.1", "lodash.once": "^4.0.0", "ms": "^2.1.1", "semver": "^7.5.4" } }, "sha512-MT/xP0CrubFRNLNKvxJ2BYfy53Zkm++5bX9dtuPbqAeQpTVe0MQTFhao8+Cp//EmJp244xt6Drw/GVEGCUj40g=="],
"jwa": ["jwa@2.0.1", "", { "dependencies": { "buffer-equal-constant-time": "^1.0.1", "ecdsa-sig-formatter": "1.0.11", "safe-buffer": "^5.0.1" } }, "sha512-hRF04fqJIP8Abbkq5NKGN0Bbr3JxlQ+qhZufXVr0DvujKy93ZCbXZMHDL4EOtodSbCWxOqR8MS1tXA5hwqCXDg=="],
"jws": ["jws@4.0.1", "", { "dependencies": { "jwa": "^2.0.1", "safe-buffer": "^5.0.1" } }, "sha512-EKI/M/yqPncGUUh44xz0PxSidXFr/+r0pA70+gIYhjv+et7yxM+s29Y+VGDkovRofQem0fs7Uvf4+YmAdyRduA=="],
"lodash.includes": ["lodash.includes@4.3.0", "", {}, "sha512-W3Bx6mdkRTGtlJISOvVD/lbqjTlPPUDTMnlXZFnVwi9NKJ6tiAk6LVdlhZMm17VZisqhKcgzpO5Wz91PCt5b0w=="],
"lodash.isboolean": ["lodash.isboolean@3.0.3", "", {}, "sha512-Bz5mupy2SVbPHURB98VAcw+aHh4vRV5IPNhILUCsOzRmsTmSQ17jIuqopAentWoehktxGd9e/hbIXq980/1QJg=="],
"lodash.isinteger": ["lodash.isinteger@4.0.4", "", {}, "sha512-DBwtEWN2caHQ9/imiNeEA5ys1JoRtRfY3d7V9wkqtbycnAmTvRRmbHKDV4a0EYc678/dia0jrte4tjYwVBaZUA=="],
"lodash.isnumber": ["lodash.isnumber@3.0.3", "", {}, "sha512-QYqzpfwO3/CWf3XP+Z+tkQsfaLL/EnUlXWVkIk5FUPc4sBdTehEqZONuyRt2P67PXAk+NXmTBcc97zw9t1FQrw=="],
"lodash.isplainobject": ["lodash.isplainobject@4.0.6", "", {}, "sha512-oSXzaWypCMHkPC3NvBEaPHf0KsA5mvPrOPgQWDsbg8n7orZ290M0BmC/jgRZ4vcJ6DTAhjrsSYgdsW/F+MFOBA=="],
"lodash.isstring": ["lodash.isstring@4.0.1", "", {}, "sha512-0wJxfxH1wgO3GrbuP+dTTk7op+6L41QCXbGINEmD+ny/G/eCqGzxyCsh7159S+mgDDcoarnBw6PC1PS5+wUGgw=="],
"lodash.once": ["lodash.once@4.1.1", "", {}, "sha512-Sb487aTOCr9drQVL8pIxOzVhafOjZN9UU54hiN8PU3uAiSV7lx1yYNpbNmex2PK6dSJoNTSJUUswT651yww3Mg=="],
"math-intrinsics": ["math-intrinsics@1.1.0", "", {}, "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g=="],
"media-typer": ["media-typer@1.1.0", "", {}, "sha512-aisnrDP4GNe06UcKFnV5bfMNPBUw4jsLGaWwWfnH3v02GnBuXX2MCVn5RbrWo0j3pczUilYblq7fQ7Nw2t5XKw=="],
"merge-descriptors": ["merge-descriptors@2.0.0", "", {}, "sha512-Snk314V5ayFLhp3fkUREub6WtjBfPdCPY1Ln8/8munuLuiYhsABgBVWsozAG+MWMbVEvcdcpbi9R7ww22l9Q3g=="],
"mime-db": ["mime-db@1.54.0", "", {}, "sha512-aU5EJuIN2WDemCcAp2vFBfp/m4EAhWJnUNSSw0ixs7/kXbd6Pg64EmwJkNdFhB8aWt1sH2CTXrLxo/iAGV3oPQ=="],
"mime-types": ["mime-types@3.0.2", "", { "dependencies": { "mime-db": "^1.54.0" } }, "sha512-Lbgzdk0h4juoQ9fCKXW4by0UJqj+nOOrI9MJ1sSj4nI8aI2eo1qmvQEie4VD1glsS250n15LsWsYtCugiStS5A=="],
"ms": ["ms@2.1.3", "", {}, "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="],
"negotiator": ["negotiator@1.0.0", "", {}, "sha512-8Ofs/AUQh8MaEcrlq5xOX0CQ9ypTF5dl78mjlMNfOK08fzpgTHQRQPBxcPlEtIw0yRpws+Zo/3r+5WRby7u3Gg=="],
"object-assign": ["object-assign@4.1.1", "", {}, "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg=="],
"object-inspect": ["object-inspect@1.13.4", "", {}, "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew=="],
"on-finished": ["on-finished@2.4.1", "", { "dependencies": { "ee-first": "1.1.1" } }, "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg=="],
"once": ["once@1.4.0", "", { "dependencies": { "wrappy": "1" } }, "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w=="],
"parseurl": ["parseurl@1.3.3", "", {}, "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ=="],
"path-key": ["path-key@3.1.1", "", {}, "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q=="],
"path-to-regexp": ["path-to-regexp@8.3.0", "", {}, "sha512-7jdwVIRtsP8MYpdXSwOS0YdD0Du+qOoF/AEPIt88PcCFrZCzx41oxku1jD88hZBwbNUIEfpqvuhjFaMAqMTWnA=="],
"pg": ["pg@8.17.1", "", { "dependencies": { "pg-connection-string": "^2.10.0", "pg-pool": "^3.11.0", "pg-protocol": "^1.11.0", "pg-types": "2.2.0", "pgpass": "1.0.5" }, "optionalDependencies": { "pg-cloudflare": "^1.3.0" }, "peerDependencies": { "pg-native": ">=3.0.1" }, "optionalPeers": ["pg-native"] }, "sha512-EIR+jXdYNSMOrpRp7g6WgQr7SaZNZfS7IzZIO0oTNEeibq956JxeD15t3Jk3zZH0KH8DmOIx38qJfQenoE8bXQ=="],
"pg-cloudflare": ["pg-cloudflare@1.3.0", "", {}, "sha512-6lswVVSztmHiRtD6I8hw4qP/nDm1EJbKMRhf3HCYaqud7frGysPv7FYJ5noZQdhQtN2xJnimfMtvQq21pdbzyQ=="],
"pg-connection-string": ["pg-connection-string@2.10.0", "", {}, "sha512-ur/eoPKzDx2IjPaYyXS6Y8NSblxM7X64deV2ObV57vhjsWiwLvUD6meukAzogiOsu60GO8m/3Cb6FdJsWNjwXg=="],
"pg-int8": ["pg-int8@1.0.1", "", {}, "sha512-WCtabS6t3c8SkpDBUlb1kjOs7l66xsGdKpIPZsg4wR+B3+u9UAum2odSsF9tnvxg80h4ZxLWMy4pRjOsFIqQpw=="],
"pg-pool": ["pg-pool@3.11.0", "", { "peerDependencies": { "pg": ">=8.0" } }, "sha512-MJYfvHwtGp870aeusDh+hg9apvOe2zmpZJpyt+BMtzUWlVqbhFmMK6bOBXLBUPd7iRtIF9fZplDc7KrPN3PN7w=="],
"pg-protocol": ["pg-protocol@1.11.0", "", {}, "sha512-pfsxk2M9M3BuGgDOfuy37VNRRX3jmKgMjcvAcWqNDpZSf4cUmv8HSOl5ViRQFsfARFn0KuUQTgLxVMbNq5NW3g=="],
"pg-types": ["pg-types@2.2.0", "", { "dependencies": { "pg-int8": "1.0.1", "postgres-array": "~2.0.0", "postgres-bytea": "~1.0.0", "postgres-date": "~1.0.4", "postgres-interval": "^1.1.0" } }, "sha512-qTAAlrEsl8s4OiEQY69wDvcMIdQN6wdz5ojQiOy6YRMuynxenON0O5oCpJI6lshc6scgAY8qvJ2On/p+CXY0GA=="],
"pgpass": ["pgpass@1.0.5", "", { "dependencies": { "split2": "^4.1.0" } }, "sha512-FdW9r/jQZhSeohs1Z3sI1yxFQNFvMcnmfuj4WBMUTxOrAyLMaTcE1aAMBiTlbMNaXvBCQuVi0R7hd8udDSP7ug=="],
"pkce-challenge": ["pkce-challenge@5.0.1", "", {}, "sha512-wQ0b/W4Fr01qtpHlqSqspcj3EhBvimsdh0KlHhH8HRZnMsEa0ea2fTULOXOS9ccQr3om+GcGRk4e+isrZWV8qQ=="],
"postgres-array": ["postgres-array@2.0.0", "", {}, "sha512-VpZrUqU5A69eQyW2c5CA1jtLecCsN2U/bD6VilrFDWq5+5UIEVO7nazS3TEcHf1zuPYO/sqGvUvW62g86RXZuA=="],
"postgres-bytea": ["postgres-bytea@1.0.1", "", {}, "sha512-5+5HqXnsZPE65IJZSMkZtURARZelel2oXUEO8rH83VS/hxH5vv1uHquPg5wZs8yMAfdv971IU+kcPUczi7NVBQ=="],
"postgres-date": ["postgres-date@1.0.7", "", {}, "sha512-suDmjLVQg78nMK2UZ454hAG+OAW+HQPZ6n++TNDUX+L0+uUlLywnoxJKDou51Zm+zTCjrCl0Nq6J9C5hP9vK/Q=="],
"postgres-interval": ["postgres-interval@1.2.0", "", { "dependencies": { "xtend": "^4.0.0" } }, "sha512-9ZhXKM/rw350N1ovuWHbGxnGh/SNJ4cnxHiM0rxE4VN41wsg8P8zWn9hv/buK00RP4WvlOyr/RBDiptyxVbkZQ=="],
"proxy-addr": ["proxy-addr@2.0.7", "", { "dependencies": { "forwarded": "0.2.0", "ipaddr.js": "1.9.1" } }, "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg=="],
"qs": ["qs@6.14.1", "", { "dependencies": { "side-channel": "^1.1.0" } }, "sha512-4EK3+xJl8Ts67nLYNwqw/dsFVnCf+qR7RgXSK9jEEm9unao3njwMDdmsdvoKBKHzxd7tCYz5e5M+SnMjdtXGQQ=="],
"range-parser": ["range-parser@1.2.1", "", {}, "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg=="],
"raw-body": ["raw-body@3.0.2", "", { "dependencies": { "bytes": "~3.1.2", "http-errors": "~2.0.1", "iconv-lite": "~0.7.0", "unpipe": "~1.0.0" } }, "sha512-K5zQjDllxWkf7Z5xJdV0/B0WTNqx6vxG70zJE4N0kBs4LovmEYWJzQGxC9bS9RAKu3bgM40lrd5zoLJ12MQ5BA=="],
"require-from-string": ["require-from-string@2.0.2", "", {}, "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw=="],
"router": ["router@2.2.0", "", { "dependencies": { "debug": "^4.4.0", "depd": "^2.0.0", "is-promise": "^4.0.0", "parseurl": "^1.3.3", "path-to-regexp": "^8.0.0" } }, "sha512-nLTrUKm2UyiL7rlhapu/Zl45FwNgkZGaCpZbIHajDYgwlJCOzLSk+cIPAnsEqV955GjILJnKbdQC1nVPz+gAYQ=="],
"safe-buffer": ["safe-buffer@5.2.1", "", {}, "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ=="],
"safer-buffer": ["safer-buffer@2.1.2", "", {}, "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg=="],
"semver": ["semver@7.7.3", "", { "bin": { "semver": "bin/semver.js" } }, "sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q=="],
"send": ["send@1.2.1", "", { "dependencies": { "debug": "^4.4.3", "encodeurl": "^2.0.0", "escape-html": "^1.0.3", "etag": "^1.8.1", "fresh": "^2.0.0", "http-errors": "^2.0.1", "mime-types": "^3.0.2", "ms": "^2.1.3", "on-finished": "^2.4.1", "range-parser": "^1.2.1", "statuses": "^2.0.2" } }, "sha512-1gnZf7DFcoIcajTjTwjwuDjzuz4PPcY2StKPlsGAQ1+YH20IRVrBaXSWmdjowTJ6u8Rc01PoYOGHXfP1mYcZNQ=="],
"serve-static": ["serve-static@2.2.1", "", { "dependencies": { "encodeurl": "^2.0.0", "escape-html": "^1.0.3", "parseurl": "^1.3.3", "send": "^1.2.0" } }, "sha512-xRXBn0pPqQTVQiC8wyQrKs2MOlX24zQ0POGaj0kultvoOCstBQM5yvOhAVSUwOMjQtTvsPWoNCHfPGwaaQJhTw=="],
"setprototypeof": ["setprototypeof@1.2.0", "", {}, "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw=="],
"shebang-command": ["shebang-command@2.0.0", "", { "dependencies": { "shebang-regex": "^3.0.0" } }, "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA=="],
"shebang-regex": ["shebang-regex@3.0.0", "", {}, "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A=="],
"side-channel": ["side-channel@1.1.0", "", { "dependencies": { "es-errors": "^1.3.0", "object-inspect": "^1.13.3", "side-channel-list": "^1.0.0", "side-channel-map": "^1.0.1", "side-channel-weakmap": "^1.0.2" } }, "sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw=="],
"side-channel-list": ["side-channel-list@1.0.0", "", { "dependencies": { "es-errors": "^1.3.0", "object-inspect": "^1.13.3" } }, "sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA=="],
"side-channel-map": ["side-channel-map@1.0.1", "", { "dependencies": { "call-bound": "^1.0.2", "es-errors": "^1.3.0", "get-intrinsic": "^1.2.5", "object-inspect": "^1.13.3" } }, "sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA=="],
"side-channel-weakmap": ["side-channel-weakmap@1.0.2", "", { "dependencies": { "call-bound": "^1.0.2", "es-errors": "^1.3.0", "get-intrinsic": "^1.2.5", "object-inspect": "^1.13.3", "side-channel-map": "^1.0.1" } }, "sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A=="],
"split2": ["split2@4.2.0", "", {}, "sha512-UcjcJOWknrNkF6PLX83qcHM6KHgVKNkV62Y8a5uYDVv9ydGQVwAHMKqHdJje1VTWpljG0WYpCDhrCdAOYH4TWg=="],
"statuses": ["statuses@2.0.2", "", {}, "sha512-DvEy55V3DB7uknRo+4iOGT5fP1slR8wQohVdknigZPMpMstaKJQWhwiYBACJE3Ul2pTnATihhBYnRhZQHGBiRw=="],
"toidentifier": ["toidentifier@1.0.1", "", {}, "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA=="],
"tslib": ["tslib@2.8.1", "", {}, "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w=="],
"type-is": ["type-is@2.0.1", "", { "dependencies": { "content-type": "^1.0.5", "media-typer": "^1.1.0", "mime-types": "^3.0.0" } }, "sha512-OZs6gsjF4vMp32qrCbiVSkrFmXtG/AZhY3t0iAMrMBiAZyV9oALtXO8hsrHbMXF9x6L3grlFuwW2oAz7cav+Gw=="],
"typescript": ["typescript@5.9.3", "", { "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" } }, "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw=="],
"undici-types": ["undici-types@6.21.0", "", {}, "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ=="],
"unpipe": ["unpipe@1.0.0", "", {}, "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ=="],
"vary": ["vary@1.1.2", "", {}, "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg=="],
"which": ["which@2.0.2", "", { "dependencies": { "isexe": "^2.0.0" }, "bin": { "node-which": "./bin/node-which" } }, "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA=="],
"wrappy": ["wrappy@1.0.2", "", {}, "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ=="],
"ws": ["ws@8.19.0", "", { "peerDependencies": { "bufferutil": "^4.0.1", "utf-8-validate": ">=5.0.2" }, "optionalPeers": ["bufferutil", "utf-8-validate"] }, "sha512-blAT2mjOEIi0ZzruJfIhb3nps74PRWTCz1IjglWEEpQl5XS/UNama6u2/rjFkDDouqr4L67ry+1aGIALViWjDg=="],
"xtend": ["xtend@4.0.2", "", {}, "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ=="],
"zod": ["zod@4.3.5", "", {}, "sha512-k7Nwx6vuWx1IJ9Bjuf4Zt1PEllcwe7cls3VNzm4CQ1/hgtFUK2bRNG3rvnpPUhFjmqJKAKtjV576KnUkHocg/g=="],
"zod-to-json-schema": ["zod-to-json-schema@3.25.1", "", { "peerDependencies": { "zod": "^3.25 || ^4" } }, "sha512-pM/SU9d3YAggzi6MtR4h7ruuQlqKtad8e9S0fmxcMi+ueAK5Korys/aWcV9LIIHTVbj01NdzxcnXSN+O74ZIVA=="],
}
}

View File

@ -0,0 +1,4 @@
{
"enabledTools": [
]
}

View File

@ -0,0 +1,123 @@
# Notes for Minimal Selfhosted Supabase MCP Server
This document summarizes the findings and decisions made while attempting to adapt the official Supabase cloud MCP server for self-hosted use. The goal is to build a new, minimal server from scratch using these notes.
## Core Requirements
- **Target:** Self-hosted Supabase instances.
- **Scope:** Single project environment.
- **Runtime:** Bun (v1.3+) - migrated from Node.js for faster startup and built-in TypeScript support.
- **Authentication:** Supabase URL and Anon Key required. Service Role Key optional (but recommended for certain operations like auto-creating helper functions).
- **Configuration:** Server should accept URL/Keys via CLI arguments (e.g., using `commander`) or environment variables (`SUPABASE_URL`, `SUPABASE_ANON_KEY`, `SUPABASE_SERVICE_ROLE_KEY`). Also needs `DATABASE_URL` for direct DB fallback/transactions.
## Client Implementation (`SelfhostedSupabaseClient`)
- **Primary Connection:** Use `@supabase/supabase-js` client initialized with user-provided URL and Anon Key.
- **Core SQL Execution:**
- Prefer using a PostgreSQL RPC function (`public.execute_sql`) called via the Supabase JS client (`supabase.rpc(...)`). This leverages the existing connection pool and permissions.
- The client should check if this function exists on initialization. If not found (error code `42883`), and if the `serviceRoleKey` is available, attempt to create the function using a temporary service role client or the direct DB connection.
- **RPC Function SQL:**
```sql
-- SQL to create the helper function
CREATE OR REPLACE FUNCTION public.execute_sql(query text, read_only boolean DEFAULT false)
RETURNS jsonb -- Using jsonb is generally preferred over json
LANGUAGE plpgsql
AS $$
DECLARE
result jsonb;
BEGIN
-- Note: SET TRANSACTION READ ONLY might not behave as expected within a function
-- depending on the outer transaction state. Handle read-only logic outside if needed.
-- Execute the dynamic query and aggregate results into a JSONB array
EXECUTE 'SELECT COALESCE(jsonb_agg(t), ''[]''::jsonb) FROM (' || query || ') t' INTO result;
RETURN result;
EXCEPTION
WHEN others THEN
-- Rethrow the error with context
RAISE EXCEPTION 'Error executing SQL: %', SQLERRM;
END;
$$;
-- Grant execution permission (run using service key or manually)
GRANT EXECUTE ON FUNCTION public.execute_sql(text, boolean) TO authenticated;
-- Optionally grant to anon if needed: GRANT EXECUTE ON FUNCTION public.execute_sql(text, boolean) TO anon;
```
- **Fallback/Transactional SQL Execution:**
- Implement a secondary method (`executeSqlWithPg`) using the `pg` library (Node-postgres).
- This method requires the `DATABASE_URL` environment variable to be set for the direct connection string.
- Initialize the `pg.Pool` lazily on the first call to this method.
- Use this method as a fallback if the RPC call fails *after* initialization, or specifically for operations requiring transaction control (like `apply_migration`).
- **Client Initialization:** The factory function (`createSelfhostedSupabaseClient`) should be `async` and perform the RPC check/create logic during an `await client.initialize()` step before returning the client instance.
- **Type Safety:** Use specific types (`unknown`, `Record<string, unknown>`) instead of `any`. Leverage TypeScript type inference and define types for options and responses.
## Supported Tools
Based on analysis, the following tools are relevant for a self-hosted context:
* **Database Operations:**
* `list_tables` (Uses `pg-meta` logic)
* `list_extensions` (Uses `pg-meta` logic)
* `list_migrations` (Queries `supabase_migrations.schema_migrations`)
* `apply_migration` (Executes DDL + inserts into `supabase_migrations.schema_migrations`; ideally uses `executeSqlWithPg` for transaction)
* `execute_sql` (Primary interface to `SelfhostedSupabaseClient.executeSql`)
* **Debugging:**
* `get_logs` (Needs careful implementation; direct DB query of `pg_log` might be feasible but depends on setup. May need to be removed or simplified).
* `get_database_connections` (Queries `pg_stat_activity`)
* `get_database_stats` (Queries `pg_stat_*` views)
* **Development & Configuration:**
* `get_project_url` (Returns configured URL)
* `generate_typescript_types` (Relies on DB introspection, potentially wrap `supabase gen types` or use `pg-meta`)
* `rebuild_hooks` (Interacts with `pg_net` if database webhooks are used)
* `verify_jwt_secret` (Useful for Auth debugging)
* ~~`get_anon_key`~~ (Removed - security risk: exposes sensitive keys via MCP)
* ~~`get_service_key`~~ (Removed - security risk: exposes sensitive keys via MCP)
* **Edge Functions (If Enabled):**
* `list_edge_functions`
* `deploy_edge_function`
## Removed Tools (Cloud-Specific)
The following tools from the original cloud server are not applicable and should *not* be implemented:
* Project Management (`list_projects`, `create_project`, etc.)
* Branching (`create_branch`, `list_branches`, etc.)
* Cost Confirmation (`get_cost`, `confirm_cost`)
## Server Entry Point (`selfhosted-stdio.ts`)
- Use `commander` for parsing CLI arguments (`--url`, `--anon-key`, etc.) and reading environment variables as fallbacks.
- Implement an `async main()` function.
- Call the `async createSelfhostedSupabaseClient` factory.
- Create tool instances by passing the initialized `selfhostedClient` to tool generator functions (e.g., `getDatabaseOperationTools({ selfhostedClient, readOnly })`).
- Initialize the MCP SDK (`@modelcontextprotocol/sdk`) with `stdio: true` and the combined dictionary of tool instances.
- Include robust error handling for client initialization and server startup.
## Dependencies
- **Runtime:** Bun v1.3+ (replaces Node.js)
- **Core:** `@supabase/supabase-js`, `pg`, `zod` (v4.x), `commander`, `@modelcontextprotocol/sdk`.
- **Dev:** `@types/bun`, `@types/node`, `@types/pg`, `typescript`
## Zod v4 Compatibility Notes
Updated for Zod v4 breaking changes:
- `z.ZodError.errors` → `z.ZodError.issues`
- `z.record(z.unknown())` → `z.record(z.string(), z.unknown())`
- `z.string().optional()` → `z.optional(z.string())` for better type inference
- `z.coerce` API changes handled with `z.union().transform()`
## Useful Logic to Re-use
- SQL generation logic from `packages/pg-meta` (e.g., `listTablesSql`, `listExtensionsSql`).
- The `injectableTool` utility from `tools/util.ts` for structuring tool definitions with Zod schemas.
## Reason for Restart
Adapting the official cloud MCP server proved overly complex due to:
- Deep integration with the multi-project/Management API paradigm.
- Need for extensive refactoring of options and logic paths.
- Inherited build complexities and dependencies not strictly necessary for a minimal self-hosted server.
Building from scratch allows for a cleaner, more focused implementation tailored specifically to the self-hosted use case.

View File

@ -0,0 +1,48 @@
{
"name": "self-hosted-supabase-mcp",
"version": "1.2.0",
"description": "MCP server for self-hosted Supabase instances with 50+ database management tools",
"main": "dist/index.js",
"scripts": {
"build": "bun build src/index.ts --outdir dist --target bun",
"start": "bun run dist/index.js",
"dev": "bun --watch src/index.ts",
"test": "bun test",
"test:watch": "bun test --watch",
"test:coverage": "bun test --coverage",
"typecheck": "bunx tsc --noEmit"
},
"keywords": [
"mcp",
"supabase",
"self-hosted",
"database",
"postgres",
"model-context-protocol",
"bun"
],
"author": "HenkDz",
"repository": {
"type": "git",
"url": "https://github.com/HenkDz/selfhosted-supabase-mcp"
},
"license": "MIT",
"type": "module",
"dependencies": {
"@modelcontextprotocol/sdk": "^1.25.2",
"@supabase/supabase-js": "^2.90.1",
"commander": "^14.0.2",
"express": "^5.2.1",
"jsonwebtoken": "^9.0.3",
"pg": "^8.17.1",
"zod": "^4.3.5"
},
"devDependencies": {
"@types/bun": "latest",
"@types/express": "^5.0.6",
"@types/jsonwebtoken": "^9.0.10",
"@types/node": "^22.19.7",
"@types/pg": "^8.16.0",
"typescript": "^5.9.3"
}
}

View File

@ -0,0 +1,59 @@
# Smithery configuration file: https://smithery.ai/docs/build/project-config
startCommand:
type: stdio
configSchema:
# JSON Schema defining the configuration options for the MCP.
type: object
required:
- supabaseUrl
- supabaseAnonKey
properties:
supabaseUrl:
type: string
description: Self-hosted Supabase HTTP URL
supabaseAnonKey:
type: string
description: Supabase anonymous key
supabaseServiceRoleKey:
type: string
description: Supabase service role key (optional)
databaseUrl:
type: string
description: Direct PostgreSQL connection string (optional)
supabaseAuthJwtSecret:
type: string
description: Supabase JWT secret (optional)
toolsConfig:
type: string
description: Path to tools config JSON (optional)
commandFunction:
# A JS function that produces the CLI command based on the given config to start the MCP on stdio.
|-
(config) => {
const args = [
'dist/index.js',
'--url', config.supabaseUrl,
'--anon-key', config.supabaseAnonKey
];
if (config.supabaseServiceRoleKey) {
args.push('--service-key', config.supabaseServiceRoleKey);
}
if (config.databaseUrl) {
args.push('--db-url', config.databaseUrl);
}
if (config.supabaseAuthJwtSecret) {
args.push('--jwt-secret', config.supabaseAuthJwtSecret);
}
if (config.toolsConfig) {
args.push('--tools-config', config.toolsConfig);
}
return { command: 'bun', args }; // runtime is Bun: dist is built with `bun build --target bun` and may not run under node
}
exampleConfig:
supabaseUrl: http://localhost:8000
supabaseAnonKey: example-anon-key
supabaseServiceRoleKey: example-service-key
databaseUrl: postgresql://postgres:password@localhost:5432/postgres
supabaseAuthJwtSecret: example-jwt-secret
toolsConfig: ./mcp-tools.json

View File

@ -0,0 +1,443 @@
/**
* Tests for SelfhostedSupabaseClient
*
* These tests verify the core client functionality including:
* - Client initialization and validation
* - SQL execution via RPC
* - SQL execution via direct pg connection
* - Transaction handling
* - Getter methods
*/
import { describe, test, expect, mock, beforeEach, spyOn } from 'bun:test';
import { SelfhostedSupabaseClient } from '../../client/index.js';
import type { SelfhostedSupabaseClientOptions } from '../../types/index.js';
// Mock the external dependencies.
// mock.module() replaces these packages for the whole test file, so every
// SelfhostedSupabaseClient.create() call below receives these doubles.
const mockSupabaseClient = {
rpc: mock(() => Promise.resolve({ data: [], error: null })),
};
const mockCreateClient = mock(() => mockSupabaseClient);
// Mock @supabase/supabase-js
mock.module('@supabase/supabase-js', () => ({
createClient: mockCreateClient,
}));
// Mock pg Pool.
// pool.connect() always hands out the same shared mockPoolClient, so tests can
// assert on its query/release calls directly.
const mockPoolClient = {
query: mock(() => Promise.resolve({ rows: [] })),
release: mock(() => {}),
};
const mockPool = {
connect: mock(() => Promise.resolve(mockPoolClient)),
end: mock(() => Promise.resolve()),
on: mock(() => {}),
};
const mockPoolConstructor = mock(() => mockPool);
mock.module('pg', () => ({
Pool: mockPoolConstructor,
}));
describe('SelfhostedSupabaseClient', () => {
// Fully-populated baseline options; individual tests omit fields selectively.
const validOptions: SelfhostedSupabaseClientOptions = {
supabaseUrl: 'https://test.supabase.co',
supabaseAnonKey: 'test-anon-key',
supabaseServiceRoleKey: 'test-service-role-key',
databaseUrl: 'postgresql://postgres:postgres@localhost:5432/postgres',
jwtSecret: 'test-jwt-secret',
};
beforeEach(() => {
// Reset all mocks
mockCreateClient.mockClear();
mockSupabaseClient.rpc.mockClear();
mockPool.connect.mockClear();
mockPool.end.mockClear();
mockPoolClient.query.mockClear();
mockPoolClient.release.mockClear();
// Reset to default successful behavior
mockSupabaseClient.rpc.mockImplementation(() =>
Promise.resolve({ data: [], error: null })
);
mockPoolClient.query.mockImplementation(() =>
Promise.resolve({ rows: [] })
);
});
// Construction and option validation.
describe('create() factory method', () => {
test('creates client with valid options', async () => {
const client = await SelfhostedSupabaseClient.create(validOptions);
expect(client).toBeDefined();
expect(mockCreateClient).toHaveBeenCalledWith(
validOptions.supabaseUrl,
validOptions.supabaseAnonKey,
undefined
);
});
test('throws error when supabaseUrl is missing', async () => {
const invalidOptions = {
...validOptions,
supabaseUrl: '',
};
await expect(SelfhostedSupabaseClient.create(invalidOptions)).rejects.toThrow();
});
test('throws error when supabaseAnonKey is missing', async () => {
const invalidOptions = {
...validOptions,
supabaseAnonKey: '',
};
await expect(SelfhostedSupabaseClient.create(invalidOptions)).rejects.toThrow();
});
});
// Simple accessors; each reflects the corresponding create() option.
describe('getters', () => {
test('getSupabaseUrl returns configured URL', async () => {
const client = await SelfhostedSupabaseClient.create(validOptions);
expect(client.getSupabaseUrl()).toBe(validOptions.supabaseUrl);
});
test('getAnonKey returns configured anon key', async () => {
const client = await SelfhostedSupabaseClient.create(validOptions);
expect(client.getAnonKey()).toBe(validOptions.supabaseAnonKey);
});
test('getServiceRoleKey returns configured service role key', async () => {
const client = await SelfhostedSupabaseClient.create(validOptions);
expect(client.getServiceRoleKey()).toBe(validOptions.supabaseServiceRoleKey);
});
test('getServiceRoleKey returns undefined when not configured', async () => {
const optionsWithoutServiceKey = {
supabaseUrl: validOptions.supabaseUrl,
supabaseAnonKey: validOptions.supabaseAnonKey,
};
const client = await SelfhostedSupabaseClient.create(optionsWithoutServiceKey);
expect(client.getServiceRoleKey()).toBeUndefined();
});
test('getJwtSecret returns configured JWT secret', async () => {
const client = await SelfhostedSupabaseClient.create(validOptions);
expect(client.getJwtSecret()).toBe(validOptions.jwtSecret);
});
test('getDbUrl returns configured database URL', async () => {
const client = await SelfhostedSupabaseClient.create(validOptions);
expect(client.getDbUrl()).toBe(validOptions.databaseUrl);
});
test('isPgAvailable returns true when databaseUrl is configured', async () => {
const client = await SelfhostedSupabaseClient.create(validOptions);
expect(client.isPgAvailable()).toBe(true);
});
test('isPgAvailable returns false when databaseUrl is not configured', async () => {
const optionsWithoutDb = {
supabaseUrl: validOptions.supabaseUrl,
supabaseAnonKey: validOptions.supabaseAnonKey,
};
const client = await SelfhostedSupabaseClient.create(optionsWithoutDb);
expect(client.isPgAvailable()).toBe(false);
});
});
// RPC-based SQL path. NOTE: create() itself issues one rpc() call (the
// execute_sql existence check), so several tests below gate their mock
// behavior on call order via callCount.
describe('executeSqlViaRpc', () => {
test('returns success response for valid query', async () => {
const expectedData = [{ id: 1, name: 'test' }];
mockSupabaseClient.rpc.mockImplementation(() =>
Promise.resolve({ data: expectedData, error: null })
);
const client = await SelfhostedSupabaseClient.create(validOptions);
const result = await client.executeSqlViaRpc('SELECT * FROM users');
expect(result).toEqual(expectedData);
expect(mockSupabaseClient.rpc).toHaveBeenCalledWith('execute_sql', {
query: 'SELECT * FROM users',
read_only: false,
});
});
test('passes read_only parameter correctly', async () => {
mockSupabaseClient.rpc.mockImplementation(() =>
Promise.resolve({ data: [], error: null })
);
const client = await SelfhostedSupabaseClient.create(validOptions);
await client.executeSqlViaRpc('SELECT 1', true);
expect(mockSupabaseClient.rpc).toHaveBeenCalledWith('execute_sql', {
query: 'SELECT 1',
read_only: true,
});
});
test('returns error response when RPC fails', async () => {
// First call succeeds (initialization check), second call fails
let callCount = 0;
mockSupabaseClient.rpc.mockImplementation(() => {
callCount++;
if (callCount === 1) {
// Initialization check succeeds
return Promise.resolve({ data: [], error: null });
}
// Actual query fails
return Promise.resolve({
data: null,
error: {
message: 'Query failed',
code: 'P0001',
details: 'Some details',
hint: 'Try something else',
},
});
});
const client = await SelfhostedSupabaseClient.create(validOptions);
const result = await client.executeSqlViaRpc('INVALID SQL');
expect(result).toHaveProperty('error');
expect((result as { error: { message: string } }).error.message).toBe('Query failed');
expect((result as { error: { code: string } }).error.code).toBe('P0001');
});
test('returns error when RPC function does not exist', async () => {
// First call during initialization - function doesn't exist
// 42883 is Postgres "undefined_function"; with no service key and no
// databaseUrl the client cannot auto-create execute_sql.
mockSupabaseClient.rpc.mockImplementation(() =>
Promise.resolve({
data: null,
error: { message: 'Function not found', code: '42883' },
})
);
const client = await SelfhostedSupabaseClient.create({
...validOptions,
supabaseServiceRoleKey: undefined,
databaseUrl: undefined,
});
const result = await client.executeSqlViaRpc('SELECT 1');
expect(result).toHaveProperty('error');
expect((result as { error: { message: string } }).error.message).toContain(
'execute_sql RPC function not found'
);
});
test('handles unexpected response format', async () => {
// First call succeeds (initialization), second returns bad format
let callCount = 0;
mockSupabaseClient.rpc.mockImplementation(() => {
callCount++;
if (callCount === 1) {
return Promise.resolve({ data: [], error: null });
}
return Promise.resolve({ data: 'not an array', error: null });
});
const client = await SelfhostedSupabaseClient.create(validOptions);
const result = await client.executeSqlViaRpc('SELECT 1');
expect(result).toHaveProperty('error');
expect((result as { error: { code: string } }).error.code).toBe('MCP_RPC_FORMAT_ERROR');
});
test('handles RPC exceptions during query', async () => {
// First call succeeds (initialization), second throws
let callCount = 0;
mockSupabaseClient.rpc.mockImplementation(() => {
callCount++;
if (callCount === 1) {
return Promise.resolve({ data: [], error: null });
}
return Promise.reject(new Error('Network error'));
});
const client = await SelfhostedSupabaseClient.create(validOptions);
const result = await client.executeSqlViaRpc('SELECT 1');
expect(result).toHaveProperty('error');
expect((result as { error: { code: string } }).error.code).toBe('MCP_RPC_EXCEPTION');
expect((result as { error: { message: string } }).error.message).toContain('Network error');
});
});
// Direct pg path: exercises the lazily-created Pool and client release.
describe('executeSqlWithPg', () => {
test('returns success response for valid query', async () => {
const expectedRows = [{ id: 1, name: 'test' }];
mockPoolClient.query.mockImplementation(() =>
Promise.resolve({ rows: expectedRows })
);
const client = await SelfhostedSupabaseClient.create(validOptions);
const result = await client.executeSqlWithPg('SELECT * FROM users');
expect(result).toEqual(expectedRows);
});
test('returns error when databaseUrl is not configured', async () => {
const optionsWithoutDb = {
supabaseUrl: validOptions.supabaseUrl,
supabaseAnonKey: validOptions.supabaseAnonKey,
};
const client = await SelfhostedSupabaseClient.create(optionsWithoutDb);
const result = await client.executeSqlWithPg('SELECT 1');
expect(result).toHaveProperty('error');
expect((result as { error: { message: string } }).error.message).toContain(
'DATABASE_URL is not configured'
);
});
test('handles database errors', async () => {
const dbError = new Error('Connection refused') as Error & { code: string };
dbError.code = 'ECONNREFUSED';
mockPoolClient.query.mockImplementation(() => Promise.reject(dbError));
const client = await SelfhostedSupabaseClient.create(validOptions);
const result = await client.executeSqlWithPg('SELECT 1');
expect(result).toHaveProperty('error');
expect((result as { error: { message: string } }).error.message).toContain(
'Connection refused'
);
expect((result as { error: { code: string } }).error.code).toBe('ECONNREFUSED');
});
test('releases client after successful query', async () => {
mockPoolClient.query.mockImplementation(() =>
Promise.resolve({ rows: [] })
);
const client = await SelfhostedSupabaseClient.create(validOptions);
await client.executeSqlWithPg('SELECT 1');
expect(mockPoolClient.release).toHaveBeenCalled();
});
test('releases client after failed query', async () => {
mockPoolClient.query.mockImplementation(() =>
Promise.reject(new Error('Query failed'))
);
const client = await SelfhostedSupabaseClient.create(validOptions);
await client.executeSqlWithPg('SELECT 1');
expect(mockPoolClient.release).toHaveBeenCalled();
});
});
// Transaction helper: verifies BEGIN/COMMIT/ROLLBACK ordering and release.
describe('executeTransactionWithPg', () => {
test('commits transaction on success', async () => {
const expectedResult = { success: true };
mockPoolClient.query.mockImplementation(() =>
Promise.resolve({ rows: [] })
);
const client = await SelfhostedSupabaseClient.create(validOptions);
const result = await client.executeTransactionWithPg(async (pgClient) => {
await pgClient.query('INSERT INTO users (name) VALUES ($1)', ['test']);
return expectedResult;
});
expect(result).toEqual(expectedResult);
// Check that BEGIN was called
expect(mockPoolClient.query).toHaveBeenCalledWith('BEGIN');
// Check that COMMIT was called
expect(mockPoolClient.query).toHaveBeenCalledWith('COMMIT');
});
test('rolls back transaction on failure', async () => {
let beginCalled = false;
mockPoolClient.query.mockImplementation((query: string) => {
if (query === 'BEGIN') {
beginCalled = true;
return Promise.resolve({ rows: [] });
}
if (query === 'ROLLBACK') {
return Promise.resolve({ rows: [] });
}
if (query === 'COMMIT') {
return Promise.resolve({ rows: [] });
}
// Fail on the actual operation
return Promise.reject(new Error('Insert failed'));
});
const client = await SelfhostedSupabaseClient.create(validOptions);
await expect(
client.executeTransactionWithPg(async (pgClient) => {
await pgClient.query('INSERT INTO users (name) VALUES ($1)', ['test']);
})
).rejects.toThrow('Insert failed');
expect(beginCalled).toBe(true);
expect(mockPoolClient.query).toHaveBeenCalledWith('ROLLBACK');
});
test('throws error when databaseUrl is not configured', async () => {
const optionsWithoutDb = {
supabaseUrl: validOptions.supabaseUrl,
supabaseAnonKey: validOptions.supabaseAnonKey,
};
const client = await SelfhostedSupabaseClient.create(optionsWithoutDb);
await expect(
client.executeTransactionWithPg(async () => {})
).rejects.toThrow('DATABASE_URL is not configured');
});
test('releases client after transaction', async () => {
mockPoolClient.query.mockImplementation(() =>
Promise.resolve({ rows: [] })
);
const client = await SelfhostedSupabaseClient.create(validOptions);
await client.executeTransactionWithPg(async () => {});
expect(mockPoolClient.release).toHaveBeenCalled();
});
test('releases client after failed transaction', async () => {
mockPoolClient.query.mockImplementation((query: string) => {
if (query === 'BEGIN' || query === 'ROLLBACK') {
return Promise.resolve({ rows: [] });
}
return Promise.reject(new Error('Failed'));
});
const client = await SelfhostedSupabaseClient.create(validOptions);
try {
await client.executeTransactionWithPg(async (pgClient) => {
await pgClient.query('FAIL');
});
} catch {
// Expected to throw
}
expect(mockPoolClient.release).toHaveBeenCalled();
});
});
describe('supabase client access', () => {
test('exposes supabase client instance', async () => {
const client = await SelfhostedSupabaseClient.create(validOptions);
expect(client.supabase).toBeDefined();
expect(client.supabase).toBe(mockSupabaseClient);
});
});
});

View File

@ -0,0 +1,265 @@
/**
* Shared test mocks and helpers for the selfhosted-supabase-mcp test suite.
*/
import { mock } from 'bun:test';
import type { SelfhostedSupabaseClient } from '../../client/index.js';
import type { ToolContext } from '../../tools/types.js';
import type { SqlExecutionResult, SqlSuccessResponse, SqlErrorResponse } from '../../types/index.js';
/**
 * Options for creating a mock SelfhostedSupabaseClient.
 *
 * All fields are optional; createMockClient() supplies sensible defaults.
 * pgAvailable / serviceRoleAvailable control the corresponding is*Available()
 * predicates, and the *Result fields are what the execute* mocks resolve to.
 */
export interface MockClientOptions {
pgAvailable?: boolean;
serviceRoleAvailable?: boolean;
rpcResult?: SqlExecutionResult;
pgResult?: SqlExecutionResult;
serviceRoleRpcResult?: SqlExecutionResult;
supabaseUrl?: string;
anonKey?: string;
serviceRoleKey?: string;
jwtSecret?: string;
dbUrl?: string;
supabaseClient?: MockSupabaseClient;
}
/**
 * Mock Supabase client type for auth operations.
 *
 * Mirrors the subset of the supabase-js surface the tools use:
 * auth.admin user management plus rpc(). All members are bun:test mocks.
 */
export interface MockSupabaseClient {
auth: {
admin: {
listUsers: ReturnType<typeof mock>;
getUserById: ReturnType<typeof mock>;
createUser: ReturnType<typeof mock>;
updateUserById: ReturnType<typeof mock>;
deleteUser: ReturnType<typeof mock>;
};
};
rpc: ReturnType<typeof mock>;
}
/**
 * Creates a mock Supabase client with configurable auth admin methods.
 *
 * Overrides are merged deeply for auth.admin: passing only e.g. listUsers
 * keeps the default mocks for the other admin methods. Top-level overrides
 * (e.g. rpc) replace the defaults.
 *
 * Bug fix: the previous implementation spread `...overrides` last, which
 * replaced the entire `auth` object and silently discarded the deep
 * `...overrides?.auth?.admin` merge performed above it. We now exclude
 * `auth` from the top-level spread so the merged admin object survives.
 */
export function createMockSupabaseClient(overrides?: Partial<MockSupabaseClient>): MockSupabaseClient {
    // Strip `auth` from the shallow override set; it is handled by the deep merge below.
    const { auth: _auth, ...topLevelOverrides } = overrides ?? {};
    return {
        auth: {
            admin: {
                listUsers: mock(() => Promise.resolve({ data: { users: [] }, error: null })),
                getUserById: mock(() => Promise.resolve({ data: { user: null }, error: null })),
                createUser: mock(() => Promise.resolve({ data: { user: null }, error: null })),
                updateUserById: mock(() => Promise.resolve({ data: { user: null }, error: null })),
                deleteUser: mock(() => Promise.resolve({ data: null, error: null })),
                ...overrides?.auth?.admin,
            },
        },
        rpc: mock(() => Promise.resolve({ data: [], error: null })),
        ...topLevelOverrides,
    };
}
/**
 * Creates a mock SelfhostedSupabaseClient for testing tools.
 *
 * Returns an object-literal double cast to SelfhostedSupabaseClient: the
 * execute* methods are bun:test mocks resolving to the configured results,
 * and the getters reflect the supplied option values. The cast means only
 * the members listed here exist — tools touching anything else will fail.
 */
export function createMockClient(options: MockClientOptions = {}): SelfhostedSupabaseClient {
const {
pgAvailable = true,
serviceRoleAvailable = true,
rpcResult = [] as SqlSuccessResponse,
pgResult = [] as SqlSuccessResponse,
serviceRoleRpcResult = [] as SqlSuccessResponse,
supabaseUrl = 'https://test.supabase.co',
anonKey = 'test-anon-key',
serviceRoleKey = 'test-service-role-key',
jwtSecret = 'test-jwt-secret',
dbUrl = 'postgresql://test:test@localhost:5432/test',
supabaseClient = createMockSupabaseClient(),
} = options;
// Create a mock that satisfies the SelfhostedSupabaseClient interface
const mockClient = {
supabase: supabaseClient,
executeSqlViaRpc: mock(async (_query: string, _readOnly?: boolean) => rpcResult),
executeSqlWithPg: mock(async (_query: string) => pgResult),
executeSqlViaServiceRoleRpc: mock(async (_query: string, _readOnly?: boolean) => serviceRoleRpcResult),
// Transactions run the callback against a throwaway pg-client double.
executeTransactionWithPg: mock(async <T>(callback: (client: unknown) => Promise<T>) => {
const mockPgClient = {
query: mock(() => Promise.resolve({ rows: [] })),
};
return callback(mockPgClient);
}),
isPgAvailable: () => pgAvailable,
isServiceRoleAvailable: () => serviceRoleAvailable,
getSupabaseUrl: () => supabaseUrl,
getAnonKey: () => anonKey,
// Empty-string option values are reported as "not configured" (undefined).
getServiceRoleKey: () => (serviceRoleKey ? serviceRoleKey : undefined),
getJwtSecret: () => (jwtSecret ? jwtSecret : undefined),
getDbUrl: () => (pgAvailable ? dbUrl : undefined),
} as unknown as SelfhostedSupabaseClient;
return mockClient;
}
/**
 * Creates a mock ToolContext for testing tool execute functions.
 *
 * Uses the supplied client when given, otherwise a default mock client;
 * the log function is a bun:test mock so calls can be asserted.
 */
export function createMockContext(client?: SelfhostedSupabaseClient): ToolContext {
    const selfhostedClient = client ?? createMockClient();
    const log = mock((_message: string, _level?: 'info' | 'warn' | 'error') => {});
    return {
        selfhostedClient,
        log,
        workspacePath: '/test/workspace',
    };
}
/**
 * Creates a SQL success response.
 *
 * A SqlSuccessResponse is simply the array of result rows; this helper
 * exists for symmetry with createErrorResponse and for type clarity.
 */
export function createSuccessResponse(rows: Record<string, unknown>[]): SqlSuccessResponse {
return rows;
}
/**
 * Creates a SQL error response.
 *
 * Builds the { error: { message, code, details, hint } } envelope used by
 * the client's SQL execution paths; omitted fields stay undefined.
 */
export function createErrorResponse(
    message: string,
    code?: string,
    details?: string,
    hint?: string
): SqlErrorResponse {
    const error = { message, code, details, hint };
    return { error };
}
/**
 * Sample test data for various entity types.
 *
 * Shapes mirror what the corresponding SQL queries / Supabase APIs return
 * (auth users, information_schema tables, extensions, storage buckets and
 * objects, migrations, pg_stat_activity connections).
 */
export const testData = {
users: [
{
id: '550e8400-e29b-41d4-a716-446655440001',
email: 'test1@example.com',
role: 'authenticated',
created_at: '2024-01-01T00:00:00Z',
last_sign_in_at: '2024-01-15T12:00:00Z',
raw_app_meta_data: { provider: 'email' },
raw_user_meta_data: { name: 'Test User 1' },
},
// Second user deliberately has no sign-in and empty metadata.
{
id: '550e8400-e29b-41d4-a716-446655440002',
email: 'test2@example.com',
role: 'authenticated',
created_at: '2024-01-02T00:00:00Z',
last_sign_in_at: null,
raw_app_meta_data: {},
raw_user_meta_data: {},
},
],
tables: [
{
table_schema: 'public',
table_name: 'users',
table_type: 'BASE TABLE',
is_insertable_into: 'YES',
},
{
table_schema: 'public',
table_name: 'posts',
table_type: 'BASE TABLE',
is_insertable_into: 'YES',
},
],
extensions: [
{ name: 'plpgsql', installed_version: '1.0', comment: 'PL/pgSQL procedural language' },
{ name: 'uuid-ossp', installed_version: '1.1', comment: 'generate universally unique identifiers' },
],
buckets: [
{
id: 'bucket-1',
name: 'avatars',
owner: null,
public: true,
avif_autodetection: false,
file_size_limit: 5242880,
allowed_mime_types: ['image/png', 'image/jpeg'],
created_at: '2024-01-01T00:00:00Z',
updated_at: '2024-01-01T00:00:00Z',
},
],
storageObjects: [
{
id: '550e8400-e29b-41d4-a716-446655440003',
name: 'avatar.png',
bucket_id: 'avatars',
owner: '550e8400-e29b-41d4-a716-446655440001',
version: null,
mimetype: 'image/png',
size: 1024,
metadata: { mimetype: 'image/png', size: 1024 },
created_at: '2024-01-01T00:00:00Z',
updated_at: '2024-01-01T00:00:00Z',
last_accessed_at: null,
},
],
migrations: [
{
version: '20240101000000',
name: 'initial_schema',
executed_at: '2024-01-01T00:00:00Z',
},
],
connections: [
{
pid: 12345,
usename: 'postgres',
datname: 'postgres',
client_addr: '127.0.0.1',
state: 'active',
query: 'SELECT 1',
backend_start: '2024-01-01T00:00:00Z',
},
],
};
/**
 * Helper to create Express-like request/response mocks for middleware testing.
 *
 * res.status() records the code and res.json() records the body; both return
 * `this` so chained `res.status(401).json(...)` calls work. All three mocks
 * expose bun:test call tracking for assertions.
 */
export function createMockExpressReqRes() {
const req = {
headers: {} as Record<string, string>,
user: undefined as unknown,
};
const res = {
statusCode: 200,
jsonBody: null as unknown,
status: mock(function(this: typeof res, code: number) {
this.statusCode = code;
return this;
}),
json: mock(function(this: typeof res, body: unknown) {
this.jsonBody = body;
return this;
}),
};
const next = mock(() => {});
return { req, res, next };
}

View File

@ -0,0 +1,211 @@
/**
* Integration tests for SelfhostedSupabaseClient
*
* These tests run against a real Supabase instance and are skipped
* when environment variables are not configured.
*
* Required environment variables:
* - SUPABASE_URL
* - SUPABASE_ANON_KEY
* - DATABASE_URL (optional, for direct pg connection tests)
*/
import { describe, test, expect, beforeAll, afterAll } from 'bun:test';
import { SelfhostedSupabaseClient } from '../../client/index.js';
// Check if we have the required credentials
const hasCredentials = !!(
process.env.SUPABASE_URL &&
process.env.SUPABASE_ANON_KEY
);
const hasDatabaseUrl = !!process.env.DATABASE_URL;

// Skip all tests if credentials are not available
describe.skipIf(!hasCredentials)('SelfhostedSupabaseClient Integration Tests', () => {
    let client: SelfhostedSupabaseClient;

    beforeAll(async () => {
        client = await SelfhostedSupabaseClient.create({
            supabaseUrl: process.env.SUPABASE_URL!,
            supabaseAnonKey: process.env.SUPABASE_ANON_KEY!,
            supabaseServiceRoleKey: process.env.SUPABASE_SERVICE_ROLE_KEY,
            databaseUrl: process.env.DATABASE_URL,
            jwtSecret: process.env.JWT_SECRET,
        });
    });

    describe('Client initialization', () => {
        test('creates client successfully', () => {
            expect(client).toBeDefined();
            expect(client.supabase).toBeDefined();
        });

        test('getSupabaseUrl returns correct URL', () => {
            expect(client.getSupabaseUrl()).toBe(process.env.SUPABASE_URL);
        });

        test('getAnonKey returns correct key', () => {
            expect(client.getAnonKey()).toBe(process.env.SUPABASE_ANON_KEY);
        });

        test('isPgAvailable reflects DATABASE_URL configuration', () => {
            expect(client.isPgAvailable()).toBe(hasDatabaseUrl);
        });
    });

    describe('SQL execution via RPC', () => {
        test('executes simple SELECT query', async () => {
            const result = await client.executeSqlViaRpc('SELECT 1 as value', true);
            // If RPC is not available, we'll get an error
            if ('error' in result) {
                console.log('RPC not available:', result.error.message);
                // This is acceptable in integration tests - RPC may not be set up
                expect(result.error).toBeDefined();
            } else {
                expect(Array.isArray(result)).toBe(true);
                expect(result[0]?.value).toBe(1);
            }
        });

        test('executes query returning multiple rows', async () => {
            const result = await client.executeSqlViaRpc(
                'SELECT generate_series(1, 3) as num',
                true
            );
            if ('error' in result) {
                console.log('RPC not available:', result.error.message);
                expect(result.error).toBeDefined();
            } else {
                expect(Array.isArray(result)).toBe(true);
                expect(result.length).toBe(3);
            }
        });
    });

    describe.skipIf(!hasDatabaseUrl)('SQL execution via direct pg', () => {
        test('executes simple SELECT query', async () => {
            const result = await client.executeSqlWithPg('SELECT 1 as value');
            if ('error' in result) {
                console.log('Direct pg error:', result.error.message);
                throw new Error(result.error.message);
            }
            expect(Array.isArray(result)).toBe(true);
            expect(result[0]?.value).toBe(1);
        });

        test('executes query with multiple columns', async () => {
            const result = await client.executeSqlWithPg(
                "SELECT 'hello' as greeting, 42 as answer"
            );
            if ('error' in result) {
                throw new Error(result.error.message);
            }
            expect(result[0]?.greeting).toBe('hello');
            expect(result[0]?.answer).toBe(42);
        });

        test('handles query with no results', async () => {
            const result = await client.executeSqlWithPg(
                'SELECT 1 WHERE false'
            );
            if ('error' in result) {
                throw new Error(result.error.message);
            }
            expect(result).toEqual([]);
        });

        test('returns error for invalid SQL', async () => {
            const result = await client.executeSqlWithPg('INVALID SQL QUERY');
            expect('error' in result).toBe(true);
            if ('error' in result) {
                expect(result.error.message).toBeDefined();
            }
        });
    });

    describe.skipIf(!hasDatabaseUrl)('Transaction handling', () => {
        test('commits transaction on success', async () => {
            const testTableName = `test_integration_${Date.now()}`;
            let insertedName: string | undefined;
            try {
                // Create a temp table, insert a row, and read it back within the
                // same transaction (temp tables are session-scoped, so the check
                // must happen inside the callback).
                await client.executeTransactionWithPg(async (pgClient) => {
                    await pgClient.query(`
                        CREATE TEMP TABLE ${testTableName} (id serial, name text)
                    `);
                    await pgClient.query(
                        `INSERT INTO ${testTableName} (name) VALUES ($1)`,
                        ['test-value']
                    );
                    const check = await pgClient.query(
                        `SELECT name FROM ${testTableName}`
                    );
                    insertedName = check.rows[0]?.name;
                });
            } catch (error) {
                // Tolerated: this is most likely a permissions issue on the
                // target instance, not a client bug.
                console.log('Transaction test failed:', error);
                return;
            }
            // Asserted outside the try so a genuine mismatch is not swallowed
            // by the tolerant catch above.
            expect(insertedName).toBe('test-value');
        });

        test('rolls back transaction on error', async () => {
            // The callback throws; the client must propagate the error (after
            // rolling back) instead of resolving.
            await expect(
                client.executeTransactionWithPg(async (pgClient) => {
                    await pgClient.query('SELECT 1');
                    throw new Error('Intentional error for rollback test');
                })
            ).rejects.toThrow('Intentional error');
        });
    });

    describe('System catalog queries', () => {
        test.skipIf(!hasDatabaseUrl)('lists database extensions', async () => {
            const result = await client.executeSqlWithPg(`
                SELECT extname as name
                FROM pg_extension
                LIMIT 5
            `);
            if ('error' in result) {
                throw new Error(result.error.message);
            }
            expect(Array.isArray(result)).toBe(true);
            // plpgsql is always installed
            const hasPlpgsql = result.some((ext: { name: string }) => ext.name === 'plpgsql');
            expect(hasPlpgsql).toBe(true);
        });

        test.skipIf(!hasDatabaseUrl)('queries pg_stat_activity', async () => {
            const result = await client.executeSqlWithPg(`
                SELECT pid, state
                FROM pg_stat_activity
                WHERE backend_type = 'client backend'
                LIMIT 5
            `);
            if ('error' in result) {
                // May fail due to permissions
                console.log('pg_stat_activity query failed:', result.error.message);
                expect(result.error).toBeDefined();
            } else {
                expect(Array.isArray(result)).toBe(true);
            }
        });
    });
});

View File

@ -0,0 +1,212 @@
/**
* Integration tests for MCP tools
*
* These tests run against a real Supabase instance and are skipped
* when environment variables are not configured.
*
* Required environment variables:
* - SUPABASE_URL
* - SUPABASE_ANON_KEY
* - DATABASE_URL (required for most tools)
*/
import { describe, test, expect, beforeAll, afterAll } from 'bun:test';
import { SelfhostedSupabaseClient } from '../../client/index.js';
import type { ToolContext } from '../../tools/types.js';
// Import tools to test
import { listTablesTool } from '../../tools/list_tables.js';
import { listExtensionsTool } from '../../tools/list_extensions.js';
import { getDatabaseConnectionsTool } from '../../tools/get_database_connections.js';
import { executeSqlTool } from '../../tools/execute_sql.js';
import { getProjectUrlTool } from '../../tools/get_project_url.js';
import { verifyJwtSecretTool } from '../../tools/verify_jwt_secret.js';
import { listStorageBucketsTool } from '../../tools/list_storage_buckets.js';
// Check if we have the required credentials
const hasCredentials = Boolean(
    process.env.SUPABASE_URL && process.env.SUPABASE_ANON_KEY
);
const hasDatabaseUrl = Boolean(process.env.DATABASE_URL);
// Skip all tests if credentials are not available
describe.skipIf(!hasCredentials)('Tools Integration Tests', () => {
    let client: SelfhostedSupabaseClient;
    let context: ToolContext;

    beforeAll(async () => {
        client = await SelfhostedSupabaseClient.create({
            supabaseUrl: process.env.SUPABASE_URL!,
            supabaseAnonKey: process.env.SUPABASE_ANON_KEY!,
            supabaseServiceRoleKey: process.env.SUPABASE_SERVICE_ROLE_KEY,
            databaseUrl: process.env.DATABASE_URL,
            jwtSecret: process.env.JWT_SECRET,
        });
        context = {
            selfhostedClient: client,
            log: (message: string, level?: 'info' | 'warn' | 'error') => {
                console.log(`[${level || 'info'}] ${message}`);
            },
        };
    });

    describe('Simple getter tools', () => {
        test('get_project_url returns configured URL', async () => {
            const result = await getProjectUrlTool.execute({}, context);
            expect(result.project_url).toBe(process.env.SUPABASE_URL);
        });

        test('verify_jwt_secret returns status', async () => {
            const result = await verifyJwtSecretTool.execute({}, context);
            if (process.env.JWT_SECRET) {
                expect(result.jwt_secret_status).toBe('found');
            } else {
                expect(result.jwt_secret_status).toBe('not_configured');
            }
        });
    });

    describe.skipIf(!hasDatabaseUrl)('Database tools', () => {
        test('list_tables returns table list', async () => {
            const result = await listTablesTool.execute({}, context);
            expect(Array.isArray(result)).toBe(true);
            // All tables should have schema and name
            result.forEach((table: { schema: string; name: string }) => {
                expect(typeof table.schema).toBe('string');
                expect(typeof table.name).toBe('string');
            });
        });

        test('list_extensions returns extension list', async () => {
            const result = await listExtensionsTool.execute({}, context);
            expect(Array.isArray(result)).toBe(true);
            // Each extension should have name and version
            result.forEach((ext: { name: string; version: string }) => {
                expect(typeof ext.name).toBe('string');
                expect(typeof ext.version).toBe('string');
            });
        });

        test('get_database_connections returns connection list', async () => {
            // Only the tool call lives in the try: querying pg_stat_activity may
            // legitimately fail on restricted instances, and in that case we bail
            // out. Assertions run outside the try so that a real assertion
            // failure is NOT converted into a pass by the catch.
            let result: unknown;
            try {
                result = await getDatabaseConnectionsTool.execute({}, context);
            } catch (error) {
                console.log('get_database_connections failed (may be permissions):', error);
                return;
            }
            expect(Array.isArray(result)).toBe(true);
            const connections = result as Array<{ pid: number }>;
            // Should have at least one connection (ourselves)
            expect(connections.length).toBeGreaterThan(0);
            // Each connection should have pid
            connections.forEach((conn) => {
                expect(typeof conn.pid).toBe('number');
            });
        });

        test('execute_sql runs simple queries', async () => {
            const result = await executeSqlTool.execute(
                { sql: 'SELECT 1 as value', read_only: true },
                context
            );
            expect(Array.isArray(result)).toBe(true);
            expect(result[0]?.value).toBe(1);
        });

        test('execute_sql handles complex queries', async () => {
            const result = await executeSqlTool.execute(
                {
                    sql: `
                        SELECT
                            'test' as name,
                            42 as number,
                            ARRAY[1,2,3] as arr,
                            '{"key": "value"}'::jsonb as json_data
                    `,
                    read_only: true,
                },
                context
            );
            expect(Array.isArray(result)).toBe(true);
            expect(result[0]?.name).toBe('test');
            expect(result[0]?.number).toBe(42);
        });

        test('execute_sql returns error for invalid SQL', async () => {
            await expect(
                executeSqlTool.execute(
                    { sql: 'INVALID SQL STATEMENT' },
                    context
                )
            ).rejects.toThrow('SQL Error');
        });
    });

    describe.skipIf(!hasDatabaseUrl)('Storage tools', () => {
        test('list_storage_buckets returns bucket list', async () => {
            // The storage schema may not exist on every instance; tolerate a
            // failing call, but keep assertions outside the try so they cannot
            // be swallowed.
            let result: unknown;
            try {
                result = await listStorageBucketsTool.execute({}, context);
            } catch (error) {
                console.log('list_storage_buckets failed (storage may not be configured):', error);
                return;
            }
            expect(Array.isArray(result)).toBe(true);
            // Each bucket should have id and name
            (result as Array<{ id: string; name: string }>).forEach((bucket) => {
                expect(typeof bucket.id).toBe('string');
                expect(typeof bucket.name).toBe('string');
            });
        });
    });

    describe.skipIf(!hasDatabaseUrl)('Auth tools (read-only)', () => {
        // Note: We only test read operations to avoid modifying data
        // Create/Update/Delete tests would need proper cleanup
        test('can query auth.users table structure', async () => {
            // The connection role may not have access to the auth schema;
            // tolerate a failing query, assert outside the try.
            let result: unknown;
            try {
                result = await executeSqlTool.execute(
                    {
                        sql: `
                            SELECT column_name, data_type
                            FROM information_schema.columns
                            WHERE table_schema = 'auth' AND table_name = 'users'
                            LIMIT 5
                        `,
                        read_only: true,
                    },
                    context
                );
            } catch (error) {
                console.log('Auth schema query failed (may be permissions):', error);
                return;
            }
            expect(Array.isArray(result)).toBe(true);
            // Should have some columns
            expect((result as unknown[]).length).toBeGreaterThan(0);
        });
    });
});
// Separate describe block for cleanup to ensure it runs
describe.skipIf(!hasCredentials)('Integration Test Cleanup', () => {
    test('placeholder for cleanup', () => {
        // Nothing to remove yet: the suite above sticks to read-only
        // operations. This placeholder keeps a home for future cleanup.
        const nothingToCleanUp = true;
        expect(nothingToCleanUp).toBe(true);
    });
});

View File

@ -0,0 +1,294 @@
/**
* Tests for JWT Authentication Middleware
*
* These tests verify the JWT authentication middleware for HTTP transport mode:
* - Missing/invalid authorization headers
* - Token validation (signature, expiration, claims)
* - User info extraction
* - Error responses
*/
import { describe, test, expect, mock, beforeEach } from 'bun:test';
import jwt from 'jsonwebtoken';
import { createAuthMiddleware, type AuthenticatedRequest } from '../../server/auth-middleware.js';
import type { Response, NextFunction } from 'express';
describe('createAuthMiddleware', () => {
    // codacy:disable-line:hardcoded-credentials -- Test fixture, not a real secret
    // nosec: hardcoded test credential for unit testing only
    const JWT_SECRET = 'test-jwt-secret-key-for-testing'; // NOSONAR
    // Middleware under test, bound to the fixture secret above.
    const middleware = createAuthMiddleware(JWT_SECRET);

    // Helper to create mock request/response/next
    // The res mock records the last status code and JSON body (via this-bound
    // bun mocks that return `this` to support chaining); next is a bare spy.
    function createMocks() {
        const req = {
            headers: {} as Record<string, string>,
            user: undefined,
        } as AuthenticatedRequest;
        const res = {
            statusCode: 200,
            body: null as unknown,
            status: mock(function (this: typeof res, code: number) {
                this.statusCode = code;
                return this;
            }),
            json: mock(function (this: typeof res, body: unknown) {
                this.body = body;
                return this;
            }),
        } as unknown as Response;
        const next = mock(() => {}) as NextFunction;
        return { req, res, next };
    }

    // Helper to create valid JWT tokens
    // Signs with the same HS256 secret the middleware verifies against.
    function createToken(payload: Record<string, unknown>, options?: jwt.SignOptions) {
        return jwt.sign(payload, JWT_SECRET, { algorithm: 'HS256', ...options });
    }

    describe('Authorization header validation', () => {
        test('returns 401 when Authorization header is missing', () => {
            const { req, res, next } = createMocks();
            middleware(req, res, next);
            expect(res.status).toHaveBeenCalledWith(401);
            expect(res.json).toHaveBeenCalledWith({
                error: 'Unauthorized',
                message: 'Missing Authorization header',
            });
            expect(next).not.toHaveBeenCalled();
        });

        test('returns 401 when Authorization header does not start with Bearer', () => {
            const { req, res, next } = createMocks();
            req.headers.authorization = 'Basic dXNlcjpwYXNz';
            middleware(req, res, next);
            expect(res.status).toHaveBeenCalledWith(401);
            expect(res.json).toHaveBeenCalledWith({
                error: 'Unauthorized',
                message: 'Invalid Authorization header format. Expected: Bearer [token]',
            });
            expect(next).not.toHaveBeenCalled();
        });

        test('returns 401 when token is empty after Bearer prefix', () => {
            const { req, res, next } = createMocks();
            req.headers.authorization = 'Bearer ';
            middleware(req, res, next);
            expect(res.status).toHaveBeenCalledWith(401);
            expect(res.json).toHaveBeenCalledWith({
                error: 'Unauthorized',
                message: 'Missing token in Authorization header',
            });
            expect(next).not.toHaveBeenCalled();
        });
    });

    describe('Token signature validation', () => {
        test('returns 401 for token with invalid signature', () => {
            const { req, res, next } = createMocks();
            // Create token with wrong secret
            // codacy:disable-line:hardcoded-credentials -- Test fixture for signature mismatch
            const invalidToken = jwt.sign({ sub: 'user-123' }, 'wrong-secret', { // NOSONAR
                algorithm: 'HS256',
            });
            req.headers.authorization = `Bearer ${invalidToken}`;
            middleware(req, res, next);
            expect(res.status).toHaveBeenCalledWith(401);
            // The res mock stores the JSON payload on `body`; cast to inspect it.
            expect((res as { body: { error: string } }).body.error).toBe('Unauthorized');
            expect((res as { body: { message: string } }).body.message).toContain('Invalid token');
            expect(next).not.toHaveBeenCalled();
        });

        test('returns 401 for malformed token', () => {
            const { req, res, next } = createMocks();
            req.headers.authorization = 'Bearer not.a.valid.jwt.token';
            middleware(req, res, next);
            expect(res.status).toHaveBeenCalledWith(401);
            expect((res as { body: { error: string } }).body.error).toBe('Unauthorized');
            expect(next).not.toHaveBeenCalled();
        });
    });

    describe('Token expiration validation', () => {
        test('returns 401 for expired token', () => {
            const { req, res, next } = createMocks();
            // Create token that expired 1 hour ago
            const expiredToken = createToken(
                { sub: 'user-123' },
                { expiresIn: '-1h' }
            );
            req.headers.authorization = `Bearer ${expiredToken}`;
            middleware(req, res, next);
            expect(res.status).toHaveBeenCalledWith(401);
            expect((res as { body: { message: string } }).body.message).toContain('expired');
            expect(next).not.toHaveBeenCalled();
        });

        test('accepts token that has not expired', () => {
            const { req, res, next } = createMocks();
            const validToken = createToken(
                { sub: 'user-123' },
                { expiresIn: '1h' }
            );
            req.headers.authorization = `Bearer ${validToken}`;
            middleware(req, res, next);
            expect(next).toHaveBeenCalled();
            expect(res.status).not.toHaveBeenCalled();
        });
    });

    describe('Token claims validation', () => {
        test('returns 401 when sub claim is missing', () => {
            const { req, res, next } = createMocks();
            // Create token without sub claim
            const tokenWithoutSub = createToken({ email: 'test@example.com' });
            req.headers.authorization = `Bearer ${tokenWithoutSub}`;
            middleware(req, res, next);
            expect(res.status).toHaveBeenCalledWith(401);
            expect((res as { body: { message: string } }).body.message).toContain('missing subject');
            expect(next).not.toHaveBeenCalled();
        });
    });

    describe('Successful authentication', () => {
        test('calls next() for valid token', () => {
            const { req, res, next } = createMocks();
            const validToken = createToken({ sub: 'user-123' }, { expiresIn: '1h' });
            req.headers.authorization = `Bearer ${validToken}`;
            middleware(req, res, next);
            expect(next).toHaveBeenCalled();
            expect(res.status).not.toHaveBeenCalled();
            expect(res.json).not.toHaveBeenCalled();
        });

        test('sets req.user with userId from sub claim', () => {
            const { req, res, next } = createMocks();
            const validToken = createToken({ sub: 'user-abc-123' }, { expiresIn: '1h' });
            req.headers.authorization = `Bearer ${validToken}`;
            middleware(req, res, next);
            expect(req.user).toBeDefined();
            expect(req.user?.userId).toBe('user-abc-123');
        });

        test('sets req.user.email from token', () => {
            const { req, res, next } = createMocks();
            const validToken = createToken(
                { sub: 'user-123', email: 'test@example.com' },
                { expiresIn: '1h' }
            );
            req.headers.authorization = `Bearer ${validToken}`;
            middleware(req, res, next);
            expect(req.user?.email).toBe('test@example.com');
        });

        test('sets req.user.email to null when not in token', () => {
            const { req, res, next } = createMocks();
            const validToken = createToken({ sub: 'user-123' }, { expiresIn: '1h' });
            req.headers.authorization = `Bearer ${validToken}`;
            middleware(req, res, next);
            expect(req.user?.email).toBeNull();
        });

        test('sets req.user.role from token', () => {
            const { req, res, next } = createMocks();
            const validToken = createToken(
                { sub: 'user-123', role: 'admin' },
                { expiresIn: '1h' }
            );
            req.headers.authorization = `Bearer ${validToken}`;
            middleware(req, res, next);
            expect(req.user?.role).toBe('admin');
        });

        test('defaults req.user.role to authenticated when not in token', () => {
            const { req, res, next } = createMocks();
            const validToken = createToken({ sub: 'user-123' }, { expiresIn: '1h' });
            req.headers.authorization = `Bearer ${validToken}`;
            middleware(req, res, next);
            expect(req.user?.role).toBe('authenticated');
        });

        test('sets req.user.exp from token', () => {
            const { req, res, next } = createMocks();
            const validToken = createToken({ sub: 'user-123' }, { expiresIn: '1h' });
            req.headers.authorization = `Bearer ${validToken}`;
            middleware(req, res, next);
            expect(req.user?.exp).toBeGreaterThan(0);
            // Should expire in about 1 hour
            const oneHourFromNow = Math.floor(Date.now() / 1000) + 3600;
            expect(req.user?.exp).toBeGreaterThan(oneHourFromNow - 60); // Allow 60s tolerance
            expect(req.user?.exp).toBeLessThan(oneHourFromNow + 60);
        });

        test('extracts all fields from complete Supabase-style token', () => {
            const { req, res, next } = createMocks();
            const supabaseToken = createToken(
                {
                    sub: 'uuid-user-id',
                    email: 'user@example.com',
                    role: 'authenticated',
                    aud: 'authenticated',
                    iat: Math.floor(Date.now() / 1000),
                },
                { expiresIn: '1h' }
            );
            req.headers.authorization = `Bearer ${supabaseToken}`;
            middleware(req, res, next);
            expect(req.user).toEqual({
                userId: 'uuid-user-id',
                email: 'user@example.com',
                role: 'authenticated',
                exp: expect.any(Number),
            });
        });
    });

    describe('Different JWT secrets', () => {
        test('middleware with different secret rejects tokens from another secret', () => {
            const anotherMiddleware = createAuthMiddleware('different-secret');
            const { req, res, next } = createMocks();
            const token = createToken({ sub: 'user-123' }, { expiresIn: '1h' });
            req.headers.authorization = `Bearer ${token}`;
            anotherMiddleware(req, res, next);
            expect(res.status).toHaveBeenCalledWith(401);
            expect(next).not.toHaveBeenCalled();
        });
    });
});

View File

@ -0,0 +1,8 @@
/**
 * Test setup file for Bun test runner.
 * This file is preloaded before all tests run.
 */
// Provide fallback mock credentials for unit tests, but do NOT clobber real
// values: the integration suites gate on these env vars (describe.skipIf) and
// compare results against them, so genuinely-supplied SUPABASE_URL /
// SUPABASE_ANON_KEY must survive this preload.
process.env.SUPABASE_URL ??= 'http://localhost:54321';
process.env.SUPABASE_ANON_KEY ??= 'test-anon-key';

View File

@ -0,0 +1,637 @@
/**
* Tests for authentication-related tools
*
* Tools tested:
* - list_auth_users
* - get_auth_user
* - create_auth_user
* - update_auth_user
* - delete_auth_user
*/
import { describe, test, expect, mock } from 'bun:test';
import { listAuthUsersTool } from '../../tools/list_auth_users.js';
import { createAuthUserTool } from '../../tools/create_auth_user.js';
import { deleteAuthUserTool } from '../../tools/delete_auth_user.js';
import { updateAuthUserTool } from '../../tools/update_auth_user.js';
import {
createMockClient,
createMockContext,
createSuccessResponse,
createErrorResponse,
testData,
} from '../helpers/mocks.js';
describe('listAuthUsersTool', () => {
    describe('metadata', () => {
        test('has correct name', () => {
            expect(listAuthUsersTool.name).toBe('list_auth_users');
        });

        test('has description', () => {
            expect(listAuthUsersTool.description).toContain('user');
        });

        test('has input and output schemas', () => {
            expect(listAuthUsersTool.inputSchema).toBeDefined();
            expect(listAuthUsersTool.outputSchema).toBeDefined();
        });
    });

    describe('input validation', () => {
        // Pagination defaults: limit 50, offset 0; both must be non-negative.
        test('accepts empty input with defaults', () => {
            const result = listAuthUsersTool.inputSchema.safeParse({});
            expect(result.success).toBe(true);
            if (result.success) {
                expect(result.data.limit).toBe(50);
                expect(result.data.offset).toBe(0);
            }
        });

        test('accepts custom limit and offset', () => {
            const result = listAuthUsersTool.inputSchema.safeParse({ limit: 10, offset: 20 });
            expect(result.success).toBe(true);
            if (result.success) {
                expect(result.data.limit).toBe(10);
                expect(result.data.offset).toBe(20);
            }
        });

        test('rejects negative limit', () => {
            const result = listAuthUsersTool.inputSchema.safeParse({ limit: -1 });
            expect(result.success).toBe(false);
        });

        test('rejects negative offset', () => {
            const result = listAuthUsersTool.inputSchema.safeParse({ offset: -1 });
            expect(result.success).toBe(false);
        });
    });

    describe('execute', () => {
        test('returns list of users', async () => {
            const mockClient = createMockClient({
                pgAvailable: true,
                pgResult: createSuccessResponse(testData.users),
            });
            const context = createMockContext(mockClient);
            const result = await listAuthUsersTool.execute({}, context);
            expect(result).toEqual(testData.users);
        });

        test('returns empty array when no users', async () => {
            const mockClient = createMockClient({
                pgAvailable: true,
                pgResult: createSuccessResponse([]),
            });
            const context = createMockContext(mockClient);
            const result = await listAuthUsersTool.execute({}, context);
            expect(result).toEqual([]);
        });

        test('throws error when pg is not available', async () => {
            const mockClient = createMockClient({ pgAvailable: false });
            const context = createMockContext(mockClient);
            await expect(listAuthUsersTool.execute({}, context)).rejects.toThrow(
                'Direct database connection'
            );
        });

        test('throws error on SQL failure', async () => {
            const mockClient = createMockClient({
                pgAvailable: true,
                // 42501 is Postgres's insufficient_privilege error code.
                pgResult: createErrorResponse('permission denied for table users', '42501'),
            });
            const context = createMockContext(mockClient);
            await expect(listAuthUsersTool.execute({}, context)).rejects.toThrow('SQL Error');
        });

        test('uses pg connection directly (not RPC)', async () => {
            const mockClient = createMockClient({
                pgAvailable: true,
                pgResult: createSuccessResponse([]),
            });
            const context = createMockContext(mockClient);
            await listAuthUsersTool.execute({}, context);
            // auth.users must be read over the direct pg connection, never the
            // anon-keyed RPC path.
            expect(mockClient.executeSqlWithPg).toHaveBeenCalled();
            expect(mockClient.executeSqlViaRpc).not.toHaveBeenCalled();
        });
    });

    describe('output validation', () => {
        test('validates correct user structure', () => {
            const result = listAuthUsersTool.outputSchema.safeParse(testData.users);
            expect(result.success).toBe(true);
        });

        test('rejects invalid UUID for id', () => {
            const invalidUser = [{ ...testData.users[0], id: 'not-a-uuid' }];
            const result = listAuthUsersTool.outputSchema.safeParse(invalidUser);
            expect(result.success).toBe(false);
        });

        test('accepts null values for nullable fields', () => {
            const userWithNulls = [{
                id: '123e4567-e89b-12d3-a456-426614174000',
                email: null,
                role: null,
                created_at: null,
                last_sign_in_at: null,
                raw_app_meta_data: null,
                raw_user_meta_data: null,
            }];
            const result = listAuthUsersTool.outputSchema.safeParse(userWithNulls);
            expect(result.success).toBe(true);
        });
    });
});
describe('createAuthUserTool', () => {
    describe('metadata', () => {
        test('has correct name', () => {
            expect(createAuthUserTool.name).toBe('create_auth_user');
        });

        test('has warning in description', () => {
            expect(createAuthUserTool.description).toContain('WARNING');
        });
    });

    describe('input validation', () => {
        test('requires email', () => {
            const result = createAuthUserTool.inputSchema.safeParse({ password: 'password123' });
            expect(result.success).toBe(false);
        });

        test('requires password', () => {
            const result = createAuthUserTool.inputSchema.safeParse({ email: 'test@example.com' });
            expect(result.success).toBe(false);
        });

        test('validates email format', () => {
            const result = createAuthUserTool.inputSchema.safeParse({
                email: 'not-an-email',
                password: 'password123',
            });
            expect(result.success).toBe(false);
        });

        test('requires minimum password length', () => {
            const result = createAuthUserTool.inputSchema.safeParse({
                email: 'test@example.com',
                password: '12345', // 5 chars, needs 6
            });
            expect(result.success).toBe(false);
        });

        test('accepts valid input', () => {
            const result = createAuthUserTool.inputSchema.safeParse({
                email: 'test@example.com',
                password: 'password123',
            });
            expect(result.success).toBe(true);
        });

        test('accepts optional role and metadata', () => {
            const result = createAuthUserTool.inputSchema.safeParse({
                email: 'test@example.com',
                password: 'password123',
                role: 'admin',
                app_metadata: { custom: 'data' },
                user_metadata: { name: 'Test User' },
            });
            expect(result.success).toBe(true);
        });
    });

    describe('execute', () => {
        test('throws error when pg is not available', async () => {
            const mockClient = createMockClient({ pgAvailable: false });
            const context = createMockContext(mockClient);
            await expect(
                createAuthUserTool.execute(
                    { email: 'test@example.com', password: 'password123' },
                    context
                )
            ).rejects.toThrow('Direct database connection');
        });

        test('creates user via transaction', async () => {
            const createdUser = {
                id: '123e4567-e89b-12d3-a456-426614174000',
                email: 'test@example.com',
                role: 'authenticated',
                created_at: '2024-01-01T00:00:00Z',
                last_sign_in_at: null,
                raw_app_meta_data: {},
                raw_user_meta_data: {},
            };
            // The mock pg client dispatches on SQL text: the tool first probes
            // pgcrypto with a bare SELECT ... crypt(...) before the INSERT, so
            // that query is answered separately from the user-creating INSERT.
            const mockPgClient = {
                query: mock(async (sql: string, _params?: unknown[]) => {
                    // The crypt test SELECT query doesn't have INSERT
                    if (sql.includes('crypt') && sql.includes('SELECT') && !sql.includes('INSERT')) {
                        return { rows: [{ crypt: 'test' }] };
                    }
                    // The INSERT query that creates the user
                    return { rows: [createdUser] };
                }),
            };
            const mockClient = createMockClient({ pgAvailable: true });
            // Run the transaction callback against the mock pg client.
            (mockClient.executeTransactionWithPg as ReturnType<typeof mock>).mockImplementation(
                async (callback: (client: unknown) => Promise<unknown>) => {
                    return callback(mockPgClient);
                }
            );
            const context = createMockContext(mockClient);
            const result = await createAuthUserTool.execute(
                { email: 'test@example.com', password: 'password123' },
                context
            );
            expect(result).toEqual(createdUser);
        });

        test('throws error when pgcrypto is not available', async () => {
            // Simulate a database without the pgcrypto extension: the crypt()
            // probe fails and the tool should surface a pgcrypto-specific error.
            const mockPgClient = {
                query: mock(async (sql: string) => {
                    if (sql.includes('crypt')) {
                        throw new Error('function crypt does not exist');
                    }
                    return { rows: [] };
                }),
            };
            const mockClient = createMockClient({ pgAvailable: true });
            (mockClient.executeTransactionWithPg as ReturnType<typeof mock>).mockImplementation(
                async (callback: (client: unknown) => Promise<unknown>) => {
                    return callback(mockPgClient);
                }
            );
            const context = createMockContext(mockClient);
            await expect(
                createAuthUserTool.execute(
                    { email: 'test@example.com', password: 'password123' },
                    context
                )
            ).rejects.toThrow('pgcrypto');
        });

        test('handles unique violation error for duplicate email', async () => {
            // The crypt probe succeeds; the INSERT then raises 23505
            // (unique_violation), which the tool should translate to an
            // "already exists" error.
            const mockPgClient = {
                query: mock(async (sql: string) => {
                    if (sql.includes('crypt') && !sql.includes('INSERT')) {
                        return { rows: [{ crypt: 'test' }] };
                    }
                    const error = new Error('duplicate key value violates unique constraint');
                    (error as unknown as { code: string }).code = '23505';
                    throw error;
                }),
            };
            const mockClient = createMockClient({ pgAvailable: true });
            (mockClient.executeTransactionWithPg as ReturnType<typeof mock>).mockImplementation(
                async (callback: (client: unknown) => Promise<unknown>) => {
                    return callback(mockPgClient);
                }
            );
            const context = createMockContext(mockClient);
            await expect(
                createAuthUserTool.execute(
                    { email: 'test@example.com', password: 'password123' },
                    context
                )
            ).rejects.toThrow('already exists');
        });
    });
});
describe('deleteAuthUserTool', () => {
    describe('metadata', () => {
        test('has correct name', () => {
            expect(deleteAuthUserTool.name).toBe('delete_auth_user');
        });

        test('has description', () => {
            expect(deleteAuthUserTool.description).toContain('Delete');
        });
    });

    describe('input validation', () => {
        test('requires user_id', () => {
            const result = deleteAuthUserTool.inputSchema.safeParse({});
            expect(result.success).toBe(false);
        });

        test('validates user_id is UUID', () => {
            const result = deleteAuthUserTool.inputSchema.safeParse({ user_id: 'not-a-uuid' });
            expect(result.success).toBe(false);
        });

        test('accepts valid UUID', () => {
            const result = deleteAuthUserTool.inputSchema.safeParse({
                user_id: '123e4567-e89b-12d3-a456-426614174000',
            });
            expect(result.success).toBe(true);
        });
    });

    describe('execute', () => {
        test('throws error when pg is not available', async () => {
            const mockClient = createMockClient({ pgAvailable: false });
            const context = createMockContext(mockClient);
            await expect(
                deleteAuthUserTool.execute(
                    { user_id: '123e4567-e89b-12d3-a456-426614174000' },
                    context
                )
            ).rejects.toThrow('Direct database connection');
        });

        test('returns success when user is deleted', async () => {
            // rowCount 1 means the DELETE matched a row.
            const mockPgClient = {
                query: mock(async () => ({ rowCount: 1 })),
            };
            const mockClient = createMockClient({ pgAvailable: true });
            (mockClient.executeTransactionWithPg as ReturnType<typeof mock>).mockImplementation(
                async (callback: (client: unknown) => Promise<unknown>) => {
                    return callback(mockPgClient);
                }
            );
            const context = createMockContext(mockClient);
            const result = await deleteAuthUserTool.execute(
                { user_id: '123e4567-e89b-12d3-a456-426614174000' },
                context
            );
            expect(result.success).toBe(true);
            expect(result.message).toContain('Successfully deleted');
        });

        test('returns failure when user is not found', async () => {
            // rowCount 0: DELETE matched nothing — reported as a soft failure,
            // not an exception.
            const mockPgClient = {
                query: mock(async () => ({ rowCount: 0 })),
            };
            const mockClient = createMockClient({ pgAvailable: true });
            (mockClient.executeTransactionWithPg as ReturnType<typeof mock>).mockImplementation(
                async (callback: (client: unknown) => Promise<unknown>) => {
                    return callback(mockPgClient);
                }
            );
            const context = createMockContext(mockClient);
            const result = await deleteAuthUserTool.execute(
                { user_id: '123e4567-e89b-12d3-a456-426614174000' },
                context
            );
            expect(result.success).toBe(false);
            expect(result.message).toContain('not found');
        });

        test('throws error on database failure', async () => {
            const mockPgClient = {
                query: mock(async () => {
                    throw new Error('Database error');
                }),
            };
            const mockClient = createMockClient({ pgAvailable: true });
            (mockClient.executeTransactionWithPg as ReturnType<typeof mock>).mockImplementation(
                async (callback: (client: unknown) => Promise<unknown>) => {
                    return callback(mockPgClient);
                }
            );
            const context = createMockContext(mockClient);
            await expect(
                deleteAuthUserTool.execute(
                    { user_id: '123e4567-e89b-12d3-a456-426614174000' },
                    context
                )
            ).rejects.toThrow('Failed to delete user');
        });
    });

    describe('output validation', () => {
        test('validates success response', () => {
            const result = deleteAuthUserTool.outputSchema.safeParse({
                success: true,
                message: 'User deleted',
            });
            expect(result.success).toBe(true);
        });

        test('validates failure response', () => {
            const result = deleteAuthUserTool.outputSchema.safeParse({
                success: false,
                message: 'User not found',
            });
            expect(result.success).toBe(true);
        });
    });
});
// Tests for the update_auth_user tool: metadata, Zod input validation, and
// execute() behaviour against a mocked client with a direct pg connection.
describe('updateAuthUserTool', () => {
  describe('metadata', () => {
    test('has correct name', () => {
      expect(updateAuthUserTool.name).toBe('update_auth_user');
    });
    // The tool mutates auth users directly, so its description must carry a warning.
    test('has warning in description', () => {
      expect(updateAuthUserTool.description).toContain('WARNING');
    });
  });
  describe('input validation', () => {
    test('requires user_id', () => {
      const result = updateAuthUserTool.inputSchema.safeParse({ email: 'new@example.com' });
      expect(result.success).toBe(false);
    });
    test('validates user_id is UUID', () => {
      const result = updateAuthUserTool.inputSchema.safeParse({
        user_id: 'not-a-uuid',
        email: 'new@example.com',
      });
      expect(result.success).toBe(false);
    });
    // A user_id alone is not a valid request: at least one mutable field
    // (email, password, role, metadata) must accompany it.
    test('requires at least one field to update', () => {
      const result = updateAuthUserTool.inputSchema.safeParse({
        user_id: '123e4567-e89b-12d3-a456-426614174000',
      });
      expect(result.success).toBe(false);
    });
    test('accepts email update', () => {
      const result = updateAuthUserTool.inputSchema.safeParse({
        user_id: '123e4567-e89b-12d3-a456-426614174000',
        email: 'new@example.com',
      });
      expect(result.success).toBe(true);
    });
    test('accepts password update', () => {
      const result = updateAuthUserTool.inputSchema.safeParse({
        user_id: '123e4567-e89b-12d3-a456-426614174000',
        password: 'newpassword123',
      });
      expect(result.success).toBe(true);
    });
    test('accepts role update', () => {
      const result = updateAuthUserTool.inputSchema.safeParse({
        user_id: '123e4567-e89b-12d3-a456-426614174000',
        role: 'admin',
      });
      expect(result.success).toBe(true);
    });
    test('accepts metadata updates', () => {
      const result = updateAuthUserTool.inputSchema.safeParse({
        user_id: '123e4567-e89b-12d3-a456-426614174000',
        user_metadata: { name: 'New Name' },
      });
      expect(result.success).toBe(true);
    });
    // A 5-character password must fail the schema's minimum-length check
    // (exact minimum lives in the tool's schema; 'newpassword123' passes above).
    test('validates minimum password length', () => {
      const result = updateAuthUserTool.inputSchema.safeParse({
        user_id: '123e4567-e89b-12d3-a456-426614174000',
        password: '12345',
      });
      expect(result.success).toBe(false);
    });
    test('validates email format', () => {
      const result = updateAuthUserTool.inputSchema.safeParse({
        user_id: '123e4567-e89b-12d3-a456-426614174000',
        email: 'not-an-email',
      });
      expect(result.success).toBe(false);
    });
  });
  describe('execute', () => {
    // Without a direct pg connection the tool rejects outright.
    test('throws error when pg is not available', async () => {
      const mockClient = createMockClient({ pgAvailable: false });
      const context = createMockContext(mockClient);
      await expect(
        updateAuthUserTool.execute(
          {
            user_id: '123e4567-e89b-12d3-a456-426614174000',
            email: 'new@example.com',
          },
          context
        )
      ).rejects.toThrow('Direct database connection');
    });
    test('updates user via transaction', async () => {
      // Row shape mirrors the user record the tool returns after its UPDATE.
      const updatedUser = {
        id: '123e4567-e89b-12d3-a456-426614174000',
        email: 'new@example.com',
        role: 'authenticated',
        created_at: '2024-01-01T00:00:00Z',
        updated_at: '2024-01-02T00:00:00Z',
        last_sign_in_at: null,
        raw_app_meta_data: {},
        raw_user_meta_data: {},
      };
      const mockPgClient = {
        query: mock(async () => ({ rows: [updatedUser] })),
      };
      const mockClient = createMockClient({ pgAvailable: true });
      // Route the transaction callback straight to the stub pg client so the
      // tool's queries hit the canned responses above.
      (mockClient.executeTransactionWithPg as ReturnType<typeof mock>).mockImplementation(
        async (callback: (client: unknown) => Promise<unknown>) => {
          return callback(mockPgClient);
        }
      );
      const context = createMockContext(mockClient);
      const result = await updateAuthUserTool.execute(
        {
          user_id: '123e4567-e89b-12d3-a456-426614174000',
          email: 'new@example.com',
        },
        context
      );
      expect(result).toEqual(updatedUser);
    });
    test('throws error when user is not found', async () => {
      // Empty rows from the query means no user matched the given id.
      const mockPgClient = {
        query: mock(async () => ({ rows: [] })),
      };
      const mockClient = createMockClient({ pgAvailable: true });
      (mockClient.executeTransactionWithPg as ReturnType<typeof mock>).mockImplementation(
        async (callback: (client: unknown) => Promise<unknown>) => {
          return callback(mockPgClient);
        }
      );
      const context = createMockContext(mockClient);
      await expect(
        updateAuthUserTool.execute(
          {
            user_id: '123e4567-e89b-12d3-a456-426614174000',
            email: 'new@example.com',
          },
          context
        )
      ).rejects.toThrow('not found');
    });
    test('checks pgcrypto when updating password', async () => {
      // Simulate a database without pgcrypto: any SELECT that uses crypt()
      // throws, and the tool is expected to surface a pgcrypto error.
      const mockPgClient = {
        query: mock(async (sql: string) => {
          if (sql.includes('crypt') && sql.includes('SELECT')) {
            throw new Error('function crypt does not exist');
          }
          return { rows: [] };
        }),
      };
      const mockClient = createMockClient({ pgAvailable: true });
      (mockClient.executeTransactionWithPg as ReturnType<typeof mock>).mockImplementation(
        async (callback: (client: unknown) => Promise<unknown>) => {
          return callback(mockPgClient);
        }
      );
      const context = createMockContext(mockClient);
      await expect(
        updateAuthUserTool.execute(
          {
            user_id: '123e4567-e89b-12d3-a456-426614174000',
            password: 'newpassword123',
          },
          context
        )
      ).rejects.toThrow('pgcrypto');
    });
  });
});

View File

@ -0,0 +1,499 @@
/**
* Tests for database-related tools
*
* Tools tested:
* - list_tables
* - list_extensions
* - get_database_connections
* - get_database_stats
* - list_migrations
* - apply_migration
*/
import { describe, test, expect } from 'bun:test';
import { listTablesTool } from '../../tools/list_tables.js';
import { listExtensionsTool } from '../../tools/list_extensions.js';
import { getDatabaseConnectionsTool } from '../../tools/get_database_connections.js';
import { getDatabaseStatsTool } from '../../tools/get_database_stats.js';
import {
createMockClient,
createMockContext,
createSuccessResponse,
createErrorResponse,
testData,
} from '../helpers/mocks.js';
// listTablesTool: metadata shape, execute() over both transports (direct pg
// and service-role RPC), and output-schema validation.
describe('listTablesTool', () => {
  describe('metadata', () => {
    test('has correct name', () => {
      expect(listTablesTool.name).toBe('list_tables');
    });
    test('has description', () => {
      const { description } = listTablesTool;
      expect(description).toBeDefined();
      expect(description).toContain('table');
    });
    test('has input and output schemas', () => {
      const { inputSchema, outputSchema, mcpInputSchema } = listTablesTool;
      expect(inputSchema).toBeDefined();
      expect(outputSchema).toBeDefined();
      expect(mcpInputSchema).toBeDefined();
    });
  });
  describe('execute', () => {
    test('returns list of tables', async () => {
      const tableRows = [
        { schema: 'public', name: 'users', comment: 'User accounts' },
        { schema: 'public', name: 'posts', comment: null },
      ];
      const client = createMockClient({
        pgAvailable: true,
        pgResult: createSuccessResponse(tableRows),
      });
      const ctx = createMockContext(client);
      expect(await listTablesTool.execute({}, ctx)).toEqual(tableRows);
    });
    test('returns empty array when no tables exist', async () => {
      const client = createMockClient({
        pgAvailable: true,
        pgResult: createSuccessResponse([]),
      });
      const ctx = createMockContext(client);
      expect(await listTablesTool.execute({}, ctx)).toEqual([]);
    });
    test('throws error on SQL failure', async () => {
      const client = createMockClient({
        pgAvailable: true,
        pgResult: createErrorResponse('permission denied', '42501'),
      });
      const ctx = createMockContext(client);
      await expect(listTablesTool.execute({}, ctx)).rejects.toThrow('SQL Error');
    });
    test('uses read-only mode for query via service role RPC', async () => {
      const client = createMockClient({
        pgAvailable: false,
        serviceRoleAvailable: true,
        serviceRoleRpcResult: createSuccessResponse([]),
      });
      await listTablesTool.execute({}, createMockContext(client));
      // When using service role RPC, should be called with readOnly=true
      expect(client.executeSqlViaServiceRoleRpc).toHaveBeenCalledWith(
        expect.any(String),
        true
      );
    });
  });
  describe('output validation', () => {
    test('validates correct table structure', () => {
      const rows = [
        { schema: 'public', name: 'users', comment: 'User table' },
        { schema: 'public', name: 'posts', comment: null },
      ];
      expect(listTablesTool.outputSchema.safeParse(rows).success).toBe(true);
    });
    test('rejects missing schema field', () => {
      const rows = [{ name: 'users', comment: null }];
      expect(listTablesTool.outputSchema.safeParse(rows).success).toBe(false);
    });
    test('rejects missing name field', () => {
      const rows = [{ schema: 'public', comment: null }];
      expect(listTablesTool.outputSchema.safeParse(rows).success).toBe(false);
    });
  });
});
// listExtensionsTool: metadata, execute() happy/empty/error paths, and
// output-schema checks (description is the only nullable field exercised).
describe('listExtensionsTool', () => {
  describe('metadata', () => {
    test('has correct name', () => {
      expect(listExtensionsTool.name).toBe('list_extensions');
    });
    test('has description', () => {
      expect(listExtensionsTool.description).toContain('extension');
    });
  });
  describe('execute', () => {
    test('returns list of extensions', async () => {
      const extensionRows = [
        { name: 'uuid-ossp', schema: 'extensions', version: '1.1', description: 'UUID functions' },
        { name: 'pgcrypto', schema: 'extensions', version: '1.3', description: null },
      ];
      const client = createMockClient({
        pgAvailable: true,
        pgResult: createSuccessResponse(extensionRows),
      });
      const ctx = createMockContext(client);
      expect(await listExtensionsTool.execute({}, ctx)).toEqual(extensionRows);
    });
    test('returns empty array when no extensions installed', async () => {
      const client = createMockClient({
        pgAvailable: true,
        pgResult: createSuccessResponse([]),
      });
      const ctx = createMockContext(client);
      expect(await listExtensionsTool.execute({}, ctx)).toEqual([]);
    });
    test('throws error on SQL failure', async () => {
      const client = createMockClient({
        pgAvailable: true,
        pgResult: createErrorResponse('access denied', '42501'),
      });
      const ctx = createMockContext(client);
      await expect(listExtensionsTool.execute({}, ctx)).rejects.toThrow('SQL Error');
    });
  });
  describe('output validation', () => {
    test('validates correct extension structure', () => {
      const rows = [
        { name: 'uuid-ossp', schema: 'public', version: '1.1', description: 'UUID gen' },
      ];
      expect(listExtensionsTool.outputSchema.safeParse(rows).success).toBe(true);
    });
    test('accepts null description', () => {
      const rows = [
        { name: 'ext', schema: 'public', version: '1.0', description: null },
      ];
      expect(listExtensionsTool.outputSchema.safeParse(rows).success).toBe(true);
    });
    test('rejects missing required fields', () => {
      const rows = [{ name: 'ext' }];
      expect(listExtensionsTool.outputSchema.safeParse(rows).success).toBe(false);
    });
  });
});
// getDatabaseConnectionsTool: metadata, execute() over connection rows
// (including all-null columns), and output-schema checks.
describe('getDatabaseConnectionsTool', () => {
  describe('metadata', () => {
    test('has correct name', () => {
      expect(getDatabaseConnectionsTool.name).toBe('get_database_connections');
    });
    test('has description about connections', () => {
      expect(getDatabaseConnectionsTool.description).toContain('connection');
    });
  });
  describe('execute', () => {
    test('returns list of connections', async () => {
      const connectionRows = [
        {
          pid: 12345,
          datname: 'postgres',
          usename: 'postgres',
          application_name: 'psql',
          client_addr: '127.0.0.1',
          backend_start: '2024-01-01T00:00:00Z',
          state: 'active',
          query: 'SELECT 1',
        },
      ];
      const client = createMockClient({
        pgAvailable: true,
        pgResult: createSuccessResponse(connectionRows),
      });
      const ctx = createMockContext(client);
      expect(await getDatabaseConnectionsTool.execute({}, ctx)).toEqual(connectionRows);
    });
    test('returns empty array when no connections', async () => {
      const client = createMockClient({
        pgAvailable: true,
        pgResult: createSuccessResponse([]),
      });
      const ctx = createMockContext(client);
      expect(await getDatabaseConnectionsTool.execute({}, ctx)).toEqual([]);
    });
    test('handles connections with null values', async () => {
      // Every column except pid is null here; the row must pass through intact.
      const connectionRows = [
        {
          pid: 1,
          datname: null,
          usename: null,
          application_name: null,
          client_addr: null,
          backend_start: null,
          state: null,
          query: null,
        },
      ];
      const client = createMockClient({
        pgAvailable: true,
        pgResult: createSuccessResponse(connectionRows),
      });
      const ctx = createMockContext(client);
      expect(await getDatabaseConnectionsTool.execute({}, ctx)).toEqual(connectionRows);
    });
    test('throws error on SQL failure', async () => {
      const client = createMockClient({
        pgAvailable: true,
        pgResult: createErrorResponse('permission denied for pg_stat_activity', '42501'),
      });
      const ctx = createMockContext(client);
      await expect(getDatabaseConnectionsTool.execute({}, ctx)).rejects.toThrow('SQL Error');
    });
  });
  describe('output validation', () => {
    test('requires pid to be a number', () => {
      const rows = [{ pid: 'not-a-number' }];
      expect(getDatabaseConnectionsTool.outputSchema.safeParse(rows).success).toBe(false);
    });
    test('accepts complete connection object', () => {
      const rows = [
        {
          pid: 123,
          datname: 'db',
          usename: 'user',
          application_name: 'app',
          client_addr: '127.0.0.1',
          backend_start: '2024-01-01',
          state: 'idle',
          query: 'SELECT 1',
        },
      ];
      expect(getDatabaseConnectionsTool.outputSchema.safeParse(rows).success).toBe(true);
    });
  });
});
// Tests for get_database_stats: execute() issues two queries (database-level
// stats first, then background-writer stats) and returns both result sets
// under database_stats / bgwriter_stats.
describe('getDatabaseStatsTool', () => {
  describe('metadata', () => {
    test('has correct name', () => {
      expect(getDatabaseStatsTool.name).toBe('get_database_stats');
    });
    test('has description about statistics', () => {
      expect(getDatabaseStatsTool.description).toContain('statistic');
    });
  });
  describe('execute', () => {
    test('returns combined database and bgwriter stats', async () => {
      // Counter fields are strings — presumably bigint columns serialized as
      // text by the driver (TODO confirm against the tool's query code).
      const dbStats = [
        {
          datname: 'postgres',
          numbackends: 5,
          xact_commit: '1000',
          xact_rollback: '10',
          blks_read: '500',
          blks_hit: '9500',
          tup_returned: '10000',
          tup_fetched: '5000',
          tup_inserted: '100',
          tup_updated: '50',
          tup_deleted: '10',
          conflicts: '0',
          temp_files: '0',
          temp_bytes: '0',
          deadlocks: '0',
          checksum_failures: null,
          checksum_last_failure: null,
          blk_read_time: 1.5,
          blk_write_time: 0.5,
          stats_reset: '2024-01-01T00:00:00Z',
        },
      ];
      const bgWriterStats = [
        {
          checkpoints_timed: '100',
          checkpoints_req: '5',
          checkpoint_write_time: 1000.0,
          checkpoint_sync_time: 50.0,
          buffers_checkpoint: '500',
          buffers_clean: '100',
          maxwritten_clean: '0',
          buffers_backend: '50',
          buffers_backend_fsync: '0',
          buffers_alloc: '1000',
          stats_reset: '2024-01-01T00:00:00Z',
        },
      ];
      // Mock client needs to return different results for the two queries
      let callCount = 0;
      const mockClient = createMockClient({ pgAvailable: true });
      (mockClient.executeSqlWithPg as ReturnType<typeof import('bun:test').mock>).mockImplementation(
        async () => {
          callCount++;
          return callCount === 1 ? dbStats : bgWriterStats;
        }
      );
      const context = createMockContext(mockClient);
      const result = await getDatabaseStatsTool.execute({}, context);
      expect(result).toHaveProperty('database_stats');
      expect(result).toHaveProperty('bgwriter_stats');
      expect(result.database_stats).toEqual(dbStats);
      expect(result.bgwriter_stats).toEqual(bgWriterStats);
    });
    // First query fails: the tool must surface the SQL error.
    test('throws error when database stats query fails', async () => {
      let callCount = 0;
      const mockClient = createMockClient({ pgAvailable: true });
      (mockClient.executeSqlWithPg as ReturnType<typeof import('bun:test').mock>).mockImplementation(
        async () => {
          callCount++;
          if (callCount === 1) {
            return createErrorResponse('query failed', 'ERROR');
          }
          return [];
        }
      );
      const context = createMockContext(mockClient);
      await expect(getDatabaseStatsTool.execute({}, context)).rejects.toThrow('SQL Error');
    });
    // Second (bgwriter) query fails after a valid first result set.
    test('throws error when bgwriter stats query fails', async () => {
      let callCount = 0;
      const mockClient = createMockClient({ pgAvailable: true });
      (mockClient.executeSqlWithPg as ReturnType<typeof import('bun:test').mock>).mockImplementation(
        async () => {
          callCount++;
          if (callCount === 2) {
            return createErrorResponse('query failed', 'ERROR');
          }
          // Minimal-but-complete database_stats row so the first query succeeds.
          return [
            {
              datname: 'test',
              numbackends: 1,
              xact_commit: '0',
              xact_rollback: '0',
              blks_read: '0',
              blks_hit: '0',
              tup_returned: '0',
              tup_fetched: '0',
              tup_inserted: '0',
              tup_updated: '0',
              tup_deleted: '0',
              conflicts: '0',
              temp_files: '0',
              temp_bytes: '0',
              deadlocks: '0',
              checksum_failures: null,
              checksum_last_failure: null,
              blk_read_time: 0,
              blk_write_time: 0,
              stats_reset: null,
            },
          ];
        }
      );
      const context = createMockContext(mockClient);
      await expect(getDatabaseStatsTool.execute({}, context)).rejects.toThrow('SQL Error');
    });
  });
  describe('output validation', () => {
    test('validates correct stats structure', () => {
      const validOutput = {
        database_stats: [
          {
            datname: 'test',
            numbackends: 1,
            xact_commit: '0',
            xact_rollback: '0',
            blks_read: '0',
            blks_hit: '0',
            tup_returned: '0',
            tup_fetched: '0',
            tup_inserted: '0',
            tup_updated: '0',
            tup_deleted: '0',
            conflicts: '0',
            temp_files: '0',
            temp_bytes: '0',
            deadlocks: '0',
            checksum_failures: null,
            checksum_last_failure: null,
            blk_read_time: 0,
            blk_write_time: 0,
            stats_reset: null,
          },
        ],
        bgwriter_stats: [
          {
            checkpoints_timed: '0',
            checkpoints_req: '0',
            checkpoint_write_time: 0,
            checkpoint_sync_time: 0,
            buffers_checkpoint: '0',
            buffers_clean: '0',
            maxwritten_clean: '0',
            buffers_backend: '0',
            buffers_backend_fsync: '0',
            buffers_alloc: '0',
            stats_reset: null,
          },
        ],
      };
      const result = getDatabaseStatsTool.outputSchema.safeParse(validOutput);
      expect(result.success).toBe(true);
    });
    // Both top-level keys are mandatory in the output schema.
    test('rejects missing database_stats', () => {
      const invalidOutput = { bgwriter_stats: [] };
      const result = getDatabaseStatsTool.outputSchema.safeParse(invalidOutput);
      expect(result.success).toBe(false);
    });
    test('rejects missing bgwriter_stats', () => {
      const invalidOutput = { database_stats: [] };
      const result = getDatabaseStatsTool.outputSchema.safeParse(invalidOutput);
      expect(result.success).toBe(false);
    });
  });
});

View File

@ -0,0 +1,248 @@
/**
* Tests for execute_sql tool
*
* Tests the SQL execution tool that allows arbitrary SQL queries.
*/
import { describe, test, expect, beforeEach } from 'bun:test';
import { executeSqlTool } from '../../tools/execute_sql.js';
import {
createMockClient,
createMockContext,
createSuccessResponse,
createErrorResponse,
} from '../helpers/mocks.js';
// Tests for execute_sql: metadata, Zod input validation (sql required,
// read_only defaulting), transport selection (direct pg vs service-role RPC),
// and the permissive output schema.
describe('executeSqlTool', () => {
  describe('metadata', () => {
    test('has correct name', () => {
      expect(executeSqlTool.name).toBe('execute_sql');
    });
    test('has description', () => {
      expect(executeSqlTool.description).toBeDefined();
      expect(executeSqlTool.description.length).toBeGreaterThan(0);
    });
    test('has input schema', () => {
      expect(executeSqlTool.inputSchema).toBeDefined();
    });
    // The MCP-facing JSON schema must expose the `sql` property.
    test('has MCP input schema', () => {
      expect(executeSqlTool.mcpInputSchema).toBeDefined();
      expect(executeSqlTool.mcpInputSchema.type).toBe('object');
      expect(executeSqlTool.mcpInputSchema.properties.sql).toBeDefined();
    });
    test('has output schema', () => {
      expect(executeSqlTool.outputSchema).toBeDefined();
    });
  });
  describe('input validation', () => {
    test('validates sql is required', () => {
      const result = executeSqlTool.inputSchema.safeParse({});
      expect(result.success).toBe(false);
    });
    test('validates sql must be string', () => {
      const result = executeSqlTool.inputSchema.safeParse({ sql: 123 });
      expect(result.success).toBe(false);
    });
    test('accepts valid sql string', () => {
      const result = executeSqlTool.inputSchema.safeParse({ sql: 'SELECT 1' });
      expect(result.success).toBe(true);
    });
    // Writes are allowed by default; callers must opt in to read-only mode.
    test('read_only defaults to false', () => {
      const result = executeSqlTool.inputSchema.safeParse({ sql: 'SELECT 1' });
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.read_only).toBe(false);
      }
    });
    test('accepts read_only boolean', () => {
      const result = executeSqlTool.inputSchema.safeParse({
        sql: 'SELECT 1',
        read_only: true,
      });
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.read_only).toBe(true);
      }
    });
  });
  describe('execute', () => {
    test('returns results for successful query', async () => {
      const expectedRows = [{ id: 1, name: 'test' }, { id: 2, name: 'test2' }];
      const mockClient = createMockClient({
        pgAvailable: true,
        pgResult: createSuccessResponse(expectedRows),
      });
      const context = createMockContext(mockClient);
      const result = await executeSqlTool.execute({ sql: 'SELECT * FROM users' }, context);
      expect(result).toEqual(expectedRows);
    });
    test('returns empty array for query with no results', async () => {
      const mockClient = createMockClient({
        pgAvailable: true,
        pgResult: createSuccessResponse([]),
      });
      const context = createMockContext(mockClient);
      const result = await executeSqlTool.execute(
        { sql: 'SELECT * FROM users WHERE 1=0' },
        context
      );
      expect(result).toEqual([]);
    });
    // SQL failures are rethrown with the SQLSTATE code embedded in the message.
    test('throws error for SQL error response', async () => {
      const mockClient = createMockClient({
        pgAvailable: true,
        pgResult: createErrorResponse('syntax error at position 1', '42601'),
      });
      const context = createMockContext(mockClient);
      await expect(
        executeSqlTool.execute({ sql: 'INVALID SQL' }, context)
      ).rejects.toThrow('SQL Error (42601): syntax error at position 1');
    });
    // Transport preference: direct pg first, service-role RPC as fallback.
    test('uses pg connection when available', async () => {
      const mockClient = createMockClient({
        pgAvailable: true,
        pgResult: createSuccessResponse([{ result: 1 }]),
      });
      const context = createMockContext(mockClient);
      await executeSqlTool.execute({ sql: 'SELECT 1 as result' }, context);
      expect(mockClient.executeSqlWithPg).toHaveBeenCalled();
      expect(mockClient.executeSqlViaRpc).not.toHaveBeenCalled();
    });
    test('falls back to service role RPC when pg is not available', async () => {
      const mockClient = createMockClient({
        pgAvailable: false,
        serviceRoleAvailable: true,
        serviceRoleRpcResult: createSuccessResponse([{ result: 1 }]),
      });
      const context = createMockContext(mockClient);
      await executeSqlTool.execute({ sql: 'SELECT 1 as result' }, context);
      expect(mockClient.executeSqlViaServiceRoleRpc).toHaveBeenCalled();
    });
    test('passes read_only flag to service role RPC', async () => {
      const mockClient = createMockClient({
        pgAvailable: false,
        serviceRoleAvailable: true,
        serviceRoleRpcResult: createSuccessResponse([]),
      });
      const context = createMockContext(mockClient);
      await executeSqlTool.execute(
        { sql: 'SELECT 1', read_only: true },
        context
      );
      expect(mockClient.executeSqlViaServiceRoleRpc).toHaveBeenCalledWith('SELECT 1', true);
    });
    // With no transport at all the tool fails fast with a clear message.
    test('throws error when neither pg nor service role is available', async () => {
      const mockClient = createMockClient({
        pgAvailable: false,
        serviceRoleAvailable: false,
      });
      const context = createMockContext(mockClient);
      await expect(
        executeSqlTool.execute({ sql: 'SELECT 1' }, context)
      ).rejects.toThrow('execute_sql requires either a direct database connection');
    });
    test('handles complex query results', async () => {
      // Nested objects and arrays must round-trip through the tool untouched.
      const complexResult = [
        {
          id: 1,
          created_at: '2024-01-01T00:00:00Z',
          metadata: { key: 'value' },
          tags: ['a', 'b', 'c'],
        },
      ];
      const mockClient = createMockClient({
        pgAvailable: true,
        pgResult: createSuccessResponse(complexResult),
      });
      const context = createMockContext(mockClient);
      const result = await executeSqlTool.execute(
        { sql: 'SELECT * FROM complex_table' },
        context
      );
      expect(result).toEqual(complexResult);
    });
    test('handles INSERT returning result', async () => {
      const insertResult = [{ id: 42 }];
      const mockClient = createMockClient({
        pgAvailable: true,
        pgResult: createSuccessResponse(insertResult),
      });
      const context = createMockContext(mockClient);
      const result = await executeSqlTool.execute(
        { sql: "INSERT INTO users (name) VALUES ('test') RETURNING id" },
        context
      );
      expect(result).toEqual(insertResult);
    });
    test('handles UPDATE with no rows affected', async () => {
      const mockClient = createMockClient({
        pgAvailable: true,
        pgResult: createSuccessResponse([]),
      });
      const context = createMockContext(mockClient);
      const result = await executeSqlTool.execute(
        { sql: "UPDATE users SET name = 'test' WHERE id = -1" },
        context
      );
      expect(result).toEqual([]);
    });
  });
  describe('output validation', () => {
    test('output schema accepts array of objects', () => {
      const result = executeSqlTool.outputSchema.safeParse([
        { id: 1, name: 'test' },
      ]);
      expect(result.success).toBe(true);
    });
    test('output schema accepts empty array', () => {
      const result = executeSqlTool.outputSchema.safeParse([]);
      expect(result.success).toBe(true);
    });
    // Row shape is intentionally unconstrained: any JSON structure is accepted.
    test('output schema accepts array with any structure', () => {
      const result = executeSqlTool.outputSchema.safeParse([
        { complex: { nested: { data: [1, 2, 3] } } },
      ]);
      expect(result.success).toBe(true);
    });
  });
});

View File

@ -0,0 +1,253 @@
/**
* Tests for miscellaneous tools
*
* Tools tested:
* - get_project_url
* - verify_jwt_secret
* - generate_typescript_types
* - list_realtime_publications
* - list_cron_jobs
* - list_vector_indexes
*/
import { describe, test, expect, mock } from 'bun:test';
import { getProjectUrlTool } from '../../tools/get_project_url.js';
import { verifyJwtSecretTool } from '../../tools/verify_jwt_secret.js';
import { generateTypesTool } from '../../tools/generate_typescript_types.js';
import {
createMockClient,
createMockContext,
} from '../helpers/mocks.js';
// getProjectUrlTool: metadata, empty-input schema, execute(), and URL-format
// output validation.
describe('getProjectUrlTool', () => {
  describe('metadata', () => {
    test('has correct name', () => {
      expect(getProjectUrlTool.name).toBe('get_project_url');
    });
    test('has description', () => {
      expect(getProjectUrlTool.description).toContain('URL');
    });
  });
  describe('input validation', () => {
    test('accepts empty input', () => {
      expect(getProjectUrlTool.inputSchema.safeParse({}).success).toBe(true);
    });
  });
  describe('execute', () => {
    test('returns project URL', async () => {
      const client = createMockClient({
        supabaseUrl: 'https://my-project.supabase.co',
      });
      const ctx = createMockContext(client);
      const { project_url } = await getProjectUrlTool.execute({}, ctx);
      expect(project_url).toBe('https://my-project.supabase.co');
    });
    test('returns configured URL from client', async () => {
      const customUrl = 'https://custom.supabase.example.com';
      const ctx = createMockContext(createMockClient({ supabaseUrl: customUrl }));
      const { project_url } = await getProjectUrlTool.execute({}, ctx);
      expect(project_url).toBe(customUrl);
    });
  });
  describe('output validation', () => {
    test('validates URL format', () => {
      const parsed = getProjectUrlTool.outputSchema.safeParse({
        project_url: 'https://example.com',
      });
      expect(parsed.success).toBe(true);
    });
    test('rejects invalid URL', () => {
      const parsed = getProjectUrlTool.outputSchema.safeParse({
        project_url: 'not-a-url',
      });
      expect(parsed.success).toBe(false);
    });
  });
});
// verifyJwtSecretTool: metadata, execute() for configured/missing secrets,
// and the two-value status output schema. The tool deliberately reports only
// a status and never echoes any part of the secret.
describe('verifyJwtSecretTool', () => {
  describe('metadata', () => {
    test('has correct name', () => {
      expect(verifyJwtSecretTool.name).toBe('verify_jwt_secret');
    });
    test('has description about JWT', () => {
      expect(verifyJwtSecretTool.description).toContain('JWT');
    });
  });
  describe('execute', () => {
    test('returns found status when JWT secret is configured (no preview for security)', async () => {
      const ctx = createMockContext(createMockClient({ jwtSecret: 'my-secret-jwt-key-12345' }));
      const result = await verifyJwtSecretTool.execute({}, ctx);
      expect(result.jwt_secret_status).toBe('found');
      // SECURITY: jwt_secret_preview was removed to avoid leaking secret info
      expect('jwt_secret_preview' in result).toBe(false);
    });
    test('returns not_configured status when JWT secret is missing', async () => {
      const client = createMockClient({ jwtSecret: undefined });
      client.getJwtSecret = () => undefined;
      const result = await verifyJwtSecretTool.execute({}, createMockContext(client));
      expect(result.jwt_secret_status).toBe('not_configured');
    });
  });
  describe('output validation', () => {
    test('validates found status', () => {
      const parsed = verifyJwtSecretTool.outputSchema.safeParse({
        jwt_secret_status: 'found',
      });
      expect(parsed.success).toBe(true);
    });
    test('validates not_configured status', () => {
      const parsed = verifyJwtSecretTool.outputSchema.safeParse({
        jwt_secret_status: 'not_configured',
      });
      expect(parsed.success).toBe(true);
    });
    test('rejects invalid status', () => {
      const parsed = verifyJwtSecretTool.outputSchema.safeParse({
        jwt_secret_status: 'invalid',
      });
      expect(parsed.success).toBe(false);
    });
  });
});
// Tests for generate_typescript_types: metadata, input defaults
// (included_schemas, output_filename), the DATABASE_URL precondition, and
// output-schema validation including the mandatory platform field.
describe('generateTypesTool', () => {
  describe('metadata', () => {
    test('has correct name', () => {
      expect(generateTypesTool.name).toBe('generate_typescript_types');
    });
    test('has description about TypeScript types', () => {
      expect(generateTypesTool.description).toContain('TypeScript');
    });
  });
  describe('input validation', () => {
    test('requires output_path', () => {
      const result = generateTypesTool.inputSchema.safeParse({});
      expect(result.success).toBe(false);
    });
    test('accepts valid input', () => {
      const result = generateTypesTool.inputSchema.safeParse({
        output_path: '/path/to/types.ts',
      });
      expect(result.success).toBe(true);
    });
    // When schemas are omitted the tool defaults to just ['public'].
    test('defaults included_schemas to public', () => {
      const result = generateTypesTool.inputSchema.safeParse({
        output_path: '/path/to/types.ts',
      });
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.included_schemas).toEqual(['public']);
      }
    });
    test('accepts custom schemas', () => {
      const result = generateTypesTool.inputSchema.safeParse({
        output_path: '/path/to/types.ts',
        included_schemas: ['public', 'auth', 'storage'],
      });
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.included_schemas).toEqual(['public', 'auth', 'storage']);
      }
    });
    test('defaults output_filename', () => {
      const result = generateTypesTool.inputSchema.safeParse({
        output_path: '/path/to/types.ts',
      });
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.output_filename).toBe('database.types.ts');
      }
    });
  });
  describe('execute', () => {
    // Without a configured DATABASE_URL the tool returns a failure result
    // (success=false, explanatory message) rather than throwing.
    test('returns error when DATABASE_URL is not configured', async () => {
      const mockClient = createMockClient({ dbUrl: undefined });
      mockClient.getDbUrl = () => undefined;
      const context = createMockContext(mockClient);
      const result = await generateTypesTool.execute(
        { output_path: '/tmp/types.ts' },
        context
      );
      expect(result.success).toBe(false);
      expect(result.message).toContain('DATABASE_URL');
    });
    // The host OS identifier is returned even on the failure path.
    test('includes platform in response', async () => {
      const mockClient = createMockClient({ dbUrl: undefined });
      mockClient.getDbUrl = () => undefined;
      const context = createMockContext(mockClient);
      const result = await generateTypesTool.execute(
        { output_path: '/tmp/types.ts' },
        context
      );
      expect(result.platform).toBeDefined();
      expect(['win32', 'darwin', 'linux', 'freebsd', 'openbsd']).toContain(result.platform);
    });
  });
  describe('output validation', () => {
    test('validates success response', () => {
      const result = generateTypesTool.outputSchema.safeParse({
        success: true,
        message: 'Types generated',
        types: 'export type User = {...}',
        file_path: '/path/to/types.ts',
        platform: 'linux',
      });
      expect(result.success).toBe(true);
    });
    // types/file_path are optional on failure; success+message+platform suffice.
    test('validates failure response', () => {
      const result = generateTypesTool.outputSchema.safeParse({
        success: false,
        message: 'Failed to generate types',
        platform: 'darwin',
      });
      expect(result.success).toBe(true);
    });
    test('requires platform field', () => {
      const result = generateTypesTool.outputSchema.safeParse({
        success: false,
        message: 'Error',
      });
      expect(result.success).toBe(false);
    });
  });
});

View File

@ -0,0 +1,380 @@
/**
* Tests for storage-related tools
*
* Tools tested:
* - list_storage_buckets
* - list_storage_objects
* - get_storage_config
* - update_storage_config
*/
import { describe, test, expect, mock } from 'bun:test';
import { listStorageBucketsTool } from '../../tools/list_storage_buckets.js';
import { listStorageObjectsTool } from '../../tools/list_storage_objects.js';
import {
createMockClient,
createMockContext,
createSuccessResponse,
createErrorResponse,
testData,
} from '../helpers/mocks.js';
// Tests for list_storage_buckets: metadata, execute() against bucket rows
// from the shared testData fixture, and output-schema validation.
describe('listStorageBucketsTool', () => {
  describe('metadata', () => {
    test('has correct name', () => {
      expect(listStorageBucketsTool.name).toBe('list_storage_buckets');
    });
    test('has description', () => {
      expect(listStorageBucketsTool.description).toContain('bucket');
    });
    test('has input and output schemas', () => {
      expect(listStorageBucketsTool.inputSchema).toBeDefined();
      expect(listStorageBucketsTool.outputSchema).toBeDefined();
    });
  });
  describe('input validation', () => {
    test('accepts empty input', () => {
      const result = listStorageBucketsTool.inputSchema.safeParse({});
      expect(result.success).toBe(true);
    });
  });
  describe('execute', () => {
    test('returns list of buckets', async () => {
      const mockClient = createMockClient({
        pgAvailable: true,
        pgResult: createSuccessResponse(testData.buckets),
      });
      const context = createMockContext(mockClient);
      const result = await listStorageBucketsTool.execute({}, context);
      expect(result).toEqual(testData.buckets);
    });
    test('returns empty array when no buckets', async () => {
      const mockClient = createMockClient({
        pgAvailable: true,
        pgResult: createSuccessResponse([]),
      });
      const context = createMockContext(mockClient);
      const result = await listStorageBucketsTool.execute({}, context);
      expect(result).toEqual([]);
    });
    // Without a direct pg connection the tool rejects rather than falling back.
    test('throws error when pg is not available', async () => {
      const mockClient = createMockClient({ pgAvailable: false });
      const context = createMockContext(mockClient);
      await expect(listStorageBucketsTool.execute({}, context)).rejects.toThrow(
        'Direct database connection'
      );
    });
    test('throws error on SQL failure', async () => {
      const mockClient = createMockClient({
        pgAvailable: true,
        pgResult: createErrorResponse('relation "storage.buckets" does not exist', '42P01'),
      });
      const context = createMockContext(mockClient);
      await expect(listStorageBucketsTool.execute({}, context)).rejects.toThrow('SQL Error');
    });
    test('uses pg connection directly', async () => {
      const mockClient = createMockClient({
        pgAvailable: true,
        pgResult: createSuccessResponse([]),
      });
      const context = createMockContext(mockClient);
      await listStorageBucketsTool.execute({}, context);
      expect(mockClient.executeSqlWithPg).toHaveBeenCalled();
    });
  });
  describe('output validation', () => {
    test('validates correct bucket structure', () => {
      const result = listStorageBucketsTool.outputSchema.safeParse(testData.buckets);
      expect(result.success).toBe(true);
    });
    // Nullable columns may all be null simultaneously; id (at minimum) is required.
    test('accepts buckets with all nullable fields as null', () => {
      const bucketWithNulls = [{
        id: 'test-id',
        name: 'test-bucket',
        owner: null,
        public: false,
        avif_autodetection: false,
        file_size_limit: null,
        allowed_mime_types: null,
        created_at: null,
        updated_at: null,
      }];
      const result = listStorageBucketsTool.outputSchema.safeParse(bucketWithNulls);
      expect(result.success).toBe(true);
    });
    test('rejects bucket without required id', () => {
      const invalid = [{ name: 'test' }];
      const result = listStorageBucketsTool.outputSchema.safeParse(invalid);
      expect(result.success).toBe(false);
    });
    test('rejects bucket with invalid public type', () => {
      const invalid = [{
        id: 'test',
        name: 'test',
        owner: null,
        public: 'yes', // should be boolean
        avif_autodetection: false,
        file_size_limit: null,
        allowed_mime_types: null,
        created_at: null,
        updated_at: null,
      }];
      const result = listStorageBucketsTool.outputSchema.safeParse(invalid);
      expect(result.success).toBe(false);
    });
  });
});
describe('listStorageObjectsTool', () => {
    describe('metadata', () => {
        test('has correct name', () => {
            expect(listStorageObjectsTool.name).toBe('list_storage_objects');
        });
        test('has description', () => {
            expect(listStorageObjectsTool.description).toContain('object');
        });
    });

    describe('input validation', () => {
        // bucket_id is the only mandatory field.
        test('requires bucket_id', () => {
            const result = listStorageObjectsTool.inputSchema.safeParse({});
            expect(result.success).toBe(false);
        });
        test('accepts bucket_id only', () => {
            const result = listStorageObjectsTool.inputSchema.safeParse({ bucket_id: 'test-bucket' });
            expect(result.success).toBe(true);
            if (result.success) {
                // Schema defaults: limit=100, offset=0.
                expect(result.data.limit).toBe(100);
                expect(result.data.offset).toBe(0);
            }
        });
        test('accepts all parameters', () => {
            const result = listStorageObjectsTool.inputSchema.safeParse({
                bucket_id: 'test-bucket',
                limit: 50,
                offset: 10,
                prefix: 'public/',
            });
            expect(result.success).toBe(true);
        });
        test('rejects negative limit', () => {
            const result = listStorageObjectsTool.inputSchema.safeParse({
                bucket_id: 'test',
                limit: -1,
            });
            expect(result.success).toBe(false);
        });
        test('rejects negative offset', () => {
            const result = listStorageObjectsTool.inputSchema.safeParse({
                bucket_id: 'test',
                offset: -1,
            });
            expect(result.success).toBe(false);
        });
    });

    describe('execute', () => {
        /**
         * Builds a mock client whose `executeTransactionWithPg` simply runs the
         * supplied callback against `pgClient`, mirroring the real transaction
         * wrapper. Extracted here because the same four-line wiring was
         * previously duplicated in every test of this section.
         */
        const clientWithTransaction = (pgClient: { query: ReturnType<typeof mock> }) => {
            const client = createMockClient({ pgAvailable: true });
            (client.executeTransactionWithPg as ReturnType<typeof mock>).mockImplementation(
                async (callback: (c: unknown) => Promise<unknown>) => callback(pgClient)
            );
            return client;
        };

        test('returns list of objects', async () => {
            const pgClient = {
                query: mock(async () => ({ rows: testData.storageObjects })),
            };
            const context = createMockContext(clientWithTransaction(pgClient));
            const result = await listStorageObjectsTool.execute(
                { bucket_id: 'avatars' },
                context
            );
            expect(result.length).toBe(testData.storageObjects.length);
        });

        test('returns empty array when no objects', async () => {
            const pgClient = {
                query: mock(async () => ({ rows: [] })),
            };
            const context = createMockContext(clientWithTransaction(pgClient));
            const result = await listStorageObjectsTool.execute(
                { bucket_id: 'empty-bucket' },
                context
            );
            expect(result).toEqual([]);
        });

        // Without a direct database connection the tool must fail fast.
        test('throws error when pg is not available', async () => {
            const mockClient = createMockClient({ pgAvailable: false });
            const context = createMockContext(mockClient);
            await expect(
                listStorageObjectsTool.execute({ bucket_id: 'test' }, context)
            ).rejects.toThrow('Direct database connection');
        });

        // The parameterized query must go through the transaction wrapper,
        // not plain executeSqlWithPg.
        test('uses transaction for parameterized query', async () => {
            const pgClient = {
                query: mock(async () => ({ rows: [] })),
            };
            const client = clientWithTransaction(pgClient);
            const context = createMockContext(client);
            await listStorageObjectsTool.execute({ bucket_id: 'test' }, context);
            expect(client.executeTransactionWithPg).toHaveBeenCalled();
        });

        // Capture the SQL and bind parameters to confirm prefix filtering is
        // applied via LIKE with a trailing wildcard.
        test('applies prefix filter in query', async () => {
            let executedSql = '';
            let executedParams: unknown[] = [];
            const pgClient = {
                query: mock(async (sql: string, params: unknown[]) => {
                    executedSql = sql;
                    executedParams = params;
                    return { rows: [] };
                }),
            };
            const context = createMockContext(clientWithTransaction(pgClient));
            await listStorageObjectsTool.execute(
                { bucket_id: 'test', prefix: 'images/' },
                context
            );
            expect(executedSql).toContain('LIKE');
            expect(executedParams).toContain('images/%');
        });
    });

    describe('output validation', () => {
        test('validates correct object structure', () => {
            const validObjects = [{
                id: '123e4567-e89b-12d3-a456-426614174000',
                name: 'file.txt',
                bucket_id: 'test',
                owner: '123e4567-e89b-12d3-a456-426614174001',
                version: null,
                mimetype: 'text/plain',
                size: 1024,
                metadata: { mimetype: 'text/plain', size: 1024 },
                created_at: '2024-01-01',
                updated_at: null,
                last_accessed_at: null,
            }];
            const result = listStorageObjectsTool.outputSchema.safeParse(validObjects);
            expect(result.success).toBe(true);
        });

        // Postgres bigint columns arrive as strings; the schema must coerce them.
        test('transforms string size to number', () => {
            const objectWithStringSize = [{
                id: '123e4567-e89b-12d3-a456-426614174000',
                name: 'file.txt',
                bucket_id: 'test',
                owner: null,
                version: null,
                mimetype: null,
                size: '1024', // string
                metadata: null,
                created_at: null,
                updated_at: null,
                last_accessed_at: null,
            }];
            const result = listStorageObjectsTool.outputSchema.safeParse(objectWithStringSize);
            expect(result.success).toBe(true);
            if (result.success) {
                expect(result.data[0].size).toBe(1024);
            }
        });

        test('handles null size', () => {
            const objectWithNullSize = [{
                id: '123e4567-e89b-12d3-a456-426614174000',
                name: null,
                bucket_id: 'test',
                owner: null,
                version: null,
                mimetype: null,
                size: null,
                metadata: null,
                created_at: null,
                updated_at: null,
                last_accessed_at: null,
            }];
            const result = listStorageObjectsTool.outputSchema.safeParse(objectWithNullSize);
            expect(result.success).toBe(true);
            if (result.success) {
                expect(result.data[0].size).toBeNull();
            }
        });

        test('rejects invalid UUID for id', () => {
            const invalid = [{
                id: 'not-a-uuid',
                name: 'file.txt',
                bucket_id: 'test',
                owner: null,
                version: null,
                mimetype: null,
                size: null,
                metadata: null,
                created_at: null,
                updated_at: null,
                last_accessed_at: null,
            }];
            const result = listStorageObjectsTool.outputSchema.safeParse(invalid);
            expect(result.success).toBe(false);
        });
    });
});

View File

@ -0,0 +1,177 @@
import { describe, test, expect } from 'bun:test';
import type {
SelfhostedSupabaseClientOptions,
SqlSuccessResponse,
SqlErrorResponse,
SqlExecutionResult,
AuthUser,
StorageBucket,
StorageObject,
} from '../types/index.js';
describe('Type Definitions', () => {
    // These tests are primarily compile-time checks: if the type annotations
    // below stop compiling, the shape of the exported types has drifted.
    describe('SelfhostedSupabaseClientOptions', () => {
        test('required fields are enforced at compile time', () => {
            const minimal: SelfhostedSupabaseClientOptions = {
                supabaseUrl: 'http://localhost:54321',
                supabaseAnonKey: 'test-anon-key',
            };
            expect(minimal.supabaseUrl).toBe('http://localhost:54321');
            expect(minimal.supabaseAnonKey).toBe('test-anon-key');
        });

        test('optional fields can be provided', () => {
            const complete: SelfhostedSupabaseClientOptions = {
                supabaseUrl: 'http://localhost:54321',
                supabaseAnonKey: 'test-anon-key',
                supabaseServiceRoleKey: 'service-key',
                databaseUrl: 'postgresql://localhost:5432/db',
                jwtSecret: 'secret',
            };
            expect(complete.supabaseServiceRoleKey).toBe('service-key');
            expect(complete.databaseUrl).toBe('postgresql://localhost:5432/db');
            expect(complete.jwtSecret).toBe('secret');
        });
    });

    describe('SqlExecutionResult', () => {
        test('SqlSuccessResponse is array of records', () => {
            const rows: SqlSuccessResponse = [
                { id: 1, name: 'test' },
                { id: 2, name: 'test2' },
            ];
            expect(Array.isArray(rows)).toBe(true);
            expect(rows.length).toBe(2);
        });

        test('SqlErrorResponse has error object', () => {
            const failure: SqlErrorResponse = {
                error: {
                    message: 'Test error',
                    code: 'TEST001',
                    details: 'Some details',
                    hint: 'Try this',
                },
            };
            expect(failure.error.message).toBe('Test error');
            expect(failure.error.code).toBe('TEST001');
        });

        test('SqlExecutionResult can be either type', () => {
            const okResult: SqlExecutionResult = [{ id: 1 }];
            const errResult: SqlExecutionResult = {
                error: { message: 'error' },
            };
            // Discriminate the union the same way production code does.
            if ('error' in errResult) {
                expect(errResult.error.message).toBe('error');
            }
            if (Array.isArray(okResult)) {
                expect(okResult[0].id).toBe(1);
            }
        });
    });

    describe('AuthUser', () => {
        test('can create valid AuthUser object', () => {
            const authUser: AuthUser = {
                id: '123e4567-e89b-12d3-a456-426614174000',
                email: 'test@example.com',
                role: 'authenticated',
                created_at: '2024-01-01T00:00:00Z',
                last_sign_in_at: '2024-01-02T00:00:00Z',
                raw_app_meta_data: { provider: 'email' },
                raw_user_meta_data: { name: 'Test User' },
            };
            expect(authUser.id).toBe('123e4567-e89b-12d3-a456-426614174000');
            expect(authUser.email).toBe('test@example.com');
        });

        test('nullable fields can be null', () => {
            const authUser: AuthUser = {
                id: '123e4567-e89b-12d3-a456-426614174000',
                email: null,
                role: null,
                created_at: null,
                last_sign_in_at: null,
                raw_app_meta_data: null,
                raw_user_meta_data: null,
            };
            expect(authUser.email).toBeNull();
            expect(authUser.role).toBeNull();
        });
    });

    describe('StorageBucket', () => {
        test('can create valid StorageBucket object', () => {
            const publicBucket: StorageBucket = {
                id: 'avatars',
                name: 'avatars',
                owner: '123e4567-e89b-12d3-a456-426614174000',
                public: true,
                avif_autodetection: false,
                file_size_limit: 5242880,
                allowed_mime_types: ['image/png', 'image/jpeg'],
                created_at: '2024-01-01T00:00:00Z',
                updated_at: '2024-01-01T00:00:00Z',
            };
            expect(publicBucket.id).toBe('avatars');
            expect(publicBucket.public).toBe(true);
        });

        test('nullable fields can be null', () => {
            const privateBucket: StorageBucket = {
                id: 'documents',
                name: 'documents',
                owner: null,
                public: false,
                avif_autodetection: false,
                file_size_limit: null,
                allowed_mime_types: null,
                created_at: null,
                updated_at: null,
            };
            expect(privateBucket.owner).toBeNull();
            expect(privateBucket.file_size_limit).toBeNull();
        });
    });

    describe('StorageObject', () => {
        test('can create valid StorageObject object', () => {
            const storedFile: StorageObject = {
                id: '123e4567-e89b-12d3-a456-426614174000',
                name: 'image.png',
                bucket_id: 'avatars',
                owner: '123e4567-e89b-12d3-a456-426614174001',
                version: '1',
                mimetype: 'image/png',
                size: 1024,
                metadata: { contentType: 'image/png' },
                created_at: '2024-01-01T00:00:00Z',
                updated_at: '2024-01-01T00:00:00Z',
                last_accessed_at: '2024-01-02T00:00:00Z',
            };
            expect(storedFile.name).toBe('image.png');
            expect(storedFile.size).toBe(1024);
        });

        test('nullable fields can be null', () => {
            const storedFile: StorageObject = {
                id: '123e4567-e89b-12d3-a456-426614174000',
                name: null,
                bucket_id: 'documents',
                owner: null,
                version: null,
                mimetype: null,
                size: null,
                metadata: null,
                created_at: null,
                updated_at: null,
                last_accessed_at: null,
            };
            expect(storedFile.name).toBeNull();
            expect(storedFile.size).toBeNull();
        });
    });
});

View File

@ -0,0 +1,209 @@
import { describe, test, expect, mock, beforeEach, afterEach } from 'bun:test';
import { z } from 'zod';
import { isSqlErrorResponse, handleSqlResponse, executeSqlWithFallback, runExternalCommand } from '../tools/utils.js';
import type { SqlExecutionResult, SqlErrorResponse, SqlSuccessResponse } from '../types/index.js';
import { createMockClient, createSuccessResponse, createErrorResponse } from './helpers/mocks.js';
describe('utils', () => {
describe('isSqlErrorResponse', () => {
test('returns true for error response', () => {
const errorResult: SqlErrorResponse = {
error: {
message: 'Test error',
code: 'TEST001',
},
};
expect(isSqlErrorResponse(errorResult)).toBe(true);
});
test('returns false for success response', () => {
const successResult: SqlSuccessResponse = [
{ id: 1, name: 'test' },
];
expect(isSqlErrorResponse(successResult)).toBe(false);
});
test('returns false for empty array (valid success)', () => {
const emptyResult: SqlSuccessResponse = [];
expect(isSqlErrorResponse(emptyResult)).toBe(false);
});
});
describe('handleSqlResponse', () => {
const testSchema = z.array(
z.object({
id: z.number(),
name: z.string(),
})
);
test('returns parsed data for valid success response', () => {
const successResult: SqlSuccessResponse = [
{ id: 1, name: 'test' },
{ id: 2, name: 'test2' },
];
const result = handleSqlResponse(successResult, testSchema);
expect(result).toEqual([
{ id: 1, name: 'test' },
{ id: 2, name: 'test2' },
]);
});
test('throws error for SQL error response', () => {
const errorResult: SqlErrorResponse = {
error: {
message: 'Database error',
code: 'DB001',
},
};
expect(() => handleSqlResponse(errorResult, testSchema)).toThrow(
'SQL Error (DB001): Database error'
);
});
test('throws error for schema validation failure', () => {
const invalidData: SqlSuccessResponse = [
{ id: 'not-a-number', name: 'test' } as unknown as Record<string, unknown>,
];
expect(() => handleSqlResponse(invalidData, testSchema)).toThrow(
'Schema validation failed'
);
});
test('handles empty array with array schema', () => {
const emptyResult: SqlSuccessResponse = [];
const result = handleSqlResponse(emptyResult, testSchema);
expect(result).toEqual([]);
});
test('error message includes path for nested validation errors', () => {
const nestedSchema = z.array(
z.object({
user: z.object({
email: z.string().email('Invalid email'),
}),
})
);
const invalidData: SqlSuccessResponse = [
{ user: { email: 'not-an-email' } },
];
expect(() => handleSqlResponse(invalidData, nestedSchema)).toThrow(
/user\.email/
);
});
});
describe('executeSqlWithFallback', () => {
test('uses direct pg connection when available', async () => {
const expectedRows = [{ id: 1, name: 'test' }];
const mockClient = createMockClient({
pgAvailable: true,
pgResult: createSuccessResponse(expectedRows),
rpcResult: createSuccessResponse([{ id: 2, name: 'rpc' }]),
});
const result = await executeSqlWithFallback(mockClient, 'SELECT * FROM users');
expect(result).toEqual(expectedRows);
expect(mockClient.executeSqlWithPg).toHaveBeenCalledTimes(1);
expect(mockClient.executeSqlViaRpc).not.toHaveBeenCalled();
});
test('falls back to service role RPC when pg is not available', async () => {
const expectedRows = [{ id: 1, name: 'service-role-result' }];
const mockClient = createMockClient({
pgAvailable: false,
serviceRoleAvailable: true,
serviceRoleRpcResult: createSuccessResponse(expectedRows),
});
const result = await executeSqlWithFallback(mockClient, 'SELECT * FROM users', true);
expect(result).toEqual(expectedRows);
expect(mockClient.executeSqlViaServiceRoleRpc).toHaveBeenCalledTimes(1);
expect(mockClient.executeSqlViaServiceRoleRpc).toHaveBeenCalledWith('SELECT * FROM users', true);
});
test('propagates error from pg connection', async () => {
const errorResponse = createErrorResponse('Connection failed', 'CONN_ERR');
const mockClient = createMockClient({
pgAvailable: true,
pgResult: errorResponse,
});
const result = await executeSqlWithFallback(mockClient, 'SELECT 1');
expect(result).toEqual(errorResponse);
});
test('propagates error from service role RPC fallback', async () => {
const errorResponse = createErrorResponse('RPC failed', 'RPC_ERR');
const mockClient = createMockClient({
pgAvailable: false,
serviceRoleAvailable: true,
serviceRoleRpcResult: errorResponse,
});
const result = await executeSqlWithFallback(mockClient, 'SELECT 1');
expect(result).toEqual(errorResponse);
});
test('defaults readOnly to true when using service role RPC', async () => {
const mockClient = createMockClient({ pgAvailable: false, serviceRoleAvailable: true });
await executeSqlWithFallback(mockClient, 'SELECT 1');
expect(mockClient.executeSqlViaServiceRoleRpc).toHaveBeenCalledWith('SELECT 1', true);
});
test('returns error when neither pg nor service role is available', async () => {
const mockClient = createMockClient({
pgAvailable: false,
serviceRoleAvailable: false,
});
const result = await executeSqlWithFallback(mockClient, 'SELECT 1');
expect(result).toHaveProperty('error');
expect((result as { error: { code: string } }).error.code).toBe('MCP_CONFIG_ERROR');
});
});
describe('runExternalCommand', () => {
test('executes command and returns stdout', async () => {
const result = await runExternalCommand('echo "hello world"');
expect(result.stdout.trim()).toBe('hello world');
expect(result.stderr).toBe('');
expect(result.error).toBeNull();
});
test('returns empty stdout for command with no output', async () => {
const result = await runExternalCommand('true');
expect(result.stdout).toBe('');
expect(result.stderr).toBe('');
expect(result.error).toBeNull();
});
test('captures stderr and error for failing command', async () => {
const result = await runExternalCommand('ls /nonexistent-directory-12345');
expect(result.error).not.toBeNull();
expect(result.stderr.length).toBeGreaterThan(0);
});
test('returns error for non-existent command', async () => {
const result = await runExternalCommand('nonexistent-command-12345');
expect(result.error).not.toBeNull();
});
test('handles command with exit code', async () => {
const result = await runExternalCommand('exit 1');
expect(result.error).not.toBeNull();
});
});
});

View File

@ -0,0 +1,475 @@
import { createClient } from '@supabase/supabase-js';
import type { SupabaseClient } from '@supabase/supabase-js';
import type { SelfhostedSupabaseClientOptions, SqlExecutionResult, SqlErrorResponse, SqlSuccessResponse } from '../types/index.js';
import { Pool } from 'pg'; // We'll need this later for direct DB access
import type { PoolClient } from 'pg'; // Import PoolClient type
/**
* A client tailored for interacting with self-hosted Supabase instances.
* Handles both Supabase API interactions and direct database connections.
*/
export class SelfhostedSupabaseClient {
private options: SelfhostedSupabaseClientOptions;
public supabase: SupabaseClient;
private supabaseServiceRole: SupabaseClient | null = null; // For privileged operations (service_role key)
private pgPool: Pool | null = null; // Lazy initialized pool for direct DB access
private rpcFunctionExists = false;
// SQL definition for the helper function
private static readonly CREATE_EXECUTE_SQL_FUNCTION = `
CREATE OR REPLACE FUNCTION public.execute_sql(query text, read_only boolean DEFAULT false)
RETURNS jsonb -- Using jsonb is generally preferred over json
LANGUAGE plpgsql
AS $$
DECLARE
result jsonb;
BEGIN
-- Note: SET TRANSACTION READ ONLY might not behave as expected within a function
-- depending on the outer transaction state. Handle read-only logic outside if needed.
-- Execute the dynamic query and aggregate results into a JSONB array
EXECUTE 'SELECT COALESCE(jsonb_agg(t), ''[]''::jsonb) FROM (' || query || ') t' INTO result;
RETURN result;
EXCEPTION
WHEN others THEN
-- Rethrow the error with context, including the original SQLSTATE
RAISE EXCEPTION 'Error executing SQL (SQLSTATE: %): % ', SQLSTATE, SQLERRM;
END;
$$;
`;
// SQL to grant permissions - SECURITY: Only service_role can execute arbitrary SQL
private static readonly GRANT_EXECUTE_SQL_FUNCTION = `
-- Revoke any existing grants to ensure clean state
REVOKE ALL ON FUNCTION public.execute_sql(text, boolean) FROM PUBLIC;
REVOKE ALL ON FUNCTION public.execute_sql(text, boolean) FROM authenticated;
REVOKE ALL ON FUNCTION public.execute_sql(text, boolean) FROM anon;
-- Grant only to service_role for privileged operations
GRANT EXECUTE ON FUNCTION public.execute_sql(text, boolean) TO service_role;
`;
/**
* Creates an instance of SelfhostedSupabaseClient.
* Note: Call initialize() after creating the instance to check for RPC functions.
* @param options - Configuration options for the client.
*/
private constructor(options: SelfhostedSupabaseClientOptions) {
this.options = options;
// Validate required options first
if (!options.supabaseUrl || !options.supabaseAnonKey) {
throw new Error('Supabase URL and Anon Key are required.');
}
// Initialize the primary Supabase client (anon key) - for regular user context
this.supabase = createClient(options.supabaseUrl, options.supabaseAnonKey, options.supabaseClientOptions);
// Initialize the privileged Supabase client (service role key) - for admin/SQL operations
if (options.supabaseServiceRoleKey) {
this.supabaseServiceRole = createClient(
options.supabaseUrl,
options.supabaseServiceRoleKey,
options.supabaseClientOptions
);
}
}
/**
* Factory function to create and asynchronously initialize the client.
* Checks for the existence of the helper RPC function.
*/
public static async create(options: SelfhostedSupabaseClientOptions): Promise<SelfhostedSupabaseClient> {
const client = new SelfhostedSupabaseClient(options);
await client.initialize();
return client;
}
/**
* Initializes the client by checking for the required RPC function.
* Attempts to create the function if it doesn't exist and a service role key is provided.
*/
public async initialize(): Promise<void> {
console.error('Initializing SelfhostedSupabaseClient...');
try {
await this.checkAndCreateRpcFunction();
console.error(`RPC function 'public.execute_sql' status: ${this.rpcFunctionExists ? 'Available' : 'Unavailable'}`);
} catch (error) {
console.error('Error during client initialization:', error);
// Decide if we should throw or allow continuation without RPC
// For now, let's log and continue, executeSqlViaRpc will throw if needed
}
console.error('Initialization complete.');
}
// --- Public Methods (to be implemented) ---
/**
* Executes SQL using the preferred RPC method.
*/
public async executeSqlViaRpc(query: string, readOnly = false): Promise<SqlExecutionResult> {
if (!this.rpcFunctionExists) {
// This should ideally not be hit if initialize() succeeded and the function
// was expected to be available, but good to have a check.
console.error('Attempted to call executeSqlViaRpc, but RPC function is not available.');
return {
error: {
message: 'execute_sql RPC function not found or client not properly initialized.',
code: 'MCP_CLIENT_ERROR',
},
} as SqlErrorResponse;
}
console.error(`Executing via RPC (readOnly: ${readOnly}): ${query.substring(0, 100)}...`);
try {
const { data, error } = await this.supabase.rpc('execute_sql', {
query: query,
read_only: readOnly,
});
if (error) {
console.error('Error executing SQL via RPC:', error);
// Attempt to conform to SqlErrorResponse structure
return {
error: {
message: error.message,
code: error.code, // Propagate Supabase/PostgREST error code
details: error.details,
hint: error.hint,
},
};
}
// The RPC function returns JSONB which Supabase client parses.
// We expect it to be an array of objects (records).
// Add a type check for safety, although the RPC function should guarantee the shape.
if (Array.isArray(data)) {
// Explicitly cast to expected success type
return data as SqlSuccessResponse;
}
// If it's not an array, something went wrong with the RPC function's output
console.error('Unexpected response format from execute_sql RPC:', data);
return {
error: {
message: 'Unexpected response format from execute_sql RPC. Expected JSON array.',
code: 'MCP_RPC_FORMAT_ERROR',
},
} as SqlErrorResponse;
} catch (rpcError: unknown) {
const errorMessage = rpcError instanceof Error ? rpcError.message : String(rpcError);
console.error('Exception during executeSqlViaRpc call:', rpcError);
return {
error: {
message: `Exception during RPC call: ${errorMessage}`,
code: 'MCP_RPC_EXCEPTION',
},
} as SqlErrorResponse;
}
}
/**
* Executes SQL using the service role client (privileged).
* Required because execute_sql RPC is restricted to service_role only.
* SECURITY: This method uses elevated privileges - use only for MCP tool operations.
*/
public async executeSqlViaServiceRoleRpc(query: string, readOnly = false): Promise<SqlExecutionResult> {
if (!this.supabaseServiceRole) {
return {
error: {
message: 'Service role key not configured. Cannot execute privileged SQL via RPC.',
code: 'MCP_CONFIG_ERROR',
},
} as SqlErrorResponse;
}
if (!this.rpcFunctionExists) {
console.error('Attempted to call executeSqlViaServiceRoleRpc, but RPC function is not available.');
return {
error: {
message: 'execute_sql RPC function not found or client not properly initialized.',
code: 'MCP_CLIENT_ERROR',
},
} as SqlErrorResponse;
}
console.error(`Executing via Service Role RPC (readOnly: ${readOnly}): ${query.substring(0, 100)}...`);
try {
const { data, error } = await this.supabaseServiceRole.rpc('execute_sql', {
query: query,
read_only: readOnly,
});
if (error) {
console.error('Error executing SQL via Service Role RPC:', error);
return {
error: {
message: error.message,
code: error.code,
details: error.details,
hint: error.hint,
},
};
}
if (Array.isArray(data)) {
return data as SqlSuccessResponse;
}
console.error('Unexpected response format from execute_sql Service Role RPC:', data);
return {
error: {
message: 'Unexpected response format from execute_sql RPC. Expected JSON array.',
code: 'MCP_RPC_FORMAT_ERROR',
},
} as SqlErrorResponse;
} catch (rpcError: unknown) {
const errorMessage = rpcError instanceof Error ? rpcError.message : String(rpcError);
console.error('Exception during executeSqlViaServiceRoleRpc call:', rpcError);
return {
error: {
message: `Exception during Service Role RPC call: ${errorMessage}`,
code: 'MCP_RPC_EXCEPTION',
},
} as SqlErrorResponse;
}
}
/**
* Executes SQL directly against the database using the pg library.
* Requires DATABASE_URL to be configured.
* Useful for simple queries when RPC is unavailable or direct access is preferred.
* NOTE: Does not support transactions or parameterization directly.
* Consider executeTransactionWithPg for more complex operations.
*/
public async executeSqlWithPg(query: string): Promise<SqlExecutionResult> {
if (!this.options.databaseUrl) {
return { error: { message: 'DATABASE_URL is not configured. Cannot execute SQL directly.', code: 'MCP_CONFIG_ERROR' } };
}
await this.ensurePgPool(); // Ensure pool is initialized
if (!this.pgPool) { // Should not happen if ensurePgPool works, but type guard
return { error: { message: 'pg Pool not available after initialization attempt.', code: 'MCP_POOL_ERROR' } };
}
let client: PoolClient | undefined;
try {
client = await this.pgPool.connect();
console.error(`Executing via pg: ${query.substring(0, 100)}...`);
const result = await client.query(query);
// Return result in a format consistent with SqlSuccessResponse
// Assuming result.rows is the desired data array
return result.rows as SqlSuccessResponse;
} catch (dbError: unknown) {
const error = dbError instanceof Error ? dbError : new Error(String(dbError));
console.error('Error executing SQL with pg:', error);
// Try to extract code if possible (pg errors often have a .code property)
const code = (dbError as { code?: string }).code || 'PG_ERROR';
return { error: { message: error.message, code: code } };
} finally {
client?.release();
}
}
/**
* Ensures the pg connection pool is initialized.
* Should be called before accessing this.pgPool.
*/
private async ensurePgPool(): Promise<void> {
if (this.pgPool) return;
if (!this.options.databaseUrl) {
throw new Error('DATABASE_URL is not configured. Cannot initialize pg pool.');
}
console.error('Initializing pg pool...');
this.pgPool = new Pool({ connectionString: this.options.databaseUrl });
this.pgPool.on('error', (err, client) => {
console.error('PG Pool Error: Unexpected error on idle client', err);
// Optional: Implement logic to handle pool errors, e.g., attempt to reset pool
});
// Test connection?
try {
const client = await this.pgPool.connect();
console.error('pg pool connected successfully.');
client.release();
} catch (err) {
console.error('Failed to connect pg pool:', err);
// Clean up pool if connection fails?
await this.pgPool.end();
this.pgPool = null;
throw new Error(`Failed to connect pg pool: ${err instanceof Error ? err.message : String(err)}`);
}
}
/**
* Executes a series of operations within a single database transaction using the pg library.
* Requires DATABASE_URL to be configured.
* @param callback A function that receives a connected pg client and performs queries.
* It should return a promise that resolves on success or rejects on failure.
* The transaction will be committed if the promise resolves,
* and rolled back if it rejects.
*/
public async executeTransactionWithPg<T>(
callback: (client: PoolClient) => Promise<T>
): Promise<T> {
if (!this.options.databaseUrl) {
throw new Error('DATABASE_URL is not configured. Cannot execute transaction directly.');
}
await this.ensurePgPool();
if (!this.pgPool) {
throw new Error('pg Pool not available for transaction.');
}
const client = await this.pgPool.connect();
try {
await client.query('BEGIN');
console.error('BEGIN transaction');
const result = await callback(client);
await client.query('COMMIT');
console.error('COMMIT transaction');
return result;
} catch (error) {
console.error('Transaction Error - Rolling back:', error);
await client.query('ROLLBACK');
console.error('ROLLBACK transaction');
// Re-throw the error so the caller knows the transaction failed
throw error;
} finally {
client.release();
}
}
// --- Helper/Private Methods (to be implemented) ---
private async checkAndCreateRpcFunction(): Promise<void> {
console.error("Checking for public.execute_sql RPC function...");
// Use service role client for checking since execute_sql is restricted to service_role only
// Falls back to anon client if service role is not configured (will fail on permission check)
const clientToCheck = this.supabaseServiceRole || this.supabase;
const usingServiceRole = !!this.supabaseServiceRole;
if (!usingServiceRole) {
console.error("Warning: Checking execute_sql with anon key - this will fail if function exists but is restricted to service_role.");
}
try {
// Try calling the function with a simple query
const { error } = await clientToCheck.rpc('execute_sql', { query: 'SELECT 1' });
if (!error) {
console.error("'public.execute_sql' function found.");
this.rpcFunctionExists = true;
return;
}
const UNDEFINED_FUNCTION_ERROR_CODE = '42883';
// PostgREST error when function definition is not found in its cache
const POSTGREST_FUNCTION_NOT_FOUND_CODE = 'PGRST202';
if (
error.code === UNDEFINED_FUNCTION_ERROR_CODE ||
error.code === POSTGREST_FUNCTION_NOT_FOUND_CODE
) {
console.error(
`'public.execute_sql' function not found (Code: ${error.code}). Attempting creation...`,
);
if (!this.options.supabaseServiceRoleKey) {
console.error("Cannot create 'public.execute_sql': supabaseServiceRoleKey not provided.");
this.rpcFunctionExists = false;
return;
}
if (!this.options.databaseUrl) {
// Prefer direct DB connection for DDL if available
console.error("Cannot create 'public.execute_sql' reliably without databaseUrl for direct connection.");
// Could attempt with a service role client, but less ideal for DDL
this.rpcFunctionExists = false;
return;
}
try {
console.error("Creating 'public.execute_sql' function using direct DB connection...");
// Use direct DB connection (pg) as it's generally better for DDL
await this.executeSqlWithPg(SelfhostedSupabaseClient.CREATE_EXECUTE_SQL_FUNCTION);
await this.executeSqlWithPg(SelfhostedSupabaseClient.GRANT_EXECUTE_SQL_FUNCTION);
console.error("'public.execute_sql' function created and permissions granted successfully.");
// Attempt to notify PostgREST to reload its schema cache
console.error("Notifying PostgREST to reload schema cache...");
await this.executeSqlWithPg("NOTIFY pgrst, 'reload schema'");
console.error("PostgREST schema reload notification sent.");
// Assume success for now, but subsequent RPC calls will verify
this.rpcFunctionExists = true;
} catch (creationError: unknown) {
const errorMessage = creationError instanceof Error ? creationError.message : String(creationError);
console.error("Failed to create 'public.execute_sql' function or notify PostgREST:", creationError);
this.rpcFunctionExists = false;
// Rethrow or handle as appropriate
throw new Error(`Failed to create execute_sql function/notify: ${errorMessage}`);
}
} else {
console.error(
"Unexpected error checking for 'public.execute_sql' function:",
error,
);
this.rpcFunctionExists = false;
// Throw the original Supabase/PostgREST error for clarity
throw new Error(
`Error checking for execute_sql function: ${error.message}`,
);
}
} catch (err: unknown) {
const errorMessage = err instanceof Error ? err.message : String(err);
console.error("Exception during RPC function check/creation:", err);
this.rpcFunctionExists = false;
// Rethrow the error to be caught by initialize()
throw new Error(`Exception during RPC function check/creation: ${errorMessage}`); // Rethrow with a typed error
}
}
// --- Getters ---
/** Returns the configured Supabase project URL. */
public getSupabaseUrl(): string {
    const { supabaseUrl } = this.options;
    return supabaseUrl;
}
/** Returns the configured anonymous (public) API key. */
public getAnonKey(): string {
    const { supabaseAnonKey } = this.options;
    return supabaseAnonKey;
}
/** Returns the service role key, or undefined when not configured. */
public getServiceRoleKey(): string | undefined {
    const { supabaseServiceRoleKey } = this.options;
    return supabaseServiceRoleKey;
}
/**
 * Returns the configured JWT secret, or undefined when none was provided.
 */
public getJwtSecret(): string | undefined {
    const { jwtSecret } = this.options;
    return jwtSecret;
}
/**
 * Returns the direct database connection URL, or undefined when none was provided.
 */
public getDbUrl(): string | undefined {
    const { databaseUrl } = this.options;
    return databaseUrl;
}
/**
 * Reports whether a direct database connection (pg) is configured,
 * i.e. whether a databaseUrl option was supplied.
 */
public isPgAvailable(): boolean {
    return Boolean(this.options.databaseUrl);
}
/**
 * Reports whether the service role client exists for privileged operations.
 * Required for the execute_sql RPC since it is restricted to service_role only.
 */
public isServiceRoleAvailable(): boolean {
    const serviceClient = this.supabaseServiceRole;
    return serviceClient !== null;
}
}

View File

@ -0,0 +1,429 @@
import { Command } from 'commander';
import { Server } from '@modelcontextprotocol/sdk/server/index.js';
import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js';
import {
CallToolRequestSchema,
ErrorCode,
ListToolsRequestSchema,
McpError,
} from '@modelcontextprotocol/sdk/types.js';
import { SelfhostedSupabaseClient } from './client/index.js';
import { HttpMcpServer } from './server/http-server.js';
import { listTablesTool } from './tools/list_tables.js';
import { listExtensionsTool } from './tools/list_extensions.js';
import { listMigrationsTool } from './tools/list_migrations.js';
import { applyMigrationTool } from './tools/apply_migration.js';
import { executeSqlTool } from './tools/execute_sql.js';
import { getDatabaseConnectionsTool } from './tools/get_database_connections.js';
import { getDatabaseStatsTool } from './tools/get_database_stats.js';
import { getProjectUrlTool } from './tools/get_project_url.js';
import { generateTypesTool } from './tools/generate_typescript_types.js';
import { rebuildHooksTool } from './tools/rebuild_hooks.js';
import { verifyJwtSecretTool } from './tools/verify_jwt_secret.js';
import { listAuthUsersTool } from './tools/list_auth_users.js';
import { getAuthUserTool } from './tools/get_auth_user.js';
import { deleteAuthUserTool } from './tools/delete_auth_user.js';
import { createAuthUserTool } from './tools/create_auth_user.js';
import { updateAuthUserTool } from './tools/update_auth_user.js';
import { z } from 'zod';
import { zodToJsonSchema } from 'zod-to-json-schema';
import { canAccessTool, type ToolContext, type ToolPrivilegeLevel, type UserContext } from './tools/types.js';
import listStorageBucketsTool from './tools/list_storage_buckets.js';
import listStorageObjectsTool from './tools/list_storage_objects.js';
import listRealtimePublicationsTool from './tools/list_realtime_publications.js';
import { listCronJobsTool } from './tools/list_cron_jobs.js';
import { listVectorIndexesTool } from './tools/list_vector_indexes.js';
import { listEdgeFunctionsTool } from './tools/list_edge_functions.js';
import { getEdgeFunctionDetailsTool } from './tools/get_edge_function_details.js';
import { getLogsTool } from './tools/get_logs.js';
import { getAdvisorsTool } from './tools/get_advisors.js';
import { getStorageConfigTool } from './tools/get_storage_config.js';
import { updateStorageConfigTool } from './tools/update_storage_config.js';
import { listTableColumnsTool } from './tools/list_table_columns.js';
import { listIndexesTool } from './tools/list_indexes.js';
import { listConstraintsTool } from './tools/list_constraints.js';
import { listForeignKeysTool } from './tools/list_foreign_keys.js';
import { listRlsPoliciesTool } from './tools/list_rls_policies.js';
import { listTriggersTool } from './tools/list_triggers.js';
import { listDatabaseFunctionsTool } from './tools/list_database_functions.js';
import { getFunctionDefinitionTool } from './tools/get_function_definition.js';
import { getTriggerDefinitionTool } from './tools/get_trigger_definition.js';
import { getRlsStatusTool } from './tools/get_rls_status.js';
import { listAvailableExtensionsTool } from './tools/list_available_extensions.js';
import { getCronJobHistoryTool } from './tools/get_cron_job_history.js';
import { listEdgeFunctionLogsTool } from './tools/list_edge_function_logs.js';
import { getIndexStatsTool } from './tools/get_index_stats.js';
import { getVectorIndexStatsTool } from './tools/get_vector_index_stats.js';
import { explainQueryTool } from './tools/explain_query.js';
// Node.js built-in modules
import * as fs from 'node:fs';
import * as path from 'node:path';
// Define the structure expected by MCP for tool definitions
// (what is advertised in the server's `capabilities.tools` and ListTools replies).
interface McpToolSchema {
    // Unique tool identifier, used for lookup on CallTool requests.
    name: string;
    description?: string;
    // inputSchema is the JSON Schema object for MCP capabilities
    inputSchema: object;
}
// Base structure for our tool objects - For Reference
// Each tool module exports an object of this shape; they are collected into
// the `availableTools` Map in main().
interface AppTool {
    name: string;
    description: string;
    inputSchema: z.ZodTypeAny; // Zod schema for parsing
    mcpInputSchema: object; // Static JSON schema for MCP (Required)
    // NOTE(review): comment says "optional" but the type is required — confirm intent.
    outputSchema: z.ZodTypeAny; // Zod schema for output (optional)
    privilegeLevel?: ToolPrivilegeLevel; // Privilege level for access control
    // Executes the tool with already-validated input; result is serialized to text.
    execute: (input: unknown, context: ToolContext) => Promise<unknown>;
}
// Main function
/**
 * CLI entry point for the self-hosted Supabase MCP server.
 *
 * Parses command-line options (with env-var fallbacks), initializes the
 * Supabase client, builds the tool registry (optionally whitelisted via
 * --tools-config), and starts the server over stdio or Streamable HTTP.
 *
 * Throws (rather than exiting directly) on fatal misconfiguration; the
 * top-level `main().catch` handler converts that into a non-zero exit.
 */
async function main() {
    const program = new Command();
    program
        .name('self-hosted-supabase-mcp')
        .description('MCP Server for self-hosted Supabase instances')
        .option('--url <url>', 'Supabase project URL', process.env.SUPABASE_URL)
        .option('--anon-key <key>', 'Supabase anonymous key', process.env.SUPABASE_ANON_KEY)
        .option('--service-key <key>', 'Supabase service role key (optional)', process.env.SUPABASE_SERVICE_ROLE_KEY)
        .option('--db-url <url>', 'Direct database connection string (optional, for pg fallback)', process.env.DATABASE_URL)
        .option('--jwt-secret <secret>', 'Supabase JWT secret (optional, needed for some tools)', process.env.SUPABASE_AUTH_JWT_SECRET)
        .option('--workspace-path <path>', 'Workspace root path (for file operations)', process.cwd())
        .option('--tools-config <path>', 'Path to a JSON file specifying which tools to enable (e.g., { "enabledTools": ["tool1", "tool2"] }). If omitted, all tools are enabled.')
        .option('--transport <type>', 'Transport mode: stdio or http (default: stdio)', 'stdio')
        .option('--port <number>', 'HTTP server port (default: 3000)', '3000')
        .option('--host <string>', 'HTTP server host (default: 127.0.0.1)', '127.0.0.1')
        .option('--cors-origins <origins>', 'Comma-separated list of allowed CORS origins (default: localhost only)')
        .option('--rate-limit-window <ms>', 'Rate limit window in milliseconds (default: 60000)', '60000')
        .option('--rate-limit-max <count>', 'Max requests per rate limit window (default: 100)', '100')
        .option('--request-timeout <ms>', 'Request timeout in milliseconds (default: 30000)', '30000')
        .parse(process.argv);
    const options = program.opts();
    // --- Required-option validation (URL and anon key are mandatory) ---
    if (!options.url) {
        console.error('Error: Supabase URL is required. Use --url or SUPABASE_URL.');
        throw new Error('Supabase URL is required.');
    }
    if (!options.anonKey) {
        console.error('Error: Supabase Anon Key is required. Use --anon-key or SUPABASE_ANON_KEY.');
        throw new Error('Supabase Anon Key is required.');
    }
    // Validate transport option
    const transport = options.transport as string;
    if (transport !== 'stdio' && transport !== 'http') {
        console.error('Error: Invalid transport. Must be "stdio" or "http".');
        throw new Error('Invalid transport mode.');
    }
    // HTTP mode requires JWT secret for authentication
    if (transport === 'http' && !options.jwtSecret) {
        console.error('Error: --jwt-secret is required for HTTP transport mode.');
        throw new Error('JWT secret is required for HTTP mode.');
    }
    // NOTE: console.error is used for ALL logging so stdout stays reserved for
    // the stdio MCP protocol stream.
    console.error(`Initializing Self-Hosted Supabase MCP Server (transport: ${transport})...`);
    try {
        const selfhostedClient = await SelfhostedSupabaseClient.create({
            supabaseUrl: options.url,
            supabaseAnonKey: options.anonKey,
            supabaseServiceRoleKey: options.serviceKey,
            databaseUrl: options.dbUrl,
            jwtSecret: options.jwtSecret,
        });
        console.error('Supabase client initialized successfully.');
        // Use Map for tool registration to avoid object injection patterns
        const availableTools = new Map<string, AppTool>([
            [listTablesTool.name, listTablesTool as AppTool],
            [listExtensionsTool.name, listExtensionsTool as AppTool],
            [listMigrationsTool.name, listMigrationsTool as AppTool],
            [applyMigrationTool.name, applyMigrationTool as AppTool],
            [executeSqlTool.name, executeSqlTool as AppTool],
            [getDatabaseConnectionsTool.name, getDatabaseConnectionsTool as AppTool],
            [getDatabaseStatsTool.name, getDatabaseStatsTool as AppTool],
            [getProjectUrlTool.name, getProjectUrlTool as AppTool],
            [generateTypesTool.name, generateTypesTool as AppTool],
            [rebuildHooksTool.name, rebuildHooksTool as AppTool],
            [verifyJwtSecretTool.name, verifyJwtSecretTool as AppTool],
            [listAuthUsersTool.name, listAuthUsersTool as AppTool],
            [getAuthUserTool.name, getAuthUserTool as AppTool],
            [deleteAuthUserTool.name, deleteAuthUserTool as AppTool],
            [createAuthUserTool.name, createAuthUserTool as AppTool],
            [updateAuthUserTool.name, updateAuthUserTool as AppTool],
            [listStorageBucketsTool.name, listStorageBucketsTool as AppTool],
            [listStorageObjectsTool.name, listStorageObjectsTool as AppTool],
            [listRealtimePublicationsTool.name, listRealtimePublicationsTool as AppTool],
            [listCronJobsTool.name, listCronJobsTool as AppTool],
            [listVectorIndexesTool.name, listVectorIndexesTool as AppTool],
            [listEdgeFunctionsTool.name, listEdgeFunctionsTool as AppTool],
            [getEdgeFunctionDetailsTool.name, getEdgeFunctionDetailsTool as AppTool],
            [getLogsTool.name, getLogsTool as AppTool],
            [getAdvisorsTool.name, getAdvisorsTool as AppTool],
            [getStorageConfigTool.name, getStorageConfigTool as AppTool],
            [updateStorageConfigTool.name, updateStorageConfigTool as AppTool],
            [listTableColumnsTool.name, listTableColumnsTool as AppTool],
            [listIndexesTool.name, listIndexesTool as AppTool],
            [listConstraintsTool.name, listConstraintsTool as AppTool],
            [listForeignKeysTool.name, listForeignKeysTool as AppTool],
            [listRlsPoliciesTool.name, listRlsPoliciesTool as AppTool],
            [listTriggersTool.name, listTriggersTool as AppTool],
            [listDatabaseFunctionsTool.name, listDatabaseFunctionsTool as AppTool],
            [getFunctionDefinitionTool.name, getFunctionDefinitionTool as AppTool],
            [getTriggerDefinitionTool.name, getTriggerDefinitionTool as AppTool],
            [getRlsStatusTool.name, getRlsStatusTool as AppTool],
            [listAvailableExtensionsTool.name, listAvailableExtensionsTool as AppTool],
            [getCronJobHistoryTool.name, getCronJobHistoryTool as AppTool],
            [listEdgeFunctionLogsTool.name, listEdgeFunctionLogsTool as AppTool],
            [getIndexStatsTool.name, getIndexStatsTool as AppTool],
            [getVectorIndexStatsTool.name, getVectorIndexStatsTool as AppTool],
            [explainQueryTool.name, explainQueryTool as AppTool],
        ]);
        // --- Tool Filtering Logic ---
        // Use Map for registered tools (copy from available tools initially)
        let registeredTools = new Map<string, AppTool>(availableTools);
        const toolsConfigPath = options.toolsConfig as string | undefined;
        let enabledToolNames: Set<string> | null = null; // Use Set for efficient lookup
        if (toolsConfigPath) {
            try {
                const resolvedPath = path.resolve(toolsConfigPath);
                console.error(`Attempting to load tool configuration from: ${resolvedPath}`);
                if (!fs.existsSync(resolvedPath)) {
                    throw new Error(`Tool configuration file not found at ${resolvedPath}`);
                }
                const configFileContent = fs.readFileSync(resolvedPath, 'utf-8');
                const configJson = JSON.parse(configFileContent);
                if (!configJson || typeof configJson !== 'object' || !Array.isArray(configJson.enabledTools)) {
                    throw new Error('Invalid config file format. Expected { "enabledTools": ["tool1", ...] }.');
                }
                // Validate that enabledTools contains only strings
                const toolNames = configJson.enabledTools as unknown[];
                if (!toolNames.every((name): name is string => typeof name === 'string')) {
                    throw new Error('Invalid config file content. "enabledTools" must be an array of strings.');
                }
                enabledToolNames = new Set(toolNames.map(name => name.trim()).filter(name => name.length > 0));
            } catch (error: unknown) {
                // Config errors are non-fatal: fall back to enabling every tool.
                console.error(`Error loading or parsing tool config file '${toolsConfigPath}':`, error instanceof Error ? error.message : String(error));
                console.error('Falling back to enabling all tools due to config error.');
                enabledToolNames = null; // Reset to null to signify fallback
            }
        }
        if (enabledToolNames !== null) { // Check if we successfully got names from config
            console.error(`Whitelisting tools based on config: ${Array.from(enabledToolNames).join(', ')}`);
            // Create new Map with only whitelisted tools
            registeredTools = new Map<string, AppTool>();
            for (const [toolName, tool] of availableTools) {
                if (enabledToolNames.has(toolName)) {
                    registeredTools.set(toolName, tool);
                } else {
                    console.error(`Tool ${toolName} disabled (not in config whitelist).`);
                }
            }
            // Check if any tools specified in the config were not found in availableTools
            // Map.has() is safe from prototype pollution
            for (const requestedName of enabledToolNames) {
                if (!availableTools.has(requestedName)) {
                    console.warn(`Warning: Tool "${requestedName}" specified in config file not found.`);
                }
            }
        } else {
            console.error("No valid --tools-config specified or error loading config, enabling all available tools.");
            // registeredTools already defaults to all tools, so no action needed here
        }
        // --- End Tool Filtering Logic ---
        // Prepare capabilities for the Server constructor
        const capabilitiesTools: Record<string, McpToolSchema> = {};
        // Use the potentially filtered 'registeredTools' map (using Map.values())
        for (const tool of registeredTools.values()) {
            capabilitiesTools[tool.name] = {
                name: tool.name,
                description: tool.description || 'Tool description missing',
                inputSchema: tool.mcpInputSchema,
            };
        }
        const capabilities = { tools: capabilitiesTools };
        // Factory function to create a configured MCP server instance
        // This is needed for HTTP mode where each request may need a fresh server
        // In HTTP mode, userContext is provided for privilege-level enforcement
        const createMcpServer = (userContext?: UserContext): Server => {
            const server = new Server(
                {
                    name: 'self-hosted-supabase-mcp',
                    version: '1.3.0',
                },
                {
                    capabilities,
                },
            );
            // The ListTools handler should return the array matching McpToolSchema structure
            server.setRequestHandler(ListToolsRequestSchema, async () => ({
                tools: Object.values(capabilities.tools),
            }));
            server.setRequestHandler(CallToolRequestSchema, async (request) => {
                const toolName = request.params.name;
                // Look up the tool in the filtered 'registeredTools' Map
                // Map.has() and Map.get() are safe from prototype pollution
                const tool = registeredTools.get(toolName);
                if (!tool) {
                    // Check if it existed originally but was filtered out
                    if (availableTools.has(toolName)) {
                        throw new McpError(ErrorCode.MethodNotFound, `Tool "${toolName}" is available but not enabled by the current server configuration.`);
                    }
                    // If the tool wasn't in the original list either, it's unknown
                    throw new McpError(ErrorCode.MethodNotFound, `Unknown tool: ${toolName}`);
                }
                // SECURITY: Check privilege level in HTTP mode
                // In stdio mode (no userContext), all tools are accessible (trusted local process)
                if (userContext) {
                    const toolPrivilegeLevel = tool.privilegeLevel ?? 'regular';
                    if (!canAccessTool(userContext.role, toolPrivilegeLevel)) {
                        console.error(`[SECURITY] Access denied: User ${userContext.email || userContext.userId} (role: ${userContext.role}) attempted to access ${toolName} (requires: ${toolPrivilegeLevel})`);
                        throw new McpError(
                            ErrorCode.InvalidRequest,
                            `Access denied: Tool '${toolName}' requires '${toolPrivilegeLevel}' privilege. ` +
                            `Your role '${userContext.role}' does not have sufficient permissions.`
                        );
                    }
                }
                try {
                    if (typeof tool.execute !== 'function') {
                        throw new Error(`Tool ${toolName} does not have an execute method.`);
                    }
                    // Validate and parse arguments using Zod schema
                    const parsedArgs = (tool.inputSchema as z.ZodTypeAny).parse(
                        request.params.arguments
                    ) as Record<string, unknown>;
                    // Create the context object using the imported type
                    const context: ToolContext = {
                        selfhostedClient,
                        workspacePath: options.workspacePath as string,
                        user: userContext, // Pass user context for audit logging
                        log: (message, level = 'info') => {
                            // Simple logger using console.error (consistent with existing logs)
                            console.error(`[${level.toUpperCase()}] ${message}`);
                        }
                    };
                    // Call the tool's execute method with validated arguments
                    const result = await tool.execute(parsedArgs, context);
                    return {
                        content: [
                            {
                                type: 'text',
                                text: typeof result === 'string' ? result : JSON.stringify(result, null, 2),
                            },
                        ],
                    };
                } catch (error: unknown) {
                    // Tool failures are reported as results (isError) rather than
                    // protocol errors, so the client sees a readable message.
                    console.error(`Error executing tool ${toolName}:`, error);
                    let errorMessage = `Error executing tool ${toolName}: `;
                    if (error instanceof z.ZodError) {
                        errorMessage += `Input validation failed: ${error.issues.map((e) => `${e.path.join('.')}: ${e.message}`).join(', ')}`;
                    } else if (error instanceof Error) {
                        errorMessage += error.message;
                    } else {
                        errorMessage += String(error);
                    }
                    return {
                        content: [{ type: 'text', text: errorMessage }],
                        isError: true,
                    };
                }
            });
            return server;
        };
        // Start the appropriate transport
        if (transport === 'http') {
            console.error('Starting MCP Server in HTTP mode...');
            // Parse CORS origins if provided
            const corsOrigins = options.corsOrigins
                ? (options.corsOrigins as string).split(',').map(o => o.trim()).filter(o => o.length > 0)
                : undefined;
            const httpServer = new HttpMcpServer(
                {
                    port: parseInt(options.port as string, 10),
                    host: options.host as string,
                    jwtSecret: options.jwtSecret as string,
                    corsOrigins,
                    rateLimitWindowMs: parseInt(options.rateLimitWindow as string, 10),
                    rateLimitMaxRequests: parseInt(options.rateLimitMax as string, 10),
                    requestTimeoutMs: parseInt(options.requestTimeout as string, 10),
                },
                createMcpServer
            );
            await httpServer.start();
            // Handle graceful shutdown
            // Use void to properly handle async handlers in process.on callbacks
            process.on('SIGINT', () => {
                void (async () => {
                    console.error('Shutting down...');
                    await httpServer.stop();
                    process.exit(0);
                })();
            });
            process.on('SIGTERM', () => {
                void (async () => {
                    console.error('Shutting down...');
                    await httpServer.stop();
                    process.exit(0);
                })();
            });
        } else {
            // WARNING: Stdio mode has NO authentication - all tools accessible
            console.error('Starting MCP Server in stdio mode...');
            console.error('');
            console.error('================================================================================');
            console.error('WARNING: Stdio mode has NO authentication. All tools (including privileged');
            console.error('         tools) are accessible. Only use stdio mode with trusted local clients.');
            console.error('         For remote access, use HTTP mode with JWT authentication.');
            console.error('================================================================================');
            console.error('');
            const server = createMcpServer();
            const stdioTransport = new StdioServerTransport();
            await server.connect(stdioTransport);
            console.error('MCP Server connected to stdio.');
        }
    } catch (error) {
        console.error('Failed to initialize or start the MCP server:', error);
        throw error; // Rethrow to ensure the process exits non-zero if init fails
    }
}
// Kick off the entry point; any error that escapes main() terminates the
// process with a non-zero exit code.
main().catch((err: unknown) => {
    console.error('Unhandled error in main function:', err);
    process.exit(1); // Exit with error code
});

View File

@ -0,0 +1,135 @@
/**
* JWT Authentication Middleware for HTTP transport mode.
*
* Validates Supabase JWT tokens and extracts user information.
* Required for all /mcp endpoints in HTTP mode.
*/
import type { Request, Response, NextFunction } from 'express';
import jwt from 'jsonwebtoken';
/** Identity extracted from a verified Supabase JWT and attached to the request. */
export interface AuthenticatedUser {
    userId: string;       // JWT `sub` claim
    email: string | null; // JWT `email` claim, or null when absent
    role: string;         // JWT `role` claim; defaults to 'authenticated'
    exp: number;          // Expiry time (seconds since epoch); 0 when absent
}
/** Express request augmented with the user set by the auth middleware. */
export interface AuthenticatedRequest extends Request {
    // Present only after the authentication middleware has run successfully.
    user?: AuthenticatedUser;
}
/** Claims of interest in a decoded Supabase JWT payload. */
interface SupabaseJwtPayload {
    sub: string; // User ID
    email?: string;
    role?: string;
    aud?: string; // Audience
    exp?: number; // Expiration (seconds since epoch)
    iat?: number; // Issued-at (seconds since epoch)
}
/**
 * Error response messages for authentication failures.
 * Using constants ensures these are not flagged as user-controlled content.
 * `as const` keeps the values as readonly string literals.
 */
const AUTH_ERROR_MESSAGES = {
    MISSING_HEADER: 'Missing Authorization header',
    INVALID_FORMAT: 'Invalid Authorization header format. Expected: Bearer [token]',
    MISSING_TOKEN: 'Missing token in Authorization header',
    MISSING_SUBJECT: 'Invalid token: missing subject (sub) claim',
    TOKEN_EXPIRED: 'Token has expired',
    VERIFICATION_FAILED: 'Failed to verify authentication token',
} as const;
/**
 * Creates JWT authentication middleware.
 *
 * Expects an `Authorization: Bearer <token>` header, verifies the token with
 * HS256 against the provided secret, and attaches the decoded identity to
 * `req.user`. Responds 401 on any authentication failure and 500 on
 * unexpected verification errors; calls next() only on success.
 *
 * @param jwtSecret - The Supabase JWT secret for verification
 * @returns Express middleware function
 */
export function createAuthMiddleware(jwtSecret: string) {
    return (req: AuthenticatedRequest, res: Response, next: NextFunction): void => {
        const authHeader = req.headers.authorization;
        if (!authHeader) {
            res.status(401).json({
                error: 'Unauthorized',
                message: AUTH_ERROR_MESSAGES.MISSING_HEADER,
            });
            return;
        }
        if (!authHeader.startsWith('Bearer ')) {
            res.status(401).json({
                error: 'Unauthorized',
                message: AUTH_ERROR_MESSAGES.INVALID_FORMAT,
            });
            return;
        }
        const token = authHeader.slice(7); // Remove 'Bearer ' prefix
        if (!token) {
            res.status(401).json({
                error: 'Unauthorized',
                message: AUTH_ERROR_MESSAGES.MISSING_TOKEN,
            });
            return;
        }
        try {
            // Verify and decode the JWT (jwt.verify also enforces expiry and
            // throws TokenExpiredError / JsonWebTokenError on failure).
            const decoded = jwt.verify(token, jwtSecret, {
                algorithms: ['HS256'],
            }) as SupabaseJwtPayload;
            // Validate required fields
            if (!decoded.sub) {
                res.status(401).json({
                    error: 'Unauthorized',
                    message: AUTH_ERROR_MESSAGES.MISSING_SUBJECT,
                });
                return;
            }
            // Attach user info to request
            req.user = {
                userId: decoded.sub,
                email: decoded.email || null,
                role: decoded.role || 'authenticated',
                exp: decoded.exp || 0,
            };
            // Log authenticated request (for audit purposes)
            console.error(`[AUTH] Authenticated request from user: ${req.user.email || req.user.userId}`);
            next();
        } catch (error) {
            // BUG FIX: TokenExpiredError extends JsonWebTokenError in the
            // jsonwebtoken library, so it must be checked FIRST. Previously the
            // generic JsonWebTokenError branch shadowed it, making the
            // TOKEN_EXPIRED response unreachable.
            if (error instanceof jwt.TokenExpiredError) {
                res.status(401).json({
                    error: 'Unauthorized',
                    message: AUTH_ERROR_MESSAGES.TOKEN_EXPIRED,
                });
                return;
            }
            if (error instanceof jwt.JsonWebTokenError) {
                // Note: error.message is from the jwt library, not user input
                res.status(401).json({
                    error: 'Unauthorized',
                    message: `Invalid token: ${error.message}`,
                });
                return;
            }
            console.error('[AUTH] Unexpected error during token verification:', error);
            res.status(500).json({
                error: 'Internal Server Error',
                message: AUTH_ERROR_MESSAGES.VERIFICATION_FAILED,
            });
        }
    };
}

View File

@ -0,0 +1,477 @@
/**
* HTTP Server for MCP using Streamable HTTP Transport.
*
* Implements the official MCP Streamable HTTP specification (2025-03-26).
* Runs in stateless mode: each request creates a new transport instance.
*
* Security features:
* - Configurable CORS (default: localhost only)
* - Rate limiting
* - Security headers
* - Request timeouts
* - Privilege-based tool access control
*/
import express, { type Express, type Request, type Response } from 'express';
import type { Server as HttpServer } from 'node:http';
import { Server } from '@modelcontextprotocol/sdk/server/index.js';
import { StreamableHTTPServerTransport } from '@modelcontextprotocol/sdk/server/streamableHttp.js';
import { createAuthMiddleware, type AuthenticatedRequest } from './auth-middleware.js';
import type { UserContext } from '../tools/types.js';
/** Configuration for the HTTP MCP server. */
export interface HttpMcpServerOptions {
    port: number;                  // TCP port to listen on
    host: string;                  // Bind address
    jwtSecret: string;             // Supabase JWT secret used to verify bearer tokens
    corsOrigins?: string[];        // Allowed CORS origins; when omitted, localhost-only defaults apply
    rateLimitWindowMs?: number;    // Rate-limit window in ms (default 60000)
    rateLimitMaxRequests?: number; // Max requests per window (default 100)
    requestTimeoutMs?: number;     // Per-request timeout in ms (default 30000)
}
/**
 * Factory function type that creates MCP servers with optional user context.
 * User context is provided for privilege-level enforcement in HTTP mode;
 * in stdio mode the factory is called without a context.
 */
export type McpServerFactory = (userContext?: UserContext) => Server;
export class HttpMcpServer {
private app: Express;
private httpServer: HttpServer | null = null;
private readonly options: HttpMcpServerOptions;
private readonly mcpServerFactory: McpServerFactory;
private requestCounts: Map<string, { count: number; resetTime: number }> = new Map();
private cleanupIntervalId: ReturnType<typeof setInterval> | null = null;
private readonly CLEANUP_INTERVAL_MS = 60000; // Clean up expired entries every minute
/**
 * Builds the Express app, installs middleware and routes, and starts the
 * periodic rate-limiter cleanup timer.
 *
 * @param options - Server configuration (port, host, auth, limits).
 * @param mcpServerFactory - Factory invoked per request to build an MCP
 *     server bound to the authenticated user's context.
 */
constructor(options: HttpMcpServerOptions, mcpServerFactory: McpServerFactory) {
    this.options = options;
    this.mcpServerFactory = mcpServerFactory;
    this.app = express();
    this.setupMiddleware();
    this.setupRoutes();
    // Start periodic cleanup of expired rate limit entries to prevent memory leak
    this.cleanupIntervalId = setInterval(
        () => { this.cleanupExpiredRateLimitEntries(); },
        this.CLEANUP_INTERVAL_MS
    );
}
/**
* Cleans up expired rate limit entries to prevent unbounded memory growth.
* Called periodically by the cleanup interval.
*/
private cleanupExpiredRateLimitEntries(): void {
const now = Date.now();
let cleanedCount = 0;
for (const [key, record] of this.requestCounts.entries()) {
if (now >= record.resetTime) {
this.requestCounts.delete(key);
cleanedCount++;
}
}
if (cleanedCount > 0) {
console.error(`[HTTP] Rate limiter cleanup: removed ${cleanedCount} expired entries`);
}
}
/**
 * Fixed-window in-memory rate limiter keyed by client identifier.
 * Returns whether the request is allowed plus the values needed for the
 * standard X-RateLimit-* response headers.
 */
private checkRateLimit(clientKey: string): {
    allowed: boolean;
    remaining: number;
    resetTime: number;
    limit: number;
} {
    const windowMs = this.options.rateLimitWindowMs ?? 60000; // 1 minute default
    const maxRequests = this.options.rateLimitMaxRequests ?? 100; // 100 requests default
    const now = Date.now();

    const existing = this.requestCounts.get(clientKey);
    if (!existing || now >= existing.resetTime) {
        // No record yet, or the previous window has elapsed: start a new one.
        const fresh = { count: 1, resetTime: now + windowMs };
        this.requestCounts.set(clientKey, fresh);
        return {
            allowed: true,
            remaining: maxRequests - 1,
            resetTime: fresh.resetTime,
            limit: maxRequests,
        };
    }

    if (existing.count >= maxRequests) {
        // Budget for the current window is exhausted.
        return {
            allowed: false,
            remaining: 0,
            resetTime: existing.resetTime,
            limit: maxRequests,
        };
    }

    existing.count += 1;
    return {
        allowed: true,
        remaining: maxRequests - existing.count,
        resetTime: existing.resetTime,
        limit: maxRequests,
    };
}
/**
 * Get client identifier for rate limiting (IP address).
 *
 * Prefers the first hop of X-Forwarded-For (for deployments behind a proxy),
 * falling back to Express's req.ip and then the raw socket address.
 *
 * Fixes over the previous version: Express may surface a repeated
 * X-Forwarded-For header as a string[], which used to be ignored entirely,
 * and an empty first hop (e.g. ",1.2.3.4") used to yield an empty key.
 *
 * NOTE(review): X-Forwarded-For is client-controlled unless a trusted proxy
 * strips/sets it — confirm the deployment sits behind such a proxy before
 * relying on it for rate limiting.
 */
private getClientKey(req: Request): string {
    // Support for proxies (X-Forwarded-For); normalize string | string[] forms.
    const forwarded = req.headers['x-forwarded-for'];
    const forwardedValue = Array.isArray(forwarded) ? forwarded[0] : forwarded;
    if (typeof forwardedValue === 'string') {
        const firstHop = forwardedValue.split(',')[0].trim();
        if (firstHop.length > 0) {
            return firstHop;
        }
    }
    return req.ip || req.socket.remoteAddress || 'unknown';
}
/**
 * Resolves the CORS allowlist: the configured origins when provided,
 * otherwise a localhost-only default derived from the bound host and port.
 */
private getAllowedOrigins(): string[] {
    const configured = this.options.corsOrigins;
    if (configured != null) {
        return configured;
    }
    const { host, port } = this.options;
    return [
        `http://localhost:${port}`,
        `http://127.0.0.1:${port}`,
        `http://${host}:${port}`,
    ];
}
/**
 * Check if origin is allowed by CORS configuration.
 *
 * A missing Origin header (same-origin or non-browser request) is always
 * allowed. Otherwise the origin must match the allowlist via '*', an exact
 * entry, or a ':*' port-wildcard entry (e.g. "http://localhost:*").
 */
private isOriginAllowed(origin: string | undefined): boolean {
    if (!origin) {
        // No Origin header: same-origin or non-browser client.
        return true;
    }
    const allowedOrigins = this.getAllowedOrigins();
    // Explicit wildcard or exact match.
    if (allowedOrigins.includes('*') || allowedOrigins.includes(origin)) {
        return true;
    }
    // Port-wildcard patterns such as "http://localhost:*".
    return allowedOrigins.some((allowed) => {
        if (!allowed.endsWith(':*')) {
            return false;
        }
        const baseUrl = allowed.slice(0, -2); // Remove ':*'
        return origin.startsWith(`${baseUrl}:`);
    });
}
/**
 * Gets the appropriate Access-Control-Allow-Origin header value for a request.
 * Returns '*' when there is no Origin header (or '*' is configured), an
 * allowlist-derived value for permitted origins, or null when disallowed.
 *
 * Every returned value originates from the trusted allowlist (or is rebuilt
 * from a trusted base plus a validated numeric port), never raw user input.
 */
private getCorsAllowOriginValue(origin: string | undefined): string | null {
    if (!origin) {
        // Same-origin or non-browser request.
        return '*';
    }
    const allowedOrigins = this.getAllowedOrigins();
    if (allowedOrigins.includes('*')) {
        return '*';
    }
    for (const allowed of allowedOrigins) {
        // Exact match: echo the allowlist entry rather than the header value.
        if (allowed === origin) {
            return allowed;
        }
        // Port-wildcard match (e.g. "http://localhost:*"): rebuild a safe
        // value from the trusted base plus a strictly numeric port.
        if (allowed.endsWith(':*')) {
            const baseUrl = allowed.slice(0, -2); // Remove ':*'
            if (origin.startsWith(`${baseUrl}:`)) {
                const portPart = origin.slice(baseUrl.length + 1);
                if (/^\d+$/.test(portPart)) {
                    return `${baseUrl}:${portPart}`;
                }
            }
        }
    }
    // Origin not allowed.
    return null;
}
/**
 * Installs the middleware chain. Order is significant:
 * security headers -> JSON body parsing -> request timeout -> rate limiting
 * -> CORS. Each stage runs for every request unless noted otherwise.
 */
private setupMiddleware(): void {
    // Security headers (first - before other middleware)
    this.app.use((_req, res, next) => {
        // Prevent XSS and clickjacking
        res.header('X-Content-Type-Options', 'nosniff');
        res.header('X-Frame-Options', 'DENY');
        res.header('X-XSS-Protection', '1; mode=block');
        // HTTPS enforcement hint (useful when behind proxy)
        res.header('Strict-Transport-Security', 'max-age=31536000; includeSubDomains');
        // CSP for API responses
        res.header('Content-Security-Policy', "default-src 'none'; frame-ancestors 'none'");
        // Referrer policy
        res.header('Referrer-Policy', 'no-referrer');
        // Remove X-Powered-By
        res.removeHeader('X-Powered-By');
        next();
    });
    // Parse JSON bodies
    this.app.use(express.json());
    // Request timeout
    const timeoutMs = this.options.requestTimeoutMs ?? 30000;
    this.app.use((req, res, next) => {
        res.setTimeout(timeoutMs, () => {
            // Only respond if nothing has been written yet.
            if (!res.headersSent) {
                res.status(504).json({
                    error: 'Gateway Timeout',
                    message: `Request timed out after ${timeoutMs}ms`,
                });
            }
        });
        next();
    });
    // Rate limiting (skip for health endpoint)
    this.app.use((req, res, next) => {
        // Skip rate limiting for health checks
        if (req.path === '/health') {
            next();
            return;
        }
        const clientKey = this.getClientKey(req);
        const { allowed, remaining, resetTime, limit } = this.checkRateLimit(clientKey);
        // Always add rate limit headers (standard practice)
        res.header('X-RateLimit-Limit', String(limit));
        res.header('X-RateLimit-Remaining', String(remaining));
        res.header('X-RateLimit-Reset', String(Math.ceil(resetTime / 1000)));
        if (!allowed) {
            const retryAfterSeconds = Math.max(1, Math.ceil((resetTime - Date.now()) / 1000));
            res.header('Retry-After', String(retryAfterSeconds));
            res.status(429).json({
                error: 'Too Many Requests',
                message: 'Rate limit exceeded. Please try again later.',
                retryAfter: retryAfterSeconds,
            });
            return;
        }
        next();
    });
    // CORS with configurable origins (default: localhost only)
    this.app.use((req, res, next) => {
        const origin = req.headers.origin;
        // Get the validated CORS origin value (from allowlist, not user input)
        const corsOriginValue = this.getCorsAllowOriginValue(origin);
        if (corsOriginValue === null) {
            // Origin not in allowlist
            if (req.method === 'OPTIONS') {
                res.sendStatus(403);
                return;
            }
            res.status(403).json({
                error: 'Forbidden',
                message: 'Origin not allowed by CORS policy',
            });
            return;
        }
        // Set CORS headers with validated value from allowlist
        // Uses helper method to satisfy static analysis
        this.setCorsHeaders(res, corsOriginValue);
        if (req.method === 'OPTIONS') {
            // Preflight handled here; no further middleware runs.
            res.sendStatus(204);
            return;
        }
        next();
    });
}
/**
* Sets CORS headers on the response.
* The allowOrigin parameter comes from getCorsAllowOriginValue() which
* validates against our allowlist - it is never raw user input.
*/
private setCorsHeaders(res: Response, allowOrigin: string): void {
const headers: Array<[string, string]> = [
['Access-Control-Allow-Origin', allowOrigin],
['Access-Control-Allow-Methods', 'GET, POST, DELETE, OPTIONS'],
['Access-Control-Allow-Headers', 'Content-Type, Authorization, Accept, Mcp-Session-Id'],
['Access-Control-Expose-Headers', 'Mcp-Session-Id'],
['Access-Control-Allow-Credentials', 'true'],
];
for (const [headerName, headerValue] of headers) {
res.header(headerName, headerValue);
}
}
/**
 * Registers all HTTP routes.
 *
 * Registration order matters in Express: the JWT middleware is mounted on
 * /mcp BEFORE the /mcp handlers so every MCP request is authenticated,
 * while /health is registered first and stays unauthenticated.
 */
private setupRoutes(): void {
// Health check endpoint (no auth required, minimal info)
this.app.get('/health', (_req: Request, res: Response) => {
res.json({ status: 'healthy' });
});
// Apply JWT authentication to /mcp routes
const authMiddleware = createAuthMiddleware(this.options.jwtSecret);
this.app.use('/mcp', authMiddleware);
// POST /mcp - Handle MCP JSON-RPC requests (stateless mode)
// The handler is synchronous for Express; the async work runs in a
// void-ed IIFE so rejections are contained by the try/catch below.
this.app.post('/mcp', (req: AuthenticatedRequest, res: Response) => {
void (async () => {
try {
// Create a new transport and server for each request (stateless)
const transport = new StreamableHTTPServerTransport({
sessionIdGenerator: undefined, // Stateless mode
});
// Extract user context for privilege-level enforcement
// (req.user is populated by the auth middleware mounted above)
const userContext: UserContext | undefined = req.user
? {
userId: req.user.userId,
email: req.user.email,
role: req.user.role,
}
: undefined;
const server = this.mcpServerFactory(userContext);
// Connect server to transport
await server.connect(transport);
// Handle the request
await transport.handleRequest(req, res, req.body);
// Clean up after request completes; close() failures are logged
// rather than rethrown because the response has already been sent.
res.on('finish', () => {
transport.close().catch((err) => {
console.error('[HTTP] Error closing transport:', err);
});
server.close().catch((err) => {
console.error('[HTTP] Error closing server:', err);
});
});
} catch (error) {
console.error('[HTTP] Error handling MCP request:', error);
// Only send the JSON-RPC error if the transport has not already
// started writing the response.
if (!res.headersSent) {
res.status(500).json({
jsonrpc: '2.0',
error: {
code: -32603,
message: 'Internal server error',
},
id: null,
});
}
}
})();
});
// GET /mcp - SSE stream for server-initiated messages
// In stateless mode, we return 405 Method Not Allowed
this.app.get('/mcp', (_req: Request, res: Response) => {
res.status(405).json({
error: 'Method Not Allowed',
message: 'GET requests are not supported in stateless mode. Use POST for MCP requests.',
});
});
// DELETE /mcp - Session termination
// In stateless mode, we return 405 Method Not Allowed
this.app.delete('/mcp', (_req: Request, res: Response) => {
res.status(405).json({
error: 'Method Not Allowed',
message: 'Session termination is not supported in stateless mode.',
});
});
}
/**
 * Starts the HTTP server on the configured host/port.
 *
 * Resolves once the server is listening. Rejects if binding fails (e.g.
 * EADDRINUSE) — previously the promise never settled in that case, so a
 * caller awaiting start() would hang forever on a bad port.
 */
async start(): Promise<void> {
    return new Promise((resolve, reject) => {
        this.httpServer = this.app.listen(this.options.port, this.options.host, () => {
            // Set server-level timeouts
            if (this.httpServer) {
                this.httpServer.timeout = this.options.requestTimeoutMs ?? 30000;
                this.httpServer.keepAliveTimeout = 65000; // Slightly higher than common load balancer timeouts
            }
            console.error(`[HTTP] MCP Server listening on http://${this.options.host}:${this.options.port}`);
            console.error('[HTTP] Endpoints:');
            console.error(`  POST http://${this.options.host}:${this.options.port}/mcp - MCP requests (JWT required)`);
            console.error(`  GET  http://${this.options.host}:${this.options.port}/health - Health check`);
            console.error('[HTTP] Security:');
            console.error(`  CORS origins: ${(this.options.corsOrigins ?? ['localhost']).join(', ')}`);
            console.error(`  Rate limit: ${this.options.rateLimitMaxRequests ?? 100} requests per ${(this.options.rateLimitWindowMs ?? 60000) / 1000}s`);
            resolve();
        });
        // Fail fast on bind errors instead of leaving the promise pending.
        // (Rejecting an already-settled promise after startup is a no-op.)
        this.httpServer.once('error', reject);
    });
}
/**
 * Stops the HTTP server and tears down the rate-limit cleanup timer.
 * Resolves immediately if the server was never started; rejects if the
 * underlying close() reports an error.
 */
async stop(): Promise<void> {
    // Clear rate limit cleanup interval to prevent memory leak
    if (this.cleanupIntervalId) {
        clearInterval(this.cleanupIntervalId);
        this.cleanupIntervalId = null;
    }
    const server = this.httpServer;
    if (!server) {
        return;
    }
    return new Promise((resolve, reject) => {
        server.close((closeError) => {
            if (closeError) {
                reject(closeError);
                return;
            }
            console.error('[HTTP] Server stopped.');
            resolve();
        });
    });
}
}

View File

@ -0,0 +1,79 @@
import { z } from 'zod';
import type { ToolContext, ToolPrivilegeLevel } from './types.js';
import type { PoolClient } from 'pg';
// Input schema
// Input schema
// Zod schema used for runtime validation of tool arguments.
const ApplyMigrationInputSchema = z.object({
version: z.string().describe("The migration version string (e.g., '20240101120000')."),
name: z.string().optional().describe("An optional descriptive name for the migration."),
sql: z.string().describe("The SQL DDL content of the migration."),
});
type ApplyMigrationInput = z.infer<typeof ApplyMigrationInputSchema>;
// Output schema
const ApplyMigrationOutputSchema = z.object({
success: z.boolean(),
version: z.string(),
message: z.string().optional(),
});
// Static JSON Schema for MCP capabilities
// Hand-written mirror of the Zod input schema, advertised to MCP clients.
// NOTE(review): must be kept in sync with ApplyMigrationInputSchema by hand.
const mcpInputSchema = {
type: 'object',
properties: {
version: { type: 'string', description: "The migration version string (e.g., '20240101120000')." },
name: { type: 'string', description: 'An optional descriptive name for the migration.' },
sql: { type: 'string', description: 'The SQL DDL content of the migration.' },
},
required: ['version', 'sql'],
};
// The tool definition - No explicit McpToolDefinition type needed
// The tool definition - No explicit McpToolDefinition type needed
/**
 * apply_migration - runs a migration's SQL and records it in
 * supabase_migrations.schema_migrations, both in the same transaction, so a
 * failed bookkeeping insert rolls the DDL back as well (assumes
 * executeTransactionWithPg wraps the callback in BEGIN/COMMIT — the name
 * suggests so; TODO confirm in the client implementation).
 */
export const applyMigrationTool = {
name: 'apply_migration',
description: 'Applies a SQL migration script and records it in the supabase_migrations.schema_migrations table within a transaction.',
privilegeLevel: 'privileged' as ToolPrivilegeLevel,
inputSchema: ApplyMigrationInputSchema,
mcpInputSchema: mcpInputSchema,
outputSchema: ApplyMigrationOutputSchema,
execute: async (input: ApplyMigrationInput, context: ToolContext) => {
const client = context.selfhostedClient;
try {
// Ensure pg is configured and available
// (migrations need a direct connection; the REST/RPC path is not enough)
if (!client.isPgAvailable()) {
throw new Error('Direct database connection (DATABASE_URL) is required for applying migrations but is not configured or available.');
}
await client.executeTransactionWithPg(async (pgClient: PoolClient) => {
// 1. Execute the provided migration SQL
console.error(`Executing migration SQL for version ${input.version}...`);
await pgClient.query(input.sql);
console.error('Migration SQL executed successfully.');
// 2. Insert the record into the migrations table
// Re-applying the same version will fail here on the primary key,
// which also rolls back the DDL executed above.
console.error(`Recording migration version ${input.version} in schema_migrations...`);
await pgClient.query(
'INSERT INTO supabase_migrations.schema_migrations (version, name) ' +
'VALUES ($1, $2);',
[input.version, input.name ?? '']
);
console.error(`Migration version ${input.version} recorded.`);
});
return {
success: true,
version: input.version,
message: `Migration ${input.version} applied successfully.`,
};
} catch (error: unknown) {
const errorMessage = error instanceof Error ? error.message : String(error);
console.error(`Failed to apply migration ${input.version}:`, errorMessage);
// Return a structured error response recognized by handleSqlResponse if needed,
// or let the SDK handle the thrown error.
// Here, we'll just rethrow to let SDK handle it.
// Alternatively, return { success: false, version: input.version, message: 'Failed: ' + errorMessage };
throw new Error(`Failed to apply migration ${input.version}: ${errorMessage}`);
}
},
};

View File

@ -0,0 +1,142 @@
import { z } from 'zod';
import type { ToolContext, ToolPrivilegeLevel } from './types.js';
import { handleSqlResponse } from './utils.js';
import type { PoolClient } from 'pg';
import type { SqlSuccessResponse, AuthUser } from '../types/index.js'; // Import AuthUser
// Input schema
// Input schema
// Runtime validation for create_auth_user arguments. The password arrives in
// plain text (hashed later via pgcrypto's crypt()), hence the WARNING text.
const CreateAuthUserInputSchema = z.object({
email: z.string().email('Invalid email address').describe('The email address for the new user.'),
password: z.string().min(6, 'Password must be at least 6 characters').describe('Plain text password (min 6 chars). WARNING: Insecure.'),
role: z.optional(z.string()).describe('User role.'),
app_metadata: z.optional(z.record(z.string(), z.unknown())).describe('Optional app metadata.'),
user_metadata: z.optional(z.record(z.string(), z.unknown())).describe('Optional user metadata.'),
});
type CreateAuthUserInput = z.infer<typeof CreateAuthUserInputSchema>;
// Output schema - Zod validation for the created user (should match AuthUser structure)
// Field list mirrors the RETURNING clause of the INSERT in the tool below.
const CreatedAuthUserZodSchema = z.object({
id: z.string().uuid(),
email: z.string().email('Invalid email').nullable(),
role: z.string().nullable(),
created_at: z.string().nullable(),
last_sign_in_at: z.string().nullable(), // Will likely be null on creation
raw_app_meta_data: z.record(z.string(), z.unknown()).nullable(),
raw_user_meta_data: z.record(z.string(), z.unknown()).nullable(),
// Add other fields returned by the INSERT if necessary
});
// Use AuthUser for the output type hint
type CreateAuthUserOutput = AuthUser;
// Static JSON Schema for MCP capabilities
// NOTE(review): hand-maintained mirror of CreateAuthUserInputSchema; the
// 'role' default shown here is applied in the tool body, not by Zod.
const mcpInputSchema = {
type: 'object',
properties: {
email: { type: 'string', format: 'email', description: 'The email address for the new user.' },
password: { type: 'string', minLength: 6, description: 'Plain text password (min 6 chars). WARNING: Insecure.' },
role: { type: 'string', default: 'authenticated', description: 'User role.' },
user_metadata: { type: 'object', description: 'Optional user metadata.' },
app_metadata: { type: 'object', description: 'Optional app metadata.' },
},
required: ['email', 'password'],
};
// Tool definition
// Tool definition
/**
 * create_auth_user - inserts a row directly into auth.users, bypassing the
 * GoTrue API. The password is hashed in-database with crypt()/gen_salt('bf')
 * (pgcrypto), so the plain password is sent to Postgres but never stored.
 * NOTE(review): this bypasses any side effects GoTrue performs on signup
 * (identities rows, emails) — presumably acceptable for admin tooling; verify.
 */
export const createAuthUserTool = {
name: 'create_auth_user',
description: 'Creates a new user directly in auth.users. WARNING: Requires plain password, insecure. Use with extreme caution.',
privilegeLevel: 'privileged' as ToolPrivilegeLevel,
inputSchema: CreateAuthUserInputSchema,
mcpInputSchema: mcpInputSchema, // Ensure defined above
outputSchema: CreatedAuthUserZodSchema,
execute: async (input: CreateAuthUserInput, context: ToolContext): Promise<CreateAuthUserOutput> => { // Use CreateAuthUserOutput
const client = context.selfhostedClient;
const { email, password, role, app_metadata, user_metadata } = input;
// Direct DB connection is absolutely required for this direct insert
if (!client.isPgAvailable()) {
context.log('Direct database connection (DATABASE_URL) is required to create an auth user directly.', 'error');
throw new Error('Direct database connection (DATABASE_URL) is required to create an auth user directly.');
}
context.log(`Creating user ${email}...`, 'info');
// Use transaction to ensure atomicity and get pg client
const createdUser = await client.executeTransactionWithPg(async (pgClient: PoolClient) => {
// Check if pgcrypto extension is available (needed for crypt)
// Probe with a throwaway hash so we fail with a clear message instead of
// a cryptic error from the INSERT below.
try {
await pgClient.query("SELECT crypt('test', gen_salt('bf'))");
} catch (err) {
throw new Error('Failed to execute crypt function. Ensure pgcrypto extension is enabled in the database.');
}
// Construct the INSERT statement with parameterization
// instance_id falls back to the all-zero UUID when app.instance_id is
// unset; email_confirmed_at is set to now() so the user can sign in.
const sql = `
INSERT INTO auth.users (
instance_id, email, encrypted_password, role,
raw_app_meta_data, raw_user_meta_data,
aud, email_confirmed_at, confirmation_sent_at -- Set required defaults
)
VALUES (
COALESCE(current_setting('app.instance_id', TRUE), '00000000-0000-0000-0000-000000000000')::uuid,
$1, crypt($2, gen_salt('bf')),
$3,
$4::jsonb,
$5::jsonb,
'authenticated', now(), now()
)
RETURNING id, email, role, raw_app_meta_data, raw_user_meta_data, created_at::text, last_sign_in_at::text;
`;
const params = [
email,
password,
role || 'authenticated', // Default role
JSON.stringify(app_metadata || {}),
JSON.stringify(user_metadata || {})
];
try {
const result = await pgClient.query(sql, params);
if (result.rows.length === 0) {
throw new Error('User creation failed, no user returned after insert.');
}
// Validate the returned row against the output schema before use.
return CreatedAuthUserZodSchema.parse(result.rows[0]);
} catch (dbError: unknown) {
let errorMessage = 'Unknown database error during user creation';
if (typeof dbError === 'object' && dbError !== null && 'code' in dbError) {
// Safely extract code and message with proper type narrowing
const errorCode = String((dbError as { code: unknown }).code);
const errorMsg = 'message' in dbError && typeof (dbError as { message: unknown }).message === 'string'
? (dbError as { message: string }).message
: undefined;
// Check PG error code for unique violation
// (23505 = unique_violation, most likely the email's unique index)
if (errorCode === '23505') {
errorMessage = `User creation failed: Email '${email}' likely already exists.`;
} else if (errorMsg) {
errorMessage = `Database error (${errorCode}): ${errorMsg}`;
} else {
errorMessage = `Database error code: ${errorCode}`;
}
} else if (dbError instanceof Error) {
errorMessage = `Database error during user creation: ${dbError.message}`;
} else {
errorMessage = `Database error during user creation: ${String(dbError)}`;
}
// Log sanitized error (not full object to avoid leaking sensitive info)
console.error('Error creating user in DB:', errorMessage);
// Throw a specific error message
throw new Error(errorMessage);
}
});
// NOTE(review): success is reported twice (console.error + context.log) —
// possibly intentional (stderr audit trail vs. tool log); confirm.
console.error(`Successfully created user ${email} with ID ${createdUser.id}.`);
context.log(`Successfully created user ${email} with ID ${createdUser.id}.`);
return createdUser; // Matches CreateAuthUserOutput (AuthUser)
},
};

View File

@ -0,0 +1,83 @@
import { z } from 'zod';
import type { ToolContext, ToolPrivilegeLevel } from './types.js';
import { handleSqlResponse, isSqlErrorResponse } from './utils.js';
// Input schema: User ID
// Input schema: User ID
// Zod validates the id is a well-formed UUID before any SQL runs.
const DeleteAuthUserInputSchema = z.object({
user_id: z.string().uuid().describe('The UUID of the user to delete.'),
});
type DeleteAuthUserInput = z.infer<typeof DeleteAuthUserInputSchema>;
// Output schema: Success status and message
// success=false is used for "not found" (see the tool body); hard failures throw.
const DeleteAuthUserOutputSchema = z.object({
success: z.boolean(),
message: z.string(),
});
// Static JSON Schema for MCP capabilities
// NOTE(review): hand-maintained mirror of DeleteAuthUserInputSchema.
const mcpInputSchema = {
type: 'object',
properties: {
user_id: {
type: 'string',
format: 'uuid',
description: 'The UUID of the user to delete.',
},
},
required: ['user_id'],
};
// Tool definition
export const deleteAuthUserTool = {
name: 'delete_auth_user',
description: 'Deletes a user from auth.users by their ID. Requires service_role key and direct DB connection.',
privilegeLevel: 'privileged' as ToolPrivilegeLevel,
inputSchema: DeleteAuthUserInputSchema,
mcpInputSchema: mcpInputSchema,
outputSchema: DeleteAuthUserOutputSchema,
execute: async (input: DeleteAuthUserInput, context: ToolContext) => {
const client = context.selfhostedClient;
const { user_id } = input;
// This operation requires elevated privileges and modifies data.
// Prefer direct DB connection if available and service key is configured.
if (!client.isPgAvailable()) {
throw new Error('Direct database connection (DATABASE_URL) is required for deleting users but is not configured or available.');
}
// Service role key check remains relevant for awareness, but remove console.warn
// if (!client.getServiceRoleKey()) {
// console.warn('Service role key not explicitly configured, direct DB connection might fail if privileges are insufficient.');
// }
try {
// Use executeTransactionWithPg for safety, though it's a single statement
const result = await client.executeTransactionWithPg(async (pgClient) => {
// Use parameter binding for safety
const deleteResult = await pgClient.query(
'DELETE FROM auth.users WHERE id = $1',
[user_id]
);
return deleteResult;
});
if (result.rowCount === 1) {
return {
success: true,
message: `Successfully deleted user with ID: ${user_id}`,
};
}
// If rowCount was not 1, the user wasn't found/deleted
return {
success: false,
message: `User with ID ${user_id} not found or could not be deleted.`,
};
} catch (error: unknown) {
const errorMessage = error instanceof Error ? error.message : String(error);
console.error(`Error deleting user ${user_id}:`, errorMessage);
// Rethrow for the main handler to format the error response
throw new Error(`Failed to delete user ${user_id}: ${errorMessage}`);
}
},
};

View File

@ -0,0 +1,64 @@
import { z } from 'zod';
import type { SelfhostedSupabaseClient } from '../client/index.js';
import { handleSqlResponse, executeSqlWithFallback } from './utils.js';
import type { ToolContext, ToolPrivilegeLevel } from './types.js';
// Input schema
// Input schema
// read_only is only a hint passed down to the RPC layer; it is not enforced here.
const ExecuteSqlInputSchema = z.object({
sql: z.string().describe('The SQL query to execute.'),
read_only: z.boolean().optional().default(false).describe('Hint for the RPC function whether the query is read-only (best effort).'),
// Future enhancement: Add option to force direct connection?
// use_direct_connection: z.boolean().optional().default(false).describe('Attempt to use direct DB connection instead of RPC.'),
});
type ExecuteSqlInput = z.infer<typeof ExecuteSqlInputSchema>;
// Output schema - expects an array of results (rows)
const ExecuteSqlOutputSchema = z.array(z.unknown()).describe('The array of rows returned by the SQL query.');
// Static JSON Schema for MCP capabilities
// NOTE(review): hand-maintained mirror of ExecuteSqlInputSchema.
const mcpInputSchema = {
type: 'object',
properties: {
sql: { type: 'string', description: 'The SQL query to execute.' },
read_only: { type: 'boolean', default: false, description: 'Hint for the RPC function whether the query is read-only (best effort).' },
},
required: ['sql'],
};
// The tool definition - No explicit McpToolDefinition type needed
export const executeSqlTool = {
name: 'execute_sql',
description: 'Executes an arbitrary SQL query against the database. SECURITY: Requires service_role key or direct database connection.',
privilegeLevel: 'privileged' as ToolPrivilegeLevel,
inputSchema: ExecuteSqlInputSchema,
mcpInputSchema: mcpInputSchema,
outputSchema: ExecuteSqlOutputSchema,
execute: async (input: ExecuteSqlInput, context: ToolContext) => {
const client = context.selfhostedClient;
// SECURITY: Verify privilege requirements before executing arbitrary SQL
if (!client.isPgAvailable() && !client.isServiceRoleAvailable()) {
throw new Error(
'execute_sql requires either a direct database connection (DATABASE_URL) ' +
'or a service role key (SUPABASE_SERVICE_ROLE_KEY) to be configured. ' +
'This tool cannot be used with only the anon key for security reasons.'
);
}
// AUDIT: Log SQL execution with user context
const userInfo = context.user
? `user=${context.user.email || context.user.userId} role=${context.user.role}`
: 'user=unknown (stdio mode)';
// Log query for audit (truncate long queries)
const queryPreview = input.sql.length > 200
? `${input.sql.substring(0, 200)}... [truncated, ${input.sql.length} chars total]`
: input.sql;
console.error(`[AUDIT] SQL execution by ${userInfo}: ${queryPreview}`);
context.log(`Executing SQL (readOnly: ${input.read_only})`, 'info');
const result = await executeSqlWithFallback(client, input.sql, input.read_only);
return handleSqlResponse(result, ExecuteSqlOutputSchema);
},
};

View File

@ -0,0 +1,210 @@
/**
* explain_query - Gets the execution plan for a SQL query.
*
* SECURITY WARNING: This tool can execute arbitrary SQL when analyze=true.
* The ANALYZE option actually runs the query to get real timing data.
* For write queries (INSERT/UPDATE/DELETE), this WILL modify data.
*
* This tool is marked as 'privileged' because:
* 1. It can execute any SQL the user provides
* 2. With ANALYZE, it runs the query (not just plans it)
* 3. Even without ANALYZE, it reveals schema information
*/
import { z } from 'zod';
import type { ToolContext, ToolPrivilegeLevel } from './types.js';
import { executeSqlWithFallback, isSqlErrorResponse } from './utils.js';
// Output schema for query explanation
// Output schema for query explanation
// planning/execution times are only populated for JSON-format ANALYZE runs
// (extracted from the plan in the tool body).
const ExplainQueryOutputSchema = z.object({
query: z.string(),
plan: z.unknown(), // JSON plan output
format: z.string(),
analyzed: z.boolean(),
planning_time_ms: z.number().optional(),
execution_time_ms: z.number().optional(),
warnings: z.array(z.string()).optional(),
});
// Input schema
// buffers/timing only take effect together with analyze (see EXPLAIN docs);
// the tool body enforces that pairing when assembling options.
const ExplainQueryInputSchema = z.object({
sql: z.string().describe('The SQL query to analyze.'),
analyze: z.boolean().optional().default(false).describe('Actually execute the query to get real timing. WARNING: This WILL execute the query and modify data for write operations!'),
format: z.enum(['json', 'text', 'yaml', 'xml']).optional().default('json').describe('Output format for the plan.'),
verbose: z.boolean().optional().default(false).describe('Include additional details in the plan.'),
costs: z.boolean().optional().default(true).describe('Include estimated costs.'),
buffers: z.boolean().optional().default(false).describe('Include buffer usage statistics (requires ANALYZE).'),
timing: z.boolean().optional().default(true).describe('Include actual timing (requires ANALYZE).'),
settings: z.boolean().optional().default(false).describe('Include non-default configuration settings.'),
});
type ExplainQueryInput = z.infer<typeof ExplainQueryInputSchema>;
// Static JSON Schema for MCP capabilities
// NOTE(review): hand-maintained mirror of ExplainQueryInputSchema.
const mcpInputSchema = {
type: 'object',
properties: {
sql: {
type: 'string',
description: 'The SQL query to analyze.',
},
analyze: {
type: 'boolean',
default: false,
description: 'Actually execute the query to get real timing. WARNING: This WILL execute the query and modify data for write operations!',
},
format: {
type: 'string',
enum: ['json', 'text', 'yaml', 'xml'],
default: 'json',
description: 'Output format for the plan.',
},
verbose: {
type: 'boolean',
default: false,
description: 'Include additional details in the plan.',
},
costs: {
type: 'boolean',
default: true,
description: 'Include estimated costs.',
},
buffers: {
type: 'boolean',
default: false,
description: 'Include buffer usage statistics (requires ANALYZE).',
},
timing: {
type: 'boolean',
default: true,
description: 'Include actual timing (requires ANALYZE).',
},
settings: {
type: 'boolean',
default: false,
description: 'Include non-default configuration settings.',
},
},
required: ['sql'],
};
// Helper to detect dangerous write patterns
// Note: This is best-effort detection - complex queries may bypass this
// (e.g. write keywords inside string literals still match the regex below).
function detectWriteQuery(sql: string): { isWrite: boolean; queryType: string; confidence: 'high' | 'medium' | 'low' } {
    const normalizedSql = sql.toUpperCase().trim();
    // High confidence - the statement itself starts with a write keyword.
    const writeKeywords = ['INSERT', 'UPDATE', 'DELETE', 'TRUNCATE', 'DROP', 'ALTER', 'CREATE'];
    for (const keyword of writeKeywords) {
        if (normalizedSql.startsWith(keyword)) {
            return { isWrite: true, queryType: keyword, confidence: 'high' };
        }
    }
    // Medium confidence - a write keyword appears somewhere inside
    // (could be a CTE such as WITH x AS (DELETE ...), or a subquery).
    if (/\b(INSERT|UPDATE|DELETE|TRUNCATE|DROP|ALTER|CREATE)\b/.test(normalizedSql)) {
        return { isWrite: true, queryType: 'MIXED/CTE', confidence: 'medium' };
    }
    // Low confidence - appears to be a plain read.
    return { isWrite: false, queryType: 'SELECT', confidence: 'low' };
}
/**
 * explain_query - builds an EXPLAIN (...) wrapper around the given SQL,
 * runs it via the client's fallback path, and normalizes the plan output.
 * ANALYZE-specific options (BUFFERS, TIMING) are only emitted when analyze
 * is true; planning/execution times are extracted only from JSON plans.
 */
export const explainQueryTool = {
name: 'explain_query',
description: 'Gets the execution plan for a SQL query. WARNING: With ANALYZE enabled, the query is actually executed which will modify data for write operations.',
// SECURITY: This tool is privileged because it can execute arbitrary SQL
privilegeLevel: 'privileged' as ToolPrivilegeLevel,
inputSchema: ExplainQueryInputSchema,
mcpInputSchema: mcpInputSchema,
outputSchema: ExplainQueryOutputSchema,
execute: async (input: ExplainQueryInput, context: ToolContext) => {
const client = context.selfhostedClient;
const { sql, analyze, format, verbose, costs, buffers, timing, settings } = input;
const warnings: string[] = [];
// Check for write queries when using ANALYZE
// (best-effort keyword detection; see detectWriteQuery)
const { isWrite, queryType, confidence } = detectWriteQuery(sql);
if (analyze) {
if (isWrite) {
if (confidence === 'high') {
warnings.push(`⚠️ CRITICAL: ANALYZE will EXECUTE this ${queryType} query and MODIFY DATA!`);
} else {
warnings.push(`⚠️ WARNING: Query may contain ${queryType} operations. ANALYZE will execute the query.`);
}
} else {
warnings.push('Note: ANALYZE will execute the query to measure actual timing.');
}
}
// Build the EXPLAIN options
// COSTS is always emitted with its boolean value; BUFFERS/TIMING only
// alongside ANALYZE, matching Postgres's requirements.
const options: string[] = [];
options.push(`FORMAT ${format.toUpperCase()}`);
if (analyze) options.push('ANALYZE true');
if (verbose) options.push('VERBOSE true');
options.push(`COSTS ${costs}`);
if (analyze && buffers) options.push('BUFFERS true');
if (analyze) options.push(`TIMING ${timing}`);
if (settings) options.push('SETTINGS true');
const explainSql = `EXPLAIN (${options.join(', ')}) ${sql}`;
// SECURITY: Use read_only=false when ANALYZE is true since it executes the query
// Use read_only=true when just planning (ANALYZE=false)
const isReadOnly = !analyze;
try {
const result = await executeSqlWithFallback(client, explainSql, isReadOnly);
if (isSqlErrorResponse(result)) {
throw new Error(result.error.message || 'Failed to explain query');
}
const rows = result as unknown[];
// Parse the result based on format
let plan: unknown;
let planningTime: number | undefined;
let executionTime: number | undefined;
if (format === 'json') {
// JSON format returns an array with a single object containing 'QUERY PLAN'
// (both key casings are probed since the transport may normalize them)
if (rows.length > 0) {
const firstRow = rows[0] as Record<string, unknown>;
const queryPlan = firstRow['QUERY PLAN'] || firstRow['query plan'];
if (Array.isArray(queryPlan)) {
plan = queryPlan;
// Extract timing from JSON plan
// ('Planning Time' / 'Execution Time' exist only with ANALYZE)
const planObj = queryPlan[0] as Record<string, unknown> | undefined;
planningTime = planObj?.['Planning Time'] as number | undefined;
executionTime = planObj?.['Execution Time'] as number | undefined;
} else {
plan = queryPlan;
}
}
} else {
// Text/YAML/XML format returns multiple rows
// Map each row to its plan text, falling back to the raw row.
plan = rows.map(row => {
const r = row as Record<string, unknown>;
return r['QUERY PLAN'] || r['query plan'] || row;
});
}
return {
query: sql,
plan,
format,
analyzed: analyze,
planning_time_ms: planningTime,
execution_time_ms: executionTime,
warnings: warnings.length > 0 ? warnings : undefined,
};
} catch (error: unknown) {
const errorMessage = error instanceof Error ? error.message : String(error);
throw new Error(`Failed to explain query: ${errorMessage}`);
}
},
};

View File

@ -0,0 +1,256 @@
import { z } from 'zod';
import { writeFileSync } from 'fs';
import * as nodePath from 'path';
import { mkdirSync } from 'fs';
import type { SelfhostedSupabaseClient } from '../client/index.js';
// import type { McpToolDefinition } from '@modelcontextprotocol/sdk/types.js'; // Removed incorrect import
import type { ToolContext } from './types.js';
import { runExternalCommand, redactDatabaseUrl } from './utils.js';
/**
 * Sanitizes a schema name to prevent command injection.
 * Only allows alphanumeric characters, underscores, and hyphens.
 *
 * @param schema - The schema name to sanitize
 * @returns The sanitized schema name unchanged when valid
 * @throws Error if the schema name contains invalid characters
 */
function sanitizeSchemaName(schema: string): string {
    // PostgreSQL identifiers: letters, digits, underscores (and $ but we exclude it for safety)
    // Also allow hyphens as they're sometimes used
    const identifierPattern = /^[a-zA-Z_][a-zA-Z0-9_-]*$/;
    if (identifierPattern.test(schema)) {
        return schema;
    }
    // Sanitize the schema name in error message to prevent log injection
    const sanitizedForDisplay = schema.slice(0, 50).replace(/[^\w-]/g, '?');
    throw new Error(`Invalid schema name "${sanitizedForDisplay}": must start with a letter or underscore and contain only alphanumeric characters, underscores, or hyphens`);
}
/**
 * Path utilities wrapped to satisfy static analysis.
 * These functions perform path resolution with security validation.
 */
const pathUtils = {
    /**
     * Resolves a path to an absolute path.
     * The caller MUST validate the result before using it for file operations.
     *
     * SECURITY: Path traversal is prevented by isWithinWorkspace() validation
     * which ensures output stays within the configured workspace directory.
     */
    toAbsolute(pathString: string): string {
        // Strip NUL bytes, then normalize backslashes to forward slashes
        // before resolving, so Windows-style input behaves consistently.
        const withoutNulBytes = pathString.replace(/\0/g, '');
        const forwardSlashed = withoutNulBytes.replace(/\\/g, '/');
        return nodePath.resolve(forwardSlashed);
    },
    /**
     * Gets the directory portion of a path.
     */
    getDirectory(pathString: string): string {
        return nodePath.dirname(pathString);
    },
};
/**
 * Validates that a resolved path is within a workspace boundary.
 * This is the security check that prevents path traversal attacks.
 *
 * @param normalizedPath - The already-resolved absolute path
 * @param workspacePath - The workspace boundary path
 * @returns true if the path is within the workspace
 */
function isWithinWorkspace(normalizedPath: string, workspacePath: string): boolean {
    const workspaceRoot = pathUtils.toAbsolute(workspacePath);
    // Accept the workspace root itself, or any path strictly below it.
    // The trailing '/' prevents prefix tricks like "/workspace-evil".
    return normalizedPath === workspaceRoot || normalizedPath.startsWith(`${workspaceRoot}/`);
}
/**
 * Normalizes and validates the output path for cross-platform compatibility.
 * Includes path traversal protection when workspacePath is provided.
 *
 * @param inputPath - The user-provided path
 * @param workspacePath - Optional workspace path to restrict output within
 * @returns The normalized absolute path
 * @throws Error if path traversal is detected or path is invalid
 */
function normalizeOutputPath(inputPath: string, workspacePath?: string): string {
    let candidate = inputPath;
    // Handle Windows drive letters in Unix-style paths (e.g., "/c:/path" -> "C:/path")
    if (process.platform === 'win32' && /^\/[a-zA-Z]:/.test(candidate)) {
        // Drop the leading slash, then uppercase the drive letter.
        candidate = candidate.slice(1);
        candidate = candidate.charAt(0).toUpperCase() + candidate.slice(1);
    }
    // Use Node.js resolve to normalize the path (resolves .. and . segments)
    // SECURITY: Path is validated below via isWithinWorkspace check
    const normalized = pathUtils.toAbsolute(candidate);
    // Path traversal protection: ensure output is within workspace if specified
    if (workspacePath && !isWithinWorkspace(normalized, workspacePath)) {
        const resolvedWorkspace = pathUtils.toAbsolute(workspacePath);
        throw new Error(`Output path must be within workspace directory: ${resolvedWorkspace}`);
    }
    return normalized;
}
// Input schema
const GenerateTypesInputSchema = z.object({
included_schemas: z.array(z.string()).optional().default(['public']).describe('Database schemas to include in type generation.'),
output_filename: z.string().optional().default('database.types.ts').describe('Filename to save the generated types to in the workspace root.'),
output_path: z.string().describe('Absolute path where to save the file. If provided, output_filename will be ignored.'),
});
type GenerateTypesInput = z.infer<typeof GenerateTypesInputSchema>;
// Output schema
const GenerateTypesOutputSchema = z.object({
success: z.boolean(),
message: z.string().describe('Output message from the generation process.'),
types: z.string().optional().describe('The generated TypeScript types, if successful.'),
file_path: z.string().optional().describe('The absolute path to the saved types file, if successful.'),
platform: z.string().describe('Operating system platform (win32, darwin, linux).'),
});
// Static JSON Schema for MCP capabilities
const mcpInputSchema = {
type: 'object',
properties: {
included_schemas: {
type: 'array',
items: { type: 'string' },
default: ['public'],
description: 'Database schemas to include in type generation.',
},
output_filename: {
type: 'string',
default: 'database.types.ts',
description: 'Filename to save the generated types to in the workspace root.',
},
output_path: {
type: 'string',
description: 'Absolute path where to download the generated TypeScript file. Examples: Windows: "C:\\\\path\\\\to\\\\project\\\\database.types.ts", macOS/Linux: "/path/to/project/database.types.ts". This parameter is required.',
},
},
required: ['output_path'], // output_path is required for file download
};
// The tool definition - No explicit McpToolDefinition type needed
// Runs `supabase gen types typescript --db-url ... --schema ...` via the external
// command helper and writes stdout to a normalized, workspace-restricted path.
// Every failure path returns { success: false, ... } instead of throwing, so MCP
// clients always receive a structured result including the host platform.
export const generateTypesTool = {
name: 'generate_typescript_types',
description: 'Generates TypeScript types from the database schema using the Supabase CLI (`supabase gen types`) and downloads the file to the specified absolute path. The tool returns the current platform (win32, darwin, linux) to help with path formatting. Requires DATABASE_URL configuration and Supabase CLI installed.',
inputSchema: GenerateTypesInputSchema,
mcpInputSchema: mcpInputSchema, // Add static JSON schema
outputSchema: GenerateTypesOutputSchema,
execute: async (input: GenerateTypesInput, context: ToolContext) => {
const client = context.selfhostedClient;
const dbUrl = client.getDbUrl(); // Need this getter in the client
// Fail fast: the CLI cannot introspect the schema without a connection string.
if (!dbUrl) {
return {
success: false,
message: 'Error: DATABASE_URL is not configured. Cannot generate types.',
platform: process.platform,
};
}
// Construct the command
// Sanitize schema names to prevent command injection
let sanitizedSchemas: string[];
try {
sanitizedSchemas = input.included_schemas.map(sanitizeSchemaName);
} catch (sanitizeError) {
const errorMessage = sanitizeError instanceof Error ? sanitizeError.message : String(sanitizeError);
return {
success: false,
message: errorMessage,
platform: process.platform,
};
}
const schemas = sanitizedSchemas.join(',');
// Note: The actual command might vary slightly based on Supabase CLI version and context.
// Using --db-url is generally safer for self-hosted.
const command = `supabase gen types typescript --db-url "${dbUrl}" --schema "${schemas}"`;
// Log command with redacted credentials for security
console.error(`Running command: supabase gen types typescript --db-url "${redactDatabaseUrl(dbUrl)}" --schema "${schemas}"`);
try {
const { stdout, stderr, error } = await runExternalCommand(command);
if (error) {
console.error(`Error executing supabase gen types: ${stderr || error.message}`);
return {
success: false,
message: `Command failed: ${stderr || error.message}`,
platform: process.platform,
};
}
if (stderr) {
console.error(`supabase gen types produced stderr output: ${stderr}`);
// Treat stderr as non-fatal for now, maybe just warnings
}
// Normalize and save the generated types to the specified absolute path
// Path traversal protection: restrict to workspace directory if configured
let outputPath: string;
try {
outputPath = normalizeOutputPath(input.output_path, context.workspacePath);
console.error(`Normalized output path: ${outputPath}`);
} catch (pathError) {
const pathErrorMessage = pathError instanceof Error ? pathError.message : String(pathError);
console.error(`Invalid output path: ${pathErrorMessage}`);
return {
success: false,
message: `Invalid output path "${input.output_path}": ${pathErrorMessage}`,
platform: process.platform,
};
}
try {
// Ensure the directory exists
const outputDir = pathUtils.getDirectory(outputPath);
try {
mkdirSync(outputDir, { recursive: true });
} catch (dirError) {
// Ignore error if directory already exists
// (recursive mkdir normally tolerates EEXIST; this guard is belt-and-braces)
if ((dirError as NodeJS.ErrnoException).code !== 'EEXIST') {
throw dirError;
}
}
writeFileSync(outputPath, stdout, 'utf8');
console.error(`Types saved to: ${outputPath}`);
} catch (writeError) {
const writeErrorMessage = writeError instanceof Error ? writeError.message : String(writeError);
console.error(`Failed to write types file: ${writeErrorMessage}`);
// Generation itself worked, so the types are still returned inline.
return {
success: false,
message: `Type generation succeeded but failed to save file: ${writeErrorMessage}. Platform: ${process.platform}. Attempted path: ${outputPath}`,
types: stdout,
platform: process.platform,
};
}
console.error('Type generation and file save successful.');
return {
success: true,
message: `Types generated successfully and saved to ${outputPath}.${stderr ? `\nWarnings:\n${stderr}` : ''}`,
types: stdout,
file_path: outputPath,
platform: process.platform,
};
} catch (err: unknown) {
// Catch-all for unexpected failures in the command helper itself.
const errorMessage = err instanceof Error ? err.message : String(err);
console.error(`Exception during type generation: ${errorMessage}`);
return {
success: false,
message: `Exception during type generation: ${errorMessage}. Platform: ${process.platform}`,
platform: process.platform,
};
}
},
};

View File

@ -0,0 +1,273 @@
import { z } from 'zod';
import { executeSqlWithFallback, isSqlErrorResponse } from './utils.js';
import type { ToolContext } from './types.js';
// Advisor types
const AdvisorTypeSchema = z.enum(['security', 'performance']);
type AdvisorType = z.infer<typeof AdvisorTypeSchema>;
// Schema for advisor issue output
// One finding produced by a lint query below; `code` carries the upstream
// Splinter rule number (e.g. '0013') as selected in the SQL.
const AdvisorIssueSchema = z.object({
code: z.string(),
name: z.string(),
level: z.enum(['warning', 'error', 'info']),
description: z.string(),
detail: z.string().nullable(),
remediation: z.string().nullable(),
affected_object: z.string().nullable(),
});
const GetAdvisorsOutputSchema = z.object({
issues: z.array(AdvisorIssueSchema),
type: AdvisorTypeSchema,
total_count: z.number(),
});
type GetAdvisorsOutput = z.infer<typeof GetAdvisorsOutputSchema>;
// Input schema
const GetAdvisorsInputSchema = z.object({
type: AdvisorTypeSchema.describe('The type of advisors to retrieve (security or performance)'),
});
type GetAdvisorsInput = z.infer<typeof GetAdvisorsInputSchema>;
// Static JSON Schema for MCP capabilities
// Hand-written mirror of GetAdvisorsInputSchema; keep in sync with it.
const mcpInputSchema = {
type: 'object',
properties: {
type: {
type: 'string',
enum: ['security', 'performance'],
description: 'The type of advisors to retrieve (security or performance)',
},
},
required: ['type'],
};
// SQL queries for security checks (ported from Supabase Splinter)
// Each query projects exactly the columns expected by AdvisorIssueSchema:
// code, name, level, description, detail, remediation, affected_object.
const securityChecks = {
// 0013 - RLS disabled in public schema (API-exposed tables without row security)
rls_disabled_in_public: `
SELECT
'0013' as code,
'rls_disabled_in_public' as name,
'warning' as level,
'Tables in the public schema without Row Level Security enabled' as description,
format('Table: %I.%I', n.nspname, c.relname) as detail,
'Enable RLS with: ALTER TABLE ' || quote_ident(n.nspname) || '.' || quote_ident(c.relname) || ' ENABLE ROW LEVEL SECURITY;' as remediation,
format('%I.%I', n.nspname, c.relname) as affected_object
FROM pg_catalog.pg_class c
JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE c.relkind = 'r'
AND n.nspname = 'public'
AND NOT c.relrowsecurity
AND c.relname NOT LIKE 'pg_%'
AND c.relname NOT LIKE '_pg_%'
`,
// 0007 - Policy exists but RLS disabled (policies are inert until RLS is enabled)
policy_exists_rls_disabled: `
SELECT
'0007' as code,
'policy_exists_rls_disabled' as name,
'warning' as level,
'Tables with RLS policies defined but RLS is disabled' as description,
format('Table: %I.%I has policies but RLS is disabled', n.nspname, c.relname) as detail,
'Enable RLS with: ALTER TABLE ' || quote_ident(n.nspname) || '.' || quote_ident(c.relname) || ' ENABLE ROW LEVEL SECURITY;' as remediation,
format('%I.%I', n.nspname, c.relname) as affected_object
FROM pg_catalog.pg_class c
JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE c.relkind = 'r'
AND NOT c.relrowsecurity
AND EXISTS (
SELECT 1 FROM pg_catalog.pg_policy p WHERE p.polrelid = c.oid
)
`,
// 0008 - RLS enabled but no policy (error level: blocks all non-owner access)
rls_enabled_no_policy: `
SELECT
'0008' as code,
'rls_enabled_no_policy' as name,
'error' as level,
'Tables with RLS enabled but no policies defined (blocks all access)' as description,
format('Table: %I.%I has RLS enabled but no policies', n.nspname, c.relname) as detail,
'Add a policy or disable RLS if not needed' as remediation,
format('%I.%I', n.nspname, c.relname) as affected_object
FROM pg_catalog.pg_class c
JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE c.relkind = 'r'
AND c.relrowsecurity
AND NOT EXISTS (
SELECT 1 FROM pg_catalog.pg_policy p WHERE p.polrelid = c.oid
)
AND n.nspname NOT IN ('pg_catalog', 'information_schema', 'pg_toast')
`,
// 0002 - Auth users exposed via view (public views/matviews depending on auth.users)
auth_users_exposed: `
SELECT
'0002' as code,
'auth_users_exposed' as name,
'error' as level,
'Views exposing auth.users data' as description,
format('View: %I.%I may expose auth.users', n.nspname, c.relname) as detail,
'Review and restrict the view definition or add proper RLS' as remediation,
format('%I.%I', n.nspname, c.relname) as affected_object
FROM pg_catalog.pg_class c
JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
JOIN pg_catalog.pg_depend d ON d.objid = c.oid
JOIN pg_catalog.pg_class dep_c ON dep_c.oid = d.refobjid
JOIN pg_catalog.pg_namespace dep_n ON dep_n.oid = dep_c.relnamespace
WHERE c.relkind IN ('v', 'm')
AND n.nspname = 'public'
AND dep_n.nspname = 'auth'
AND dep_c.relname = 'users'
`,
};
// SQL queries for performance checks (ported from Supabase Splinter)
// Same column contract as securityChecks: each query yields rows matching
// AdvisorIssueSchema (code, name, level, description, detail, remediation,
// affected_object).
const performanceChecks = {
// 0001 - Unindexed foreign keys (FK columns not covered as a prefix of any index)
unindexed_foreign_keys: `
SELECT
'0001' as code,
'unindexed_foreign_keys' as name,
'warning' as level,
'Foreign keys without covering indexes can impact performance' as description,
format('FK on %I.%I (%s) lacks an index', cn.nspname, c.conrelid::regclass::text,
array_to_string(ARRAY(
SELECT a.attname FROM unnest(c.conkey) WITH ORDINALITY AS u(attnum, i)
JOIN pg_attribute a ON a.attnum = u.attnum AND a.attrelid = c.conrelid
ORDER BY u.i
), ', ')) as detail,
'Create an index on the foreign key columns' as remediation,
c.conrelid::regclass::text as affected_object
FROM pg_catalog.pg_constraint c
JOIN pg_catalog.pg_namespace cn ON cn.oid = c.connamespace
WHERE c.contype = 'f'
AND NOT EXISTS (
SELECT 1 FROM pg_catalog.pg_index i
WHERE i.indrelid = c.conrelid
AND c.conkey <@ i.indkey::int2[]
)
AND cn.nspname NOT IN ('pg_catalog', 'information_schema')
`,
// 0004 - Missing primary keys (excludes system/Supabase-internal schemas)
missing_primary_keys: `
SELECT
'0004' as code,
'missing_primary_key' as name,
'warning' as level,
'Tables without primary keys are inefficient at scale' as description,
format('Table: %I.%I has no primary key', n.nspname, c.relname) as detail,
'Add a primary key to the table' as remediation,
format('%I.%I', n.nspname, c.relname) as affected_object
FROM pg_catalog.pg_class c
JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE c.relkind = 'r'
AND NOT EXISTS (
SELECT 1 FROM pg_catalog.pg_constraint con
WHERE con.conrelid = c.oid AND con.contype = 'p'
)
AND n.nspname NOT IN ('pg_catalog', 'information_schema', 'pg_toast', 'cron', 'extensions', 'graphql', 'graphql_public', 'realtime', 'storage', 'supabase_functions', 'supabase_migrations', 'vault', 'pgsodium', 'pgsodium_masks', 'auth', 'net', '_realtime')
`,
// 0005 - Unused indexes (info level: zero scans since stats reset; non-unique only)
unused_indexes: `
SELECT
'0005' as code,
'unused_index' as name,
'info' as level,
'Indexes with zero scans that may be candidates for removal' as description,
format('Index: %I.%I on %I.%I has had 0 scans', sn.nspname, i.relname, tn.nspname, t.relname) as detail,
'Consider dropping the index if it is not needed' as remediation,
format('%I.%I', sn.nspname, i.relname) as affected_object
FROM pg_catalog.pg_stat_user_indexes s
JOIN pg_catalog.pg_index ix ON ix.indexrelid = s.indexrelid
JOIN pg_catalog.pg_class i ON i.oid = s.indexrelid
JOIN pg_catalog.pg_class t ON t.oid = s.relid
JOIN pg_catalog.pg_namespace sn ON sn.oid = i.relnamespace
JOIN pg_catalog.pg_namespace tn ON tn.oid = t.relnamespace
WHERE s.idx_scan = 0
AND NOT ix.indisunique
AND NOT ix.indisprimary
AND sn.nspname NOT IN ('pg_catalog', 'information_schema', 'pg_toast')
`,
// 0009 - Duplicate indexes (same table, keys, opclasses and options)
duplicate_indexes: `
SELECT
'0009' as code,
'duplicate_index' as name,
'warning' as level,
'Duplicate indexes waste storage and slow writes' as description,
format('Indexes %I and %I on %I.%I have identical definitions',
i1.relname, i2.relname, tn.nspname, t.relname) as detail,
'Consider dropping one of the duplicate indexes' as remediation,
format('%I.%I', sn.nspname, i1.relname) as affected_object
FROM pg_catalog.pg_index x1
JOIN pg_catalog.pg_index x2 ON x1.indrelid = x2.indrelid AND x1.indexrelid < x2.indexrelid
JOIN pg_catalog.pg_class i1 ON i1.oid = x1.indexrelid
JOIN pg_catalog.pg_class i2 ON i2.oid = x2.indexrelid
JOIN pg_catalog.pg_class t ON t.oid = x1.indrelid
JOIN pg_catalog.pg_namespace sn ON sn.oid = i1.relnamespace
JOIN pg_catalog.pg_namespace tn ON tn.oid = t.relnamespace
WHERE x1.indkey = x2.indkey
AND x1.indclass = x2.indclass
AND x1.indoption = x2.indoption
AND sn.nspname NOT IN ('pg_catalog', 'information_schema', 'pg_toast')
`,
};
// Tool definition
// Runs every lint query for the requested advisor type, collects validated
// issue rows, and returns them sorted by severity.
export const getAdvisorsTool = {
    name: 'get_advisors',
    description: 'Gets security or performance advisory notices for the database. Based on Supabase Splinter linting rules. Helps identify issues like missing RLS policies, unindexed foreign keys, and other common problems.',
    inputSchema: GetAdvisorsInputSchema,
    mcpInputSchema: mcpInputSchema,
    outputSchema: GetAdvisorsOutputSchema,
    execute: async (input: GetAdvisorsInput, context: ToolContext): Promise<GetAdvisorsOutput> => {
        const client = context.selfhostedClient;
        const { type } = input;
        // Pick the rule set that matches the requested advisor type.
        const ruleSet = type === 'security' ? securityChecks : performanceChecks;
        // Convert one raw SQL row into a validated advisor issue record.
        const toIssue = (row: Record<string, unknown>, fallbackName: string): z.infer<typeof AdvisorIssueSchema> => ({
            code: String(row.code || ''),
            name: String(row.name || fallbackName),
            level: (row.level as 'warning' | 'error' | 'info' | undefined) ?? 'warning',
            description: String(row.description || ''),
            detail: row.detail ? String(row.detail) : null,
            remediation: row.remediation ? String(row.remediation) : null,
            affected_object: row.affected_object ? String(row.affected_object) : null,
        });
        const collected: z.infer<typeof AdvisorIssueSchema>[] = [];
        // Each check runs independently; a failing check is logged and skipped
        // so one broken rule does not hide the rest of the report.
        for (const [checkName, sql] of Object.entries(ruleSet)) {
            try {
                const result = await executeSqlWithFallback(client, sql, true);
                if (isSqlErrorResponse(result)) {
                    context.log(`Error running ${checkName}: ${result.error.message}`, 'warn');
                    continue;
                }
                if (Array.isArray(result)) {
                    collected.push(...result.map((row) => toIssue(row, checkName)));
                }
            } catch (error) {
                context.log(`Failed to run ${checkName}: ${error}`, 'warn');
            }
        }
        // Most severe first: error, then warning, then info.
        const severityRank: Record<'error' | 'warning' | 'info', number> = { error: 0, warning: 1, info: 2 };
        collected.sort((left, right) => severityRank[left.level] - severityRank[right.level]);
        return {
            issues: collected,
            type,
            total_count: collected.length,
        };
    },
};

View File

@ -0,0 +1,102 @@
import { z } from 'zod';
import type { ToolContext } from './types.js';
import { handleSqlResponse } from './utils.js';
import type { PoolClient } from 'pg';
import type { SqlSuccessResponse, AuthUser } from '../types/index.js'; // Import AuthUser
// Input schema
const GetAuthUserInputSchema = z.object({
user_id: z.string().uuid().describe('The UUID of the user to retrieve.'),
});
type GetAuthUserInput = z.infer<typeof GetAuthUserInputSchema>;
// Output schema - Zod for validation (single user)
// Timestamps arrive as text because the SQL below casts them with ::text.
const AuthUserZodSchema = z.object({
id: z.string().uuid(),
email: z.string().email('Invalid email').nullable(),
role: z.string().nullable(),
created_at: z.string().nullable(),
last_sign_in_at: z.string().nullable(),
raw_app_meta_data: z.record(z.string(), z.unknown()).nullable(),
raw_user_meta_data: z.record(z.string(), z.unknown()).nullable(),
// Add more fields as needed
});
// Use AuthUser for the output type hint
type GetAuthUserOutput = AuthUser;
// Static JSON Schema for MCP
// Hand-written mirror of GetAuthUserInputSchema; keep in sync with it.
const mcpInputSchema = {
type: 'object',
properties: {
user_id: {
type: 'string',
description: 'The UUID of the user to retrieve.',
format: 'uuid', // Hint format if possible
},
},
required: ['user_id'],
};
// Tool definition
// Looks up one auth.users row by UUID over a direct, parameterized Postgres
// connection and validates the row against AuthUserZodSchema.
export const getAuthUserTool = {
    name: 'get_auth_user',
    description: 'Retrieves details for a specific user from auth.users by their ID.',
    inputSchema: GetAuthUserInputSchema,
    mcpInputSchema: mcpInputSchema,
    outputSchema: AuthUserZodSchema, // Single-user Zod schema
    execute: async (input: GetAuthUserInput, context: ToolContext): Promise<GetAuthUserOutput> => {
        const client = context.selfhostedClient;
        const { user_id } = input;
        // A direct Postgres connection is mandatory for the parameterized query.
        if (!client.isPgAvailable()) {
            context.log('Direct database connection (DATABASE_URL) is required to get auth user details.', 'error');
            throw new Error('Direct database connection (DATABASE_URL) is required to get auth user details.');
        }
        // Timestamps are cast to text so they serialize predictably.
        const sql = `
SELECT
id,
email,
role,
raw_app_meta_data,
raw_user_meta_data,
created_at::text,
last_sign_in_at::text
FROM auth.users
WHERE id = $1
`;
        const params = [user_id];
        console.error(`Attempting to get auth user ${user_id} using direct DB connection...`);
        // Validate one row against the Zod schema, translating Zod issues into
        // a readable error message; non-Zod errors pass through untouched.
        const validateRow = (row: unknown) => {
            try {
                return AuthUserZodSchema.parse(row);
            } catch (validationError) {
                if (validationError instanceof z.ZodError) {
                    console.error("Zod validation failed:", validationError.issues);
                    throw new Error(`Output validation failed: ${validationError.issues.map((e) => `${e.path.join('.')}: ${e.message}`).join(', ')}`);
                }
                throw validationError; // Rethrow other errors
            }
        };
        // Run the parameterized query inside the transaction helper.
        const user = await client.executeTransactionWithPg(async (pgClient: PoolClient) => {
            const result = await pgClient.query(sql, params);
            if (result.rows.length === 0) {
                throw new Error(`User with ID ${user_id} not found.`);
            }
            return validateRow(result.rows[0]);
        });
        console.error(`Found user ${user_id}.`);
        context.log(`Found user ${user_id}.`);
        return user;
    },
};

View File

@ -0,0 +1,138 @@
/**
* get_cron_job_history - Gets execution history for pg_cron jobs.
*
* Requires the pg_cron extension to be installed.
* Shows recent job runs with status and timing information.
*/
import { z } from 'zod';
import { handleSqlResponse, executeSqlWithFallback } from './utils.js';
import type { ToolContext, ToolPrivilegeLevel } from './types.js';
// One row of cron job history: job metadata joined (LEFT) to run details, so
// run-side fields are null for jobs that have never executed.
const CronJobRunSchema = z.object({
jobid: z.number(),
jobname: z.string().nullable(),
schedule: z.string(),
runid: z.number().nullable(),
status: z.string().nullable(),
start_time: z.string().nullable(),
end_time: z.string().nullable(),
return_message: z.string().nullable(),
});
const GetCronJobHistoryOutputSchema = z.array(CronJobRunSchema);
const GetCronJobHistoryInputSchema = z.object({
jobid: z.number().optional().describe('Filter by specific job ID.'),
jobname: z.string().optional().describe('Filter by job name pattern.'),
limit: z.number().optional().default(100).describe('Maximum number of records to return.'),
status: z.enum(['succeeded', 'failed']).optional().describe('Filter by execution status.'),
});
type GetCronJobHistoryInput = z.infer<typeof GetCronJobHistoryInputSchema>;
// Static JSON Schema mirror of GetCronJobHistoryInputSchema for MCP clients.
const mcpInputSchema = {
type: 'object',
properties: {
jobid: {
type: 'number',
description: 'Filter by specific job ID.',
},
jobname: {
type: 'string',
description: 'Filter by job name pattern.',
},
limit: {
type: 'number',
description: 'Maximum number of records to return.',
default: 100,
},
status: {
type: 'string',
enum: ['succeeded', 'failed'],
description: 'Filter by execution status.',
},
},
required: [],
};
// Safe pattern for job name search - alphanumeric, underscore, hyphen, space
// NOTE(review): '_' is also a single-character LIKE wildcard in the ILIKE filter
// built by the tool below, so it matches any character — presumably acceptable; confirm.
const jobNamePattern = /^[a-zA-Z0-9_\-\s]+$/;
export const getCronJobHistoryTool = {
name: 'get_cron_job_history',
description: 'Gets execution history for pg_cron jobs including status and timing. Requires pg_cron to be installed.',
privilegeLevel: 'regular' as ToolPrivilegeLevel,
inputSchema: GetCronJobHistoryInputSchema,
mcpInputSchema: mcpInputSchema,
outputSchema: GetCronJobHistoryOutputSchema,
execute: async (input: GetCronJobHistoryInput, context: ToolContext) => {
const client = context.selfhostedClient;
const { jobid, jobname, limit = 100, status } = input;
// Validate jobname if provided
// Rejecting anything outside [a-zA-Z0-9_\-\s] keeps the ILIKE interpolation
// below free of quotes and SQL metacharacters.
if (jobname && !jobNamePattern.test(jobname)) {
throw new Error(`Invalid job name pattern: ${jobname}. Use only alphanumeric, underscore, hyphen, and space characters.`);
}
// First check if pg_cron extension is installed
const checkExtensionSql = `
SELECT EXISTS (
SELECT 1 FROM pg_extension WHERE extname = 'pg_cron'
) as installed;
`;
const extensionCheck = await executeSqlWithFallback(client, checkExtensionSql, true);
if (!Array.isArray(extensionCheck) || extensionCheck.length === 0) {
throw new Error('Failed to check pg_cron extension status.');
}
if (!extensionCheck[0].installed) {
throw new Error('pg_cron extension is not installed. Install it with: CREATE EXTENSION pg_cron;');
}
// Build query with filters
const conditions: string[] = [];
if (jobid !== undefined) {
// jobid is a number from Zod validation, safe to use directly
conditions.push(`j.jobid = ${jobid}`);
}
if (jobname) {
// Substring match; jobname was validated against jobNamePattern above.
conditions.push(`j.jobname ILIKE '%${jobname}%'`);
}
if (status) {
// status is validated by Zod enum, safe to use directly
conditions.push(`r.status = '${status}'`);
}
const whereClause = conditions.length > 0 ? `WHERE ${conditions.join(' AND ')}` : '';
// Ensure limit is within bounds
// Clamp to [1, 1000] regardless of what the caller asked for.
const safeLimit = Math.min(Math.max(1, limit), 1000);
// LEFT JOIN keeps jobs that have never run (run-side columns come back null).
const historySql = `
SELECT
j.jobid,
j.jobname,
j.schedule,
r.runid,
r.status,
r.start_time::text,
r.end_time::text,
r.return_message
FROM cron.job j
LEFT JOIN cron.job_run_details r ON j.jobid = r.jobid
${whereClause}
ORDER BY r.start_time DESC NULLS LAST
LIMIT ${safeLimit}
`;
const result = await executeSqlWithFallback(client, historySql, true);
// handleSqlResponse throws on SQL errors and validates the rows.
return handleSqlResponse(result, GetCronJobHistoryOutputSchema);
},
};

View File

@ -0,0 +1,65 @@
import { z } from 'zod';
import type { SelfhostedSupabaseClient } from '../client/index.js';
import { handleSqlResponse, executeSqlWithFallback } from './utils.js';
import type { ToolContext } from './types.js';
// Schema for the output: array of connection details
// client_addr and backend_start are strings because the query casts them ::text.
const GetDbConnectionsOutputSchema = z.array(z.object({
datname: z.string().nullable().describe('Database name'),
usename: z.string().nullable().describe('User name'),
application_name: z.string().nullable().describe('Application name (e.g., PostgREST, psql)'),
client_addr: z.string().nullable().describe('Client IP address'),
backend_start: z.string().nullable().describe('Time when the backend process started'),
state: z.string().nullable().describe('Current connection state (e.g., active, idle)'),
query: z.string().nullable().describe('Last or current query being executed'),
pid: z.number().describe('Process ID of the backend'),
}));
// Input schema (allow filtering by user or database later if needed)
const GetDbConnectionsInputSchema = z.object({});
type GetDbConnectionsInput = z.infer<typeof GetDbConnectionsInputSchema>;
// Static JSON Schema for MCP capabilities
// Empty on purpose: the tool currently takes no arguments.
const mcpInputSchema = {
type: 'object',
properties: {},
required: [],
};
// The tool definition
// Lists client backends from pg_stat_activity, validated against the output schema.
export const getDatabaseConnectionsTool = {
    name: 'get_database_connections',
    description: 'Retrieves information about active database connections from pg_stat_activity.',
    inputSchema: GetDbConnectionsInputSchema,
    mcpInputSchema: mcpInputSchema,
    outputSchema: GetDbConnectionsOutputSchema,
    execute: async (input: GetDbConnectionsInput, context: ToolContext) => {
        // NOTE(review): reading pg_stat_activity may require superuser or
        // pg_monitor-style grants — confirm the connecting role has them.
        const activityQuery = `
SELECT
pid,
datname,
usename,
application_name,
client_addr::text, -- Cast inet to text
backend_start::text, -- Cast timestamp to text
state,
query
FROM
pg_stat_activity
WHERE
backend_type = 'client backend' -- Exclude background workers, etc.
-- Optionally filter out self?
-- AND pid != pg_backend_pid()
ORDER BY
backend_start
`;
        const rawRows = await executeSqlWithFallback(context.selfhostedClient, activityQuery, true);
        // Throws on SQL error responses; otherwise validates and returns the rows.
        return handleSqlResponse(rawRows, GetDbConnectionsOutputSchema);
    },
};

View File

@ -0,0 +1,128 @@
import { z } from 'zod';
import type { SelfhostedSupabaseClient } from '../client/index.js';
import { handleSqlResponse, executeSqlWithFallback } from './utils.js';
import type { ToolContext } from './types.js';
// Schema for combined stats output
// Note: Types are often bigint from pg_stat, returned as string by JSON/RPC.
// Casting to numeric/float in SQL or parsing carefully later might be needed for calculations.
const GetDbStatsOutputSchema = z.object({
database_stats: z.array(z.object({
datname: z.string().nullable(),
numbackends: z.number().nullable(),
xact_commit: z.string().nullable(), // bigint as string
xact_rollback: z.string().nullable(), // bigint as string
blks_read: z.string().nullable(), // bigint as string
blks_hit: z.string().nullable(), // bigint as string
tup_returned: z.string().nullable(), // bigint as string
tup_fetched: z.string().nullable(), // bigint as string
tup_inserted: z.string().nullable(), // bigint as string
tup_updated: z.string().nullable(), // bigint as string
tup_deleted: z.string().nullable(), // bigint as string
conflicts: z.string().nullable(), // bigint as string
temp_files: z.string().nullable(), // bigint as string
temp_bytes: z.string().nullable(), // bigint as string
deadlocks: z.string().nullable(), // bigint as string
checksum_failures: z.string().nullable(), // bigint as string
checksum_last_failure: z.string().nullable(), // timestamp as string
blk_read_time: z.number().nullable(), // double precision
blk_write_time: z.number().nullable(), // double precision
stats_reset: z.string().nullable(), // timestamp as string
})).describe("Statistics per database from pg_stat_database"),
// NOTE(review): several pg_stat_bgwriter columns were moved or removed in
// newer PostgreSQL releases — verify against the target server version.
bgwriter_stats: z.array(z.object({ // Usually a single row
checkpoints_timed: z.string().nullable(),
checkpoints_req: z.string().nullable(),
checkpoint_write_time: z.number().nullable(),
checkpoint_sync_time: z.number().nullable(),
buffers_checkpoint: z.string().nullable(),
buffers_clean: z.string().nullable(),
maxwritten_clean: z.string().nullable(),
buffers_backend: z.string().nullable(),
buffers_backend_fsync: z.string().nullable(),
buffers_alloc: z.string().nullable(),
stats_reset: z.string().nullable(),
})).describe("Statistics from the background writer process from pg_stat_bgwriter"),
});
// Input schema (allow filtering by database later if needed)
const GetDbStatsInputSchema = z.object({});
type GetDbStatsInput = z.infer<typeof GetDbStatsInputSchema>;
// Static JSON Schema for MCP capabilities
// Empty on purpose: the tool currently takes no arguments.
const mcpInputSchema = {
type: 'object',
properties: {},
required: [],
};
// The tool definition
// Snapshots pg_stat_database and pg_stat_bgwriter and returns both, validated
// against the combined output schema.
export const getDatabaseStatsTool = {
    name: 'get_database_stats',
    description: 'Retrieves statistics about database activity and the background writer from pg_stat_database and pg_stat_bgwriter.',
    inputSchema: GetDbStatsInputSchema,
    mcpInputSchema: mcpInputSchema,
    outputSchema: GetDbStatsOutputSchema,
    execute: async (input: GetDbStatsInput, context: ToolContext) => {
        const client = context.selfhostedClient;
        // bigint counters are cast to text so they survive JSON serialization
        // without precision loss; double-precision timings stay numeric.
        const perDatabaseSql = `
SELECT
datname,
numbackends,
xact_commit::text,
xact_rollback::text,
blks_read::text,
blks_hit::text,
tup_returned::text,
tup_fetched::text,
tup_inserted::text,
tup_updated::text,
tup_deleted::text,
conflicts::text,
temp_files::text,
temp_bytes::text,
deadlocks::text,
checksum_failures::text,
checksum_last_failure::text,
blk_read_time,
blk_write_time,
stats_reset::text
FROM pg_stat_database
`;
        const backgroundWriterSql = `
SELECT
checkpoints_timed::text,
checkpoints_req::text,
checkpoint_write_time,
checkpoint_sync_time,
buffers_checkpoint::text,
buffers_clean::text,
maxwritten_clean::text,
buffers_backend::text,
buffers_backend_fsync::text,
buffers_alloc::text,
stats_reset::text
FROM pg_stat_bgwriter
`;
        // Fetch both snapshots concurrently.
        const [perDbRaw, bgWriterRaw] = await Promise.all([
            executeSqlWithFallback(client, perDatabaseSql, true),
            executeSqlWithFallback(client, backgroundWriterSql, true),
        ]);
        // handleSqlResponse throws on error responses and validates each part
        // against its slice of the combined output schema.
        return {
            database_stats: handleSqlResponse(perDbRaw, GetDbStatsOutputSchema.shape.database_stats),
            bgwriter_stats: handleSqlResponse(bgWriterRaw, GetDbStatsOutputSchema.shape.bgwriter_stats),
        };
    },
};

View File

@ -0,0 +1,120 @@
import { z } from 'zod';
import { handleSqlResponse, executeSqlWithFallback } from './utils.js';
import type { ToolContext } from './types.js';
// Schema for edge function details output
// created_at/updated_at arrive as strings because the query casts them ::text.
const EdgeFunctionDetailsSchema = z.object({
id: z.string().uuid(),
name: z.string(),
slug: z.string(),
status: z.string().nullable(),
version: z.number().nullable(),
created_at: z.string().nullable(),
updated_at: z.string().nullable(),
verify_jwt: z.boolean().nullable(),
import_map: z.boolean().nullable(),
});
const GetEdgeFunctionDetailsOutputSchema = z.array(EdgeFunctionDetailsSchema);
// null signals "not found" or "edge functions unavailable on this installation".
type GetEdgeFunctionDetailsOutput = z.infer<typeof EdgeFunctionDetailsSchema> | null;
// Input schema
const GetEdgeFunctionDetailsInputSchema = z.object({
function_identifier: z.string().describe('The function ID (UUID) or slug to look up'),
});
type GetEdgeFunctionDetailsInput = z.infer<typeof GetEdgeFunctionDetailsInputSchema>;
// Static JSON Schema for MCP capabilities
// Hand-written mirror of GetEdgeFunctionDetailsInputSchema; keep in sync.
const mcpInputSchema = {
type: 'object',
properties: {
function_identifier: {
type: 'string',
description: 'The function ID (UUID) or slug to look up',
},
},
required: ['function_identifier'],
};
// Tool definition
// Looks up one edge function by UUID or slug; returns null when Edge Functions
// are not installed, cannot be verified, or the function does not exist.
export const getEdgeFunctionDetailsTool = {
    name: 'get_edge_function_details',
    description: 'Gets detailed information about a specific Supabase Edge Function by ID or slug. Returns null if not found or edge functions are not available.',
    inputSchema: GetEdgeFunctionDetailsInputSchema,
    mcpInputSchema: mcpInputSchema,
    outputSchema: EdgeFunctionDetailsSchema.nullable(),
    execute: async (input: GetEdgeFunctionDetailsInput, context: ToolContext): Promise<GetEdgeFunctionDetailsOutput> => {
        const client = context.selfhostedClient;
        const { function_identifier } = input;
        // Preflight probes: the supabase_functions schema and its functions table
        // only exist when Edge Functions are present on this installation.
        const probes = [
            {
                sql: `
SELECT EXISTS (
SELECT 1 FROM pg_catalog.pg_namespace WHERE nspname = 'supabase_functions'
) AS exists
`,
                missing: 'supabase_functions schema not found - Edge Functions may not be available in this installation',
                unverifiable: 'Could not verify supabase_functions schema',
            },
            {
                sql: `
SELECT EXISTS (
SELECT 1 FROM pg_catalog.pg_tables
WHERE schemaname = 'supabase_functions' AND tablename = 'functions'
) AS exists
`,
                missing: 'supabase_functions.functions table not found',
                unverifiable: 'Could not verify functions table',
            },
        ];
        for (const probe of probes) {
            const probeResult = await executeSqlWithFallback(client, probe.sql, true);
            if (!Array.isArray(probeResult) || probeResult.length === 0) {
                context.log(probe.unverifiable, 'warn');
                return null;
            }
            if (!probeResult[0]?.exists) {
                context.log(probe.missing, 'info');
                return null;
            }
        }
        // Escape single quotes in the identifier to prevent SQL injection.
        const quotedId = function_identifier.replace(/'/g, "''");
        // Match against either the UUID id or the slug in one query.
        const detailsSql = `
SELECT
id,
name,
slug,
status,
version,
created_at::text,
updated_at::text,
verify_jwt,
import_map
FROM supabase_functions.functions
WHERE id::text = '${quotedId}' OR slug = '${quotedId}'
LIMIT 1
`;
        const rows = handleSqlResponse(
            await executeSqlWithFallback(client, detailsSql, true),
            GetEdgeFunctionDetailsOutputSchema,
        );
        if (rows.length === 0) {
            context.log(`Edge function not found: ${function_identifier}`, 'info');
            return null;
        }
        return rows[0];
    },
};

View File

@ -0,0 +1,120 @@
import { z } from 'zod';
import type { ToolContext, ToolPrivilegeLevel } from './types.js';
import { executeSqlWithFallback, isSqlErrorResponse } from './utils.js';
// Output schema for function definition
const GetFunctionDefinitionOutputSchema = z.object({
schema_name: z.string(),
function_name: z.string(),
arguments: z.string(),
return_type: z.string(),
language: z.string(),
volatility: z.string(),
security_definer: z.boolean(),
definition: z.string(),
});
// Input schema
const GetFunctionDefinitionInputSchema = z.object({
schema: z.string().default('public').describe('Schema name (defaults to public).'),
function_name: z.string().describe('Name of the function.'),
argument_types: z.string().optional().describe('Argument types to disambiguate overloaded functions (e.g., "integer, text").'),
});
type GetFunctionDefinitionInput = z.infer<typeof GetFunctionDefinitionInputSchema>;
// Static JSON Schema for MCP capabilities
// Hand-written mirror of GetFunctionDefinitionInputSchema; keep in sync.
const mcpInputSchema = {
type: 'object',
properties: {
schema: {
type: 'string',
default: 'public',
description: 'Schema name (defaults to public).',
},
function_name: {
type: 'string',
description: 'Name of the function.',
},
argument_types: {
type: 'string',
description: 'Argument types to disambiguate overloaded functions (e.g., "integer, text").',
},
},
required: ['function_name'],
};
// SQL identifier validation pattern
// Matches unquoted PostgreSQL-style identifiers (letter/underscore start).
const identifierPattern = /^[a-zA-Z_][a-zA-Z0-9_$]*$/;
// Pattern for argument types - allow common type names and modifiers
// Permits commas, whitespace, brackets and parentheses for things like "text[]".
const argTypesPattern = /^[a-zA-Z0-9_$,\s\[\]()]+$/;
export const getFunctionDefinitionTool = {
name: 'get_function_definition',
description: 'Gets the full source code definition of a database function. Use argument_types if there are overloaded functions with the same name.',
privilegeLevel: 'regular' as ToolPrivilegeLevel,
inputSchema: GetFunctionDefinitionInputSchema,
mcpInputSchema: mcpInputSchema,
outputSchema: GetFunctionDefinitionOutputSchema,
execute: async (input: GetFunctionDefinitionInput, context: ToolContext) => {
const client = context.selfhostedClient;
const { schema, function_name, argument_types } = input;
// Validate identifiers
if (!identifierPattern.test(schema)) {
throw new Error(`Invalid schema name: ${schema}`);
}
if (!identifierPattern.test(function_name)) {
throw new Error(`Invalid function name: ${function_name}`);
}
if (argument_types && !argTypesPattern.test(argument_types)) {
throw new Error(`Invalid argument types format: ${argument_types}`);
}
// Build WHERE conditions
let whereClause = `n.nspname = '${schema}' AND p.proname = '${function_name}'`;
if (argument_types) {
// Use pg_get_function_arguments to match the argument signature
whereClause += ` AND pg_catalog.pg_get_function_arguments(p.oid) = '${argument_types}'`;
}
const sql = `
SELECT
n.nspname AS schema_name,
p.proname AS function_name,
pg_catalog.pg_get_function_arguments(p.oid) AS arguments,
pg_catalog.pg_get_function_result(p.oid) AS return_type,
l.lanname AS language,
CASE p.provolatile
WHEN 'i' THEN 'IMMUTABLE'
WHEN 's' THEN 'STABLE'
WHEN 'v' THEN 'VOLATILE'
ELSE p.provolatile::text
END AS volatility,
p.prosecdef AS security_definer,
pg_catalog.pg_get_functiondef(p.oid) AS definition
FROM pg_catalog.pg_proc p
JOIN pg_catalog.pg_namespace n ON n.oid = p.pronamespace
JOIN pg_catalog.pg_language l ON l.oid = p.prolang
WHERE ${whereClause}
AND p.prokind = 'f'
LIMIT 1
`;
const result = await executeSqlWithFallback(client, sql, true);
// Handle the response - expect single result
if (isSqlErrorResponse(result)) {
throw new Error(result.error.message || 'Failed to get function definition');
}
const rows = result as unknown[];
if (rows.length === 0) {
throw new Error(`Function ${schema}.${function_name}${argument_types ? `(${argument_types})` : ''} not found.`);
}
// Return the first (and should be only) result
return GetFunctionDefinitionOutputSchema.parse(rows[0]);
},
};

View File

@ -0,0 +1,121 @@
/**
 * get_index_stats - Gets detailed statistics for a specific index.
 *
 * Shows usage counts, size information, and effectiveness metrics.
 */
import { z } from 'zod';
import type { ToolContext, ToolPrivilegeLevel } from './types.js';
import { executeSqlWithFallback, isSqlErrorResponse } from './utils.js';

// SQL identifier validation - prevents SQL injection via identifier names
const identifierPattern = /^[a-zA-Z_][a-zA-Z0-9_$]*$/;

// Output schema for index stats
const GetIndexStatsOutputSchema = z.object({
    schema_name: z.string(),
    table_name: z.string(),
    index_name: z.string(),
    index_type: z.string(),
    is_unique: z.boolean(),
    is_primary: z.boolean(),
    is_valid: z.boolean(),
    number_of_scans: z.number(),
    tuples_read: z.number(),
    tuples_fetched: z.number(),
    index_size: z.string(),
    table_size: z.string(),
    index_size_bytes: z.number(),
    table_size_bytes: z.number(),
    usage_ratio: z.string().nullable(), // Percentage of table accesses that used this index
});

// Input schema
const GetIndexStatsInputSchema = z.object({
    schema: z.string().default('public').describe('Schema name (defaults to public).'),
    index_name: z.string().describe('Name of the index.'),
});
type GetIndexStatsInput = z.infer<typeof GetIndexStatsInputSchema>;

// Static JSON Schema for MCP capabilities
const mcpInputSchema = {
    type: 'object',
    properties: {
        schema: {
            type: 'string',
            default: 'public',
            description: 'Schema name (defaults to public).',
        },
        index_name: {
            type: 'string',
            description: 'Name of the index.',
        },
    },
    required: ['index_name'],
};

/**
 * Reports per-index usage counters (from pg_stat_user_indexes) and size
 * information, plus a usage ratio estimating how often table accesses
 * went through this particular index.
 */
export const getIndexStatsTool = {
    name: 'get_index_stats',
    description: 'Gets detailed statistics for a specific index including usage counts and size.',
    privilegeLevel: 'regular' as ToolPrivilegeLevel,
    inputSchema: GetIndexStatsInputSchema,
    mcpInputSchema: mcpInputSchema,
    outputSchema: GetIndexStatsOutputSchema,
    execute: async (input: GetIndexStatsInput, context: ToolContext) => {
        const client = context.selfhostedClient;
        const { schema, index_name } = input;

        // SECURITY: Validate identifiers to prevent SQL injection
        if (!identifierPattern.test(schema)) {
            throw new Error(`Invalid schema name: ${schema}. Must be a valid SQL identifier.`);
        }
        if (!identifierPattern.test(index_name)) {
            throw new Error(`Invalid index name: ${index_name}. Must be a valid SQL identifier.`);
        }

        // usage_ratio: share of all table accesses (seq scans + index scans)
        // that went through THIS index. Note the numerator is s.idx_scan
        // (this index's scan count); st.idx_scan would count scans via any
        // index on the table, which is not what the output field documents.
        // COALESCE guards against NULL counters.
        const sql = `
            SELECT
                s.schemaname AS schema_name,
                s.relname AS table_name,
                s.indexrelname AS index_name,
                am.amname AS index_type,
                i.indisunique AS is_unique,
                i.indisprimary AS is_primary,
                i.indisvalid AS is_valid,
                COALESCE(s.idx_scan, 0)::bigint AS number_of_scans,
                COALESCE(s.idx_tup_read, 0)::bigint AS tuples_read,
                COALESCE(s.idx_tup_fetch, 0)::bigint AS tuples_fetched,
                pg_size_pretty(pg_relation_size(s.indexrelid)) AS index_size,
                pg_size_pretty(pg_relation_size(s.relid)) AS table_size,
                pg_relation_size(s.indexrelid)::bigint AS index_size_bytes,
                pg_relation_size(s.relid)::bigint AS table_size_bytes,
                CASE
                    WHEN COALESCE(st.seq_scan, 0) + COALESCE(st.idx_scan, 0) > 0
                    THEN ROUND((COALESCE(s.idx_scan, 0)::numeric / (COALESCE(st.seq_scan, 0) + COALESCE(st.idx_scan, 0))::numeric) * 100, 2)::text || '%'
                    ELSE NULL
                END AS usage_ratio
            FROM pg_stat_user_indexes s
            JOIN pg_catalog.pg_index i ON i.indexrelid = s.indexrelid
            JOIN pg_catalog.pg_class c ON c.oid = s.indexrelid
            JOIN pg_catalog.pg_am am ON am.oid = c.relam
            JOIN pg_stat_user_tables st ON st.relid = s.relid
            WHERE s.schemaname = '${schema}'
            AND s.indexrelname = '${index_name}'
            LIMIT 1
        `;

        const result = await executeSqlWithFallback(client, sql, true);
        if (isSqlErrorResponse(result)) {
            throw new Error(result.error.message || 'Failed to get index stats');
        }
        const rows = result as unknown[];
        if (rows.length === 0) {
            throw new Error(`Index "${index_name}" not found in schema "${schema}".`);
        }
        return GetIndexStatsOutputSchema.parse(rows[0]);
    },
};

View File

@ -0,0 +1,278 @@
import { z } from 'zod';
import { handleSqlResponse, executeSqlWithFallback, isSqlErrorResponse } from './utils.js';
import type { ToolContext } from './types.js';
// Service types that can be queried for logs
const LogServiceSchema = z.enum(['postgres', 'auth', 'storage', 'realtime', 'postgrest']);
type LogService = z.infer<typeof LogServiceSchema>;

// Schema for log entry output. Timestamps are returned as text by the SQL
// queries (cast with ::text), and every field may be NULL in the source logs.
const LogEntrySchema = z.object({
    timestamp: z.string().nullable(),
    level: z.string().nullable(),
    message: z.string().nullable(),
    metadata: z.record(z.string(), z.unknown()).nullable(),
});

// Tool output: the log entries plus which backend produced them
// ('analytics', 'csv', or 'none') and an optional human-readable note.
const GetLogsOutputSchema = z.object({
    logs: z.array(LogEntrySchema),
    source: z.string(),
    service: LogServiceSchema,
    message: z.string().optional(),
});
type GetLogsOutput = z.infer<typeof GetLogsOutputSchema>;

// Input schema. The limit bound (1..1000) is what makes interpolating it
// into SQL below safe.
const GetLogsInputSchema = z.object({
    service: LogServiceSchema.describe('The service to fetch logs for (postgres, auth, storage, realtime, postgrest)'),
    limit: z.number().min(1).max(1000).optional().describe('Maximum number of log entries to return (default: 100, max: 1000)'),
});
type GetLogsInput = z.infer<typeof GetLogsInputSchema>;

// Static JSON Schema for MCP capabilities
const mcpInputSchema = {
    type: 'object',
    properties: {
        service: {
            type: 'string',
            enum: ['postgres', 'auth', 'storage', 'realtime', 'postgrest'],
            description: 'The service to fetch logs for (postgres, auth, storage, realtime, postgrest)',
        },
        limit: {
            type: 'number',
            minimum: 1,
            maximum: 1000,
            description: 'Maximum number of log entries to return (default: 100, max: 1000)',
        },
    },
    required: ['service'],
};
// Tool definition
/**
 * get_logs tool: fetches recent (last 24h) logs for a Supabase service,
 * preferring the analytics stack and falling back to PostgreSQL CSV logs
 * for the `postgres` service. Returns an empty result with an explanatory
 * message when no log source is available.
 */
export const getLogsTool = {
    name: 'get_logs',
    description: 'Gets logs for a Supabase service. Attempts to query the analytics stack first, then falls back to PostgreSQL CSV logs. Returns logs from the last 24 hours. Note: Log availability depends on your self-hosted installation configuration.',
    inputSchema: GetLogsInputSchema,
    mcpInputSchema: mcpInputSchema,
    outputSchema: GetLogsOutputSchema,
    execute: async (input: GetLogsInput, context: ToolContext): Promise<GetLogsOutput> => {
        const { service, limit = 100 } = input;
        const client = context.selfhostedClient;

        // Preferred source: the analytics stack (_analytics schema).
        const fromAnalytics = await tryAnalyticsLogs(client, service, limit, context);
        if (fromAnalytics) {
            return fromAnalytics;
        }

        // Fallback for postgres only: CSV log files via file_fdw.
        if (service === 'postgres') {
            const fromCsv = await tryPostgresCsvLogs(client, limit, context);
            if (fromCsv) {
                return fromCsv;
            }
        }

        // Neither source is configured in this installation.
        return {
            logs: [],
            source: 'none',
            service,
            message: `Log access not available for ${service}. Self-hosted installations may need to configure the analytics stack or enable PostgreSQL CSV logging.`,
        };
    },
};
// Try to get logs from the analytics stack
async function tryAnalyticsLogs(
client: ToolContext['selfhostedClient'],
service: LogService,
limit: number,
context: ToolContext
): Promise<GetLogsOutput | null> {
// Check if _analytics schema exists
const checkSchemaSql = `
SELECT EXISTS (
SELECT 1 FROM pg_catalog.pg_namespace WHERE nspname = '_analytics'
) AS exists
`;
const schemaCheckResult = await executeSqlWithFallback(client, checkSchemaSql, true);
if (!Array.isArray(schemaCheckResult) || schemaCheckResult.length === 0 || !schemaCheckResult[0]?.exists) {
context.log('_analytics schema not found - analytics stack not deployed', 'info');
return null;
}
// Map service to analytics table using Map to prevent object injection
const tableMap = new Map<LogService, string>([
['postgres', 'postgres_logs'],
['auth', 'auth_logs'],
['storage', 'storage_logs'],
['realtime', 'realtime_logs'],
['postgrest', 'postgrest_logs'],
]);
const tableName = tableMap.get(service);
if (!tableName) {
context.log(`Unknown service: ${service}`, 'error');
return null;
}
// Check if the specific logs table exists
const checkTableSql = `
SELECT EXISTS (
SELECT 1 FROM pg_catalog.pg_tables
WHERE schemaname = '_analytics' AND tablename = '${tableName}'
) AS exists
`;
const tableCheckResult = await executeSqlWithFallback(client, checkTableSql, true);
if (!Array.isArray(tableCheckResult) || tableCheckResult.length === 0 || !tableCheckResult[0]?.exists) {
context.log(`_analytics.${tableName} table not found`, 'info');
return null;
}
// Query logs from analytics table (last 24 hours)
const queryLogsSql = `
SELECT
timestamp::text,
COALESCE(level, 'info') as level,
message,
metadata::jsonb as metadata
FROM _analytics.${tableName}
WHERE timestamp > NOW() - INTERVAL '24 hours'
ORDER BY timestamp DESC
LIMIT ${limit}
`;
try {
const result = await executeSqlWithFallback(client, queryLogsSql, true);
if (isSqlErrorResponse(result)) {
context.log(`Error querying analytics logs: ${result.error.message}`, 'warn');
return null;
}
const logsSchema = z.array(
z.object({
timestamp: z.string().nullable(),
level: z.string().nullable(),
message: z.string().nullable(),
metadata: z.record(z.string(), z.unknown()).nullable(),
})
);
const logs = handleSqlResponse(result, logsSchema);
return {
logs,
source: 'analytics',
service,
};
} catch (error) {
context.log(`Failed to query analytics logs: ${error}`, 'warn');
return null;
}
}
// Try to get PostgreSQL logs from CSV log files using file_fdw
async function tryPostgresCsvLogs(
client: ToolContext['selfhostedClient'],
limit: number,
context: ToolContext
): Promise<GetLogsOutput | null> {
// Check if file_fdw extension exists
const checkExtensionSql = `
SELECT EXISTS (
SELECT 1 FROM pg_extension WHERE extname = 'file_fdw'
) AS exists
`;
const extensionCheckResult = await executeSqlWithFallback(client, checkExtensionSql, true);
if (!Array.isArray(extensionCheckResult) || extensionCheckResult.length === 0 || !extensionCheckResult[0]?.exists) {
context.log('file_fdw extension not installed - cannot access CSV logs', 'info');
return null;
}
// Get current log file path
const getLogFileSql = `SELECT pg_current_logfile() as logfile`;
const logFileResult = await executeSqlWithFallback(client, getLogFileSql, true);
if (!Array.isArray(logFileResult) || logFileResult.length === 0 || !logFileResult[0]?.logfile) {
context.log('Could not determine current log file path', 'info');
return null;
}
const logFile = String(logFileResult[0].logfile);
// Check if we have a foreign table set up for logs, or try to query directly
// This is a simplified approach - full implementation would need proper foreign table setup
const checkForeignTableSql = `
SELECT EXISTS (
SELECT 1 FROM pg_catalog.pg_foreign_table ft
JOIN pg_catalog.pg_class c ON c.oid = ft.ftrelid
WHERE c.relname = 'pglog'
) AS exists
`;
const foreignTableResult = await executeSqlWithFallback(client, checkForeignTableSql, true);
if (Array.isArray(foreignTableResult) && foreignTableResult.length > 0 && foreignTableResult[0]?.exists) {
// Query existing foreign table
const queryLogsSql = `
SELECT
log_time::text as timestamp,
CASE
WHEN error_severity = 'ERROR' THEN 'error'
WHEN error_severity = 'WARNING' THEN 'warn'
WHEN error_severity = 'LOG' THEN 'info'
ELSE 'debug'
END as level,
message,
jsonb_build_object(
'user_name', user_name,
'database_name', database_name,
'process_id', process_id,
'sql_state_code', sql_state_code
) as metadata
FROM pglog
WHERE log_time > NOW() - INTERVAL '24 hours'
ORDER BY log_time DESC
LIMIT ${limit}
`;
try {
const result = await executeSqlWithFallback(client, queryLogsSql, true);
if (isSqlErrorResponse(result)) {
context.log(`Error querying CSV logs: ${result.error.message}`, 'warn');
return null;
}
const logsSchema = z.array(
z.object({
timestamp: z.string().nullable(),
level: z.string().nullable(),
message: z.string().nullable(),
metadata: z.record(z.string(), z.unknown()).nullable(),
})
);
const logs = handleSqlResponse(result, logsSchema);
return {
logs,
source: 'csv',
service: 'postgres',
};
} catch (error) {
context.log(`Failed to query CSV logs: ${error}`, 'warn');
return null;
}
}
context.log(`CSV log file found at ${logFile} but no pglog foreign table configured`, 'info');
return null;
}

View File

@ -0,0 +1,33 @@
import { z } from 'zod';
import type { ToolContext } from './types.js';

// Input schema (the tool takes no arguments).
const GetProjectUrlInputSchema = z.object({});
type GetProjectUrlInput = z.infer<typeof GetProjectUrlInputSchema>;

// Output schema: the configured project URL, validated as a URL.
const GetProjectUrlOutputSchema = z.object({
    project_url: z.string().url(),
});

// Static JSON Schema for MCP capabilities
const mcpInputSchema = {
    type: 'object',
    properties: {},
    required: [],
};

/**
 * Returns the Supabase project URL this server was configured with.
 * Reads from the client configuration only; no database access.
 * (Removed an unused `SelfhostedSupabaseClient` type import.)
 */
export const getProjectUrlTool = {
    name: 'get_project_url',
    description: 'Returns the configured Supabase project URL for this server.',
    inputSchema: GetProjectUrlInputSchema,
    mcpInputSchema: mcpInputSchema, // Add static JSON schema
    outputSchema: GetProjectUrlOutputSchema,
    execute: async (input: GetProjectUrlInput, context: ToolContext) => {
        const client = context.selfhostedClient;
        const url = client.getSupabaseUrl(); // Use getter from client
        return { project_url: url };
    },
};

View File

@ -0,0 +1,93 @@
import { z } from 'zod';
import type { ToolContext, ToolPrivilegeLevel } from './types.js';
import { handleSqlResponse, executeSqlWithFallback } from './utils.js';

// One row per table: RLS flags plus the number of attached policies.
const GetRlsStatusOutputSchema = z.array(z.object({
    schema_name: z.string(),
    table_name: z.string(),
    rls_enabled: z.boolean(),
    rls_forced: z.boolean(),
    policy_count: z.number(),
}));

// Optional filters narrow the report to one schema and/or table.
const GetRlsStatusInputSchema = z.object({
    schema: z.string().optional().describe('Filter by schema name.'),
    table: z.string().optional().describe('Filter by table name.'),
});
type GetRlsStatusInput = z.infer<typeof GetRlsStatusInputSchema>;

// Static JSON Schema advertised through MCP capabilities.
const mcpInputSchema = {
    type: 'object',
    properties: {
        schema: {
            type: 'string',
            description: 'Filter by schema name.',
        },
        table: {
            type: 'string',
            description: 'Filter by table name.',
        },
    },
    required: [],
};

// Injection guard: filters must be plain SQL identifiers.
const identifierPattern = /^[a-zA-Z_][a-zA-Z0-9_$]*$/;

/**
 * Reports RLS enablement (pg_class.relrowsecurity / relforcerowsecurity)
 * and policy counts for user tables, excluding system and Supabase-managed
 * schemas.
 */
export const getRlsStatusTool = {
    name: 'get_rls_status',
    description: 'Checks if Row Level Security (RLS) is enabled on tables and shows the number of policies. Can filter by schema and/or table.',
    privilegeLevel: 'regular' as ToolPrivilegeLevel,
    inputSchema: GetRlsStatusInputSchema,
    mcpInputSchema: mcpInputSchema,
    outputSchema: GetRlsStatusOutputSchema,
    execute: async (input: GetRlsStatusInput, context: ToolContext) => {
        const client = context.selfhostedClient;
        const { schema, table } = input;

        // Validate any provided filters before interpolating them.
        if (schema && !identifierPattern.test(schema)) {
            throw new Error(`Invalid schema name: ${schema}`);
        }
        if (table && !identifierPattern.test(table)) {
            throw new Error(`Invalid table name: ${table}`);
        }

        // Base filters: ordinary tables only, minus system/managed schemas.
        const filters: string[] = [
            "c.relkind = 'r'", // ordinary tables only
            "n.nspname NOT IN ('pg_catalog', 'information_schema', 'pg_toast', 'auth', 'storage', 'extensions', 'graphql', 'graphql_public', 'pgbouncer', 'realtime', 'supabase_functions', 'supabase_migrations', '_realtime')",
        ];
        if (schema) {
            filters.push(`n.nspname = '${schema}'`);
        }
        if (table) {
            filters.push(`c.relname = '${table}'`);
        }

        const sql = `
            SELECT
                n.nspname AS schema_name,
                c.relname AS table_name,
                c.relrowsecurity AS rls_enabled,
                c.relforcerowsecurity AS rls_forced,
                COUNT(pol.polname)::int AS policy_count
            FROM pg_catalog.pg_class c
            JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
            LEFT JOIN pg_catalog.pg_policy pol ON pol.polrelid = c.oid
            WHERE ${filters.join(' AND ')}
            GROUP BY n.nspname, c.relname, c.relrowsecurity, c.relforcerowsecurity
            ORDER BY n.nspname, c.relname
        `;

        const queryResult = await executeSqlWithFallback(client, sql, true);
        return handleSqlResponse(queryResult, GetRlsStatusOutputSchema);
    },
};

View File

@ -0,0 +1,146 @@
import { z } from 'zod';
import { handleSqlResponse, executeSqlWithFallback } from './utils.js';
import type { ToolContext } from './types.js';

// Schema for a single storage bucket row (storage.buckets).
const BucketConfigSchema = z.object({
    id: z.string(),
    name: z.string(),
    public: z.boolean(),
    file_size_limit: z.number().nullable(),
    allowed_mime_types: z.array(z.string()).nullable(),
    avif_autodetection: z.boolean().nullable(),
    owner: z.string().nullable(),
    created_at: z.string().nullable(),
    updated_at: z.string().nullable(),
});

// Schema for output: the bucket list plus aggregate stats.
const GetStorageConfigOutputSchema = z.object({
    buckets: z.array(BucketConfigSchema),
    global_config: z.object({
        max_file_size_limit: z.number().nullable(),
        bucket_count: z.number(),
    }),
});
type GetStorageConfigOutput = z.infer<typeof GetStorageConfigOutputSchema>;

// Input schema
const GetStorageConfigInputSchema = z.object({
    bucket_id: z.string().optional().describe('Optional bucket ID to get config for a specific bucket. If omitted, returns all buckets.'),
});
type GetStorageConfigInput = z.infer<typeof GetStorageConfigInputSchema>;

// Static JSON Schema for MCP capabilities
const mcpInputSchema = {
    type: 'object',
    properties: {
        bucket_id: {
            type: 'string',
            description: 'Optional bucket ID to get config for a specific bucket. If omitted, returns all buckets.',
        },
    },
    required: [],
};

/**
 * Reads bucket configuration from storage.buckets. Returns an empty result
 * (not an error) when the storage schema is absent, since Storage may not
 * be configured on a given installation.
 */
export const getStorageConfigTool = {
    name: 'get_storage_config',
    description: 'Gets storage configuration for Supabase Storage buckets. Returns bucket settings including file size limits, allowed MIME types, and public/private status.',
    inputSchema: GetStorageConfigInputSchema,
    mcpInputSchema: mcpInputSchema,
    outputSchema: GetStorageConfigOutputSchema,
    execute: async (input: GetStorageConfigInput, context: ToolContext): Promise<GetStorageConfigOutput> => {
        const client = context.selfhostedClient;
        const { bucket_id } = input;

        // Check if storage schema exists
        const checkSchemaSql = `
            SELECT EXISTS (
                SELECT 1 FROM pg_catalog.pg_namespace WHERE nspname = 'storage'
            ) AS exists
        `;
        const schemaCheckResult = await executeSqlWithFallback(client, checkSchemaSql, true);
        if (!Array.isArray(schemaCheckResult) || schemaCheckResult.length === 0 || !schemaCheckResult[0]?.exists) {
            context.log('storage schema not found - Storage may not be configured', 'info');
            return {
                buckets: [],
                global_config: {
                    max_file_size_limit: null,
                    bucket_count: 0,
                },
            };
        }

        // Build query for buckets
        let bucketQuery = `
            SELECT
                id,
                name,
                public,
                file_size_limit,
                allowed_mime_types,
                avif_autodetection,
                owner::text,
                created_at::text,
                updated_at::text
            FROM storage.buckets
        `;
        if (bucket_id) {
            // Escape single quotes to keep the interpolated literal safe
            const escapedBucketId = bucket_id.replace(/'/g, "''");
            bucketQuery += ` WHERE id = '${escapedBucketId}'`;
        }
        bucketQuery += ' ORDER BY name';

        const bucketsResult = await executeSqlWithFallback(client, bucketQuery, true);
        // Reuse BucketConfigSchema rather than redeclaring an identical
        // inline schema (keeps validation in one place).
        const buckets = handleSqlResponse(bucketsResult, z.array(BucketConfigSchema));

        // Get global stats
        const statsQuery = `
            SELECT
                MAX(file_size_limit) as max_file_size_limit,
                COUNT(*) as bucket_count
            FROM storage.buckets
        `;
        const statsResult = await executeSqlWithFallback(client, statsQuery, true);
        let globalConfig = {
            max_file_size_limit: null as number | null,
            bucket_count: buckets.length,
        };
        if (Array.isArray(statsResult) && statsResult.length > 0) {
            // MAX() may come back as a non-number depending on serialization;
            // only accept a plain number, otherwise report null.
            const maxLimit = statsResult[0]?.max_file_size_limit;
            globalConfig = {
                max_file_size_limit: typeof maxLimit === 'number' ? maxLimit : null,
                bucket_count: Number(statsResult[0]?.bucket_count) || 0,
            };
        }

        return {
            buckets,
            global_config: globalConfig,
        };
    },
};

View File

@ -0,0 +1,133 @@
import { z } from 'zod';
import type { ToolContext, ToolPrivilegeLevel } from './types.js';
import { executeSqlWithFallback, isSqlErrorResponse } from './utils.js';

// Output schema for trigger definition
const GetTriggerDefinitionOutputSchema = z.object({
    schema_name: z.string(),
    table_name: z.string(),
    trigger_name: z.string(),
    // 'BEFORE' | 'AFTER' | 'INSTEAD OF' (decoded from pg_trigger.tgtype bits)
    trigger_timing: z.string(),
    // 'ROW' | 'STATEMENT'
    trigger_level: z.string(),
    // Subset of INSERT / DELETE / UPDATE / TRUNCATE
    events: z.array(z.string()),
    function_schema: z.string(),
    function_name: z.string(),
    // 'ENABLED' | 'DISABLED' | 'REPLICA' | 'ALWAYS' (from pg_trigger.tgenabled)
    enabled: z.string(),
    // Full CREATE TRIGGER statement via pg_get_triggerdef
    definition: z.string(),
    // Trigger function source; NULL when include_function is false
    function_definition: z.string().nullable(),
});

// Input schema
const GetTriggerDefinitionInputSchema = z.object({
    schema: z.string().default('public').describe('Schema name (defaults to public).'),
    table: z.string().describe('Table name the trigger is on.'),
    trigger_name: z.string().describe('Name of the trigger.'),
    include_function: z.boolean().optional().default(true).describe('Include the trigger function source code.'),
});
type GetTriggerDefinitionInput = z.infer<typeof GetTriggerDefinitionInputSchema>;

// Static JSON Schema for MCP capabilities
const mcpInputSchema = {
    type: 'object',
    properties: {
        schema: {
            type: 'string',
            default: 'public',
            description: 'Schema name (defaults to public).',
        },
        table: {
            type: 'string',
            description: 'Table name the trigger is on.',
        },
        trigger_name: {
            type: 'string',
            description: 'Name of the trigger.',
        },
        include_function: {
            type: 'boolean',
            default: true,
            description: 'Include the trigger function source code.',
        },
    },
    required: ['table', 'trigger_name'],
};

// SQL identifier validation pattern (SQL-injection guard for the three
// interpolated identifiers below)
const identifierPattern = /^[a-zA-Z_][a-zA-Z0-9_$]*$/;

/**
 * Fetches the full definition of one trigger (timing, level, events,
 * attached function, enabled state, and the CREATE TRIGGER text),
 * optionally including the trigger function's source code.
 */
export const getTriggerDefinitionTool = {
    name: 'get_trigger_definition',
    description: 'Gets the full definition of a trigger, optionally including its function source code.',
    privilegeLevel: 'regular' as ToolPrivilegeLevel,
    inputSchema: GetTriggerDefinitionInputSchema,
    mcpInputSchema: mcpInputSchema,
    outputSchema: GetTriggerDefinitionOutputSchema,
    execute: async (input: GetTriggerDefinitionInput, context: ToolContext) => {
        const client = context.selfhostedClient;
        const { schema, table, trigger_name, include_function } = input;
        // Validate identifiers before interpolating them into SQL
        if (!identifierPattern.test(schema)) {
            throw new Error(`Invalid schema name: ${schema}`);
        }
        if (!identifierPattern.test(table)) {
            throw new Error(`Invalid table name: ${table}`);
        }
        if (!identifierPattern.test(trigger_name)) {
            throw new Error(`Invalid trigger name: ${trigger_name}`);
        }
        // The CASE arms below decode pg_trigger.tgtype as a bitmask:
        //   1 = ROW-level, 2 = BEFORE, 4 = INSERT, 8 = DELETE,
        //   16 = UPDATE, 32 = TRUNCATE, 64 = INSTEAD OF.
        // ARRAY_REMOVE(..., NULL) drops the event slots whose bit is unset.
        // When include_function is false the function_definition column is
        // emitted as a literal NULL, skipping the pg_get_functiondef call.
        const sql = `
            SELECT
                n.nspname AS schema_name,
                c.relname AS table_name,
                t.tgname AS trigger_name,
                CASE
                    WHEN t.tgtype::int & 2 > 0 THEN 'BEFORE'
                    WHEN t.tgtype::int & 64 > 0 THEN 'INSTEAD OF'
                    ELSE 'AFTER'
                END AS trigger_timing,
                CASE WHEN t.tgtype::int & 1 > 0 THEN 'ROW' ELSE 'STATEMENT' END AS trigger_level,
                ARRAY_REMOVE(ARRAY[
                    CASE WHEN t.tgtype::int & 4 > 0 THEN 'INSERT' END,
                    CASE WHEN t.tgtype::int & 8 > 0 THEN 'DELETE' END,
                    CASE WHEN t.tgtype::int & 16 > 0 THEN 'UPDATE' END,
                    CASE WHEN t.tgtype::int & 32 > 0 THEN 'TRUNCATE' END
                ], NULL) AS events,
                pn.nspname AS function_schema,
                p.proname AS function_name,
                CASE t.tgenabled
                    WHEN 'O' THEN 'ENABLED'
                    WHEN 'D' THEN 'DISABLED'
                    WHEN 'R' THEN 'REPLICA'
                    WHEN 'A' THEN 'ALWAYS'
                    ELSE t.tgenabled::text
                END AS enabled,
                pg_catalog.pg_get_triggerdef(t.oid, true) AS definition,
                ${include_function ? 'pg_catalog.pg_get_functiondef(p.oid)' : 'NULL'} AS function_definition
            FROM pg_catalog.pg_trigger t
            JOIN pg_catalog.pg_class c ON c.oid = t.tgrelid
            JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
            JOIN pg_catalog.pg_proc p ON p.oid = t.tgfoid
            JOIN pg_catalog.pg_namespace pn ON pn.oid = p.pronamespace
            WHERE n.nspname = '${schema}'
            AND c.relname = '${table}'
            AND t.tgname = '${trigger_name}'
            LIMIT 1
        `;
        const result = await executeSqlWithFallback(client, sql, true);
        if (isSqlErrorResponse(result)) {
            throw new Error(result.error.message || 'Failed to get trigger definition');
        }
        const rows = result as unknown[];
        if (rows.length === 0) {
            throw new Error(`Trigger "${trigger_name}" not found on ${schema}.${table}.`);
        }
        return GetTriggerDefinitionOutputSchema.parse(rows[0]);
    },
};

View File

@ -0,0 +1,150 @@
/**
 * get_vector_index_stats - Gets detailed statistics for vector indexes.
 *
 * Requires the pgvector extension to be installed.
 * Shows usage statistics and size information for vector indexes.
 */
import { z } from 'zod';
import { handleSqlResponse, executeSqlWithFallback, isSqlErrorResponse } from './utils.js';
import type { ToolContext, ToolPrivilegeLevel } from './types.js';

// SQL identifier validation - prevents SQL injection via identifier names
const identifierPattern = /^[a-zA-Z_][a-zA-Z0-9_$]*$/;

// One row per vector index: where it lives, its access method, and its
// usage counters and size.
const VectorIndexStatsSchema = z.object({
    schemaname: z.string(),
    tablename: z.string(),
    indexname: z.string(),
    index_type: z.string(),
    idx_scan: z.number(),
    idx_tup_read: z.number(),
    idx_tup_fetch: z.number(),
    index_size: z.string(),
    index_size_bytes: z.number(),
});
const GetVectorIndexStatsOutputSchema = z.array(VectorIndexStatsSchema);

// All filters are optional; unfiltered, the tool reports every vector index.
const GetVectorIndexStatsInputSchema = z.object({
    schema: z.string().optional().describe('Filter by schema name.'),
    table: z.string().optional().describe('Filter by table name.'),
    indexname: z.string().optional().describe('Filter by index name.'),
});
type GetVectorIndexStatsInput = z.infer<typeof GetVectorIndexStatsInputSchema>;

// Static JSON Schema advertised through MCP capabilities.
const mcpInputSchema = {
    type: 'object',
    properties: {
        schema: {
            type: 'string',
            description: 'Filter by schema name.',
        },
        table: {
            type: 'string',
            description: 'Filter by table name.',
        },
        indexname: {
            type: 'string',
            description: 'Filter by index name.',
        },
    },
    required: [],
};

/**
 * Finds pgvector indexes (by scanning pg_indexes definitions for vector
 * opclasses) and joins them with pg_stat_user_indexes usage counters and
 * size information.
 */
export const getVectorIndexStatsTool = {
    name: 'get_vector_index_stats',
    description: 'Gets usage statistics and size information for pgvector indexes.',
    privilegeLevel: 'regular' as ToolPrivilegeLevel,
    inputSchema: GetVectorIndexStatsInputSchema,
    mcpInputSchema: mcpInputSchema,
    outputSchema: GetVectorIndexStatsOutputSchema,
    execute: async (input: GetVectorIndexStatsInput, context: ToolContext) => {
        const client = context.selfhostedClient;
        const { schema, table, indexname } = input;

        // SECURITY: each provided filter must be a plain SQL identifier
        // before it is interpolated into the query.
        if (schema && !identifierPattern.test(schema)) {
            throw new Error(`Invalid schema name: ${schema}. Must be a valid SQL identifier.`);
        }
        if (table && !identifierPattern.test(table)) {
            throw new Error(`Invalid table name: ${table}. Must be a valid SQL identifier.`);
        }
        if (indexname && !identifierPattern.test(indexname)) {
            throw new Error(`Invalid index name: ${indexname}. Must be a valid SQL identifier.`);
        }

        // Bail out early with a clear message when pgvector is absent.
        const checkExtensionSql = `
            SELECT EXISTS (
                SELECT 1 FROM pg_extension WHERE extname = 'vector'
            ) as installed;
        `;
        const extensionCheck = await executeSqlWithFallback(client, checkExtensionSql, true);
        if (isSqlErrorResponse(extensionCheck)) {
            throw new Error(extensionCheck.error.message || 'Failed to check pgvector extension status.');
        }
        const checkRows = extensionCheck as Array<{ installed: boolean }>;
        if (checkRows.length === 0 || !checkRows[0].installed) {
            throw new Error('pgvector extension is not installed. Install it with: CREATE EXTENSION vector;');
        }

        // Assemble optional filters (identifiers validated above).
        const filters: string[] = [];
        if (schema) {
            filters.push(`s.schemaname = '${schema}'`);
        }
        if (table) {
            filters.push(`s.relname = '${table}'`);
        }
        if (indexname) {
            filters.push(`s.indexrelname = '${indexname}'`);
        }
        const statsWhereClause = filters.length > 0 ? `AND ${filters.join(' AND ')}` : '';

        // The CTE identifies vector indexes by their opclass in the index
        // definition, then classifies the access method from the same text.
        const statsSql = `
            WITH vector_indexes AS (
                SELECT
                    schemaname,
                    tablename,
                    indexname,
                    CASE
                        WHEN indexdef LIKE '%USING ivfflat%' THEN 'IVFFlat'
                        WHEN indexdef LIKE '%USING hnsw%' THEN 'HNSW'
                        ELSE 'Unknown'
                    END AS index_type
                FROM pg_indexes
                WHERE indexdef LIKE '%vector_%ops%'
            )
            SELECT
                vi.schemaname,
                vi.tablename,
                vi.indexname,
                vi.index_type,
                COALESCE(s.idx_scan, 0) AS idx_scan,
                COALESCE(s.idx_tup_read, 0) AS idx_tup_read,
                COALESCE(s.idx_tup_fetch, 0) AS idx_tup_fetch,
                pg_size_pretty(pg_relation_size(s.indexrelid)) AS index_size,
                pg_relation_size(s.indexrelid) AS index_size_bytes
            FROM vector_indexes vi
            JOIN pg_stat_user_indexes s
                ON vi.schemaname = s.schemaname
                AND vi.indexname = s.indexrelname
            WHERE 1=1 ${statsWhereClause}
            ORDER BY s.idx_scan DESC, index_size_bytes DESC
        `;

        const queryResult = await executeSqlWithFallback(client, statsSql, true);
        return handleSqlResponse(queryResult, GetVectorIndexStatsOutputSchema);
    },
};

View File

@ -0,0 +1,95 @@
import { z } from 'zod';
import type { ToolContext } from './types.js';
import { handleSqlResponse } from './utils.js';
import type { SqlSuccessResponse, AuthUser } from '../types/index.js';

// Input schema (initially no filters, add later)
const ListAuthUsersInputSchema = z.object({
    limit: z.number().int().positive().optional().default(50).describe('Max number of users to return'),
    offset: z.number().int().nonnegative().optional().default(0).describe('Number of users to skip'),
    // Add filters later (e.g., by email pattern, role)
});
type ListAuthUsersInput = z.infer<typeof ListAuthUsersInputSchema>;

// Output schema - Zod for validation
const AuthUserZodSchema = z.object({
    id: z.string().uuid(),
    email: z.string().email('Invalid email').nullable(),
    role: z.string().nullable(),
    // Timestamps returned as text from DB might not strictly be ISO 8601 / Zod datetime compliant
    created_at: z.string().nullable(),
    last_sign_in_at: z.string().nullable(),
    raw_app_meta_data: z.record(z.string(), z.unknown()).nullable(),
    raw_user_meta_data: z.record(z.string(), z.unknown()).nullable(),
    // Add more fields as needed (e.g., email_confirmed_at, phone)
});
const ListAuthUsersOutputSchema = z.array(AuthUserZodSchema);
// Use AuthUser[] for the output type hint
type ListAuthUsersOutput = AuthUser[];

// Static JSON Schema for MCP capabilities
const mcpInputSchema = {
    type: 'object',
    properties: {
        limit: {
            type: 'number',
            description: 'Max number of users to return',
            default: 50,
        },
        offset: {
            type: 'number',
            description: 'Number of users to skip',
            default: 0,
        },
    },
    required: [],
};

/**
 * list_auth_users - Pages through auth.users over the direct Postgres
 * connection (likely needed for the auth schema; the RPC path is not used).
 *
 * Changes from the original: the limit is clamped to [1, 1000] before being
 * interpolated into the SQL (matching the safeLimit pattern used by
 * list_edge_function_logs), and all logging goes through context.log instead
 * of console.error, consistent with the other tools in this server.
 */
export const listAuthUsersTool = {
    name: 'list_auth_users',
    description: 'Lists users from the auth.users table.',
    inputSchema: ListAuthUsersInputSchema,
    mcpInputSchema: mcpInputSchema,
    outputSchema: ListAuthUsersOutputSchema,
    execute: async (input: ListAuthUsersInput, context: ToolContext): Promise<ListAuthUsersOutput> => {
        const client = context.selfhostedClient;
        const { limit, offset } = input;
        // Check if direct DB connection is available, as it's likely needed for auth.users
        if (!client.isPgAvailable()) {
            context.log('Direct database connection (DATABASE_URL) is required to list auth users.', 'error');
            throw new Error('Direct database connection (DATABASE_URL) is required to list auth users.');
        }
        // Defense in depth: Zod already validates limit/offset, but clamp the
        // values anyway since they are interpolated into the SQL string.
        const safeLimit = Math.min(Math.max(1, limit), 1000);
        const safeOffset = Math.max(0, offset);
        const listUsersSql = `
            SELECT
                id,
                email,
                role,
                raw_app_meta_data,
                raw_user_meta_data,
                created_at::text, -- Cast timestamp to text for JSON
                last_sign_in_at::text -- Cast timestamp to text for JSON
            FROM
                auth.users
            ORDER BY
                created_at DESC
            LIMIT ${safeLimit}
            OFFSET ${safeOffset}
        `; // No semicolon needed here
        context.log('Attempting to list auth users using direct DB connection...');
        // Use direct connection (executeSqlWithPg) as it likely has necessary privileges
        const result = await client.executeSqlWithPg(listUsersSql);
        // Validate and return
        const validatedUsers = handleSqlResponse(result, ListAuthUsersOutputSchema);
        context.log(`Found ${validatedUsers.length} users.`);
        return validatedUsers;
    },
};

View File

@ -0,0 +1,85 @@
import { z } from 'zod';
import type { ToolContext, ToolPrivilegeLevel } from './types.js';
import { handleSqlResponse, executeSqlWithFallback } from './utils.js';

// Result rows: one entry per extension the server knows about, whether or
// not it is currently installed in this database.
const ListAvailableExtensionsOutputSchema = z.array(z.object({
    name: z.string(),
    default_version: z.string(),
    installed_version: z.string().nullable(),
    is_installed: z.boolean(),
    comment: z.string().nullable(),
}));

// Optional filters for the listing.
const ListAvailableExtensionsInputSchema = z.object({
    show_installed: z.boolean().optional().default(true).describe('Include already installed extensions.'),
    name_pattern: z.string().optional().describe('Filter by extension name pattern (SQL LIKE).'),
});
type ListAvailableExtensionsInput = z.infer<typeof ListAvailableExtensionsInputSchema>;

// Static JSON Schema advertised through MCP capabilities.
const mcpInputSchema = {
    type: 'object',
    properties: {
        show_installed: {
            type: 'boolean',
            default: true,
            description: 'Include already installed extensions.',
        },
        name_pattern: {
            type: 'string',
            description: 'Filter by extension name pattern (SQL LIKE).',
        },
    },
    required: [],
};

// Character whitelist for LIKE patterns; anything else is rejected before it
// can reach the interpolated SQL below.
const likePattern = /^[a-zA-Z0-9_$%\-]+$/;

export const listAvailableExtensionsTool = {
    name: 'list_available_extensions',
    description: 'Lists all PostgreSQL extensions available for installation, including those already installed.',
    privilegeLevel: 'regular' as ToolPrivilegeLevel,
    inputSchema: ListAvailableExtensionsInputSchema,
    mcpInputSchema: mcpInputSchema,
    outputSchema: ListAvailableExtensionsOutputSchema,
    execute: async (input: ListAvailableExtensionsInput, context: ToolContext) => {
        const { show_installed, name_pattern } = input;
        const client = context.selfhostedClient;

        // Reject unsafe patterns up front.
        if (name_pattern && !likePattern.test(name_pattern)) {
            throw new Error(`Invalid name pattern: ${name_pattern}. Use only alphanumeric, underscore, hyphen, dollar sign, and % wildcard.`);
        }

        // Assemble the optional WHERE clause from the active filters.
        const filters: string[] = [];
        if (!show_installed) filters.push('installed_version IS NULL');
        if (name_pattern) filters.push(`name LIKE '${name_pattern}'`);
        const whereClause = filters.length ? `WHERE ${filters.join(' AND ')}` : '';

        // pg_available_extensions already exposes both availability and
        // installation state, so a single select covers everything.
        const sql = `
            SELECT
                name,
                default_version,
                installed_version,
                installed_version IS NOT NULL AS is_installed,
                comment
            FROM pg_available_extensions
            ${whereClause}
            ORDER BY name
        `;

        const rows = await executeSqlWithFallback(client, sql, true);
        return handleSqlResponse(rows, ListAvailableExtensionsOutputSchema);
    },
};

View File

@ -0,0 +1,137 @@
import { z } from 'zod';
import type { ToolContext, ToolPrivilegeLevel } from './types.js';
import { handleSqlResponse, executeSqlWithFallback } from './utils.js';
// Output schema for constraints. One row per constraint; `columns` lists the
// constrained column names in their declared (conkey) order, and `definition`
// is the full SQL text reconstructed by pg_get_constraintdef().
const ListConstraintsOutputSchema = z.array(z.object({
schema_name: z.string(),
table_name: z.string(),
constraint_name: z.string(),
constraint_type: z.string(), // PRIMARY KEY, FOREIGN KEY, UNIQUE, CHECK, EXCLUDE
columns: z.array(z.string()),
definition: z.string(),
is_deferrable: z.boolean(),
initially_deferred: z.boolean(),
}));
// Input schema with optional filters
const ListConstraintsInputSchema = z.object({
schema: z.string().optional().describe('Filter by schema name.'),
table: z.string().optional().describe('Filter by table name.'),
constraint_type: z.enum(['PRIMARY KEY', 'FOREIGN KEY', 'UNIQUE', 'CHECK', 'EXCLUDE']).optional().describe('Filter by constraint type.'),
include_system: z.boolean().optional().default(false).describe('Include constraints in system schemas.'),
});
type ListConstraintsInput = z.infer<typeof ListConstraintsInputSchema>;
// Static JSON Schema for MCP capabilities
const mcpInputSchema = {
type: 'object',
properties: {
schema: {
type: 'string',
description: 'Filter by schema name.',
},
table: {
type: 'string',
description: 'Filter by table name.',
},
constraint_type: {
type: 'string',
enum: ['PRIMARY KEY', 'FOREIGN KEY', 'UNIQUE', 'CHECK', 'EXCLUDE'],
description: 'Filter by constraint type.',
},
include_system: {
type: 'boolean',
default: false,
description: 'Include constraints in system schemas.',
},
},
required: [],
};
// SQL identifier validation pattern. Schema/table names are interpolated into
// the query below, so only plain unquoted-style identifiers are accepted;
// quoted or non-ASCII identifiers are rejected rather than escaped.
const identifierPattern = /^[a-zA-Z_][a-zA-Z0-9_$]*$/;
export const listConstraintsTool = {
name: 'list_constraints',
description: 'Lists all constraints (PRIMARY KEY, FOREIGN KEY, UNIQUE, CHECK, EXCLUDE) in the database. Can filter by schema, table, and type.',
privilegeLevel: 'regular' as ToolPrivilegeLevel,
inputSchema: ListConstraintsInputSchema,
mcpInputSchema: mcpInputSchema,
outputSchema: ListConstraintsOutputSchema,
execute: async (input: ListConstraintsInput, context: ToolContext) => {
const client = context.selfhostedClient;
const { schema, table, constraint_type, include_system } = input;
// Validate identifiers if provided (they are inlined into the SQL below)
if (schema && !identifierPattern.test(schema)) {
throw new Error(`Invalid schema name: ${schema}`);
}
if (table && !identifierPattern.test(table)) {
throw new Error(`Invalid table name: ${table}`);
}
// Build WHERE conditions from the active filters
const conditions: string[] = [];
if (!include_system) {
// Hide PostgreSQL internals plus the schemas Supabase manages itself.
conditions.push("n.nspname NOT IN ('pg_catalog', 'information_schema', 'pg_toast', 'auth', 'storage', 'extensions', 'graphql', 'graphql_public', 'pgbouncer', 'realtime', 'supabase_functions', 'supabase_migrations', '_realtime')");
}
if (schema) {
conditions.push(`n.nspname = '${schema}'`);
}
if (table) {
conditions.push(`rel.relname = '${table}'`);
}
if (constraint_type) {
// Map the human-readable type to its pg_constraint.contype code.
// Use Map to prevent object injection attacks
const typeMap = new Map<string, string>([
['PRIMARY KEY', 'p'],
['FOREIGN KEY', 'f'],
['UNIQUE', 'u'],
['CHECK', 'c'],
['EXCLUDE', 'x'],
]);
const typeCode = typeMap.get(constraint_type);
if (typeCode) {
conditions.push(`c.contype = '${typeCode}'`);
}
}
const whereClause = conditions.length > 0 ? `WHERE ${conditions.join(' AND ')}` : '';
// Query pg_constraint joined to its owning table and schema. The ARRAY
// subquery resolves conkey attribute numbers to column names, preserving
// declaration order via WITH ORDINALITY.
const sql = `
SELECT
n.nspname AS schema_name,
rel.relname AS table_name,
c.conname AS constraint_name,
CASE c.contype
WHEN 'p' THEN 'PRIMARY KEY'
WHEN 'f' THEN 'FOREIGN KEY'
WHEN 'u' THEN 'UNIQUE'
WHEN 'c' THEN 'CHECK'
WHEN 'x' THEN 'EXCLUDE'
ELSE c.contype::text
END AS constraint_type,
ARRAY(
SELECT a.attname
FROM unnest(c.conkey) WITH ORDINALITY AS k(attnum, ord)
JOIN pg_catalog.pg_attribute a ON a.attrelid = c.conrelid AND a.attnum = k.attnum
ORDER BY k.ord
) AS columns,
pg_get_constraintdef(c.oid) AS definition,
c.condeferrable AS is_deferrable,
c.condeferred AS initially_deferred
FROM pg_catalog.pg_constraint c
JOIN pg_catalog.pg_class rel ON rel.oid = c.conrelid
JOIN pg_catalog.pg_namespace n ON n.oid = rel.relnamespace
${whereClause}
ORDER BY n.nspname, rel.relname, c.conname
`;
const result = await executeSqlWithFallback(client, sql, true);
return handleSqlResponse(result, ListConstraintsOutputSchema);
},
};

View File

@ -0,0 +1,79 @@
import { z } from 'zod';
import { handleSqlResponse, executeSqlWithFallback } from './utils.js';
import type { ToolContext } from './types.js';

// Shape of a single row from cron.job.
const CronJobSchema = z.object({
    jobid: z.number(),
    schedule: z.string(),
    command: z.string(),
    nodename: z.string(),
    nodeport: z.number(),
    database: z.string(),
    username: z.string(),
    active: z.boolean(),
});
const ListCronJobsOutputSchema = z.array(CronJobSchema);
type ListCronJobsOutput = z.infer<typeof ListCronJobsOutputSchema>;

// The tool takes no parameters.
const ListCronJobsInputSchema = z.object({});
type ListCronJobsInput = z.infer<typeof ListCronJobsInputSchema>;

// Static JSON Schema for MCP capabilities.
const mcpInputSchema = {
    type: 'object',
    properties: {},
    required: [],
};

export const listCronJobsTool = {
    name: 'list_cron_jobs',
    description: 'Lists all scheduled cron jobs from pg_cron extension. Returns empty array if pg_cron is not installed.',
    inputSchema: ListCronJobsInputSchema,
    mcpInputSchema: mcpInputSchema,
    outputSchema: ListCronJobsOutputSchema,
    execute: async (input: ListCronJobsInput, context: ToolContext): Promise<ListCronJobsOutput> => {
        const client = context.selfhostedClient;

        // pg_cron keeps its tables in a dedicated 'cron' schema; probe for it first.
        const checkSchemaSql = `
            SELECT EXISTS (
                SELECT 1 FROM pg_catalog.pg_namespace WHERE nspname = 'cron'
            ) AS exists
        `;
        const probe = await executeSqlWithFallback(client, checkSchemaSql, true);

        // Anything other than a non-empty row set means we cannot confirm the install.
        if (!Array.isArray(probe) || probe.length === 0) {
            context.log('Could not verify pg_cron installation', 'warn');
            return [];
        }
        if (!probe[0]?.exists) {
            context.log('pg_cron extension not installed (cron schema not found)', 'info');
            return [];
        }

        // Schema present: read the job table.
        const listCronJobsSql = `
            SELECT
                jobid,
                schedule,
                command,
                nodename,
                nodeport,
                database,
                username,
                active
            FROM cron.job
            ORDER BY jobid
        `;
        const rows = await executeSqlWithFallback(client, listCronJobsSql, true);
        return handleSqlResponse(rows, ListCronJobsOutputSchema);
    },
};

View File

@ -0,0 +1,117 @@
import { z } from 'zod';
import type { ToolContext, ToolPrivilegeLevel } from './types.js';
import { handleSqlResponse, executeSqlWithFallback } from './utils.js';
// Output schema for database functions
const ListDatabaseFunctionsOutputSchema = z.array(z.object({
schema_name: z.string(),
function_name: z.string(),
arguments: z.string(),
return_type: z.string(),
language: z.string(),
volatility: z.string(), // IMMUTABLE, STABLE, or VOLATILE
security_definer: z.boolean(),
description: z.string().nullable(),
}));
// Input schema with optional filters
const ListDatabaseFunctionsInputSchema = z.object({
schema: z.string().optional().describe('Filter functions by schema name.'),
name_pattern: z.string().optional().describe('Filter functions by name pattern (SQL LIKE pattern).'),
language: z.string().optional().describe('Filter by language (e.g., plpgsql, sql).'),
});
type ListDatabaseFunctionsInput = z.infer<typeof ListDatabaseFunctionsInputSchema>;
// Static JSON Schema for MCP capabilities
const mcpInputSchema = {
type: 'object',
properties: {
schema: {
type: 'string',
description: 'Filter functions by schema name.',
},
name_pattern: {
type: 'string',
description: 'Filter functions by name pattern (SQL LIKE pattern).',
},
language: {
type: 'string',
description: 'Filter by language (e.g., plpgsql, sql).',
},
},
required: [],
};
// SQL identifier validation pattern (schema/language are inlined into the SQL)
const identifierPattern = /^[a-zA-Z_][a-zA-Z0-9_$]*$/;
// Safe pattern for LIKE expressions - allow wildcards but escape dangerous chars
// ('%' is permitted here; '_' doubles as the LIKE single-character wildcard)
const likePattern = /^[a-zA-Z0-9_$%]+$/;
export const listDatabaseFunctionsTool = {
name: 'list_database_functions',
description: 'Lists all user-defined database functions (stored procedures). Can filter by schema, name pattern, or language. Identifies SECURITY DEFINER functions which may have elevated privileges.',
privilegeLevel: 'regular' as ToolPrivilegeLevel,
inputSchema: ListDatabaseFunctionsInputSchema,
mcpInputSchema: mcpInputSchema,
outputSchema: ListDatabaseFunctionsOutputSchema,
execute: async (input: ListDatabaseFunctionsInput, context: ToolContext) => {
const client = context.selfhostedClient;
const { schema, name_pattern, language } = input;
// Validate identifiers if provided (all three values are inlined into SQL)
if (schema && !identifierPattern.test(schema)) {
throw new Error(`Invalid schema name: ${schema}`);
}
if (language && !identifierPattern.test(language)) {
throw new Error(`Invalid language name: ${language}`);
}
if (name_pattern && !likePattern.test(name_pattern)) {
throw new Error(`Invalid name pattern: ${name_pattern}. Use only alphanumeric, underscore, dollar sign, and % wildcard.`);
}
// Build WHERE conditions. The fixed conditions hide system and temporary
// schemas and restrict results to plain functions (prokind 'f' excludes
// procedures, aggregates, and window functions).
const conditions: string[] = [
"n.nspname NOT IN ('pg_catalog', 'information_schema', 'pg_toast')",
"n.nspname NOT LIKE 'pg_temp_%'",
"p.prokind = 'f'", // Functions only, not procedures or aggregates
];
if (schema) {
conditions.push(`n.nspname = '${schema}'`);
}
if (name_pattern) {
conditions.push(`p.proname LIKE '${name_pattern}'`);
}
if (language) {
conditions.push(`l.lanname = '${language}'`);
}
const whereClause = conditions.join(' AND ');
// pg_get_function_arguments/_result render the signature as SQL text;
// obj_description returns any COMMENT ON FUNCTION, or NULL.
const sql = `
SELECT
n.nspname AS schema_name,
p.proname AS function_name,
pg_catalog.pg_get_function_arguments(p.oid) AS arguments,
pg_catalog.pg_get_function_result(p.oid) AS return_type,
l.lanname AS language,
CASE p.provolatile
WHEN 'i' THEN 'IMMUTABLE'
WHEN 's' THEN 'STABLE'
WHEN 'v' THEN 'VOLATILE'
ELSE p.provolatile::text
END AS volatility,
p.prosecdef AS security_definer,
obj_description(p.oid, 'pg_proc') AS description
FROM pg_catalog.pg_proc p
JOIN pg_catalog.pg_namespace n ON n.oid = p.pronamespace
JOIN pg_catalog.pg_language l ON l.oid = p.prolang
WHERE ${whereClause}
ORDER BY n.nspname, p.proname
`;
const result = await executeSqlWithFallback(client, sql, true);
return handleSqlResponse(result, ListDatabaseFunctionsOutputSchema);
},
};

View File

@ -0,0 +1,141 @@
/**
 * list_edge_function_logs - Lists execution logs for edge functions.
 *
 * Queries the function_edge_logs table if available in the Supabase instance.
 * This table is automatically created by Supabase for edge function logging.
 */
import { z } from 'zod';
import { handleSqlResponse, executeSqlWithFallback } from './utils.js';
import type { ToolContext, ToolPrivilegeLevel } from './types.js';

// One row per edge-function invocation.
const EdgeFunctionLogSchema = z.object({
    execution_id: z.string().nullable(),
    function_id: z.string(),
    status_code: z.number().nullable(),
    request_start_time: z.string(),
    request_duration_ms: z.number().nullable(),
    error_message: z.string().nullable(),
    request_path: z.string().nullable(),
    request_method: z.string().nullable(),
});
const ListEdgeFunctionLogsOutputSchema = z.array(EdgeFunctionLogSchema);

const ListEdgeFunctionLogsInputSchema = z.object({
    function_id: z.string().optional().describe('Filter by function ID/slug.'),
    status_code: z.number().optional().describe('Filter by HTTP status code.'),
    errors_only: z.boolean().optional().describe('Only show logs with errors (status >= 400).'),
    limit: z.number().optional().default(100).describe('Maximum number of log entries to return.'),
});
type ListEdgeFunctionLogsInput = z.infer<typeof ListEdgeFunctionLogsInputSchema>;

const mcpInputSchema = {
    type: 'object',
    properties: {
        function_id: {
            type: 'string',
            description: 'Filter by function ID/slug.',
        },
        status_code: {
            type: 'number',
            description: 'Filter by HTTP status code.',
        },
        errors_only: {
            type: 'boolean',
            description: 'Only show logs with errors (status >= 400).',
        },
        limit: {
            type: 'number',
            description: 'Maximum number of log entries to return.',
            default: 100,
        },
    },
    required: [],
};

// Pattern for function IDs (UUIDs or slugs). The value is interpolated into
// SQL, so only safe characters are accepted.
const functionIdPattern = /^[a-zA-Z0-9_\-]+$/;

export const listEdgeFunctionLogsTool = {
    name: 'list_edge_function_logs',
    description: 'Lists execution logs for edge functions from the function_edge_logs table.',
    privilegeLevel: 'regular' as ToolPrivilegeLevel,
    inputSchema: ListEdgeFunctionLogsInputSchema,
    mcpInputSchema: mcpInputSchema,
    outputSchema: ListEdgeFunctionLogsOutputSchema,
    execute: async (input: ListEdgeFunctionLogsInput, context: ToolContext) => {
        const client = context.selfhostedClient;
        const { function_id, status_code, errors_only, limit = 100 } = input;

        // Validate function_id if provided
        if (function_id && !functionIdPattern.test(function_id)) {
            throw new Error(`Invalid function ID: ${function_id}. Use only alphanumeric, underscore, and hyphen characters.`);
        }

        // Existence check via to_regclass(), which resolves the unqualified
        // name through the current search_path -- the same way the data query
        // below resolves it. (The previous information_schema check matched a
        // table of this name in ANY schema, so it could pass while the
        // unqualified query still failed, and vice versa.)
        const checkTableSql = `
            SELECT to_regclass('function_edge_logs') IS NOT NULL AS exists;
        `;
        const tableCheck = await executeSqlWithFallback(client, checkTableSql, true);
        if (!Array.isArray(tableCheck) || tableCheck.length === 0) {
            throw new Error('Failed to check for function_edge_logs table.');
        }
        if (!tableCheck[0].exists) {
            throw new Error(
                'Edge function logs table (function_edge_logs) not found. ' +
                'This table is automatically created by Supabase when edge functions are invoked. ' +
                'Ensure edge functions have been executed at least once.'
            );
        }

        // Build filters. function_id is validated above; status_code is a
        // number from Zod validation, safe to inline.
        const conditions: string[] = [];
        if (function_id) {
            conditions.push(`function_id = '${function_id}'`);
        }
        if (status_code !== undefined) {
            conditions.push(`status_code = ${status_code}`);
        }
        if (errors_only) {
            conditions.push('status_code >= 400');
        }
        const whereClause = conditions.length > 0 ? `WHERE ${conditions.join(' AND ')}` : '';

        // Clamp the row limit to [1, 1000] regardless of caller input.
        const safeLimit = Math.min(Math.max(1, limit), 1000);

        // NOTE(review): duration assumes a request_end_time column exists on
        // the logs table -- confirm against the deployed log schema.
        const logsSql = `
            SELECT
                execution_id::text,
                function_id,
                status_code,
                request_start_time::text,
                EXTRACT(EPOCH FROM (request_end_time - request_start_time)) * 1000 as request_duration_ms,
                error_message,
                request_path,
                request_method
            FROM function_edge_logs
            ${whereClause}
            ORDER BY request_start_time DESC
            LIMIT ${safeLimit}
        `;
        const result = await executeSqlWithFallback(client, logsSql, true);
        return handleSqlResponse(result, ListEdgeFunctionLogsOutputSchema);
    },
};

View File

@ -0,0 +1,98 @@
import { z } from 'zod';
import { handleSqlResponse, executeSqlWithFallback } from './utils.js';
import type { ToolContext } from './types.js';

// Row shape for a deployed edge function.
const EdgeFunctionSchema = z.object({
    id: z.string().uuid(),
    name: z.string(),
    slug: z.string(),
    status: z.string().nullable(),
    version: z.number().nullable(),
    created_at: z.string().nullable(),
    updated_at: z.string().nullable(),
});
const ListEdgeFunctionsOutputSchema = z.array(EdgeFunctionSchema);
type ListEdgeFunctionsOutput = z.infer<typeof ListEdgeFunctionsOutputSchema>;

// The tool takes no parameters.
const ListEdgeFunctionsInputSchema = z.object({});
type ListEdgeFunctionsInput = z.infer<typeof ListEdgeFunctionsInputSchema>;

// Static JSON Schema for MCP capabilities.
const mcpInputSchema = {
    type: 'object',
    properties: {},
    required: [],
};

export const listEdgeFunctionsTool = {
    name: 'list_edge_functions',
    description: 'Lists all deployed Supabase Edge Functions. Returns empty array if edge functions are not available or none are deployed.',
    inputSchema: ListEdgeFunctionsInputSchema,
    mcpInputSchema: mcpInputSchema,
    outputSchema: ListEdgeFunctionsOutputSchema,
    execute: async (input: ListEdgeFunctionsInput, context: ToolContext): Promise<ListEdgeFunctionsOutput> => {
        const client = context.selfhostedClient;

        // Probe for the supabase_functions schema; without it there is nothing to list.
        const schemaProbeSql = `
            SELECT EXISTS (
                SELECT 1 FROM pg_catalog.pg_namespace WHERE nspname = 'supabase_functions'
            ) AS exists
        `;
        const schemaProbe = await executeSqlWithFallback(client, schemaProbeSql, true);
        if (!Array.isArray(schemaProbe) || schemaProbe.length === 0) {
            context.log('Could not verify supabase_functions schema', 'warn');
            return [];
        }
        if (!schemaProbe[0]?.exists) {
            context.log('supabase_functions schema not found - Edge Functions may not be available in this installation', 'info');
            return [];
        }

        // Probe for the functions table inside that schema.
        const tableProbeSql = `
            SELECT EXISTS (
                SELECT 1 FROM pg_catalog.pg_tables
                WHERE schemaname = 'supabase_functions' AND tablename = 'functions'
            ) AS exists
        `;
        const tableProbe = await executeSqlWithFallback(client, tableProbeSql, true);
        if (!Array.isArray(tableProbe) || tableProbe.length === 0) {
            context.log('Could not verify functions table', 'warn');
            return [];
        }
        if (!tableProbe[0]?.exists) {
            context.log('supabase_functions.functions table not found', 'info');
            return [];
        }

        // Both probes passed: fetch the deployed functions.
        const listEdgeFunctionsSql = `
            SELECT
                id,
                name,
                slug,
                status,
                version,
                created_at::text,
                updated_at::text
            FROM supabase_functions.functions
            ORDER BY name
        `;
        const rows = await executeSqlWithFallback(client, listEdgeFunctionsSql, true);
        return handleSqlResponse(rows, ListEdgeFunctionsOutputSchema);
    },
};

View File

@ -0,0 +1,57 @@
import { z } from 'zod';
import type { SelfhostedSupabaseClient } from '../client/index.js';
import { handleSqlResponse, executeSqlWithFallback } from './utils.js';
import type { ToolContext } from './types.js';

// One row per installed extension (plpgsql is filtered out in the query).
const ListExtensionsOutputSchema = z.array(z.object({
    name: z.string(),
    schema: z.string(),
    version: z.string(),
    description: z.string().nullable().optional(),
}));

// The tool takes no parameters.
const ListExtensionsInputSchema = z.object({});
type ListExtensionsInput = z.infer<typeof ListExtensionsInputSchema>;

// Static JSON Schema for MCP capabilities.
const mcpInputSchema = {
    type: 'object',
    properties: {},
    required: [],
};

export const listExtensionsTool = {
    name: 'list_extensions',
    description: 'Lists all installed PostgreSQL extensions in the database.',
    inputSchema: ListExtensionsInputSchema,
    mcpInputSchema: mcpInputSchema,
    outputSchema: ListExtensionsOutputSchema,
    execute: async (input: ListExtensionsInput, context: ToolContext) => {
        // Read installed extensions from pg_extension, resolving the schema
        // each one lives in plus any catalog comment attached to it.
        const extensionsSql = `
            SELECT
                pe.extname AS name,
                pn.nspname AS schema,
                pe.extversion AS version,
                pd.description
            FROM
                pg_catalog.pg_extension pe
            LEFT JOIN
                pg_catalog.pg_namespace pn ON pn.oid = pe.extnamespace
            LEFT JOIN
                pg_catalog.pg_description pd ON pd.objoid = pe.oid AND pd.classoid = 'pg_catalog.pg_extension'::regclass
            WHERE
                pe.extname != 'plpgsql' -- Exclude the default plpgsql extension
            ORDER BY
                pe.extname
        `;
        const rows = await executeSqlWithFallback(context.selfhostedClient, extensionsSql, true);
        return handleSqlResponse(rows, ListExtensionsOutputSchema);
    },
};

View File

@ -0,0 +1,121 @@
import { z } from 'zod';
import type { ToolContext, ToolPrivilegeLevel } from './types.js';
import { handleSqlResponse, executeSqlWithFallback } from './utils.js';

// Output schema for foreign keys: one row per (constraint, column) pair.
const ListForeignKeysOutputSchema = z.array(z.object({
    constraint_name: z.string(),
    schema_name: z.string(),
    table_name: z.string(),
    column_name: z.string(),
    referenced_schema: z.string(),
    referenced_table: z.string(),
    referenced_column: z.string(),
    update_rule: z.string(),
    delete_rule: z.string(),
    is_deferrable: z.boolean(),
    initially_deferred: z.boolean(),
}));

// Input schema with optional filters
const ListForeignKeysInputSchema = z.object({
    schema: z.string().optional().describe('Filter by schema name.'),
    table: z.string().optional().describe('Filter by table name.'),
    include_system: z.boolean().optional().default(false).describe('Include foreign keys in system schemas.'),
});
type ListForeignKeysInput = z.infer<typeof ListForeignKeysInputSchema>;

// Static JSON Schema for MCP capabilities
const mcpInputSchema = {
    type: 'object',
    properties: {
        schema: {
            type: 'string',
            description: 'Filter by schema name.',
        },
        table: {
            type: 'string',
            description: 'Filter by table name.',
        },
        include_system: {
            type: 'boolean',
            default: false,
            description: 'Include foreign keys in system schemas.',
        },
    },
    required: [],
};

// SQL identifier validation pattern; names are interpolated into the query,
// so only plain unquoted-style identifiers are accepted.
const identifierPattern = /^[a-zA-Z_][a-zA-Z0-9_$]*$/;

/**
 * list_foreign_keys - enumerates FOREIGN KEY constraints.
 *
 * Fixes over the original query:
 * 1. constraint_column_usage was joined on ccu.table_schema =
 *    tc.table_schema; but ccu.table_schema is the *referenced* table's
 *    schema, which silently dropped cross-schema foreign keys. The join now
 *    uses constraint_schema on both sides.
 * 2. pg_constraint was joined on conname alone; constraint names are not
 *    unique database-wide (or even schema-wide across tables), which could
 *    duplicate rows. The join is now pinned to the owning table's oid.
 *
 * NOTE(review): for composite (multi-column) foreign keys, information_schema
 * does not expose the pairing between local and referenced columns, so such
 * keys still produce one row per local/referenced column combination --
 * confirm whether composite FKs matter for consumers.
 */
export const listForeignKeysTool = {
    name: 'list_foreign_keys',
    description: 'Lists all foreign key relationships in the database. Can filter by schema and/or table.',
    privilegeLevel: 'regular' as ToolPrivilegeLevel,
    inputSchema: ListForeignKeysInputSchema,
    mcpInputSchema: mcpInputSchema,
    outputSchema: ListForeignKeysOutputSchema,
    execute: async (input: ListForeignKeysInput, context: ToolContext) => {
        const client = context.selfhostedClient;
        const { schema, table, include_system } = input;
        // Validate identifiers if provided
        if (schema && !identifierPattern.test(schema)) {
            throw new Error(`Invalid schema name: ${schema}`);
        }
        if (table && !identifierPattern.test(table)) {
            throw new Error(`Invalid table name: ${table}`);
        }
        // Build WHERE conditions
        const conditions: string[] = [];
        if (!include_system) {
            // Hide PostgreSQL internals plus the schemas Supabase manages itself.
            conditions.push("tc.table_schema NOT IN ('pg_catalog', 'information_schema', 'auth', 'storage', 'extensions', 'graphql', 'graphql_public', 'pgbouncer', 'realtime', 'supabase_functions', 'supabase_migrations', '_realtime')");
        }
        if (schema) {
            conditions.push(`tc.table_schema = '${schema}'`);
        }
        if (table) {
            conditions.push(`tc.table_name = '${table}'`);
        }
        const whereClause = conditions.length > 0 ? `AND ${conditions.join(' AND ')}` : '';
        const sql = `
            SELECT
                tc.constraint_name,
                tc.table_schema AS schema_name,
                tc.table_name,
                kcu.column_name,
                ccu.table_schema AS referenced_schema,
                ccu.table_name AS referenced_table,
                ccu.column_name AS referenced_column,
                rc.update_rule,
                rc.delete_rule,
                c.condeferrable AS is_deferrable,
                c.condeferred AS initially_deferred
            FROM information_schema.table_constraints tc
            JOIN information_schema.key_column_usage kcu
                ON tc.constraint_name = kcu.constraint_name
                AND tc.table_schema = kcu.table_schema
            JOIN information_schema.constraint_column_usage ccu
                ON ccu.constraint_name = tc.constraint_name
                AND ccu.constraint_schema = tc.constraint_schema
            JOIN information_schema.referential_constraints rc
                ON tc.constraint_name = rc.constraint_name
                AND tc.table_schema = rc.constraint_schema
            JOIN pg_catalog.pg_class rel
                ON rel.relname = tc.table_name
            JOIN pg_catalog.pg_namespace reln
                ON reln.oid = rel.relnamespace
                AND reln.nspname = tc.table_schema
            JOIN pg_catalog.pg_constraint c
                ON c.conname = tc.constraint_name
                AND c.conrelid = rel.oid
            WHERE tc.constraint_type = 'FOREIGN KEY'
            ${whereClause}
            ORDER BY tc.table_schema, tc.table_name, tc.constraint_name
        `;
        const result = await executeSqlWithFallback(client, sql, true);
        return handleSqlResponse(result, ListForeignKeysOutputSchema);
    },
};

View File

@ -0,0 +1,111 @@
import { z } from 'zod';
import type { ToolContext, ToolPrivilegeLevel } from './types.js';
import { handleSqlResponse, executeSqlWithFallback } from './utils.js';
// Output schema for indexes
const ListIndexesOutputSchema = z.array(z.object({
schema_name: z.string(),
table_name: z.string(),
index_name: z.string(),
index_type: z.string(), // btree, hash, gist, gin, brin
is_unique: z.boolean(),
is_primary: z.boolean(),
is_valid: z.boolean(),
columns: z.string(),
size: z.string(),
definition: z.string(),
}));
// Input schema with optional filters
const ListIndexesInputSchema = z.object({
schema: z.string().optional().describe('Filter indexes by schema name.'),
table: z.string().optional().describe('Filter indexes by table name.'),
include_system: z.boolean().optional().default(false).describe('Include indexes on system tables.'),
});
type ListIndexesInput = z.infer<typeof ListIndexesInputSchema>;
// Static JSON Schema for MCP capabilities
const mcpInputSchema = {
type: 'object',
properties: {
schema: {
type: 'string',
description: 'Filter indexes by schema name.',
},
table: {
type: 'string',
description: 'Filter indexes by table name.',
},
include_system: {
type: 'boolean',
default: false,
description: 'Include indexes on system tables.',
},
},
required: [],
};
// SQL identifier validation pattern (schema/table are inlined into the query)
const identifierPattern = /^[a-zA-Z_][a-zA-Z0-9_$]*$/;
export const listIndexesTool = {
name: 'list_indexes',
description: 'Lists all indexes in the database with their definitions and sizes. Can filter by schema and/or table.',
privilegeLevel: 'regular' as ToolPrivilegeLevel,
inputSchema: ListIndexesInputSchema,
mcpInputSchema: mcpInputSchema,
outputSchema: ListIndexesOutputSchema,
execute: async (input: ListIndexesInput, context: ToolContext) => {
const client = context.selfhostedClient;
const { schema, table, include_system } = input;
// Validate identifiers if provided
if (schema && !identifierPattern.test(schema)) {
throw new Error(`Invalid schema name: ${schema}`);
}
if (table && !identifierPattern.test(table)) {
throw new Error(`Invalid table name: ${table}`);
}
// Build WHERE conditions (column names refer to the pg_indexes view)
const conditions: string[] = [];
if (!include_system) {
// Hide PostgreSQL internals plus the schemas Supabase manages itself.
conditions.push("schemaname NOT IN ('pg_catalog', 'information_schema', 'pg_toast', 'auth', 'storage', 'extensions', 'graphql', 'graphql_public', 'pgbouncer', 'realtime', 'supabase_functions', 'supabase_migrations', '_realtime')");
}
if (schema) {
conditions.push(`schemaname = '${schema}'`);
}
if (table) {
conditions.push(`tablename = '${table}'`);
}
const whereClause = conditions.length > 0 ? `WHERE ${conditions.join(' AND ')}` : '';
// Join the name-based pg_indexes view back to pg_class/pg_index for the
// unique/primary/valid flags, the access method, and the on-disk size.
// The join key is (indexname, schemaname), which identifies an index
// within a schema.
const sql = `
SELECT
i.schemaname AS schema_name,
i.tablename AS table_name,
i.indexname AS index_name,
am.amname AS index_type,
ix.indisunique AS is_unique,
ix.indisprimary AS is_primary,
ix.indisvalid AS is_valid,
pg_catalog.pg_get_indexdef(ix.indexrelid, 0, true) AS columns,
pg_size_pretty(pg_relation_size(ix.indexrelid)) AS size,
i.indexdef AS definition
FROM pg_indexes i
JOIN pg_catalog.pg_class c ON c.relname = i.indexname
JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace AND n.nspname = i.schemaname
JOIN pg_catalog.pg_index ix ON ix.indexrelid = c.oid
JOIN pg_catalog.pg_am am ON am.oid = c.relam
${whereClause}
ORDER BY i.schemaname, i.tablename, i.indexname
`;
const result = await executeSqlWithFallback(client, sql, true);
return handleSqlResponse(result, ListIndexesOutputSchema);
},
};

View File

@ -0,0 +1,52 @@
import { z } from 'zod';
import type { SelfhostedSupabaseClient } from '../client/index.js';
import type { ToolContext } from './types.js';
import { handleSqlResponse, executeSqlWithFallback } from './utils.js';
// Row shape of supabase_migrations.schema_migrations.
const ListMigrationsOutputSchema = z.array(z.object({
    version: z.string(),
    name: z.string(),
    inserted_at: z.string(), // timestamp stays as the DB-formatted string
}));

// No parameters are accepted by this tool.
const ListMigrationsInputSchema = z.object({});
type ListMigrationsInput = z.infer<typeof ListMigrationsInputSchema>;

// Parameter-free JSON Schema advertised through MCP capabilities.
const mcpInputSchema = {
    type: 'object',
    properties: {},
    required: [],
};

/**
 * list_migrations tool.
 *
 * Reads the Supabase CLI's migration bookkeeping table and returns every
 * applied migration (version, name, applied-at timestamp) ordered by version.
 * If migrations were never run the table may not exist; the resulting SQL
 * error is surfaced by handleSqlResponse.
 */
export const listMigrationsTool = {
    name: 'list_migrations',
    description: 'Lists applied database migrations recorded in supabase_migrations.schema_migrations table.',
    inputSchema: ListMigrationsInputSchema,
    mcpInputSchema: mcpInputSchema,
    outputSchema: ListMigrationsOutputSchema,
    execute: async (_input: ListMigrationsInput, context: ToolContext) => {
        const sql = `
SELECT
version,
name,
inserted_at
FROM
supabase_migrations.schema_migrations
ORDER BY
version
`;
        const result = await executeSqlWithFallback(context.selfhostedClient, sql, true);
        return handleSqlResponse(result, ListMigrationsOutputSchema);
    },
};

View File

@ -0,0 +1,81 @@
import { z } from 'zod';
import type { ToolContext } from './types.js';
import { handleSqlResponse } from './utils.js';
import type { SqlSuccessResponse } from '../types/index.js';
// No parameters are accepted by this tool.
const ListRealtimePublicationsInputSchema = z.object({});
type ListRealtimePublicationsInput = z.infer<typeof ListRealtimePublicationsInputSchema>;

// Mirrors the pg_catalog.pg_publication columns selected below.
const PublicationSchema = z.object({
    oid: z.number().int(),
    pubname: z.string(),
    pubowner: z.number().int(), // owner role OID (join pg_roles for a name)
    puballtables: z.boolean(),
    pubinsert: z.boolean(),
    pubupdate: z.boolean(),
    pubdelete: z.boolean(),
    pubtruncate: z.boolean(),
    pubviaroot: z.boolean(),
});
const ListRealtimePublicationsOutputSchema = z.array(PublicationSchema);
type ListRealtimePublicationsOutput = z.infer<typeof ListRealtimePublicationsOutputSchema>;

// Parameter-free JSON Schema advertised through MCP capabilities.
export const mcpInputSchema = {
    type: 'object',
    properties: {},
    required: [],
};

/**
 * list_realtime_publications tool.
 *
 * Reads pg_catalog.pg_publication directly, so a direct pg connection
 * (DATABASE_URL) is mandatory; throws when only the HTTP client is available.
 */
export const listRealtimePublicationsTool = {
    name: 'list_realtime_publications',
    description: 'Lists PostgreSQL publications, often used by Supabase Realtime.',
    mcpInputSchema,
    inputSchema: ListRealtimePublicationsInputSchema,
    outputSchema: ListRealtimePublicationsOutputSchema,
    execute: async (
        _input: ListRealtimePublicationsInput,
        context: ToolContext
    ): Promise<ListRealtimePublicationsOutput> => {
        const client = context.selfhostedClient;
        console.error('Listing Realtime publications...');
        if (!client.isPgAvailable()) {
            const message = 'Direct database connection (DATABASE_URL) is required to list publications.';
            context.log(message, 'error');
            throw new Error(message);
        }
        const sql = `
SELECT
oid,
pubname,
pubowner,
puballtables,
pubinsert,
pubupdate,
pubdelete,
pubtruncate,
pubviaroot
FROM pg_catalog.pg_publication;
`;
        console.error('Attempting to list publications using direct DB connection...');
        // A plain unparameterized read, so executeSqlWithPg is sufficient.
        const result = await client.executeSqlWithPg(sql);
        const publications = handleSqlResponse(result, ListRealtimePublicationsOutputSchema);
        console.error(`Found ${publications.length} publications.`);
        context.log(`Found ${publications.length} publications.`);
        return publications;
    },
};

export default listRealtimePublicationsTool;

View File

@ -0,0 +1,110 @@
import { z } from 'zod';
import type { ToolContext, ToolPrivilegeLevel } from './types.js';
import { handleSqlResponse, executeSqlWithFallback } from './utils.js';
// Output schema for RLS policies.
// One row per policy; `roles` falls back to ['public'] when polroles is empty.
const ListRlsPoliciesOutputSchema = z.array(z.object({
schema_name: z.string(),
table_name: z.string(),
policy_name: z.string(),
command: z.string(), // SELECT, INSERT, UPDATE, DELETE, or ALL
policy_type: z.string(), // PERMISSIVE or RESTRICTIVE
roles: z.array(z.string()),
using_expression: z.string().nullable(),
with_check_expression: z.string().nullable(),
}));
// Input schema with optional filters; both filters may be combined.
const ListRlsPoliciesInputSchema = z.object({
schema: z.string().optional().describe('Filter policies by schema name.'),
table: z.string().optional().describe('Filter policies by table name.'),
});
type ListRlsPoliciesInput = z.infer<typeof ListRlsPoliciesInputSchema>;
// Static JSON Schema for MCP capabilities
const mcpInputSchema = {
type: 'object',
properties: {
schema: {
type: 'string',
description: 'Filter policies by schema name.',
},
table: {
type: 'string',
description: 'Filter policies by table name.',
},
},
required: [],
};
// SQL identifier validation pattern; rejects quotes/whitespace so the
// filter values can be interpolated into the WHERE clause without injection.
const identifierPattern = /^[a-zA-Z_][a-zA-Z0-9_$]*$/;
export const listRlsPoliciesTool = {
name: 'list_rls_policies',
description: 'Lists all Row Level Security (RLS) policies in the database. Can filter by schema and/or table name.',
privilegeLevel: 'regular' as ToolPrivilegeLevel,
inputSchema: ListRlsPoliciesInputSchema,
mcpInputSchema: mcpInputSchema,
outputSchema: ListRlsPoliciesOutputSchema,
// Builds and runs the pg_policy query, translating catalog codes into
// human-readable command/type names.
execute: async (input: ListRlsPoliciesInput, context: ToolContext) => {
const client = context.selfhostedClient;
const { schema, table } = input;
// Validate identifiers if provided
if (schema && !identifierPattern.test(schema)) {
throw new Error(`Invalid schema name: ${schema}`);
}
if (table && !identifierPattern.test(table)) {
throw new Error(`Invalid table name: ${table}`);
}
// Build WHERE conditions based on filters; system schemas are always excluded.
const conditions: string[] = [
"n.nspname NOT IN ('pg_catalog', 'information_schema', 'pg_toast')",
];
if (schema) {
conditions.push(`n.nspname = '${schema}'`);
}
if (table) {
conditions.push(`c.relname = '${table}'`);
}
const whereClause = conditions.join(' AND ');
// The CASE maps pg_policy.polcmd single-letter codes (r/a/w/d/*) to the
// SQL command names; pg_get_expr de-parses the stored policy expressions.
const sql = `
SELECT
n.nspname AS schema_name,
c.relname AS table_name,
pol.polname AS policy_name,
CASE pol.polcmd
WHEN 'r' THEN 'SELECT'
WHEN 'a' THEN 'INSERT'
WHEN 'w' THEN 'UPDATE'
WHEN 'd' THEN 'DELETE'
WHEN '*' THEN 'ALL'
ELSE pol.polcmd::text
END AS command,
CASE pol.polpermissive
WHEN true THEN 'PERMISSIVE'
ELSE 'RESTRICTIVE'
END AS policy_type,
COALESCE(
ARRAY(SELECT r.rolname FROM pg_catalog.pg_roles r WHERE r.oid = ANY(pol.polroles)),
ARRAY['public']::text[]
) AS roles,
pg_catalog.pg_get_expr(pol.polqual, pol.polrelid) AS using_expression,
pg_catalog.pg_get_expr(pol.polwithcheck, pol.polrelid) AS with_check_expression
FROM pg_catalog.pg_policy pol
JOIN pg_catalog.pg_class c ON c.oid = pol.polrelid
JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE ${whereClause}
ORDER BY n.nspname, c.relname, pol.polname
`;
const result = await executeSqlWithFallback(client, sql, true);
return handleSqlResponse(result, ListRlsPoliciesOutputSchema);
},
};

View File

@ -0,0 +1,85 @@
import { z } from 'zod';
import type { ToolContext } from './types.js';
import { handleSqlResponse } from './utils.js';
import type { SqlSuccessResponse, StorageBucket } from '../types/index.js';
// Validates one storage.buckets row (output validation).
const BucketSchema = z.object({
    id: z.string(),
    name: z.string(),
    owner: z.string().nullable(),
    public: z.boolean(),
    avif_autodetection: z.boolean(),
    file_size_limit: z.number().nullable(),
    allowed_mime_types: z.array(z.string()).nullable(),
    // Timestamps stay as the text strings produced by the ::text casts below.
    created_at: z.string().nullable(),
    updated_at: z.string().nullable(),
});
const ListStorageBucketsOutputSchema = z.array(BucketSchema);
type ListStorageBucketsOutput = StorageBucket[];

// Parameter-free JSON Schema advertised through MCP capabilities.
export const mcpInputSchema = {
    type: 'object',
    properties: {},
    required: [],
};

// Runtime input validation (no parameters).
const inputSchema = z.object({});
type Input = z.infer<typeof inputSchema>;

/**
 * list_storage_buckets tool.
 *
 * Reads storage.buckets over the direct pg connection (DATABASE_URL);
 * throws when only the HTTP client is configured.
 */
export const listStorageBucketsTool = {
    name: 'list_storage_buckets',
    description: 'Lists all storage buckets in the project.',
    mcpInputSchema,
    inputSchema,
    outputSchema: ListStorageBucketsOutputSchema,
    execute: async (
        _input: Input,
        context: ToolContext
    ): Promise<ListStorageBucketsOutput> => {
        const client = context.selfhostedClient;
        // Operational logging goes to stderr so stdout stays clean for MCP.
        console.error('Listing storage buckets...');
        if (!client.isPgAvailable()) {
            const message = 'Direct database connection (DATABASE_URL) is required to list storage buckets.';
            context.log(message, 'error');
            throw new Error(message);
        }
        const sql = `
SELECT
id,
name,
owner,
public,
avif_autodetection,
file_size_limit,
allowed_mime_types,
created_at::text, -- Cast to text
updated_at::text -- Cast to text
FROM storage.buckets;
`;
        console.error('Attempting to list storage buckets using direct DB connection...');
        const result = await client.executeSqlWithPg(sql);
        const buckets = handleSqlResponse(result, ListStorageBucketsOutputSchema);
        console.error(`Found ${buckets.length} buckets.`);
        context.log(`Found ${buckets.length} buckets.`);
        return buckets;
    },
};

// Default export for potential dynamic loading.
export default listStorageBucketsTool;

View File

@ -0,0 +1,123 @@
import { z } from 'zod';
import type { PoolClient } from 'pg'; // Import PoolClient type
import type { ToolContext } from './types.js';
import { handleSqlResponse } from './utils.js';
import type { SqlSuccessResponse } from '../types/index.js'; // Import the type
// Input schema: bucket_id is mandatory; paging defaults to 100 rows from offset 0.
const ListStorageObjectsInputSchema = z.object({
bucket_id: z.string().describe('The ID of the bucket to list objects from.'),
limit: z.number().int().positive().optional().default(100).describe('Max number of objects to return'),
offset: z.number().int().nonnegative().optional().default(0).describe('Number of objects to skip'),
prefix: z.string().optional().describe('Filter objects by a path prefix (e.g., \'public/\')'),
});
type ListStorageObjectsInput = z.infer<typeof ListStorageObjectsInputSchema>;
// Output schema
const StorageObjectSchema = z.object({
id: z.string().uuid(),
name: z.string().nullable(), // Name can be null according to schema
bucket_id: z.string(),
owner: z.string().uuid().nullable(),
version: z.string().nullable(),
// Get mimetype directly from SQL extraction
mimetype: z.string().nullable(),
// size comes from metadata - use transform instead of pipe for Zod v4;
// normalizes string/number/null to number|null, mapping NaN to null.
size: z.union([z.string(), z.number(), z.null()]).transform((val) => {
if (val === null) return null;
const num = typeof val === 'number' ? val : parseInt(String(val), 10);
return isNaN(num) ? null : num;
}),
// Keep raw metadata as well
metadata: z.record(z.string(), z.any()).nullable(),
created_at: z.string().nullable(),
updated_at: z.string().nullable(),
last_accessed_at: z.string().nullable(),
});
const ListStorageObjectsOutputSchema = z.array(StorageObjectSchema);
type ListStorageObjectsOutput = z.infer<typeof ListStorageObjectsOutputSchema>;
// Static JSON schema for MCP
export const mcpInputSchema = {
type: 'object',
properties: {
bucket_id: { type: 'string', description: 'The ID of the bucket to list objects from.' },
limit: { type: 'number', description: 'Max number of objects to return', default: 100 },
offset: { type: 'number', description: 'Number of objects to skip', default: 0 },
prefix: { type: 'string', description: "Filter objects by a path prefix (e.g., 'public/')" },
},
required: ['bucket_id'],
};
// Tool definition
export const listStorageObjectsTool = {
name: 'list_storage_objects',
description: 'Lists objects within a specific storage bucket, optionally filtering by prefix.',
mcpInputSchema,
inputSchema: ListStorageObjectsInputSchema,
outputSchema: ListStorageObjectsOutputSchema,
// Queries storage.objects via a parameterized statement; user-supplied
// values never enter the SQL text, only the parameter array.
execute: async (
input: ListStorageObjectsInput,
context: ToolContext
): Promise<ListStorageObjectsOutput> => {
const client = context.selfhostedClient;
const { bucket_id, limit, offset, prefix } = input;
console.error(`Listing objects for bucket ${bucket_id} (Prefix: ${prefix || 'N/A'})...`);
if (!client.isPgAvailable()) {
context.log('Direct database connection (DATABASE_URL) is required to list storage objects.', 'error');
throw new Error('Direct database connection (DATABASE_URL) is required to list storage objects.');
}
// Use a transaction to get access to the pg client for parameterized queries
const objects = await client.executeTransactionWithPg(async (pgClient: PoolClient) => {
// Build query with parameters; paramIndex tracks the next $n placeholder.
let sql = `
SELECT
id,
name,
bucket_id,
owner,
version,
metadata ->> 'mimetype' AS mimetype,
metadata ->> 'size' AS size, -- Extract size from metadata
metadata,
created_at::text,
updated_at::text,
last_accessed_at::text
FROM storage.objects
WHERE bucket_id = $1
`;
const params: (string | number)[] = [bucket_id];
let paramIndex = 2;
if (prefix) {
// NOTE(review): prefix is not LIKE-escaped, so '%' or '_' inside it act
// as wildcards rather than literals — confirm this is intended.
sql += ` AND name LIKE $${paramIndex++}`;
params.push(`${prefix}%`);
}
sql += ' ORDER BY name ASC NULLS FIRST';
sql += ` LIMIT $${paramIndex++}`;
params.push(limit);
sql += ` OFFSET $${paramIndex++}`;
params.push(offset);
sql += ';';
console.error('Executing parameterized SQL to list storage objects within transaction...');
const result = await pgClient.query(sql, params); // Raw pg result
// Explicitly pass result.rows, which matches the expected structure
// of SqlSuccessResponse (unknown[]) for handleSqlResponse.
return handleSqlResponse(result.rows as SqlSuccessResponse, ListStorageObjectsOutputSchema);
});
console.error(`Found ${objects.length} objects.`);
context.log(`Found ${objects.length} objects.`);
return objects;
},
};
export default listStorageObjectsTool;

View File

@ -0,0 +1,123 @@
import { z } from 'zod';
import type { ToolContext, ToolPrivilegeLevel } from './types.js';
import { handleSqlResponse, executeSqlWithFallback, isSqlErrorResponse } from './utils.js';
// Output schema for table columns — one entry per live (non-dropped) user column.
const ListTableColumnsOutputSchema = z.array(z.object({
    column_name: z.string(),
    data_type: z.string(),
    is_nullable: z.boolean(),
    column_default: z.string().nullable(),
    description: z.string().nullable(),
    ordinal_position: z.number(),
    character_maximum_length: z.number().nullable(),
    numeric_precision: z.number().nullable(),
    numeric_scale: z.number().nullable(),
    is_identity: z.boolean(),
    identity_generation: z.string().nullable(),
    is_generated: z.boolean(),
    generation_expression: z.string().nullable(),
}));
// Input schema
const ListTableColumnsInputSchema = z.object({
    schema: z.string().default('public').describe('Schema name (defaults to public).'),
    table: z.string().describe('Table name to get columns for.'),
});
type ListTableColumnsInput = z.infer<typeof ListTableColumnsInputSchema>;
// Static JSON Schema for MCP capabilities
const mcpInputSchema = {
    type: 'object',
    properties: {
        schema: {
            type: 'string',
            default: 'public',
            description: 'Schema name (defaults to public).',
        },
        table: {
            type: 'string',
            description: 'Table name to get columns for.',
        },
    },
    required: ['table'],
};
/**
 * list_table_columns tool.
 *
 * Returns detailed per-column metadata (formatted type, nullability, default,
 * identity/generated info, length/precision/scale) for one table, read from
 * pg_catalog. Throws when the table does not exist or has no columns.
 */
export const listTableColumnsTool = {
    name: 'list_table_columns',
    description: 'Lists all columns for a table with detailed metadata including types, defaults, and constraints.',
    privilegeLevel: 'regular' as ToolPrivilegeLevel,
    inputSchema: ListTableColumnsInputSchema,
    mcpInputSchema: mcpInputSchema,
    outputSchema: ListTableColumnsOutputSchema,
    execute: async (input: ListTableColumnsInput, context: ToolContext) => {
        const client = context.selfhostedClient;
        const { schema, table } = input;
        // Identifier validation: rejects quotes and whitespace, so the direct
        // interpolation into the SQL below cannot be used for injection.
        const identifierPattern = /^[a-zA-Z_][a-zA-Z0-9_$]*$/;
        if (!identifierPattern.test(schema)) {
            throw new Error(`Invalid schema name: ${schema}`);
        }
        if (!identifierPattern.test(table)) {
            throw new Error(`Invalid table name: ${table}`);
        }
        // typmod decoding notes:
        // - char/varchar (1042/1043): atttypmod = declared length + 4, or -1 when unconstrained.
        // - numeric (1700): atttypmod packs ((precision << 16) | scale) + 4, and is -1
        //   for unconstrained NUMERIC, in which case precision/scale are reported NULL.
        // - Integer types always have atttypmod = -1; their precision is the fixed
        //   bit width of the type (16/32/64). The previous expression decoded the
        //   -1 typmod and produced a bogus 65535 for every integer column.
        const sql = `
SELECT
a.attname AS column_name,
pg_catalog.format_type(a.atttypid, a.atttypmod) AS data_type,
NOT a.attnotnull AS is_nullable,
pg_get_expr(d.adbin, d.adrelid) AS column_default,
col_description(c.oid, a.attnum) AS description,
a.attnum AS ordinal_position,
CASE
WHEN a.atttypid = ANY(ARRAY[1042, 1043]) -- bpchar, varchar
THEN NULLIF(a.atttypmod, -1) - 4
ELSE NULL
END AS character_maximum_length,
CASE
WHEN a.atttypid = 21 THEN 16 -- int2
WHEN a.atttypid = 23 THEN 32 -- int4
WHEN a.atttypid = 20 THEN 64 -- int8
WHEN a.atttypid = 1700 AND a.atttypmod <> -1 -- constrained numeric
THEN ((a.atttypmod - 4) >> 16) & 65535
ELSE NULL
END AS numeric_precision,
CASE
WHEN a.atttypid = 1700 AND a.atttypmod <> -1 -- constrained numeric
THEN (a.atttypmod - 4) & 65535
ELSE NULL
END AS numeric_scale,
a.attidentity != '' AS is_identity,
CASE a.attidentity
WHEN 'a' THEN 'ALWAYS'
WHEN 'd' THEN 'BY DEFAULT'
ELSE NULL
END AS identity_generation,
a.attgenerated != '' AS is_generated,
pg_get_expr(g.adbin, g.adrelid) AS generation_expression
FROM pg_catalog.pg_attribute a
JOIN pg_catalog.pg_class c ON c.oid = a.attrelid
JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
LEFT JOIN pg_catalog.pg_attrdef d ON d.adrelid = a.attrelid AND d.adnum = a.attnum AND a.attgenerated = ''
LEFT JOIN pg_catalog.pg_attrdef g ON g.adrelid = a.attrelid AND g.adnum = a.attnum AND a.attgenerated != ''
WHERE n.nspname = '${schema}'
AND c.relname = '${table}'
AND a.attnum > 0
AND NOT a.attisdropped
ORDER BY a.attnum
`;
        const result = await executeSqlWithFallback(client, sql, true);
        if (isSqlErrorResponse(result)) {
            throw new Error(result.error.message || 'Failed to list table columns');
        }
        // An empty success means the table is missing (or truly column-less);
        // surface that explicitly instead of returning [].
        if ((result as unknown[]).length === 0) {
            throw new Error(`Table ${schema}.${table} not found or has no columns.`);
        }
        return handleSqlResponse(result, ListTableColumnsOutputSchema);
    },
};

View File

@ -0,0 +1,69 @@
import { z } from 'zod';
import type { SelfhostedSupabaseClient } from '../client/index.js';
import { handleSqlResponse, executeSqlWithFallback } from './utils.js';
import type { ToolContext } from './types.js';
// Output: one row per accessible table; ORDER BY groups rows by schema.
const ListTablesOutputSchema = z.array(z.object({
    schema: z.string(),
    name: z.string(),
    comment: z.string().nullable().optional(), // table comment, when present
}));

// No parameters yet; a schema filter could be added later.
const ListTablesInputSchema = z.object({});
type ListTablesInput = z.infer<typeof ListTablesInputSchema>;

// Parameter-free JSON Schema advertised through MCP capabilities.
const mcpInputSchema = {
    type: 'object',
    properties: {},
    required: [],
};

/**
 * list_tables tool.
 *
 * Returns ordinary tables (relkind = 'r') visible to the connected role,
 * excluding PostgreSQL system schemas, temp schemas and Supabase-internal
 * schemas, and only where the role holds USAGE on the schema and SELECT on
 * the table.
 */
export const listTablesTool = {
    name: 'list_tables',
    description: 'Lists all accessible tables in the connected database, grouped by schema.',
    inputSchema: ListTablesInputSchema,
    mcpInputSchema: mcpInputSchema,
    outputSchema: ListTablesOutputSchema,
    execute: async (_input: ListTablesInput, context: ToolContext) => {
        const sql = `
SELECT
n.nspname as schema,
c.relname as name,
pgd.description as comment
FROM
pg_catalog.pg_class c
JOIN
pg_catalog.pg_namespace n ON n.oid = c.relnamespace
LEFT JOIN
pg_catalog.pg_description pgd ON pgd.objoid = c.oid AND pgd.objsubid = 0
WHERE
c.relkind = 'r' -- r = ordinary table
AND n.nspname NOT IN ('pg_catalog', 'information_schema', 'pg_toast')
AND n.nspname NOT LIKE 'pg_temp_%'
AND n.nspname NOT LIKE 'pg_toast_temp_%'
-- Exclude Supabase internal schemas
AND n.nspname NOT IN ('auth', 'storage', 'extensions', 'graphql', 'graphql_public', 'pgbouncer', 'realtime', 'supabase_functions', 'supabase_migrations', '_realtime')
AND has_schema_privilege(n.oid, 'USAGE')
AND has_table_privilege(c.oid, 'SELECT')
ORDER BY
n.nspname,
c.relname
`;
        const result = await executeSqlWithFallback(context.selfhostedClient, sql, true);
        return handleSqlResponse(result, ListTablesOutputSchema);
    },
};

View File

@ -0,0 +1,117 @@
import { z } from 'zod';
import type { ToolContext, ToolPrivilegeLevel } from './types.js';
import { handleSqlResponse, executeSqlWithFallback } from './utils.js';
// Output schema for triggers.
const ListTriggersOutputSchema = z.array(z.object({
schema_name: z.string(),
table_name: z.string(),
trigger_name: z.string(),
trigger_timing: z.string(), // BEFORE, AFTER, INSTEAD OF
trigger_level: z.string(), // ROW or STATEMENT
events: z.array(z.string()), // INSERT, UPDATE, DELETE, TRUNCATE
function_schema: z.string(),
function_name: z.string(),
enabled: z.string(), // O=enabled, D=disabled, R=replica, A=always
}));
// Input schema with optional filters; both filters may be combined.
const ListTriggersInputSchema = z.object({
schema: z.string().optional().describe('Filter triggers by schema name.'),
table: z.string().optional().describe('Filter triggers by table name.'),
});
type ListTriggersInput = z.infer<typeof ListTriggersInputSchema>;
// Static JSON Schema for MCP capabilities
const mcpInputSchema = {
type: 'object',
properties: {
schema: {
type: 'string',
description: 'Filter triggers by schema name.',
},
table: {
type: 'string',
description: 'Filter triggers by table name.',
},
},
required: [],
};
// SQL identifier validation pattern; rejects quotes/whitespace so the
// filter values can be interpolated into the WHERE clause without injection.
const identifierPattern = /^[a-zA-Z_][a-zA-Z0-9_$]*$/;
export const listTriggersTool = {
name: 'list_triggers',
description: 'Lists all triggers on tables. Can filter by schema and/or table name.',
privilegeLevel: 'regular' as ToolPrivilegeLevel,
inputSchema: ListTriggersInputSchema,
mcpInputSchema: mcpInputSchema,
outputSchema: ListTriggersOutputSchema,
// Builds and runs the pg_trigger query, decoding the tgtype bitmask into
// readable timing/level/event values.
execute: async (input: ListTriggersInput, context: ToolContext) => {
const client = context.selfhostedClient;
const { schema, table } = input;
// Validate identifiers if provided
if (schema && !identifierPattern.test(schema)) {
throw new Error(`Invalid schema name: ${schema}`);
}
if (table && !identifierPattern.test(table)) {
throw new Error(`Invalid table name: ${table}`);
}
// Build WHERE conditions
const conditions: string[] = [
'NOT t.tgisinternal', // Exclude internal triggers (e.g. FK enforcement)
"n.nspname NOT IN ('pg_catalog', 'information_schema', 'pg_toast')",
];
if (schema) {
conditions.push(`n.nspname = '${schema}'`);
}
if (table) {
conditions.push(`c.relname = '${table}'`);
}
const whereClause = conditions.join(' AND ');
// tgtype bits as decoded below: 1 = ROW-level, 2 = BEFORE, 64 = INSTEAD OF,
// 4/8/16/32 = fires on INSERT/DELETE/UPDATE/TRUNCATE respectively.
const sql = `
SELECT
n.nspname AS schema_name,
c.relname AS table_name,
t.tgname AS trigger_name,
CASE
WHEN t.tgtype::int & 2 > 0 THEN 'BEFORE'
WHEN t.tgtype::int & 64 > 0 THEN 'INSTEAD OF'
ELSE 'AFTER'
END AS trigger_timing,
CASE WHEN t.tgtype::int & 1 > 0 THEN 'ROW' ELSE 'STATEMENT' END AS trigger_level,
ARRAY_REMOVE(ARRAY[
CASE WHEN t.tgtype::int & 4 > 0 THEN 'INSERT' END,
CASE WHEN t.tgtype::int & 8 > 0 THEN 'DELETE' END,
CASE WHEN t.tgtype::int & 16 > 0 THEN 'UPDATE' END,
CASE WHEN t.tgtype::int & 32 > 0 THEN 'TRUNCATE' END
], NULL) AS events,
pn.nspname AS function_schema,
p.proname AS function_name,
CASE t.tgenabled
WHEN 'O' THEN 'ENABLED'
WHEN 'D' THEN 'DISABLED'
WHEN 'R' THEN 'REPLICA'
WHEN 'A' THEN 'ALWAYS'
ELSE t.tgenabled::text
END AS enabled
FROM pg_catalog.pg_trigger t
JOIN pg_catalog.pg_class c ON c.oid = t.tgrelid
JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
JOIN pg_catalog.pg_proc p ON p.oid = t.tgfoid
JOIN pg_catalog.pg_namespace pn ON pn.oid = p.pronamespace
WHERE ${whereClause}
ORDER BY n.nspname, c.relname, t.tgname
`;
const result = await executeSqlWithFallback(client, sql, true);
return handleSqlResponse(result, ListTriggersOutputSchema);
},
};

View File

@ -0,0 +1,68 @@
import { z } from 'zod';
import { handleSqlResponse, executeSqlWithFallback } from './utils.js';
import type { ToolContext } from './types.js';
// One row per pgvector index.
const VectorIndexSchema = z.object({
    index_name: z.string(),
    table_name: z.string(),
    schema_name: z.string(),
    index_method: z.string(), // 'ivfflat' or 'hnsw'
    index_definition: z.string(),
});
const ListVectorIndexesOutputSchema = z.array(VectorIndexSchema);
type ListVectorIndexesOutput = z.infer<typeof ListVectorIndexesOutputSchema>;

// No parameters are accepted by this tool.
const ListVectorIndexesInputSchema = z.object({});
type ListVectorIndexesInput = z.infer<typeof ListVectorIndexesInputSchema>;

// Parameter-free JSON Schema advertised through MCP capabilities.
const mcpInputSchema = {
    type: 'object',
    properties: {},
    required: [],
};

/**
 * list_vector_indexes tool.
 *
 * Finds indexes whose access method is one of pgvector's ('ivfflat' or
 * 'hnsw'). When pgvector is not installed those access methods simply do
 * not exist, so the query returns no rows rather than erroring.
 */
export const listVectorIndexesTool = {
    name: 'list_vector_indexes',
    description: 'Lists all pgvector indexes (ivfflat, hnsw) in the database. Returns empty array if pgvector is not installed or no vector indexes exist.',
    inputSchema: ListVectorIndexesInputSchema,
    mcpInputSchema: mcpInputSchema,
    outputSchema: ListVectorIndexesOutputSchema,
    execute: async (_input: ListVectorIndexesInput, context: ToolContext): Promise<ListVectorIndexesOutput> => {
        const sql = `
SELECT
ix.relname AS index_name,
t.relname AS table_name,
n.nspname AS schema_name,
am.amname AS index_method,
pg_get_indexdef(i.indexrelid) AS index_definition
FROM pg_index i
JOIN pg_class t ON t.oid = i.indrelid
JOIN pg_class ix ON ix.oid = i.indexrelid
JOIN pg_namespace n ON n.oid = t.relnamespace
JOIN pg_am am ON am.oid = ix.relam
WHERE am.amname IN ('ivfflat', 'hnsw')
AND n.nspname NOT IN ('pg_catalog', 'information_schema', 'pg_toast')
ORDER BY n.nspname, t.relname, ix.relname
`;
        const result = await executeSqlWithFallback(context.selfhostedClient, sql, true);
        const indexes = handleSqlResponse(result, ListVectorIndexesOutputSchema);
        if (indexes.length === 0) {
            context.log('No pgvector indexes found (pgvector may not be installed or no indexes created)', 'info');
        }
        return indexes;
    },
};

View File

@ -0,0 +1,63 @@
import { z } from 'zod';
import type { SelfhostedSupabaseClient } from '../client/index.js';
// import type { McpToolDefinition } from '@modelcontextprotocol/sdk'; // Removed incorrect import
import { handleSqlResponse, executeSqlWithFallback } from './utils.js';
import type { ToolContext, ToolPrivilegeLevel } from './types.js';
// No parameters are accepted by this tool.
const RebuildHooksInputSchema = z.object({});
type RebuildHooksInput = z.infer<typeof RebuildHooksInputSchema>;

// Result: whether the restart was requested, plus a human-readable message.
const RebuildHooksOutputSchema = z.object({
    success: z.boolean(),
    message: z.string(),
});

// Parameter-free JSON Schema advertised through MCP capabilities.
const mcpInputSchema = {
    type: 'object',
    properties: {},
    required: [],
};

/**
 * rebuild_hooks tool (privileged).
 *
 * Issues `SELECT net.worker_restart()` to bounce the pg_net background
 * worker. All failure modes (pg_net missing, insufficient permissions,
 * transport errors) are reported via { success: false, message } instead of
 * being thrown.
 */
export const rebuildHooksTool = {
    name: 'rebuild_hooks',
    description: 'Attempts to restart the pg_net worker. Requires the pg_net extension to be installed and available.',
    privilegeLevel: 'privileged' as ToolPrivilegeLevel,
    inputSchema: RebuildHooksInputSchema,
    mcpInputSchema: mcpInputSchema,
    outputSchema: RebuildHooksOutputSchema,
    execute: async (_input: RebuildHooksInput, context: ToolContext) => {
        // No trailing semicolon: the statement is passed to an RPC wrapper.
        const sql = 'SELECT net.worker_restart()';
        try {
            console.error('Attempting to restart pg_net worker...');
            const result = await executeSqlWithFallback(context.selfhostedClient, sql, false);
            if ('error' in result) {
                // SQLSTATE 42883 (undefined_function) means net.worker_restart()
                // is absent — usually pg_net is not installed or not enabled.
                const notFound = result.error.code === '42883';
                const message = `Failed to restart pg_net worker: ${result.error.message}${notFound ? ' (Is pg_net installed and enabled?)' : ''}`;
                console.error(message);
                return { success: false, message };
            }
            console.error('pg_net worker restart requested successfully.');
            return { success: true, message: 'pg_net worker restart requested successfully.' };
        } catch (error: unknown) {
            // The call itself threw (e.g. connectivity); normalize the message.
            const errorMessage = error instanceof Error ? error.message : String(error);
            console.error(`Exception attempting to restart pg_net worker: ${errorMessage}`);
            return { success: false, message: `Exception attempting to restart pg_net worker: ${errorMessage}` };
        }
    },
};

View File

@ -0,0 +1,67 @@
import type { SelfhostedSupabaseClient } from '../client/index.js';
// Signature of the logger handed to tools via ToolContext.
type LogFunction = (message: string, level?: 'info' | 'warn' | 'error') => void;

/**
 * Tool privilege levels.
 * - 'regular': safe read-only operations, callable by any authenticated user.
 * - 'privileged': admin operations requiring the service_role key or a
 *   direct DB connection.
 */
export type ToolPrivilegeLevel = 'regular' | 'privileged';

/**
 * Authenticated user details extracted from a JWT (HTTP mode only).
 */
export interface UserContext {
    userId: string;
    email: string | null;
    role: string;
}

/**
 * Role -> allowed tool privilege levels. A Map is used instead of a plain
 * object so lookups cannot be influenced by prototype pollution.
 *
 * SECURITY: 'anon' deliberately gets an empty set — MCP tools expose
 * admin-level database introspection, and anon JWTs exist for public API
 * access, not admin tooling. If anonymous access is ever required, use the
 * authenticated role together with appropriate RLS instead.
 */
const ROLE_PRIVILEGE_MAP = new Map<string, Set<ToolPrivilegeLevel>>([
    ['service_role', new Set<ToolPrivilegeLevel>(['regular', 'privileged'])],
    ['authenticated', new Set<ToolPrivilegeLevel>(['regular'])],
    ['anon', new Set<ToolPrivilegeLevel>([])],
]);

// Unknown roles fall back to the authenticated (regular-only) level.
const DEFAULT_PRIVILEGES = new Set<ToolPrivilegeLevel>(['regular']);

/**
 * Returns true when `userRole` may invoke a tool that requires
 * `toolPrivilegeLevel`.
 *
 * @param userRole - The role claim from the JWT token.
 * @param toolPrivilegeLevel - The privilege level the tool declares.
 */
export function canAccessTool(
    userRole: string,
    toolPrivilegeLevel: ToolPrivilegeLevel
): boolean {
    // Map.get() is immune to prototype-chain lookups, unlike obj[key].
    return (ROLE_PRIVILEGE_MAP.get(userRole) ?? DEFAULT_PRIVILEGES).has(toolPrivilegeLevel);
}

/**
 * Context object handed to every tool's execute function.
 */
export interface ToolContext {
    selfhostedClient: SelfhostedSupabaseClient;
    log: LogFunction; // structured logger forwarded to the MCP client
    workspacePath?: string; // workspace root, when known
    user?: UserContext; // JWT user context (HTTP mode only)
    [key: string]: unknown; // tolerate extra context properties
}

View File

@ -0,0 +1,163 @@
import { z } from 'zod';
import type { ToolContext, ToolPrivilegeLevel } from './types.js';
import type { PoolClient } from 'pg';
import type { AuthUser } from '../types/index.js'; // Import AuthUser
// Input schema
// At least one mutable field must accompany user_id.
const UpdateAuthUserInputSchema = z.object({
    user_id: z.string().uuid().describe('The UUID of the user to update.'),
    email: z.optional(z.string().email('Invalid email')).describe('New email address.'),
    password: z.optional(z.string().min(6, 'Password must be at least 6 characters')).describe('New plain text password (min 6 chars). WARNING: Insecure.'),
    role: z.optional(z.string()).describe('New role.'),
    app_metadata: z.optional(z.record(z.string(), z.unknown())).describe('New app metadata (will overwrite existing).'),
    user_metadata: z.optional(z.record(z.string(), z.unknown())).describe('New user metadata (will overwrite existing).'),
}).refine(data =>
    // Fix: test presence (!== undefined) instead of truthiness so a
    // provided-but-falsy value (e.g. role: '') is still recognised as an
    // update instead of being rejected as "no fields provided".
    data.email !== undefined || data.password !== undefined || data.role !== undefined ||
    data.app_metadata !== undefined || data.user_metadata !== undefined,
    { message: "At least one field to update (email, password, role, app_metadata, user_metadata) must be provided." }
);
type UpdateAuthUserInput = z.infer<typeof UpdateAuthUserInputSchema>;
// Output schema - Zod validation for the updated user
// Timestamps are strings because the UPDATE casts them with ::text in RETURNING.
const UpdatedAuthUserZodSchema = z.object({
    id: z.string().uuid(),
    email: z.string().email('Invalid email').nullable(),
    role: z.string().nullable(),
    created_at: z.string().nullable(),
    updated_at: z.string().nullable(), // Expect this to be updated
    last_sign_in_at: z.string().nullable(),
    raw_app_meta_data: z.record(z.string(), z.unknown()).nullable(),
    raw_user_meta_data: z.record(z.string(), z.unknown()).nullable(),
});
// Use AuthUser for the output type hint
type UpdateAuthUserOutput = AuthUser;
// Static JSON Schema for MCP
// Maintained by hand; keep in sync with UpdateAuthUserInputSchema above.
const mcpInputSchema = {
    type: 'object',
    properties: {
        user_id: { type: 'string', format: 'uuid', description: 'The UUID of the user to update.' },
        email: { type: 'string', format: 'email', description: 'New email address.' },
        password: { type: 'string', minLength: 6, description: 'New plain text password (min 6 chars). WARNING: Insecure.' },
        role: { type: 'string', description: 'New role.' },
        user_metadata: { type: 'object', description: 'New user metadata (will overwrite existing).' },
        app_metadata: { type: 'object', description: 'New app metadata (will overwrite existing).' },
    },
    required: ['user_id'],
};
// Tool definition
export const updateAuthUserTool = {
    name: 'update_auth_user',
    description: 'Updates fields for a user in auth.users. WARNING: Password handling is insecure. Requires service_role key and direct DB connection.',
    privilegeLevel: 'privileged' as ToolPrivilegeLevel,
    inputSchema: UpdateAuthUserInputSchema,
    mcpInputSchema: mcpInputSchema, // Ensure defined
    outputSchema: UpdatedAuthUserZodSchema,
    /**
     * Builds a dynamic parameterized UPDATE against auth.users from the
     * provided fields and runs it inside a transaction over the direct
     * Postgres connection. Throws when no direct DB connection is configured,
     * when the target user does not exist, or on database errors.
     */
    execute: async (input: UpdateAuthUserInput, context: ToolContext): Promise<UpdateAuthUserOutput> => { // Use UpdateAuthUserOutput
        const client = context.selfhostedClient;
        const { user_id, email, password, role, app_metadata, user_metadata } = input;
        if (!client.isPgAvailable()) {
            context.log('Direct database connection (DATABASE_URL) is required to update auth user details.', 'error');
            throw new Error('Direct database connection (DATABASE_URL) is required to update auth user details.');
        }
        // SET clause fragments and their positional parameters, built in lockstep.
        const updates: string[] = [];
        const params: (string | object | null)[] = [];
        let paramIndex = 1;
        // Dynamically build SET clauses and params array
        if (email !== undefined) {
            updates.push(`email = $${paramIndex++}`);
            params.push(email);
        }
        // SECURITY NOTE: The `password !== undefined` check below is NOT a timing attack.
        // We're only checking if the field was provided, not comparing password values.
        // Actual password comparison happens in the database via bcrypt which is constant-time.
        if (password !== undefined) {
            // Hash in-database via pgcrypto's crypt()/gen_salt('bf') (bcrypt).
            updates.push(`encrypted_password = crypt($${paramIndex++}, gen_salt('bf'))`);
            params.push(password);
        }
        if (role !== undefined) {
            updates.push(`role = $${paramIndex++}`);
            params.push(role);
        }
        if (app_metadata !== undefined) {
            updates.push(`raw_app_meta_data = $${paramIndex++}::jsonb`);
            params.push(JSON.stringify(app_metadata));
        }
        if (user_metadata !== undefined) {
            updates.push(`raw_user_meta_data = $${paramIndex++}::jsonb`);
            params.push(JSON.stringify(user_metadata));
        }
        // Add user_id as the final parameter for the WHERE clause.
        // After the post-increments above, paramIndex is exactly one past the
        // last SET placeholder, i.e. the placeholder number user_id occupies.
        params.push(user_id);
        const userIdParamIndex = paramIndex;
        // Timestamps are cast ::text so rows satisfy UpdatedAuthUserZodSchema.
        const sql = `
            UPDATE auth.users
            SET ${updates.join(', ')}, updated_at = NOW()
            WHERE id = $${userIdParamIndex}
            RETURNING id, email, role, raw_app_meta_data, raw_user_meta_data, created_at::text, updated_at::text, last_sign_in_at::text;
        `;
        // console.error writes to stderr, which is safe for stdio MCP transports.
        console.error(`Attempting to update auth user ${user_id}...`);
        context.log(`Attempting to update auth user ${user_id}...`);
        const updatedUser = await client.executeTransactionWithPg(async (pgClient: PoolClient) => {
            // Check pgcrypto if password is being updated
            if (password !== undefined) {
                try {
                    await pgClient.query("SELECT crypt('test', gen_salt('bf'))");
                } catch (err) {
                    throw new Error('Failed to execute crypt function for password update. Ensure pgcrypto extension is enabled.');
                }
            }
            try {
                const result = await pgClient.query(sql, params);
                if (result.rows.length === 0) {
                    throw new Error(`User update failed: User with ID ${user_id} not found or no rows affected.`);
                }
                return UpdatedAuthUserZodSchema.parse(result.rows[0]);
            } catch (dbError: unknown) {
                let errorMessage = 'Unknown database error during user update';
                // Check for potential email unique constraint violation if email was updated
                if (typeof dbError === 'object' && dbError !== null && 'code' in dbError) {
                    // Safely extract code and message with proper type narrowing
                    const errorCode = String((dbError as { code: unknown }).code);
                    const errorMsg = 'message' in dbError && typeof (dbError as { message: unknown }).message === 'string'
                        ? (dbError as { message: string }).message
                        : undefined;
                    // Check PG error code for unique violation
                    if (email !== undefined && errorCode === '23505') {
                        errorMessage = `User update failed: Email '${email}' likely already exists for another user.`;
                    } else if (errorMsg) {
                        errorMessage = `Database error (${errorCode}): ${errorMsg}`;
                    } else {
                        errorMessage = `Database error code: ${errorCode}`;
                    }
                } else if (dbError instanceof Error) {
                    errorMessage = `Database error during user update: ${dbError.message}`;
                } else {
                    errorMessage = `Database error during user update: ${String(dbError)}`;
                }
                // Log sanitized error (not full object to avoid leaking sensitive info)
                console.error('Error updating user in DB:', errorMessage);
                // Throw the specific error message
                throw new Error(errorMessage);
            }
        });
        console.error(`Successfully updated user ${user_id}.`);
        context.log(`Successfully updated user ${user_id}.`);
        return updatedUser; // Matches UpdateAuthUserOutput (AuthUser)
    },
};

View File

@ -0,0 +1,183 @@
import { z } from 'zod';
import { handleSqlResponse, executeSqlWithFallback, isSqlErrorResponse } from './utils.js';
import type { ToolContext, ToolPrivilegeLevel } from './types.js';
// Schema for updated bucket output
const UpdatedBucketSchema = z.object({
    id: z.string(),
    name: z.string(),
    public: z.boolean(),
    file_size_limit: z.number().nullable(),
    allowed_mime_types: z.array(z.string()).nullable(),
});
// Tool result: success flag, the updated bucket row (null on failure),
// and a human-readable message.
const UpdateStorageConfigOutputSchema = z.object({
    success: z.boolean(),
    bucket: UpdatedBucketSchema.nullable(),
    message: z.string(),
});
type UpdateStorageConfigOutput = z.infer<typeof UpdateStorageConfigOutputSchema>;
// Input schema
const UpdateStorageConfigInputSchema = z.object({
    bucket_id: z.string().describe('The bucket ID to update'),
    file_size_limit: z.number().min(0).optional().describe('Maximum file size in bytes (0 or null for no limit)'),
    allowed_mime_types: z.array(z.string()).optional().describe('Array of allowed MIME types (e.g., ["image/png", "image/jpeg"]). Empty array means all types allowed.'),
    public: z.boolean().optional().describe('Whether the bucket is publicly accessible'),
});
type UpdateStorageConfigInput = z.infer<typeof UpdateStorageConfigInputSchema>;
// Static JSON Schema for MCP capabilities
// Maintained by hand; keep in sync with UpdateStorageConfigInputSchema above.
const mcpInputSchema = {
    type: 'object',
    properties: {
        bucket_id: {
            type: 'string',
            description: 'The bucket ID to update',
        },
        file_size_limit: {
            type: 'number',
            minimum: 0,
            description: 'Maximum file size in bytes (0 or null for no limit)',
        },
        allowed_mime_types: {
            type: 'array',
            items: { type: 'string' },
            description: 'Array of allowed MIME types (e.g., ["image/png", "image/jpeg"]). Empty array means all types allowed.',
        },
        public: {
            type: 'boolean',
            description: 'Whether the bucket is publicly accessible',
        },
    },
    required: ['bucket_id'],
};
// Tool definition
export const updateStorageConfigTool = {
    name: 'update_storage_config',
    description: 'Updates storage configuration for a Supabase Storage bucket. Can modify file size limits, allowed MIME types, and public/private status.',
    privilegeLevel: 'privileged' as ToolPrivilegeLevel,
    inputSchema: UpdateStorageConfigInputSchema,
    mcpInputSchema: mcpInputSchema,
    outputSchema: UpdateStorageConfigOutputSchema,
    /**
     * Verifies the storage schema and target bucket exist, then issues an
     * UPDATE on storage.buckets for the requested fields. Returns a
     * success/message result object rather than throwing on expected failures.
     *
     * SQL is built as a string because it goes through executeSqlWithFallback
     * (no bind parameters available); string inputs are escaped by doubling
     * single quotes, and numeric/boolean inputs are validated by the Zod schema.
     */
    execute: async (input: UpdateStorageConfigInput, context: ToolContext): Promise<UpdateStorageConfigOutput> => {
        const client = context.selfhostedClient;
        const { bucket_id, file_size_limit, allowed_mime_types, public: isPublic } = input;
        // Check if storage schema exists
        const checkSchemaSql = `
            SELECT EXISTS (
                SELECT 1 FROM pg_catalog.pg_namespace WHERE nspname = 'storage'
            ) AS exists
        `;
        const schemaCheckResult = await executeSqlWithFallback(client, checkSchemaSql, true);
        if (!Array.isArray(schemaCheckResult) || schemaCheckResult.length === 0 || !schemaCheckResult[0]?.exists) {
            return {
                success: false,
                bucket: null,
                message: 'Storage schema not found - Storage may not be configured',
            };
        }
        // Check if bucket exists
        // Escape single quotes (SQL standard '' doubling) before interpolation.
        const escapedBucketId = bucket_id.replace(/'/g, "''");
        const checkBucketSql = `
            SELECT EXISTS (
                SELECT 1 FROM storage.buckets WHERE id = '${escapedBucketId}'
            ) AS exists
        `;
        const bucketCheckResult = await executeSqlWithFallback(client, checkBucketSql, true);
        if (!Array.isArray(bucketCheckResult) || bucketCheckResult.length === 0 || !bucketCheckResult[0]?.exists) {
            return {
                success: false,
                bucket: null,
                message: `Bucket '${bucket_id}' not found`,
            };
        }
        // Build update query from only the fields that were provided.
        const updates: string[] = [];
        if (file_size_limit !== undefined) {
            // 0 means "no limit", stored as NULL.
            updates.push(`file_size_limit = ${file_size_limit === 0 ? 'NULL' : file_size_limit}`);
        }
        if (allowed_mime_types !== undefined) {
            if (allowed_mime_types.length === 0) {
                // Empty array means "allow all", stored as NULL.
                updates.push('allowed_mime_types = NULL');
            } else {
                const escapedTypes = allowed_mime_types.map((t) => `'${t.replace(/'/g, "''")}'`).join(', ');
                updates.push(`allowed_mime_types = ARRAY[${escapedTypes}]`);
            }
        }
        if (isPublic !== undefined) {
            updates.push(`public = ${isPublic}`);
        }
        if (updates.length === 0) {
            return {
                success: false,
                bucket: null,
                message: 'No updates specified. Provide at least one of: file_size_limit, allowed_mime_types, or public',
            };
        }
        updates.push('updated_at = NOW()');
        const updateSql = `
            UPDATE storage.buckets
            SET ${updates.join(', ')}
            WHERE id = '${escapedBucketId}'
            RETURNING id, name, public, file_size_limit, allowed_mime_types
        `;
        // readOnly=false: this statement mutates data.
        const updateResult = await executeSqlWithFallback(client, updateSql, false);
        if (isSqlErrorResponse(updateResult)) {
            return {
                success: false,
                bucket: null,
                message: `Failed to update bucket: ${updateResult.error.message}`,
            };
        }
        const resultSchema = z.array(
            z.object({
                id: z.string(),
                name: z.string(),
                public: z.boolean(),
                file_size_limit: z.number().nullable(),
                allowed_mime_types: z.array(z.string()).nullable(),
            })
        );
        try {
            const updatedBuckets = handleSqlResponse(updateResult, resultSchema);
            if (updatedBuckets.length === 0) {
                return {
                    success: false,
                    bucket: null,
                    message: 'Update executed but no rows returned',
                };
            }
            return {
                success: true,
                bucket: updatedBuckets[0],
                message: `Successfully updated bucket '${bucket_id}'`,
            };
        } catch (error) {
            return {
                success: false,
                bucket: null,
                message: `Failed to parse update result: ${error}`,
            };
        }
    },
};

View File

@ -0,0 +1,143 @@
import { z } from 'zod';
import type { SqlExecutionResult, SqlErrorResponse } from '../types/index.js';
import { exec } from 'node:child_process';
import { promisify } from 'node:util';
import type { SelfhostedSupabaseClient } from '../client/index.js';
const execAsync = promisify(exec);
/**
 * Redacts sensitive credentials from a database URL for safe logging.
 * Replaces the password with asterisks while preserving the URL structure.
 *
 * @param url - The database URL potentially containing credentials
 * @returns The URL with the password replaced by '****'
 *
 * @example
 * redactDatabaseUrl('postgresql://user:secret@localhost:5432/db')
 * // Returns: 'postgresql://user:****@localhost:5432/db'
 */
export function redactDatabaseUrl(url: string): string {
    let parsed: URL;
    try {
        parsed = new URL(url);
    } catch {
        // Unparseable URL: best-effort regex mask of a `:password@` segment.
        return url.replace(/:([^:@]+)@/, ':****@');
    }
    if (parsed.password) {
        parsed.password = '****';
    }
    return parsed.toString();
}
/**
 * Sanitizes an error for safe logging by extracting only safe properties.
 * Drops stack traces and other context while keeping the message and, when
 * present, the error code (common on database driver errors).
 *
 * @param error - The error value to sanitize (any type)
 * @returns A safe string representation, e.g. "[23505] duplicate key"
 */
export function sanitizeErrorForLogging(error: unknown): string {
    if (error instanceof Error) {
        // Node DB drivers attach a non-standard `code` property to Error.
        const errCode = (error as { code?: string }).code;
        return errCode ? `[${errCode}] ${error.message}` : error.message;
    }
    if (typeof error === 'object' && error !== null) {
        const record = error as { message?: unknown; code?: unknown };
        if (typeof record.message === 'string') {
            return typeof record.code === 'string'
                ? `[${record.code}] ${record.message}`
                : record.message;
        }
    }
    return String(error);
}
/**
 * Type guard: narrows a SQL execution result to SqlErrorResponse.
 * Success responses are arrays and never carry an `error` property.
 */
export function isSqlErrorResponse(result: SqlExecutionResult): result is SqlErrorResponse {
    const candidate = result as Partial<SqlErrorResponse>;
    return candidate.error !== undefined;
}
/**
 * Validates a SQL execution result against the expected Zod schema.
 * Throws if the result carries an error payload or fails validation.
 *
 * @param result - Raw result from a SQL execution helper
 * @param schema - Zod schema describing the expected row shape
 * @returns The parsed, typed result
 */
export function handleSqlResponse<T>(result: SqlExecutionResult, schema: z.ZodSchema<T>): T {
    // Error payloads short-circuit before any schema work.
    if ('error' in result) {
        throw new Error(`SQL Error (${result.error.code}): ${result.error.message}`);
    }
    try {
        return schema.parse(result);
    } catch (validationError) {
        if (validationError instanceof z.ZodError) {
            const detail = validationError.issues
                .map((issue) => `${issue.path.join('.')}: ${issue.message}`)
                .join(', ');
            throw new Error(`Schema validation failed: ${detail}`);
        }
        throw new Error(`Unexpected validation error: ${validationError}`);
    }
}
/**
 * Executes an external shell command asynchronously.
 * Never rejects: a non-zero exit is reported through the `error` field,
 * with whatever stdout/stderr the process produced.
 *
 * @param command - The shell command line to run
 * @returns stdout, stderr, and the execution error (null on success)
 */
export async function runExternalCommand(command: string): Promise<{
    stdout: string;
    stderr: string;
    error: Error | null;
}> {
    try {
        const result = await execAsync(command);
        return { stdout: result.stdout, stderr: result.stderr, error: null };
    } catch (caught: unknown) {
        // exec rejects on non-zero exit; the rejection carries stdout/stderr.
        const failure = caught as Error & { stdout?: string; stderr?: string };
        return {
            stdout: failure.stdout || '',
            stderr: failure.stderr || failure.message, // Use message if stderr is empty
            error: failure,
        };
    }
}
/**
 * Executes SQL using the best available privileged method.
 *
 * Order of preference:
 * 1. Direct database connection (no JWT involved, most reliable for dev)
 * 2. Service role RPC (execute_sql function with service_role privileges)
 * 3. Fails closed with a configuration error if neither is available
 *
 * SECURITY NOTE: For PRIVILEGED operations only. The execute_sql RPC is
 * restricted to service_role - authenticated users cannot call it.
 *
 * @param client - The self-hosted Supabase client wrapper
 * @param sql - The SQL text to execute
 * @param readOnly - Passed through to the RPC path (default true)
 */
export async function executeSqlWithFallback(
    client: SelfhostedSupabaseClient,
    sql: string,
    readOnly: boolean = true
): Promise<SqlExecutionResult> {
    if (client.isPgAvailable()) {
        console.info('Using direct database connection (bypassing JWT)...');
        return await client.executeSqlWithPg(sql);
    }
    if (client.isServiceRoleAvailable()) {
        console.info('Using service role RPC method...');
        return await client.executeSqlViaServiceRoleRpc(sql, readOnly);
    }
    // Neither path is configured: return an explicit error payload.
    return {
        error: {
            message: 'Neither direct database connection (DATABASE_URL) nor service role key (SUPABASE_SERVICE_ROLE_KEY) is configured. Cannot execute SQL.',
            code: 'MCP_CONFIG_ERROR',
        },
    };
}

View File

@ -0,0 +1,40 @@
import { z } from 'zod';
import type { SelfhostedSupabaseClient } from '../client/index.js';
import type { ToolContext, ToolPrivilegeLevel } from './types.js';
// Input schema (none needed)
const VerifyJwtInputSchema = z.object({});
type VerifyJwtInput = z.infer<typeof VerifyJwtInputSchema>;
// Output schema - SECURITY: Removed jwt_secret_preview to avoid leaking secret info
const VerifyJwtOutputSchema = z.object({
    jwt_secret_status: z.enum(['found', 'not_configured']).describe('Whether the JWT secret was provided to the server.'),
});
// Static JSON Schema for MCP capabilities (tool takes no arguments)
const mcpInputSchema = {
    type: 'object',
    properties: {},
    required: [],
};
// The tool definition
export const verifyJwtSecretTool = {
    name: 'verify_jwt_secret',
    description: 'Checks if the Supabase JWT secret is configured for this server.',
    privilegeLevel: 'regular' as ToolPrivilegeLevel,
    inputSchema: VerifyJwtInputSchema,
    mcpInputSchema: mcpInputSchema,
    outputSchema: VerifyJwtOutputSchema,
    /**
     * Reports only whether a JWT secret is configured. The secret value
     * (and any preview of it) is deliberately never returned.
     */
    execute: async (input: VerifyJwtInput, context: ToolContext) => {
        const hasSecret = Boolean(context.selfhostedClient.getJwtSecret());
        return hasSecret
            ? { jwt_secret_status: 'found' as const }
            : { jwt_secret_status: 'not_configured' as const };
    },
};

View File

@ -0,0 +1,85 @@
import type { SupabaseClientOptions } from '@supabase/supabase-js';
/**
 * Configuration options for the SelfhostedSupabaseClient.
 */
export interface SelfhostedSupabaseClientOptions {
    supabaseUrl: string; // Base URL of the self-hosted Supabase instance
    supabaseAnonKey: string; // Public anon key used for default API access
    supabaseServiceRoleKey?: string; // Optional, but needed for some operations like auto-creating helpers
    databaseUrl?: string; // Optional, but needed for direct DB access/transactions
    jwtSecret?: string; // Add JWT Secret
    supabaseClientOptions?: SupabaseClientOptions<"public">;
}
/**
 * Represents the structure of a successful SQL execution result via the RPC function.
 * An array of rows, each a column-name -> value record.
 */
export type SqlSuccessResponse = Record<string, unknown>[];
/**
 * Represents the structure of an error during SQL execution.
 */
export interface SqlErrorResponse {
    error: {
        message: string;
        code?: string; // e.g., PostgreSQL error code
        details?: string;
        hint?: string;
    };
}
/**
 * Represents the result of an SQL execution, which can be success or error.
 * Discriminate with the `isSqlErrorResponse` type guard.
 */
export type SqlExecutionResult = SqlSuccessResponse | SqlErrorResponse;
// --- Core Data Structure Interfaces ---
/**
 * Represents a user object from the auth.users table.
 * Based on fields selected in listAuthUsersTool, getAuthUserTool etc.
 */
export interface AuthUser {
    id: string; // uuid
    email: string | null;
    role: string | null;
    created_at: string | null; // Timestamps returned as text from DB
    last_sign_in_at: string | null;
    raw_app_meta_data: Record<string, unknown> | null;
    raw_user_meta_data: Record<string, unknown> | null;
    // Add other relevant fields if needed, e.g., email_confirmed_at
}
/**
 * Represents a storage bucket from the storage.buckets table.
 */
export interface StorageBucket {
    id: string;
    name: string;
    owner: string | null;
    public: boolean;
    avif_autodetection: boolean;
    file_size_limit: number | null; // Bytes; null means no limit
    allowed_mime_types: string[] | null; // null means all types allowed
    created_at: string | null; // Timestamps returned as text from DB
    updated_at: string | null;
}
/**
 * Represents a storage object from the storage.objects table.
 * Based on fields selected in listStorageObjectsTool.
 */
export interface StorageObject {
    id: string; // uuid
    name: string | null;
    bucket_id: string;
    owner: string | null; // uuid
    version: string | null;
    mimetype: string | null; // Extracted from metadata
    size: number | null; // Extracted from metadata, parsed as number
    metadata: Record<string, unknown> | null; // Use unknown instead of any
    created_at: string | null; // Timestamps returned as text from DB
    updated_at: string | null;
    last_accessed_at: string | null;
}

View File

@ -0,0 +1,18 @@
{
"compilerOptions": {
"target": "ES2022",
"module": "NodeNext",
"moduleResolution": "NodeNext",
"rootDir": "./src",
"outDir": "./dist",
"esModuleInterop": true,
"forceConsistentCasingInFileNames": true,
"strict": true,
"skipLibCheck": true,
"resolveJsonModule": true,
"sourceMap": true,
"isolatedModules": true
},
"include": ["src/**/*"],
"exclude": ["node_modules", "dist"]
}

View File

@ -223,4 +223,57 @@ services:
config: config:
hide_groups_header: true hide_groups_header: true
allow: allow:
- admin - admin
## MCP Server routes - Model Context Protocol for AI integrations
## Authentication is handled by the MCP server itself (JWT validation)
- name: mcp-v1
  _comment: 'MCP Server: /mcp/v1/* -> http://mcp:3100/mcp'
  url: http://mcp:3100
  routes:
    - name: mcp-v1-all
      strip_path: true
      paths:
        - /mcp/v1/
  plugins:
    ## strip_path removes /mcp/v1/ and the transformer pins the upstream
    ## path to /mcp, so all client paths map onto the single MCP endpoint.
    - name: request-transformer
      config:
        replace:
          uri: /mcp
    - name: cors
      config:
        ## NOTE(review): origins are hard-coded dev hosts - parameterize for
        ## other environments.
        origins:
          - "http://localhost:3000"
          - "http://127.0.0.1:3000"
          - "http://192.168.0.94:50001"
        methods:
          - GET
          - POST
          - DELETE
          - OPTIONS
        headers:
          - Accept
          - Authorization
          - Content-Type
          - X-Client-Info
          - apikey
          - Mcp-Session-Id
        ## Clients must be able to read the session id issued by the MCP server.
        exposed_headers:
          - Mcp-Session-Id
        credentials: true
        max_age: 3600

## Protected Dashboard - catch all remaining routes
#- name: dashboard
#  _comment: 'Studio: /* -> http://studio:3000/*'
#  url: http://studio:3000/
#  routes:
#    - name: dashboard-all
#      strip_path: true
#      paths:
#        - /
#  plugins:
#    - name: cors
#    - name: basic-auth
#      config:
#        hide_credentials: true
View File

@ -0,0 +1,364 @@
--[ Database Schema Version ]--
-- Version: 1.0.0
-- Last Updated: 2024-02-24
-- Description: Core schema setup for ClassConcepts with neoFS filesystem integration
-- Dependencies: auth.users (Supabase Auth)
--[ Validation ]--
-- Fail fast if the prerequisites this script relies on are missing.
do $$
begin
    -- Verify required extensions
    if not exists (select 1 from pg_extension where extname = 'uuid-ossp') then
        raise exception 'Required extension uuid-ossp is not installed';
    end if;
    -- Verify auth schema exists (provided by Supabase Auth)
    if not exists (select 1 from information_schema.schemata where schema_name = 'auth') then
        raise exception 'Required auth schema is not available';
    end if;
    -- Verify storage schema exists (provided by Supabase Storage)
    if not exists (select 1 from information_schema.schemata where schema_name = 'storage') then
        raise exception 'Required storage schema is not available';
    end if;
end $$;
--[ 1. Extensions ]--
create extension if not exists "uuid-ossp";
-- Create rpc schema if it doesn't exist
-- NOTE(review): no objects are created in `rpc` within this file; presumably
-- later migrations add functions here - verify the grant is still required.
create schema if not exists rpc;
grant usage on schema rpc to anon, authenticated;
-- Create exec_sql function for admin operations.
-- SECURITY: SECURITY DEFINER + EXECUTE of arbitrary text makes this function
-- equivalent to owner-level SQL access for anyone allowed to call it, and
-- Postgres grants EXECUTE on new functions to PUBLIC by default. Access is
-- therefore revoked explicitly below and limited to the backend service role.
create or replace function exec_sql(query text)
returns void as $$
begin
    execute query;
end;
$$ language plpgsql security definer
   set search_path = public;

-- Lock down: API-facing roles must never be able to run arbitrary SQL.
revoke execute on function exec_sql(text) from public, anon, authenticated;
grant execute on function exec_sql(text) to service_role;
-- Create updated_at trigger function
-- Generic row trigger: stamps updated_at with the current UTC time on write.
-- NOTE(review): security definer looks unnecessary for a timestamp trigger -
-- confirm it is intentional.
create or replace function public.handle_updated_at()
returns trigger as $$
begin
    new.updated_at = timezone('utc'::text, now());
    return new;
end;
$$ language plpgsql security definer;
-- Create completed_at trigger function for document artefacts
-- Stamps completed_at when status transitions into 'completed'.
-- NOTE(review): references OLD, so this assumes an UPDATE row trigger; on an
-- INSERT trigger the OLD comparison is NULL and completed_at is never set -
-- verify the trigger definition only fires on UPDATE.
create or replace function public.set_completed_at()
returns trigger as $$
begin
    if NEW.status = 'completed' and OLD.status != 'completed' then
        NEW.completed_at = now();
    end if;
    return NEW;
end;
$$ language plpgsql security definer;
--[ 5. Core Tables ]--
-- Base user profiles
-- One row per auth.users row; removed automatically when the auth user is deleted.
create table if not exists public.profiles (
    id uuid primary key references auth.users(id) on delete cascade,
    email text not null unique,
    user_type text not null check (
        user_type in (
            'teacher',
            'student',
            'email_teacher',
            'email_student',
            'developer',
            'superadmin'
        )
    ),
    username text not null unique,
    full_name text,
    display_name text,
    metadata jsonb default '{}'::jsonb,
    user_db_name text,
    school_db_name text,
    neo4j_sync_status text default 'pending' check (neo4j_sync_status in ('pending', 'ready', 'failed')),
    neo4j_synced_at timestamp with time zone,
    last_login timestamp with time zone,
    created_at timestamp with time zone default timezone('utc'::text, now()),
    updated_at timestamp with time zone default timezone('utc'::text, now())
);
comment on table public.profiles is 'User profiles linked to Supabase auth.users';
-- Fixed: the previous column comment claimed only 'teacher or student', which
-- contradicted the CHECK constraint above; list the full value set instead.
comment on column public.profiles.user_type is 'Type of user: teacher, student, email_teacher, email_student, developer, or superadmin';
-- Active institutes
-- Schools/organisations; Neo4j sync state is tracked separately for the
-- public and private graph databases.
create table if not exists public.institutes (
    id uuid primary key default uuid_generate_v4(),
    name text not null,
    urn text unique, -- UK school Unique Reference Number (nullable for non-UK)
    status text not null default 'active' check (status in ('active', 'inactive', 'pending')),
    address jsonb default '{}'::jsonb,
    website text,
    metadata jsonb default '{}'::jsonb,
    geo_coordinates jsonb default '{}'::jsonb,
    neo4j_uuid_string text,
    neo4j_public_sync_status text default 'pending' check (neo4j_public_sync_status in ('pending', 'synced', 'failed')),
    neo4j_public_sync_at timestamp with time zone,
    neo4j_private_sync_status text default 'not_started' check (neo4j_private_sync_status in ('not_started', 'pending', 'synced', 'failed')),
    neo4j_private_sync_at timestamp with time zone,
    created_at timestamp with time zone default timezone('utc'::text, now()),
    updated_at timestamp with time zone default timezone('utc'::text, now())
);
comment on table public.institutes is 'Active institutes in the system';
comment on column public.institutes.geo_coordinates is 'Geospatial coordinates from OSM search (latitude, longitude, boundingbox)';
--[ 6. neoFS Filesystem Tables ]--
-- File cabinets for organizing files
create table if not exists public.file_cabinets (
id uuid primary key default uuid_generate_v4(),
user_id uuid not null references public.profiles(id) on delete cascade,
name text not null,
created_at timestamp with time zone default timezone('utc'::text, now())
);
comment on table public.file_cabinets is 'User file cabinets for organizing documents and files';
-- Files stored in cabinets
create table if not exists public.files (
id uuid primary key default uuid_generate_v4(),
cabinet_id uuid not null references public.file_cabinets(id) on delete cascade,
name text not null,
path text not null,
bucket text default 'file-cabinets' not null,
created_at timestamp with time zone default timezone('utc'::text, now()),
mime_type text,
metadata jsonb default '{}'::jsonb,
size text,
category text generated always as (
case
when mime_type like 'image/%' then 'image'
when mime_type = 'application/pdf' then 'document'
when mime_type in ('application/msword', 'application/vnd.openxmlformats-officedocument.wordprocessingml.document') then 'document'
when mime_type in ('application/vnd.ms-excel', 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet') then 'spreadsheet'
when mime_type in ('application/vnd.ms-powerpoint', 'application/vnd.openxmlformats-officedocument.presentationml.presentation') then 'presentation'
when mime_type like 'audio/%' then 'audio'
when mime_type like 'video/%' then 'video'
else 'other'
end
) stored
);
comment on table public.files is 'Files stored in user cabinets with automatic categorization';
comment on column public.files.category is 'Automatically determined file category based on MIME type';
-- AI brains for processing files
create table if not exists public.brains (
id uuid primary key default uuid_generate_v4(),
user_id uuid not null references public.profiles(id) on delete cascade,
name text not null,
purpose text,
created_at timestamp with time zone default timezone('utc'::text, now())
);
comment on table public.brains is 'AI brains for processing and analyzing user files';
-- Brain-file associations
create table if not exists public.brain_files (
brain_id uuid not null references public.brains(id) on delete cascade,
file_id uuid not null references public.files(id) on delete cascade,
primary key (brain_id, file_id)
);
comment on table public.brain_files is 'Associations between AI brains and files for processing';
-- Document artefacts from file processing
-- One row per extracted artefact of a source file; extraction lifecycle is
-- tracked via status/started_at/completed_at (completed_at is maintained by
-- trigger_set_completed_at, defined in the Triggers section below).
create table if not exists public.document_artefacts (
id uuid primary key default uuid_generate_v4(),
file_id uuid references public.files(id) on delete cascade,
-- page index within the source document; defaults to 0 — presumably used
-- for whole-file / single-page artefacts, TODO confirm with the extractor
page_number integer default 0 not null,
-- artefact kind (free text; not constrained by a check here)
type text not null,
-- storage path of the artefact, relative to its storage root
rel_path text not null,
size_tag text,
language text,
chunk_index integer,
-- arbitrary extractor-specific metadata
extra jsonb,
created_at timestamp with time zone default timezone('utc'::text, now()),
-- NOTE(review): default 'completed' is unusual for a pipeline state column
-- (rows inserted without an explicit status skip 'pending') — confirm intent
status text default 'completed' not null check (status in ('pending', 'processing', 'completed', 'failed')),
started_at timestamp with time zone default timezone('utc'::text, now()),
completed_at timestamp with time zone,
error_message text
);
comment on table public.document_artefacts is 'Extracted artefacts from document processing';
comment on column public.document_artefacts.status is 'Extraction status: pending, processing, completed, or failed';
comment on column public.document_artefacts.started_at is 'Timestamp when extraction process started';
comment on column public.document_artefacts.completed_at is 'Timestamp when extraction process completed (success or failure)';
comment on column public.document_artefacts.error_message is 'Error details if extraction failed';
-- Function execution logs
-- Append-only processing log keyed to the file being processed; rows are
-- dropped automatically when the file is deleted (cascade FK).
create table if not exists public.function_logs (
-- serial PK (unlike the uuid PKs elsewhere): rows are sequential log
-- entries and are never referenced by other tables
id serial primary key,
file_id uuid references public.files(id) on delete cascade,
-- NOTE(review): column named "timestamp" shadows the SQL type keyword;
-- legal unquoted in Postgres, but some tools may require quoting
timestamp timestamp with time zone default timezone('utc'::text, now()),
step text,
message text,
-- structured payload for the step (shape not constrained here)
data jsonb
);
comment on table public.function_logs is 'Logs of function executions and processing steps';
--[ 7. Relationship Tables ]--
-- Institute memberships
-- Join table between profiles and institutes carrying the member's role.
-- unique(profile_id, institute_id) enforces at most one membership per
-- profile per institute; updated_at is maintained by trigger below.
create table if not exists public.institute_memberships (
id uuid primary key default uuid_generate_v4(),
profile_id uuid references public.profiles(id) on delete cascade,
institute_id uuid references public.institutes(id) on delete cascade,
-- only these two roles are allowed; admin status is handled elsewhere
-- (see the is_admin / profiles.user_type machinery in the auth functions)
role text not null check (role in ('teacher', 'student')),
-- per-institute tldraw UI preferences for this member
tldraw_preferences jsonb default '{}'::jsonb,
metadata jsonb default '{}'::jsonb,
created_at timestamp with time zone default timezone('utc'::text, now()),
updated_at timestamp with time zone default timezone('utc'::text, now()),
unique(profile_id, institute_id)
);
comment on table public.institute_memberships is 'Manages user roles and relationships with institutes';
-- Membership requests
-- Pending/approved/rejected requests to join an institute. No uniqueness
-- constraint here, so a profile may hold multiple requests for the same
-- institute — presumably intentional (re-apply after rejection); verify.
create table if not exists public.institute_membership_requests (
id uuid primary key default uuid_generate_v4(),
profile_id uuid references public.profiles(id) on delete cascade,
institute_id uuid references public.institutes(id) on delete cascade,
-- mirrors institute_memberships.role values
requested_role text check (requested_role in ('teacher', 'student')),
status text default 'pending' check (status in ('pending', 'approved', 'rejected')),
metadata jsonb default '{}'::jsonb,
created_at timestamp with time zone default timezone('utc'::text, now()),
updated_at timestamp with time zone default timezone('utc'::text, now())
);
comment on table public.institute_membership_requests is 'Tracks requests to join institutes';
--[ 8. Audit Tables ]--
-- System audit logs
-- Generic audit trail: who (profile_id) did what (action_type) to which
-- row (table_name + record_id), with a jsonb diff of the change.
-- on delete set null keeps audit history even after the actor is deleted.
create table if not exists public.audit_logs (
id uuid primary key default uuid_generate_v4(),
profile_id uuid references public.profiles(id) on delete set null,
action_type text,
table_name text,
record_id uuid,
changes jsonb,
created_at timestamp with time zone default timezone('utc'::text, now())
);
comment on table public.audit_logs is 'System-wide audit trail for important operations';
--[ 9. Exam Specifications ]--
-- One row per exam-board specification plus its primary source document.
create table if not exists public.eb_specifications (
id uuid primary key default uuid_generate_v4(),
-- business key; eb_exams.spec_code references this column
spec_code text unique,
exam_board_code text,
award_code text,
subject_code text,
first_teach text,
spec_ver text,
-- Document storage details
storage_loc text,
doc_type text check (doc_type in ('pdf', 'json', 'md', 'html', 'txt', 'doctags')),
doc_details jsonb default '{}'::jsonb, -- e.g. Tika extract
docling_docs jsonb default '{}'::jsonb, -- e.g. Docling extracts settings and storage locations
created_at timestamp with time zone default timezone('utc'::text, now()),
-- kept current by trigger_eb_specifications_updated_at (Triggers section)
updated_at timestamp with time zone default timezone('utc'::text, now())
);
comment on table public.eb_specifications is 'Exam board specifications and their primary document';
comment on column public.eb_specifications.spec_code is 'Unique code for the specification, used for linking exams';
comment on column public.eb_specifications.doc_details is 'Tika extract of the specification document';
comment on column public.eb_specifications.docling_docs is 'Docling extracts settings and storage locations for the specification document';
--[ 10. Exam Papers / Entries ]--
-- One row per exam document (question paper, mark scheme, etc.), linked to
-- its parent specification by spec_code; deleting a specification cascades
-- and removes its exam documents.
create table if not exists public.eb_exams (
id uuid primary key default uuid_generate_v4(),
-- business key for this exam document
exam_code text unique,
-- FK to the parent specification's business key (not its uuid id)
spec_code text references public.eb_specifications(spec_code) on delete cascade,
paper_code text,
tier text,
session text,
-- QP / MS / ER / OT per the column comment below (not check-constrained)
type_code text,
-- Document storage details
storage_loc text,
doc_type text check (doc_type in ('pdf', 'json', 'md', 'html', 'txt', 'doctags')),
doc_details jsonb default '{}'::jsonb, -- e.g. Tika extract
docling_docs jsonb default '{}'::jsonb, -- e.g. Docling extracts settings and storage locations
created_at timestamp with time zone default timezone('utc'::text, now()),
-- kept current by trigger_eb_exams_updated_at (Triggers section)
updated_at timestamp with time zone default timezone('utc'::text, now())
);
comment on table public.eb_exams is 'Exam papers and related documents linked to specifications';
comment on column public.eb_exams.exam_code is 'Unique code for the exam paper, used for linking questions';
comment on column public.eb_exams.type_code is 'Type code for the exam document: Question Paper (QP), Mark Scheme (MS), Examiner Report (ER), Other (OT)';
comment on column public.eb_exams.doc_details is 'Tika extract of the exam paper document';
comment on column public.eb_exams.docling_docs is 'Docling extracts settings and storage locations for the exam paper document';
--[ 11. Indexes ]--
-- Index for geospatial queries
-- gin index: geo_coordinates is presumably a jsonb column — TODO confirm
-- (gin on other types would need an explicit operator class)
create index if not exists idx_institutes_geo_coordinates on public.institutes using gin(geo_coordinates);
create index if not exists idx_institutes_urn on public.institutes(urn);
-- Document artefacts indexes
-- composite indexes serve the common "artefacts of file X by status/type" lookups
create index if not exists idx_document_artefacts_file_status on public.document_artefacts(file_id, status);
create index if not exists idx_document_artefacts_file_type on public.document_artefacts(file_id, type);
create index if not exists idx_document_artefacts_status on public.document_artefacts(status);
-- File indexes
create index if not exists idx_files_cabinet_id on public.files(cabinet_id);
create index if not exists idx_files_mime_type on public.files(mime_type);
-- category is a stored generated column, so it indexes like a plain column
create index if not exists idx_files_category on public.files(category);
-- Brain indexes
create index if not exists idx_brains_user_id on public.brains(user_id);
-- Exam board indexes
-- NOTE(review): exam_code and spec_code already have unique-constraint
-- indexes on their home tables, so idx_eb_exams_exam_code and
-- idx_eb_specifications_spec_code below are redundant — consider dropping.
create index if not exists idx_eb_exams_exam_code on public.eb_exams(exam_code);
-- spec_code is the FK to eb_specifications; indexed to speed joins/cascades
create index if not exists idx_eb_exams_spec_code on public.eb_exams(spec_code);
create index if not exists idx_eb_exams_paper_code on public.eb_exams(paper_code);
create index if not exists idx_eb_exams_tier on public.eb_exams(tier);
create index if not exists idx_eb_exams_session on public.eb_exams(session);
create index if not exists idx_eb_exams_type_code on public.eb_exams(type_code);
create index if not exists idx_eb_specifications_spec_code on public.eb_specifications(spec_code);
create index if not exists idx_eb_specifications_exam_board_code on public.eb_specifications(exam_board_code);
create index if not exists idx_eb_specifications_award_code on public.eb_specifications(award_code);
create index if not exists idx_eb_specifications_subject_code on public.eb_specifications(subject_code);
--[ 12. Triggers ]--
-- NOTE(review): unlike the create-table statements above, these use plain
-- "create trigger" (no or-replace / drop-if-exists), so re-running this
-- script fails if the triggers already exist — confirm this runs only once.
-- Set completed_at when document artefact status changes to completed
-- (the transition check itself lives in public.set_completed_at; the
-- trigger fires unconditionally on every row update)
create trigger trigger_set_completed_at
before update on public.document_artefacts
for each row
execute function public.set_completed_at();
-- Set updated_at on profile updates
create trigger trigger_profiles_updated_at
before update on public.profiles
for each row
execute function public.handle_updated_at();
-- Set updated_at on institute updates
create trigger trigger_institutes_updated_at
before update on public.institutes
for each row
execute function public.handle_updated_at();
-- Set updated_at on institute_memberships updates
create trigger trigger_institute_memberships_updated_at
before update on public.institute_memberships
for each row
execute function public.handle_updated_at();
-- Set updated_at on institute_membership_requests updates
-- Fix: the original attached this trigger to public.institute_memberships
-- (copy-paste error), which left institute_membership_requests.updated_at
-- never maintained and put a second, duplicate updated_at trigger on
-- institute_memberships. Target the requests table, as the name says.
create trigger trigger_institute_membership_requests_updated_at
before update on public.institute_membership_requests
for each row
execute function public.handle_updated_at();
-- Set updated_at on eb_specifications updates
create trigger trigger_eb_specifications_updated_at
before update on public.eb_specifications
for each row
execute function public.handle_updated_at();
-- Set updated_at on eb_exams updates
create trigger trigger_eb_exams_updated_at
before update on public.eb_exams
for each row
execute function public.handle_updated_at();

View File

@ -1,6 +1,6 @@
--[ 8. Auth Functions ]-- --[ 8. Auth Functions ]--
-- Create a secure function to check admin status -- Create a secure function to check admin status
create or replace function auth.is_admin() create or replace function public.is_admin()
returns boolean as $$ returns boolean as $$
select coalesce( select coalesce(
(select true (select true
@ -12,32 +12,24 @@ returns boolean as $$
$$ language sql security definer; $$ language sql security definer;
-- Create a secure function to check super admin status -- Create a secure function to check super admin status
create or replace function auth.is_super_admin() create or replace function public.is_super_admin()
returns boolean as $$ returns boolean as $$
select coalesce( select coalesce(
(select role = 'supabase_admin' (select true
from auth.users from public.profiles
where id = auth.uid()), where id = auth.uid()
and user_type = 'admin'),
false false
); );
$$ language sql security definer; $$ language sql security definer;
-- Create public wrappers for the auth functions -- Create public wrapper functions
create or replace function public.is_admin() -- Note: These are now the main implementation functions, not wrappers
returns boolean as $$ -- The original auth schema functions have been moved to public schema
select auth.is_admin();
$$ language sql security definer;
create or replace function public.is_super_admin()
returns boolean as $$
select auth.is_super_admin();
$$ language sql security definer;
-- Grant execute permissions -- Grant execute permissions
grant execute on function public.is_admin to authenticated; grant execute on function public.is_admin to authenticated;
grant execute on function public.is_super_admin to authenticated; grant execute on function public.is_super_admin to authenticated;
grant execute on function auth.is_admin to authenticated;
grant execute on function auth.is_super_admin to authenticated;
-- Initial admin setup function -- Initial admin setup function
create or replace function public.setup_initial_admin(admin_email text) create or replace function public.setup_initial_admin(admin_email text)
@ -48,7 +40,7 @@ as $$
declare declare
result json; result json;
begin begin
-- Only allow this to run as service role or supabase_admin -- Only allow this to run as service role or superuser
if not ( if not (
current_user = 'service_role' current_user = 'service_role'
or exists ( or exists (
@ -84,7 +76,7 @@ $$;
-- Grant execute permissions -- Grant execute permissions
revoke execute on function public.setup_initial_admin from public; revoke execute on function public.setup_initial_admin from public;
grant execute on function public.setup_initial_admin to authenticated, service_role, supabase_admin; grant execute on function public.setup_initial_admin to authenticated, service_role;
-- Create RPC wrapper for REST API access -- Create RPC wrapper for REST API access
create or replace function rpc.setup_initial_admin(admin_email text) create or replace function rpc.setup_initial_admin(admin_email text)
@ -98,7 +90,7 @@ end;
$$; $$;
-- Grant execute permissions for RPC wrapper -- Grant execute permissions for RPC wrapper
grant execute on function rpc.setup_initial_admin to authenticated, service_role, supabase_admin; grant execute on function rpc.setup_initial_admin to authenticated, service_role;
--[ 9. Utility Functions ]-- --[ 9. Utility Functions ]--
-- Check if database is ready -- Check if database is ready

Some files were not shown because too many files have changed in this diff Show More